We all, most programmers included, have effective intuitions about human relations; the attendant tasks below are named after human roles (proprietor, notifier, courier, warehouse) so that those intuitions can do some of the work.
Tasks are independent entities
Why do servers need attendant tasks?
A proprietor `owns' a service, which usually means it owns a resource.
The kernel handles the hardware in this example.
Receive( &serverTid, eventId );
Reply( serverTid, ... );
FOREVER {
data = AwaitEvent( eventId ); // data includes event type and volatile data
switch( event-type ) {
case RCV_INT:
Send( serverTid, {NOT_RCV, byte}, ... );
break;
case XMT_INT:
// test transmitter?
Send( serverTid, {NOT_XMIT}, ... ); // a byte can now be transmitted
break;
default: // This will never happen because your kernel is bug-free.
}
}
// queues & fifos
notifierTid = Create( notifier ); // Should notifier code name be hard coded?
Send( notifierTid, eventId, ... ); // On return the notifier is known to be okay
RegisterAs( ); //On return requests can begin.
FOREVER {
Receive( &requesterTid, {request-type, data} );
switch ( request-type ) {
case NOT_RCV:
Reply( requesterTid, ... );
enqueue( rcvfifo, data );
if ( ! empty( rcvQ ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case NOT_XMIT:
Reply( requesterTid, ... );
if ( ! empty( xmitfifo ) ) write( UART, dequeue( xmitfifo ) );
else xmitRdy = true;
break;
case CLIENT_RCV:
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue ( xmitfifo, data );
if ( xmitRdy ) { write( UART, dequeue( xmitfifo ) ); xmitRdy = false; }
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
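Clients never see any of this; the Send is normally hidden inside small wrapper functions. A minimal sketch in the same pseudocode, where the names Getc/Putc and the exact message layout are illustrative assumptions, not something the kernel or the server prescribes:
Getc( serverTid ) {
    Send( serverTid, {CLIENT_RCV}, &byte );              // blocks until the server has a byte for us
    return byte;
}
Putc( serverTid, byte ) {
    return Send( serverTid, {CLIENT_XMIT, byte}, ... );  // returns as soon as the byte is queued
}
Getc blocks its caller until a byte arrives, which is exactly why the server parks callers on rcvQ instead of replying at once.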
Simplest is best
Receive( &courierTid, ... );
Reply( courierTid, ... );
FOREVER {
Receive( &courierTid, ... );
data = AwaitEvent( eventId ); // data includes event-type and volatile data
switch( event-type ) {
case RCV_INT:
Reply( courierTid, {NOT_RCV, data}, ... );
break;
case XMT_INT:
// test transmitter?
Reply( courierTid, {NOT_XMIT}, ... ); // a byte can now be transmitted
break;
default: // This will never happen because your kernel is bug-free.
}
}
Receive( &serverTid, notifierTid, ... );
Send( notifierTid, ... );
Reply( serverTid, ... );
FOREVER {
Send( notifierTid, ..., {req, data} );
Send( serverTid, {req, data} );
}
// queues & fifos
notifierTid = Create( notifier ); //Should notifier code name be hard coded?
courierTid = Create( courier );
Send( courierTid, notifierTid, ... ); // On return courier & notifier are known to be okay
RegisterAs( ); //On return client requests can begin.
FOREVER {
Receive( &requesterTid, {request-type, data} );
switch ( request-type ) {
case NOT_RCV:
Reply( requesterTid, ... );
enqueue( rcvfifo, data );
if ( ! empty( rcvQ ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case NOT_XMIT:
Reply( requesterTid, ... );
if ( ! empty( xmitfifo ) ) write( UART, dequeue( xmitfifo ) );
else xmitRdy = true;
break;
case CLIENT_RCV:
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue ( xmitfifo, data );
if ( xmitRdy ) { write( UART, dequeue( xmitfifo ) ); xmitRdy = false; }
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
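The enqueue/dequeue/empty helpers used above are ordinary bounded buffers, and nothing in the server ever blocks on them. One possible implementation, sketched as a byte ring buffer (the struct, the size and the names are illustrative; rcvQ and xmitQ are the same thing with int elements holding tids):
#define FIFO_SIZE 64
struct fifo { char buf[ FIFO_SIZE ]; int head; int tail; int count; };
int empty( struct fifo *f ) { return f->count == 0; }
void enqueue( struct fifo *f, char c ) {        // caller is responsible for not overfilling
    f->buf[ f->tail ] = c;
    f->tail = ( f->tail + 1 ) % FIFO_SIZE;
    f->count = f->count + 1;
}
char dequeue( struct fifo *f ) {                // caller tests empty( ) first, as the servers above do
    char c = f->buf[ f->head ];
    f->head = ( f->head + 1 ) % FIFO_SIZE;
    f->count = f->count - 1;
    return c;
}
The one policy decision this leaves open is what to do when a fifo fills: drop, overwrite, or stop accepting CLIENT_XMIT until there is room.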
This gets you through a bottleneck in which at most two events arrive faster than the server can accept them: one can be held by the courier and one by the notifier.
Remember that all the calls provide error returns; you can and should use them for error recovery (see the sketch below).
Another possible arrangement for initialization
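On error returns, one concrete example: the courier's forwarding loop can check each Send instead of assuming success. The negative-return convention and the give-up policy below are assumptions about your kernel and your design, not requirements:
FOREVER {
    if ( Send( notifierTid, ..., {req, data} ) < 0 ) break;   // notifier gone, or its tid is stale
    if ( Send( serverTid, {req, data} ) < 0 ) break;          // server gone, or its tid is stale
}
// report the failure and let whoever created the courier decide how to recover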
Distributed gating
Add a warehouse between the courier and the notifier.
Receive( &warehouseTid, ... );
Reply( warehouseTid, ... );
FOREVER {
data = AwaitEvent( eventId ); // data includes event-type and volatile data
switch( event-type ) {
case RCV_INT:
Send( warehouseTid, {NOT_RCV, data}, ... );
break;
case XMT_INT:
// test transmitter?
Send( warehouseTid, {NOT_XMIT}, ... ); // a byte can now be transmitted
break;
default: // This will never happen because your kernel is bug-free.
}
}
// data structures
Receive( &courierTid, notifierTid, ... );
Send( notifierTid, ... );
Reply( courierTid, ... );
FOREVER {
Receive( &requester, {req-type, data} );
switch( req-type ) {
case COUR_RCV:
enqueue( rcvQ, requester );
if( !empty( msgbuf ) ) Reply( dequeue( rcvQ ), extract( msgbuf ), ... );
break;
case COUR_XMIT:
Reply( requester, ... );
enqueue( xmitfifo, unpack( data ) );
if ( xmitRdy ) { write( UART, dequeue( xmitfifo ) ); xmitRdy = false; }
break;
case NOT_RCV:
Reply( requester , ... );
install( msgbuf, pack( data ) );
if( !empty( rcvQ ) && !empty( msgbuf ) ) Reply( dequeue( rcvQ ), extract( msgbuf ), ... );
break;
case NOT_XMIT:
Reply( requester, ... );
if( !empty( xmitfifo ) ) write( UART, dequeue( xmitfifo ) );
else xmitRdy = true;
break;
default:
}
}
Receive( &serverTid, {notifierTid, warehouseTid}, ... );
Send( warehouseTid, notifierTid, ... );
Reply( serverTid );
FOREVER {
Send( warehouseTid, {req, data} );
Send( serverTid, {req, data} );
}
// queues & fifos
notifierTid = Create( notifier ); // Should notifier code name be hard coded?
warehouseTid = Create( warehouse );
courierTid = Create( courier );
Send( courierTid, {notifierTid, warehouseTid}, ... ); // On return courier, warehouse & notifier are known to be okay
RegisterAs( ); // On return client requests can begin.
FOREVER {
Receive( &requesterTid, {request-type, data} );
switch ( request-type ) {
case COUR_RCV:
Reply( requesterTid, ... );
enqueue( rcvfifo, data );
if ( !empty( rcvQ ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case COUR_XMIT:
enqueue( xmitQ, requesterTid );
if ( !empty( xmitfifo ) ) Reply( dequeue( xmitQ ), dequeue( xmitfifo ) );
break;
case CLIENT_RCV:
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue( xmitfifo, data );
if ( !empty( xmitQ ) ) Reply( dequeue( xmitQ ), dequeue( xmitfifo ) );
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
This structure clears up problems when the notifier runs too fast for the server.
Two issues:
Define `bottleneck'.
Called a guard.
What this amounts to is
Return to: