Why do servers need attendant tasks?
// Notifier (Send-based): waits on hardware events and pushes each one to the
// server with Send. The server must return to Receive quickly or the notifier
// blocks and events can be lost.
Receive( &serverTid, eventId );        // learn the server's tid and which event to await
Reply( serverTid, ... );               // initialization handshake complete
FOREVER {
data = AwaitEvent( eventId ); // data includes event type and volatile data; was `eventid` — must match the name received above
switch( event-type ) {
case RCV_INT:
Send( serverTid, {NOT_RCV, byte}, ... );   // deliver the received byte to the server
break;
case XMT_INT:
// test transmitter?
Send( serverTid, NOT_XMIT, ); // byte is to be transmitted
break;
default: // This will never happen because your kernel is bug-free.
}
}
// queues & fifos
notifierPid = Create( notifier );
Send( notifierTid, MyTid( ), ... ); //On return notifier is known to be okay
RegisterAs( ); //On return requests can begin.
FOREVER {
requesterTid = Receive( request, {request-type, data} );
switch ( request-type ) {
case NOT_RCV:
Reply( requesterTid, ... );
enqueue( rcvfifo, data );
if ( ! empty( rcvQ ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case NOT_XMIT:
Reply( requesterTid, ... );
if ( ! empty( xmitfifo ) ) write( UART, dequeue( xmitfifo ) );
else xmitRdy = true;
break;
case CLIENT_RCV:
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue ( xmitfifo, data );
if ( xmitRdy ) { write( UART, dequeue( xmitfifo ) ); xmitRdy = false; }
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
Simplest is best
// Notifier paired with a courier: the courier Sends, the notifier Replies,
// so the notifier never blocks on a busy server.
Receive( &courierTid, ... );           // initialization handshake with the courier
Reply( courierTid, ... );
FOREVER {
Receive( &courierTid, ... );           // courier asks for the next event before it occurs
data = AwaitEvent( eventid ); // data includes event-type and volatile data
switch( event-type ) {
case RCV_INT:
Reply( courierTid, data );             // hand the event to the waiting courier
break;
case XMT_INT:
// test transmitter?
Reply( courierTid, NOT_XMIT, ); // byte is to be transmitted
break;
default: // This will never happen because your kernel is bug-free.
}
}
// Courier: shuttles events from the notifier to the server so that neither
// ever blocks waiting on the other.
Receive( &serverTid, notifierTid, ... );   // server tells the courier who the notifier is
Send( notifierTid, ... );                  // handshake: notifier is known to be okay
Reply( serverTid );                        // handshake: courier is known to be okay
FOREVER {
Send( notifierTid, ..., {req, data} );     // block until the notifier has an event
Send( serverTid, {req, data} );            // forward it to the server
}
// queues & fifos
// Serial server using a courier in front of the notifier. Same queues as the
// notifier-only variant; only the request tags change (COUR_* instead of NOT_*).
notifierTid = Create( notifier );
courierTid = Create( courier );
Send( courierTid, notifierTid, ... ); // On return courier & notifier are known to be okay
RegisterAs( ); // On return client requests can begin.
FOREVER {
requesterTid = Receive( request, {request-type, data} );
switch ( request-type ) {
case COUR_RCV:                         // courier delivered a received byte
Reply( requesterTid, ... );            // free the courier immediately
enqueue( rcvfifo, data );
if ( ! empty( rcvQ ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case COUR_XMIT:                        // transmitter is ready
Reply( requesterTid, ... );
if ( ! empty( xmitfifo ) ) write( UART, dequeue( xmitfifo ) );
else xmitRdy = true;
break;
case CLIENT_RCV:                       // client reply is deferred until a byte exists
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );   // was missing the closing ')'
break;
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue ( xmitfifo, data );
if ( xmitRdy ) { write( UART, dequeue( xmitfifo ) ); xmitRdy = false; }
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
// Fragment: server cases when the courier also carries bytes TOWARD the
// hardware (PROP_XMIT). The courier is held Reply-blocked until there is work.
case COUR_XMIT:
if ( ! empty( xmitfifo ) ) Reply( requesterTid, { PROP_XMIT, UART, dequeue( xmitfifo ) } );
else xmitRdy = true;                   // courier stays Reply-blocked until a byte arrives
break;
...
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue ( xmitfifo, data );
if ( xmitRdy ) {
// NOTE(review): requesterTid here is the client, already replied to above;
// this Reply presumably should target the held courier's tid — confirm.
Reply( requesterTid, { PROP_XMIT, UART, dequeue( xmitfifo ) } );
xmitRdy = false;
} else Reply( requesterTid, ... );     // was `RequesterTid` — no such name; case typo
break;
and in the Notifier:
// Notifier side of the proposed-transmit protocol: the courier's request may
// itself carry a byte for the notifier to write to the UART.
Receive( &courierTid, { req-type, uart, byte }, ... );
if ( req-type == PROP_XMIT ) write( uart, byte );
This gets you through a bottleneck, provided no more than two events arrive in quick succession.
Remember that all the calls provide error returns. You can/should use them for error recovery
Another possible arrangement for initialization
Distributed gating
Add a warehouse between the courier and the notifier.
// Notifier when a warehouse sits between it and the courier: same shape as the
// Send-based notifier, but events go to the warehouse.
Receive( &warehouseTid, ... );         // was `&warhouseTid` — must match the name used below
Reply( warehouseTid, ... );
FOREVER {
data = AwaitEvent( eventid ); // data includes event-type and volatile data
switch( event-type ) {
case RCV_INT:
Send( warehouseTid, data );
break;
case XMT_INT:
// test transmitter?
Send( warehouseTid, NOT_XMIT, ); // byte is to be transmitted
break;
default: // This will never happen because your kernel is bug-free.
}
}
// data structures
// Warehouse: buffers traffic between notifier and courier in both directions,
// so bursts of events do not overrun the courier/server path.
Receive( &courierTid, notifierTid, ... );  // courier tells the warehouse who the notifier is
Send( notifierTid, ... );                  // handshake with the notifier
Reply( courierTid, ... );                  // handshake with the courier
FOREVER {
Receive( &requester, {req-type, data} );   // dropped stray trailing comma
switch( req-type ) {
case COUR_RCV:                             // courier asks for the next received message
enqueue( rcvQ, requester );
if( !empty( msgbuf ) ) Reply( dequeue( rcvQ ), extract( msgbuf ), ... );
break;
case COUR_XMIT:                            // courier delivers bytes to transmit
Reply( requester, ... );
enqueue( xmitfifo, unpack( data ) );
if ( xmitRdy ) { write( UART, dequeue( xmitfifo ) ); xmitRdy = false; }
break;
case NOT_RCV:                              // notifier delivers a received message
Reply( requester , ... );
install( msgbuf, pack( data ) );
if( !empty( rcvQ ) && !empty( msgbuf ) ) Reply( dequeue( rcvQ ), extract( msgbuf ), );   // was `msgfifo` — no such structure; messages live in msgbuf
break;
case NOT_XMIT:                             // transmitter is ready
Reply( requester, ... );
if( !empty( xmitfifo ) ) write( UART, dequeue( xmitfifo ) );
else xmitRdy = true;
break;
default:
}
}
// Courier between warehouse and server: same shuttle pattern as before, with
// the warehouse standing in for the notifier.
Receive( &serverTid, {notifierTid, warehouseTid} ... );   // server supplies both tids
Send( warehouseTid, notifierTid, ... );    // pass the notifier's tid on to the warehouse
Reply( serverTid );                        // handshake complete
FOREVER {
Send( warehouseTid, {req, data} );         // fetch the next item from the warehouse
Send( serverTid, {req, data} );            // deliver it to the server
}
// queues & fifos
// Proprietor with distributed gating: the courier is held Reply-blocked on an
// xmitQ (symmetric to rcvQ), so outgoing traffic is pulled rather than pushed.
notifierTid = Create( notifier ); // Should notifier code name be hard coded?
warehouseTid = Create( warehouse );
courierTid = Create( courier );
Send( courierTid, {warehouseTid, notifierTid}, ... );
// On return courier, warehouse & notifier are known to be okay
RegisterAs( ); // On return client requests can begin.
FOREVER {
Receive( &requesterTid, { request-type, message } );
switch ( request-type ) {
case COUR_RCV:                         // courier delivered an incoming message
Reply( requesterTid, ... );
enqueue( rcvfifo, message );
if ( !empty( rcvQ ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case COUR_XMIT:                        // courier asks for outgoing work; note: no
enqueue( xmitQ, requesterTid );        // immediate Reply — the courier is gated here
if ( !empty( xmitfifo ) ) Reply( dequeue( xmitQ ), dequeue( xmitfifo ) );
break;
case CLIENT_RCV:                       // client reply deferred until data arrives
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) ) Reply( dequeue( rcvQ ), dequeue( rcvfifo ) );
break;
case CLIENT_XMIT:
Reply( requesterTid, ... );
enqueue( xmitfifo, message );
if ( !empty( xmitQ ) ) Reply( dequeue( xmitQ ), dequeue( xmitfifo ) );
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
This structure clears up problems when the notifier runs too fast for the server.
Two issues:
Define 'bottleneck'.
In the solution above there should be, on average,
We also know that the Warehouse will buffer messages coming and going from the hardware.
A task that can screen requests can lower the buffering required in the Proprietor. We call such a task a receptionist.
// queues & fifos
propTid = MyParentTid( );
Send( propTid( ), ..., { notifierTid, warehouseTid, courierTid }, ... );
// On return Proprietor is known to be okay
Send( courierTid, { notifierTid, warehouseTid }, ... );
// On return courier, warehouse & notifier are known to be okay
RegisterAs( ); // On return client requests can begin.
FOREVER {
Receive( &requesterTid, { request-type, message } );
switch ( request-type ) {
case COUR_RCV:
Reply( requesterTid, ... );
enqueue( rcvfifo, message );
if ( !empty( rcvQ ) ) Reply( propTid, { dequeue( rcvQ ), dequeue( rcvfifo ) }, ... );
break;
case COUR_XMIT:
enqueue( xmitQ, requesterTid );
if ( !empty( xmitfifo ) ) Reply( dequeue( xmitQ ), dequeue( xmitfifo ) );
break;
case PROP_RCV:
enqueue( rcvQ, requesterTid );
if ( !empty( rcvfifo ) ) Reply( propTid, { dequeue( rcvQ ), dequeue( rcvfifo ) }, ... );
break;
case PROP_XMIT:
Reply( requesterTid, ... );
enqueue( xmitfifo, message );
if ( !empty( xmitQ ) ) Reply( dequeue( xmitQ ), dequeue( xmitfifo ), ... );
break;
default:
Reply( requesterTid, "Sorry. I don't have any spare change.\n" );
}
}
// queues & fifos
// Proprietor behind a receptionist: creates the whole task team, then pulls
// screened requests from the receptionist with Send.
notifierTid = Create( notifier );
warehouseTid = Create( warehouse );
courierTid = Create( courier );
receptionistTid = Create( receptionist );
Receive( &receptionistTid, ... );          // wait for the receptionist to check in
Reply( receptionistTid, { notifierTid, warehouseTid, courierTid }, ... );   // was missing the ',' before '...'
FOREVER {
Send( receptionistTid, ..., { req-type, tid, message }, ... );   // ask for the next screened request
switch ( req-type ) {
case RCV:
something( tid, message );             // e.g. update the database for this request
Reply( tid, message );                 // reply directly to the original requester
break;
case XMIT:
something( tid, message );
Reply( tid, message );
break;
default:
Twiddle-thumbs( );
}
}
something( ) might be maintaining a database.
Return to: