Example #1
def __init__(self, exchange, response_queue, request_routing_key, max_retries=3, timeout_ms=5000):
    self.exchange = exchange
    self.response_queue = response_queue
    self.request_routing_key = request_routing_key
    self.messagebus = MessageBus()
    self.max_retries = max_retries  # maximum number of times to retry on RPC timeout; -1 indicates infinite retries
    self.timeout_ms = timeout_ms  # set to -1 for no timeout

    self.polling_interval = 0.01  # in seconds

    self.sync_sequence_number = 0

    self.messagebus.exchange_declare(exchange, 'direct')
    # self.messagebus.queue_delete(queue=response_queue)  # clear it in case there are backed-up messages (EDIT: it *should* auto-delete)
    self.messagebus.queue_declare(queue=response_queue, auto_delete=True)
    self.messagebus.queue_purge(response_queue)
    self.messagebus.queue_bind(response_queue, exchange, routing_key=response_queue)
    logger.debug("Response queue '%s' is bound to key '%s' on exchange '%s'" % (response_queue, response_queue, exchange))
Example #2
def __init__(self,
             amqp_channel=None,         # if None, a new connection & channel will be created
             exchange=DEFAULT_RPC_EXCHANGE,
             request_routing_key=None,  # required
             reply_queue=None,          # required
             timeout_ms=10000,
             max_retries=3):

    self.amqp_channel = amqp_channel or MessageBus().channel
    self.exchange = exchange

    # Required parameters:
    for param in ('reply_queue', 'request_routing_key'):
        if locals()[param]:
            setattr(self, param, locals()[param])
        else:
            raise AmqpService.ParameterMissing("%s is a required parameter." % param)

    self.rpc_channel = RpcChannel(self.exchange, self.reply_queue, self.request_routing_key,
                                  max_retries=max_retries, timeout_ms=timeout_ms)
    self.amqp_rpc_controller = AmqpRpcController()
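
A hedged sketch of how this constructor might be invoked, assuming the enclosing class is the AmqpService whose ParameterMissing exception it raises; the queue and routing-key names are placeholders:

service = AmqpService(request_routing_key='index.requests',  # hypothetical routing key
                      reply_queue='index.replies')           # hypothetical queue name
# Omitting either required argument raises AmqpService.ParameterMissing.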
Example #3
class FakeJobCommand(object):

    current_footprints = {}  # keyed by job uuid; postprocess_job deletes entries by job.uuid
    messagebus = MessageBus()
    messagebus.exchange_declare('ngt.platefile.index', 'direct')

    @classmethod
    def get_plate_info(klass, somevar):
        # Stub: always returns a fixed (transaction_id, platefile_id) pair.
        return (12345, 67890)

    @classmethod
    def postprocess_job(klass, job, state):
        if job.uuid in klass.current_footprints:
            del klass.current_footprints[job.uuid]
        if state == 'failed':
            transaction_id, platefile_id = klass.get_plate_info(job.output)
            if transaction_id and platefile_id:
                idx_transaction_failed = {
                    'platefile_id': platefile_id,
                    'transaction_id': transaction_id
                }
                request = {
                    'sequence_number': 0,
                    'requestor': '',
                    'method': 'IndexTransactionFailed',
                    'payload': protocols.pack(protobuf.IndexTransactionFailed,
                                              idx_transaction_failed),
                }
                msg = Message(
                    protocols.pack(protobuf.BroxtonRequestWrapper, request))
                klass.messagebus.basic_publish(msg,
                                               exchange='ngt.platefile.index',
                                               routing_key='index')
                logger.debug("Message published: " + str(msg))

        return job
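
A sketch of how this classmethod might be exercised, assuming the module has been imported (the class body opens a MessageBus connection at definition time) and that any object with uuid and output attributes can stand in for a Job:

class FakeJob(object):  # hypothetical stand-in for a Job model instance
    uuid = 'abc-123'
    output = ''         # ignored: the fake get_plate_info always returns (12345, 67890)

# With state == 'failed', an IndexTransactionFailed message is packed and published.
job = FakeJobCommand.postprocess_job(FakeJob(), 'failed')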
Example #4
import sys
import logging

from ngt.messaging import messagebus
from ngt.messaging.messagebus import MessageBus
from amqplib.client_0_8 import Message
logger = logging.getLogger('dispatch')
logger.setLevel(logging.INFO)
d_logger = logging.getLogger('dispatch_debug')
#d_logger.addHandler(logging.FileHandler('dispatch.log', 'w') )
#d_logger.setLevel(logging.DEBUG)
#logging.getLogger().setLevel(logging.DEBUG)
#logging.getLogger('protocol').setLevel(logging.DEBUG)

REAPER_SWEEP_INTERVAL = 5 * 60 # Time in seconds between reaper garbage collection sweeps
REAPER_SWEEP_MAX_TIMEOUTS = 1 # Number of times to try pinging a reaper upon sweep before giving up.


mb = MessageBus()

sys.path.insert(0, '../..')
from django.core.management import setup_environ
from ngt import settings
setup_environ(settings)
from models import Reaper
from ngt.jobs.models import Job, JobSet
from django import db
from django.db.models import Q
from commands import jobcommands


command_map = {
    'registerReaper': 'register_reaper',
    'unregisterReaper': 'unregister_reaper',
Example #5
from django.http import HttpResponse
from django.shortcuts import render_to_response
from ngt.messaging.messagebus import MessageBus
from ngt.dispatch.forms import JobForm

mbus = MessageBus()

def index(request):
    return HttpResponse('Hi from the Master Control Program.')

def initiate_job(command, args=None):
    # Join the command and its arguments into a single space-separated message body.
    msg = ' '.join([command] + (args or []))
    return mbus.basic_publish(msg, routing_key='command')

def test_view(request):
    s = initiate_job('ls -l')
    return HttpResponse(str(s))

def jobber(request):
    form = JobForm()
    if request.method == 'GET':
        return render_to_response('dispatch/jobform.html', {'form': form})
    elif request.method == 'POST':
        # Start the job.
        initiate_job(request.POST['command'], request.POST['params'].split(' '))
        return render_to_response('dispatch/jobform.html', {'form': form, 'message': 'Job enqueued.'})
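
For reference, initiate_job simply joins the command and its arguments into one space-separated body and publishes it with the 'command' routing key; a small example (the command itself is arbitrary):

initiate_job('ls', ['-l', '/tmp'])  # publishes the body "ls -l /tmp" to routing key 'command'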
Example #6
class MosaicJobCommand(JobCommand):
    commandname = 'mosaic'
    number_of_args = 2
    current_footprints = {}

    messagebus = MessageBus()
    messagebus.exchange_declare('ngt.platefile.index', 'direct')

    def check_readiness(self):
        # The job is ready unless its footprint touches one already being processed.
        if self.job.assets.all()[0].footprint:
            footprint = self.job.assets.all()[0].footprint.prepared
            for other_footprint in self.current_footprints.values():
                if other_footprint.touches(footprint):
                    return False
            return True
        else:
            return True

    def preprocess(self):
        if self.job.assets.all()[0].footprint:
            self.current_footprints[
                self.job.uuid] = self.job.assets.all()[0].footprint.prepared
        return self.job

    def _get_plate_info(self, output):
        # Parse the transaction and platefile IDs out of the job's output text.
        m = re.search(r'Transaction ID: (\d+)', output)
        transaction_id = int(m.group(1)) if m else None
        m = re.search(r'Platefile ID: (\d+)', output)
        platefile_id = int(m.group(1)) if m else None
        return transaction_id, platefile_id

    def postprocess(self):
        if self.job.uuid in self.current_footprints:
            del self.current_footprints[self.job.uuid]
        if self.job.status == 'failed':
            transaction_id, platefile_id = self._get_plate_info(
                self.job.output)
            if transaction_id and platefile_id:
                idx_transaction_failed = {
                    'platefile_id': platefile_id,
                    'transaction_id': transaction_id
                }
                request = {
                    'sequence_number': 0,
                    'requestor': '',
                    'method': 'TransactionFailed',
                    'payload': protocols.pack(protobuf.IndexTransactionFailed,
                                              idx_transaction_failed),
                }
                msg = Message(
                    protocols.pack(protobuf.BroxtonRequestWrapper, request))
                self.messagebus.basic_publish(msg,
                                              exchange='ngt.platefile.index_0',
                                              routing_key='index')

        return self.job
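
The two regular expressions in _get_plate_info look for "Transaction ID: <n>" and "Platefile ID: <n>" in the job output; a small sketch of the parsing they rely on, using made-up output text:

import re
sample_output = "... Transaction ID: 42 ... Platefile ID: 7 ..."  # hypothetical command output
assert re.search(r'Transaction ID: (\d+)', sample_output).group(1) == '42'
assert re.search(r'Platefile ID: (\d+)', sample_output).group(1) == '7'
# _get_plate_info would therefore return (42, 7); with neither line present it returns (None, None).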
Example #7
class RpcChannel(object):

  """Abstract interface for an RPC channel.

  An RpcChannel represents a communication line to a service which can be used
  to call that service's methods.  The service may be running on another
  machine.  Normally, you should not use an RpcChannel directly, but instead
  construct a stub Service wrapping it.  Example:

    channel = rpcImpl.Channel("remotehost.example.com:1234")
    controller = rpcImpl.Controller()
    service = MyService_Stub(channel)
    service.MyMethod(controller, request, callback)
  """
  def __init__(self, exchange, response_queue, request_routing_key, max_retries=3, timeout_ms=5000):
    self.exchange = exchange
    self.response_queue = response_queue
    self.request_routing_key = request_routing_key
    self.messagebus = MessageBus()
    self.max_retries = max_retries  # maximum number of times to retry on RPC timeout; -1 indicates infinite retries
    self.timeout_ms = timeout_ms  # set to -1 for no timeout

    self.polling_interval = 0.01  # in seconds

    self.sync_sequence_number = 0

    self.messagebus.exchange_declare(exchange, 'direct')
    # self.messagebus.queue_delete(queue=response_queue)  # clear it in case there are backed-up messages (EDIT: it *should* auto-delete)
    self.messagebus.queue_declare(queue=response_queue, auto_delete=True)
    self.messagebus.queue_purge(response_queue)
    self.messagebus.queue_bind(response_queue, exchange, routing_key=response_queue)
    logger.debug("Response queue '%s' is bound to key '%s' on exchange '%s'" % (response_queue, response_queue, exchange))
      

  def CallMethod(self, method_descriptor, rpc_controller,
                 request, response_class, done):
    """Calls the method identified by the descriptor.

    Call the given method of the remote service.  The signature of this
    procedure looks the same as Service.CallMethod(), but the requirements
    are less strict in one important way:  the request object doesn't have to
    be of any specific class as long as its descriptor is method.input_type.
    """
    rpc_controller.Reset()
    self.sync_sequence_number += 1
    wrapped_request_bytes = protocols.pack(protocols.RpcRequestWrapper, {
        'requestor': self.response_queue,
        'method': method_descriptor.name,
        'payload': request.SerializeToString(),
        'sequence_number': self.sync_sequence_number,
    })
    #print ' '.join([hex(ord(c))[2:] for c in request.SerializeToString()])    
    #print ' '.join([hex(ord(c))[2:] for c in request_wrapper])
    
    retries = 0
    while True: # begin retry loop
        if self.max_retries > -1 and retries > self.max_retries:
            rpc_controller.SetFailed("Too many retries. (Max was %d)" % self.max_retries)
            #if done:
            #    done(None)
            
            # raise RPCFailure("Too many retries")
            return None # Still not too sure about this whole return None on failure business
        if retries > 0:
            logger.info("Retrying (%d)." % retries)
    
        logger.debug("About to publish to exchange '%s' with key '%s'" % (self.exchange, self.request_routing_key))
        self.messagebus.basic_publish(amqp.Message(wrapped_request_bytes),
                        exchange=self.exchange,
                        routing_key=self.request_routing_key)
        
        # Wait for a response
        logger.debug("Waiting for a response on queue '%s'" % self.response_queue)
        timeout_flag = False
        sync_ok = False
        t0 = time.time()
        # begin sync loop
        while not sync_ok:
            # begin response loop
            response = None
            while not response: 
                delta_t = time.time() - t0
                if self.timeout_ms >= 0 and delta_t * 1000.0 > self.timeout_ms:
                    timeout_flag = True
                    break
                response = self.messagebus.basic_get(self.response_queue, no_ack=True) # returns a message or None
                if not response: time.sleep(self.polling_interval) # polling interval
            # end response loop
            
            #self.messagebus.basic_ack(response.delivery_tag)
            if timeout_flag:
                logger.warning("RPC method '%s' timed out," % method_descriptor.name)
                retries += 1
                break # from the sync loop out to retry loop.  resets timer

            logger.info("Got a response in %s secs" % str(time.time() - t0))
        
            response_wrapper = protocols.unpack(protocols.RpcResponseWrapper, response.body)
            if response_wrapper.sequence_number == self.sync_sequence_number:
                logger.debug("Sync OK!")
                sync_ok = True
                break # from the sync loop
            else:
                sync_delta = self.sync_sequence_number - response_wrapper.sequence_number
                logger.warning("Message sync error.  Sync delta: %d" % sync_delta)
                logger.debug("Expected %d but got %d" % (self.sync_sequence_number, response_wrapper.sequence_number))
                if sync_delta > 0:
                    logger.warning("Trying to catch up.")
                    t0 = time.time() # reset the timeout clock
                    continue # to "while not sync_ok"
                elif sync_delta < 0:
                    logger.error("The message queue is ahead of us!  Purging.")
                    purged = self.messagebus.queue_purge(queue=self.response_queue) # clear the response queue and try again
                    logger.error("Purged %d messages from %s" % (purged, self.response_queue))
                    time.sleep(0.1)
                    retries += 1
                    break
        #end sync loop
        if timeout_flag:
            continue # jump to the top of the retry loop
        if sync_ok:
            break # from the retry loop
                       
    if response_wrapper.error:
        logger.error("RPC response error: %s" % response_wrapper.error_string)
        rpc_controller.SetFailed(response_wrapper.error_string)
        #if done:
        #    done(None)
        raise RPCFailure("RPC response error: %s" % response_wrapper.error_string)
                
    response = protocols.unpack(response_class, response_wrapper.payload)
    logger.debug("Response is: %s" % str(response))
    if done:
        done(response)
    return response
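
Restating the class docstring's example in Python terms, a hedged sketch of how a protoc-generated stub might drive this channel, assuming a broker is reachable; MyService_Stub, MyRequest, and all of the queue, exchange, and key names are placeholders:

channel = RpcChannel(exchange='rpc',                          # hypothetical exchange
                     response_queue='client.replies',         # hypothetical reply queue
                     request_routing_key='service.requests')  # hypothetical routing key
controller = AmqpRpcController()   # the controller type used in Example #2
request = MyRequest()              # placeholder protobuf request message
service = MyService_Stub(channel)  # placeholder stub class generated by protoc
response = service.MyMethod(controller, request, None)  # assumed to return CallMethod's unpacked response
if controller.Failed():            # standard protobuf RpcController accessors
    print controller.ErrorText()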