Example #1
0
 def send(self, destination, message, persistent='true'):
     """
         Send a message to a queue, or record an error status entry
         when no AMQ connection is available.
         @param destination: name of the queue
         @param message: message content
         @param persistent: 'true' to ask the broker to persist the message
     """
     logging.debug("Send: %s" % destination)
     if self._send_connection is None:
         # No connection available: annotate the message with an error
         # and record it on the error queue instead of sending it
         logging.error("No AMQ connection to send to %s" % destination)
         payload = json.loads(message)
         payload['error'] = "No AMQ connection: Could not send to %s" % destination
         error_headers = {'destination': '/queue/%s' % POSTPROCESS_ERROR,
                          'message-id': ''}
         transactions.add_status_entry(error_headers, json.dumps(payload))
         return

     # The send() signature changed in stomp.py version 4
     if stomp.__version__[0] < 4:
         self._send_connection.send(destination=destination,
                                    message=message,
                                    persistent=persistent)
     else:
         self._send_connection.send(destination, message, persistent=persistent)
     sent_headers = {'destination': destination,
                     'message-id': ''}
     transactions.add_status_entry(sent_headers, message)
Example #2
0
 def process_function(self, headers, message):
     """
         Process an incoming message: normalize it to JSON, record a
         status entry, and invoke the wrapped action.
         @param headers: message headers
         @param message: message content (JSON, or legacy format)
     """
     # See if we have a JSON message; fall back to the legacy decoder.
     # json.loads raises ValueError on bad input (JSONDecodeError is a
     # subclass) -- catching only that instead of a bare except so real
     # errors (KeyboardInterrupt, NameError, ...) are not swallowed.
     try:
         data = json.loads(message)
     except ValueError:
         data = decode_message(message)
         message = json.dumps(data)

     destination = headers["destination"].replace('/queue/', '')
     # %s instead of %d: a non-integer run number must not break logging
     # (the sibling process_function variant already uses %s)
     logging.debug("%s r%s: %s" % (data["instrument"],
                                   data["run_number"],
                                   destination))
     transactions.add_status_entry(headers, message)
     return action(self, headers, message)
Example #3
0
 def process_function(self, headers, message):
     """
         Process an incoming message: normalize it to JSON, record a
         status entry, strip transient fields, and invoke the wrapped
         action with the cleaned-up message.
         @param headers: message headers
         @param message: message content (JSON, or legacy format)
     """
     # See if we have a JSON message; fall back to the legacy decoder.
     # Catch only ValueError (raised by json.loads on bad input) rather
     # than a bare except, so unrelated errors are not swallowed.
     try:
         data = json.loads(message)
     except ValueError:
         data = decode_message(message)
         message = json.dumps(data)

     destination = headers["destination"].replace('/queue/', '')
     logging.info("%s r%s: %s: %s" % (data["instrument"],
                                      data["run_number"],
                                      destination,
                                      str(data)))
     transactions.add_status_entry(headers, message)

     # Clean up the extra information before passing the message on.
     # 'in' replaces dict.has_key(), which is deprecated in Python 2
     # and removed in Python 3.
     if 'information' in data:
         del data['information']
     if 'error' in data:
         del data['error']
     message = json.dumps(data)

     return action(self, headers, message)
Example #4
0
 def _call_db_task(self, task_data, headers, message):
     """
         Execute the task class named in a task definition, then forward
         the message to any queues the definition lists.
         @param task_data: JSON-encoded task definition
         @param headers: message headers
         @param message: JSON-encoded message content
     """
     import importlib

     task_def = json.loads(task_data)
     # Instantiate and run the task class, if one is defined
     if 'task_class' in task_def and len(task_def['task_class'].strip()) > 0:
         try:
             toks = task_def['task_class'].strip().split('.')
             module_path = '.'.join(toks[:-1])
             class_name = toks[-1]
             # importlib + getattr replaces the previous exec-based import,
             # which executed a string built from external task data
             # (arbitrary code execution risk) and is Python 2-only syntax.
             module = importlib.import_module(module_path)
             action_cls = getattr(module, class_name)
             action_cls(connection=self._send_connection)(headers, message)
         except Exception as err:
             # 'err' replaces the deprecated sys.exc_value
             logging.error("Task [%s] failed: %s" % (headers["destination"], err))
     # Forward the message to each queue listed in the task definition,
     # recording a status entry per destination
     if 'task_queues' in task_def:
         for item in task_def['task_queues']:
             destination = '/queue/%s' % item
             self.send(destination=destination, message=message, persistent='true')
             queue_headers = {'destination': destination,
                              'message-id': ''}
             transactions.add_status_entry(queue_headers, message)
Example #5
0
 def verify_workflow(self):
     """
         Walk through the data runs and make sure they have
         gone through the whole workflow.
     """
     logging.info("Verifying workflow completeness")
     # Get a list of runs with an incomplete workflow
     run_list = WorkflowSummary.objects.incomplete()
     logging.info(" - list generated")

     # Dummy header for information logging
     logging_headers = {'destination': '/queue/%s' % POSTPROCESS_INFO,
                        'message-id': ''}

     for r in run_list:
         r.update()
         # Identify a problem only if the last message received is more
         # than the minimum allowed lag ago
         now = datetime.datetime.utcnow().replace(tzinfo=utc)
         if r.complete is False and \
             now - RunStatus.objects.last_timestamp(r.run_id) > self._allowed_lag:
             # The workflow for this run is still incomplete.
             # Generate a JSON description of the run, to be used
             # when sending a message.
             data_dict = json.loads(r.run_id.json_encode())

             # Run is not cataloged
             if r.cataloged is False:
                 self._flag_incomplete(data_dict, logging_headers,
                                       "Cataloging incomplete for %s" % str(r),
                                       CATALOG_DATA_READY)

             # Run hasn't been reduced
             if r.reduction_needed is True and r.reduced is False:
                 self._flag_incomplete(data_dict, logging_headers,
                                       "Reduction incomplete for %s" % str(r),
                                       REDUCTION_DATA_READY)

             # Reduced data hasn't been cataloged
             if r.reduction_needed is True and r.reduced is True and \
                 r.reduction_cataloged is False:
                 self._flag_incomplete(data_dict, logging_headers,
                                       "Reduction cataloging incomplete for %s" % str(r),
                                       REDUCTION_CATALOG_DATA_READY)
     logging.info(" - verification completed")

 def _flag_incomplete(self, data_dict, logging_headers, information, queue):
     """
         Log and record one incomplete workflow step; when running in
         recovery mode, re-send the run's message to the given queue.
         (Extracted from verify_workflow, which repeated this block
         three times with only the text and queue differing.)
         @param data_dict: dictionary describing the run (mutated: the
             'information' field is set, matching the original behavior)
         @param logging_headers: headers to use for the status entry
         @param information: human-readable description of the problem
         @param queue: queue name to re-send to in recovery mode
     """
     data_dict["information"] = information
     logging.warn(information)
     message = json.dumps(data_dict)
     # Log this information
     transactions.add_status_entry(logging_headers, message)
     if self._recovery:
         self.send(destination='/queue/%s' % queue,
                   message=message, persistent='true')