Example 1
 def retrieve_instrument(self, instrument_name):
     """
         Retrieve or create an instrument given its name
     """
     # Get or create the instrument object from the DB
     for instrument in self._instruments:
         if str(instrument) == instrument_name:
             return instrument
     # If we haven't found it, create it.
     instrument = Instrument(name=instrument_name)
     instrument.save()
     self._instruments.append(instrument)
     return instrument
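For the database step alone, Django's built-in get_or_create helper covers the same lookup-or-insert pattern; the in-memory _instruments cache is the part this example adds on top of it. A minimal sketch, assuming Instrument is an ordinary Django model:

# Sketch of the database step using Django's get_or_create; the in-memory
# caching done in retrieve_instrument is not covered by this call.
instrument, created = Instrument.objects.get_or_create(name=instrument_name)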
Example 2
 def on_message(self, headers, message):
     """
         Process a message.
         @param headers: message headers
         @param message: JSON-encoded message content
     """
     destination = headers["destination"]
     try:
         data_dict = json.loads(message)
     except Exception:
         logging.error("Could not decode message from %s", destination)
         return
        
     try:
         instrument_id = Instrument.objects.get(name=self._instrument)
     except Instrument.DoesNotExist:
         instrument_id = Instrument(name=self._instrument)
         instrument_id.save()
         
     for key in data_dict:
         # If we find a dictionary, process its entries
         if isinstance(data_dict[key], dict):
             # The key is now an entry type and the entry itself
             # should be another dictionary for instances of that type
             for item in data_dict[key]:
                 if isinstance(data_dict[key][item], dict):
                     identifier = None
                     counts = None
                     if "id" in data_dict[key][item]:
                         identifier = data_dict[key][item]["id"]
                     if "counts" in data_dict[key][item]:
                         counts = data_dict[key][item]["counts"]
                     if identifier is not None and counts is not None:
                         parameter_name = "%s_count_%s" % (item, identifier)
                         store_and_cache(instrument_id, parameter_name, counts)
         else:
             store_and_cache(instrument_id, key, data_dict[key])                
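For reference, a hypothetical payload that exercises both branches of on_message above; the key names and values are invented and only follow the structure the loop expects, where top-level scalar entries are stored directly and nested dictionaries carry per-item "id" and "counts" fields.

import json

# Hypothetical message content; "run_number" is stored as-is, while the
# nested "monitors" entry produces a parameter named "monitor_count_1"
# holding the counts value.
example_message = json.dumps({
    "run_number": 1234,
    "monitors": {
        "monitor": {"id": 1, "counts": 4521}
    }
})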
Example 3
    def listen_and_wait(self, waiting_period=1.0):
        """
            Listen for the next message from the brokers.
            This method will simply return once the connection is
            terminated.
            @param waiting_period: sleep time between connection to a broker
        """
        # Get or create the "common" instrument object from the DB.
        # This dummy instrument is used for heartbeats and central services.
        try:
            common_instrument = Instrument.objects.get(name='common')
        except Instrument.DoesNotExist:
            common_instrument = Instrument(name='common')
            common_instrument.save()

        # Retrieve the Parameter object for our own heartbeat
        try:
            pid_key_id = Parameter.objects.get(name="system_dasmon_listener_pid")
        except Parameter.DoesNotExist:
            pid_key_id = Parameter(name="system_dasmon_listener_pid")
            pid_key_id.save()

        last_purge_time = None
        last_heartbeat = 0
        while True:
            try:
                if self._connection is None or not self._connection.is_connected():
                    self.connect()
                if last_purge_time is None or time.time() - last_purge_time > PURGE_DELAY:
                    last_purge_time = time.time()
                    # Remove old entries
                    delta_time = datetime.timedelta(days=PURGE_TIMEOUT)
                    cutoff = timezone.now() - delta_time
                    StatusVariable.objects.filter(timestamp__lte=cutoff).delete()
                    #StatusCache.objects.filter(timestamp__lte=cutoff).delete()

                    # Remove old PVMON entries: first, the float values
                    PV.objects.filter(update_time__lte=time.time() - PURGE_TIMEOUT * 24 * 60 * 60).delete()
                    old_entries = PVCache.objects.filter(update_time__lte=time.time() - PURGE_TIMEOUT * 24 * 60 * 60)
                    for item in old_entries:
                        if not MonitoredVariable.objects.filter(instrument=item.instrument,
                                                                pv_name=item.name).exists():
                            item.delete()
                    # Remove old PVMON entries: second, the string values
                    PVString.objects.filter(update_time__lte=time.time() - PURGE_TIMEOUT * 24 * 60 * 60).delete()
                    old_entries = PVStringCache.objects.filter(update_time__lte=time.time() - PURGE_TIMEOUT * 24 * 60 * 60)
                    for item in old_entries:
                        if not MonitoredVariable.objects.filter(instrument=item.instrument,
                                                                pv_name=item.name).exists():
                            item.delete()
                    # Remove old images
                    delta_time = datetime.timedelta(days=IMAGE_PURGE_TIMEOUT)
                    cutoff = timezone.now() - delta_time
                    ReducedImage.objects.filter(created_on__lte=cutoff).delete()
                time.sleep(waiting_period)
                try:
                    if time.time() - last_heartbeat > HEARTBEAT_DELAY:
                        last_heartbeat = time.time()
                        store_and_cache(common_instrument, pid_key_id, str(os.getpid()))
                        # Send ping request
                        if hasattr(settings, "PING_TOPIC"):
                            from settings import PING_TOPIC, ACK_TOPIC
                            payload = {"reply_to": ACK_TOPIC,
                                       "request_time": time.time()}
                            t0 = time.time()
                            self.send(PING_TOPIC, json.dumps(payload))
                            t = time.time() - t0
                            logging.error("Send time: %s", t)
                            process_ack()
                        else:
                            logging.error("settings.PING_TOPIC is not defined")
                except Exception:
                    logging.error("Problem writing heartbeat: %s", sys.exc_info()[1])
            except Exception:
                logging.error("Problem connecting to AMQ broker: %s", sys.exc_info()[1])
                time.sleep(5.0)
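The purge step above mixes two time representations: StatusVariable and ReducedImage are filtered with a timezone-aware datetime cutoff, while the PV tables store update_time as epoch seconds and are filtered with a numeric cutoff. A short illustration of the two computations, assuming PURGE_TIMEOUT is expressed in days (the value used here is only an example):

import datetime
import time

PURGE_TIMEOUT = 30  # days; hypothetical value, the real one comes from settings

# Datetime cutoff, as used for StatusVariable (the real code uses Django's
# timezone.now() to get an aware datetime)
cutoff = datetime.datetime.now() - datetime.timedelta(days=PURGE_TIMEOUT)

# Epoch-seconds cutoff, as used for the PV, PVCache, PVString and
# PVStringCache tables, whose update_time holds time.time() values
epoch_cutoff = time.time() - PURGE_TIMEOUT * 24 * 60 * 60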
Example 4
def add_status_entry(headers, data):
    """
        Populate the reporting database with the contents
        of a status message of the following format:
        
        @param headers: ActiveMQ message header dictionary
        @param data: JSON encoded message content
        
        headers: {'expires': '0', 'timestamp': '1344613053723', 
                  'destination': '/queue/POSTPROCESS.DATA_READY', 
                  'persistent': 'true',
                  'priority': '5', 
                  'message-id': 'ID:mac83086.ornl.gov-59780-1344536680877-8:2:1:1:1'}
                  
        The data is a dictionary in a JSON format.
        
        data: {"instrument": tokens[2],
               "ipts": tokens[3],
               "run_number": run_number,
               "data_file": message}
    """
    # Find the DB entry for this queue
    destination = headers["destination"].replace('/queue/','')
    status_id = StatusQueue.objects.filter(name__startswith=destination)
    if len(status_id)==0:
        status_id = StatusQueue(name=destination)
        status_id.save()
    else:
        status_id = status_id[0]
    
    # Process the data
    data_dict = json.loads(data)
    
    # Look for instrument
    instrument = data_dict["instrument"].lower()
    try:
        instrument_id = Instrument.objects.get(name=instrument)
    except Instrument.DoesNotExist:
        instrument_id = Instrument(name=instrument)
        instrument_id.save()

    # Look for IPTS ID
    ipts = data_dict["ipts"].upper()
    try:
        ipts_id = IPTS.objects.get(expt_name=ipts)
    except IPTS.DoesNotExist:
        ipts_id = IPTS(expt_name=ipts)
        ipts_id.save()
            
    # Add instrument to IPTS if not already in there
    try:
        if IPTS.objects.filter(id=ipts_id.id, instruments__in=[instrument_id]).count()==0:
            ipts_id.instruments.add(instrument_id)
            ipts_id.save()
    except Exception:
        traceback.print_exc()
        logging.error(sys.exc_info()[1])

    # Check whether we already have an entry for this run
    run_number = data_dict["run_number"]
    try:
        run_id = DataRun.objects.get(run_number=run_number, instrument_id=instrument_id)
    except DataRun.DoesNotExist:
        logging.info("Creating entry for run %s-%d" % (instrument, run_number))
        run_id = DataRun(run_number=run_number,
                         instrument_id=instrument_id,
                         ipts_id=ipts_id,
                         file=data_dict["data_file"])
        run_id.save()
    
    
    # Add a workflow summary for this new run
    try:
        summary_id = WorkflowSummary.objects.get(run_id=run_id)
    except WorkflowSummary.DoesNotExist:
        summary_id = WorkflowSummary(run_id=run_id)
        summary_id.save()
    
    # Create a run status object in the DB
    run_status = RunStatus(run_id=run_id,
                           queue_id=status_id,
                           message_id=headers["message-id"])
    run_status.save()
    
    # Create an information entry as necessary
    # Truncate to max length of DB character field
    if "information" in data_dict:
        data = data_dict["information"]
        mesg = (data[:198] + '..') if len(data) > 200 else data
        info = Information(run_status_id=run_status,
                           description=mesg)
        info.save()
    
    # Create error entry as necessary
    if "error" in data_dict:
        data = data_dict["error"]
        mesg = (data[:198] + '..') if len(data) > 200 else data
        error = Error(run_status_id=run_status,
                      description=mesg)
        error.save()
    
    # Update the workflow summary
    summary_id = WorkflowSummary.objects.get_summary(run_id)
    summary_id.update()
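A hypothetical call showing the kind of input add_status_entry expects; every value below is invented and only mirrors the format documented in the docstring.

import json

# Hypothetical headers and payload; the destination and field names follow
# the docstring, the values are illustrative only.
headers = {"destination": "/queue/POSTPROCESS.DATA_READY",
           "message-id": "ID:example-host-0000-0000000000000-0:1:1:1:1"}
data = json.dumps({"instrument": "demo",
                   "ipts": "IPTS-0000",
                   "run_number": 1234,
                   "data_file": "/tmp/demo_1234_events.nxs",
                   "information": "new data file available"})
add_status_entry(headers, data)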