def store_and_cache(instrument_id, key, value):
    """
    Store a DASMON status variable and refresh its cached latest value.

    @param instrument_id: Instrument object
    @param key: key string
    @param value: value for the given key
    """
    # Get or create the Parameter entry for this key.
    # NOTE(review): callers sometimes pass a Parameter object as `key`
    # (see the heartbeat in listen_and_wait) — confirm the name lookup
    # coerces it as intended.
    try:
        key_id = Parameter.objects.get(name=key)
    except Parameter.DoesNotExist:
        key_id = Parameter(name=key)
        key_id.save()

    # Record the raw status entry.
    status_entry = StatusVariable(instrument_id=instrument_id,
                                  key_id=key_id,
                                  value=value)
    status_entry.save()

    # Update the latest-value cache for this (instrument, key) pair.
    try:
        last_value = StatusCache.objects.filter(instrument_id=instrument_id,
                                                key_id=key_id).latest('timestamp')
        last_value.value = status_entry.value
        last_value.timestamp = status_entry.timestamp
        last_value.save()
    except StatusCache.DoesNotExist:
        # No cache entry yet for this pair: create one.
        last_value = StatusCache(instrument_id=instrument_id,
                                 key_id=key_id,
                                 value=status_entry.value,
                                 timestamp=status_entry.timestamp)
        last_value.save()
def retrieve_parameter(self, key):
    """
    Retrieve or create a Parameter entry.

    Looks for a cached Parameter whose string form matches *key*;
    if none is found, a new one is created, saved, and cached.
    """
    cached = next((p for p in self._parameters if str(p) == key), None)
    if cached is not None:
        return cached
    # Not cached yet: create it, persist it, and remember it.
    created = Parameter(name=key)
    created.save()
    self._parameters.append(created)
    return created
def _purge_old_entries(self):
    """
    Delete expired status, PV, and image entries from the database.

    Cached PV entries that correspond to a monitored variable are kept
    so that configured monitors do not lose their entries.
    """
    # Remove old status entries (timestamp is a datetime field).
    cutoff = timezone.now() - datetime.timedelta(days=PURGE_TIMEOUT)
    StatusVariable.objects.filter(timestamp__lte=cutoff).delete()
    # StatusCache entries are deliberately not purged:
    # StatusCache.objects.filter(timestamp__lte=cutoff).delete()

    # Remove old PVMON entries (update_time is stored as epoch seconds).
    pv_cutoff = time.time() - PURGE_TIMEOUT * 24 * 60 * 60
    # First, the float values
    PV.objects.filter(update_time__lte=pv_cutoff).delete()
    for item in PVCache.objects.filter(update_time__lte=pv_cutoff):
        if not MonitoredVariable.objects.filter(instrument=item.instrument,
                                                pv_name=item.name).exists():
            item.delete()
    # Second, the string values
    PVString.objects.filter(update_time__lte=pv_cutoff).delete()
    for item in PVStringCache.objects.filter(update_time__lte=pv_cutoff):
        if not MonitoredVariable.objects.filter(instrument=item.instrument,
                                                pv_name=item.name).exists():
            item.delete()

    # Remove old images
    image_cutoff = timezone.now() - datetime.timedelta(days=IMAGE_PURGE_TIMEOUT)
    ReducedImage.objects.filter(created_on__lte=image_cutoff).delete()

def listen_and_wait(self, waiting_period=1.0):
    """
    Listen for the next message from the brokers.
    This method will simply return once the connection is terminated.

    @param waiting_period: sleep time between connection to a broker
    """
    # Get or create the "common" instrument object from the DB.
    # This dummy instrument is used for heartbeats and central services.
    try:
        common_instrument = Instrument.objects.get(name='common')
    except Instrument.DoesNotExist:
        common_instrument = Instrument(name='common')
        common_instrument.save()

    # Retrieve the Parameter object for our own heartbeat
    try:
        pid_key_id = Parameter.objects.get(name="system_dasmon_listener_pid")
    except Parameter.DoesNotExist:
        pid_key_id = Parameter(name="system_dasmon_listener_pid")
        pid_key_id.save()

    last_purge_time = None
    last_heartbeat = 0
    while True:
        try:
            # (Re)connect if the broker connection was lost.
            if self._connection is None or self._connection.is_connected() is False:
                self.connect()

            # Periodically purge expired DB entries.
            if last_purge_time is None or time.time() - last_purge_time > PURGE_DELAY:
                last_purge_time = time.time()
                self._purge_old_entries()

            time.sleep(waiting_period)

            # Heartbeat: record our PID and ping the broker round-trip.
            try:
                if time.time() - last_heartbeat > HEARTBEAT_DELAY:
                    last_heartbeat = time.time()
                    store_and_cache(common_instrument, pid_key_id, str(os.getpid()))
                    # Send ping request
                    if hasattr(settings, "PING_TOPIC"):
                        # NOTE(review): imports from the `settings` module even
                        # though a `settings` object is already in scope —
                        # confirm both refer to the same configuration.
                        from settings import PING_TOPIC, ACK_TOPIC
                        payload = {"reply_to": ACK_TOPIC,
                                   "request_time": time.time()}
                        t0 = time.time()
                        self.send(PING_TOPIC, json.dumps(payload))
                        logging.error("Send time: %s", time.time() - t0)
                        process_ack()
                    else:
                        logging.error("settings.PING_TOPIC is not defined")
            except Exception:
                # sys.exc_value was removed in Python 3; logging.exception
                # records the full traceback instead.
                logging.exception("Problem writing heartbeat")
        except Exception:
            logging.exception("Problem connecting to AMQ broker")
            # Back off before retrying the connection.
            time.sleep(5.0)