def handle_field_setting_changes(self, msg):
    """Refresh a cached field setting after a change notification.

    ``msg`` names the setting that changed.  A new ``cultivation_start``
    also invalidates every cached calendar entry so it gets recomputed.
    """
    if msg == 'loop_time':
        self.loop_time = FieldSetting.get_loop_time(self.db_session)
    elif msg == 'cultivation_start':
        self.cultivation_start = FieldSetting.get_cultivation_start(self.db_session)
        # make sure calendars are recomputed
        for parameter in self.parameters.values():
            parameter.current_calendar_entry = None
        for device in self.devices.values():
            device.current_calendar_entry = None
def __init__(self, name, config_uri):
    """Initialise the farm process: database, logging and redis.

    :param name: display name for this process
    :param config_uri: path to the paste-style ini file with the
        ``sqlalchemy.`` and ``log_directory`` settings
    """
    super(FarmProcess, self).__init__()
    self.name = name

    # read configuration and wire up the database session factory
    settings = get_appsettings(config_uri)
    db_engine = engine_from_config(settings, 'sqlalchemy.')
    session_factory = sessionmaker(bind=db_engine)
    Base.metadata.bind = db_engine

    # log into the configured directory with timestamped entries
    logging.basicConfig(
        filename=settings['log_directory'] + '/farm_manager.log',
        format='%(levelname)s:%(asctime)s: %(message)s',
        datefmt='%Y.%m.%d %H:%M:%S',
        level=logging.DEBUG)

    self.redis_conn = get_redis_conn(config_uri)
    self.db_sessionmaker = session_factory
    self.db_session = self.db_sessionmaker(expire_on_commit=False, autoflush=False)
    self.loop_time = FieldSetting.get_loop_time(self.db_session)
def main(argv=sys.argv):
    """Daemon entry point: start all farm processes and supervise them.

    Reads the ini file named in ``argv[1]``, spawns one process per farm
    process definition, then loops forever restarting any child whose
    watchdog check fails.

    NOTE(review): ``argv=sys.argv`` binds sys.argv once at import time --
    the common console-script pattern; pass argv explicitly when embedding.
    """
    if len(argv) < 2:
        # presumably usage() prints help and terminates -- TODO confirm
        usage(argv)
    config_uri = argv[1]
    settings = get_appsettings(config_uri)

    # setup logging
    logging.basicConfig(filename=settings['log_directory'] + '/oafd.log',
                        format='%(levelname)s:%(asctime)s: %(message)s',
                        datefmt='%Y.%m.%d %H:%M:%S',
                        level=logging.DEBUG)
    logging.info('D: Initializing')

    # connect to database
    db_engine = engine_from_config(settings, 'sqlalchemy.')
    db_sessionmaker = sessionmaker(bind=db_engine)
    Base.metadata.bind = db_engine
    db_session = db_sessionmaker(expire_on_commit=False, autoflush=False)

    # connect to redis
    redis_conn = get_redis_conn(config_uri)

    # loop_time is used like a timedelta below (datetime arithmetic and
    # 30 * loop_time), so children are considered dead after 30 cycles
    loop_time = FieldSetting.get_loop_time(db_session)
    process_timeout = 30 * loop_time

    logging.info('D: starting farm processes')
    children = []  # list of (process, ProcessMonitor) pairs
    for proc in farm_process_generator(config_uri, redis_conn, process_timeout):
        children.append((proc, ProcessMonitor(proc.worker.pid, proc.worker.name,
                                              redis_conn, loop_time)))
    logging.info('D: Initialisation finished')

    # back-date last_run so the first supervision pass happens immediately
    last_run = datetime.now() - loop_time
    while True:
        # sleep (busy-wait in 50 ms steps) until one loop_time has elapsed
        while datetime.now() - last_run < loop_time:
            sleep(0.05)
        last_run = datetime.now()

        # make sure all processes are running and update monitors
        for proc, mon in children:
            if not proc.check_watchdog():
                proc.restart()
                # restart forks a new worker, so tell the monitor its new pid
                mon.change_pid(proc.worker.pid)
            mon.update()
def handle_messages(self):
    """Poll the redis pubsub once and apply any pending database change.

    Two channels are handled:
      * ``periphery_controller_changes`` -- on a ``"deleted <id>"`` event,
        drop every piece of local state kept for that controller (id map,
        controller object, shell, device-name list entry).
      * ``field_setting_changes`` -- re-read the loop time from the DB.
    """
    message = self.pubsub.get_message()
    if message is None:
        return
    # something in the database changed
    data = message["data"].decode("UTF-8")
    print("message: " + str(message))
    if message["channel"] == b"periphery_controller_changes":
        change_type, pc_id_str = data.split(" ")
        if change_type == "deleted":
            # map the numeric controller id back to its local device name
            dev_name = None
            for key in self.controller_ids:
                if self.controller_ids[key] == int(pc_id_str):
                    dev_name = key
                    break
            if dev_name is None:
                # controller was never registered locally; nothing to clean up
                return
            try:
                self.controller_ids.pop(dev_name)
                self.periphery_controllers.pop(dev_name)
                self.shells[dev_name].close()
                self.shells.pop(dev_name)
                # bug fix: use remove(); the old pop(index(...)) raised an
                # uncaught ValueError when dev_name was missing from the list
                self.dev_names.remove(dev_name)
            except (KeyError, ValueError):
                # partially registered controller -- best-effort cleanup only
                pass
    elif message["channel"] == b"field_setting_changes":
        self.loop_time = FieldSetting.get_loop_time(self.db_session)
def handle_messages(self):
    """Poll the redis pubsub once and apply any pending database change.

    Two channels are handled:
      * ``periphery_controller_changes`` -- on a ``'deleted <id>'`` event,
        drop every piece of local state kept for that controller (id map,
        controller object, shell, device-name list entry).
      * ``field_setting_changes`` -- re-read the loop time from the DB.
    """
    message = self.pubsub.get_message()
    if message is None:
        return
    # something in the database changed
    data = message['data'].decode('UTF-8')
    print('message: ' + str(message))
    if message['channel'] == b'periphery_controller_changes':
        change_type, pc_id_str = data.split(' ')
        if change_type == 'deleted':
            # map the numeric controller id back to its local device name
            dev_name = None
            for key in self.controller_ids:
                if self.controller_ids[key] == int(pc_id_str):
                    dev_name = key
                    break
            if dev_name is None:
                # controller was never registered locally; nothing to clean up
                return
            try:
                self.controller_ids.pop(dev_name)
                self.periphery_controllers.pop(dev_name)
                self.shells[dev_name].close()
                self.shells.pop(dev_name)
                # bug fix: use remove(); the old pop(index(...)) raised an
                # uncaught ValueError when dev_name was missing from the list
                self.dev_names.remove(dev_name)
            except (KeyError, ValueError):
                # partially registered controller -- best-effort cleanup only
                pass
    elif message['channel'] == b'field_setting_changes':
        self.loop_time = FieldSetting.get_loop_time(self.db_session)