def main():
    logging.basicConfig(filename='log/lims_sql.log',
                        format='%(levelname)s: %(message)s',
                        level=logging.DEBUG)
    logging.info(local_datetime_string() + '- App Initiated')

    kwargs_task_hourly = {
        "cutoff_month": 6,
        "tablename_dispo_history": 'dispo_history',
        "tablename_sample_results": 'sample_results',
        "tablename_update_date": 'update_date'
    }
    kwargs_task_daily = {
        "cutoff_month": 36,
        "tablename_dispo_history": 'dispo_history_3_years',
        "tablename_sample_results": 'sample_results_3_years',
        "tablename_update_date": 'update_date_3_years'
    }

    def hourly():
        return SampleUpdateTask(**kwargs_task_hourly)

    def daily():
        return SampleUpdateTask(**kwargs_task_daily)

    Scheduler([hourly]).on(hour=11, minute=55).every(hours=1, minutes=0).run()
    Scheduler([daily]).on(hour=23, minute=55).run()
    return None
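# A rough, self-contained equivalent of the two schedules above, sketched with the
# third-party 'schedule' package instead of the project's fluent Scheduler class.
# The package choice and the job bodies are illustrative assumptions, not part of
# the original application.
import time
import schedule


def hourly_job():
    print('would run SampleUpdateTask with the 6-month cutoff')


def daily_job():
    print('would run SampleUpdateTask with the 36-month cutoff')


schedule.every(1).hours.do(hourly_job)          # roughly: run once per hour
schedule.every().day.at("23:55").do(daily_job)  # run once per day at 23:55

while True:
    schedule.run_pending()
    time.sleep(30)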
class FAQueue:
    def __init__(self):
        # Load our config first
        config = ConfigParser()
        config.read(os.path.join(FA_HOME, "etc", "config.ini"))

        # Create the logging directory if it does not exist
        logging_dir = config.get("general", "logging_dir")
        if not os.path.exists(logging_dir):
            print('Creating log directory: {}'.format(logging_dir))
            os.makedirs(logging_dir)

        # Initialize logging
        log_path = os.path.join(FA_HOME, 'etc', 'logging.ini')
        try:
            logging.config.fileConfig(log_path)
        except Exception as e:
            sys.exit('unable to load logging configuration file {}: {}'.format(log_path, str(e)))

        # Quick hack
        logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)

        # Set our environment proxy variables
        os.environ['http_proxy'] = config.get('proxy', 'http_proxy')
        os.environ['https_proxy'] = config.get('proxy', 'https_proxy')
        log.info('Using http_proxy {}'.format(os.environ['http_proxy']))
        log.info('Using https_proxy {}'.format(os.environ['https_proxy']))

        # Create directories if they do not exist
        working_dir = config.get("general", "working_dir")
        if not os.path.exists(working_dir):
            log.info("Working directory does not exist. Creating...")
            os.makedirs(working_dir)

        self.scheduler = None

        # Register the signal handler for SIGINT.
        signal.signal(signal.SIGINT, self.signal_handler)

    def run(self):
        self.scheduler = Scheduler()
        self.scheduler.start()

    def signal_handler(self, signum, frame):
        """Signal handler so the process pool can complete gracefully."""
        log.warning('Caught signal to terminate! Waiting for pool to finish processing.')
        if self.scheduler:
            self.scheduler.stop()
            log.warning('Goodbye.')
        else:
            log.warning('Scheduler not running. Goodbye.')
        sys.exit()
def main():
    def exit():
        logger.debug("Stopping active friends")
        for f in ui.friend_list:
            f.close()
        logger.debug("Shutdown scheduler")
        ui.scheduler.must_run = False
        while ui.scheduler.isRunning():
            logger.debug("Waiting scheduler")
            if ui.scheduler.waiting_authorized_contact == True:
                break
            sleep(0.1)
        logger.debug("Restore profile")
        config.restore_profile()
        if config.useusb == QtCore.Qt.Checked:
            logger.debug("Restore USB")
            config.restore_useusb()

    opt_parser = OptionParser()
    opt_parser.add_option("-d", "--debug",
                          dest="debug_mode",
                          action="store_true",
                          default=False,
                          help="Debug mode")

    app = QtGui.QApplication(sys.argv)
    # app.setOrganizationName("HeySms")
    # app.setOrganizationDomain("HeySms")
    app.setApplicationName("HeySms")

    (options, args) = opt_parser.parse_args([str(i) for i in app.arguments()])
    logger.set_debug(options.debug_mode)

    main_window = QtGui.QMainWindow()
    ui = Ui_MainWindow(app)
    ui.setupUi(main_window)
    config.parent = ui

    ui.call_listener = Call_listener(ui)
    ui.call_listener.start()
    logger.debug("Call_listener started")

    ui.sms_listener = Sms_listener(ui)
    ui.sms_listener.start()
    logger.debug("Sms_listener started")

    ui.scheduler = Scheduler(ui)
    ui.scheduler.start()
    logger.debug("Scheduler started")

    main_window.setWindowTitle("HeySms")
    main_window.setAttribute(QtCore.Qt.WA_Maemo5AutoOrientation, True)
    main_window.show()
    main_window.repaint()
    ui.central_widget.repaint()
    ui.central_widget.reload_contacts()

    QtCore.QObject.connect(app, QtCore.SIGNAL("aboutToQuit()"), exit)
    sys.exit(app.exec_())
def __init__(self):
    # !! Cannot initialize self.logics here, because at startup logics are initialized after plugins !!
    self.logics = Logics.get_instance()
    self.logger.info("BackendLogics __init__ self.logics = {}".format(self.logics))
    self.plugins = Plugins.get_instance()
    self.logger.info("BackendLogics __init__ self.plugins = {}".format(str(self.plugins)))
    self.scheduler = Scheduler.get_instance()
    self.logger.info("BackendLogics __init__ self.scheduler = {}".format(self.scheduler))
def __init__(self, smarthome, userlogicconf, envlogicconf):
    logger.info('Start Logics')
    self.shtime = Shtime.get_instance()
    self.items = Items.get_instance()
    self.plugins = Plugins.get_instance()
    self.scheduler = Scheduler.get_instance()
    self._sh = smarthome
    self._userlogicconf = userlogicconf
    self._env_dir = smarthome._env_dir
    self._envlogicconf = envlogicconf
    self._etc_dir = smarthome._etc_dir
    self._logic_dir = smarthome._logic_dir
    self._workers = []
    self._logics = {}
    self._bytecode = {}
    self.alive = True

    global _logics_instance
    if _logics_instance is not None:
        import inspect
        curframe = inspect.currentframe()
        calframe = inspect.getouterframes(curframe, 4)
        logger.critical("A second 'logics' object has been created. There should only be ONE instance of class 'Logics'!!! Called from: {} ({})".format(calframe[1][1], calframe[1][3]))
    _logics_instance = self

    self.scheduler = Scheduler.get_instance()

    _config = {}
    self._systemlogics = self._read_logics(envlogicconf, self._env_dir)
    _config.update(self._systemlogics)
    self._userlogics = self._read_logics(userlogicconf, self._logic_dir)
    _config.update(self._userlogics)

    for name in _config:
        self._load_logic(name, _config)
def __init__(self, server_id, server_data, client):
    self.server_id = server_id
    self.client = client

    self.server_os = 'w'
    self.gamemap = 'chernarus+'
    self.hive = 'public'
    self.servername = DEFAULT_SERVERNAME
    self.max_players = 1
    self.gametime = '0:00'
    self.time_acceleration = 0
    self._update(server_data)

    self.players = {}
    self.first_players_fetch = True

    self.create_callbacks('player', ['connect', 'disconnect', 'guid', 'chat', 'kick', 'ping_update'])
    self.create_callbacks('tool', ['started', 'stopped', 'halted', 'error', 'config_update', 'module_update', 'notice'])

    self.client.api.server_state(self.server_id, ServerStates.STARTING)
    self.setup_rcon()

    self.scheduler = Scheduler()
    self.tasks_created = False
    self.checkalive_fails = 0
    self.reconnects = 0
    self.start()

    self._modules = []
    self.load_modules()
def __init__(self, smarthome, data_file, callers=None):
    self.logger = logging.getLogger(__name__)
    self.logger.info('Init Simulation release 1.5.0.6')
    self._sh = smarthome
    self.shtime = Shtime.get_instance()
    self._datafile = data_file
    self.lastday = ''
    self.items = Items.get_instance()
    self.scheduler = Scheduler.get_instance()
    self._callers = callers
    self._items = []
    self.scheduler_add('midnight', self._midnight, cron='0 0 * *', prio=3)
    if not self.init_webinterface():
        self._init_complete = False
def __init__(self, sh, *args, **kwargs):
    self.logger = logging.getLogger(__name__)
    self.logger.info('Init Simulation release %s' % self.PLUGIN_VERSION)
    self.shtime = Shtime.get_instance()
    self._datafile = self.get_parameter_value('data_file')
    self.lastday = ''
    self.items = Items.get_instance()
    self.scheduler = Scheduler.get_instance()
    if len(self.get_parameter_value('callers')) == 0:
        self._callers = None
    else:
        self._callers = self.get_parameter_value('callers')
    self._items = []
    self.scheduler_add('midnight', self._midnight, cron='0 0 * *', prio=3)
    if not self.init_webinterface():
        self._init_complete = False
def read(self, logicname=None):
    """
    return an object with type info about all logics
    """
    # create a list of dicts, where each dict contains the information for one logic
    self.logger.info("LogicsController.read()")

    if self.plugins is None:
        self.plugins = Plugins.get_instance()
    if self.scheduler is None:
        self.scheduler = Scheduler.get_instance()

    self.logics_initialize()
    if self.logics is None:
        # SmartHomeNG has not yet initialized the logics module (still starting up)
        raise cherrypy.NotFound

    if logicname is None:
        return self.get_logics_info()
    else:
        return self.get_logic_info(logicname)
def main():
    global scheduler, schedulerThread, downloader

    setup()

    downloader = Downloader()
    scheduler = Scheduler()
    schedulerThread = None
    Pusher()

    t = threading.Thread(target=send_heartbeat)
    t.daemon = True
    t.start()

    t = threading.Thread(target=websocket_server)
    t.daemon = True
    t.start()

    watchdog_thread = threading.Event()
    notify_systemd(watchdog_thread)

    if is_under_voltage():
        browser_template('under_voltage')
        sleep(5)

    logging.debug('Entering infinite loop.')
    while True:
        if not scheduler.slides or len(scheduler.slides) - 1 == scheduler.index or scheduler.state != scheduler.STATE_OK:
            schedulerThread = threading.Thread(target=run_scheduler)
            schedulerThread.start()

        if not scheduler.slides and schedulerThread.is_alive():
            wait_for_scheduler()

        broadcast_loop(scheduler)

        if scheduler.index == 0 and schedulerThread and schedulerThread.is_alive():
            wait_for_scheduler()
def __init__(self, module):
    self._sh = module._sh
    self.module = module
    self.base_dir = self._sh.get_basedir()
    self.logger = logging.getLogger(__name__)

    self.etc_dir = self._sh._etc_dir
    self.logics_dir = os.path.join(self.base_dir, 'logics')

    self.logics = Logics.get_instance()
    self.logger.info("__init__ self.logics = {}".format(self.logics))
    self.plugins = Plugins.get_instance()
    self.logger.info("__init__ self.plugins = {}".format(str(self.plugins)))
    self.scheduler = Scheduler.get_instance()
    self.logger.info("__init__ self.scheduler = {}".format(self.scheduler))

    self.blockly_plugin_loaded = None
    self.logics_data = {}

    self.logics = Logics.get_instance()
    return
def update(self, logicname='', action='', filename=''):
    """
    Handle PUT requests for logics API
    """
    self.logger.info("LogicsController.update(logicname='{}', action='{}')".format(logicname, action))

    if self.plugins is None:
        self.plugins = Plugins.get_instance()
    if self.scheduler is None:
        self.scheduler = Scheduler.get_instance()

    self.logics_initialize()
    if self.logics is None:
        return json.dumps({'result': 'Error', 'description': "SmartHomeNG is still initializing"})

    if (action == 'saveparameters') and (logicname != ''):
        return self.save_logic_parameters(logicname)
    elif action not in ['create', 'load', 'delete']:
        mylogic = self.logics.return_logic(logicname)
        if mylogic is None:
            return json.dumps({'result': 'Error', 'description': "No logic with name '" + logicname + "' found"})

    if logicname != '':
        return self.set_logic_state(logicname, action, filename)

    return None
def __init__(self, smarthome, userlogicconf, envlogicconf):
    logger.info('Start Logics')
    self._sh = smarthome
    self._userlogicconf = userlogicconf
    self._env_dir = smarthome._env_dir
    self._envlogicconf = envlogicconf
    self._etc_dir = smarthome._etc_dir
    self._logic_dir = smarthome._logic_dir
    self._workers = []
    self._logics = {}
    self._bytecode = {}
    self.alive = True

    global _logics_instance
    _logics_instance = self

    self.scheduler = Scheduler.get_instance()

    _config = {}
    self._systemlogics = self._read_logics(envlogicconf, self._env_dir)
    _config.update(self._systemlogics)
    self._userlogics = self._read_logics(userlogicconf, self._logic_dir)
    _config.update(self._userlogics)

    for name in _config:
        self._load_logic(name, _config)
pubSub = PubSub()


def import_events() -> [Event]:
    events = []
    events_path = Path('events.json').resolve()
    with open(events_path, 'r') as f:
        data = f.read()
    events_json = json.loads(data)
    for event_json in events_json:
        event = Event(**event_json)
        event.date = datetime.strptime(event_json['date'], '%Y-%m-%d %H:%M')  # format date
        events.append(event)
    return events


def subscribe_events_to_functions():
    pubSub.subscribe('PRINT_CONSOLE', print_console)
    pubSub.subscribe('WRITE_QUOTE_FILE', write_quote_file)


if __name__ == '__main__':
    subscribe_events_to_functions()
    events = import_events()

    # Just for testing, we will publish_missed_events one time only
    scheduler = Scheduler(waitSec=1, events=events)
    scheduler.publish_missed_events()
    scheduler.active = False
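# The snippet above assumes project-specific Event, PubSub and Scheduler classes.
# Below is a minimal, self-contained sketch of just the publish/subscribe piece;
# the class name MiniPubSub and the demo topic are illustrative stubs, not the
# project's real API.
from collections import defaultdict


class MiniPubSub:
    """Tiny in-memory publish/subscribe registry."""

    def __init__(self):
        self._subscribers = defaultdict(list)

    def subscribe(self, topic, callback):
        # Register a callback for a topic.
        self._subscribers[topic].append(callback)

    def publish(self, topic, payload):
        # Invoke every callback registered for the topic.
        for callback in self._subscribers[topic]:
            callback(payload)


if __name__ == '__main__':
    bus = MiniPubSub()
    bus.subscribe('PRINT_CONSOLE', print)
    bus.publish('PRINT_CONSOLE', 'hello from the sketch')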
def __init__(self, name, igh=None):
    Instrument.__init__(self, name, tags=['virtual'])

    self._instruments = instruments.get_instruments()
    self._igh = self._instruments.get(igh)

    self.add_parameter('timeout', type=types.FloatType,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('idle_mintime', type=types.FloatType,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('slow_mintime', type=types.FloatType,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('disk_mintime', type=types.FloatType,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('timeout_mode', type=types.BooleanType, flags=Instrument.FLAG_GET)
    self.add_parameter('idle_mode', type=types.BooleanType, flags=Instrument.FLAG_GET)
    self.add_parameter('plot_enable', type=types.BooleanType, flags=Instrument.FLAG_GET)
    self.add_parameter('status', type=types.StringType, flags=Instrument.FLAG_GET)

    self.add_function('start')
    self.add_function('stop')
    self.add_function('plot_start')
    self.add_function('plot_stop')
    self.add_function('set_default_fast')
    self.add_function('set_default_slow')

    self._debug_counter = 0
    self._timeout = 10
    self._idle_mintime = 10
    self._slow_mintime = 300
    self._disk_mintime = 10
    self._slow_lasttime = 0
    self._disk_lasttime = 0
    self.plot_stop()

    self._dir = os.path.join(qt.config.get('datadir'), 'Tlog')
    self._filebasename = 'temperature_log'
    self._this_month = None
    if not os.path.isdir(self._dir):
        os.makedirs(self._dir)

    self._last_hour = TimeBuffer(60 * 60, self._dir, 'last_hour.dat')
    self._last_12hour = TimeBuffer(60 * 60 * 12, self._dir, 'last_12hour.dat')

    self._task = Scheduler(self._run_all, self._timeout, self._idle_mintime,
                           timeout_mode=True, idle_mode=True)
    self._status = 'running'

    self.get_all()
class OmegaWorker(Callback):
    def __init__(self, server_id, server_data, client):
        self.server_id = server_id
        self.client = client

        self.server_os = 'w'
        self.gamemap = 'chernarus+'
        self.hive = 'public'
        self.servername = DEFAULT_SERVERNAME
        self.max_players = 1
        self.gametime = '0:00'
        self.time_acceleration = 0
        self._update(server_data)

        self.players = {}
        self.first_players_fetch = True

        self.create_callbacks('player', ['connect', 'disconnect', 'guid', 'chat', 'kick', 'ping_update'])
        self.create_callbacks('tool', ['started', 'stopped', 'halted', 'error', 'config_update', 'module_update', 'notice'])

        self.client.api.server_state(self.server_id, ServerStates.STARTING)
        self.setup_rcon()

        self.scheduler = Scheduler()
        self.tasks_created = False
        self.checkalive_fails = 0
        self.reconnects = 0
        self.start()

        self._modules = []
        self.load_modules()

    def get_module_config(self, module_config_id):
        if module_config_id not in self.config.get('modules'):
            return False
        else:
            return self.config.get('modules').get(module_config_id)

    def load_modules(self):
        module_files = []
        for module in os.listdir(MODULE_DIR):
            if module == '__init__.py' or module[-3:] != '.py':
                continue
            module_files.append(module[:-3])

        for file in module_files:
            try:
                name, author, reference = importlib.import_module(file).hook()
            except Exception as e:
                print '[MODULE] Error importing module {} ({})'.format(file, e)
                continue
            module = {
                'name': name,
                'author': author,
                'reference': reference,
                'instance': reference(self)
            }
            self._modules.append(module)

    def _update(self, server_data):
        self.config = server_data.get('config')
        self.container = self.config.get('container')
        self.steam_data = server_data.get('steam')
        if self.steam_data:
            gametype_params = self.steam_data.get('restricted').get('gametype').split(',')
            self.hive = 'private' if 'privHive' in gametype_params else 'public'
            self.third_pp = False if 'no3rd' in gametype_params else True
            if self.hive == 'public':
                self.gametime = gametype_params[5]
                self.time_acceleration = int(float(gametype_params[4].replace('etm', '')))
            else:
                self.gametime = gametype_params[6]
                self.time_acceleration = int(float(gametype_params[5].replace('etm', '')))
            self.server_os = self.steam_data.get('restricted').get('os')
            self.gamemap = self.steam_data.get('restricted').get('map')
            self.servername = self.steam_data.get('name')
            self.max_players = int(self.steam_data.get('max_players'))

    def get_player_by_name(self, name):
        for slot, player in self.players.iteritems():
            if player.name == name:
                return player
        return None

    def find_player_by_name(self, name):
        players = []
        for slot, player in self.players.iteritems():
            if name in player.name:
                players.append(player)
        return players

    def get_player_by_omega_id(self, omega_id):
        for slot, player in self.players.iteritems():
            if player.omega_id == omega_id:
                return player
        return None

    def get_player_by_guid(self, guid):
        for slot, player in self.players.iteritems():
            if player.guid == guid:
                return player
        return None

    def search_player_by_name(self, name):
        results = []
        if len(name) < 3:
            return results
        for slot, player in self.players.iteritems():
            if name.lower() in player.name.lower():
                results.append(player)
        return results

    def construct_message(self, message):
        message = str(message)
        message = message.replace('%SERVERNAME%', self.servername)
        message = message.replace('%HIVE%', self.hive)
        message = message.replace('%MAXPLAYERS%', str(self.max_players))
        message = message.replace('%PLAYERS%', str(len(self.players)))
        return message

    def setup_rcon(self):
        self.server = DayZServer(self.config.get('host'),
                                 self.config.get('port'),
                                 self.config.get('password'))
        self.server.register_callback('connection', 'authenticated', self._cb_rcon_authenticated)
        self.server.register_callback('connection', 'authentication_failed', self._cb_rcon_authenticated_failed)
        self.server.register_callback('connection', 'keepalive_acknowledged', self._cb_rcon_keepalive_acknowledged)
        self.server.register_callback('event', 'player_connect', self._cb_player_connected)
        self.server.register_callback('event', 'player_disconnect', self._cb_player_disconnected)
        self.server.register_callback('event', 'player_guid', self._cb_player_guid)
        self.server.register_callback('event', 'player_list', self._cb_player_list)
        self.server.register_callback('event', 'player_chat', self._cb_player_chat)
        self.server.register_callback('event', 'be_kick', self._cb_player_kick)
        self.server.register_callback('error', 'connection_refused', self._cb_error_connection_refused)
        self.server.register_callback('error', 'connection_closed', self._cb_error_connection_closed)
        self.server.register_callback('error', 'checkalive_failed', self._cb_error_checkalive_failed)

    def start(self):
        self.server.start()

    def halt(self, reason='halted'):
        self.scheduler.suspend()
        self.server.stop()
        self.client.api.server_state(self.server_id, ServerStates.STOPPED, reason)
        self.trigger_callback('tool', 'halted', reason)
        self.first_players_fetch = True
        for slot in dict(self.players):
            self.trigger_callback('player', 'disconnect', self.players[slot])
            del self.players[slot]

    def stop(self, reason='shutdown'):
        self.scheduler.suspend()
        self.server.stop()
        self.client.api.server_state(self.server_id, ServerStates.STOPPED, reason)
        self.trigger_callback('tool', 'stopped', reason)
        self.first_players_fetch = True
        for slot in dict(self.players):
            self.trigger_callback('player', 'disconnect', self.players[slot])
            del self.players[slot]

    def _cb_rcon_authenticated(self, *_):
        if not self.tasks_created:
            self.scheduler.add_task(self.server.keepalive, 0, 10)
            self.scheduler.add_task(self.server.request_playerlist, 30, 30)
            self.tasks_created = True
        else:
            self.scheduler.resume()
        self.client.api.server_state(self.server_id, ServerStates.ACTIVE)
        self.server.say_all('------- CFTools --------')
        self.server.say_all('Starting up on {}'.format(self.server_id))
        self.server.say_all('{} server, time: {} (acceleration: {}), max players: {}, map: {}'.format(
            'windows' if self.server_os == 'w' else 'linux',
            self.gametime, self.time_acceleration, self.max_players, self.gamemap))
        self.server.say_all('------- CFTools --------')
        self.reconnects = 0
        self.trigger_callback('tool', 'started')

    def _cb_rcon_authenticated_failed(self, *_):
        self.stop('rcon_authentication_failed')
        self.client.kill_worker(self.server_id)

    def _cb_error_connection_refused(self, data):
        self.client.api.server_state(self.server_id, ServerStates.STOPPED, reason='rcon_connection_refused')
        self.client.kill_worker(self.server_id, 'rcon_connection_refused')

    def _cb_error_connection_closed(self, data):
        self.halt('rcon_connection_closed')
        self.reconnects += 1
        if self.reconnects == 5:
            return self.server.trigger_callback('error', 'connection_refused')
        self.start()

    def _cb_rcon_keepalive_acknowledged(self, *_):
        self.checkalive_fails = 0

    def _cb_error_checkalive_failed(self, *_):
        self.checkalive_fails += 1
        self.trigger_callback('tool', 'error', '_cb_error_checkalive_failed ({}/2)'.format(self.checkalive_fails))
        if self.checkalive_fails >= 2:
            self.checkalive_fails = 0
            self.server.trigger_callback('error', 'connection_closed')

    def _cb_player_connected(self, player_data):
        player_data = {
            'slot': player_data[0],
            'name': player_data[1],
            'ip': player_data[2]
        }
        player_object = OmegaPlayer(self, player_data.get('slot'),
                                    player_data.get('name'),
                                    player_data.get('ip'))
        self.players[player_data.get('slot')] = player_object
        self.trigger_callback('player', 'connect', player_object)

    def _cb_player_disconnected(self, player_data):
        player_data = {'slot': player_data[0], 'name': player_data[1]}
        if player_data.get('slot') not in self.players:
            return
        self.trigger_callback('player', 'disconnect', self.players[player_data.get('slot')])
        del self.players[player_data.get('slot')]

    def _cb_player_guid(self, player_data):
        player_data = {
            'guid': player_data[0],
            'slot': player_data[1],
            'name': player_data[2]
        }
        if player_data.get('slot') not in self.players:
            return
        self.players[player_data.get('slot')].set_guid(player_data.get('guid'))
        if player_data.get('slot') in self.players:
            self.trigger_callback('player', 'guid', self.players[player_data.get('slot')])

    def _cb_player_list(self, raw_players):
        players = []
        raw_players = raw_players.split('\n')
        del raw_players[0:3]
        del raw_players[-1]
        for player in raw_players:
            lobby = True
            player_data = re.search(RCON_PLAYERLIST_REGEX_EXTRA.get('player_list_lobby'), player)
            if player_data == None:
                lobby = False
                player_data = re.search(RCON_PLAYERLIST_REGEX_EXTRA.get('player_list_ingame'), player)
            if player_data == None:
                continue
            else:
                player_data = player_data.groups()
            try:
                players.append({
                    'slot': player_data[0],
                    'ip': player_data[1].split(':')[0],
                    'ping': int(player_data[2]),
                    'guid': player_data[3],
                    'name': player_data[4],
                    'lobby': lobby
                })
            except:
                continue

        for player in players:
            if player.get('slot') not in self.players or player.get('guid') != self.players[player.get('slot')].guid:
                if self.first_players_fetch:
                    self._cb_player_connected((player.get('slot'), player.get('name'), player.get('ip')))
                self._cb_player_guid((player.get('guid'), player.get('slot'), player.get('name')))
            else:
                if player.get('ping') != self.players[player.get('slot')].ping:
                    self.players[player.get('slot')].ping = player.get('ping')
                    self.trigger_callback('player', 'ping_update', self.players[player.get('slot')])
        self.first_players_fetch = False

    def _fake_kick_event(self, player):  # Kick event for public hives, for printing reason
        kick_data = {'player': player, 'reason': player.reason}
        self.trigger_callback('player', 'kick', kick_data)
        del self.players[kick_data.get('player').slot]

    def _cb_player_kick(self, kick_data):
        kick_data = {
            'player': self.get_player_by_guid(kick_data[0]),
            'reason': kick_data[1]
        }
        self.trigger_callback('player', 'kick', kick_data)
        del self.players[kick_data.get('player').slot]

    def _cb_player_chat(self, chat_data):
        chat_data = {
            'destination': chat_data[0].lower(),
            'name': chat_data[1],
            'message': chat_data[2]
        }
        player = self.get_player_by_name(chat_data.get('name'))
        self.client.api.player_chat(player.guid, player.omega_id,
                                    chat_data.get('name'),
                                    chat_data.get('message'),
                                    chat_data.get('destination'),
                                    self.server_id)
        self.trigger_callback('player', 'chat', {
            'player': player,
            'message_data': chat_data
        })
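# The worker above relies on a Scheduler exposing add_task(func, delay, interval),
# suspend() and resume(). The class below is a minimal threading-based sketch of that
# interface, written only to make those calls concrete; it is an assumption about the
# real class, not its actual implementation.
import threading
import time


class SketchScheduler:
    def __init__(self):
        self._tasks = []        # each entry: [func, next_run_timestamp, interval]
        self._suspended = False
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()

    def add_task(self, func, delay, interval):
        # Schedule func to run after 'delay' seconds and then every 'interval' seconds.
        self._tasks.append([func, time.time() + delay, interval])

    def suspend(self):
        self._suspended = True

    def resume(self):
        self._suspended = False

    def _loop(self):
        while True:
            now = time.time()
            if not self._suspended:
                for task in self._tasks:
                    if now >= task[1]:
                        task[0]()
                        task[1] = now + task[2]
            time.sleep(0.5)


# e.g. SketchScheduler().add_task(server.keepalive, 0, 10) would call keepalive every 10 s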
def __init__(self, sh):
    """
    Initialization Routine for the module
    """
    # TO DO: Set the shortname differently (or wait until the module loader sets it during loading)
    self._shortname = self.__class__.__name__
    self._shortname = self._shortname.lower()

    self.logger = logging.getLogger(__name__)
    self._sh = sh
    self.shtime = Shtime.get_instance()
    self.logger.debug("Module '{}': Initializing".format(self._shortname))

    # get the parameters for the plugin (as defined in metadata plugin.yaml):
    self.logger.debug("Module '{}': Parameters = '{}'".format(self._shortname, dict(self._parameters)))
    try:
        self.broker_hostname = self._parameters['broker_host']
        self.broker_port = self._parameters['broker_port']
        self.broker_monitoring = self._parameters['broker_monitoring']
        self.qos = self._parameters['qos']
        self.last_will_topic = self._parameters['last_will_topic']
        self.last_will_payload = self._parameters['last_will_payload']
        self.birth_topic = self._parameters['birth_topic']
        self.birth_payload = self._parameters['birth_payload']
        self.bool_values = self._parameters['bool_values']
        # self.publish_items = self._parameters['publish_items']
        # self.items_topic_prefix = self._parameters['items_topic_prefix']
        self.username = self._parameters['user']
        self.password = self._parameters['password']
        # self.tls = self._parameters['tls']
        # self.ca_certs = self._parameters['ca_certs']
        # self.acl = self._parameters['acl'].lower()
    except KeyError as e:
        self.logger.critical("Module '{}': Inconsistent module (invalid metadata definition: {} not defined)".format(self._shortname, e))
        self._init_complete = False
        return

    # resolve broker name, if no ip address is specified
    try:
        self.broker_ip = socket.gethostbyname(self.broker_hostname)
    except Exception as e:
        self.logger.error("Error resolving '{}': {}".format(self.broker_hostname, e))
        self._init_complete = False
        return
    if self.broker_ip == self.broker_hostname:
        self.broker_hostname = ''

    # handle last_will and birth topic configuration
    if (self.last_will_topic != '') and (self.last_will_topic[-1] == '/'):
        self.last_will_topic = self.last_will_topic[:-1]
    if self.birth_topic == '':
        self.birth_topic = self.last_will_topic
    else:
        if self.birth_topic[-1] == '/':
            self.birth_topic = self.birth_topic[:-1]

    # if self.items_topic_prefix[-1] != '/':
    #     self.items_topic_prefix = self.items_topic_prefix + '/'

    if self.password == '':
        self.password = None

    # tls ...
    # ca_certs ...

    # _subscribed_topics is a datastructure to keep track of subscribed topics
    # and the needed additional information
    #  - who subscribed to the topic
    #  - kind of subscriber (logic, plugin, ...)
    #  - datatype of payload
    #
    # <topic1>:
    #     <subscriber1_name>:
    #         subsciber_type: 'logic'
    #         callback: 'logic1name'
    #         payload_type: 'str'
    #     <subscriber2_name>:
    #         subsciber_type: 'logic'
    #         callback: 'logic2name'
    #         payload_type: 'dict'
    # <topic2>:
    #     <subscriber3_name>:
    #         subsciber_type: 'plugin'
    #         callback: obj_callback3
    #         payload_type: 'str'
    #     <subscriber4_name>:
    #
    self._subscribed_topics_lock = threading.Lock()
    self._subscribed_topics = {}  # subscribed topics
    self.logicpayloadtypes = {}   # payload types for subscribed topics for triggering logics

    # ONLY used for multiinstance handling of plugins?
    # needed because self.set_attr_value() can only set but not add attributes
    # self.at_instance_name = self.get_instance_name()
    # if self.at_instance_name != '':
    #     self.at_instance_name = '@' + self.at_instance_name

    self.scheduler = Scheduler.get_instance()

    self._network_connected_to_broker = False
    self._connected = False
    self._connect_result = ''

    # tls ...
    # ca_certs ...

    if not self._connect_to_broker():
        # self._init_complete = False
        # return
        pass
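# A minimal, self-contained sketch of the _subscribed_topics registry described in the
# comments above (topic -> subscriber -> metadata). The helper function names and key
# names below are illustrative assumptions, not the module's real API.
import threading

_subscribed_topics = {}
_subscribed_topics_lock = threading.Lock()


def subscribe_topic(topic, subscriber_name, subscriber_type, callback, payload_type='str'):
    # Record who subscribed to a topic and how the payload should be delivered.
    with _subscribed_topics_lock:
        _subscribed_topics.setdefault(topic, {})[subscriber_name] = {
            'subscriber_type': subscriber_type,
            'callback': callback,
            'payload_type': payload_type,
        }


def dispatch(topic, payload):
    # Hand an incoming payload to every callback registered for the topic.
    with _subscribed_topics_lock:
        subscribers = dict(_subscribed_topics.get(topic, {}))
    for name, info in subscribers.items():
        info['callback'](topic, payload)


if __name__ == '__main__':
    subscribe_topic('shng/test', 'demo_plugin', 'plugin', lambda t, p: print(t, p))
    dispatch('shng/test', 'hello')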
def __init__(self, name, igh=None):
    Instrument.__init__(self, name, tags=['virtual'])

    self._igh = qkit.instruments.get(igh)

    self.add_parameter('timeout', type=float,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('idle_mintime', type=float,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('slow_mintime', type=float,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('disk_mintime', type=float,
                       flags=Instrument.FLAG_GETSET, units='sec')
    self.add_parameter('timeout_mode', type=bool, flags=Instrument.FLAG_GET)
    self.add_parameter('idle_mode', type=bool, flags=Instrument.FLAG_GET)
    self.add_parameter('plot_enable', type=bool, flags=Instrument.FLAG_GET)
    self.add_parameter('status', type=str, flags=Instrument.FLAG_GET)

    self.add_function('start')
    self.add_function('stop')
    self.add_function('plot_start')
    self.add_function('plot_stop')
    self.add_function('set_default_fast')
    self.add_function('set_default_slow')

    self._debug_counter = 0
    self._timeout = 10
    self._idle_mintime = 10
    self._slow_mintime = 300
    self._disk_mintime = 10
    self._slow_lasttime = 0
    self._disk_lasttime = 0
    self.plot_stop()

    self._dir = os.path.join(qkit.cfg.get('datadir'), 'Tlog')
    self._filebasename = 'temperature_log'
    self._this_month = None
    if not os.path.isdir(self._dir):
        os.makedirs(self._dir)

    self._last_hour = TimeBuffer(60 * 60, self._dir, 'last_hour.dat')
    self._last_12hour = TimeBuffer(60 * 60 * 12, self._dir, 'last_12hour.dat')

    self._task = Scheduler(self._run_all, self._timeout, self._idle_mintime,
                           timeout_mode=True, idle_mode=True)
    self._status = 'running'

    self.get_all()
class virtual_Tlogger(Instrument):
    def __init__(self, name, igh=None):
        Instrument.__init__(self, name, tags=['virtual'])

        self._igh = qkit.instruments.get(igh)

        self.add_parameter('timeout', type=float,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('idle_mintime', type=float,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('slow_mintime', type=float,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('disk_mintime', type=float,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('timeout_mode', type=bool, flags=Instrument.FLAG_GET)
        self.add_parameter('idle_mode', type=bool, flags=Instrument.FLAG_GET)
        self.add_parameter('plot_enable', type=bool, flags=Instrument.FLAG_GET)
        self.add_parameter('status', type=str, flags=Instrument.FLAG_GET)

        self.add_function('start')
        self.add_function('stop')
        self.add_function('plot_start')
        self.add_function('plot_stop')
        self.add_function('set_default_fast')
        self.add_function('set_default_slow')

        self._debug_counter = 0
        self._timeout = 10
        self._idle_mintime = 10
        self._slow_mintime = 300
        self._disk_mintime = 10
        self._slow_lasttime = 0
        self._disk_lasttime = 0
        self.plot_stop()

        self._dir = os.path.join(qkit.cfg.get('datadir'), 'Tlog')
        self._filebasename = 'temperature_log'
        self._this_month = None
        if not os.path.isdir(self._dir):
            os.makedirs(self._dir)

        self._last_hour = TimeBuffer(60 * 60, self._dir, 'last_hour.dat')
        self._last_12hour = TimeBuffer(60 * 60 * 12, self._dir, 'last_12hour.dat')

        self._task = Scheduler(self._run_all, self._timeout, self._idle_mintime,
                               timeout_mode=True, idle_mode=True)
        self._status = 'running'

        self.get_all()

    def get_all(self):
        self.get_timeout()
        self.get_idle_mintime()
        self.get_slow_mintime()
        self.get_disk_mintime()
        self.get_timeout_mode()
        self.get_idle_mode()
        self.get_plot_enable()
        self.get_status()

    def set_default_fast(self):
        self.set_timeout(10)
        self.set_idle_mintime(10)
        self.set_slow_mintime(300)
        self.set_disk_mintime(10)

    def set_default_slow(self):
        self.set_timeout(300)
        self.set_idle_mintime(300)
        self.set_slow_mintime(300)
        self.set_disk_mintime(300)

    def _do_set_timeout(self, to):
        self._task.set_timeout(to)

    def _do_get_timeout(self):
        return self._task.get_timeout()

    def _do_set_idle_mintime(self, imt):
        self._task.set_idle_mintime(imt)

    def _do_get_idle_mintime(self):
        return self._task.get_idle_mintime()

    def _do_set_slow_mintime(self, smt):
        self._slow_mintime = smt

    def _do_get_slow_mintime(self):
        return self._slow_mintime

    def _do_set_disk_mintime(self, dmt):
        self._disk_mintime = dmt

    def _do_get_disk_mintime(self):
        return self._disk_mintime

    def _do_get_timeout_mode(self):
        return self._task.get_timeout_mode()

    def _do_get_idle_mode(self):
        return self._task.get_idle_mode()

    def _do_get_status(self):
        return self._status

    def _do_get_plot_enable(self):
        return self._plot_enable

    def start_idle(self):
        self._task.set_idle_mode(True)
        self.get_idle_mode()

    def stop_idle(self):
        self._task.set_idle_mode(False)
        self.get_idle_mode()

    def start_timeout(self):
        self._task.set_timeout_mode(True)
        self.get_timeout_mode()

    def stop_timeout(self):
        self._task.set_timeout_mode(False)
        self.get_timeout_mode()

    def start(self):
        self._task.start()
        self._status = 'running'
        self.get_status()

    def stop(self):
        self._task.stop()
        self._status = 'stopped'
        self.get_status()

    def _get_all_sensors(self):
        try:
            self._temperature = self._igh.get_temp_mc()
        except:
            logging.error(__name__ + ': failed to retrieve temperature.')

    def _get_all_sensors_dummy(self):
        self._temperature = numpy.sin(time.time() / 60 / 10 * 2 * numpy.pi)

    def _run_all(self):
        self._debug_counter += 1
        now = time.time()

        # get temperature
        # self._get_all_sensors_dummy()
        self._get_all_sensors()

        # add last points to 'fast' buffer (last hour).
        # self.fast_time, turn off fast?
        self._last_hour.add([now, self._temperature])

        # add points to 'slow' buffer (last 24 hour) if last point was written more then self.slow_time ago.
        if (now - self._slow_lasttime) > self._slow_mintime:
            self._slow_lasttime = now
            self._last_12hour.add([now, self._temperature])

        # add points to diskfile if last write was more then self.disk_time ago
        if (now - self._disk_lasttime) > self._disk_mintime:
            self._disk_lasttime = now
            self.write_to_logfile()

        # update plot, if plotting is enabled.
        self.plot()

        # leave here for checking timing:
        # print 'time it took: %f' % (time.time() - now)

    def plot_start(self):
        self._plot_enable = True
        self.get_plot_enable()

    def plot_stop(self):
        self._plot_enable = False
        self.get_plot_enable()

    def plot(self):
        plot(self._last_hour.get(), name='Temperature 1 hr', clear=True)
        plot(self._last_12hour.get(), name='Temperature 12 hr', clear=True)

    def write_to_logfile(self):
        # TODO close file? probably not necessary
        now = time.time()
        now_tuple = time.localtime(now)
        this_month = now_tuple[0:1]
        if self._this_month is None:
            self._this_month = this_month
            file_prefix = time.strftime('%Y_%m', now_tuple)
            self.filename = '%s_%s.txt' % (file_prefix, self._filebasename)
            self.filepath = os.path.join(self._dir, self.filename)
            self.file = open(self.filepath, 'a')
        elif self._this_month != this_month:  # compare by value; roll over to a new log file
            self.file.close()
            self._this_month = this_month
            file_prefix = time.strftime('%Y_%m', now_tuple)
            self.filename = '%s_%s.txt' % (file_prefix, self._filebasename)
            self.filepath = os.path.join(self._dir, self.filename)
            self.file = open(self.filepath, 'a')
        self.file.write('%f\t%s\t%f\n' % (now, time.asctime(now_tuple), self._temperature))
        self.file.flush()
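# The logger above buffers readings in TimeBuffer(span_seconds, directory, filename)
# objects with add([timestamp, value]) and get(). A minimal in-memory sketch of that
# interface (persistence omitted; this is an assumption about the helper, not its
# actual implementation):
import time


class SketchTimeBuffer:
    def __init__(self, span_sec, directory=None, filename=None):
        self._span = span_sec
        self._points = []

    def add(self, point):
        # point is [timestamp, value]; keep only entries younger than the span.
        self._points.append(point)
        cutoff = time.time() - self._span
        self._points = [p for p in self._points if p[0] >= cutoff]

    def get(self):
        return list(self._points)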
import sys
from json import dumps

from flask import request, render_template, redirect, url_for, session
from flask_wtf.csrf import CSRFError

from config import *
from . import app, csrf, apscheduler
from lib.login import login_check
from lib import common
from lib.scheduler import Scheduler
from lib.dependency import Dependency

reload(sys)
sys.setdefaultencoding('utf8')

scheduler = Scheduler()
dependency = Dependency()


@app.template_filter(name='unicode2str')
def unicode2str(data):
    convert = []
    for i in data:
        convert.append(str(i))
    return convert


# Image details
@app.route('/images_details')
@app.route('/tmp_link')
def images_details():
def __init__(self):
    self.scheduler = Scheduler.get_instance()
    self.logger.info("BackendSchedulers __init__ self.scheduler = {}".format(self.scheduler))
from lib.libvirt_utils import LibvirtUtils, VirtDupXML, SnapshotManager
from lib.scheduler import Scheduler
from lib.config import Config
from pprint import pprint
import libvirt
from lib.qemu_utils import img_info
import logging

# config_path = "/etc/virt-dup.yml"
config_path = "/home/spencer/git-repos/virt-dup/virt-dup.yml"

config = Config(config_path)
Scheduler(config)

"""
lv = LibvirtUtils(config)
domain = lv.domain_search('ubuntu18.04')
xml = VirtDupXML(config, domain)
# xml.delete_all_jobs()
# xml.add_job()

for job in xml.loaded_jobs:
    # pass
    sm = SnapshotManager(config, xml, job)
    sm.stage_image(staging=False)
    # sm.block_commit()
"""
class virtual_Tlogger(Instrument):
    def __init__(self, name, igh=None):
        Instrument.__init__(self, name, tags=['virtual'])

        self._instruments = instruments.get_instruments()
        self._igh = self._instruments.get(igh)

        self.add_parameter('timeout', type=types.FloatType,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('idle_mintime', type=types.FloatType,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('slow_mintime', type=types.FloatType,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('disk_mintime', type=types.FloatType,
                           flags=Instrument.FLAG_GETSET, units='sec')
        self.add_parameter('timeout_mode', type=types.BooleanType, flags=Instrument.FLAG_GET)
        self.add_parameter('idle_mode', type=types.BooleanType, flags=Instrument.FLAG_GET)
        self.add_parameter('plot_enable', type=types.BooleanType, flags=Instrument.FLAG_GET)
        self.add_parameter('status', type=types.StringType, flags=Instrument.FLAG_GET)

        self.add_function('start')
        self.add_function('stop')
        self.add_function('plot_start')
        self.add_function('plot_stop')
        self.add_function('set_default_fast')
        self.add_function('set_default_slow')

        self._debug_counter = 0
        self._timeout = 10
        self._idle_mintime = 10
        self._slow_mintime = 300
        self._disk_mintime = 10
        self._slow_lasttime = 0
        self._disk_lasttime = 0
        self.plot_stop()

        self._dir = os.path.join(qt.config.get('datadir'), 'Tlog')
        self._filebasename = 'temperature_log'
        self._this_month = None
        if not os.path.isdir(self._dir):
            os.makedirs(self._dir)

        self._last_hour = TimeBuffer(60 * 60, self._dir, 'last_hour.dat')
        self._last_12hour = TimeBuffer(60 * 60 * 12, self._dir, 'last_12hour.dat')

        self._task = Scheduler(self._run_all, self._timeout, self._idle_mintime,
                               timeout_mode=True, idle_mode=True)
        self._status = 'running'

        self.get_all()

    def get_all(self):
        self.get_timeout()
        self.get_idle_mintime()
        self.get_slow_mintime()
        self.get_disk_mintime()
        self.get_timeout_mode()
        self.get_idle_mode()
        self.get_plot_enable()
        self.get_status()

    def set_default_fast(self):
        self.set_timeout(10)
        self.set_idle_mintime(10)
        self.set_slow_mintime(300)
        self.set_disk_mintime(10)

    def set_default_slow(self):
        self.set_timeout(300)
        self.set_idle_mintime(300)
        self.set_slow_mintime(300)
        self.set_disk_mintime(300)

    def _do_set_timeout(self, to):
        self._task.set_timeout(to)

    def _do_get_timeout(self):
        return self._task.get_timeout()

    def _do_set_idle_mintime(self, imt):
        self._task.set_idle_mintime(imt)

    def _do_get_idle_mintime(self):
        return self._task.get_idle_mintime()

    def _do_set_slow_mintime(self, smt):
        self._slow_mintime = smt

    def _do_get_slow_mintime(self):
        return self._slow_mintime

    def _do_set_disk_mintime(self, dmt):
        self._disk_mintime = dmt

    def _do_get_disk_mintime(self):
        return self._disk_mintime

    def _do_get_timeout_mode(self):
        return self._task.get_timeout_mode()

    def _do_get_idle_mode(self):
        return self._task.get_idle_mode()

    def _do_get_status(self):
        return self._status

    def _do_get_plot_enable(self):
        return self._plot_enable

    def start_idle(self):
        self._task.set_idle_mode(True)
        self.get_idle_mode()

    def stop_idle(self):
        self._task.set_idle_mode(False)
        self.get_idle_mode()

    def start_timeout(self):
        self._task.set_timeout_mode(True)
        self.get_timeout_mode()

    def stop_timeout(self):
        self._task.set_timeout_mode(False)
        self.get_timeout_mode()

    def start(self):
        self._task.start()
        self._status = 'running'
        self.get_status()

    def stop(self):
        self._task.stop()
        self._status = 'stopped'
        self.get_status()

    def _get_all_sensors(self):
        try:
            self._temperature = self._igh.get_temp_mc()
        except:
            logging.error(__name__ + ': failed to retrieve temperature.')

    def _get_all_sensors_dummy(self):
        self._temperature = numpy.sin(time.time() / 60 / 10 * 2 * numpy.pi)

    def _run_all(self):
        self._debug_counter += 1
        now = time.time()

        # get temperature
        # self._get_all_sensors_dummy()
        self._get_all_sensors()

        # add last points to 'fast' buffer (last hour).
        # self.fast_time, turn off fast?
        self._last_hour.add([now, self._temperature])

        # add points to 'slow' buffer (last 24 hour) if last point was written more then self.slow_time ago.
        if (now - self._slow_lasttime) > self._slow_mintime:
            self._slow_lasttime = now
            self._last_12hour.add([now, self._temperature])

        # add points to diskfile if last write was more then self.disk_time ago
        if (now - self._disk_lasttime) > self._disk_mintime:
            self._disk_lasttime = now
            self.write_to_logfile()

        # update plot, if plotting is enabled.
        self.plot()

        # leave here for checking timing:
        # print 'time it took: %f' % (time.time() - now)

    def plot_start(self):
        self._plot_enable = True
        self.get_plot_enable()

    def plot_stop(self):
        self._plot_enable = False
        self.get_plot_enable()

    def plot(self):
        plot(self._last_hour.get(), name='Temperature 1 hr', clear=True)
        plot(self._last_12hour.get(), name='Temperature 12 hr', clear=True)

    def write_to_logfile(self):
        # TODO close file? probably not necessary
        now = time.time()
        now_tuple = time.localtime(now)
        this_month = now_tuple[0:1]
        if self._this_month is None:
            self._this_month = this_month
            file_prefix = time.strftime('%Y_%m', now_tuple)
            self.filename = '%s_%s.txt' % (file_prefix, self._filebasename)
            self.filepath = os.path.join(self._dir, self.filename)
            self.file = file(self.filepath, 'a')
        elif self._this_month != this_month:  # compare by value; roll over to a new log file
            self.file.close()
            self._this_month = this_month
            file_prefix = time.strftime('%Y_%m', now_tuple)
            self.filename = '%s_%s.txt' % (file_prefix, self._filebasename)
            self.filepath = os.path.join(self._dir, self.filename)
            self.file = file(self.filepath, 'a')
        self.file.write('%f\t%s\t%f\n' % (now, time.asctime(now_tuple), self._temperature))
        self.file.flush()
def run(self):
    self.scheduler = Scheduler()
    self.scheduler.start()