def try_very_bad_load(cls, mod_dir):
    """Attempt a legacy ("very old") style module load from *mod_dir*.

    Temporarily prepends *mod_dir* to ``sys.path`` and imports the bare
    top-level ``module`` (i.e. the ``module.py`` file living in that
    directory) through :mod:`importlib`.  To limit interpreter pollution,
    the path entry is removed afterwards and any ``module`` entry that was
    already present in ``sys.modules`` beforehand is put back in place.

    :param mod_dir: module directory to load
    :type mod_dir: str
    :return: the imported module on success, None on failure
    """
    # Remember any top-level 'module' already imported so it can be
    # restored once we are done (whether our import worked or not).
    saved_module = sys.modules.get('module')
    logger.warning(
        "Trying to load %r as an (very-)old-style alignak \"module\" : "
        "by adding its path to sys.path. This can be (very) bad in case "
        "of name conflicts within the files part of %s and others "
        "top-level python modules; I'll try to limit that.",
        # by removing the mod_dir from sys.path after while.
        mod_dir, mod_dir
    )
    sys.path.insert(0, mod_dir)
    try:
        return importlib.import_module('module')
    except Exception as err:
        logger.exception("Could not import bare 'module.py' from %s : %s",
                         mod_dir, err)
        return None
    finally:
        # Undo our sys.path tweak, then restore any shadowed 'module'.
        sys.path.remove(mod_dir)
        if saved_module is not None:
            sys.modules['module'] = saved_module
def _thread_run(self):
    """Worker-thread main loop: watch for object updates and push them to mongo.

    Runs until ``self._stop_requested`` becomes true.  The mongo connection
    is established lazily and re-established after any failure; pending
    object updates are fetched with :meth:`test_and_get_objects_updates`
    and applied with :meth:`do_updates`.

    :return: None
    """
    con = None
    db = None  # always (re)bound together with a fresh connection
    while not self._stop_requested:
        if con is None:
            try:
                con = self._connect_to_mongo()
                db = con[self._db_name]
                # Cheap round-trip to make sure the connection is usable.
                db.collection_names()
            except PyMongoError as err:
                logger.error("Could not connect to mongo: %s", err)
                # BUGFIX: 'con' may already be bound here (e.g. the
                # collection_names() health check failed after a
                # successful connect); reset it so the next iteration
                # reconnects from scratch instead of proceeding with a
                # connection that just failed.
                con = None
                time.sleep(1)
                continue
        objects = self.test_and_get_objects_updates()
        if not objects:
            time.sleep(1)
            continue
        # as we don't use any lock around _objects_updated,
        # this little sleep should ensure that no more threads
        # will be able to use the previous self._objects_updated
        # stored locally here in 'objects'.
        time.sleep(0.1)
        try:
            self.do_updates(db, objects)
        except Exception as err:
            logger.exception("Fatal error updating objects in mongo: %s", err)
            # Drop the connection so it gets rebuilt next iteration.
            con = None
def do_insert(self, arbiter):
    """Insert the arbiter objects into mongo.

    Opens a mongo connection and delegates the actual work to
    :meth:`_do_insert`.  Any error is considered fatal: it is logged and
    the process exits (deliberate devel/beta behavior).

    :param arbiter: the arbiter holding the objects to insert
    :return: None
    """
    try:
        with self._connect_to_mongo() as conn:
            self._do_insert(conn, arbiter)
    except Exception as err:
        logger.exception("I got a fatal error: %s", err)
        # BUGFIX: the adjacent literals are concatenated into ONE message;
        # the original was missing the space after the comma
        # ("...for now,please open...").
        sys.exit("I'm in devel/beta mode and I prefer to exit for now, "
                 "please open a ticket with this exception details, thx :)")
def do_insert(self, arbiter):
    """Insert the arbiter objects into mongo.

    Opens a mongo connection and delegates the actual work to
    :meth:`_do_insert`.  Any error is considered fatal: it is logged and
    the process exits (deliberate devel/beta behavior).

    :param arbiter: the arbiter holding the objects to insert
    :return: None
    """
    try:
        with self._connect_to_mongo() as conn:
            self._do_insert(conn, arbiter)
    except Exception as err:
        logger.exception("I got a fatal error: %s", err)
        # BUGFIX: the adjacent literals are concatenated into ONE message;
        # the original was missing the space after the comma
        # ("...for now,please open...").
        sys.exit(
            "I'm in devel/beta mode and I prefer to exit for now, "
            "please open a ticket with this exception details, thx :)")
def do_manage_returns(self):
    """Manage the checks and then send a HTTP request to schedulers
    (POST /put_results)

    REF: doc/alignak-action-queues.png (6)

    :return: None
    """
    # For all schedulers, we check for wait_homerun
    # and we send back results
    for sched_id, sched in self.schedulers.iteritems():
        if not sched['active']:
            continue
        results = sched['wait_homerun']
        # NB: it's **mostly** safe for us to not use some lock around
        # this 'results' / sched['wait_homerun'].
        # Because it can only be modified (for adding new values) by the
        # same thread running this function (that is the main satellite
        # thread), and this occurs exactly in self.manage_action_return().
        # Another possibility is for the sched['wait_homerun'] to be
        # cleared within/by :
        # ISchedulers.get_returns() -> Satelitte.get_return_for_passive()
        # This can so happen in an (http) client thread.
        if not results:
            # BUGFIX: was 'return', which aborted the whole loop and
            # skipped delivery for every remaining scheduler.
            continue
        # So, at worst, some results would be received twice on the
        # scheduler level, which shouldn't be a problem given they are
        # indexed by their "action_id".
        send_ok = False
        try:
            con = sched.get('con')
            if con is None:  # None = not initialized
                con = self.pynag_con_init(sched_id)
            if con:
                con.post('put_results', {'results': results.values()})
                send_ok = True
        except HTTPEXCEPTIONS as err:
            logger.error('Could not send results to scheduler %s : %s',
                         sched['name'], err)
        except Exception as err:
            logger.exception(
                "Unhandled exception trying to send results "
                "to scheduler %s: %s", sched['name'], err)
            raise
        finally:
            if send_ok:
                results.clear()
            else:  # if - and only if - send was not ok,
                # then "de-init" the sched connection:
                sched['con'] = None
def do_manage_returns(self):
    """Manage the checks and then send a HTTP request to schedulers
    (POST /put_results)

    REF: doc/alignak-action-queues.png (6)

    :return: None
    """
    # For all schedulers, we check for wait_homerun
    # and we send back results
    for sched_id, sched in self.schedulers.iteritems():
        if not sched['active']:
            continue
        results = sched['wait_homerun']
        # NB: it's **mostly** safe for us to not use some lock around
        # this 'results' / sched['wait_homerun'].
        # Because it can only be modified (for adding new values) by the
        # same thread running this function (that is the main satellite
        # thread), and this occurs exactly in self.manage_action_return().
        # Another possibility is for the sched['wait_homerun'] to be
        # cleared within/by :
        # ISchedulers.get_returns() -> Satelitte.get_return_for_passive()
        # This can so happen in an (http) client thread.
        if not results:
            # BUGFIX: was 'return', which aborted the whole loop and
            # skipped delivery for every remaining scheduler.
            continue
        # So, at worst, some results would be received twice on the
        # scheduler level, which shouldn't be a problem given they are
        # indexed by their "action_id".
        send_ok = False
        try:
            con = sched.get('con')
            if con is None:  # None = not initialized
                con = self.pynag_con_init(sched_id)
            if con:
                con.post('put_results', {'results': results.values()})
                send_ok = True
        except HTTPEXCEPTIONS as err:
            logger.error('Could not send results to scheduler %s : %s',
                         sched['name'], err)
        except Exception as err:
            logger.exception("Unhandled exception trying to send results "
                             "to scheduler %s: %s", sched['name'], err)
            raise
        finally:
            if send_ok:
                results.clear()
            else:  # if - and only if - send was not ok,
                # then "de-init" the sched connection:
                sched['con'] = None