def _search_artist(self, provider, known):
    """Search *provider* (index: LASTFM or BRAINZ) for an artist named *known*.

    Reviews the top few results and returns the first wrapper object
    (LastArtist/BrainzArtist) whose name is close enough to *known*;
    returns None on search error or when no result is close enough.
    """
    logger.info('In _search_artist(provider={}, {})'.format(provider, known))
    RESULTS_TO_REVIEW = 3
    # provider doubles as a list index: 0 -> last.fm, 1 -> musicbrainz
    search = [lambda: self.lastfm.search_for_artist(known).get_next_page(),
              lambda: brainz.search_artists(known)['artist-list']][provider]
    Artist = [LastArtist, BrainzArtist][provider]
    output = None
    try:
        output = search()
    except Exception as exc:
        logger.critical('Exception in search')
        logger.exception(exc)
        return None
    if output:
        logger.debug('got output')
        # only look at the first RESULTS_TO_REVIEW entries of output
        for i, result in zip(range(RESULTS_TO_REVIEW), output):
            artist = Artist(result)
            logger.info('Got artist result ' + str(artist.name))
            # diff() is a distance: lower means more similar; accept on a
            # close name match, or (musicbrainz only) on a close alias match
            if (diff(artist.name, known) < 0.3
                    or (provider == BRAINZ and 'alias-list' in result
                        and any(diff(alist, known) < 0.25
                                for alist in result['alias-list']))):
                return artist
            else:
                logger.info(artist.name + ' differs from ' + known)
    else:
        logger.debug('no output')
    return None
def play(self):
    '''
    Starts the game, executes master game program and updates
    game_controller, then finishes the game and kills all the bots.

    Returns the game controller; bots are killed in all cases via the
    finally clause.
    '''
    try:
        self._create_bots()
        start_time = time.time()
        #game_master = config.GameMaster(self._game_controller,
        #                                self._start_state)
        game_master = config.GameMaster(self, self._start_state)
        while not self._game_controller.is_finished:
            # tick on a deep copy so the game master cannot mutate the
            # authoritative last jury state directly
            copied_js =\
                copy.deepcopy(self._game_controller.jury_states[-1])
            try:
                game_master.tick(copied_js)
            except:
                # deliberately broad: any failure in the master aborts the
                # game; bots are killed before the exception is re-raised
                logger.critical('game master was raised an '
                                'unhandled exception, aborting')
                self._kill_bots()
                logger.critical('re-raising game master\'s exception')
                raise
        end_time = time.time()
        logger.info('time spent on the game: %f sec',
                    end_time - start_time)
        return self._game_controller
    finally:
        self._kill_bots()
def get_java_cmd(self):
    """Build the complete JVM startup command line as an argument list.

    Returns the list, or None when the startup flavour cannot be
    determined from the configuration.
    """
    cmd = []
    # honour an explicitly configured java binary, default to PATH lookup
    cmd.append(self._conf['m2ee'].get('javabin', 'java'))
    if 'javaopts' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['javaopts'], list):
            cmd.extend(self._conf['m2ee']['javaopts'])
        else:
            logger.warn("javaopts option in m2ee section in configuration "
                        "is not a list")
    if self.runtime_version >= 7:
        # Mendix >= 7 ships a launcher jar; no explicit classpath needed
        cmd.extend([
            '-jar',
            os.path.join(self._runtime_path,
                         'runtime/launcher/runtimelauncher.jar'),
            self.get_app_base(),
        ])
    elif self._classpath:
        cmd.extend(['-cp', self._classpath])
        if self.runtime_version >= 5:
            # felix (OSGi) configuration location, Mendix 5/6 only
            cmd.append('-Dfelix.config.properties=file:%s'
                       % self.get_felix_config_file())
        cmd.append(self._get_appcontainer_mainclass())
    elif self._appcontainer_version:
        cmd.extend(['-jar', self._appcontainer_jar])
    else:
        logger.critical("Unable to determine JVM startup parameters.")
        return None
    return cmd
def subscriber_services(mode, user_name, acct_session_id, services, client):
    """Activate/Deactivate subscriber services

    :param mode: 'activate' or 'deactivate'
    :param user_name: Subscriber User_Name
    :param acct_session_id: Subscriber Session ID
    :param services: Services required for activation or deactivation
    :param client: Basic RADIUS client
    :type mode: string
    :type user_name: string
    :type acct_session_id: string
    :type services: list
    :type client: pyrad.Client
    :return: True if every service got a CoA-ACK, False otherwise
    :rtype: bool
    """
    logger.debug("%s:%s for user %s@%s[%s]", mode, str(services), user_name,
                 client.server, acct_session_id)
    result = True
    for svc in services:
        # one CoA request per service; Cisco AV-pairs carry the command
        # and the service name
        req = client.CreateCoAPacket(
            User_Name=user_name,
            Acct_Session_Id=acct_session_id,
            Cisco_AVPair=["subscriber:command=" + mode + "-service"]
                         + ["subscriber:service-name=" + svc])
        add_message_authenticator(req)
        try:
            reply = client.SendPacket(req)
        except:
            logger.critical("NAS:%s CoA %s-service %s error!!!",
                            client.server, mode, svc)
            return False
        # every service must be ACKed for an overall success
        result = result and reply.code == packet.CoAACK
    return result
def get_java_cmd(self):
    """Build the complete JVM startup command line as an argument list.

    Returns the list, or None when the startup flavour cannot be
    determined from the configuration.
    """
    # honour an explicitly configured java binary, falling back to the
    # 'java' found on PATH -- consistent with the other get_java_cmd
    # variants in this codebase, which read m2ee/javabin
    cmd = [self._conf['m2ee'].get('javabin', 'java')]
    if 'javaopts' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['javaopts'], list):
            cmd.extend(self._conf['m2ee']['javaopts'])
        else:
            logger.warn("javaopts option in m2ee section in configuration "
                        "is not a list")
    if self._classpath:
        cmd.extend([
            '-cp',
            self._classpath,
            self._get_appcontainer_mainclass()
        ])
    elif self._appcontainer_version:
        cmd.extend(['-jar', self._appcontainer_jar])
    else:
        logger.critical("Unable to determine JVM startup parameters.")
        return None
    logger.trace("Command line to be used when starting the JVM: %s" %
                 ' '.join(cmd))
    return cmd
def _check_runtime_config(self):
    """Sanity-check runtime-related configuration and fill in defaults.

    Warns about missing m2ee options, exits when app_base does not
    exist, and defaults model_upload_path.
    """
    self._run_from_source = self._conf.get(
        'mxnode', {}).get('run_from_source', False)

    if (not self._run_from_source
            or self._run_from_source == 'appcontainer'):
        # ensure mxjar_repo exists and is a list; multiple locations are
        # allowed for searching. The original code only warned when the
        # option was missing and then crashed with a KeyError on the
        # subsequent subscript -- default to an empty list instead.
        if not self._conf.get('mxnode', {}).get('mxjar_repo', None):
            logger.warn("mxnode/mxjar_repo is not specified!")
            self._conf.setdefault('mxnode', {})['mxjar_repo'] = []
        elif not type(self._conf.get('mxnode', {})['mxjar_repo']) == list:
            self._conf['mxnode']['mxjar_repo'] = [
                self._conf['mxnode']['mxjar_repo']]

    # m2ee
    for option in ['app_name', 'app_base', 'runtime_port']:
        if not self._conf['m2ee'].get(option, None):
            logger.warn("Option %s in configuration section m2ee is not "
                        "defined!" % option)

    # check some locations for existance and permissions
    basepath = self._conf['m2ee']['app_base']
    if not os.path.exists(basepath):
        logger.critical("Application base directory %s does not exist!" %
                        basepath)
        sys.exit(1)

    # model_upload_path
    if 'model_upload_path' not in self._conf['m2ee']:
        self._conf['m2ee']['model_upload_path'] = os.path.join(
            self._conf['m2ee']['app_base'], 'data', 'model-upload')
    if not os.path.isdir(self._conf['m2ee']['model_upload_path']):
        logger.warn("Model upload path %s is not a directory" %
                    self._conf['m2ee']['model_upload_path'])
def _check_appcontainer_config(self):
    """Sanity-check appcontainer-related configuration.

    Exits the process on fatal configuration errors; normalizes a few
    options (admin_pass to str, listen addresses default to "").
    """
    # did we load any configuration at all?
    if not self._conf:
        logger.critical("No configuration present. Please put a m2ee.yaml "
                        "configuration file at the default location "
                        "~/.m2ee/m2ee.yaml or specify an alternate "
                        "configuration file using the -c option.")
        sys.exit(1)

    # TODO: better exceptions
    self._run_from_source = self._conf.get(
        'mxnode', {}).get('run_from_source', False)

    # mxnode
    if self._run_from_source:
        if not self._conf['mxnode'].get('source_workspace', None):
            logger.critical("Run from source was selected, but "
                            "source_workspace is not specified!")
            sys.exit(1)
        if not self._conf['mxnode'].get('source_projects', None):
            logger.critical("Run from source was selected, but "
                            "source_projects is not specified!")
            sys.exit(1)

    # m2ee
    for option in ['app_base', 'admin_port', 'admin_pass']:
        if not self._conf['m2ee'].get(option, None):
            logger.critical("Option %s in configuration section m2ee is "
                            "not defined!" % option)
            sys.exit(1)

    # force admin_pass to a string, prevent TypeError when base64-ing it
    # before sending to m2ee api
    self._conf['m2ee']['admin_pass'] = str(
        self._conf['m2ee']['admin_pass'])

    # Mendix >= 4.3: admin and runtime port only bind to localhost by
    # default
    self._conf['m2ee']['admin_listen_addresses'] = (
        self._conf['m2ee'].get('admin_listen_addresses', ""))
    self._conf['m2ee']['runtime_listen_addresses'] = (
        self._conf['m2ee'].get('runtime_listen_addresses', ""))

    # check admin_pass 1 or password... refuse to accept when users don't
    # change default passwords
    if (self._conf['m2ee']['admin_pass'] == '1' or
            self._conf['m2ee']['admin_pass'] == 'password'):
        logger.critical("Using admin_pass '1' or 'password' is not "
                        "allowed. Please put a long, random password into "
                        "the admin_pass configuration option. At least "
                        "change the default!")
        sys.exit(1)

    # database_dump_path
    if 'database_dump_path' not in self._conf['m2ee']:
        self._conf['m2ee']['database_dump_path'] = os.path.join(
            self._conf['m2ee']['app_base'], 'data', 'database')
    if not os.path.isdir(self._conf['m2ee']['database_dump_path']):
        logger.warn("Database dump path %s is not a directory" %
                    self._conf['m2ee']['database_dump_path'])
def get_java_cmd(self):
    """Build the complete JVM startup command line as an argument list.

    Returns the list, or None when the startup flavour cannot be
    determined from the configuration.
    """
    cmd = []
    # honour an explicitly configured java binary, default to PATH lookup
    cmd.append(self._conf['m2ee'].get('javabin', 'java'))
    if 'javaopts' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['javaopts'], list):
            cmd.extend(self._conf['m2ee']['javaopts'])
        else:
            logger.warn("javaopts option in m2ee section in configuration "
                        "is not a list")
    if self._classpath:
        cmd.extend(['-cp', self._classpath])
        if self.runtime_version >= 5:
            # felix (OSGi) configuration location, Mendix >= 5 only
            cmd.append('-Dfelix.config.properties=file:%s' %
                       self.get_felix_config_file())
        cmd.append(self._get_appcontainer_mainclass())
    elif self._appcontainer_version:
        cmd.extend(['-jar', self._appcontainer_jar])
    else:
        logger.critical("Unable to determine JVM startup parameters.")
        return None
    return cmd
def set_speed(user_name, new_speed, tariff):
    """Set new radgroup for user.

    :param user_name: User name to change radgroup
    :param new_speed: New user speed
    :param tariff: User tariff
    :type user_name: string
    :type new_speed: string
    :type tariff: integer
    :return: True or False
    :rtype: bool
    """
    logger.debug("set speed %s:%s for user %s", new_speed, tariff, user_name)
    with SessionPool.acquire() as con:
        cur = con.cursor()
        # OUT parameters of the stored procedure
        code = cur.var(cx_Oracle.NUMBER)
        message = cur.var(cx_Oracle.STRING)
        try:
            cur.callproc('RADIUS.CHANGE_USER_SPEED',
                         (user_name, new_speed, tariff, code, message))
        except cx_Oracle.DatabaseError:
            # narrowed from a bare except: only database failures are
            # expected here; anything else should propagate
            logger.critical("database error!!!")
            return False
        rcode = code.getvalue()
        if rcode != 0:
            logger.error("can't set speed %s:%s", rcode, message.getvalue())
            return False
    return True
def __init__(self, load_default_files=True, yaml_files=None, config=None):
    """Load, merge and sanity-check the m2ee configuration.

    :param load_default_files: also read the default yaml locations
    :param yaml_files: extra yaml files to merge in
    :param config: dict merged on top of everything read from yaml
    """
    _yaml_files = []
    if load_default_files:
        _yaml_files.extend(find_yaml_files())
    if yaml_files:
        _yaml_files.extend(yaml_files)
    self._mtimes, self._conf = read_yaml_files(_yaml_files)

    if config:
        self._conf = merge_config(self._conf, config)

    # flips to False whenever a pre-flight check below fails
    self._all_systems_are_go = True

    self._check_appcontainer_config()
    self._check_runtime_config()

    self._conf['mxruntime'].setdefault(
        'BasePath', self._conf['m2ee']['app_base'])
    self._conf['mxruntime'].setdefault('DTAPMode', 'P')

    self.fix_permissions()

    self._appcontainer_version = self._conf['m2ee'].get(
        'appcontainer_version', None)

    # >= 3.0: application information (e.g. runtime version)
    # if this file does not exist (i.e. < 3.0) try_load_json returns {}
    self._model_metadata = self._try_load_json(
        os.path.join(self._conf['m2ee']['app_base'],
                     'model', 'metadata.json'))

    self.runtime_version = self._lookup_runtime_version()

    self._conf['mxruntime'] = self._merge_microflow_constants()

    # locate the Mendix runtime distribution unless running from source
    self._runtime_path = None
    if (not self._run_from_source
            or self._run_from_source == 'appcontainer'):
        if self.runtime_version is None:
            logger.critical("Unable to look up mendix runtime files "
                            "because product version is unknown.")
            self._all_systems_are_go = False
        else:
            self._runtime_path = self.lookup_in_mxjar_repo(
                str(self.runtime_version))
            if self._runtime_path is None:
                logger.error("Mendix Runtime not found for version %s" %
                             str(self.runtime_version))
                self._all_systems_are_go = False

    self._setup_classpath()

    if self._runtime_path and not 'RuntimePath' in self._conf['mxruntime']:
        runtimePath = os.path.join(self._runtime_path, 'runtime')
        logger.debug("Setting RuntimePath runtime config option to %s" %
                     runtimePath)
        self._conf['mxruntime']['RuntimePath'] = runtimePath
def device_names(mac_list, wifi_devices):
    """Map MAC addresses to the names of known devices.

    Unknown MACs are logged at critical level and skipped; the result is
    the set of names of all recognized devices.
    """
    recognized = set()
    for address in mac_list:
        entry = wifi_devices.get(address.upper())
        if entry:
            recognized.add(entry['name'])
        else:
            logger.critical(
                "Unknown device MAC: {} has accessed network".format(address))
    return recognized
def create_process(self):
    """
    Starts bot's process.

    Raises ExecuteError when the player command cannot be executed.
    """
    logger.info("executing '%s'", self._player_command)
    try:
        # naive whitespace split: the command must not rely on shell quoting
        self._process = psutil.Popen(self._player_command.split(),
                                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError:
        logger.critical("executing of '%s' failed: invalid command",
                        self._player_command)
        raise ExecuteError
    logger.info("executing successful")
def save_basket(self, basket_dict):
    """Persist every drink of every transaction into the basket table.

    :param basket_dict: mapping of transaction id -> iterable of drink ids
    """
    connection = self.get_connection()
    sql_query = "INSERT INTO basket (trans_id, drink_id) VALUES (%s, %s)"
    for trans_id, drink_order in basket_dict.items():
        for drink in drink_order:
            args = (trans_id, drink)
            try:
                cursor = self.update_sql(sql_query, args, connection)
            except Exception as error:
                logger.critical(f"DOOP! {error}")
                # the original fell through here and crashed with an
                # unbound 'cursor' (or closed a stale one); skip the
                # commit/close for a failed insert instead
                continue
            connection.commit()
            cursor.close()
def fix_permissions(self):
    """Enforce expected permissions on the app_base subdirectories.

    Exits the process when a required directory is missing.
    """
    basepath = self._conf['m2ee']['app_base']
    # mode values are python2 octal literals
    for directory, mode in {"model": 0700, "web": 0755,
                            "data": 0700}.iteritems():
        fullpath = os.path.join(basepath, directory)
        if not os.path.exists(fullpath):
            logger.critical("Directory %s does not exist!" % fullpath)
            sys.exit(1)
        # TODO: detect permissions and tell user if changing is needed
        os.chmod(fullpath, mode)
async def track_door_sensor():
    """Continuously consume door-sensor events (async generator, PEP 525).

    Logs every state change; when the system is armed at the moment of
    the change, additionally schedules a notification.
    """
    async for is_open in read_door_sensor():
        state = "open" if is_open else "closed"
        message = "Door state changed to: " + state
        logger.warning(message)
        now = dt.now()
        if is_system_armed(now):
            logger.critical("System is armed and door state changed!")
            timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
            schedule_notification(message + " at " + timestamp)
def writeMessage():
    """Read lines from stdin until end of input; return them joined by '\\n'.

    Input ends on EOF (Ctrl-D / end of a pipe) or Ctrl-C. The original
    version only handled KeyboardInterrupt and crashed with an unhandled
    EOFError when stdin was exhausted.
    """
    lines = []
    try:
        while True:
            lines.append(input())
    except EOFError:
        # normal end of input, nothing to log
        pass
    except KeyboardInterrupt as e:
        logger.critical(e)
    return '\n'.join(lines)
def fix_permissions(self):
    """Enforce expected permissions on the app_base subdirectories.

    Exits the process when a required directory is missing.
    """
    basepath = self._conf['m2ee']['app_base']
    # mode values are python2 octal literals
    for directory, mode in {
        "model": 0700,
        "web": 0755,
        "data": 0700
    }.iteritems():
        fullpath = os.path.join(basepath, directory)
        if not os.path.exists(fullpath):
            logger.critical("Directory %s does not exist!" % fullpath)
            sys.exit(1)
        # TODO: detect permissions and tell user if changing is needed
        os.chmod(fullpath, mode)
def get_connection(self):
    """Open a pymysql connection using the DB_*2 environment variables.

    Returns the connection, or None (implicitly) when connecting fails.
    """
    try:
        # keyword arguments: positional connect() arguments are
        # deprecated in pymysql and were removed in pymysql 1.0
        db_connection = pymysql.connect(
            host=environ.get("DB_HOST2"),
            user=environ.get("DB_USER2"),
            password=environ.get("DB_PW2"),
            database=environ.get("DB_NAME2")
        )
        logger.info("Connection successful LOL")
        return db_connection
    except Exception as error:
        logger.critical(f"Connection failed lol {error}")
        print(f"didn't work lol {error}")
def start(self, rule_types):
    """Snapshot every rule URL (page source + screenshot) and diff the
    results against the previous run.

    Exits when no rules match *rule_types*.
    """
    rules = get_rules(rule_types)
    if len(rules) == 0:
        logger.critical('get rules failed, rule types not found!')
        exit(0)
    self.rulenum = len(rules)
    logger.info('rules length: {rl}'.format(rl=len(rules)))
    #tmptime=self.getNowtime()
    #dirname = self.recorddir + tmptime
    self.dirname = self.createdir()
    #dirname = self.recorddir
    #pool = mp.Pool()
    result_list = []
    webshotargs = []
    for idx, rule_object in enumerate(rules):
        #print(idx, rule_object.url)
        logger.info('>>>>>>>>>>>>> {n} > {r} >>>>>>'.format(
            n=rule_object.types, r=rule_object.url))
        # unique, timestamped base name for this url's artifacts
        urlname = self.url2name(rule_object.url) + '-' + self.getNowtime()
        content = self.openWebPage(rule_object.url)
        sourcefile = None
        #html = '<h1>网页监控报告: {rule_regex} Count: {count} Datetime: {datetime}</h1>'.format(
        #    rule_regex=self.rule_object.keyword, datetime=time.strftime("%Y-%m-%d %H:%M:%S"),
        #    count=len(self.content))
        tmp = []
        tmp.append(rule_object.url)
        if content:
            # save the fetched page source and hash it for diffing
            sourcefile = self.dirname + os.path.sep + urlname + '.txt'
            #sourcemd5= self.md5_ncrypt(content)
            with open(sourcefile, 'w', encoding='utf-8') as f:
                f.write(content)
            sourcemd5 = self.md5sum(sourcefile)
            #oldmd5 = queryUrlMd5(self.dbfile, self.table_name, rule_object.url)
            picname = self.dirname + os.path.sep + urlname + '.png'
            filename = self.dirname + os.path.sep + urlname
            #webshot(filename, rule_object.url)
            # screenshots are taken in a single batch at the end
            webshotargs.append((picname, rule_object.url))
            tmp = (rule_object.url, sourcemd5, sourcefile, picname)
            result_list.append(tmp)
        else:
            # page could not be fetched; record placeholders
            tmp = (rule_object.url, 'null', 'null', 'null')
            result_list.append(tmp)
    webshotmp(webshotargs)
    self.checkdiff(result_list)
def save_location_menu(self, new_locations):
    """Insert each location into the locations table, committing after
    every insert.

    :param new_locations: mapping of location name -> location id
    """
    connection = self.get_connection()
    logger.info(f"The number of unique locations processed: {len(new_locations)}")
    for location, location_id in new_locations.items():
        args = (location_id, location)
        sql_query = "INSERT INTO locations (id, location) VALUES (%s, %s)"
        try:
            cursor = self.update_sql(sql_query, args, connection)
        except Exception as error:
            logger.critical(f"DOOP! {error}")
        connection.commit()
    # NOTE(review): only the cursor from the last iteration is closed;
    # 'cursor' is unbound when new_locations is empty or every insert
    # failed -- the except below masks the resulting NameError
    try:
        cursor.close()
    except Exception as error:
        print("no new locals lmao, where's ches??????")
def get_default_dotm2ee_directory(self): dotm2ee = os.path.join(pwd.getpwuid(os.getuid())[5], ".m2ee") if not os.path.isdir(dotm2ee): try: os.mkdir(dotm2ee) except OSError, e: logger.debug("Got %s: %s" % (type(e), e)) import traceback logger.debug(traceback.format_exc()) logger.critical("Directory %s does not exist, and cannot be " "created!") logger.critical("If you do not want to use .m2ee in your home " "directory, you have to specify pidfile, " "munin -> config_cache in your configuration " "file") sys.exit(1)
def _search_album(self, provider, title, artist='', tracks=None,
                  min_tracks=0):
    """Search *provider* (index: LASTFM or BRAINZ) for an album.

    :param title: album title to match
    :param artist: optional artist name that results must match
    :param tracks: optional normalized track names that must all be
        present in a candidate album (default None: no track filtering;
        was a mutable default [] in the original)
    :param min_tracks: minimal number of tracks a candidate must have
    Returns a LastAlbum/BrainzAlbum wrapper or None.
    """
    logger.info('In _search_album(provider={}, title={}, artist={})'.format(
        provider, title, artist))
    if tracks is None:
        tracks = []
    # with a known artist only a few top results need reviewing
    RESULTS_TO_REVIEW = 5 if artist else 40

    def searchlast():
        # last.fm: browse the artist's top albums when the artist is
        # known, otherwise do a free-text album search (two pages)
        if artist:
            return [i.item for i in
                    self.artist(LASTFM, artist)._link.get_top_albums()]
        else:
            search_results = self.lastfm.search_for_album(title)
            return (search_results.get_next_page() +
                    search_results.get_next_page())

    search = [searchlast,
              lambda: brainz.search_releases(
                  title, artist=artist)['release-list']][provider]
    Album = [LastAlbum, BrainzAlbum][provider]
    output = None
    try:
        output = search()
    except Exception as exc:
        logger.critical('Exception in search')
        logger.exception(exc)
        return None
    if output:
        for i, result in zip(range(RESULTS_TO_REVIEW), output):
            logger.info('Album: attempt #{}'.format(i + 1))
            album = Album(result)
            # diff() is a distance: reject when too far from the wanted
            # artist or title
            if artist and diff(album.artist, artist) > 0.4:
                logger.info('Omitting because {} != {}'.format(album.artist,
                                                               artist))
                continue
            if diff(album.name, title) > 0.4:
                logger.info('Omitting because of title: {}'.format(
                    album.name))
                continue
            if min_tracks and len(album.tracks()) < min_tracks:
                logger.info('Omitting because of min_tracks: only {}'.format(
                    len(album.tracks())))
                continue
            if tracks:
                album_tracks = [normalcase(i.name) for i in album.tracks()]
                # every known track must be present in the fetched album
                # (dead 'if False:' debug block from the original removed)
                if any(known not in album_tracks for known in tracks):
                    logger.info('Omitting because track not found')
                    continue
            return album
    return None
def _lookup_appcontainer_jar(self): if self._appcontainer_version is None: # this probably means a bug in this program logger.critical("Trying to look up appcontainer jar, but " "_appcontainer_version is not defined.") self._all_systems_are_go = False return "" appcontainer_path = self.lookup_in_mxjar_repo( 'appcontainer-%s' % self._appcontainer_version) if appcontainer_path is None: logger.critical("AppContainer not found for version %s" % self._appcontainer_version) self._all_systems_are_go = False return "" return os.path.join(appcontainer_path, 'appcontainer.jar')
def main():
    """Method responsible only for initializing the application."""
    # initialized before the try block so the except handler can always
    # reference them, even when an early statement raises (the original
    # hit a NameError in the handler in that case)
    pid = None
    so = None
    try:
        logger.info("Starting...")
        pid = os.getpid()
        so = platform.system()
        logger.info("PID: {} -> Platform: {}.".format(pid, so))
        ctrl.Controller(DEFAULT_HOST, DEFAULT_PORT).StartServer()
    except Exception as e:
        err = wrapError(startApplicationError, e)
        err = setErrorContext(err, "pid", pid)
        err = setErrorContext(err, "platform", so)
        logger.critical(err)
    finally:
        logger.info("Finished.")
def get_connection(self):
    """Open a pymysql connection.

    Credentials come from get_secret() in production, otherwise from the
    DB_*2 environment variables. Returns the connection, or None
    (implicitly) when connecting fails.
    """
    if environ.get("ENVIRONMENT") == "prod":
        # take exactly the four values we unpack; the original sliced
        # [0:5], which raises ValueError whenever get_secret() yields
        # five or more items
        host, username, password, db_name = get_secret()[0:4]
    else:
        host, username, password, db_name = environ.get(
            "DB_HOST2"), environ.get("DB_USER2"), environ.get(
            "DB_PW2"), environ.get("DB_NAME2")
    try:
        db_connection = pymysql.connect(host, username, password, db_name)
        print("Got connection")
        logger.info("Load connection successful LOL")
        return db_connection
    except Exception as error:
        logger.critical("Load connection failed LOL")
        print(f"didn't work lol {error}")
def resolve(self, path, solve_type=2):
    """Submit a captcha at *path* to deathbycaptcha, retrying up to
    TRIES times with INTERVAL seconds between attempts.

    :param path: captcha file/challenge passed to the client
    :param solve_type: captcha type forwarded to decode(); a falsy value
        falls back to a plain decode with a 60 second timeout
    Returns the service's result, or None when every attempt fails.
    """
    for x in range(TRIES):
        try:
            if solve_type:
                result = self._client.decode(path, type=solve_type)
            else:
                result = self._client.decode(path, 60)
            if result:
                logger.info("result form deathbycaptcha {}".format(result))
                return result
            else:
                logger.warning("None response from deathbycaptcha")
        except deathbycaptcha.AccessDeniedException:
            logger.critical("deathbycaptcha.AccessDeniedException")
            # NOTE(review): this clears _deathbycaptcha_client, but the
            # client used above is self._client -- confirm which
            # attribute the rest of the class actually relies on
            self._deathbycaptcha_client = None
        except:
            # deliberately broad: any other client failure is logged and
            # retried after the sleep below
            logger.exception("Error from deathbycaptcha")
        time.sleep(INTERVAL)
def create_process(self):
    '''
    Starts bot's process.

    Raises ExecuteError when the player command cannot be executed.
    '''
    logger.info('executing \'%s\'', self._player_command)
    try:
        # naive whitespace split: the command must not rely on shell quoting
        self._process = psutil.Popen(
            self._player_command.split(),
            stdout=PIPE,
            stdin=PIPE,
            stderr=PIPE
        )
    except OSError:
        logger.critical('executing of \'%s\' failed: invalid command',
                        self._player_command)
        raise ExecuteError
    logger.info('executing successful')
def _check_runtime_config(self):
    """Sanity-check runtime-related configuration and fill in defaults.

    Normalizes mxnode/mxjar_repo to a list, warns about missing m2ee
    options, exits when app_base does not exist, defaults
    model_upload_path, and prepends app_base/runtimes to the repo list
    when that directory exists.
    """
    self._run_from_source = self._conf.get('mxnode',
                                           {}).get('run_from_source', False)
    if (not self._run_from_source
            or self._run_from_source == 'appcontainer'):
        # ensure mxjar_repo is a list, multiple locations are allowed for
        # searching
        if not self._conf.get('mxnode', {}).get('mxjar_repo', None):
            self._conf['mxnode']['mxjar_repo'] = []
        elif not type(self._conf.get('mxnode', {})['mxjar_repo']) == list:
            self._conf['mxnode']['mxjar_repo'] = [
                self._conf['mxnode']['mxjar_repo']
            ]
    # m2ee
    for option in ['app_name', 'app_base', 'runtime_port']:
        if not self._conf['m2ee'].get(option, None):
            logger.warn("Option %s in configuration section m2ee is not "
                        "defined!" % option)
    # check some locations for existance and permissions
    basepath = self._conf['m2ee']['app_base']
    if not os.path.exists(basepath):
        logger.critical("Application base directory %s does not exist!" %
                        basepath)
        sys.exit(1)
    # model_upload_path
    if 'model_upload_path' not in self._conf['m2ee']:
        self._conf['m2ee']['model_upload_path'] = os.path.join(
            self._conf['m2ee']['app_base'], 'data', 'model-upload')
    if not os.path.isdir(self._conf['m2ee']['model_upload_path']):
        logger.warn("Model upload path %s is not a directory" %
                    self._conf['m2ee']['model_upload_path'])
    # magically add app_base/runtimes to mxjar_repo when it's present
    magic_runtimes = os.path.join(self._conf['m2ee']['app_base'],
                                  'runtimes')
    if ((magic_runtimes not in self._conf['mxnode']['mxjar_repo'] and
         os.path.isdir(magic_runtimes))):
        self._conf['mxnode']['mxjar_repo'].insert(0, magic_runtimes)
def write_felix_config(self): felix_config_file = self.get_felix_config_file() felix_config_path = os.path.dirname(felix_config_file) if not os.access(felix_config_path, os.W_OK): logger.critical("felix_config_file is not in a writable " "location: %s" % felix_config_path) return False project_bundles_path = os.path.join( self._conf['m2ee']['app_base'], 'model', 'bundles' ) osgi_storage_path = os.path.join( self._conf['m2ee']['app_base'], 'data', 'tmp', 'felixcache' ) felix_template_file = os.path.join( self._runtime_path, 'runtime', 'felixconfig.properties.template' ) if os.path.exists(felix_template_file): logger.debug("writing felix configuration template from %s " "to %s" % (felix_template_file, felix_config_file)) try: input_file = open(felix_template_file) template = input_file.read() except IOError, e: logger.error("felix configuration template could not be " "read: %s", e) return False try: output_file = open(felix_config_file, 'w') render = template.format( ProjectBundlesDir=project_bundles_path, InstallDir=self._runtime_path, FrameworkStorage=osgi_storage_path ) output_file.write(render) except IOError, e: logger.error("felix configuration file could not be " "written: %s", e) return False
def isg_thread(user_name, acct_session_id, old_radgroup, new_radgroup,
               tariff, client):
    """Therad for main job: move a subscriber to a new speed.

    :param user_name: Subscriber User_Name
    :param acct_session_id: Subscriber Session ID
    :param old_radgroup: Current user RADIUS group
    :param new_radgroup: New user RADIUS group
    :param tariff: User tariff
    :param client: Basic RADIUS client
    :type user_name: string
    :type acct_session_id: string
    :type old_radgroup: string
    :type new_radgroup: string
    :type tariff: integer
    :type client: pyrad.Client
    """
    # radgroup names encode the speed as the second '_'-separated field
    new_speed = new_radgroup.split('_')[1]
    old_speed = old_radgroup.split('_')[1]
    logger.info("start thread for %s@%s, speed: %s -> %s", user_name,
                client.server, old_speed, new_speed)
    # Get User session info
    if client.server is not None:
        session = coa.session_info(user_name, acct_session_id, client)
        if session is None:
            # NAS unreachable: bail out without touching the database
            logger.critical("can't connect to NAS:%s", client.server)
            return
        elif session.code == packet.CoANAK:
            # user has no active session; speed is still updated below
            logger.warning("can't find active user %s@%s[%s]", user_name,
                           client.server, acct_session_id)
        elif session.code == packet.CoAACK:
            new_services = db.get_services(new_radgroup)
            if not coa.update_subscriber_services(user_name,
                                                  acct_session_id,
                                                  session, new_services,
                                                  client):
                logger.error("can't update user services!")
                # TODO: Disconnect user casuse CISCO 7200 warm reboot, fix it before use POD packets
                # disconnect = coa.disconnect_user(user_name, acct_session_id, client, logger)
                # if disconnect is not None and disconnect.code == packet.CoANAK:
                #     logger.critical("can't disconnect user!")
                # return None
                return None
    # persist the new speed in the database
    db.set_speed(user_name, new_speed, tariff)
def get_services(user_group):
    """Get services from radgroup

    :param user_group: User group with ISG services
    :type user_group: string
    :return: list of radgroup services
    :rtype: list
    """
    logger.debug("get services from usergroup %s", user_group)
    with SessionPool.acquire() as con:
        cur = con.cursor()
        try:
            # LTRIM strips the leading activation marker from each value
            cur.execute("""SELECT LTRIM(VALUE, 'A') FROM RADIUS.RADGROUPREPLY r WHERE r.GROUPNAME = :grp AND r.VALUE LIKE '%SVC%'""", grp=user_group)
        except cx_Oracle.DatabaseError:
            # narrowed from a bare except: only database failures are
            # expected here; anything else should propagate
            logger.critical("database error!!!")
            return []
        services = [svc[0] for svc in cur.fetchall()]
    logger.debug("%s: %s", user_group, str(services))
    return services
def write_felix_config(self):
    """Render the felix (OSGi) configuration template into place.

    Returns False when the target is not writable or when reading or
    writing fails; otherwise falls through returning None.
    """
    felix_config_file = self.get_felix_config_file()
    felix_config_path = os.path.dirname(felix_config_file)
    if not os.access(felix_config_path, os.W_OK):
        logger.critical("felix_config_file is not in a writable "
                        "location: %s" % felix_config_path)
        return False
    project_bundles_path = os.path.join(self._conf['m2ee']['app_base'],
                                        'model', 'bundles')
    osgi_storage_path = os.path.join(self._conf['m2ee']['app_base'],
                                     'data', 'tmp', 'felixcache')
    felix_template_file = os.path.join(self._runtime_path, 'runtime',
                                       'felixconfig.properties.template')
    if os.path.exists(felix_template_file):
        logger.debug("writing felix configuration template from %s "
                     "to %s" % (felix_template_file, felix_config_file))
        # NOTE(review): input_file/output_file are never closed; consider
        # with-statements here
        try:
            input_file = open(felix_template_file)
            template = input_file.read()
        except IOError, e:
            logger.error(
                "felix configuration template could not be "
                "read: %s", e)
            return False
        try:
            output_file = open(felix_config_file, 'w')
            render = template.format(
                ProjectBundlesDir=project_bundles_path,
                InstallDir=self._runtime_path,
                FrameworkStorage=osgi_storage_path)
            output_file.write(render)
        except IOError, e:
            logger.error(
                "felix configuration file could not be "
                "written: %s", e)
            return False
def init_config():
    """Load and parse config.toml located in BASE_DIR.

    Logs a critical message (Chinese, preserved as-is) and re-raises on
    type, missing-file, or TOML parse errors; returns the parsed
    configuration dict on success.
    """
    try:
        config = toml.load(Path.joinpath(BASE_DIR, 'config.toml'))
    except TypeError as e:
        # invalid argument passed to toml.load
        logger.critical(f'加载配置文件错误 - {e}')
        raise e
    except FileNotFoundError as e:
        # config.toml missing at the expected location
        logger.critical(f'找不到指定的配置文件 - {e}')
        raise e
    except toml.TomlDecodeError as e:
        # file exists but is not valid TOML
        logger.critical(f'配置文件解析错误 - {e}')
        raise e
    return config
def detach_user_conn(self, user_id):
    """Remove *user_id*'s entry from the user/connection map.

    Returns True when a mapping existed and was removed, False (after a
    critical log) when the user was unknown.
    """
    if user_id in self.user_conn_map:
        del self.user_conn_map[user_id]
        return True
    logger.critical("user not exist")
    return False
__all__ = ['set_speed', 'get_services']

import os
import sys

import cx_Oracle

from log import logger

# create oracle connection pool shared by every query in this module;
# connection parameters come from the DB_* environment variables.
# Without a working pool nothing here can function, so startup aborts.
try:
    SessionPool = cx_Oracle.SessionPool(user=os.getenv('DB_USER'),
                                        password=os.getenv('DB_PASSWORD'),
                                        dsn="{!s}/{!s}".format(
                                            os.getenv('DB_ADDRESS'),
                                            os.getenv('DB_NAME')),
                                        min=int(os.getenv('DB_MIN')),
                                        max=int(os.getenv('DB_MAX')),
                                        increment=int(os.getenv('DB_INC')),
                                        threaded=True)
except:
    logger.critical("Can't create database SessionPool !!!")
    sys.exit(1)


def set_speed(user_name, new_speed, tariff):
    """Set new radgout for user.

    :param user_name: User name to change radgroup
    :param new_speed: New user speed
    :param tariff: User tariff???
    :type user_name: string
    :type new_speed: string
    :type tariff: integer
    :return: True or False
    :rtype: bool
    """
    logger.debug("set speed %s:%s for user %s", new_speed, tariff, user_name)
import httplib from log import logger try: import readline # allow - in filenames we're completing without messing up completion readline.set_completer_delims( readline.get_completer_delims().replace('-', '') ) except ImportError: pass try: import httplib2 except ImportError: logger.critical("Failed to import httplib2. This module is needed by " "m2ee. Please povide it on the python library path") raise def unpack(config, mda_name): mda_file_name = os.path.join(config.get_model_upload_path(), mda_name) if not os.path.isfile(mda_file_name): logger.error("file %s does not exist" % mda_file_name) return False logger.debug("Testing archive...") cmd = ("unzip", "-tqq", mda_file_name) logger.trace("Executing %s" % str(cmd)) try: proc = subprocess.Popen(cmd,
def __init__(self, yaml_files=None, config=None):
    """Build the m2ee configuration object.

    Merges configuration from yaml files and/or an explicit dict, runs the
    pre-flight checks, looks up the Mendix runtime version and its on-disk
    location, and computes the JVM classpath to start the appcontainer.

    :param yaml_files: optional list of yaml configuration file paths
    :param config: optional dict merged on top of the yaml configuration
    """
    self._conf = defaultdict(dict)
    if yaml_files:
        (self._mtimes, yaml_config) = read_yaml_files(yaml_files)
        self._conf = merge_config(self._conf, yaml_config)
    else:
        self._mtimes = {}
    if config:
        self._conf = merge_config(self._conf, config)

    # disable flag during pre-flight check if launch would fail
    self._all_systems_are_go = True

    self._check_appcontainer_config()
    self._check_runtime_config()

    self._conf['mxruntime'].setdefault(
        'BasePath', self._conf['m2ee']['app_base'])
    self._conf['mxruntime'].setdefault('DTAPMode', 'P')

    self.fix_permissions()

    self._appcontainer_version = self._conf['m2ee'].get(
        'appcontainer_version', None)

    # 3.0: application information (e.g. runtime version)
    # if this file does not exist (i.e. < 3.0) try_load_json returns {}
    self._model_metadata = self._try_load_json(
        os.path.join(self._conf['m2ee']['app_base'],
                     'model',
                     'metadata.json'
                     ))

    # Dirty hack to tell if we're on 2.5 or not
    self._dirty_hack_is_25 = len(self._model_metadata) == 0

    # 3.0: config.json "contains the configuration settings of the active
    # configuration (in the Modeler) at the time of deployment." It also
    # contains default values for microflow constants. D/T configuration is
    # not stored in the mdp anymore, so for D/T we need to insert it into
    # the configuration we read from yaml (yay!)
    # { "Configuration": { "key": "value", ... }, "Constants": {
    # "Module.Constant": "value", ... } } also... move the custom section
    # into the MicroflowConstants runtime config option where 3.0 now
    # expects them to be! yay... (when running 2.5, the MicroflowConstants
    # part of runtime config will be sent using the old
    # update_custom_configuration m2ee api call.
    self._conf['mxruntime'] = self._merge_runtime_configuration()

    # look up MxRuntime version
    self._runtime_version = self._lookup_runtime_version()

    # if running from binary distribution, try to find where m2ee/runtime
    # jars live
    self._runtime_path = None
    if (not self._run_from_source
            or self._run_from_source == 'appcontainer'):
        if self._runtime_version is None:
            # this probably means reading version information from the
            # modeler file failed
            logger.critical("Unable to look up mendix runtime files "
                            "because product version is unknown.")
            self._all_systems_are_go = False
        else:
            self._runtime_path = self._lookup_in_mxjar_repo(
                self._runtime_version)
            if self._runtime_path is None:
                logger.critical("Mendix Runtime not found for version %s" %
                                self._runtime_version)
                self._all_systems_are_go = False

    if not self._appcontainer_version:
        # 3.0: appcontainer information (e.g. M2EE main class name)
        self._appcontainer_environment = (
            self._load_appcontainer_environment())
    else:
        # b0rk
        self._appcontainer_environment = {}

    logger.debug("Determining classpath to be used...")
    classpath = []
    # search for server files and build classpath
    if not self._run_from_source and self._appcontainer_version:
        # start appcontainer from jars, which starts runtime from jars
        # start without classpath and main class, using java -jar
        logger.debug("Hybrid appcontainer from jars does not need a "
                     "classpath.")
        self._appcontainer_jar = self._lookup_appcontainer_jar()
    elif self._run_from_source:
        logger.debug("Building classpath to run hybrid appcontainer from "
                     "source.")
        # start appcontainer from source, which starts runtime from jars
        classpath = self._setup_classpath_from_source()
    elif not self._run_from_source and not self._appcontainer_version:
        logger.debug("Building classpath to run appcontainer/runtime from "
                     "jars.")
        # start appcontainer/runtime together from jars
        classpath = self._setup_classpath_runtime_binary()
        classpath.extend(self._setup_classpath_model())

    self._classpath = ":".join(classpath)
    if classpath:
        logger.debug("Using classpath: %s" % self._classpath)
    else:
        logger.debug("No classpath will be used")

    # If running runtime from source, this location needs to be set
    # manually else, if not set yet and running from jars (_runtime_path is
    # known) set it here.
    if self._runtime_path and not 'RuntimePath' in self._conf['mxruntime']:
        runtimePath = os.path.join(self._runtime_path, 'runtime')
        logger.debug("Setting RuntimePath runtime config option to %s" %
                     runtimePath)
        self._conf['mxruntime']['RuntimePath'] = runtimePath
print("config.ini file configure failed.\nError: {0}".format(e)) return value # Rules Structure Design # # 'rule keywords': { # 'mode': '' // RuleMode: normal-match(default)/only-match/full-match/mail # 'extension': '' // search extension: (default)/txt/md/java/python/etc... # } # try: with open(rules_path) as f: rules_dict = json.load(f) except Exception as e: logger.critical('please config rules.json!') logger.critical(traceback.format_exc()) #print(rules_dict.items()) class Rule(object): def __init__(self, types=None, url=None, mode=None): self.types = types self.url = url #self.filename = url.replace("/","-") self.mode = mode # 读取配置文件 def get_rules(rule_type='singlepage'):
from log import logger

# Demo script: emit one message at each of the five standard logging levels,
# in ascending order of severity.
_demo_calls = (
    (logger.debug, 'this is a logger debug message'),
    (logger.info, 'this is a logger info message'),
    (logger.warning, 'this is a logger warning message'),
    (logger.error, 'this is a logger error message'),
    (logger.critical, 'this is a logger critical message'),
)
for _emit, _message in _demo_calls:
    _emit(_message)
# # Copyright (c) 2009-2013, Mendix bv # All Rights Reserved. # # http://www.mendix.com/ # from base64 import b64encode import socket from log import logger try: import httplib2 except ImportError: logger.critical("Failed to import httplib2. This module is needed by " "m2ee. Please povide it on the python library path") raise # Use json if available. If not (python 2.5) we need to import # the simplejson module instead, which has to be available. try: import json except ImportError: try: import simplejson as json except ImportError, ie: logger.critical("Failed to import json as well as simplejson. If " "using python 2.5, you need to provide the simplejson " "module in your python library path.") raise
import datetime
from log import logger
from profileutildp import format_dict_table

# Use json if available. If not (python 2.5) we need to import the simplejson
# module instead, which has to be available.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError, ie:
        logger.critical("Failed to import json as well as simplejson. If "
                        "using python 2.5, you need to provide the simplejson "
                        "module in your python library path.")
        raise


class Log:
    """One request-log record, built from a raw log data dict.

    All keys of *data* become instance attributes (via __dict__.update);
    a few derived attributes are added on top.
    """

    def __init__(self, request_id, data):
        """Store *request_id* and derive fields from the *data* dict.

        Expects at least 'request_content' (JSON text with an 'action' key)
        and 'database_queries' keys in *data* -- TODO confirm full schema
        against the producer of these records.
        """
        # expose every raw field directly as an attribute
        self.__dict__.update(data)
        self.request_id = request_id
        self.action = json.loads(data['request_content'])['action']
        self.queries = data['database_queries']
        # start_time/duration appear to be in milliseconds: they are divided
        # by 1000 before being fed to fromtimestamp (seconds)
        if hasattr(self, "start_time"):
            self.end_time_formatted = datetime.datetime.fromtimestamp(
                (self.start_time + self.duration) // 1000)
            self.start_time_formatted = datetime.datetime.fromtimestamp(
                self.start_time // 1000)
# Schedule app.reset_user_list at the top of every hour; a run firing more
# than 30 seconds late is skipped (misfire_grace_time).
scheduler.add_job(func=app.reset_user_list, trigger='cron', hour='*',
                  misfire_grace_time=30)
scheduler.start()
logger.info('Scheduler has been activated ...')

# Supervisor loop: run the bot and restart it with a delay that depends on
# why it stopped; KeyboardInterrupt is the only clean exit.
while True:
    logger.info('Bot running count: %d' % (bot_restart_counter))
    restart_interval = 3  # default restart delay, seconds
    try:
        asyncio.run(app.run())
    except ClientConnectorError:
        # network is down: back off longer before retrying
        logger.warning(
            'The connection is down, please check your connection ...')
        restart_interval = 30
    except KeyboardInterrupt:
        break
    except Exception:
        # unexpected crash: log the full traceback, back off the longest
        logger.critical(traceback.format_exc())
        restart_interval = 300
    # bot_restart_counter is presumably initialized earlier in the file --
    # TODO confirm
    bot_restart_counter += 1
    logger.info('Restarting the bot in %d seconds ...' % restart_interval)
    time.sleep(restart_interval)

logger.info('Bot stopped now ...')
scheduler.shutdown()
logger.info('Scheduler has been deactivated ...')
lambda domain, tar_member: extract_domain(domain, tar_member)), ("Post-Restore", False, lambda domain, tar_member: execute_restore( domain, is_pre_restore=False)) ]: errors = [] for name, tar_member in tar_domains.items(): if name in domains: try: dom_errors = handler(domains[name], tar_member) errors.extend(dom_errors or []) except Exception, e: # This should NEVER happen err_info = "Restore-Phase: %s, Domain: %s\nError: %s" % ( what, name, traceback.format_exc()) errors.append(err_info) logger.critical(err_info) if abort_on_error == False: # At this state, the restored data is broken. # We still try to apply the rest of the snapshot # Hopefully the log entry helps in identifying the problem.. logger.critical( "Snapshot restore FAILED! (possible loss of snapshot data)" ) continue break if errors: if what == "Permissions": errors = list(set(errors)) errors.append( _("<br>If there are permission problems, please ensure the site user has write permissions."
def main():
    """Zenfeed entry point.

    Parses CLI options (docopt), configures logging, the Flask app and its
    database, starts the feed-deadline workers, then serves the app (gevent
    WSGIServer, or Flask's debug server with --debug).
    """
    args = docopt(__doc__, version='zenfeed ' + VERSION)

    # --log is "<destination>:<level>"; destination is stderr, syslog, or a
    # file path.
    log_arg, log_level = args['--log'].rsplit(':', 1)
    if log_arg not in ('stderr', 'syslog'):
        setup_logger(type='file', filename=path(log_arg).abspath(),
                     level=log_level)
    else:
        setup_logger(type=log_arg, level=log_level)
    logger.info('Zenfeed %s booting...', VERSION)

    # "genstatic" mode only generates static files, then exits.
    if args['genstatic']:
        return genstatic(args['PATH'])

    port = int(args['--port'])
    cache_disabled = args['--no-cache']

    # Normalize URL prefix: no trailing slash, exactly one leading slash.
    path_prefix = args['--prefix']
    if path_prefix.endswith('/'):
        path_prefix = path_prefix[:-1]
    if path_prefix and not path_prefix.startswith('/'):
        path_prefix = '/' + path_prefix

    # 'browser' means no fixed language (use the visitor's browser setting);
    # anything else must be a supported code like "en" or "en_US".
    fixed_language = args['--lang']
    if fixed_language == 'browser':
        fixed_language = None
    else:
        logger.info('Language fixed to "%s"', fixed_language)
        if (fixed_language not in LANGUAGES
                and fixed_language.split('_', 1)[0] not in LANGUAGES):
            # logger.critical returns None, so this exits main() with None
            return logger.critical('Fixed language not supported !')

    fixed_timezone = args['--tz']
    logger.info('Timezone fixed to "%s"', fixed_timezone)
    if fixed_timezone not in all_timezones:
        return logger.critical('Fixed timezone not supported !')

    # Map --database onto an SQLAlchemy URI; bare paths become sqlite files.
    db_uri = args['--database']
    if db_uri == ':memory:':
        db_uri = 'sqlite://'
    elif not "://" in db_uri:
        db_uri = 'sqlite:///%s' % path(db_uri).abspath()

    # deferred import: presumably must happen after logger/config setup --
    # TODO confirm
    import app as app_module
    app = app_module.create_flask_app(prefix=path_prefix)
    app.config.update(
        DEBUG=args['--debug'],
        SQL_DEBUG=False,
        SECRET_KEY=urandom(32),
        SQLALCHEMY_DATABASE_URI=db_uri,
        FAVICON_DIR=path(args['--favicons']).abspath(),
        FIXED_LANGUAGE=fixed_language,
        FIXED_TIMEZONE=fixed_timezone,
        CACHE_ENABLED=not cache_disabled,
        PATH_PREFIX=path_prefix,
    )
    Cache(app)

    from models import setup_tables, Feed
    patch_socket()
    patch_ssl()
    setup_tables()

    from deadline_manager import deadlineManager
    # imported for its side effects (route registration) -- TODO confirm
    import views
    from werkzeug.contrib.fixers import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app)

    # Start background workers that refresh feeds when their deadlines hit.
    feeds = Feed.query.all()
    deadlineManager.favicon_dir = path(args['--favicons']).abspath()
    deadlineManager.launch_deadline_workers(feeds)
    deadlineManager.start()

    logger.info("Server started at port %d (prefix: %s/)", port, path_prefix)
    if args['--debug']:
        logger.warning("DEBUG mode activated")
        app.run(host='0.0.0.0', port=port, debug=True)
    else:
        from gevent.wsgi import WSGIServer
        http_server = WSGIServer(('0.0.0.0', port), app)
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            pass
# # Copyright (c) 2009-2015, Mendix bv # All Rights Reserved. # # http://www.mendix.com/ # from base64 import b64encode import socket from log import logger try: import httplib2 except ImportError: logger.critical("Failed to import httplib2. This module is needed by " "m2ee. Please povide it on the python library path") raise # Use json if available. If not (python 2.5) we need to import # the simplejson module instead, which has to be available. try: import json except ImportError: try: import simplejson as json except ImportError, ie: logger.critical("Failed to import json as well as simplejson. If " "using python 2.5, you need to provide the simplejson " "module in your python library path.") raise
import pandas as pd
import settings
import os.path
import write_to_db
import datetime
from log import logger

# NOTE(review): `requests` is used below but not imported in this chunk --
# presumably imported earlier in the file; verify.

# Get current time (used later as a timestamp label, format YYYY-mm-dd_HH-MM-SS)
current_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

# Request the latest NSW fuel price data from the OneGov API
response = requests.get(
    'https://api.onegov.nsw.gov.au/FuelPriceCheck/v1/fuel/prices/new',
    headers=settings.headers)
if response.status_code == 200:
    logger.debug(f'Response code: {response.status_code}')
else:
    # any non-200 response aborts the whole script
    logger.critical(f'Response code: {response.status_code}')
    exit()

# Convert the JSON payload into DataFrames
data = response.json()
stations = pd.DataFrame(data['stations'])
prices = pd.DataFrame(data['prices'])

# Stop the program if there's no new price data
if prices.empty:
    logger.debug('No new prices found')
    exit()

# Normalize 'lastupdated' from d/m/Y H:M:S text into Y-m-d H:M:S text
prices['lastupdated'] = prices['lastupdated'].apply(
    lambda x: datetime.datetime.strptime(
        x, '%d/%m/%Y %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S'))
# Prompt the user to type the mail body interactively (writeMessage reads it;
# defined elsewhere in the file -- TODO confirm).
print('\nWrite your body - Press ctrl+c for once done', end='\n')
body = writeMessage()

# Send one personalized message per (name, email) pair.
for name, email in zip(names, emails):
    msg = MIMEMultipart()  # create a message

    # add in the actual person name to the message template
    message = message_template.substitute(PERSON_NAME=name.title(),
                                          WORK_STATUS=body)

    # setup the parameters of the message
    msg['From'] = MY_ADDRESS
    msg['To'] = email
    msg['Subject'] = subject

    # add in the message body
    msg.attach(MIMEText(message, 'plain'))

    # send the message via the server set up earlier; on any failure, cache
    # the message (cacheMessage) and log the error instead of aborting the
    # whole batch
    try:
        s.send_message(msg)
        logger.info('sent success: {}'.format(msg))
        print('Mail Sent...')
    except Exception as e:
        res = cacheMessage(msg)
        logger.critical(e)
        print('sent failed: {},\nmsg save status: {}'.format(e, res))
    del msg
def main():
    """Zenfeed entry point.

    Parses CLI options (docopt), configures logging, the Flask app and its
    database, starts the feed-deadline workers, then serves the app (gevent
    WSGIServer, or Flask's debug server with --debug).
    """
    args = docopt(__doc__, version='zenfeed ' + VERSION)

    # --log is "<destination>:<level>"; destination is stderr, syslog, or a
    # file path.
    log_arg, log_level = args['--log'].rsplit(':', 1)
    if log_arg not in ('stderr', 'syslog'):
        setup_logger(type='file', filename=path(log_arg).abspath(),
                     level=log_level)
    else:
        setup_logger(type=log_arg, level=log_level)
    logger.info('Zenfeed %s booting...', VERSION)

    # "genstatic" mode only generates static files, then exits.
    if args['genstatic']:
        return genstatic(args['PATH'])

    port = int(args['--port'])
    cache_disabled = args['--no-cache']

    # Normalize URL prefix: no trailing slash, exactly one leading slash.
    path_prefix = args['--prefix']
    if path_prefix.endswith('/'):
        path_prefix = path_prefix[:-1]
    if path_prefix and not path_prefix.startswith('/'):
        path_prefix = '/' + path_prefix

    # 'browser' means no fixed language (use the visitor's browser setting);
    # anything else must be a supported code like "en" or "en_US".
    fixed_language = args['--lang']
    if fixed_language == 'browser':
        fixed_language = None
    else:
        logger.info('Language fixed to "%s"', fixed_language)
        if (fixed_language not in LANGUAGES
                and fixed_language.split('_', 1)[0] not in LANGUAGES):
            # logger.critical returns None, so this exits main() with None
            return logger.critical('Fixed language not supported !')

    fixed_timezone = args['--tz']
    logger.info('Timezone fixed to "%s"', fixed_timezone)
    if fixed_timezone not in all_timezones:
        return logger.critical('Fixed timezone not supported !')

    # Map --database onto an SQLAlchemy URI; bare paths become sqlite files.
    db_uri = args['--database']
    if db_uri == ':memory:':
        db_uri = 'sqlite://'
    elif not "://" in db_uri:
        db_uri = 'sqlite:///%s' % path(db_uri).abspath()

    # deferred import: presumably must happen after logger/config setup --
    # TODO confirm
    import app as app_module
    app = app_module.create_flask_app(prefix=path_prefix)
    app.config.update(
        DEBUG = args['--debug'],
        SQL_DEBUG = False,
        SECRET_KEY = urandom(32),
        SQLALCHEMY_DATABASE_URI = db_uri,
        FAVICON_DIR = path(args['--favicons']).abspath(),
        FIXED_LANGUAGE = fixed_language,
        FIXED_TIMEZONE = fixed_timezone,
        CACHE_ENABLED = not cache_disabled,
        PATH_PREFIX = path_prefix,
    )
    Cache(app)

    from models import setup_tables, Feed
    patch_socket()
    patch_ssl()
    setup_tables()

    from deadline_manager import deadlineManager
    # imported for its side effects (route registration) -- TODO confirm
    import views
    from werkzeug.contrib.fixers import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app)

    # Start background workers that refresh feeds when their deadlines hit.
    feeds = Feed.query.all()
    deadlineManager.favicon_dir = path(args['--favicons']).abspath()
    deadlineManager.launch_deadline_workers(feeds)
    deadlineManager.start()

    logger.info("Server started at port %d (prefix: %s/)", port, path_prefix)
    if args['--debug']:
        logger.warning("DEBUG mode activated")
        app.run(host='0.0.0.0', port=port, debug=True)
    else:
        from gevent.wsgi import WSGIServer
        http_server = WSGIServer(('0.0.0.0', port), app)
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            pass
def my_excepthook(exctype, value, traceback):
    """Excepthook replacement: print the traceback as usual, and in addition
    log any uncaught non-KeyboardInterrupt exception at CRITICAL level."""
    sys.__excepthook__(exctype, value, traceback)
    if issubclass(exctype, KeyboardInterrupt):
        # a plain Ctrl-C is not worth a critical log entry
        return
    logger.critical(value)
def celery_delay(func, *args, **kwargs):
    """Submit *func* asynchronously via Celery's ``.delay()``.

    On success returns whatever ``func.delay`` returns; on any exception,
    logs it at CRITICAL level (with traceback) and returns the exception
    instance instead of raising, so callers can inspect the return type.
    """
    try:
        result = func.delay(*args, **kwargs)
    except Exception as exc:
        logger.critical('error, e:%s, func:%s', exc, func, exc_info=True)
        return exc
    return result