def query_oid_val(self, oid):
    ''' Query value for oid '''
    # Look up in the cache file first
    value = self.__query_oid_in_cachefile(oid)
    if value != "":
        return value
    # Fall back to the snmprec file
    try:
        with open(self.__sim_file, 'r') as fdh:
            while True:
                line = fdh.readline()
                if not line:
                    break
                # Each record is oid|type|value
                record_list = line.strip(os.linesep).split('|')
                if record_list[0] == oid:
                    if "value" in record_list[2]:
                        val = record_list[2].split(',')[0].split('=')[1].strip()
                    else:
                        val = record_list[2]
                    return val
    except IOError as e:
        print(e)
    logger.error("Not found oid %s" % oid)
    return ""
def main_loop(self):
    rlist = []
    rlist.append(self.__pipe.inform)
    timeout = 10
    print("Total threads: {0}".format(threading.activeCount()))
    try:
        while self.__running:
            readable, _, _ = select.select(rlist, [], [], timeout)
            if not readable:
                continue
            if self.__pipe.inform in readable:
                try:
                    message = self.__pipe.read(256)
                except OSError as exc:
                    logger.warn("[Error %d] appeared at reading pipe" % exc.errno)
                    continue
                if len(message) == 0:
                    continue
                pdu_id = message.split()[0].split('.')[-2]
                pdu_index = self.to_index(int(pdu_id))
                logger.info("Assign message to pdu {0}".format(pdu_id))
                self.__pdus[pdu_index].handle_message(message)
    except KeyboardInterrupt:
        logger.error("Break by user.")
    except Exception as ex:
        logger.error("{0}: {1}".format(sys._getframe().f_code.co_name, ex))
def new_station(self):
    """
    Update the current station from ident and display new main screen
    """
    try:
        avwx.core.valid_station(self.station)
    except avwx.exceptions.BadStation:
        return self.error_station()
    self.draw_loading_screen()
    new_metar = avwx.Metar(self.station)
    try:
        new_metar.update()
    except (TimeoutError, ConnectionError, avwx.exceptions.SourceError):
        self.error_connection()
    except avwx.exceptions.InvalidRequest:
        self.error_station()
    except Exception as exc:
        logger.error(f'An unknown error has occurred: {exc}')
        self.error_unknown()
    else:
        logger.info(new_metar.raw)
        self.metar = new_metar
        self.old_ident = copy(self.ident)
        self.reset_update_time()
        self.export_session()
        self.draw_main()
def main_loop(self):
    rlist = []
    rlist.append(self.__pipe.inform)
    timeout = 10
    try:
        while self.__running:
            readable, _, _ = select.select(rlist, [], [], timeout)
            if not readable:
                continue
            if self.__pipe.inform in readable:
                try:
                    message = self.__pipe.read(256)
                except OSError as exc:
                    logger.warn("[Error %d] appeared at reading pipe" % exc.errno)
                    continue
                if len(message) == 0:
                    continue
                self.handle_message(message)
    except KeyboardInterrupt:
        logger.error("Break by user.")
    except Exception as ex:
        logger.error("{0}: {1}".format(sys._getframe().f_code.co_name, ex))
def update_snmprec_file(self, oid, val):
    old_file = os.path.join(self.config_instance.snmp_data_dir, "public.snmprec")
    new_file = os.path.join(self.config_instance.snmp_data_dir, "new.snmprec")
    logger.info("update oid %s, val %s" % (oid, str(val)))
    # open file
    try:
        old_fdh = open(old_file, 'r')
        new_fdh = open(new_file, 'w')
        while True:
            line = old_fdh.readline()
            if not line:
                break
            record_list = line.strip(os.linesep).split('|')
            if record_list[0] == oid:
                record_list[2] = val
                new_line = '|'.join(["%s" % x for x in record_list])
                new_fdh.write(new_line + os.linesep)
            else:
                new_fdh.write(line)
    except IOError as e:
        logger.error("Exception in updating snmprec file, exception: {}".format(e))
        return
    new_fdh.close()
    old_fdh.close()
    os.rename(new_file, old_file)
def unpack():
    """ APK to DEX """
    logger.info('Unpacking %s', common.apkPath)
    # Get the directory to unpack to
    try:
        dirname, extension = common.apkPath.rsplit(".", 1)
        # workaround for cases where path may include whitespace
        file_temp = open(common.apkPath, 'rb')
        zf = zipfile.ZipFile(file_temp)
        logger.info('Zipfile: %s', zf)
        if not os.path.exists(dirname + "/"):
            os.makedirs(dirname + "/")
        zf.extractall(dirname + "/", zf.namelist())
        logger.info('Extracted APK to %s', dirname + '/')
        common.pathToDEX = dirname + "/classes.dex"
        common.pathToUnpackedAPK = dirname + '/'
        return True
    except Exception as e:
        if not common.interactive_mode:
            logger.error(common.args.pathtoapk + common.config.get('qarkhelper', 'NOT_A_VALID_APK'))
            exit()
        logger.error(common.config.get('qarkhelper', 'NOT_A_VALID_APK_INTERACTIVE'))
        raise
def start(self):
    if not os.path.exists('/usr/bin/snmpsimd.py') \
            and not os.path.exists('/bin/snmpsimd.py') \
            and not os.path.exists('/usr/local/bin/snmpsimd.py'):
        logger.error("snmpsimd.py does not exist!")
        return -1
    if self.__alive():
        self.stop()
    if not os.path.exists("/var/run/snmpsim"):
        os.mkdir("/var/run/snmpsim")
    if not os.path.exists("/var/log/snmpsim"):
        os.mkdir("/var/log/snmpsim")
    data_dir = self.__config_instance.snmp_data_dir
    db_path = os.path.join(data_dir, self.__config_instance.db_file)
    args_list = ["snmpsimd.py"]
    endpoint_param = "--agent-udpv4-endpoint=0.0.0.0"
    args_list.append(endpoint_param)
    process_user = "******"
    args_list.append(process_user)
    process_group = "--process-group=root"
    args_list.append(process_group)
    logging_option = "--logging-method=file:/var/log/snmpsim/snmpsimd.log"
    args_list.append(logging_option)
    pid_option = "--pid-file=" + snmpsim_pid_file
    args_list.append(pid_option)
    daemonize_option = "--daemonize"
    args_list.append(daemonize_option)
    data_dir_option = "--data-dir=" + data_dir
    args_list.append(data_dir_option)
    if self.__db_type == "SQLITE":
        variation_modules_dir = "--variation-modules-dir=" + \
            self.__config_instance.variation_modules_dir
        args_list.append(variation_modules_dir)
        sql_option = "--variation-module-options=sql:dbtype:sqlite3,database:" + db_path
        args_list.append(sql_option)
    elif self.__db_type == "WRITECACHE":
        writecache_option = "--variation-module-options=writecache:file:" + db_path
        args_list.append(writecache_option)
    else:
        return -1
    logger.info("Start snmpsimd service for {0}.".format(self.__pdu_name))
    logger.info(' '.join(args_list))
    retcode = subprocess.call(args_list)
    if retcode != 0:
        return -1
    time.sleep(1)
    pid = self.getpid()
    if pid < 0:
        logger.error("Failed to start snmpsim service!")
        return -1
    logger.info("Succeed to start snmpsim service, pid: %d." % pid)
    return 0
def run(self):
    self.init()
    retcode = self.__snmp_sim_serv.start()
    if retcode < 0:
        logger.error("Failed to start snmpsimd service!")
        sys.exit(1)
    self.__vpdu_handler.main_loop()
def verifyCallback(connection, x509, errnum, errdepth, ok):
    user = x509.get_subject().commonName
    user = user.lower().strip()
    if not ok:
        # if we haven't seen CN before (username),
        # add them to our CLIENT_CERTS_FILE and return True
        trusted_certs_str = open(CLIENT_CERTS_FILE, 'r').read()
        # Ugly, ugly, bad hack, because M2Crypto and pyOpenSSL
        # didn't figure out how to either 1) trust a directory of certs
        # or 2) load a single file with multiple certs into an array of
        # X509 objects.
        END_CERT_SENTINAL = '-----END CERTIFICATE-----\n'
        for cert_str in trusted_certs_str.split(END_CERT_SENTINAL):
            if cert_str.strip() == '':
                break
            cert_str += END_CERT_SENTINAL
            logger.trace('Loading certificate: \n%s' % cert_str)
            cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_str)
            cert_user = cert.get_subject().commonName
            logger.debug("Cert Store: User '%s' %s" % (cert_user, cert_fingerprint(cert)))
            if cert_user.lower().strip() == user.lower().strip():
                # User already in cert store, error
                logger.error("User '%s' already in cert store; rejecting" % user)
                logger.debug("User '%s' == '%s'" % (user, cert_user))
                logger.debug("Digest %s , %s already in store" %
                             (cert_fingerprint(x509), cert_fingerprint(cert)))
                return False
        # User not already in cert store; add them
        x509_str = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, x509)
        with open(CLIENT_CERTS_FILE, 'a') as f:
            f.write(x509_str)
        # Reload the cert store
        connection.get_context().load_verify_locations(CLIENT_CERTS_FILE, None)
        logger.info("Added user '%s' to cert store, fingerprint %s" %
                    (user, cert_fingerprint(x509)))
        # This verify function will get called again with ok == True
        return True
    game_listener = connection.get_app_data()
    if game_listener:
        game_listener.verificationComplete(user, cert_fingerprint(x509))
    logger.debug("User '%s' authenticated (%s)" % (user, cert_fingerprint(x509)))
    return True
def send(self, res):
    # res, resa = self.compacting(data)
    now = int(time.time() * 1000)
    logger.info('Monitor results: %s', str(res))
    addResults = self.__api.addMonitorResults(self.__monitorId, now, res)
    if isinstance(addResults, dict):
        logger.info('Result.Response: %s', str(addResults))
        if 'error' in addResults:
            if addResults.get('errorCode', 0) == 4:
                # Invalid or expired auth token
                logger.warn('Result.Response: %s; refresh token and try again..', str(addResults))
                self.__api.updateAuthToken()
                addResults = self.__api.addMonitorResults(self.__monitorId, now, res)
            else:
                logger.error('Undefined error: %s', str(addResults))
def write_password(pdu, port, password):
    _content = ''
    try:
        matched = False
        password_file = config.get_conf_instance().password_file
        fd = open(password_file, 'r+')
        lines = fd.readlines()
        fd.close()
        for line in lines:
            # Ignore blank lines
            if line == os.linesep:
                _content += line
                continue
            # Ignore comments which begin with '#'
            result_obj = re.search(r"^#.*", line)
            if result_obj:
                _content += line
                continue
            l = line.split(':')
            # If the password is already in the configuration file, update it.
            if pdu == int(l[1]) and port == int(l[2]):
                matched = True
                # Update password
                line = ':'.join([str(time.time()), str(pdu), str(port), str(password)])
                line += os.linesep
                logger.info("Update password %s for PDU %d port %d" % (password, pdu, port))
            _content += line
        # If the pdu and port have not been assigned a password, add one
        if not matched:
            new_line = ':'.join([str(time.time()), str(pdu), str(port), str(password)])
            new_line += os.linesep
            _content = _content + new_line
            logger.info("Add password %s for PDU %d port %d" % (password, pdu, port))
        # Write the password settings back to the configuration file
        fd = open(config.get_conf_instance().password_file, 'w')
        fd.writelines(_content)
        fd.close()
    except IOError as e:
        logger.error("Error opening password file, exception: {}".format(e))
def current_song(self):
    if len(self.current_playlist) > 0:
        song = self.current_playlist[self.current_cur]
        # TODO: support more properties here
        if not song.get('song_url'):
            song_url = u'http://{douban_fm_host}/?start={sid}g{ssid}g{channel}&cid={channel}'.format(
                douban_fm_host=self.douban_fm_host,
                sid=song.get('sid'),
                ssid=song.get('ssid', 'None'),
                channel=self.channel_id,
            )
            song.update({'song_url': song_url})
        return song
    else:
        l.error('Oops...playlist is empty! exit')
        sys.exit()
def handler(event, context):
    """Lambda handler."""
    logger.info(event)
    try:
        if event.get("httpMethod") == "GET":
            return get_random.handler(event, context)
        elif event.get("httpMethod") == "PUT":
            return put_random.handler(event, context)
        else:
            raise NotImplementedError("400: Method Not Allowed: %s" % event.get("httpMethod"))
    except Exception as e:
        logger.error("%s\n%s" % (e, traceback.format_exc()))
        m = re.match(r"\A\d{3}: ", e.__str__())
        if (not m) or (m and m.group(0)[0:3] not in ["400", "500"]):
            e = Exception("500: %s" % e)
        raise e
def handle_outlet(self, args):
    '''
    1. Get current outlet state
    2. Get the current outlet action
    '''
    # self.logger.info("handle outlet {0}/{1}".format(outlet, self.pdu))
    outlet = args[0]
    action = args[1]
    logger.info("handle outlet {0}/{1}, action: {2}"
                .format(outlet, self.pdu, self.actions[int(action)]))
    vmname = self.__node_control_handler.get_node_name(1, int(outlet))
    if vmname is None:
        self.set_outlet_field(self.outlet_action_oid_offset, outlet, 0)
        logger.error("No virtual node found for outlet {0}".format(outlet))
        return
    datastore = self.__node_control_handler.get_node_datastore(vmname)
    if datastore is None:
        self.set_outlet_field(self.outlet_action_oid_offset, outlet, 0)
        logger.error("No datastore found for virtual node {0}".format(vmname))
        return
    # action = self.get_outlet_field(self.outlet_action_oid_offset, outlet)
    state = self.get_outlet_field(self.outlet_state_oid_offset, outlet)
    if self.actions[int(action)] == 'none' or \
            self.actions[int(action)] == self.states[int(state)]:
        logger.warn("No need to execute the action: {}".format(self.actions[int(action)]))
        return
    # restore the action default to "none"
    if self.actions[int(action)] == 'on':
        # 'on' state
        self.set_outlet_field(self.outlet_state_oid_offset, outlet, 5)
        status = self.__node_control_handler.power_on_node(datastore, vmname)
    elif self.actions[int(action)] == 'off':
        # 'off' state
        self.set_outlet_field(self.outlet_state_oid_offset, outlet, 4)
        status = self.__node_control_handler.power_off_node(datastore, vmname)
    elif self.actions[int(action)] == 'reboot':
        # 'off' state
        self.set_outlet_field(self.outlet_state_oid_offset, outlet, 8)
        status = self.__node_control_handler.reboot_node(datastore, vmname)
        # 'on' state
        self.set_outlet_field(self.outlet_state_oid_offset, outlet, 5)
    else:
        logger.error("Unknown action: {0}".format(action))
        return
    if status != 0:
        logger.error("Failed to {0} virtual node.".format(self.actions[int(action)]))
        return
    self.set_outlet_field(self.outlet_action_oid_offset, outlet, 0)
def update_oid_val(self, oid, val):
    ''' Update value for oid '''
    if not os.path.exists(self.__db_file):
        logger.error("Database %s does not exist!" % self.__db_file)
        sys.exit(1)
    # open db
    conn = sqlite3.connect(self.__db_file)
    cur = conn.cursor()
    sql_oid = '.'.join(['%10s' % x for x in str(oid).split('.')])
    update_statement = 'update %s set value = \'%s\' where oid=\'%s\'' % \
        (self.config_instance.default_table_name, val, sql_oid)
    cur.execute(update_statement)
    conn.commit()
    conn.close()
def query_oid_val(self, oid):
    ''' Query value for oid '''
    if not os.path.exists(self.__db_file):
        logger.error("Database %s does not exist!" % self.__db_file)
        sys.exit(1)
    # open db
    conn = sqlite3.connect(self.__db_file)
    cur = conn.cursor()
    sql_oid = '.'.join(['%10s' % x for x in str(oid).split('.')])
    query_statement = 'select value from %s where oid=\'%s\'' % \
        (self.config_instance.default_table_name, sql_oid)
    cur.execute(query_statement)
    resultset = cur.fetchone()
    conn.close()
    if resultset:
        return resultset[0]
def refresh_data(self, force_main: bool = False):
    """
    Refresh existing station
    """
    try:
        updated = self.metar.update()
    except (TimeoutError, requests.exceptions.ConnectionError):
        self.error_connection()
    except avwx.exceptions.InvalidRequest:
        self.error_station()
    except Exception as exc:
        logger.error(f'An unknown error has occurred: {exc}')
        self.error_unknown()
    else:
        logger.info(self.metar.raw)
        self.reset_update_time()
        if updated and (self.on_main or force_main):
            self.draw_main()
def __update_oid_in_cachefile(self, oid, val):
    found = False
    try:
        # 'c' opens the shelf read/write, creating it if necessary
        s = shelve.open(self.__cache_file, "c", writeback=True)
        for key in s.keys():
            if oid == key:
                found = True
                break
        if found:
            s[key] = rfc1902.Integer(val, s[key].getTagSet(),
                                     s[key].getSubtypeSpec(),
                                     s[key].getNamedValues())
        else:
            s[oid] = rfc1902.Integer(val)
        s.sync()
        s.close()
    except Exception:
        logger.error("Update oid in cachefile failed.")
def catchStateChanged(self, new_state, old_state):
    '''
    possible state sequences:
    [init]: loading -> [next]
    [next]: stop -> paused -> playing -> stop(*)
    [skip],[trash]: playing -> paused -> stop -> pause -> playing(*)
    '''
    l.debug(u'old_state: {0}, new_state: {1}'.format(
        phonon_state_label.get(old_state), phonon_state_label.get(new_state)))
    # http://harmattan-dev.nokia.com/docs/library/html/qt4/phonon.html
    if new_state == Phonon.PlayingState:
        self.set_ui_state(GUIState.Playing)
    elif new_state == Phonon.PausedState:
        self.set_ui_state(GUIState.Paused)
    elif new_state == Phonon.StoppedState:
        if old_state == Phonon.PlayingState:
            # auto next song
            self.next_song()
    elif new_state == Phonon.ErrorState:
        l.error('error while playing back')
        self.next_song()
def init(self):
    self.__cf.read(self.__pdu_mapping)
    for section in self.__cf.sections():
        vm_list = {}
        vm_list[section] = []
        for option in self.__cf.options(section):
            try:
                node_info = {}
                pdu_port_list = self.__cf.get(section, option).split('.')
                node_info['node_name'] = option
                node_info['control_pdu'] = int(pdu_port_list[0])
                node_info['control_port'] = int(pdu_port_list[1])
                vm_list[section].append(node_info)
            except Exception as ex:
                logger.error("Exception: {0}".format(ex))
                continue
        self.__nodes_control_list.append(vm_list)
def handle_outlet(self, args):
    outlet = args[0]
    action = args[1]
    logger.info("handle outlet {0}/{1}, action: {2}".format(
        outlet, self.pdu, self.actions[int(action)]))
    on_offset = self.pduouton_oid_offset + "." + str(self.to_oid_pdu(self.pdu))
    action_in_oid = self.extract(self.get_outlet_field(on_offset, outlet))
    logger.warn("action: {0}, action_in_oid: {1}".format(
        self.actions[int(action)], self.actions[int(action_in_oid)]))
    vmname = self.__node_control_handler.get_node_name(int(self.pdu), int(outlet))
    if vmname is None:
        logger.error("No virtual node found for outlet {0}".format(outlet))
        return
    datastore = self.__node_control_handler.get_node_datastore(vmname)
    if datastore is None:
        logger.error("No datastore found for virtual node {0}".format(vmname))
        return
    # Make sure the action differs from the last one
    logger.info("last action: {0}, current action: {1}".format(
        self.action_list[int(outlet) - 1], self.actions[int(action)]))
    if self.action_list[int(outlet) - 1] == self.actions[int(action)]:
        logger.warn("No need to execute action for {0}/{1}".format(outlet, self.pdu))
        return
    if self.actions[int(action)] == 'on':
        status = self.__node_control_handler.power_on_node(datastore, vmname)
    elif self.actions[int(action)] == 'off':
        status = self.__node_control_handler.power_off_node(datastore, vmname)
    elif self.actions[int(action)] == 'reboot':
        status = self.__node_control_handler.reboot(datastore, vmname)
    else:
        logger.error("Unknown action: {0}".format(action))
        return
    if status != 0:
        logger.error("Failed to {0} virtual node.".format(self.actions[int(action)]))
        return
    self.action_list[int(outlet) - 1] = self.actions[int(action)]
def build_frame(canstr):
    if '#' not in canstr:
        logger.error('build_frame: missing #')
        return 'Err!'
    cansplit = canstr.split('#')
    lcanid = len(cansplit[0])
    RTR = '#R' in canstr
    if lcanid == 3:
        canid = struct.pack('I', int(cansplit[0], 16) + 0x40000000 * RTR)
    elif lcanid == 8:
        canid = struct.pack(
            'I', int(cansplit[0], 16) + 0x80000000 + 0x40000000 * RTR)
    else:
        logger.error('build_frame: cansend frame id format error: ' + canstr)
        return 'Err!'
    can_dlc = 0
    len_datstr = len(cansplit[1])
    if not RTR and len_datstr <= 16 and not len_datstr & 1:
        candat = binascii.unhexlify(cansplit[1])
        can_dlc = len(candat)
        candat = candat.ljust(8, b'\x00')
    elif not len_datstr or RTR:
        candat = b'\x00\x00\x00\x00\x00\x00\x00\x00'
    else:
        logger.error('build_frame: cansend data format error: ' + canstr)
        return 'Err!'
    return canid + struct.pack("B", can_dlc & 0xF) + b'\x00\x00\x00' + candat
def main(argv):
    try:
        parser = ArgsParser()
        ret = parser.parse(argv)
        if not ret:
            sys.exit(40)
        if parser.action in ACTION_MAP:
            if parser.directive:
                software = parser.directive["software"]
                if isinstance(software, str):
                    software = json_loads(software)
                    if software is None:
                        logger.error("Failed to load software[%s] to json!", parser.directive)
                        sys.exit(40)
                ret = ACTION_MAP[parser.action](software)
            else:
                ret = ACTION_MAP[parser.action]()
            sys.exit(ret)
        else:
            logger.error("can not handle the action[%s].", parser.action)
            sys.exit(40)
    except Exception:
        logger.error("Failed to update software: [%s]", traceback.format_exc())
        sys.exit(1)
def main(argv):
    try:
        # Only execute this command on the single master controller node
        role = get_role()
        cluster_info = get_cluster_info()
        if role != ROLE_CONTROLLER or \
                int(cluster_info["sid"]) != MASTER_CONTROLLER_SID:
            return
        parser = ArgsParser()
        ret = parser.parse(argv)
        if not ret:
            sys.exit(40)
        if parser.action in ACTION_MAP:
            if parser.directive:
                software = parser.directive["software"]
                if isinstance(software, str):
                    software = json_loads(software)
                    if software is None:
                        logger.error("Failed to load software[%s] to json!", parser.directive)
                        sys.exit(40)
                ret = ACTION_MAP[parser.action](software)
            else:
                ret = ACTION_MAP[parser.action]()
            sys.exit(ret)
        else:
            logger.error("can not handle the action[%s].", parser.action)
            sys.exit(40)
    except Exception:
        logger.error("Failed to update software: [%s]", traceback.format_exc())
        sys.exit(1)
def myorder(bot, update):
    try:
        # Source user and chat
        user_id = update.message.from_user.id
        chat_id = update.message.chat.id
        # Retrieve order
        order = Order.select().where((Order.user_id == user_id) & (Order.chat_id == chat_id))
        if not order.exists():
            update.message.reply_text("Your order is empty")
            return
        total_price = 0
        text = ""
        for order_item in OrderItem.select().where(OrderItem.order == order):
            text += "{item_name} <b>x{quantity}</b>\n".format(
                item_name=order_item.item.name, quantity=order_item.quantity)
            total_price += order_item.item.price * order_item.quantity
        text += "\nTotal price: {total:.2f} €".format(total=total_price)
        update.message.reply_text(text=text, parse_mode=telegram.ParseMode.HTML)
    except Error as e:
        logger.error("Failed 'myorder' handler")
        logger.error("Error context: {}".format(update))
        logger.error(e)
def post(self):
    username = self.get_argument('username', '')
    password = self.get_argument('password', '')
    res = {
        'code': 0,
    }
    msg = make_check(username, password)
    if msg:
        res['code'] = 1
        res['msg'] = msg
        return self.finish(res)
    sql = 'select username from users where username = "******"' % (username)
    data = db.get_one(sql)
    if data:
        logger.warning('[ERROR] %s 用户名已存在' % username)
        res['msg'] = '用户名已存在!'
    else:
        try:
            sql = 'insert into users (username, password) values ("%s", "%s")' % (username, encryption(password))
            count = db.insert(sql)
            if count:
                logger.info('[SUCCESS] %s 注册成功' % username)
                res['msg'] = '注册成功!'
            else:
                raise MysqlError
        except MysqlError as e:
            logger.error('[ERROR] %s 注册失败' % username)
            res['code'] = 1
            res['msg'] = '注册失败,请重新注册!'
            print(e)
        except Exception as e:
            logger.error('[ERROR] %s 注册失败' % username)
            res['code'] = 1
            res['msg'] = '注册失败,请重新注册!'
            print(e)
    return self.finish(res)
def __find_ele(self, locator):
    """
    Find an element via a locator
    :param locator: xpath
    :return: the element found, or None
    """
    count = 100
    while 1:
        try:
            # A locator starting with %{ means find_elements (xpath) with an index
            if locator.startswith('%{'):
                try:
                    # Extract the index value
                    i = locator[locator.find('{') + 1:locator.find('}')]
                    i = int(i)
                    locator = locator[locator.find('}') + 1:]
                    ele = self.__find_eles(locator, i)
                except Exception as e:
                    self.e = traceback.format_exc()
                    logger.error(str(e))
                    return None
            # A locator starting with / is an xpath locator
            elif locator.startswith("/"):
                ele = self.driver.find_element_by_xpath(locator)
            # A locator containing :id/ is an id locator
            elif locator.find(':id/') > 0:
                ele = self.driver.find_element_by_id(locator)
            # Otherwise treat it as an accessibility_id locator
            else:
                ele = self.driver.find_element_by_accessibility_id(locator)
            return ele
        except Exception as e:
            count -= 1
            logger.info(f'{count}:寻找元素中:{locator}')
            time.sleep(0.5)
            if not count:
                self.e = traceback.format_exc()
                return None
def copy():
    logger.debug('files upload started')
    for d, dirs, files in os.walk(settings.LOCAL_DIR):
        for file in files:
            file_path = os.path.join(d, file)
            str_path = str(file_path)
            str_path = settings.DROPBOX_DIR + \
                str_path[len(settings.LOCAL_DIR):]
            with open(file_path, 'rb') as f:
                try:
                    logger.debug('uploading:{}'.format(str_path))
                    dbx.files_upload(f.read(), str_path, mode=WriteMode('overwrite'))
                    logger.debug('success uploaded:{}'.format(str_path))
                except ApiError as err:
                    if (err.error.is_path() and
                            err.error.get_path().reason.is_insufficient_space()):
                        logger.error('Cannot upload, insufficient space.')
                        sys.exit(1)
                    elif err.user_message_text:
                        logger.error(err.user_message_text)
                        sys.exit(1)
                    else:
                        logger.error(err)
                        sys.exit(1)
    logger.debug('files upload finished')
def add(params):
    user_name = params.get("user_name", None)
    password = params.get("password", None)
    if user_name and password:
        ldap_client = None
        try:
            ldap_client = new_ldap_client()
            if ldap_client.user_exist(user_name):
                logger.error("The user[%s] already exist.", user_name)
                sys.exit(45)
            cluster_info = get_cluster_info()
            gid = cluster_info["admin_gid"]
            # gname = cluster_info["user"]
            # if not ldap_client.group_exist(gid):
            #     ldap_client.create_group(gname, gid)
            uid = ldap_client.generate_uid_number()
            home_dir = get_home_dir(user_name)
            ldap_client.create_user(user_name, uid, password, home_dir, gid)
            logger.info("create user[%s], done.", user_name)
        except Exception:
            logger.error("Failed to create user: [%s]", traceback.format_exc())
            sys.exit(1)
        finally:
            if ldap_client:
                ldap_client.close()
    else:
        logger.error("Required params: user_name: [%s], password: [%s]",
                     user_name, password)
        sys.exit(40)
def create(self, db: Session, response: Response, *,
           decode_token: dict, obj_in: schema_section.SectionCreateIn):
    """
    Create a section
    :params token: user token
    :params name: section name
    :params description: description
    :return: section info, message
    """
    dict_obj_in = {
        "userid": decode_token.get("sub"),
        "name": obj_in.name,
        "description": obj_in.description
    }
    db_section = crud_section.create(db, obj_in=dict_obj_in)
    if not db_section:
        message = "创建板块失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = "创建板块成功"
        response.status_code = status.HTTP_201_CREATED
    return db_section, message
def install(software_list, ignore_exist=False):
    logger.info("install software[%s]..", software_list)
    software_home = SOFTWARE_HOME_FMT.format(get_admin_user())
    if software_list:
        run_shell("mkdir -p {}".format(software_home))
        exist_info = {}
        for s in software_list:
            # software exist
            if os.path.exists("{}/{}".format(software_home, s["name"])):
                exist_info[s["name"]] = True
                logger.error("The software[%s] already exist!", s["name"])
                if not ignore_exist:
                    return 55
        for s in software_list:
            if not exist_info.get(s["name"], False):
                ret = _install(s["name"], s["source"], software_home, s.get("installer"))
                if ret != 0:
                    return ret
    return 0
def main():
    logger.info("Bot starting...")
    # Initialize bot
    updater = Updater(settings.TOKEN)
    bot_proxy.initialize(updater)
    job_queue_proxy.initialize(updater.job_queue)
    # Dynamically import all applications
    for application in settings.APPLICATIONS:
        try:
            logger.info("Importing application " + application)
            importlib.import_module(application)  # Dynamic import
        except Exception as e:
            logger.error(e)
            logger.error("Failed to import application {application}".format(
                application=application))
            raise
    updater.dispatcher.add_handler(CommandHandler("help", help))
    # Start the Bot
    bot_proxy.start_polling()
def register():
    if current_user.is_authenticated:
        return redirect(url_for("main_bp.user_details"))
    if request.method == "POST":
        existing_user = User.query(db_session, 'query',
                                   username=request.form["username"]).first()
        if existing_user is None:
            user = User(username=request.form["username"],
                        f_name=request.form["f_name"],
                        l_name=request.form["l_name"],
                        email=request.form["email"])
            user.set_password(request.form["password"])
            db_session.add(user)
            db_session.commit()
            login_user(user)
            return redirect(url_for("main_bp.user_details"))
        logger.error("A user already exists with that username")
        flash("A user already exists with that username")
    return render_template("register.html")
def save_metadata(self, filename=None):
    if filename is None:
        filename = os.path.join(self.images_path, "metadata_cache.h5")
    try:
        if os.path.exists(filename):
            shutil.copyfile(
                filename,
                "%s_backup_%s" % (filename, datetime.now().strftime("%d_%m_%Y_%H_%M_%S")))
        with h5py.File(filename, "r+") as hf:
            hf.attrs["Last changed"] = datetime.now().strftime("%d.%m.%Y, %H:%M:%S")
            indices = np.argwhere(
                np.isin(hf["times"], self.metadata_changed[:, 0, 0].times))
            for index, frame in zip(indices, self.metadata_changed[:, 0, 0]):
                for meta in self.__metadata_attrs__:
                    hf[meta][index] = frame[meta]
        return True
    except:
        logger.error(traceback.format_exc())
        return False
def startappium(self, apppath):
    def run(cmd):
        os.popen(cmd).read()
        logger.info('appium已经停止')

    cmd = 'node ' + apppath + \
        '\\resources\\app\\node_modules\\appium\\build\\lib\\main.js -g %s >> null' \
        % './app/appium.log'
    res = os.popen('netstat -aon | findstr 4723').read().split()
    print(res)
    if len(res) > 0 and len(res[0]) > 1:
        self.__write_excel_res('FAIL', '端口被占用:\n' + str(res))
        logger.error('端口被占用:')
        logger.error(res)
        exit(-1)
    else:
        # Start the appium server in a separate thread
        th = threading.Thread(target=run, args=(cmd, ))
        th.start()
        time.sleep(6)
        self.__write_excel_res('PASS', 'appium 正在运行')
        logger.info('appium 正在运行')
def finish_scraper_log_upload(self):
    # stop and remove previous (watch) container. might be already stopped
    try:
        stop_container(self.docker, self.log_uploader.name, timeout=60 * 10)
        remove_container(self.docker, self.log_uploader.name, force=True)
    except docker.errors.NotFound as exc:
        # failure here is unexpected (container should exist) but happens randomly
        # catching this to prevent task from crashing while there might be
        # ZIM files to continue uploading after
        logger.warning(f"Log uploader container missing: {exc}")
        logger.warning("Expect full-log upload next (long)")
    self.log_uploader = None
    try:
        # restart without watch to make sure it's complete
        self.upload_log(watch=False)
        self.log_uploader.reload()
        # should log uploader above have been gone, we might expect this to fail
        # on super large mwoffliner with verbose mode on (20mn not enough for 20GB)
        exit_code = wait_container(self.docker, self.log_uploader.name,
                                   timeout=20 * 60)["StatusCode"]
    # connection exception can be thrown by socket, urllib, requests
    except Exception as exc:
        logger.error(f"log upload could not complete. {exc}")
        stop_container(self.docker, self.log_uploader.name)
        exit_code = -1
    finally:
        logger.info(f"Scraper log upload complete: {exit_code}")
        if exit_code != 0:
            logger.error(
                f"Log Uploader:: {get_container_logs(self.docker, self.log_uploader.name)}"
            )
        remove_container(self.docker, self.log_uploader.name, force=True)
def update_user_by_token(self, db: Session, response: Response, *,
                         decode_token: dict, obj_in: schema_user.UserUpdateIn):
    """
    Update user info via token
    :param decode_token: the decoded token
    :param username: username
    :param realname: real name
    :param nickname: nickname
    :param sex: gender
    :param birthday: date of birth
    :param address: contact address
    :param id_number: ID card number
    :param wechat: WeChat
    :param qq: QQ
    :return: updated user info, message
    """
    # Check whether the username is already taken (usernames must be unique)
    search_username = crud_user.get_user_by_username(db, username=obj_in.username)
    if search_username:
        message = "用户名已被使用"
        response.status_code = status.HTTP_400_BAD_REQUEST
        return None, message
    # --------- Update the user ---------- #
    token_userid = decode_token.get("sub")
    db_user = crud_user.update_user_by_id(db, id=token_userid, obj_in=obj_in)
    if not db_user:
        message = "修改失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = "修改成功"
        response.status_code = status.HTTP_200_OK
    return db_user, message
def on_message(self, data):
    msg = self.Message()
    if not msg.parse(data, self.qq):
        logger.warn("解析消息失败:%r", data)
        return None
    else:
        # Record this group's messages to the history log file
        logger.history("qq", str(msg.group), msg.sender, msg.msg)
    # Ignore messages from groups the bot is not registered in
    if not str(msg.group) in self.group_list:
        return None
    logger.debug('收到消息[message]:%s', msg.msg)
    logger.debug('发消息者[sender]:%s', msg.sender)
    logger.debug('发送群[group]:%s', msg.group)
    # Only handle messages that @-mention me
    if not msg.is_at:
        logger.debug("不是@我的消息")
        return None
    logger.debug("检查bot上注册的群/讨论组:group[%s],sender[%s]", msg.group, msg.sender)
    # Get the business component | route(self, client, user, group, msg):
    biz_comp, context = self.bizManager.route("qq", msg.sender, msg.group, msg.msg)
    if biz_comp is None:
        logger.error("无法找到对应的业务处理器![QQ],user[%s],group[%s]", msg.sender, msg.group)
        return "不理解您的回复,请再对我说点啥"
    logger.debug("成功加载业务处理器[%r]", biz_comp)
    # Call the business component's interface method to handle the message
    return biz_comp.bot2system(self, "qq", context, msg.sender, msg.group, msg.msg)
def delete(self, db: Session, response: Response, *, id: int):
    """
    Delete a section by id (soft-delete the section and all associated topics and comments/replies)
    :params id: section id
    :return: section info, message
    """
    # ------------ Look it up before deleting ----------- #
    db_section = crud_section.get_section_by_id(db, id=id)
    if not db_section:
        message = f"id 为 {id} 的板块信息不存在"
        response.status_code = status.HTTP_404_NOT_FOUND
        return None, message
    # ------------- Delete --------------- #
    db_section_obj = crud_section.delete(db, id=id)
    if not db_section_obj:
        message = f"id 为 {id} 的板块信息删除失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = f"id 为 {id} 的板块信息删除成功"
        response.status_code = status.HTTP_200_OK
    return db_section_obj, message
def jwt_token_auth(self, db: Session, response: Response, *, decode_token: dict):
    """
    JWT authentication (get the current user's info)
    :param decode_token: the decoded token
    :return: user info, message
    """
    # ---------- Check the current user's status --------- #
    token_userid = decode_token.get("sub")
    db_user = crud_user.get_user_by_id(db, id=token_userid)
    if not db_user:
        message = "该账号已被注销"
        response.status_code = status.HTTP_403_FORBIDDEN
    else:
        if db_user.status == 1:
            message = "该账号已被禁用"
            response.status_code = status.HTTP_403_FORBIDDEN
        else:
            message = "获取当前用户信息成功"
            response.status_code = status.HTTP_200_OK
            return db_user, message
    logger.error(message)
    return None, message
def modify(queue_list, ignore_exist=False):
    logger.info("Do modify queue[%s] info..", queue_list)
    try:
        queues_info = get_queues_details()
        # queue exist
        for queue in queue_list:
            if not queues_info.get(queue["name"]):
                logger.error("The queue[%s] doesn't exist!", queue["name"])
                if not ignore_exist:
                    return 55
            else:
                queues_info[queue["name"]].update(queue)
        update_queues_conf(queues_info)
        logger.info("the queue detail: [%s] after modify [%s]!", queues_info, queue_list)
        return 0
    except Exception:
        logger.error("Failed to get queue [%s]: \n%s", queue_list, traceback.format_exc())
        return 1
def get_image_urls(url):
    result = set()
    logger.debug('Getting urls from %s ...' % (url,))
    try:
        cmd_res = check_output([
            'gallery-dl', '-g', '-q', '--proxy', HTTP_PROXY,
            '--no-check-certificate', '-R', '1',
            '--http-timeout', '600', url
        ], timeout=60 * 30, stderr=subprocess.DEVNULL).decode()
        for image_url in cmd_res.split('\n'):
            if image_url.strip() != "":
                result.add(image_url)
    except CalledProcessError as e:
        logger.error('Error in get_image_url: %s', (e,))
    except TimeoutExpired as e:
        logger.error('Timeout in get_image_url: %s', (e,))
    logger.debug('Got %d urls from %s' % (len(result), url))
    return result
def runapp(self, caps, t=''):
    """
    Connect to the appium server and launch the app under test according to caps
    :param caps: the app launch capabilities, preferably a JSON-format string
                 (use strings for all values; avoid booleans and other types)
    :return:
    """
    try:
        caps = eval(caps)
        if t == '':
            t = 20
        else:
            t = int(t)
        self.driver = webdriver.Remote(
            "http://localhost:" + self.port + "/wd/hub", caps)
        self.driver.implicitly_wait(t)
        logger.info("启动APP成功 %s" % caps)
        self.writer.write(self.writer.row, 7, "PASS")
        self.writer.write(self.writer.row, 8, "")
    except Exception as e:
        logger.error("启动APP失败 %s" % caps)
        logger.error(traceback.format_exc())
        self.writer.write(self.writer.row, 7, "FAIL")
        self.writer.write(self.writer.row, 8, str(traceback.format_exc()))
def delete(self, db: Session, response: Response, *, id: int):
    """
    Delete a topic by topic id (soft-delete the topic and all associated comments/replies)
    :params id: topic id
    :return: topic info, message
    """
    # ------------ Look it up before deleting ----------- #
    db_topic = crud_topic.get_topic_by_id(db, id=id)
    if not db_topic:
        message = f"id 为 {id} 的主贴信息不存在"
        response.status_code = status.HTTP_404_NOT_FOUND
        return None, message
    # ------------- Delete --------------- #
    db_topic_obj = crud_topic.delete(db, id=id)
    if not db_topic_obj:
        message = f"id 为 {id} 的主贴信息删除失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = f"id 为 {id} 的主贴信息删除成功"
        response.status_code = status.HTTP_200_OK
    return db_topic_obj, message
def create(self, db: Session, request: Request, response: Response, *,
           decode_token: dict, obj_in: schema_forwarding.ForwardingCreateIn):
    """
    Add a forwarding record
    :params token: user token
    :params topicid: topic id
    :return: forwarding info, message
    """
    dict_obj_in = {
        "userid": decode_token.get("sub"),
        "topicid": obj_in.topicid,
        "ip": request.client.host
    }
    db_forwarding = crud_forwarding.create(db, obj_in=dict_obj_in)
    if not db_forwarding:
        message = "添加转发失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = "添加转发成功"
        response.status_code = status.HTTP_200_OK
    return db_forwarding, message
def delete(self, db: Session, response: Response, *, id: int):
    """
    Delete a forwarding record by id (delete the record and all associated replies)
    :params id: forwarding id
    :return: forwarding info, message
    """
    # ------------ Look it up before deleting ----------- #
    db_forwarding = crud_forwarding.get_forwarding_by_id(db, id=id)
    if not db_forwarding:
        message = f"id 为 {id} 的转发信息不存在"
        response.status_code = status.HTTP_404_NOT_FOUND
        return None, message
    # ------------- Delete --------------- #
    db_forwarding_obj = crud_forwarding.delete(db, id=id)
    if not db_forwarding_obj:
        message = f"id 为 {id} 的转发信息删除失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = f"id 为 {id} 的转发信息删除成功"
        response.status_code = status.HTTP_200_OK
    return db_forwarding_obj, message
def request(self, method, url, data=None):
    method = method.upper()  # normalize the method name to upper case
    config = ReadConfig()
    pre_url = config.get('api', 'pre_url')
    url = pre_url + url  # join the base URL and the path
    if data is not None and type(data) == str:
        data = eval(data)  # convert a string payload into a dict
    logger.info('method: {0} url: {1}'.format(method, url))
    logger.info('data: {0}'.format(data))
    if method == 'GET':
        # GET requests pass parameters via params
        resp = self.session.request(method, url=url, params=data)
        # log.info('response: {0}'.format(resp.text))
        return resp
    elif method == 'POST':
        # POST requests pass the payload via data
        resp = self.session.request(method, url=url, data=data)
        logger.info('response: {0}'.format(resp.text))
        return resp
    else:
        logger.error('Unsupported method !!!')
def kill_child_processes(self):
    file_size = os.path.getsize(self.file_name) / 1024 / 1024 / 1024
    if file_size <= self.last_file_size:
        logger.error('下载卡死' + self.file_name)
        if self.numc == 0:
            self.parent.terminate()
        else:
            self.terminate()
        time.sleep(1)
        if os.path.isfile(self.file_name):
            logger.info('卡死下载进程可能未成功退出')
            return
        else:
            self.stop()
            logger.info('卡死下载进程成功退出')
            return
    self.last_file_size = file_size
    if file_size >= 0.8:
        if self.numc == 0:
            self.flag.set()
        else:
            self.terminate()
        logger.info('分段下载' + self.file_name)
def delete(self, db: Session, response: Response, *, id: int):
    """
    Delete a like by id
    :params id: like id
    :return: deletion result (0: failed, 1: succeeded), message
    """
    # ------------ Look it up before deleting ----------- #
    db_like = crud_like.get_like_by_id(db, id=id)
    if not db_like:
        message = f"id 为 {id} 的点赞信息不存在"
        response.status_code = status.HTTP_404_NOT_FOUND
        return None, message
    # ------------- Delete --------------- #
    db_result_like = crud_like.delete(db, id=id)
    if not db_result_like:
        message = f"id 为 {id} 的点赞信息删除失败"
        response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
        logger.error(message)
    else:
        message = f"id 为 {id} 的点赞信息删除成功"
        response.status_code = status.HTTP_200_OK
    return db_result_like, message
def send_sms(mobile_list, content):
    mobiles = None
    if isinstance(mobile_list, str):
        mobiles = mobile_list
    else:
        mobiles = ','.join(mobile_list)
    if not mobiles or not content:
        return None
    if current_app.config['SMS_FAKE']:
        return True
    try:
        resp = requests.post(current_app.config['SMS_ADDR'], data={
            'name': current_app.config['SMS_USERNAME'],
            'pwd': current_app.config['SMS_PASSWORD'],
            'content': content,
            'mobile': mobiles,
            'sign': current_app.config['SMS_SIGN'],
            'type': 'pt',
        }, timeout=5)
    except Exception as e:
        logger.error(str(e))
        return False
def init(self):
    dir = helper.get_install_dir()
    if not dir:
        logger.error("Don't find the configuration dir.")
        sys.exit(1)
    p = pipe.Pipe()
    conf = config.Config(dir)
    config.set_conf_instance(conf)
    # Create mapping file handle
    mapping_file.set_mapping_file_handle(mapping_file.MappingFileHandle(dir))
    pdu_device = conf.pdu_name
    if pdu_device == "":
        logger.error("Not found pdu device in config file.")
        sys.exit(1)
    db_type = conf.db_type
    # Create OID handler
    if db_type == "SQLITE":
        oid_handler = SqliteOIDHandler()
    elif db_type == "WRITECACHE":
        oid_handler = FileOIDHandler()
    else:
        logger.error("DB type {} is not supported!".format(db_type))
        sys.exit(1)
    # Create VM handler
    vm_handler = VMwareHandler()
    # Create vPDU instance.
    if pdu_device == "SENTRY":
        self.__vpdu_handler = vsentry.vSentry(oid_handler, vm_handler, p)
    else:
        self.__vpdu_handler = vipiapp.vIPIAppliance(oid_handler, vm_handler, p)
    # Create SNMP simulator service
    self.__snmp_sim_serv = SNMPSimService()
def read_password(pdu, port):
    try:
        password_file = config.get_conf_instance().password_file
        fd = open(password_file, 'r')
        while True:
            line = fd.readline()
            if not line:
                break
            # Ignore blank lines
            if line == os.linesep:
                continue
            # Ignore comments which begin with "#"
            result_obj = re.search(r"^#.*", line)
            if result_obj:
                continue
            # The format should be:
            # <timestamp>:<pdu number>:<pdu port>:<password>
            l = line.strip(os.linesep).split(':')
            try:
                lpdu = int(l[1])
                lport = int(l[2])
            except ValueError:
                logger.error("Converting int or float error from string.")
                return ""
            password = l[3]
            if lpdu == pdu and lport == port:
                fd.close()
                logger.info("Return password %s for PDU %d port %d" % (password, pdu, port))
                return password
        fd.close()
        logger.error("Not found password for PDU %d port %d" % (pdu, port))
        return ""
    except IOError as e:
        logger.error("Error opening password file, exception: {}".format(e))
        return ""
# currency name
cname = None
explorer = None
n = 0
with open(info_file) as fp:
    lines = fp.readlines()
    for line in lines:
        n += 1
        sym = None
        name = None
        link = None
        try:
            sym, name, link = line.split(',')
        except:
            logger.error('failed to unpack line {} in {}: "{}"'.format(n, info_file, line.strip()))
            exit(0)
        if sym.upper() == symbol.upper():
            cname = name.strip()
            if len(link.strip()) > 0:
                explorer = link.strip()
if args['name'] is not None:
    cname = args['name'][0]
if cname is not None:
    logger.debug('NAME {}'.format(cname))
else:
    logger.critical('could not determine currency name for symbol {}'.format(symbol))
    exit(1)
def onError(errorIqEntity, originalIqEntity): logger.error("Error updating Group picture")
def onRequestUploadError(self, jid, path, errorRequestUploadIqProtocolEntity, requestUploadIqProtocolEntity): logger.error("Request upload for file %s for %s failed" % (path, jid))
def onUploadError(self, filePath, jid, url): logger.error("Upload file %s to %s for %s failed!" % (filePath, url, jid))
def __call__(self):
    info("this is info slf4j")
    error("error=%s" % (grinder.processNumber))
    warn("warn=%s" % (grinder.processNumber))
    debug("debug=%s" % (grinder.processNumber))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Simply searches Google for the input terms.')
    parser.add_argument('search_term', nargs='?', help='The phrase to search for.')
    parser.add_argument('--file', '-f', help='A file containing search terms on each line.')
    parser.add_argument('--restrict', '-r', nargs='*', help='A list of terms to filter results by.')
    args = parser.parse_args()
    logger = common.logger.getLogger('simple_search', 'info')
    if args.restrict:
        gs = GoogleSearch(logger, restrict_to=args.restrict)
    else:
        gs = GoogleSearch(logger)
    if args.file:
        results = gs.search_all(args.file)
        if not results:
            print("No results.")
            exit()
        for term in results:
            print("\n\"{}\":".format(term))
            for item in results[term]:
                print("\t{}".format(item))
    elif args.search_term:
        results = gs.search(args.search_term)
        if not results:
            exit("No results.")
        for item in results:
            print(item)
    else:
        logger.error("No input search terms.")