def send_message_ws(userlist, message):
    """Send *message* to every connected websocket handler whose username
    is in *userlist*; failures on one socket do not stop the others."""
    for handler in WebsocketStatusHandler.socket_handlers:
        if handler.username in userlist:
            try:
                handler.write_message(message)
            except Exception:
                # Was a bare `except:` — that also swallowed SystemExit
                # and KeyboardInterrupt. Catch only real errors.
                logger.warn('Error sending message')
def bin_path(bin_file_name):
    """Return the absolute path of *bin_file_name* under ./bin relative to
    the current working directory, or None when no such file exists."""
    path = os.path.join(os.getcwd(), 'bin', bin_file_name)
    if os.path.isfile(path):
        return path
    # Lazy import keeps the common success path free of the dependency.
    from log import logger
    logger.warn('binary path does not exist: %s' % path)
    # Explicit None instead of the previous implicit fall-through.
    return None
def _setup_classpath(self):
    """Assemble self._classpath (colon-joined string) for JVM startup.

    Three mutually exclusive sources:
    - run from source: classpath built from the source workspace;
    - hybrid appcontainer from jars on runtime < 5: no classpath, only
      the appcontainer jar is looked up;
    - otherwise: classpath from the runtime binary plus the model.
    An optional extend_classpath list from the m2ee config is appended.
    """
    logger.debug("Determining classpath to be used...")
    classpath = []
    if self._run_from_source:
        logger.debug("Building classpath to run hybrid appcontainer from "
                     "source.")
        classpath = self._setup_classpath_from_source()
    elif self.use_hybrid_appcontainer() and self.runtime_version < 5:
        # Jar-based hybrid startup uses `java -jar`, so no classpath.
        logger.debug("Hybrid appcontainer from jars does not need a "
                     "classpath.")
        self._appcontainer_jar = self._lookup_appcontainer_jar()
    elif not self._appcontainer_version or self.runtime_version >= 5:
        logger.debug("Building classpath to run appcontainer/runtime from "
                     "jars.")
        classpath = self._setup_classpath_runtime_binary()
        classpath.extend(self._setup_classpath_model())
    if 'extend_classpath' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['extend_classpath'], list):
            classpath.extend(self._conf['m2ee']['extend_classpath'])
        else:
            logger.warn("extend_classpath option in m2ee section in "
                        "configuration is not a list")
    self._classpath = ":".join(classpath)
    if self._classpath:
        logger.debug("Using classpath: %s" % self._classpath)
    else:
        logger.debug("No classpath will be used")
def get_java_cmd(self):
    """
    Build complete JVM startup command line.

    Returns the argv list, or None when neither a classpath nor an
    appcontainer jar is available.
    """
    cmd = ['java']
    # Optional extra JVM options from the m2ee config section.
    if 'javaopts' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['javaopts'], list):
            cmd.extend(self._conf['m2ee']['javaopts'])
        else:
            logger.warn("javaopts option in m2ee section in configuration "
                        "is not a list")
    if self._classpath:
        # Classpath-based startup: launch the appcontainer main class.
        cmd.extend([
            '-cp', self._classpath,
            self._get_appcontainer_mainclass()
        ])
    elif self._appcontainer_version:
        # Jar-based startup (hybrid appcontainer).
        cmd.extend(['-jar', self._appcontainer_jar])
    else:
        logger.critical("Unable to determine JVM startup parameters.")
        return None
    logger.trace("Command line to be used when starting the JVM: %s" %
                 ' '.join(cmd))
    return cmd
def showOpenFile(self):
    """Show a modal open-file dialog filtered to supported video MIME
    types and hand the selected files to processVideoFiles()."""
    dlg = QFileDialog(self)
    dlg.setWindowTitle(self.tr("Open Video Files"))
    dlg.setWindowModality(Qt.WindowModal)
    # Containers we accept; each is resolved to its glob patterns below.
    mimeFilters = [
        "video/mp4", "video/quicktime",
        "video/x-msvideo", "video/x-ms-wmv"
    ]
    globPatterns = []
    db = QMimeDatabase()
    for m in mimeFilters:
        mimeType = db.mimeTypeForName(m)
        if not mimeType.isValid():
            # Platform MIME db may not know every type; skip those.
            logger.warn("Invalid MIME type: {}".format(m))
            continue
        globPatterns.extend(mimeType.globPatterns())
    globText = ' '.join(globPatterns)
    logger.debug("Video glob patterns: {}".format(globText))
    dlg.setNameFilters([
        self.tr("Video Files ({})").format(globText),
        self.tr("All Files (*)")
    ])
    dlg.setAcceptMode(QFileDialog.AcceptOpen)
    dlg.setOption(QFileDialog.ReadOnly, True)
    dlg.setOption(QFileDialog.DontUseCustomDirectoryIcons, True)
    dlg.setLabelText(QFileDialog.Accept, self.tr("Open Movie"))
    dlg.setFileMode(QFileDialog.ExistingFiles)
    if dlg.exec_():
        filenames = dlg.selectedFiles()
        self.processVideoFiles(filenames)
def _createVersionLabels(self) -> List[Tuple[QWidget, QWidget]]:
    """Build (name-label, value-label) widget pairs for the about dialog.

    Each entry in AboutDialog._VERSION_LABELS supplies either a literal
    'value' or a 'file' to read the value from; entries with neither, or
    whose file is empty/unreadable, are skipped.
    """
    version_labels = []
    for d in AboutDialog._VERSION_LABELS:
        label_text = d['label']
        if 'value' in d:
            value = d['value']
        else:
            if 'file' not in d:
                # logger.warn() is a deprecated alias — use warning().
                logger.warning(
                    f"No value or file for version label {label_text}")
                continue
            value = AboutDialog._readFile(d['file'])
            if not value:
                continue
        versionLabel = QLabel(label_text)
        versionLabel.setStyleSheet("font-size: 10pt")
        versionLabel.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
        versionValueLabel = self._createSelectableLabel(value)
        versionValueLabel.setSizePolicy(QSizePolicy.Maximum,
                                        QSizePolicy.Maximum)
        versionValueLabel.setStyleSheet("font-size: 10pt")
        version_labels.append((versionLabel, versionValueLabel))
    return version_labels
def _configure_logging(self):
    """Create runtime log subscribers from the logging config, set
    per-node levels, and start logging.

    Result code 3 from create_log_subscriber means the subscriber name
    already exists and is ignored; other non-zero results are displayed.
    (TODO: functions to restart logging when config is changed?)
    """
    logger.debug("Setting up logging...")
    logging_config = self.config.get_logging_config()
    if len(logging_config) == 0:
        logger.warn("No logging settings found, this is probably not what "
                    "you want.")
        return
    for log_subscriber in logging_config:
        if log_subscriber["name"] != "*":
            m2eeresponse = self.client.create_log_subscriber(
                log_subscriber)
            result = m2eeresponse.get_result()
            if result == 3:
                # logsubscriber name exists -> ignore
                pass
            elif result != 0:
                m2eeresponse.display_error()
        if "nodes" in log_subscriber:
            self.set_log_levels(log_subscriber["name"],
                                log_subscriber["nodes"],
                                force=True)
    self.client.start_logging()
def index():
    """Main page: accept a submitted task (optional image upload), store
    it, then render the page with recent tasks newest-first."""
    taskform = TaskForm()
    if taskform.validate_on_submit():
        description = None
        filename = None
        if taskform.description.data:
            description = taskform.description.data
        if taskform.image.data:
            filename = secure_filename(taskform.image.data.filename)
            # Avoid overwriting existing files by appending a numeric
            # counter before the extension. (The old scheme repeatedly
            # inserted '_' before the dot, yielding names like 'a__.jpg'.)
            base, ext = path.splitext(filename)
            counter = 1
            while path.exists(path.join(taskattachdir, filename)):
                filename = '%s_%d%s' % (base, counter, ext)
                counter += 1
            image_path = path.join(taskattachdir, filename)
            taskform.image.data.save(image_path)
            if AUTO_ROTATE:
                try:
                    angle = fix_orientation(image_path, save_over=True)
                    logger.info('image %s rotated by %s degrees'
                                % (filename, angle))
                except ValueError as e:
                    # No EXIF orientation tag: keep the image as saved.
                    logger.warn('image %s has no EXIF data: %s'
                                % (filename, e))
        store(description, filename)
    recent = readjson(taskjson)
    if recent is not None:
        recent = sorted(recent, key=lambda r: r['timestamp'], reverse=True)
    return render_template('main.html', title='Today I ...',
                           taskform=taskform, recent=recent)
def get_java_cmd(self):
    """
    Build complete JVM startup command line.

    Returns the argv list, or None when no startup method applies.
    """
    cmd = []
    # Allow overriding the java binary via the 'javabin' option.
    cmd.append(self._conf['m2ee'].get('javabin', 'java'))
    if 'javaopts' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['javaopts'], list):
            cmd.extend(self._conf['m2ee']['javaopts'])
        else:
            logger.warn("javaopts option in m2ee section in configuration "
                        "is not a list")
    if self.runtime_version >= 7:
        # Mendix >= 7 ships its own launcher jar; no classpath needed.
        cmd.extend([
            '-jar',
            os.path.join(self._runtime_path,
                         'runtime/launcher/runtimelauncher.jar'),
            self.get_app_base(),
        ])
    elif self._classpath:
        cmd.extend(['-cp', self._classpath])
        if self.runtime_version >= 5:
            # Mendix 5/6 run on Felix OSGi; point it at its config file.
            cmd.append('-Dfelix.config.properties=file:%s' %
                       self.get_felix_config_file())
        cmd.append(self._get_appcontainer_mainclass())
    elif self._appcontainer_version:
        # Jar-based startup (hybrid appcontainer).
        cmd.extend(['-jar', self._appcontainer_jar])
    else:
        logger.critical("Unable to determine JVM startup parameters.")
        return None
    return cmd
def get_json_arg(self, name=None, *args):
    """Return argument *name* from the JSON-formatted request body.

    Without *name*, the whole decoded object is returned. A single
    positional default may follow *name*; it is returned when the key is
    absent. Raises HTTPError(400) for non-JSON bodies or a missing key
    with no default.
    """
    headers = self.request.headers
    has_json_type = ('Content-Type' in headers
                     and 'application/json' in headers['Content-Type'])
    if not has_json_type:
        logger.warn('Content-Type is not JSON, ignored.')
    try:
        obj = json.loads(self.request.body)
    except ValueError:
        raise HTTPError(400, 'Request body is not JSON formatted!')
    if not name:
        return obj
    try:
        return obj[name]
    except KeyError:
        if args:
            return args[0]
        raise HTTPError(400, 'Missing argument [%s]!' % name)
def local_diff_server_time_microseconds(self):
    """Return (local time - JD server time) as a 13-digit-ms difference,
    keeping the sample with the smallest absolute value over 3 probes."""
    # Compared with abs(), so start from a huge sentinel — not 0.
    min_diff = 1000000000000000
    # Probe 3 times (range(1, 4)) and keep the smallest absolute diff.
    for sync_count in range(1, 4):
        try:
            jd_server_timestamp_13 = self.get_jd_server_timestamp_13()
            local_time_stamp_13_float = get_local_time_stamp_13_float()
            # Local time minus JD server time.
            diff_jd_server_time = local_time_stamp_13_float - jd_server_timestamp_13
            # print(diff_jd_server_time)
            # Oddly, the first sample is usually the fastest. // TODO ?????
            logger.info("diff %s", diff_jd_server_time)
            if abs(diff_jd_server_time) < abs(min_diff):
                min_diff = diff_jd_server_time
        except Exception as e:
            # An error means the samples are no longer trustworthy;
            # retrying would not help, so fall back to a 0 difference.
            min_diff = 0
            logger.warn("获取京东时间异常 %s,直接认为0差距", e)
            return min_diff
        time.sleep(0.5)
    return min_diff
async def put(self, tid=None):
    """Store a JSON test-result document under test id *tid*.

    params: exp reason
    Responds 400 when tid is missing or the body is not valid JSON.
    """
    # Validate the cheap precondition before parsing the body.
    if tid is None:
        self.send_error(400, reason='Need tid')
        return
    try:
        data = json.loads(self.request.body.decode())
    except Exception as ex:
        logger.warn('{!r}'.format(ex))
        self.send_error(400, reason='Invalid json format')
        return
    logger.info(f'Request: {data}')
    db = self.settings['db']
    await db.test_results[tid].insert_one(data)
    self.finish()
def _memcached_value(self, cache_key, model, ds_key, decode_json=False):
    """Fetch a value from memcache, falling back to the datastore and
    repopulating the cache on a miss. Optionally JSON-decode the stored
    datastore value before caching it."""
    value = memcache.get(cache_key)
    if value is None:
        logger.warn('memcache failed on key: {}'.format(cache_key))
        value = ndb_get_entity(model, ds_key).value
        if decode_json:
            value = utils.json_to_dict(value)
        memcache.set(cache_key, value)
    return value

def get(self):
    """Serve the streams database plus its last-update time as JSON."""
    increment_hit_counter(datastore_key_hits_streams_json)
    # Both lookups share the same cache-miss/fallback pattern (was
    # duplicated inline twice).
    db = self._memcached_value(memcache_key_database, JsonDatabase,
                               datastore_key_database, decode_json=True)
    last_update_time = self._memcached_value(memcache_key_last_update,
                                             Time,
                                             datastore_key_last_update)
    json_obj = {'streams': db, 'last_update': last_update_time}
    json_str = utils.dict_to_json(json_obj)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json_str)
def get_response(url: str, headers: dict, params=None, use_proxy=False):
    """GET *url* with up to 4 attempts; returns the requests Response.

    Raises NetworkStatusError on a bad status code or after all retries
    fail. Proxied requests get a longer timeout (15s vs 4s).
    """
    # Bug fix: `params={}` was a mutable default argument shared across
    # calls; use None and create a fresh dict per call.
    if params is None:
        params = {}
    max_retry = 3
    for i in range(max_retry + 1):
        try:
            if use_proxy:
                rs = requests.request("GET", url=url, params=params,
                                      headers=headers, timeout=15,
                                      proxies=PROXY)
            else:
                rs = requests.request(
                    "GET", url=url, params=params, headers=headers,
                    timeout=4,
                )
        except Exception:
            logger.warn(f"url:{url}, 第 {i} 次请求失败,正在重试")
        else:
            if rs.status_code < 200 or rs.status_code > 400:
                logger.error(f"url:{url}, 请求返回值异常")
                raise NetworkStatusError
            else:
                return rs
    logger.error(f"url: {url}, 多次请求失败")
    raise NetworkStatusError
def connect(self):
    """Connect to the OpenMV board and start the frame/output worker
    threads; returns True when connected (or already connected)."""
    if self.openmv is None or not self.openmv.connected:
        logger.debug('try to connect')
        try:
            self.openmv = OpenMV()
            self.openmv.connect()
            if self.openmv.connected:
                self.ui.btn_connect.setText('DisConnect')
                self.ui.set_disable(False)
                logger.info('Connect OpenMV success from {}'.format(self.openmv.port))
                logger.info('Firmware Version: {}'.format(self.openmv.fw_version))
                if self.openmv.running:
                    self.ui.btn_run_openmv.setText('Stop')
                # Worker threads feed framebuffer updates and serial
                # output back to the UI through Qt signals.
                self.fb_thread = FrameThread(self)
                self.fb_thread.signal.connect(self.update_fb)
                self.fb_thread.start()
                self.output_thread = OutputThread(self)
                self.output_thread.signal.connect(self.log_output)
                self.output_thread.start()
                return True
            else:
                # Connection attempt failed: reset button and handle.
                self.ui.btn_connect.setText('Connect')
                self.openmv = None
                logger.error('connect failed')
                return False
        except Exception as e:
            self.ui.btn_connect.setText('Connect')
            self.openmv = None
            logger.error('connect failed: {}'.format(e))
            return False
    else:
        # Already connected: nothing to do.
        logger.warn('openmv is connected')
        return True
def extract_id_atom(idstr):
    """Return the 'ModAction...' suffix of an Atom entry id URL path,
    or the original id string when the marker is absent."""
    path = urlparse(idstr).path
    start = path.find("ModAction")
    if start < 0:  # should never be True
        # Fixed message: the old '+' concatenation had no separator
        # before the id, producing e.g. '...entry idhttp://...'.
        logger.warn("something is wrong with this Atom entry id %s" % idstr)
        return idstr
    return path[start:]
def get_java_cmd(self):
    """
    Build complete JVM startup command line.

    Returns the argv list, or None when no startup method applies.
    """
    cmd = []
    # Allow overriding the java binary via the 'javabin' option.
    cmd.append(self._conf['m2ee'].get('javabin', 'java'))
    if 'javaopts' in self._conf['m2ee']:
        if isinstance(self._conf['m2ee']['javaopts'], list):
            cmd.extend(self._conf['m2ee']['javaopts'])
        else:
            logger.warn("javaopts option in m2ee section in configuration "
                        "is not a list")
    if self._classpath:
        cmd.extend(['-cp', self._classpath])
        if self.runtime_version >= 5:
            # Mendix >= 5 runs on Felix OSGi; point it at its config file.
            cmd.append('-Dfelix.config.properties=file:%s' %
                       self.get_felix_config_file())
        cmd.append(self._get_appcontainer_mainclass())
    elif self._appcontainer_version:
        # Jar-based startup (hybrid appcontainer).
        cmd.extend(['-jar', self._appcontainer_jar])
    else:
        logger.critical("Unable to determine JVM startup parameters.")
        return None
    return cmd
def _check_appcontainer_config(self):
    """Validate the minimal configuration needed to start the
    appcontainer; exits the process (sys.exit(1)) on fatal errors."""
    # did we load any configuration at all?
    if not self._conf:
        logger.critical("No configuration present. Please put a m2ee.yaml "
                        "configuration file at the default location "
                        "~/.m2ee/m2ee.yaml or specify an alternate "
                        "configuration file using the -c option.")
        sys.exit(1)
    # TODO: better exceptions

    # mxnode
    self._run_from_source = self._conf.get(
        'mxnode', {}).get('run_from_source', False)
    if self._run_from_source:
        if not self._conf['mxnode'].get('source_workspace', None):
            logger.critical("Run from source was selected, but "
                            "source_workspace is not specified!")
            sys.exit(1)
        if not self._conf['mxnode'].get('source_projects', None):
            logger.critical("Run from source was selected, but "
                            "source_projects is not specified!")
            sys.exit(1)

    # m2ee: these options are mandatory
    for option in ['app_base', 'admin_port', 'admin_pass']:
        if not self._conf['m2ee'].get(option, None):
            logger.critical("Option %s in configuration section m2ee is "
                            "not defined!" % option)
            sys.exit(1)

    # force admin_pass to a string, prevent TypeError when base64-ing it
    # before sending to m2ee api
    self._conf['m2ee']['admin_pass'] = str(
        self._conf['m2ee']['admin_pass'])

    # Mendix >= 4.3: admin and runtime port only bind to localhost by
    # default
    self._conf['m2ee']['admin_listen_addresses'] = (
        self._conf['m2ee'].get('admin_listen_addresses', ""))
    self._conf['m2ee']['runtime_listen_addresses'] = (
        self._conf['m2ee'].get('runtime_listen_addresses', ""))

    # check admin_pass 1 or password... refuse to accept when users don't
    # change default passwords
    if (self._conf['m2ee']['admin_pass'] == '1' or
            self._conf['m2ee']['admin_pass'] == 'password'):
        logger.critical("Using admin_pass '1' or 'password' is not "
                        "allowed. Please put a long, random password into "
                        "the admin_pass configuration option. At least "
                        "change the default!")
        sys.exit(1)

    # database_dump_path defaults to <app_base>/data/database
    if 'database_dump_path' not in self._conf['m2ee']:
        self._conf['m2ee']['database_dump_path'] = os.path.join(
            self._conf['m2ee']['app_base'], 'data', 'database')
    if not os.path.isdir(self._conf['m2ee']['database_dump_path']):
        logger.warn("Database dump path %s is not a directory" %
                    self._conf['m2ee']['database_dump_path'])
def get_pg_environment(self):
    """Return a dict of PG* environment variables derived from the
    MxRuntime database configuration (for pg_dump/pg_restore etc.)."""
    if not self.is_using_postgresql():
        logger.warn("Only PostgreSQL databases are supported right now.")
    # rip additional :port from hostName, but allow occurrence of plain
    # ipv6 address between []-brackets (simply assume [ipv6::] when ']' is
    # found in string (also see JDBCDataStoreConfiguration in MxRuntime)
    host = self._conf['mxruntime']['DatabaseHost']
    port = "5432"  # PostgreSQL default port
    ipv6end = host.rfind(']')
    lastcolon = host.rfind(':')
    if ipv6end != -1 and lastcolon > ipv6end:
        # "]" found and ":" exists after the "]"  ->  [ipv6]:port
        port = host[lastcolon + 1:]
        host = host[1:ipv6end]
    elif ipv6end != -1:
        # "]" found but no ":" exists after the "]"  ->  bare [ipv6]
        host = host[1:ipv6end]
    elif ipv6end == -1 and lastcolon != -1:
        # no "]" found and ":" exists, simply split on ":"
        port = host[lastcolon + 1:]
        host = host[:lastcolon]
    # TODO: sanity checks
    pg_env = {
        'PGHOST': host,
        'PGPORT': port,
        'PGUSER': self._conf['mxruntime']['DatabaseUserName'],
        'PGPASSWORD': self._conf['mxruntime']['DatabasePassword'],
        'PGDATABASE': self._conf['mxruntime']['DatabaseName'],
    }
    logger.trace("PostgreSQL environment variables: %s" % str(pg_env))
    return pg_env
def invoke_sources(self):
    """
    Execute each source class generating domains.

    Yields (entry, source_name) for every valid domain; invalid domains
    (missing labels or non-ASCII characters) are logged and skipped.
    """
    for klass in self.sources:
        count = 0
        source = None
        # Only pass the config to sources whose __init__ accepts it.
        if 'config' in inspect.getargspec(klass.__init__).args:
            source = klass(self.config)
        else:
            source = klass()
        try:
            for entry in source.domains:
                labels = tldextract.extract(entry['domain'])
                # Require non-empty domain+suffix labels and a pure-ASCII
                # domain string.
                if all(labels[1:]) and not any(
                        [c for c in entry['domain'] if ord(c) > 128]):
                    count += 1
                    yield entry, source.name
                else:
                    logger.warn("invalid domain: %s from %s" %
                                (entry['domain'], source.name))
            logger.info("aggregated %d domains from %s." %
                        (count, source.name or ''))
        except urllib2.URLError, ex:  # Python 2 except syntax
            logger.error("failed to retrieve domain blacklist - %s" %
                         (source.name, ))
def callback(ch, method, properties, body):
    # AMQP consumer callback: assert the received message as a fact in
    # the CLIPS engine, reload tenant rules, run the engine (with CLIPS
    # output redirected to a log file), then retract the fact again.
    try:
        f1 = build_fact(e1, body)
        #decoded = json.loads(body)
        # f1 = e1.Assert("(ServerFact \"" + str(decoded[SERVERID]) + "\" " + str(decoded['cpu'])
        # + " " + str(decoded['mem']) + " " + str(decoded['hdd']) + " " + str(decoded['net'])
        # + ")")
        logger.info("received fact: %s" % body)
        get_rules_from_db(tenantId)
        # Temporarily redirect stdout so PrintFacts/PrintRules/Run output
        # goes to CLIPSout.log instead of the console.
        saveout = sys.stdout
        fsock = open(LOGGING_PATH + '/CLIPSout.log', 'w')
        sys.stdout = fsock
        e1.PrintFacts()
        e1.PrintRules()
        e1.Run()
        sys.stdout = saveout
        fsock.close()
        f1.Retract()
    except ValueError:
        logger.error("receiving an invalid body: " + body)
    except clips.ClipsError:
        logger.error(clips.ErrorStream.Read())
    except Exception as ex:
        # e.g. asserting a fact that already exists.
        logger.warn("FACT: already exists or " + ex.message)
def fix_mxclientsystem_symlink(self):
    """Ensure the public webroot's mxclientsystem symlink points at the
    mxclientsystem directory of the currently configured runtime."""
    # check mxclientsystem symlink and refresh if necessary
    if self.config.get_symlink_mxclientsystem():
        mxclient_symlink = os.path.join(
            self.config.get_public_webroot_path(), 'mxclientsystem')
        real_mxclient_location = self.config.get_real_mxclientsystem_path()
        if os.path.islink(mxclient_symlink):
            current_real_mxclient_location = os.path.realpath(
                mxclient_symlink)
            if current_real_mxclient_location != real_mxclient_location:
                # Stale link (e.g. after a runtime upgrade): re-point it.
                logger.debug("mxclientsystem symlink exists, but points "
                             "to %s" % current_real_mxclient_location)
                logger.debug("redirecting symlink to %s" %
                             real_mxclient_location)
                os.unlink(mxclient_symlink)
                os.symlink(real_mxclient_location, mxclient_symlink)
        elif not os.path.exists(mxclient_symlink):
            logger.debug("creating mxclientsystem symlink pointing to %s" %
                         real_mxclient_location)
            try:
                os.symlink(real_mxclient_location, mxclient_symlink)
            except OSError, e:  # Python 2 except syntax
                logger.error("creating symlink failed: %s" % e)
        else:
            # A regular file/dir is in the way; refuse to touch it.
            logger.warn("Not touching mxclientsystem symlink: file exists "
                        "and is not a symlink")
async def get(self, tid=None):
    """Render the index page for test id *tid*."""
    if tid is None:
        logger.warn('Need a test id')
        # A missing client-supplied id is a client error (400), not a
        # server fault (was 500) — matches the sibling handlers.
        self.send_error(400, reason='Need a test id')
        return
    self.render('index.html')
def _open(self):
    # Open the tracked file, seek to its end, and remember the inode so
    # file rotation can be detected later.
    try:
        self.fp = open(self.name, 'r')
        self.fp.seek(0, 2)  # 2 == SEEK_END
        self.ino = os.fstat(self.fp.fileno()).st_ino
    except IOError, e:  # Python 2 except syntax
        # NOTE(review): on failure self.fp/self.ino stay unset —
        # presumably the caller retries; confirm before relying on them.
        logger.warn('open local file [%s] failed', self.name)
def _lookup_runtime_version(self): # force to a specific version if self._conf['m2ee'].get('runtime_version', None): return self._conf['m2ee']['runtime_version'] # 3.0 has runtime version in metadata.json if 'RuntimeVersion' in self._model_metadata: return self._model_metadata['RuntimeVersion'] # else, 2.5: try to read from model.mdp using sqlite model_mdp = os.path.join( self._conf['m2ee']['app_base'], 'model', 'model.mdp' ) if not os.path.isfile(model_mdp): logger.warn("%s is not a file!" % model_mdp) return None version = None try: conn = sqlite3.connect(model_mdp) c = conn.cursor() c.execute('SELECT _ProductVersion FROM _MetaData LIMIT 1;') version = c.fetchone()[0] c.close() conn.close() except sqlite3.Error, e: logger.error("An error occured while trying to read mendix " "version number from model.mdp: %s" % e) return None
def rsync_fb_conf():
    """Copy the local cluster conf directory to every configured redis
    master host that is reachable over SSH and has the cluster dir."""
    logger.info('Sync conf...')
    cluster_id = config.get_cur_cluster_id()
    if not validate_id(cluster_id):
        logger.warn('Invalid cluster id: {}'.format(cluster_id))
        return
    cluster_list = get_cluster_list()
    if cluster_id not in cluster_list:
        logger.warn('Cluster not exist: {}'.format(cluster_id))
        return
    my_address = config.get_local_ip_list()
    path_of_fb = config.get_path_of_fb(cluster_id)
    props_path = path_of_fb['redis_properties']
    key = 'sr2_redis_master_hosts'
    nodes = config.get_props(props_path, key, [])
    # Per-host result table shown to the user at the end.
    meta = [['HOST', 'RESULT']]
    path_of_fb = config.get_path_of_fb(cluster_id)
    conf_path = path_of_fb['conf_path']
    cluster_path = path_of_fb['cluster_path']
    for node in nodes:
        if net.get_ip(node) in my_address:
            # The local host already has the conf; nothing to copy.
            meta.append([node, color.green('OK')])
            continue
        client = net.get_ssh(node)
        if not client:
            meta.append([node, color.red('SSH ERROR')])
            continue
        if not net.is_dir(client, cluster_path):
            meta.append([node, color.red('NO CLUSTER')])
            continue
        net.copy_dir_to_remote(client, conf_path, conf_path)
        meta.append([node, color.green('OK')])
    utils.print_table(meta)
def insert_source(self, path):
    """Attach *path* as the <source file=...> element of this disk
    definition. Only file-backed disks are supported."""
    if self.disk_type != "file":
        # Bug fix: the element was previously created *before* this
        # check, leaving a dangling empty <source> appended to the tree
        # even for unsupported disk types.
        logger.warn("disk_type is invalid")
        return None
    source_element = ET.SubElement(self.tree, 'source')
    source_element.attrib['file'] = path
def requests(url, method='GET', body=None): try: request = urllib2.Request(url, body) request.get_method = lambda: method response = urllib2.urlopen(request, timeout=3) except urllib2.HTTPError, e: from log import logger logger.warn('Push HTTPError: %s %s ' % e.code, e.reason)
def get_xmpp_credentials(self):
    """Return the xmpp settings dict from the m2ee config section, or
    None when absent or malformed (a warning is logged for the latter)."""
    m2ee_section = self._conf['m2ee']
    if 'xmpp' not in m2ee_section:
        return None
    xmpp = m2ee_section['xmpp']
    if isinstance(xmpp, dict):
        return xmpp
    logger.warn("xmpp option in m2ee section in configuration is "
                "not a dictionary")
    return None
def _safe_put(self, q): try: self.queue.put(q, True, self.PUT_TIMEOUT) self.total += 1 except Queue.Full: logger.warn('put queue full') return False return True
def get_canonical_name(f):
    """Map raw field name *f* to its canonical entry in translate_dict;
    unmapped names are logged and returned as [name, "Unknown"]."""
    # Remove all non-printable characters
    f = ''.join(c for c in f if c in string.printable)
    if f in translate_dict:
        return translate_dict[f]
    logger.warn("Unmapped field: \"{0}\"".format(f.encode("utf8")))
    return [f, "Unknown"]
def schedule_notification(message):
    """Put *message* on the notification queue; when the queue is full
    the message is dropped and a warning is logged."""
    try:
        logger.info("Put notification into queue, current notif count: "
                    + str(notif_queue.qsize()))
        notif_queue.put_nowait(message)
    except QueueFull:
        # Bug fix: concatenating the int maxsize directly to a str raised
        # TypeError inside this handler; also add the missing space
        # before 'notifications'.
        logger.warn("Cannot schedule more than "
                    + str(notif_queue.maxsize)
                    + " notifications. Queue is full!")
def processVideoFiles(self, filePaths: List[str]):
    """Kick off hashing for the selected video files.

    TODO: Handle multiple files — currently only the first is processed.
    """
    filePath = filePaths[0]
    if len(filePaths) > 1:
        # Fix: previously this warned even when exactly one file was
        # selected; only warn when extra files are actually ignored.
        logger.warn(f"Multiple files not supported, only using the first one: '{filePath}'")
    logger.debug(f"_processFile(): Running worker for filePath '{filePath}'")
    self._schedule("Hash",
                   partial(self._subService.calculate_hash, filePath),
                   onSuccess=partial(self._onHashCalculated, filePath),
                   onError=self._errorHandler)
def _do_parse(parse_fn, line):
    """Parse one log *line* into a Query via *parse_fn* (or the default
    parser); returns None for malformed lines or a zero timestamp."""
    # A valid line has at least a date-time field and a query string.
    if len(line.split()) < 2:
        logger.warn('invalid log: %s', line)
        return None
    fn = parse_fn if parse_fn else _default_parse
    tm, qs = fn(line)
    return Query(tm, qs) if tm != 0 else None
def backup_database(backup_key):
    """Snapshot the current streams database as JSON into the datastore
    under *backup_key*, refilling memcache on a cache miss."""
    db = memcache.get(memcache_key_database)
    if db is None:
        # Cache miss: rebuild from the datastore copy and repopulate.
        logger.warn('memcache failed on key: {}'.format(memcache_key_database))
        stored = ndb_get_entity(JsonDatabase, datastore_key_database).value
        db = utils.json_to_dict(stored)
        memcache.set(memcache_key_database, db)
    payload = utils.dict_to_json(db)
    logger.info('Backup database to key: {}'.format(backup_key))
    ndb_set_value(JsonDatabase, backup_key, payload)
def _check_timeout(self):
    """Kill greenlets that have been running longer than twice the
    configured timeout; each victim receives an exception from killone."""
    now = int(time.time() * 1000)  # milliseconds
    budget_ms = 2 * 1000 * self.timeout
    expired = [t for t in self.tasks.greenlets if t.tm + budget_ms < now]
    for task in expired:
        logger.warn('timeout task found %d ms', now - task.tm)
        # target greenlet will receive exception
        self.tasks.killone(task, block=False)
def _restoreWindowSettings(self):
    """Restore saved window geometry and dock/toolbar state; missing or
    corrupt stored values fall back to the default layout."""
    settings = Settings()
    geometryValue: QByteArray = settings.get(Settings.WINDOW_GEOMETRY)
    stateValue: QByteArray = settings.get(Settings.WINDOW_STATE)
    try:
        self.restoreGeometry(geometryValue.data())
        self.restoreState(stateValue)
    except Exception as e:
        # First run (values are None) or corrupt bytes end up here.
        # logger.warn() is a deprecated alias — use warning().
        logger.warning(f"Error restoring window geometry or state: {e}")
        return
def _read_pidfile(self):
    # Load the daemon pid from the configured pidfile into self._pid.
    # A missing pidfile (ENOENT) is normal — the daemon just isn't
    # running — and is not logged.
    pidfile = self._config.get_pidfile()
    try:
        pf = file(pidfile, 'r')  # Python 2 builtin
        self._pid = int(pf.read().strip())
        pf.close()
    except IOError, e:  # Python 2 except syntax
        if e.errno != errno.ENOENT:
            logger.warn("Cannot read pidfile: %s" % e)
        self._pid = None
def get_http_delay_microseconds(self):
    """Measure the HTTP round-trip delay to JD's time endpoint.

    NOTE(review): despite the name, `elapsed.microseconds / 1000` uses
    only the microseconds *component* of the timedelta (0-999999, whole
    seconds excluded) and converts it to milliseconds — likely
    `elapsed.total_seconds() * 1000` was intended. Confirm the expected
    unit with the callers before changing.
    """
    url = 'https://a.jd.com//ajax/queryServerData.html'
    # Fallback value returned when the probe request fails.
    http_delay_microseconds = 50
    headers = {
        'User-Agent': self.user_agent
    }
    try:
        http_delay_microseconds = requests.get(url, headers=headers).elapsed.microseconds / 1000
    except Exception as e:
        logger.warn("测试网络延迟 异常,返回默认延迟 %s %s", http_delay_microseconds, e)
    return http_delay_microseconds
def use(self, cluster_id):
    """Change selected cluster

    :param cluster_id: target cluster #
    """
    success, message = _change_cluster(cluster_id)
    if not success:
        logger.warn('Fail: %s' % message)
        return
    # -1 means "no cluster"; show it as '-'.
    display_id = '-' if cluster_id == -1 else cluster_id
    self._print('Cluster %s selected.' % display_id)
def _do_get(self, url, cb): try: resp = self.impl.get(url, headers = self.headers) cb(resp) except socket.timeout: cb(None) except socket.error: cb(None) except Exception, e: logger.warn('http exception: %r', e) cb(None)
async def get(self, tid=None):
    """Render the abx config template for testsuite *tid*; responds 400
    when no id was supplied."""
    if tid is None:
        # Message grammar fixed ("Must has" -> "Must have").
        logger.warn('Must have a specific id')
        self.send_error(400, reason='Must have a specific id')
        return
    db = self.settings['db']
    testsuite = db.testsuites[tid]
    self.render('config/abx_config_template.js',
                testsuite_name=testsuite.name,
                tests=testsuite.tests)
def stop(self, timeout=10):
    """Ask the application to shut down gracefully.

    Returns True when it is no longer running (or was not running at
    all), False when it failed to stop within *timeout* seconds.
    """
    if not self.runner.check_pid():
        # No live process: just clear the stale pidfile.
        self.runner.cleanup_pid()
        return True
    logger.info("Waiting for the application to shutdown...")
    if self.runner.stop(timeout):
        logger.info("The application has been stopped successfully.")
        return True
    logger.warn("The application did not shutdown by itself...")
    return False
def get_manifest(app_dir):
    """Parse the app's AndroidManifest.xml into a DOM document.

    When parsing fails, a stub manifest full of 'Failed' markers is
    returned so the rest of the analysis can continue.
    """
    manifest = read_manifest(app_dir).replace("\n", "")
    logger.info("[*]Parsing AndroidManifest.xml...")
    try:
        return minidom.parseString(manifest)
    except Exception as e:
        logger.error("[*] Parsing AndroidManifest.xml - " + str(e))
        fallback = minidom.parseString(
            r'<?xml version="1.0" encoding="utf-8"?><manifest xmlns:android="http://schemas.android.com/apk/res/android" android:versionCode="Failed" android:versionName="Failed" package="Failed" platformBuildVersionCode="Failed" platformBuildVersionName="Failed XML Parsing" ></manifest>'
        )
        logger.warn("[*] Using fake XML to continue the analysis...")
        return fallback
def main(self):
    """Block until exit_cond is set, then run on_exit().

    Returns 0 on a normal exit, -1 when interrupted with Ctrl-C.
    """
    rc = 0
    try:
        # NOTE: wait(.1) already blocks up to 100 ms per iteration; the
        # extra sleep keeps the original ~200 ms polling cadence.
        while not self.exit_cond.wait(.1):
            time.sleep(.1)
    except KeyboardInterrupt:
        # Fix: dropped the unused 'as e' binding.
        logger.warn('App: KeyboardInterrupt')
        rc = -1
    self.on_exit()
    logger.info('App: exited with {}'.format(rc))
    return rc
def append(self, arg):
    # Create, start, and register a player built from config dict *arg*.
    # Returns False on a duplicate name or loader failure.
    # NOTE(review): the success path falls through and returns None
    # (falsy) — callers must not treat the return value as a success
    # flag; confirm before changing.
    if self.players.has_key(arg['name']):  # Python 2 dict API
        logger.debug('player %s exists', arg['name'])
        return False
    try:
        player = self._create_one(arg, self.global_conf)
        player.start()
        self.players[player.name] = player
        self.confs.append(arg)
    except ImportError, e:  # Python 2 except syntax
        logger.warn('create player failed of invalid loader: %r', e)
        return False
def terminate(self, timeout=10):
    """Force-terminate the JVM process.

    Returns True when the process is gone (or was not running), False
    when it did not disappear within *timeout* seconds.
    """
    if not self.runner.check_pid():
        # No live process: just clear the stale pidfile.
        self.runner.cleanup_pid()
        return True
    logger.info("Waiting for the JVM process to disappear...")
    if self.runner.terminate(timeout):
        logger.info("The JVM process has been stopped.")
        return True
    logger.warn("The application process seems not to respond to any "
                "command or signal.")
    return False
def get(self):
    # Parse the posted JSON payload: wsc == 1 initializes area data in
    # the db, otherwise the area's timestamp is refreshed. Any failure
    # answers with {"qsc": 0}.
    getjson = self.request.body
    try:
        getset = json.loads(getjson)
        aid = getset['aid']
        wsc = getset['wsc']
        if wsc == 1:
            db = self.application.db
            init_a(db, getset['data'], aid)
        else:
            # Lazy import to avoid a module-level cycle with options.
            from options import areas_time
            areas_time[aid] = time.time()
    except Exception, e:  # Python 2 except syntax
        back = json.dumps({"qsc": 0})
        self.write(back)
        logger.warn(e)
def query_ip(ip):
    # Query ip.taobao for geolocation data about *ip*, retrying until a
    # 200 response arrives.
    # NOTE(review): resultL is filled but never returned — a trailing
    # `return resultL` looks missing; confirm with callers.
    # NOTE(review): the except branch retries immediately without any
    # sleep, so a persistent failure busy-loops forever.
    payload = {"ip": ip}
    resultL = []
    while True:
        try:
            payload.pop("format", 0)
            result = requests.get(base_taobao_url, params=payload, timeout=5)
            if result.status_code == 200:
                # NOTE(review): this local shadows the `json` module.
                json = result.json()
                resultL.append(json["data"])
                break
            else:
                # Likely rate-limited; back off briefly before retrying.
                logger.warn("request ip.taobao error, maybe too many requests, let's wait a while")
                sleep(1)
        except Exception, e:  # Python 2 except syntax
            logger.error("request taobao exception: " + str(e.message))
def setUp(self):
    # Test fixture: create a local temp dir and mirror it as a dummy
    # directory on the FTP server. self.ok stays False when no password
    # is available or the FTP login failed, letting tests skip early.
    self.tempdir = mkdtemp()
    self.ok = False
    passwd = os.environ.get('PASSWD', '')
    if not passwd:
        logger.warn('no password in $PASSWD')
    self.ftp = ftpUploader('lisa1', getuser(), passwd)
    if not self.ftp.ok:
        return
    self.ok = True
    #self.ftp = ftpUploader('snoball.corp.adobe.com', getuser(), passwd)
    self.ftp.infos()
    self.pwd = self.ftp.pwd()
    # create remote dummy dir named after the local temp dir
    self.remoteDir = basename(self.tempdir)
    self.ftp.mkd(self.remoteDir)