def register_execution(self, seconds_from_now: str, start_at: str):
    """Schedule a synchronized sequence execution across all active workers.

    Computes the absolute start time, pushes it to every worker over HTTP,
    then registers the local playback tasks on the sequence scheduler.

    :param seconds_from_now: relative delay, forwarded to compute_start_time
    :param start_at: absolute start spec, forwarded to compute_start_time
    :raises RequestException: re-raised (after self.stop()) when a worker
        POST fails
    """
    start_time = compute_start_time(seconds_from_now, start_at)
    # Workers expect the start time in milliseconds.
    start_time_ms = start_time * 1000
    for ID in self.active_workers_ids():
        url = f"{self._workers[ID]['baseURL']}/worker/execution?start_time={start_time_ms}"
        try:
            rs = requests.post(url)
            logger.info(f"POST {url} -> {rs.status_code}")
        except RequestException as e:
            logger.error(e)
            # One unreachable worker aborts the whole execution.
            self.stop()
            raise e
    self._sequence_scheduler.register_task(0, init_mixer)
    audio_file = self.sequence_file('ogg')
    self._sequence_scheduler.register_task(
        1, lambda: load_audio_file(audio_file))
    # Countdown callbacks are scheduled relative to the latency-compensated
    # playback start.
    register_countdown(
        lambda delay_seconds, fun: self._sequence_scheduler.register_task(
            start_time_ms - PLAYBACK_LATENCY + (delay_seconds * 1000), fun))
    self._sequence_scheduler.register_task(
        start_time_ms - PLAYBACK_LATENCY, play_music)
    super().register_tasks(start_time_ms)
    self._active_sequence['start_time'] = start_time
def run(self):
    """Start the slave: register handlers, join the master, then start the
    new timer, the workers, and the HTTP server.

    Registration steps are best-effort: failures are logged but do not
    prevent the slave from starting.
    """
    # Register the thread-count management endpoint (best-effort).
    try:
        self.register("/modify_thread_num", self.modify_thread_num)
    except Exception:
        # Fix: the bare `except: pass` silently swallowed everything,
        # including SystemExit/KeyboardInterrupt. Stay best-effort, but
        # narrow the catch and record the failure.
        logger.exception("Failed to register /modify_thread_num")
    # Register this slave with the master (best-effort).
    try:
        if not self.register_in_master():
            logger.error("Can't register to master.")
    except Exception:
        logger.exception("Error while registering to master")
    # start the timer
    # HTTP heartbeat disabled; the new timer replaces it.
    # self.__timer.start()
    self.__timer_new.start()
    # start the workers
    self.__workers.start()
    # start the server
    self.__server.run()
def run(self):
    """Start the slave: register handlers, join the master, then start the
    timer, the workers, and the HTTP server.

    Registration steps are best-effort: failures are logged but do not
    prevent the slave from starting.
    """
    # Register the thread-count management endpoint (best-effort).
    try:
        self.register("/modify_thread_num", self.modify_thread_num)
    except Exception:
        # Fix: the bare `except: pass` silently swallowed everything,
        # including SystemExit/KeyboardInterrupt. Stay best-effort, but
        # narrow the catch and record the failure.
        logger.exception("Failed to register /modify_thread_num")
    # Register this slave with the master (best-effort).
    try:
        if not self.register_in_master():
            logger.error("Can't register to master.")
    except Exception:
        logger.exception("Error while registering to master")
    # start the timer
    self.__timer.start()
    # start the workers
    self.__workers.start()
    # start the server
    # if self.info.recv_real_time_request
    self.__server.run()
def manage_exception(e):
    """Project-wide hook for handling caught exceptions.

    :param e: exception instance from an ``except Exception as e`` clause
    :return: None — the exception is only recorded, never re-raised
    """
    # repr() keeps the exception type visible in the log line.
    details = repr(e)
    logger.error(details)
def __init__(self, symbol, qty, side, price=None, type=ORDER_TYPE_MARKET,
             time_in_force=ORDER_TIME_IN_FORCE_GTC, fraction_qty=False):
    """Build an order and validate quantity/price.

    :param symbol: instrument symbol
    :param qty: order quantity (rounded to an integer unless fraction_qty)
    :param side: buy/sell side
    :param price: limit price; required (>= 0) for limit orders
    :param type: order type, default market
    :param time_in_force: TIF flag, default GTC
    :param fraction_qty: allow fractional quantities when True
    """
    self.symbol = symbol
    self.qty = float(qty)
    self.side = side
    self.price = price
    self.type = type
    self.time_in_force = time_in_force
    self.fraction_qty = fraction_qty
    # Fix: `valid` was only assigned on the success path, so consumers of an
    # invalid order hit AttributeError instead of reading a False flag.
    self.valid = False
    if not fraction_qty:
        self.qty = round(self.qty, 0)
    if self.qty <= 0:
        logger.error(
            "Invalide order, GQOrder get <= 0 qty: {}".format(qty))
        return
    if self.type == ORDER_TYPE_LIMIT:
        if self.price is None or self.price < 0:
            logger.error(
                "Invalide order, GQOrder get price < 0: {}".format(price))
            return
    self.valid = True
def on_capture_success(task_id):
    """Celery callback: persist the image record produced by a capture task.

    Fetches the task result, builds an Image row from its fields, commits it,
    and forgets the result.

    :param task_id: id of the finished celery capture task
    :return: (flask JSON response, status) — 200 on success, 500 on failure
    """
    logger.info("Callback triggered: Adding image record of capture.")
    try:
        res = celery_app.AsyncResult(task_id)
        data = res.get()
        res.forget()
        logger.info(f'data: {data}')
        image = Image(cell=data.get('cell'),
                      path=data.get('path'),
                      device=data.get('device'),
                      created=data.get('created'),
                      created_by=data.get('created_by'),
                      label=data.get('label'),
                      offset_x=data.get('offset_x'),
                      offset_y=data.get('offset_y'),
                      offset_z=data.get('offset_z'),
                      pos_x=data.get('pos_x'),
                      pos_y=data.get('pos_y'),
                      pos_z=data.get('pos_z'))
        db.session.add(image)
        db.session.commit()
        return jsonify('Successfully registered image record'), 200
    except Exception as e:
        logger.error(e)
        # Fix: the failure path previously returned HTTP 200, hiding errors
        # from the caller.
        return jsonify('Failed to register image record'), 500
def generateQuery(subjectIdField, subjectType, starttime, ids):
    """Build the MongoDB query used to fetch comment data.

    If Redis already holds pos/neg data for this subject type (mall/shop),
    process incrementally (createTime >= starttime); otherwise process the
    full range, with the window extended by one day by default.

    :param subjectIdField: field name identifying the subject
    :param subjectType: subject type constant (mall / shop)
    :param starttime: window start time
    :param ids: optional list/set of subject ids to restrict the query
    :return: MongoDB query dict
    """
    query = {}
    # Key layout: REDIS_DIR + subjectType_<commentType>_<subjectId> with
    # wildcard placeholders.
    key = const.REDIS_DIR + const.UNDER_LINE.join([str(subjectType), "?", "*"])
    try:
        num = exists(key)
    except Exception:
        # Treat Redis errors as "no previous data": fall back to a full run.
        num = 0
        logger.error("redis 判断key:{0}是否存在出错".format(key), exc_info=1)
    if num > 0:
        # Data already present: incremental processing.
        # Fix: MongoDB has no "$ge" operator — ">=" is spelled "$gte";
        # the old query silently matched nothing/raised depending on server.
        query = {"createTime": {"$gte": starttime}}
    else:
        # No previous data: full processing, default window ends one day later.
        starttime = starttime + timedelta(days=1)
        query = {"createTime": {"$lt": starttime}}
    # Support batching several malls/shops per query so a single call does not
    # pull too much data and crash the process.
    if ids is not None and isinstance(ids, (list, set)):
        query[subjectIdField] = {"$in": ids}
    return query
def get_twitter_data(client, node: str, field: dict):
    """Call the Twitter REST API and return the decoded JSON payload.

    Falsy values are stripped from `field` before URL-encoding.

    :param client: oauth-style client exposing ``request(url)``
    :param node: endpoint path appended to the module-level ``base`` URL
    :param field: query-string parameters (falsy values are dropped in place)
    :return: decoded JSON dict on HTTP 200, otherwise None
    """
    # Iterate over a copy so popping from `field` is safe.
    for k, v in dict(field).items():
        if not v:
            field.pop(k)
    encoded_field = parse.urlencode(field)
    url = base + node + '?' + encoded_field
    logger.info(f'request to "{url}"')
    logger.debug(
        f'request info : <base : "{base}"> <node: {node}> <encoded_field: {field}>'
    )
    response, data = client.request(url)  # type : dict, bytes
    logger.debug(f'response status : {response["status"]}')
    try:
        if response['status'] == '200':
            return json.loads(data.decode('utf-8'))
        # Fix: the old failure logging ran after the except block, where `e`
        # was undefined, and used a single-placeholder format string with two
        # arguments — both raised instead of logging.
        logger.error('cannot responsed from : "%s", status %s'
                     % (url, response['status']))
    except Exception as e:
        print("ERR:", e)
        logger.error('error message : %s' % e)
    return None
def login_user(userid, password):
    """Authenticate a user and issue JWT access/refresh tokens.

    :param userid: login id to look up
    :param password: plaintext password to verify
    :return: (payload dict, 200) with the access token and cookies set, or
             (error dict, 401) on unknown user / wrong password
    """
    logger.info("User Login")
    user_data = User.query.filter_by(userid=userid).first()
    if user_data is not None:
        if not user_data.check_password(password):
            logger.error("Authentication error: Wrong userid or password")
            return {
                'message': 'Authentication error: Wrong userid or password',
                "authenticated": False
            }, 401
        # Set JWT_ACCESS_TOKEN_EXPIRES to change token duration.
        access_token = create_access_token(identity=user_data)
        refresh_token = create_refresh_token(identity=user_data)
        logger.info("Access token created")
        logger.debug(f'access_token: {access_token}')
        resp = {
            'login': True,
            'msg': 'New login',
            'access_token': access_token,
            # refresh token is delivered via cookie only, not in the body
            #'refresh_token': refresh_token
        }
        # Attach both tokens to the response as cookies.
        _set_cookies(access_token, refresh_token)
        return resp, 200
    else:
        logger.error("User Does Not Exist")
        return {'message': 'User Does Not Exist', "authenticated": False}, 401
def create_user(**kwargs):
    """Register a new user.

    Rejects duplicate userids (409) and missing company names (400);
    resolves the company by name, stamps audit fields, hashes the password
    and commits.

    :param kwargs: user fields; must include 'userid' and 'company'
    :return: (payload dict, HTTP status code)
    :raises Exception: unexpected DB errors, logged and re-raised
    """
    logger.info("User registration")
    timestamp = datetime.utcnow()
    try:
        existing = User.query.filter_by(userid=kwargs.get('userid')).first()
        if existing is not None:
            logger.error("Userid already exists")
            return {'message': 'userid already exists'}, 409
        company_name = kwargs.get('company')
        if company_name is None:
            return {'message': 'Company name is empty.'}, 400
        # Replace the company name with its row and stamp audit fields.
        kwargs['company'] = Company.query.filter_by(name=company_name).one()
        kwargs['created'] = timestamp
        kwargs['last_edited'] = timestamp
        kwargs['is_deleted'] = False
        new_user = User(**kwargs)
        new_user.hash_password()
        db.session.add(new_user)
        db.session.commit()
        logger.info('User registration successful')
        return {'message': 'User registration successful'}, 201
    except NoResultFound:
        return {'message': "Company doesn't exist."}, 400
    except Exception as e:
        logger.error(traceback.format_exc())
        raise e
def getMysqlConnect():
    """Create a MySQL connection from the configuration file.

    :return: a pymysql connection (autocommit disabled, plain cursor class)
    :raises Exception: configuration/connection errors are logged and
        re-raised
    """
    try:
        configure = getConfig()
        host = configure['mysql']['host']
        username = configure['mysql']['username']
        password = configure['mysql']['password']
        database = configure['mysql']['database']
        port = configure.getint('mysql', 'port')
        charset = configure['mysql']['charset']
        # connect_timeout = configure['mysql'].getint('connect_timeout') or 100
        conn = pymysql.connect(host=host, user=username, passwd=password,
                               db=database, port=port, charset=charset,
                               autocommit=False,
                               cursorclass=pymysql.cursors.Cursor)
    except Exception:
        # Fix: the old bare `except` fell through to `return conn` with
        # `conn` unbound, raising NameError instead of the real error.
        logger.error('connect mysql failure', exc_info=1)
        raise
    return conn
def offset_position():
    """Flask view: move the camera by a relative (x, y, z) offset.

    Reads x/y/z from the JSON request body and forwards them to the device's
    CGI endpoint using HTTP digest auth.

    :return: (payload dict, 200) echoing the offset on success
    :raises CGIException: when the device answers with a non-200 status
    :raises Exception: any other error, logged and re-raised
    """
    logger.info('Update relative camera position')
    try:
        data = request.get_json()
        x = data.get('x')
        y = data.get('y')
        z = data.get('z')
        logger.info('offset: ' + str({"x": x, "y": y, "z": z}))
        # i_mt_inc* are the device's relative-move CGI parameters.
        resp = requests.get(
            f'http://{DEVICE_IP}/isp/appispmu.cgi?btOK=submit&i_mt_incx={x}&i_mt_incy={y}&i_mt_incz={z}',
            auth=HTTPDigestAuth(DEVICE_ID, DEVICE_PW))
        logger.info(resp.text)
        if resp.status_code == 200:
            return {
                'message': 'Successfully updated camera position.',
                'result': {
                    "x": x,
                    "y": y,
                    "z": z
                }
            }, 200
        else:
            raise CGIException(resp)
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def create_device(**kwargs):
    """Register a new device row.

    Rejects duplicate serials (409); resolves company and owner by
    name/userid (raises if missing), stamps audit fields and commits.

    :param kwargs: device fields; 'serial', 'company' and 'owner' expected
    :return: (payload dict, 201) on success, (error dict, 409) on duplicate
    :raises Exception: DB/lookup errors, logged and re-raised
    """
    logger.info('Register new device')
    now = datetime.utcnow()
    try:
        query = Device.query.filter_by(serial=kwargs.get('serial')).first()
        if query is not None:
            logger.error("Device already exists")
            return {'message': 'Device already exists'}, 409
        # .one() raises when the company/owner does not exist.
        company = db.session.query(Company).filter_by(
            name=kwargs.get('company')).one()
        owner = db.session.query(User).filter_by(
            userid=kwargs.get('owner')).one()
        device = Device(model=kwargs.get('model'),
                        serial=kwargs.get('serial'),
                        company=company,
                        owner=owner,
                        ip=kwargs.get('ip'),
                        created=now,
                        created_by=kwargs.get('created_by', current_user),
                        last_edited=now,
                        edited_by=kwargs.get('edited_by', current_user),
                        is_deleted=False)
        db.session.add(device)
        db.session.commit()
        return {
            'message': f'Posted device<{kwargs.get("serial")}> to db.'
        }, 201
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def update_device(**kwargs):
    """Update fields of an existing device identified by 'serial'.

    Only the keys present (and truthy) in kwargs are applied; company and
    owner are resolved by name/userid via .one() (raises if missing).

    :param kwargs: 'serial' plus any of newserial/model/company/ip/owner
    :return: (payload dict, 200) on success
    :raises Exception: DB/lookup errors, logged and re-raised
    """
    logger.info('Update existing device')
    now = datetime.utcnow()
    try:
        query = db.session.query(Device).filter_by(
            serial=kwargs.get('serial')).one()
        if kwargs.get('newserial'):
            query.serial = kwargs.get('newserial')
        if kwargs.get('model'):
            query.model = kwargs.get('model')
        if kwargs.get('company'):
            query.company = db.session.query(Company).filter_by(
                name=kwargs.get('company')).one()
        if kwargs.get('ip'):
            query.ip = kwargs.get('ip')
        if kwargs.get('owner'):
            query.owner = db.session.query(User).filter_by(
                userid=kwargs.get('owner')).one()
        # NOTE(review): this writes `edited`, while device creation writes
        # `last_edited` — confirm the Device model really has both columns.
        query.edited = now
        query.edited_by = current_user
        db.session.commit()
        return {'message': f'Updated device<{query.serial}> from db.'}, 200
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def outer(self, outer_step):
    # Execute an externally-configured function for one config step:
    # reads path/func/params/response from the config section, resolves the
    # parameter values from self.__params, imports and calls the function,
    # and stores the result under the `response` key.
    # NOTE(review): `exec(func + ' as tmp_func')` is only valid if `func`
    # already contains a "from x import y" prefix — confirm against the
    # config files. Python 2 only (`except Exception, e`, print statement).
    try:
        path = self.__config.get(outer_step, 'path').strip()
        func = self.__config.get(outer_step, 'func').strip()
        params_keys = self.__config.get(outer_step, 'params').strip().split(' ')
        params = []
        for key in params_keys:
            params.append(self.__params[key])
        response = self.__config.get(outer_step, 'response')
        # Make the configured module importable.
        if path not in sys.path:
            sys.path.append(path)
        exec_func = func + ' as tmp_func'
        exec(exec_func)
        self.__params[response] = tmp_func(params)
    except Exception, e:
        logger.error('ConfigParser Error: ' + outer_step +
                     ': exec outer func Error ' + str(e))
        print 'ConfigParser Error: ' + outer_step + ': exec outer func Error ' + str(
            e)
        self.__error = CONFIGFILE_ERROR
        return False
def create(self, force_exit=True):
    """Create this Kafka topic via the cluster REST API.

    Retention settings come from environment variables; on an HTTP error
    the response body is logged and the process optionally exits.

    :param force_exit: when True, exit(1) on a >=400 response
    """
    payload = {
        'name': self.name,
        'numPartitions': self._num_partitions,
        'replicationFactor': self._replication_factor,
        'configs': {
            'cleanup.policy': 'delete',
            'delete.retention.ms': getenv('KAFKA_LOG_DELETE_RETENTION_MS'),  # 1 minute
            'max.message.bytes': 1000012,
            'min.insync.replicas': '1',
            # NOTE(review): "RENTENTION" looks like a typo — confirm the
            # actual environment variable name before changing it.
            'retention.bytes': getenv('KAFKA_RENTENTION_BYTES'),
            # Long-lived topics get the long retention window.
            'retention.ms': getenv('KAFKA_RETENTION_MS_LONG')
            if self._long_last else getenv('KAFKA_RETENTION_MS_SHORT'),
        },
    }
    # validate=false: create immediately instead of dry-running.
    response = requests.put(
        join(self._base_url, 'kafka', self._cluster_id, 'topics?validate=false'),
        json=payload,
    )
    if response.status_code >= 400:
        logger.error('Unable to create topic: {}'.format(self.name))
        logger.error(response.text)
        exit(1) if force_exit else None
def _check_data(self, data_df):
    """Validate that get_prices returned a non-empty DataFrame.

    :param data_df: DataFrame returned by get_prices
    :raises ValueError: when the DataFrame is empty
    """
    if not data_df.empty:
        logger.debug("get number of data shape: " + str(data_df.shape))
        return
    err = ValueError("get_prices return empty data")
    logger.error(err)
    raise err
def query(self, payload, force_exit=True, show_output=True):
    """POST a KSQL statement to the server and return the raw response.

    On an HTTP error the statement and response body are optionally logged,
    and the process exits when force_exit is set.

    :param payload: request body; its 'ksql' key holds the statement
    :param force_exit: exit(1) on a >=400 response when True
    :param show_output: log failures when True
    :return: the requests Response object
    """
    response = requests.post(self._ksql_endpoint, json=payload)
    failed = response.status_code >= 400
    if failed:
        if show_output:
            logger.error('Unable to run statement: {}'.format(payload['ksql']))
            logger.error(response.text)
        if force_exit:
            exit(1)
    return response
def time_stamp(cls):
    """Return the current local time formatted as 'YYYYMMDDHHMMSS'.

    Errors are logged with traceback and result in an implicit None.
    """
    try:
        # Fixed 14-digit timestamp format.
        return time.strftime('%Y%m%d%H%M%S')
    except Exception as e:
        logger.error(e)
        logger.exception(u"捕获到错误如下:")
def _create(self, payload, force_exit=True):
    """POST a new connector definition to the Connect REST API.

    Failures are logged and optionally terminate the process.

    :param payload: connector configuration body
    :param force_exit: exit(1) on a >=400 response when True
    """
    response = requests.post(self._api_url, json=payload)
    if response.status_code < 400:
        return
    logger.error(
        'Unable to create new connector {}. Please inspect from the logs'
        .format(self.name))
    logger.error(response.text)
    if force_exit:
        exit(1)
def s_color(path, params):
    """Queue a colormap-change task for an image.

    :param path: image path
    :param params: keyword arguments forwarded to the CV task
    :return: the queued task id, or None when queueing fails
    """
    logger.info('Change colormap of image')
    try:
        return cv_task.send_color(path, **params)
    except Exception as e:
        # Log both the error and where it came from, but swallow it.
        logger.error(e)
        traceback.print_stack()
        traceback.print_exc()
def s_threshold(path, params):
    """Queue a thresholding task for an image.

    :param path: image path
    :param params: keyword arguments forwarded to the CV task
    :return: the queued task id, or None when queueing fails
    """
    logger.info('Threshold image')
    try:
        return cv_task.send_threshold(path, **params)
    except Exception as e:
        # Log both the error and where it came from, but swallow it.
        logger.error(e)
        traceback.print_stack()
        traceback.print_exc()
def s_normalize(path, params):
    """Queue a normalization task for an image.

    :param path: image path
    :param params: keyword arguments forwarded to the CV task
    :return: the queued task id, or None when queueing fails
    """
    logger.info('Normalize image')
    try:
        return cv_task.send_normalize(path, **params)
    except Exception as e:
        # Log both the error and where it came from, but swallow it.
        logger.error(e)
        traceback.print_stack()
        traceback.print_exc()
def _delete(self, force_exit=True):
    """Delete this connector via the REST API if it currently exists.

    Failures are logged and optionally terminate the process.

    :param force_exit: exit(1) on a >=400 response when True
    """
    # Nothing to do when the connector is not registered.
    if not self._get():
        return
    response = requests.delete(join(self._api_url, self.name))
    if response.status_code >= 400:
        logger.error(
            'Unable to delete existing connector {}. Please manually delete it from the web UI.'
            .format(self.name))
        logger.error(response.text)
        if force_exit:
            exit(1)
def getMongoData(collection=None, date=None, subjectIdField=None, ids=None):
    '''
    Fetch comment data for the given window from a MongoDB collection,
    optionally limited to a batch of mall/shop ids so a single query does
    not pull too much data.

    :param collection: MongoDB collection name (required)
    :param date: date of the data to fetch
    :param subjectIdField: subject id field name (defaulted per collection)
    :param ids: list or set of mall/shop ids
    :return: dict keyed by subject id; each value holds 'neg' (negative
             contents), 'pos' (positive contents) and 'type' (mall/shop),
             or None on bad parameters
    '''
    if collection is None:
        logger.error('参数不正确,没有指定要查询的表')
        return None
    starttime = parseDateParam(date)
    if starttime is None:
        return None
    conn = getMongoConn()
    # Pick the id field / subject type from the collection being queried.
    if collection == 'Mall_Comment_Info':
        subjectIdField = "mallId" if subjectIdField is None else subjectIdField
        subjectType = const.MALL_COMMENT
    else:
        subjectIdField = "shopId" if subjectIdField is None else subjectIdField
        subjectType = const.SHOP_COMMENT
    query = generateQuery(subjectIdField, subjectType, starttime, ids)
    logger.info('相关的查询条件为空,param: {0},{1},{2},{3}'.format(
        subjectIdField, subjectType, starttime, ids))
    result = conn[collection].find(query,
                                   {subjectIdField, "commentLevel", "content"})
    commentMap = {}
    for res in result:
        subjectId = res.get(subjectIdField)
        content = res.get('content')
        if subjectId is None or content is None:
            continue
        level = res.get('commentLevel')
        if level < const.NEGATIVE_SCORE_LEVEL:
            bucket = "neg"
        elif level >= const.POSITIVE_SCORE_LEVEL:
            bucket = "pos"
        else:
            # Neutral comments are ignored.
            continue
        # Fix: the old code replaced the whole entry when a subject received
        # both positive and negative comments, dropping the earlier bucket.
        entry = commentMap.setdefault(subjectId, {"type": subjectType})
        entry.setdefault(bucket, []).append(content)
    return commentMap
def timelapse_start():
    """Flask view: queue a periodic timelapse capture task.

    Reads project/cell/device/label plus scheduling fields from the JSON
    body. With debug=true the DB integrity checks are skipped and dummy
    task data is used instead.

    :return: (payload dict with the queued task key, 200) on success
    :raises Exception: lookup/queueing errors, logged and re-raised
    """
    logger.info('Start timelapse')
    try:
        data = request.get_json()
        project = data.get('project')
        cell = data.get('cell')
        device = data.get('device')
        label = data.get('label')
        run_every = data.get('run_every')
        expire_at = data.get('expire_at')
        debug = data.get('debug')
        # skip integrity check if debugging
        if debug:
            kwargs = {
                'header': 'test',
                'run_every': run_every,
                'expire_at': None,
                'data': {
                    'cell': None,
                    'device': None,
                    'label': None
                }
            }
        else:
            # Resolve project -> cell (scoped to that project) -> device;
            # .one() raises when any of them is missing.
            pid = db.session.query(Project).filter_by(name=project).one()
            tid = db.session.query(Cell) \
                .filter_by(project=pid.id) \
                .filter_by(name=cell).one()
            did = db.session.query(Device).filter_by(serial=device).one()
            kwargs = {
                'header': pid.shorthand,
                'run_every': run_every,
                'expire_at': expire_at,
                'data': {
                    'cell': tid.id,
                    'device': did.id,
                    'label': label
                }
            }
        logger.info(kwargs)
        status, key = cam_task.send_start_timelapse(**kwargs)
        if status:
            return {
                'message':
                f'Timelapse task for device {kwargs.get("data").get("device")} queued',
                'key': key
            }, 200
        else:
            raise Exception('Result false')
    except Exception as e:
        # TODO: refine the handling per DB exception type.
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def stop(self):
    """Stop the local sequence, then tell every active worker to abort.

    Worker DELETE failures are logged but ignored, so every worker is
    still contacted.
    """
    super().stop()
    for worker_id in self.active_workers_ids():
        url = f"{self._workers[worker_id]['baseURL']}/worker/execution"
        try:
            rs = requests.delete(url)
        except RequestException as exc:
            logger.error(exc)
        else:
            logger.info(f"DELETE {url} -> {rs.status_code}")
def __init__(self, interface_code, data_dict, secretkey_value):
    """Capture the request context and stamp it with the current time.

    :param interface_code: interface identifier used for dispatch
    :param data_dict: request payload dict
    :param secretkey_value: secret key used when signing
    """
    try:
        # Timestamp is generated once, at construction time.
        self.__time_stamp = TimeStamp.time_stamp()
        self.__data_dict = data_dict
        self.__secretkey_value = secretkey_value
        self.__interface_code = interface_code
    except Exception as e:
        logger.error(e)
        logger.exception(u"捕获到错误如下:")
def ytdyccrm_cancel_func(self):
    """Sign the cancel request for the ytdyccrm interface.

    Injects the construction timestamp into the payload, computes the MD5
    signature over dt + method + "2.0" + secret key, stores it under
    "sign", and returns the payload. Errors are logged and yield an
    implicit None.
    """
    try:
        self.__data_dict["dt"] = self.__time_stamp
        # Signature input: dt, method, fixed version "2.0", secret key.
        raw = "".join([
            str(self.__data_dict["dt"]),
            self.__data_dict["method"],
            str(2.0),
            self.__secretkey_value,
        ])
        self.__data_dict["sign"] = Md5.md5(raw)
        return self.__data_dict
    except Exception as e:
        logger.error(e)
        logger.exception(u"捕获到错误如下:")
def run(self):
    '''
    Crawl the book: fetch the chapter URL list, then download chapters
    concurrently on the greenlet pool. Python 2 only (xrange).

    :return: None
    '''
    if self.base_url is None:
        logger.error('没有指定书url')
        return
    # Collect the chapter URLs first.
    self.get_url()
    # Spawn one crawler greenlet per pool slot, then wait for all of them.
    for i in xrange(self.max_pool_size):
        self.pool.spawn(self.crawl_single_chapter)
    self.pool.join()
def delete_device(**kwargs):
    """Delete a single device matching the given filters.

    :param kwargs: filter_by conditions identifying exactly one device
    :return: (payload dict, 200) on success
    :raises Exception: DB errors (incl. no/multiple matches), logged and
        re-raised
    """
    logger.info('Delete existing device')
    try:
        device = db.session.query(Device).filter_by(**kwargs).one()
        db.session.delete(device)
        db.session.commit()
        return {'message': f'Deleted device<{device.serial}> from db.'}, 200
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def create_algorithm(self, alg_type, args):
    """Instantiate the model class for the given algorithm type.

    Looks up ``algorithm.<alg_type>.model.<ALG_TYPE>`` dynamically.

    :param alg_type: algorithm package name (lowercase)
    :param args: constructor argument for the model class
    :return: the model instance, or None when import/instantiation fails
    """
    try:
        module = importlib.import_module('algorithm.' + alg_type + '.model')
        # The class name is the upper-cased algorithm type by convention.
        model_cls = getattr(module, alg_type.upper())
        return model_cls(args)
    except Exception:
        logger.error(
            "use alg_type:<{}> parse algorithm class exception: {}".format(
                alg_type, traceback.format_exc()))
        return None
def read_device(**kwargs):
    """Query devices, filtering by every non-None keyword argument.

    :param kwargs: optional filter_by conditions; None values are ignored
    :return: list of matching Device rows
    :raises Exception: DB errors, logged and re-raised
    """
    logger.info('Get device list')
    logger.info(f'Filter: {kwargs}')
    try:
        filters = {}
        for key, value in kwargs.items():
            if value is not None:
                filters[key] = value
        return db.session.query(Device).filter_by(**filters).all()
    except Exception as e:
        logger.error(e)
        logger.debug(traceback.format_exc())
        raise e
def processing(self):
    """Dispatch to the handler registered for the current interface code.

    :return: the handler's result, or 0 when the code has no handler;
             errors are logged and yield an implicit None
    """
    try:
        # Map interface codes to bound handler methods (not called here).
        handlers = {
            "ytdyccrmcancel": self.ytdyccrm_cancel_func,
        }
        handler = handlers.get(self.__interface_code)
        if handler is None:
            return 0
        return handler()
    except Exception as e:
        logger.error(e)
        logger.exception(u"捕获到错误如下:")
def urldecoded(self, data):
    """Split a query string into a dict of its key/value pairs.

    Note: values are NOT percent-decoded despite the name; a segment
    without '=' raises internally, which is logged and yields None.
    Extra '='-separated parts beyond the second are dropped.

    :param data: raw query string, e.g. "a=1&b=2"
    :return: {"a": "1", "b": "2"}, or None on malformed input
    """
    try:
        pairs = (segment.split("=") for segment in data.split("&"))
        return {parts[0]: parts[1] for parts in pairs}
    except Exception as e:
        logger.error(e)
        logger.exception(u"捕获到错误如下:")
def execute_model_policy(instance, deleted):
    # Apply the model policy registered for this instance's class, then mark
    # the instance as policed. Python 2 only (print statement).
    # Automatic dirtying
    walk_inv_deps(update_dep, instance)
    sender_name = instance.__class__.__name__
    policy_name = 'model_policy_%s'%sender_name
    noargs = False
    if deleted:
        # Deletions cascade to inactive dependents.
        walk_inv_deps(delete_if_inactive, instance)
    else:
        try:
            policy_handler = getattr(model_policies, policy_name, None)
            # NOTE(review): logged at ERROR on every call — looks like
            # leftover debugging output; consider demoting to debug.
            logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
            if policy_handler is not None:
                policy_handler.handle(instance)
        except:
            logger.log_exc("Model Policy Error:")
            print "Policy Exceution Error"
    # Mark the instance as policed regardless of the handler outcome.
    instance.policed=datetime.now()
    instance.save(update_fields=['policed'])
def run(self):
    """Start the slave: register the management endpoint, join the master,
    then bring up the timer, the workers and (optionally) the HTTP server.
    """
    # Register the thread-count management endpoint.
    self.register("/modify_thread_num", self.modify_thread_num)
    # Everything else only starts once the master accepts us.
    if self.register_in_master():
        # start the timer
        self.__timer.start()
        # start the workers
        self.__workers.start()
        # Serve HTTP only when this slave accepts real-time requests.
        if self.info.recv_real_time_request:
            self.__server.run()
    else:
        logger.error("Can't register to master.")
def execute_model_policy(instance, deleted):
    # Apply the model policy for this instance while maintaining dependency
    # state; instances that previously failed to save are skipped entirely.
    # Automatic dirtying
    if (instance in bad_instances):
        return
    # These are the models whose children get deleted when they are deleted.
    delete_policy_models = ['Slice','Sliver','Network']
    sender_name = instance.__class__.__name__
    policy_name = 'model_policy_%s'%sender_name
    noargs = False
    if (not deleted):
        # Propagate dirtiness both up and down the dependency graph.
        walk_inv_deps(update_dep, instance)
        walk_deps(update_wp, instance)
    elif (sender_name in delete_policy_models):
        # Deletions only cascade for the whitelisted models above.
        walk_inv_deps(delete_if_inactive, instance)
    try:
        policy_handler = getattr(model_policies, policy_name, None)
        # NOTE(review): logged at ERROR on every call — looks like leftover
        # debugging output; consider demoting to debug.
        logger.error("POLICY HANDLER: %s %s" % (policy_name, policy_handler))
        if policy_handler is not None:
            if (deleted):
                # Older policies may not implement handle_delete.
                try:
                    policy_handler.handle_delete(instance)
                except AttributeError:
                    pass
            else:
                policy_handler.handle(instance)
    except:
        logger.log_exc("Model Policy Error:")
    try:
        instance.policed=datetime.now()
        instance.save(update_fields=['policed'])
    except:
        # Objects that cannot be saved are quarantined so we do not retry
        # them forever.
        logging.error('Object %r is defective'%instance)
        bad_instances.append(instance)
game_rounds = int(math.log(len(PLAYERS), 2)) # Allow the app to try 5 times before gracefully quiting with an error # message. tries = 1 for game_round in xrange(game_rounds): logger.info('%s Round: %s %s', '=' * 10, game_round, '=' * 10) try: logger.info("\t'populate.py' Try: %s", tries) sp = swissPairings() for pair in sp: winner_id = pair[0] loser_id = pair[2] reportMatch(winner_id, loser_id) except psycopg2.IntegrityError as e: logger.error(e) tries += 1 if tries > 5: msg = """ The app exceeded number of allowed tries (5). Please try again later. """ logger.info(msg) print msg sys.exit(1) msg = "All players matched successfully in %s attempts!" % tries logger.info(msg) print msg sys.exit(0)