async def main():
    """Wire up the event system, start the periodic stream checker and the
    module auto-reload watchdog, then run both until cancelled."""
    import engine.plugins

    streamers = config['streamers']
    streamer_url = {name: info['url'] for name, info in streamers.items()}
    inverted_index = invert_dict(streamer_url)
    urls = list(inverted_index.keys())
    url_status = dict.fromkeys(inverted_index, 0)
    checker = Plugin(engine.plugins).sorted_checker(urls)

    # Context shared by every event handler.
    event_manager.context = dict(
        config,
        urls=urls,
        url_status=url_status,
        checker=checker,
        inverted_index=inverted_index,
        streamer_url=streamer_url,
    )
    from engine.handler import CHECK_UPLOAD, CHECK
    event_manager.start()

    async def check_timer():
        # Fire an upload check plus one CHECK event per registered checker.
        event_manager.send_event(Event(CHECK_UPLOAD))
        for key in checker:
            event_manager.send_event(Event(CHECK, (key,)))

    # Periodic check every 40 s; auto-restart on module change every 15 s.
    timer = Timer(func=check_timer, interval=40)
    detector = AutoReload(event_manager, timer, interval=15)
    await asyncio.gather(detector.astart(), timer.astart(),
                         return_exceptions=True)
def __init__(self, offset=30):
    """Set up stamp bookkeeping and start the periodic guard timer.

    :param offset: allowed time drift, in seconds, when validating stamps.
    """
    self.stamp_list = {}
    self.access_store = {}
    self.__stamp_lock = threading.Lock()
    self.time_offset = offset
    self.load_access_info()
    # Run the guard callback every 5 seconds.
    watchdog = Timer(5, self, "AuthenDriver")
    watchdog.start()
def __init__(self):
    """Create the Linux host wrapper and start its 1-second polling timer."""
    self.host = LinuxHost()
    # Poll host statistics once per second.
    self.timer = Timer(1, self.host, 'host time')
    self.timer.start()
    self.net_stats = []
def main(event_manager):
    """Start the event manager alongside a 40 s check timer and a 15 s
    auto-reload watcher."""
    # Run check_timer(event_manager) every 40 seconds.
    timer = Timer(func=check_timer, args=(event_manager,), interval=40)
    # Restart automatically when source modules change.
    autoreload(event_manager, timer, interval=15)
    event_manager.start()
    timer.start()
def main(event_manager):
    """Start the event manager and emit a CHECK event every 40 seconds."""
    timer = Timer(func=event_manager.send_event, args=(Event(CHECK),),
                  interval=40)
    # Restart automatically when source modules change.
    autoreload(event_manager, timer, interval=15)
    event_manager.start()
    timer.start()
def fetch_player_ladders(self, region, player_path, timeout=60):
    """Fetch player ladders from the Blizzard API.

    :return: <status code, ApiPlayerLadders or None, fetch time, fetch duration>
    """
    base = self.REGION_URL_PREFIXES_1[region]
    url = f'{base}{player_path}ladders'
    timer = Timer()
    status, data = self.http_get_json(url, timeout, API_KEY_AUTH)
    return PlayerLaddersResponse(status, ApiPlayerLadders(data, url),
                                 utcnow(), timer.end())
def fetch_current_season(self, region, timeout=60):
    """Fetch current season information.

    :return: <status code, ApiSeasonInfo or None, fetch time, fetch duration>
    """
    base = self.REGION_URL_PREFIXES_2[region]
    url = f'{base}/season/current'
    timer = Timer()
    status, data = self.http_get_json(url, timeout, ACCESS_TOKEN_AUTH)
    return SeasonResponse(status, ApiSeason(data, url), utcnow(), timer.end())
def fetch_ladder(self, region, bid, timeout=60):
    """Fetch a ladder from the Blizzard API.

    :return: <status code, ApiLadder or None, fetch time, fetch duration>
    """
    base = self.REGION_URL_PREFIXES[region]
    url = '%s/data/sc2/ladder/%s' % (base, bid)
    timer = Timer()
    status, data = self.http_get_json(url, timeout, ACCESS_TOKEN_AUTH)
    ladder = ApiLadder(data, url)
    return LadderResponse(status, ladder, utcnow(), timer.end())
def run(self):
    """Execute NumCalls random bench calls through one unordered bulk
    operation and return the measured elapsed time."""
    calls = get_random_accesses(NumCalls)
    timer = Timer()
    timer.start()
    self.bulk_op = self.db.bench.initialize_unordered_bulk_op()
    for call in calls:
        self.do_bench_call(call)
    # Flush the whole batch with the configured write concern.
    self.bulk_op.execute({'w': self.writes})
    return timer.stop()
def prune(checkpoint, save, sd_key, bs=8, topk=4, optimize_transposed=False,
          include=None, exclude=None, n_workers=None, debug_key=None):
    """Prune a checkpoint's weight tensors with transposable block-L1 pruning.

    :param checkpoint: path of the source checkpoint.
    :param save: output path; defaults to ``checkpoint + '.pruned'``.
    :param sd_key: key of the state dict inside the checkpoint.
    :param bs: pruning block size.
    :param topk: values kept per block.
    :param include: optional explicit list of parameter names to prune.
    :param exclude: optional list of parameter names to skip.
    :param debug_key: if set, print the first mask block of that parameter.
    """
    with Timer() as t:
        sd = load_sd_from_checkpoint(checkpoint, sd_key)
        print('Loading checkpoint, elapsed={}'.format(t.total()))
    save = checkpoint + '.pruned' if save is None else save
    shutil.copyfile(checkpoint, save)
    prune_method = PruningMethodTransposableBlockL1(
        block_size=bs, topk=topk, optimize_transposed=optimize_transposed,
        n_workers=n_workers, with_tqdm=True)
    # Only prune multi-dimensional weights (skip biases / BN running stats).
    keys = [k for k in sd.keys()
            if sd[k].dim() > 1 and 'bias' not in k and 'running' not in k]
    if include:
        invalid_keys = [k for k in include if k not in keys]
        assert not invalid_keys, \
            'Requested params to include={} not in model'.format(invalid_keys)
        # BUG FIX: previously printed `exclude` here instead of `include`.
        print('Including {}'.format(include))
        keys = include
    if exclude:
        invalid_keys = [k for k in exclude if k not in keys]
        assert not invalid_keys, \
            'Requested params to exclude={} not in model'.format(invalid_keys)
        print('Excluding {}'.format(exclude))
        keys = [k for k in keys if k not in exclude]
    del sd  # free the full state dict before per-key reloads
    with Timer() as t:
        for key in keys:
            v = load_var_from_checkpoint(checkpoint, key, sd_key)
            print('Pruning ' + key)
            prune_weight_mask = prune_method.compute_mask(v, torch_ones_like(v))
            save_var_to_checkpoint(save, key, prune_weight_mask, sd_key)
        print('Total elapsed time: {}'.format(t.total()))
    if debug_key:
        sd = load_sd_from_checkpoint(save, sd_key)
        v = sd[debug_key]
        # Print the first bs*bs block of the mask for inspection.
        permuted_mask = permute_to_nhwc(v)
        permuted_mask = pad_inner_dims(permuted_mask, bs * bs)
        permuted_mask = permuted_mask.reshape(-1, (bs * bs))
        print('first block=\n{}'.format(
            permuted_mask.numpy()[0, :].reshape(1, -1, bs, bs)))
def get_mask(self, t):
    """Compute the pruning mask for tensor ``t`` using a process pool.

    Runs a one-shot single-core speed test the first time, then dispatches
    one row per worker iteration and returns the accumulated mask.
    """
    self.mp_tensor = t
    self.mp_mask = torch.zeros_like(t)
    co, inners = t.shape
    n_blocks = inners // (self.bs ** 2)
    if self.RUN_SPEED_TEST:
        # One-shot timing of a single row on the current process.
        self.RUN_SPEED_TEST = False
        with Timer() as speed_timer:
            self.get_mask_iter(0)
        elapsed = speed_timer.total().total_seconds()
        print('Single core speed test: blocks={} secs={} block-time={}'.format(
            n_blocks, elapsed, elapsed / n_blocks))
    pool = Pool(self.n_workers)
    bar = tqdm(total=co, ncols=80) if self.with_tqdm else None
    if bar is not None:
        bar.set_postfix_str('n_processes={}, blocks/iter={}'.format(
            pool._processes, n_blocks))
    for _ in pool.imap_unordered(self.get_mask_iter, range(co)):
        if bar is not None:
            bar.update(1)
    if bar is not None:
        bar.close()
    pool.close()
    return self.mp_mask
def recv_obj(sock: socket, timeout: Union[Timer, float]) -> object:
    """Read one length-delimited pickled object from ``sock``.

    Raises socket.error if the timeout is exceeded.
    """
    if isinstance(timeout, Timer):
        timer = timeout
    else:
        timer = Timer(timeout)
    # 4-byte big-endian length prefix, then the payload itself.
    header = recv_len(sock, 4, timer)
    (length,) = struct.unpack('!I', header)
    payload = recv_len(sock, length, timer)
    return pickle.loads(payload)
def run(self):
    """Issue NumCalls asynchronous bench calls, wait for all of them to
    finish, and return the measured elapsed time."""
    accesses = get_random_accesses(NumCalls)
    timer = Timer()
    timer.start()
    pending = [self.do_bench_call(access) for access in accesses]
    # Drain the outstanding operations in issue order.
    while pending:
        pending.pop(0).wait()
    return timer.stop()
def __init__(self):
    """Load configuration, restore any locally cached cookies, and prepare
    the HTTP session used by the spider."""
    # Basic configuration.
    self.userName = global_config.get('config', 'userName')
    self.eomsHost = global_config.get('config', 'eomsHost')
    self.nick_name = self.userName if self.userName else 'anonymous'
    # Session state: reuse locally cached cookies when available.
    self.spider_session = RequestSession()
    self.spider_session.load_cookies_from_local(self.nick_name)
    self.Userlogin = UserLogin(self.spider_session)
    self.session = self.spider_session.get_session()
    self.user_agent = self.spider_session.user_agent
    self.timer = Timer()
def recv_len(sock: socket, length: int, timeout: Union[Timer, float]) -> bytes:
    """Read exactly ``length`` bytes from ``sock``.

    Raises socket.error if the timeout expires before all bytes arrive.
    """
    if isinstance(timeout, Timer):
        timer = timeout
    else:
        timer = Timer(timeout)
    data = bytearray()
    while len(data) < length and not timer.is_expired():
        # Only call recv when data is ready, so we never block past the deadline.
        if recv_avail(sock, timer.get_remaining_time()):
            data += sock.recv(length - len(data))
    if len(data) != length:
        raise socket.error('timed out')
    return data
def fetch_ladder(self, season_id, region, bid, timeout=60):
    """Fetch a ladder from the Blizzard API, choosing endpoint and auth
    scheme by season.

    :return: <status code, ApiLadder or None, fetch time, fetch duration>
    """
    # Season 28 and later use the newer endpoint with token auth.
    if season_id >= 28:
        prefix = self.REGION_URL_PREFIXES_2[region]
        auth = ACCESS_TOKEN_AUTH
    else:
        prefix = self.REGION_URL_PREFIXES_1[region]
        auth = API_KEY_AUTH
    url = "%s/ladder/%d" % (prefix, bid)
    timer = Timer()
    status, data = self.http_get_json(url, timeout, auth)
    return LadderResponse(status, ApiLadder(data, url), utcnow(), timer.end())
def connect_and_run(self) -> None:
    """Connect to the server and run the client loop until the connection
    drops; always closes the socket, stops the motors (best effort) and
    stops any playing sound on the way out.

    BUG FIX: the log calls used ``{}`` placeholders with stdlib ``logging``,
    which interpolates %-style — the messages were never formatted.
    Converted to lazy %-style arguments.
    """
    logging.info('Connecting to server: %s:%s', self.host, self.port)
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sock.settimeout(self.SOCKET_TIMEOUT)
    try:
        self.sock.connect((self.host, self.port))
    except socket.error as err:
        logging.error('Unable to connect: %s (retrying in %ss)',
                      err, self.RECONNECT_DELAY)
        return
    logging.info("Connected to server")
    # Periodically push a SystemInfoMessage to the server.
    send_system_info_timer = Timer(self.SYSTEM_INFO_INTERVAL, start_expired=True)
    try:
        # Inform the server of the current state of the Arduino connection.
        send_obj(self.sock, ArduinoConnectionMessage(self.arduino.is_connected()))
        while True:
            # Send a SystemInfoMessage when the interval has elapsed.
            if send_system_info_timer.is_expired():
                send_obj(self.sock, get_system_info_message())
                send_system_info_timer.restart()
            # Receive and handle one command per iteration.
            command = recv_obj(self.sock, self.SOCKET_TIMEOUT)
            self.handle_command(command)
    except socket.error as err:
        logging.error('Connection closed: %s (reconnecting in %ss)',
                      err, self.RECONNECT_DELAY)
    finally:
        self.sock.close()
        # If the Arduino is connected, try to stop the motors.
        if self.arduino.is_connected():
            try:
                self.arduino.write_speeds(None)
            except serial.SerialException:
                pass
        # Stop the currently playing sound, if any.
        self.sound_player.stop()
def main(args):
    """Seed all RNGs, pick the device, then train or evaluate per args.mode."""
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.cpu:
        logger.critical('use CPU !')
        args.cuda = False
    elif args.cuda:
        logger.critical("use CUDA")
        torch.cuda.manual_seed(args.seed)
    args = vars(args)
    logger.info('model args:')
    for key, value in args.items():
        logger.info(f'--{key}: {value}')
    mode = args['mode']
    logger.critical(f"Running parser in {mode} mode")
    if mode == 'train':
        label, runner = 'train time:', train
    else:
        label, runner = 'predict time:', evaluate
    with Timer(label):
        runner(args)
def fetch_league(self, region, season_id, version, mode, league, timeout=60):
    """Fetch league information.

    :return: <status code, ApiLeagueInfo or None, fetch time, fetch duration>
    """
    prefix = self.REGION_URL_PREFIXES[region]
    queue_id = self.QUEUE_ID_MAJOR[version] + self.QUEUE_ID_MINOR[mode]
    team_type = self.TEAM_TYPE[mode]
    url = (f'{prefix}/data/sc2/league/'
           f'{season_id}/{queue_id}/{team_type}/{league}')
    # Synthesise a unique battle id from the league coordinates.
    bid = league + team_type * 10 + queue_id * 100 + season_id * 100000
    timer = Timer()
    status, data = self.http_get_json(url, timeout, ACCESS_TOKEN_AUTH)
    return LeagueResponse(status, ApiLeague(data, url, bid), utcnow(),
                          timer.end())
def __init__(self): resource.Resource.__init__(self) # 60s self.elastic_timer = Timer(60, ElasticMgr(), 'elastic check') self.elastic_timer.start() # 60s self.monitor_timer = Timer(60, Monitor(), 'node_monitor') self.monitor_timer.start() # 60s self.syndata_timer = Timer(60, SynData(), 'syndata') self.syndata_timer.start() # 20s self.ufleetmonitor_time = Timer(20, UfleetMonitor(), 'ufleetmonitor') self.ufleetmonitor_time.start()
class ExplorationFirstRoundStateWithTimer(ExplorationFirstRoundState):
    """First-round exploration state that re-requests sensor data when a
    map update does not arrive within the time limit."""

    # Watchdog timer; fires ask_for_map_update when the limit elapses.
    _timer = None
    _MAP_UPDATE_TIME_LIMIT = 1E6

    def __init__(self, *args, **kwargs):
        super(ExplorationFirstRoundStateWithTimer, self).__init__(*args, **kwargs)
        self._timer = Timer(limit=self._MAP_UPDATE_TIME_LIMIT,
                            end_callback=self.ask_for_map_update)
        # self._machine.add_mapupdate_listener(self._timer)

    def post_process(self, label, msg):
        """Re-arm the watchdog whenever new commands are produced."""
        cmd_ls, data_ls = super(ExplorationFirstRoundStateWithTimer,
                                self).post_process(label, msg)
        if cmd_ls:
            # New command: stop the running timer and start a fresh one.
            self._timer.shutdown()
            debug("Get new command, try to start timer", DEBUG_STATES)
            self._timer.start()
        return cmd_ls, data_ls

    def ask_for_map_update(self):
        """Watchdog callback: poll the robot for sensor data."""
        self._machine.send_command(PMessage.M_GET_SENSOR)

    def trigger_end_exploration(self):
        """Stop the watchdog before ending exploration."""
        self._timer.shutdown()
        super(ExplorationFirstRoundStateWithTimer, self).trigger_end_exploration()
class HostMgr(object):
    """Host information management: host stats, network interfaces, and
    operation-log export/reading.

    BUG FIX: ``create_tar`` referenced ``t`` in its ``finally`` clause even
    when ``tarfile.open`` raised before ``t`` was bound (UnboundLocalError);
    ``extract_files`` used a bare ``except:``.  Dead commented-out helper
    methods (import/export data, backups) were removed.
    """

    def __init__(self):
        self.host = LinuxHost()
        # Poll host statistics once per second.
        self.timer = Timer(1, self.host, 'host time')
        self.timer.start()
        self.net_stats = []

    @ring5
    @ring3
    @ring0
    @list_route(methods=['GET'])
    def hostinfo(self, **kwargs):
        """Return general host information."""
        return Result(self.host.get_host_info())

    @ring0
    @list_route(methods=['GET'])
    def netifs(self, **kwargs):
        """Return cached network stats for ``iface``, or the list of all
        Ethernet interfaces when no interface is given."""
        iface = kwargs.get('iface', '')
        if iface:
            # return Result(self.host.get_network_data(iface))
            return Result(self.net_stats)
        return Result(self.host.find_all_Ethernet_interface())

    @ring0
    def ifacelist(self):
        """Same listing as ``netifs`` without an interface argument."""
        return Result(self.host.find_all_Ethernet_interface())

    @ring0
    @list_route(methods=['GET'])
    def netstat(self, **kwargs):
        """Return live network data for the requested interface."""
        iface = kwargs.get('iface', '')
        return Result(self.host.get_network_data(iface))

    @ring0
    def exportLogs(self):
        """Pack the Trace folder into a dated tarball under the www root and
        return its file name."""
        file_name = time.strftime("Log_%Y%m%d.tar.gz", time.localtime())
        wwwroot = ConfigMgr.instance().get_www_root_path()
        fullpath = os.path.join(wwwroot, file_name)
        if os.path.exists(fullpath):
            os.remove(fullpath)
        self.create_tar('Trace', fullpath)
        return Result(file_name)

    def create_tar(self, folder, file_name):
        """Create a gzip tarball of ``folder`` at ``file_name`` (best effort)."""
        t = None  # bound up-front so the finally clause is always safe
        try:
            t = tarfile.open(file_name, "w:gz")
            for root, _, files in os.walk(folder):
                for _file in files:
                    t.add(os.path.join(root, _file))
        except Exception:
            PrintStack()
        finally:
            if t:
                t.close()

    def extract_files(self, tar_path, ext_path):
        """Extract a data tarball; return True when ApphouseData appears."""
        try:
            if os.path.isdir('_tmp/ApphouseData'):
                shutil.rmtree('_tmp/ApphouseData')
            with tarfile.open(tar_path) as tar:
                # NOTE(review): extractall on an untrusted archive allows
                # path traversal; members should be validated upstream.
                tar.extractall(path=ext_path)
            if os.path.isdir('_tmp/ApphouseData'):
                return True
        except Exception:
            PrintStack()
        return False

    @ring0
    def logs(self, line_num, skip=0):
        """Read up to ``line_num`` lines of the operation log starting at byte
        offset ``skip``; return the lines and the offset for the next call."""
        try:
            line_num = int(line_num)
            skip = int(skip)
        except Exception:
            return Result('', INVALID_PARAM_ERR, 'Param invalid')
        workdir = os.path.abspath('.')
        workdir = os.path.join(workdir, "Trace")
        workdir = os.path.join(workdir, "logs")
        log_path = os.path.join(workdir, "operation.log")
        if not os.path.isfile(log_path):
            Log(1, "The log file [%s] is not exist." % (log_path))
            return Result('', LOG_FILE_NOT_EXIST_ERR, 'File not exist')
        arr = []
        size = skip
        with FileGuard(log_path, 'r') as fp:
            fp.seek(skip)
            for line in fp:
                if line_num == 0:
                    break
                size += len(line)
                line_num -= 1
                arr.append(line)
        return Result(arr, 0, size)
def __init__(self, *args, **kwargs):
    """Initialise the parent state and arm the map-update watchdog timer."""
    super(ExplorationFirstRoundStateWithTimer, self).__init__(*args, **kwargs)
    # Ask for sensor data if no map update arrives within the limit.
    self._timer = Timer(limit=self._MAP_UPDATE_TIME_LIMIT,
                        end_callback=self.ask_for_map_update)
class Spider(object):
    """EOMS complaint-sheet spider: logs in, lists complaint orders and
    reports the ones not yet accepted.

    FIXES: locals previously shadowed the builtins ``list`` and ``id``;
    renamed to ``orders`` / ``order_id``.  Runtime log strings are unchanged.
    """

    def __init__(self):
        # Basic configuration.
        self.userName = global_config.get('config', 'userName')
        self.eomsHost = global_config.get('config', 'eomsHost')
        if self.userName:
            self.nick_name = self.userName
        else:
            self.nick_name = 'anonymous'
        # Session state: reuse locally cached cookies when available.
        self.spider_session = RequestSession()
        self.spider_session.load_cookies_from_local(self.nick_name)
        self.Userlogin = UserLogin(self.spider_session)
        self.session = self.spider_session.get_session()
        self.user_agent = self.spider_session.user_agent
        self.timer = Timer()

    def check_login(func):
        """Decorator: log in to OA, then (on success) to EOMS via cookies,
        before running the wrapped method."""
        @functools.wraps(func)
        def new_func(self, *args, **kwargs):
            logger.info("请登录OA系统")
            self.Userlogin.login_by_username_OA()  # log in to OA
            logger.info("------------分隔符------------")
            if self.Userlogin.is_login:
                logger.info("请登录EOMS系统")
                self.Userlogin.login_by_cookies_EOMS()  # EOMS via cookies
                logger.info("------------分隔符------------")
                # NOTE(review): original indentation was ambiguous in the
                # flattened source; the wrapped call is guarded on login here.
                return func(self, *args, **kwargs)
        return new_func

    @check_login
    def tousu_order(self):
        """Fetch the complaint order list and report unaccepted orders."""
        logger.info("获取投诉工单列表")
        page = self.get_tousu_page()
        orders = self.get_tousu_list(page)
        self.choose_not_order(orders)

    def get_tousu_page(self):
        """GET the complaint list page and return its HTML text."""
        url = self.eomsHost + '/eoms35/sheet/complaint/complaint.do'
        payload = {
            'method': 'showListsendundo',
            'ssoToken': '170C02021416071A0A55',
        }
        headers = {
            'User-Agent': self.spider_session.get_user_agent(),
            'Referer': self.eomsHost + '/eoms35/main.jsp?id=10',
        }
        resp = self.session.get(url=url, headers=headers, params=payload)
        return resp.text

    def get_tousu_list(self, resp):
        """Parse order rows out of the list-page HTML.

        :return: list of [order id, detail url, dispatch time, status].
        """
        rows = re.findall('<tr class=".*?">\n(.*?)</tr>', resp, re.S)
        orders = []
        for row in rows:
            cells = re.findall('<td.*?>(.*?)</td>\n', row, re.S)
            link = re.findall('window.open[(](.*)[)]; >(.*)</a>', cells[2], re.S)
            start_time = cells[13]
            # Strip all whitespace from the status cell.
            status = cells[16].replace('\t', "").replace('\n', "").replace(
                '\r', "").replace(' ', "")
            order_id = link[0][1]
            link_url = link[0][0]
            # Drop the surrounding quotes of the JS argument.
            link_url = self.eomsHost + "/eoms35/sheet/complaint/" + \
                link_url[1:len(link_url) - 1]
            orders.append([order_id, link_url, start_time, status])
        return orders

    def choose_not_order(self, orders):
        """Log every order that is still unaccepted past the grace period
        (dispatch time + 30 minutes)."""
        if not orders:
            logger.info("投诉工单为空")
            logger.info("+++++++++++++结束符++++++++++++")
            return
        for each in orders:
            if each[3] == "未接单" and self.timer.local_start_time_diff(each[2]):
                logger.info("工单[" + each[0] + "]未接单,派单时间为" + each[2] +
                            ",工单链接为:" + each[1])
            else:
                logger.info("工单[" + each[0] + "]已受理")
        logger.info("+++++++++++++结束符++++++++++++")
pygame.init()
pygame.display.init()
screen = pygame.display.set_mode([1, 1])

# Event queue.
pygame.fastevent.init()

# Joystick support (currently disabled).
# js = pygame.joystick.Joystick(0)
# js.init()

# Open the connection to the robot.
ip = ipInfo.IpInfo()
con = Connection()
# con.openConnection('192.168.0.100')
con.openConnectionPort(ip.ip, ip.port)
# con.openConnectionPort('127.0.0.1', 2001)

# Start the timer.
t = Timer()
t.startTimer()

# Run the user init code.
dc = driverCode.DriverCode(con)

# Event-processing loop.
while True:
    # Let the queue do its internal bookkeeping, then pull the next event.
    pygame.fastevent.pump()
    ev = pygame.fastevent.poll()
    if ev.type == pygame.NOEVENT:
        # Empty queue: idle briefly.
        time.sleep(.2)
    elif ev.type == evtype.USRICK:
        # "ick" event: respond to it here.
        pass
def __init__(self):
    """Create the WorkFlow schedule (3 threads) and start its driver timer."""
    thread_num = 3
    schedule_name = 'WorkFlow'
    super(WorkFlowMgr, self).__init__(thread_num, schedule_name)
    # Drive the workflow scheduler every 10 seconds.
    driver = Timer(10, self, "WorkFlowDriver")
    driver.start()
# NOTE(review): fragment — `flag`, `check`, `mode`, `d` and `sdp_dataset`
# are defined earlier in the original script, outside this excerpt.
if flag == 'pretrain':
    # Pretrained vocab appears to hang off mode.vocab; otherwise `mode`
    # itself maps ids back to units — TODO confirm against the caller.
    check.append(mode.vocab.id2unit(d.item()))
else:
    check.append(mode.id2unit(d.item()))
print(check)
# print('|word size: {}'.format(word.size()))
# print('|words mask size: {}'.format(words_mask.size()))
# print('|wordchars size: {}'.format(wordchars.size()))
#
# print('|pretrained: {}'.format(pretrained.dtype))
# check(word, sdp_dataset.vocab['word'], 0, flag='word')
# check(word, sdp_dataset.vocab['word'], -1, flag='word')
# check(pretrained, pretrain)
# from pprint import pprint
#
# pprint(sdp_dataset[0])
# print("===" * 10)
# for i, data in enumerate(dataloader):
#     if i != 0:
#         break
#     pprint(data)
from common.timer import Timer
# Time one full pass over the dataset.
with Timer('dataset:'):
    for _, data in enumerate(sdp_dataset):
        pass
# with Timer('dataloader:'):
#     for _, data in enumerate(dataloader):
#         pass
import sys

from common.timer import Timer
from spider.spider import Spider

if __name__ == '__main__':
    # Run the complaint-order spider forever, pausing between cycles.
    spider = Spider()
    timer = Timer()
    while True:
        spider.tousu_order()
        timer.cycleDelay()
class UserClient(object):
    """Client for the user service: verifies tokens and caches the
    resulting passports in memory for 60 seconds."""

    def __init__(self):
        self.domain = GetSysConfig('user_server_addr')
        self.__store = {}
        # CURLClient.__init__(self, domain)
        # Expire cached tokens every 10 seconds.
        self.timer = Timer(10, self, 'UserTokenCheck')
        self.timer.start()

    def timeout(self):
        """Timer callback: drop cached passports whose TTL has passed."""
        if len(self.__store) == 0:
            return
        self.__store = {token: entry
                        for token, entry in self.__store.items()
                        if entry['expire'] > NowMilli()}

    def parse_token(self, token_str):
        """Resolve a token string to a passport dict, using the cache."""
        if not token_str:
            Log(3, 'Unauthorized visit.')
            return {'ring': 'ring8'}
        if isinstance(token_str, list):
            token_str = token_str[0]
        cached = self.__store.get(token_str, None)
        if cached:
            return cached['passport']
        passport = self._parse_token(token_str)
        if passport:
            # Cache the resolved passport for 60 seconds.
            self.__store[token_str] = {
                'passport': passport,
                'expire': NowMilli() + 60000
            }
        return passport

    def _parse_token(self, token_str):
        """Fetch user info and build the passport (ring level, profile,
        group membership)."""
        rlt = self.get_user_info(token_str)
        if not rlt.success:
            Log(3,
                "parse token get_user_info error not success:{}, token:{}".
                format(rlt.message, token_str))
            return None
        passport = rlt.content.get('systemProfile', {})
        passport.update(rlt.content.get('profile', {}))
        passport['username'] = rlt.content.get('username', '')
        passport['id'] = rlt.content.get('id', '')
        passport['licensed'] = rlt.content.get('licensed', '')
        role = rlt.content.get('role', '')
        if role == 'superadmin':
            passport['ring'] = 'ring0'
        elif role == 'admin':
            passport['ring'] = 'ring3'
        elif passport.get('isActive', False) and passport.get('isValid', False):
            passport['ring'] = 'ring5'
            result = self.get_user_group(rlt.content.get('id'), token_str)
            passport['group'] = result.content if result.success else []
        else:
            passport['ring'] = 'ring8'
        return passport

    def get_user_info(self, token_str):
        """Verify a token with the user service.

        The service payload's "systemProfile" carries authType, createTime,
        isActive, isSuperAdmin, isValid and lastLogin.
        """
        url = "http://" + self.domain + '/v1/user/verify/' + token_str
        r = my_request(url=url, method='GET', timeout=5,
                       headers={"token": token_str})
        if not r.success:
            # response.log('UserClient.get_user_info')
            Log(1, "user auth :{},url:{}".format(r.message, url))
            return Result('', CALL_REMOTE_API_FAIL_ERR,
                          'get_user_info fail,as{}.'.format(r.message))
        r = r.content
        if r.status_code != 200:
            return Result('', r.status_code, r.text, r.status_code)
        data = r.json()
        if data is None:
            return Result('', USER_RESPONSE_DATA_INVALID_ERR,
                          'get_user_info data parse to json fail.')
        return Result(data)

    def get_user_group(self, user_id, token_str):
        """Fetch the groups a user belongs to."""
        if not user_id:
            # Log(1, 'get_user_group fail,as user_id[%s]invalid' % (str(user_id)))
            return Result('', INVALID_PARAM_ERR, 'user_id invalid')
        url = "http://" + self.domain + '/v1/usergroup/user/' + user_id
        r = my_request(url=url, method='GET', timeout=5,
                       headers={"token": token_str})
        if not r.success:
            # response.log('UserClient.get_user_group')
            return Result(
                '', CALL_REMOTE_API_FAIL_ERR,
                'get_user_group fail,as{},url:{}'.format(r.message, url))
        r = r.content
        if r.status_code != 200:
            return Result('', r.status_code, r.text, r.status_code)
        data = r.json()
        if data is None:
            return Result('', USER_RESPONSE_DATA_INVALID_ERR,
                          'get_user_group data parse to json fail.')
        return Result(data)
os.environ["SDL_VIDEODRIVER"] = 'dummy'

# Pygame / display setup (headless via the dummy video driver).
pygame.init()
pygame.display.init()
screen = pygame.display.set_mode([1, 1])

# Event queue.
pygame.fastevent.init()

# Joystick support (currently disabled).
# js = pygame.joystick.Joystick(0)
# js.init()

# Bring up the hardware and listen for a driver connection.
hardware.init()
con = Connection()
con.openListen()

# Start the timer.
t = Timer()
t.startTimer()

# Run the user init code.
rc = robotCode.RobotCode(con)

# Event-processing loop.
while True:
    # Let the queue do its internal bookkeeping, then pull the next event.
    pygame.fastevent.pump()
    ev = pygame.fastevent.poll()
    if ev.type == pygame.NOEVENT:
        # Empty queue: idle briefly.
        time.sleep(.2)
    elif ev.type == evtype.USRICK:
        # "ick" event: respond to it here.
        pass
import time

import numpy

import pop_factory
from common.timer import Timer

# Micro-benchmark: list vs dict membership tests, measured both with
# perf_counter and with the project Timer's aggregated report.
r = numpy.random.rand(10000)
snp_tuple = pop_factory.SNPTuples(100, "1", 50000)
snp_tuple.add_tuple("G", 0.70)
snp_tuple.add_tuple("A", 0.90)
snp_tuple.add_tuple("T", 1.0)

x = [100, 200]
start = time.perf_counter()
for _ in range(10000000):
    with Timer("in_list") as t:
        y = 100 not in x
end = time.perf_counter()
print("elapsed %s" % str(end - start))
print(str(Timer.report_all()))

x = {100: 1, 200: 2}
start = time.perf_counter()
for _ in range(10000000):
    with Timer("in_dict") as t:
        y = 100 not in x
end = time.perf_counter()
print("elapsed %s" % str(end - start))
print(str(Timer.report_all()))
def __init__(self):
    """Resolve the user-service address and start the token-cache timer."""
    self.domain = GetSysConfig('user_server_addr')
    self.__store = {}
    # CURLClient.__init__(self, domain)
    # Expire cached tokens every 10 seconds.
    self.timer = Timer(10, self, 'UserTokenCheck')
    self.timer.start()