def __init__(self, offset=30):
    """Initialise the in-memory stamp/access caches and start the driver timer.

    offset: grace window applied to timestamp checks (presumably seconds —
    TODO confirm against the checking code).
    """
    self.time_offset = offset
    self.stamp_list = {}
    self.access_store = {}
    self.__stamp_lock = threading.Lock()
    # Load persisted access info before the periodic driver starts firing.
    self.load_access_info()
    driver = Timer(5, self, "AuthenDriver")
    driver.start()
def main(event_manager):
    """Start the event manager together with its periodic check timer."""
    # Timer that invokes check_timer(event_manager) on a fixed interval.
    periodic = Timer(func=check_timer, args=(event_manager, ), interval=40)
    # Auto-restart when module files are updated on disk.
    autoreload(event_manager, periodic, interval=15)
    event_manager.start()
    periodic.start()
def main(event_manager):
    """Start the event manager plus a timer that periodically emits CHECK events."""
    check_event = Event(CHECK)
    # Every interval, push the CHECK event through the manager's send_event.
    ticker = Timer(func=event_manager.send_event, args=(check_event,), interval=40)
    # Auto-restart when module files are updated on disk.
    autoreload(event_manager, ticker, interval=15)
    event_manager.start()
    ticker.start()
def run(self):
    """Execute one timed bulk benchmark pass and return the elapsed time."""
    # Generate the workload up front so it is excluded from the timing window.
    workload = get_random_accesses(NumCalls)
    stopwatch = Timer()
    stopwatch.start()
    self.bulk_op = self.db.bench.initialize_unordered_bulk_op()
    for access in workload:
        self.do_bench_call(access)
    # Flush all queued operations with the configured write concern.
    self.bulk_op.execute({'w': self.writes})
    return stopwatch.stop()
def run(self):
    """Issue all benchmark calls asynchronously, wait for completion, return elapsed time."""
    # Generate the workload up front so it is excluded from the timing window.
    workload = get_random_accesses(NumCalls)
    stopwatch = Timer()
    stopwatch.start()
    # Fire every call first, collecting the pending operation handles.
    pending = [self.do_bench_call(access) for access in workload]
    # Drain in issue order; each wait() blocks until that operation completes.
    for op in pending:
        op.wait()
    return stopwatch.stop()
class ExplorationFirstRoundStateWithTimer(ExplorationFirstRoundState): _timer = None # timer for sending extra command in case map update not sent back in time _MAP_UPDATE_TIME_LIMIT = 1E6 def __init__(self,*args,**kwargs): super(ExplorationFirstRoundStateWithTimer,self).__init__(*args,**kwargs) self._timer = Timer(limit=self._MAP_UPDATE_TIME_LIMIT,end_callback=self.ask_for_map_update) #self._machine.add_mapupdate_listener(self._timer) def post_process(self,label,msg): cmd_ls,data_ls = super(ExplorationFirstRoundStateWithTimer,self).post_process(label,msg) if (cmd_ls): # get new command, stop the previous timer and start a new timer self._timer.shutdown() debug("Get new command, try to start timer",DEBUG_STATES) self._timer.start() return cmd_ls,data_ls def ask_for_map_update(self): self._machine.send_command(PMessage.M_GET_SENSOR) def trigger_end_exploration(self): self._timer.shutdown() super(ExplorationFirstRoundStateWithTimer,self).trigger_end_exploration()
class RootResource(resource.Resource):
    """Root web resource that owns the periodic background maintenance timers."""

    isLeaf = True

    def __init__(self):
        resource.Resource.__init__(self)
        # (attribute name, interval in seconds, worker, label)
        periodic_jobs = (
            ('elastic_timer', 60, ElasticMgr(), 'elastic check'),
            ('monitor_timer', 60, Monitor(), 'node_monitor'),
            ('syndata_timer', 60, SynData(), 'syndata'),
            ('ufleetmonitor_time', 20, UfleetMonitor(), 'ufleetmonitor'),
        )
        # Create each timer, keep it reachable as an attribute, and start it.
        for attr, interval, worker, label in periodic_jobs:
            job_timer = Timer(interval, worker, label)
            setattr(self, attr, job_timer)
            job_timer.start()
class UserClient(object):
    """Client for the user service: verifies tokens and caches passports.

    (Original docstring: "对接用户模块" — integrates with the user module.)
    """

    def __init__(self):
        # Address of the remote user service, read from system config.
        self.domain = GetSysConfig('user_server_addr')
        # token_str -> {'passport': dict, 'expire': epoch-millis} cache.
        self.__store = {}
        # CURLClient.__init__(self, domain)
        # Periodic timer; presumably invokes self.timeout() to prune the
        # cache every 10 (seconds? — TODO confirm Timer units).
        self.timer = Timer(10, self, 'UserTokenCheck')
        self.timer.start()

    def timeout(self):
        """Drop cached passports whose expiry time has passed."""
        if len(self.__store) == 0:
            return
        data = {}
        for k, v in self.__store.items():
            if v['expire'] > NowMilli():
                data[k] = v
        self.__store = data

    def parse_token(self, token_str):
        """Resolve a token to a passport dict, using the 60s local cache.

        Missing/empty tokens get the lowest-privilege ring ('ring8').
        Returns None when the remote lookup fails.
        """
        if not token_str:
            Log(3, 'Unauthorized visit.')
            return {'ring': 'ring8'}
        # Some callers pass the header value as a list; take the first entry.
        if isinstance(token_str, list):
            token_str = token_str[0]
        data = self.__store.get(token_str, None)
        if data:
            return data['passport']
        passport = self._parse_token(token_str)
        if passport:
            # Cache for 60 seconds (60000 ms).
            self.__store[token_str] = {
                'passport': passport,
                'expire': NowMilli() + 60000
            }
        return passport

    def _parse_token(self, token_str):
        """Fetch user info remotely and build the passport dict.

        Ring assignment: superadmin -> ring0, admin -> ring3, active+valid
        user -> ring5 (with group list), otherwise ring8.
        """
        rlt = self.get_user_info(token_str)
        if not rlt.success:
            Log(
                3, "parse token get_user_info error not success:{}, token:{}".
                format(rlt.message, token_str))
            return None
        # Merge system profile and user profile into one flat dict.
        passport = rlt.content.get('systemProfile', {})
        passport.update(rlt.content.get('profile', {}))
        passport['username'] = rlt.content.get('username', '')
        passport['id'] = rlt.content.get('id', '')
        passport['licensed'] = rlt.content.get('licensed', '')
        role = rlt.content.get('role', '')
        if role == 'superadmin':
            passport['ring'] = 'ring0'
        elif role == 'admin':
            passport['ring'] = 'ring3'
        elif passport.get('isActive', False) and passport.get(
                'isValid', False):
            passport['ring'] = 'ring5'
            # Only regular ring5 users carry an explicit group list.
            result = self.get_user_group(rlt.content.get('id'), token_str)
            if result.success:
                passport['group'] = result.content
            else:
                passport['group'] = []
        else:
            passport['ring'] = 'ring8'
        return passport

    def get_user_info(self, token_str):
        """GET /v1/user/verify/<token> and wrap the JSON body in a Result.

        Sample of the expected payload:
        "systemProfile": {
            "authType": "string",
            "createTime": 0,
            "isActive": true,
            "isSuperAdmin": true,
            "isValid": true,
            "lastLogin": 0
        },
        """
        url = "http://" + self.domain + '/v1/user/verify/' + token_str
        r = my_request(url=url,
                       method='GET',
                       timeout=5,
                       headers={"token": token_str})
        if r.success:
            # r.content is the underlying HTTP response object.
            r = r.content
            if r.status_code == 200:
                data = r.json()
                if data is None:
                    return Result('', USER_RESPONSE_DATA_INVALID_ERR,
                                  'get_user_info data parse to json fail.')
                return Result(data)
            else:
                return Result('', r.status_code, r.text, r.status_code)
        else:
            # response.log('UserClient.get_user_info')
            Log(1, "user auth :{},url:{}".format(r.message, url))
            return Result('', CALL_REMOTE_API_FAIL_ERR,
                          'get_user_info fail,as{}.'.format(r.message))

    def get_user_group(self, user_id, token_str):
        """GET /v1/usergroup/user/<user_id> and wrap the JSON body in a Result."""
        if not user_id:
            # Log(1, 'get_user_group fail,as user_id[%s]invalid' % (str(user_id)))
            return Result('', INVALID_PARAM_ERR, 'user_id invalid')
        url = "http://" + self.domain + '/v1/usergroup/user/' + user_id
        r = my_request(url=url,
                       method='GET',
                       timeout=5,
                       headers={"token": token_str})
        if r.success:
            # r.content is the underlying HTTP response object.
            r = r.content
            if r.status_code == 200:
                data = r.json()
                if data is None:
                    return Result('', USER_RESPONSE_DATA_INVALID_ERR,
                                  'get_user_group data parse to json fail.')
                return Result(data)
            else:
                return Result('', r.status_code, r.text, r.status_code)
        else:
            # response.log('UserClient.get_user_group')
            return Result(
                '', CALL_REMOTE_API_FAIL_ERR,
                'get_user_group fail,as{},url:{}'.format(r.message, url))
def __init__(self):
    """Initialise a 3-thread 'WorkFlow' scheduler and start its driver timer."""
    super(WorkFlowMgr, self).__init__(3, 'WorkFlow')
    # Periodic driver; fires every 10 (presumably seconds — TODO confirm).
    driver = Timer(10, self, "WorkFlowDriver")
    driver.start()
class HostMgr(object):
    """Host information manager.

    Exposes host/network information endpoints and log export/paging.
    (Original docstring: "实现主机信息管理" — implements host info management.)
    """

    def __init__(self):
        """Create the LinuxHost helper and start its refresh timer."""
        self.host = LinuxHost()
        # Refresh timer; interval 1 (presumably seconds — TODO confirm Timer units).
        self.timer = Timer(1, self.host, 'host time')
        self.timer.start()
        # Shared buffer for network statistics; served verbatim by netifs().
        self.net_stats = []

    @ring5
    @ring3
    @ring0
    @list_route(methods=['GET'])
    def hostinfo(self, **kwargs):
        """Return general host information."""
        return Result(self.host.get_host_info())

    @ring0
    @list_route(methods=['GET'])
    def netifs(self, **kwargs):
        """With ?iface=... return the collected net stats, else list interfaces."""
        iface = kwargs.get('iface', '')
        if iface:
            # return Result(self.host.get_network_data(iface))
            return Result(self.net_stats)
        else:
            return Result(self.host.find_all_Ethernet_interface())

    @ring0
    def ifacelist(self):
        """List all Ethernet interfaces (same data as netifs without iface)."""
        return Result(self.host.find_all_Ethernet_interface())

    @ring0
    @list_route(methods=['GET'])
    def netstat(self, **kwargs):
        """Return network statistics for the given interface ('' = default)."""
        iface = kwargs.get('iface', '')
        return Result(self.host.get_network_data(iface))

    @ring0
    def exportLogs(self):
        """Tar the Trace folder into the www root; return the archive name."""
        file_name = time.strftime("Log_%Y%m%d.tar.gz", time.localtime())
        wwwroot = ConfigMgr.instance().get_www_root_path()
        fullpath = os.path.join(wwwroot, file_name)
        if os.path.exists(fullpath):
            os.remove(fullpath)
        self.create_tar('Trace', fullpath)
        return Result(file_name)

    def create_tar(self, folder, file_name):
        """Create a gzip tar archive of every file under *folder*.

        Best-effort: failures are logged via PrintStack, not raised.
        """
        # FIX: t was referenced in `finally` without being bound first; if
        # tarfile.open itself raised, cleanup died with UnboundLocalError.
        t = None
        try:
            t = tarfile.open(file_name, "w:gz")
            for root, _, files in os.walk(folder):
                for _file in files:
                    fullpath = os.path.join(root, _file)
                    t.add(fullpath)
        except Exception:
            PrintStack()
        finally:
            if t:
                t.close()

    def extract_files(self, tar_path, ext_path):
        """Extract *tar_path* under *ext_path*.

        Returns True only when _tmp/ApphouseData exists after extraction.
        NOTE(review): extractall on an uploaded archive is vulnerable to
        path traversal — consider tarfile's `filter='data'` (Python 3.12+)
        or explicit member-path validation.
        """
        try:
            if os.path.isdir('_tmp/ApphouseData'):
                shutil.rmtree('_tmp/ApphouseData')
            with tarfile.open(tar_path) as tar:
                tar.extractall(path=ext_path)
            if os.path.isdir('_tmp/ApphouseData'):
                return True
        except Exception:  # FIX: bare except also caught SystemExit/KeyboardInterrupt
            PrintStack()
        return False

    @ring0
    def logs(self, line_num, skip=0):
        """Read up to *line_num* lines from Trace/logs/operation.log.

        skip is the byte offset to seek to before reading; the Result carries
        (lines, 0, new_offset) so callers can page through the file.
        """
        try:
            line_num = int(line_num)
            skip = int(skip)
        except Exception:
            return Result('', INVALID_PARAM_ERR, 'Param invalid')
        workdir = os.path.abspath('.')
        workdir = os.path.join(workdir, "Trace")
        workdir = os.path.join(workdir, "logs")
        log_path = os.path.join(workdir, "operation.log")
        if not os.path.isfile(log_path):
            Log(1, "The log file [%s] is not exist." % (log_path))
            return Result('', LOG_FILE_NOT_EXIST_ERR, 'File not exist')
        arr = []
        size = skip
        with FileGuard(log_path, 'r') as fp:
            fp.seek(skip)
            for line in fp:
                if line_num == 0:
                    break
                size += len(line)
                line_num -= 1
                arr.append(line)
        return Result(arr, 0, size)