def render(self, *, episode: int, step: int, figsize: tuple, filepath: str,
           dpi: int, fmt: str, **kwargs):
    """Visualize the current simulation timestep and save it as an image.

    Args:
        episode: current episode number (encoded into the filename).
        step: current step number (encoded into the filename).
        figsize: matplotlib figure size tuple.
        filepath: path prefix the figure file is appended to.
        dpi: resolution passed to ``fig.savefig``.
        fmt: file format passed to ``fig.savefig``.
        **kwargs: ignored; absorbs surplus plot-config entries.
    """
    # Map every occupied grid cell to an integer code for the colormap;
    # empty cells stay 0 and are masked out of the image below.
    plotarr = np.zeros(shape=np.shape(self.env))
    y, x = np.where(self.env != None)  # noqa: E711 -- elementwise on object array
    for idc in zip(y, x):
        plotarr[idc] = self._ag_to_int(ag=self.env[idc])

    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    # FIX: dropped the dead `im`/`info` locals and the commented-out
    # colorbar/title code that was their only (inactive) consumer.
    ax.imshow(ma.masked_equal(plotarr, 0), cmap=ppm_cmap, norm=ppm_norm)

    # Hide all axis decorations -- the grid image is the whole plot.
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xticks([])
    ax.set_yticks([])

    filename = "{}_{:0>3}_{:0>3}".format(timestamp(), episode, step)
    fig.savefig(filepath + filename, dpi=dpi, format=fmt)
    plt.close(fig)  # free the figure; long runs render many frames
def al1(bot, update):
    """Begin the lot-creation dialog: register a fresh lot id, ask for a name."""
    global idlot
    idlot = tools.timestamp()  # the timestamp doubles as the lot's primary key
    storage = SQLite()
    storage.magic('insert into lot(id) values (?)', (idlot, ))
    update.message.reply_text(RU.aname)
    return NAME  # advance the conversation to the NAME state
def auth(request):
    """Build the payment-gateway authorization form for the session's order.

    Reads the order id from the session; redirects to the site root when no
    order is pending. All gateway fields are assembled as locals and handed
    to the template via ``locals()``.
    """
    order = request.session.get('order', None)
    if not order:
        # FIX: the redirect response was created but never returned, so the
        # view fell through with order=None straight into get_object_or_404.
        return redirect('/')
    order = get_object_or_404(Order, id=order)

    AMOUNT = str(order.totalcost) + '.00'
    CURRENCY = settings.CURRENCY
    ORDER = order.ordernum()
    DESC = order.desc
    MERCH_NAME = settings.MERCH_NAME
    MERCH_URL = settings.MERCH_URL
    MERCHANT = settings.MERCHANT
    TERMINAL = settings.TERMINAL
    EMAIL = settings.EMAIL
    TRTYPE = '0'
    COUNTRY = settings.COUNTRY
    MERCH_GMT = settings.MERCH_GMT
    TIMESTAMP = tools.timestamp()
    NONCE = tools.nonce(32)
    BACKREF = settings.BACKREF
    # Field order matters: the gateway MAC is computed over this exact
    # concatenation of values.
    temp = tools.prepstr([AMOUNT, CURRENCY, ORDER, DESC, MERCH_NAME,
                          MERCH_URL, MERCHANT, TERMINAL, EMAIL, TRTYPE,
                          COUNTRY, MERCH_GMT, TIMESTAMP, NONCE, BACKREF])
    P_SIGN = tools.mac(temp, settings.KEY)
    GATEWAY_URL = settings.GATEWAY_URL
    # The template reads the signed fields straight out of locals().
    return render_to_response('ecommerce/auth.html', locals(),
                              context_instance=RequestContext(request))
def save():
    """Save the current state of the simulation to a resumeable file."""
    print("\n: Storing the following keys: {}".format(save_state.keys()))
    target = cfg['Sim']['save_state_to'] + "state_" + timestamp()
    ac.save_checkpoint(state=save_state, filename=target)
    # Reset the episode/timestep/function-call bookkeeping for the next run.
    epsbatch.clear()
def test_update_missingfile(self):
    """update_json must create the target file when it does not exist yet."""
    payload = {'a': 1, 'b': 2}
    target = os.path.join(fixture_dir,
                          'foo2_{0}.json'.format(t.timestamp()))
    existed_before = t.item_exists(target)
    t.update_json(data=payload, input_file=target)
    existed_after = t.item_exists(target)
    self.assertTrue(
        existed_after and not existed_before,
        'File was not created correctly by the update JSON function')
def test_update_json1(self):
    """Updating an existing JSON file must merge old and new keys."""
    initial = {'a': 1, 'b': 2}
    additions = {'c': 3, 'd': 4}
    expected = {'a': 1, 'c': 3, 'b': 2, 'd': 4}
    target = os.path.join(fixture_dir,
                          'foo_{0}.json'.format(t.timestamp()))
    t.write_json(object=initial, output_file=target)
    t.update_json(data=additions, input_file=target)
    merged = t.load_json(input_file=target)
    self.assertTrue(
        expected == merged,
        'Data read from JSON file does not match expected output')
def test_partial_overlap(self):
    """Overlapping head/tail slices of the variants file must give 38 lines."""
    head_file = os.path.join(fixture_dir, 'variants_head200.tsv')
    tail_file = os.path.join(fixture_dir, 'variants_tail200.tsv')
    target = os.path.join(fixture_dir,
                          'foo_{0}.tsv'.format(t.timestamp()))
    t.write_tabular_overlap(file1=head_file, ref_file=tail_file,
                            output_file=target)
    line_count = t.num_lines(input_file=target, skip=0)
    self.assertTrue(
        line_count == 38,
        'Number of lines output in partial overlap files does not match')
def new(self, title=None, brief=None, content=None, ts=None):
    '''Create a new article record.

    Args:
        title:   str, 0 < len < 50, the article title.
        brief:   str, 0 < len < 500, the article abstract.
        content: str, 0 < len < 50000, the article body.
        ts:      optional timestamp; defaults to the current time.

    Returns:
        {"code": 200} on success, or
        {"code": 4**, "errmsg": ...} when a required field is missing.
    '''
    # Validate input via guard clauses. The error messages are user-facing
    # runtime strings and are kept exactly as before.
    if not title:
        return {"code": 400, "errmsg": "缺少标题"}
    if not brief:
        return {"code": 400, "errmsg": "缺少摘要"}
    if not content:
        return {"code": 400, "errmsg": "缺少正文"}

    ts = int(ts) if ts else timestamp()

    session = SESSION(bind=ENGINE.connect())
    try:
        session.add(DB(
            title=title,
            brief=brief,
            content=content,
            timestamp=ts,
        ))
        session.commit()
    finally:
        # FIX: the session (and its connection) leaked when add()/commit()
        # raised; always close it.
        session.close()
    return {"code": 200}
def upload_book(**kwargs):
    '''Receive an uploaded book file.

    1. Store the file under its MD5 hash as the filename.
    2. Write the file's metadata into the database.
    3. Push the file id and absolute path into the parsing queue.

    Cases handled:
    1. The book's MD5 already exists in the database -> the file is not
       written again.
    2. The MD5 exists but under a different user id -> a new book record is
       created for this user; if a successfully parsed record exists, the new
       record is marked parsed directly.

    Known issue (fixed): when one user's copy of a book was still being
    parsed and a second user uploaded the same book, parsing ran twice and
    produced duplicate chapter data. Fixed via the status-code scheme below.

    Book status codes:
        -2  encoding error
        -1  chapters could not be matched
         0  uploaded (formerly meant "parsing")
         1  parsed successfully
         2  currently being parsed
    '''
    user_id = kwargs["user_id"]
    # Only a single file is accepted for now; if several were uploaded the
    # first one wins (the loop returns on its first iteration).
    for file in request.files.to_dict(flat=False)['file']:
        file_name = file.filename
        file_extension = "." + os.path.splitext(file_name)[-1].replace(
            '.', '').lower()
        file_pure_name = file_name.replace(file_extension, "")
        # The stream can only be read once, so the MD5 cannot be computed
        # chunk-wise while writing; read the whole stream into memory instead.
        file_stream = file.stream.read()
        hash_md5 = hashlib.md5()
        hash_md5.update(file_stream)
        file_md5 = hash_md5.hexdigest()
        # NOTE(review): the stream was fully consumed above, so
        # len(file_stream) would give the size without seeking -- confirm
        # file.seek targets the same underlying stream as file.stream.
        file.seek(0, os.SEEK_END)
        file_size = file.tell()
        # Make sure the storage directory exists.
        if not os.path.isdir(FILE_PATH):
            os.makedirs(FILE_PATH)
        file_to = os.path.join(FILE_PATH, file_md5)
        # The user's upload quota is deliberately not limited for now.
        # Check whether the book already exists in the DB (for any user).
        is_book = Book().is_book(book_id=file_md5)
        if is_book["code"] == 200:
            # Known book -- skip writing the file to disk.
            pass
        elif is_book["code"] == 404:
            # Only write the file when no record exists anywhere in the DB.
            if os.path.isfile(file_to):
                pass
            else:
                with open(file_to, 'wb') as f:
                    f.write(file_stream)
        # Check whether THIS user already has a record for this book.
        is_book = Book().is_book(book_id=file_md5, user_id=user_id)
        if is_book["code"] == 404:
            # Create a record only when (user_id, book_id) has none.
            q = Book().new_book(user_id=user_id,
                                book_id=file_md5,
                                book_name=file_pure_name,
                                book_size=file_size,
                                book_status=0,
                                book_upload_time=timestamp())
            if q["code"] != 200:
                return "", 500
        # When this user's record exists with status 0 / 1 / 2, nothing
        # further to do -- return 200 directly.
        elif is_book["code"] == 200:
            if is_book["query"].book_status == 0:
                return "", 200
            if is_book["query"].book_status == 1:
                return "", 200
            if is_book["query"].book_status == 2:
                return "", 200
        # Across all users: does a successfully parsed (status 1) copy exist?
        is_book = Book().is_book(book_id=file_md5, book_status=1)
        if is_book["code"] == 404:
            # No fully parsed copy exists; also check whether a parse is
            # already in flight (status 2) before queueing.
            is_book = Book().is_book(book_id=file_md5, book_status=2)
            # Only enqueue when no parse of this book is currently running.
            if is_book["code"] == 404:
                # Flag this record as status 2 (parsing) and enqueue the job.
                Book().update_book_status(user_id=user_id,
                                          book_id=file_md5,
                                          book_status=2)
                BOOK_PARSER.Q.put({
                    "user_id": user_id,
                    "book_id": file_md5,
                    "book_path": file_to
                })
            else:
                # Another parse is running; piggyback on it by marking this
                # user's record as parsing too.
                Book().update_book_status(user_id=user_id,
                                          book_id=file_md5,
                                          book_status=2)
        else:
            # A parsed copy already exists -- mark this record done directly.
            Book().update_book_status(user_id=user_id,
                                      book_id=file_md5,
                                      book_status=1)
        return "", 200
def button(bot, update):
    """Handle all inline-keyboard callback queries of the auction bot.

    Dispatches on ``query.data``:
        'refr'       -- refresh the list of open lots
        'back'       -- return to the welcome screen
        'add1 <id>'  -- raise the bid on a lot by one increment
        'add10 <id>' -- raise the bid by ten increments
        'Set <id>'   -- set an explicit price (branch unfinished)
        '<lot id>'   -- open a lot and record the opening bid
    """
    db = SQLite()
    query = update.callback_query
    # Open lots: a buyer is only recorded once a lot has been sold.
    a = db.magic('select id, head from lot where buyer isnull').fetchall()
    ids = []
    for i in a:
        ids.append(i[0])
    if query.data == 'refr':
        # NOTE(review): fetchall() returns a list, never None -- an empty
        # result still takes the first branch; `if a:` was probably intended.
        if a is not None:
            keyboard = []
            for i in a:
                keyboard.append([InlineKeyboardButton(i[1], callback_data=i[0])])
            keyboard.append([InlineKeyboardButton('Back', callback_data='back')])
            bot.edit_message_text(text="text",
                                  chat_id=query.message.chat_id,
                                  message_id=query.message.message_id,
                                  reply_markup=InlineKeyboardMarkup(keyboard))
        else:
            keyboard = [[InlineKeyboardButton('Back', callback_data='back')]]
            bot.edit_message_text(text=RU.empty,
                                  chat_id=query.message.chat_id,
                                  message_id=query.message.message_id,
                                  reply_markup=InlineKeyboardMarkup(keyboard))
    elif query.data == 'back':
        markup = [[InlineKeyboardButton("Refresh", callback_data='refr'), ]]
        bot.edit_message_text(text=RU.welcome1.format(query.from_user.first_name),
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id,
                              reply_markup=InlineKeyboardMarkup(markup))
    elif re.match('add1 ', query.data):
        # Raise the current top bid by one increment.
        # Lot row layout (by observed use): [1]=title, [4]=start price,
        # [5]=bid increment -- TODO confirm against the lot table schema.
        lot = query.data[5:]
        lotData = db.magic('select * from lot where id = (?)',
                           (lot,)).fetchall()[0]
        try:
            maxprice = float(db.magic(
                'select max(price) from room where lotID = (?)',
                (lot,)).fetchall()[0][0])
        except:
            # No bids yet -- fall back to the starting price.
            # NOTE(review): bare except also swallows real DB errors.
            maxprice = lotData[4]
        maxprice += lotData[5]
        db.magic('replace into room(lotID, memberID, price) values (?,?,?)',
                 (lot, query.from_user.id, maxprice))
        markup = [[
            InlineKeyboardButton("+{}".format(lotData[5]),
                                 callback_data='add1 {}'.format(lot)),
            InlineKeyboardButton("+{}".format(lotData[5]*10),
                                 callback_data='add10 {}'.format(lot)),
            InlineKeyboardButton("Set Price",
                                 callback_data='Set {}'.format(lot))]]
        bot.edit_message_text(text=RU.lot.format(lotData[1], maxprice),
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id,
                              reply_markup=InlineKeyboardMarkup(markup))
    elif re.match('add10', query.data):
        # Same as 'add1' but the bid is raised by ten increments.
        lot = query.data[6:]
        lotData = db.magic('select * from lot where id = (?)',
                           (lot,)).fetchall()[0]
        try:
            maxprice = float(db.magic(
                'select max(price) from room where lotID = (?)',
                (lot,)).fetchall()[0][0])
        except:
            # No bids yet -- starting price fallback (see note above on
            # the bare except).
            maxprice = lotData[4]
        maxprice += lotData[5]*10
        db.magic('replace into room(lotID, memberID, price) values (?,?,?)',
                 (lot, query.from_user.id, maxprice))
        markup = [[
            InlineKeyboardButton("+{}".format(lotData[5]),
                                 callback_data='add1 {}'.format(lot)),
            InlineKeyboardButton("+{}".format(lotData[5]*10),
                                 callback_data='add10 {}'.format(lot)),
            InlineKeyboardButton("Set Price",
                                 callback_data='Set {}'.format(lot))]]
        bot.edit_message_text(text=RU.lot.format(lotData[1], maxprice),
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id,
                              reply_markup=InlineKeyboardMarkup(markup))
    elif re.match('Set', query.data):
        # NOTE(review): unfinished branch -- the lot id is extracted but
        # nothing is done with it.
        lot = query.data[4:]
    elif int(query.data) in ids:
        # A bare lot id: open the lot and record the opening bid.
        stamp = tools.timestamp()
        lot = db.magic('select * from lot where id = (?)',
                       (query.data,)).fetchall()[0]
        try:
            # NOTE(review): '{}'.format(lot,) interpolates the WHOLE row
            # tuple into the SQL text, so this query is almost certainly
            # malformed; a (?) placeholder bound to the lot id was probably
            # intended (also avoids SQL injection).
            lotPrice = db.magic('select max(price) from room where lotID = {}'.format(lot,)).fetchall()
            lotPrice = float(lotPrice[0][0])
        except:
            lotPrice = lot[4]
        data = [stamp, query.data, query.from_user.id, lotPrice]
        #TODO change price to actual
        # NOTE(review): (data,) wraps the list in a 1-tuple, so the four
        # placeholders receive a single parameter; tuple(data) was probably
        # intended -- verify against SQLite.magic's parameter handling.
        db.magic('insert into room(id, lotID, memberID, price) values (?,?,?,?)', (data,))
        markup = [[
            InlineKeyboardButton("+1",
                                 callback_data='add1 {}'.format(query.data)),
            InlineKeyboardButton("+10",
                                 callback_data='add10 {}'.format(query.data)),
            InlineKeyboardButton("Set Price",
                                 callback_data='Set {}'.format(query.data))]]
        bot.edit_message_text(text=RU.lot.format(lot[1], lot[4]),
                              chat_id=query.message.chat_id,
                              message_id=query.message.message_id,
                              reply_markup=InlineKeyboardMarkup(markup))
def __init__(self, url):
    """Remember the endpoint URL and open a timestamped JSON dump file."""
    self._url = url
    dump_name = "{}_{}.json".format(type(self).__name__, tools.timestamp())
    # Kept open for the object's lifetime; records are streamed into it.
    self._fout = open(dump_name, "wt")
    self._symbols = _load_common_symbols()
    self._ws = None  # websocket is connected lazily, not in the constructor
def main():
    """Trying to pseudo code here.

    Main episode/step loop of the predator-prey RL simulation: resets the
    environment, steps every agent, records per-step statistics, and runs
    the actor-critic optimization at the end of each episode.
    """
    inittime = timestamp(return_obj=True)  # get initial time datetime object
    batch = deque()
    # initial creation of a save object
    for i_eps in range(
            resume_pars['last_episode'],
            cfg['Sim']['episodes']):  # if resume is given, start from there
        # add entry to save_dict
        save_state['last_episode'] = i_eps
        eps_time = timestamp(return_obj=True)  # record episode starting time
        print("\n: Environment resetting now...")
        state, idx = env.reset()  # returns state and object of random agent
        # save data
        if i_eps % cfg['Sim']['save_state_every'] == 0:
            save()
        for _ in range(cfg['Sim']['steps']):
            print(":: Episode {}, Step {}".format(i_eps, _))
            if cfg['Plot']['render']:
                if i_eps % cfg['Plot']['every'] == 0:  # plot every nth episode
                    print("::: Plotting current state...")
                    env.render(episode=i_eps, step=_, **cfg['Plot'])
            # as long as there are agents
            active_agents = len(env.shuffled_agent_list) + 1
            while (active_agents):
                # if any prey got eaten last round, use it
                # print(": eaten prey: {}".format(len(env.eaten_prey)))
                active_agents -= 1
                final_action = False if active_agents != 0 else True
                if len(env.eaten_prey) != 0:
                    # Eaten prey are handled first so they still get their
                    # terminal transition recorded this timestep.
                    tmpidx, ag = env.eaten_prey.pop()
                    state = env.index_to_state(index=tmpidx, ag=ag)
                    # remove the index from shuffled agent list
                    if tmpidx in env.shuffled_agent_list:
                        env.shuffled_agent_list.remove(tmpidx)
                    if state[-1] is None:
                        # Last state slot is the agent's own food reserve.
                        state[-1] = int(ag.food_reserve)
                    # env.state = state
                    model = PreyModel
                    action = ac.select_action(model=model, agent=ag,
                                              state=state)
                    reward, state, done, idx = env.step(model=model,
                                                        agent=ag,
                                                        index=tmpidx,
                                                        returnidx=idx,
                                                        action=action)
                else:
                    ag = env.env[idx]
                    # ag.memory.States.append(state)
                    # select model and action
                    model = PreyModel if ag.kin == "Prey" else PredatorModel
                    action = ac.select_action(model=model, agent=ag,
                                              state=state)
                    # take a step
                    reward, state, done, idx = env.step(model=model,
                                                        agent=ag,
                                                        index=idx,
                                                        action=action)
                if done or ((_ + 1) % cfg['Sim']['steps'] == 0):
                    print(":: Breakpoint reached -----------------------------")
                    break
            # data analysis and storage ---------------------------------------
            # print food reserve
            mean_prey_fr = np.mean(
                [ag.food_reserve for ag in env._agents_tuple.Prey])
            mean_pred_fr = np.mean(
                [ag.food_reserve for ag in env._agents_tuple.Predator])
            print("::: Preys:\t{}, mean food reserve: {}".format(
                len(env._agents_tuple.Prey), mean_prey_fr))
            print("::: Predators:\t{}, mean food reserve: {}".format(
                len(env._agents_tuple.Predator), mean_pred_fr))
            # function calls
            function_calls = []
            func_list = [f for a, f in sorted(env.action_lookup.items())]
            for chunk in chunkify(func_list, 5):  # see tools!
                function_calls.append(sum_calls(chunk))  # see tools too here.
            print("::: Move calls: {}\t Eat calls: {}\t Procreate calls: {}"
                  "".format(*function_calls))
            gens = deque()
            for a in env._agents_set:
                gens.append(a.generation)
            mean_gens = np.mean(gens)
            gens.clear()  # free memory
            print("::: Mean generation: {}".format(mean_gens))
            batch.append([
                function_calls, mean_gens, mean_pred_fr, mean_prey_fr,
                len(env._agents_tuple.Predator),
                len(env._agents_tuple.Prey)
            ])
            # avg['mean_gens'].append(np.mean(gens))
            for f in env.action_lookup.values():
                f.calls = 0  # reset the call counter
            # simulation again ------------------------------------------------
            if done or ((_ + 1) % cfg['Sim']['steps'] == 0):
                # save the episode number with number of steps
                # batch.append((i_eps, _))
                epsbatch.append([(i_eps, _), batch.copy()])
                # clear current batch deque
                batch.clear()
                break
            env.create_shuffled_agent_list()  # prepare next step
            idx = env.shuffled_agent_list.pop()
            env.state = env.index_to_state(index=idx)
            state = env.state
        # if actual timestep limit is reached append agent memory to history
        if not done and training:
            for ag in env._agents_set:
                if ag.memory.Rewards:  # if agent actually has memory
                    getattr(env.history, ag.kin).append(ag.memory)
        print("\n: Episode Runtime: {}".format(
            timestamp(return_obj=True) - eps_time))
        # only do updates if both kins have a history
        if len(env.history.Predator) and len(env.history.Prey) and training:
            print("\n: optimizing now...")
            opt_time_start = timestamp(return_obj=True)
            fcalls = {}
            l, mr, sa = ac.finish_episode(model=PreyModel,
                                          optimizer=PreyOptimizer,
                                          history=env.history.Prey,
                                          gamma=cfg['Network']['gamma'],
                                          return_means=True)
            fcalls['Prey'] = sa
            print(":: [avg] Prey loss:\t{}\t Prey reward: {}"
                  "".format(l.item(), mr))
            avg['mean_prey_loss'].append(l.item())
            avg['mean_prey_rewards'].append(mr)
            l, mr, sa = ac.finish_episode(model=PredatorModel,
                                          optimizer=PredatorOptimizer,
                                          history=env.history.Predator,
                                          gamma=cfg['Network']['gamma'],
                                          return_means=True)
            fcalls['Predator'] = sa
            print(":: [avg] Predator loss:\t{}\t Predator reward: {}"
                  "".format(l.item(), mr))
            avg['mean_pred_loss'].append(l.item())
            avg['mean_pred_rewards'].append(mr)
            epsbatch[-1].append(fcalls)  # save the function calls
            print("\n: optimization time: {}".format(
                timestamp(return_obj=True) - opt_time_start))
        else:
            print("\n: Not enough history to train...")
    print("\n: Entire simulation runtime: {}".format(
        timestamp(return_obj=True) - inittime))
    # save everything
    save()
def POST(self, username):
    """
    Revoke or Active keys.
    /admin/<username>
        revoke=true/false => Revoke user
        status=true/false => Display status

    Admin-only endpoint; user states observed here: 0=active, 1=revoked,
    2=pending -- TODO confirm against the USERS schema.
    """
    # LDAP authentication
    is_admin_auth, message = ldap_authentification(admin=True)
    if not is_admin_auth:
        return response_render(message, http_code='401 Unauthorized')
    payload = data2map()
    if 'revoke' in payload:
        do_revoke = payload['revoke'].lower() == 'true'
    else:
        do_revoke = False
    if 'status' in payload:
        do_status = payload['status'].lower() == 'true'
    else:
        do_status = False
    pg_conn, message = TOOLS.pg_connection()
    if pg_conn is None:
        return response_render(message, http_code='503 Service Unavailable')
    cur = pg_conn.cursor()
    if username == 'all' and do_status:
        # NOTE(review): this early return leaks cur/pg_conn -- they are not
        # closed on this path (same on the per-user status return below).
        return response_render(
            TOOLS.list_keys(), content_type='application/json')
    # Search if key already exists
    cur.execute('SELECT * FROM USERS WHERE NAME=(%s)', (username,))
    user = cur.fetchone()
    # If user dont exist
    if user is None:
        cur.close()
        pg_conn.close()
        message = 'User does not exists.'
    elif do_revoke:
        # Mark revoked (STATE=1) and archive the public key in REVOCATION.
        cur.execute('UPDATE USERS SET STATE=1 WHERE NAME=(%s)', (username,))
        pg_conn.commit()
        pubkey = get_pubkey(username, pg_conn)
        cur.execute('INSERT INTO REVOCATION VALUES \
            ((%s), (%s), (%s))', \
            (pubkey, timestamp(), username))
        pg_conn.commit()
        message = 'Revoke user=%s.' % username
        cur.close()
        pg_conn.close()
    # Display status
    elif do_status:
        return response_render(
            TOOLS.list_keys(username=username),
            content_type='application/json')
    # If user is in PENDING state
    elif user[2] == 2:
        cur.execute('UPDATE USERS SET STATE=0 WHERE NAME=(%s)', (username,))
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        message = 'Active user=%s. SSH Key active but need to be signed.' % username
    # If user is in REVOKE state
    # NOTE(review): this branch is identical to the PENDING branch above --
    # the two could be merged with `elif user[2] in (1, 2):`.
    elif user[2] == 1:
        cur.execute('UPDATE USERS SET STATE=0 WHERE NAME=(%s)', (username,))
        pg_conn.commit()
        cur.close()
        pg_conn.close()
        message = 'Active user=%s. SSH Key active but need to be signed.' % username
    else:
        cur.close()
        pg_conn.close()
        message = 'user=%s already active. Nothing done.' % username
    return response_render(message)
def main():
    """The main simulation loop.

    Oriented-agent variant: resets the environment each episode, lets the
    environment drive agent selection internally via ``env.step``, records
    per-step statistics, and optimizes both policies after each episode.
    """
    inittime = timestamp(return_obj=True)  # initial datetime object
    batch = deque()  # initial batch deque to append values to
    # if no resume was given above, this starts from 0: -----------------------
    for i_eps in range(resume_pars['last_episode'], cfg['Sim']['episodes']):
        # add entry to save_dict
        save_state['last_episode'] = i_eps
        eps_time = timestamp(return_obj=True)  # record episode starting time
        print("\n: [env] Resetting now...")
        env.reset()  # returns None in this scenario
        # save data
        if i_eps % cfg['Sim']['save_state_every'] == 0:
            save()
        for ts in range(cfg['Sim']['steps']):  # ts = timestep ----------------
            print("\n:: [sim] Episode {}, Step {}".format(i_eps, ts))
            # if ts should be rendered:
            if cfg['Plot']['render']:
                if i_eps % cfg['Plot']['every'] == 0:  # plot every nth episode
                    print("::: [sim] Plotting current state...")
                    env.render(episode=i_eps, timestep=ts,
                               params=cfg['Plot']['params'])
            # run while there are agents to play with
            while(len(env.shuffled_agent_list) > 0 or len(env.eaten_prey) > 0):
                # take a step
                reward, state, done = env.step(policy=Policy,
                                               select_action=ac.select_action)
                if done or ((ts + 1) % cfg['Sim']['steps'] == 0):
                    print(":: [sim] Breakpoint reached " + 40 * "-")
                    break
            # data analysis and storage ---------------------------------------
            preys = env._agents_tuple.OrientedPrey
            preds = env._agents_tuple.OrientedPredator
            # food reserve
            mean_prey_fr = np.mean([ag.food_reserve for ag in preys])
            mean_pred_fr = np.mean([ag.food_reserve for ag in preds])
            # generation
            mean_prey_gen = np.mean([ag.generation for ag in preys])
            mean_pred_gen = np.mean([ag.generation for ag in preds])
            print("::: Preys:\t{}, mean food reserve: {:.3f}, mean generation:"
                  " {:.3f} ".format(len(preys), mean_prey_fr, mean_prey_gen))
            print("::: Predators:\t{}, mean food reserve: {:.3f}, mean "
                  "generation: {:.3f}".format(len(preds), mean_pred_fr,
                                              mean_pred_gen))
            # storage
            batch.append([mean_pred_fr, mean_prey_fr, len(preds),
                          len(preys), mean_pred_gen, mean_prey_gen])
            # back to simulation ----------------------------------------------
            if done or ((ts + 1) % cfg['Sim']['steps'] == 0):
                # save the episode number with number of steps
                epsbatch.append([(i_eps, ts), batch.copy()])
                # clear current batch deque
                batch.clear()
                break
            # create new shuffled agent list
            env.create_shuffled_agent_list()
        # ---------------------------------------------------------------------
        # append memory of remaining agents to history
        if training:
            for ag in env._agents_set:
                if ag.memory.Rewards:  # if that agent actually has memory
                    getattr(env.history, ag.kin).append(ag.memory)
        print("\n: [sim] Episode Runtime: {}"
              "".format(timestamp(return_obj=True) - eps_time))
        # optimization --------------------------------------------------------
        # Only optimize when BOTH kins collected history this episode.
        optimize = all([len(hist) > 0 for hist in env.history]) and training
        if optimize:
            print("\n: [ac] optimizing now...")
            opt_time_start = timestamp(return_obj=True)
            fcalls = {}
            l, mr, sa = ac.finish_episode(model=PreyModel,
                                          optimizer=PreyOptimizer,
                                          history=env.history.OrientedPrey,
                                          gamma=cfg['Network']['gamma'],
                                          return_means=True)
            fcalls['Prey'] = sa
            print(":: [ac] Prey loss:\t{}\t Prey reward: {}"
                  "".format(l.item(), mr))
            avg['mean_prey_loss'].append(l.item())
            avg['mean_prey_rewards'].append(mr)
            l, mr, sa = ac.finish_episode(model=PredatorModel,
                                          optimizer=PredatorOptimizer,
                                          history=env.history.OrientedPredator,
                                          gamma=cfg['Network']['gamma'],
                                          return_means=True)
            fcalls['Predator'] = sa  # sa = selected actions
            print(":: [ac] Predator loss:\t{}\t Predator reward: {}"
                  "".format(l.item(), mr))
            avg['mean_pred_loss'].append(l.item())
            avg['mean_pred_rewards'].append(mr)
            epsbatch[-1].append(fcalls)  # save the function calls
            # FIXME: function calls is still broken!! there is no information
            # about the timesteps! :(
            print("\n: [ac] optimization time: "
                  "{}".format(timestamp(return_obj=True) - opt_time_start))
        else:
            print("\n: [ac] Not enough history to train...")
    print("\n: [sim] Entire simulation runtime: "
          "{}".format(timestamp(return_obj=True) - inittime))
    # save everything
    save()