def read_data(x, y, split):
    with h5py.File(split, "r") as f:
        train_idx = utils.decode(f["train"][...])
        val_idx = utils.decode(f["val"][...])
        test_idx = utils.decode(f["test"][...])
        all_idx = np.concatenate([train_idx, val_idx, test_idx], axis=0)
    with h5py.File(x, "r") as f:
        idx, mat = utils.unique(utils.decode(f["protein_id"][...]), f["mat"])
        assert np.all(np.in1d(all_idx, idx))
        idx_mapper = utils.get_idx_mapper(idx)
        x_train = mat[idx_mapper(train_idx)]
        x_val = mat[idx_mapper(val_idx)]
        x_test = mat[idx_mapper(test_idx)]
    with h5py.File(y, "r") as f:
        idx, mat = utils.unique(utils.decode(f["protein_id"][...]), f["mat"])
        assert np.all(np.in1d(all_idx, idx))
        idx_mapper = utils.get_idx_mapper(idx)
        y_train = mat[idx_mapper(train_idx)]
        y_val = mat[idx_mapper(val_idx)]
        y_test = mat[idx_mapper(test_idx)]
    return utils.DataDict([
        ("x", x_train), ("y", y_train), ("protein_id", train_idx)
    ]), utils.DataDict([
        ("x", x_val), ("y", y_val), ("protein_id", val_idx)
    ]), utils.DataDict([
        ("x", x_test), ("y", y_test), ("protein_id", test_idx)
    ])
def bundle(self):
    """Gets the bundle id of the workflow.

    .. note::

        The bundle id is obtained from one of these sources (in this order):

        1. The environment variables
        2. The workflow information (info.plist)
        3. The workflow settings file (settings.json)

        If not defined in any of these, it defaults to an empty string.

    :return: the bundle id of the workflow.
    :rtype: ``str``.
    """
    if not self._bundle:
        if self.environment('workflow_bundleid'):
            self._bundle = decode(self.environment('workflow_bundleid'))
        elif self.info.get('bundleid'):
            self._bundle = decode(self.info.get('bundleid'))
        elif self.setting('bundleid'):
            self._bundle = decode(self.setting('bundleid'))
        else:
            self._bundle = ''
    return self._bundle
def get_new_mail():
    results = []
    mail = imaplib.IMAP4_SSL(SERVER)
    mail.login(EMAIL, PASSWORD)
    mail.select('inbox')
    status, data = mail.search(None, 'UNSEEN')
    # status, data = mail.search(None, 'UID 10:10000')
    # Gmail offsets UIDs when querying (e.g. UID 1 may correspond to search index 7).
    # A range like N:* always returns the latest message even if it is below N;
    # work around it by using a very high upper bound such as N:1000.
    mail_ids = []
    for block in data:
        mail_ids += block.split()
    mail_ids.sort(key=lambda bite: bite.decode())
    for i in mail_ids:
        status, data = mail.fetch(i, '(RFC822)')
        for response_part in data:
            if isinstance(response_part, tuple):
                message = email.message_from_bytes(response_part[1])
                mail_author, mail_from = process_sender(message["from"])
                mail_subject = message['subject']
                mail_date = message['Date']
                mail_content = ''
                if message.is_multipart():
                    for part in message.get_payload():
                        if part.get_content_type() == 'text/plain':
                            continue
                        if part.get_content_type() == 'text/html':
                            content = part.get_payload()
                            content = extract_text_from_html(content)
                            mail_content += decode(content)
                else:
                    # non-multipart message: the payload is the body itself
                    if message.get_content_type() == 'text/html':
                        print("!!!simple mail")
                        content = message.get_payload()
                        content = extract_text_from_html(content)
                        # content = decodestring(content)
                        # content = content.decode("utf-8")
                        # content = str(content)
                        mail_content += decode(content)
                results.append({
                    "date": mail_date,
                    "mail": mail_from,
                    "author": mail_author,
                    "subject": mail_subject,
                    "body": mail_content,
                    "id": i.decode("utf-8")
                })
    mail.logout()
    return results
def OnRspQrySettlementInfo(self, pSettlementInfo, pRspInfo, nRequestID, bIsLast):
    try:
        print utils.decode(pSettlementInfo.Content)
    except:
        if pSettlementInfo is None:
            pass
        else:
            print pSettlementInfo.Content
def load_metric_list():
    # if not os.path.exists(METRIC_LIST):
    try:
        fp = open(METRIC_LIST)
        ml = decode(fp.read())
        fp.close()
        return ml
    except Exception, e:
        def handle_device(device, metric_group, unmonitored_list):
            if device not in unmonitored_list:
                metric_group.setdefault("instances", [])
                # if not metric_group.has_key('instances'):
                #     metric_group['instances'] = []
                metric_group["instances"].append({"device": device})

        from platform_info import get_network_info
        ifs = get_network_info().keys()
        from block import get_disk_partition
        disk_parts = get_disk_partition()
        metric_conf = load_config(METRIC_CONFIG)
        # df = open(metric_conf['default_list'])
        df = open(DEFAULT_METRIC_LIST)
        default_list = decode(df.read())
        df.close()
        new_list = default_list
        for metric_group in iter(new_list["metric_groups"]):
            name = metric_group["name"]
            if name == "NetModule":
                total = metric_conf["network"]["total"]
                if len(total):
                    handle_device(total, metric_group, [])
                unmonitored_ifs = metric_conf["network"]["black_list"]
                for iname in ifs:
                    handle_device(iname, metric_group, unmonitored_ifs)
            elif name == "DiskModule":
                unmonitored_parts = metric_conf["disk"]["black_list"]
                enable_partitions = int(metric_conf["disk"]["enable_partitions"])
                for dname, dparts in disk_parts.iteritems():
                    handle_device(dname, metric_group, unmonitored_parts)
                    if enable_partitions:
                        for pname in dparts:
                            handle_device(pname, metric_group, unmonitored_parts)
        fp = open(METRIC_LIST, "w")
        fp.write(encode(new_list, True))
        fp.close()
        return new_list
def main():
    with open('4.txt', 'r') as f:
        ip = f.read().split()
    keys = [max(range(256), key=lambda k: score(decode(i, k))) for i in ip]
    print(
        max([{
            "ciphertext": ip[i],
            "key": keys[i],
            "decoded": decode(ip[i], keys[i])
        } for i in range(len(keys))],
            key=lambda k: score(k["decoded"])))
def main():
    translate()
    # decode
    logging.info('Decoding ...')
    utils.decode(src_test_lemma, src_test_lemma_unk, pred_test_lemma_unk, pred_test_lemma)
    # delemmatize
    vocab = pickle.load(open('data/lemmatized/vocab.p', 'rb'))
    logging.info('Delemmatizing ...')
    utils.delemmatize(pred_test_lemma, pred_test, vocab)
    logging.info('Done. Saved predictions to {}'.format(pred_test))
def django_log_add(user, msg, event_id):
    try:
        message = IRCMessage()
        message.event_id = event_id
        message.date = datetime.now()
        message.message = unicode(utils.decode(msg))
        message.nick = unicode(utils.decode(user))
        message.save()
    except UnicodeDecodeError:
        return False
    return True
def search(self, s, page=0, **kw):
    conn = self.pool.get_connection()
    q = conn.query_parse(s)  # conn.spell_correct(s))
    brains = conn.search(q, page * self.n, page * self.n + self.n)
    results = [
        dict(
            id=x.id.replace('public', 'books'),
            title=decode(x.data['title'][0]),
            summary=decode(x.summarise('searchable_text', maxlen=240))
        )
        for x in brains
    ]
    return brains.matches_estimated, results
def build_efficientnet_backbone(cfg, shape):
    version = cfg.MODEL.EfficientNet.VERSION
    assert isinstance(version, int) and 0 <= version <= 7
    w, d, s, p = params_dict["model-b%d" % version]
    # note: all models have drop connect rate = 0.2, really ?
    blocks_args = decode([
        'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25'
    ])
    global_params = GlobalParams(
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=p,
        drop_connect_rate=0.2,
        num_classes=1000,
        width_coefficient=w,
        depth_coefficient=d,
        depth_divisor=8,
        min_depth=None,
        image_size=s)
    model = EfficientNet(blocks_args, global_params,
                         norm=cfg.MODEL.EfficientNet.NORM,
                         model_version=version)
    if cfg.MODEL.WEIGHTS and os.path.exists(cfg.MODEL.WEIGHTS):
        state_dict = torch.load(cfg.MODEL.WEIGHTS, map_location="cpu")
        model.load_state_dict(state_dict, strict=False)
    model.freeze_at(cfg.MODEL.EfficientNet.FREEZE_AT)
    return model
def _colorize(img_paths_batch, out_dir, model, input_tensor, sess):
    img_l_batch = []
    img_l_rs_batch = []
    for img_path in img_paths_batch:
        img = cv2.imread(img_path)
        img_rs = cv2.resize(img, (_INPUT_SIZE, _INPUT_SIZE))
        img_l = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_l = img_l[None, :, :, None]
        img_l_rs = cv2.cvtColor(img_rs, cv2.COLOR_BGR2GRAY)
        img_l_rs = img_l_rs[:, :, None]
        img_l = (img_l.astype(dtype=np.float32)) / 255. * 100 - 50
        img_l_rs = (img_l_rs.astype(dtype=np.float32)) / 255.0 * 100 - 50
        img_l_batch.append(img_l)
        img_l_rs_batch.append(img_l_rs)
    img_l_rs_batch = np.asarray(img_l_rs_batch)
    img_313_rs_batch = sess.run(model, feed_dict={input_tensor: img_l_rs_batch})
    for i in xrange(len(img_paths_batch)):
        img_l = img_l_batch[i]
        img_rgb, _ = utils.decode(img_l, img_313_rs_batch[i:i + 1], _T)
        img_name = os.path.split(img_paths_batch[i])[1]
        io.imsave(os.path.join(out_dir, img_name), img_rgb)
def read_clean(data):
    assert isinstance(data, np.ndarray)
    if data.dtype.type is np.bytes_:
        data = utils.decode(data)
    if data.size == 1:
        data = data.flat[0]
    return data
def __res_server__():
    # template (TARGET, func_name, parameter, content_id)
    template = (TARGET, "?", "?", "?")
    while True:
        try:
            request, request_id = sky_client.take(template, return_id=True, timeout=60000)
            if request is None:
                continue
            _, func_name, param = request
            param = utils.decode(param)
            log().info("REQUEST, func_name=%s, params=%s", func_name, param)
            if func_name in remote_call_book:
                function = remote_call_book[func_name]
                result = function(**param)
                log().info("Result=%s", str(result))
                sky_client.write(tuple=(RECEIVER, request_id, utils.encode(result)), expire=3000)
            else:
                log().warn("invalid func_name:%s", func_name)
        except Exception as e:
            log().error("handle request error, exception:%s\n%s", e, traceback.format_exc())
def gen_license(target_path, nf_dict, nf_src, cp_src):
    target_license_path = os.path.join(target_path, 'NOTICE')
    makedirs(target_path)
    with open(nf_src, 'rt') as f:
        nf_dict["license_content"] = decode(f.read())
    with open(cp_src, 'rt') as f:
        nf_dict["copyright"] = decode(f.read())
    with open(target_license_path, 'at') as f:
        f.write("Software: {}\n\n".format(encode(nf_dict["software"])))
        f.write("Copyright notice: \n{}\n".format(
            encode(nf_dict["copyright"])))
        f.write("License: {}\n{}\n\n".format(
            encode(nf_dict["license_name"]), encode(nf_dict["license_content"])))
def detect_face(net, img, device, scale=1., conf_thresh=0.3):
    # set input x
    if scale != 1:
        img = cv2.resize(img, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    x = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0)
    _, _, height, width = x.shape
    if device.type == 'cuda':
        x = x.to(device)

    # forward pass
    loc, conf, iou = net(x)

    # get bounding boxes from PriorBox layer
    bbox_scale = torch.Tensor([width, height, width, height])
    priorbox = PriorBox(cfg, image_size=(height, width))
    priors = priorbox.forward()
    boxes = decode(loc.squeeze(0).data.cpu(), priors.data, cfg['variance'])
    boxes = boxes[:, :4]  # omit landmarks
    boxes = boxes * bbox_scale / scale
    boxes = boxes.cpu().numpy()

    # get scores
    cls_scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
    iou_scores = iou.squeeze(0).data.cpu().numpy()[:, 0]
    # clamp here for the compatibility for ONNX
    _idx = np.where(iou_scores < 0.)
    iou_scores[_idx] = 0.
    _idx = np.where(iou_scores > 1.)
    iou_scores[_idx] = 1.
    scores = np.sqrt(cls_scores * iou_scores)
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)

    # ignore low scores
    keep_ind = np.where(dets[:, -1] > conf_thresh)[0]
    dets = dets[keep_ind, :]
    return dets
def challenge_10():
    print(f"\n-- Challenge 10 - Implement CBC mode --")
    data_p = bo.pad(16, b"This is a secret message! TOP SECRET")
    key = b"PASSWORDPASSWORD"
    iv = b"1122334455667788"
    ECB_1 = ocl.AESECB(key)
    CBC_1 = ocl.AESCBC(iv, key)
    ECB_ciphertext = ECB_1.encrypt(data_p)
    ECB_plaintext = bo.depad(ECB_1.decrypt(ECB_ciphertext))
    CBC_ciphertext = CBC_1.encrypt(data_p)
    CBC_plaintext = bo.depad(CBC_1.decrypt(CBC_ciphertext))
    print(f"Padded Secret Message : {data_p}")
    print(f"Key : {key}")
    print(f"ECB encrypted message : {ECB_ciphertext}")
    print(f"ECB decrypted message : {ECB_plaintext}")
    print(f"iv : {iv}")
    print(f"CBC encrypted message : {CBC_ciphertext}")
    print(f"CBC decrypted message : {CBC_plaintext}")
    print("----- Part 2 ------")
    data = b64decode(ut.import_data("data_S2C10.txt"))
    key = b"YELLOW SUBMARINE"
    iv = bytes([0]) * 16
    CBC_2 = ocl.AESCBC(iv, key)
    decrypted = decode(bo.depad(CBC_2.decrypt(data)))
    print(f"CBC decrypted message : \n{decrypted[0:90]}...")
def sender_recv(sender_socket):
    while True:
        try:
            raw_data, addr = sender_socket.recvfrom(65536)
        except BlockingIOError:
            continue
        return utils.decode(raw_data)
def run(args):
    tmp_file = args.path + '/' + args.file + '/' + args.job + '.' + args.tagOwner + '.'
    real_path = args.path + "/" + args.file + "/" + args.tagOwner + "." + args.file + ".data"
    if args.n != '':
        return up(int(args.n), tmp_file, args)
    try:
        meta = get_meta(args.path, args.tagOwner, args.file)
        # meta_json = utils.decode(args.meta)
        meta_json = utils.decode(meta)
        meta = utils.from_json(meta_json)
    except:
        return utils.log('err_parser_meta', args.file, 'error')
    if not verify(meta, real_path):
        return utils.log('err_verify_data', args.file, 'error')
    num = int((meta['size'] - 1) / 3000000)
    col_names = list(meta['colName'].split(','))
    if not tmp(real_path, tmp_file, args.salt, col_names, args.encryptedColumn, args.unencryptedColumn):
        return utils.log('err_generate_encrypt', args.file, 'error')
    up(num, tmp_file, args)
    utils.log('info_impact', args.file)
def _reconstruct_single_img(img_name, jbu=False):
    img_path = os.path.join(IMG_DIR, img_name)
    img_id = os.path.splitext(img_name)[0]
    img_rgb = io.imread(img_path)
    if len(img_rgb.shape) != 3 or img_rgb.shape[2] != 3:
        return
    img_rgb = _resize(img_rgb, 224)
    img_lab = color.rgb2lab(img_rgb)
    img_l = img_lab[None, :, :, 0:1]
    # img_rgb_rs = cv2.resize(img_rgb, (_INPUT_SIZE, _INPUT_SIZE))
    # img_lab_rs = color.rgb2lab(img_rgb_rs)
    # img_ab_rs = img_lab_rs[None, :, :, 1:]
    img_ab = img_lab[None, :, :, 1:]
    img_ab_ss = transform.downscale_local_mean(img_ab, (1, 4, 4, 1))
    gt_313 = utils._nnencode(img_ab_ss)
    img_l = (img_l.astype(dtype=np.float32)) / 50. - 1
    img_dec, _ = utils.decode(img_l, gt_313, T, sfm=False, jbu=jbu, jbu_k=_JBU_K)
    io.imsave(
        os.path.join(_OUTPUT_DIR, img_id + '{}.jpg'.format('_jbu' if jbu else '')),
        img_dec)
def _colorize_single_img(img_name, model, input_tensor, sess, jbu=False):
    img_path = os.path.join(IMG_DIR, img_name)
    img = cv2.imread(img_path)
    img = _resize(img)
    img_rs = cv2.resize(img, (_INPUT_SIZE, _INPUT_SIZE))
    if len(img.shape) == 3:
        img_l = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_l = img_l[None, :, :, None]
        img_l_rs = cv2.cvtColor(img_rs, cv2.COLOR_BGR2GRAY)
        img_l_rs = img_l_rs[None, :, :, None]
    else:
        img_l = img[None, :, :, None]
        img_l_rs = img_rs[None, :, :, None]
    # img = _resize(img)
    # img_rgb_sk = io.imread(os.path.join("/srv/glusterfs/xieya/data/imagenet1k_uncompressed/val", img_name))
    # if len(img_rgb_sk.shape) < 3 or img_rgb_sk.shape[2] != 3:
    #     return
    # img_rgb_sk = cv2.resize(img_rgb_sk, (_INPUT_SIZE, _INPUT_SIZE))
    # img_lab = color.rgb2lab(img_rgb_sk)
    # img_lab_rs = transform.downscale_local_mean(img_lab, (4, 4, 1))
    # img_lab_rs[:, :, 0] = 50
    # img_rgb_rs = color.lab2rgb(img_lab_rs)
    # io.imsave(os.path.join(_OUTPUT_DIR, "test_" + img_name), img_rgb_rs)
    img_l = (img_l.astype(dtype=np.float32)) / 255.0 * 2 - 1
    img_l_rs = (img_l_rs.astype(dtype=np.float32)) / 255.0 * 2 - 1
    img_313_rs = sess.run(model, feed_dict={input_tensor: img_l_rs})
    # img_l_rs_rs = np.zeros((1, 56, 56, 1))
    img_rgb, _ = utils.decode(img_l_rs, img_313_rs, T, jbu=jbu, jbu_k=_JBU_K)
    io.imsave(os.path.join(_OUTPUT_DIR, os.path.split(img_name)[1]), img_rgb)
def encrypt(self, user_bytes):
    user_string = decode(user_bytes)
    clean_user_string = user_string.replace(";", '";"').replace("=", '"="')
    byte_string = b"".join(
        [self.prefix, encode(clean_user_string), self.suffix])
    data = bo.pad(16, byte_string)
    return AESCBC(self.iv, self.key).encrypt(data)
def OnRtnOrder(self, pOrder):
    # Called after the order has been submitted (submission accepted, possibly with an error).
    self.order_status_dict[(pOrder.InstrumentID, pOrder.LimitPrice)] = pOrder.OrderStatus
    self.logger.info('-------------------------------------')
    self.logger.info('Instrument ID: %s', pOrder.InstrumentID)
    self.logger.info('Price: %s', pOrder.LimitPrice)
    self.logger.info('Direction: %s', pOrder.Direction)
    self.logger.info('Volume: %s', pOrder.VolumeTotalOriginal)
    self.logger.info('Order submit status: %s', pOrder.OrderSubmitStatus)
    """
    OSS_InsertSubmitted = '0'  # insert submitted
    OSS_CancelSubmitted = '1'  # cancel submitted
    OSS_ModifySubmitted = '2'  # modification submitted
    OSS_Accepted = '3'         # accepted
    OSS_InsertRejected = '4'   # insert rejected
    OSS_CancelRejected = '5'   # cancel rejected
    """
    self.logger.info('Order status: %s', pOrder.OrderStatus)
    """
    OST_AllTraded = '0'              # fully traded
    OST_PartTradedQueueing = '1'     # partially traded, still in queue
    OST_PartTradedNotQueueing = '2'  # partially traded, not in queue
    OST_NoTradeQueueing = '3'        # not traded, still in queue
    OST_NoTradeNotQueueing = '4'     # not traded, not in queue
    OST_Canceled = '5'               # canceled
    """
    self.logger.info('Insert date: %s', pOrder.InsertDate)
    self.logger.info('Insert time: %s', pOrder.InsertTime)
    self.logger.info('Status message: %s', utils.decode(pOrder.StatusMsg))
    self.logger.info('Order price type: %s', pOrder.OrderPriceType)
def hash_transactions(self):
    '''
    get hashed transactions by merkle tree
    '''
    tx_byte_list = [utils.serialize(tx) for tx in self._transaction]
    merkle_tree = MerkleTree(tx_byte_list)
    return utils.decode(binascii.hexlify(merkle_tree.root_hash))
def get_task(chat_id, user_id, username, task_id):
    task = db.hget(f'/tasks/chat_id/{chat_id}', task_id)
    if task is None:
        return 'No task with such id', None
    task = decode(task)
    response = f'''Task id: {task_id}
Title: {task["title"]}
Status: {task["status"]}
Created: {readable_time(task["created"])}
Modified: {readable_time(task["modified"])}
Assignee: {task["assignee"]}
Assignee id: {task["assignee_id"]}
Description: {task["description"]}'''
    keyboard = types.InlineKeyboardMarkup(row_width=3)
    btns = []
    for status in Status.ALL:
        btns.append(
            types.InlineKeyboardButton(
                text=status,
                # cmd, status, user_id, username, task_id=data
                callback_data=f"set_status_task:{status}:{user_id}:"
                              f"{username}:{task_id}"))
    keyboard.add(*btns)
    return response, keyboard
def init(self):
    global mysql_active
    if mysql_active:
        try:
            logging.debug("Database: MySQL: Started")
            self.engine = create_engine(
                "mysql://" + self.config['database.mysql']['username'] + ":" +
                utils.decode(self.config['general']['install_id'],
                             self.config['database.mysql']['password']) +
                "@" + self.config['database.mysql']['host'] + ":" +
                str(self.config['database.mysql']['port']) + "/" +
                self.config['database.mysql']['database'] + "?charset=utf8",
                pool_recycle=3600, echo=False)
            logging.info("Database: MySQL: " + self.config['database.mysql']['database'] +
                         " (" + self.config['database.mysql']['host'] + ":" +
                         str(self.config['database.mysql']['port']) + ")")
        except Exception, e:
            mysql_active = False
            logging.error("Database: MySQL: Failed (" + self.config['database.mysql']['database'] +
                          " [" + self.config['database.mysql']['host'] + ":" +
                          str(self.config['database.mysql']['port']) + "])")
            logging.error("Database: MySQL: Failed (" + str(e) + ")")
            logging.warning("Database: Switching to SQLite Engine")
def save_to_disk(image_dir, dg, spa, p, png=False, max_samples_per_category=50000):
    if png:
        from PIL import Image
    encoder = sampling_encoder(p)
    decoder = decode(encoder)
    print('decoder:', decoder)
    train_counter = {k: 0 for k in decoder.values()}
    counter = {k: 0 for k in decoder.values()}
    for v in decoder.values():
        directory = os.path.join(image_dir, v)
        if not os.path.exists(directory):
            os.makedirs(directory)
    for x, y in dg:
        yname = decoder[int(np.argmax(y))]
        if counter[yname] > max_samples_per_category:
            continue
        filename = '{:06}'.format(counter[yname])
        fullname = os.path.join(image_dir, yname, filename)
        if png:
            im = Image.fromarray(
                np.squeeze((x / np.max(x)) * 255).astype('uint8').T)
            im.save(fullname + '.png')
        else:
            np.save(fullname + '.npy', np.squeeze(x))
        counter[yname] += 1
def ex_json(post_data):
    post_data = json.loads(post_data)
    for p_name, p_value in post_data.items():
        p_id = get_md5(self.data["host"] + self.path + decode(p_name) + self.data["method"])
        p_state = self.get_Ostate(str(p_value))
        p_type = "post"
        yield (p_id, p_state, p_type, p_name)
def __init__(self, data):
    self.parameter = {}
    self.data = data
    self.uri = urllib.unquote(data["uri"].encode("utf-8"))
    self.path = decode(get_path(self.uri))
    self.payload = get_payload(self.uri).strip("?")
    self.get_parameter()
def get_Ostate(self, s):
    """
    letters  => 'A'
    digits   => 'N'
    Chinese  => 'C'
    special characters are kept unchanged
    :param s:
    :return:
    """
    A = self.get_num('A')
    N = self.get_num("N")
    C = self.get_num("C")
    state = []
    if not isinstance(s, unicode):
        s = decode(str(s))
    if len(s) == 0:
        # an empty string maps to 0
        state.append([0])
        return state
    # s = str(s).decode("utf-8", "ignore")
    for i in s:
        if i.encode("utf-8").isalpha():
            state.append([A])
        elif i.isdigit():
            state.append([N])
        elif is_chinese(i):
            state.append([C])
        else:
            state.append([self.get_num(i)])
    return state
def batch_predict(
    model,
    data_loader,
    rel_vocab,
    word_pad_index,
    word_bos_index,
    word_eos_index,
):
    model.eval()
    arcs, rels = [], []
    for inputs in data_loader():
        if args.encoding_model.startswith("ernie") or args.encoding_model == "lstm-pe":
            words = inputs[0]
            words, feats = flat_words(words)
            s_arc, s_rel, words = model(words, feats)
        else:
            words, feats = inputs
            s_arc, s_rel, words = model(words, feats)
        mask = paddle.logical_and(
            paddle.logical_and(words != word_pad_index, words != word_bos_index),
            words != word_eos_index,
        )
        lens = paddle.sum(paddle.cast(mask, "int32"), axis=-1)
        arc_preds, rel_preds = decode(s_arc, s_rel, mask)
        arcs.extend(paddle.split(paddle.masked_select(arc_preds, mask), lens.numpy().tolist()))
        rels.extend(paddle.split(paddle.masked_select(rel_preds, mask), lens.numpy().tolist()))
    arcs = [[str(s) for s in seq.numpy().tolist()] for seq in arcs]
    rels = [rel_vocab.to_tokens(seq.numpy().tolist()) for seq in rels]
    return arcs, rels
def checking_changing_status(cmd, message, status):
    """
    Before running the test, create a single task with key 1.
    """
    key = db.incr(f'/tasks/chat_id/{message.chat.id}/last_task_id')
    task = {
        'title': 'TestTitle',
        'description': 'TeskDesc',
        'created': 0,
        'modified': 0,
        'status': Status.DO if status is Status.TODO else Status.TODO,
        'assignee': '',
        'assignee_id': '',
    }
    db.hset(f'/tasks/chat_id/{message.chat.id}', key, json.dumps(task).encode())
    if status is Status.DO:
        do(message)
    elif status is Status.TODO:
        todo(message)
    elif status is Status.DONE:
        done(message)
    else:
        assert status in Status.ALL
    assert handlers.bot.reply_to.called
    task = decode(db.hget(f'/tasks/chat_id/{message.chat.id}', key))
    assert task['status'] == status
def forward(self, face_conf, face_locdata):
    priors = pyramidAnchors(640)
    face_confdata_0, _ = torch.max(face_conf[:, :, 0:3], dim=2, keepdim=True)
    face_confdata_1 = face_conf[:, :, 3:4]
    face_confdata = F.softmax(torch.cat((face_confdata_0, face_confdata_1), dim=2), dim=2)  # [n, prior_num, 2]
    conf_pred = face_confdata.transpose(2, 1)
    num = face_conf.size(0)
    output = torch.zeros(num, self.top_k, 5)
    prs = torch.Tensor(priors[0]).to(self.device)
    for i in range(1, len(priors)):
        prs = torch.cat((prs, torch.Tensor(priors[i]).to(self.device)), 0)  # [prior_num, 4]
    for i in range(num):
        conf_scores = conf_pred[i].clone()
        c_mask = conf_scores[0].gt(self.confidence_thred)
        scores = conf_scores[0][c_mask]
        if scores.dim() == 0:
            continue
        decoded_boxes = decode(face_locdata[i], prs)
        l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
        boxes = decoded_boxes[l_mask].view(-1, 4)
        ids, count = nms(boxes, scores, self.nms_thred, self.top_k)
        output[i, :count] = \
            torch.cat((scores[ids[:count]].unsqueeze(1), boxes[ids[:count]]), 1)
    return output
def tee(process, filter):
    """Read lines from process.stdout and echo them to sys.stdout.

    Returns a list of lines read. Lines are not newline terminated.

    The 'filter' is a callable which is invoked for every line,
    receiving the line as argument. If the filter returns True, the
    line is echoed to sys.stdout.
    """
    global _tee2_exit_flag
    lines = []
    while True:
        line = process.stdout.readline()
        if line:
            if sys.version_info[0] >= 3:
                line = decode(line)
            stripped_line = line.rstrip()
            if filter(stripped_line):
                sys.stdout.write(line)
            lines.append(stripped_line)
        elif process.poll() is not None:
            _tee2_exit_flag = True
            process.stdout.close()
            break
    return lines
def get_credentials():
    config = ConfigParser.ConfigParser()
    config.read([utils.get_config_file()])
    if not config.has_option('user', 'username'):
        return None, None
    if not config.has_option('user', 'password'):
        return None, None
    return config.get('user', 'username'), utils.decode(config.get('user', 'password'))
def load_tabs_with_paths(self, paths):
    for path in paths:
        tab = TabVfs(self)
        err = tab.init(utils.decode(path))
        if err:
            tab.init(os.path.abspath(u'.'))
        self.tabs.append(tab)
    self.act_tab = self.tabs[0]
def change_encoding(self, text, current_encoding):
    try:
        decoded_text, encoding = decode(text, self._encodings, current_encoding)
    except Exception:
        traceback.print_exc()
        self.log.error("cannot decode subtitles")
        raise DecodeError()
    return decoded_text, encoding
def change_encoding(self, text, current_encoding):
    try:
        decoded_text, encoding = decode(text, self._encodings, current_encoding)
    except Exception:
        traceback.print_exc()
        print '[SubsLoader] cannot decode subtitles'
        raise DecodeError()
    return decoded_text, encoding
def read_result(self, worker=None):
    """Reads worker's stdout and expects a result message.

    Returns result.
    """
    if worker is None:
        worker = self._worker
    msg = self.read_message(worker=worker)
    assert_equal("result", msg["type"])
    assert_true("result" in msg)
    result = decode(msg["result"])
    return result
def dehydrate(self, bundle):
    # phone number is encoded so decode it in response
    try:
        bundle.data['phone_number'] = decode(PHONE_KEY, str(bundle.data['phone_number']))
    except:
        pass
    return bundle
def getoutput(self, *args, **kw):
    rc, output = self.popen(*args, **dict(kw, stdout=subprocess.PIPE))
    if rc == 0 and output is not None:
        if sys.version_info[0] >= 3:
            output = decode(output)
        if output.strip():
            # Return first line only
            return output.split('\n', 1)[0]
    return ''
def read_from_temp_file(self, log_file):
    """
    Read data from the process log file.

    @param log_file str, Path to file with log
    @return t.i.d.Deferred, str
    """
    log_file.seek(0)
    buff = log_file.read()
    data = decode(buff)
    return data
def get_fileinfo_dict(path, filename, filevalues):
    """return a dict with file information"""
    res = {}
    res['filename'] = filename
    typ = filevalues[FT_TYPE]
    res['type_chr'] = FILETYPES[typ][0]
    if typ in (FTYPE_CDEV, FTYPE_BDEV):
        # HACK: it's too time consuming to calculate all files' rdevs
        # in a directory, so we just calculate needed ones here at show time
        maj_rdev, min_rdev = get_rdev(os.path.join(path, filename))
        res['size'] = 0
        res['maj_rdev'] = maj_rdev
        res['min_rdev'] = min_rdev
        res['dev'] = 1
    else:
        size = filevalues[FT_SIZE]
        if size >= 1000000000L:
            size = str(size / (1024 * 1024)) + 'M'
        elif size >= 10000000L:
            size = str(size / 1024) + 'K'
        else:
            size = str(size)
        res['size'] = size
        res['maj_rdev'] = 0
        res['min_rdev'] = 0
        res['dev'] = 0
    res['perms'] = perms2str(filevalues[1])
    res['owner'] = filevalues[FT_OWNER]
    res['group'] = filevalues[FT_GROUP]
    if -15552000 < (time.time() - filevalues[FT_MTIME]) < 15552000:
        # filedate < 6 months from now, past or future
        mtime = time.strftime('%a %b %d %H:%M', time.localtime(filevalues[FT_MTIME]))
        mtime2 = time.strftime('%d %b %H:%M', time.localtime(filevalues[FT_MTIME]))
    else:
        mtime = time.strftime('%a %d %b %Y', time.localtime(filevalues[FT_MTIME]))
        mtime2 = time.strftime('%d %b %Y', time.localtime(filevalues[FT_MTIME]))
    res['mtime'] = decode(mtime)
    res['mtime2'] = decode(mtime2)
    return res
def get_dir(path, show_dotfiles=1):
    """return a dict whose elements are formed by file name as key and a
    (filetype, perms, owner, group, size, mtime) tuple as value"""
    # bug in python: os.path.normpath(u'/') returns str instead of unicode,
    # so convert to unicode anyway
    path = decode(os.path.normpath(path))
    files_dict = {}
    if path != os.sep:
        files_dict[os.pardir] = get_fileinfo(os.path.dirname(path), 1)
    files_list = os.listdir(path)
    if not show_dotfiles:
        files_list = [f for f in files_list if f[0] != '.']
    for f in files_list:
        if not isinstance(f, unicode):
            newf = decode(f)
            if ask_convert_invalid_encoding_filename(newf):
                convert_filename_encoding(path, f, newf)
            f = newf
        files_dict[f] = get_fileinfo(os.path.join(path, f))
    return len(files_dict), files_dict
def _process_path(self, subfile, current_encoding=None):
    try:
        text = load(subfile)
    except (URLError, HTTPError, IOError) as e:
        print '[SubsLoader] "{0}", cannot load: {1}'.format(subfile, e)
        raise LoadError(subfile)
    try:
        decoded_list, encoding = decode(text, self._encodings, current_encoding)
    except Exception as e:
        print '[SubsLoader] "{0}" cannot decode: {1}'.format(subfile, e)
        raise DecodeError(subfile)
    return decoded_list, encoding
def marshal(self):
    '''
    Marshals the accessible relation to an element tree.

    :return: The marshaled accessible relation
    :rtype: xml.etree.Element
    '''
    element = etree.Element("relation")
    etree.SubElement(element, "type").text = decode(self.type)
    targets = etree.SubElement(element, "targets")
    for target in self._targets:
        target.marshal(etree.SubElement(targets, "target"))
    return element
def wrapper(*args, **kwargs):
    func(*args, **kwargs)
    func_name = func.func_name
    param = utils.get_func_arguments(func, args, kwargs)
    log().info("CALL %s, param = %s", func_name, param)
    cid = sky_client.write(tuple=(TARGET, func_name, utils.encode(param)), fill_content_id=True)
    result = sky_client.take(template=(RECEIVER, cid, "?"))
    if result is None or len(result) < 3:
        log().warn("request failed, cid=%s", cid)
        log().warn("result=%s", str(result))
        return
    return utils.decode(result[RESULT_IDX_CONTENT])
def run(self):
    self.connect()
    self.connected = True
    while self.connected:
        try:
            data = utils.decode(self.socket.recv(2048))
            self.ibuffer += data
            while "\r\n" in self.ibuffer:
                reload_handlers()
                reload_plugins()
                reload_config()
                line, self.ibuffer = self.ibuffer.split("\r\n", 1)
                line = line.strip()
                try:
                    func = globals()["handle_" + utils.parseArgs(line).type]
                except KeyError:
                    log.warn("No handler for %s found", utils.parseArgs(line).type)
                else:
                    func(self, utils.parseArgs(line))
                log.debug("(%s) -> %s", self.netname, line)
                self.rx += len(line)
                self.rxmsgs += 1
                args = line.split(" ")
                if not args:
                    return
                if args[1] == "PONG":
                    self.pingtime = int(time.time() - self.lastping)
                    self.lastping = time.time()
                    if self.pingtime - self.pingfreq > self.pingwarn:
                        log.warn("(%s) Lag: %s seconds", self.netname,
                                 round(self.pingtime - self.pingfreq, 3))
        except KeyboardInterrupt:
            self.pingTimer.stop()
            self.schedulePing()
            # writes nicks and channels to files,
            # will be moved to own function eventually
            self.disconnect("CTRL-C at console.")
def authenticate_user_token(blob):
    "authenticate a user from token"
    if not blob:
        return None
    token_store = UserTokenStore()
    raw_token = utils.decode(blob)
    token = token_store.find(raw_token)
    user = None
    if token:
        user = SecurityService.userid_to_user(token.user.name)
        if user.activation_code:
            user = None
    return user
def get_interface_description(cmd):
    """Returns the XML description for the GRASS cmd (force text encoding to
    "utf-8").

    The DTD must be located in $GISBASE/gui/xml/grass-interface.dtd,
    otherwise the parser will not succeed.

    :param cmd: command (name of GRASS module)
    """
    try:
        p = Popen([cmd, '--interface-description'], stdout=PIPE, stderr=PIPE)
        cmdout, cmderr = p.communicate()

        # TODO: do it better (?)
        if not cmdout and sys.platform == 'win32':
            # we in fact expect pure module name (without extension)
            # so, lets remove extension
            if cmd.endswith('.py'):
                cmd = os.path.splitext(cmd)[0]
            if cmd == 'd.rast3d':
                sys.path.insert(0, os.path.join(os.getenv('GISBASE'),
                                                'etc', 'gui', 'scripts'))
            p = Popen([sys.executable, get_real_command(cmd),
                       '--interface-description'], stdout=PIPE, stderr=PIPE)
            cmdout, cmderr = p.communicate()
            if cmd == 'd.rast3d':
                del sys.path[0]  # remove gui/scripts from the path
        if p.returncode != 0:
            raise ScriptError, _("Unable to fetch interface description for command '%(cmd)s'."
                                 "\n\nDetails: %(det)s") % {'cmd': cmd, 'det': decode(cmderr)}
    except OSError as e:
        raise ScriptError, _("Unable to fetch interface description for command '%(cmd)s'."
                             "\n\nDetails: %(det)s") % {'cmd': cmd, 'det': e}
    desc = cmdout.replace('grass-interface.dtd',
                          os.path.join(os.getenv('GISBASE'), 'gui', 'xml',
                                       'grass-interface.dtd'))
    return convert_xml_to_utf8(desc)
def read(self):
    if not self.handshaken:
        data = self.client.recv(1024)
        self.do_handshake(data)
    else:
        self.data += self.client.recv(1024)
        res = decode(self.data)
        if res['remaining'] == 0:
            self.data = ''
            if res['data']:
                self.onmessage(res['data'])
            if res['opcode'] == Opcode.CLOSE:
                self.close()
def livexml1():
    code = request.args.get('id')
    lon_fixed = request.args.get('l')
    lines = list()
    lines.append('<?xml version="1.0" encoding="UTF-8"?>')
    lines.append('<markers>')
    address = decode(code)
    polyline = get_polyline(address)
    error = 1 if polyline == '' else 0
    lines.append('<m e="{0}" i="{1}" r="{2}"/>'.format(error, code, polyline))
    lines.append('</markers>')
    xml = '\n'.join(lines)
    return Response(xml, mimetype='text/xml')
def _process_path(self, subfile, current_encoding=None):
    filename = os.path.basename(subfile)
    size = getFileSize(subfile)
    if size and size > SUBTITLES_FILE_MAX_SIZE:
        self.log.error("<%s> not supported subtitles size ({%d}KB > {%d}KB)!",
                       filename, size / 1024, SUBTITLES_FILE_MAX_SIZE / 1024)
        raise LoadError('"%s" - not supported subtitles size: "%dKB"' % (
            toString(os.path.basename(subfile)), size / 1024))
    try:
        text = load(subfile)
    except (URLError, HTTPError, IOError) as e:
        self.log.error("<%s> %s", filename, str(e))
        raise LoadError(subfile)
    try:
        decoded_text, encoding = decode(text, self._encodings, current_encoding)
    except Exception as e:
        self.log.error("<%s> %s", filename, "cannot decode")
        raise DecodeError(subfile)
    return decoded_text, encoding
def args(self):
    """Gets the arguments passed to the workflow.

    .. note::

        The arguments are split using whitespace as delimiter for each
        argument; the method also handles the workflow actions when the
        workflow has been marked as ``actionable``.

        Every action can add new items to the workflow, in which case, the
        execution of the action workflow is stopped and the items in the
        action are displayed.

    .. seealso::

        :class:`WorkflowActions`
        :class:`WorkflowSettings`

    :return: a list of arguments.
    :rtype: :class:`list`.
    """
    feedback = False
    args = [decode(arg) for arg in sys.argv[1:]]
    if len(args) == 1:
        args = args[0].split(' ')
    if len(args) and self.setting('actionable'):
        index = args.index('>') if '>' in args else -1
        if index > -1:
            parameters = args[index + 1:]
            if len(parameters) > 0:
                command = parameters.pop(0)
                if command in self.actions:
                    feedback = self.actions.get(command)(*parameters)
                else:
                    feedback = self.actions.defaults(command)
            else:
                feedback = self.actions.defaults()
            if feedback:
                self.feedback()
                sys.exit(0)
    return args
def environment(self, variable):
    """Gets information about the environment (Alfred environment variables)

    .. note::

        The table below describes the environment variables that are
        collected by the workflow.

        +--------------------------+-------------------+
        | Alfred Env Variable      | Workflow Variable |
        +==========================+===================+
        | alfred_version           | version           |
        +--------------------------+-------------------+
        | alfred_version_build     | version_build     |
        +--------------------------+-------------------+
        | alfred_workflow_bundleid | workflow_bundleid |
        +--------------------------+-------------------+
        | alfred_workflow_uid      | workflow_uid      |
        +--------------------------+-------------------+
        | alfred_workflow_name     | workflow_name     |
        +--------------------------+-------------------+
        | alfred_workflow_cache    | workflow_cache    |
        +--------------------------+-------------------+
        | alfred_workflow_data     | workflow_data     |
        +--------------------------+-------------------+

    .. seealso::

        `Alfred Environment Variables
        <https://www.alfredapp.com/help/workflows/script-environment-variables/>`_

    :param variable: the variable we are looking for.
    :type variable: ``str``.
    :return: the value of the variable.
    :rtype: ``int`` for ``version_build``; ``str`` for the rest of the variables.
    """
    if not self._environment:
        if not sys.stdout.isatty():
            self._environment = {
                'version': decode(os.getenv('alfred_version')),
                'version_build': int(os.getenv('alfred_version_build')),
                'workflow_bundleid': decode(os.getenv('alfred_workflow_bundleid')),
                'workflow_uid': decode(os.getenv('alfred_workflow_uid')),
                'workflow_name': decode(os.getenv('alfred_workflow_name')),
                'workflow_cache': decode(os.getenv('alfred_workflow_cache')),
                'workflow_data': decode(os.getenv('alfred_workflow_data'))
            }
        else:
            self._environment = {}
    return self._environment[variable] if variable in self._environment else None
def tee2(process, filter):
    """Read lines from process.stderr and echo them to sys.stderr.

    The 'filter' is a callable which is invoked for every line,
    receiving the line as argument. If the filter returns True, the
    line is echoed to sys.stderr.
    """
    global _tee2_exit_flag
    _tee2_exit_flag = False
    while True:
        line = process.stderr.readline()
        if line:
            if sys.version_info[0] >= 3:
                line = decode(line)
            stripped_line = line.rstrip()
            if filter(stripped_line):
                sys.stderr.write(line)
        elif _tee2_exit_flag:
            process.stderr.close()
            break
def addtoindex(self, page, soup):
    if self.isindexed(page):
        return False
    try:
        name = re.search(
            '^(.*) - MyAnimeList\.net$', str(soup('title')[0].contents[0])
        ).group(1)
        member_favorites = str(soup('span', text='Member Favorites:')[0].findParent().findParent().contents[1])
    except:
        print "Page parsing error."
        return False
    print "People Info Crawler. Indexing: %s (%s)" % (page, utils.decode(name))
    try:
        given_name = str(soup('span', text='Given name:')[0].findParent().findParent().contents[1])
        family_name = str(soup('span', text='Family name:')[0].findParent().findParent().contents[7])
    except:
        given_name = ''
        family_name = ''
    try:
        birthday = str(soup('span', text='Birthday:')[0].findParent().findParent().contents[1])
    except:
        birthday = ''
    try:
        more = ' '.join(
            map(
                str,
                soup('span', text='More:')[0].findParent('td').contents[15:]
            )
        )
    except:
        more = ''
    self.insertrow(
        (str(page).decode('utf-8', 'ignore'),
         str(name).decode('utf-8', 'ignore'),
         str(member_favorites).decode('utf-8', 'ignore'),
         str(given_name).decode('utf-8', 'ignore'),
         str(family_name).decode('utf-8', 'ignore'),
         str(birthday).decode('utf-8', 'ignore'),
         str(more).decode('utf-8', 'ignore'))
    )
def return_json(self, login=False):
    """
    creating json response that mirrors that of django tastypie
    for custom login method
    * includes phone number if present
    """
    profile_data = serializers.serialize('json', [self, self.user])
    p_json = json.loads(profile_data)
    profile_json = p_json[0]['fields']
    user_json = p_json[1]['fields']
    try:
        del user_json['is_active']
        del user_json['is_superuser']
        del user_json['is_staff']
        del user_json['groups']
        del user_json['user_permissions']
        del user_json['password']
    except KeyError:
        pass
    profile_json['user'] = user_json
    profile_json['my_friends'] = self.friends
    profile_json['friend_requests'] = self.friend_requests
    profile_json['my_challenges'] = self.my_challenges
    profile_json['received_challenges'] = self.received_challenges
    profile_json['api_key'] = self.user.api_key.key
    if self.phone_number:
        profile_json['phone_number'] = decode(PHONE_KEY, str(profile_json['phone_number']))
    if login:
        profile_json['code'] = 1
        profile_json['message'] = 'Login was successful.'
    return profile_json