def wrapper(*args, **args2):
    t0 = time.time()
    logger.info(u"calling function: {%s.%s}" % (func.__module__, func.__name__))
    back = func(*args, **args2)
    logger.info("called function: {%s.%s}, elapsed: %.3fs"
                % (func.__module__, func.__name__, time.time() - t0))
    return back
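# The wrapper above references an outer `func`, so it is the inner function of a
# timing decorator. A minimal self-contained sketch of such a decorator follows;
# the decorator name `log_time` and the module-level logger are assumptions, not
# taken from the original source.
import functools
import logging
import time

logger = logging.getLogger(__name__)

def log_time(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        t0 = time.time()
        logger.info("calling function: {%s.%s}" % (func.__module__, func.__name__))
        back = func(*args, **kwargs)
        logger.info("called function: {%s.%s}, elapsed: %.3fs"
                    % (func.__module__, func.__name__, time.time() - t0))
        return back
    return wrapper

# Usage: place @log_time above any function whose call duration should be logged.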
def write_copy_numbers_to_cnv(cnv_dict, base_xml_name, output):
    logger.info("Saving copy numbers to cnv file")
    with open(
        f"{output}/{base_xml_name}/{base_xml_name}.copynumber.csv",
        "w",
    ) as csvfile:
        csv_writer = csv.writer(csvfile, delimiter=",")
        csv_writer.writerow(
            [
                "sample_id",
                "gene",
                "copy_number",
                "status",
                "attributes",
                "chromosome",
                "start_position",
                "end_position",
                "interpretation",
            ]
        )
        for cnv in cnv_dict["CopyNumbers"]:
            csv_writer.writerow(
                [
                    cnv["sample_id"],
                    cnv["gene"],
                    cnv["copy_number"],
                    cnv["status"],
                    cnv["attributes"],
                    cnv["chromosome"],
                    cnv["start_position"],
                    cnv["end_position"],
                    cnv["interpretation"],
                ]
            )
def eval_epoch(val_loader, model, model_fn, epoch):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    am_dict = {}

    with torch.no_grad():
        model.eval()
        start_epoch = time.time()
        for i, batch in enumerate(val_loader):
            ##### prepare input and forward
            loss, preds, visual_dict, meter_dict = model_fn(batch, model, epoch)

            ##### meter_dict
            for k, v in meter_dict.items():
                if k not in am_dict.keys():
                    am_dict[k] = utils.AverageMeter()
                am_dict[k].update(v[0], v[1])

            ##### print
            sys.stdout.write("\riter: {}/{} loss: {:.4f}({:.4f})".format(
                i + 1, len(val_loader), am_dict['loss'].val, am_dict['loss'].avg))
            if (i == len(val_loader) - 1):
                print()

        logger.info("epoch: {}/{}, val loss: {:.4f}, time: {}s".format(
            epoch, cfg.epochs, am_dict['loss'].avg, time.time() - start_epoch))

        for k in am_dict.keys():
            if k in visual_dict.keys():
                writer.add_scalar(k + '_eval', am_dict[k].avg, epoch)
def http_check(self, type='POST', media_type='application/json', data=None, header={}, timeout=1000):
    logger.info("-------BaseWS Dubbo service monitor-------")
    success = 0
    fail = 0
    result_flag = False
    time_cost = []
    times = 0
    for i in xrange(SAMPLING_COUNT):
        # GET request: send it directly
        if type == 'GET':
            http_result = httpUtil.do_get(url=WS_URL, timeout=timeout)
            result_parser = lambda x: x.find(SUCCESS_FLAG) >= 0
            result_flag = result_parser(http_result[0])
            time_cost.append(http_result[1])
            # logger.info('result=%s, result_flag=%s ,time=%s' % (http_result[0], result_flag, http_result[1]))
            times += 1
            if result_flag:
                success += 1
            else:
                fail += 1
    if not result_flag:
        analyzeLog = analyzeUtil.analyze(time_cost, success, fail)
        if fail > MAX_FAIL:
            analyzeLog = analyzeUtil.analyze2html(time_cost, success, fail)
            msg = CONTENT % {'failTimes': fail, 'times': MAX_FAIL,
                             'currentTime': dateUtil.getCurrentTime(),
                             'analyzeLog': analyzeLog}
            logger.info('send_email,message=' + msg)
            mailUtil.send_email(msg, SUB, TO_LIST)
def event_LOCATION(self, msg):
    # Need log to record location information of users
    openid = msg['FromUserName']
    Latitude = msg['Latitude']
    Longitude = msg['Longitude']
    # print Latitude, Longitude
    user = self.db.get_openid(openid)
    if not user:
        user = self.get_subscribed_user(openid)
        # insert info to mysql
        self.db.insert_item(user)
    if user:
        # self.db.execute("update user set latitude=%s,longitude = %s"
        #                 "where openid = %s",
        #                 Latitude, Longitude, openid)
        valueDict = {}
        valueDict['latitude'] = Latitude
        valueDict['longitude'] = Longitude
        self.db.update_item(openid, valueDict)
        logger.info("update location of user, openid = " + openid +
                    ", latitude = " + Latitude + ", Longitude = " + Longitude)
    else:
        logger.error("get user from weixin error, openid = " + openid)
def get_dataset(self, path: str, fields=Fields, separator=' '):
    logger.info('loading dataset from {}'.format(path))
    st_dataset = CustomSequenceTaggingDataset(path, fields=fields, separator=separator)
    logger.info('successfully loaded dataset')
    return st_dataset
def save(self, path=None, type="evidence"):
    path = path if path else self.save_path
    if not os.path.isdir(path):
        os.mkdir(path)
    model_path = os.path.join(path, '%s_model.pkl' % type)
    torch.save(self.state_dict(), model_path)
    logger.info('saved model to {}'.format(model_path))
def __init__(self, args):
    super(BiLstmCrf, self).__init__(args)
    self.args = args
    self.hidden_dim = 300
    self.tag_num = args.tag_num
    self.batch_size = args.batch_size
    self.bidirectional = True
    self.num_layers = args.num_layers
    self.pad_index = args.pad_index
    self.dropout = args.dropout
    self.save_path = args.save_path

    vocabulary_size = args.vocabulary_size
    embedding_dimension = args.embedding_dim
    self.embedding = nn.Embedding(vocabulary_size, embedding_dimension).to(DEVICE)
    if args.static:
        logger.info('loading word vectors from {}/{}'.format(
            args.pretrained_path, args.pretrained_name))
        vectors = Vectors(args.pretrained_name, args.pretrained_path).vectors
        self.embedding = self.embedding.from_pretrained(
            vectors, freeze=not args.non_static).to(DEVICE)

    self.lstm = nn.LSTM(embedding_dimension,
                        self.hidden_dim // 2,
                        bidirectional=self.bidirectional,
                        num_layers=self.num_layers,
                        dropout=self.dropout).to(DEVICE)
    self.hidden2label = nn.Linear(self.hidden_dim, self.tag_num).to(DEVICE)
    self.crflayer = CRF(self.tag_num).to(DEVICE)
def init():
    global result_dir
    result_dir = os.path.join(
        cfg.exp_path, 'result',
        'epoch{}_nmst{}_scoret{}_npointt{}'.format(
            cfg.test_epoch, cfg.TEST_NMS_THRESH, cfg.TEST_SCORE_THRESH, cfg.TEST_NPOINT_THRESH),
        cfg.split)
    backup_dir = os.path.join(result_dir, 'backup_files')
    os.makedirs(backup_dir, exist_ok=True)
    os.makedirs(os.path.join(result_dir, 'predicted_masks'), exist_ok=True)
    os.system('cp test.py {}'.format(backup_dir))
    os.system('cp {} {}'.format(cfg.model_dir, backup_dir))
    os.system('cp {} {}'.format(cfg.dataset_dir, backup_dir))
    os.system('cp {} {}'.format(cfg.config, backup_dir))

    global semantic_label_idx
    semantic_label_idx = [1, 2, 3, 4, 5, 6, 7, 8]

    logger.info(cfg)

    random.seed(cfg.test_seed)
    np.random.seed(cfg.test_seed)
    torch.manual_seed(cfg.test_seed)
    torch.cuda.manual_seed_all(cfg.test_seed)
def send_sms(self, request):
    logger.info('send sms, request[%s]' % request)
    if request.mobile == '' or request.content == '':
        logger.warning('param error request[%s]' % request)
        return PARAM_ERROR
    param = self.__build_param(request)
    result = http.request(SEND_SMS_URL, param, 'GET', 10)
    if not result:
        return HTTP_REQUEST_ERROR
    logger.debug('result:%s' % result)
    root = None
    sid = request.sid
    errno = 0
    task_id = ''
    valid = 0
    dbstatus = 0
    try:
        root = ET.fromstring(result)
        status = root.find('returnstatus').text
        if status != 'Success':
            msg = root.find('message').text
            logger.warning('send failed, msg[%s]' % msg)
            errno = SEND_FAILED
        else:
            task_id = root.find('taskID').text
            dbstatus = 1
            valid = 1
    except xml.etree.ElementTree.ParseError:
        logger.warning("invalid result[%s] request[%s]" % (result, request))
        errno = THIRD_SERVER_ERROR
    except Exception as e:
        logger.warning('xml parse exception, result[%s] request[%s]' % (result, request))
def storyWriteTxt(storyno):
    # for storyno in storynos:
    # Work out the file name to write to
    # storyTitle = getStoryTitle(storyno)
    storyTitle = db.getStoryTitle(storyno)
    loggering.info(storyTitle)
    path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    filename = os.path.join(path, "story\\" + storyTitle + ".txt")
    # Fetch the chapters to write to the txt file
    nums = db.getDownLoadChapternum(storyno)
    storydict = db.getStoryText(nums)
    # if state:
    #     os.remove(filename)
    for k, v in storydict.items():
        value = k + "\n" + v + "\n"
        with open(filename, "a", encoding="utf-8") as f:
            try:
                f.write(value)
            except Exception as e:
                loggering.warn(e)
    db.changeState(nums)

# no = 82785
# storyWriteTxt(no)
# nums = getStoryChapterNum(no)
# dict = getStoryText(nums)
# storyWriteTxt(dict, no, 1)
def run(self):
    get_sensitive_keywords()
    print(self.sensitive_keywords)
    sum_items = 0
    sum_tp = 0
    sum_fp = 0
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
    api_folders = get_first_layer_files(
        "E:\\Lab Work\\dataset_science\\API_Docs\\Countly_API_Docs_test\\")
    sum_acs = 0
    for api_folder in api_folders:
        logger.info("Processing Folder:" + str(api_folder))
        (items, tp, fp, acs) = self.process_api(api_folder)
        sum_acs = sum_acs + acs
        sum_items = sum_items + items
        sum_tp = sum_tp + tp
        sum_fp = sum_fp + fp
    print("SUM=" + str(sum_acs))
    # print("SUM_ITEMS=" + str(sum_items))
    # print("SUM_TP=" + str(sum_tp))
    # print("SUM_FP=" + str(sum_fp))
    print("-----Sensitive Results-----")
    for sensitive_result in self.sensitive_results:
        print(sensitive_result)
    # logger.info("API Count=" + str(api_cnt))
    logger.info("Sensitive API Count=" + str(len(self.sensitive_results)))
def post_text(self, msg):
    try:
        openid = msg['FromUserName']
        keyword = msg['Content'].strip().encode('utf-8')
        logger.info("get text: user openid = " + str(openid) + ";msg = " + keyword)
        if keyword.startswith("bu"):
            # backfill mode: record data for a past date
            today = keyword[2:12]
            month = keyword[2:9]
            info = content_ope.record_msg(openid, keyword[12:], month, today)
        elif not keyword.startswith("hi"):
            info = "Daily records must start with 'hi' before I will record them"
        else:
            today = self.curr_date
            month = self.curr_month
            info = content_ope.record_msg(openid, keyword[2:], month, today)
        url = Const.TRACK_URL % (openid, month)
        info += "\n\n" + "<a href='" + url + "'>This month's records</a>"
        return info
    except Exception as e:
        logger.error(e)
        return "Sorry, something went wrong. Please contact my owner."
def producer(url, q):
    while True:
        i = 0
        logger.debug('rtmp: {} read+'.format(url))
        video_capture = cv2.VideoCapture(url)
        ret_val, image = video_capture.read()
        if False is video_capture.isOpened() or False is ret_val:
            logger.warning('{} url is: {} {}'.format(url, video_capture.isOpened(), ret_val))
            continue
        logger.debug('rtmp: {} load finish'.format(url))
        while True:
            i += 1
            ret, frame = video_capture.read()
            if not ret:
                break
            if i % TIMES != 0 or image is None:
                continue
            if not FX == FY == 1:
                try:
                    logger.debug('{}: {} fps image resize'.format(url, i))
                    frame = cv2.resize(frame, (0, 0), fx=FX, fy=FY)
                except Exception as e:
                    logger.error(e)
                    logger.error('image is bad')
                    break
            if q.full():
                q.get()
            q.put(frame)
            logger.info('{} image save to queue {}'.format(i, q.qsize()))
def get_default(self):
    id = self.get_argument('id', '')
    month = self.get_argument('month', self.curr_month)
    path = os.path.join(settings['data_path'], id, month)
    content = file_ope.parse_data_list(path)
    logger.info("get track data, openid=" + id + ",month=" + month)
    self.render("index.html", content=content, month=month)
def send_mail(email_id, attach_file, to_email, book_title, book_author):
    # Other types
    # Send the email asynchronously and update the pending-email status
    MailTask.send.delay(email_id, attach_file, to_email, book_title, book_author)
    logger.info(u'Sending email...')
    return True
def downLoadStory(storyno, urls):
    requests.adapters.DEFAULT_RETRIES = 5
    s = requests.session()
    s.keep_alive = False
    story_text = {}
    for url in urls:
        logging.info(url)
        flag = True
        while flag:
            try:
                user_agent = user_Agent()
                res = s.get(url, headers=user_agent)
                flag = False
                # print(res.headers["User-Agent"])
                # logging.info(res.headers["User-Agent"])
            except Exception as e:
                logging.info("- - connection failed, reconnecting - -")
                logging.error(e)
                continue
        text_reg = re.compile(
            r'<div class="articlecon font-large"><p>(.+)<br/><br/></p></div>')
        result = text_reg.findall(res.text)
        new_result = result[0].replace("<br/>", "")
        new_result = new_result.lstrip()
        new_result = re.sub(' +', '\n ', new_result)
        db.insertStory(url, new_result, storyno)
    return story_text
def sync_channel_partner():
    dict = {
        "sld": "18746458381",
        "hcl": "18746286622",
        "fyd": "15945238667",
        "dx": "15545020905",
        "lxq": "18645218625",
        "cjl": "19845286972",
        "xmn": "16606671234",
        "wf": "15046200651",
        "wj": "13634528880",
        "yy": "18646619228",
        "sl": "13514685861",
        "xq": "13104527681"
    }
    # For users whose channel is xx and who have no parent user,
    # set the parent user to the account bound to the phone number.
    with engine.connect() as conn:
        for admin in dict.keys():
            # Look up the channel admin's info
            select_channel_admin = conn.execute(
                select([MUserInfo]).where(MUserInfo.mobile == dict[admin])).fetchone()
            admin_id = select_channel_admin['user_id']
            logger.info(admin_id)
            # Update the partner status
            conn.execute(
                update(MPartnerInfo).values({
                    "status": 1
                }).where(MPartnerInfo.user_id == admin_id))
    print("done work")
def process_api(self, folder):
    sum_sensitive_apis = 0
    tp = 0
    fp = 0
    files_list = get_all_files(folder)
    acs = 0
    for i in range(0, len(files_list)):
        file = files_list[i]
        # logger.info("Processing File=" + str(file))
        self.processing_class = file.split("\\")[-1].split(" ")[0]
        logger.info("Processing Class=" + self.processing_class)
        soup = BeautifulSoup(open(file, encoding='utf-8'), features='html.parser')
        # tag_list includes all tags in the html file.
        tag_list = soup.find_all()
        # print("tag list len=" + str(len(tag_list)))
        (num_sensitive_apis, c_tp, c_fp, ac) = self.get_privacy(tag_list)
        sum_sensitive_apis = sum_sensitive_apis + num_sensitive_apis
        tp = tp + c_tp
        fp = fp + c_fp
        acs = ac + acs
    # print("Sum_Sensitive_APIs=" + str(sum_sensitive_apis))
    # print("TP=" + str(tp))
    # print("FP=" + str(fp))
    return (sum_sensitive_apis, tp, fp, acs)
def crawl(self, parser):
    """
    Crawl proxies from the given parser config.
    :param parser:
    :return:
    """
    html_parser = Html_Parser()
    for url in parser['urls']:
        response = Html_Downloader.download(url)
        if response is not None:
            proxy_list = html_parser.parse(response, parser)
            if proxy_list is not None:
                # Check the crawled proxies
                count, new = 0, 0
                for proxy in proxy_list:
                    count += 1
                    proxy_str = '%s:%s' % (proxy['ip'], proxy['port'])
                    if proxy_str not in self.proxies_set:
                        self.proxies_set.add(proxy_str)
                        new += 1
                        self.sqlhelper.insert(proxy)
                self.url_count += 1
                logger.info('%d/%d -- <%s> fetched %d, %d previously unseen'
                            % (self.url_count, self.url_total, url, count, new))
            else:
                self.url_count += 1
                logger.warning('%d/%d -- <%s> failed to parse data'
                               % (self.url_count, self.url_total, url))
        else:
            self.url_count += 1
            logger.warning('%d/%d -- <%s> failed to download page'
                           % (self.url_count, self.url_total, url))
def inertStoryUrl(self, storyNo, storyTitle, storyUrl):
    # Insert the story's download URL
    createtime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    sql = ("INSERT INTO story_url(storyno,storytitle,storyurl,createtime) "
           "VALUES ('{}','{}','{}','{}')").format(storyNo, storyTitle, storyUrl, createtime)
    self.cur.execute(sql)
    self.conn.commit()
    msg = "Added download URL for story: " + storyTitle
    logging.info(msg)
def release_proxy(group, task):
    """Handle proxy release"""
    tp = TaskProtocal(task)
    task_dct = tp.get_data()
    logger.info("release %s" % task_dct)
    if task_dct['proxy']:
        redis_execute(redis_client.sadd)(KEY_NAME, task_dct['proxy'])
def get_subscribed_user(self, openid):
    print self.get_access_token(), openid
    html = requests.get(Const.WXAPI_SUBSCRIBED_USERINFO % (self.get_access_token(), openid))
    user = json.loads(html.content)
    if user:
        logger.info("get a user info from weixin, user = " + str(user))
        return user
    return {}
async def pub_to_nsq(address, topic, msg):
    url = "http://{}/pub".format(address)
    logger.info(url)
    async with ClientSession() as session:
        async with session.post(url, params="topic=" + topic, json=msg) as resp:
            if resp.status != 200:
                logger.error("[pub to nsq error] topic: {}".format(topic))
            return resp.status
def train_epoch(dataset, model, model_fn, optimizer, epoch):
    iter_time = utils.AverageMeter()
    data_time = utils.AverageMeter()
    am_dict = {}

    model.train()
    start_epoch = time.time()
    end = time.time()

    train_loader = dataset.train_data_loader
    for i, batch_id in enumerate(train_loader):
        data_time.update(time.time() - end)
        torch.cuda.empty_cache()

        ##### adjust learning rate
        utils.step_learning_rate(optimizer, cfg.lr, epoch - 1, cfg.step_epoch, cfg.multiplier)

        ##### prepare input and forward
        batch = dataset.trainMerge(batch_id)
        loss, _, visual_dict, meter_dict = model_fn(batch, model, epoch)

        ##### meter_dict
        for k, v in meter_dict.items():
            if k not in am_dict.keys():
                am_dict[k] = utils.AverageMeter()
            am_dict[k].update(v[0], v[1])

        ##### backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        ##### time and print
        current_iter = (epoch - 1) * len(train_loader) + i + 1
        max_iter = cfg.epochs * len(train_loader)
        remain_iter = max_iter - current_iter

        iter_time.update(time.time() - end)
        end = time.time()

        remain_time = remain_iter * iter_time.avg
        t_m, t_s = divmod(remain_time, 60)
        t_h, t_m = divmod(t_m, 60)
        remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s))

        sys.stdout.write(
            "epoch: {}/{} iter: {}/{} loss: {:.4f}({:.4f}) data_time: {:.2f}({:.2f}) "
            "iter_time: {:.2f}({:.2f}) remain_time: {remain_time}\n".format(
                epoch, cfg.epochs, i + 1, len(train_loader),
                am_dict['loss'].val, am_dict['loss'].avg,
                data_time.val, data_time.avg,
                iter_time.val, iter_time.avg,
                remain_time=remain_time))
        if (i == len(train_loader) - 1):
            print()

    logger.info("epoch: {}/{}, train loss: {:.4f}, time: {}s".format(
        epoch, cfg.epochs, am_dict['loss'].avg, time.time() - start_epoch))

    utils.checkpoint_save(model, cfg.exp_path, cfg.config.split('/')[-1][:-5],
                          epoch, cfg.save_freq, use_cuda)

    for k in am_dict.keys():
        if k in visual_dict.keys():
            writer.add_scalar(k + '_train', am_dict[k].avg, epoch)
def worker_wage_task(conn, task_info):
    trans = conn.begin()
    try:
        now = datetime.now()
        today_time = now - timedelta(hours=now.hour, minutes=now.minute,
                                     seconds=now.second, microseconds=now.microsecond)
        select_wage_record = conn.execute(select([MWageRecord]).where(
            and_(
                MWageRecord.status == 1,
                MWageRecord.create_time == today_time,
                MWageRecord.user_id == task_info['user_id']
            )
        )).fetchone()
        current_game = select_wage_record['current_game'] if select_wage_record else 0
        current_video = select_wage_record['current_video'] if select_wage_record else 0
        if select_wage_record:
            # Update the daily task counters
            if task_info['changed_type'] == 7:
                current_game += 1
            elif task_info['changed_type'] == 30:
                current_video += 1
            # Look up the wage-level thresholds
            select_wage_level = conn.execute(select([MWageLevel]).where(
                MWageLevel.wage_level == select_wage_record['wage_level']
            )).fetchone()
            # Check the thresholds and build the update payload
            result = {
                "current_game": current_game,
                "current_video": current_video,
            }
            if current_game >= select_wage_level['game_number'] and \
                    current_video >= select_wage_level['video_number']:
                result["status"] = 2
                result["reward"] = select_wage_level['reward']
                # result["update_time"] = datetime.now()
                result["current_game"] = select_wage_level['game_number']
                result["current_video"] = select_wage_level['video_number']
            # Update the daily wage record
            conn.execute(update(MWageRecord).values(result).where(
                and_(
                    MWageRecord.user_id == select_wage_record['user_id'],
                    MWageRecord.create_time == select_wage_record['create_time']
                )
            ))
            logger.info("{}:game->{},video->{}".format(
                task_info['user_id'], current_game, current_video))
        else:
            return False
        trans.commit()
        return True
    except Exception as e:
        logger.info(e)
        traceback.print_exc()
        trans.rollback()
        return False
def get(self, url, validator_func, encoding='utf-8', use_proxy=True):
    """
    Fetch a page and validate its content.
    :param url: url
    :param validator_func: content validation function
    :param encoding: encoding
    :param use_proxy: whether to use a proxy
    :return:
    """
    id = url.replace("http://app.jg.eastmoney.com/NewsData/GetNewsText.do?id=", "")\
        .replace("&cid=3948045045460666", '')\
        .replace("http://app.jg.eastmoney.com/Notice/GetNoticeText.do?id=", "")\
        .replace("&cid=3948047684990666", '')
    # Keep fetching until the page is retrieved or the attempt limit is reached
    for i in range(100):
        if use_proxy:
            if not self.proxies:
                self._get_proxy()
            proxy, proxy_id = self.proxies.pop()
            logger.info('%s -- proxy with %s' % (id, proxy))
        try:
            start = time.time()
            req = requests.get(
                url=url,
                headers={
                    'User-Agent': random.choice(USER_AGENTS),
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Connection': 'keep-alive',
                    'Accept-Encoding': 'gzip, deflate',
                },
                timeout=(5, 10),  # (connect timeout, read timeout)
                proxies={"http": proxy})
            req.encoding = encoding
            if req.ok:
                speed = round(time.time() - start, 2)
                # Parse the content
                data = validator_func(req.text)
                if data:
                    # Report proxy success
                    self._proxy_used(proxy_id, True, speed)
                    # Return the parsed data
                    return data
            else:
                # logger.info('request error -- %s' % req)
                pass
        except Exception as e:
            # logger.error(e)
            pass
        # logger.info('%s get error, continue, now num is %d' % (id, i))
        self._proxy_used(proxy_id, False, 0)
def check_pcdd_sign(keysign, adid, pid, ordernum, deviceid):
    check_key = hashlib.md5(
        (str(adid) + pid + ordernum + deviceid + PCDD_KEY).encode('utf-8')).hexdigest()
    logger.info("PCDD: server keycode: {}, request keycode: {}".format(check_key, keysign))
    if keysign == check_key:
        return True
    else:
        return False
def run(self):
    get_sensitive_keywords()
    api_folders = get_first_layer_files(Config.target_folder)
    for api_folder in api_folders:
        logger.info("Processing Folder:" + str(api_folder))
        self.process_api(api_folder)
    print("-----Sensitive Results-----")
    print("APIs Count=" + str(len(self.apis)))
    print("Sensitive APIs Count=" + str(len(self.sensitive_results)))
def on_success(self, retval, task_id, args, kwargs):
    try:
        logger.info(u'Email sent successfully, args: %s' % str(args))
        # Update the email's send status
        db = Database()
        db.email_update_status(str(args[0]), globals.STATUS.get('complete'))
    except Exception as e:
        logger.error(u'Failed to update email send status, error: %s, args: %s'
                     % (str(e), str(args)))
def unzip(zipped_file):
    unzipped_file = os.path.splitext(zipped_file)[0]
    logger.info("Unzipping %s to %s", zipped_file, unzipped_file)
    with gzip.open(zipped_file, "rb") as f_in, open(unzipped_file, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    logger.info("Unzipping completed")
    return unzipped_file
def is_out_door(key, way):
    if in_out_door['into_door_per'] > in_out_door['out_door_per']:
        if key in in_house.keys():
            del in_house[key]
            in_out_door['out_door_per'] += 1
            logger.info('{} id:{} after out of door: {}'.format(
                way, key, in_out_door['out_door_per']))
    else:
        in_out_door['into_door_per'] = in_out_door['out_door_per'] = 0
def check_yw_sign(keysign, rewardDataJson, time):
    check_key = (hashlib.md5(
        (rewardDataJson + time + YW_SECRET).encode('utf-8')).hexdigest()).lower()
    logger.info("YW: server keycode: {}, request keycode: {}".format(check_key, keysign))
    if keysign == check_key:
        return True
    else:
        return False
def check_ibx_task_sign(keysign, app_key, device, device_info, target_id):
    check_key = (hashlib.md5(
        (app_key + device + device_info + target_id + IBX_SECRET).encode('utf-8')).hexdigest()).upper()
    logger.info("IBX: server keycode: {}, request keycode: {}".format(check_key, keysign))
    if keysign == check_key:
        return True
    else:
        return False
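# The three *_sign checks above compare the computed digest to the caller's
# signature with `==`. A common hardening, not present in the original code, is a
# constant-time comparison via hmac.compare_digest. A minimal sketch follows; the
# function name and parameters are illustrative assumptions, only the MD5-over-
# concatenated-fields pattern comes from the functions above.
import hashlib
import hmac

def check_sign_constant_time(keysign, payload, secret):
    check_key = hashlib.md5((payload + secret).encode('utf-8')).hexdigest().lower()
    # compare_digest does not short-circuit, so it leaks no timing information
    # about how many leading characters of the signature match.
    return hmac.compare_digest(keysign, check_key)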
def get_default(self):
    echostr = self.get_argument('echostr', '')
    if echostr and self.verification():
        self.write(echostr)
        res = "weixin callback success"
    else:
        self.write("")
        res = "weixin callback failed"
    log_info = {"handler": __name__ + '.' + self.__class__.__name__,
                "event": sys._getframe().f_code.co_name,
                "event_description": "",
                "res_type": "xml",
                "res_content": {"message": res, "echostr": echostr}}
    logger.info(log_info)
def post_default(self):
    msg = self.get_msg()
    # print msg
    try:
        msg_type = msg['MsgType']
        if self.verification():
            # add cookie
            self.set_secure_cookie("openid", msg["FromUserName"])
            info = getattr(self, 'post_' + msg_type)(msg)
        else:
            info = u"Message verification failed"
        if info:
            logger.info("send a message to weixin, info = " + info)
            self.write(self.rep_text(msg, info))
    except Exception as e:
        logger.error(e)
def event_subscribe(self, msg):
    openid = msg["FromUserName"]
    # get userinfo from weixin
    userFromWeixin = self.get_subscribed_user(openid)
    if not userFromWeixin:
        logger.error("cannot get userInfo of a subscribed user, openid = " + openid)
    else:
        user = self.db.get_openid(openid)
        if user:
            # update
            self.db.update_item(openid, userFromWeixin)
            logger.info("update a userInfo, user= " + str(userFromWeixin))
        else:
            # insert
            self.db.insert_item(userFromWeixin)
            logger.info("insert a userInfo, user= " + str(userFromWeixin))
    return self.rep_follow(msg)
def request(url, params, method='GET', timeout=1):
    result = None
    response = None
    if method == 'GET':
        url = '%s?%s' % (url, urllib.urlencode(params))
        logger.info('request url: %s' % url)
    try:
        response = urllib2.urlopen(url, timeout=timeout)
        result = response.read()
    # HTTPError is a subclass of URLError, so it must be handled first
    except urllib2.HTTPError as e:
        code = e.code if hasattr(e, 'code') else -1
        logger.warning('http error, code[%s]' % (code))
    except urllib2.URLError as e:
        code = e.code if hasattr(e, 'code') else -1
        reason = e.reason if hasattr(e, 'reason') else None
        logger.warning('url error, code[%s] reason[%s]' % (code, reason))
    except Exception as e:
        logger.warning("url get exception, url: %s, e: %s" % (url, e))
    finally:
        if response:
            response.close()
    return result
def renew_access_token(self):
    if (not self.wxapp['access_token']) or \
            int(time.time()) - self.wxapp['create_time'] > self.wxapp['expires_in'] / 2:
        html = requests.get(Const.URL_ACCESS_TOKEN % (Const.WXAPP, Const.WXAPP_SECRET))
        token = json.loads(html.content)
        # print token
        if token and token.get("access_token", 0) != 0:
            access_token = token['access_token']
            expires_in = token['expires_in']
            access_token_create_time = int(time.time())
            self.wxapp['access_token'] = access_token
            self.wxapp['expires_in'] = expires_in
            self.wxapp['create_time'] = access_token_create_time
            logger.info("update access token, new token = " + access_token +
                        ", time = " + self.curr_now)
            return True
        else:
            return False
    return True
from util.log import logger


def init():
    cfg = {}
    config = ConfigParser.ConfigParser()
    config.read('config/config.ini')
    sections = config.sections()
    for section in sections:
        cfg[section] = {}
        options = config.options(section)
        for option in options:
            try:
                cfg[section][option] = config.get(section, option)
            except Exception as e:
                logger.warning("parse config error, e[%s]" % e)
    logger.info('server cfg[%s]' % cfg)
    return cfg


cfg = init()
account = cfg['passport'].get('account', '')
password = cfg['passport'].get('password', '')
port = int(cfg['server'].get('port', ''))
threads = int(cfg['server'].get('threads', ''))

handler = SMSServiceHandler(account, password)
processor = SmsService.Processor(handler)
# transport = TSocket.TServerSocket(port=9990)
transport = TSocket.TServerSocket(port=port)
# tfactory = TTransport.TFramedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
import sys
sys.path.append('gen-py')

from push import PushService
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from thrift.server import TNonblockingServer
from handler.push_service_handler import PushServiceHandler
from util.log import logger

handler = PushServiceHandler()
processor = PushService.Processor(handler)
# transport = TSocket.TServerSocket(port=9990)
transport = TSocket.TServerSocket(port=8998)
# tfactory = TTransport.TFramedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

# server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
# server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
# server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory)
server = TNonblockingServer.TNonblockingServer(processor, transport, None, pfactory)
server.setNumThreads(12)

logger.info('Starting the server...')
server.serve()
logger.info('done.')
def wrapper(*args):
    pre = time.time()
    result = fn(*args)
    cost = (time.time() - pre) * 1000
    logger.info('%s, cost[%s]' % (content, cost))
    return result
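# This wrapper references both an outer `fn` and an outer `content`, so it is the
# innermost function of a parameterized decorator (a decorator factory). A minimal
# self-contained sketch follows; the factory name `log_cost` is an assumption, and
# only `fn`, `content`, and the millisecond timing come from the fragment above.
import functools
import logging
import time

logger = logging.getLogger(__name__)

def log_cost(content):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args):
            pre = time.time()
            result = fn(*args)
            cost = (time.time() - pre) * 1000  # elapsed time in milliseconds
            logger.info('%s, cost[%s]' % (content, cost))
            return result
        return wrapper
    return decorator

# Usage: @log_cost('send_sms') above a function definition logs each call's cost
# under that label.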