def _print_log(self, step, log_values, title='', max_n_batch=None):
    """Format and emit a progress log line for the current step.

    Args:
        step: index of the current batch within the epoch.
        log_values: mapping of metric name -> meter; only
            AverageValueMeter entries are printed.
        title: label prepended to the log line (e.g. 'Validation').
        max_n_batch: total batches in the epoch; when given, learning
            rates and timing/ETA information are printed and the
            timers are reset afterwards.
    """
    log_str = '{}\n'.format(self.params.exp_name)
    log_str += '{}: epoch {}'.format(title, self.last_epoch)
    if max_n_batch:
        log_str += '[{}/{}], lr: {}'.format(
            step, max_n_batch, get_learning_rates(self.optimizer))
    # NOTE(review): `i` only counts printed meters and is otherwise
    # unused — confirm it can be removed.
    i = 0
    # global_step = step + (self.last_epoch - 1) * self.batch_per_epoch
    for k, v in log_values.items():
        if isinstance(v, meter_utils.AverageValueMeter):
            mean, std = v.value()
            log_str += '\n\t{}: {:.10f}'.format(k, mean)
            i += 1
    if max_n_batch:
        # print time
        # +1e-6 guards against division by zero on a freshly-cleared timer
        data_time = self.data_timer.duration + 1e-6
        batch_time = self.batch_timer.duration + 1e-6
        rest_seconds = int((max_n_batch - step) * batch_time)
        log_str += '\n\t({:.2f}/{:.2f}s,' \
                   ' fps:{:.1f}, rest: {})'.format(
                       data_time, batch_time,
                       self.params.batch_size / batch_time,
                       str(datetime.timedelta(seconds=rest_seconds)))
        self.batch_timer.clear()
        self.data_timer.clear()
    logger.info(log_str)
def cals_bal(self):
    """Return how many reservation chances the user still has.

    Each purchased bottle of goods G000022 grants ``count`` reservation
    chances (raised from 2 to 3 on 2021/01/12, hence count = 3); chances
    already used are counted from the Makes table.

    Returns:
        int: granted chances minus consumed chances (may be negative).
    """
    count = 3  # reservation chances granted per purchased bottle
    selfGoodsNumber = Makes.objects.filter(userid=self.userid).count()
    # Bind user id and goods id as query parameters instead of
    # interpolating them into the SQL text — prevents SQL injection
    # through self.userid.
    query = """ SELECT t1.linkid,t1.gdnum FROM `ordergoodslink` as t1 INNER JOIN `order` as t2 ON t1.orderid = t2.orderid WHERE t2.status in ('1','2','3') and t2.userid = %s and t1.gdid = %s and t2.createtime > 1610186400 group by t1.linkid"""
    # logger.info(query)
    obj = list(OrderGoodsLink.objects.raw(query, [self.userid, "G000022"]))
    # sum() over an empty sequence is already 0 — no branch needed
    GoodsNumber = sum(item.gdnum for item in obj)
    logger.info("预约次数{}|舜{}|条件{}".format(selfGoodsNumber, GoodsNumber, count))
    return GoodsNumber * count - selfGoodsNumber
def val(self):
    """Run one full pass over the validation set and log the mean loss.

    Switches the model to eval mode, accumulates the loss over all
    validation batches, and prints progress every ``print_freq`` steps.
    NOTE(review): gradients are not explicitly disabled here (no
    torch.no_grad() wrapper) — confirm that is intended.
    """
    self.model.eval()
    logs = OrderedDict()
    sum_loss = meter_utils.AverageValueMeter()
    logger.info('Val on validation set...')
    # data_timer brackets the loader fetch, batch_timer the forward pass;
    # the tic/toc pairing below is order-critical.
    self.batch_timer.clear()
    self.data_timer.clear()
    self.batch_timer.tic()
    self.data_timer.tic()
    for step, batch in enumerate(self.val_data):
        self.data_timer.toc()
        inputs, gts, _ = self.batch_processor(self, batch)
        _, saved_for_loss = self.model(*inputs)
        self.batch_timer.toc()
        loss, saved_for_log = self.model.module.build_loss(saved_for_loss, *gts)
        sum_loss.add(loss.item())
        self._process_log(saved_for_log, logs)
        if step % self.params.print_freq == 0:
            self._print_log(step, logs, 'Validation', max_n_batch=len(self.val_data))
        # restart timers for the next iteration
        self.data_timer.tic()
        self.batch_timer.tic()
    mean, std = sum_loss.value()
    logger.info('\n\nValidation loss: mean: {}, std: {}'.format(mean, std))
def OrderCanleSysEx(self, request):
    """Nightly batch job that advances stale orders through their states."""
    logger.info("晚上批量处理订单!")
    # Orders with status '0' older than ORDERCANLETIME minutes:
    # presumably unpaid — restock their goods and move to status '9'.
    cutoff = UtilTime().today.shift(minutes=ORDERCANLETIME * -1)
    stale = Order.objects.select_for_update().filter(
        createtime__lte=cutoff.timestamp, status='0')
    for o in stale:
        OrderBase(order=o).callbackStock()
        o.status = '9'
        o.save()
    # Status '2' with fhtime older than a week: move to status '3'.
    cutoff = UtilTime().today.shift(days=-7)
    shipped = Order.objects.select_for_update().filter(
        fhtime__lte=cutoff.timestamp, status='2')
    for o in shipped:
        o.status = '3'
        o.save()
    # Status '9' created more than a day ago: move to status '8'.
    cutoff = UtilTime().today.shift(days=-1)
    closed = Order.objects.select_for_update().filter(
        createtime__lte=cutoff.timestamp, status='9')
    for o in closed:
        o.status = '8'
        o.save()
    logger.info("晚上批量处理成功!")
def save(self, prefix='latest'):
    """Save the network (plus optional averaged net, optimizer, event-loop
    and scheduler states) to '<prefix>_checkpoint.tar'.

    Args:
        prefix: filename prefix for the checkpoint archive.
    """
    logger.info('saving checkpoint')
    checkpoint_path = self.cfg.checkpoints_path / f'{prefix}_checkpoint.tar'
    net = self.net
    if isinstance(net, torch.nn.DataParallel):
        net = net.module  # unwrap so keys are saved without 'module.' prefix
    checkpoint_states = {'net': net.state_dict()}
    if self.use_avg:
        netA = self.netA
        if isinstance(netA, torch.nn.DataParallel):
            netA = netA.module
        # BUG FIX: a trailing comma previously stored a 1-tuple
        # (state_dict(),), which load() could not pass to load_state_dict.
        checkpoint_states['netA'] = netA.state_dict()
    if not self.cfg.no_states:
        checkpoint_states['opt_states'] = self.optimizer.state_dict()
        checkpoint_states[
            'event_loop_states'] = self.event_loop.get_states()
        if len(self.optimizer_schedulers) > 0:
            checkpoint_states['schedulers'] = [
                sched.state_dict() for sched in self.optimizer_schedulers
            ]
    torch.save(checkpoint_states, checkpoint_path)
def get_accids_format(self, obj):
    """Return a display string for a JSON-encoded list of account ids.

    One account -> its nickname; several -> '<nick>等N个号';
    empty list or unknown first account -> ''.
    """
    accids = json.loads(obj.accids)
    logger.info(accids)
    if not accids:
        # Previously an empty list raised an uncaught IndexError on accids[0].
        return ""
    try:
        nickname = Acc.objects.get(accid=accids[0]).nick_name
    except Acc.DoesNotExist:
        return ""
    return "{}等{}个号".format(nickname, len(accids)) if len(accids) > 1 else nickname
def isUpdate(self, version):
    """Compare a reported version against the stored one.

    Returns the stored record when its version is strictly greater
    than ``version``; otherwise None.
    """
    current = self.get()
    logger.info("上传版本号{},系统版本{}".format(version, current))
    if not (current and version):
        return None
    return current if int(current['version']) > int(version) else None
def request_task_order(orderid):
    """POST the order id to the task server and log the response body."""
    payload = {"data": {"orderid": orderid}}
    res = requests.request(method='POST', url=TASKSERVERURL, json=payload)
    logger.info("处理订单未付款到期状态{};{}".format(orderid, res.text))
def userHandler(self, request):
    """CRUD handler for admin accounts (rolecode 1001).

    Only a super-admin (rolecode 1000) may call this.  Dispatches on
    HTTP method: POST creates, PUT updates, DELETE removes, GET lists
    a paginated result.

    Raises:
        PubErrorCustom: caller is not a super-admin, login name missing
            on create, or target user missing on update.
    """
    logger.info(request.user)
    if request.user['rolecode'] != 1000:
        raise PubErrorCustom("只有超级管理员能操作!")
    if request.method == 'POST':
        # userid = request.data_format.get('userid')
        uuid = request.data_format.get("login_name")
        name = request.data_format.get("name")
        pic = request.data_format.get("pic")
        passwd = request.data_format.get("passwd")
        if not uuid:
            raise PubErrorCustom('登录名称不能为空!')
        if not name:
            name = uuid  # display name defaults to the login name
        user = Users.objects.create(
            **{
                "uuid": uuid,
                "mobile": uuid,
                "rolecode": 1001,
                "name": name,
                "passwd": passwd,
                "pic": pic
            })
        # userid mirrors the auto-generated primary key
        user.userid = user.id
        user.save()
    elif request.method == 'PUT':
        userid = request.data_format.get('userid')
        uuid = request.data_format.get("login_name")
        name = request.data_format.get("name")
        pic = request.data_format.get("pic")
        passwd = request.data_format.get("passwd")
        try:
            user = Users.objects.get(userid=userid)
        except Users.DoesNotExist:
            raise PubErrorCustom('该用户不存在!')
        # only overwrite fields that were actually supplied ...
        user.uuid = uuid if uuid else user.uuid
        user.mobile = uuid if uuid else user.uuid
        user.name = name if name else user.name
        user.pic = pic if pic else user.pic
        # NOTE(review): ... except passwd, which is overwritten
        # unconditionally even when empty — confirm intended.
        user.passwd = passwd
        user.save()
    elif request.method == 'DELETE':
        Users.objects.filter(
            userid=request.data_format.get('userid')).delete()
    elif request.method == 'GET':
        return {
            "data":
            UsersSerializers(Users.objects.filter(rolecode=1001).order_by(
                '-createtime')[request.page_start:request.page_end],
                             many=True).data
        }
def test(self, request):
    """Debug endpoint: log the incoming request and touch the database."""
    params, body = request.query_params, request.data
    logger.info(""" request data: \n --- {} --- \n --- {} --- """.format(params, body))
    # cheap query to confirm the database connection is alive
    Users.objects.filter().count()
    return {"test": "test"}
def _save_ckpt(self, save_to):
    """Write the current model and optimizer state to ``save_to``."""
    if isinstance(self.model, nn.DataParallel):
        model = self.model.module  # unwrap the parallel wrapper first
    else:
        model = self.model
    net_utils.save_net(save_to,
                       model,
                       epoch=self.last_epoch,
                       optimizers=[self.optimizer],
                       rm_prev_opt=True,
                       max_n_ckpts=self.params.save_nckpt_max)
    logger.info('Save ckpt to {}'.format(save_to))
def load(self):
    """Restore network weights from '<test_prefix>_checkpoint.tar'.

    Loads the averaged network key ('netA') when self.use_avg is set,
    otherwise the plain network key ('net'); both are loaded strictly
    (all keys must match).
    """
    prefix = self.cfg.test_prefix
    checkpoint_path = self.cfg.checkpoints_path / f'{prefix}_checkpoint.tar'
    checkpoint_states = torch.load(checkpoint_path, map_location=self.device)
    logger.info(f'loading checkpoint "{checkpoint_path}"')
    if self.use_avg:
        # NOTE(review): the averaged weights are loaded into self.net,
        # not a separate attribute — confirm this is intended.
        self.net.load_state_dict(checkpoint_states['netA'], strict=True)
    else:
        self.net.load_state_dict(checkpoint_states['net'], strict=True)
def train(self):
    """Main training loop: step until total_kimgs periods have elapsed."""
    logger.info('starting training loop...')
    loop = self.event_loop
    while loop.n_periods < self.total_kimgs:
        self.optim_step()
        if self.use_avg:
            self.update_average()  # keep the weight-averaged copy in sync
        loop.step(self.batch_size)
    self.save(prefix='latest')
    logger.info('all done.')
def get_acc(self, obj):
    """Look up the account for ``obj.accid``; {} when it does not exist."""
    logger.info(obj.accid)
    try:
        acc = Acc.objects.get(accid=obj.accid)
    except Acc.DoesNotExist:
        return {}
    return {"nick_name": acc.nick_name, "head_img": acc.head_img}
def send_msg(self, listids, openids, is_to_all, send_ignore_reprint, accid):
    """Kick off a WeChat mass-message send for the given account."""
    from lib.utils.wechat.msg import WechatAccMassMsg
    logger.info("{}-{}-{}-{}-{}".format(listids, openids, is_to_all,
                                        send_ignore_reprint, accid))
    sender = WechatAccMassMsg(accid=accid)
    sender.run(listids=listids,
               openids=openids,
               is_to_all=is_to_all,
               send_ignore_reprint=send_ignore_reprint)
def create(self, order_id, amount, subject=None):
    """Build and log an Alipay app-payment order string.

    Args:
        order_id: merchant order number.
        amount: Decimal amount, formatted to two decimal places.
        subject: optional display subject; falls back to '支付订单:<id>'.
    """
    if not subject:
        subject = '支付订单:%s' % order_id
    total = str(amount.quantize(Decimal('0.00')))
    order_string = self.alipay.api_alipay_trade_app_pay(
        out_trade_no=order_id,
        total_amount=total,
        subject=subject,
        return_url=None,
        notify_url=None,
    )
    logger.info(order_string)
    return order_string
def save_checkpoint(net, args, epoch=None):
    """Save network parameters under args.checkpoints_path.

    Args:
        net: model exposing save_parameters(path).
        args: config object with a pathlib ``checkpoints_path`` attribute.
        epoch: when given, the file is '<epoch:03d>.params'; otherwise
            'last_checkpoint.params'.
    """
    if epoch is None:
        checkpoint_name = 'last_checkpoint.params'
    else:
        checkpoint_name = f'{epoch:03d}.params'
    # exist_ok avoids the race between the old exists() check and mkdir()
    args.checkpoints_path.mkdir(parents=True, exist_ok=True)
    checkpoint_path = args.checkpoints_path / checkpoint_name
    logger.info(f'Save checkpoint to {str(checkpoint_path)}')
    net.save_parameters(str(checkpoint_path))
def textSend(self, obj, user):
    """Send a WeChat text message with the fan's nickname substituted in."""
    content = obj.content.replace("<粉丝昵称>", user['nickname'])
    payload = {
        "touser": user['openid'],
        "msgtype": "text",
        "text": {"content": content}
    }
    # encode ourselves so non-ASCII text is sent as UTF-8, not \u escapes
    body = json.dumps(payload, ensure_ascii=False).encode('utf-8')
    logger.info("发送文本消息:{}".format(body))
    self.request_handler(method="POST", url=self.url, data=body)
def signCheckForTx(signature, timestamp, eventId):
    """Reject stale requests and compute the expected request signature.

    Requests whose timestamp is more than 30 seconds old are refused.
    NOTE(review): the computed sign is only logged, never compared
    against ``signature`` here — confirm verification happens elsewhere.
    """
    deadline = UtilTime().today.shift(seconds=-30).timestamp
    if deadline > int(timestamp):
        logger.info("请求timestamp已经超时!")
        raise PubErrorCustom("拒绝访问!")
    payload = dict(token=TOKEN, timestamp=timestamp, eventId=eventId)
    stringNew = sortKeyStringForDict(payload)
    sign = sha256hex(stringNew)
    logger.info("{}----{}----{}".format(stringNew, signature, sign))
def sync(accid):
    """Synchronize one official account's WeChat tags into the database."""
    logger.info("[{}]正在处理[{}]公众号标签同步...".format(UtilTime().arrow_to_string(), accid))
    # flag all existing tags first so stale ones can be told apart later
    AccTag.objects.filter(accid=accid).update(umark='1')
    remote = WeChatAccTag(accid=accid, isAccessToken=True).get_tag_list()
    handler(accid, remote['tags'])
    logger.info("公众号[{}]标签同步[{}]处理完毕!".format(accid, UtilTime().arrow_to_string()))
def sendmsg(self, listid, nickname, openid, accid):
    """Send one list item to a single WeChat user via the event API."""
    from lib.utils.wechat.msg import WeChatAccEvent
    logger.info("{},{},{},{}".format(listid, nickname, openid, accid))
    target = {"openid": openid, "nickname": nickname}
    WeChatAccEvent(accid=accid).msgHandler(send_type='2',
                                           listids=[listid],
                                           isSend=True,
                                           user=target)
def _load_ckpt(self, ckpt):
    """Restore model (and optionally optimizer) state from ``ckpt``.

    The checkpoint's epoch is adopted as ``last_epoch`` unless the
    params ask to ignore it (ignore_opt_state / zero_epoch) or the
    recorded epoch is negative; the optimizer state is only restored
    in that same case.
    """
    epoch, state_dicts = net_utils.load_net(ckpt, self.model,
                                            load_state_dict=True)
    if not self.params.ignore_opt_state and not self.params.zero_epoch and epoch >= 0:
        self.last_epoch = epoch
        logger.info('Set last epoch to {}'.format(self.last_epoch))
        if state_dicts is not None:
            self.optimizer.load_state_dict(state_dicts[0])
            # move optimizer state tensors onto the first configured GPU
            net_utils.set_optimizer_state_devices(self.optimizer.state,
                                                  self.params.gpus[0])
            logger.info('Load optimizer state from checkpoint, '
                        'new learning rate: {}'.format(
                            get_learning_rates(self.optimizer)))
def queryBuyOkGoodsCount(userid, gdid, start, end):
    """Total quantity of goods ``gdid`` bought by ``userid`` in a window.

    Counts order lines whose order status is in ('0','1','2','3') and
    whose before_status is not '2', with createtime in [start, end].

    Returns:
        int: summed gdnum, or 0 when there are no matching rows.
    """
    # Bind all caller-supplied values as query parameters instead of
    # interpolating them into the SQL string (prevents SQL injection).
    res = OrderGoodsLink.objects.raw(
        """ SELECT sum(t1.gdnum) as linkid from `ordergoodslink` as t1 INNER JOIN `order` as t2 ON t1.orderid = t2.orderid WHERE t2.status in ('0','1','2','3') and t2.before_status!='2' and t1.gdid=%s and t2.userid=%s and t2.createtime>=%s and t2.createtime<=%s """,
        [gdid, userid, start, end])
    logger.info(res)
    rows = list(res)
    # the aliased SUM() comes back NULL when nothing matched
    return rows[0].linkid if rows and rows[0].linkid else 0
def handler(accid, openids):
    """Upsert WeChat follower profiles for the given official account.

    For every openid, fetches the user's info from the WeChat API and
    either updates the existing AccLinkUser row or creates a new one
    inside a transaction.  Users who no longer follow the account
    (subscribe == 0) are skipped.
    """
    for openid in openids:
        logger.info("处理[{}]中".format(openid))
        userinfo = WechatAccUser(accid=accid,
                                 isAccessToken=True).get_info(openid)
        logger.info("获取列表[{}]".format(userinfo))
        if userinfo['subscribe'] == 0:
            # unsubscribed user — nothing to store
            continue
        with transaction.atomic():
            try:
                accUserObj = AccLinkUser.objects.get(accid=accid,
                                                     openid=openid)
                # tag list is stored as compact JSON (spaces stripped)
                accUserObj.tags = json.dumps(userinfo['tagid_list']).replace(
                    " ", "")
                accUserObj.nickname = userinfo['nickname']
                accUserObj.sex = userinfo['sex']
                accUserObj.city = userinfo['city']
                accUserObj.province = userinfo['province']
                accUserObj.country = userinfo['country']
                accUserObj.headimgurl = userinfo['headimgurl']
                accUserObj.subscribe_time = userinfo['subscribe_time']
                accUserObj.subscribe_scene = userinfo['subscribe_scene']
                accUserObj.umark = '0'  # mark as freshly synchronized
                accUserObj.save()
            except AccLinkUser.DoesNotExist:
                try:
                    AccLinkUser.objects.create(
                        **dict(accid=accid,
                               openid=userinfo['openid'],
                               tags=json.dumps(userinfo['tagid_list']).replace(
                                   " ", ""),
                               nickname=userinfo['nickname'],
                               sex=userinfo['sex'],
                               city=userinfo['city'],
                               province=userinfo['province'],
                               country=userinfo['country'],
                               headimgurl=userinfo['headimgurl'],
                               subscribe_time=userinfo['subscribe_time'],
                               subscribe_scene=userinfo['subscribe_scene'],
                               umark='0'))
                except Exception as e:
                    # best-effort create: log and continue with next openid
                    logger.error(str(e))
def __init__(self, model, args, num_classes, use_flip, scales, skip_bg=True,
             custom_evaluator=None, use_prob_avg=False, class_names=None,
             threshold=0.5):
    """Set up segmentation metrics and a multi-scale test-time evaluator.

    Args:
        model: gluon network; weights are loaded from args.weights.
        args: config providing dtype, ctx (device list) and weights path.
        num_classes: number of segmentation classes.
        use_flip: evaluate with horizontal-flip test-time augmentation.
        scales: list of test-time scales for the evaluator.
        skip_bg: exclude the background class from the metrics.
        custom_evaluator: optional factory used instead of MultiEvalModel.
        use_prob_avg: replace the evaluator's flip inference with a
            probability-averaging variant.
        class_names: optional per-class display names; auto-generated as
            'cls-<i>' when omitted.
        threshold: binarization threshold forwarded to the metrics.
    """
    self.args = args
    if class_names is None:
        class_names = [f'cls-{i}' for i in range(num_classes)]
    # keep both the original metric and the detailed variant so results
    # can be compared side by side
    self.metric_orig = SegmentationMetric(num_classes,
                                          skip_bg=skip_bg,
                                          threshold=threshold)
    self.metric = SegmentationMetricDetailed(num_classes,
                                             class_names,
                                             full_output=False,
                                             compute_auc=False,
                                             skip_bg=skip_bg,
                                             threshold=threshold)
    # route tqdm progress output through the logger
    self.tqdm_out = TqdmToLogger(logger, level=logging.INFO, mininterval=1)
    model.cast(args.dtype)
    model.collect_params().reset_ctx(ctx=args.ctx)
    model.load_parameters(args.weights, ctx=args.ctx)
    if custom_evaluator is not None:
        self.evaluator = custom_evaluator(model,
                                          num_classes,
                                          ctx_list=args.ctx,
                                          flip=use_flip,
                                          scales=scales)
    else:
        self.evaluator = MultiEvalModel(model,
                                        num_classes,
                                        ctx_list=args.ctx,
                                        flip=use_flip,
                                        scales=scales)
    if use_prob_avg:
        # monkey-patch the flip inference with the probability-averaging one
        self.evaluator.flip_inference = types.MethodType(
            prob_avg_flip_inference, self.evaluator)
    logger.info(f"\nLoaded model weights from file: {args.weights}\n")
def print_network_params(self, net, name='', verbose=False):
    """Log the total number of parameters of ``net`` (in millions).

    Args:
        net: module whose parameters are counted via net.parameters().
        name: label used in the summary line.
        verbose: when True, also log the full network description.
    """
    logger.info('-----------------------------------------------')
    # idiomatic single-pass sum instead of a manual accumulation loop
    num_params = sum(param.numel() for param in net.parameters())
    if verbose:
        logger.info(net)
    logger.info('[Network %s] Total number of parameters : %.3f M' %
                (name, num_params / 1e6))
    logger.info('-----------------------------------------------')
def register_events(self):
    """Wire metrics, evaluation, scheduler steps, image logging and
    checkpointing into the training event loop.
    """
    # running cross-entropy loss, smoothed with a 10-step moving average
    self.event_loop.register_metric('ce_loss',
                                    console_name='CELoss',
                                    console_period=self.log_period_kimgs,
                                    tb_name='ce_loss',
                                    tb_period=1,
                                    counter='ma_10')
    if self.val_dataset is not None:
        # periodic validation: full-captcha and per-symbol accuracy
        self.event_loop.register_metric_event(
            self.evaluate,
            metric_name='captcha_accuracy',
            period=self.eval_period,
            tb_global_step='n_periods',
            tb_name='val/captcha_accuracy')
        self.event_loop.register_metric_event(
            self.get_last_evaluate_symbol_accuracy,
            metric_name='symbol_accuracy',
            period=self.eval_period,
            tb_global_step='n_periods',
            tb_name='val/symbol_accuracy')
    for scheduler in self.optimizer_schedulers:
        logger.info('register optim')
        scheduler.register_optimizer(self.optimizer)
        # step the scheduler every period and log the tracked parameter
        self.event_loop.register_metric_event(
            scheduler.step,
            func_inputs=('f_periods', ),
            metric_name=f'net_{scheduler.param_name}',
            console_format='{}',
            period=0,
            console_period=self.log_period_kimgs,
            tb_period=2,
            tb_global_step='n_periods',
            tb_name=f'TrainStates/net_{scheduler.param_name}')
    self.event_loop.register_event(self.images_log,
                                   period=self.log_images_period_kimgs,
                                   func_inputs=('n_periods', ))
    # periodic numbered checkpoints plus a rolling 'latest' checkpoint
    self.event_loop.register_event(self.save,
                                   period=self.checkpoint_period_kimgs,
                                   func_inputs=('n_periods', ))
    self.event_loop.register_event(
        self.save, period=self.last_checkpoint_period_kimgs)
def test(cfg):
    """Entry point for captcha model testing.

    Builds the test dataset and model, then either computes metrics
    (cfg.test_cmd == 'metrics') or visualizes (not implemented).
    """
    base_size = 576
    # ImageNet normalization constants
    normalization = ImageNormalization(mean=[0.485, 0.456, 0.406],
                                       std=[0.229, 0.224, 0.225])
    interpolation = cv2.INTER_LINEAR
    test_dataset = CaptchaDataset(
        input_dir=cfg.val_dataset_path,
        normalization=normalization,
        input_file='test.txt',
        aug=[
            SmallestMaxSize(max_size=base_size,
                            always_apply=True,
                            interpolation=interpolation)
        ],
        default_transform=False,
        original_masks=True,
    )
    num_classes = test_dataset.num_classes()
    num_labels = test_dataset.num_labels()
    net = init_model(num_classes, num_labels)
    # one sample per GPU
    batch_size = 1
    if len(cfg.gpu_ids) > 0:
        batch_size = batch_size * len(cfg.gpu_ids)
    tester = CaptchaTester(cfg,
                           net=net,
                           test_dataset=test_dataset,
                           image_normalization=normalization,
                           batch_size=batch_size,
                           n_display=64)
    if cfg.test_cmd == 'metrics':
        results = tester.evaluate(use_flips=True)
        output_str = ', '.join([f'{k}: {results[k]:.4f}' for k in results])
        logger.info(output_str)
    elif cfg.test_cmd == 'visualize':
        raise NotImplementedError
    else:
        assert False, f'unknown test command {cfg.test_cmd}'
def send_request_other(url, method='get', params=None, data=None, headers=None):
    """Issue an HTTP request and return the decoded JSON body on HTTP 200.

    Args:
        url: target URL.
        method: HTTP verb, forwarded to requests.request.
        params: optional query-string parameters.
        data: optional JSON body.
        headers: optional header mapping; a fresh dict is used per call.

    Returns:
        Parsed JSON response on status 200; None otherwise.

    Raises:
        PubErrorCustom: when the request or JSON decoding fails.
    """
    if headers is None:
        # fixed: the old mutable default ({}) was shared across calls
        headers = {}
    logger.info("请求参数: url:{} header:{} body:{} params:{}".format(
        url, headers, data, params))
    try:
        # NOTE(review): verify=False disables TLS certificate checking —
        # confirm this is acceptable for the target service.
        result = request(method,
                         url,
                         params=params,
                         json=data,
                         verify=False,
                         headers=headers)
        status_code = result.status_code
        payload = result.json()
        if status_code == 200:
            return payload
        # NOTE(review): non-200 responses silently return None — confirm
        # callers expect that.
    except Exception as ex:
        logger.error('{0} 调用失败:{1}'.format(url, ex))
        raise PubErrorCustom('{0}'.format(ex))
def test(self, testset):
    """Evaluate the model on ``testset`` and log both metric variants.

    Runs the multi-scale evaluator over the dataset, updating both the
    original and detailed segmentation metrics, and shows pixAcc/mIoU
    in the progress bar.
    """
    test_data = gluon.data.DataLoader(testset,
                                      self.args.batch_size,
                                      shuffle=False,
                                      last_batch='keep',
                                      num_workers=self.args.workers)
    self.metric.reset()
    self.metric_orig.reset()
    tbar = tqdm(test_data, file=self.tqdm_out, ncols=100)
    for i, (data, dsts) in enumerate(tbar):
        predicts = self.evaluator.parallel_forward(data)
        if len(self.args.ctx) == 1:
            # single context: wrap into lists to match the multi-ctx shape
            predicts = [predicts[0]]
            dsts = [dsts]
        else:
            predicts = [pred[0] for pred in predicts]
        # move targets onto the same device as the predictions
        targets = [target.as_in_context(predicts[0].context) \
                   for target in dsts]
        predicts = [mx.nd.softmax(p, axis=1) for p in predicts]
        self.metric.update(targets, predicts)
        self.metric_orig.update(targets, predicts)
        names, values = self.metric_orig.get()
        metrics_map = {}
        for name, value in zip(names, values):
            metrics_map[name] = value
        tbar.set_description(
            f'accuracy: {metrics_map["pixAcc"]:.3f}, mean-iou: {metrics_map["mIoU"]:.3f}'
        )
    print('----- new metric ------')
    names, values = self.metric.get()
    for name, value in zip(names, values):
        logger.info(f'{name}: {value:.5%}')
    print('----- original metric ------')
    names, values = self.metric_orig.get()
    for name, value in zip(names, values):
        logger.info(f'{name}: {value:.5%}')