def put(self):
    try:
        name = get_argument('username', required=True, help='请输入用户名')
        email = get_argument('email', required=False, help='请输入邮箱')
        # filter_by requires keyword arguments; look the user up by name and update the email
        user1 = users.User.query.filter_by(name=name).first()
        user1.email = email
        db.session.commit()
        return make_response(status=HttpStatus.SUCCESS)
    except Exception:
        db.session.rollback()
        raise AiException()
def get(self):
    name = get_argument('username', required=True, help='请输入用户名')
    password = get_argument('password', required=True, help='请输入密码')
    # GET parameters can also be read directly from the query string:
    # name = request.args.get('username', '')
    # password = request.args.get('password', '')
    logger.info("name:%s, password:%s" % (name, password))
    return make_response(status=HttpStatus.SUCCESS,
                         data={"name": name, "password": password})
def post(self):
    name = get_argument('username', required=True, help='请输入用户名')
    password = get_argument('password', required=False, help='请输入密码')
    # Parameters can also be read directly from the form body via request.form:
    # name = request.form['username']
    # password = request.form['password']
    logger.info("name:%s, password:%s" % (name, password))
    return make_response(status=HttpStatus.SUCCESS,
                         data={"name": name, "password": password})
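# --- Usage sketch (assumption): how a client could exercise the echo handlers above
# with `requests`. The base URL and route are hypothetical placeholders; substitute
# the route actually registered for this resource.
import requests

BASE_URL = "http://127.0.0.1:5000/api/echo"  # hypothetical route

# GET: parameters travel in the query string (read by get_argument / request.args)
resp = requests.get(BASE_URL, params={"username": "alice", "password": "secret"})
print(resp.json())

# POST: parameters travel in the form body (read by get_argument / request.form)
resp = requests.post(BASE_URL, data={"username": "alice", "password": "secret"})
print(resp.json())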
def post(self):
    path = get_argument(constants.cons_request_path, required=True, help='文件路径为空')
    if os.path.isfile(path):
        img = cv2.imread(path)
        # cv2.imwrite("src.jpg", img)
        # Convert the image from OpenCV's BGR order to RGB, wuyunzhen.zh, 20190309
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # cv2.imwrite("rgb.jpg", img)
        tmp_embs = humanRecognizeSrv.get_embedding(img)
        if len(tmp_embs) > 0:
            logger.info("计算人脸特征:")
            emb = tmp_embs[0].reshape(-1)
            # Flatten the embedding ndarray to a list and join it into a comma-separated string
            emb_list = emb.tolist()
            data = ",".join(str(s) for s in emb_list)
            ai_print(data)
            return make_response(status=HttpStatus.SUCCESS, data={"rslt_cmnt": data})
        raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021009, path)
    else:
        raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021002, path)
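# --- Usage sketch (assumption): a client can turn the comma-separated embedding
# returned in "rslt_cmnt" back into a NumPy vector and compare two faces by cosine
# similarity. The helper names below are illustrative, not part of the service.
import numpy as np

def parse_embedding(rslt_cmnt: str) -> np.ndarray:
    # Inverse of ",".join(str(s) for s in emb_list) used by the handler above
    return np.array([float(x) for x in rslt_cmnt.split(",")], dtype=np.float32)

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# emb1 = parse_embedding(response1["rslt_cmnt"])
# emb2 = parse_embedding(response2["rslt_cmnt"])
# print(cosine_similarity(emb1, emb2))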
def post(self):
    ccbins_id = get_argument(constants.cons_request_ccbins_id, required=True, help='机构编号不能为空')
    # The face library is now reloaded periodically by a background job,
    # so the explicit reload is no longer needed here.
    # humanRecognizeSrv.reload_faces_by_ins(ccbins_id)
    return make_response(status=HttpStatus.SUCCESS)
def __check_args(self):
    get_argument(constants.cons_request_appid, required=True, help='应用组件编号不能为空')
    get_argument(constants.cons_request_trace_id, required=True, help='全局追踪号不能为空')
    ccbins_id = get_argument(constants.cons_request_ccbins_id, required=True, help='机构编号不能为空')
    empe_Grp = get_argument(constants.cons_request_empe_Grp, type=list, required=True, help='人员信息不能为空')

    empe_infs = []
    for empe in empe_Grp:
        empe_inf = {}
        empe_inf[constants.cons_request_ccbins_id] = ccbins_id
        empe_inf[constants.cons_request_empe_id] = empe[constants.cons_request_empe_id]
        empe_inf[constants.cons_request_usr_nm] = empe[constants.cons_request_usr_nm]
        empe_inf[constants.cons_request_mnt_tp_cd] = empe[constants.cons_request_mnt_tp_cd]
        if os.path.isfile(empe[constants.cons_request_path]):
            empe_inf[constants.cons_request_img] = cv2.imread(empe[constants.cons_request_path])
        else:
            raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021002,
                              empe[constants.cons_request_path])
        empe_infs.append(empe_inf)
    return empe_infs
def get(self):
    city = get_argument('city', required=True, help='请输入城市')
    if city is None:
        city = '武汉'
    # The weather API below requires Internet access
    url = "https://www.sojson.com/open/api/weather/json.shtml"
    request_data = {"city": city}
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
    }
    ret = requests.get(url, params=request_data, headers=headers)
    # print(ret.url)
    if ret.status_code == 200:
        result = json.loads(ret.text)
        return make_response(status=HttpStatus.SUCCESS, data=result)
    return make_response(status=HttpStatus.SERVER_ERROR)
def post(self):
    # print(request.__dict__)
    image_base64 = get_argument('image_base64', required=True, help='图片信息')
    # Decode the base64 payload back into raw image bytes
    img_data = base64.b64decode(image_base64)
    # Convert the bytes into a NumPy array (np.fromstring is deprecated, use frombuffer)
    img_array = np.frombuffer(img_data, np.uint8)
    # Decode into an image OpenCV can work with (BGR)
    img_data = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    # print(image_base64)
    # img_data = np.asarray(image_base64)
    dir = current_app.config.get('BASE_DIR')
    path = dir + "/image/" + datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f') + ".jpg"
    cv2.imwrite(path, img_data)
    return make_response(status=HttpStatus.SUCCESS)
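# --- Usage sketch (assumption): how a client could produce the `image_base64` field
# consumed by the handler above. The endpoint URL is a hypothetical placeholder.
import base64
import requests

with open("face.jpg", "rb") as f:
    image_base64 = base64.b64encode(f.read()).decode("ascii")

resp = requests.post("http://127.0.0.1:5000/api/image_upload",  # hypothetical route
                     data={"image_base64": image_base64})
print(resp.status_code)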
def post(self):
    ccbinsId = get_argument("ccbinsId", required=True, help='所属机构不能为空')
    indexCode = get_argument("indexCode", required=True, help='所属设备不能为空')
    status = get_argument("status", required=True, help='报警类型不能为空')
    cct_rcrd_inf = get_argument("cct_rcrd_inf", required=False)
    eqmt_id = get_argument("eqmt_id", required=False)
    use_width = get_argument("use_width", required=False)
    thresh = get_argument("thresh", required=False)
    time_step = get_argument("time_step", required=False)

    video_conn_info_po = VideoConnInfo()
    video_conn_info_po.id = gen_uuid()
    video_conn_info_po.ccbins_id = ccbinsId
    video_conn_info_po.cct_rcrd_inf = cct_rcrd_inf
    video_conn_info_po.eqmt_id = eqmt_id
    video_conn_info_po.index_code = indexCode
    video_conn_info_po.status = status
    video_conn_info_po.param1 = use_width
    video_conn_info_po.param2 = thresh
    video_conn_info_po.param3 = time_step
    video_conn_dao.add_record(video_conn_info_po)
    return make_response(status=HttpStatus.SUCCESS, data={"code": "success"})
def main():
    args = set_default(get_argument())
    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return
    elif initial_epoch == 0:
        # first training or training with snapshot
        weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
        temp = datetime.now()
        args.stamp = "{:02d}{:02d}{:02d}_{}_{:02d}_{:02d}_{:02d}".format(
            temp.year // 100,
            temp.month,
            temp.day,
            weekday[temp.weekday()],
            temp.hour,
            temp.minute,
            temp.second,
        )

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))

    ##########################
    # Generator
    ##########################
    trainset, valset = set_dataset(args, logger)
    train_generator = create_generator(args, trainset, "train", args.batch_size)
    # for t in train_generator:
    #     print(sorted(t[1]['main_output'].numpy().argmax(axis=0)))
    #     print(t[0]['main_input'].shape, t[0]['main_input'].numpy().min(),
    #           t[0]['main_input'].numpy().max(), t[1]['main_output'])
    val_generator = create_generator(args, valset, "val", args.batch_size)
    # for t in val_generator:
    #     print(t[0][0].shape, t[0][1], t[1])
    test_generator1 = create_generator(args, trainset, "val", 1)
    test_generator2 = create_generator(args, valset, "val", 1)
    # for t in test_generator2:
    #     print(t[0]['main_input'].shape, t[0]['arcface_input'])

    if args.class_weight:
        assert args.classes > 1
        from sklearn.utils.class_weight import compute_class_weight
        # np.int is removed in recent NumPy; the built-in int dtype is equivalent here
        train_label = trainset[:, 1:].astype(int).argmax(axis=1)
        class_weight = compute_class_weight(
            class_weight="balanced", classes=np.unique(train_label), y=train_label
        )
    else:
        class_weight = None

    logger.info("TOTAL STEPS OF DATASET FOR TRAINING")
    logger.info("========== trainset ==========")
    steps_per_epoch = args.steps or len(trainset) // args.batch_size
    logger.info("    --> {}".format(steps_per_epoch))
    # logger.info("    --> {}".format(trainset[:, 2:].sum(axis=0)))
    # logger.info("    --> {}".format(class_weight))
    logger.info("=========== valset ===========")
    validation_steps = len(valset) // args.batch_size
    logger.info("    --> {}".format(validation_steps))
    # logger.info("    --> {}".format(valset[:, 2:].sum(axis=0)))

    ##########################
    # Model
    ##########################
    model = create_model(args, logger)
    if args.summary:
        model.summary()
        print(model.inputs[0])
        print(model.get_layer(name="fc2"))
        return

    model = compile_model(args, model, steps_per_epoch)
    logger.info("Build model!")

    ##########################
    # Callbacks
    ##########################
    callbacks = create_callbacks(args, test_generator1, test_generator2, trainset, valset)
    logger.info("Build callbacks!")

    ##########################
    # Train
    ##########################
    model.fit(
        x=train_generator,
        epochs=args.epochs,
        callbacks=callbacks,
        validation_data=val_generator,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        class_weight=class_weight,
        initial_epoch=initial_epoch,
        verbose=args.verbose,
    )
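# --- Sketch (assumption): tf.keras `model.fit` expects `class_weight` as a dict that
# maps class index -> weight, while sklearn's compute_class_weight returns an ndarray.
# If the array form is rejected by fit, the conversion below can be applied to the
# result computed in main(); the labels here are illustrative only.
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

train_label = np.array([0, 0, 1, 2, 2, 2])  # illustrative labels
weights = compute_class_weight(class_weight="balanced",
                               classes=np.unique(train_label),
                               y=train_label)
class_weight = dict(enumerate(weights))  # e.g. {0: 1.0, 1: 2.0, 2: 0.666...}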
def __check_args(self):
    get_argument(constants.cons_request_appid, required=True, help='应用组件编号不能为空')
    get_argument(constants.cons_request_trace_id, required=True, help='全局追踪号不能为空')
    get_argument(constants.cons_request_content_type, required=True, help='文件类型不能为空')
    get_argument(constants.cons_request_function, required=True, help='功能不能为空')
    get_argument(constants.cons_request_ccbins_id, required=True, help='机构编号不能为空')
    model_type = get_argument(constants.cons_request_model_type, type=list,
                              required=True, help='模型服务类型不能为空')
    if not model_type:
        raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021008)

    path_list = get_argument(constants.cons_request_paths, required=False)
    img_list = get_argument(constants.cons_request_imgs, required=False)
    cv_image_list = []
    path_flag = False
    # Read images from the path list when it is provided
    if path_list:
        path_flag = True
        logger.debug('path字段非空,从路径信息' + ':'.join(path_list))
        for path in path_list:
            if os.path.isfile(path):
                img = cv2.imread(path)
                cv_image_list.append(img)
            else:
                raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021002, path)
    # Otherwise fall back to the base64-encoded image list
    elif img_list:
        logger.debug('path字段为空,取base64图片')
        for base64_img in img_list:
            if base64_img:
                img_data = base64.b64decode(base64_img)
                # Convert the bytes into a NumPy array (np.fromstring is deprecated, use frombuffer)
                img_array = np.frombuffer(img_data, np.uint8)
                # Decode into an image OpenCV can work with
                img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                cv_image_list.append(img)
            else:
                raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021006)
    else:
        raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021007)
    return path_flag, cv_image_list
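# --- Usage sketch (assumption): how a caller might populate the request body validated
# above, supplying either file paths or base64-encoded images. The literal field names
# come from the same `constants` module and are not spelled out here; the helper itself
# is hypothetical.
import base64

def build_request_body(constants, appid, trace_id, content_type, function, ccbins_id,
                       model_type, paths=None, images=None):
    body = {
        constants.cons_request_appid: appid,
        constants.cons_request_trace_id: trace_id,
        constants.cons_request_content_type: content_type,
        constants.cons_request_function: function,
        constants.cons_request_ccbins_id: ccbins_id,
        constants.cons_request_model_type: model_type,
    }
    if paths:
        body[constants.cons_request_paths] = paths
    elif images:
        # Encode raw image bytes as base64 strings, matching the fallback branch above
        body[constants.cons_request_imgs] = [
            base64.b64encode(img_bytes).decode("ascii") for img_bytes in images
        ]
    return body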
def main():
    args = set_default(get_argument())
    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return
    elif initial_epoch == 0:
        # first training or training with snapshot
        weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
        temp = datetime.now()
        args.stamp = "{:02d}{:02d}{:02d}_{}_{:02d}_{:02d}_{:02d}".format(
            temp.year // 100,
            temp.month,
            temp.day,
            weekday[temp.weekday()],
            temp.hour,
            temp.minute,
            temp.second,
        )

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))

    ##########################
    # Generator
    ##########################
    trainset, valset, testset = set_dataset(args, logger)
    train_generator = dataloader(args, trainset, 'train')
    val_generator = dataloader(args, valset, 'val', False)
    # for t in train_generator:
    #     print(t[0]['main_input'].shape, t[0]['main_input'].numpy().min(), t[0]['main_input'].numpy().max(),
    #           t[1]['main_output'].shape, t[1]['main_output'].numpy().min(), t[1]['main_output'].numpy().max(),
    #           t[1]['main_output'].numpy().argmax())
    #     print()

    logger.info("TOTAL STEPS OF DATASET FOR TRAINING")
    logger.info("========== trainset ==========")
    logger.info("    --> {}".format(len(trainset)))
    steps_per_epoch = args.steps or len(trainset) // args.batch_size
    logger.info("    --> {}".format(steps_per_epoch))
    logger.info("=========== valset ===========")
    validation_steps = len(valset) // args.batch_size
    logger.info("    --> {}".format(len(valset)))
    logger.info("    --> {}".format(validation_steps))

    ##########################
    # Model
    ##########################
    model = set_model.Backbone(args, logger)
    if args.summary:
        model.summary()
        return

    logger.info("Build model!")

    ##########################
    # Metric
    ##########################
    metrics = {
        'loss': tf.keras.metrics.Mean('loss', dtype=tf.float32),
        'acc': tf.keras.metrics.CategoricalAccuracy('acc', dtype=tf.float32),
        'val_loss': tf.keras.metrics.Mean('val_loss', dtype=tf.float32),
        'val_acc': tf.keras.metrics.CategoricalAccuracy('val_acc', dtype=tf.float32),
    }

    csvlogger, lr_scheduler = create_callbacks(args, steps_per_epoch, metrics)

    if args.optimizer == 'sgd':
        optimizer = tf.keras.optimizers.SGD(lr_scheduler, momentum=.9, decay=.0001)
    elif args.optimizer == 'adam':
        optimizer = tf.keras.optimizers.Adam(lr_scheduler)

    ##########################
    # Train
    ##########################
    # steps_per_epoch = 10
    # validation_steps = 10
    train_iterator = iter(train_generator)
    val_iterator = iter(val_generator)

    progress_desc_train = 'Train : Loss {:.4f} | Acc {:.4f}'
    progress_desc_val = 'Val : Loss {:.4f} | Acc {:.4f}'

    for epoch in range(initial_epoch, args.epochs):
        print('\nEpoch {}/{}'.format(epoch + 1, args.epochs))
        print('Learning Rate : {}'.format(optimizer.learning_rate(optimizer.iterations)))

        progressbar_train = tqdm.tqdm(tf.range(steps_per_epoch),
                                      desc=progress_desc_train.format(0, 0),
                                      leave=True)
        for step in progressbar_train:
            inputs = next(train_iterator)
            img = inputs[0]['main_input']
            label = inputs[1]['main_output']
            with tf.GradientTape() as tape:
                logits = tf.cast(model(img, training=True), tf.float32)
                loss = tf.keras.losses.categorical_crossentropy(label, logits)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            metrics['loss'].update_state(loss)
            metrics['acc'].update_state(label, logits)

            progressbar_train.set_description(
                progress_desc_train.format(metrics['loss'].result(),
                                           metrics['acc'].result()))
            progressbar_train.refresh()
        progressbar_val = tqdm.tqdm(tf.range(validation_steps),
                                    desc=progress_desc_val.format(0, 0),
                                    leave=True)
        for step in progressbar_val:
            val_inputs = next(val_iterator)
            val_img = val_inputs[0]['main_input']
            val_label = val_inputs[1]['main_output']

            val_logits = tf.cast(model(val_img, training=False), tf.float32)
            val_loss = tf.keras.losses.categorical_crossentropy(val_label, val_logits)
            val_loss = tf.reduce_mean(val_loss)

            metrics['val_loss'].update_state(val_loss)
            metrics['val_acc'].update_state(val_label, val_logits)

            progressbar_val.set_description(
                progress_desc_val.format(metrics['val_loss'].result(),
                                         metrics['val_acc'].result()))
            progressbar_val.refresh()

        logs = {k: v.result().numpy() for k, v in metrics.items()}
        logs['epoch'] = epoch + 1

        if args.checkpoint:
            ckpt_path = os.path.join(
                args.result_path,
                '{}/checkpoint/{:04d}_{:.4f}_{:.4f}.h5'.format(
                    args.stamp, epoch + 1, logs['val_acc'], logs['val_loss']))
            model.save_weights(ckpt_path)
            print('\nSaved at {}'.format(ckpt_path))

        if args.history:
            # DataFrame.append is deprecated in recent pandas; pd.concat is the replacement
            csvlogger = csvlogger.append(logs, ignore_index=True)
            csvlogger.to_csv(os.path.join(args.result_path,
                                          '{}/history/epoch.csv'.format(args.stamp)),
                             index=False)

        for k, v in metrics.items():
            v.reset_states()
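# --- Optional sketch (assumption): the per-batch training work above can be wrapped in
# tf.function to cut Python overhead. A minimal train step reusing the same objects
# (`model`, `optimizer`, and the metric instances) could look like this; it is a sketch,
# not part of the training script above.
import tensorflow as tf

@tf.function
def train_step(img, label, model, optimizer, loss_metric, acc_metric):
    with tf.GradientTape() as tape:
        logits = tf.cast(model(img, training=True), tf.float32)
        loss = tf.reduce_mean(
            tf.keras.losses.categorical_crossentropy(label, logits))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    loss_metric.update_state(loss)
    acc_metric.update_state(label, logits)
    return loss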
def get(self):
    email = get_argument('email')
    user = users.user_by_email(email)
    return make_response(status=HttpStatus.SUCCESS, data=user)
def post(self):
    name = get_argument('username', required=True, help='请输入用户名')
    password = get_argument('password', required=False, help='请输入密码')
    email = get_argument('email', required=False, help='请输入邮箱')
    users.create_user(name, email, password)
    return make_response(status=HttpStatus.SUCCESS)
def post(self):
    orgName = get_argument("orgName", required=True, help='所属机构不能为空')
    deviceName = get_argument("deviceName", required=True, help='所属设备不能为空')
    alarmType = get_argument("alarmType", required=True, help='报警类型不能为空')
    alarmLevel = get_argument("alarmLevel", required=True, help='报警等级不能为空')
    alarmTime = get_argument("alarmTime", required=True, help='报警时间不能为空')
    remark = get_argument("remark", required=False)
    # remark carries repr() of a pickled ndarray; eval() restores the bytes before unpickling.
    # remark is optional, so only decode it when present.
    img = pickle.loads(eval(remark)) if remark else None

    # json_data = request.form['json_data']
    # dict_data = json.loads(json_data)
    # orgName = dict_data.get('orgName', -1)
    # deviceName = dict_data.get('deviceName', -1)
    # alarmType = dict_data.get('alarmType')
    # alarmLevel = dict_data.get('alarmLevel')
    # alarmTime = dict_data.get('alarmTime')
    # f = request.files['myfile']
    # x = f.read()
    # print('>>>', x)
    # with open('D:/image/b123.jpg', 'ab') as f:
    #     f.write(x)
    # for i in f:
    #     print('@@', type(i))
    #     with open('D:/image/a.jpg', 'ab') as f:
    #         f.write(i)

    # Alarm category 00 maps to Hikvision head-count alarms: '131643' 单人异常, '131644' 多人异常
    # Alarm category 01 maps to Hikvision intrusion alarms: '131585' 跨域警戒线, '131588' 区域入侵, '131586' 人员进入
    if alarmType == '131643':
        alarmType = '00'
        vlt_err_alrm_inf = '单人异常'
    elif alarmType == '131644':
        alarmType = '00'
        vlt_err_alrm_inf = '多人异常'
    elif alarmType == '131585':
        alarmType = '01'
        vlt_err_alrm_inf = '跨域警戒线'
    elif alarmType == '131588':
        alarmType = '01'
        vlt_err_alrm_inf = '区域入侵'
    elif alarmType == '131586':
        alarmType = '01'
        vlt_err_alrm_inf = '人员进入'
    elif alarmType == '150002':
        alarmType = '01'
        vlt_err_alrm_inf = '非法人员入侵'
    else:
        raise AiException(HttpStatus.SERVER_ERROR, AiErrorCode.YCEA4021012)

    alarm_info_hk_po = AlarmInfoHk()
    alarm_info_hk_po.id = gen_uuid()
    alarm_info_hk_po.ccbins_id = orgName
    alarm_info_hk_po.eqmt_id = deviceName
    alarm_info_hk_po.bsn_cgycd = alarmType
    alarm_info_hk_po.vlt_err_alrm_inf = vlt_err_alrm_inf
    alarm_info_hk_po.alarm_level = alarmLevel
    alarm_info_hk_po.stdt_tm = datetime.strptime(alarmTime, '%Y%m%d %H:%M:%S')

    if remark and len(remark) > 0:
        try:
            cur_date = time.strftime('%Y%m%d', time.localtime(time.time()))
            folder_path = yaml_cfg.get('illegal_image_path_hk') + "/" + cur_date + '/' + orgName
            if not os.path.exists(folder_path):
                os.makedirs(folder_path)
            img_path = folder_path + "/" + alarm_info_hk_po.id + ".jpg"
            # Persist the decoded alarm image at the path recorded on the record
            cv.imwrite(img_path, img)
            # f.save(img_path)
            alarm_info_hk_po.uploadfiletrgtrfullnm = img_path
        except Exception:
            logger.error("存储hk告警图片失败, " + alarmType + "," + vlt_err_alrm_inf + "," + alarmTime)

    alarm_hk_dao.add_record(alarm_info_hk_po)
    return make_response(status=HttpStatus.SUCCESS, data={"code": "success"})
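# --- Sketch (assumption): since the handler above consumes `remark` with
# pickle.loads(eval(remark)), the sender presumably pickles the OpenCV frame (ndarray)
# and sends repr() of the resulting bytes. A minimal producer matching that round-trip;
# the helper and payload below are illustrative only. Note that eval() on request data
# is unsafe for untrusted clients; base64 encoding would be a safer transport.
import pickle
import numpy as np

def encode_remark(img: np.ndarray) -> str:
    # repr(bytes) on the sender plus eval() on the receiver reproduces the pickled bytes
    return repr(pickle.dumps(img))

# frame = cv2.imread("alarm_frame.jpg")
# payload = {"orgName": "...", "deviceName": "...", "alarmType": "131643",
#            "alarmLevel": "1", "alarmTime": "20240101 12:00:00",
#            "remark": encode_remark(frame)}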