def main():
    """Entry point: parse CLI options, dump environment info, and launch parallel training."""
    run_args = parse_args()
    print_arguments(run_args)
    print_paddle_envs()
    # Distributed runs need the cluster environment attached to the args.
    if run_args.update_method != 'local':
        run_args.dist_env = dist_env()
    train_parallel(run_args)
def main():
    """Interactive entry point: ask the user which site to monitor, then start the pipeline.

    Prompts on stdin for a site number, stores it on the parsed args, builds the
    task-dispatch queues, and hands control to ``headquarters``.
    """
    args = parser.parse_args()
    print_arguments(args)
    # Menu of supported monitoring sites (1: Puxian Pagoda, 2: Elephant
    # Trunk Hill rock, 3: Guilin wartime ruins). Prompt strings are kept
    # verbatim — they are user-facing runtime output.
    print("1:普贤塔 2:象山岩 3:桂林抗战遗址")
    place_num = input("请输入地点标号:")
    # NOTE(review): the choice is stored as-is; there is no validation that
    # the input is actually '1'/'2'/'3' — confirm downstream handles others.
    args.monitoring_place = place_num
    td_q, mp_q, td_threshold = td_mp_set(args)
    headquarters(td_q, mp_q, td_threshold, args)
def main():
    """Validate the requested model category, register it, and start training.

    Raises:
        AssertionError: if ``--model_category`` is not a known category.
    """
    args = parser.parse_args()
    models_now = args.model_category
    allowed = ["models", "models_name"]
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently skip this validation. The exception
    # type is kept so any existing handlers still match.
    if models_now not in allowed:
        raise AssertionError("{} is not in lists: {}".format(models_now, allowed))
    set_models(models_now)
    print_arguments(args)
    train(args)
def main():
    """Parse arguments and run compression; exit non-zero on failed sanity checks.

    ``compress`` signals configuration problems via AssertionError; those are
    reported as "[CHECK]" messages and the process exits with status 1.
    """
    args = parser.parse_args()
    print_arguments(args)
    try:
        compress(args)
    except AssertionError as e:
        print("[CHECK] ", e)
        # `exit()` is injected by the `site` module and may be absent
        # (e.g. under `python -S`); SystemExit is always available and
        # produces the same exit code.
        raise SystemExit(1)
def main():
    """Echo the parsed options, normalize the target path, and prepare the dataset."""
    print_arguments(args)
    target = args.target_dir
    # Expand a leading '~' so the path also works when not resolved by a shell.
    if target.startswith('~'):
        target = os.path.expanduser(target)
        args.target_dir = target
    prepare_dataset(
        url=DATA_URL,
        md5sum=MD5_DATA,
        target_dir=args.target_dir,
        annotation_path=args.annotation_text)
def main():
    """Parse options, optionally pin RNG seeds for CE testing, then quantize."""
    run_args = parser.parse_args()
    print_arguments(run_args)
    if run_args.ce_test:
        # Continuous-evaluation runs must be reproducible: seed every RNG.
        ce_seed = 111
        np.random.seed(ce_seed)
        paddle.seed(ce_seed)
        random.seed(ce_seed)
    quantize(run_args)
def test():
    """Run inpainting inference over paired image/mask globs and write results.

    Loads a fluid inference model from ``args.pretrained_model`` and, for each
    (image, mask) pair matched by filename order, zeroes out the hole region,
    feeds the masked image plus mask to the network, composites the prediction
    back over the known pixels, and writes the result under ``args.resultfn``.
    """
    args = parser.parse_args()
    print_arguments(args)
    pretrained_model = args.pretrained_model
    place = fluid.CUDAPlace(0)  # GPU 0 is assumed to be present
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=pretrained_model, executor=exe, model_filename='model', params_filename='params')
    imgfn = args.imgfn
    maskfn = args.maskfn
    resultfn = args.resultfn
    if not os.path.exists(args.resultfn):
        os.makedirs(args.resultfn)
    # Sort both globs so images and masks pair up by lexicographic filename order.
    imglist = sorted(glob.glob(imgfn))
    masklist = sorted(glob.glob(maskfn))
    for imgfn_,maskfn_ in (list(zip(imglist,masklist))):
        print(imgfn_)
        print(maskfn_)
        print('')
        img = cv2.imread(imgfn_)
        mask = cv2.imread(maskfn_)
        # HWC BGR -> CHW RGB, scaled to [0, 1].
        img = img.transpose(2, 0, 1)[::-1]
        img = img.astype(np.float32)/255.0
        mask = mask.transpose(2, 0, 1)
        mask = mask.astype(np.float32)/255.0
        threshhold = 0.5
        # Binarize the mask, then invert: 1 = known pixel, 0 = hole to inpaint.
        mask = (mask >= threshhold).astype(np.float32)  # CHW RGB
        mask = 1 - mask
        img = img * mask  # zero out the hole region
        img0 = img
        # Network input: the masked image with one mask channel appended.
        result = exe.run(inference_program,feed={feed_target_names[0]: np.concatenate((img, mask[0:1]), axis=0)[np.newaxis,:], feed_target_names[1]: mask[np.newaxis,:]}, fetch_list=fetch_targets) if False else None
        img = np.concatenate((img, mask[0:1]), axis=0)
        result = exe.run(inference_program,feed={feed_target_names[0]: img[np.newaxis,:], feed_target_names[1]: mask[np.newaxis,:]}, fetch_list=fetch_targets)
        outimg = result[0][0]
        # Keep known pixels from the input; take the network output in the hole.
        outimg = outimg * (1-mask) + img0 * mask
        # CHW RGB -> HWC BGR, back to [0, 255] for cv2.imwrite.
        outimg = outimg[::-1].transpose(1, 2, 0)*255.0
        outfn = os.path.join(args.resultfn, os.path.basename(imgfn_))
        cv2.imwrite(outfn,outimg)
def main():
    """Train the model, optionally under a CUDA or CPU profiler."""
    opts = parser.parse_args()
    print_arguments(opts)
    if not opts.profile:
        train(opts)
    elif opts.use_gpu:
        # Profile GPU kernels, dumping CSV output for nvprof post-processing.
        with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
            train(opts)
    else:
        # CPU profiling, sorted by total time per op.
        with profiler.profiler("CPU", sorted_key='total') as cpuprof:
            train(opts)
def main():
    """Count characters in the manifest and write the vocabulary file."""
    counter = Counter()
    count_manifest(counter, args.manifest_path)
    # Rank characters by frequency, most common first.
    ranked = sorted(counter.items(), key=lambda item: item[1], reverse=True)
    # '?' is the placeholder token; keep only characters whose count reaches
    # the threshold (equivalent to the descending-sort early break).
    labels = ['?'] + [ch for ch, freq in ranked if freq >= args.count_threshold]
    with codecs.open(args.vocab_path, 'w', 'utf-8') as fout:
        # NOTE: the vocabulary is persisted as the repr() of a Python list.
        fout.write(str(labels))
    print("done.")
def main():
    """Build the CLI for INT8 inference-model evaluation, parse it, and run eval."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    # Shorthand that registers each option on arg_parser.
    add_arg = functools.partial(add_arguments, argparser=arg_parser)
    add_arg('model_path', str, "", "The inference model path.")
    add_arg('model_filename', str, "int8_infer.pdmodel", "model filename")
    add_arg('params_filename', str, "int8_infer.pdiparams", "params filename")
    add_arg('data_dir', str, "/dataset/ILSVRC2012/", "The ImageNet dataset root dir.")
    add_arg('test_samples', int, -1, "Test samples. If set -1, use all test samples")
    add_arg('batch_size', int, 16, "Batch size.")
    parsed = arg_parser.parse_args()
    print_arguments(parsed)
    eval(parsed)
def main():
    """Resolve dataset-specific settings from the CLI and kick off training."""
    args = parser.parse_args()
    print_arguments(args)
    check_cuda(args.use_gpu)

    data_dir = args.data_dir
    dataset = args.dataset
    assert dataset in ['pascalvoc', 'coco2014', 'coco2017']

    # File lists default to the PascalVOC layout; COCO variants override them.
    label_file = 'label_list'
    train_file_list = 'trainval.txt'
    val_file_list = 'test.txt'
    if dataset == 'coco2014':
        train_file_list = 'annotations/instances_train2014.json'
        val_file_list = 'annotations/instances_val2014.json'
    elif dataset == 'coco2017':
        train_file_list = 'annotations/instances_train2017.json'
        val_file_list = 'annotations/instances_val2017.json'

    mean_BGR = [float(v) for v in args.mean_BGR.split(",")]
    image_shape = [int(v) for v in args.image_shape.split(",")]

    # Fold the CLI overrides into the per-dataset hyper-parameter table.
    params = train_parameters[dataset]
    params['image_shape'] = image_shape
    params['batch_size'] = args.batch_size
    params['lr'] = args.learning_rate
    params['epoc_num'] = args.epoc_num
    params['ap_version'] = args.ap_version

    data_args = reader.Settings(
        dataset=args.dataset,
        data_dir=data_dir,
        label_file=label_file,
        resize_h=image_shape[1],
        resize_w=image_shape[2],
        mean_value=mean_BGR,
        apply_distort=True,
        apply_expand=True,
        ap_version=args.ap_version)
    train(args, data_args, params,
          train_file_list=train_file_list,
          val_file_list=val_file_list)
def main():
    """Select an execution device, then train or run inference for the chosen model.

    Exits with status 1 when ``--place`` names an unknown device.
    """
    args = parser.parse_args()
    set_models(args.model_category)
    print_arguments(args)
    # Bug fix: the original passed a single '+'-concatenated string to
    # os.path.join, bypassing join's separator handling entirely; pass the
    # components separately instead.
    model_path = os.path.join(args.model_save_dir, args.model)
    if args.place == "cuda":
        place = fluid.CUDAPlace(0)
    elif args.place == "xsim":
        place = fluid.XSIMPlace()
    elif args.place == "xpu":
        place = fluid.XPUPlace()
    else:
        print("Unsupported place!")  # fixed typo in the error message
        # Bug fix: bare `exit()` exited with status 0 on this error path and
        # relies on the site module; SystemExit(1) is always available and
        # reports failure to the caller.
        raise SystemExit(1)
    if args.run_mode == "train":
        train(args, model_path, place)
    elif args.run_mode in ("infer", "fused_infer"):
        infer(args, model_path, place)
def getF():
    """Load the detection model and return the result of inference."""
    # Model loading / configuration.
    args = parser.parse_args()
    print_arguments(args)
    data_args = reader.Settings(
        dataset=args.dataset,
        data_dir='data/Mydata',
        label_file='label_list',
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
        apply_distort=False,
        apply_expand=False,
        ap_version='')
    f = infer(args, data_args=data_args, model_dir=args.model_dir)
    return f
def main():
    """Parse CLI options, echo them, and run the conversion."""
    cli_args = parser.parse_args()
    print_arguments(cli_args)
    convert(cli_args)
def main():
    """Parse options, verify the CUDA setting, and start asynchronous training."""
    cfg = parser.parse_args()
    print_arguments(cfg)
    check_cuda(cfg.use_gpu)
    train_async(cfg)
def main():
    """Switch to static-graph mode, verify the CUDA flag, and run evaluation."""
    paddle.enable_static()
    eval_args = parser.parse_args()
    print_arguments(eval_args)
    check_cuda(eval_args.use_gpu)
    eval(eval_args)
# NOTE(review): fragment of a larger training script — the enclosing
# function's header (and the opening of the first DataLoader call) lies
# outside this chunk, so the code is left byte-identical. It appears to
# build train/test loaders and call model.fit — TODO confirm against the
# full file before restructuring.
num_workers=FLAGS.num_workers, return_list=True, collate_fn=train_collate_fn) test_dataset = data.test() test_collate_fn = BatchCompose( [data.Resize(), data.Normalize(), data.PadTarget()]) test_sampler = data.BatchSampler( test_dataset, batch_size=FLAGS.batch_size, drop_last=False, shuffle=False) test_loader = paddle.io.DataLoader( test_dataset, batch_sampler=test_sampler, places=device, num_workers=0, return_list=True, collate_fn=test_collate_fn) model.fit(train_data=train_loader, eval_data=test_loader, epochs=FLAGS.epoch, save_dir=FLAGS.checkpoint_path, callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)]) if __name__ == '__main__': FLAGS = parser.parse_args() print_arguments(FLAGS) main(FLAGS)
# NOTE(review): fragment — the header of the enclosing `train` function is
# outside this chunk, so the code is left byte-identical. It appears to be
# the tail of an epoch loop: per-epoch logging, checkpoint saving via
# paddle.save, and a post-epoch eval pass — TODO confirm against the full file.
auc_metric.accumulate(), 100 * args.batch_size / (time.time() - batch_begin))) batch_begin = time.time() total_loss = 0.0 batch_id += 1 logger.info("epoch %d is finished and takes %f s" % (epoch, time.time() - begin)) # save model and optimizer logger.info( "going to save epoch {} model and optimizer.".format(epoch)) paddle.save(deepfm.state_dict(), path=os.path.join(args.model_output_dir, "epoch_" + str(epoch), ".pdparams")) paddle.save(optimizer.state_dict(), path=os.path.join(args.model_output_dir, "epoch_" + str(epoch), ".pdopt")) logger.info("save epoch {} finished.".format(epoch)) # eval model deepfm.eval() eval(epoch) deepfm.train() paddle.enable_static() if __name__ == '__main__': args = utils.parse_args() utils.print_arguments(args) train(args)
def main():
    """Parse options, validate the CUDA flag, and run inference."""
    opts = parser.parse_args()
    print_arguments(opts)
    check_cuda(opts.use_gpu)
    infer(opts)
def main():
    """Switch to static-graph mode and run model compression."""
    paddle.enable_static()
    cli = parser.parse_args()
    print_arguments(cli)
    compress(cli)
def main():
    """Run inference using the CTC data reader."""
    options = parser.parse_args()
    print_arguments(options)
    inference(options, data_reader=ctc_reader)
def main():
    """Parse options, validate the GPU flag, and evaluate the model."""
    run_args = parser.parse_args()
    print_arguments(run_args)
    check_gpu(run_args.use_gpu)
    evaluate(run_args)
def main():
    """Parse CLI options, echo them, and run compression."""
    conf = parser.parse_args()
    print_arguments(conf)
    compress(conf)
# NOTE(review): fragment — this chunk starts mid-expression inside a loss
# function whose header is not visible, then continues with module-level
# DeepLabV3+ setup. Left byte-identical; it appears to compute a
# softmax-cross-entropy loss with ignore_index=255 and configure model
# globals — TODO confirm against the full file before restructuring.
label.astype('float32'), fluid.layers.assign(np.array([num_classes], 'float32')), force_cpu=False).astype('float32') logit = fluid.layers.transpose(logit, [0, 2, 3, 1]) logit = fluid.layers.reshape(logit, [-1, num_classes]) label = fluid.layers.reshape(label, [-1, 1]) label = fluid.layers.cast(label, 'int64') label_nignore = fluid.layers.reshape(label_nignore, [-1, 1]) loss = fluid.layers.softmax_with_cross_entropy(logit, label, ignore_index=255, numeric_stable_mode=True) label_nignore.stop_gradient = True label.stop_gradient = True return loss, label_nignore args = parser.parse_args() utility.print_arguments(args) models.clean() models.bn_momentum = 0.9997 models.dropout_keep_prop = 0.9 models.label_number = args.num_classes models.default_norm_type = args.norm_type deeplabv3p = models.deeplabv3p sp = fluid.Program() tp = fluid.Program() # only for ce if args.enable_ce: SEED = 102 sp.random_seed = SEED
def main():
    """Standard entry point: parse arguments, echo them, then train."""
    train_args = parser.parse_args()
    print_arguments(train_args)
    train(train_args)
def main():
    """Build the data manifest from the annotation files."""
    print_arguments(args)
    create_manifest(
        annotation_path=args.annotation_path,
        manifest_path_prefix=args.manifest_prefix)
def main():
    """Parse CLI options, echo them, and run inference."""
    cli_args = parser.parse_args()
    print_arguments(cli_args)
    infer(cli_args)
def main():
    """Validate the requested model name against the registry, then train.

    Raises:
        AssertionError: if ``--model`` is not one of ``nets.__all__``.
    """
    args = parser.parse_args()
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently skip this validation. The exception
    # type is unchanged so any existing handlers still match.
    if args.model not in nets.__all__:
        raise AssertionError("model is not in list %s" % nets.__all__)
    print_arguments(args)
    train(args)
# NOTE(review): fragment — this chunk starts mid-function (presumably a
# `test` that dumps detections and scores them with pycocotools) and ends
# mid-call inside reader.Settings(...). Left byte-identical; restructuring
# requires the surrounding lines — TODO confirm against the full file.
encode_func = unicode if six.PY2 else str outfile.write(encode_func(json.dumps(dts_res))) print("start evaluate using coco api") cocoGt = COCO(os.path.join(data_args.data_dir, test_list)) cocoDt = cocoGt.loadRes("detection_result.json") cocoEval = COCOeval(cocoGt, cocoDt, "bbox") cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() test() if __name__ == '__main__': args = parser.parse_args() print_arguments(args) assert args.dataset in ['coco2014', 'coco2017'] data_dir = './data/coco' if '2014' in args.dataset: test_list = 'annotations/instances_val2014.json' elif '2017' in args.dataset: test_list = 'annotations/instances_val2017.json' data_args = reader.Settings( dataset=args.dataset, data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir, label_file='', resize_h=args.resize_h, resize_w=args.resize_w, mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R], apply_distort=False,
def main():
    """Parse options, dump environment info, and launch distributed training."""
    run_args = parse_args()
    print_arguments(run_args)
    print_paddle_envs()
    # Always attach the distributed cluster environment before launching.
    run_args.dist_env = dist_env()
    train_parallel(run_args)