def main(args):
    """Run LAC sequence-tagging prediction and write one tag line per sample.

    Loads a trained SeqTagging checkpoint, iterates the predict dataloader,
    decodes each sequence's label ids into tag strings, and writes them to
    ``args.output_file`` joined by '\\002', one line per sequence.
    """
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length')
    ]

    dataset = LacDataset(args)
    predict_dataset = LacDataLoader(args, place, phase="predict")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels, mode="predict")

    model.mode = "test"
    model.prepare(inputs=inputs)
    model.load(args.init_from_checkpoint, skip_mismatch=True)

    # Bug fix: the file was opened in binary mode ("wb") while str data was
    # written to it, which raises TypeError on Python 3; it was also never
    # closed. Open in text mode inside a context manager instead.
    with open(args.output_file, "w") as f:
        for data in predict_dataset.dataloader:
            # A single-field batch may arrive wrapped in a one-element list.
            input_data = data[0] if len(data) == 1 else data
            results, length = model.test_batch(inputs=flatten(input_data))
            for i in range(len(results)):
                word_len = length[i]
                word_ids = results[i][:word_len]
                tags = [dataset.id2label_dict[str(id)] for id in word_ids]
                f.write("\002".join(tags) + "\n")
def test_bmn(args):
    """Evaluate a BMN model on the test split described by args.config_file.

    Builds the model from config, optionally loads explicit weights from
    ``args.weights`` (otherwise uses pretrained weights), then runs
    ``model.evaluate`` with BMN loss and metric.
    """
    device = set_device(args.device)
    fluid.enable_dygraph(device) if args.dynamic else None

    # config setting
    config = parse_config(args.config_file)
    eval_cfg = merge_configs(config, 'test', vars(args))
    # NOTE: removed unused local `feat_dim` (config.MODEL.feat_dim is read
    # directly where needed below).
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # input and video index
    inputs = [
        Input([None, config.MODEL.feat_dim, config.MODEL.tscale],
              'float32',
              name='feat_input')
    ]
    gt_iou_map = Input([None, config.MODEL.dscale, config.MODEL.tscale],
                       'float32',
                       name='gt_iou_map')
    gt_start = Input([None, config.MODEL.tscale], 'float32', name='gt_start')
    gt_end = Input([None, config.MODEL.tscale], 'float32', name='gt_end')
    video_idx = Input([None, 1], 'int64', name='video_idx')
    labels = [gt_iou_map, gt_start, gt_end, video_idx]

    # data
    eval_dataset = BmnDataset(eval_cfg, 'test')

    # model: download pretrained weights only when none are given explicitly
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=args.weights is None)
    model.prepare(loss_function=BmnLoss(tscale, dscale),
                  metrics=BmnMetric(config, mode='test'),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    # load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + '.pdparams'), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    model.evaluate(eval_data=eval_dataset,
                   batch_size=eval_cfg.TEST.batch_size,
                   num_workers=eval_cfg.TEST.num_workers,
                   log_freq=args.log_interval)
    logger.info("[EVAL] eval finished")
def infer_bmn(args):
    """Run BMN inference (proposal generation) on the infer split.

    Uses ``model.evaluate`` rather than ``model.predict`` because the BMN
    metric performs the required post-processing of raw outputs.
    """
    device = set_device(args.device)
    fluid.enable_dygraph(device) if args.dynamic else None

    # config setting
    config = parse_config(args.config_file)
    infer_cfg = merge_configs(config, 'infer', vars(args))
    # NOTE: removed unused local `feat_dim` (config.MODEL.feat_dim is read
    # directly where needed below).
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # input and video index
    inputs = [
        Input([None, config.MODEL.feat_dim, config.MODEL.tscale],
              'float32',
              name='feat_input')
    ]
    labels = [Input([None, 1], 'int64', name='video_idx')]

    # data
    infer_dataset = BmnDataset(infer_cfg, 'infer')

    # model: download pretrained weights only when none are given explicitly
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=args.weights is None)
    model.prepare(metrics=BmnMetric(config, mode='infer'),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    # load checkpoint
    if args.weights is not None:
        assert os.path.exists(
            args.weights +
            ".pdparams"), "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    # here use model.evaluate instead of model.predict, as post process is
    # required in our case
    model.evaluate(eval_data=infer_dataset,
                   batch_size=infer_cfg.TEST.batch_size,
                   num_workers=infer_cfg.TEST.num_workers,
                   log_freq=args.log_interval)
    logger.info("[INFER] infer finished")
def main(args):
    """Train the LAC sequence-tagging model according to ``args``."""
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
        Input([None, None], 'int64', name='target')
    ]
    labels = [Input([None, None], 'int64', name='labels')]

    # NOTE(review): feed_list is not referenced again; presumably calling
    # x.forward() registers feed variables in the static graph — confirm
    # before removing.
    if args.dynamic:
        feed_list = None
    else:
        feed_list = [x.forward() for x in inputs + labels]

    dataset = LacDataset(args)
    train_dataset = LacDataLoader(args, place, phase="train")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels, mode="train")

    optim = AdamOptimizer(learning_rate=args.base_learning_rate,
                          parameter_list=model.parameters())
    model.prepare(optim,
                  LacLoss(),
                  ChunkEval(num_labels),
                  inputs=inputs,
                  labels=labels,
                  device=args.device)

    # Resume from a checkpoint, or warm-start from a pretrained model with a
    # fresh optimizer state.
    if args.init_from_checkpoint:
        model.load(args.init_from_checkpoint)
    if args.init_from_pretrain_model:
        model.load(args.init_from_pretrain_model, reset_optimizer=True)

    model.fit(train_dataset.dataloader,
              epochs=args.epoch,
              batch_size=args.batch_size,
              eval_freq=args.eval_freq,
              save_freq=args.save_freq,
              save_dir=args.save_dir)
def test_test_batch(self, dynamic=True):
    """Model.test_batch matches a plain seeded dygraph forward pass."""
    feature_dim = 20
    sample = np.random.random(size=(4, feature_dim)).astype(np.float32)

    def expected_output():
        # Reference: run the same (seeded) network eagerly in dygraph mode.
        fluid.enable_dygraph(fluid.CPUPlace())
        self.set_seed()
        net = MyModel()
        net.eval()
        result = net(to_variable(sample))
        fluid.disable_dygraph()
        return result.numpy()

    ref = expected_output()
    for dynamic in [True, False]:
        device = set_device('cpu')
        fluid.enable_dygraph(device) if dynamic else None
        self.set_seed()

        model = MyModel()
        inputs = [Input([None, feature_dim], 'float32', name='x')]
        model.prepare(inputs=inputs, device=device)
        out, = model.test_batch([sample])

        np.testing.assert_allclose(out, ref)
        fluid.disable_dygraph() if dynamic else None
def export_deploy_model(self):
    """Save an inference model, reload it with an Executor, and verify the
    Executor output matches Model.test_batch on the same random image."""
    model = resnet18()
    inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
    model.prepare(inputs=inputs)

    self.save_dir = tempfile.mkdtemp()
    if not os.path.exists(self.save_dir):
        os.makedirs(self.save_dir)
    model.save_inference_model(self.save_dir)

    # Pick a CUDA place when available, otherwise fall back to CPU.
    if fluid.is_compiled_with_cuda():
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    [inference_program, feed_target_names, fetch_targets] = (
        fluid.io.load_inference_model(dirname=self.save_dir, executor=exe))

    tensor_img = np.array(np.random.random((1, 3, 224, 224)),
                          dtype=np.float32)
    ori_results = model.test_batch(tensor_img)
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)
    np.testing.assert_allclose(results, ori_results)
def predict(self, dynamic):
    """Run Model.predict over the held-out test set with stacked outputs."""
    fluid.enable_dygraph(self.device) if dynamic else None

    inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model = LeNet()
    model.load(self.weight_path)
    model.prepare(metrics=Accuracy(), inputs=inputs, labels=labels)

    test_dataloader = fluid.io.DataLoader(self.test_dataset,
                                          places=self.device,
                                          batch_size=64,
                                          return_list=True)
    output = model.predict(test_dataloader, stack_outputs=True)
def test_train_batch(self, dynamic=True):
    """Model.train_batch loss matches one hand-rolled dygraph SGD step."""
    feature_dim = 20
    features = np.random.random(size=(4, feature_dim)).astype(np.float32)
    targets = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

    def expected_loss():
        # Reference: a single manual optimization step in dygraph mode.
        fluid.enable_dygraph(fluid.CPUPlace())
        self.set_seed()
        net = MyModel()
        sgd = fluid.optimizer.SGD(learning_rate=0.001,
                                  parameter_list=net.parameters())
        net.train()
        prediction = net(to_variable(features))
        per_sample = fluid.layers.cross_entropy(prediction,
                                                to_variable(targets))
        total = fluid.layers.reduce_sum(per_sample)
        total.backward()
        sgd.minimize(total)
        net.clear_gradients()
        fluid.disable_dygraph()
        return total.numpy()

    ref = expected_loss()
    for dynamic in [True, False]:
        device = set_device('cpu')
        fluid.enable_dygraph(device) if dynamic else None
        self.set_seed()

        model = MyModel()
        optim2 = fluid.optimizer.SGD(learning_rate=0.001,
                                     parameter_list=model.parameters())
        inputs = [Input([None, feature_dim], 'float32', name='x')]
        labels = [Input([None, 1], 'int64', name='label')]
        model.prepare(optim2,
                      loss_function=CrossEntropy(average=False),
                      inputs=inputs,
                      labels=labels,
                      device=device)
        loss, = model.train_batch([features], [targets])

        np.testing.assert_allclose(loss.flatten(), ref.flatten())
        fluid.disable_dygraph() if dynamic else None
def main(FLAGS):
    """Evaluate a Seq2Seq-with-attention OCR model on the test split."""
    device = set_device("gpu" if FLAGS.use_gpu else "cpu")
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    # Build the evaluation data pipeline first.
    dataset = data.test()
    collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    sampler = data.BatchSampler(dataset,
                                batch_size=FLAGS.batch_size,
                                drop_last=False,
                                shuffle=False)
    loader = fluid.io.DataLoader(dataset,
                                 batch_sampler=sampler,
                                 places=device,
                                 num_workers=0,
                                 return_list=True,
                                 collate_fn=collate_fn)

    model = Seq2SeqAttModel(encoder_size=FLAGS.encoder_size,
                            decoder_size=FLAGS.decoder_size,
                            emb_dim=FLAGS.embedding_dim,
                            num_classes=FLAGS.num_classes)

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in")
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask")
    ]
    # yapf: enable

    model.prepare(loss_function=WeightCrossEntropy(),
                  metrics=SeqAccuracy(),
                  inputs=inputs,
                  labels=labels,
                  device=device)
    model.load(FLAGS.init_model)

    model.evaluate(eval_data=loader,
                   callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
def test_parameters(self):
    """Model.parameters() exposes layer weights in both graph modes."""
    for dynamic in [True, False]:
        device = set_device('cpu')
        fluid.enable_dygraph(device) if dynamic else None

        model = MyModel()
        model.prepare(inputs=[Input([None, 20], 'float32', name='x')])
        params = model.parameters()

        # The first parameter is expected to have shape (20, 10).
        self.assertTrue(params[0].shape[0] == 20)
        self.assertTrue(params[0].shape[1] == 10)
        fluid.disable_dygraph() if dynamic else None
def main():
    """Run CycleGAN inference on paired test sets and save all outputs:
    fake, cycle-reconstructed, and original images for both domains."""
    place = set_device(FLAGS.device)
    fluid.enable_dygraph(place) if FLAGS.dynamic else None

    # Generators
    g_AB = Generator()
    g_BA = Generator()
    g = GeneratorCombine(g_AB, g_BA, is_train=False)

    im_shape = [-1, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')
    g.prepare(inputs=[input_A, input_B], device=FLAGS.device)
    g.load(FLAGS.init_model, skip_mismatch=True, reset_optimizer=True)

    if not os.path.exists(FLAGS.output):
        os.makedirs(FLAGS.output)

    test_data_A = data.TestDataA()
    test_data_B = data.TestDataB()

    for i in range(len(test_data_A)):
        data_A, A_name = test_data_A[i]
        data_B, B_name = test_data_B[i]
        data_A = np.array(data_A).astype("float32")
        data_B = np.array(data_B).astype("float32")

        fake_A, fake_B, cyc_A, cyc_B = g.test_batch([data_A, data_B])

        # Convert each CHW tensor in [-1, 1] back to an HWC uint8 image.
        images = []
        for batch in (fake_A, fake_B, cyc_A, cyc_B, data_A, data_B):
            hwc = np.squeeze(batch[0]).transpose([1, 2, 0])
            images.append(((hwc + 1) * 127.5).astype(np.uint8))

        names = [
            "fakeA_" + B_name, "fakeB_" + A_name,
            "cycA_" + A_name, "cycB_" + B_name,
            "inputA_" + A_name, "inputB_" + B_name,
        ]
        for fname, im in zip(names, images):
            imsave(FLAGS.output + "/" + fname, im)
def test_save_load(self):
    """Model.save followed by Model.load round-trips in both graph modes."""
    path = tempfile.mkdtemp()
    for dynamic in [True, False]:
        device = set_device('cpu')
        fluid.enable_dygraph(device) if dynamic else None

        model = MyModel()
        model.prepare(inputs=[Input([None, 20], 'float32', name='x')])

        checkpoint = path + '/test'
        model.save(checkpoint)
        model.load(checkpoint)

        shutil.rmtree(path)
        fluid.disable_dygraph() if dynamic else None
def setUpClass(cls):
    """Train a reference dygraph LeNet once and cache its weights, accuracy,
    datasets, and dataloaders for the test cases in this class."""
    cls.device = set_device('gpu')
    fluid.enable_dygraph(cls.device)

    sp_num = 1280
    cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
    cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
    cls.test_dataset = MnistDataset(mode='test',
                                    return_label=False,
                                    sample_num=sp_num)

    cls.train_loader = fluid.io.DataLoader(cls.train_dataset,
                                           places=cls.device,
                                           batch_size=64)
    cls.val_loader = fluid.io.DataLoader(cls.val_dataset,
                                         places=cls.device,
                                         batch_size=64)
    cls.test_loader = fluid.io.DataLoader(cls.test_dataset,
                                          places=cls.device,
                                          batch_size=64)

    # Fix seeds before constructing the network so the run is reproducible.
    seed = 333
    fluid.default_startup_program().random_seed = seed
    fluid.default_main_program().random_seed = seed

    reference_net = LeNetDygraph()
    cls.init_param = reference_net.state_dict()
    dynamic_train(reference_net, cls.train_loader)
    cls.acc1 = dynamic_evaluate(reference_net, cls.val_loader)

    cls.inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
    cls.labels = [Input([None, 1], 'int64', name='label')]

    cls.save_dir = tempfile.mkdtemp()
    cls.weight_path = os.path.join(cls.save_dir, 'lenet')
    fluid.dygraph.save_dygraph(reference_net.state_dict(), cls.weight_path)

    fluid.disable_dygraph()
def main():
    """Translate single images with a trained CycleGAN generator pair.

    Reads images matched by FLAGS.input, runs each through the generator
    for the chosen direction (FLAGS.input_style, "A" or "B"), and writes
    the translated image under FLAGS.output/single.

    Raises:
        ValueError: if FLAGS.input_style is neither "A" nor "B".
    """
    place = set_device(FLAGS.device)
    fluid.enable_dygraph(place) if FLAGS.dynamic else None

    # Generators
    g_AB = Generator()
    g_BA = Generator()
    g = GeneratorCombine(g_AB, g_BA, is_train=False)

    im_shape = [-1, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')
    g.prepare(inputs=[input_A, input_B], device=FLAGS.device)
    g.load(FLAGS.init_model, skip_mismatch=True, reset_optimizer=True)

    out_path = FLAGS.output + "/single"
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    for f in glob.glob(FLAGS.input):
        image_name = os.path.basename(f)
        image = Image.open(f).convert('RGB')
        image = image.resize((256, 256), Image.BICUBIC)
        # Scale pixels from [0, 255] into [-1, 1] before feeding the net.
        image = np.array(image) / 127.5 - 1
        image = image[:, :, 0:3].astype("float32")
        data = image.transpose([2, 0, 1])[np.newaxis, :]

        # Bug fix: an unrecognized input_style previously left `fake`
        # unbound and crashed with a NameError; fail fast instead.
        if FLAGS.input_style == "A":
            _, fake, _, _ = g.test_batch([data, data])
        elif FLAGS.input_style == "B":
            fake, _, _, _ = g.test_batch([data, data])
        else:
            raise ValueError("input_style must be 'A' or 'B', got {}".format(
                FLAGS.input_style))
        fake = np.squeeze(fake[0]).transpose([1, 2, 0])

        opath = "{}/fake{}{}".format(out_path, FLAGS.input_style, image_name)
        imsave(opath, ((fake + 1) * 127.5).astype(np.uint8))
        print("transfer {} to {}".format(f, opath))