Example #1
0
    def dygraph_sen(self):
        """Train LeNet briefly in dygraph mode, run filter-pruning
        sensitivity analysis, and snapshot the trained parameters.

        Returns:
            tuple: ``(sen, params)`` where ``sen`` is the sensitivity
            mapping returned by ``pruner.sensitive`` and ``params`` maps
            parameter names to their numpy values after training.

        Raises:
            ValueError: if ``self._pruner`` is not a supported criterion.
        """
        paddle.disable_static()
        net = paddle.vision.models.LeNet()
        optimizer = paddle.optimizer.Adam(learning_rate=0.001,
                                          parameters=net.parameters())
        inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
        labels = [Input([None, 1], 'int64', name='label')]
        model = paddle.Model(net, inputs, labels)
        model.prepare(optimizer, paddle.nn.CrossEntropyLoss(),
                      paddle.metric.Accuracy(topk=(1, 5)))

        model.fit(self.train_dataset, epochs=1, batch_size=128, verbose=1)
        # Evaluate once so the post-training accuracy is logged (verbose=1);
        # the return value itself is not needed here.
        model.evaluate(self.val_dataset, batch_size=128, verbose=1)
        if self._pruner == 'l1norm':
            pruner = L1NormFilterPruner(net, [1, 1, 28, 28])
        elif self._pruner == 'fpgm':
            pruner = FPGMFilterPruner(net, [1, 1, 28, 28])
        else:
            # Fail fast instead of crashing later with AttributeError on None.
            raise ValueError(
                "unsupported pruner criterion: {}".format(self._pruner))

        def eval_fn():
            # Metric the sensitivity analysis optimizes: top-1 accuracy.
            result = model.evaluate(self.val_dataset, batch_size=128)
            return result['acc_top1']

        sen = pruner.sensitive(
            eval_func=eval_fn,
            # Timestamped file name so repeated runs never reuse stale results.
            sen_file="_".join(["./dygraph_sen_",
                               str(time.time())]),
            target_vars=self._param_names)
        params = {}
        for param in net.parameters():
            params[param.name] = np.array(param.value().get_tensor())
        print(f'dygraph sen: {sen}')
        return sen, params
Example #2
0
def main():
    """Train LeNet on MNIST with the high-level paddle.Model API, or only
    evaluate it when ``FLAGS.eval_only`` is set."""
    # Plain statement instead of a conditional expression used only for
    # its side effect.
    if FLAGS.static:
        paddle.enable_static()
    # set_device() configures the global device; the handle is unused.
    paddle.set_device(FLAGS.device)

    train_dataset = MNIST(mode='train')
    val_dataset = MNIST(mode='test')

    inputs = [Input(shape=[None, 1, 28, 28], dtype='float32', name='image')]
    labels = [Input(shape=[None, 1], dtype='int64', name='label')]

    net = LeNet()
    model = paddle.Model(net, inputs, labels)

    optim = Momentum(learning_rate=FLAGS.lr,
                     momentum=.9,
                     parameter_list=model.parameters())

    model.prepare(optim, paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 2)))

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    if FLAGS.eval_only:
        model.evaluate(val_dataset, batch_size=FLAGS.batch_size)
        return

    model.fit(train_dataset,
              val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir=FLAGS.output_dir)
Example #3
0
    def test_static_multiple_gpus(self):
        """Train, evaluate and predict LeNet on MNIST, then check that the
        accuracy computed from predictions matches the evaluation metric."""
        set_device('gpu')

        image_shape = (-1, 1, 28, 28)
        bs = 128

        inputs = [Input(image_shape, 'float32', 'image')]
        labels = [Input([None, 1], 'int64', 'label')]

        model = Model(LeNet(), inputs, labels)
        optim = fluid.optimizer.Momentum(
            learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
        model.prepare(optim, CrossEntropyLoss(), Accuracy())

        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = MnistDataset(mode='test', return_label=False)

        # Log training progress every 50 steps.
        progbar = paddle.callbacks.ProgBarLogger(50)
        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=bs,
                  callbacks=progbar)

        metrics = model.evaluate(val_dataset, batch_size=bs)

        preds = model.predict(test_dataset, batch_size=bs, stack_outputs=True)

        # One prediction row per test sample.
        np.testing.assert_equal(preds[0].shape[0], len(test_dataset))

        accuracy = compute_accuracy(preds[0], val_dataset.labels)

        np.testing.assert_allclose(accuracy, metrics['acc'])
Example #4
0
def main(args):
    """Evaluate a LAC sequence-tagging model on the test split and print
    precision, recall and F1."""
    place = paddle.set_device(args.device)
    # Plain statement instead of a side-effect-only conditional expression.
    if args.dynamic:
        fluid.enable_dygraph(place)

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
        Input([None, None], 'int64', name='target'),
    ]
    labels = [Input([None, None], 'int64', name='labels')]

    dataset = LacDataset(args)
    eval_dataset = LacDataLoader(args, place, phase="test")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = paddle.Model(
        SeqTagging(
            args, vocab_size, num_labels, mode="test"),
        inputs=inputs,
        labels=labels)

    model.mode = "test"
    model.prepare(metrics=ChunkEval(num_labels))
    # skip_mismatch tolerates checkpoints with extra/missing variables.
    model.load(args.init_from_checkpoint, skip_mismatch=True)

    eval_result = model.evaluate(
        eval_dataset.dataloader, batch_size=args.batch_size)
    # Fixed typo in the printed label ("precison" -> "precision").
    print("precision: %.5f" % (eval_result["precision"][0]))
    print("recall: %.5f" % (eval_result["recall"][0]))
    print("F1: %.5f" % (eval_result["F1"][0]))
Example #5
0
 def make_inputs(self):
     """Build the static-graph input descriptors: word ids, sequence
     lengths and target ids."""
     word_input = Input([None, None], "int64", "word")
     length_input = Input([None], "int64", "lengths")
     target_input = Input([None, None], "int64", "target")
     return [word_input, length_input, target_input]
 def runTest(self):
     """Exercise UnstructuredPruner static masks around a short training
     loop and check the reported sparsity values before, during and after.
     """
     # Build the network and optimizer inside a fresh name scope so layer
     # names do not clash with other tests in the same process.
     with fluid.unique_name.guard():
         net = paddle.vision.models.LeNet()
         optimizer = paddle.optimizer.Adam(learning_rate=0.001,
                                           parameters=net.parameters())
         inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
         labels = [Input([None, 1], 'int64', name='label')]
     # Prune 55% of the weights by magnitude ratio.
     pruner = UnstructuredPruner(net, mode='ratio', ratio=0.55)
     net.train()
     # Zero all masks, apply them, then set all masks to one and freeze
     # them as static masks (helper defined elsewhere in this test class).
     self._update_masks(pruner, 0.0)
     pruner.update_params()
     self._update_masks(pruner, 1.0)
     pruner.set_static_masks()
     # NOTE(review): given the assertions below, total_sparse appears to
     # return 1.0 here and again after update_params() — confirm its exact
     # semantics against the UnstructuredPruner docs.
     sparsity_0 = UnstructuredPruner.total_sparse(net)
     for i, data in enumerate(self.train_loader):
         x_data = data[0]
         y_data = paddle.to_tensor(data[1])
         logits = net(x_data)
         loss = F.cross_entropy(logits, y_data)
         loss.backward()
         optimizer.step()
         optimizer.clear_grad()
         if i == 10: break  # a handful of batches is enough for this check
     # Training updates should move the measured value away from 1.0 ...
     sparsity_1 = UnstructuredPruner.total_sparse(net)
     # ... and re-applying the masks should restore it exactly.
     pruner.update_params()
     sparsity_2 = UnstructuredPruner.total_sparse(net)
     print(sparsity_0, sparsity_1, sparsity_2)
     self.assertEqual(sparsity_0, 1.0)
     self.assertEqual(sparsity_2, 1.0)
     self.assertLess(sparsity_1, 1.0)
Example #7
0
 def make_inputs(self):
     """Input descriptors for the decoder side: the encoder output and the
     target-to-source attention bias, with sizes taken from self.inputs."""
     hidden_size = self.inputs[0].shape[-1]
     n_head = self.inputs[1].shape[1]
     return [
         Input([None, None, hidden_size], "float32", "enc_output"),
         Input([None, n_head, None, None], "float32", "trg_src_attn_bias"),
     ]
Example #8
0
def main():
    """Train or evaluate an ImageNet classifier selected by FLAGS.arch,
    saving checkpoints under a timestamped output directory."""
    # Plain statement instead of a side-effect-only conditional expression.
    if FLAGS.static:
        paddle.enable_static()
    # set_device() configures the global device; the handle is unused.
    paddle.set_device(FLAGS.device)

    # list() instead of an identity comprehension.
    model_list = list(models.__dict__["__all__"])
    assert FLAGS.arch in model_list, "Expected FLAGS.arch in {}, but received {}".format(
        model_list, FLAGS.arch)
    # Only download pretrained weights when evaluating without a resume
    # checkpoint.
    net = models.__dict__[FLAGS.arch](
        pretrained=FLAGS.eval_only and not FLAGS.resume)

    inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model = paddle.Model(net, inputs, labels)

    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    train_dataset = ImageNetDataset(os.path.join(FLAGS.data, 'train'),
                                    mode='train',
                                    image_size=FLAGS.image_size,
                                    resize_short_size=FLAGS.resize_short_size)

    val_dataset = ImageNetDataset(os.path.join(FLAGS.data, 'val'),
                                  mode='val',
                                  image_size=FLAGS.image_size,
                                  resize_short_size=FLAGS.resize_short_size)

    # steps per epoch across all ranks, for the LR schedule.
    optim = make_optimizer(np.ceil(
        len(train_dataset) * 1. / FLAGS.batch_size / ParallelEnv().nranks),
                           parameter_list=model.parameters())

    model.prepare(optim, paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 5)))

    if FLAGS.eval_only:
        model.evaluate(val_dataset,
                       batch_size=FLAGS.batch_size,
                       num_workers=FLAGS.num_workers)
        return

    output_dir = os.path.join(
        FLAGS.output_dir, FLAGS.arch,
        time.strftime('%Y-%m-%d-%H-%M', time.localtime()))
    # Only rank 0 creates the directory to avoid a multi-process race.
    if ParallelEnv().local_rank == 0 and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    model.fit(train_dataset,
              val_dataset,
              batch_size=FLAGS.batch_size,
              epochs=FLAGS.epoch,
              save_dir=output_dir,
              num_workers=FLAGS.num_workers)
Example #9
0
 def make_inputs(self):
     """Input descriptors for decoder tests: decoder input, encoder output
     and the self/cross attention biases, sized from self.inputs."""
     d_model = self.inputs[0].shape[-1]
     n_head = self.inputs[-1].shape[1]
     return [
         Input([None, None, d_model], "float32", "dec_input"),
         Input([None, None, d_model], "float32", "enc_output"),
         Input([None, n_head, None, None], "float32", "self_attn_bias"),
         Input([None, n_head, None, None], "float32", "cross_attn_bias"),
     ]
Example #10
0
def do_train(args):
    """Train the seq2seq model (attention or base variant) with
    paddle.Model, tracking perplexity and checkpointing every epoch."""
    device = paddle.set_device("gpu" if args.use_gpu else "cpu")
    # Plain statement instead of a side-effect-only conditional expression.
    if args.eager_run:
        fluid.enable_dygraph(device)

    if args.enable_ce:
        # Fixed seeds for continuous-evaluation reproducibility.
        fluid.default_main_program().random_seed = 102
        fluid.default_startup_program().random_seed = 102

    # define model
    inputs = [
        Input([None, None], "int64", name="src_word"),
        Input([None], "int64", name="src_length"),
        Input([None, None], "int64", name="trg_word"),
    ]
    labels = [
        Input([None], "int64", name="trg_length"),
        Input([None, None, 1], "int64", name="label"),
    ]

    # data loaders
    train_loader, eval_loader = create_data_loader(args, device)

    # Select the class first; one get_model_cls call instead of two.
    model_maker = get_model_cls(AttentionModel if args.attention else BaseModel)
    model = paddle.Model(
        model_maker(args.src_vocab_size, args.tar_vocab_size, args.hidden_size,
                    args.hidden_size, args.num_layers, args.dropout),
        inputs=inputs,
        labels=labels)
    grad_clip = fluid.clip.GradientClipByGlobalNorm(
        clip_norm=args.max_grad_norm)
    optimizer = fluid.optimizer.Adam(
        learning_rate=args.learning_rate,
        parameter_list=model.parameters(),
        grad_clip=grad_clip)

    ppl_metric = PPL(reset_freq=100)  # ppl for every 100 batches
    model.prepare(optimizer, CrossEntropyCriterion(), ppl_metric)
    model.fit(train_data=train_loader,
              eval_data=eval_loader,
              epochs=args.max_epoch,
              eval_freq=1,
              save_freq=1,
              save_dir=args.model_path,
              callbacks=[TrainCallback(ppl_metric, args.log_freq)])
Example #11
0
def eval(args):
    """Prune a pre-trained classifier with an L1-norm filter pruner and
    evaluate it on CIFAR-10 or ImageNet.

    NOTE: shadows the builtin ``eval``; the name is kept for interface
    compatibility with existing callers.
    """
    paddle.set_device('gpu' if args.use_gpu else 'cpu')
    if args.data == "cifar10":
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        val_dataset = paddle.vision.datasets.Cifar10(mode="test",
                                                     backend="cv2",
                                                     transform=transform)
        class_dim = 10
        image_shape = [3, 224, 224]
        pretrain = False
    elif args.data == "imagenet":
        val_dataset = ImageNetDataset("data/ILSVRC2012",
                                      mode='val',
                                      image_size=224,
                                      resize_short_size=256)
        class_dim = 1000
        image_shape = [3, 224, 224]
        pretrain = True
    else:
        raise ValueError("{} is not supported.".format(args.data))
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    inputs = [Input([None] + image_shape, 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    # model definition
    net = models.__dict__[args.model](pretrained=pretrain,
                                      num_classes=class_dim)

    pruner = paddleslim.dygraph.L1NormFilterPruner(net, [1] + image_shape)
    params = get_pruned_params(args, net)
    # Same ratio for every selected parameter (dict comprehension instead
    # of a manual loop).
    ratios = {param: args.pruned_ratio for param in params}
    print("ratios: {}".format(ratios))
    # Prune along axis 0 (output channels).
    pruner.prune_vars(ratios, [0])

    model = paddle.Model(net, inputs, labels)
    model.prepare(None, paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 5)))
    model.load(args.checkpoint)
    model.evaluate(eval_data=val_dataset,
                   batch_size=args.batch_size,
                   verbose=1,
                   num_workers=8)
Example #12
0
    def runTest(self):
        """Run sensitivity analysis with three filter pruners and check
        that restore() brings accuracy back exactly, and that pruning with
        align=4 leaves channel counts divisible by 4."""
        with fluid.unique_name.guard():
            net = paddle.vision.models.LeNet()
            optimizer = paddle.optimizer.Adam(learning_rate=0.001,
                                              parameters=net.parameters())
            inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
            labels = [Input([None, 1], 'int64', name='label')]
            model = paddle.Model(net, inputs, labels)
            model.prepare(optimizer, paddle.nn.CrossEntropyLoss(),
                          paddle.metric.Accuracy(topk=(1, 5)))
            model.fit(self.train_dataset, epochs=1, batch_size=128, verbose=1)
            pruners = []
            pruner = L1NormFilterPruner(net, [1, 1, 28, 28], opt=optimizer)
            pruners.append(pruner)
            pruner = FPGMFilterPruner(net, [1, 1, 28, 28], opt=optimizer)
            pruners.append(pruner)
            pruner = L2NormFilterPruner(net, [1, 1, 28, 28], opt=optimizer)
            pruners.append(pruner)

            def eval_fn():
                # Metric used for all sensitivity / accuracy checks below.
                result = model.evaluate(self.val_dataset,
                                        batch_size=128,
                                        verbose=1)
                return result['acc_top1']

            # One shared timestamped file so later pruners reuse results.
            sen_file = "_".join(["./dygraph_sen_", str(time.time())])
            for pruner in pruners:
                sen = pruner.sensitive(eval_func=eval_fn,
                                       sen_file=sen_file,
                                       target_vars=self._param_names)
                model.fit(self.train_dataset,
                          epochs=1,
                          batch_size=128,
                          verbose=1)
                base_acc = eval_fn()
                plan = pruner.sensitive_prune(0.01)
                pruner.restore()
                restore_acc = eval_fn()
                # assertEqual reports both values on failure, unlike
                # assertTrue(a == b).
                self.assertEqual(restore_acc, base_acc)

                plan = pruner.sensitive_prune(0.01, align=4)
                for param in net.parameters():
                    if param.name in self._param_names:
                        print(f"name: {param.name}; shape: {param.shape}")
                        # align=4 must leave output channels divisible by 4.
                        self.assertEqual(param.shape[0] % 4, 0)
                pruner.restore()
Example #13
0
def main():
    """CycleGAN inference over paired test sets A/B: for each sample, save
    the fake, cycle-reconstructed and input images to FLAGS.output."""
    place = paddle.set_device(FLAGS.device)
    # Plain statement instead of a side-effect-only conditional expression.
    if FLAGS.dynamic:
        paddle.disable_static(place)

    im_shape = [-1, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')

    # Generators
    g_AB = Generator()
    g_BA = Generator()
    g = paddle.Model(
        GeneratorCombine(
            g_AB, g_BA, is_train=False),
        inputs=[input_A, input_B])

    g.prepare()
    g.load(FLAGS.init_model, skip_mismatch=True, reset_optimizer=True)

    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(FLAGS.output, exist_ok=True)

    test_data_A = data.TestDataA()
    test_data_B = data.TestDataB()

    for i in range(len(test_data_A)):
        data_A, A_name = test_data_A[i]
        data_B, B_name = test_data_B[i]
        data_A = np.array(data_A).astype("float32")
        data_B = np.array(data_B).astype("float32")

        fake_A, fake_B, cyc_A, cyc_B = g.test_batch([data_A, data_B])

        datas = [fake_A, fake_B, cyc_A, cyc_B, data_A, data_B]
        odatas = []
        for o in datas:
            # CHW -> HWC, then map [-1, 1] back to [0, 255] uint8.
            d = np.squeeze(o[0]).transpose([1, 2, 0])
            im = ((d + 1) * 127.5).astype(np.uint8)
            odatas.append(im)
        imsave(FLAGS.output + "/fakeA_" + B_name, odatas[0])
        imsave(FLAGS.output + "/fakeB_" + A_name, odatas[1])
        imsave(FLAGS.output + "/cycA_" + A_name, odatas[2])
        imsave(FLAGS.output + "/cycB_" + B_name, odatas[3])
        imsave(FLAGS.output + "/inputA_" + A_name, odatas[4])
        imsave(FLAGS.output + "/inputB_" + B_name, odatas[5])
def main(args):
    """Train LeNet on MNIST in FP32 or BF16 and verify that the accuracy
    computed from predictions matches the evaluation metric."""
    print('download training data and load training data')
    train_dataset = MnistDataset(mode='train')
    val_dataset = MnistDataset(mode='test')
    test_dataset = MnistDataset(mode='test', return_label=False)

    input_shape = (-1, 1, 28, 28)
    bs = 64

    image_inputs = [Input(input_shape, 'float32', 'image')]
    label_inputs = [Input([None, 1], 'int64', 'label')]

    model = Model(LeNet(), image_inputs, label_inputs)
    optim = paddle.optimizer.SGD(learning_rate=0.001)
    if args.bf16:
        # Ops forced to run in bfloat16 when BF16 training is requested.
        bf16_ops = {
            'matmul_v2', 'pool2d', 'relu', 'scale', 'elementwise_add',
            'reshape2', 'slice', 'reduce_mean', 'conv2d'
        }
        optim = amp.bf16.decorate_bf16(
            optim,
            amp_lists=amp.bf16.AutoMixedPrecisionListsBF16(
                custom_bf16_list=bf16_ops))

    # Configure the model for training.
    model.prepare(optim, paddle.nn.CrossEntropyLoss(), Accuracy())
    # Train.
    print('Training BF16' if args.bf16 else 'Training FP32')
    model.fit(train_dataset, epochs=2, batch_size=bs, verbose=1)
    eval_result = model.evaluate(val_dataset, batch_size=bs, verbose=1)

    preds = model.predict(test_dataset, batch_size=bs, stack_outputs=True)

    # One prediction row per test sample.
    np.testing.assert_equal(preds[0].shape[0], len(test_dataset))

    acc = compute_accuracy(preds[0], val_dataset.labels)

    print("acc", acc)
    print("eval_result['acc']", eval_result['acc'])

    np.testing.assert_allclose(acc, eval_result['acc'])
Example #15
0
def beam_search(FLAGS):
    """Evaluate the seq2seq attention OCR model using beam-search
    decoding over the test set."""
    # Plain statement instead of a side-effect-only conditional expression.
    if FLAGS.static:
        paddle.enable_static()
    device = paddle.set_device("gpu" if FLAGS.use_gpu else "cpu")

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in")
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask")
    ]
    # yapf: enable

    model = paddle.Model(Seq2SeqAttInferModel(encoder_size=FLAGS.encoder_size,
                                              decoder_size=FLAGS.decoder_size,
                                              emb_dim=FLAGS.embedding_dim,
                                              num_classes=FLAGS.num_classes,
                                              beam_size=FLAGS.beam_size),
                         inputs=inputs,
                         labels=labels)

    model.prepare(metrics=SeqBeamAccuracy())
    model.load(FLAGS.init_model)

    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(),
         data.PadTarget()])
    # Deterministic, complete pass over the test set.
    test_sampler = data.BatchSampler(test_dataset,
                                     batch_size=FLAGS.batch_size,
                                     drop_last=False,
                                     shuffle=False)
    test_loader = paddle.io.DataLoader(test_dataset,
                                       batch_sampler=test_sampler,
                                       places=device,
                                       num_workers=0,
                                       return_list=True,
                                       collate_fn=test_collate_fn)

    model.evaluate(eval_data=test_loader,
                   callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Example #16
0
def main(args):
    """Run LAC sequence-tagging prediction and write one line of
    octal-002-separated tags per input sample to args.output_file."""
    place = paddle.set_device(args.device)
    # Plain statement instead of a side-effect-only conditional expression.
    if args.dynamic:
        fluid.enable_dygraph(place)

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
    ]

    dataset = LacDataset(args)
    predict_dataset = LacDataLoader(args, place, phase="predict")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = paddle.Model(SeqTagging(args,
                                    vocab_size,
                                    num_labels,
                                    mode="predict"),
                         inputs=inputs)

    model.mode = "test"
    model.prepare()

    model.load(args.init_from_checkpoint, skip_mismatch=True)

    # Context manager guarantees the output file is closed even on error;
    # the original leaked the file handle.
    with open(args.output_file, "wb") as f:
        for data in predict_dataset.dataloader:
            input_data = data[0] if len(data) == 1 else data
            results, length = model.test_batch(inputs=flatten(input_data))
            for i in range(len(results)):
                word_len = length[i]
                word_ids = results[i][:word_len]
                # `wid` instead of `id` — avoid shadowing the builtin.
                tags = [dataset.id2label_dict[str(wid)] for wid in word_ids]
                if six.PY3:
                    tags = [bytes(tag, encoding="utf8") for tag in tags]
                    out = b"\002".join(tags) + b"\n"
                    f.write(out)
                else:
                    f.write("\002".join(tags) + "\n")
Example #17
0
def main():
    """Translate single images with a pre-trained CycleGAN generator:
    every file matching FLAGS.input is converted A->B or B->A (per
    FLAGS.input_style) and saved under FLAGS.output/single."""
    place = paddle.set_device(FLAGS.device)
    # Plain statement instead of a side-effect-only conditional expression.
    if FLAGS.dynamic:
        paddle.disable_static(place)

    im_shape = [-1, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')

    # Generators
    g_AB = Generator()
    g_BA = Generator()

    g = paddle.Model(GeneratorCombine(g_AB, g_BA, is_train=False),
                     inputs=[input_A, input_B])
    g.prepare()

    g.load(FLAGS.init_model, skip_mismatch=True, reset_optimizer=True)

    out_path = FLAGS.output + "/single"
    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(out_path, exist_ok=True)
    for f in glob.glob(FLAGS.input):
        image_name = os.path.basename(f)
        image = Image.open(f).convert('RGB')
        image = image.resize((256, 256), Image.BICUBIC)
        # Normalize to [-1, 1].
        image = np.array(image) / 127.5 - 1

        image = image[:, :, 0:3].astype("float32")
        # HWC -> NCHW.
        data = image.transpose([2, 0, 1])[np.newaxis, :]

        if FLAGS.input_style == "A":
            _, fake, _, _ = g.test_batch([data, data])
        elif FLAGS.input_style == "B":
            fake, _, _, _ = g.test_batch([data, data])
        else:
            # The original fell through to a NameError on `fake` here.
            raise ValueError(
                "unsupported input_style: {}".format(FLAGS.input_style))

        # CHW -> HWC, then [-1, 1] back to [0, 255] uint8.
        fake = np.squeeze(fake[0]).transpose([1, 2, 0])

        opath = "{}/fake{}{}".format(out_path, FLAGS.input_style, image_name)
        imsave(opath, ((fake + 1) * 127.5).astype(np.uint8))
        print("transfer {} to {}".format(f, opath))
Example #18
0
def main(args):
    """Train the LAC sequence-tagging model, optionally resuming from a
    checkpoint or initializing from a pre-trained model."""
    place = paddle.set_device(args.device)
    # Plain statement instead of a side-effect-only conditional expression.
    if args.dynamic:
        fluid.enable_dygraph(place)

    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
        Input([None, None], 'int64', name='target'),
    ]

    labels = [Input([None, None], 'int64', name='labels')]

    dataset = LacDataset(args)
    train_dataset = LacDataLoader(args, place, phase="train")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = paddle.Model(SeqTagging(args, vocab_size, num_labels,
                                    mode="train"),
                         inputs=inputs,
                         labels=labels)

    optim = AdamOptimizer(learning_rate=args.base_learning_rate,
                          parameter_list=model.parameters())

    model.prepare(optim, LacLoss(), ChunkEval(num_labels))

    if args.init_from_checkpoint:
        model.load(args.init_from_checkpoint)

    if args.init_from_pretrain_model:
        # Pre-trained weights only: do not restore optimizer state.
        model.load(args.init_from_pretrain_model, reset_optimizer=True)

    model.fit(train_dataset.dataloader,
              epochs=args.epoch,
              batch_size=args.batch_size,
              eval_freq=args.eval_freq,
              save_freq=args.save_freq,
              save_dir=args.save_dir)
Example #19
0
def main(FLAGS):
    """OCR seq2seq inference: load grayscale images from FLAGS.image_path,
    run beam-search decoding and print the decoded text for every beam."""
    device = paddle.set_device("gpu" if FLAGS.use_gpu else "cpu")
    # Plain statement instead of a side-effect-only conditional expression.
    if FLAGS.dynamic:
        fluid.enable_dygraph(device)

    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
    ]
    model = paddle.Model(
        Seq2SeqAttInferModel(encoder_size=FLAGS.encoder_size,
                             decoder_size=FLAGS.decoder_size,
                             emb_dim=FLAGS.embedding_dim,
                             num_classes=FLAGS.num_classes,
                             beam_size=FLAGS.beam_size), inputs)

    model.prepare()
    model.load(FLAGS.init_model)

    def load_gray(p):
        # Grayscale loader for ImageFolder (named def instead of a lambda
        # bound to a name, per PEP 8 E731).
        return Image.open(p).convert('L')

    test_dataset = ImageFolder(FLAGS.image_path, loader=load_gray)
    test_collate_fn = BatchCompose([data.Resize(), data.Normalize()])
    test_loader = fluid.io.DataLoader(test_dataset,
                                      places=device,
                                      num_workers=0,
                                      return_list=True,
                                      collate_fn=test_collate_fn)

    samples = test_dataset.samples
    ins_id = 0
    for image, in test_loader:
        # In static mode the loader wraps the batch in a list.
        image = image if FLAGS.dynamic else image[0]
        pred = model.test_batch([image])[0]
        # Ensure a beam axis exists, then put beams last -> (batch, ?, beam).
        pred = pred[:, :, np.newaxis] if len(pred.shape) == 2 else pred
        pred = np.transpose(pred, [0, 2, 1])
        for ins in pred:
            impath = samples[ins_id]
            ins_id += 1
            print('Image {}: {}'.format(ins_id, impath))
            for beam_idx, beam in enumerate(ins):
                id_list = postprocess(beam)
                word_list = index2word(id_list)
                sequence = "".join(word_list)
                print('{}: {}'.format(beam_idx, sequence))
Example #20
0
def compress(args):
    """Prune a pre-trained image classifier (FPGM or L1-norm criterion)
    and fine-tune it, reporting FLOPs before and after pruning.

    Raises:
        ValueError: if args.data or args.criterion is not supported.
    """
    paddle.set_device('gpu' if args.use_gpu else 'cpu')
    if args.data == "cifar10":

        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])

        train_dataset = paddle.vision.datasets.Cifar10(mode="train",
                                                       backend="cv2",
                                                       transform=transform)
        val_dataset = paddle.vision.datasets.Cifar10(mode="test",
                                                     backend="cv2",
                                                     transform=transform)
        class_dim = 10
        image_shape = [3, 32, 32]
        pretrain = False
    elif args.data == "imagenet":

        train_dataset = ImageNetDataset("data/ILSVRC2012",
                                        mode='train',
                                        image_size=224,
                                        resize_short_size=256)

        val_dataset = ImageNetDataset("data/ILSVRC2012",
                                      mode='val',
                                      image_size=224,
                                      resize_short_size=256)

        class_dim = 1000
        image_shape = [3, 224, 224]
        pretrain = True
    else:
        raise ValueError("{} is not supported.".format(args.data))
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    inputs = [Input([None] + image_shape, 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    # model definition
    net = models.__dict__[args.model](pretrained=pretrain,
                                      num_classes=class_dim)

    _logger.info("FLOPs before pruning: {}GFLOPs".format(
        flops(net, [1] + image_shape) / 1000))
    net.eval()
    if args.criterion == 'fpgm':
        pruner = paddleslim.dygraph.FPGMFilterPruner(net, [1] + image_shape)
    elif args.criterion == 'l1_norm':
        pruner = paddleslim.dygraph.L1NormFilterPruner(net, [1] + image_shape)
    else:
        # The original fell through to a NameError on `pruner` here.
        raise ValueError(
            "{} is not a supported criterion.".format(args.criterion))

    params = get_pruned_params(args, net)
    # Same ratio for every selected parameter; prune along axis 0
    # (output channels).
    ratios = {param: args.pruned_ratio for param in params}
    plan = pruner.prune_vars(ratios, [0])

    _logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format(
        flops(net, [1] + image_shape) / 1000, plan.pruned_flops))

    for param in net.parameters():
        if "conv2d" in param.name:
            print("{}\t{}".format(param.name, param.shape))

    net.train()
    model = paddle.Model(net, inputs, labels)
    steps_per_epoch = int(np.ceil(len(train_dataset) * 1. / args.batch_size))
    opt = create_optimizer(args, net.parameters(), steps_per_epoch)
    model.prepare(opt, paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 5)))
    if args.checkpoint is not None:
        model.load(args.checkpoint)
    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              epochs=args.num_epochs,
              # Per-device batch size when running multi-process.
              batch_size=args.batch_size // ParallelEnv().nranks,
              verbose=1,
              save_dir=args.model_path,
              num_workers=8)
Example #21
0
def do_predict(args):
    """Translate args.predict_file with a trained Transformer using beam
    search and write the n-best byte sequences to args.output_file."""
    device = paddle.set_device("gpu" if args.use_cuda else "cpu")
    # Plain statement instead of a side-effect-only conditional expression.
    if args.eager_run:
        fluid.enable_dygraph(device)

    inputs = [
        Input([None, None], "int64", name="src_word"),
        Input([None, None], "int64", name="src_pos"),
        Input([None, args.n_head, None, None],
              "float32",
              name="src_slf_attn_bias"),
        Input([None, args.n_head, None, None],
              "float32",
              name="trg_src_attn_bias"),
    ]

    # define data
    dataset = Seq2SeqDataset(fpattern=args.predict_file,
                             src_vocab_fpath=args.src_vocab_fpath,
                             trg_vocab_fpath=args.trg_vocab_fpath,
                             token_delimiter=args.token_delimiter,
                             start_mark=args.special_token[0],
                             end_mark=args.special_token[1],
                             unk_mark=args.special_token[2],
                             byte_data=True)
    args.src_vocab_size, args.trg_vocab_size, args.bos_idx, args.eos_idx, \
        args.unk_idx = dataset.get_vocab_summary()
    trg_idx2word = Seq2SeqDataset.load_dict(dict_path=args.trg_vocab_fpath,
                                            reverse=True,
                                            byte_data=True)
    batch_sampler = Seq2SeqBatchSampler(dataset=dataset,
                                        use_token_batch=False,
                                        batch_size=args.batch_size,
                                        max_length=args.max_length)
    data_loader = DataLoader(dataset=dataset,
                             batch_sampler=batch_sampler,
                             places=device,
                             collate_fn=partial(prepare_infer_input,
                                                bos_idx=args.bos_idx,
                                                eos_idx=args.eos_idx,
                                                src_pad_idx=args.eos_idx,
                                                n_head=args.n_head),
                             num_workers=0,
                             return_list=True)

    # define model
    model = paddle.Model(
        InferTransformer(args.src_vocab_size,
                         args.trg_vocab_size,
                         args.max_length + 1,
                         args.n_layer,
                         args.n_head,
                         args.d_key,
                         args.d_value,
                         args.d_model,
                         args.d_inner_hid,
                         args.prepostprocess_dropout,
                         args.attention_dropout,
                         args.relu_dropout,
                         args.preprocess_cmd,
                         args.postprocess_cmd,
                         args.weight_sharing,
                         args.bos_idx,
                         args.eos_idx,
                         beam_size=args.beam_size,
                         max_out_len=args.max_out_len), inputs)
    model.prepare()

    # load the trained model
    assert args.init_from_params, (
        "Please set init_from_params to load the infer model.")
    model.load(args.init_from_params)

    # TODO: use model.predict when support variant length
    # Context manager closes the output file even on error; the original
    # leaked the file handle.
    with open(args.output_file, "wb") as f:
        for data in data_loader():
            finished_seq = model.test_batch(inputs=flatten(data))[0]
            # Put the beam axis last so we iterate beams per instance.
            finished_seq = np.transpose(finished_seq, [0, 2, 1])
            for ins in finished_seq:
                for beam_idx, beam in enumerate(ins):
                    # Keep only the n-best beams.
                    if beam_idx >= args.n_best:
                        break
                    id_list = post_process_seq(beam, args.bos_idx,
                                               args.eos_idx)
                    # `idx` instead of `id` — avoid shadowing the builtin.
                    word_list = [trg_idx2word[idx] for idx in id_list]
                    sequence = b" ".join(word_list) + b"\n"
                    f.write(sequence)
Example #22
0
def main(FLAGS):
    """Train and evaluate the attention Seq2Seq OCR model.

    Args:
        FLAGS: parsed command-line flags (static/dygraph switch, device,
            model sizes, learning-rate schedule, batching and checkpoint
            options).
    """
    # Use a plain guard instead of a side-effecting conditional expression.
    if FLAGS.static:
        paddle.enable_static()
    device = paddle.set_device("gpu" if FLAGS.use_gpu else "cpu")

    # yapf: disable
    inputs = [
        Input([None, 1, 48, 384], "float32", name="pixel"),
        Input([None, None], "int64", name="label_in"),
    ]
    labels = [
        Input([None, None], "int64", name="label_out"),
        Input([None, None], "float32", name="mask"),
    ]
    # yapf: enable

    model = paddle.Model(
        Seq2SeqAttModel(
            encoder_size=FLAGS.encoder_size,
            decoder_size=FLAGS.decoder_size,
            emb_dim=FLAGS.embedding_dim,
            num_classes=FLAGS.num_classes),
        inputs,
        labels)

    # Optional two-stage piecewise LR decay; otherwise a constant rate.
    lr = FLAGS.lr
    if FLAGS.lr_decay_strategy == "piecewise_decay":
        learning_rate = fluid.layers.piecewise_decay(
            [200000, 250000], [lr, lr * 0.1, lr * 0.01])
    else:
        learning_rate = lr
    grad_clip = fluid.clip.GradientClipByGlobalNorm(FLAGS.gradient_clip)
    optimizer = fluid.optimizer.Adam(
        learning_rate=learning_rate,
        parameter_list=model.parameters(),
        grad_clip=grad_clip)

    model.prepare(optimizer, WeightCrossEntropy(), SeqAccuracy())

    # Train and test pipelines apply the same preprocessing chain.
    train_dataset = data.train()
    train_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    train_sampler = data.BatchSampler(
        train_dataset, batch_size=FLAGS.batch_size, shuffle=True)
    train_loader = paddle.io.DataLoader(
        train_dataset,
        batch_sampler=train_sampler,
        places=device,
        num_workers=FLAGS.num_workers,
        return_list=True,
        collate_fn=train_collate_fn)

    test_dataset = data.test()
    test_collate_fn = BatchCompose(
        [data.Resize(), data.Normalize(), data.PadTarget()])
    test_sampler = data.BatchSampler(
        test_dataset,
        batch_size=FLAGS.batch_size,
        drop_last=False,
        shuffle=False)
    test_loader = paddle.io.DataLoader(
        test_dataset,
        batch_sampler=test_sampler,
        places=device,
        num_workers=0,
        return_list=True,
        collate_fn=test_collate_fn)

    model.fit(train_data=train_loader,
              eval_data=test_loader,
              epochs=FLAGS.epoch,
              save_dir=FLAGS.checkpoint_path,
              callbacks=[LoggerCallBack(10, 2, FLAGS.batch_size)])
Exemple #23
0
def do_predict(args):
    """Run beam-search inference with a Seq2Seq (optionally attention) model
    and write the decoded top-beam sequence of each instance to
    ``args.infer_output_file``.
    """
    device = paddle.set_device("gpu" if args.use_gpu else "cpu")
    # Use a plain guard instead of a side-effecting conditional expression.
    if args.eager_run:
        fluid.enable_dygraph(device)

    # define model inputs
    inputs = [
        Input(
            [None, None], "int64", name="src_word"),
        Input(
            [None], "int64", name="src_length"),
    ]

    # build the inference dataloader
    dataset = Seq2SeqDataset(
        fpattern=args.infer_file,
        src_vocab_fpath=args.vocab_prefix + "." + args.src_lang,
        trg_vocab_fpath=args.vocab_prefix + "." + args.tar_lang,
        token_delimiter=None,
        start_mark="<s>",
        end_mark="</s>",
        unk_mark="<unk>")
    trg_idx2word = Seq2SeqDataset.load_dict(
        dict_path=args.vocab_prefix + "." + args.tar_lang, reverse=True)
    (args.src_vocab_size, args.trg_vocab_size, bos_id, eos_id,
     unk_id) = dataset.get_vocab_summary()
    batch_sampler = Seq2SeqBatchSampler(
        dataset=dataset, use_token_batch=False, batch_size=args.batch_size)
    data_loader = DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        places=device,
        collate_fn=partial(
            prepare_infer_input, bos_id=bos_id, eos_id=eos_id, pad_id=eos_id),
        num_workers=0,
        return_list=True)

    model_maker = AttentionInferModel if args.attention else BaseInferModel
    model = paddle.Model(
        model_maker(
            args.src_vocab_size,
            # BUG FIX: use the target vocab size just read from the dataset
            # above; the original read ``args.tar_vocab_size``, which is
            # never assigned in this function.
            args.trg_vocab_size,
            args.hidden_size,
            args.hidden_size,
            args.num_layers,
            args.dropout,
            bos_id=bos_id,
            eos_id=eos_id,
            beam_size=args.beam_size,
            max_out_len=256),
        inputs=inputs)

    # inference only: no optimizer/loss needed
    model.prepare()

    # load the trained model
    assert args.reload_model, (
        "Please set reload_model to load the infer model.")
    model.load(args.reload_model)

    # TODO(guosheng): use model.predict when support variant length
    with io.open(args.infer_output_file, 'w', encoding='utf-8') as f:
        for data in data_loader():
            finished_seq = model.test_batch(inputs=flatten(data))[0]
            # A 2-D result has no beam axis; add one so both cases are
            # handled uniformly before transposing to [batch, beam, seq].
            finished_seq = finished_seq[:, :, np.newaxis] if len(
                finished_seq.shape) == 2 else finished_seq
            finished_seq = np.transpose(finished_seq, [0, 2, 1])
            for ins in finished_seq:
                for beam_idx, beam in enumerate(ins):
                    id_list = post_process_seq(beam, bos_id, eos_id)
                    word_list = [trg_idx2word[id] for id in id_list]
                    sequence = " ".join(word_list) + "\n"
                    f.write(sequence)
                    # only the top beam is written per instance
                    break
 def make_inputs(self):
     """Build the input specs for the initial hidden and cell states.

     The feature width of each spec is taken from the last axis of the
     corresponding sample tensor in ``self.inputs``.
     """
     specs = []
     for idx, name in ((0, "init_hidden"), (1, "init_cell")):
         width = self.inputs[idx].shape[-1]
         specs.append(Input([None, width], "float64", name))
     return specs
Exemple #25
0
 def make_inputs(self):
     """Build the spec for a batch of variable-length feature inputs."""
     feature_dim = self.inputs[-1].shape[1]
     return [Input([None, feature_dim, None], "float32", "input")]
Exemple #26
0
def do_train(args):
    """Train a Transformer translation model with Paddle's high-level API.

    Args:
        args: parsed arguments (model sizes, vocab sizes, optimizer,
            checkpoint and pretrain-model settings).
    """
    device = paddle.set_device("gpu" if args.use_cuda else "cpu")
    # Use a plain guard instead of a side-effecting conditional expression.
    if args.eager_run:
        fluid.enable_dygraph(device)

    # Set seed for CE. Parse the "None"-or-int CLI value explicitly instead
    # of calling eval() on it: eval executes arbitrary code.
    seed_str = str(args.random_seed)
    random_seed = None if seed_str == "None" else int(seed_str)
    if random_seed is not None:
        fluid.default_main_program().random_seed = random_seed
        fluid.default_startup_program().random_seed = random_seed

    # define inputs (None dims are resolved at runtime)
    inputs = [
        Input(
            [None, None], "int64", name="src_word"),
        Input(
            [None, None], "int64", name="src_pos"),
        Input(
            [None, args.n_head, None, None],
            "float32",
            name="src_slf_attn_bias"),
        Input(
            [None, None], "int64", name="trg_word"),
        Input(
            [None, None], "int64", name="trg_pos"),
        Input(
            [None, args.n_head, None, None],
            "float32",
            name="trg_slf_attn_bias"),
        Input(
            [None, args.n_head, None, None],
            "float32",
            name="trg_src_attn_bias"),
    ]
    labels = [
        Input(
            [None, 1], "int64", name="label"),
        Input(
            [None, 1], "float32", name="weight"),
    ]

    # data loaders for training and evaluation
    (train_loader, train_steps_fn), (
        eval_loader, eval_steps_fn) = create_data_loader(args, device)

    # define model
    model = paddle.Model(
        Transformer(args.src_vocab_size, args.trg_vocab_size,
                    args.max_length + 1, args.n_layer, args.n_head, args.d_key,
                    args.d_value, args.d_model, args.d_inner_hid,
                    args.prepostprocess_dropout, args.attention_dropout,
                    args.relu_dropout, args.preprocess_cmd,
                    args.postprocess_cmd, args.weight_sharing, args.bos_idx,
                    args.eos_idx), inputs, labels)

    # Adam with the noam_decay warmup/decay learning-rate schedule.
    model.prepare(
        fluid.optimizer.Adam(
            learning_rate=fluid.layers.noam_decay(
                args.d_model,
                args.warmup_steps,
                learning_rate=args.learning_rate),
            beta1=args.beta1,
            beta2=args.beta2,
            epsilon=float(args.eps),
            parameter_list=model.parameters()),
        CrossEntropyCriterion(args.label_smooth_eps))

    ## init from some checkpoint, to resume the previous training
    if args.init_from_checkpoint:
        model.load(args.init_from_checkpoint)
    ## init from some pretrain models, to better solve the current task
    ## (optimizer state is reset in this case)
    if args.init_from_pretrain_model:
        model.load(args.init_from_pretrain_model, reset_optimizer=True)

    # model train
    model.fit(train_data=train_loader,
              eval_data=eval_loader,
              epochs=args.epoch,
              eval_freq=1,
              save_freq=1,
              save_dir=args.save_model,
              callbacks=[
                  TrainCallback(
                      args,
                      train_steps_fn=train_steps_fn,
                      eval_steps_fn=eval_steps_fn)
              ])
Exemple #27
0
def main():
    """Train a CycleGAN: two generators (A<->B, B<->A) and two
    discriminators, alternating generator and discriminator updates.
    """
    # BUG FIX: the original called paddle.enable_static(place) before
    # ``place`` was assigned (NameError whenever FLAGS.static is set), and
    # enable_static() takes no place argument. Set the device first, then
    # switch mode with a plain guard.
    place = paddle.set_device(FLAGS.device)
    if FLAGS.static:
        paddle.enable_static()

    im_shape = [None, 3, 256, 256]
    input_A = Input(im_shape, 'float32', 'input_A')
    input_B = Input(im_shape, 'float32', 'input_B')
    fake_A = Input(im_shape, 'float32', 'fake_A')
    fake_B = Input(im_shape, 'float32', 'fake_B')

    # Generators for both directions and one discriminator per domain.
    g_AB = Generator()
    g_BA = Generator()
    d_A = Discriminator()
    d_B = Discriminator()

    # The combined model trains both generators jointly; the per-direction
    # Models are kept for inference (test_batch) only.
    g = paddle.Model(GeneratorCombine(g_AB, g_BA, d_A, d_B),
                     inputs=[input_A, input_B])
    g_AB = paddle.Model(g_AB, [input_A])
    g_BA = paddle.Model(g_BA, [input_B])

    # Discriminators: each sees a real image and a generated fake.
    d_A = paddle.Model(d_A, [input_B, fake_B])
    d_B = paddle.Model(d_B, [input_A, fake_A])

    da_params = d_A.parameters()
    db_params = d_B.parameters()
    g_params = g_AB.parameters() + g_BA.parameters()

    da_optimizer = opt(da_params)
    db_optimizer = opt(db_params)
    g_optimizer = opt(g_params)

    # Inference-only models need no optimizer/loss.
    g_AB.prepare()
    g_BA.prepare()

    g.prepare(g_optimizer, GLoss())
    d_A.prepare(da_optimizer, DLoss())
    d_B.prepare(db_optimizer, DLoss())

    if FLAGS.resume:
        g.load(FLAGS.resume)

    loader_A = paddle.io.DataLoader(data.DataA(),
                                    places=place,
                                    shuffle=True,
                                    return_list=True,
                                    batch_size=FLAGS.batch_size)
    loader_B = paddle.io.DataLoader(data.DataB(),
                                    places=place,
                                    shuffle=True,
                                    return_list=True,
                                    batch_size=FLAGS.batch_size)

    # Pools of previously generated fakes, sampled when training the
    # discriminators (see the *_pool.get calls below).
    A_pool = data.ImagePool()
    B_pool = data.ImagePool()

    for epoch in range(FLAGS.epoch):
        for i, (data_A, data_B) in enumerate(zip(loader_A, loader_B)):
            # Dygraph loaders yield an extra nesting level; unwrap it.
            data_A = data_A[0][0] if not FLAGS.static else data_A[0]
            data_B = data_B[0][0] if not FLAGS.static else data_B[0]
            start = time.time()

            # Generate fakes, update generators, then each discriminator.
            fake_B = g_AB.test_batch(data_A)[0]
            fake_A = g_BA.test_batch(data_B)[0]
            g_loss = g.train_batch([data_A, data_B])[0]
            fake_pb = B_pool.get(fake_B)
            da_loss = d_A.train_batch([data_B, fake_pb])[0]

            fake_pa = A_pool.get(fake_A)
            db_loss = d_B.train_batch([data_A, fake_pa])[0]

            t = time.time() - start
            if i % 20 == 0:
                print("epoch: {} | step: {:3d} | g_loss: {:.4f} | " \
                      "da_loss: {:.4f} | db_loss: {:.4f} | s/step {:.4f}".
                      format(epoch, i, g_loss[0], da_loss[0], db_loss[0], t))
        g.save('{}/{}'.format(FLAGS.checkpoint_path, epoch))