Example #1
def main(args):
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

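    # Static-graph style input descriptors: padded word ids and the true sequence lengths.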
    inputs = [
        Input(
            [None, None], 'int64', name='words'), Input(
                [None], 'int64', name='length')
    ]

    dataset = LacDataset(args)
    predict_dataset = LacDataLoader(args, place, phase="predict")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels, mode="predict")

    model.mode = "test"
    model.prepare(inputs=inputs)

    model.load(args.init_from_checkpoint, skip_mismatch=True)

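    # Run inference batch by batch and write each sequence's predicted tags, joined with "\002".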
    f = open(args.output_file, "w")  # text mode: the tag strings below are written as str, not bytes
    for data in predict_dataset.dataloader:
        if len(data) == 1:
            input_data = data[0]
        else:
            input_data = data
        results, length = model.test_batch(inputs=flatten(input_data))
        for i in range(len(results)):
            word_len = length[i]
            word_ids = results[i][:word_len]
            tags = [dataset.id2label_dict[str(id)] for id in word_ids]
            f.write("\002".join(tags) + "\n")
    f.close()
Example #2
    def test_static_save_dynamic_load(self):
        path = os.path.join(tempfile.mkdtemp(),
                            '.cache_test_static_save_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path)

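        # Switch to dygraph mode and reload the parameters that were just saved from the static graph.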
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path)
        shutil.rmtree(path)
        fluid.disable_dygraph()
Example #3
    def test_static_save_dynamic_load(self):
        path = tempfile.mkdtemp()

        net = MyModel(classifier_activation=None)
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path + '/test')

        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        net = MyModel(classifier_activation=None)
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path + '/test')
        shutil.rmtree(path)
        fluid.disable_dygraph()
Example #4
    def test_test_batch(self):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

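        # Compute a reference output eagerly in dygraph mode; the loop below must reproduce it.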
        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_variable(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            model = MyModel()
            inputs = [Input([None, dim], 'float32', name='x')]
            model.prepare(inputs=inputs, device=device)
            out, = model.test_batch([data])

            np.testing.assert_allclose(out, ref)
            fluid.disable_dygraph() if dynamic else None
Example #5
    def test_static_save_dynamic_load(self):
        path = tempfile.mkdtemp()

        model = MyModel()
        inputs = [Input([None, 20], 'float32', name='x')]
        labels = [Input([None, 1], 'int64', name='label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=model.parameters())
        model.prepare(inputs=inputs,
                      optimizer=optim,
                      loss_function=CrossEntropy(average=False),
                      labels=labels)
        model.save(path + '/test')

        device = set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        model = MyModel()
        inputs = [Input([None, 20], 'float32', name='x')]
        labels = [Input([None, 1], 'int64', name='label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=model.parameters())
        model.prepare(inputs=inputs,
                      optimizer=optim,
                      loss_function=CrossEntropy(average=False),
                      labels=labels)
        model.load(path + '/test')
        shutil.rmtree(path)
        fluid.disable_dygraph()
Example #6
    def predict(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = LeNet()
        model.prepare(inputs=self.inputs)
        model.load(self.weight_path)
        output = model.predict(self.test_dataset,
                               batch_size=64,
                               stack_outputs=True)
        np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))

        acc = compute_acc(output[0], self.val_dataset.labels)
        np.testing.assert_allclose(acc, self.acc1)

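        # Also run evaluation through an explicit DataLoader built on a distributed batch sampler.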
        sampler = DistributedBatchSampler(self.test_dataset,
                                          batch_size=64,
                                          shuffle=False)

        test_loader = fluid.io.DataLoader(self.test_dataset,
                                          batch_sampler=sampler,
                                          places=self.device,
                                          return_list=True)

        model.evaluate(test_loader)

        fluid.disable_dygraph() if dynamic else None
Example #7
File: test_loss.py Project: zhengya01/hapi
    def test_soft_cross_entropy(self):
        class_num = 100
        batch_size = 128

        inputs = [randomize_probability(128, class_num) for _ in range(2)]

        labels = [
            np.random.randint(0, class_num, (batch_size, 1), dtype="int64")
            for _ in range(2)
        ]

        fluid.enable_dygraph()
        softmax_cross_entropy = SoftmaxWithCrossEntropy()

        softmax_cross_entropy(
            [fluid.dygraph.to_variable(x) for x in inputs],
            [fluid.dygraph.to_variable(label) for label in labels])

        softmax_cross_entropy = SoftmaxWithCrossEntropy(average=False)

        inputs = [randomize_probability(128, class_num)]

        labels = [
            np.random.randint(0, class_num, (batch_size, 1), dtype="int64")
        ]

        softmax_cross_entropy([fluid.dygraph.to_variable(x) for x in inputs],
                              fluid.dygraph.to_variable(labels[0]))
Example #8
def main(args):
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

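    # Static-graph style input and label descriptors used for evaluation.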
    inputs = [
        Input([None, None], 'int64', name='words'),
        Input([None], 'int64', name='length'),
        Input([None, None], 'int64', name='target')
    ]
    labels = [Input([None, None], 'int64', name='labels')]

    dataset = LacDataset(args)
    eval_dataset = LacDataLoader(args, place, phase="test")

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels, mode="test")

    model.mode = "test"
    model.prepare(metrics=ChunkEval(num_labels),
                  inputs=inputs,
                  labels=labels,
                  device=place)
    model.load(args.init_from_checkpoint, skip_mismatch=True)

    eval_result = model.evaluate(eval_dataset.dataloader,
                                 batch_size=args.batch_size)
    print("precison: %.5f" % (eval_result["precision"][0]))
    print("recall: %.5f" % (eval_result["recall"][0]))
    print("F1: %.5f" % (eval_result["F1"][0]))
Example #9
    def test_generator_uniform_random_dygraph(self):
        """Test Generator seed."""

        fluid.enable_dygraph()

        gen = paddle.seed(12312321111)
        x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0)

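        # Capture the generator state, restore it, and draw again: x2 should reproduce x1.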
        st1 = gen.get_state()
        x1 = fluid.layers.uniform_random(
            [10], dtype="float32", min=0.0, max=1.0)

        gen.set_state(st1)
        print(gen.get_state())
        x2 = fluid.layers.uniform_random(
            [10], dtype="float32", min=0.0, max=1.0)

        paddle.seed(12312321111)
        x3 = fluid.layers.uniform_random(
            [10], dtype="float32", min=0.0, max=1.0)

        x_np = x.numpy()
        x1_np = x1.numpy()
        x2_np = x2.numpy()
        x3_np = x3.numpy()

        if not core.is_compiled_with_cuda():
            self.assertTrue(np.allclose(x1_np, x2_np))
            self.assertTrue(np.allclose(x_np, x3_np))
Example #10
    def test_generator_sampling_id_dygraph(self):
        """Test Generator seed."""
        gen = paddle.seed(12312321111)

        fluid.enable_dygraph()

        gen.manual_seed(12312321111)
        x = fluid.layers.uniform_random(
            [10, 10], dtype="float32", min=0.0, max=1.0)
        y = fluid.layers.sampling_id(x)

        st1 = gen.get_state()
        x1 = fluid.layers.uniform_random(
            [10, 10], dtype="float32", min=0.0, max=1.0)
        y1 = fluid.layers.sampling_id(x)

        gen.set_state(st1)
        x2 = fluid.layers.uniform_random(
            [10, 10], dtype="float32", min=0.0, max=1.0)
        y2 = fluid.layers.sampling_id(x)

        gen.manual_seed(12312321111)
        x3 = fluid.layers.uniform_random(
            [10, 10], dtype="float32", min=0.0, max=1.0)
        y3 = fluid.layers.sampling_id(x)

        y_np = y.numpy()
        y1_np = y1.numpy()
        y2_np = y2.numpy()
        y3_np = y3.numpy()

        if not core.is_compiled_with_cuda():
            print(">>>>>>> sampling id dygraph >>>>>>>")
            self.assertTrue(np.allclose(y1_np, y2_np))
            self.assertTrue(np.allclose(y_np, y3_np))
Example #11
    def test_export_deploy_model(self):
        for dynamic in [True, False]:
            fluid.enable_dygraph() if dynamic else None
            # paddle.disable_static() if dynamic else None
            prog_translator = ProgramTranslator()
            prog_translator.enable(False) if not dynamic else None
            net = LeNetDeclarative()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            save_dir = tempfile.mkdtemp()
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            tensor_img = np.array(np.random.random((1, 1, 28, 28)),
                                  dtype=np.float32)
            ori_results = model.test_batch(tensor_img)
            model.save(save_dir, training=False)
            fluid.disable_dygraph() if dynamic else None

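            # Reload the exported inference model with a static-graph Executor and compare outputs.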
            place = (fluid.CPUPlace() if not fluid.is_compiled_with_cuda()
                     else fluid.CUDAPlace(0))
            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                exe = fluid.Executor(place)
                [inference_program, feed_target_names, fetch_targets
                 ] = (fluid.io.load_inference_model(dirname=save_dir,
                                                    executor=exe))
                results = exe.run(inference_program,
                                  feed={feed_target_names[0]: tensor_img},
                                  fetch_list=fetch_targets)
                np.testing.assert_allclose(results,
                                           ori_results,
                                           rtol=1e-5,
                                           atol=1e-7)
                shutil.rmtree(save_dir)
Example #12
def main(args):
    place = set_device(args.device)
    fluid.enable_dygraph(place) if args.dynamic else None

    inputs = [
        Input([None, args.max_seq_len], 'int64', name='words'),
        Input([None, args.max_seq_len], 'int64', name='target'),
        Input([None], 'int64', name='length')
    ]
    labels = [Input([None, args.max_seq_len], 'int64', name='labels')]

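    # Static-graph mode needs concrete feed variables; in dygraph mode feed_list stays None.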
    feed_list = None if args.dynamic else [
        x.forward() for x in inputs + labels
    ]
    dataset = LacDataset(args)
    train_path = os.path.join(args.data, "train.tsv")
    test_path = os.path.join(args.data, "test.tsv")

    train_generator = create_lexnet_data_generator(args,
                                                   reader=dataset,
                                                   file_name=train_path,
                                                   place=place,
                                                   mode="train")
    test_generator = create_lexnet_data_generator(args,
                                                  reader=dataset,
                                                  file_name=test_path,
                                                  place=place,
                                                  mode="test")

    train_dataset = create_dataloader(train_generator,
                                      place,
                                      feed_list=feed_list)
    test_dataset = create_dataloader(test_generator,
                                     place,
                                     feed_list=feed_list)

    vocab_size = dataset.vocab_size
    num_labels = dataset.num_labels
    model = SeqTagging(args, vocab_size, num_labels)

    optim = AdamOptimizer(learning_rate=args.base_learning_rate,
                          parameter_list=model.parameters())

    model.prepare(optim,
                  LacLoss(),
                  ChunkEval(num_labels),
                  inputs=inputs,
                  labels=labels,
                  device=args.device)

    if args.resume is not None:
        model.load(args.resume)

    model.fit(train_dataset,
              test_dataset,
              epochs=args.epoch,
              batch_size=args.batch_size,
              eval_freq=args.eval_freq,
              save_freq=args.save_freq,
              save_dir=args.save_dir)
Example #13
    def setUp(self):
        self.model_path = "test_jit_save_load/model"
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
Example #14
def prepare_distributed_context(place=None):
    if place is None:
        place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
            else fluid.CUDAPlace(0)

    strategy = ParallelStrategy()
    strategy.nranks = ParallelEnv().nranks
    strategy.local_rank = ParallelEnv().local_rank
    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
    strategy.current_endpoint = ParallelEnv().current_endpoint

    if strategy.nranks < 2:
        return

    global _parallel_context_initialized

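    # Build and run the communicator-init program in static mode, then switch back to dygraph.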
    if not _parallel_context_initialized and isinstance(place, fluid.CUDAPlace):

        def _init_context():
            communicator_prog = fluid.Program()
            init_communicator(communicator_prog, strategy.local_rank,
                              strategy.nranks, True, strategy.current_endpoint,
                              strategy.trainer_endpoints)
            exe = fluid.Executor(place)
            exe.run(communicator_prog)

        fluid.disable_dygraph()
        _init_context()
        fluid.enable_dygraph(place)

    else:
        # A bare assert on a non-empty string never fails; raise explicitly instead.
        raise AssertionError("Only support CUDAPlace for now.")

    _parallel_context_initialized = True
    return strategy
Example #15
def main():
    device = set_device(FLAGS.device)
    fluid.enable_dygraph(device) if FLAGS.dynamic else None

    train_dataset = MnistDataset(mode='train')
    val_dataset = MnistDataset(mode='test')

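    # Flattened 784-dimensional image vectors and integer class labels.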
    inputs = [Input([None, 784], 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    model = MNIST()
    optim = Momentum(learning_rate=FLAGS.lr,
                     momentum=.9,
                     parameter_list=model.parameters())

    model.prepare(optim,
                  CrossEntropy(),
                  Accuracy(topk=(1, 2)),
                  inputs,
                  labels,
                  device=FLAGS.device)
    if FLAGS.resume is not None:
        model.load(FLAGS.resume)

    model.fit(train_dataset,
              val_dataset,
              epochs=FLAGS.epoch,
              batch_size=FLAGS.batch_size,
              save_dir='mnist_checkpoint')
Example #16
    def test_test_batch(self):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

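        # Reference output computed eagerly in dygraph mode; predict_batch below should match it.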
        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_tensor(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            net = MyModel()
            inputs = [InputSpec([None, dim], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            out, = model.predict_batch([data])

            np.testing.assert_allclose(out, ref, rtol=1e-6)
            fluid.disable_dygraph() if dynamic else None
Example #17
def synthesis(text_input, args):
    local_rank = dg.parallel.Env().local_rank
    place = (fluid.CUDAPlace(local_rank) if args.use_gpu else fluid.CPUPlace())
    fluid.enable_dygraph(place)

    with open(args.config) as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    # output directory and summary writer (TensorBoard-style logs)
    if not os.path.exists(args.output):
        os.mkdir(args.output)

    writer = SummaryWriter(os.path.join(args.output, 'log'))

    model = FastSpeech(cfg['network'], num_mels=cfg['audio']['num_mels'])
    # Load parameters.
    global_step = io.load_parameters(model=model,
                                     checkpoint_path=args.checkpoint)
    model.eval()

    text = np.asarray(text_to_sequence(text_input))
    text = np.expand_dims(text, axis=0)
    pos_text = np.arange(1, text.shape[1] + 1)
    pos_text = np.expand_dims(pos_text, axis=0)

    text = dg.to_variable(text)
    pos_text = dg.to_variable(pos_text)

    _, mel_output_postnet = model(text, pos_text, alpha=args.alpha)

    result = np.exp(mel_output_postnet.numpy())
    mel_output_postnet = fluid.layers.transpose(
        fluid.layers.squeeze(mel_output_postnet, [0]), [1, 0])
    mel_output_postnet = np.exp(mel_output_postnet.numpy())
    basis = librosa.filters.mel(cfg['audio']['sr'], cfg['audio']['n_fft'],
                                cfg['audio']['num_mels'])
    inv_basis = np.linalg.pinv(basis)
    spec = np.maximum(1e-10, np.dot(inv_basis, mel_output_postnet))

    # synthesize a waveform with the ClariNet vocoder
    wav_clarinet = synthesis_with_clarinet(args.config_clarinet,
                                           args.checkpoint_clarinet, result,
                                           place)
    writer.add_audio(text_input + '(clarinet)', wav_clarinet, 0,
                     cfg['audio']['sr'])
    if not os.path.exists(os.path.join(args.output, 'samples')):
        os.mkdir(os.path.join(args.output, 'samples'))
    write(os.path.join(os.path.join(args.output, 'samples'), 'clarinet.wav'),
          cfg['audio']['sr'], wav_clarinet)

    # synthesize a waveform with Griffin-Lim
    wav = librosa.core.griffinlim(spec**cfg['audio']['power'],
                                  hop_length=cfg['audio']['hop_length'],
                                  win_length=cfg['audio']['win_length'])
    writer.add_audio(text_input + '(griffin-lim)', wav, 0, cfg['audio']['sr'])
    write(
        os.path.join(os.path.join(args.output, 'samples'), 'griffin-lim.wav'),
        cfg['audio']['sr'], wav)
    print("Synthesis completed !!!")
    writer.close()
Example #18
    def setUp(self):
        self.linear_size = 4
        self.model_path = "jit_prune_model_and_load/model"
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
Example #19
def add(x, y=None):
    fluid.enable_dygraph()
    with fluid.dygraph.guard():
        x = x.numpy() if not isinstance(x, np.ndarray) else x
        if y is not None:
            x += y
        return x
Example #20
        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_tensor(data))
            fluid.disable_dygraph()
            return output.numpy()
Example #21
File: eval.py Project: wangxiao1021/hapi
def test_bmn(args):
    device = set_device(args.device)
    fluid.enable_dygraph(device) if args.dynamic else None

    #config setting
    config = parse_config(args.config_file)
    eval_cfg = merge_configs(config, 'test', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    #input and video index
    inputs = [
        Input([None, config.MODEL.feat_dim, config.MODEL.tscale],
              'float32',
              name='feat_input')
    ]
    gt_iou_map = Input([None, config.MODEL.dscale, config.MODEL.tscale],
                       'float32',
                       name='gt_iou_map')
    gt_start = Input([None, config.MODEL.tscale], 'float32', name='gt_start')
    gt_end = Input([None, config.MODEL.tscale], 'float32', name='gt_end')
    video_idx = Input([None, 1], 'int64', name='video_idx')
    labels = [gt_iou_map, gt_start, gt_end, video_idx]

    #data
    eval_dataset = BmnDataset(eval_cfg, 'test')

    #model
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=args.weights is None)
    model.prepare(loss_function=BmnLoss(tscale, dscale),
                  metrics=BmnMetric(config, mode='test'),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    #load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + '.pdparams'), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    model.evaluate(eval_data=eval_dataset,
                   batch_size=eval_cfg.TEST.batch_size,
                   num_workers=eval_cfg.TEST.num_workers,
                   log_freq=args.log_interval)

    logger.info("[EVAL] eval finished")
Example #22
File: train.py Project: wzzju/hapi
def train_bmn(args):
    device = set_device(args.device)
    fluid.enable_dygraph(device) if args.dynamic else None

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)

    #config setting
    config = parse_config(args.config_file)
    train_cfg = merge_configs(config, 'train', vars(args))
    val_cfg = merge_configs(config, 'valid', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # input and label list
    inputs = [Input([None, feat_dim, tscale], 'float32', name='feat_input')]
    gt_iou_map = Input([None, dscale, tscale], 'float32', name='gt_iou_map')
    gt_start = Input([None, tscale], 'float32', name='gt_start')
    gt_end = Input([None, tscale], 'float32', name='gt_end')
    labels = [gt_iou_map, gt_start, gt_end]

    # data
    train_dataset = BmnDataset(train_cfg, 'train')
    val_dataset = BmnDataset(val_cfg, 'valid')

    # model
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=False)
    optim = optimizer(config, parameter_list=model.parameters())
    model.prepare(optimizer=optim,
                  loss_function=BmnLoss(tscale, dscale),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    # if resume weights is given, load resume weights directly
    if args.resume is not None:
        model.load(args.resume)
    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              batch_size=train_cfg.TRAIN.batch_size,
              epochs=train_cfg.TRAIN.epoch,
              eval_freq=args.valid_interval,
              log_freq=args.log_interval,
              save_dir=args.save_dir,
              shuffle=train_cfg.TRAIN.use_shuffle,
              num_workers=train_cfg.TRAIN.num_workers,
              drop_last=True)
Example #23
    def setUp(self):
        self.linear_size = 4
        self.model_path = "jit_multi_load/model"
        # enable dygraph mode
        fluid.enable_dygraph()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)
        # train and save base model
        self.train_and_save_orig_model()
Example #24
def load_D(path='data/anime-biggan-256px-run39-607250.discriminator'):
    place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id)
    fluid.enable_dygraph(place)

    discriminator = Discriminator(n_class=1000,
                                  chn=96,
                                  blocks_with_attention="B2",
                                  resolution=256)
    discriminator.set_dict(dg.load_dygraph(path)[0])
    model_cache.D = discriminator
Example #25
    def test_predict_without_inputs(self):
        fluid.enable_dygraph(self.device)
        model = Model(LeNet())
        model.prepare()
        model.load(self.weight_path)
        model._inputs = None
        output = model.predict(
            self.test_dataset, batch_size=64, stack_outputs=True)
        np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))
        fluid.disable_dygraph()
Example #26
    def evaluate(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = LeNet()
        model.prepare(metrics=Accuracy(),
                      inputs=self.inputs,
                      labels=self.labels)
        model.load(self.weight_path)
        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)
        fluid.disable_dygraph() if dynamic else None
Example #27
def train():
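    # NOTE: device, dev_count, and args are assumed to be defined at module scope in the original script.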
    fluid.enable_dygraph(device)
    processor = SentaProcessor(data_dir=args.data_dir,
                               vocab_path=args.vocab_path,
                               random_seed=args.random_seed)
    num_labels = len(processor.get_labels())

    num_train_examples = processor.get_num_examples(phase="train")

    max_train_steps = args.epoch * num_train_examples // args.batch_size // dev_count

    train_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='train',
        epoch=args.epoch,
        shuffle=False)

    eval_data_generator = processor.data_generator(
        batch_size=args.batch_size,
        padding_size=args.padding_size,
        places=device,
        phase='dev',
        epoch=args.epoch,
        shuffle=False)
    if args.model_type == 'cnn_net':
        model = CNN(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bow_net':
        model = BOW(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'gru_net':
        model = GRU(args.vocab_size, args.batch_size, args.padding_size)
    elif args.model_type == 'bigru_net':
        model = BiGRU(args.vocab_size, args.batch_size, args.padding_size)

    optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr,
                                        parameter_list=model.parameters())

    inputs = [Input([None, None], 'int64', name='doc')]
    labels = [Input([None, 1], 'int64', name='label')]

    model.prepare(optimizer,
                  CrossEntropy(),
                  Accuracy(topk=(1, )),
                  inputs,
                  labels,
                  device=device)

    model.fit(train_data=train_data_generator,
              eval_data=eval_data_generator,
              batch_size=args.batch_size,
              epochs=args.epoch,
              save_dir=args.checkpoints,
              eval_freq=args.eval_freq,
              save_freq=args.save_freq)
Example #28
    def test_parameters(self):
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            model = MyModel()
            inputs = [Input([None, 20], 'float32', name='x')]
            model.prepare(inputs=inputs)
            params = model.parameters()
            self.assertTrue(params[0].shape[0] == 20)
            self.assertTrue(params[0].shape[1] == 10)
            fluid.disable_dygraph() if dynamic else None
Example #29
    def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None):
        fluid.enable_dygraph(self.device) if dynamic else None
        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        net = LeNet()
        optim_new = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=net.parameters())
        model = Model(net, inputs=self.inputs, labels=self.labels)
        model.prepare(
            optim_new,
            loss=CrossEntropyLoss(reduction="sum"),
            metrics=Accuracy())
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

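        # Repeat training and evaluation with a capped iteration count, then with explicit distributed samplers and loaders.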
        model.fit(self.train_dataset,
                  batch_size=64,
                  shuffle=False,
                  num_iters=num_iters)

        result = model.evaluate(
            self.val_dataset, batch_size=64, num_iters=num_iters)

        train_sampler = DistributedBatchSampler(
            self.train_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank)
        val_sampler = DistributedBatchSampler(
            self.val_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank)

        train_loader = fluid.io.DataLoader(
            self.train_dataset,
            batch_sampler=train_sampler,
            places=self.device,
            return_list=True)

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=val_sampler,
            places=self.device,
            return_list=True)

        model.fit(train_loader, val_loader)
        fluid.disable_dygraph() if dynamic else None
Example #30
def load_G(path='data/anime-biggan-256px-run39-607250.generator'):
    place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id)
    fluid.enable_dygraph(place)

    generator = Generator(code_dim=140,
                          n_class=1000,
                          chn=96,
                          blocks_with_attention="B5",
                          resolution=256)
    generator.set_dict(dg.load_dygraph(path)[0])
    model_cache.G = generator