Code Example #1
    def predict(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = LeNet()
        model.prepare(inputs=self.inputs)
        model.load(self.weight_path)
        output = model.predict(self.test_dataset,
                               batch_size=64,
                               stack_outputs=True)
        np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))

        acc = compute_acc(output[0], self.val_dataset.labels)
        np.testing.assert_allclose(acc, self.acc1)

        sampler = DistributedBatchSampler(self.test_dataset,
                                          batch_size=64,
                                          shuffle=False)

        test_loader = fluid.io.DataLoader(self.test_dataset,
                                          batch_sampler=sampler,
                                          places=self.device,
                                          return_list=True)

        model.evaluate(test_loader)

        fluid.disable_dygraph() if dynamic else None
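
These tests toggle imperative (dygraph) execution with the one-line expression form fluid.enable_dygraph(device) if dynamic else None. Below is a minimal sketch of the same guard written as an explicit helper; the names run_in_mode and fn are illustrative and not part of the Paddle tests.

import paddle
import paddle.fluid as fluid

def run_in_mode(fn, dynamic=True, device='cpu'):
    # Run fn in dygraph (imperative) mode when dynamic is True,
    # otherwise leave the default static-graph mode in place.
    place = paddle.set_device(device)
    if dynamic:
        fluid.enable_dygraph(place)
    try:
        return fn()
    finally:
        if dynamic:
            fluid.disable_dygraph()
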
Code Example #2
    def test_static_save_dynamic_load(self):
        path = tempfile.mkdtemp()

        model = MyModel()
        inputs = [Input([None, 20], 'float32', name='x')]
        labels = [Input([None, 1], 'int64', name='label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=model.parameters())
        model.prepare(inputs=inputs,
                      optimizer=optim,
                      loss_function=CrossEntropy(average=False),
                      labels=labels)
        model.save(path + '/test')

        device = set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        model = MyModel()
        inputs = [Input([None, 20], 'float32', name='x')]
        labels = [Input([None, 1], 'int64', name='label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=model.parameters())
        model.prepare(inputs=inputs,
                      optimizer=optim,
                      loss_function=CrossEntropy(average=False),
                      labels=labels)
        model.load(path + '/test')
        shutil.rmtree(path)
        fluid.disable_dygraph()
Code Example #3
    def test_test_batch(self):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_tensor(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            net = MyModel()
            inputs = [InputSpec([None, dim], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            out, = model.predict_batch([data])

            np.testing.assert_allclose(out, ref, rtol=1e-6)
            fluid.disable_dygraph() if dynamic else None
Code Example #4
    def test_static_save_dynamic_load(self):
        path = os.path.join(tempfile.mkdtemp(),
                            '.cache_test_static_save_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path)

        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path)
        shutil.rmtree(path)
        fluid.disable_dygraph()
Code Example #5
    def test_generator_uniform_random_static(self):
        fluid.disable_dygraph()

        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list which doesn't contain tensor Variable.
            result_1 = fluid.layers.uniform_random(shape=[3, 4])
            result_2 = fluid.layers.uniform_random(shape=[3, 4])

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(train_program,
                           feed={},
                           fetch_list=[result_1, result_2])
            #gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(train_program,
                           feed={},
                           fetch_list=[result_1, result_2])

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                self.assertTrue(np.allclose(out1_res1, out2_res1))
                self.assertTrue(np.allclose(out1_res2, out2_res2))
                self.assertTrue(not np.allclose(out1_res2, out1_res1))
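
The example above checks that re-seeding the generator reproduces uniform_random results in static-graph mode. For comparison, a minimal dygraph sketch of the same idea (illustrative, not taken from the test file); on a CPU build, re-seeding with paddle.seed should reproduce the same draws.

import numpy as np
import paddle

paddle.disable_static()            # imperative mode
paddle.seed(123123143)
first = paddle.rand([3, 4]).numpy()
paddle.seed(123123143)             # reset the global generator state
second = paddle.rand([3, 4]).numpy()
assert np.allclose(first, second)
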
Code Example #6
    def test_generator_randperm_static(self):

        fluid.disable_dygraph()

        paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list which doesn't contain tensor Variable.
            result_1 = paddle.randperm(10)
            result_2 = paddle.randperm(10)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(train_program,
                           feed={},
                           fetch_list=[result_1, result_2])

            paddle.seed(123123143)
            out2 = exe.run(train_program,
                           feed={},
                           fetch_list=[result_1, result_2])

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                print(">>>>>>> randperm static >>>>>>>")
                self.assertTrue(np.allclose(out1_res1, out2_res1))
                self.assertTrue(np.allclose(out1_res2, out2_res2))
                self.assertTrue(not np.allclose(out1_res2, out1_res1))
Code Example #7
    def test_export_deploy_model(self):
        for dynamic in [True, False]:
            fluid.enable_dygraph() if dynamic else None
            # paddle.disable_static() if dynamic else None
            prog_translator = ProgramTranslator()
            prog_translator.enable(False) if not dynamic else None
            net = LeNetDeclarative()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            save_dir = tempfile.mkdtemp()
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            tensor_img = np.array(np.random.random((1, 1, 28, 28)),
                                  dtype=np.float32)
            ori_results = model.test_batch(tensor_img)
            model.save(save_dir, training=False)
            fluid.disable_dygraph() if dynamic else None

            place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda() \
                else fluid.CUDAPlace(0)
            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                exe = fluid.Executor(place)
                [inference_program, feed_target_names, fetch_targets] = \
                    fluid.io.load_inference_model(dirname=save_dir, executor=exe)
                results = exe.run(inference_program,
                                  feed={feed_target_names[0]: tensor_img},
                                  fetch_list=fetch_targets)
                np.testing.assert_allclose(results,
                                           ori_results,
                                           rtol=1e-5,
                                           atol=1e-7)
                shutil.rmtree(save_dir)
Code Example #8
    def test_static_save_dynamic_load(self):
        path = tempfile.mkdtemp()

        net = MyModel(classifier_activation=None)
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path + '/test')

        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  #if dynamic else None

        net = MyModel(classifier_activation=None)
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path + '/test')
        shutil.rmtree(path)
        fluid.disable_dygraph()
Code Example #9
def prepare_distributed_context(place=None):
    if place is None:
        place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
            else fluid.CUDAPlace(0)

    strategy = ParallelStrategy()
    strategy.nranks = ParallelEnv().nranks
    strategy.local_rank = ParallelEnv().local_rank
    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
    strategy.current_endpoint = ParallelEnv().current_endpoint

    if strategy.nranks < 2:
        return

    global _parallel_context_initialized

    if not _parallel_context_initialized and isinstance(place, fluid.CUDAPlace):

        def _init_context():
            communicator_prog = fluid.Program()
            init_communicator(communicator_prog, strategy.local_rank,
                              strategy.nranks, True, strategy.current_endpoint,
                              strategy.trainer_endpoints)
            exe = fluid.Executor(place)
            exe.run(communicator_prog)

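        # Temporarily drop to static-graph mode to build and run the
        # communicator init program, then restore dygraph on the same place.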
        fluid.disable_dygraph()
        _init_context()
        fluid.enable_dygraph(place)

    else:
        # asserting a non-empty string never fails; make the check explicit
        assert isinstance(place, fluid.CUDAPlace), "Only support CUDAPlace for now."

    _parallel_context_initialized = True
    return strategy
Code Example #10
    def test_test_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            m.eval()
            output = m(to_variable(data))
            fluid.disable_dygraph()
            return output.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            model = MyModel()
            inputs = [Input([None, dim], 'float32', name='x')]
            model.prepare(inputs=inputs, device=device)
            out, = model.test_batch([data])

            np.testing.assert_allclose(out, ref)
            fluid.disable_dygraph() if dynamic else None
Code Example #11
 def get_expect():
     fluid.enable_dygraph(fluid.CPUPlace())
     self.set_seed()
     m = MyModel()
     m.eval()
     output = m(to_tensor(data))
     fluid.disable_dygraph()
     return output.numpy()
Code Example #12
 def test_predict_without_inputs(self):
     fluid.enable_dygraph(self.device)
     model = Model(LeNet())
     model.prepare()
     model.load(self.weight_path)
     model._inputs = None
     output = model.predict(
         self.test_dataset, batch_size=64, stack_outputs=True)
     np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))
     fluid.disable_dygraph()
Code Example #13
 def evaluate(self, dynamic):
     fluid.enable_dygraph(self.device) if dynamic else None
     model = LeNet()
     model.prepare(metrics=Accuracy(),
                   inputs=self.inputs,
                   labels=self.labels)
     model.load(self.weight_path)
     result = model.evaluate(self.val_dataset, batch_size=64)
     np.testing.assert_allclose(result['acc'], self.acc1)
     fluid.disable_dygraph() if dynamic else None
Code Example #14
 def test_parameters(self):
     for dynamic in [True, False]:
         device = set_device('cpu')
         fluid.enable_dygraph(device) if dynamic else None
         model = MyModel()
         inputs = [Input([None, 20], 'float32', name='x')]
         model.prepare(inputs=inputs)
         params = model.parameters()
         self.assertTrue(params[0].shape[0] == 20)
         self.assertTrue(params[0].shape[1] == 10)
         fluid.disable_dygraph() if dynamic else None
Code Example #15
    def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None):
        fluid.enable_dygraph(self.device) if dynamic else None
        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        net = LeNet()
        optim_new = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=net.parameters())
        model = Model(net, inputs=self.inputs, labels=self.labels)
        model.prepare(
            optim_new,
            loss=CrossEntropyLoss(reduction="sum"),
            metrics=Accuracy())
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        model.fit(self.train_dataset,
                  batch_size=64,
                  shuffle=False,
                  num_iters=num_iters)

        result = model.evaluate(
            self.val_dataset, batch_size=64, num_iters=num_iters)

        train_sampler = DistributedBatchSampler(
            self.train_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank)
        val_sampler = DistributedBatchSampler(
            self.val_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank)

        train_loader = fluid.io.DataLoader(
            self.train_dataset,
            batch_sampler=train_sampler,
            places=self.device,
            return_list=True)

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=val_sampler,
            places=self.device,
            return_list=True)

        model.fit(train_loader, val_loader)
        fluid.disable_dygraph() if dynamic else None
Code Example #16
 def test_save_load(self):
     path = tempfile.mkdtemp()
     for dynamic in [True, False]:
         device = set_device('cpu')
         fluid.enable_dygraph(device) if dynamic else None
         model = MyModel()
         inputs = [Input([None, 20], 'float32', name='x')]
         model.prepare(inputs=inputs)
         model.save(path + '/test')
         model.load(path + '/test')
         shutil.rmtree(path)
         fluid.disable_dygraph() if dynamic else None
Code Example #17
File: test_text.py  Project: neuzxy/Paddle
 def _calc_output(self, place, mode="test", dygraph=True):
     if dygraph:
         fluid.enable_dygraph(place)
     else:
         fluid.disable_dygraph()
     fluid.default_main_program().random_seed = self._random_seed
     fluid.default_startup_program().random_seed = self._random_seed
     model = self.model_cls(**self.attrs) if isinstance(
         self.attrs, dict) else self.model_cls(*self.attrs)
     model.prepare(inputs=self.make_inputs(), device=place)
     if self.param_states:
         model.load(self.param_states, optim_state=None)
     return model.test_batch(self.inputs)
Code Example #18
    def predict(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = LeNet()
        model.prepare(inputs=self.inputs)
        model.load(self.weight_path)
        output = model.predict(self.test_dataset,
                               batch_size=64,
                               stack_outputs=True)
        np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))

        acc = compute_acc(output[0], self.val_dataset.labels)
        np.testing.assert_allclose(acc, self.acc1)
        fluid.disable_dygraph() if dynamic else None
Code Example #19
 def get_expect():
     fluid.enable_dygraph(fluid.CPUPlace())
     self.set_seed()
     m = MyModel()
     optim = fluid.optimizer.SGD(learning_rate=0.001,
                                 parameter_list=m.parameters())
     m.train()
     output = m(to_tensor(data))
     loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label))
     avg_loss = fluid.layers.reduce_sum(loss)
     avg_loss.backward()
     optim.minimize(avg_loss)
     m.clear_gradients()
     fluid.disable_dygraph()
     return avg_loss.numpy()
Code Example #20
 def _calc_output(self, place, mode="test", dygraph=True):
     if dygraph:
         fluid.enable_dygraph(place)
     else:
         fluid.disable_dygraph()
     gen = paddle.seed(self._random_seed)
     paddle.framework.random._manual_program_seed(self._random_seed)
     scope = fluid.core.Scope()
     with fluid.scope_guard(scope):
         layer = self.model_cls(**self.attrs) if isinstance(
             self.attrs, dict) else self.model_cls(*self.attrs)
         model = Model(layer, inputs=self.make_inputs())
         model.prepare()
         if self.param_states:
             model.load(self.param_states, optim_state=None)
         return model.predict_batch(self.inputs)
Code Example #21
    def test_gen_TruncatedNormal_initializer(self):
        fluid.disable_dygraph()

        gen = paddle.seed(123123143)
        cur_state = paddle.get_cuda_rng_state()

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list which doesn't contain tensor Variable.
            x = fluid.layers.uniform_random(shape=[2, 10])
            result_1 = fluid.layers.fc(
                input=x,
                size=10,
                param_attr=fluid.initializer.TruncatedNormal(loc=0.0,
                                                             scale=2.0))
            result_2 = fluid.layers.fc(
                input=x,
                size=10,
                param_attr=fluid.initializer.TruncatedNormal(loc=0.0,
                                                             scale=2.0))

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(train_program,
                           feed={},
                           fetch_list=[result_1, result_2])

        paddle.seed(123123143)
        with fluid.program_guard(train_program, startup_program):
            exe.run(startup_program)
            out2 = exe.run(train_program,
                           feed={},
                           fetch_list=[result_1, result_2])

        out1_res1 = np.array(out1[0])
        out1_res2 = np.array(out1[1])
        out2_res1 = np.array(out2[0])
        out2_res2 = np.array(out2[1])

        if core.is_compiled_with_cuda():
            print(">>>>>>> truncated normal static >>>>>>>")
            self.assertTrue(np.allclose(out1_res1, out2_res1))
            self.assertTrue(np.allclose(out1_res2, out2_res2))
            self.assertTrue(not np.allclose(out1_res2, out1_res1))
Code Example #22
    def fit(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        seed = 333
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed

        model = LeNet()
        optim_new = fluid.optimizer.Adam(learning_rate=0.001,
                                         parameter_list=model.parameters())
        model.prepare(optim_new,
                      loss_function=CrossEntropy(average=False),
                      metrics=Accuracy(),
                      inputs=self.inputs,
                      labels=self.labels)
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)
        fluid.disable_dygraph() if dynamic else None
Code Example #23
    def test_train_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=m.parameters())
            m.train()
            output = m(to_variable(data))
            l = to_variable(label)
            loss = fluid.layers.cross_entropy(output, l)
            avg_loss = fluid.layers.reduce_sum(loss)
            avg_loss.backward()
            optim.minimize(avg_loss)
            m.clear_gradients()
            fluid.disable_dygraph()
            return avg_loss.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()
            model = MyModel()

            optim2 = fluid.optimizer.SGD(learning_rate=0.001,
                                         parameter_list=model.parameters())

            inputs = [Input([None, dim], 'float32', name='x')]
            labels = [Input([None, 1], 'int64', name='label')]
            model.prepare(optim2,
                          loss_function=CrossEntropy(average=False),
                          inputs=inputs,
                          labels=labels,
                          device=device)
            loss, = model.train_batch([data], [label])

            np.testing.assert_allclose(loss.flatten(), ref.flatten())
            fluid.disable_dygraph() if dynamic else None
Code Example #24
File: test_model.py  Project: sandyhouse/Paddle
    def setUpClass(cls):
        if not fluid.is_compiled_with_cuda():
            cls().skipTest('module not tested when ONLY_CPU compiling')
        cls.device = paddle.set_device('gpu')
        fluid.enable_dygraph(cls.device)

        sp_num = 1280
        cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
        cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
        cls.test_dataset = MnistDataset(mode='test',
                                        return_label=False,
                                        sample_num=sp_num)

        cls.train_loader = fluid.io.DataLoader(cls.train_dataset,
                                               places=cls.device,
                                               batch_size=64)
        cls.val_loader = fluid.io.DataLoader(cls.val_dataset,
                                             places=cls.device,
                                             batch_size=64)
        cls.test_loader = fluid.io.DataLoader(cls.test_dataset,
                                              places=cls.device,
                                              batch_size=64)

        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        dy_lenet = LeNetDygraph()
        cls.init_param = dy_lenet.state_dict()
        dynamic_train(dy_lenet, cls.train_loader)

        cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)

        cls.inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
        cls.labels = [InputSpec([None, 1], 'int64', 'label')]

        cls.save_dir = os.path.join(tempfile.mkdtemp(), '.cache_test_model')
        if not os.path.exists(cls.save_dir):
            os.makedirs(cls.save_dir)
        cls.weight_path = os.path.join(cls.save_dir, 'lenet')
        fluid.dygraph.save_dygraph(dy_lenet.state_dict(), cls.weight_path)

        fluid.disable_dygraph()
Code Example #25
    def evaluate(self, dynamic):
        fluid.enable_dygraph(self.device) if dynamic else None
        model = Model(LeNet(), self.inputs, self.labels)
        model.prepare(metrics=Accuracy())
        model.load(self.weight_path)
        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        sampler = DistributedBatchSampler(
            self.val_dataset, batch_size=64, shuffle=False)

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=sampler,
            places=self.device,
            return_list=True)

        model.evaluate(val_loader)

        fluid.disable_dygraph() if dynamic else None
Code Example #26
    def setUpClass(cls):
        if not fluid.is_compiled_with_cuda():
            cls().skipTest('module not tested when ONLY_CPU compiling')
        cls.device = set_device('gpu')
        fluid.enable_dygraph(cls.device)

        sp_num = 1280
        cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)
        cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)
        cls.test_dataset = MnistDataset(mode='test',
                                        return_label=False,
                                        sample_num=sp_num)

        cls.train_loader = fluid.io.DataLoader(cls.train_dataset,
                                               places=cls.device,
                                               batch_size=64)
        cls.val_loader = fluid.io.DataLoader(cls.val_dataset,
                                             places=cls.device,
                                             batch_size=64)
        cls.test_loader = fluid.io.DataLoader(cls.test_dataset,
                                              places=cls.device,
                                              batch_size=64)

        seed = 333
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed

        dy_lenet = LeNetDygraph()
        cls.init_param = dy_lenet.state_dict()
        dynamic_train(dy_lenet, cls.train_loader)

        cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)

        cls.inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
        cls.labels = [Input([None, 1], 'int64', name='label')]

        cls.save_dir = tempfile.mkdtemp()
        cls.weight_path = os.path.join(cls.save_dir, 'lenet')
        fluid.dygraph.save_dygraph(dy_lenet.state_dict(), cls.weight_path)

        fluid.disable_dygraph()
Code Example #27
    def test_train_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel(classifier_activation=None)
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=m.parameters())
            m.train()
            output = m(to_tensor(data))
            loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label))
            avg_loss = fluid.layers.reduce_sum(loss)
            avg_loss.backward()
            optim.minimize(avg_loss)
            m.clear_gradients()
            fluid.disable_dygraph()
            return avg_loss.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device) if dynamic else None
            self.set_seed()

            net = MyModel(classifier_activation=None)
            optim2 = fluid.optimizer.SGD(learning_rate=0.001,
                                         parameter_list=net.parameters())

            inputs = [InputSpec([None, dim], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            model = Model(net, inputs, labels)
            model.prepare(optim2, loss=CrossEntropyLoss(reduction="sum"))
            loss, = model.train_batch([data], [label])
            np.testing.assert_allclose(loss.flatten(), ref.flatten())
            fluid.disable_dygraph() if dynamic else None
Code Example #28
    def test_gen_dropout_static(self):
        fluid.disable_dygraph()

        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list which doesn't contain tensor Variable.
            x_1 = fluid.layers.uniform_random(shape=[2, 10])
            y_1 = fluid.layers.dropout(x_1, 0.5)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(train_program, feed={}, fetch_list=[y_1])
            #gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(train_program, feed={}, fetch_list=[y_1])
        out1_np = np.array(out1[0])
        out2_np = np.array(out2[0])

        if not core.is_compiled_with_cuda():
            print(">>>>>>> dropout static >>>>>>>")
            self.assertTrue(np.allclose(out1_np, out2_np))
Code Example #29
 def functional_dygraph_context(self):
     self.assertFalse(fluid.dygraph.enabled())
     fluid.enable_dygraph()
     self.assertTrue(fluid.dygraph.enabled())
     np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
     var_inp = paddle.to_tensor(np_inp)
     mlp = MLP(input_size=2)
     out = mlp(var_inp)
     dy_out1 = out.numpy()
     out.backward()
     dy_grad1 = mlp._linear1.weight.gradient()
     fluid.disable_dygraph()
     self.assertFalse(fluid.dygraph.enabled())
     with fluid.dygraph.guard():
         self.assertTrue(fluid.dygraph.enabled())
         var_inp = paddle.to_tensor(np_inp)
         mlp = MLP(input_size=2)
         out = mlp(var_inp)
         dy_out2 = out.numpy()
         out.backward()
         dy_grad2 = mlp._linear1.weight.gradient()
     self.assertFalse(fluid.dygraph.enabled())
     self.assertTrue(np.array_equal(dy_out1, dy_out2))
     self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
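
The test above exercises both ways of scoping dygraph mode. As a quick usage summary, a sketch with an illustrative place:

import paddle.fluid as fluid

place = fluid.CPUPlace()

# explicit pair: the caller is responsible for restoring static-graph mode
fluid.enable_dygraph(place)
# ... imperative code ...
fluid.disable_dygraph()

# context manager: the previous mode is restored automatically on exit
with fluid.dygraph.guard(place):
    pass  # ... imperative code ...
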
Code Example #30
                            batch_size=BATCH_SIZE,
                            drop_last=True)
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_sample_list_generator(train_reader, places=place)
# train
for epoch in range(EPOCH_NUM):
    train_one_epoch(mnist, train_loader)
# save
fluid.dygraph.jit.save(layer=mnist, model_path=MODEL_PATH)
'''
Part 4. Load & Inference
'''
# load model by jit.load & inference
translated_mnist = fluid.dygraph.jit.load(model_path=MODEL_PATH)
translated_mnist.eval()
image = np.random.random((1, 1, 28, 28)).astype('float32')
image_var = fluid.dygraph.to_variable(image)
dygraph_pred = translated_mnist(image_var)
# load model by io.load_inference_model & inference
fluid.disable_dygraph()
exe = fluid.Executor(place)
[infer_program, feed,
 fetch] = fluid.io.load_inference_model(dirname=MODEL_PATH,
                                        executor=exe,
                                        params_filename="__variables__")
static_pred = exe.run(infer_program, feed={feed[0]: image}, fetch_list=fetch)
# compare
print("dygraph prediction: {}".format(dygraph_pred.numpy()))
print("static prediction: {}".format(static_pred[0]))
np.testing.assert_array_equal(dygraph_pred.numpy(), static_pred[0])