Example #1
0
    def test_static_multiple_gpus(self):
        """End-to-end fit/evaluate/predict of a LeNet Model on MNIST (static mode, GPU)."""
        device = set_device('gpu')

        image_shape = (-1, 1, 28, 28)
        bs = 128

        image_inputs = [Input(image_shape, 'float32', 'image')]
        label_inputs = [Input([None, 1], 'int64', 'label')]

        model = Model(LeNet(), image_inputs, label_inputs)
        optimizer = fluid.optimizer.Momentum(
            learning_rate=0.001,
            momentum=0.9,
            parameter_list=model.parameters())
        model.prepare(optimizer, CrossEntropyLoss(), Accuracy())

        train_ds = MnistDataset(mode='train')
        eval_ds = MnistDataset(mode='test')
        infer_ds = MnistDataset(mode='test', return_label=False)

        # Train two epochs, logging progress every 50 steps.
        progress_cb = paddle.callbacks.ProgBarLogger(50)
        model.fit(train_ds,
                  eval_ds,
                  epochs=2,
                  batch_size=bs,
                  callbacks=progress_cb)

        eval_result = model.evaluate(eval_ds, batch_size=bs)

        output = model.predict(infer_ds, batch_size=bs, stack_outputs=True)

        # One prediction row per test sample.
        np.testing.assert_equal(output[0].shape[0], len(infer_ds))

        # Accuracy recomputed from raw predictions must agree with evaluate().
        acc = compute_accuracy(output[0], eval_ds.labels)

        np.testing.assert_allclose(acc, eval_result['acc'])
Example #2
0
    def test_dygraph_export_deploy_model_about_inputs(self):
        """Check deploy-export (``Model.save(training=False)``) in dygraph mode,
        both without declared inputs (after one warm-up step so shapes are
        known) and with an ``InputSpec``-declared input.

        Fix: the original removed only the save subdirectory, leaking the
        ``tempfile.mkdtemp()`` root directory (twice). The mkdtemp root is now
        kept and removed instead.
        """
        self.set_seed()
        np.random.seed(201)
        mnist_data = MnistDataset(mode='train')
        paddle.disable_static()
        # without inputs: run the model once first so input shapes are inferred
        tmp_root = tempfile.mkdtemp()
        save_dir = os.path.join(tmp_root, '.cache_test_dygraph_export_deploy')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        for initial in ["fit", "train_batch", "eval_batch", "predict_batch"]:
            net = LeNet()
            model = Model(net)
            optim = fluid.optimizer.Adam(
                learning_rate=0.001, parameter_list=model.parameters())
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
            if initial == "fit":
                model.fit(mnist_data, batch_size=64, verbose=0)
            else:
                img = np.array(
                    np.random.random((1, 1, 28, 28)), dtype=np.float32)
                label = np.array(np.random.rand(1, 1), dtype=np.int64)
                if initial == "train_batch":
                    model.train_batch([img], [label])
                elif initial == "eval_batch":
                    model.eval_batch([img], [label])
                else:
                    model.predict_batch([img])

            model.save(save_dir, training=False)
        # remove the mkdtemp root, not just save_dir, so no temp dir leaks
        shutil.rmtree(tmp_root)
        # with inputs, and the type of inputs is InputSpec
        tmp_root = tempfile.mkdtemp()
        save_dir = os.path.join(tmp_root,
                                '.cache_test_dygraph_export_deploy_2')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        net = LeNet()
        inputs = InputSpec([None, 1, 28, 28], 'float32', 'x')
        model = Model(net, inputs)
        optim = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=model.parameters())
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(save_dir, training=False)
        shutil.rmtree(tmp_root)
Example #3
0
 def get_model(self, amp_config):
     """Build a LeNet ``Model`` prepared with Adam, summed cross-entropy
     loss and the given AMP configuration.
     """
     input_spec = InputSpec([None, 1, 28, 28], "float32", 'x')
     label_spec = InputSpec([None, 1], "int64", "y")
     model = Model(LeNet(), input_spec, label_spec)
     optimizer = paddle.optimizer.Adam(
         learning_rate=0.001, parameters=model.parameters())
     model.prepare(
         optimizer=optimizer,
         loss=CrossEntropyLoss(reduction="sum"),
         amp_configs=amp_config)
     return model
Example #4
0
    def test_dynamic_save_static_load(self):
        """Save parameters from a dygraph model, then reload them into an
        equivalent static-graph model.

        Fixes: the temp directory was leaked whenever save/load raised (no
        try/finally), and the checkpoint prefix was built by string
        concatenation instead of ``os.path.join``.
        """
        path = tempfile.mkdtemp()
        try:
            # dynamic saving
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device)
            model = Model(MyModel(classifier_activation=None))
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=model.parameters())
            model.prepare(optimizer=optim,
                          loss=CrossEntropyLoss(reduction="sum"))
            # 'test' is the checkpoint file prefix inside the temp dir
            model.save(os.path.join(path, 'test'))
            fluid.disable_dygraph()

            # static loading of the dygraph-saved checkpoint
            inputs = [InputSpec([None, 20], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            model = Model(MyModel(classifier_activation=None), inputs, labels)
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=model.parameters())
            model.prepare(optimizer=optim,
                          loss=CrossEntropyLoss(reduction="sum"))
            model.load(os.path.join(path, 'test'))
        finally:
            # clean up even when save/load fails
            shutil.rmtree(path)
Example #5
0
 def test_parameters(self):
     """``Model.parameters()`` exposes the first layer's weight with shape
     [20, 10] in both dygraph and static modes.

     Fixes: side-effecting ``expr if cond else None`` expression statements
     replaced with plain ``if`` statements; ``assertTrue(a == b)`` replaced
     with ``assertEqual`` for informative failure messages.
     """
     for dynamic in [True, False]:
         device = paddle.set_device('cpu')
         if dynamic:
             fluid.enable_dygraph(device)
         net = MyModel()
         inputs = [InputSpec([None, 20], 'float32', 'x')]
         model = Model(net, inputs)
         model.prepare()
         params = model.parameters()
         self.assertEqual(params[0].shape[0], 20)
         self.assertEqual(params[0].shape[1], 10)
         if dynamic:
             fluid.disable_dygraph()
Example #6
0
    def test_dynamic_save_static_load(self):
        """Save a dygraph model's checkpoint and reload it into an equivalent
        static-graph model.

        Fixes: only the cache subdirectory was removed, leaking the
        ``tempfile.mkdtemp()`` root; cleanup now removes the root and runs in
        a ``finally`` block so a failing save/load cannot leak it either.
        """
        tmp_root = tempfile.mkdtemp()
        path = os.path.join(tmp_root, '.cache_dynamic_save_static_load')
        if not os.path.exists(path):
            os.makedirs(path)
        try:
            # dynamic saving
            device = paddle.set_device('cpu')
            fluid.enable_dygraph(device)
            model = Model(MyModel())
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=model.parameters())
            model.prepare(optimizer=optim,
                          loss=CrossEntropyLoss(reduction="sum"))
            model.save(path)
            fluid.disable_dygraph()

            # static loading of the dygraph-saved checkpoint
            inputs = [InputSpec([None, 20], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            model = Model(MyModel(), inputs, labels)
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=model.parameters())
            model.prepare(optimizer=optim,
                          loss=CrossEntropyLoss(reduction="sum"))
            model.load(path)
        finally:
            # remove the mkdtemp root (not just the subdir) so nothing leaks
            shutil.rmtree(tmp_root)
Example #7
0
    def test_static_check_input(self):
        """``prepare()`` must raise ValueError for an AMP config containing
        ``use_pure_fp16`` in static-graph mode.
        """
        paddle.enable_static()
        bad_amp_configs = {"level": "O2", "use_pure_fp16": True}
        if not fluid.is_compiled_with_cuda():
            self.skipTest('module not tested when ONLY_CPU compling')
        paddle.set_device('gpu')

        image_spec = InputSpec([None, 1, 28, 28], "float32", 'x')
        label_spec = InputSpec([None, 1], "int64", "y")
        model = Model(LeNet(), image_spec, label_spec)

        optimizer = paddle.optimizer.Adam(
            learning_rate=0.001, parameters=model.parameters())
        summed_loss = CrossEntropyLoss(reduction="sum")
        with self.assertRaises(ValueError):
            model.prepare(
                optimizer=optimizer,
                loss=summed_loss,
                amp_configs=bad_amp_configs)
Example #8
0
    def test_amp_training_purefp16(self):
        """Run a single pure-fp16 (AMP level O2) train_batch in static mode."""
        if not fluid.is_compiled_with_cuda():
            self.skipTest('module not tested when ONLY_CPU compling')

        batch_images = np.random.random(size=(4, 1, 28, 28)).astype(np.float32)
        batch_labels = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        paddle.enable_static()
        paddle.set_device('gpu')

        image_spec = InputSpec([None, 1, 28, 28], "float32", 'x')
        label_spec = InputSpec([None, 1], "int64", "y")
        model = Model(LeNet(), image_spec, label_spec)
        # multi_precision keeps fp32 master weights for pure-fp16 training
        optimizer = paddle.optimizer.Adam(
            learning_rate=0.001,
            parameters=model.parameters(),
            multi_precision=True)
        model.prepare(
            optimizer=optimizer,
            loss=CrossEntropyLoss(reduction="sum"),
            amp_configs={"level": "O2", "use_fp16_guard": False})
        model.train_batch([batch_images], [batch_labels])
Example #9
0
 def test_dynamic_check_input(self):
     """``prepare()`` must reject each invalid AMP config in dygraph mode
     and accept valid ones.

     Bug fix: the original wrapped the entire loop inside ONE
     ``assertRaises(ValueError)`` context; that context exits as soon as
     the first config raises, so only the first invalid config was ever
     exercised. ``assertRaises`` now wraps each ``prepare`` call so every
     invalid config is checked individually.
     """
     paddle.disable_static()
     amp_configs_list = [
         {
             "level": "O3"  # unsupported AMP level
         },
         {
             "level": "O1",
             "test": 0  # unknown config key
         },
         {
             "level": "O1",
             "use_fp16_guard": True  # presumably invalid for O1 — raises here
         },
         "O3",  # unsupported level as plain string
     ]
     if not fluid.is_compiled_with_cuda():
         self.skipTest('module not tested when ONLY_CPU compling')
     paddle.set_device('gpu')
     net = LeNet()
     model = Model(net)
     optim = paddle.optimizer.Adam(learning_rate=0.001,
                                   parameters=model.parameters())
     loss = CrossEntropyLoss(reduction="sum")
     # every invalid config must raise, not just the first one
     for amp_configs in amp_configs_list:
         with self.assertRaises(ValueError):
             model.prepare(optimizer=optim,
                           loss=loss,
                           amp_configs=amp_configs)
     # valid configurations must be accepted without raising
     model.prepare(optimizer=optim, loss=loss, amp_configs="O2")
     model.prepare(optimizer=optim,
                   loss=loss,
                   amp_configs={
                       "custom_white_list": {"matmul"},
                       "init_loss_scaling": 1.0
                   })