Example #1
    def test_static_save_dynamic_load(self):
        path = tempfile.mkdtemp()

        net = MyModel(classifier_activation=None)
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path + '/test')

        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  # switch to dynamic graph mode before loading

        net = MyModel(classifier_activation=None)
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path + '/test')
        shutil.rmtree(path)
        fluid.disable_dygraph()
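
Note: the round trip above saves parameters and optimizer state while still in static-graph mode and restores them after the process has switched to dygraph. As a sanity check one could verify the checkpoint files on disk; this is a hypothetical addition, assuming the usual prefix naming of Model.save:

    import os
    assert os.path.exists(path + '/test.pdparams')  # network parameters
    assert os.path.exists(path + '/test.pdopt')     # optimizer state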
Example #2
    def test_static_save_dynamic_load(self):
        path = os.path.join(tempfile.mkdtemp(),
                            '.cache_test_static_save_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(path)

        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)  # switch to dynamic graph mode before loading

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.load(path)
        shutil.rmtree(path)
        fluid.disable_dygraph()
Example #3
    def test_fit_by_step(self):
        base_lr = 1e-3
        boundaries = [5, 8]

        def make_optimizer(parameters=None):
            momentum = 0.9
            weight_decay = 5e-4
            values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
            learning_rate = paddle.optimizer.lr.PiecewiseDecay(
                boundaries=boundaries, values=values)
            learning_rate = paddle.optimizer.lr.LinearWarmup(
                learning_rate=learning_rate,
                warmup_steps=4,
                start_lr=base_lr / 5.,
                end_lr=base_lr,
                verbose=True)
            optimizer = paddle.optimizer.Momentum(
                learning_rate=learning_rate,
                weight_decay=weight_decay,
                momentum=momentum,
                parameters=parameters)
            return optimizer

        # dynamic test
        device = paddle.set_device('cpu')
        fluid.enable_dygraph(device)
        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,
                                   base_lr * (0.1**len(boundaries)))
        # static test
        paddle.enable_static()

        net = MyModel()
        inputs = [InputSpec([None, 20], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        optim = make_optimizer(net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))

        dataset = MyDataset()
        model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)

        np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,
                                   base_lr * (0.1**len(boundaries)))
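
Note: the optimizer under test wraps PiecewiseDecay in LinearWarmup: four warmup epochs ramp the learning rate from base_lr/5 to base_lr, after which the piecewise schedule multiplies it by 0.1 past each boundary. A plain-Python sketch of the expected per-epoch values (an illustration only, assuming the wrapped schedule sees the full epoch count once warmup ends):

    base_lr, boundaries, warmup_steps = 1e-3, [5, 8], 4
    values = [base_lr * (0.1 ** i) for i in range(len(boundaries) + 1)]

    def lr_at(epoch):
        if epoch < warmup_steps:              # linear warmup phase
            start = base_lr / 5.0
            return start + (base_lr - start) * epoch / warmup_steps
        for b, v in zip(boundaries, values):  # piecewise decay phase
            if epoch < b:
                return v
        return values[-1]

    # After 10 epochs the test expects base_lr * 0.1**len(boundaries) == 1e-5.
    print([lr_at(e) for e in range(10)])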
Example #4
    def test_static_multiple_gpus(self):
        device = set_device('gpu')

        im_shape = (-1, 1, 28, 28)
        batch_size = 128

        inputs = [Input(im_shape, 'float32', 'image')]
        labels = [Input([None, 1], 'int64', 'label')]

        model = Model(LeNet(), inputs, labels)
        optim = fluid.optimizer.Momentum(
            learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
        model.prepare(optim, CrossEntropyLoss(), Accuracy())

        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        test_dataset = MnistDataset(mode='test', return_label=False)

        cbk = paddle.callbacks.ProgBarLogger(50)
        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=cbk)

        eval_result = model.evaluate(val_dataset, batch_size=batch_size)

        output = model.predict(
            test_dataset, batch_size=batch_size, stack_outputs=True)

        np.testing.assert_equal(output[0].shape[0], len(test_dataset))

        acc = compute_accuracy(output[0], val_dataset.labels)

        np.testing.assert_allclose(acc, eval_result['acc'])
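
Note: predict(..., stack_outputs=True) concatenates the per-batch outputs along axis 0, which is why output[0].shape[0] equals the dataset length. The compute_accuracy helper is defined elsewhere in the suite; a plausible stand-in (hypothetical) is:

    import numpy as np

    def compute_accuracy(pred, gt):
        pred = np.argmax(pred, -1)     # (N, num_classes) logits -> class ids
        gt = np.array(gt).reshape(-1)
        return np.mean(pred == gt)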
Example #5
    def test_dynamic_load(self):
        mnist_data = MnistDataset(mode='train')

        path = os.path.join(tempfile.mkdtemp(), '.cache_dynamic_load')
        if not os.path.exists(path):
            os.makedirs(path)

        for new_optimizer in [True, False]:
            paddle.disable_static()
            net = LeNet()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            if new_optimizer:
                optim = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=net.parameters())
            else:
                optim = fluid.optimizer.Adam(
                    learning_rate=0.001, parameter_list=net.parameters())
            model = Model(net, inputs, labels)
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
            model.fit(mnist_data, batch_size=64, verbose=0)
            model.save(path)
            model.load(path)
            paddle.enable_static()
        shutil.rmtree(path)
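
Note: the loop runs the same fit/save/load round trip twice, once with the 2.x paddle.optimizer.Adam (parameters=...) and once with the legacy fluid.optimizer.Adam (parameter_list=...), confirming that Model.save and Model.load handle optimizer state from both APIs.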
Example #6
    def test_accumulate(self):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)
        net = MyModel()
        optim = fluid.optimizer.SGD(learning_rate=0.001,
                                    parameter_list=net.parameters())
        inputs = [InputSpec([None, dim], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]

        for amp_cfg in [None, 'O1']:
            model = Model(net, inputs, labels)
            model.prepare(
                optim,
                loss=CrossEntropyLoss(reduction="sum"),
                amp_configs=amp_cfg)
            losses, grads = [], []
            for stat in [False, False, True]:
                loss, = model.train_batch([data], [label], update=stat)
                losses.append(loss)
                grads.append([p.grad.numpy() for p in net.parameters()])

            for grad1, grad2, grad3 in zip(*grads):
                np.testing.assert_almost_equal(grad1 * 2, grad2, decimal=4)
                np.testing.assert_almost_equal(
                    grad3, np.zeros_like(grad3), decimal=4)

            np.testing.assert_almost_equal(losses[0], losses[1], decimal=4)
            np.testing.assert_almost_equal(losses[0], losses[2], decimal=4)
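
Note: the assertions spell out the accumulation contract of train_batch. With update=False the backward pass runs but the optimizer does not step and gradients are not cleared, so the second call roughly doubles them (grad2 ≈ 2 * grad1); the final update=True call steps and clears, leaving grad3 ≈ 0. Parameters stay unchanged until that last step, which is why all three losses agree.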
Example #7
    def test_dygraph_export_deploy_model_about_inputs(self):
        self.set_seed()
        np.random.seed(201)
        mnist_data = MnistDataset(mode='train')
        paddle.disable_static()
        # without inputs
        save_dir = os.path.join(tempfile.mkdtemp(),
                                '.cache_test_dygraph_export_deploy')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        for initial in ["fit", "train_batch", "eval_batch", "predict_batch"]:
            net = LeNet()
            model = Model(net)
            optim = fluid.optimizer.Adam(
                learning_rate=0.001, parameter_list=model.parameters())
            model.prepare(
                optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
            if initial == "fit":
                model.fit(mnist_data, batch_size=64, verbose=0)
            else:
                img = np.array(
                    np.random.random((1, 1, 28, 28)), dtype=np.float32)
                label = np.array(np.random.rand(1, 1), dtype=np.int64)
                if initial == "train_batch":
                    model.train_batch([img], [label])
                elif initial == "eval_batch":
                    model.eval_batch([img], [label])
                else:
                    model.predict_batch([img])

            model.save(save_dir, training=False)
        shutil.rmtree(save_dir)
        # with inputs, and the type of inputs is InputSpec
        save_dir = os.path.join(tempfile.mkdtemp(),
                                '.cache_test_dygraph_export_deploy_2')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        net = LeNet()
        inputs = InputSpec([None, 1, 28, 28], 'float32', 'x')
        model = Model(net, inputs)
        optim = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=model.parameters())
        model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.save(save_dir, training=False)
        shutil.rmtree(save_dir)
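
Note: exporting with training=False writes an inference model, which needs concrete input shapes. Without an explicit inputs spec the network must have been run at least once (fit, train_batch, eval_batch or predict_batch) so the shapes can be recorded, which is exactly what the first loop exercises; the second half shows that providing an InputSpec up front removes that requirement.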
Example #8
    def func_warn_or_error(self):
        with self.assertRaises(ValueError):
            paddle.callbacks.ReduceLROnPlateau(factor=2.0)
        # an invalid mode only triggers a warning, not an exception
        paddle.callbacks.ReduceLROnPlateau(mode='1', patience=3, verbose=1)

        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_dataset = CustomMnist(mode='train', transform=transform)
        val_dataset = CustomMnist(mode='test', transform=transform)
        net = LeNet()
        optim = paddle.optimizer.Adam(learning_rate=0.001,
                                      parameters=net.parameters())
        inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        model = Model(net, inputs=inputs, labels=labels)
        model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
        callbacks = paddle.callbacks.ReduceLROnPlateau(monitor='miou',
                                                       patience=3,
                                                       verbose=1)
        model.fit(train_dataset,
                  val_dataset,
                  batch_size=8,
                  log_freq=1,
                  save_freq=10,
                  epochs=1,
                  callbacks=[callbacks])

        optim = paddle.optimizer.Adam(
            learning_rate=paddle.optimizer.lr.PiecewiseDecay([0.001, 0.0001],
                                                             [5, 10]),
            parameters=net.parameters())

        model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
        callbacks = paddle.callbacks.ReduceLROnPlateau(monitor='acc',
                                                       mode='max',
                                                       patience=3,
                                                       verbose=1,
                                                       cooldown=1)
        model.fit(train_dataset,
                  val_dataset,
                  batch_size=8,
                  log_freq=1,
                  save_freq=10,
                  epochs=3,
                  callbacks=[callbacks])
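
Note: the expectations encoded here are that ReduceLROnPlateau rejects factor >= 1.0 with a ValueError (the factor must shrink the learning rate), that an unrecognized mode such as '1' only triggers a warning, and that monitoring a quantity that never appears in the logs ('miou') warns rather than fails. The second fit then exercises the callback together with an LRScheduler-driven optimizer in 'max' mode with a cooldown period.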
Example #9
def dynamic_train(model, dataloader):
    optim = fluid.optimizer.Adam(
        learning_rate=0.001, parameter_list=model.parameters())
    model.train()
    for inputs, labels in dataloader:
        outputs = model(inputs)
        loss = CrossEntropyLoss(reduction="sum")(outputs, labels)
        avg_loss = fluid.layers.reduce_sum(loss)
        avg_loss.backward()
        optim.minimize(avg_loss)
        model.clear_gradients()
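
Note: dynamic_train is a hand-rolled dygraph loop built on the legacy fluid optimizer API (minimize plus clear_gradients). For reference, a sketch of the same loop against the non-fluid 2.x API, assuming the same model and dataloader objects:

    import paddle

    def dynamic_train_2x(model, dataloader):
        optim = paddle.optimizer.Adam(learning_rate=0.001,
                                      parameters=model.parameters())
        loss_fn = paddle.nn.CrossEntropyLoss(reduction="sum")
        model.train()
        for inputs, labels in dataloader:
            loss = loss_fn(model(inputs), labels)
            loss.backward()
            optim.step()        # replaces optim.minimize(avg_loss)
            optim.clear_grad()  # replaces model.clear_gradients()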
Example #10
    def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None):
        if dynamic:
            fluid.enable_dygraph(self.device)
        seed = 333
        paddle.seed(seed)
        paddle.framework.random._manual_program_seed(seed)

        net = LeNet()
        optim_new = fluid.optimizer.Adam(
            learning_rate=0.001, parameter_list=net.parameters())
        model = Model(net, inputs=self.inputs, labels=self.labels)
        model.prepare(
            optim_new,
            loss=CrossEntropyLoss(reduction="sum"),
            metrics=Accuracy())
        model.fit(self.train_dataset, batch_size=64, shuffle=False)

        result = model.evaluate(self.val_dataset, batch_size=64)
        np.testing.assert_allclose(result['acc'], self.acc1)

        model.fit(self.train_dataset,
                  batch_size=64,
                  shuffle=False,
                  num_iters=num_iters)

        result = model.evaluate(
            self.val_dataset, batch_size=64, num_iters=num_iters)

        train_sampler = DistributedBatchSampler(
            self.train_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank)
        val_sampler = DistributedBatchSampler(
            self.val_dataset,
            batch_size=64,
            shuffle=False,
            num_replicas=num_replicas,
            rank=rank)

        train_loader = fluid.io.DataLoader(
            self.train_dataset,
            batch_sampler=train_sampler,
            places=self.device,
            return_list=True)

        val_loader = fluid.io.DataLoader(
            self.val_dataset,
            batch_sampler=val_sampler,
            places=self.device,
            return_list=True)

        model.fit(train_loader, val_loader)
        if dynamic:
            fluid.disable_dygraph()
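
Note: besides the plain-dataset path, this fit helper covers num_iters (cut an epoch short after a fixed number of iterations) and user-supplied DataLoaders built on DistributedBatchSampler, whose num_replicas and rank arguments shard the dataset the same way a multi-process launch would.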
Example #11
 def get_model(self, amp_config):
     net = LeNet()
     inputs = InputSpec([None, 1, 28, 28], "float32", 'x')
     labels = InputSpec([None, 1], "int64", "y")
     model = Model(net, inputs, labels)
     optim = paddle.optimizer.Adam(learning_rate=0.001,
                                   parameters=model.parameters())
     model.prepare(optimizer=optim,
                   loss=CrossEntropyLoss(reduction="sum"),
                   amp_configs=amp_config)
     return model
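
Note: a small factory used by the AMP tests; the amp_config argument is forwarded to Model.prepare unchanged. Illustrative calls from inside the test class (hypothetical):

    model = self.get_model(None)             # plain fp32 baseline
    model = self.get_model({"level": "O1"})  # auto mixed precision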
Example #12
 def get_expect():
     fluid.enable_dygraph(fluid.CPUPlace())
     self.set_seed()
     m = MyModel()
     optim = fluid.optimizer.SGD(learning_rate=0.001,
                                 parameter_list=m.parameters())
     m.train()
     output = m(to_tensor(data))
     loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label))
     avg_loss = fluid.layers.reduce_sum(loss)
     avg_loss.backward()
     optim.minimize(avg_loss)
     m.clear_gradients()
     fluid.disable_dygraph()
     return avg_loss.numpy()
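
Note: this is the same reference helper that appears inline in Example #16's test_train_batch below: it performs one SGD step in dygraph under a fixed seed and returns the loss that the Model-API path is expected to reproduce.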
Example #13
    def test_static_check_input(self):
        paddle.enable_static()
        amp_configs = {"level": "O2", "use_pure_fp16": True}
        if not fluid.is_compiled_with_cuda():
            self.skipTest('module not tested when compiled with ONLY_CPU')
        paddle.set_device('gpu')

        net = LeNet()
        inputs = InputSpec([None, 1, 28, 28], "float32", 'x')
        labels = InputSpec([None, 1], "int64", "y")
        model = Model(net, inputs, labels)

        optim = paddle.optimizer.Adam(learning_rate=0.001,
                                      parameters=model.parameters())
        loss = CrossEntropyLoss(reduction="sum")
        with self.assertRaises(ValueError):
            model.prepare(optimizer=optim, loss=loss, amp_configs=amp_configs)
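
Note: the rejected config mixes the high-level "level" key with the lower-level "use_pure_fp16" flag, presumably the reason prepare raises ValueError here: the two express the same choice twice and may contradict each other.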
Example #14
    def test_amp_training_purefp16(self):
        if not fluid.is_compiled_with_cuda():
            self.skipTest('module not tested when compiled with ONLY_CPU')
        data = np.random.random(size=(4, 1, 28, 28)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        paddle.enable_static()
        paddle.set_device('gpu')
        net = LeNet()
        amp_level = "O2"
        inputs = InputSpec([None, 1, 28, 28], "float32", 'x')
        labels = InputSpec([None, 1], "int64", "y")
        model = Model(net, inputs, labels)
        optim = paddle.optimizer.Adam(learning_rate=0.001,
                                      parameters=model.parameters(),
                                      multi_precision=True)
        amp_configs = {"level": amp_level, "use_fp16_guard": False}
        model.prepare(optimizer=optim,
                      loss=CrossEntropyLoss(reduction="sum"),
                      amp_configs=amp_configs)
        model.train_batch([data], [label])
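
Note: pure-fp16 ("O2") training is paired with multi_precision=True on the optimizer so that fp32 master weights are kept for the update step, and use_fp16_guard=False lifts the op-level guard so the whole network runs in fp16.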
Example #15
 def func_reduce_lr_on_plateau(self):
     transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
     train_dataset = CustomMnist(mode='train', transform=transform)
     val_dataset = CustomMnist(mode='test', transform=transform)
     net = LeNet()
     optim = paddle.optimizer.Adam(learning_rate=0.001,
                                   parameters=net.parameters())
     inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
     labels = [InputSpec([None, 1], 'int64', 'label')]
     model = Model(net, inputs=inputs, labels=labels)
     model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
     callbacks = paddle.callbacks.ReduceLROnPlateau(patience=1,
                                                    verbose=1,
                                                    cooldown=1)
     model.fit(train_dataset,
               val_dataset,
               batch_size=8,
               log_freq=1,
               save_freq=10,
               epochs=10,
               callbacks=[callbacks])
Example #16
    def test_train_batch(self, dynamic=True):
        dim = 20
        data = np.random.random(size=(4, dim)).astype(np.float32)
        label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)

        def get_expect():
            fluid.enable_dygraph(fluid.CPUPlace())
            self.set_seed()
            m = MyModel()
            optim = fluid.optimizer.SGD(learning_rate=0.001,
                                        parameter_list=m.parameters())
            m.train()
            output = m(to_tensor(data))
            loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label))
            avg_loss = fluid.layers.reduce_sum(loss)
            avg_loss.backward()
            optim.minimize(avg_loss)
            m.clear_gradients()
            fluid.disable_dygraph()
            return avg_loss.numpy()

        ref = get_expect()
        for dynamic in [True, False]:
            device = paddle.set_device('cpu')
            if dynamic:
                fluid.enable_dygraph(device)
            self.set_seed()

            net = MyModel()
            optim2 = fluid.optimizer.SGD(learning_rate=0.001,
                                         parameter_list=net.parameters())

            inputs = [InputSpec([None, dim], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]
            model = Model(net, inputs, labels)
            model.prepare(optim2, loss=CrossEntropyLoss(reduction="sum"))
            loss, = model.train_batch([data], [label])
            np.testing.assert_allclose(loss.flatten(), ref.flatten())
            if dynamic:
                fluid.disable_dygraph()
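
Note: with the seed fixed, a single train_batch through the Model API is expected to reproduce the hand-written dygraph step exactly, in both dynamic and static mode, hence the strict assert_allclose with default tolerances.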
Example #17
 def test_dynamic_check_input(self):
     paddle.disable_static()
     amp_configs_list = [
         {
             "level": "O3"
         },
         {
             "level": "O1",
             "test": 0
         },
         {
             "level": "O1",
             "use_fp16_guard": True
         },
         "O3",
     ]
     if not fluid.is_compiled_with_cuda():
          self.skipTest('module not tested when compiled with ONLY_CPU')
     paddle.set_device('gpu')
     net = LeNet()
     model = Model(net)
     optim = paddle.optimizer.Adam(learning_rate=0.001,
                                   parameters=model.parameters())
     loss = CrossEntropyLoss(reduction="sum")
     # each config below is invalid on its own and must raise
     for amp_configs in amp_configs_list:
         with self.assertRaises(ValueError):
             model.prepare(optimizer=optim,
                           loss=loss,
                           amp_configs=amp_configs)
     model.prepare(optimizer=optim, loss=loss, amp_configs="O2")
     model.prepare(optimizer=optim,
                   loss=loss,
                   amp_configs={
                       "custom_white_list": {"matmul"},
                       "init_loss_scaling": 1.0
                   })
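
Note: every entry in amp_configs_list should be rejected: "O3" is not a supported level, "test" is an unknown key, and "use_fp16_guard" does not apply to "O1" in dygraph. The two prepare calls at the end are the accepted forms, a bare level string and a dict of fine-grained options such as custom_white_list and init_loss_scaling.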
Example #18
    def test_earlystopping(self):
        paddle.seed(2020)
        for dynamic in [True, False]:
            if not dynamic:
                paddle.enable_static()
            device = paddle.set_device('cpu')
            sample_num = 100
            train_dataset = MnistDataset(mode='train', sample_num=sample_num)
            val_dataset = MnistDataset(mode='test', sample_num=sample_num)

            net = LeNet()
            optim = paddle.optimizer.Adam(learning_rate=0.001,
                                          parameters=net.parameters())

            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            labels = [InputSpec([None, 1], 'int64', 'label')]

            model = Model(net, inputs=inputs, labels=labels)
            model.prepare(optim,
                          loss=CrossEntropyLoss(reduction="sum"),
                          metrics=[Accuracy()])
            callbacks_0 = paddle.callbacks.EarlyStopping('loss',
                                                         mode='min',
                                                         patience=1,
                                                         verbose=1,
                                                         min_delta=0,
                                                         baseline=None,
                                                         save_best_model=True)
            callbacks_1 = paddle.callbacks.EarlyStopping('acc',
                                                         mode='auto',
                                                         patience=1,
                                                         verbose=1,
                                                         min_delta=0,
                                                         baseline=0,
                                                         save_best_model=True)
            callbacks_2 = paddle.callbacks.EarlyStopping('loss',
                                                         mode='auto_',
                                                         patience=1,
                                                         verbose=1,
                                                         min_delta=0,
                                                         baseline=None,
                                                         save_best_model=True)
            callbacks_3 = paddle.callbacks.EarlyStopping('acc_',
                                                         mode='max',
                                                         patience=1,
                                                         verbose=1,
                                                         min_delta=0,
                                                         baseline=0,
                                                         save_best_model=True)
            model.fit(
                train_dataset,
                val_dataset,
                batch_size=64,
                save_freq=10,
                save_dir=self.save_dir,
                epochs=10,
                verbose=0,
                callbacks=[callbacks_0, callbacks_1, callbacks_2, callbacks_3])
            # Test for no val_loader
            model.fit(train_dataset,
                      batch_size=64,
                      save_freq=10,
                      save_dir=self.save_dir,
                      epochs=10,
                      verbose=0,
                      callbacks=[callbacks_0])
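
Note: the four callbacks probe EarlyStopping's input handling: explicit 'min' on loss, 'auto' (direction inferred from the metric), the invalid mode 'auto_' that should fall back with a warning, and the monitor 'acc_' that never appears in the logs and should be skipped with a warning. The final fit without an eval dataset checks that the callback stays inert when there is nothing to monitor.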