def test_dynamic_load(self):
    mnist_data = MnistDataset(mode='train')

    path = os.path.join(tempfile.mkdtemp(), '.cache_dynamic_load')
    if not os.path.exists(path):
        os.makedirs(path)

    # Exercise save/load with both the new paddle.optimizer API and the
    # legacy fluid.optimizer API.
    for new_optimizer in [True, False]:
        paddle.disable_static()
        net = LeNet()
        inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        if new_optimizer:
            optim = paddle.optimizer.Adam(
                learning_rate=0.001, parameters=net.parameters())
        else:
            optim = fluid.optimizer.Adam(
                learning_rate=0.001, parameter_list=net.parameters())
        model = Model(net, inputs, labels)
        model.prepare(
            optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
        model.fit(mnist_data, batch_size=64, verbose=0)
        model.save(path)
        model.load(path)
        paddle.enable_static()
    shutil.rmtree(path)
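
# The sketch below (not part of the original suite) distills the save/load
# round-trip that test_dynamic_load exercises, with the training loop
# stripped out. The checkpoint prefix '/tmp/lenet_ckpt' is hypothetical;
# all other names come from this module's imports.
def _save_load_roundtrip_sketch():
    paddle.disable_static()
    net = LeNet()
    inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
    labels = [InputSpec([None, 1], 'int64', 'label')]
    model = Model(net, inputs, labels)
    model.prepare(
        optimizer=paddle.optimizer.Adam(
            learning_rate=0.001, parameters=net.parameters()),
        loss=CrossEntropyLoss())
    # save() writes the parameters plus optimizer state under the given
    # prefix; load() restores both, so training can resume where it stopped.
    model.save('/tmp/lenet_ckpt')
    model.load('/tmp/lenet_ckpt')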

def func_warn_or_error(self):
    # factor must be < 1.0, otherwise a ValueError is raised.
    with self.assertRaises(ValueError):
        paddle.callbacks.ReduceLROnPlateau(factor=2.0)
    # An unknown mode only triggers a warning and falls back to 'auto'.
    paddle.callbacks.ReduceLROnPlateau(mode='1', patience=3, verbose=1)

    transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
    train_dataset = CustomMnist(mode='train', transform=transform)
    val_dataset = CustomMnist(mode='test', transform=transform)
    net = LeNet()
    optim = paddle.optimizer.Adam(
        learning_rate=0.001, parameters=net.parameters())
    inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
    labels = [InputSpec([None, 1], 'int64', 'label')]
    model = Model(net, inputs=inputs, labels=labels)
    model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
    # A monitor that never shows up in the eval logs ('miou') should only
    # produce a warning, not an error.
    callbacks = paddle.callbacks.ReduceLROnPlateau(
        monitor='miou', patience=3, verbose=1)
    model.fit(train_dataset,
              val_dataset,
              batch_size=8,
              log_freq=1,
              save_freq=10,
              epochs=1,
              callbacks=[callbacks])

    # NOTE: PiecewiseDecay takes (boundaries, values) with
    # len(values) == len(boundaries) + 1; the 0.0005 middle value below is
    # an assumed filler.
    optim = paddle.optimizer.Adam(
        learning_rate=paddle.optimizer.lr.PiecewiseDecay(
            boundaries=[5, 10], values=[0.001, 0.0005, 0.0001]),
        parameters=net.parameters())
    model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
    callbacks = paddle.callbacks.ReduceLROnPlateau(
        monitor='acc', mode='max', patience=3, verbose=1, cooldown=1)
    model.fit(train_dataset,
              val_dataset,
              batch_size=8,
              log_freq=1,
              save_freq=10,
              epochs=3,
              callbacks=[callbacks])

def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None):
    if dynamic:
        fluid.enable_dygraph(self.device)
    seed = 333
    paddle.seed(seed)
    paddle.framework.random._manual_program_seed(seed)

    net = LeNet()
    optim_new = fluid.optimizer.Adam(
        learning_rate=0.001, parameter_list=net.parameters())
    model = Model(net, inputs=self.inputs, labels=self.labels)
    model.prepare(
        optim_new,
        loss=CrossEntropyLoss(reduction="sum"),
        metrics=Accuracy())
    model.fit(self.train_dataset, batch_size=64, shuffle=False)

    result = model.evaluate(self.val_dataset, batch_size=64)
    np.testing.assert_allclose(result['acc'], self.acc1)

    # Train and evaluate again, capped at num_iters batches per epoch.
    model.fit(self.train_dataset,
              batch_size=64,
              shuffle=False,
              num_iters=num_iters)
    result = model.evaluate(
        self.val_dataset, batch_size=64, num_iters=num_iters)

    # Feed fit() with DataLoaders driven by DistributedBatchSampler, which
    # shards batches across num_replicas workers by rank.
    train_sampler = DistributedBatchSampler(
        self.train_dataset,
        batch_size=64,
        shuffle=False,
        num_replicas=num_replicas,
        rank=rank)
    val_sampler = DistributedBatchSampler(
        self.val_dataset,
        batch_size=64,
        shuffle=False,
        num_replicas=num_replicas,
        rank=rank)

    train_loader = fluid.io.DataLoader(
        self.train_dataset,
        batch_sampler=train_sampler,
        places=self.device,
        return_list=True)
    val_loader = fluid.io.DataLoader(
        self.val_dataset,
        batch_sampler=val_sampler,
        places=self.device,
        return_list=True)

    model.fit(train_loader, val_loader)

    if dynamic:
        fluid.disable_dygraph()
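
# The helper below is a sketch (not used by the tests) of the sampler/loader
# pairing built inside fit() above: DistributedBatchSampler shards the
# dataset across num_replicas workers, each identified by rank, and the
# DataLoader then simply follows the sampler's batch order.
def _distributed_loader_sketch(dataset, device, num_replicas=None, rank=None):
    sampler = DistributedBatchSampler(
        dataset,
        batch_size=64,
        shuffle=False,
        num_replicas=num_replicas,
        rank=rank)
    # When batch_sampler is given, batch_size/shuffle must not also be passed
    # to the DataLoader: batching is entirely the sampler's job.
    return fluid.io.DataLoader(
        dataset,
        batch_sampler=sampler,
        places=device,
        return_list=True)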

def func_reduce_lr_on_plateau(self):
    transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
    train_dataset = CustomMnist(mode='train', transform=transform)
    val_dataset = CustomMnist(mode='test', transform=transform)
    net = LeNet()
    optim = paddle.optimizer.Adam(
        learning_rate=0.001, parameters=net.parameters())
    inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
    labels = [InputSpec([None, 1], 'int64', 'label')]
    model = Model(net, inputs=inputs, labels=labels)
    model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
    callbacks = paddle.callbacks.ReduceLROnPlateau(
        patience=1, verbose=1, cooldown=1)
    model.fit(train_dataset,
              val_dataset,
              batch_size=8,
              log_freq=1,
              save_freq=10,
              epochs=10,
              callbacks=[callbacks])
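
# A condensed sketch of the ReduceLROnPlateau wiring the two tests above
# exercise; the model and datasets are assumed to be prepared as in
# func_reduce_lr_on_plateau. The callback watches the monitored value in the
# eval logs each epoch and, after `patience` epochs without improvement,
# scales the learning rate by `factor`, then waits `cooldown` epochs before
# counting anew.
def _reduce_lr_on_plateau_sketch(model, train_dataset, val_dataset):
    cb = paddle.callbacks.ReduceLROnPlateau(
        monitor='loss',  # the default; 'acc' with mode='max' also works
        factor=0.1,      # must be < 1.0 or __init__ raises ValueError
        patience=1,
        cooldown=1,
        verbose=1)
    model.fit(train_dataset,
              val_dataset,
              batch_size=8,
              epochs=10,
              callbacks=[cb])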

def test_earlystopping(self):
    paddle.seed(2020)
    for dynamic in [True, False]:
        if not dynamic:
            paddle.enable_static()
        device = paddle.set_device('cpu')
        sample_num = 100
        train_dataset = MnistDataset(mode='train', sample_num=sample_num)
        val_dataset = MnistDataset(mode='test', sample_num=sample_num)

        net = LeNet()
        optim = paddle.optimizer.Adam(
            learning_rate=0.001, parameters=net.parameters())
        inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        model = Model(net, inputs=inputs, labels=labels)
        model.prepare(
            optim,
            loss=CrossEntropyLoss(reduction="sum"),
            metrics=[Accuracy()])
        callbacks_0 = paddle.callbacks.EarlyStopping(
            'loss',
            mode='min',
            patience=1,
            verbose=1,
            min_delta=0,
            baseline=None,
            save_best_model=True)
        callbacks_1 = paddle.callbacks.EarlyStopping(
            'acc',
            mode='auto',
            patience=1,
            verbose=1,
            min_delta=0,
            baseline=0,
            save_best_model=True)
        # Invalid mode 'auto_' should fall back to 'auto' with a warning.
        callbacks_2 = paddle.callbacks.EarlyStopping(
            'loss',
            mode='auto_',
            patience=1,
            verbose=1,
            min_delta=0,
            baseline=None,
            save_best_model=True)
        # Unknown monitor 'acc_' should be ignored with a warning.
        callbacks_3 = paddle.callbacks.EarlyStopping(
            'acc_',
            mode='max',
            patience=1,
            verbose=1,
            min_delta=0,
            baseline=0,
            save_best_model=True)
        model.fit(
            train_dataset,
            val_dataset,
            batch_size=64,
            save_freq=10,
            save_dir=self.save_dir,
            epochs=10,
            verbose=0,
            callbacks=[callbacks_0, callbacks_1, callbacks_2, callbacks_3])
        # Test for no val_loader: EarlyStopping then has nothing to monitor.
        model.fit(
            train_dataset,
            batch_size=64,
            save_freq=10,
            save_dir=self.save_dir,
            epochs=10,
            verbose=0,
            callbacks=[callbacks_0])
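
# A minimal usage sketch (not part of the suite) distilled from
# test_earlystopping; the model, datasets, and save_dir are assumed to be
# set up as above. EarlyStopping stops training once the monitored eval
# metric fails to improve by min_delta for `patience` consecutive epochs;
# save_best_model=True keeps the best checkpoint under save_dir.
def _early_stopping_sketch(model, train_dataset, val_dataset, save_dir):
    cb = paddle.callbacks.EarlyStopping(
        'acc',        # monitored metric; mode='auto' infers max vs. min
        mode='auto',
        patience=2,
        verbose=1,
        min_delta=0,
        baseline=None,
        save_best_model=True)
    # Validation data is required: with no eval logs the callback is a no-op.
    model.fit(train_dataset,
              val_dataset,
              batch_size=64,
              save_dir=save_dir,
              epochs=10,
              callbacks=[cb])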