Example #1
    def test_optimizer_saver(self,
                             optimizer_config=None,
                             save_to_json_file=False):

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        criterion = torch.nn.CrossEntropyLoss()

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=64,
                                  num_workers=4)

        for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
            image, target = image.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

        if optimizer_config is None:
            self.optimizer_config = OptimizerConfig(
                **self.storage_config,
                optimizer_name="testoptimizer",
                additional={"version": "1.0.1"})
        else:
            self.optimizer_config = optimizer_config

        if save_to_json_file:
            self.optimizer_config_file = "optimizer_config_file.json"
            self.optimizer_config.to_json_file(self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)
        self.optimizer_manager.save(optimizer)
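
Example #1 trains one epoch and then hands the optimizer to OptimizerManager.save. For readers unfamiliar with the underlying pattern, the round trip below is a minimal sketch using only plain PyTorch state_dict serialization (a stand-in nn.Linear replaces the Model class, and the path is illustrative; the manager's actual storage backend is not shown here):

    import torch
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(784, 10)            # toy stand-in for Model
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    # One dummy step so the optimizer accumulates Adam state.
    model(torch.randn(8, 784)).sum().backward()
    optimizer.step()

    # Persist and restore the optimizer state with plain PyTorch.
    torch.save(optimizer.state_dict(), "/tmp/optimizer.pt")
    fresh = optim.Adam(model.parameters(), lr=0.01)
    fresh.load_state_dict(torch.load("/tmp/optimizer.pt"))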
Example #2
    def test_optimizer_saver(self,
                             optimizer_config=None,
                             save_to_json_file=False):

        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        train_labels = train_labels[:1000]
        train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0

        self.model = self.create_model()
        self.model.fit(train_images, train_labels, epochs=1)

        if optimizer_config is None:
            self.optimizer_config = OptimizerConfig(
                **self.storage_config,
                optimizer_name="test_optimizer_saver",
                additional={"framework": "tensorflow"})
        else:
            self.optimizer_config = optimizer_config

        if save_to_json_file:
            self.optimizer_config_file = "model_config_file.json"
            self.optimizer_config.to_json_file(self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        self.optimizer_manager.save(self.model.optimizer)
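
Example #2 is the TensorFlow counterpart: model.fit creates the Adam slot variables, after which self.model.optimizer carries state worth saving. With the TF 2.x optimizer API these tests use, the equivalent round trip can be sketched with get_weights/set_weights (the same calls the loader tests below rely on); the file path and toy data are illustrative:

    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(10, input_shape=(784, ))])
    model.compile(
        optimizer="adam",
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True))

    x = np.random.rand(64, 784).astype("float32")
    y = np.random.randint(0, 10, size=(64, ))
    model.fit(x, y, epochs=1, verbose=0)  # creates the slot variables

    # Save the optimizer state as a list of numpy arrays ...
    np.savez("/tmp/adam_state.npz", *model.optimizer.get_weights())

    # ... and restore it; set_weights only works once the slots exist.
    data = np.load("/tmp/adam_state.npz")
    model.optimizer.set_weights(
        [data["arr_%d" % i] for i in range(len(data.files))])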
Example #3
    def test_optimizer_scheduler_saver(self):
        from torch.optim.lr_scheduler import StepLR

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=1.0)
        criterion = torch.nn.CrossEntropyLoss()
        scheduler = StepLR(optimizer, step_size=134, gamma=0.99)

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=64,
                                  num_workers=4)

        for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
            image, target = image.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            scheduler.step()

        self.optimizer_config = OptimizerConfig(
            **self.storage_config,
            optimizer_name="testoptimizerwithscheduler",
            additional={"version": "1.0.1"})
        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)
        self.optimizer_manager.save(optimizer, scheduler)

        return scheduler
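
Example #3 steps a StepLR scheduler next to the optimizer and saves both in one call; load_with_scheduler (Example #8) restores the pair. The plain-PyTorch equivalent bundles both state dicts in a single checkpoint, sketched below with a toy model and an illustrative path:

    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim.lr_scheduler import StepLR

    model = nn.Linear(784, 10)
    optimizer = optim.Adam(model.parameters(), lr=1.0)
    scheduler = StepLR(optimizer, step_size=134, gamma=0.99)

    model(torch.randn(4, 784)).sum().backward()
    optimizer.step()
    scheduler.step()

    # Bundle both state dicts into one checkpoint.
    torch.save({"optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict()}, "/tmp/ckpt.pt")

    # Restore into freshly constructed objects; the loaded scheduler
    # state overrides the constructor arguments, which is what the
    # assertEqual in Example #8's loader test relies on.
    ckpt = torch.load("/tmp/ckpt.pt")
    new_optimizer = optim.Adam(model.parameters(), lr=1.0)
    new_scheduler = StepLR(new_optimizer, step_size=30, gamma=0.1)
    new_optimizer.load_state_dict(ckpt["optimizer"])
    new_scheduler.load_state_dict(ckpt["scheduler"])
    assert new_scheduler.state_dict() == scheduler.state_dict()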
Example #4
    def test_optimizer_saver_from_json_file(self):

        self.test_optimizer_saver(save_to_json_file=True)

        self.optimizer_config = None
        self.optimizer_manager = None

        self.optimizer_config = OptimizerConfig.from_json_file(
            self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        criterion = torch.nn.CrossEntropyLoss()

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=64,
                                  num_workers=4)

        for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
            image, target = image.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)
        self.optimizer_manager.save(optimizer)
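
Example #4 round-trips the configuration through JSON: to_json_file persists it and from_json_file rebuilds an equivalent OptimizerConfig, so the manager can be reconstructed in a later process. A hypothetical sketch of that interface (the class and field names here are illustrative, not the library's actual schema):

    import json

    class ExampleConfig:
        """Illustrative config mirroring the to/from-JSON interface."""

        def __init__(self, optimizer_name, additional=None):
            self.optimizer_name = optimizer_name
            self.additional = additional or {}

        def to_json_file(self, path):
            with open(path, "w") as f:
                json.dump(vars(self), f)

        @classmethod
        def from_json_file(cls, path):
            with open(path) as f:
                return cls(**json.load(f))

    config = ExampleConfig("testoptimizer", additional={"version": "1.0.1"})
    config.to_json_file("optimizer_config_file.json")
    restored = ExampleConfig.from_json_file("optimizer_config_file.json")
    assert vars(restored) == vars(config)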
Example #5
    def test_optimizer_saver_nas(self):

        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        train_labels = train_labels[:1000]
        train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0

        self.model = self.create_model()
        self.model.fit(train_images, train_labels, epochs=1)

        self.optimizer_config = OptimizerConfig(
            **self.nas_config,
            optimizer_name="test_optimizer_saver_nas",
            additional={"framework": "tensorflow"})

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        self.optimizer_manager.save(self.model.optimizer)
Example #6
    def test_tfmodel_saver_from_json_file(self):

        self.test_optimizer_saver(save_to_json_file=True)

        self.optimizer_config = None
        self.optimizer_manager = None

        self.optimizer_config = OptimizerConfig.from_json_file(
            self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        self.optimizer_manager.save(self.model.optimizer)
Example #7
class TFOptimizerTest(OptimizerTest, unittest.TestCase):
    def create_model(self):
        model = tf.keras.models.Sequential([
            keras.layers.Dense(512, activation="relu", input_shape=(784, )),
            keras.layers.Dropout(0.2),
            keras.layers.Dense(10),
        ])
        model.compile(
            optimizer="adam",
            loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=["accuracy"],
        )
        return model

    def test_optimizer_saver(self,
                             optimizer_config=None,
                             save_to_json_file=False):

        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        train_labels = train_labels[:1000]
        train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0

        self.model = self.create_model()
        self.model.fit(train_images, train_labels, epochs=1)

        if optimizer_config is None:
            self.optimizer_config = OptimizerConfig(
                **self.storage_config,
                optimizer_name="test_optimizer_saver",
                additional={"framework": "tensorflow"})
        else:
            self.optimizer_config = optimizer_config

        if save_to_json_file:
            self.optimizer_config_file = "model_config_file.json"
            self.optimizer_config.to_json_file(self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        self.optimizer_manager.save(self.model.optimizer)

    def test_tfmodel_saver_from_json_file(self):

        self.test_optimizer_saver(save_to_json_file=True)

        self.optimizer_config = None
        self.optimizer_manager = None

        self.optimizer_config = OptimizerConfig.from_json_file(
            self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        self.optimizer_manager.save(self.model.optimizer)

    def test_optimizer_loader(self):

        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        train_labels = train_labels[:1000]
        train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0

        self.model = self.create_model()
        self.model.fit(train_images, train_labels, epochs=1)
        before_optim_weight = copy.deepcopy(self.model.optimizer.get_weights())

        self.test_optimizer_saver()

        # 1,000 samples at Keras's default batch size of 32 -> 32
        # iterations after one epoch of fit(), hence step=32.
        self.optimizer_manager.load(self.model.optimizer, step=32)
        after_optim_weight = self.model.optimizer.get_weights()

        assert len(before_optim_weight) == len(after_optim_weight)
        # Index 0 is the optimizer's iteration counter, which may be
        # unchanged; compare each remaining slot variable element-wise.
        for i in range(1, len(after_optim_weight)):
            assert not np.array_equal(after_optim_weight[i],
                                      before_optim_weight[i])

    def test_optimizer_saver_nas(self):

        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        train_labels = train_labels[:1000]
        train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0

        self.model = self.create_model()
        self.model.fit(train_images, train_labels, epochs=1)

        self.optimizer_config = OptimizerConfig(
            **self.nas_config,
            optimizer_name="test_optimizer_saver_nas",
            additional={"framework": "tensorflow"})

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        self.optimizer_manager.save(self.model.optimizer)

    def test_optimizer_loader_nas(self):

        (train_images, train_labels), _ = tf.keras.datasets.mnist.load_data()
        train_labels = train_labels[:1000]
        train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0

        self.model = self.create_model()
        self.model.fit(train_images, train_labels, epochs=1)
        before_optim_weight = copy.deepcopy(self.model.optimizer.get_weights())

        self.test_optimizer_saver_nas()

        self.optimizer_manager.load(self.model.optimizer, step=32)
        after_optim_weight = self.model.optimizer.get_weights()

        assert len(before_optim_weight) == len(after_optim_weight)
        # As above: skip the iteration counter and compare element-wise.
        for i in range(1, len(after_optim_weight)):
            assert not np.array_equal(after_optim_weight[i],
                                      before_optim_weight[i])
Example #8
class TorchOptimizerTest(OptimizerTest, unittest.TestCase):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    train_dataset = datasets.MNIST("/tmp/data",
                                   train=True,
                                   download=True,
                                   transform=transform)

    def test_optimizer_saver(self,
                             optimizer_config=None,
                             save_to_json_file=False):

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        criterion = torch.nn.CrossEntropyLoss()

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=64,
                                  num_workers=4)

        for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
            image, target = image.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

        if optimizer_config is None:
            self.optimizer_config = OptimizerConfig(
                **self.storage_config,
                optimizer_name="testoptimizer",
                additional={"version": "1.0.1"})
        else:
            self.optimizer_config = optimizer_config

        if save_to_json_file:
            self.optimizer_config_file = "optimizer_config_file.json"
            self.optimizer_config.to_json_file(self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)
        self.optimizer_manager.save(optimizer)

    def test_optimizer_saver_from_json_file(self):

        self.test_optimizer_saver(save_to_json_file=True)

        self.optimizer_config = None
        self.optimizer_manager = None

        self.optimizer_config = OptimizerConfig.from_json_file(
            self.optimizer_config_file)

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        criterion = torch.nn.CrossEntropyLoss()

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=64,
                                  num_workers=4)

        for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
            image, target = image.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)
        self.optimizer_manager.save(optimizer)

    def test_optimizer_loader(self):

        self.test_optimizer_saver()

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=0.01)

        # 60,000 MNIST samples / batch size 64 -> 938 steps per epoch.
        self.optimizer_manager.load(optimizer, step=938)

    def test_optimizer_scheduler_saver(self):
        from torch.optim.lr_scheduler import StepLR

        model = Model().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=1.0)
        criterion = torch.nn.CrossEntropyLoss()
        scheduler = StepLR(optimizer, step_size=134, gamma=0.99)

        train_loader = DataLoader(self.train_dataset,
                                  batch_size=64,
                                  num_workers=4)

        for batch_idx, (image, target) in enumerate(tqdm(train_loader)):
            image, target = image.to(self.device), target.to(self.device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            scheduler.step()

        self.optimizer_config = OptimizerConfig(
            **self.storage_config,
            optimizer_name="testoptimizerwithscheduler",
            additional={"version": "1.0.1"})
        self.optimizer_manager = OptimizerManager(config=self.optimizer_config)
        self.optimizer_manager.save(optimizer, scheduler)

        return scheduler

    def test_optimizer_scheduler_loader(self):
        from torch.optim.lr_scheduler import StepLR

        _scheduler = self.test_optimizer_scheduler_saver()

        model = Model()
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        scheduler = StepLR(optimizer, step_size=30, gamma=0.1)

        self.optimizer_manager.load_with_scheduler(optimizer,
                                                   scheduler,
                                                   step=938)
        self.assertEqual(_scheduler.state_dict(), scheduler.state_dict())
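
The loader tests in Example #8 only call load; a quick way to confirm a load actually populated the optimizer, mirroring the get_weights checks in the TF tests above, is to inspect the state dict before and after (a minimal plain-PyTorch sketch with an illustrative path):

    import torch
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(784, 10)
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    model(torch.randn(2, 784)).sum().backward()
    optimizer.step()
    torch.save(optimizer.state_dict(), "/tmp/opt.pt")

    fresh = optim.Adam(model.parameters(), lr=0.01)
    assert not fresh.state_dict()["state"]   # empty before any step/load
    fresh.load_state_dict(torch.load("/tmp/opt.pt"))
    assert fresh.state_dict()["state"]       # exp_avg / exp_avg_sq restored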