Example #1
0
def train_model():
    """
    Train a CIANet classifier (DEN) with a fixed set of hyperparameters.

    Relies on module-level globals: data_loaders, FF, criterion, penalty,
    device, error_function and number_of_tasks.

    Returns:
        tuple: (trained model, results reported by the error function).
    """

    epochs = 100
    learning_rate = 0.001
    momentum = 0.9
    iter_to_change = 30
    err_stop_threshold = 0.99
    # Single classifier head on flattened 28x28 inputs.
    sizes = {"classifier": [28 * 28, 312, 128, 10]}
    # Per-layer drift thresholds for splitting in DEN.
    drift_thresholds = {
        "classifier": [0.45, 0.4, 10]
    }
    drift_deltas = {"classifier": [0, 0, 10]}

    trainer = DENTrainer(data_loaders, FF, sizes, learning_rate, momentum,
                         criterion, penalty, iter_to_change, device,
                         error_function, number_of_tasks, drift_thresholds,
                         err_stop_threshold, drift_deltas)

    results = trainer.train_all_tasks_sequentially(epochs, with_den=True)
    # test_model returns a list of (loss, err) pairs; take the first.
    loss, err = trainer.test_model(list(range(number_of_tasks)), False)[0]

    print("Net has final shape:" + str(trainer.model.sizes))
    print("Done training with total net accuracy:" + str(err))
    print("Done training with results from error function:" + str(results))

    return trainer.model, results
Example #2
0
def train_model():
    """
    Train a CIANet encoder/action network (DEN) using fixed hyperparameters
    and print the final network shape plus accuracy numbers.

    Uses module-level globals: data_loaders, ActionEncoder, criterion,
    penalty, device, error_function, number_of_tasks, drift_threshold.
    """

    n_epochs = 1000
    lr = 0.001
    sgd_momentum = 0.9
    change_iters = 50
    stop_threshold = 0.99
    # Encoder on flattened 32x32 RGB input with a small action head.
    layer_sizes = {"encoder": [32 * 32 * 3, 100, 50, 100],
                   "action": [100, 100]}

    trainer = DENTrainer(data_loaders, ActionEncoder, layer_sizes, lr,
                         sgd_momentum, criterion, penalty, change_iters,
                         device, error_function, number_of_tasks,
                         drift_threshold, stop_threshold)

    results = trainer.train_all_tasks_sequentially(n_epochs, with_den=True)
    all_tasks = list(range(number_of_tasks))
    loss, err = trainer.test_model(all_tasks, False)[0]

    print("Net has final shape:" + str(trainer.model.sizes))
    print("Done training with total net accuracy:" + str(err))
    print("Done training with results from error function:" + str(results))

    return trainer.model, results
Example #3
0
def train_model():
    """
    Train a CIANet convolutional classifier (DEN) with fixed hyperparameters
    and report its final shape and accuracy.

    Uses module-level globals: data_loaders, FFConv, criterion, penalty,
    device, error_function, number_of_tasks, drift_threshold.
    """

    n_epochs = 30
    lr = 0.05
    sgd_momentum = 0.9
    grow_by = 5
    stop_threshold = 0.99
    # Classifier head on flattened 32x32 RGB input.
    layer_sizes = {"classifier": [3072, 120, 60, 10]}

    trainer = DENTrainer(data_loaders, FFConv, layer_sizes, lr, sgd_momentum,
                         criterion, penalty, grow_by, device, error_function,
                         number_of_tasks, drift_threshold, stop_threshold)

    results = trainer.train_all_tasks_sequentially(n_epochs, with_den=True)
    all_tasks = list(range(number_of_tasks))
    loss, err = trainer.test_model(all_tasks, False)[0]

    print("Net has final shape:" + str(trainer.model.sizes))
    print("Done training with total net accuracy:" + str(err))
    print("Done training with results from error function:" + str(results))

    return trainer.model, results
Example #4
0
    def _setup(self, config):
        """
        Build the DENTrainer from *config* and apply tuned hyperparameters.

        Reads "DENTrainerArgs" (a list of positional trainer arguments) and
        optional keys: lr, momentum, expand_by_k, drift_threshold,
        l1_coeff, l2_coeff.
        """
        args = config.get("DENTrainerArgs")  # Type: list
        self.trainer = DENTrainer(*args)

        # Replace the trainer's optimizer with SGD over the model parameters,
        # using tuned lr/momentum (with defaults) from the config.
        self.trainer.optimizer = torch.optim.SGD(
            self.trainer.model.parameters(),
            lr=config.get("lr", 0.01),
            momentum=config.get("momentum", 0.9))

        self.trainer.expand_by_k = config.get("expand_by_k", 10)
        self.trainer.drift_threshold = config.get("drift_threshold", 0.02)

        # Penalty coefficients are optional attributes; only set those that
        # the configured penalty object actually exposes.
        penalty = self.trainer.penalty
        if hasattr(penalty, "l1_coeff"):
            penalty.l1_coeff = config.get("l1_coeff", 0)
        if hasattr(penalty, "l2_coeff"):
            penalty.l2_coeff = config.get("l2_coeff", 0)
Example #5
0
def train_model():
    """
    Train a CIANet encoder/action network (DEN) with fixed hyperparameters,
    save it to disk, and return it along with the training results.

    Uses module-level globals: data_loaders, img_size, criterion, penalty,
    device, error_function, number_of_tasks, drift_threshold.
    """
    n_epochs = 5
    lr = 0.002
    sgd_momentum = 0
    grow_by = 10
    # Encoder over flattened RGB frames of img_size, binary action head.
    layer_sizes = {
        "encoder": [img_size[0] * img_size[1] * 3, 1000, 522],
        "action": [522, 80, 2]
    }

    trainer = DENTrainer(data_loaders, layer_sizes, lr, sgd_momentum,
                         criterion, penalty, grow_by, device, error_function,
                         number_of_tasks, drift_threshold)

    print(trainer.model.sizes)
    results = trainer.train_all_tasks_sequentially(n_epochs, with_den=True)

    print("Done training with results from error function:" + str(results))

    trainer.save_model("banana_car.pt")
    return trainer.model, results
Example #6
0
class PytorchTrainable(tune.Trainable):
    """
    Ray Tune Trainable wrapping a DENTrainer, intended for use with the
    PopulationBasedTraining scheduler. Searches hyperparameters for one task.
    """

    def _setup(self, config):
        """Build the DENTrainer from *config* and apply tuned hypers."""
        args = config.get("DENTrainerArgs")  # Type: list
        self.trainer = DENTrainer(*args)

        # Fresh SGD optimizer using tuned lr/momentum (with defaults).
        self.trainer.optimizer = torch.optim.SGD(
            self.trainer.model.parameters(),
            lr=config.get("lr", 0.01),
            momentum=config.get("momentum", 0.9))

        self.trainer.expand_by_k = config.get("expand_by_k", 10)
        self.trainer.drift_threshold = config.get("drift_threshold", 0.02)

        # Penalty coefficients are optional; only set those that exist.
        penalty = self.trainer.penalty
        if hasattr(penalty, "l1_coeff"):
            penalty.l1_coeff = config.get("l1_coeff", 0)
        if hasattr(penalty, "l2_coeff"):
            penalty.l2_coeff = config.get("l2_coeff", 0)

    def _train(self):
        """Run one round over all tasks and report mean loss/accuracy."""
        # Train each task in sequence; every task after the first reuses
        # the expanded network (second positional arg is epochs = 5).
        for task in range(self.trainer.number_of_tasks):
            self.trainer.train_tasks([task], 5, task != 0)

        pairs = self.trainer.test_model(
            tasks=list(range(self.trainer.number_of_tasks)))

        n = len(pairs)
        mean_loss = sum(loss for loss, _ in pairs) / n
        mean_err = sum(err for _, err in pairs) / n

        return {"mean_accuracy": mean_err, "mean_loss": mean_loss}

    def _save(self, checkpoint_dir):
        """Persist the model into *checkpoint_dir* and return its path."""
        return self.trainer.save_model("model.pth", checkpoint_dir)

    def _restore(self, checkpoint_path):
        """Reload model weights from a previous checkpoint."""
        self.trainer.load_model(checkpoint_path)

    def _export_model(self, export_formats, export_dir):
        """Export the model state dict; only ExportFormat.MODEL is supported."""
        if export_formats != [ExportFormat.MODEL]:
            raise ValueError("unexpected formats: " + str(export_formats))

        path = os.path.join(export_dir, "exported_actionnet.pt")
        torch.save(self.trainer.model.state_dict(), path)
        return {export_formats[0]: path}

    def reset_config(self, new_config):
        """Apply new lr/momentum in place so Tune can reuse this actor."""
        for group in self.trainer.optimizer.param_groups:
            if "lr" in new_config:
                group["lr"] = new_config["lr"]
            if "momentum" in new_config:
                group["momentum"] = new_config["momentum"]

        self.config = new_config
        return True