def _run(self,
             testproblem=None,
             initializations=None,
             hyperparams=None,
             batch_size=None,
             num_epochs=None,
             random_seed=None,
             data_dir=None,
             output_dir=None,
             weight_decay=None,
             no_logs=None,
             train_log_interval=None,
             print_train_iter=None,
             tb_log=None,
             tb_log_dir=None,
             **training_params):
        """Execute one training run on ``testproblem`` and return its output.

        Fills in missing ``batch_size``/``num_epochs`` from the test problem's
        defaults, builds the test problem, trains it with ``self.training``,
        post-processes the result, and (unless ``no_logs``) writes it to a
        run-specific output directory.

        Args:
            testproblem: Name of the DeepOBS test problem to run.
            initializations: Initialization settings forwarded to the test
                problem factory and the output-directory name.
            hyperparams (dict): Optimizer hyperparameters; a deepcopy is taken
                up front so user mutations during training don't leak into the
                logged output.
            batch_size (int, optional): Mini-batch size; test-problem default
                is used when ``None``.
            num_epochs (int, optional): Number of epochs; test-problem default
                is used when ``None``.
            random_seed (int): Seed used for the test problem.
            data_dir (str, optional): If given, overrides the global DeepOBS
                data directory before anything else runs.
            output_dir (str): Root directory for run output.
            weight_decay: Weight-decay setting forwarded to the test problem.
            no_logs (bool): If truthy, skip writing the output JSON to disk.
            train_log_interval: Logging interval forwarded to ``self.training``.
            print_train_iter: Whether training iterations are printed.
            tb_log (bool): Enable TensorBoard logging.
            tb_log_dir (str): TensorBoard log directory; the literal string
                ``'none'`` is treated as "not specified" and the run directory
                is used instead.
            **training_params: Extra keyword arguments passed through to
                ``self.training`` (also deep-copied for post-processing).

        Returns:
            The post-processed training output (as produced by
            ``self._post_process_output``).
        """

        # Creates a backup copy of the initial parameters. Users might change the dicts during training.
        hyperparams_before_training = deepcopy(hyperparams)
        training_params_before_training = deepcopy(training_params)

        # Fall back to the test problem's registered defaults when the caller
        # did not specify these explicitly.
        batch_size = self._use_default_batch_size_if_missing(
            testproblem, batch_size)
        num_epochs = self._use_default_num_epochs_if_missing(
            testproblem, num_epochs)

        # Must happen before the test problem is created so data is loaded
        # from the requested location.
        if data_dir is not None:
            global_config.set_data_dir(data_dir)

        # Directory/file names encode all run settings so runs are
        # distinguishable on disk.
        run_directory, file_name = self.generate_output_directory_name(
            testproblem, initializations, batch_size, num_epochs, weight_decay,
            random_seed, output_dir, hyperparams, **training_params)

        if tb_log:
            # NOTE: 'none' is a string sentinel (presumably from a CLI
            # default), not Python's None.
            if tb_log_dir == 'none':
                print(
                    'Tensorboard logging: No tb_log_dir specified, using settings folder {0:s} as default.'
                    .format(run_directory))
                os.makedirs(run_directory, exist_ok=True)
                tb_log_dir = run_directory

        tproblem = self.create_testproblem(testproblem, initializations,
                                           batch_size, weight_decay,
                                           random_seed)

        output = self.training(tproblem, hyperparams, num_epochs,
                               print_train_iter, train_log_interval, tb_log,
                               tb_log_dir, **training_params)

        # Post-process with the *pre-training* copies so the logged settings
        # reflect what the user originally passed in.
        output = self._post_process_output(output, testproblem,
                                           initializations, batch_size,
                                           num_epochs, random_seed,
                                           weight_decay,
                                           hyperparams_before_training,
                                           **training_params_before_training)
        if not no_logs:
            os.makedirs(run_directory, exist_ok=True)
            self.write_output(output, run_directory, file_name)
        return output
# Example #2
"""
Reduced and unreduced forward pass using only one forward through the model.
"""

from test_forward import set_up_problem

from backobs.integration import integrate_individual_loss
from backpack import backpack, extensions
from deepobs.config import set_data_dir
from deepobs.pytorch.testproblems import fmnist_2c2d, mnist_logreg, quadratic_deep

if __name__ == "__main__":
    # Whether to run the BackPACK-instrumented variant; toggled in the loop below.
    use_backpack = False
    # Pin the dataset location so all test problems share one data directory.
    set_data_dir("~/tmp/data_deepobs")

    batch_size = 20

    # DeepOBS test-problem classes to compare.
    tp_classes = [
        mnist_logreg,
        fmnist_2c2d,
        quadratic_deep,
    ]

    # Run each problem once without and once with BackPACK.
    for tp_cls in tp_classes:
        for use_backpack in [
                False,
                True,
        ]:

            # NOTE(review): the loop body appears truncated here — losses and
            # accuracies are initialized but never filled in this excerpt.
            losses = []
            accuracies = []
# Example #3
def fix_deepobs_data_dir(data_dir="~/tmp/data_deepobs"):
    """Pin DeepOBS' data directory to a single path to avoid multiple dataset copies.

    Args:
        data_dir (str): Directory where DeepOBS stores its datasets. A
            leading ``~`` is expanded to the user's home directory.
            Defaults to ``~/tmp/data_deepobs`` (the previous hard-coded path),
            so existing callers are unaffected.
    """
    set_data_dir(os.path.expanduser(data_dir))