Exemplo n.º 1
0
 def init_dirs(self, gate):
     """Resolve (and create) the output directory for `gate`.

     In a non-main run the gate directory is created directly under
     `self.base_dir`; a main run gets a fresh timestamped directory.
     Returns the resolved path.
     """
     if not self.is_main:
         base_dir = os.path.join(self.base_dir, gate)
         create_directory(base_dir)
     else:
         base_dir = create_directory_timestamp(self.base_dir, gate)
     return base_dir
Exemplo n.º 2
0
 def save_results(self):
     """Pickle the accumulated results into a fresh timestamped directory."""
     target_dir = create_directory_timestamp(self.save_path, self.save_dir)
     save(mode='pickle',
          path=target_dir,
          filename='result',
          configs=self.config_dict,
          dictionary=self.results.results)
Exemplo n.º 3
0
def train_with_loader(network,
                      training_data,
                      config_dict,
                      validation_data=None,
                      loss_fn=torch.nn.MSELoss()):
    """Train `network` with a data loader and an Adam optimizer.

    Args:
        network: model exposing `params_groups(config_dict)` for the
            optimizer parameter groups.
        training_data: loader handed to `batch_training` each epoch.
        config_dict: configuration with keys 'betas', 'save_dir', 'name',
            'cv_penalty', 'nr_epochs' and optionally 'save_interval'.
            'save_dir' is rewritten in-place to the timestamped directory.
        validation_data: optional loader evaluated once per epoch
            (NaN columns are recorded when it is None).
        loss_fn: loss criterion (default: MSE).

    Returns:
        Tuple (costs, accuracy_over_epochs): two (nr_epochs, 2) arrays
        holding training (column 0) and validation (column 1) values.
    """
    print('------- TRAINING WITH LOADER ---------')
    # set configurations
    optimizer = torch.optim.Adam(network.params_groups(config_dict),
                                 betas=config_dict['betas'])
    config_dict["save_dir"] = create_directory_timestamp(
        config_dict["save_dir"], config_dict["name"])
    save_configs(
        config_dict,
        os.path.join(config_dict["save_dir"], "training_configs.json"))
    # Define variables
    regularization = torch.tensor(config_dict['cv_penalty'],
                                  device=TorchUtils.get_accelerator_type())
    # training and validation costs per epoch
    costs = np.zeros((config_dict["nr_epochs"], 2))
    accuracy_over_epochs = np.zeros((config_dict["nr_epochs"], 2))
    looper = trange(config_dict["nr_epochs"], desc='Initialising')
    for epoch in looper:

        costs[epoch, 0], accuracy_over_epochs[epoch, 0] = batch_training(
            training_data, network, optimizer, loss_fn, regularization)
        # Evaluate validation error
        if validation_data is not None:
            costs[epoch, 1], accuracy_over_epochs[epoch, 1] = evaluate(
                network, loss_fn, validation_data)
        else:
            costs[epoch, 1], accuracy_over_epochs[epoch, 1] = np.nan, np.nan

        # BUGFIX: was `% config_dict["save_interval"] == 10`, which never
        # fires when save_interval <= 10; checkpoint every save_interval
        # epochs instead.
        if 'save_interval' in config_dict and epoch % config_dict[
                "save_interval"] == 0:
            save_model(network, config_dict["save_dir"], f'checkpoint_{epoch}')
        if np.isnan(costs[epoch, 0]):
            # Invalidate the remaining history and abort training.
            costs[epoch:, 0] = np.nan
            costs[epoch:, 1] = np.nan
            print('--------- Training interrupted value was Nan!! ---------')
            break
        looper.set_description(
            f' Epoch: {epoch} | Training Error:{costs[epoch, 0]:7.3f} | Val. Error:{costs[epoch, 1]:7.3f}'
        )

    save_model(network, config_dict["save_dir"], 'final_model')
    save_parameters_as_numpy(network, config_dict["save_dir"])
    # BUGFIX: `accuracy` was an undefined name (NameError at save time);
    # the tracked array is `accuracy_over_epochs`.
    np.savez(os.path.join(config_dict["save_dir"], 'training_history'),
             costs=costs,
             accuracy=accuracy_over_epochs)
    print('------------DONE-------------')
    return costs, accuracy_over_epochs
Exemplo n.º 4
0
 def init_dirs(self, base_dir, is_main=False):
     """Set up the directories for a ring-classification run.

     When `is_main` is True a fresh timestamped 'ring_classification'
     directory is created under `base_dir`; otherwise `base_dir` is used
     as-is. Also wires the resolved directory into the algorithm configs
     and instantiates the algorithm.
     """
     if is_main:
         base_dir = create_directory_timestamp(base_dir,
                                               'ring_classification')
     self.reproducibility_dir = os.path.join(base_dir, 'reproducibility')
     create_directory(self.reproducibility_dir)
     algorithm_configs = self.configs['algorithm_configs']
     algorithm_configs['results_base_dir'] = base_dir
     self.algorithm = get_algorithm(algorithm_configs)
     self.results_dir = os.path.join(base_dir, 'results')
     create_directory(self.results_dir)
Exemplo n.º 5
0
    def init_dirs(self, gap):
        """Prepare the search directories for the given gap (in mV)."""
        main_dir = f'searcher_{gap}mV'

        if not self.is_main:
            base_dir = os.path.join(self.base_dir, main_dir)
            create_directory(base_dir)
        else:
            base_dir = create_directory_timestamp(self.base_dir, main_dir)
        self.search_stats_dir = os.path.join(base_dir, 'search_stats')
        create_directory(self.search_stats_dir)

        # The wrapped task writes into the same base directory.
        self.task.init_dirs(base_dir, is_main=False)
Exemplo n.º 6
0
 def init_dirs(self):
     """Create the validation output tree and propagate it to sub-components."""
     validation_root = os.path.join(self.configs['results_base_dir'],
                                    'validation')
     self.main_dir = create_directory_timestamp(validation_root, 'validation')
     self.debugger.init_dirs(self.main_dir)
     # Both processors only get their own directories when debugging a
     # device architecture.
     for processor in (self.processor, self.validation_processor):
         if (processor.configs['debug']
                 and processor.configs['architecture'] ==
                 'device_architecture'):
             processor.init_dirs(self.main_dir, is_main=False)
     self.debug_plots = os.path.join(self.main_dir, 'debug', 'results')
     create_directory(self.debug_plots)
Exemplo n.º 7
0
 def init_dirs(self, base_dir, is_main=False):
     """Create the GA output (and optional checkpoint) directories.

     NOTE(review): the `is_main` parameter is never read here — the branch
     is driven by `self.is_main`. Confirm whether that is intentional.
     """
     main_folder_name = self.configs.get('experiment_name',
                                         'genetic_algorithm_data')
     if self.is_main:
         base_dir = create_directory_timestamp(base_dir, main_folder_name)
     else:
         base_dir = os.path.join(base_dir, main_folder_name)
         create_directory(base_dir)
     self.default_output_dir = os.path.join(base_dir, 'reproducibility')
     create_directory(self.default_output_dir)
     if self.configs['checkpoints']['use_checkpoints']:
         self.default_checkpoints_dir = os.path.join(base_dir, 'checkpoints')
         create_directory(self.default_checkpoints_dir)
Exemplo n.º 8
0
    def init_dirs(self, vc_dimension):
        """Resolve the results directory for a VC-dimension test.

        Ensures `self.excel_file` points at 'capacity_test_results.xlsx'
        and records the resolved directory in the boolean-gate configs.
        Returns the directory path.
        """
        folder_name = f'vc_dimension_{vc_dimension}'

        if self.is_main:
            results_dir = create_directory_timestamp(self.base_dir,
                                                     folder_name)
            self.excel_file = ExcelFile(
                os.path.join(results_dir, 'capacity_test_results.xlsx'))
        else:
            if self.excel_file is None:
                self.excel_file = ExcelFile(
                    os.path.join(self.base_dir, 'capacity_test_results.xlsx'))
            results_dir = os.path.join(self.base_dir, folder_name)
        self.boolean_gate_test_configs['results_base_dir'] = results_dir
        return results_dir
Exemplo n.º 9
0
 def init_dirs(self):
     """Create and return a timestamped 'capacity' results directory."""
     # FIX: dropped the extraneous f-prefix — the literal has no
     # placeholders (ruff F541); the value is unchanged.
     main_dir = 'capacity'
     base_dir = create_directory_timestamp(self.base_dir, main_dir)
     return base_dir
Exemplo n.º 10
0
def trainer(data, network, config_dict, loss_fn=torch.nn.MSELoss()):
    """Train `network` with Adam, evaluating once per epoch.

    Args:
        data: pair ((x_train, y_train), (x_val, y_val)) of tensors.
        network: torch module; if it defines `regularizer()`, that term is
            added to the training loss.
        config_dict: needs 'nr_epochs', 'batch_size', 'learning_rate';
            optional 'seed', 'betas', and 'results_path' +
            'experiment_name' to enable checkpoint/model saving.
        loss_fn: loss criterion (default: MSE).

    Returns:
        (nr_epochs, 2) numpy array: training cost (column 0) and
        validation cost (column 1) per epoch.
    """
    # set configurations
    if "seed" in config_dict:
        torch.manual_seed(config_dict['seed'])
        print('The torch RNG is seeded with ', config_dict['seed'])

    if "betas" in config_dict:
        optimizer = torch.optim.Adam(network.parameters(),
                                     lr=config_dict['learning_rate'],
                                     betas=config_dict["betas"])
        # BUGFIX: the betas were wrapped in a set literal
        # (`{config_dict["betas"]}`), printing e.g. {(0.9, 0.999)}.
        print("Set betas to values: ", config_dict["betas"])
    else:
        optimizer = torch.optim.Adam(network.parameters(),
                                     lr=config_dict['learning_rate'])
    print('Prediction using ADAM optimizer')
    if 'results_path' in config_dict:
        dir_path = create_directory_timestamp(config_dict['results_path'],
                                              config_dict['experiment_name'])
    else:
        dir_path = None

    # Define variables
    x_train, y_train = data[0]
    x_val, y_val = data[1]
    # training and validation costs per epoch
    costs = np.zeros((config_dict['nr_epochs'], 2))

    for epoch in range(config_dict['nr_epochs']):

        network.train()
        permutation = torch.randperm(x_train.size()[0])  # Permute indices

        for mb in range(0, len(permutation), config_dict['batch_size']):

            # Get prediction for this minibatch
            indices = permutation[mb:mb + config_dict['batch_size']]
            x_mb = x_train[indices]
            y_pred = network(x_mb)
            # GD step; add the model's own regularizer when it has one
            if 'regularizer' in dir(network):
                loss = loss_fn(y_pred,
                               y_train[indices]) + network.regularizer()
            else:
                loss = loss_fn(y_pred, y_train[indices])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Evaluate training error on a random training sample the size of
        # the validation set; no_grad avoids building an unused graph.
        network.eval()
        with torch.no_grad():
            samples = len(x_val)
            get_indices = torch.randperm(len(x_train))[:samples]
            x_sampled = x_train[get_indices]
            prediction = network(x_sampled)
            target = y_train[get_indices]
            costs[epoch, 0] = loss_fn(prediction, target).item()
            # Evaluate Validation error
            prediction = network(x_val)
            costs[epoch, 1] = loss_fn(prediction, y_val).item()

        # NOTE(review): the interval comes from module-level SGD_CONFIGS
        # while everything else reads config_dict — confirm intended.
        if dir_path and (epoch + 1) % SGD_CONFIGS['save_interval'] == 0:
            save('torch',
                 config_dict,
                 dir_path,
                 f'checkpoint_epoch{epoch}.pt',
                 torch_model=network)

        if epoch % 10 == 0:
            print('Epoch:', epoch, 'Val. Error:', costs[epoch, 1],
                  'Training Error:', costs[epoch, 0])

    if dir_path:
        save('torch',
             config_dict,
             dir_path,
             'trained_network.pt',
             torch_model=network)
    return costs
Exemplo n.º 11
0
 def init_dirs(self, configs):
     """Create a timestamped 'capacity_test' directory and wire it into configs.

     The resolved directory becomes the results base for the
     VC-dimension test; the (mutated) configs dict is returned.
     """
     results_dir = create_directory_timestamp(configs['results_base_dir'],
                                              'capacity_test')
     configs['vc_dimension_test']['results_base_dir'] = results_dir
     self.configs_dir = os.path.join(results_dir, 'capacity_configs.json')
     return configs