def init_dirs(base_dir, is_main=True, gate=""):
    """Create and return the output directory for a gate run.

    For a main run a timestamped directory named after ``gate`` is created
    under ``base_dir``; otherwise ``gate`` is simply joined onto ``base_dir``
    and the resulting directory is created if missing.
    """
    if is_main:
        target_dir = create_directory_timestamp(base_dir, gate)
    else:
        target_dir = os.path.join(base_dir, gate)
        create_directory(target_dir)
    return target_dir
def init_dirs(dimension, base_dir, is_main):
    """Create and return the results directory for one VC dimension.

    The folder is named ``vc_dimension_<dimension>``; main runs get a
    timestamped directory, nested runs are joined under ``base_dir``.
    """
    folder = "vc_dimension_" + str(dimension)
    if is_main:
        base_dir = create_directory_timestamp(base_dir, folder)
    else:
        base_dir = os.path.join(base_dir, folder)
    # Both branches of the original created the directory; done once here.
    create_directory(base_dir)
    return base_dir
def init_dirs(base_dir, is_main=True):
    """Create and return the validation output directory."""
    name = 'validation'
    # NOTE(review): 'validation' is joined here and used again as the folder
    # name below, producing a '.../validation/validation[...]' path — confirm
    # this nesting is intentional.
    validation_root = os.path.join(base_dir, 'validation')
    if is_main:
        out_dir = create_directory_timestamp(validation_root, name)
    else:
        out_dir = os.path.join(validation_root, name)
        create_directory(out_dir)
    return out_dir
def init_dirs(gate_name, base_dir, is_main):
    """Create the gate output directory plus its reproducibility subfolder.

    Returns a ``(base_dir, reproducibility_dir)`` tuple; the reproducibility
    directory is always created on disk.
    """
    if is_main:
        base_dir = create_directory_timestamp(base_dir, gate_name)
    else:
        base_dir = os.path.join(base_dir, gate_name)
    # The reproducibility path was built identically in both branches of the
    # original; hoisted here without changing behavior.
    reproducibility_dir = os.path.join(base_dir, "reproducibility")
    create_directory(reproducibility_dir)
    return base_dir, reproducibility_dir
def init_dirs(gap, base_dir, is_main=False, save_data=False):
    """Create the ring-classification output directories for a given gap.

    Returns ``(results_dir, reproducibility_dir)``; both are created on disk.
    When ``save_data`` is False the reproducibility data is redirected into a
    'tmp' subfolder instead of the permanent location.
    """
    if is_main:
        base_dir = create_directory_timestamp(
            base_dir, "ring_classification_gap_" + gap)
    if save_data:
        reproducibility_dir = os.path.join(base_dir, "reproducibility")
    else:
        reproducibility_dir = os.path.join(base_dir, "reproducibility", "tmp")
    create_directory(reproducibility_dir)
    results_dir = os.path.join(base_dir, "results")
    create_directory(results_dir)
    return results_dir, reproducibility_dir
def init_dirs(gap, base_dir, is_main=True):
    """Set up the searcher output tree for a given gap.

    Returns ``(base_dir, search_stats_dir, results_dir, reproducibility_dir)``;
    every returned directory is created on disk.
    """
    run_name = f"searcher_{gap}gap"
    if is_main:
        base_dir = create_directory_timestamp(base_dir, run_name)
    else:
        base_dir = os.path.join(base_dir, run_name)
        create_directory(base_dir)
    subdirs = []
    for label in ("search_stats", "results", "reproducibility"):
        path = os.path.join(base_dir, label)
        create_directory(path)
        subdirs.append(path)
    return (base_dir, *subdirs)
def train_surrogate_model(configs,
                          model,
                          criterion,
                          optimizer,
                          logger=None,
                          main_folder='training_data'):
    """Train a surrogate model and save its results, plots and state dict.

    Creates a timestamped results directory, seeds the run (writing the seed
    actually used back into ``configs['seed']`` for reproducibility), trains
    the model on the training/validation dataloaders, post-processes every
    available dataloader, plots the training profile and finally saves the
    model in the SMG ``state_dict`` format (with an ``info`` entry) as
    ``model.pt``.

    Args:
        configs: Experiment configuration dict; must contain
            'results_base_dir' and 'hyperparameters', may contain 'seed'.
        model: The torch model to be trained.
        criterion: Loss function used during training.
        optimizer: Optimizer used during training.
        logger: Optional logger forwarded to ``train``.
        main_folder: Name of the results folder inside the base directory.

    Returns:
        None. Side effects: files written under the results directory and
        ``configs['seed']`` updated with the seed actually used.
    """
    results_dir = create_directory_timestamp(configs['results_base_dir'],
                                             main_folder)
    # dict.get replaces the explicit 'seed' in configs / else branching; the
    # seed actually used is stored back so the run can be reproduced.
    seed = TorchUtils.init_seed(configs.get('seed'), deterministic=True)
    configs['seed'] = seed

    # Get training and validation data plus the device amplification.
    dataloaders, amplification, info_dict = load_data(configs)

    model, performances = train(model, (dataloaders[0], dataloaders[1]),
                                criterion,
                                optimizer,
                                configs['hyperparameters'],
                                logger=logger,
                                save_dir=results_dir)

    # Post-process every dataloader that is present (some may be absent).
    labels = ['TRAINING', 'VALIDATION', 'TEST']
    for label, dataloader in zip(labels, dataloaders):
        if dataloader is not None:
            postprocess(dataloader, model, amplification, results_dir,
                        label=label)

    # Multiplying by amplification**2 rescales the (MSE) performance history
    # back to amplified units before plotting.
    training_profile = [
        TorchUtils.get_numpy_from_tensor(history) * (amplification**2)
        for history in performances['performance_history']
    ]
    plt.figure()
    for profile in training_profile:
        plt.plot(profile)
    plt.title('Training profile')
    plt.legend(['training', 'validation'])
    plt.savefig(os.path.join(results_dir, 'training_profile'))

    # Save the model according to the SMG standard: the state dict carries an
    # 'info' entry holding the data info and the configs used for training.
    state_dict = model.state_dict()
    state_dict['info'] = {
        'data_info': info_dict,
        'smg_configs': configs,
    }
    torch.save(state_dict, os.path.join(results_dir, "model.pt"))
    print('Model saved in :' + results_dir)
def capacity_test(
    configs,
    custom_model,
    criterion,
    algorithm,
    data_transforms=None,
    waveform_transforms=None,
    logger=None,
):
    """Run the VC-dimension test for every dimension in the configured range.

    For each dimension from ``configs['from_dimension']`` to
    ``configs['to_dimension']`` (inclusive) a ``vc_dimension_test`` is run;
    the capacity, accuracy, performance and correlation distributions are
    collected, pickled to ``summary_results.pickle`` and plotted.
    """
    banner = "*****************************************************************************************"
    print(banner)
    print(
        f"CAPACITY TEST FROM VCDIM {configs['from_dimension']} TO VCDIM {configs['to_dimension']} "
    )
    print(banner)
    base_dir = create_directory_timestamp(configs["results_base_dir"],
                                          "capacity_test")
    summary_results = {
        key: []
        for key in (
            "capacity_per_N",
            "accuracy_distrib_per_N",
            "performance_distrib_per_N",
            "correlation_distrib_per_N",
        )
    }
    for dimension in range(configs["from_dimension"],
                           configs["to_dimension"] + 1):
        # vc_dimension_test reads the output folder and target dimension
        # directly from the configs dict.
        configs["results_base_dir"] = base_dir
        configs["current_dimension"] = dimension
        results = vc_dimension_test(
            configs,
            custom_model,
            criterion,
            algorithm,
            data_transforms=data_transforms,
            waveform_transforms=waveform_transforms,
            logger=logger,
            is_main=False,
        )
        summary_results["capacity_per_N"].append(
            TorchUtils.get_numpy_from_tensor(results["capacity"]))
        summary_results["accuracy_distrib_per_N"].append(
            TorchUtils.get_numpy_from_tensor(results["accuracies"]))
        summary_results["performance_distrib_per_N"].append(
            TorchUtils.get_numpy_from_tensor(results["performances"][:, -1]))
        summary_results["correlation_distrib_per_N"].append(
            TorchUtils.get_numpy_from_tensor(results["correlations"]))
        # Free the per-dimension results before the next (larger) run.
        del results
    with open(os.path.join(base_dir, "summary_results.pickle"), "wb") as fp:
        pickle.dump(summary_results, fp, protocol=pickle.HIGHEST_PROTOCOL)
    plot_summary(summary_results, configs["from_dimension"],
                 configs["to_dimension"], base_dir)
    print(banner)
def generate_surrogate_model(
    configs,
    custom_model=NeuralNetworkModel,
    criterion=MSELoss(),
    custom_optimizer=Adam,
    main_folder="training_data",
):
    """Train a surrogate model from configs; save plots and ``model.pt``.

    Seeds the run, creates a timestamped results directory, loads the
    train/validation/test dataloaders, trains the model, post-processes every
    available dataloader, plots the training profile (annotated with the test
    loss when a test set exists) and saves the model state dict with its
    ``info`` metadata attached.

    Args:
        configs: Configuration dict with 'results_base_dir', 'processor' and
            'hyperparameters' (containing 'learning_rate' and 'epochs').
        custom_model: Model class instantiated with ``configs['processor']``.
        criterion: Loss function.
        custom_optimizer: Optimizer class built over trainable parameters.
        main_folder: Name of the results folder inside the base directory.

    Returns:
        None. Side effects: plot and ``model.pt`` written to the results dir.
    """
    # Initialise seed and create data directories
    init_seed(configs)
    results_dir = create_directory_timestamp(configs["results_base_dir"],
                                             main_folder)

    # Get training, validation and test data
    # Get amplification of the device and the info
    dataloaders, amplification, info_dict = load_data(configs)

    # Initilialise model
    model = custom_model(configs["processor"])
    model.info = info_dict
    model = TorchUtils.format_model(model)

    # Initialise optimiser over the trainable parameters only
    optimizer = custom_optimizer(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=configs["hyperparameters"]["learning_rate"],
    )

    # Whole training loop
    model, performances = train_loop(
        model,
        (dataloaders[0], dataloaders[1]),
        criterion,
        optimizer,
        configs["hyperparameters"]["epochs"],
        amplification,
        save_dir=results_dir,
    )

    # Plot results for every dataloader that is present.
    labels = ["TRAINING", "VALIDATION", "TEST"]
    for label, dataloader in zip(labels, dataloaders):
        if dataloader is not None:
            postprocess(
                dataloader,
                model,
                criterion,
                amplification,
                results_dir,
                label=label,
            )

    test_loss = None
    if dataloaders[2] is not None:
        test_loss = default_val_step(model, dataloaders[2], criterion,
                                     amplification)
        print("Test loss: " + str(test_loss))

    # Decide whether a validation curve exists BEFORE the np.array conversion:
    # the original `not performances[1] == []` compared a numpy row against a
    # list, which is ambiguous/deprecated behavior.
    has_validation = len(performances[1]) > 0
    performances = np.array(performances)
    plt.figure()
    plt.plot(performances[0])
    if has_validation:
        plt.plot(performances[1])
    if test_loss is None:
        plt.title("Training profile")
    else:
        # Bug fix: the newline was written as '/n' and rendered literally.
        plt.title("Training profile (Amplified)\n Amplified Test loss: %.8f" %
                  test_loss)
    if has_validation:
        plt.legend(["training", "validation"])
    plt.savefig(os.path.join(results_dir, "training_profile"))

    # Save the model with its info metadata attached to the state dict.
    state_dict = model.state_dict()
    state_dict["info"] = model.info
    torch.save(state_dict, os.path.join(results_dir, "model.pt"))