def init_dirs(base_dir, is_main=True, gate=""):
    if is_main:
        base_dir = create_directory_timestamp(base_dir, gate)
    else:
        base_dir = os.path.join(base_dir, gate)
        create_directory(base_dir)
    return base_dir
def init_dirs(dimension, base_dir, is_main):
    results_folder_name = "vc_dimension_" + str(dimension)
    if is_main:
        base_dir = create_directory_timestamp(base_dir, results_folder_name)
    else:
        base_dir = os.path.join(base_dir, results_folder_name)
    create_directory(base_dir)
    return base_dir
def init_dirs(base_dir, is_main=True):
    name = 'validation'
    base_dir = os.path.join(base_dir, 'validation')
    if is_main:
        base_dir = create_directory_timestamp(base_dir, name)
    else:
        base_dir = os.path.join(base_dir, name)
        create_directory(base_dir)
    return base_dir
def init_dirs(gate_name, base_dir, is_main):
    if is_main:
        base_dir = create_directory_timestamp(base_dir, gate_name)
    else:
        base_dir = os.path.join(base_dir, gate_name)
    reproducibility_dir = os.path.join(base_dir, "reproducibility")
    create_directory(reproducibility_dir)
    return base_dir, reproducibility_dir
def init_dirs(gap, base_dir, is_main=False, save_data=False):
    main_dir = "ring_classification_gap_" + gap
    reproducibility_dir = "reproducibility"
    results_dir = "results"
    if is_main:
        base_dir = create_directory_timestamp(base_dir, main_dir)
    if save_data:
        reproducibility_dir = os.path.join(base_dir, reproducibility_dir)
    else:
        # When the data is not kept, reproducibility files go into a temporary sub-folder.
        reproducibility_dir = os.path.join(base_dir, reproducibility_dir, "tmp")
    create_directory(reproducibility_dir)
    results_dir = os.path.join(base_dir, results_dir)
    create_directory(results_dir)
    return results_dir, reproducibility_dir
def init_dirs(gap, base_dir, is_main=True):
    main_dir = f"searcher_{gap}gap"
    search_stats_dir = "search_stats"
    results_dir = "results"
    reproducibility_dir = "reproducibility"
    if is_main:
        base_dir = create_directory_timestamp(base_dir, main_dir)
    else:
        base_dir = os.path.join(base_dir, main_dir)
        create_directory(base_dir)
    search_stats_dir = os.path.join(base_dir, search_stats_dir)
    results_dir = os.path.join(base_dir, results_dir)
    reproducibility_dir = os.path.join(base_dir, reproducibility_dir)
    create_directory(search_stats_dir)
    create_directory(results_dir)
    create_directory(reproducibility_dir)
    return base_dir, search_stats_dir, results_dir, reproducibility_dir
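# All of the init_dirs variants above rely on two helpers, create_directory and
# create_directory_timestamp, which are not shown in this section. The sketch
# below is only an assumption about their behaviour (create the folder if it is
# missing and return its path; the timestamped variant appends a datetime
# suffix so repeated runs never collide), not the project's actual code.
import os
from datetime import datetime


def create_directory_sketch(path):
    # Create the directory (and any parents) if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path)
    return path


def create_directory_timestamp_sketch(base_dir, name):
    # Build e.g. <base_dir>/<name>_2024_01_31_120000 and create it.
    timestamp = datetime.now().strftime("%Y_%m_%d_%H%M%S")
    return create_directory_sketch(os.path.join(base_dir, f"{name}_{timestamp}"))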
def validate_vcdim(vcdim_base_dir, validation_processor_configs, is_main=True):
    base_dir = init_dirs(vcdim_base_dir, is_main=is_main)
    # Collect one sub-directory per gate, skipping the validation output folder.
    dirs = [
        os.path.join(vcdim_base_dir, o) for o in os.listdir(vcdim_base_dir)
        if os.path.isdir(os.path.join(vcdim_base_dir, o))
    ]
    for d in dirs:
        if os.path.split(d)[1] != "validation":
            gate_dir = create_directory(
                os.path.join(base_dir, d.split(os.path.sep)[-1]))
            model = torch.load(
                os.path.join(d, 'reproducibility', 'model.pt'),
                map_location=torch.device(TorchUtils.get_accelerator_type()))
            results = torch.load(
                os.path.join(d, 'reproducibility', 'results.pickle'),
                map_location=torch.device(TorchUtils.get_accelerator_type()))
            experiment_configs = load_configs(
                os.path.join(d, 'reproducibility', 'configs.yaml'))
            criterion = manager.get_criterion(experiment_configs['algorithm'])
            # Plateaus are removed from the training waveform because the
            # perceptron cannot accept fewer than 10 values per gate; they are
            # then re-created with the validation processor's waveform settings.
            waveform_transforms = transforms.Compose([
                PlateausToPoints(
                    experiment_configs['processor']['data']['waveform']),
                PointsToPlateaus(
                    validation_processor_configs['data']['waveform'])
            ])
            validate_gate(model,
                          results,
                          validation_processor_configs,
                          criterion,
                          results_dir=gate_dir,
                          transforms=waveform_transforms,
                          is_main=False)
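# Hedged usage sketch for validate_vcdim (not part of the original code). The
# paths below are hypothetical placeholders; the function only requires that
# the base directory contains one sub-folder per gate, each holding a
# reproducibility/ folder with model.pt, results.pickle and configs.yaml.
def example_validate_vcdim():
    validation_processor_configs = load_configs(
        "configs/validation/validation_processor.yaml")  # hypothetical path
    validate_vcdim("tmp/output/vc_dimension_4",  # hypothetical results folder
                   validation_processor_configs,
                   is_main=True)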
def get_error(model_data_path, test_data_path, steps=1, batch_size=2048):
    inputs, targets, info = load_data(test_data_path, steps)
    error = np.zeros_like(targets)
    prediction = np.zeros_like(targets)
    model = SurrogateModel({'torch_model_dict': model_data_path})
    with torch.no_grad():
        i_start = 0
        i_end = batch_size
        threshold = inputs.shape[0] - batch_size
        while i_end <= inputs.shape[0]:
            # Predict one batch and accumulate the per-sample error.
            prediction[i_start:i_end] = TorchUtils.get_numpy_from_tensor(
                model(TorchUtils.get_tensor_from_numpy(inputs[i_start:i_end])))
            error[i_start:i_end] = (prediction[i_start:i_end]
                                    - targets[i_start:i_end])
            i_start += batch_size
            i_end += batch_size
            # Stretch the last batch so the remaining samples are not skipped
            # when the dataset size is not a multiple of batch_size.
            if i_end > threshold and i_end < inputs.shape[0]:
                i_end = inputs.shape[0]
    main_path = os.path.dirname(os.path.dirname(model_data_path))
    path = create_directory(os.path.join(main_path, 'test_model'))
    mse = plot_all(targets, prediction, path, name='TEST')
    return mse
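# Hedged usage sketch for get_error (not part of the original code). Both
# arguments are hypothetical placeholders; model_data_path should point at a
# saved torch model dict and test_data_path at a dataset readable by
# load_data. Plots are written to <model_dir>/test_model and the MSE computed
# by plot_all is returned.
def example_get_error():
    mse = get_error("tmp/training/model.pt",    # hypothetical model path
                    "tmp/data/test_data.npz",   # hypothetical test-data path
                    steps=3,
                    batch_size=4096)
    print(f"Test MSE: {mse}")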