def main():
    config = Configuration()
    config.print_detailed_config_used_for_training()

    dataset = FullDataset(config.training_data_folder, config, training=True)
    dataset.load()
    dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

    checker = ConfigChecker(config, dataset, 'snn', training=True)
    checker.pre_init_checks()

    snn = initialise_snn(config, dataset, True)
    snn.print_detailed_model_info()

    if config.print_model:
        tf.keras.utils.plot_model(snn.encoder.model, to_file='model.png',
                                  show_shapes=True, expand_nested=True)

    checker.post_init_checks(snn)

    start_time_string = datetime.now().strftime("%m-%d_%H-%M-%S")

    print('---------------------------------------------')
    print('Training:')
    print('---------------------------------------------')
    print()

    optimizer = SNNOptimizer(snn, dataset, config)
    optimizer.optimize()

    print()
    print('---------------------------------------------')
    print('Inference:')
    print('---------------------------------------------')
    print()

    change_model(config, start_time_string)

    if config.case_base_for_inference:
        dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False)
    else:
        dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False)

    dataset.load()
    dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

    snn = initialise_snn(config, dataset, False)

    inference = Inference(config, snn, dataset)
    inference.infer_test_dataset()
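# A minimal entry-point sketch, assuming each of these variants is executed
# directly as a script (the TF log-level line mirrors the variant below):
if __name__ == '__main__':
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow debug output
    main()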
def main():
    # suppress debugging messages of TensorFlow
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    config = Configuration()
    config.print_detailed_config_used_for_training()

    dataset = FullDataset(config.training_data_folder, config, training=True)
    dataset.load()

    checker = ConfigChecker(config, dataset, 'snn', training=True)
    checker.pre_init_checks()

    snn = initialise_snn(config, dataset, True)
    snn.print_detailed_model_info()
    checker.post_init_checks(snn)

    start_time_string = datetime.now().strftime("%m-%d_%H-%M-%S")

    print('---------------------------------------------')
    print('Training:')
    print('---------------------------------------------')
    print()

    optimizer = SNNOptimizer(snn, dataset, config)
    optimizer.optimize()

    print()
    print('---------------------------------------------')
    print('Inference:')
    print('---------------------------------------------')
    print()

    change_model(config, start_time_string)

    if config.case_base_for_inference:
        dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False)
    else:
        dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False)

    dataset.load()

    snn = initialise_snn(config, dataset, False)

    inference = Inference(config, snn, dataset)
    inference.infer_test_dataset()
def main():
    config = Configuration()

    if config.case_base_for_inference:
        dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False)
    else:
        dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False)

    dataset.load()
    dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

    checker = ConfigChecker(config, dataset, 'snn', training=False)
    checker.pre_init_checks()

    architecture = initialise_snn(config, dataset, False)
    checker.post_init_checks(architecture)

    inference = Inference(config, architecture, dataset)

    if config.print_model:
        tf.keras.utils.plot_model(architecture.encoder.model, to_file='model.png',
                                  show_shapes=True, expand_nested=True)

    print('Ensure the right model file is used:')
    print(config.directory_model_to_use, '\n')

    inference.infer_test_dataset()
def init_architecture(self, selection):
    if selection == 'snn':
        dataset: FullDataset = FullDataset(self.config.training_data_folder, self.config, training=False)
        dataset.load()
        self.architecture = initialise_snn(self.config, dataset, False)
    elif selection == 'cbs':
        self.architecture = CBS(self.config, False)
    else:
        raise ValueError('Unknown architecture variant')
def inference_during_training(self, epoch):
    if self.config.use_inference_test_during_training and epoch != 0:
        if epoch % self.config.inference_during_training_epoch_interval == 0:
            print("Inference at epoch: ", epoch)

            dataset2: FullDataset = FullDataset(self.config.training_data_folder, self.config, training=False)
            dataset2.load()

            # Point the config at the most recently saved model
            self.config.directory_model_to_use = self.dir_name_last_model_saved
            print("self.dir_name_last_model_saved: ", self.dir_name_last_model_saved)
            print("self.config.directory_model_to_use: ", self.config.directory_model_to_use)

            architecture2 = initialise_snn(self.config, dataset2, False)
            inference = Inference(self.config, architecture2, dataset2)
            inference.infer_test_dataset()
def run(self):
    group_ds = self.dataset.create_group_dataset(self.group_id)
    self.model: SimpleSNN = initialise_snn(self.config, group_ds, self.training, True, self.group_id)

    if self.training:
        self.optimizer_helper = CBSOptimizerHelper(self.model, self.config, self.dataset, self.group_id)

    # Store the mode in a local variable so it can't change during execution,
    # because the process behaves differently depending on whether the model
    # is being trained or applied.
    is_training = self.training

    # Send a message so that the initiator knows that the preparations are complete.
    self.output_queue.put(str(self.group_id) + ' init finished. ')

    while True:
        elem = self.input_queue.get(block=True)

        # Stop the process execution if a stop message was sent via the queue
        if isinstance(elem, str) and elem == 'stop':
            break

        elem, gpu = elem

        with tf.device(gpu):
            if is_training:
                # The train method must be called by the process itself so that
                # the training of the individual groups can run in parallel.
                # The feedback string contains the loss and additional information.
                feedback = self.train(elem)
                self.output_queue.put(feedback)
            else:
                # Reduce the input example to the features required for this group
                # and pass it to the model to calculate the similarities
                elem = self.dataset.get_masked_example_group(elem, self.group_id)
                output = self.model.get_sims(elem)
                self.output_queue.put(output)
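# A self-contained sketch of the sentinel-based queue protocol that run()
# above implements, seen from the initiator's side. All names and payloads
# here are illustrative (plain strings instead of dataset examples and TF
# devices), not the repository's actual classes:
import multiprocessing as mp


def worker(input_queue, output_queue):
    # Signal that initialisation is complete, like run() does per group
    output_queue.put('init finished')
    while True:
        elem = input_queue.get(block=True)
        # A plain 'stop' string is the sentinel that ends the loop
        if isinstance(elem, str) and elem == 'stop':
            break
        example, device = elem  # work items arrive as (example, device) tuples
        output_queue.put('processed ' + example + ' on ' + device)


if __name__ == '__main__':
    in_q, out_q = mp.Queue(), mp.Queue()
    p = mp.Process(target=worker, args=(in_q, out_q))
    p.start()
    print(out_q.get())  # wait for the init confirmation
    in_q.put(('sample_0', '/device:GPU:0'))
    print(out_q.get())  # similarity output or training feedback in the original
    in_q.put('stop')    # sentinel terminates the worker process
    p.join()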
def main():
    config = Configuration()

    dataset = FullDataset(config.training_data_folder, config, training=True)
    dataset.load()
    dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

    checker = ConfigChecker(config, dataset, 'snn', training=True)
    checker.pre_init_checks()

    snn = initialise_snn(config, dataset, True)
    snn.print_detailed_model_info()
    checker.post_init_checks(snn)

    print('Training:')
    optimizer = SNNOptimizer(snn, dataset, config)
    optimizer.optimize()
def main():
    # suppress debugging messages of TensorFlow
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    config = Configuration()

    dataset = FullDataset(config.training_data_folder, config, training=True)
    dataset.load()

    checker = ConfigChecker(config, dataset, 'snn', training=True)
    checker.pre_init_checks()

    snn = initialise_snn(config, dataset, True)
    snn.print_detailed_model_info()
    checker.post_init_checks(snn)

    print('Training:')
    optimizer = SNNOptimizer(snn, dataset, config)
    optimizer.optimize()
def main():
    # suppress debugging messages of TensorFlow
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    config = Configuration()

    if config.case_base_for_inference:
        dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False)
    else:
        dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False)

    dataset.load()

    checker = ConfigChecker(config, dataset, 'snn', training=False)
    checker.pre_init_checks()

    architecture = initialise_snn(config, dataset, False)
    checker.post_init_checks(architecture)

    inference = Inference(config, architecture, dataset)

    if config.print_model:
        tf.keras.utils.plot_model(architecture.encoder.model, to_file='model.png',
                                  show_shapes=True, expand_nested=True)

    print('Ensure the right model file is used:')
    print(config.directory_model_to_use, '\n')

    inference.infer_test_dataset()
def main():
    config = Configuration()
    config.print_detailed_config_used_for_training()

    dataset = FullDataset(config.training_data_folder, config, training=True, model_selection=True)
    dataset.load()
    dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

    checker = ConfigChecker(config, dataset, 'snn', training=True)
    checker.pre_init_checks()

    snn = initialise_snn(config, dataset, True)
    snn.print_detailed_model_info()

    if config.print_model:
        tf.keras.utils.plot_model(snn.encoder.model, to_file='model.png',
                                  show_shapes=True, expand_nested=True)

    checker.post_init_checks(snn)

    start_time_string = datetime.now().strftime("%m-%d_%H-%M-%S")

    print('---------------------------------------------')
    print('Training:')
    print('---------------------------------------------')
    print()

    optimizer = SNNOptimizer(snn, dataset, config)
    optimizer.optimize()

    print()
    print('---------------------------------------------')
    print('Model selection (for the final evaluation):')
    print('---------------------------------------------')
    print()

    num_of_selection_tests = config.number_of_selection_tests
    config.use_masking_regularization = False
    score_valid_to_model_loss = {}

    for i in range(num_of_selection_tests):
        loss_of_selected_model = change_model(config, start_time_string, num_of_selction_iteration=i)

        if config.case_base_for_inference:
            dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False, model_selection=True)
        else:
            dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False, model_selection=True)

        dataset.load()
        dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

        snn = initialise_snn(config, dataset, False)
        inference = Inference(config, snn, dataset)
        curr_model_score = inference.infer_test_dataset()
        score_valid_to_model_loss[curr_model_score] = loss_of_selected_model

    print("score_valid_to_model_loss: ", score_valid_to_model_loss)

    print()
    print('---------------------------------------------')
    print('Inference:')
    print('---------------------------------------------')
    print()

    # Select the model with the highest validation score and restore it
    # via its stored training loss value for the final evaluation
    max_score = max(score_valid_to_model_loss.keys())
    min_loss = score_valid_to_model_loss[max_score]
    print("Model with the following loss is selected for the final evaluation:", min_loss)

    change_model(config, start_time_string, get_model_by_loss_value=min_loss)

    if config.case_base_for_inference:
        dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False)
    else:
        dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False)

    dataset.load()
    dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

    snn = initialise_snn(config, dataset, False)
    inference = Inference(config, snn, dataset)
    inference.infer_test_dataset()
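# A small illustration of the selection step above (hypothetical scores and
# losses): the model with the highest validation score is restored through
# its stored training loss value. Note that two runs with an identical score
# would overwrite each other in this dict.
score_valid_to_model_loss = {0.91: 0.042, 0.87: 0.051, 0.93: 0.039}
max_score = max(score_valid_to_model_loss.keys())  # 0.93
min_loss = score_valid_to_model_loss[max_score]    # 0.039, passed to change_model(...)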
def main():
    config = Configuration()

    # Define different versions of the original configuration
    # config_2 = copy(config)
    # config_2.hyper_file = config_2.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_2_a.json'  # like the standard config, but with owl2vec added as graph features

    config_3 = copy(config)
    config_3.hyper_file = config_3.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_2_b.json'  # like the standard config, but with the linear transformation enabled

    config_4 = copy(config)
    config_4.hyper_file = config_4.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_2_c.json'  # like the standard config, but with the context output only

    ####
    '''
    config_2 = copy(config)
    config_2.batch_distribution = {
        BatchSubsetType.DISTRIB_BASED_ON_DATASET: 0.75,
        BatchSubsetType.EQUAL_CLASS_DISTRIB: 0.25
    }

    config_3 = copy(config)
    config_3.hyper_file = config_3.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_2.json'  # owl2vec after the 2DCNN removed, FiLM on

    config_4 = copy(config)
    config_4.hyper_file = config_4.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_3.json'  # owl2vec after the 2DCNN removed, FiLM off

    config_5 = copy(config)
    config_5.hyper_file = config_5.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_4.json'  # like the standard config, but with gradient cap 1

    config_6 = copy(config)
    config_6.hyper_file = config_6.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_5.json'  # like the standard config, but with 256,128,64

    config_7 = copy(config)
    config_7.hyper_file = config_7.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_6.json'  # like the standard config, but with 512,256,128

    config_8 = copy(config)
    config_8.hyper_file = config_8.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_7.json'  # like the standard config, but with 128,64,32

    config_9 = copy(config)
    config_9.hyper_file = config_9.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_8.json'  # like the standard config, but with 256,128,128

    config_10 = copy(config)
    config_10.hyper_file = config_10.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_9.json'  # like the standard config, but with 128,128,128, FC 386-256, CNN2d 128,64,3

    config_11 = copy(config)
    config_11.hyper_file = config_11.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_10.json'  # like the standard config, but with 128,64,64, FC 386-256, CNN2d 128,64,3

    config_12 = copy(config)
    config_12.hyper_file = config_12.hyper_file_folder + 'cnn2d_withAddInput_Graph_o1_GlobAtt_o2_2_HO_11.json'  # like the standard config, but with 256,128,128 and everything enabled
    '''

    '''
    config_3 = copy(config)
    config_3.hyper_file = config_3.hyper_file_folder + 'cnn2d_with_graph_test_Readout_WOowl2vec.json'

    config_4 = copy(config)
    config_4.hyper_file = config_4.hyper_file_folder + 'cnn2d_with_graph_test_Readout_lrSmaller.json'

    config_5 = copy(config)
    config_5.hyper_file = config_5.hyper_file_folder + 'cnn2d_with_graph_test_Readout_WOAttributeWise.json'
    '''

    # list_of_configs = [config_3, config_4, config_5, config_6, config_7, config_8, config_9, config_10, config_11]
    list_of_configs = [config, config_3, config_4]
    # list_of_configs = [config, config_2, config_3, config_4, config_5, config_6, config_7, config_8, config_9, config_10, config_11]

    for i, config in enumerate(list_of_configs):
        print("Run number of config:", i)
        config.print_detailed_config_used_for_training()

        dataset = FullDataset(config.training_data_folder, config, training=True, model_selection=True)
        dataset.load()
        dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

        checker = ConfigChecker(config, dataset, 'snn', training=True)
        checker.pre_init_checks()

        snn = initialise_snn(config, dataset, True)
        snn.print_detailed_model_info()

        if config.print_model:
            tf.keras.utils.plot_model(snn.encoder.model, to_file='model.png',
                                      show_shapes=True, expand_nested=True)

        checker.post_init_checks(snn)

        start_time_string = datetime.now().strftime("%m-%d_%H-%M-%S")

        print('---------------------------------------------')
        print('Training:')
        print('---------------------------------------------')
        print()

        optimizer = SNNOptimizer(snn, dataset, config)
        optimizer.optimize()

        print()
        print('---------------------------------------------')
        print('Evaluation of the current config:')
        print('---------------------------------------------')
        print()

        num_of_selection_tests = config.number_of_selection_tests
        score_valid_to_model_loss = {}

        # Use a separate loop variable so the config index i is not shadowed
        for j in range(num_of_selection_tests):
            loss_of_selected_model = change_model(config, start_time_string, num_of_selction_iteration=j)

            if config.case_base_for_inference:
                dataset: FullDataset = FullDataset(config.case_base_folder, config, training=False, model_selection=True)
            else:
                dataset: FullDataset = FullDataset(config.training_data_folder, config, training=False, model_selection=True)

            dataset.load()
            dataset = Representation.convert_dataset_to_baseline_representation(config, dataset)

            snn = initialise_snn(config, dataset, False)
            inference = Inference(config, snn, dataset)
            curr_model_score = inference.infer_test_dataset()
            score_valid_to_model_loss[curr_model_score] = loss_of_selected_model

        # Compute the mean loss and the mean score over all selection tests
        loss_mean = sum(score_valid_to_model_loss.values()) / len(score_valid_to_model_loss)
        mean_score = sum(score_valid_to_model_loss.keys()) / len(score_valid_to_model_loss)

        # Print the results of this run
        print("Run: ", i, " loss mean: " + str(loss_mean), " score mean: " + str(mean_score))
        print("Run: ", i, " score_valid_to_model_loss:", score_valid_to_model_loss)
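# An equivalent mean computation using the standard library (a sketch with
# illustrative values; score_valid_to_model_loss maps score -> loss as above):
from statistics import mean

score_valid_to_model_loss = {0.90: 0.05, 0.88: 0.06}
loss_mean = mean(score_valid_to_model_loss.values())  # 0.055
mean_score = mean(score_valid_to_model_loss.keys())   # 0.89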