def initialize_network(self):
    """Load the Keras model from ``self.model_path`` into ``self.local_model``.

    The model was saved with the custom ``annealed_loss`` objective, so the
    matching callable must be supplied at load time for deserialization.
    """
    custom_losses = {"annealed_loss": lc.loss_selector("annealed_loss")}
    self.local_model = load_model(self.model_path, custom_objects=custom_losses)
def __load_local_model(self, path: str):
    """Load a saved Keras model from *path* into ``self.model``.

    Registers the project's custom ``annealed_loss`` so the serialized
    loss reference can be resolved during deserialization.
    """
    custom_losses = {"annealed_loss": lc.loss_selector("annealed_loss")}
    self.model = load_model(path, custom_objects=custom_losses)
def __init__(self, inferrence_json_path, generator_obj):
    """Build the inference object from a JSON config and a data generator.

    Parameters
    ----------
    inferrence_json_path : str
        Path to the JSON configuration file. Must contain ``output_file``
        and ``model_path``; may optionally contain ``save_raw`` (default
        False) and ``rescale`` (default True).
    generator_obj :
        Project data generator; must expose ``batch_size``, ``__len__``
        and ``get_output_size()``.  # NOTE(review): exact generator type
        not visible from here — confirm against caller.
    """
    self.inferrence_json_path = inferrence_json_path
    self.generator_obj = generator_obj

    # Parse the JSON configuration via the project loader.
    local_json_loader = JsonLoader(inferrence_json_path)
    local_json_loader.load_json()
    self.json_data = local_json_loader.json_data

    # Required keys — a KeyError here means the config is malformed.
    self.output_file = self.json_data["output_file"]
    self.model_path = self.json_data["model_path"]

    # Optional keys with documented defaults (dict.get replaces the
    # original `if key in json_data.keys()` branching).
    self.save_raw = self.json_data.get("save_raw", False)
    self.rescale = self.json_data.get("rescale", True)

    # Cache generator geometry used throughout inference.
    self.batch_size = self.generator_obj.batch_size
    self.nb_datasets = len(self.generator_obj)
    self.indiv_shape = self.generator_obj.get_output_size()

    # The model was trained with the custom annealed_loss objective, so it
    # must be registered for deserialization.
    self.model = load_model(
        self.model_path,
        custom_objects={"annealed_loss": lc.loss_selector("annealed_loss")},
    )
def initialize_loss(self):
    """Select the training loss and record the pre-training validation loss.

    Evaluates ``self.local_model`` on the local test generator before any
    fine-tuning happens, so the transfer-learning baseline is preserved,
    then writes that baseline to ``<output_dir>/<run_uid>_<model_string>init_val_loss.npy``.
    """
    self.loss = lc.loss_selector(self.loss_type)

    # For transfer learning, knowing the baseline validation loss is important
    baseline_val_loss = self.local_model.evaluate(self.local_test_generator)

    # Persist the initial loss next to the run's other artifacts. The file
    # name reproduces the original format exactly (no separator between
    # model_string and "init_val_loss").
    save_loss_path = os.path.join(
        self.output_dir,
        f"{self.run_uid}_{self.model_string}init_val_loss.npy",
    )
    np.save(save_loss_path, baseline_val_loss)
def initialize_loss(self):
    """Resolve ``self.loss_type`` to a loss callable and store it on ``self.loss``."""
    selected = lc.loss_selector(self.loss_type)
    self.loss = selected