def multitraining(datasets, models_type, models_arch, models_latent_space,
                  models_use_bn, lrs, epochs, batch_size, ckpt_paths,
                  ckpt_epochs, filename, path):
    """Train one model per configuration and plot all test-accuracy curves.

    Every argument is a per-run list.  Shorter lists are tiled (repeated)
    up to the length of the longest list, so a single-element list applies
    the same value to every run.

    Args:
        datasets: dataset identifiers understood by
            ``datasetLoader.get_dataset``.
        models_type, models_arch, models_latent_space, models_use_bn:
            per-run settings forwarded to ``construct_model.get_model``.
        lrs, epochs, batch_size: per-run training hyper-parameters.
        ckpt_paths, ckpt_epochs: per-run checkpoint settings forwarded
            to ``train``.
        filename: name of the summary curves image.
        path: output directory for the curves image.
    """
    model_args = [
        datasets, models_type, models_arch, models_latent_space, models_use_bn,
        lrs, epochs, batch_size, ckpt_paths, ckpt_epochs
    ]
    max_len = max(map(len, model_args))
    print(max_len)
    # Tile every argument list up to max_len so all lists line up per run.
    # Work on a copy: the original code extended the caller's lists in place.
    for i, arg in enumerate(model_args):
        tiled = arg.copy()
        while len(tiled) < max_len:
            tiled.extend(arg)
        model_args[i] = tiled[:max_len]
    # Load each distinct dataset only once, even when several runs share it.
    ds = {}
    for dataset in datasets:
        if dataset not in ds:
            train_test_ds, shape = datasetLoader.get_dataset(dataset)
            ds.update({dataset: [train_test_ds, shape]})
    train_losses = []
    test_losses = []
    train_accs = []
    test_accs = []
    legendes = []
    # Build and train one model per configuration.
    for i in range(max_len):
        # Derive a unique run name from the configuration.
        str_ds = model_args[0][i]
        str_model = model_args[1][i]
        str_arch = '_'.join(str(x) for x in model_args[2][i])
        str_lat = 'lat' + str(model_args[3][i])
        str_use_bn = 'BN' if model_args[4][i] else ''
        str_all = '_'.join(
            filter(None, [str_ds, str_model, str_arch, str_lat, str_use_bn]))
        # Construct the model.
        dataset = ds[model_args[0][i]][0]
        shape = ds[model_args[0][i]][1]
        model_type = model_args[1][i]
        # Copy the architecture list: tiling repeats the *same* list object
        # across runs, so an in-place mutation by get_model would corrupt
        # later runs (the sibling multitraining overload copies it too).
        model_arch = model_args[2][i].copy()
        model_lat = model_args[3][i]
        model_use_bn = model_args[4][i]
        lr = model_args[5][i]
        epoch = model_args[6][i]
        bs = model_args[7][i]
        ckpt_path = model_args[8][i]
        ckpt_epoch = model_args[9][i]
        print(model_lat)
        model = construct_model.get_model(model_type, model_arch, model_lat,
                                          shape, model_use_bn)
        # Train; the prints trace the exact configuration of each run.
        print(dataset)
        print(model)
        print(lr)
        print(epoch)
        print(bs)
        print(ckpt_path)
        print(ckpt_epoch)
        train_l, test_l, train_acc, test_acc = train(dataset, model, lr, epoch,
                                                     bs, ckpt_path, ckpt_epoch,
                                                     str_all, path)
        # Collect each run's curves for the final comparison plot.
        train_losses.append(train_l)
        train_accs.append(train_acc)
        test_losses.append(test_l)
        test_accs.append(test_acc)
        legendes.append(str_all)
    image_saver.curves(test_accs, legendes, filename, path)
def forward_epoch(self):
    """Run full training epochs with per-epoch validation and image dumps.

    For each epoch from the checkpointed epoch up to ``self.epoch_max``:
    train over the whole train set, evaluate loss/accuracy on the whole
    validation set, append the epoch means to the history lists, refresh
    the loss/accuracy curve images, dump one reconstruction comparison for
    a validation and a train batch, and checkpoint every ``save_steps``
    epochs (and on the last epoch).
    """
    print("forward epoch")
    # Running means, reset at the end of every epoch.
    t_loss_mean = tf.keras.metrics.Mean(name='t_loss')
    t_acc_mean = tf.keras.metrics.Mean(name='t_acc')
    v_loss_mean = tf.keras.metrics.Mean(name='v_loss')
    v_acc_mean = tf.keras.metrics.Mean(name='v_acc')
    # Resume from the epoch stored in the checkpoint.
    starting_epoch = int(self.ckpt.epoch)
    print(starting_epoch)
    for epoch in range(starting_epoch, self.epoch_max + 1):
        len_train = self.train_size
        progbar = tf.keras.utils.Progbar(len_train)
        # Apply the learning-rate schedule for this epoch.
        self.lr = self.lr_fn(self.lr, epoch)
        self.optimizer.lr = self.lr
        # One epoch on TRAIN dataset
        for i, train_x in enumerate(self.train_ds):
            progbar.update(i + 1)
            t_loss_mean(
                self.model.compute_apply_gradients(train_x, self.optimizer))
            t_acc_mean(self.model.compute_accuracy(train_x))
        # One epoch on VALIDATION dataset
        for i, val_x in enumerate(self.val_ds):
            v_loss_mean(self.model.compute_loss(val_x))
            v_acc_mean(self.model.compute_accuracy(val_x))
        # Record the epoch means in the history lists.
        self.t_loss.append(t_loss_mean.result().numpy())
        self.t_acc.append(t_acc_mean.result().numpy())
        self.v_loss.append(v_loss_mean.result().numpy())
        self.v_acc.append(v_acc_mean.result().numpy())
        # Refresh the loss/accuracy curve images.
        image_saver.curves([self.t_loss, self.v_loss],
                           ['Training', 'Validation'],
                           'training_validation_loss', self.img_path, 'Steps',
                           'Loss')
        # Fixed y-axis label: was 'Loss', but this is the accuracy plot
        # (both forward_percent variants label it 'Accuracy').
        image_saver.curves([self.t_acc, self.v_acc],
                           ['Training', 'Validation'],
                           'training_validation_accuracy', self.img_path,
                           'Steps', 'Accuracy')
        # Dump one reconstruction comparison per split for visual tracking.
        img_name = 'epoch_' + str(epoch)
        for val_x in self.val_ds.take(1):
            if self.is_seq:
                image_saver.generate_and_save_images_compare_seq(
                    self.model, val_x,
                    self.name + '_epoch_{:03d}_test'.format(epoch),
                    self.img_path)
                image_saver.generate_gif_concat(
                    self.model, val_x,
                    self.name + '_epoch_{:03d}_test_gif'.format(epoch),
                    self.img_path)
            else:
                image_saver.compare_images(val_x[0],
                                           self.model.reconstruct(val_x)[0],
                                           img_name, self.img_path)
        for train_x in self.train_ds.take(1):
            if self.is_seq:
                image_saver.generate_and_save_images_compare_seq(
                    self.model, train_x,
                    self.name + '_epoch_{:03d}_train'.format(epoch),
                    self.img_path)
            else:
                image_saver.compare_images(train_x[0],
                                           self.model.reconstruct(train_x)[0],
                                           img_name, self.img_path)
        # Reset the running means for the next epoch.
        t_loss_mean.reset_states()
        t_acc_mean.reset_states()
        v_loss_mean.reset_states()
        v_acc_mean.reset_states()
        # Checkpoint periodically and at the end of training.
        if epoch % self.save_steps == 0 or epoch == self.epoch_max:
            if self.redone:
                self.save_redone()
            else:
                self.save()
        self.ckpt.epoch.assign_add(1)
def multitraining(datasets, models_type, models_arch, models_latent_space,
                  models_use_bn, lrs, epochs, batch_size, ckpt_epochs,
                  directory_name, path, models_std, legends=None):
    """Train one model per configuration, then plot curves and compare images.

    Every argument (except directory_name/path/legends) is a per-run list;
    shorter lists are tiled up to the length of the longest one.  After all
    runs finish, the test-accuracy curves are plotted together and each
    model's reconstruction of the same test batch is saved side by side.

    Args:
        datasets: dataset identifiers for datasetLoader.get_dataset.
        models_type, models_arch, models_latent_space, models_use_bn,
            models_std: per-run settings forwarded to
            construct_model.get_model.
        lrs, epochs, batch_size, ckpt_epochs: per-run training settings
            forwarded to train().
        directory_name: subdirectory (under `path`) created for all outputs.
        path: base output directory.
        legends: optional plot legends; defaults to the generated run names.
            NOTE(review): the comparison image gets len(models)+1 panels
            (ground truth first) — confirm caller-supplied legends account
            for that.
    """
    model_args = [
        datasets, models_type, models_arch, models_latent_space, models_use_bn,
        models_std, lrs, epochs, batch_size, ckpt_epochs
    ]
    max_len = max(map(len, model_args))
    print(max_len)
    # All outputs (checkpoints, curves, images) go under path/directory_name.
    path_directory = os.path.join(path, directory_name)
    if not os.path.isdir(path_directory):
        os.makedirs(path_directory)
    # Tile every argument list up to max_len so all lists line up per run.
    # NOTE(review): extend() also mutates the caller's lists in place.
    for i, arg in enumerate(model_args):
        temp_arg = arg.copy()
        for j in range((max_len - 1) // len(arg)):
            arg.extend(temp_arg)
        model_args[i] = arg[:max_len]
    # Load each distinct dataset only once, even when several runs share it.
    ds = {}
    for dataset in datasets:
        if dataset not in ds:
            train_test_ds, shape = datasetLoader.get_dataset(dataset)
            ds.update({dataset: [train_test_ds, shape]})
    train_losses = []
    test_losses = []
    train_accs = []
    test_accs = []
    legendes = []
    models = []
    # Build and train one model per configuration.
    for i in range(max_len):
        # Derive a unique run name from the configuration.
        str_ds = model_args[0][i]
        str_model = model_args[1][i]
        str_arch = '_'.join(str(x) for x in model_args[2][i])
        str_lat = 'lat' + str(model_args[3][i])
        str_use_bn = 'BN' if model_args[4][i] else ''
        str_std = 'std' + str(model_args[5][i])
        str_all = '_'.join(
            filter(
                None,
                [str_ds, str_model, str_arch, str_lat, str_std, str_use_bn]))
        # Construct the model.
        dataset = ds[model_args[0][i]][0]
        shape = ds[model_args[0][i]][1]
        model_type = model_args[1][i]
        # Copy: tiling repeats the same list object across runs, and
        # get_model may mutate the architecture list in place.
        model_arch = model_args[2][i].copy()
        model_lat = model_args[3][i]
        model_use_bn = model_args[4][i]
        model_std = model_args[5][i]
        lr = model_args[6][i]
        epoch = model_args[7][i]
        bs = model_args[8][i]
        ckpt_epoch = model_args[9][i]
        print(model_lat)
        model = construct_model.get_model(model_type, model_arch, model_lat,
                                          shape, model_use_bn, model_std)
        models.append(model)
        # Train; the prints trace the exact configuration of each run.
        print(dataset)
        print(model)
        print(lr)
        print(epoch)
        print(bs)
        print(ckpt_epoch)
        train_l, test_l, train_acc, test_acc = train(dataset, model, lr, epoch,
                                                     bs, ckpt_epoch,
                                                     path_directory, str_all,
                                                     True)
        # Collect each run's curves for the final comparison plot.
        train_losses.append(train_l)
        train_accs.append(train_acc)
        test_losses.append(test_l)
        test_accs.append(test_acc)
        legendes.append(str_all)
    if legends is None:
        legends = legendes
    image_saver.curves(test_accs, legends, directory_name + '_curves',
                       path_directory, 'epochs', 'accuracy (L2)')
    # Reconstruct one shared test batch with every trained model.
    # NOTE(review): [0][1] presumably selects the test split of the first
    # run's (train, test) dataset pair — confirm against get_dataset.
    dataset_test = ds[model_args[0][0]][0][1]
    images = []
    for test in dataset_test.batch(3).take(1):
        print(test.shape)
        ground_truth = test.numpy()
        images.append(ground_truth)
        for model in models:
            output = model.reconstruct(test)
            images.append(output.numpy())
    image_saver.compare_multiple_images_Lab(images, legends,
                                            directory_name + '_images',
                                            path_directory)
def forward_percent(self):
    """Train with progress, validation and J/F evaluation inside each epoch.

    The progress bar has 1000 ticks per epoch (one tick every
    ``epoch_percent_train`` = train_size // 1000 steps).  Within an epoch:
      * every 100 ticks: evaluate a validation slice and record running
        train/val loss and accuracy;
      * every 500 ticks: evaluate DAVIS-style J/F metrics over the test set;
      * every ``save_steps`` ticks: dump comparison images, refresh the
        curve plots and checkpoint.
    Training resumes from ``self.ckpt`` (epoch and step), so the method is
    restartable mid-epoch.
    """
    print("forward percent")
    # Running means, reset after each recording point / each epoch.
    t_loss_mean = tf.keras.metrics.Mean(name='t_loss')
    t_acc_mean = tf.keras.metrics.Mean(name='t_acc')
    v_loss_mean = tf.keras.metrics.Mean(name='v_loss')
    v_acc_mean = tf.keras.metrics.Mean(name='v_acc')
    j_mean = tf.keras.metrics.Mean(name='j_mean')
    f_mean = tf.keras.metrics.Mean(name='f_mean')
    # Resume point stored in the checkpoint.
    starting_epoch = int(self.ckpt.epoch)
    starting_step = int(self.ckpt.step)
    # Steps per progress-bar tick (0.1% of an epoch), at least 1.
    epoch_percent_train = self.train_size // 1000
    epoch_percent_train = 1 if epoch_percent_train == 0 else epoch_percent_train
    epoch_percent_val = self.val_size // 1000
    epoch_percent_val = 1 if epoch_percent_val == 0 else epoch_percent_val
    print("starting_step: ", starting_step)
    print("start progbar: ", starting_step // epoch_percent_train)
    print("train_size: ", self.train_size)
    print("val_size: ", self.val_size)
    for epoch in range(starting_epoch, self.epoch_max + 1):
        print("epoch : ", epoch)
        progbar = tf.keras.utils.Progbar(1000)
        progbar.update(starting_step // epoch_percent_train)
        # Apply the learning-rate schedule for this epoch.
        lr = self.optimizer.lr
        lr = self.lr_fn(lr, epoch)
        self.optimizer.lr = lr
        # One epoch on the TRAIN dataset, offset by the resume step.
        for i, train_x in enumerate(self.train_ds, starting_step):
            # FIX(review): removed a leftover debug block that ran a full
            # test-set J/F evaluation on *every* training step and then
            # crashed on `print(self.js[1000000])` (IndexError).  The same
            # evaluation still runs every 500 ticks below.
            t_loss_mean(
                self.model.compute_apply_gradients(train_x, self.optimizer))
            t_acc_mean(self.model.compute_accuracy(train_x))
            # Stop after one full epoch's worth of ticks.
            if i > (epoch_percent_train * 1000):
                break
            if i % epoch_percent_train == 0 and i != 0:
                progbar.add(1)
            if i % (epoch_percent_train * 100) == 0 and i != 0:
                # Validation slice + record running metrics.
                for val_x in self.val_ds.take(epoch_percent_val):
                    v_loss_mean(self.model.compute_loss(val_x))
                    v_acc_mean(self.model.compute_accuracy(val_x))
                self.t_loss.append(t_loss_mean.result().numpy())
                self.t_acc.append(t_acc_mean.result().numpy())
                self.v_loss.append(v_loss_mean.result().numpy())
                self.v_acc.append(v_acc_mean.result().numpy())
                t_loss_mean.reset_states()
                t_acc_mean.reset_states()
                v_loss_mean.reset_states()
                v_acc_mean.reset_states()
            if i % (epoch_percent_train * 500) == 0 and i != 0:
                # J/F evaluation over the whole test set (one memory reset
                # per sequence).
                for seq_test in self.test_ds:
                    first = True
                    # NOTE(review): `first` is never set to False within a
                    # sequence — confirm image_saver.KAST_JF expects that.
                    for test in seq_test:
                        j, f = image_saver.KAST_JF(self.model, test, first)
                        j_mean(j)
                        f_mean(f)
                    self.model.reset_mem()
                print("\nJ : ", j_mean.result().numpy())
                print("F : ", f_mean.result().numpy())
                self.js.append(j_mean.result().numpy())
                self.fs.append(f_mean.result().numpy())
                j_mean.reset_states()
                f_mean.reset_states()
            if i != 0 and i % (epoch_percent_train * self.save_steps) == 0:
                # Dump comparison images for one validation batch...
                for val_x in self.val_ds.take(1):
                    if self.is_seq:
                        image_saver.KAST_View(
                            self.model, val_x, False, self.name +
                            '_epoch_{:03d}_step_{:03d}_test'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                    else:
                        image_saver.generate_and_save_images_compare_lab(
                            self.model, val_x, self.name +
                            '_epoch_{:03d}_step_{:03d}_test'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                # ...and one train batch.
                for train_x in self.train_ds.take(1):
                    if self.is_seq:
                        image_saver.KAST_View(
                            self.model, train_x, True, self.name +
                            '_epoch_{:03d}_step_{:03d}_train'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                    else:
                        image_saver.generate_and_save_images_compare_lab(
                            self.model, train_x, self.name +
                            '_epoch_{:03d}_step_{:03d}_train'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                # Per-sequence DAVIS visualisations (skipped on re-runs).
                if not self.redone:
                    for test_seq in self.test_ds.take(1):
                        nb_seq = 1
                        for test in test_seq:
                            image_saver.KAST_test(
                                self.model, test, self.name +
                                '_epoch_{:03d}_step_{:03d}_train_DAVISseq{:01d}'
                                .format(epoch, i // epoch_percent_train,
                                        nb_seq), self.img_path)
                            nb_seq = nb_seq + 1
                        self.model.reset_mem()
                print('i :', i)
                print('epoch percent train: ', epoch_percent_train)
                print('save step: ', self.save_steps)
                # Refresh the curve plots; x axes are in epochs (1000
                # samples per epoch).
                x_axis = np.linspace(0, len(self.t_loss) / 1000,
                                     len(self.t_loss))
                x_jf_axis = np.linspace(0, len(self.js) / 1000, len(self.js))
                image_saver.curves([self.t_loss, self.v_loss],
                                   ['Training', 'Validation'],
                                   'training_validation_loss', self.img_path,
                                   'Steps', 'Loss', x_axis)
                image_saver.curves([self.t_acc, self.v_acc],
                                   ['Training', 'Validation'],
                                   'training_validation_accuracy',
                                   self.img_path, 'Steps', 'Accuracy', x_axis)
                image_saver.curves([self.js, self.fs], ['J', 'F'], 'j_f',
                                   self.img_path, 'Steps', 'Metrics',
                                   x_jf_axis)
                # Checkpoint the mid-epoch resume position.
                self.ckpt.step.assign(i + 1)
                if self.redone:
                    self.save_redone()
                else:
                    self.save()
        # End of epoch: clear running means and the resume offset.
        t_loss_mean.reset_states()
        t_acc_mean.reset_states()
        v_loss_mean.reset_states()
        v_acc_mean.reset_states()
        j_mean.reset_states()
        f_mean.reset_states()
        starting_step = 0
        self.ckpt.epoch.assign_add(1)
        self.ckpt.step.assign(0)
        if self.redone:
            self.save_redone()
        else:
            self.save()
def forward_percent(self):
    """Train with progress, validation and image dumps inside each epoch.

    The progress bar has 100 ticks per epoch (one tick every
    ``epoch_percent_train`` = train_size // 100 steps).  At every tick a
    validation slice is evaluated and the running train/val loss and
    accuracy are recorded; every ``save_steps`` ticks, comparison images
    are dumped, the curve plots refreshed and the model checkpointed.
    Training resumes from ``self.ckpt`` (epoch and step), so the method is
    restartable mid-epoch.
    """
    print("forward percent")
    # Running means, reset after each recording point / each epoch.
    t_loss_mean = tf.keras.metrics.Mean(name='t_loss')
    t_acc_mean = tf.keras.metrics.Mean(name='t_acc')
    v_loss_mean = tf.keras.metrics.Mean(name='v_loss')
    v_acc_mean = tf.keras.metrics.Mean(name='v_acc')
    # Resume point stored in the checkpoint.
    starting_epoch = int(self.ckpt.epoch)
    starting_step = int(self.ckpt.step)
    # Steps per progress-bar tick (1% of an epoch), at least 1.
    epoch_percent_train = self.train_size // 100
    epoch_percent_train = 1 if epoch_percent_train == 0 else epoch_percent_train
    epoch_percent_val = self.val_size // 100
    epoch_percent_val = 1 if epoch_percent_val == 0 else epoch_percent_val
    print("starting_step: ", starting_step)
    print("start progbar: ", starting_step // epoch_percent_train)
    for epoch in range(starting_epoch, self.epoch_max + 1):
        print("epoch : ", epoch)
        progbar = tf.keras.utils.Progbar(100)
        progbar.update(starting_step // epoch_percent_train)
        # Apply the learning-rate schedule for this epoch.
        self.lr = self.lr_fn(self.lr, epoch)
        self.optimizer.lr = self.lr
        # One epoch on the TRAIN dataset, offset by the resume step.
        for i, train_x in enumerate(self.train_ds, starting_step):
            t_loss_mean(
                self.model.compute_apply_gradients(train_x, self.optimizer))
            t_acc_mean(self.model.compute_accuracy(train_x))
            if i % epoch_percent_train == 0:
                progbar.add(1)
                # Validation slice + record running metrics at every tick.
                for val_x in self.val_ds.take(epoch_percent_val):
                    v_loss_mean(self.model.compute_loss(val_x))
                    v_acc_mean(self.model.compute_accuracy(val_x))
                self.t_loss.append(t_loss_mean.result().numpy())
                self.t_acc.append(t_acc_mean.result().numpy())
                self.v_loss.append(v_loss_mean.result().numpy())
                self.v_acc.append(v_acc_mean.result().numpy())
                t_loss_mean.reset_states()
                t_acc_mean.reset_states()
                v_loss_mean.reset_states()
                v_acc_mean.reset_states()
            if i != 0 and i % (epoch_percent_train * self.save_steps) == 0:
                # Dump comparison images for one validation batch...
                for val_x in self.val_ds.take(1):
                    if self.is_seq:
                        image_saver.generate_and_save_images_compare_seq(
                            self.model, val_x, self.name +
                            '_epoch_{:03d}_step_{:03d}_test'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                    else:
                        image_saver.generate_and_save_images_compare_lab(
                            self.model, val_x, self.name +
                            '_epoch_{:03d}_step_{:03d}_test'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                # ...and one train batch.
                for train_x in self.train_ds.take(1):
                    if self.is_seq:
                        image_saver.generate_and_save_images_compare_seq(
                            self.model, train_x, self.name +
                            '_epoch_{:03d}_step_{:03d}_train'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                    else:
                        image_saver.generate_and_save_images_compare_lab(
                            self.model, train_x, self.name +
                            '_epoch_{:03d}_step_{:03d}_train'.format(
                                epoch, i // epoch_percent_train),
                            self.img_path)
                print('i :', i)
                print('epoch percent train: ', epoch_percent_train)
                print('save step: ', self.save_steps)
                # Refresh the curve plots; x axis is in epochs (100 samples
                # per epoch).
                x_axis = np.linspace(0, len(self.t_loss) / 100,
                                     len(self.t_loss))
                image_saver.curves([self.t_loss, self.v_loss],
                                   ['Training', 'Validation'],
                                   'training_validation_loss', self.img_path,
                                   'Steps', 'Loss', x_axis)
                image_saver.curves([self.t_acc, self.v_acc],
                                   ['Training', 'Validation'],
                                   'training_validation_accuracy',
                                   self.img_path, 'Steps', 'Accuracy', x_axis)
                # Checkpoint the mid-epoch resume position.
                self.ckpt.step.assign(i + 1)
                self.save()
        # End of epoch: clear running means and the resume offset.
        t_loss_mean.reset_states()
        t_acc_mean.reset_states()
        v_loss_mean.reset_states()
        v_acc_mean.reset_states()
        starting_step = 0
        self.ckpt.epoch.assign_add(1)
        self.ckpt.step.assign(0)
        self.save()