def train_model(self, epoches, n_critic=5, class_type=0, directory='imgs'):
    """
    Train the model for a number of epochs.
    :param epoches: - cycles of training over the whole dataset
    :param n_critic: - number of times the critic trains for each generator update
    :param class_type: - class number: converge generated data to this class
    :param directory: - directory where images will be placed during training
    """
    # NOTE: the class_type argument is overridden here with a fixed
    # distribution targeting classes 0 and 1 with equal probability
    class_type = [0 for i in range(10)]
    class_type[0] = 0.5
    class_type[1] = 0.5
    batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)
    # calculate the number of training iterations
    n_steps = batch_per_epoch * epoches
    # calculate the size of half a batch of samples
    half_batch = int(self.batch_size / 2)
    sum_writer_loss = self.define_loss_tensorboard()
    # self.classifier_m.load_local_model()
    avg_loss_critic = tf.keras.metrics.Mean()
    avg_loss_gen = tf.keras.metrics.Mean()
    # resume the epoch counter from disk if a previous run stored one
    try:
        epoch = int(open('current_epoch.txt').read())
    except (FileNotFoundError, ValueError):
        epoch = 0
    n_dif_images = 4
    start_time = time.time()
    for i in range(n_steps):
        for _ in range(n_critic):
            # get randomly selected 'real' samples
            X_real, y_real = self.generate_real_samples(half_batch)
            # generate 'fake' examples
            X_fake, y_fake = self.generate_fake_samples(self.random_noise_size, half_batch)
            # update critic model weights
            c_loss = self.training_step_critic(X_real, X_fake, y_real, y_fake, half_batch)
            avg_loss_critic(c_loss)
        gen_loss, matched_images, gen_images = self.training_step_generator(self.random_noise_size, class_type)
        avg_loss_gen(gen_loss)
        data_access.print_training_output(i, n_steps, avg_loss_critic.result(), avg_loss_gen.result())
        # one full pass over the dataset completed: snapshot images, log losses
        if (i % batch_per_epoch) == 0:
            data_access.store_images_seed(directory, gen_images[:n_dif_images], epoch)
            with sum_writer_loss.as_default():
                tf.summary.scalar('loss_gen', avg_loss_gen.result(), step=self.generator.optimizer.iterations)
                tf.summary.scalar('avg_loss_critic', avg_loss_critic.result(), step=self.critic.optimizer.iterations)
            epoch += 1
            # checkpoint the networks and the epoch counter every 10 epochs
            if (epoch % 10) == 0:
                self.generator.save_weights('/content/weights/g_weights/g_weights', save_format='tf')
                self.critic.save_weights('/content/weights/c_weights/c_weights', save_format='tf')
                with open('current_epoch.txt', 'w') as ofil:
                    ofil.write(f'{epoch}')
                print('Saved epoch ', epoch)
    data_access.create_collection(epoches, n_dif_images, directory)
    data_access.print_training_time(start_time, time.time(), None)
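# Worked example of the step bookkeeping used above. The dataset size and
# batch size are assumptions (an MNIST-sized training set, batches of 64),
# used only to make the arithmetic concrete.
train_samples, batch_size, epoches = 60000, 64, 10
batch_per_epoch = train_samples // batch_size   # 937 generator updates per epoch
n_steps = batch_per_epoch * epoches             # 9370 outer-loop iterations in total
half_batch = batch_size // 2                    # 32 real and 32 fake samples per critic step
print(batch_per_epoch, n_steps, half_batch)     # 937 9370 32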
def train_model(self, epoches, n_critic=5, class_type=0, directory='imgs', n_img_per_epoch=4):
    """
    Train the model for a number of epochs.
    :param epoches: - cycles of training over the whole dataset
    :param n_critic: - number of times the critic trains for each generator update
    :param class_type: - a class number if convergence to a single class is wanted,
        or a probability distribution (array) describing the target probability
        of each class.
        Example with 5 classes:
            class convergence: 0
            class divergence:  [0.25, 0.25, 0.25, 0.25, 0.0]
    :param directory: - directory where images will be placed during training
    :param n_img_per_epoch: - number of images stored per epoch while training
    """
    batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)
    # calculate the number of training iterations
    n_steps = batch_per_epoch * epoches
    # calculate the size of half a batch of samples
    half_batch = int(self.batch_size / 2)
    sum_writer_loss = self.define_loss_tensorboard()
    avg_loss_critic = tf.keras.metrics.Mean()
    avg_loss_gen = tf.keras.metrics.Mean()
    # resume the epoch counter from disk if a previous run stored one
    try:
        epoch = int(open('current_epoch.txt').read())
    except (FileNotFoundError, ValueError):
        epoch = 0
    start_time = time.time()
    print(self.model_parameters)
    for i in range(n_steps):
        for _ in range(n_critic):
            # get randomly selected 'real' samples
            X_real, y_real = self.generate_real_samples(half_batch)
            # generate 'fake' examples
            X_fake, y_fake = self.generate_fake_samples(self.random_noise_size, half_batch)
            # update critic model weights
            c_loss = self.training_step_critic(X_real, X_fake, y_real, y_fake, half_batch)
            avg_loss_critic(c_loss)
        gen_loss, matched_images, gen_images = self.training_step_generator(self.random_noise_size, class_type)
        avg_loss_gen(gen_loss)
        data_access.print_training_output(i, n_steps, avg_loss_critic.result(), avg_loss_gen.result())
        # one full pass over the dataset completed: snapshot, log and checkpoint
        if (i % batch_per_epoch) == 0:
            data_access.store_images_seed(directory, gen_images[:n_img_per_epoch], epoch)
            with sum_writer_loss.as_default():
                tf.summary.scalar('loss_gen', avg_loss_gen.result(), step=self.generator.optimizer.iterations)
                tf.summary.scalar('avg_loss_critic', avg_loss_critic.result(), step=self.critic.optimizer.iterations)
            epoch += 1
            # save a checkpoint every epoch
            self.generator.save_weights('weights/g_weights/g_weights', save_format='tf')
            self.critic.save_weights('weights/c_weights/c_weights', save_format='tf')
            data_access.write_current_epoch(filename='current_epoch', epoch=epoch)
    data_access.create_collection(epoches, n_img_per_epoch, directory)
    data_access.print_training_time(start_time, time.time(), self.model_parameters)
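# A minimal sketch of the two class_type forms described in the docstring
# above, for a 5-class setup. The helper name build_class_target is
# hypothetical and exists only for illustration.
def build_class_target(n_classes, converge_to=None, exclude=None):
    if converge_to is not None:
        return converge_to                      # single-class convergence, e.g. 0
    keep = [c for c in range(n_classes) if c != exclude]
    p = 1.0 / len(keep)
    return [p if c != exclude else 0.0 for c in range(n_classes)]

print(build_class_target(5, converge_to=0))     # -> 0
print(build_class_target(5, exclude=4))         # -> [0.25, 0.25, 0.25, 0.25, 0.0]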
def train_model(self, epoches, n_critic=5, noise_size=100, class_type=5):
    """
    Train the model for a number of epochs, loading the auxiliary
    classifier from disk and converging generated data to class_type.
    """
    batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)
    # calculate the number of training iterations
    n_steps = batch_per_epoch * epoches
    # calculate the size of half a batch of samples
    half_batch = int(self.batch_size / 2)
    sum_writer_loss = self.define_loss_tensorboard()
    self.classifier_m.load_local_model()
    avg_loss_critic = tf.keras.metrics.Mean()
    avg_loss_gen = tf.keras.metrics.Mean()
    epoch = 0
    n_dif_images = 4
    directory = 'imgs'
    start_time = time.time()
    for i in range(n_steps):
        for _ in range(n_critic):
            # get randomly selected 'real' samples
            X_real, y_real = self.generate_real_samples(half_batch)
            # generate 'fake' examples
            X_fake, y_fake = self.generate_fake_samples(noise_size, half_batch)
            # update critic model weights
            c_loss = self.training_step_critic(X_real, X_fake, y_real, y_fake, half_batch)
            avg_loss_critic(c_loss)
        gen_loss, matched_images, gen_images = self.training_step_generator(noise_size, class_type)
        avg_loss_gen(gen_loss)
        data_access.print_training_output(i, n_steps, avg_loss_critic.result(), avg_loss_gen.result())
        # one full pass over the dataset completed: snapshot images, log losses
        if (i % batch_per_epoch) == 0:
            data_access.store_images_seed(directory, gen_images[:n_dif_images], epoch)
            with sum_writer_loss.as_default():
                tf.summary.scalar('loss_gen', avg_loss_gen.result(), step=self.generator.optimizer.iterations)
                tf.summary.scalar('avg_loss_critic', avg_loss_critic.result(), step=self.critic.optimizer.iterations)
            epoch += 1
    data_access.create_collection(epoches, n_dif_images, directory)
    print('Time elapsed {}'.format(time.time() - start_time))
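# How the running-average loss metrics above accumulate, shown with the
# tf.keras.metrics.Mean API itself; the loss values are made up.
import tensorflow as tf

avg_loss = tf.keras.metrics.Mean()
for loss in [0.9, 0.7, 0.5]:
    avg_loss(loss)                 # each call folds a new value into the running mean
print(float(avg_loss.result()))    # 0.7, the mean of all values seen so far
avg_loss.reset_states()            # start a fresh average, e.g. once per epoch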
def train_model(self, epoches, b_size, n_critic=5, class_type=0, directory='imgs', n_img_per_epoch=4):
    """
    Train the model for a number of epochs, fading in the newest
    generator/critic blocks as training progresses.
    :param epoches: - cycles of training over the whole dataset
    :param b_size: - batch size (currently unused; self.batch_size is used instead)
    :param n_critic: - number of times the critic trains for each generator update
    :param class_type: - class number: converge generated data to this class
    :param directory: - directory where images will be placed during training
    :param n_img_per_epoch: - number of images stored per epoch while training
    """
    batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)
    # calculate the number of training iterations
    n_steps = batch_per_epoch * epoches
    # calculate the size of half a batch of samples
    half_batch = int(self.batch_size / 2)
    sum_writer_loss = self.define_loss_tensorboard()
    # self.classifier_m.load_local_model()
    avg_loss_critic = tf.keras.metrics.Mean()
    avg_loss_gen = tf.keras.metrics.Mean()
    # resume the epoch counter from disk if a previous run stored one
    try:
        epoch = int(open('current_epoch.txt').read())
    except (FileNotFoundError, ValueError):
        epoch = 0
    start_time = time.time()
    # run tf.functions eagerly; set once up front instead of on every step
    tf.config.experimental_run_functions_eagerly(True)
    for i in range(n_steps):
        # advance the fade-in of the newest blocks in both networks
        self.update_fadein([self.generator, self.critic], i, n_steps)
        for _ in range(n_critic):
            # get randomly selected 'real' samples
            X_real, y_real = self.generate_real_samples(half_batch)
            # generate 'fake' examples
            X_fake, y_fake = self.generate_fake_samples(self.random_noise_size, half_batch)
            # update critic model weights
            c_loss = self.training_step_critic(X_real, X_fake, y_real, y_fake, half_batch)
            avg_loss_critic(c_loss)
        gen_loss, matched_images, gen_images = self.training_step_generator(self.random_noise_size, class_type)
        avg_loss_gen(gen_loss)
        data_access.print_training_output(i, n_steps, avg_loss_critic.result(), avg_loss_gen.result())
        # one full pass over the dataset completed: snapshot, log and checkpoint
        if (i % batch_per_epoch) == 0:
            data_access.store_images_seed(directory, gen_images[:n_img_per_epoch], epoch)
            with sum_writer_loss.as_default():
                tf.summary.scalar('loss_gen', avg_loss_gen.result(), step=self.generator.optimizer.iterations)
                tf.summary.scalar('avg_loss_critic', avg_loss_critic.result(), step=self.critic.optimizer.iterations)
            epoch += 1
            # save a checkpoint every epoch
            self.generator.save_weights('weights/g_weights/g_weights', save_format='tf')
            self.critic.save_weights('weights/c_weights/c_weights', save_format='tf')
            data_access.write_current_epoch(filename='current_epoch', epoch=epoch)
    print('Time elapsed {}'.format(time.time() - start_time))
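# A sketch of the linear fade-in schedule that update_fadein presumably
# applies (the standard progressive-growing recipe: alpha rises from 0 to 1
# over training). The actual update_fadein body is not shown here, so this
# is an assumption rather than the method's own code.
def fadein_alpha(step, n_steps):
    return step / float(n_steps - 1)

for step in (0, 2500, 4999):
    print(step, round(fadein_alpha(step, 5000), 2))   # 0.0, 0.5, 1.0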