    def train_model(self, epochs, batch_size=None, directory='imgs', images_per_epoch=4):
        if batch_size is None: batch_size = self.batch_size
        sum_writer_loss = self.define_loss_tensorboard()
        avg_vae_loss = tf.keras.metrics.Mean()
        start_time = time.time()

        batch_per_epoch = int(self.train_dataset.shape[0] / batch_size)
        # calculate the number of training iterations
        n_steps = batch_per_epoch * epochs

        # resume the epoch counter from a previous run, if a checkpoint file exists
        try:
            with open('current_epoch.txt') as epoch_file:
                epoch = int(epoch_file.read())
        except (FileNotFoundError, ValueError):
            epoch = 0

        for step_i in range(n_steps):
            train_x = self.generate_real_samples(batch_size)
            loss = self.compute_apply_gradients(train_x)
            avg_vae_loss(loss)
            if step_i % batch_per_epoch == 0:  # once per epoch
                epoch += 1
                gen_images = self.sample(self.generative_net.seed,False)
                data_access.store_images_seed(directory,gen_images[:images_per_epoch],epoch)
                data_access.write_current_epoch('current_epoch',epoch=epoch)
            data_access.print_training_output_simple_loss(step_i,n_steps,loss)
            with sum_writer_loss.as_default():
                tf.summary.scalar('loss_vae', avg_vae_loss.result(),step=self.optimizer.iterations)
        end_time = time.time()
        data_access.print_training_time(start_time,end_time,None)
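Several of these examples resume the epoch counter by reading 'current_epoch.txt' back on startup and persist it through data_access.write_current_epoch (one of the later examples writes the file inline instead). The helper below is a minimal sketch of what write_current_epoch might look like, inferred from that inline write; the real implementation in the data_access module may differ, and the '.txt' suffix handling is an assumption.

def write_current_epoch(filename, epoch):
    # persist the last completed epoch so an interrupted run can resume its counter
    with open(f'{filename}.txt', 'w') as out_file:
        out_file.write(str(epoch))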
    def generate_images(self, number_of_samples, directory):
        seed = tf.random.normal([number_of_samples, self.random_noise_size])
        images = self.generator(seed)
        if self.classifier is not None:
            predictions = self.classifier(data_access.normalize(data_access.de_standardize(images)))
            data_access.produce_generate_figure(directory, images, predictions, class_names)
        else:
            data_access.store_images_seed(directory, images, 'gen_images', 'gan')
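For reference, a hypothetical call to the generate_images method above; the gan_model variable name and sample count are illustrative only, and class_names is assumed to be a module-level list of label strings used by data_access.produce_generate_figure.

# illustrative usage; 'gan_model' is a placeholder for an instance of the class above
gan_model.generate_images(number_of_samples=16, directory='imgs')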
    def train_model(self, epoches, n_critic=5, class_type=0, directory='imgs'):
        """
        Train model for a number of epochs

        :param epoches: - number of full passes over the training dataset
        :param n_critic: - number of critic updates per generator update
        :param class_type: - class number: converge generated data to this class
        :param directory: - directory where images will be placed during training
        """
        # NOTE: the class_type argument is immediately overridden here with a fixed
        # probability distribution split evenly between the first two classes
        class_type = [0 for i in range(10)]
        class_type[0] = 0.5
        class_type[1] = 0.5
        batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)

        # calculate the number of training iterations
        n_steps = batch_per_epoch * epoches
        # calculate the size of half a batch of samples
        half_batch = int(self.batch_size / 2)

        sum_writer_loss = self.define_loss_tensorboard()
        #self.classifier_m.load_local_model()
        avg_loss_critic = tf.keras.metrics.Mean()
        avg_loss_gen = tf.keras.metrics.Mean()
        # resume the epoch counter from a previous run, if a checkpoint file exists
        try:
            with open('current_epoch.txt') as epoch_file:
                epoch = int(epoch_file.read())
        except (FileNotFoundError, ValueError):
            epoch = 0
        n_dif_images = 4
        start_time = time.time()
        for i in range(n_steps):
            for _ in range(n_critic):
                # get randomly selected 'real' samples
                X_real, y_real = self.generate_real_samples(half_batch)
                # generate 'fake' examples
                X_fake, y_fake = self.generate_fake_samples(self.random_noise_size, half_batch)
                
                # update critic model weights
                c_loss = self.training_step_critic(X_real,X_fake, y_real,y_fake,half_batch)
                avg_loss_critic(c_loss)
                
            gen_loss, matched_images, gen_images = self.training_step_generator(self.random_noise_size,class_type)
            avg_loss_gen(gen_loss)
            data_access.print_training_output(i,n_steps, avg_loss_critic.result(),avg_loss_gen.result()) 
            if i % batch_per_epoch == 0:  # once per epoch
                data_access.store_images_seed(directory,gen_images[:n_dif_images],epoch)
                with sum_writer_loss.as_default():
                    tf.summary.scalar('loss_gen', avg_loss_gen.result(),step=self.generator.optimizer.iterations)
                    tf.summary.scalar('avg_loss_critic', avg_loss_critic.result(),step=self.critic.optimizer.iterations)
                epoch += 1
                if((epoch % 10) == 0):
                    self.generator.save_weights('/content/weights/g_weights/g_weights',save_format='tf')
                    self.critic.save_weights('/content/weights/c_weights/c_weights',save_format='tf')
                    with open('current_epoch.txt','w') as ofil:
                        ofil.write(f'{epoch}')
                    print('Saved epoch ',epoch)
        data_access.create_collection(epoches,n_dif_images,directory)
        data_access.print_training_time(start_time,time.time(),None)
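Most of these examples obtain sum_writer_loss from self.define_loss_tensorboard() and then log scalars inside its as_default() context. Below is a minimal sketch of such a factory, assuming it simply wraps tf.summary.create_file_writer; the 'logs/' directory and timestamp naming are assumptions, not taken from this listing.

import datetime
import tensorflow as tf

def define_loss_tensorboard():
    # return a TensorBoard summary writer pointing at a timestamped log directory
    log_dir = 'logs/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    return tf.summary.create_file_writer(log_dir)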
Example 4
    def train_model(self, epoches, n_critic=5, class_type=0, directory='imgs', n_img_per_epoch=4):
        """
        Train model for a number of epochs

        :param epoches: - number of full passes over the training dataset
        :param n_critic: - number of critic updates per generator update
        :param class_type: - either a class number, if convergence to a single class is wanted,
            or a probability distribution (array) describing the target probability of each class
            Example: class convergence (with 5 classes): 0
                     class divergence (with 5 classes): [0.25, 0.25, 0.25, 0.25, 0.0]
        :param directory: - directory where images will be placed during training
        :param n_img_per_epoch: - number of images stored per epoch while training
        """
        batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)

        # calculate the number of training iterations
        n_steps = batch_per_epoch * epoches
        # calculate the size of half a batch of samples
        half_batch = int(self.batch_size / 2)

        sum_writer_loss = self.define_loss_tensorboard()
        avg_loss_critic = tf.keras.metrics.Mean()
        avg_loss_gen = tf.keras.metrics.Mean()
        # resume the epoch counter from a previous run, if a checkpoint file exists
        try:
            with open('current_epoch.txt') as epoch_file:
                epoch = int(epoch_file.read())
        except (FileNotFoundError, ValueError):
            epoch = 0
        start_time = time.time()
        print(self.model_parameters)
        for i in range(n_steps):
            for _ in range(n_critic):
                # get randomly selected 'real' samples
                X_real, y_real = self.generate_real_samples(half_batch)
                # generate 'fake' examples
                X_fake, y_fake = self.generate_fake_samples(self.random_noise_size, half_batch)
                
                # update critic model weights
                c_loss = self.training_step_critic(X_real,X_fake, y_real,y_fake,half_batch)
                avg_loss_critic(c_loss)
                
            gen_loss, matched_images, gen_images = self.training_step_generator(self.random_noise_size,class_type)
            avg_loss_gen(gen_loss)
            data_access.print_training_output(i,n_steps, avg_loss_critic.result(),avg_loss_gen.result()) 
            if i % batch_per_epoch == 0:  # once per epoch
                data_access.store_images_seed(directory,gen_images[:n_img_per_epoch],epoch)
                with sum_writer_loss.as_default():
                    tf.summary.scalar('loss_gen', avg_loss_gen.result(),step=self.generator.optimizer.iterations)
                    tf.summary.scalar('avg_loss_critic', avg_loss_critic.result(),step=self.critic.optimizer.iterations)
                epoch += 1
                if (epoch % 1) == 0:  # modulo 1 is always 0, so weights are checkpointed every epoch
                    self.generator.save_weights('weights/g_weights/g_weights',save_format='tf')
                    self.critic.save_weights('weights/c_weights/c_weights',save_format='tf')
                    data_access.write_current_epoch(filename='current_epoch',epoch=epoch)
        data_access.create_collection(epoches,n_img_per_epoch,directory)
        data_access.print_training_time(start_time,time.time(),self.model_parameters)
    def train_model(self, epoches, n_critic=5, noise_size=100, class_type=5):

        batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)

        # calculate the number of training iterations
        n_steps = batch_per_epoch * epoches
        # calculate the size of half a batch of samples
        half_batch = int(self.batch_size / 2)

        sum_writer_loss = self.define_loss_tensorboard()
        self.classifier_m.load_local_model()
        avg_loss_critic = tf.keras.metrics.Mean()
        avg_loss_gen = tf.keras.metrics.Mean()
        epoch = 0
        n_dif_images = 4
        directory = 'imgs'
        start_time = time.time()
        for i in range(n_steps):
            for _ in range(n_critic):
                # get randomly selected 'real' samples
                X_real, y_real = self.generate_real_samples(half_batch)
                # generate 'fake' examples
                X_fake, y_fake = self.generate_fake_samples(
                    noise_size, half_batch)

                # update critic model weights
                c_loss = self.training_step_critic(X_real, X_fake, y_real,
                                                   y_fake, half_batch)
                avg_loss_critic(c_loss)

            gen_loss, matched_images, gen_images = self.training_step_generator(
                noise_size, class_type)
            avg_loss_gen(gen_loss)
            data_access.print_training_output(i, n_steps,
                                              avg_loss_critic.result(),
                                              avg_loss_gen.result())

            if i % batch_per_epoch == 0:  # once per epoch
                data_access.store_images_seed(directory,
                                              gen_images[:n_dif_images], epoch)
                with sum_writer_loss.as_default():
                    tf.summary.scalar('loss_gen',
                                      avg_loss_gen.result(),
                                      step=self.generator.optimizer.iterations)
                    tf.summary.scalar('avg_loss_critic',
                                      avg_loss_critic.result(),
                                      step=self.critic.optimizer.iterations)
                epoch += 1
        data_access.create_collection(epoches, n_dif_images, directory)
        print('Time elapsed {}'.format(time.time() - start_time))
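The critic loop above trains on half-batches produced by generate_real_samples and generate_fake_samples. The sketch below shows one plausible shape for the fake-sample helper, matching the (noise_size, n_samples) call pattern used above; the label convention (fake samples marked with 1, as in common Wasserstein GAN implementations) is an assumption, not confirmed by this listing.

    def generate_fake_samples(self, noise_size, n_samples):
        # draw latent noise and map it through the generator
        noise = tf.random.normal([n_samples, noise_size])
        fake_images = self.generator(noise, training=True)
        # assumed Wasserstein-style target for fake samples
        fake_labels = tf.ones((n_samples, 1))
        return fake_images, fake_labels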
Example 6
    def train_model(self,
                    epochs=None,
                    batch_size=None,
                    images_per_epoch=4,
                    directory='imgs'):
        if epochs is None: epochs = self.params['epochs']
        if batch_size is None: batch_size = self.params['batchsz']

        batch_per_epoch = int(self.train_dataset.shape[0] / batch_size)
        # calculate the number of training iterations
        n_steps = batch_per_epoch * epochs

        start_time = time.time()
        # resume the epoch counter from a previous run, if a checkpoint file exists
        try:
            with open('current_epoch.txt') as epoch_file:
                epoch = int(epoch_file.read())
        except (FileNotFoundError, ValueError):
            epoch = 0

        for step_i in range(n_steps):
            train_x = self.generate_real_samples(batch_size)
            inf_loss, gen_loss = self.compute_apply_gradients(train_x)
            data_access.print_training_output_vae(step_i, n_steps, inf_loss,
                                                  gen_loss)
            if step_i % batch_per_epoch == 0:  # once per epoch
                epoch += 1
                gen_images = self.sample(self.generative_net.seed)
                data_access.store_images_seed(directory,
                                              gen_images[:images_per_epoch],
                                              epoch)

                self.generative_net.save_weights('weights/g_weights/g_weights',
                                                 save_format='tf')
                self.inference_net.save_weights('weights/i_weights/i_weights',
                                                save_format='tf')
                data_access.write_current_epoch(filename='current_epoch',
                                                epoch=epoch)
        end_time = time.time()
        data_access.print_training_time(start_time, end_time, self.params)
    def generate_images(self, number_of_samples=5, directory="imgs"):
        random_vector_for_generation = tf.random.normal(
            shape=[number_of_samples, self.latent_dim])
        images = self.sample(random_vector_for_generation, False)
        data_access.store_images_seed(directory, images, 'None', 'vae')
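The VAE example above saves separate inference and generative network weights and expects compute_apply_gradients to return a loss for each. The following is a minimal sketch of one way that step could be structured, assuming separate optimizers (self.inf_optimizer, self.gen_optimizer) and a compute_loss helper returning both terms; all three names are hypothetical, since the actual implementation is not shown in this listing.

    @tf.function
    def compute_apply_gradients(self, x):
        # compute both losses under one persistent tape, then update each sub-network
        with tf.GradientTape(persistent=True) as tape:
            inf_loss, gen_loss = self.compute_loss(x)  # assumed helper returning both losses
        inf_grads = tape.gradient(inf_loss, self.inference_net.trainable_variables)
        gen_grads = tape.gradient(gen_loss, self.generative_net.trainable_variables)
        self.inf_optimizer.apply_gradients(zip(inf_grads, self.inference_net.trainable_variables))
        self.gen_optimizer.apply_gradients(zip(gen_grads, self.generative_net.trainable_variables))
        del tape  # persistent tapes should be released explicitly
        return inf_loss, gen_loss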
    def train_model(self,
                    epoches,
                    b_size,
                    n_critic=5,
                    class_type=0,
                    directory='imgs',
                    n_img_per_epoch=4):
        """
        Train model for a number of epochs

        :param epoches: - number of full passes over the training dataset
        :param b_size: - batch size (note: the loop below still uses self.batch_size)
        :param n_critic: - number of critic updates per generator update
        :param class_type: - class number: converge generated data to this class
        :param directory: - directory where images will be placed during training
        :param n_img_per_epoch: - number of images stored per epoch while training
        """
        batch_per_epoch = int(self.train_dataset.shape[0] / self.batch_size)

        # calculate the number of training iterations
        n_steps = batch_per_epoch * epoches
        # calculate the size of half a batch of samples
        half_batch = int(self.batch_size / 2)

        sum_writer_loss = self.define_loss_tensorboard()
        #self.classifier_m.load_local_model()
        avg_loss_critic = tf.keras.metrics.Mean()
        avg_loss_gen = tf.keras.metrics.Mean()
        # resume the epoch counter from a previous run, if a checkpoint file exists
        try:
            with open('current_epoch.txt') as epoch_file:
                epoch = int(epoch_file.read())
        except (FileNotFoundError, ValueError):
            epoch = 0
        start_time = time.time()
        for i in range(n_steps):
            self.update_fadein([self.generator, self.critic], i, n_steps)
            for _ in range(n_critic):
                # get randomly selected 'real' samples
                X_real, y_real = self.generate_real_samples(half_batch)
                # generate 'fake' examples
                X_fake, y_fake = self.generate_fake_samples(
                    self.random_noise_size, half_batch)

                # update critic model weights
                tf.config.experimental_run_functions_eagerly(True)
                c_loss = self.training_step_critic(X_real, X_fake, y_real,
                                                   y_fake, half_batch)
                avg_loss_critic(c_loss)

            gen_loss, matched_images, gen_images = self.training_step_generator(
                self.random_noise_size, class_type)
            avg_loss_gen(gen_loss)
            data_access.print_training_output(i, n_steps,
                                              avg_loss_critic.result(),
                                              avg_loss_gen.result())
            if i % batch_per_epoch == 0:  # once per epoch
                data_access.store_images_seed(directory,
                                              gen_images[:n_img_per_epoch],
                                              epoch)
                with sum_writer_loss.as_default():
                    tf.summary.scalar('loss_gen',
                                      avg_loss_gen.result(),
                                      step=self.generator.optimizer.iterations)
                    tf.summary.scalar('avg_loss_critic',
                                      avg_loss_critic.result(),
                                      step=self.critic.optimizer.iterations)
                epoch += 1
                if (epoch % 1) == 0:  # modulo 1 is always 0, so weights are checkpointed every epoch
                    self.generator.save_weights('weights/g_weights/g_weights',
                                                save_format='tf')
                    self.critic.save_weights('weights/c_weights/c_weights',
                                             save_format='tf')
                    data_access.write_current_epoch(filename='current_epoch',
                                                    epoch=epoch)
        print('Time elapsed {}'.format(time.time() - start_time))
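The last example calls self.update_fadein at every step, which is characteristic of progressively growing GANs: newly added blocks are blended in by an alpha factor raised from 0 to 1 over the run. The sketch below shows the usual shape of that helper, assuming the growing models contain a custom 'WeightedSum' merge layer exposing an alpha backend variable; that layer name and attribute are assumptions based on common progressive-growing implementations, not on this listing.

    def update_fadein(self, models, step, n_steps):
        # linearly raise the fade-in factor from 0 to 1 over the whole run
        alpha = step / float(n_steps - 1)
        for model in models:
            for layer in model.layers:
                if layer.__class__.__name__ == 'WeightedSum':
                    tf.keras.backend.set_value(layer.alpha, alpha)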