Example #1
    def _create_optimizer(self, loss, variables_to_optimize=None, learning_rate=0.001):
        """ Create optimizer for the network
        Args:
    
        Returns:
        """

        utils.show_message('Setup optimizer', lvl=1)

        model_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

        if variables_to_optimize is not None:
            model_vars_train = variables_to_optimize
        else:
            model_vars_train = model_vars
        print('Number of tensors to optimize: ' + str(len(model_vars_train)))

        # optimizer = tf.train.AdamOptimizer()
        # TODO: https://github.com/ibab/tensorflow-wavenet/issues/267#issuecomment-302799152
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)

        # Restrict training to the selected variables (defaults to all trainable variables)
        optimizer_op = slim.learning.create_train_op(loss, optimizer,
                                                     variables_to_train=model_vars_train)

        return optimizer_op
Example #2
    def post_evaluation(self, hparams_string):
        """ Run post evaluation on the results of the model
        Args:
    
        Returns:
        """
        args_evaluate = hparams_parser_evaluate(hparams_string)

        if args_evaluate.epoch_no is None:
            setname = 'Evaluation'
        else:
            setname = 'Evaluation_' + str(args_evaluate.epoch_no)

        dir_results_eval = os.path.join(self.dir_results, setname)

        if not (os.path.isdir(dir_results_eval)):
            utils.show_message('{0} does not exist!'.format(dir_results_eval))
            return

        if args_evaluate.convert_samples:
            if os.path.isdir(os.path.join(dir_results_eval, 'Samples')):
                GAN_samples.interim(self.model, setname)
            else:
                utils.show_message(
                    'No GAN samples found in {0}'.format(dir_results_eval))
Example #3
    def _create_inference(self, inputs, num_classes, is_training=True, global_pool=True, dropout_keep_prob=0.5):
        """ Define the inference model for the network.
        Args:
            inputs: Input image tensor.
            num_classes: Number of output classes.
            is_training: Whether the graph is built for training.
            global_pool: Whether to apply global average pooling before the logits.
            dropout_keep_prob: Dropout keep probability.
        Returns:
            logits, endpoints, input layer names and output layer names.
        """
        utils.show_message('Create model inference', lvl=1)
        print('Model: ' + self.model_version)

        with slim.arg_scope(resnet_v1.resnet_arg_scope(batch_norm_decay=0.95)):
            if self.model_version == 'ResNet50':
                logits, endpoints = resnet_v1.resnet_v1_50(inputs, num_classes, is_training=is_training, global_pool=global_pool, spatial_squeeze=False)
                input_layer_name = ['resnet_v1_50/conv1']
                output_layer_names = [ep for ep in endpoints if ('logits' in ep)]

            elif self.model_version == 'ResNet101':
                logits, endpoints = resnet_v1.resnet_v1_101(inputs, num_classes, is_training=is_training, global_pool=global_pool, spatial_squeeze=False)
                input_layer_name = ['resnet_v1_101/conv1']
                output_layer_names = [ep for ep in endpoints if ('logits' in ep)]
            
            elif self.model_version == 'ResNet152':
                logits, endpoints = resnet_v1.resnet_v1_152(inputs, num_classes, is_training=is_training, global_pool=global_pool, spatial_squeeze=False)
                input_layer_name = ['resnet_v1_152/conv1']
                output_layer_names = [ep for ep in endpoints if ('logits' in ep)]

            elif self.model_version == 'ResNet200':
                logits, endpoints = resnet_v1.resnet_v1_200(inputs, num_classes, is_training=is_training, global_pool=global_pool, spatial_squeeze=False)
                input_layer_name = ['resnet_v1_200/conv1']
                output_layer_names = [ep for ep in endpoints if ('logits' in ep)]

            else:
                raise ValueError('Unknown model version: ' + self.model_version)

        print('Input layer  : ' + '; '.join(input_layer_name))
        print('Output layers: ' + '; '.join(output_layer_names))

        return logits, endpoints, input_layer_name, output_layer_names
Example #4
def interim(model_name, setname):
    """ Convert GAN image samples to tfrecord

    """

    _dir_results = os.path.join('models', model_name, 'results', setname, 'Samples')
    _dir_interim = os.path.join('data/interim', model_name, setname)
    utils.checkfolder(_dir_interim)

    # list filenames and classes. Also divides filenames into equally sized shards
    filenames, class_names = _get_filenames_and_classes(_dir_results)

    # save class dictionary
    class_dict = dict(zip(class_names, range(len(class_names))))
    utils.save_dict(class_dict, _dir_interim, 'class_dict.json')

    # convert images to tf records based on the list of filenames
    for shard_n in range(_NUM_SHARDS):
        utils.show_message('Processing shard %d/%d' % (shard_n+1,_NUM_SHARDS))
        tf_filename = _get_output_filename(_dir_interim, shard_n)

        with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
            _convert_to_tfrecord(filenames[shard_n], class_dict, tfrecord_writer)
        
    print('\nFinished converting GAN samples to tfrecord for %s %s!' % (model_name, setname))
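
The helpers `_get_filenames_and_classes`, `_get_output_filename`, and `_convert_to_tfrecord` are defined elsewhere in the module. As a rough sketch of the sharding convention, `_get_output_filename` might look like the following; the file-naming scheme here is an assumption, not necessarily the repository's actual one:

def _get_output_filename(dataset_dir, shard_n):
    # Hypothetical naming scheme: one .tfrecord file per shard.
    # The repository's actual convention may differ.
    return os.path.join(dataset_dir,
                        'data_%03d-of-%03d.tfrecord' % (shard_n, _NUM_SHARDS))
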
Example #5
def process(dataset_part):
    """Runs the conversion operation.

    Args:
      dataset_part: The dataset part to be converted [Nonsegmented, Segmented].
    """
    if dataset_part == 'Nonsegmented':
        _dir_raw = _DIR_RAW_NONSEGMENTED
        _dir_processed = _DIR_PROCESSED_NONSEGMENTED
        setname = 'Nonsegmented'
    else:
        _dir_raw = _DIR_RAW_SEGMENTED
        _dir_processed = _DIR_PROCESSED_SEGMENTED
        setname = 'Segmented'

    if _EXCLUDED_GRASSES:
        exclude_list = ['Black-grass', 'Common wheat', 'Loose Silky-bent']
    else:
        exclude_list = []

    # extract raw data
    with zipfile.ZipFile(_dir_raw) as archive:
        archive.extractall(_dir_processed)

    # list filenames and classes. Also divides filenames into equally sized shards
    filenames, class_names = _get_filenames_and_classes(
        _dir_processed, [setname], exclude_list)

    # save class dictionary
    class_dict = dict(zip(class_names, range(len(class_names))))
    utils.save_dict(class_dict, _dir_processed, 'class_dict.json')

    # convert images to tf records based on the list of filenames
    for shard_n in range(_NUM_SHARDS):
        utils.show_message('Processing shard %d/%d' %
                           (shard_n + 1, _NUM_SHARDS))
        tf_filename = _get_output_filename(_dir_processed, shard_n)

        with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
            _convert_to_tfrecord(filenames[shard_n], class_dict,
                                 tfrecord_writer)

    # clean up
    tmp_dir = os.path.join(_dir_processed, setname)
    tf.gfile.DeleteRecursively(tmp_dir)

    print('\nFinished converting the PSD %s dataset!' % setname)
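
Example #5 depends on module-level constants that are not shown above. A plausible configuration block, purely illustrative (all paths and values are assumptions):

# Hypothetical module-level configuration; values are illustrative only.
_DIR_RAW_NONSEGMENTED = 'data/raw/Nonsegmented.zip'
_DIR_RAW_SEGMENTED = 'data/raw/Segmented.zip'
_DIR_PROCESSED_NONSEGMENTED = 'data/processed/Nonsegmented'
_DIR_PROCESSED_SEGMENTED = 'data/processed/Segmented'
_EXCLUDED_GRASSES = True  # exclude the three grass species listed in process()
_NUM_SHARDS = 10          # number of .tfrecord shards to write
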
Example #6
    def _load_pretrained_model(self, output_logits, pretrained_model_path, variables_to_exclude=None):
        """ Restore variables from a pretrained checkpoint, excluding those not present in the graph or explicitly excluded. """
        if variables_to_exclude is None:
            variables_to_exclude = []
        utils.show_message('Loading pretrained model:', lvl=1)

        print('Pre-trained model path       : ' + pretrained_model_path)

        # Get all restorable variables from the graph
        graph_variables_all = slim.get_variables_to_restore()
        graph_variable_names_all = [tf_var.name.split(':')[0] for tf_var in graph_variables_all]
        print('Variables in graph           : ' + '{:>7d}'.format(len(graph_variable_names_all)))

        # Get variable names from pretrained model
        pretrained_model_variables_names_all = self._get_variable_names_from_checkpoint(pretrained_model_path)
        print('Variables in pretrained model: ' + '{:>7d}'.format(len(pretrained_model_variables_names_all)))

        # Add variable names that are in the graph but not in the pretrained model to the exclude list
        variables_to_exclude += list(set(graph_variable_names_all) - set(pretrained_model_variables_names_all))

        model_vars_restored = slim.get_variables_to_restore(exclude=variables_to_exclude)
        model_var_names_restored = [tf_var.name.split(':')[0] for tf_var in model_vars_restored]
        print('Variables to restore         : ' + '{:>7d}'.format(len(model_var_names_restored)))
        model_vars_not_restored = list(set(graph_variables_all) - set(model_vars_restored))
        print('Variables not restored       : ' + '{:>7d}'.format(len(model_vars_not_restored)))

        restorer = tf.train.Saver(model_vars_restored)
        with tf.Session() as sess:
            restorer.restore(sess, pretrained_model_path)

        return output_logits, model_vars_restored, model_vars_not_restored
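
`_get_variable_names_from_checkpoint` is not shown. A minimal sketch of how it could be implemented with the standard TF1 checkpoint reader (the actual implementation in the repository is an assumption):

    def _get_variable_names_from_checkpoint(self, checkpoint_path):
        # List the variable names stored in a checkpoint file.
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        return list(reader.get_variable_to_shape_map().keys())
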
Example #7
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # Make dataset
    if args.make_dataset:
        utils.show_message('Fetching raw dataset: {0}'.format(args.dataset),
                           lvl=1)
        dataset_manager.make_dataset(args.dataset)

    # Process dataset
    if args.process_dataset:
        utils.show_message('Processing raw dataset: {0}'.format(args.dataset),
                           lvl=1)
        dataset_manager.process_dataset(args.dataset)

    # Build and train model
    if args.train_model:
        utils.show_message('Configuring and Training Network: {0}'.format(
            args.model),
                           lvl=1)

        if args.model == 'BasicModel':
            model = BasicModel(dataset=args.dataset, id=args.id)
            model.train(hparams_string=args.hparams)

        elif args.model == 'LogReg_example':
            model = logreg_example(dataset=args.dataset, id=args.id)
            model.train(hparams_string=args.hparams)

    # Evaluate model
    if args.evaluate_model:
        utils.show_message('Evaluating Network: {0}'.format(args.model), lvl=1)

        if args.model == 'BasicModel':
            model = BasicModel(dataset=args.dataset, id=args.id)
            model.evaluate(hparams_string=args.hparams)

        elif args.model == 'LogReg_example':
            model = logreg_example(dataset=args.dataset, id=args.id)
            model.evaluate(hparams_string=args.hparams)

    # Visualize results
    if args.visualize:
        print('Visualizing Results')
Example #8
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()
    
    # Make dataset
    if args.make_dataset:
        utils.show_message('Fetching raw dataset: {0}'.format(args.dataset), lvl = 1)
        dataset_manager.make_dataset(args.dataset)
        
    # Process dataset
    if args.process_dataset:
        utils.show_message('Processing raw dataset: {0}'.format(args.dataset), lvl = 1)
        dataset_manager.process_dataset(args.dataset)
        
    # Build and train model
    if args.train_model:
        utils.show_message('Configuring and Training Network: {0}'.format(args.model), lvl = 1)        

        if args.model == 'BasicModel':
            model = BasicModel(
                dataset = args.dataset,
                id = args.id)
            model.train(hparams_string = args.hparams)
        
        elif args.model == 'acgan':
            model = acgan(
                dataset = args.dataset,
                id = args.id)
            model.train(hparams_string = args.hparams)

        elif args.model == 'WacGAN':
            model = WacGAN(
                dataset = args.dataset,
                id = args.id)
            model.train(hparams_string = args.hparams)

        elif args.model == 'WacGAN_small':
            model = WacGAN_small(
                dataset = args.dataset,
                id = args.id)
            model.train(hparams_string = args.hparams)
            
        elif args.model == 'WacGAN_info':
            model = WacGAN_info(
                dataset = args.dataset,
                id = args.id)
            model.train(hparams_string = args.hparams)

    # Evaluate model
    if args.evaluate_model:
        utils.show_message('Evaluating Network: {0}'.format(args.model), lvl = 1)

        if args.model == 'BasicModel':
            model = BasicModel(
                dataset = args.dataset,
                id = args.id)
            model.evaluate(hparams_string = args.hparams)
        
        elif args.model == 'acgan':
            model = acgan(
                dataset = args.dataset,
                id = args.id)
            model.evaluate(hparams_string = args.hparams)

        elif args.model == 'WacGAN':
            model = WacGAN(
                dataset = args.dataset,
                id = args.id)
            model.evaluate(hparams_string = args.hparams)

        elif args.model == 'WacGAN_small':
            model = WacGAN_small(
                dataset = args.dataset,
                id = args.id)
            model.evaluate(hparams_string = args.hparams)

        elif args.model == 'WacGAN_info':
            model = WacGAN_info(
                dataset = args.dataset,
                id = args.id)
            model.evaluate(hparams_string = args.hparams)

    if args.posteval_model:
        utils.show_message('Running Post Evaluation on Network: {0}'.format(args.model), lvl = 1)

        if args.model == 'WacGAN':
            model = WacGAN(
                dataset = args.dataset,
                id = args.id)
            model.post_evaluation(hparams_string = args.hparams)

        elif args.model == 'WacGAN_info':
            model = WacGAN_info(
                dataset = args.dataset,
                id = args.id)
            model.post_evaluation(hparams_string = args.hparams)
Example #9
    def evaluate(self, hparams_string):
        """ Run experiments to evaluate the performance of the model
        Args:
    
        Returns:
        """
        args_train = utils.load_model_configuration(self.dir_base)
        args_evaluate = hparams_parser_evaluate(hparams_string)

        self.unstructured_noise_dim = args_train.unstructured_noise_dim

        input_lbls = tf.placeholder(
            dtype = tf.float32, 
            shape = [None, self.lbls_dim], 
            name = 'input_test_lbls')
        input_noise = tf.placeholder(
            dtype = tf.float32, 
            shape = [None, self.unstructured_noise_dim], 
            name = 'input_test_noise')

        # The first call builds the generator variables; the second call reuses
        # them to build the inference graph with is_training=False.
        _ = self.__generator(input_noise, input_lbls)
        generated_images = self.__generator(input_noise, input_lbls, is_training=False, reuse=True)

        num_samples = 200

        ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
        
        if args_evaluate.epoch_no is None:
            checkpoint_path = ckpt.model_checkpoint_path
        else:
            all_checkpoint_paths = ckpt.all_model_checkpoint_paths[:]
            suffix_match = '-'+str(args_evaluate.epoch_no)
            ckpt_match = [f for f in all_checkpoint_paths if f.endswith(suffix_match)]
            
            if ckpt_match:
                checkpoint_path = ckpt_match[0]
            else:
                checkpoint_path = ckpt.model_checkpoint_path

        with tf.Session() as sess:
            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())
            
            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver()

            # Reload Tensor values from latest or specified checkpoint
            saver.restore(sess, checkpoint_path)
        
            # Generate evaluation noise
            np.random.seed(0)
            eval_noise = np.random.uniform(low=-1.0, high=1.0, size=[num_samples, self.unstructured_noise_dim])

            # Generate artificial images for each class
            for i in range(self.lbls_dim):
                utils.show_message('Generating images for class ' + str(i))

                eval_lbls = np.zeros(shape=[num_samples, self.lbls_dim])
                eval_lbls[:, i] = 1

                eval_images = sess.run(
                    generated_images,
                    feed_dict={input_noise: eval_noise,
                               input_lbls: eval_lbls})

                dir_results_eval = os.path.join(self.dir_results, 'Evaluation', str(i))
                utils.checkfolder(dir_results_eval)

                for j in range(num_samples):
                    utils.save_image_local(eval_images[j, :, :, :], dir_results_eval, 'Sample_' + str(j))
Example #10
    def evaluate(self, hparams_string, preprocessing_params=''):
        """ Run prediction of the network
        Args:
    
        Returns:
        """
        args_evaluate = hparams_parser_evaluate(hparams_string)

        output_folder = args_evaluate.output_folder
        if output_folder is None:
            output_folder = self.dir_results
        print('Output folder: ' + output_folder)

        # Load dataset
        if (self.dataset == 'PSD_Segmented'):
            DS = DS_PSDs.Dataset()
        elif (self.dataset == 'seeds_all'):
            DS = DS_Seeds.Dataset()
        elif (self.dataset == 'barley'):
            DS = DS_Barley.Dataset()
        elif (self.dataset == 'barley_abnormal'):
            DS = DS_Barley_Abnormal.Dataset()
        elif (self.dataset == 'barley_d0'):
            DS = DS_Barley_D0.Dataset()
        elif (self.dataset == 'barley_next'):
            DS = DS_Barley_Next.Dataset()
        elif (self.dataset == 'barley_next_stratified'):
            DS = DS_Barley_Next_Stratified.Dataset()
        elif (self.dataset == 'okra'):
            DS = DS_Okra.Dataset()
        elif (self.dataset == 'okra_abnormal'):
            DS = DS_Okra_Abnormal.Dataset()
        elif (self.dataset == 'okra_next'):
            DS = DS_Okra_next.Dataset()
        elif (self.dataset == 'okra_d0'):
            DS = DS_Okra_D0.Dataset()
        else:
            raise ValueError('Unknown dataset: ' + self.dataset)
        tf_dataset_list, dataset_sizes = DS.get_dataset_list(data_source = args_evaluate.data_source,
                                                            data_folder = args_evaluate.data_folder,
                                                            shuffle_before_split=args_evaluate.shuffle_before_split,
                                                            shuffle_seed=args_evaluate.shuffle_seed,
                                                            group_before_split=args_evaluate.group_before_split,
                                                            validation_method=args_evaluate.validation_method,
                                                            holdout_split=args_evaluate.holdout_split,
                                                            cross_folds=10,
                                                            cross_val_fold=None,
                                                            cross_test_fold=0,
                                                            shard_val=args_evaluate.shard_val,
                                                            shard_test=args_evaluate.shard_test,
                                                            stratify_training_set=args_evaluate.stratify_training_set)

        class_dicts = DS.get_class_dicts()
        num_classes = [len(class_dict) for class_dict in class_dicts]

        preprocessing = preprocess_factory.preprocess_factory()
        if not (preprocessing_params == ''):
            # Setup preprocessing pipeline
            preprocessing.prep_pipe_from_string(preprocessing_params)

        with tf.name_scope('Test_dataset'):
            tf_dataset_test = tf_dataset_list[2]
            if (tf_dataset_test is not None):
                tf_dataset_test = tf_dataset_test.map(DS._decode_from_TFexample)
                tf_dataset_test = tf_dataset_test.map(preprocessing.pipe)
                tf_dataset_test = tf_dataset_test.batch(batch_size = args_evaluate.batch_size, drop_remainder=False)
                tf_dataset_test = tf_dataset_test.prefetch(buffer_size=3)
                tf_dataset_test_iterator = tf_dataset_test.make_one_shot_iterator()
                tf_input_getBatch_test = tf_dataset_test_iterator.get_next()

        CMatsTest = [CM.confusionmatrix(N_classes) for N_classes in num_classes]

        with tf.Session() as tf_session:
   
            # Locate checkpoints and load the latest metagraph and checkpoint
            ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
            saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta')
            saver.restore(tf_session, tf.train.latest_checkpoint(self.dir_checkpoints))

            # Grab input and output tensors
            graph = tf.get_default_graph()
            input_images = graph.get_tensor_by_name('input_images:0')
            tf_is_training = graph.get_tensor_by_name('is_training_flag:0')
            input_lbls = []
            output_logits = []
            for i, N_classes in enumerate(num_classes):
                input_lbls.append(graph.get_tensor_by_name('input_lbls' + str(i) + ':0'))
                output_logits.append(graph.get_tensor_by_name('resnet_v1_101/logits' + str(i) + '/BiasAdd:0'))
                # output_logits.append(graph.get_tensor_by_name('logits' + str(i) + ':0'))
            
            fob_results_list = []
            for i, N_classes in enumerate(num_classes):
                results_list_file = os.path.join(output_folder, self.model + '_Classifications_output' + '{:02d}'.format(i) + '.csv')
                fob = open(results_list_file,'w+')
                fob_results_list.append(fob)
                # Write header
                out_string = 'filename' + ',predict_idx' + ',' + ','.join(['logit_'+ '{:d}'.format(c) for c in range(N_classes)]) + '\n'
                fob.write(out_string)

            # Reset confusion matrices and accumulated loss
            for CMat in CMatsTest:
                CMat.Reset()
            loss_acc = 0

            # Loop through all batches of examples
            for batchCounter in range(math.ceil(float(dataset_sizes[2]) / float(args_evaluate.batch_size))):
                # Grab an image and label batch from the test set
                image_batch, lbl_batch, class_text, height, width, channels, origins = tf_session.run(tf_input_getBatch_test)
                # Build feed dict (labels are not fed during prediction)
                feed_dict = {input_images: image_batch}
                feed_dict.update({tf_is_training: False})
                # Perform evaluation step
                lbl_batch_predict = tf_session.run([output_logits],
                                                    feed_dict=feed_dict
                                                )
                # Store results from evaluation step:
                # calculate confusion matrix for all outputs
                for i, CMat in enumerate(CMatsTest):
                    lbl_idx = lbl_batch[:, i]
                    lbl_idx_predict = np.squeeze(np.argmax(lbl_batch_predict[i][0], axis=3))
                    CMat.Append(lbl_idx, lbl_idx_predict)
                
                # Loop over outputs
                for fob, lbl_predict in zip(fob_results_list,lbl_batch_predict):
                    # Loop over batch elements
                    for origin, lbl in zip(origins, lbl_predict[0]):
                        out_string = origin.decode("utf-8") + ',' + '{:d}'.format(np.squeeze(np.argmax(lbl))) + ',' + ','.join(['{:f}'.format(l) for l in np.squeeze(lbl)]) + '\n'
                        fob.write(out_string)
                
                # Show progress in stdout
                self._show_progress('TE', 0, batchCounter, math.ceil(float(dataset_sizes[2]) / float(args_evaluate.batch_size)) - 1, np.nan, CMatsTest)

            # Print confusion matrix for each output
            print('\n')
            for i, CMat in enumerate(CMatsTest):
                CMat.Save(os.path.join(output_folder, self.model + '_ConfMat_Test_output' + '{:02d}'.format(i) + '.csv'),'csv') # Save confusion matrix
                print(CMat)

            for fob in fob_results_list:
                fob.close()

        utils.show_message('Evaluation completed!', lvl=1)
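
The dataset selection at the top of evaluate() (repeated in train() in Example #11) is a long if/elif chain. A dict lookup would express the same mapping more compactly and fail loudly on unknown names; a sketch using the module names from the original:

_DATASETS = {
    'PSD_Segmented': DS_PSDs.Dataset,
    'seeds_all': DS_Seeds.Dataset,
    'barley': DS_Barley.Dataset,
    'barley_abnormal': DS_Barley_Abnormal.Dataset,
    'barley_d0': DS_Barley_D0.Dataset,
    'barley_next': DS_Barley_Next.Dataset,
    'barley_next_stratified': DS_Barley_Next_Stratified.Dataset,
    'okra': DS_Okra.Dataset,
    'okra_abnormal': DS_Okra_Abnormal.Dataset,
    'okra_next': DS_Okra_next.Dataset,
    'okra_d0': DS_Okra_D0.Dataset,
}

def _select_dataset(name):
    # Raise on unknown dataset names instead of leaving DS undefined.
    try:
        return _DATASETS[name]()
    except KeyError:
        raise ValueError('Unknown dataset: ' + name)
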
Example #11
    def train(self, hparams_string, preprocessing_params='', preprocessing_eval_params=''):
        """ Run training of the network
        Args:
    
        Returns:
        """

        args_train = hparams_parser_train(hparams_string)
        self.batch_size = args_train.batch_size
        self.epoch_max = args_train.epoch_max 
        self.model_version = args_train.model_version
        pretrained_model_path = args_train.pretrained_model
        use_pretrained_model = pretrained_model_path != ''
        pretrain_exclude_input = args_train.pretrain_exclude_input
        pretrain_exclude_output = args_train.pretrain_exclude_output
        optim_vars = args_train.optim_vars
        args_train.preprocessing = preprocessing_params
        args_train.preprocessing_eval = preprocessing_eval_params


        print('Training parameters:')
        print(args_train)

        utils.save_model_configuration(args_train, self.dir_base)
        
        # Load dataset
        if (self.dataset == 'PSD_Segmented'):
            DS = DS_PSDs.Dataset()
        elif (self.dataset == 'seeds_all'):
            DS = DS_Seeds.Dataset()
        elif (self.dataset == 'barley'):
            DS = DS_Barley.Dataset()
        elif (self.dataset == 'barley_abnormal'):
            DS = DS_Barley_Abnormal.Dataset()
        elif (self.dataset == 'barley_d0'):
            DS = DS_Barley_D0.Dataset()
        elif (self.dataset == 'barley_next'):
            DS = DS_Barley_Next.Dataset()
        elif (self.dataset == 'barley_next_stratified'):
            DS = DS_Barley_Next_Stratified.Dataset()
        elif (self.dataset == 'okra'):
            DS = DS_Okra.Dataset()
        elif (self.dataset == 'okra_abnormal'):
            DS = DS_Okra_Abnormal.Dataset()
        elif (self.dataset == 'okra_next'):
            DS = DS_Okra_next.Dataset()
        elif (self.dataset == 'okra_d0'):
            DS = DS_Okra_D0.Dataset()
        else:
            raise ValueError('Unknown dataset: ' + self.dataset)
        tf_dataset_list, dataset_sizes = DS.get_dataset_list(data_source = args_train.data_source,
                                                            data_folder = args_train.data_folder,
                                                            shuffle_before_split=args_train.shuffle_before_split,
                                                            shuffle_seed=args_train.shuffle_seed,
                                                            group_before_split=args_train.group_before_split,
                                                            validation_method=args_train.validation_method,
                                                            holdout_split=args_train.holdout_split,
                                                            cross_folds=10,
                                                            cross_val_fold=None,
                                                            cross_test_fold=0,
                                                            shard_val=args_train.shard_val,
                                                            shard_test=args_train.shard_test,
                                                            stratify_training_set=args_train.stratify_training_set)

        with tf.Session('') as tf_session:
            DS.save_dataset_filenames(os.path.join(self.dir_logs, 'filenames_training.txt'),tf_dataset_list[0], tf_session)
            DS.save_dataset_filenames(os.path.join(self.dir_logs, 'filenames_validation.txt'),tf_dataset_list[1], tf_session)
            DS.save_dataset_filenames(os.path.join(self.dir_logs, 'filenames_test.txt'),tf_dataset_list[2], tf_session)

        class_dicts = DS.get_class_dicts()
        num_classes = [len(class_dict) for class_dict in class_dicts]

        preprocessing = preprocess_factory.preprocess_factory()
        if not (preprocessing_params == ''):
            # Setup preprocessing pipeline
            preprocessing.prep_pipe_from_string(preprocessing_params)

        with tf.name_scope('Training_dataset'):
            tf_dataset_train = tf_dataset_list[0]
            tf_dataset_train = tf_dataset_train.shuffle(buffer_size = 10000, seed = None)
            tf_dataset_train = tf_dataset_train.map(DS._decode_from_TFexample)
            tf_dataset_train = tf_dataset_train.map(preprocessing.pipe)
            tf_dataset_train = tf_dataset_train.batch(batch_size = self.batch_size, drop_remainder=False)
            tf_dataset_train = tf_dataset_train.repeat(count=-1) # -1 --> repeat indefinitely
            # tf_dataset_train = tf_dataset_train.prefetch(buffer_size=3)
            tf_dataset_train_iterator = tf_dataset_train.make_one_shot_iterator()
            input_getBatch = tf_dataset_train_iterator.get_next()

        # Setup preprocessing pipeline
        preprocessing_eval = preprocess_factory.preprocess_factory()
        if not (preprocessing_eval_params == ''):
            preprocessing_eval.prep_pipe_from_string(preprocessing_eval_params)
        elif not (preprocessing_params == ''):  # Use the same preprocessing as the training step if none is specified for the validation step
            preprocessing_eval.prep_pipe_from_string(preprocessing_params)
        else:
            pass  # If no preprocessing is specified, don't do any preprocessing

        with tf.name_scope('Validation_dataset'):
            tf_dataset_val = tf_dataset_list[1]
            if (tf_dataset_val is not None):
                tf_dataset_val = tf_dataset_val.map(DS._decode_from_TFexample)
                tf_dataset_val = tf_dataset_val.map(preprocessing_eval.pipe)
                tf_dataset_val = tf_dataset_val.batch(batch_size = self.batch_size, drop_remainder=False)
                tf_dataset_val = tf_dataset_val.repeat(count=-1) # -1 --> repeat indefinitely
                # tf_dataset_val = tf_dataset_val.prefetch(buffer_size=3)
                tf_dataset_val_iterator = tf_dataset_val.make_one_shot_iterator()
                tf_input_getBatch_val = tf_dataset_val_iterator.get_next()

        # Define input and output layers
        input_images = tf.placeholder(
            dtype = tf.float32, 
            shape = [None] + self.image_dims, 
            name = 'input_images')
        input_lbls = []
        for i, N_classes in enumerate(num_classes):
            input_lbls.append(
                                tf.placeholder(
                                    dtype = tf.uint8,   
                                    shape = [None, 1], # shape = [None, N_classes],
                                    name = 'input_lbls' + str(i)
                                )
                            )
        tf_is_training = tf.placeholder(
            dtype = tf.bool,
            shape = (),
            name = 'is_training_flag'
        )
        # Define model and load pre-trained weights
        output_logits, endpoints, input_layer_name, output_layer_names = self._create_inference(input_images, is_training=tf_is_training, num_classes=num_classes, global_pool=args_train.global_pool)
        if use_pretrained_model:
            exclude_layers = []
            if pretrain_exclude_input:
                exclude_layers += input_layer_name
            if pretrain_exclude_output:
                exclude_layers += output_layer_names
            output_logits, model_vars_restored, model_vars_not_restored = self._load_pretrained_model(output_logits, pretrained_model_path, exclude_layers)
        else:
            model_vars_restored = []
            model_vars_not_restored = list(endpoints.values())
        
        # Setup loss function
        loss = self._create_losses(output_logits, input_lbls, num_classes)

        # Setup optimizer
        variables_to_optimize = None
        if (optim_vars == 'all'):
            variables_to_optimize = None
        elif (optim_vars == 'non_restored'):
            variables_to_optimize = model_vars_not_restored
        else:
            raise NotImplementedError('Value set for optim_vars not implemented. Value = ' + optim_vars)
        
        optimizer_op = self._create_optimizer(loss, variables_to_optimize=variables_to_optimize, learning_rate=args_train.learning_rate)
        
        # Setup summaries
        CMatsTrain = [CM.confusionmatrix(N_classes) for N_classes in num_classes]
        CMatsVal = [CM.confusionmatrix(N_classes) for N_classes in num_classes]
        tf_loss = tf.placeholder(tf.float32, name='loss_mean')
        tf_accuracies = []
        tf_recalls = []
        tf_precisions = []
        tf_F1s = []
        tf_cs_categories = []
        for i, N_classes in enumerate(num_classes):
            tf_accuracies.append(tf.placeholder(dtype = tf.float32, name = 'Overview/Accuracy' + str(i)) )
            with tf.name_scope('output_' + str(i)):
                tf_recall, tf_chart_recall = tf_custom_summaries.class_score_mmm('Recall')
                tf_recalls.append(tf_recall)
                tf_precision, tf_chart_precision = tf_custom_summaries.class_score_mmm('Precision')
                tf_precisions.append(tf_precision)
                tf_F1, tf_chart_F1 = tf_custom_summaries.class_score_mmm('F1')
                tf_F1s.append(tf_F1)
            tf_cs_categories.append(
                                tf_custom_summaries.layout_pb2.Category(
                                    title='output' + str(i),
                                    chart=[tf_chart_F1, tf_chart_precision, tf_chart_recall]
                                )
                            )
        summary_list = tf_accuracies
        summary_dict = {'Overview/loss':         tf_loss}

        layout_summary = tf_custom_summaries.summary_lib.custom_scalar_pb(
                                tf_custom_summaries.layout_pb2.Layout(
                                    category=tf_cs_categories
                                    )
                            )
        self._create_summaries(loss, summary_dict=summary_dict, summary_list=summary_list)
        tf_summary_op = tf.summary.merge_all()
        
        # show network architecture
        # utils.show_all_variables()
        
        gpu_options = tf.GPUOptions(allow_growth=True)
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            
            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())
            
            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver()
            
            # Create Writer object for storing graph and summaries for TensorBoard
            writer_train = tf.summary.FileWriter(os.path.join(self.dir_logs,'train'), sess.graph)
            writer_validation = tf.summary.FileWriter(os.path.join(self.dir_logs,'val'), sess.graph)
            writer_train.add_summary(layout_summary)
            writer_validation.add_summary(layout_summary)
            
            # Reload Tensor values from latest checkpoint
            ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
            epoch_start = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
                epoch_start = int(ckpt_name.split('-')[-1])
            
            # Do training loops
            for epoch_n in range(epoch_start, self.epoch_max):
                
                #################
                # Training step #
                #################
                utils.show_message('Running training epoch no: {0}'.format(epoch_n), lvl=1)
                # Reset confusion matrices and accumulated loss
                for CMat in CMatsTrain:
                    CMat.Reset()
                loss_train = 0
                # Loop through all batches of examples
                for batchCounter in range(math.ceil(float(dataset_sizes[0])/float(self.batch_size))):
                    # Grab an image and label batch from the training set
                    image_batch, lbl_batch, *args = sess.run(input_getBatch)
                    # Build feed dict based on list of labels
                    feed_dict = {input_lbl: np.expand_dims(lbl_batch[:,i],1) for i,input_lbl in enumerate(input_lbls)}
                    feed_dict.update({input_images:    image_batch})
                    feed_dict.update({tf_is_training: True})

                    # Perform training step
                    _, loss_out, lbl_batch_predict = sess.run(
                        [optimizer_op, loss, output_logits],
                        feed_dict=feed_dict)
                    loss_train += loss_out
                    # Store results from training step
                    # Calculate confusion matrix for all outputs
                    for i,CMat in enumerate(CMatsTrain):
                        lbl_idx = lbl_batch[:,i]
                        lbl_idx_predict = np.squeeze(np.argmax(lbl_batch_predict[i], axis=3))
                        CMat.Append(lbl_idx,lbl_idx_predict)
                    # Show progress in stdout
                    self._show_progress('TR', epoch_n, batchCounter, math.ceil(float(dataset_sizes[0])/float(self.batch_size))-1, loss_out, CMatsTrain)

                # Print accumulated confusion matrix for each output
                print('\n')
                for i, CMat in enumerate(CMatsTrain):
                    CMat.Save(os.path.join(self.dir_logs, 'ConfMat_Train_output' + '{:02d}'.format(i) + '.csv'),'csv')
                    print(CMat)
                
                # Create fill in summaries for training log
                feed_dict_summary = {tf_acc: CMat.accuracy() for tf_acc, CMat in zip(tf_accuracies,CMatsTrain)}
                feed_dict_summary.update({tf_rec: [0 if np.isnan(x) else x for x in CMat.recall()] for tf_rec, CMat in zip(tf_recalls,CMatsTrain)})
                feed_dict_summary.update({tf_pre: [0 if np.isnan(x) else x for x in CMat.precision()] for tf_pre, CMat in zip(tf_precisions,CMatsTrain)})
                feed_dict_summary.update({tf_f1:  [0 if np.isnan(x) else x for x in CMat.fScore(beta=1)] for tf_f1, CMat in zip(tf_F1s,CMatsTrain)})
                loss_train = loss_train / (batchCounter + 1)  # average over the number of batches
                feed_dict_summary.update({tf_loss: loss_train})
                summaries = sess.run(tf_summary_op, 
                                    feed_dict=feed_dict_summary)
                # Write summaries to training log
                writer_train.add_summary(summaries, global_step=epoch_n)

                ###################
                # Validation step #
                ###################

                if (tf_dataset_val is not None): # Skip validation step, if there is no validation dataset
                    utils.show_message('Running validation epoch no: {0}'.format(epoch_n),lvl=1)
                    # Reset confusion matrices and accumulated loss
                    for CMat in CMatsVal:
                        CMat.Reset()
                    loss_val = 0
                    # Loop through all batches of examples
                    for batchCounter in range(math.ceil(float(dataset_sizes[1])/float(self.batch_size))):
                        # Grab an image and label batch from the validation set
                        image_batch, lbl_batch, *args = sess.run(tf_input_getBatch_val)
                        # Build feed dict based on list of labels
                        feed_dict = {input_lbl: np.expand_dims(lbl_batch[:,i],1) for i,input_lbl in enumerate(input_lbls)}
                        feed_dict.update({input_images:    image_batch})
                        feed_dict.update({tf_is_training: False})

                        # Perform evaluation step
                        lbl_batch_predict, loss_out = sess.run(
                                                            [output_logits, loss],
                                                            feed_dict=feed_dict
                                                        )
                        # Store results from evaluation step
                        # Calculate confusion matrix for all outputs
                        for i,CMat in enumerate(CMatsVal):
                            lbl_idx = lbl_batch[:,i]
                            lbl_idx_predict = np.squeeze(np.argmax(lbl_batch_predict[i], axis=3))
                            CMat.Append(lbl_idx,lbl_idx_predict)
                        loss_val += loss_out
                        # Show progress in stdout
                        self._show_progress('VA', epoch_n, batchCounter, math.ceil(float(dataset_sizes[1])/float(self.batch_size))-1, loss_out, CMatsVal)
                    
                    # Print confusion matrix for each output
                    print('\n')
                    for i, CMat in enumerate(CMatsVal):
                        CMat.Save(os.path.join(self.dir_logs, 'ConfMat_Val_output' + '{:02d}'.format(i) + '.csv'),'csv') # Save confusion matrix
                        print(CMat)

                    # Create fill in summaries for validation log
                    feed_dict_summary = {tf_acc: CMat.accuracy() for tf_acc, CMat in zip(tf_accuracies,CMatsVal)}
                    feed_dict_summary.update({tf_rec: [0 if np.isnan(x) else x for x in CMat.recall()] for tf_rec, CMat in zip(tf_recalls,CMatsVal)})
                    feed_dict_summary.update({tf_pre: [0 if np.isnan(x) else x for x in CMat.precision()] for tf_pre, CMat in zip(tf_precisions,CMatsVal)})
                    feed_dict_summary.update({tf_f1:  [0 if np.isnan(x) else x for x in CMat.fScore(beta=1)] for tf_f1, CMat in zip(tf_F1s,CMatsVal)})
                    loss_val = loss_val / (batchCounter + 1)  # average over the number of batches
                    feed_dict_summary.update({tf_loss: loss_val})
                    summaries = sess.run(tf_summary_op, 
                                        feed_dict=feed_dict_summary)
                    # Write summaries to validation log
                    writer_validation.add_summary(summaries, global_step=epoch_n)
                
                # Save checkpoint for this epoch
                if epoch_n % 1 == 0:
                    saver.save(sess,os.path.join(self.dir_checkpoints, self.model + '.model'), global_step=epoch_n)
Example #12
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # Set tensorflow log level (0 = all, 1 >= no INFO, 2 >= no WARNING, 3 >= no ERROR)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # or any {'0', '1', '2', '3'}

    # Make dataset
    if args.make_dataset:
        utils.show_message('Fetching raw dataset: {0}'.format(args.dataset),
                           lvl=1)
        dataset_manager.make_dataset(args.dataset)

    # Process dataset
    if args.process_dataset:
        utils.show_message('Processing raw dataset: {0}'.format(args.dataset),
                           lvl=1)
        dataset_manager.process_dataset(args.dataset)

    # Build and train model
    if args.train_model:
        utils.show_message('Configuring and Training Network: {0}'.format(
            args.model),
                           lvl=1)

        if args.model == 'BasicModel':
            model = BasicModel(dataset=args.dataset, id=args.id)
            model.train(hparams_string=args.hparams)

        elif args.model == 'LogReg_example':
            model = logreg_example(dataset=args.dataset, id=args.id)
            model.train(hparams_string=args.hparams)

        elif args.model == 'VGG':
            model = VGG(dataset=args.dataset, id=args.id)
            model.train(hparams_string=args.hparams)

        elif args.model == 'ResNet':
            model = ResNet(dataset=args.dataset, id=args.id)
            model.train(hparams_string=args.hparams,
                        preprocessing_params=args.preprocess,
                        preprocessing_eval_params=args.preprocess_eval)

    # Evaluate model
    if args.evaluate_model:
        utils.show_message('Evaluating Network: {0}'.format(args.model), lvl=1)

        if args.model == 'BasicModel':
            model = BasicModel(dataset=args.dataset, id=args.id)
            model.evaluate(hparams_string=args.hparams)

        elif args.model == 'LogReg_example':
            model = logreg_example(dataset=args.dataset, id=args.id)
            model.evaluate(hparams_string=args.hparams)

        elif args.model == 'ResNet':
            model = ResNet(dataset=args.dataset, id=args.id)
            model.evaluate(hparams_string=args.hparams,
                           preprocessing_params=args.preprocess)

    # Visualize results
    if args.visualize:
        print('Visualizing Results')
        #################################
        ####### To Be Implemented #######
        #################################
        raise NotImplementedError(
            'Visualization has not yet been implemented.')
Example #13
    def train(self, hparams_string):
        """ Run training of the network
        Args:
    
        Returns:
        """

        args_train = hparams_parser_train(hparams_string)
        self.batch_size = args_train.batch_size
        self.epoch_max = args_train.epoch_max
        self.use_imagenet = args_train.use_imagenet
        self.model_version = args_train.model_version

        utils.save_model_configuration(args_train, self.dir_base)

        # Use dataset for loading in datasamples from .tfrecord (https://www.tensorflow.org/programmers_guide/datasets#consuming_tfrecord_data)
        # The iterator will get a new batch from the dataset each time a sess.run() is executed on the graph.
        dataset = tf.data.TFRecordDataset(self.dateset_filenames)
        dataset = dataset.map(util_data.decode_image)  # decoding the tfrecord
        dataset = dataset.map(
            self._preProcessData)  # potential local preprocessing of data
        dataset = dataset.shuffle(buffer_size=10000, seed=None)
        dataset = dataset.batch(batch_size=self.batch_size)
        iterator = dataset.make_initializable_iterator()
        input_getBatch = iterator.get_next()

        input_images = tf.placeholder(dtype=tf.float32,
                                      shape=[None] + self.image_dims,
                                      name='input_images')
        input_lbls = tf.placeholder(dtype=tf.float32,
                                    shape=[None, self.lbls_dim],
                                    name='input_lbls')

        # define model, loss, optimizer and summaries.
        output_logits = self._create_inference(input_images)
        loss = self._create_losses(output_logits, input_lbls)
        optimizer_op = self._create_optimizer(loss)
        summary_op = self._create_summaries(loss)

        # show network architecture
        utils.show_all_variables()

        if self.use_imagenet:
            if self.model_version == 'VGG16':
                path_imagenet_ckpt = os.path.join(self.dir_checkpoints,
                                                  'vgg_16.ckpt')
                if not tf.gfile.Exists(path_imagenet_ckpt):
                    url_imagenet_model = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
                    utils.download_and_uncompress_tarball(
                        url_imagenet_model, self.dir_checkpoints)

                variables_to_restore = slim.get_model_variables('vgg_16')
                variables_to_restore = variables_to_restore[:
                                                            -6]  # ignore fc layers
                init_fn = slim.assign_from_checkpoint_fn(
                    path_imagenet_ckpt, variables_to_restore)

            elif self.model_version == 'VGG19':
                path_imagenet_ckpt = os.path.join(self.dir_checkpoints,
                                                  'vgg_19.ckpt')
                if not tf.gfile.Exists(path_imagenet_ckpt):
                    url_imagenet_model = "http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz"
                    utils.download_and_uncompress_tarball(
                        url_imagenet_model, self.dir_checkpoints)

                variables_to_restore = slim.get_model_variables('vgg_19')
                variables_to_restore = variables_to_restore[:
                                                            -6]  # ignore fc layers
                init_fn = slim.assign_from_checkpoint_fn(
                    path_imagenet_ckpt, variables_to_restore)

        with tf.Session() as sess:

            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())

            if self.use_imagenet:
                init_fn(sess)

            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver()

            # Create Writer object for storing graph and summaries for TensorBoard
            writer = tf.summary.FileWriter(self.dir_logs, sess.graph)

            # Reload Tensor values from latest checkpoint
            ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
            epoch_start = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
                epoch_start = int(ckpt_name.split('-')[-1])

            iterationCnt = 0
            # Do training loops
            for epoch_n in range(epoch_start, self.epoch_max):

                # Initiate or Re-initiate iterator
                sess.run(iterator.initializer)

                # Test model output before any training
                # if epoch_n == 0:
                #     summary_loss = sess.run(summary_op)
                #     writer.add_summary(summary_loss, global_step=-1)

                utils.show_message(
                    'Running training epoch no: {0}'.format(epoch_n))
                while True:
                    try:
                        image_batch, lbl_batch = sess.run(input_getBatch)
                        _, summary_loss = sess.run([optimizer_op, summary_op],
                                                   feed_dict={
                                                       input_images:
                                                       image_batch,
                                                       input_lbls: lbl_batch
                                                   })

                        writer.add_summary(summary_loss,
                                           global_step=iterationCnt)
                        iterationCnt += 1

                    except tf.errors.OutOfRangeError:
                        # Do some evaluation after each Epoch
                        break

                if epoch_n % 1 == 0:
                    saver.save(sess,
                               os.path.join(self.dir_checkpoints,
                                            self.model + '.model'),
                               global_step=epoch_n)
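
`util_data.decode_image` (used in the dataset.map() call above) is assumed to parse one serialized TFRecord example into an image/label pair. A minimal sketch with a hypothetical feature schema; the actual keys, image format, and label encoding in the repository may differ:

def decode_image(serialized_example):
    # Hypothetical TFRecord schema; adjust keys and shapes to the actual data.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.image.decode_png(features['image/encoded'], channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    label = tf.cast(features['image/class/label'], tf.int32)
    return image, label
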
Example #14
    def evaluate(self, hparams_string):
        """ Run experiments to evaluate the performance of the model
        Args:
    
        Returns:
        """

        args_train = utils.load_model_configuration(self.dir_base)
        args_evaluate = hparams_parser_evaluate(hparams_string)

        self.unstructured_noise_dim = args_train.unstructured_noise_dim
        self.info_var_dim = args_train.info_var_dim

        summary_samples = args_evaluate.summary_samples
        num_samples = args_evaluate.num_samples
        chunk_size = args_evaluate.chunk_size

        # setup inference
        input_lbls = tf.placeholder(dtype=tf.float32,
                                    shape=[None, self.lbls_dim],
                                    name='input_test_lbls')
        input_noise = tf.placeholder(dtype=tf.float32,
                                     shape=[None, self.unstructured_noise_dim],
                                     name='input_test_noise')
        input_info_noise = tf.placeholder(dtype=tf.float32,
                                          shape=[None, self.info_var_dim],
                                          name='input_test_info_noise')

        generated_images = self.__generator(input_noise,
                                            input_lbls,
                                            input_info_noise,
                                            is_training=False)
        logits_source, logits_class, _ = self.__discriminator(
            generated_images, is_training=False)

        summary_images = tf.placeholder(
            dtype=tf.float32,
            shape=[self.lbls_dim * summary_samples] + self.image_dims,
            name='summary_images')
        eval_summary_img = tfgan.eval.image_reshaper(tf.concat(
            summary_images, 0),
                                                     num_cols=self.lbls_dim)

        # select check point file
        ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)

        if args_evaluate.epoch_no is None:
            checkpoint_path = ckpt.model_checkpoint_path
            dir_results_eval = os.path.join(self.dir_results, 'Evaluation')
        else:
            all_checkpoint_paths = ckpt.all_model_checkpoint_paths[:]
            suffix_match = '-' + str(args_evaluate.epoch_no)
            ckpt_match = [
                f for f in all_checkpoint_paths if f.endswith(suffix_match)
            ]

            if ckpt_match:
                checkpoint_path = ckpt_match[0]
                dir_results_eval = os.path.join(
                    self.dir_results,
                    'Evaluation_' + str(args_evaluate.epoch_no))

            else:
                checkpoint_path = ckpt.model_checkpoint_path
                dir_results_eval = os.path.join(self.dir_results, 'Evaluation')

        # Generate folders for evaluation samples
        utils.checkfolder(dir_results_eval)

        with tf.Session() as sess:
            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())

            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver()

            # Reload Tensor values from latest or specified checkpoint
            utils.show_message(
                'Restoring model parameters from: {0}'.format(checkpoint_path))
            saver.restore(sess, checkpoint_path)

            ## Generate summary image
            if args_evaluate.gen_summary:
                np.random.seed(seed=0)
                eval_unstructured_noise = np.random.uniform(
                    low=-1.0,
                    high=1.0,
                    size=[summary_samples, self.unstructured_noise_dim])
                eval_unstructured_noise = np.repeat(eval_unstructured_noise,
                                                    self.lbls_dim,
                                                    axis=0)

                eval_info_noise = np.random.uniform(
                    low=-1.0,
                    high=1.0,
                    size=[summary_samples, self.info_var_dim])
                eval_info_noise = np.repeat(eval_info_noise,
                                            self.lbls_dim,
                                            axis=0)

                # Create a one-hot label per class and tile the identity
                # block along axis 0, pairing every noise vector with
                # every class
                eval_lbls = np.eye(self.lbls_dim)
                eval_lbls = np.tile(eval_lbls, (summary_samples, 1))

                summary_imgs = sess.run(generated_images,
                                        feed_dict={
                                            input_noise:
                                            eval_unstructured_noise,
                                            input_info_noise: eval_info_noise,
                                            input_lbls: eval_lbls
                                        })

                summary_img = sess.run(
                    eval_summary_img, feed_dict={summary_images: summary_imgs})

                utils.save_image_local(
                    summary_img, self.dir_results,
                    'evalSummary_{0}'.format(args_evaluate.epoch_no))

            ## Generate samples for each class
            if args_evaluate.gen_samples:
                # Seed RNG to reproduce results
                np.random.seed(seed=0)
                eval_noise = np.random.uniform(
                    low=-1.0,
                    high=1.0,
                    size=[num_samples, self.unstructured_noise_dim])
                eval_noise_info = np.random.uniform(
                    low=-1.0, high=1.0, size=[num_samples, self.info_var_dim])

                chunks_eval_noise = [
                    eval_noise[i:i + chunk_size]
                    for i in range(0, num_samples, chunk_size)
                ]
                chunks_eval_noise_info = [
                    eval_noise_info[i:i + chunk_size]
                    for i in range(0, num_samples, chunk_size)
                ]

                for idx_class in range(self.lbls_dim):
                    utils.show_message('Generating samples for class ' +
                                       str(idx_class))

                    dir_results_eval_samples = os.path.join(
                        dir_results_eval, 'Samples', str(idx_class))
                    utils.checkfolder(dir_results_eval_samples)

                    for idx_chunk in range(
                            int(np.ceil(num_samples / chunk_size))):
                        eval_lbls = np.zeros(shape=[
                            len(chunks_eval_noise[idx_chunk]), self.lbls_dim
                        ])
                        eval_lbls[:, idx_class] = 1

                        eval_images, logits_s, logits_c = sess.run(
                            [generated_images, logits_source, logits_class],
                            feed_dict={
                                input_noise: chunks_eval_noise[idx_chunk],
                                input_info_noise:
                                chunks_eval_noise_info[idx_chunk],
                                input_lbls: eval_lbls
                            })

                        # Append per-sample source/class logits to a score file
                        scores_path = os.path.join(dir_results_eval_samples,
                                                   'scores.txt')
                        with open(scores_path, 'a') as f:
                            for idx_sample in range(
                                    len(chunks_eval_noise[idx_chunk])):
                                utils.save_image_local(
                                    eval_images[idx_sample, :, :, :],
                                    dir_results_eval_samples,
                                    'Sample_{0}'.format(idx_sample +
                                                        idx_chunk * chunk_size))
                                f.write('Sample_{0},{1},{2}\n'.format(
                                    idx_sample + idx_chunk * chunk_size,
                                    logits_s[idx_sample], logits_c[idx_sample, :]))
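Note: the checkpoint-selection logic in Example #14 can be summarized as
"match the '-<epoch>' suffix, else fall back to the latest checkpoint". A
condensed sketch of that logic, assuming checkpoints were saved with
global_step=<epoch>:

import tensorflow as tf

def select_checkpoint(dir_checkpoints, epoch_no=None):
    # Pick the checkpoint ending in '-<epoch_no>'; fall back to the most
    # recent checkpoint when no epoch is given or no match exists.
    ckpt = tf.train.get_checkpoint_state(dir_checkpoints)
    if ckpt is None:
        raise IOError('No checkpoints found in ' + dir_checkpoints)
    if epoch_no is not None:
        suffix = '-' + str(epoch_no)
        matches = [p for p in ckpt.all_model_checkpoint_paths
                   if p.endswith(suffix)]
        if matches:
            return matches[0]
    return ckpt.model_checkpoint_path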
Example #15
    def train(self, hparams_string):
        """ Run training of the network
        Args:
            hparams_string: command-line style string of training
                hyperparameters, parsed by hparams_parser_train.

        Returns:
            None. Checkpoints, summaries and sample images are written
            to disk during training.
        """
        args_train = hparams_parser_train(hparams_string)

        self.batch_size = args_train.batch_size
        self.epoch_max = args_train.epoch_max

        self.unstructured_noise_dim = args_train.unstructured_noise_dim
        self.info_var_dim = args_train.info_var_dim
        self.n_testsamples = args_train.n_testsamples

        self.d_learning_rate = args_train.lr_discriminator
        self.g_learning_rate = args_train.lr_generator
        self.d_iter = args_train.d_iter

        self.gp_lambda = args_train.gp_lambda
        self.class_scale_d = args_train.class_scale_d
        self.class_scale_g = args_train.class_scale_g

        self.info_scale_d = args_train.info_scale_d
        self.info_scale_g = args_train.info_scale_g

        self.backup_frequency = args_train.backup_frequency

        self.shards_idx_test = args_train.shards_idx_test

        utils.save_model_configuration(args_train, self.dir_base)

        # Create folder for saving training results
        dir_results_train = os.path.join(self.dir_results, 'Training')
        utils.checkfolder(dir_results_train)

        for class_n in range(self.lbls_dim):
            dir_result_train_class = os.path.join(dir_results_train,
                                                  str(class_n).zfill(2))
            utils.checkfolder(dir_result_train_class)

        if 0 in self.shards_idx_test:
            dataset_filenames = self.dataset_filenames
        else:
            self.shards_idx_test = np.subtract(self.shards_idx_test, 1)
            shards_idx_training = np.delete(range(len(self.dataset_filenames)),
                                            self.shards_idx_test)
            dataset_filenames = [
                self.dataset_filenames[i] for i in shards_idx_training
            ]

            utils.show_message('Training Data:')
            print(dataset_filenames)

        # Setup preprocessing pipeline
        preprocessing = preprocess_factory.preprocess_factory()

        # Dataset specific preprocessing
        if self.dataset == 'MNIST':
            pass

        elif self.dataset == 'PSD_Nonsegmented':
            pass

        elif self.dataset == 'PSD_Segmented':
            preprocessing.prep_pipe_from_string(
                "pad_to_size;{'height': 566, 'width': 566, 'constant': -1.0};random_rotation;{};crop_to_size;{'height': 400, 'width': 400};resize;{'height': 128, 'width': 128}"
            )

        # Use dataset for loading in datasamples from .tfrecord (https://www.tensorflow.org/programmers_guide/datasets#consuming_tfrecord_data)
        # The iterator will get a new batch from the dataset each time a sess.run() is executed on the graph.
        dataset = tf.data.TFRecordDataset(dataset_filenames)
        dataset = dataset.shuffle(buffer_size=10000, seed=None)
        dataset = dataset.map(util_data.decode_image)  # decoding the tfrecord
        dataset = dataset.map(
            self._genLatentCodes)  # preprocess data and perform augmentation
        dataset = dataset.map(preprocessing.pipe)
        dataset = dataset.batch(batch_size=self.batch_size)
        iterator = dataset.make_initializable_iterator()
        input_getBatch = iterator.get_next()

        # Create input placeholders
        input_images = tf.placeholder(dtype=tf.float32,
                                      shape=[self.batch_size] +
                                      self.image_dims,
                                      name='input_images')
        input_lbls = tf.placeholder(dtype=tf.float32,
                                    shape=[None, self.lbls_dim],
                                    name='input_lbls')
        input_unstructured_noise = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.unstructured_noise_dim],
            name='input_unstructured_noise')
        input_info_noise = tf.placeholder(dtype=tf.float32,
                                          shape=[None, self.info_var_dim],
                                          name='input_info_noise')
        input_test_lbls = tf.placeholder(dtype=tf.float32,
                                         shape=[
                                             self.n_testsamples**np.minimum(
                                                 2, self.info_var_dim),
                                             self.lbls_dim
                                         ],
                                         name='input_test_lbls')
        input_test_noise = tf.placeholder(dtype=tf.float32,
                                          shape=[
                                              self.n_testsamples**np.minimum(
                                                  2, self.info_var_dim),
                                              self.unstructured_noise_dim
                                          ],
                                          name='input_test_noise')
        input_test_info_noise = tf.placeholder(
            dtype=tf.float32,
            shape=[
                self.n_testsamples**np.minimum(2, self.info_var_dim),
                self.info_var_dim
            ],
            name='input_test_info_noise')

        # Define model, loss, optimizer and summaries.
        logits_source, logits_class, logits_info, artificial_images = self._create_inference(
            input_images, input_lbls, input_unstructured_noise,
            input_info_noise)
        loss_discriminator, loss_generator = self._create_losses(
            logits_source, logits_class, logits_info, artificial_images,
            input_lbls, input_info_noise)
        train_op_discriminator, train_op_generator = self._create_optimizer(
            loss_discriminator, loss_generator)
        summary_op_dloss, summary_op_gloss, summary_op_img, summary_img = self._create_summaries(
            loss_discriminator, loss_generator, input_test_noise,
            input_test_lbls, input_test_info_noise)

        # show network architecture
        utils.show_all_variables()

        # create constant test variable to inspect changes in the model
        self.combinations_info_var = list(
            itertools.combinations(range(self.info_var_dim), 2))

        test_noise, test_info = self._genTestInput()

        with tf.Session() as sess:
            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())

            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver(max_to_keep=500)

            # Create Writer object for storing graph and summaries for TensorBoard
            writer = tf.summary.FileWriter(self.dir_logs, sess.graph)

            # Reload Tensor values from latest checkpoint
            ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
            epoch_start = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
                epoch_start = int(ckpt_name.split('-')[-1]) + 1

            iterationCnt = 0
            for epoch_n in range(epoch_start, self.epoch_max):

                # Test model output before any training
                if epoch_n == 0:
                    for class_n in range(self.lbls_dim):
                        test_lbls = np.zeros([
                            self.n_testsamples**np.minimum(
                                2, self.info_var_dim), self.lbls_dim
                        ])
                        test_lbls[:, class_n] = 1

                        for i in range(len(test_info)):
                            test_info_combi = test_info[i]

                            _, summaryImg = sess.run(
                                [summary_op_img, summary_img],
                                feed_dict={
                                    input_test_noise: test_noise,
                                    input_test_lbls: test_lbls,
                                    input_test_info_noise: test_info_combi
                                })

                            dir_result_train_class = os.path.join(
                                dir_results_train, str(class_n).zfill(2))
                            if self.info_var_dim < 2:
                                filename_temp = 'Epoch_{0}_LatentVar_1'.format(
                                    epoch_n)
                            else:
                                filename_temp = 'Epoch_{0}_LatentCombi_{1}_{2}'.format(
                                    epoch_n, self.combinations_info_var[i][0],
                                    self.combinations_info_var[i][1])

                            # writer.add_summary(summaryImg_tb, global_step=epoch_n)
                            utils.save_image_local(summaryImg,
                                                   dir_result_train_class,
                                                   filename_temp)

                # Initiate or Re-initiate iterator
                sess.run(iterator.initializer)

                ### ----------------------------------------------------------
                ### Update model
                if epoch_n % 100 == 0 or epoch_n < 25:
                    utils.show_message(
                        'Running training epoch no: {0}'.format(epoch_n))

                while True:
                    # for idx in range(0, num_batches):
                    try:
                        for _ in range(self.d_iter):
                            image_batch, lbl_batch, unst_noise_batch, info_noise_batch = sess.run(
                                input_getBatch)

                            # A partial final batch is treated as the end
                            # of the epoch
                            if image_batch.shape[0] != self.batch_size:
                                raise OutOfRangeError

                            _, summary_dloss = sess.run(
                                [train_op_discriminator, summary_op_dloss],
                                feed_dict={
                                    input_images: image_batch,
                                    input_lbls: lbl_batch,
                                    input_unstructured_noise: unst_noise_batch,
                                    input_info_noise: info_noise_batch
                                })

                        writer.add_summary(summary_dloss,
                                           global_step=iterationCnt)

                        _, summary_gloss = sess.run(
                            [train_op_generator, summary_op_gloss],
                            feed_dict={
                                input_images: image_batch,
                                input_lbls: lbl_batch,
                                input_unstructured_noise: unst_noise_batch,
                                input_info_noise: info_noise_batch
                            })

                        writer.add_summary(summary_gloss,
                                           global_step=iterationCnt)
                        iterationCnt += 1

                    except (tf.errors.OutOfRangeError, OutOfRangeError):
                        # Test current model
                        for class_n in range(self.lbls_dim):
                            test_lbls = np.zeros([
                                self.n_testsamples**np.minimum(
                                    2, self.info_var_dim), self.lbls_dim
                            ])
                            test_lbls[:, class_n] = 1

                            for i in range(len(test_info)):
                                test_info_combi = test_info[i]

                                _, summaryImg = sess.run(
                                    [summary_op_img, summary_img],
                                    feed_dict={
                                        input_test_noise: test_noise,
                                        input_test_lbls: test_lbls,
                                        input_test_info_noise: test_info_combi
                                    })

                                dir_result_train_class = os.path.join(
                                    dir_results_train, str(class_n).zfill(2))
                                if self.info_var_dim < 2:
                                    filename_temp = 'Epoch_{0}_LatentVar_1'.format(
                                        epoch_n)
                                else:
                                    filename_temp = 'Epoch_{0}_LatentCombi_{1}_{2}'.format(
                                        epoch_n,
                                        self.combinations_info_var[i][0],
                                        self.combinations_info_var[i][1])

                                # writer.add_summary(summaryImg_tb, global_step=epoch_n)
                                utils.save_image_local(summaryImg,
                                                       dir_result_train_class,
                                                       filename_temp)

                        break

                # Save model variables to checkpoint
                if (epoch_n + 1) % self.backup_frequency == 0:
                    saver.save(sess,
                               os.path.join(self.dir_checkpoints,
                                            self.model + '.model'),
                               global_step=epoch_n)
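Note: the placeholder shape n_testsamples**min(2, info_var_dim) together
with the itertools.combinations call suggests that _genTestInput (not shown
in this example) sweeps each pair of structured latent variables over a 2-D
grid. A sketch of that presumed construction, assuming info_var_dim >= 2 and
a neutral value of 0 for the variables that are held fixed:

import itertools
import numpy as np

def make_test_info_grids(info_var_dim, n_testsamples):
    # One grid per pair of latent variables: the pair sweeps a regular
    # n_testsamples x n_testsamples grid, all other variables stay at 0.
    axis = np.linspace(-1.0, 1.0, n_testsamples)
    grids = []
    for i, j in itertools.combinations(range(info_var_dim), 2):
        grid = np.zeros([n_testsamples ** 2, info_var_dim])
        a, b = np.meshgrid(axis, axis)
        grid[:, i] = a.ravel()
        grid[:, j] = b.ravel()
        grids.append(grid)
    return grids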
Example #16
def process(dataset_part):
    """Extracts the raw archive and converts it to sharded TFRecords.

    Args:
      dataset_part: Either 'Nonsegmented' or 'Segmented', selecting which
        part of the dataset to process.
    """
    if dataset_part == 'Nonsegmented':
        _dir_raw = _DIR_RAW_NONSEGMENTED
        _dir_processed = _DIR_PROCESSED_NONSEGMENTED
        setname = 'Nonsegmented'
        #training_filename = _get_output_filename(_DIR_PROCESSED_NONSEGMENTED, 'train')
        # testing_filename = _get_output_filename(_DIR_PROCESSED_NONSEGMENTED, 'test')
    else:
        _dir_raw = _DIR_RAW_SEGMENTED
        _dir_processed = _DIR_PROCESSED_SEGMENTED
        setname = 'Segmented' 
        #training_filename = _get_output_filename(_DIR_PROCESSED_SEGMENTED, 'train')
        # testing_filename = _get_output_filename(_DIR_PROCESSED_SEGMENTED, 'test')

    #if tf.gfile.Exists(training_filename): #and tf.gfile.Exists(testing_filename):
    #    print('Dataset files already exist. Exiting without re-creating them.')
    #    return


    if _EXCLUDED_GRASSES:
        exclude_list = ['Black-grass', 'Common wheat', 'Loose Silky-bent']
    else:
        exclude_list = []

    # First, process training data:

    data_filename = _dir_raw  # path to the raw .zip archive
    archive = zipfile.ZipFile(data_filename)
    archive.extractall(_dir_processed)
    filenames, class_names = _get_filenames_and_classes(_dir_processed, [setname], exclude_list)

    class_dict = dict(zip(class_names, range(len(class_names))))
    utils.save_dict(class_dict, _dir_processed, 'class_dict.json')

    for shard_n in range(_NUM_SHARDS):
        utils.show_message('Processing shard %d/%d' % (shard_n+1,_NUM_SHARDS))
        tf_filename = _get_output_filename(_dir_processed, shard_n)

        with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
            _convert_to_tfrecord(filenames[shard_n], class_dict, tfrecord_writer)

    tmp_dir = os.path.join(_dir_processed, setname)
    tf.gfile.DeleteRecursively(tmp_dir)

    # # First, process test data:
    # with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    #     data_filename = os.path.join(_dir_raw)
    #     archive = zipfile.ZipFile(data_filename)
    #     archive.extractall(_dir_processed)
    #     # filenames, class_names = _get_filenames_and_classes(_dir_processed, [setname, 'test'], exclude_list)
    #     class_dict = dict(zip(class_names, range(len(class_names))))

    #     _convert_to_tfrecord(filenames, class_dict, tfrecord_writer)

    #     tmp_dir = os.path.join(_dir_processed, setname)
    #     tf.gfile.DeleteRecursively(tmp_dir)

    print('\nFinished converting the PSD %s dataset!' % setname)
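Note: the shard loop above follows the standard TF1 pattern of one
TFRecordWriter per shard file. A condensed sketch; the filename pattern and
the pre-split examples_per_shard structure (a list of lists of serialized
tf.train.Example strings) are assumptions made for the illustration:

import os
import tensorflow as tf

def write_tfrecord_shards(examples_per_shard, dir_out):
    num_shards = len(examples_per_shard)
    for shard_n, examples in enumerate(examples_per_shard):
        # One output file per shard, e.g. data_000-of-004.tfrecord
        path = os.path.join(dir_out, 'data_%03d-of-%03d.tfrecord'
                            % (shard_n, num_shards))
        with tf.python_io.TFRecordWriter(path) as writer:
            for ex in examples:
                writer.write(ex)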
Example #17
    def train(self, hparams_string):
        """ Run training of the network
        Args:
            hparams_string: command-line style string of training
                hyperparameters, parsed by hparams_parser_train.

        Returns:
            None. Checkpoints and TensorBoard summaries are written
            during training.
        """
        args_train = hparams_parser_train(hparams_string)

        self.batch_size = args_train.batch_size
        self.epoch_max = args_train.epoch_max

        utils.save_model_configuration(args_train, self.dir_base)

        # Use dataset for loading in datasamples from .tfrecord (https://www.tensorflow.org/programmers_guide/datasets#consuming_tfrecord_data)
        # The iterator will get a new batch from the dataset each time a sess.run() is executed on the graph.
        dataset = tf.data.TFRecordDataset(self.dateset_filenames)
        dataset = dataset.map(util_data.decode_image)  # decoding the tfrecord
        dataset = dataset.map(
            self._preProcessData)  # potential local preprocessing of data
        dataset = dataset.shuffle(buffer_size=10000, seed=None)
        dataset = dataset.batch(batch_size=self.batch_size)
        iterator = dataset.make_initializable_iterator()
        inputs = iterator.get_next()

        # unpacking depends on what self._preProcessData emits
        in_image, in_label = inputs

        # define model, loss, optimizer and summaries.
        outputs = self._create_inference(in_image)
        loss = self._create_losses(outputs, in_label)
        optimizer_op = self._create_optimizer(loss)
        summary_op = self._create_summaries(loss)

        # show network architecture (called after the model is built so
        # its variables exist)
        utils.show_all_variables()

        with tf.Session() as sess:

            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())

            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver()

            # Create Writer object for storing graph and summaries for TensorBoard
            writer = tf.summary.FileWriter(self.dir_logs, sess.graph)

            # Reload Tensor values from latest checkpoint
            ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
            epoch_start = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
                # resume from the epoch after the restored checkpoint
                epoch_start = int(ckpt_name.split('-')[-1]) + 1

            iterationCnt = 0
            # Do training loops
            for epoch_n in range(epoch_start, self.epoch_max):

                # Initiate or Re-initiate iterator
                sess.run(iterator.initializer)

                # Test model output before any training
                if epoch_n == 0:
                    summary = sess.run(summary_op)
                    writer.add_summary(summary, global_step=-1)

                utils.show_message(
                    'Running training epoch no: {0}'.format(epoch_n))
                while True:
                    try:
                        _, summary = sess.run([optimizer_op, summary_op])

                        writer.add_summary(summary, global_step=iterationCnt)
                        iterationCnt += 1

                    except tf.errors.OutOfRangeError:
                        # Do some evaluation after each Epoch
                        break

                # Save a checkpoint every epoch (the modulus sets the frequency)
                if epoch_n % 1 == 0:
                    saver.save(sess,
                               os.path.join(self.dir_checkpoints,
                                            self.model + '.model'),
                               global_step=epoch_n)
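Note: the while/try loop above is the canonical TF1 way to run exactly one
pass over a tf.data pipeline per epoch. A self-contained sketch of just that
pattern, using a toy dataset:

import tensorflow as tf

dataset = tf.data.Dataset.range(10).batch(4)
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
    for epoch_n in range(3):
        # Re-initiate the iterator at the start of every epoch
        sess.run(iterator.initializer)
        while True:
            try:
                print(epoch_n, sess.run(next_batch))
            except tf.errors.OutOfRangeError:
                break  # dataset exhausted; next epoch re-initializes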
Example #18
    def train(self, hparams_string):
        """ Run training of the network
        Args:
            hparams_string: command-line style string of training
                hyperparameters, parsed by hparams_parser_train.

        Returns:
            None. Checkpoints, summaries and sample images are written
            to disk during training.
        """
        args_train = hparams_parser_train(hparams_string)

        self.batch_size = args_train.batch_size
        self.epoch_max = args_train.epoch_max

        self.unstructured_noise_dim = args_train.unstructured_noise_dim

        self.d_learning_rate = args_train.lr_discriminator
        self.g_learning_rate = args_train.lr_generator

        self.d_iter = args_train.d_iter
        self.n_testsamples = args_train.n_testsamples

        self.gp_lambda = args_train.gp_lambda
        self.class_scale_d = args_train.class_scale_d
        self.class_scale_g = args_train.class_scale_g

        self.backup_frequency = args_train.backup_frequency

        utils.save_model_configuration(args_train, self.dir_base)

        # Use dataset for loading in datasamples from .tfrecord (https://www.tensorflow.org/programmers_guide/datasets#consuming_tfrecord_data)
        # The iterator will get a new batch from the dataset each time a sess.run() is executed on the graph.
        dataset = tf.data.TFRecordDataset(self.dateset_filenames)
        dataset = dataset.map(util_data.decode_image)  # decoding the tfrecord
        dataset = dataset.map(self._genLatentCodes)
        dataset = dataset.shuffle(buffer_size=10000, seed=None)
        dataset = dataset.batch(batch_size=self.batch_size)
        iterator = dataset.make_initializable_iterator()
        input_getBatch = iterator.get_next()

        # Create input placeholders
        input_images = tf.placeholder(dtype=tf.float32,
                                      shape=[self.batch_size] +
                                      self.image_dims,
                                      name='input_images')
        input_lbls = tf.placeholder(dtype=tf.float32,
                                    shape=[None, self.lbls_dim],
                                    name='input_lbls')
        input_unstructured_noise = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.unstructured_noise_dim],
            name='input_unstructured_noise')
        input_test_lbls = tf.placeholder(
            dtype=tf.float32,
            shape=[self.n_testsamples * self.lbls_dim, self.lbls_dim],
            name='input_test_lbls')
        input_test_noise = tf.placeholder(
            dtype=tf.float32,
            shape=[
                self.n_testsamples * self.lbls_dim, self.unstructured_noise_dim
            ],
            name='input_test_noise')

        # Define model, loss, optimizer and summaries.
        logits_source, logits_class, artificial_images = self._create_inference(
            input_images, input_lbls, input_unstructured_noise)
        loss_discriminator, loss_generator = self._create_losses(
            logits_source, logits_class, artificial_images, input_lbls)
        train_op_discriminator, train_op_generator = self._create_optimizer(
            loss_discriminator, loss_generator)
        summary_op_dloss, summary_op_gloss, summary_op_img, summary_img = self._create_summaries(
            loss_discriminator, loss_generator, input_test_noise,
            input_test_lbls)

        # show network architecture
        utils.show_all_variables()

        # create constant test variable to inspect changes in the model
        test_noise, test_lbls = self._genTestInput(
            self.lbls_dim, n_samples=self.n_testsamples)

        dir_results_train = os.path.join(self.dir_results, 'Training')
        utils.checkfolder(dir_results_train)

        with tf.Session() as sess:
            # Initialize all model Variables.
            sess.run(tf.global_variables_initializer())

            # Create Saver object for loading and storing checkpoints
            saver = tf.train.Saver(max_to_keep=100)

            # Create Writer object for storing graph and summaries for TensorBoard
            writer = tf.summary.FileWriter(self.dir_logs, sess.graph)

            # Reload Tensor values from latest checkpoint
            ckpt = tf.train.get_checkpoint_state(self.dir_checkpoints)
            epoch_start = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
                epoch_start = int(ckpt_name.split('-')[-1]) + 1

            iterationCnt = 0
            for epoch_n in range(epoch_start, self.epoch_max):

                # Test model output before any training
                if epoch_n == 0:
                    summaryImg_tb, summaryImg = sess.run(
                        [summary_op_img, summary_img],
                        feed_dict={
                            input_test_noise: test_noise,
                            input_test_lbls: test_lbls
                        })

                    writer.add_summary(summaryImg_tb, global_step=-1)
                    utils.save_image_local(summaryImg, dir_results_train,
                                           'Epoch_' + str(-1))

                # Initiate or Re-initiate iterator
                sess.run(iterator.initializer)

                ### ----------------------------------------------------------
                ### Update model
                utils.show_message(
                    'Running training epoch no: {0}'.format(epoch_n))
                while True:
                    # for idx in range(0, num_batches):
                    try:
                        for _ in range(self.d_iter):
                            image_batch, lbl_batch, unst_noise_batch = sess.run(
                                input_getBatch)

                            # A partial final batch is treated as the end
                            # of the epoch
                            if image_batch.shape[0] != self.batch_size:
                                raise OutOfRangeError

                            _, summary_dloss = sess.run(
                                [train_op_discriminator, summary_op_dloss],
                                feed_dict={
                                    input_images: image_batch,
                                    input_lbls: lbl_batch,
                                    input_unstructured_noise: unst_noise_batch
                                })

                        writer.add_summary(summary_dloss,
                                           global_step=iterationCnt)

                        _, summary_gloss = sess.run(
                            [train_op_generator, summary_op_gloss],
                            feed_dict={
                                input_images: image_batch,
                                input_lbls: lbl_batch,
                                input_unstructured_noise: unst_noise_batch
                            })

                        writer.add_summary(summary_gloss,
                                           global_step=iterationCnt)
                        iterationCnt += 1

                    except (tf.errors.OutOfRangeError, OutOfRangeError):
                        # Test current model
                        summaryImg_tb, summaryImg = sess.run(
                            [summary_op_img, summary_img],
                            feed_dict={
                                input_test_noise: test_noise,
                                input_test_lbls: test_lbls
                            })

                        writer.add_summary(summaryImg_tb, global_step=epoch_n)
                        utils.save_image_local(summaryImg, dir_results_train,
                                               'Epoch_' + str(epoch_n))

                        break

                # Save model variables to checkpoint
                if (epoch_n + 1) % self.backup_frequency == 0:
                    saver.save(sess,
                               os.path.join(self.dir_checkpoints, self.model),
                               global_step=epoch_n)
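Note: _genTestInput is not shown in this example, but the placeholder shape
[n_testsamples * lbls_dim, ...] and the repeat/tile pattern used for
evaluation in Example #14 suggest the following construction. Treat it as an
assumption about the hidden helper rather than its actual implementation:

import numpy as np

def gen_test_input(lbls_dim, n_samples, noise_dim):
    # Fixed seed so the same test inputs are reused across epochs,
    # making the saved sample grids comparable over training.
    rng = np.random.RandomState(0)
    noise = rng.uniform(-1.0, 1.0, size=[n_samples, noise_dim])
    # Repeat each noise vector once per class ...
    noise = np.repeat(noise, lbls_dim, axis=0)
    # ... and pair it with one-hot labels cycling through all classes.
    lbls = np.tile(np.eye(lbls_dim), (n_samples, 1))
    return noise, lbls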