def eval(split, train_dir):
    """ Evaluates the given network on a subset of the data-split (1000 images) and prints the top-5 and top-1 Accuracy
    
    Args: 
      split: Chooses split of flower dataset to test the network
      train_dir: Directory in which checkpoints get loaded
    Returns:
      -
    """
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        dataset = dataVisualisation.get_split(split_name=split,
                                              dataset_dir=flowers_data_dir,
                                              label_type="one")
        images, _, labels = load_batch(dataset,
                                       batch_size=100,
                                       height=image_size,
                                       width=image_size)

        logits = my_cnn(images, is_training=False)
        predictions = tf.argmax(logits, 1)

        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/Accuracy':
            slim.metrics.streaming_accuracy(predictions, labels),
            'eval/Recall@5':
            slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5)
        })
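        # aggregate_metric_map returns two dicts keyed by metric name: one with
        # the current metric value tensors and one with the ops that update the
        # running statistics each time they are evaluated.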

        # NOTE: despite the "Error" names, these summaries record the raw
        # recall/accuracy values, not error rates.
        op = tf.summary.scalar('top5percentError',
                               names_to_values['eval/Recall@5'],
                               collections=[])
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
        op = tf.summary.scalar('top1percentError',
                               names_to_values['eval/Accuracy'],
                               collections=[])
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        print('Running evaluation loop...')
        checkpoint_path = tf.train.latest_checkpoint(train_dir)

        metric_values = slim.evaluation.evaluate_once(
            master='',
            checkpoint_path=checkpoint_path,
            logdir=train_dir,
            num_evals=30,  # TODO Adjust number of evaluations
            summary_op=tf.summary.merge_all(),
            eval_op=list(names_to_updates.values()),
            final_op=list(names_to_values.values()))

        print("Evaluation 'eval' started.")
        names_to_values = dict(zip(names_to_values.keys(), metric_values))
        for name in names_to_values:
            print('%s: %f' % (name, names_to_values[name]))

        return names_to_values['eval/Accuracy'], names_to_values[
            'eval/Recall@5']
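
A minimal usage sketch (the split name and train_dir are placeholders taken from elsewhere in this code; flowers_data_dir, image_size, my_cnn and load_batch are assumed to be configured at module level as above):

    top1, top5 = eval(split='train_set3', train_dir='mydata/resnet_finetuned_plantclef2015_1')
    print('top-1 accuracy: %f, top-5 recall: %f' % (top1, top5))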
def train(split, train_steps, train_dir):
    """ Trains the network and saves weights and summaries to checkpoints in train_dir

    Args:
      split: Split of the flower dataset on which to train the network
      train_steps: Number of training steps
      train_dir: Directory in which checkpoints are stored
    """
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)  # show INFO logs

        dataset = dataVisualisation.get_split(split, flowers_data_dir)

        images, _, labels = load_batch(dataset, height=image_size, width=image_size, is_training=True)
        
        one_hot_labels = slim.one_hot_encoding(labels, num_classes)

        # Forward pass with non-flipped images
        logits = my_cnn(images, is_training=True)

        tf.losses.softmax_cross_entropy(one_hot_labels, logits)  
    
        total_loss = tf.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)
        
        # Learning rate decay
        global_step = variables.get_or_create_global_step()
                
        # Piecewise-constant learning rate from boundaries and interval values.
        boundaries = [tf.constant(100000, dtype="int64"),
                      tf.constant(200000, dtype="int64"),
                      tf.constant(300000, dtype="int64")]
        values = [0.001, 0.0001, 0.00001, 0.000001]

        # Alternative schedules tried earlier:
        #boundaries = [tf.constant(10000, dtype="int64"), tf.constant(100000, dtype="int64"), tf.constant(200000, dtype="int64"), tf.constant(300000, dtype="int64")]
        #values = [0.001, 0.0005, 0.0001, 0.00001, 0.000001]
        #boundaries = [tf.constant(100000, dtype="int64"), tf.constant(200000, dtype="int64"), tf.constant(300000, dtype="int64"), tf.constant(400000, dtype="int64")]
        #values = [0.001, 0.0001, 0.00001, 0.000001, 0.000001]
        my_learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
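        # With these settings the schedule is (tf.train.piecewise_constant
        # returns values[0] while global_step <= boundaries[0], and so on):
        #   step <= 100000           -> lr 0.001
        #   100000 < step <= 200000  -> lr 0.0001
        #   200000 < step <= 300000  -> lr 0.00001
        #   step > 300000            -> lr 0.000001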

        # Specify the optimizer and create the train op:
        optimizer = tf.train.MomentumOptimizer(learning_rate=my_learning_rate, momentum=0.9)
        train_op = slim.learning.create_train_op(total_loss, optimizer)
        
        #TODO Choose init_fn or get_init_fn()
        #checkpoint_path = tf.train.latest_checkpoint(train_dir)
        #init_fn = slim.assign_from_checkpoint_fn(checkpoint_path, slim.get_variables_to_restore())
        
        # Run the training:
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            log_every_n_steps=1,
            init_fn=get_init_fn(),
            number_of_steps=train_steps,
            global_step=global_step)

    
    print('Finished training. Last batch loss %f' % final_loss)
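
get_init_fn() is referenced above but not shown in this section. A minimal sketch of what it presumably does, following the commented-out alternative in the code (slim.assign_from_checkpoint_fn over the variables to restore); checkpoints_dir and the restore set are assumptions:

    def get_init_fn():
        # Hypothetical sketch: restore weights from the newest checkpoint in
        # checkpoints_dir; a fine-tuning setup would typically exclude the
        # freshly initialized final layers here.
        checkpoint_path = tf.train.latest_checkpoint(checkpoints_dir)
        return slim.assign_from_checkpoint_fn(checkpoint_path,
                                              slim.get_variables_to_restore())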
def eval(split, train_dir):
    # This might take a few minutes.
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        
        dataset = dataVisualisation.get_split(split, flowers_data_dir) 
        images, _, labels = load_batch(dataset,
                                       batch_size=100,
                                       height=image_size,
                                       width=image_size)  # TODO is load_batch really necessary here? At least adjust batch_size
        
        logits = my_cnn(images, is_training=False)
        predictions = tf.argmax(logits, 1)  
        
                     
        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
            'eval/Recall@5': slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5),
            #'eval/Recall@1000': slim.metrics.streaming_sparse_recall_at_k(logits, labels, 1000)  # should be 1
        })
        
        op = tf.summary.scalar('top5percentError', names_to_values['eval/Recall@5'], collections=[])  
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
        op = tf.summary.scalar('top1percentError', names_to_values['eval/Accuracy'], collections=[])  
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        # for name, value in names_to_values.items():
        #     summary_name = 'eval/%s' % name
        #     op = tf.summary.scalar(summary_name, value, collections=[])
        #     op = tf.Print(op, [value], summary_name)
        #     tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        print('Running evaluation loop...')
        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        
        #print_tensors_in_checkpoint_file(checkpoint_path, tensor_name ='my_fc_1/weights', all_tensors=False)
        #print_tensors_in_checkpoint_file(checkpoint_path, tensor_name ='resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/weights', all_tensors=False)
        
        metric_values = slim.evaluation.evaluate_once(
            master='',
            checkpoint_path=checkpoint_path,
            logdir=train_dir,
            num_evals=10,  # TODO Adjust number of evaluations
            summary_op=tf.summary.merge_all(),
            eval_op=list(names_to_updates.values()),
            final_op=list(names_to_values.values()))
        
        print("Evaluation 'eval' started.")
        names_to_values = dict(zip(names_to_values.keys(), metric_values))
        for name in names_to_values:
            print('%s: %f' % (name, names_to_values[name]))
def eval(split, eval_steps, train_dir, fc_after, level):
    """ Evaluates the neural network saved in train_dir

    Args:
      split: Split of the flower dataset on which to evaluate the network
      eval_steps: Number of evaluation steps
      train_dir: Directory in which checkpoints are found
      fc_after: Where to cut off the network
      level: Class level to evaluate on (family, genus, species, organ)

    Returns:
      The top-1 accuracy and top-5 recall computed by the streaming metrics
    """
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        
        dataset = dataVisualisation.get_split(split_name=split, dataset_dir=flowers_data_dir, label_type="multiple")
        images, _, label_species, labels_genus, labels_family, labels_organ = load_batch_intermediate(dataset, height=224, width=224, is_training=False, batch_size=100)

        abstraction_levels = {"family": labels_family, "genus": labels_genus, "species": label_species, "organs": labels_organ}
        levels_length = {"family": 124, "genus": 516, "species": 1000, "organs": 7}

        labels = tf.stack(abstraction_levels.get(level, label_species))

        one_hot_labels = slim.one_hot_encoding(labels, levels_length.get(level, 1000))  # unused during evaluation
        
        
        
        # is_training must be False during evaluation (batch norm, dropout)
        logits, _ = my_intermediate_cnn(images, is_training=False, fc_after=fc_after, num_classes=levels_length.get(level, 1000))
        logits = slim.softmax(logits)
        predictions = tf.argmax(logits, 1)  # tf.argmax already returns int64
 
        
                     
        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
            'eval/Recall@5': slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5)
        })
        
        op = tf.summary.scalar('top5percentError', names_to_values['eval/Recall@5'], collections=[])  
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
        op = tf.summary.scalar('top1percentError', names_to_values['eval/Accuracy'], collections=[])  
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)


        print('Running evaluation loop...')
        checkpoint_path = tf.train.latest_checkpoint(train_dir)
         
        metric_values = slim.evaluation.evaluate_once(
            master='',
            checkpoint_path=checkpoint_path,
            logdir=train_dir,
            num_evals=eval_steps,  # TODO Adjust number of evaluations
            summary_op=tf.summary.merge_all(),
            eval_op=list(names_to_updates.values()),
            final_op=list(names_to_values.values()))
        
        print("Evaluation 'eval' started.")
        names_to_values = dict(zip(names_to_values.keys(), metric_values))
        for name in names_to_values:
            print('%s: %f' % (name, names_to_values[name]))
        
        return names_to_values['eval/Accuracy'], names_to_values['eval/Recall@5']
def train(split, train_steps, train_dir, fc_after, level, checkpoints_dir=checkpoints_dir, checkpoint='model.ckpt-150000'):
    """ Trains the neural network and saves the weights and summary information into a new checkpoint file in train_dir

    Args:
      split: Split of the flower dataset on which to train the network
      train_steps: Number of training steps
      train_dir: Directory in which checkpoints are stored and from which old checkpoints are loaded
      fc_after: Where to cut off the network
      level: Class level to train on (family, genus, species, organ)
      checkpoints_dir: Directory containing the pre-trained checkpoint
      checkpoint: Name of the pre-trained checkpoint file
    """

    
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)  # show INFO logs

        dataset = dataVisualisation.get_split(split_name=split, dataset_dir=flowers_data_dir, label_type="multiple")
        images, _, label_species, labels_genus, labels_family, labels_organ = load_batch_intermediate(dataset, height=224, width=224, is_training=True, batch_size=100)

        abstraction_levels = {"family": labels_family, "genus": labels_genus, "species": label_species, "organs": labels_organ}
        levels_length = {"family": 124, "genus": 516, "species": 1000, "organs": 7}

        labels = tf.stack(abstraction_levels.get(level, label_species))

        one_hot_labels = slim.one_hot_encoding(labels, levels_length.get(level, 1000))

        # Forward pass with non-flipped images
        logits, _ = my_intermediate_cnn(images, is_training=True, fc_after=fc_after, num_classes=levels_length.get(level, 1000))

        tf.losses.softmax_cross_entropy(one_hot_labels, logits)
        total_loss = tf.losses.get_total_loss()
        tf.summary.scalar('losses/Total_Loss', total_loss)
        
        
        # Learning rate decay
        global_step = variables.get_or_create_global_step()
        boundaries = [tf.constant(100000, dtype="int64"),
                      tf.constant(200000, dtype="int64"),
                      tf.constant(300000, dtype="int64")]
        values = [0.001, 0.0001, 0.00001, 0.000001]
        my_learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
        
        #for v in tf.trainable_variables():
        #    print(v)
            
        #for v in slim.get_variables(scope="resnet_v2_50/fc_intermediate/"):
        #    print(v)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.MomentumOptimizer(learning_rate=my_learning_rate, momentum=0.9)
        # Only the new intermediate fully-connected layer is trained.
        train_op = slim.learning.create_train_op(total_loss=total_loss,
                                                 optimizer=optimizer,
                                                 variables_to_train=slim.get_variables(scope="fc_intermediate"))
        
        saver = tf.train.Saver(max_to_keep=1)
 
        
        # Run the training:
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            log_every_n_steps=50,
            init_fn=get_init_fn(fc_after),
            number_of_steps=train_steps,
            global_step=global_step,
            saver=saver)

    print('Finished training. Last batch loss %f' % final_loss)
def train(split, train_steps, train_dir):
    """ Trains the neural network and saves the weights and summary information into a new checkpoint file in train_dir

    Args:
      split: Split of the flower dataset on which to train the network
      train_steps: Number of training steps
      train_dir: Directory in which checkpoints are stored and from which old checkpoints are loaded
    """

    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)  # showing INFO logs

        dataset = dataVisualisation.get_split(split_name=split,
                                              dataset_dir=flowers_data_dir,
                                              label_type="one")

        images, _, labels = load_batch(dataset,
                                       height=image_size,
                                       width=image_size,
                                       is_training=True)

        one_hot_labels = slim.one_hot_encoding(labels, num_classes)

        # Forward pass with non-flipped images
        logits = my_cnn(images, is_training=True)

        tf.losses.softmax_cross_entropy(one_hot_labels, logits)

        total_loss = tf.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)

        # Learning rate decay
        global_step = variables.get_or_create_global_step()

        # Piecewise constant from boundaries and interval values.
        boundaries = [
            tf.constant(100000, dtype="int64"),
            tf.constant(200000, dtype="int64"),
            tf.constant(300000, dtype="int64")
        ]
        values = [0.001, 0.0001, 0.00001, 0.000001]

        my_learning_rate = tf.train.piecewise_constant(global_step, boundaries,
                                                       values)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.MomentumOptimizer(learning_rate=my_learning_rate,
                                               momentum=0.9)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training:
        final_loss = slim.learning.train(train_op,
                                         logdir=train_dir,
                                         log_every_n_steps=1,
                                         init_fn=get_init_fn(),
                                         number_of_steps=train_steps,
                                         global_step=global_step)

    print('Finished training. Last batch loss %f' % final_loss)
def final_evaluation(label_dir, dataset_dir, filename="predictions.txt"):
    number_images = 100  # 8000 for the full test set
    correct = 0  # TODO REMOVE

    checkpoint_paths = [
        "mydata/resnet_finetuned_plantclef2015_1/model.ckpt-145552",
        "mydata/resnet_finetuned_plantclef2015_1/model.ckpt-145552",
        "mydata/resnet_finetuned_plantclef2015_1/model.ckpt-145552"
    ]
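    # NOTE: all three entries above point at the same checkpoint; a real
    # ensemble would list three differently trained models, as in the variant
    # further below.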

    output_list = []
    labels_list = []

    for index in range(len(checkpoint_paths)):
        with tf.Graph().as_default():
            dataset = dataVisualisation.get_split('train_set3', dataset_dir)

            data_provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                shuffle=False,
                common_queue_capacity=8000,
                common_queue_min=0)

            image_raw, label = data_provider.get(['image', 'label'])
            """                 
            imaget = my_resnet_preprocessing.preprocess_image(image_raw, 224, 224, is_training=False)
            image,augmented_image1,augmented_image2, augmented_image3, augmented_image4,augmented_image5, labels = tf.train.batch([imaget,imaget,imaget,imaget,imaget,imaget, label],  batch_size=1,
                                            num_threads=1,
                                            capacity=12 * 1)
            """

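            # Test-time augmentation: the preprocessing yields the full image
            # plus five augmented crops (center and four corners); each view is
            # classified separately below and the logits are combined.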
            image, augmented_image1, augmented_image2, augmented_image3, augmented_image4, augmented_image5 = my_resnet_preprocessing.preprocess_for_final_run(
                image_raw, 224, 224)

            image, augmented_image1, augmented_image2, augmented_image3, augmented_image4, augmented_image5, labels = tf.train.batch(
                [
                    image, augmented_image1, augmented_image2,
                    augmented_image3, augmented_image4, augmented_image5, label
                ],
                batch_size=1,
                num_threads=1,
                capacity=2 * 1)

            logits1 = resNetClassifier.my_cnn(image,
                                              is_training=False,
                                              dropout_rate=1)
            logits2 = resNetClassifier.my_cnn(augmented_image1,
                                              is_training=False,
                                              dropout_rate=1)
            logits3 = resNetClassifier.my_cnn(augmented_image2,
                                              is_training=False,
                                              dropout_rate=1)
            logits4 = resNetClassifier.my_cnn(augmented_image3,
                                              is_training=False,
                                              dropout_rate=1)
            logits5 = resNetClassifier.my_cnn(augmented_image4,
                                              is_training=False,
                                              dropout_rate=1)
            logits6 = resNetClassifier.my_cnn(augmented_image5,
                                              is_training=False,
                                              dropout_rate=1)

            total_output = np.empty([number_images * 1, dataset.num_classes])
            total_labels = np.empty([number_images * 1], dtype=np.int32)
            offset = 0

            #init_fn = slim.assign_from_checkpoint_fn(checkpoint_paths[index], slim.get_model_variables())

            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                saver = tf.train.Saver()

                saver.restore(sess, checkpoint_paths[index])
                #init_fn(sess)
                merged = tf.summary.merge_all()
                test_writer = tf.summary.FileWriter(
                    '/home/lolek/FlowerIdentificationM/models/slim/mydata/test/'
                    + '/train')
                visualize_writer = tf.summary.FileWriter(
                    '/home/lolek/FlowerIdentificationM/models/slim/mydata/test/'
                    + '/visualize')

                #for v in tf.trainable_variables():
                #    print(v.name)
                #print_tensors_in_checkpoint_file(checkpoint_paths[index], tensor_name =None, all_tensors=False)
                #var = [v for v in tf.trainable_variables() if v.name == "my_fc_1/weights:0"]
                #print("my_fc_1/weights \n", sess.run(var))
                #print_tensors_in_checkpoint_file(checkpoint_paths[index], tensor_name ='my_fc_1/weights', all_tensors=False)
                #var2 = [v for v in tf.trainable_variables() if v.name == "resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/weights:0"]
                #print("resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/weights \n", sess.run(var2))
                #print_tensors_in_checkpoint_file(checkpoint_paths[index], tensor_name ='resnet_v2_50/block1/unit_1/bottleneck_v2/conv3/weights', all_tensors=False)

                # Visualize Kernels
                tf.get_variable_scope().reuse_variables()
                weights = tf.get_variable("resnet_v2_50/conv1/weights")
                grid = kernel_visualization.put_kernels_on_grid(weights)
                sum1 = tf.summary.image('conv1/kernels', grid, max_outputs=1)
                _, summary1 = sess.run([merged, sum1])
                visualize_writer.add_summary(summary1, 2)

                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                for i in range(number_images):
                    print('step: %d/%d' % (i + 1, number_images))
                    """
                    image_t1, image_t2 = sess.run([image, augmented_image1])
                    plt.figure()
                    plt.imshow(image_t1[0, :, :, :].astype(np.uint8))
                    plt.title('title')
                    plt.axis('off')
                    plt.show()
                    
                    plt.figure()
                    plt.imshow(image_t2[0, :, :, :].astype(np.uint8))
                    plt.title('title')
                    plt.axis('off')
                    plt.show()
                
                    """
                    # NOTE: creating these summary ops inside the loop adds new
                    # nodes to the graph on every iteration.
                    sum1 = tf.summary.image('final_eval_whole_image1', image)
                    sum2 = tf.summary.image('final_eval_image_center1',
                                            augmented_image1)
                    sum3 = tf.summary.image('final_eval_top_left1',
                                            augmented_image2)
                    sum4 = tf.summary.image('final_eval_bottom_left1',
                                            augmented_image3)
                    sum5 = tf.summary.image('final_eval_top_right1',
                                            augmented_image4)
                    sum6 = tf.summary.image('final_eval_bottom_right1',
                                            augmented_image5)
                    _, summary1, summary2, summary3, summary4, summary5, summary6 = sess.run(
                        [merged, sum1, sum2, sum3, sum4, sum5, sum6])
                    test_writer.add_summary(summary1, 1)
                    test_writer.add_summary(summary2, 1)
                    test_writer.add_summary(summary3, 1)
                    test_writer.add_summary(summary4, 1)
                    test_writer.add_summary(summary5, 1)
                    test_writer.add_summary(summary6, 1)

                    logit1, logit2, logit3, logit4, logit5, logit6, media_id = sess.run(
                        [
                            logits1, logits2, logits3, logits4, logits5,
                            logits6, labels
                        ])
                    print(media_id, " ", np.argmax(logit1[0]), " ",
                          np.argmax(logit2[0]), " ", np.argmax(logit3[0]), " ",
                          np.argmax(logit4[0]), " ", np.argmax(logit5[0]), " ",
                          np.argmax(logit6[0]))
                    #print(np.amax(logit1[0]), " ",np.amax(logit2[0]), " ",np.amax(logit3[0]), " ",np.amax(logit4[0]), " ",np.amax(logit5[0]), " ",np.amax(logit6[0]))
                    #print(len(logit1[0]))

                    media_id = media_id[0]

                    logits = tuple(
                        max(i, j) for i, j in zip(logit1[0], logit2[0]))
                    logits = tuple(
                        max(i, j) for i, j in zip(logits, logit3[0]))
                    logits = tuple(
                        max(i, j) for i, j in zip(logits, logit4[0]))
                    logits = tuple(
                        max(i, j) for i, j in zip(logits, logit5[0]))
                    logits = tuple(
                        max(i, j) for i, j in zip(logits, logit6[0]))
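                    # Equivalent element-wise max vote, vectorized:
                    # logits = np.max(np.stack([logit1[0], logit2[0], logit3[0],
                    #                           logit4[0], logit5[0], logit6[0]]),
                    #                axis=0)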
                    """
                    logits = tuple(i + j for i, j in zip(logit1[0], logit2[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit3[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit4[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit5[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit6[0]))
                    logits = [x / 6 for x in logits] 
                    """

                    logits = numpy_softmax(logits)

                    if media_id == np.argmax(logits):  # TODO REMOVE
                        correct = correct + 1  # TODO REMOVE

                    total_output[offset:offset + 1] = logits
                    total_labels[offset:offset + 1] = media_id
                    offset += 1
                coord.request_stop()
                coord.join(threads)

            output_list.append(total_output)
            labels_list.append(total_labels)

        print(correct)  # TODO REMOVE

    for i in range(len(output_list)):
        logits = tf.cast(tf.constant(output_list[i]), dtype=tf.float32)
        predictions = tf.nn.softmax(logits)
        labels = tf.constant(labels_list[i])
        top1_op = tf.nn.in_top_k(predictions, labels, 1)
        top5_op = tf.nn.in_top_k(predictions, labels, 5)

        with tf.Session() as sess:
            top1, top5 = sess.run([top1_op, top5_op])

        print('Top 1 accuracy: %f' % (np.sum(top1) / float(number_images)))
        print('Top 5 accuracy: %f' % (np.sum(top5) / float(number_images)))

    for i in range(number_images):
        image_id = labels_list[0][i]

        prediction1 = np.amax(output_list[0][i])

        prediction2 = np.amax(output_list[1][i])
        prediction3 = np.amax(output_list[2][i])

        print(prediction1, " ", prediction2, " ", prediction3)

        # Find best class with highest prediction (softmax) score
        if prediction1 > prediction2:
            if prediction1 > prediction3:
                prediction = np.argmax(output_list[0][i])
                probability = prediction1

            else:
                prediction = np.argmax(output_list[2][i])
                probability = prediction3
        else:
            if prediction2 > prediction3:
                prediction = np.argmax(output_list[1][i])
                probability = prediction2
            else:
                prediction = np.argmax(output_list[2][i])
                probability = prediction3

        class_id = dataset_utils.read_label_file(label_dir)[prediction]

        image_id = dataset_utils.read_label_file(label_dir)[image_id]  # TODO REMOVE!!!

        print('<%s;%s;%f>\n' % (image_id, class_id, probability))
        # Save the predictions
        labels_filename = os.path.join(label_dir, filename)
        with tf.gfile.Open(labels_filename, 'a') as f:
            f.write('<%s;%s;%f>\n' %
                    (image_id, class_id,
                     probability))  # <ImageId;ClassId;Probability>
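
numpy_softmax (also referenced as my_functions.numpy_softmax elsewhere in this code) is not shown in this section; a minimal, numerically stable sketch of what it presumably computes:

    import numpy as np

    def numpy_softmax(logits):
        # Shift by the maximum for numerical stability, then normalize.
        logits = np.asarray(logits, dtype=np.float64)
        exps = np.exp(logits - np.max(logits))
        return exps / np.sum(exps)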
def final_evaluation_generic(label_dir, dataset_dir, checkpoint_paths, preprocessing_methods, filename="predictions.txt"):
    """
    Evaluates a CNN on the test set and saves the predictions in the form <ImageId;ClassId;Probability> into a text file (filename).
    Can use multiple models; not limited to 3.

    Args:
        label_dir: Directory where the labels dictionary can be found (mapping from one-hot encodings to class_id)
        dataset_dir: Directory where the test dataset can be found
        checkpoint_paths: Checkpoints of the models to use
        preprocessing_methods: Corresponding preprocessing methods for the models
        filename: Name of the text file in which to save the predictions

    Returns:
        Saves the predictions into "filename"
    """
    number_images = 8000

    output_list = []
    labels_list = []
    
    for checkpoint, preprocessing_method in zip(checkpoint_paths, preprocessing_methods):
        with tf.Graph().as_default() as graph:
            dataset = dataVisualisation.get_split('test_set', dataset_dir, label_type="one")
            data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
                                                                           shuffle=False,
                                                                           common_queue_capacity=8000,
                                                                           common_queue_min=0)

            image_raw, label = data_provider.get(['image', 'label'])

            # Preprocessing returns the network input expected by this model
            image = preprocessing_method(image_raw, 224, 224)

            images, labels = tf.train.batch([image, label],
                                            batch_size=1,
                                            num_threads=1,
                                            capacity=2 * 1)
            
            
            
            
            logits1 = resNetClassifier.my_cnn(images, is_training=False, dropout_rate=1)

            total_output = np.empty([number_images * 1, dataset.num_classes])
            total_labels = np.empty([number_images * 1], dtype=np.int32)
            offset = 0
            
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                saver = tf.train.Saver()
                saver.restore(sess, checkpoint)

                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                for i in range(number_images):
                    #print('step: %d/%d' % (i + 1, number_images))

                    logit1, media_id = sess.run([logits1, labels])

                    media_id = media_id[0]

                    # Pass the logits through a softmax to obtain "probabilities"
                    logits = my_functions.numpy_softmax(logit1)

                    total_output[offset:offset + 1] = logits
                    total_labels[offset:offset + 1] = media_id
                    offset += 1
                coord.request_stop()
                coord.join(threads)

            output_list.append(total_output)
            labels_list.append(total_labels)

            
    with tf.gfile.Open(filename, 'a') as f:
        for i in range(number_images):
            image_id = labels_list[0][i]

            for p in range(1000):
                predictions = []
                for index in range(len(output_list)):
                    predictions.append(output_list[index][i][p])

                probability = np.sum(predictions) / len(predictions)  # average over models; np.amax would take the best

                class_id = dataset_utils.read_label_file(label_dir)[p]
                f.write('%s;%s;%f\n' % (image_id, class_id, probability))  # <ImageId;ClassId;Probability>
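
A hedged usage sketch (the checkpoint paths are placeholders, and the preprocessing lambda assumes my_resnet_preprocessing.preprocess_image has the signature used in the commented-out code above):

    final_evaluation_generic(
        label_dir=flowers_data_dir,
        dataset_dir=flowers_data_dir,
        checkpoint_paths=["mydata/resnet_finetuned_plantclef2015_5/model.ckpt-150000",
                          "mydata/resnet_finetuned_plantclef2015_6/model.ckpt-150000"],
        preprocessing_methods=[lambda img, h, w: my_resnet_preprocessing.preprocess_image(img, h, w, is_training=False)] * 2,
        filename="predictions.txt")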
def final_evaluation(label_dir, dataset_dir, filename="predictions.txt", visualize_kernel=False):
    """
    Evaluates a CNN on the test set and saves the predictions in the form <ImageId;ClassId;Probability> into a text file (filename).

    Args:
        label_dir: Directory where the labels dictionary can be found (mapping from one-hot encodings to class_id)
        dataset_dir: Directory where the test dataset can be found
        filename: Name of the text file in which to save the predictions
        visualize_kernel: Whether to visualize the kernels of the first convolution layer

    Returns:
        Saves the predictions into "filename"
    """
    number_images = 8000

    # Choose the three networks to evaluate
    checkpoint_paths = ["mydata/resnet_finetuned_plantclef2015_5/model.ckpt-150000",
                        "mydata/resnet_finetuned_plantclef2015_6/model.ckpt-150000",
                        "mydata/resnet_finetuned_plantclef2015_7/model.ckpt-102500"]
    
    output_list = []
    labels_list = []
    
    for index in range(len(checkpoint_paths)):
        with tf.Graph().as_default() as graph:
            dataset = dataVisualisation.get_split('test_set', dataset_dir, label_type="one")

            data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
                                                                           shuffle=False,
                                                                           common_queue_capacity=8000,
                                                                           common_queue_min=0)

            image_raw, label = data_provider.get(['image', 'label'])

            # Preprocessing returns the original image, a center crop and 4 corner crops with adjusted color values
            image, augmented_image1, augmented_image2, augmented_image3, augmented_image4, augmented_image5 = my_resnet_preprocessing.preprocess_for_final_run2(image_raw, 224, 224)

            image, augmented_image1, augmented_image2, augmented_image3, augmented_image4, augmented_image5, labels = tf.train.batch(
                [image, augmented_image1, augmented_image2, augmented_image3, augmented_image4, augmented_image5, label],
                batch_size=1,
                num_threads=1,
                capacity=2 * 1)
            
            
            
            
            logits1 = resNetClassifier.my_cnn(image, is_training=False, dropout_rate=1)
            logits2 = resNetClassifier.my_cnn(augmented_image1, is_training=False, dropout_rate=1)
            logits3 = resNetClassifier.my_cnn(augmented_image2, is_training=False, dropout_rate=1)
            logits4 = resNetClassifier.my_cnn(augmented_image3, is_training=False, dropout_rate=1)
            logits5 = resNetClassifier.my_cnn(augmented_image4, is_training=False, dropout_rate=1)
            logits6 = resNetClassifier.my_cnn(augmented_image5, is_training=False, dropout_rate=1)

            total_output = np.empty([number_images * 1, dataset.num_classes])
            total_labels = np.empty([number_images * 1], dtype=np.int32)
            offset = 0
            
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                saver = tf.train.Saver()
                saver.restore(sess, checkpoint_paths[index])
              
                
                if visualize_kernel:
                    tf.get_variable_scope().reuse_variables()

                    #for v in tf.global_variables():
                    #    print(v.name)

                    weights = tf.get_variable("resnet_v2_50/conv1/weights")
                    print(weights.get_shape()[0].value, weights.get_shape()[1].value,
                          weights.get_shape()[2].value, weights.get_shape()[3].value)

                    # Slice out two of the output filters so the grid stays readable
                    weights = tf.slice(weights, [0, 0, 0, 1],
                                       [weights.get_shape()[0].value,
                                        weights.get_shape()[1].value,
                                        weights.get_shape()[2].value, 2])

                    grid = kernel_visualization.put_kernels_on_grid(weights)

                    # Evaluate the kernel grid directly and save it as an image
                    # (no summary writer is attached in this function).
                    img = sess.run(tf.squeeze(grid))
                    fig = plt.figure()
                    plt.imshow(img)
                    plt.savefig("images/kernelsOne_%s.png" % index)
                    plt.close(fig)
                
                
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                for i in range(number_images):
                    #print('step: %d/%d' % (i+1, number_images))
                    
                    logit1, logit2, logit3, logit4,logit5, logit6, media_id = sess.run([logits1, logits2, logits3, logits4, logits5, logits6, labels])
                    
                    media_id = media_id[0]

                    # Use Average for voting of logits
                    logits = tuple(i + j for i, j in zip(logit1[0], logit2[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit3[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit4[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit5[0]))
                    logits = tuple(i + j for i, j in zip(logits, logit6[0]))
                    logits = [x / 6 for x in logits] 
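                    # Equivalent average, vectorized:
                    # logits = np.mean(np.stack([logit1[0], logit2[0], logit3[0],
                    #                            logit4[0], logit5[0], logit6[0]]),
                    #                 axis=0)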
                    
                    
                    # Passing logits through softmax function to receive "probabilities"
                    logits = my_functions.numpy_softmax(logits)
                    
                    
                    total_output[offset:offset + 1] = logits
                    total_labels[offset:offset + 1] = media_id
                    offset += 1
                coord.request_stop()
                coord.join(threads)

            output_list.append(total_output)
            labels_list.append(total_labels)
            
        
    prediction_filename = filename
    #os.remove(prediction_filename)
            
    # Read the label map once instead of on every write.
    labels_map = dataset_utils.read_label_file(label_dir)

    with tf.gfile.Open(prediction_filename, 'a') as f:
        for i in range(number_images):
            image_id = labels_list[0][i]

            for p in range(1000):
                p1 = output_list[0][i][p]
                p2 = output_list[1][i][p]
                p3 = output_list[2][i][p]

                # Take the maximum probability over the three models
                probability = np.amax([p1, p2, p3])

                class_id = labels_map[p]
                f.write('%s;%s;%f\n' % (image_id, class_id, probability))  # <ImageId;ClassId;Probability>