Example #1
0
    def visual_results(self, dataset_type = "TEST", images_index = 3, FLAG_MAX_VOTE = False):
        """Restore the saved model and plot predictions (and, for Bayesian
        models, per-pixel uncertainty) for a sample of images.

        Args:
            dataset_type: one of 'TRAIN', 'VAL' or 'TEST' — selects which
                file list from the config is used.
            images_index: either an explicit list of image indices, or an
                int giving how many indices to sample at random.
            FLAG_MAX_VOTE: when the model is Bayesian, aggregate the Monte
                Carlo dropout samples by majority vote instead of by mean
                probability.

        Returns:
            (images, labels, pred_tot, var_tot) for the selected images;
            var_tot entries are empty lists for non-Bayesian models.

        Raises:
            ValueError: if dataset_type is not one of the three known sets.
        """
        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        FLAG_BAYES = self.config["BAYES"]

        with self.sess as sess:

            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, train_dir)

            _, _, prediction = cal_loss(logits=self.logits,
                                        labels=self.labels_pl, number_class=self.num_classes)
            prob = tf.nn.softmax(self.logits, dim=-1)

            # Map the dataset name to its file list and the population size
            # random indices are drawn from.
            # NOTE(review): the sizes 6/10/5 look like debug-reduced values
            # (the original carried a "CHANGE IT BACK" note next to the
            # commented-out 367/101/233) — confirm against real dataset sizes.
            if dataset_type == 'TRAIN':
                test_type_path = self.config["TRAIN_FILE"]
                population = 6
            elif dataset_type == 'VAL':
                test_type_path = self.config["VAL_FILE"]
                population = 10
            elif dataset_type == 'TEST':
                test_type_path = self.config["TEST_FILE"]
                population = 5
            else:
                # Previously an unknown dataset_type fell through and caused
                # a NameError on test_type_path below; fail fast instead.
                raise ValueError("dataset_type must be 'TRAIN', 'VAL' or 'TEST', "
                                 "got %r" % (dataset_type,))

            if isinstance(images_index, list):
                indexes = images_index
            else:
                indexes = random.sample(range(population), images_index)

            # Load images, then keep only the selected subset.
            image_filename, label_filename = get_filename_list(test_type_path, self.config)
            images, labels = get_all_test_data(image_filename, label_filename)
            images = [images[i] for i in indexes]
            labels = [labels[i] for i in indexes]

            num_sample_generate = 30  # Monte Carlo dropout samples per image
            pred_tot = []
            var_tot = []

            for image_batch, label_batch in zip(images, labels):
                image_batch = np.reshape(image_batch, [1, image_h, image_w, image_c])
                label_batch = np.reshape(label_batch, [1, image_h, image_w, 1])

                if FLAG_BAYES is False:
                    # Deterministic network: single forward pass, no variance.
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.batch_size_pl: 1}
                    pred = sess.run(fetches=[prediction], feed_dict=feed_dict)
                    pred = np.reshape(pred, [image_h, image_w])
                    var_one = []
                else:
                    # Bayesian network: sample repeatedly with dropout active
                    # and aggregate the sampled class probabilities.
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.with_dropout_pl: True,
                                 self.batch_size_pl: 1}
                    prob_iter_tot = []
                    pred_iter_tot = []
                    for _ in range(num_sample_generate):
                        prob_iter_step = sess.run(fetches=[prob], feed_dict=feed_dict)
                        prob_iter_tot.append(prob_iter_step)
                        pred_iter_tot.append(np.reshape(np.argmax(prob_iter_step, axis=-1), [-1]))

                    if FLAG_MAX_VOTE is True:
                        prob_variance, pred = MAX_VOTE(pred_iter_tot, prob_iter_tot,
                                                       self.config["NUM_CLASSES"])
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])
                    else:
                        # Mean of the sampled probabilities gives the label;
                        # their variance gives the uncertainty (tau omitted).
                        prob_mean = np.nanmean(prob_iter_tot, axis=0)
                        prob_variance = np.var(prob_iter_tot, axis=0)
                        pred = np.reshape(np.argmax(prob_mean, axis=-1), [-1])
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])

                pred_tot.append(pred)
                var_tot.append(var_one)

            draw_plots_bayes(images, labels, pred_tot, var_tot)
        return (images, labels, pred_tot, var_tot)
Example #2
0
    def visual_results_external_image(self, images, FLAG_MAX_VOTE = False):
        """Run inference on user-supplied images (no ground-truth labels).

        Each image is resized to the configured network input size, pushed
        through the restored model, and the prediction (plus per-pixel
        variance for Bayesian models) is plotted and returned.

        Args:
            images: iterable of raw images of any size; each is resized to
                INPUT_HEIGHT x INPUT_WIDTH from the config.
            FLAG_MAX_VOTE: aggregate Bayesian Monte Carlo samples by
                majority vote instead of mean probability.

        Returns:
            (pred_tot, var_tot, inference_time): predictions, uncertainty
            maps (empty lists for non-Bayesian models), and per-image
            wall-clock inference times in seconds.
        """
        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        FLAG_BAYES = self.config["BAYES"]

        # Resize to the configured input size. The original hard-coded
        # 64x64 here, which breaks the np.reshape below whenever the config
        # specifies a different input size.
        images = [misc.imresize(image, (image_h, image_w)) for image in images]

        with self.sess as sess:

            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, train_dir)

            _, _, prediction = cal_loss(logits=self.logits,
                                        labels=self.labels_pl)
            prob = tf.nn.softmax(self.logits, dim=-1)

            num_sample_generate = 30  # Monte Carlo dropout samples per image
            pred_tot = []
            var_tot = []

            # The graph requires a label tensor even for pure inference, so
            # feed dummy all-ones labels of the configured input shape
            # (previously hard-coded to 64x64).
            labels = [np.ones((image_h, image_w), dtype=int) for _ in images]

            inference_time = []
            start_time = time.time()

            for image_batch, label_batch in zip(images, labels):

                image_batch = np.reshape(image_batch, [1, image_h, image_w, image_c])
                label_batch = np.reshape(label_batch, [1, image_h, image_w, 1])

                if FLAG_BAYES is False:
                    # Deterministic network: single forward pass, no variance.
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.batch_size_pl: 1}
                    pred = sess.run(fetches=[prediction], feed_dict=feed_dict)
                    pred = np.reshape(pred, [image_h, image_w])
                    var_one = []
                else:
                    # Bayesian network: sample repeatedly with dropout active
                    # and aggregate the sampled class probabilities.
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.with_dropout_pl: True,
                                 self.batch_size_pl: 1}
                    prob_iter_tot = []
                    pred_iter_tot = []
                    for _ in range(num_sample_generate):
                        prob_iter_step = sess.run(fetches=[prob], feed_dict=feed_dict)
                        prob_iter_tot.append(prob_iter_step)
                        pred_iter_tot.append(np.reshape(np.argmax(prob_iter_step, axis=-1), [-1]))

                    if FLAG_MAX_VOTE is True:
                        prob_variance, pred = MAX_VOTE(pred_iter_tot, prob_iter_tot,
                                                       self.config["NUM_CLASSES"])
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])
                    else:
                        # Mean of the sampled probabilities gives the label;
                        # their variance gives the uncertainty (tau omitted).
                        prob_mean = np.nanmean(prob_iter_tot, axis=0)
                        prob_variance = np.var(prob_iter_tot, axis=0)
                        pred = np.reshape(np.argmax(prob_mean, axis=-1), [-1])
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])

                pred_tot.append(pred)
                var_tot.append(var_one)
                inference_time.append(time.time() - start_time)
                start_time = time.time()

            # Plotting is best-effort: the inference results are returned
            # whether or not the plot succeeds (was a bare except: before).
            try:
                draw_plots_bayes_external(images, pred_tot, var_tot)
            except Exception:
                pass
            return pred_tot, var_tot, inference_time
Example #3
0
    def visual_results(self,
                       dataset_type="TEST",
                       images_index=3,
                       FLAG_MAX_VOTE=False):
        """Restore the saved model, attach a fresh 1x1-conv classification
        head (3 output classes) on top of ``self.deconv1_3``, and plot its
        predictions (and, for Bayesian models, per-pixel uncertainty) for a
        sample of images.

        Args:
            dataset_type: one of 'TRAIN', 'VAL' or 'TEST' — selects which
                file list from the config is used.
            images_index: either an explicit list of image indices, or an
                int giving how many indices to sample at random.
            FLAG_MAX_VOTE: when the model is Bayesian, aggregate the Monte
                Carlo dropout samples by majority vote instead of by mean
                probability.

        Returns:
            (images, labels, pred_tot, var_tot) for the selected images;
            var_tot entries are empty lists for non-Bayesian models.

        Raises:
            ValueError: if dataset_type is not one of the three known sets.
        """
        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        FLAG_BAYES = self.config["BAYES"]

        with self.sess as sess:

            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, train_dir)

            # Fresh 3-class head on top of the last decoder feature map.
            # NOTE(review): creating 'weights'/'biases' outside an explicit
            # variable scope may collide with existing variables — confirm.
            kernel = variable_with_weight_decay('weights',
                                                initializer=initialization(
                                                    1, 64),
                                                shape=[1, 1, 64, 3],
                                                wd=False)
            conv = tf.nn.conv2d(self.deconv1_3,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = variable_with_weight_decay('biases',
                                                tf.constant_initializer(0.0),
                                                shape=[3],
                                                wd=False)
            logits = tf.nn.bias_add(conv, biases, name="scope.name")

            # Initialise ONLY the two variables created above. The original
            # ran tf.global_variables_initializer() here, which re-initialises
            # every variable and silently destroys the weights restored from
            # the checkpoint just above.
            sess.run(tf.variables_initializer([kernel, biases]))

            _, _, prediction = cal_loss(logits=logits, labels=self.labels_pl)
            prob = tf.nn.softmax(logits, dim=-1)

            # Map the dataset name to its file list and the population size
            # random indices are drawn from.
            if dataset_type == 'TRAIN':
                test_type_path = self.config["TRAIN_FILE"]
                population = 367
            elif dataset_type == 'VAL':
                test_type_path = self.config["VAL_FILE"]
                population = 101
            elif dataset_type == 'TEST':
                test_type_path = self.config["TEST_FILE"]
                population = 233
            else:
                # Previously an unknown dataset_type fell through and caused
                # a NameError on test_type_path below; fail fast instead.
                raise ValueError("dataset_type must be 'TRAIN', 'VAL' or "
                                 "'TEST', got %r" % (dataset_type,))

            if isinstance(images_index, list):
                indexes = images_index
            else:
                indexes = random.sample(range(population), images_index)

            # Load images, then keep only the selected subset.
            image_filename, label_filename = get_filename_list(
                test_type_path, self.config)
            images, labels = get_all_test_data(image_filename, label_filename)
            images = [images[i] for i in indexes]
            labels = [labels[i] for i in indexes]

            num_sample_generate = 30  # Monte Carlo dropout samples per image
            pred_tot = []
            var_tot = []

            for image_batch, label_batch in zip(images, labels):

                image_batch = np.reshape(image_batch,
                                         [1, image_h, image_w, image_c])
                label_batch = np.reshape(label_batch, [1, image_h, image_w, 1])

                if FLAG_BAYES is False:
                    # Deterministic network: single forward pass, no variance.
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.is_training_pl: False,
                        self.keep_prob_pl: 0.5,
                        self.batch_size_pl: 1
                    }
                    pred = sess.run(fetches=[prediction], feed_dict=feed_dict)
                    pred = np.reshape(pred, [image_h, image_w])
                    var_one = []
                else:
                    # Bayesian network: sample repeatedly with dropout active
                    # and aggregate the sampled class probabilities.
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.is_training_pl: False,
                        self.keep_prob_pl: 0.5,
                        self.with_dropout_pl: True,
                        self.batch_size_pl: 1
                    }
                    prob_iter_tot = []
                    pred_iter_tot = []
                    for _ in range(num_sample_generate):
                        prob_iter_step = sess.run(fetches=[prob],
                                                  feed_dict=feed_dict)
                        prob_iter_tot.append(prob_iter_step)
                        pred_iter_tot.append(
                            np.reshape(np.argmax(prob_iter_step, axis=-1),
                                       [-1]))

                    if FLAG_MAX_VOTE is True:
                        prob_variance, pred = MAX_VOTE(
                            pred_iter_tot, prob_iter_tot,
                            self.config["NUM_CLASSES"])
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])
                    else:
                        # Mean of the sampled probabilities gives the label;
                        # their variance gives the uncertainty (tau omitted).
                        prob_mean = np.nanmean(prob_iter_tot, axis=0)
                        prob_variance = np.var(prob_iter_tot, axis=0)
                        pred = np.reshape(np.argmax(prob_mean, axis=-1), [-1])
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])

                pred_tot.append(pred)
                var_tot.append(var_one)

            draw_plots_bayes(images, labels, pred_tot, var_tot)
        return (images, labels, pred_tot, var_tot)