def visual_results(self, dataset_type="TEST", images_index=3):
        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]

        with self.sess as sess:
            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(self.sess,
                          os.path.join(self.saved_dir, 'model.ckpt-19'))

            _, _, prediction = cal_loss(logits=self.logits,
                                        labels=self.labels_pl)

            if (dataset_type == 'TRAIN'):
                test_type_path = self.config["TRAIN_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(367), images_index)
            elif (dataset_type == 'VAL'):
                test_type_path = self.config["VAL_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(101), images_index)
            elif (dataset_type == 'TEST'):
                test_type_path = self.config["TEST_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(233), images_index)

            # Load images
            image_filename, label_filename = get_filename_list(
                test_type_path, self.config)
            images, labels = get_all_test_data(image_filename, label_filename)

            # Keep images subset of length images_index
            images = [images[i] for i in indexes]
            labels = [labels[i] for i in indexes]

            pred_tot = []

            for image_batch, label_batch in zip(images, labels):
                image_batch = np.reshape(image_batch,
                                         [1, image_h, image_w, image_c])
                label_batch = np.reshape(label_batch, [1, image_h, image_w, 1])
                fetches = [prediction]
                feed_dict = {
                    self.inputs_pl: image_batch,
                    self.labels_pl: label_batch,
                    self.batch_size_pl: 1
                }
                pred = sess.run(fetches=fetches, feed_dict=feed_dict)
                pred = np.reshape(pred, [image_h, image_w])
                pred_tot.append(pred)

            draw_plots(images, labels, pred_tot)
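
For context, a minimal usage sketch of the method above; `model` is an illustrative instance of the surrounding class, not a name from the source:

# Hypothetical usage; `model` is an instance of the surrounding class,
# already built from its config dict.
model.visual_results(dataset_type="TEST", images_index=[0, 50, 100])
model.visual_results(dataset_type="VAL", images_index=3)  # 3 random images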
Example #2
    def test(self):
        image_filename, label_filename = get_filename_list(
            self.test_file, self.config)

        with self.graph.as_default():
            with self.sess as sess:
                loss, accuracy, prediction = normal_loss(
                    self.logits, self.labels_pl, self.num_classes)

                images, labels = get_all_test_data(image_filename,
                                                   label_filename)

                #acc_final = []
                #iu_final = []
                #iu_mean_final = []

                loss_tot = []
                acc_tot = []
                pred_tot = []
                #hist = np.zeros((self.num_classes, self.num_classes))
                step = 0
                for image_batch, label_batch in zip(images, labels):
                    image_batch = np.reshape(
                        image_batch,
                        [1, self.input_h, self.input_w, self.input_c])
                    label_batch = np.reshape(
                        label_batch, [1, self.input_h, self.input_w, 1])
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.batch_size_pl: 1
                    }
                    fetches = [loss, accuracy, self.logits, prediction]
                    loss_per, acc_per, logit, pred = sess.run(
                        fetches=fetches, feed_dict=feed_dict)
                    loss_tot.append(loss_per)
                    acc_tot.append(acc_per)
                    pred_tot.append(pred)
                    print(
                        "Image Index {}: TEST Loss {:6.3f}, TEST Accu {:6.3f}".
                        format(step, loss_tot[-1], acc_tot[-1]))
                    step = step + 1
                    #per_class_acc(logit, label_batch, self.num_classes)
                    #hist += get_hist(logit, label_batch)

                #acc_tot = np.diag(hist).sum() / hist.sum()
                #iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))

                #print("Total Accuracy for test image: ", acc_tot)
                #print("Total MoI for test images: ", iu)
                #print("mean MoI for test images: ", np.nanmean(iu))

                #acc_final.append(acc_tot)
                #iu_final.append(iu)
                #iu_mean_final.append(np.nanmean(iu))

            return pred_tot
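
The commented-out lines above accumulate a confusion matrix and derive per-class intersection-over-union from it. A standalone NumPy sketch of that arithmetic, assuming `hist` is the num_classes x num_classes matrix that the repo's get_hist builds:

import numpy as np

def iou_from_hist(hist):
    # hist: confusion matrix from get_hist; the diagonal holds the
    # per-class true-positive pixel counts.
    tp = np.diag(hist)
    iou = tp / (hist.sum(axis=1) + hist.sum(axis=0) - tp)
    return iou, np.nanmean(iou)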
Example #3
def train(ob, max_steps=30001, batch_size=3):
    # To train the Bayesian SegNet, FLAG_OPT should be SGD; to train the
    # normal SegNet, FLAG_OPT should be ADAM.
    image_filename = []
    label_filename = []
    val_image_filename = []
    val_label_filename = []
    for o in ob:
        img, lab = get_filename_list(o.train_file, o.config)
        val_img, val_lab = get_filename_list(o.val_file, o.config)
        image_filename.append(img)
        label_filename.append(lab)
        val_image_filename.append(val_img)
        val_label_filename.append(val_lab)

    with ob[0].graph.as_default(), ob[1].graph.as_default(), \
            ob[2].graph.as_default(), ob[3].graph.as_default():
        for i, o in enumerate(ob):
            if o.images_tr is None:
                o.images_tr, o.labels_tr = dataset_inputs(
                    image_filename[i], label_filename[i], batch_size, o.config)
                o.images_val, o.labels_val = dataset_inputs(
                    val_image_filename[i], val_label_filename[i], batch_size,
                    o.config)

        l = tf.concat([latenv(ob[0]), latenv(ob[1]),
                       latenv(ob[2]), latenv(ob[3])], axis=0)
        fc1 = tf.contrib.layers.fully_connected(
            l, 786432, activation_fn=tf.nn.relu, normalizer_fn=None,
            weights_initializer=initializers.xavier_initializer(),
            biases_initializer=tf.zeros_initializer(), trainable=True)
        fc2 = tf.contrib.layers.fully_connected(
            fc1, 540000, activation_fn=tf.nn.relu, normalizer_fn=None,
            weights_initializer=initializers.xavier_initializer(),
            biases_initializer=tf.zeros_initializer(), trainable=True)
        fc3 = tf.contrib.layers.fully_connected(
            fc2, 270000, activation_fn=tf.nn.relu, normalizer_fn=None,
            weights_initializer=initializers.xavier_initializer(),
            biases_initializer=tf.zeros_initializer(), trainable=True)
        fc4 = tf.contrib.layers.fully_connected(
            fc3, 270000, activation_fn=None, normalizer_fn=None,
            weights_initializer=initializers.xavier_initializer(),
            biases_initializer=tf.zeros_initializer(), trainable=True)

        # Define separate losses for each segment and a loss for the
        # fully connected layers.
        loss1 = segloss(logits=ob[0].logits, labels=ob[0].labels_pl)
        loss2 = segloss(logits=ob[1].logits, labels=ob[1].labels_pl)
        loss3 = segloss(logits=ob[2].logits, labels=ob[2].labels_pl)
        loss4 = segloss(logits=ob[3].logits, labels=ob[3].labels_pl)
        fcloss = multiloss(fc4, ob[0].labels_pl)
        init = tf.global_variables_initializer()

        train1, train2, train3, train4, trainf, global_step = train_op(
            seg1=loss1, seg2=loss2, seg3=loss3, seg4=loss4, lossf=fcloss,
            opt='ADAM')
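
A hypothetical invocation: the function indexes ob[0] through ob[3] explicitly, so it expects exactly four model objects. The variable names below are illustrative:

# Hypothetical call; seg_a..seg_d are illustrative model objects, each
# with its own graph, train_file, val_file and config.
train([seg_a, seg_b, seg_c, seg_d], max_steps=30001, batch_size=3)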
Example #4
def load_data(manifest, indices=None, n=None):
    image_filenames, label_filenames = get_filename_list(manifest)

    if indices is None:
        indices = range(
            0, len(label_filenames)) if n is None else np.random.choice(
                range(0, len(label_filenames)), n)

    image_filenames = [image_filenames[i] for i in indices]
    label_filenames = [label_filenames[i] for i in indices]

    images, labels = get_all_test_data(image_filenames, label_filenames)

    return indices, np.array(images), np.array(labels)
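
Hypothetical calls, with an illustrative manifest path; pass explicit indices or sample n filenames at random:

# Hypothetical usage; "test_manifest.txt" is an illustrative path.
indices, images, labels = load_data("test_manifest.txt", n=5)
indices, images, labels = load_data("test_manifest.txt", indices=[0, 7, 21])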
Example #5
    def test(self):
        image_filename, label_filename = get_filename_list(
            self.test_file, self.config)

        with self.graph.as_default():
            with self.sess as sess:
                saver = tf.train.Saver()
                saver.restore(self.sess,
                              os.path.join(self.saved_dir, 'model.ckpt-19'))

                loss, accuracy, prediction = normal_loss(
                    self.logits, self.labels_pl, self.num_classes)
                images, labels = get_all_test_data(image_filename,
                                                   label_filename)

                loss_tot = []
                acc_tot = []
                pred_tot = []
                step = 0

                for image_batch, label_batch in zip(images, labels):
                    image_batch = np.reshape(
                        image_batch,
                        [1, self.input_h, self.input_w, self.input_c])
                    label_batch = np.reshape(
                        label_batch, [1, self.input_h, self.input_w, 1])
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.batch_size_pl: 1
                    }
                    fetches = [loss, accuracy, self.logits, prediction]
                    loss_per, acc_per, logit, pred = sess.run(
                        fetches=fetches, feed_dict=feed_dict)
                    loss_tot.append(loss_per)
                    acc_tot.append(acc_per)
                    pred_tot.append(pred)
                    print(
                        "Image Index {}: TEST Loss {:6.3f}, TEST Accu {:6.3f}".
                        format(step, loss_tot[-1], acc_tot[-1]))
                    step = step + 1
Example #6
m = model.predict(x_test)
res = np.argmax(m[no], 1)
res = res.reshape(128, 128)
plt.imshow(test_images[no])
plt.show()
plt.imshow(res)
print(res)

score = model.evaluate(np.array([x_test[no]]),
                       np.array([y_test[no]]),
                       verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%%

test_file = get_filename_list(seg.test_file, seg.config)
la_filename = test_file[0][no]

im = imread(la_filename, pilmode='L') / 255
e = EclipseGenerator.Eclipse(1, 1, N=128)

e.img = im
e.Plot()
plt.show()

seeds = [[int(len(im[no]) / 2), int(len(im[no]) / 2)], [127, 127]]
labels = [0, 1]
beta = 90
[mask, proba] = Random_walker.random_walker(im, seeds, labels, beta)
e.img = mask
e.Plot()
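
For comparison, scikit-image ships a built-in random walker with a similar interface; a sketch of a roughly equivalent call, assuming the same normalized grayscale image `im` (skimage encodes seeds as a per-pixel marker image, with 0 meaning unlabeled):

import numpy as np
from skimage.segmentation import random_walker

markers = np.zeros_like(im, dtype=np.int32)  # 0 = unlabeled pixel
markers[64, 64] = 1      # seed for one region (skimage labels start at 1)
markers[127, 127] = 2    # seed for the other region
mask = random_walker(im, markers, beta=90)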
Example #7
    def train(self, max_steps=30000, batch_size=3):
        image_filename, label_filename = get_filename_list(
            self.train_file, self.config)
        val_image_filename, val_label_filename = get_filename_list(
            self.val_file, self.config)

        with self.graph.as_default():
            if self.images_tr is None:
                self.images_tr, self.labels_tr = dataset_inputs(
                    image_filename, label_filename, batch_size, self.config)
                self.images_val, self.labels_val = dataset_inputs(
                    val_image_filename, val_label_filename, batch_size,
                    self.config)

            loss, accuracy, prediction = cal_loss(logits=self.logits,
                                                  labels=self.labels_pl)
            #                                                  , number_class=self.num_classes)
            train, global_step = train_op(total_loss=loss, opt=self.opt)

            summary_op = tf.summary.merge_all()

            with self.sess.as_default():
                self.sess.run(tf.local_variables_initializer())
                self.sess.run(tf.global_variables_initializer())

                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(coord=coord)

                train_writer = tf.summary.FileWriter(self.tb_logs,
                                                     self.sess.graph)
                self.saver = tf.train.Saver()

                for step in range(max_steps):
                    image_batch, label_batch = self.sess.run(
                        [self.images_tr, self.labels_tr])
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.batch_size_pl: batch_size
                    }

                    _, _loss, _accuracy, summary = self.sess.run(
                        [train, loss, accuracy, summary_op],
                        feed_dict=feed_dict)
                    self.train_loss.append(_loss)
                    self.train_accuracy.append(_accuracy)
                    print(
                        "Iteration {}: Train Loss {:6.3f}, Train Accu {:6.3f}".
                        format(step, self.train_loss[-1],
                               self.train_accuracy[-1]))

                    if step % 100 == 0:
                        train_writer.add_summary(summary, step)

                    if step % 1000 == 0:
                        print("start validating..")
                        _val_loss = []
                        _val_acc = []
                        for test_step in range(9):
                            image_batch_val, label_batch_val = self.sess.run(
                                [self.images_val, self.labels_val])
                            feed_dict_valid = {
                                self.inputs_pl: image_batch_val,
                                self.labels_pl: label_batch_val,
                                self.batch_size_pl: batch_size
                            }
                            # We are still feeding mini-batches, so batch norm
                            # keeps phase_train=True; since the train op is not
                            # run here, the weights are not updated.
                            _loss, _acc, _val_pred = self.sess.run(
                                [loss, accuracy, self.logits], feed_dict_valid)
                            _val_loss.append(_loss)
                            _val_acc.append(_acc)

                        self.val_loss.append(np.mean(_val_loss))
                        self.val_acc.append(np.mean(_val_acc))

                        print("Val Loss {:6.3f}, Val Acc {:6.3f}".format(
                            self.val_loss[-1], self.val_acc[-1]))

                        self.saver.save(self.sess,
                                        os.path.join(self.saved_dir,
                                                     'model.ckpt'),
                                        global_step=self.model_version)
                        self.model_version = self.model_version + 1

                coord.request_stop()
                coord.join(threads)
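
The loop above periodically writes model.ckpt-<version> files via self.saver.save; a sketch of restoring the newest one in a fresh session (the directory path is illustrative):

import tensorflow as tf

saver = tf.train.Saver()
with tf.Session() as sess:
    # Picks up the newest model.ckpt-<version> recorded in the
    # "checkpoint" file that Saver.save maintains.
    saver.restore(sess, tf.train.latest_checkpoint("./saved_models"))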
Example #8
    def test(self):
        image_filename, label_filename = get_filename_list(self.test_file, self.config)

        with self.graph.as_default():
            with self.sess as sess:
                loss, accuracy, prediction = normal_loss(self.logits, self.labels_pl, self.num_classes)
                prob = tf.nn.softmax(self.logits, dim=-1)
                prob = tf.reshape(prob, [self.input_h, self.input_w, self.num_classes])

                images, labels = get_all_test_data(image_filename, label_filename)

                NUM_SAMPLE = []
                for i in range(30):
                    NUM_SAMPLE.append(2 * i + 1)

                acc_final = []
                iu_final = []
                iu_mean_final = []
                # uncomment the line below to only run for two times.
                # NUM_SAMPLE = [1, 30]
                NUM_SAMPLE = [1]
                for num_sample_generate in NUM_SAMPLE:

                    loss_tot = []
                    acc_tot = []
                    pred_tot = []
                    var_tot = []
                    hist = np.zeros((self.num_classes, self.num_classes))
                    step = 0
                    for image_batch, label_batch in zip(images, labels):
                        image_batch = np.reshape(image_batch, [1, self.input_h, self.input_w, self.input_c])
                        label_batch = np.reshape(label_batch, [1, self.input_h, self.input_w, 1])
                        # comment the code below to apply the dropout for all the samples
                        if num_sample_generate == 1:
                            feed_dict = {self.inputs_pl: image_batch, self.labels_pl: label_batch,
                                         self.is_training_pl: False,
                                         self.keep_prob_pl: 0.5, self.with_dropout_pl: False,
                                         self.batch_size_pl: 1}
                        else:
                            feed_dict = {self.inputs_pl: image_batch, self.labels_pl: label_batch,
                                         self.is_training_pl: False,
                                         self.keep_prob_pl: 0.5, self.with_dropout_pl: True,
                                         self.batch_size_pl: 1}
                        # uncomment this code below to run the dropout for all the samples
                        # feed_dict = {test_data_tensor: image_batch, test_label_tensor:label_batch, phase_train: False, keep_prob:0.5, phase_train_dropout:True}
                        fetches = [loss, accuracy, self.logits, prediction]
                        if self.bayes is False:
                            loss_per, acc_per, logit, pred = sess.run(fetches=fetches, feed_dict=feed_dict)
                            var_one = []
                        else:
                            logit_iter_tot = []
                            loss_iter_tot = []
                            acc_iter_tot = []
                            prob_iter_tot = []
                            logit_iter_temp = []
                            for iter_step in range(num_sample_generate):
                                loss_iter_step, acc_iter_step, logit_iter_step, prob_iter_step = sess.run(
                                    fetches=[loss, accuracy, self.logits, prob], feed_dict=feed_dict)
                                loss_iter_tot.append(loss_iter_step)
                                acc_iter_tot.append(acc_iter_step)
                                logit_iter_tot.append(logit_iter_step)
                                prob_iter_tot.append(prob_iter_step)
                                logit_iter_temp.append(
                                    np.reshape(logit_iter_step, [self.input_h, self.input_w, self.num_classes]))

                            loss_per = np.nanmean(loss_iter_tot)
                            acc_per = np.nanmean(acc_iter_tot)
                            logit = np.nanmean(logit_iter_tot, axis=0)
                            print(np.shape(prob_iter_tot))

                            prob_mean = np.nanmean(prob_iter_tot, axis=0)
                            prob_variance = np.var(prob_iter_tot, axis=0)
                            logit_variance = np.var(logit_iter_temp, axis=0)

                            # THIS TIME I DIDN'T INCLUDE TAU
                            pred = np.reshape(np.argmax(prob_mean, axis=-1), [-1])  # pred is the predicted label

                            # var_sep holds, for each pixel, the variance of
                            # the probability of its chosen label k.
                            var_sep = []
                            # length_cur counts how many pixels have been read
                            # for one image.
                            length_cur = 0
                            for row in np.reshape(prob_variance, [self.input_h * self.input_w, self.num_classes]):
                                temp = row[pred[length_cur]]
                                length_cur += 1
                                var_sep.append(temp)
                            var_one = np.reshape(var_sep, [self.input_h,
                                                           self.input_w])  # var_one is the corresponding variance in terms of the "optimal" label
                            pred = np.reshape(pred, [self.input_h, self.input_w])

                        loss_tot.append(loss_per)
                        acc_tot.append(acc_per)
                        pred_tot.append(pred)
                        var_tot.append(var_one)
                        print("Image Index {}: TEST Loss {:6.3f}, TEST Accu {:6.3f}".format(step, loss_tot[-1], acc_tot[-1]))
                        step = step + 1
                        per_class_acc(logit, label_batch, self.num_classes)
                        hist += get_hist(logit, label_batch)

                    acc_tot = np.diag(hist).sum() / hist.sum()
                    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))

                    print("Total Accuracy for test image: ", acc_tot)
                    print("Total MoI for test images: ", iu)
                    print("mean MoI for test images: ", np.nanmean(iu))

                    acc_final.append(acc_tot)
                    iu_final.append(iu)
                    iu_mean_final.append(np.nanmean(iu))

            return acc_final, iu_final, iu_mean_final, prob_variance, logit_variance, pred_tot, var_tot
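
The Bayesian branch above averages several stochastic forward passes and reads off the variance of the winning class per pixel. The same aggregation, distilled into a standalone NumPy sketch:

import numpy as np

def mc_dropout_aggregate(prob_samples):
    # prob_samples: [T, H, W, num_classes] softmax outputs from T
    # stochastic (dropout-on) forward passes.
    prob_mean = np.nanmean(prob_samples, axis=0)
    prob_variance = np.var(prob_samples, axis=0)
    pred = np.argmax(prob_mean, axis=-1)                    # [H, W] labels
    # For each pixel, pick the variance of the class it was assigned,
    # mirroring the row[pred[length_cur]] loop above.
    var_one = np.take_along_axis(
        prob_variance, pred[..., None], axis=-1)[..., 0]    # [H, W]
    return pred, var_one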
Example #9
    def visual_results(self, dataset_type = "TEST", images_index = 3, FLAG_MAX_VOTE = False):

        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        FLAG_BAYES = self.config["BAYES"]
        print(FLAG_BAYES)
        with self.sess as sess:

            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, train_dir)

            _, _, prediction = cal_loss(logits=self.logits,
                                        labels=self.labels_pl,number_class=self.num_classes)
            prob = tf.nn.softmax(self.logits,dim = -1)

            if (dataset_type=='TRAIN'):
                test_type_path = self.config["TRAIN_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    '''CHANGE IT BACK'''
                    #indexes = random.sample(range(367),images_index)
                    indexes = random.sample(range(6),images_index)
                #indexes = [0,75,150,225,300]
            elif (dataset_type=='VAL'):
                test_type_path = self.config["VAL_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    #indexes = random.sample(range(101),images_index)
                    indexes = random.sample(range(10),images_index)
                #indexes = [0,25,50,75,100]
            elif (dataset_type=='TEST'):
                test_type_path = self.config["TEST_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(5),images_index)
                    #indexes = random.sample(range(233),images_index)
                #indexes = [0,50,100,150,200]

            # Load images

            image_filename,label_filename = get_filename_list(test_type_path, self.config)
            images, labels = get_all_test_data(image_filename,label_filename)
            
            # Keep images subset of length images_index
            images = [images[i] for i in indexes]
            labels = [labels[i] for i in indexes]

            num_sample_generate = 30
            pred_tot = []
            var_tot = []
            print(image_c)
            for image_batch, label_batch in zip(images,labels):
                print(image_batch.shape)
                image_batch = np.reshape(image_batch,[1,image_h,image_w,image_c])
                label_batch = np.reshape(label_batch,[1,image_h,image_w,1])

                if FLAG_BAYES is False:
                    print("NON BAYES")
                    fetches = [prediction]
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.batch_size_pl: 1}
                    pred = sess.run(fetches = fetches, feed_dict = feed_dict)
                    pred = np.reshape(pred,[image_h,image_w])
                    var_one = []
                else:
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.with_dropout_pl: True,
                                 self.batch_size_pl: 1}
                    prob_iter_tot = []
                    pred_iter_tot = []
                    for iter_step in range(num_sample_generate):
                        prob_iter_step = sess.run(fetches = [prob], feed_dict = feed_dict)
                        prob_iter_tot.append(prob_iter_step)
                        pred_iter_tot.append(np.reshape(np.argmax(prob_iter_step,axis = -1),[-1]))

                    if FLAG_MAX_VOTE is True:
                        prob_variance,pred = MAX_VOTE(pred_iter_tot,prob_iter_tot,self.config["NUM_CLASSES"])
                        #acc_per = np.mean(np.equal(pred,np.reshape(label_batch,[-1])))
                        var_one = var_calculate(pred,prob_variance)
                        pred = np.reshape(pred,[image_h,image_w])
                    else:
                        prob_mean = np.nanmean(prob_iter_tot,axis = 0)
                        prob_variance = np.var(prob_iter_tot, axis = 0)
                        pred = np.reshape(np.argmax(prob_mean,axis = -1),[-1]) #pred is the predicted label with the mean of generated samples
                        #THIS TIME I DIDN'T INCLUDE TAU
                        var_one = var_calculate(pred,prob_variance)
                        pred = np.reshape(pred,[image_h,image_w])


                pred_tot.append(pred)
                var_tot.append(var_one)

            draw_plots_bayes(images, labels, pred_tot, var_tot)
        return (images,labels,pred_tot,var_tot)
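
MAX_VOTE comes from the repo's utilities and also returns a probability variance; a purely illustrative sketch of just the majority-vote part over the sampled predictions:

import numpy as np

def majority_vote(pred_iter_tot, num_classes):
    # pred_iter_tot: list of T flat label maps, each of length H*W.
    votes = np.stack(pred_iter_tot, axis=0)        # [T, H*W]
    counts = np.apply_along_axis(
        lambda col: np.bincount(col, minlength=num_classes), 0, votes)
    return np.argmax(counts, axis=0)               # most-voted label per pixel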
Example #10
    def transfer_output(self,rotation=[0],dataset_type = "TRAIN"):
        
        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        datagen = ImageDataGenerator()
        with self.sess as sess:
            saver=tf.train.Saver()
            saver.restore(sess, train_dir)
            #self.logits=tf.nn.bias_add(self.conv, self.biases, name=scope.name)
            #_, _, prediction = cal_loss(logits=self.logits,labels=self.labels_pl,number_class=self.num_classes)
            
            #prob = tf.nn.softmax(self.logits,dim = -1)
            if(dataset_type == "TRAIN"):
                test_type_path = self.config["TRAIN_FILE"]
            else:
                test_type_path = self.config["TEST_FILE"]
            
            image_filename,label_filename = get_filename_list(test_type_path, self.config)
            images, labels = get_all_test_data(image_filename,label_filename)
            print(len(images))
            image_batch=np.reshape(images,[len(images),image_h,image_w,image_c])
            label_batch=np.reshape(labels,[len(labels),image_h,image_w,1])
            #pred_tot = []
            #var_tot = []
            
            logit_aug=[]
            image_aug=[]
            label_aug=[]
            
            for deg in rotation:
                
                logit_tot=[]
                image_tot=[]
                label_tot=[]
                for image_batch, label_batch in zip(images,labels):
                    #print(image_batch.shape)
                    #to just convert the image into the standard format
                    #for example: 1(batch size),64,64,3
                    
                    image_batch=datagen.apply_transform(image_batch,{'theta':deg})
                    label_batch=datagen.apply_transform(label_batch,{'theta':deg})
                    
                    image_batch = np.reshape(image_batch,[1,image_h,image_w,image_c])
                    label_batch = np.reshape(label_batch,[1,image_h,image_w,1])

                    
                    image_tot.append(image_batch)
                    label_tot.append(label_batch)
                    #non baysien model
                    #fetches = [prediction]
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: False,
                                 self.keep_prob_pl: 0.5,
                                 self.with_dropout_pl: True,
                                 self.batch_size_pl: 1}
                    #pred = sess.run(fetches = fetches, feed_dict = feed_dict)
                    logit= sess.run([self.logits],feed_dict = feed_dict)
                    # Originally image_h * image_w, with each element holding its label.
                    #pred = np.reshape(pred,[image_h,image_w])

                    #pred_tot.append(pred)
                    logit_tot.append(logit)
                logit_aug.append(logit_tot)
                image_aug.append(image_tot)
                label_aug.append(label_tot)
        return logit_aug,label_aug,image_aug#logit_tot,label_tot,image_tot
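
A hypothetical call collecting logits for the unrotated and 90-degree-rotated training images; `model` is an illustrative instance:

# Hypothetical usage; `model` is an instance of the surrounding class.
logit_aug, label_aug, image_aug = model.transfer_output(
    rotation=[0, 90], dataset_type="TRAIN")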
Example #11
    def train(self, max_steps=30001, batch_size=3):
        # To train the Bayesian SegNet, FLAG_OPT should be SGD; to train the
        # normal SegNet, FLAG_OPT should be ADAM.

        image_filename, label_filename = get_filename_list(self.train_file, self.config)
        val_image_filename, val_label_filename = get_filename_list(self.val_file, self.config)

        with self.graph.as_default():
            if self.images_tr is None:
                self.images_tr, self.labels_tr = dataset_inputs(image_filename, label_filename, batch_size, self.config)
                self.images_val, self.labels_val = dataset_inputs(val_image_filename, val_label_filename, batch_size,
                                                                  self.config)

            loss, accuracy, prediction = cal_loss(logits=self.logits, labels=self.labels_pl,
                                                     number_class=self.num_classes)
            train, global_step = train_op(total_loss=loss, opt=self.opt)

            summary_op = tf.summary.merge_all()

            with self.sess.as_default():
                self.sess.run(tf.local_variables_initializer())
                self.sess.run(tf.global_variables_initializer())

                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(coord=coord)
                # The queue runners basic reference:
                # https://www.tensorflow.org/versions/r0.12/how_tos/threading_and_queues
                train_writer = tf.summary.FileWriter(self.tb_logs, self.sess.graph)
                for step in range(max_steps):
                    print("OK")
                    image_batch, label_batch = self.sess.run([self.images_tr, self.labels_tr])
                    feed_dict = {self.inputs_pl: image_batch,
                                 self.labels_pl: label_batch,
                                 self.is_training_pl: True,
                                 self.keep_prob_pl: 0.5,
                                 self.with_dropout_pl: True,
                                 self.batch_size_pl: batch_size}

                    _, _loss, _accuracy, summary = self.sess.run([train, loss, accuracy, summary_op],
                                                                 feed_dict=feed_dict)
                    self.train_loss.append(_loss)
                    self.train_accuracy.append(_accuracy)
                    print("Iteration {}: Train Loss {:6.3f}, Train Accu {:6.3f}".format(step, self.train_loss[-1],
                                                                                       self.train_accuracy[-1]))

                    if step % 100 == 0:
                        conv_classifier = self.sess.run(self.logits, feed_dict=feed_dict)
                        print('per_class accuracy by logits in training time',
                              per_class_acc(conv_classifier, label_batch, self.num_classes))
                        # per_class_acc is a function from utils
                        train_writer.add_summary(summary, step)

                    if step % 1000 == 0:
                        print("start validating.......")
                        _val_loss = []
                        _val_acc = []
                        hist = np.zeros((self.num_classes, self.num_classes))
                        for test_step in range(20):
                            fetches_valid = [loss, accuracy, self.logits]
                            image_batch_val, label_batch_val = self.sess.run([self.images_val, self.labels_val])
                            feed_dict_valid = {self.inputs_pl: image_batch_val,
                                               self.labels_pl: label_batch_val,
                                               self.is_training_pl: True,
                                               self.keep_prob_pl: 1.0,
                                               self.with_dropout_pl: False,
                                               self.batch_size_pl: batch_size}
                            # We are still feeding mini-batches, so batch norm
                            # keeps phase_train=True; since the train op is not
                            # run here, the weights are not updated.
                            _loss, _acc, _val_pred = self.sess.run(fetches_valid, feed_dict_valid)
                            _val_loss.append(_loss)
                            _val_acc.append(_acc)
                            hist += get_hist(_val_pred, label_batch_val)

                        print_hist_summary(hist)

                        self.val_loss.append(np.mean(_val_loss))
                        self.val_acc.append(np.mean(_val_acc))

                        print(
                            "Iteration {}: Train Loss {:6.3f}, Train Acc {:6.3f}, Val Loss {:6.3f}, Val Acc {:6.3f}".format(
                                step, self.train_loss[-1], self.train_accuracy[-1], self.val_loss[-1],
                                self.val_acc[-1]))

                coord.request_stop()
                coord.join(threads)
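
per_class_acc and print_hist_summary come from the repo's utilities; a standalone sketch of a per-class accuracy readout from the accumulated confusion matrix, assuming rows index the ground-truth class:

import numpy as np

def per_class_accuracy(hist):
    # Fraction of each ground-truth class that was predicted correctly;
    # classes absent from the batch come out as NaN instead of raising.
    with np.errstate(invalid="ignore", divide="ignore"):
        return np.diag(hist) / hist.sum(axis=1)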
Example #12
    def visual_results(self, dataset_type="TRAIN", NUM_IMAGES=3):

        #train_dir = "./saved_models/segnet_vgg_bayes/segnet_vgg_bayes_30000/model.ckpt-30000"
        #train_dir = "./saved_models/segnet_scratch/segnet_scratch_30000/model.ckpt-30000"

        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        FLAG_BAYES = self.config["BAYES"]

        with self.sess as sess:

            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, train_dir)

            _, _, prediction = normal_loss(logits=self.logits,
                                           labels=self.labels_pl,
                                           number_class=self.num_classes)
            prob = tf.nn.softmax(self.logits, dim=-1)

            if (dataset_type == 'TRAIN'):
                test_type_path = self.config["TRAIN_FILE"]
                indexes = random.sample(range(367), NUM_IMAGES)
                #indexes = [0,75,150,225,300]
            elif (dataset_type == 'VAL'):
                test_type_path = self.config["VAL_FILE"]
                indexes = random.sample(range(101), NUM_IMAGES)
                #indexes = [0,25,50,75,100]
            elif (dataset_type == 'TEST'):
                test_type_path = self.config["TEST_FILE"]
                indexes = random.sample(range(233), NUM_IMAGES)
                #indexes = [0,50,100,150,200]

            # Load images
            image_filename, label_filename = get_filename_list(
                test_type_path, self.config)
            images, labels = get_all_test_data(image_filename, label_filename)

            # Keep images subset of length NUM_IMAGES
            images = [images[i] for i in indexes[0:NUM_IMAGES]]
            labels = [labels[i] for i in indexes[0:NUM_IMAGES]]

            num_sample_generate = 30
            pred_tot = []
            var_tot = []

            for image_batch, label_batch in zip(images, labels):

                image_batch = np.reshape(image_batch,
                                         [1, image_h, image_w, image_c])
                label_batch = np.reshape(label_batch, [1, image_h, image_w, 1])

                if FLAG_BAYES is False:
                    fetches = [prediction]
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.is_training_pl: False,
                        self.keep_prob_pl: 0.5
                    }
                    pred = sess.run(fetches=fetches, feed_dict=feed_dict)
                    pred = np.reshape(pred, [image_h, image_w])
                    var_one = []
                else:
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.is_training_pl: False,
                        self.keep_prob_pl: 0.5,
                        self.with_dropout_pl: False
                    }
                    prob_iter_tot = []
                    for iter_step in range(num_sample_generate):
                        prob_iter_step = sess.run(fetches=[prob],
                                                  feed_dict=feed_dict)
                        prob_iter_tot.append(prob_iter_step)

                    prob_mean = np.nanmean(prob_iter_tot, axis=0)
                    prob_variance = np.var(prob_iter_tot, axis=0)

                    #THIS TIME I DIDN'T INCLUDE TAU
                    pred = np.reshape(np.argmax(prob_mean, axis=-1),
                                      [-1])  #pred is the predicted label

                    # var_sep holds, for each pixel, the variance of the
                    # probability of its chosen label k.
                    var_sep = []
                    # length_cur counts how many pixels have been read for
                    # one image.
                    length_cur = 0
                    for row in np.reshape(prob_variance,
                                          [image_h * image_w, 12]):
                        temp = row[pred[length_cur]]
                        length_cur += 1
                        var_sep.append(temp)
                    var_one = np.reshape(
                        var_sep, [image_h, image_w]
                    )  #var_one is the corresponding variance in terms of the "optimal" label
                    pred = np.reshape(pred, [image_h, image_w])

                pred_tot.append(pred)
                var_tot.append(var_one)

            draw_plots(images, labels, pred_tot)
Example #13
    def visual_results(self,
                       dataset_type="TEST",
                       images_index=3,
                       FLAG_MAX_VOTE=False):

        image_w = self.config["INPUT_WIDTH"]
        image_h = self.config["INPUT_HEIGHT"]
        image_c = self.config["INPUT_CHANNELS"]
        train_dir = self.config["SAVE_MODEL_DIR"]
        FLAG_BAYES = self.config["BAYES"]

        with self.sess as sess:

            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, train_dir)

            kernel = variable_with_weight_decay('weights',
                                                initializer=initialization(
                                                    1, 64),
                                                shape=[1, 1, 64, 3],
                                                wd=False)
            conv = tf.nn.conv2d(self.deconv1_3,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = variable_with_weight_decay('biases',
                                                tf.constant_initializer(0.0),
                                                shape=[3],
                                                wd=False)
            logits = tf.nn.bias_add(conv, biases, name="scope.name")
            #exit()
            sess.run(tf.global_variables_initializer())
            #sess.run(logits)
            _, _, prediction = cal_loss(logits=logits, labels=self.labels_pl)
            prob = tf.nn.softmax(logits, dim=-1)
            print(
                "==================================================================================="
            )
            print(prediction)
            #exit()
            if (dataset_type == 'TRAIN'):
                test_type_path = self.config["TRAIN_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(367), images_index)
                #indexes = [0,75,150,225,300]
            elif (dataset_type == 'VAL'):
                test_type_path = self.config["VAL_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(101), images_index)
                #indexes = [0,25,50,75,100]
            elif (dataset_type == 'TEST'):
                test_type_path = self.config["TEST_FILE"]
                if type(images_index) == list:
                    indexes = images_index
                else:
                    indexes = random.sample(range(233), images_index)
                #indexes = [0,50,100,150,200]

            # Load images
            image_filename, label_filename = get_filename_list(
                test_type_path, self.config)
            images, labels = get_all_test_data(image_filename, label_filename)

            # Keep images subset of length images_index
            images = [images[i] for i in indexes]
            labels = [labels[i] for i in indexes]

            num_sample_generate = 30
            pred_tot = []
            var_tot = []

            for image_batch, label_batch in zip(images, labels):

                image_batch = np.reshape(image_batch,
                                         [1, image_h, image_w, image_c])
                label_batch = np.reshape(label_batch, [1, image_h, image_w, 1])

                if FLAG_BAYES is False:
                    fetches = [prediction]
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.is_training_pl: False,
                        self.keep_prob_pl: 0.5,
                        self.batch_size_pl: 1
                    }
                    pred = sess.run(fetches=fetches, feed_dict=feed_dict)
                    pred = np.reshape(pred, [image_h, image_w])
                    var_one = []
                else:
                    feed_dict = {
                        self.inputs_pl: image_batch,
                        self.labels_pl: label_batch,
                        self.is_training_pl: False,
                        self.keep_prob_pl: 0.5,
                        self.with_dropout_pl: True,
                        self.batch_size_pl: 1
                    }
                    prob_iter_tot = []
                    pred_iter_tot = []
                    for iter_step in range(num_sample_generate):
                        prob_iter_step = sess.run(fetches=[prob],
                                                  feed_dict=feed_dict)
                        prob_iter_tot.append(prob_iter_step)
                        pred_iter_tot.append(
                            np.reshape(np.argmax(prob_iter_step, axis=-1),
                                       [-1]))

                    if FLAG_MAX_VOTE is True:
                        prob_variance, pred = MAX_VOTE(
                            pred_iter_tot, prob_iter_tot,
                            self.config["NUM_CLASSES"])
                        #acc_per = np.mean(np.equal(pred,np.reshape(label_batch,[-1])))
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])
                    else:
                        prob_mean = np.nanmean(prob_iter_tot, axis=0)
                        prob_variance = np.var(prob_iter_tot, axis=0)
                        pred = np.reshape(
                            np.argmax(prob_mean, axis=-1), [-1]
                        )  #pred is the predicted label with the mean of generated samples
                        #THIS TIME I DIDN'T INCLUDE TAU
                        var_one = var_calculate(pred, prob_variance)
                        pred = np.reshape(pred, [image_h, image_w])

                pred_tot.append(pred)
                var_tot.append(var_one)

            draw_plots_bayes(images, labels, pred_tot, var_tot)
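
A hypothetical invocation of the method above, toggling between majority-vote and mean-probability aggregation; `model` is an illustrative instance:

# Hypothetical usage; `model` is an instance of the surrounding class.
model.visual_results(dataset_type="TEST", images_index=[0, 50],
                     FLAG_MAX_VOTE=True)   # majority vote over 30 samples
model.visual_results(dataset_type="VAL", images_index=3,
                     FLAG_MAX_VOTE=False)  # mean softmax instead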
Example #14
    def test(self):
        image_filename, label_filename = get_filename_list(self.test_file)

        with tf.Session() as sess:
            # Restore saved session
            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.runtime_dir))

            loss, accuracy, prediction = normal_loss(self.logits,
                                                     self.labels_pl,
                                                     self.n_classes)

            images, labels = get_all_test_data(image_filename, label_filename)

            NUM_SAMPLE = []
            for i in range(30):
                NUM_SAMPLE.append(2 * i + 1)

            acc_final = []
            iu_final = []
            iu_mean_final = []
            # uncomment the line below to only run for two times.
            # NUM_SAMPLE = [1, 30]
            NUM_SAMPLE = [1]
            for num_sample_generate in NUM_SAMPLE:

                loss_tot = []
                acc_tot = []

                hist = np.zeros((self.n_classes, self.n_classes))
                step = 0
                for image_batch, label_batch in zip(images, labels):
                    image_batch = np.reshape(
                        image_batch,
                        [1, self.input_h, self.input_w, self.input_c])
                    label_batch = np.reshape(
                        label_batch, [1, self.input_h, self.input_w, 1])
                    # comment the code below to apply the dropout for all the samples
                    if num_sample_generate == 1:
                        feed_dict = {
                            self.inputs_pl: image_batch,
                            self.labels_pl: label_batch,
                            self.is_training_pl: False,
                            self.keep_prob_pl: 0.5,
                            self.with_dropout_pl: False,
                            self.batch_size_pl: 1
                        }
                    else:
                        feed_dict = {
                            self.inputs_pl: image_batch,
                            self.labels_pl: label_batch,
                            self.is_training_pl: False,
                            self.keep_prob_pl: 0.5,
                            self.with_dropout_pl: True,
                            self.batch_size_pl: 1
                        }

                    loss_per, acc_per, logit, pred = sess.run(
                        [loss, accuracy, self.logits, prediction],
                        feed_dict=feed_dict)

                    loss_tot.append(loss_per)
                    acc_tot.append(acc_per)
                    print(
                        "Image Index {}: TEST Loss {:6.3f}, TEST Accu {:6.3f}".
                        format(step, loss_tot[-1], acc_tot[-1]))
                    step = step + 1
                    per_class_acc(logit, label_batch, self.n_classes)
                    hist += get_hist(logit, label_batch)

                acc_tot = np.diag(hist).sum() / hist.sum()
                iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) -
                                      np.diag(hist))

                print("Total Accuracy for test image: ", acc_tot)
                print("Total MoI for test images: ", iu)
                print("mean MoI for test images: ", np.nanmean(iu))

                acc_final.append(acc_tot)
                iu_final.append(iu)
                iu_mean_final.append(np.nanmean(iu))

            return acc_final, iu_final, iu_mean_final
Example #15
    def visual_results(self,
                       dataset_type="TEST",
                       indices=None,
                       n_samples=3,
                       model_file=None):

        with tf.Session() as sess:

            # Restore saved session
            saver = tf.train.Saver()

            if model_file is None:
                saver.restore(sess,
                              tf.train.latest_checkpoint(FLAGS.runtime_dir))
            else:
                saver.restore(sess, os.path.join(FLAGS.runtime_dir,
                                                 model_file))

            _, _, prediction = cal_loss(logits=self.logits,
                                        labels=self.labels_pl,
                                        n_classes=self.n_classes)

            test_type_path = None
            if dataset_type == 'TRAIN':
                test_type_path = self.train_file
            elif dataset_type == 'VAL':
                test_type_path = self.val_file
            elif dataset_type == 'TEST':
                test_type_path = self.test_file

            # Load images
            image_filenames, label_filenames = get_filename_list(
                test_type_path)
            images, labels = get_all_test_data(image_filenames,
                                               label_filenames)

            if not indices:
                indices = random.sample(range(len(images)), n_samples)

            # Keep images subset of length images_index
            images = [images[i] for i in indices]
            labels = [labels[i] for i in indices]

            pred_tot = []

            for image_batch, label_batch in zip(images, labels):
                image_batch = np.reshape(
                    image_batch, [1, self.input_h, self.input_w, self.input_c])
                label_batch = np.reshape(label_batch,
                                         [1, self.input_h, self.input_w, 1])

                fetches = [prediction]
                feed_dict = {
                    self.inputs_pl: image_batch,
                    self.labels_pl: label_batch,
                    self.is_training_pl: False,
                    self.keep_prob_pl: 0.5,
                    self.batch_size_pl: 1
                }
                pred = sess.run(fetches=fetches, feed_dict=feed_dict)
                pred = np.reshape(pred, [self.input_h, self.input_w])
                pred_tot.append(pred)

            draw_plots_bayes(images, labels, pred_tot)
Example #16
    def train(self):
        image_filename, label_filename = get_filename_list(self.train_file)
        val_image_filename, val_label_filename = get_filename_list(
            self.val_file)

        if self.images_tr is None:
            self.images_tr, self.labels_tr = dataset_inputs(
                image_filename, label_filename, FLAGS.batch_size, self.input_w,
                self.input_h, self.input_c)
            self.images_val, self.labels_val = dataset_inputs(
                val_image_filename, val_label_filename, FLAGS.batch_size,
                self.input_w, self.input_h, self.input_c)

        loss, accuracy, predictions = cal_loss(logits=self.logits,
                                               labels=self.labels_pl,
                                               n_classes=self.n_classes)
        train, global_step = train_op(loss, FLAGS.learning_rate)

        tf.summary.scalar("global_step", global_step)
        tf.summary.scalar("total loss", loss)

        # Calculate total number of trainable parameters
        total_parameters = 0
        for variable in tf.trainable_variables():
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print('Total Trainable Parameters: ', total_parameters)

        with tf.train.SingularMonitoredSession(
                # save/load model state
                checkpoint_dir=FLAGS.runtime_dir,
                hooks=[
                    tf.train.StopAtStepHook(last_step=FLAGS.n_epochs),
                    tf.train.CheckpointSaverHook(
                        checkpoint_dir=FLAGS.runtime_dir,
                        save_steps=FLAGS.checkpoint_frequency,
                        saver=tf.train.Saver()),
                    tf.train.SummarySaverHook(
                        save_steps=FLAGS.summary_frequency,
                        output_dir=FLAGS.runtime_dir,
                        scaffold=tf.train.Scaffold(
                            summary_op=tf.summary.merge_all()),
                    )
                ],
                config=tf.ConfigProto(log_device_placement=True)) as mon_sess:

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord, sess=mon_sess)

            while not mon_sess.should_stop():

                image_batch, label_batch = mon_sess.raw_session().run(
                    [self.images_tr, self.labels_tr])
                feed_dict = {
                    self.inputs_pl: image_batch,
                    self.labels_pl: label_batch,
                    self.is_training_pl: True,
                    self.keep_prob_pl: 0.5,
                    self.with_dropout_pl: True,
                    self.batch_size_pl: FLAGS.batch_size
                }

                step, _, training_loss, training_acc = mon_sess.run(
                    [global_step, train, loss, accuracy], feed_dict=feed_dict)

                print("Iteration {}: Train Loss {:9.6f}, Train Accu {:9.6f}".
                      format(step, training_loss, training_acc))

                # Check against validation set
                if step % FLAGS.validate_frequency == 0:
                    sampled_losses = []
                    sampled_accuracies = []

                    hist = np.zeros((self.n_classes, self.n_classes))

                    for test_step in range(20):
                        fetches_valid = [loss, accuracy, self.logits]
                        image_batch_val, label_batch_val = mon_sess.raw_session(
                        ).run([self.images_val, self.labels_val])

                        feed_dict_valid = {
                            self.inputs_pl: image_batch_val,
                            self.labels_pl: label_batch_val,
                            self.is_training_pl: True,
                            self.keep_prob_pl: 1.0,
                            self.with_dropout_pl: False,
                            self.batch_size_pl: FLAGS.batch_size
                        }

                        validate_loss, validate_acc, predictions = mon_sess.raw_session(
                        ).run(fetches_valid, feed_dict_valid)
                        sampled_losses.append(validate_loss)
                        sampled_accuracies.append(validate_acc)
                        hist += get_hist(predictions, label_batch_val)

                    print_hist_summary(hist)

                    # Average loss and accuracy over n samples from validation set
                    avg_loss = np.mean(sampled_losses)
                    avg_acc = np.mean(sampled_accuracies)

                    print(
                        "Iteration {}: Avg Val Loss {:9.6f}, Avg Val Acc {:9.6f}"
                        .format(step, avg_loss, avg_acc))

            coord.request_stop()
            coord.join(threads)
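
The parameter-counting loop near the top of this example can be written more compactly with the same tf.trainable_variables() API; a sketch:

import numpy as np
import tensorflow as tf

# Sum of the element counts of every trainable variable's static shape.
total_parameters = sum(
    int(np.prod(v.get_shape().as_list())) for v in tf.trainable_variables())
print('Total Trainable Parameters: ', total_parameters)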