def visual_results(self, dataset_type="TEST", images_index=3, FLAG_MAX_VOTE=False): image_w = self.config["INPUT_WIDTH"] image_h = self.config["INPUT_HEIGHT"] image_c = self.config["INPUT_CHANNELS"] train_dir = self.config["SAVE_MODEL_DIR"] FLAG_BAYES = self.config["BAYES"] with self.sess as sess: # Restore saved session saver = tf.train.Saver() saver.restore(sess, train_dir) kernel = variable_with_weight_decay('weights', initializer=initialization( 1, 64), shape=[1, 1, 64, 3], wd=False) conv = tf.nn.conv2d(self.deconv1_3, kernel, [1, 1, 1, 1], padding='SAME') biases = variable_with_weight_decay('biases', tf.constant_initializer(0.0), shape=[3], wd=False) logits = tf.nn.bias_add(conv, biases, name="scope.name") #exit() sess.run(tf.global_variables_initializer()) #sess.run(logits) _, _, prediction = cal_loss(logits=logits, labels=self.labels_pl) prob = tf.nn.softmax(logits, dim=-1) print( "===================================================================================" ) print(prediction) #exit() if (dataset_type == 'TRAIN'): test_type_path = self.config["TRAIN_FILE"] if type(images_index) == list: indexes = images_index else: indexes = random.sample(range(367), images_index) #indexes = [0,75,150,225,300] elif (dataset_type == 'VAL'): test_type_path = self.config["VAL_FILE"] if type(images_index) == list: indexes = images_index else: indexes = random.sample(range(101), images_index) #indexes = [0,25,50,75,100] elif (dataset_type == 'TEST'): test_type_path = self.config["TEST_FILE"] if type(images_index) == list: indexes = images_index else: indexes = random.sample(range(233), images_index) #indexes = [0,50,100,150,200] # Load images image_filename, label_filename = get_filename_list( test_type_path, self.config) images, labels = get_all_test_data(image_filename, label_filename) # Keep images subset of length images_index images = [images[i] for i in indexes] labels = [labels[i] for i in indexes] num_sample_generate = 30 pred_tot = [] var_tot = [] for image_batch, label_batch in zip(images, labels): image_batch = np.reshape(image_batch, [1, image_h, image_w, image_c]) label_batch = np.reshape(label_batch, [1, image_h, image_w, 1]) if FLAG_BAYES is False: fetches = [prediction] feed_dict = { self.inputs_pl: image_batch, self.labels_pl: label_batch, self.is_training_pl: False, self.keep_prob_pl: 0.5, self.batch_size_pl: 1 } pred = sess.run(fetches=fetches, feed_dict=feed_dict) pred = np.reshape(pred, [image_h, image_w]) var_one = [] else: feed_dict = { self.inputs_pl: image_batch, self.labels_pl: label_batch, self.is_training_pl: False, self.keep_prob_pl: 0.5, self.with_dropout_pl: True, self.batch_size_pl: 1 } prob_iter_tot = [] pred_iter_tot = [] for iter_step in range(num_sample_generate): prob_iter_step = sess.run(fetches=[prob], feed_dict=feed_dict) prob_iter_tot.append(prob_iter_step) pred_iter_tot.append( np.reshape(np.argmax(prob_iter_step, axis=-1), [-1])) if FLAG_MAX_VOTE is True: prob_variance, pred = MAX_VOTE( pred_iter_tot, prob_iter_tot, self.config["NUM_CLASSES"]) #acc_per = np.mean(np.equal(pred,np.reshape(label_batch,[-1]))) var_one = var_calculate(pred, prob_variance) pred = np.reshape(pred, [image_h, image_w]) else: prob_mean = np.nanmean(prob_iter_tot, axis=0) prob_variance = np.var(prob_iter_tot, axis=0) pred = np.reshape( np.argmax(prob_mean, axis=-1), [-1] ) #pred is the predicted label with the mean of generated samples #THIS TIME I DIDN'T INCLUDE TAU var_one = var_calculate(pred, prob_variance) pred = np.reshape(pred, [image_h, image_w]) pred_tot.append(pred) 
var_tot.append(var_one) draw_plots_bayes(images, labels, pred_tot, var_tot)
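
# NOTE: the per-pixel uncertainty that var_calculate derives from prob_variance can be
# reproduced with plain NumPy. The sketch below is illustrative only and is NOT the
# repo's var_calculate: it assumes the MC-dropout softmax samples are stacked into an
# array of shape [num_samples, num_pixels, num_classes] and reports, per pixel, the
# variance of the predicted class's probability.
import numpy as np


def mc_dropout_uncertainty(prob_samples):
    """Illustrative sketch (assumed shapes, not the repo's var_calculate).

    prob_samples: float array [num_samples, num_pixels, num_classes] of softmax
    outputs collected over MC-dropout forward passes.
    Returns (pred, var_one): the per-pixel label from the mean softmax and the
    variance of that label's probability across the samples.
    """
    prob_mean = np.nanmean(prob_samples, axis=0)    # [num_pixels, num_classes]
    prob_variance = np.var(prob_samples, axis=0)    # [num_pixels, num_classes]
    pred = np.argmax(prob_mean, axis=-1)            # [num_pixels]
    # Uncertainty map: variance of the winning class at each pixel
    var_one = prob_variance[np.arange(pred.shape[0]), pred]
    return pred, var_one
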
def visual_results(self, dataset_type = "TEST", images_index = 3, FLAG_MAX_VOTE = False): image_w = self.config["INPUT_WIDTH"] image_h = self.config["INPUT_HEIGHT"] image_c = self.config["INPUT_CHANNELS"] train_dir = self.config["SAVE_MODEL_DIR"] FLAG_BAYES = self.config["BAYES"] print(FLAG_BAYES) with self.sess as sess: # Restore saved session saver = tf.train.Saver() saver.restore(sess, train_dir) _, _, prediction = cal_loss(logits=self.logits, labels=self.labels_pl,number_class=self.num_classes) prob = tf.nn.softmax(self.logits,dim = -1) if (dataset_type=='TRAIN'): test_type_path = self.config["TRAIN_FILE"] if type(images_index) == list: indexes = images_index else: '''CHANGE IT BACK''' #indexes = random.sample(range(367),images_index) indexes = random.sample(range(6),images_index) #indexes = [0,75,150,225,300] elif (dataset_type=='VAL'): test_type_path = self.config["VAL_FILE"] if type(images_index) == list: indexes = images_index else: #indexes = random.sample(range(101),images_index) indexes = random.sample(range(10),images_index) #indexes = [0,25,50,75,100] elif (dataset_type=='TEST'): test_type_path = self.config["TEST_FILE"] if type(images_index) == list: indexes = images_index else: indexes = random.sample(range(5),images_index) #indexes = random.sample(range(233),images_index) #indexes = [0,50,100,150,200] # Load images image_filename,label_filename = get_filename_list(test_type_path, self.config) images, labels = get_all_test_data(image_filename,label_filename) # Keep images subset of length images_index images = [images[i] for i in indexes] labels = [labels[i] for i in indexes] num_sample_generate = 30 pred_tot = [] var_tot = [] print(image_c) for image_batch, label_batch in zip(images,labels): print(image_batch.shape) image_batch = np.reshape(image_batch,[1,image_h,image_w,image_c]) label_batch = np.reshape(label_batch,[1,image_h,image_w,1]) if FLAG_BAYES is False: print("NON BAYES") fetches = [prediction] feed_dict = {self.inputs_pl: image_batch, self.labels_pl: label_batch, self.is_training_pl: False, self.keep_prob_pl: 0.5, self.batch_size_pl: 1} pred = sess.run(fetches = fetches, feed_dict = feed_dict) pred = np.reshape(pred,[image_h,image_w]) var_one = [] else: feed_dict = {self.inputs_pl: image_batch, self.labels_pl: label_batch, self.is_training_pl: False, self.keep_prob_pl: 0.5, self.with_dropout_pl: True, self.batch_size_pl: 1} prob_iter_tot = [] pred_iter_tot = [] for iter_step in range(num_sample_generate): prob_iter_step = sess.run(fetches = [prob], feed_dict = feed_dict) prob_iter_tot.append(prob_iter_step) pred_iter_tot.append(np.reshape(np.argmax(prob_iter_step,axis = -1),[-1])) if FLAG_MAX_VOTE is True: prob_variance,pred = MAX_VOTE(pred_iter_tot,prob_iter_tot,self.config["NUM_CLASSES"]) #acc_per = np.mean(np.equal(pred,np.reshape(label_batch,[-1]))) var_one = var_calculate(pred,prob_variance) pred = np.reshape(pred,[image_h,image_w]) else: prob_mean = np.nanmean(prob_iter_tot,axis = 0) prob_variance = np.var(prob_iter_tot, axis = 0) pred = np.reshape(np.argmax(prob_mean,axis = -1),[-1]) #pred is the predicted label with the mean of generated samples #THIS TIME I DIDN'T INCLUDE TAU var_one = var_calculate(pred,prob_variance) pred = np.reshape(pred,[image_h,image_w]) pred_tot.append(pred) var_tot.append(var_one) draw_plots_bayes(images, labels, pred_tot, var_tot) return (images,labels,pred_tot,var_tot)
def visual_results(self, dataset_type="TEST", indices=None, n_samples=3, model_file=None): with tf.Session() as sess: # Restore saved session saver = tf.train.Saver() if model_file is None: saver.restore(sess, tf.train.latest_checkpoint(FLAGS.runtime_dir)) else: saver.restore(sess, os.path.join(FLAGS.runtime_dir, model_file)) _, _, prediction = cal_loss(logits=self.logits, labels=self.labels_pl, n_classes=self.n_classes) test_type_path = None if dataset_type == 'TRAIN': test_type_path = self.train_file elif dataset_type == 'VAL': test_type_path = self.val_file elif dataset_type == 'TEST': test_type_path = self.test_file # Load images image_filenames, label_filenames = get_filename_list( test_type_path) images, labels = get_all_test_data(image_filenames, label_filenames) if not indices: indices = random.sample(range(len(images)), n_samples) # Keep images subset of length images_index images = [images[i] for i in indices] labels = [labels[i] for i in indices] pred_tot = [] for image_batch, label_batch in zip(images, labels): image_batch = np.reshape( image_batch, [1, self.input_h, self.input_w, self.input_c]) label_batch = np.reshape(label_batch, [1, self.input_h, self.input_w, 1]) fetches = [prediction] feed_dict = { self.inputs_pl: image_batch, self.labels_pl: label_batch, self.is_training_pl: False, self.keep_prob_pl: 0.5, self.batch_size_pl: 1 } pred = sess.run(fetches=fetches, feed_dict=feed_dict) pred = np.reshape(pred, [self.input_h, self.input_w]) pred_tot.append(pred) draw_plots_bayes(images, labels, pred_tot)