Code Example #1
    def normal_evaluate(self, dataset='valid', train_step=None):
        num_batch = self.num_test_batch if dataset == 'test' else self.num_val_batch
        self.sess.run(tf.local_variables_initializer())
        hist = np.zeros((self.conf.num_cls, self.conf.num_cls))
        plot_inputs = np.zeros((0, self.conf.height, self.conf.width, self.conf.channel))
        plot_mask = np.zeros((0, self.conf.height, self.conf.width))
        plot_mask_pred = np.zeros((0, self.conf.height, self.conf.width))
        for step in range(num_batch):
            start = self.conf.val_batch_size * step
            end = self.conf.val_batch_size * (step + 1)
            data_x, data_y = self.data_reader.next_batch(start=start, end=end, mode=dataset)
            feed_dict = {self.inputs_pl: data_x,
                         self.labels_pl: data_y,
                         self.is_training_pl: True,
                         self.with_dropout_pl: False,
                         self.keep_prob_pl: 1}
            self.sess.run([self.mean_loss_op, self.mean_accuracy_op], feed_dict=feed_dict)
            mask_pred = self.sess.run(self.y_pred, feed_dict=feed_dict)
            hist += get_hist(mask_pred.flatten(), data_y.flatten(), num_cls=self.conf.num_cls)
            if plot_inputs.shape[0] < 100:  # keep collecting slices to plot and save until ~100 accumulate
                # idx = np.random.randint(self.conf.batch_size)
                plot_inputs = np.concatenate((plot_inputs, data_x.reshape(-1, self.conf.height, self.conf.width,
                                                                          self.conf.channel)), axis=0)
                plot_mask = np.concatenate((plot_mask, data_y.reshape(-1, self.conf.height, self.conf.width)),
                                           axis=0)
                plot_mask_pred = np.concatenate(
                    (plot_mask_pred, mask_pred.reshape(-1, self.conf.height, self.conf.width)), axis=0)

        IOU, ACC = compute_iou(hist)
        mean_IOU = np.mean(IOU)
        loss, acc = self.sess.run([self.mean_loss, self.mean_accuracy])
        if dataset == "valid":  # save the summaries and improved model in validation mode
            summary_valid = self.sess.run(self.merged_summary, feed_dict=feed_dict)
            self.save_summary(summary_valid, train_step, is_train=False)
            if loss < self.best_validation_loss:
                self.best_validation_loss = loss
                if mean_IOU > self.best_mean_IOU:
                    self.best_mean_IOU = mean_IOU
                    print('>>>>>>>> Both model validation loss and mean IOU improved; saving the model......')
                else:
                    print('>>>>>>>> model validation loss improved; saving the model......')
                self.save(train_step)
            elif mean_IOU > self.best_mean_IOU:
                self.best_mean_IOU = mean_IOU
                print('>>>>>>>> model mean IOU improved; saving the model......')
                self.save(train_step)

        print('****** IoU & ACC ******')
        print('Mean IoU = {0:.01%}, valid_loss = {1:.4f}'.format(mean_IOU, loss))
        for ii in range(self.conf.num_cls):
            print('     - {0:<15}: IoU={1:<5.01%}, ACC={2:<5.01%}'.format(self.conf.label_name[ii], IOU[ii], ACC[ii]))
        print('-' * 20)
        self.visualize(plot_inputs, plot_mask, plot_mask_pred, train_step=train_step, mode='valid')
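All of the examples rely on get_hist and compute_iou from the project's evaluation utilities (utils.eval_utils in Code Example #7). Their implementations are not shown here; a minimal sketch consistent with how they are called above (prediction and label arrays in, a num_cls x num_cls confusion histogram out, per-class IoU and accuracy back) might look like this:

import numpy as np


def get_hist(pred, label, num_cls):
    # Confusion histogram: hist[i, j] counts pixels with ground-truth class i
    # that were predicted as class j (boolean indexing flattens the arrays).
    valid = (label >= 0) & (label < num_cls)
    return np.bincount(num_cls * label[valid].astype(int) + pred[valid].astype(int),
                       minlength=num_cls ** 2).reshape(num_cls, num_cls)


def compute_iou(hist):
    # Per-class IoU = TP / (TP + FP + FN) and per-class accuracy = TP / (TP + FN).
    tp = np.diag(hist)
    union = hist.sum(axis=1) + hist.sum(axis=0) - tp
    iou = tp / np.maximum(union, 1)
    acc = tp / np.maximum(hist.sum(axis=1), 1)
    return iou, acc

Here ACC is per-class recall (diagonal over the ground-truth row sums), which matches compute_metrics in Code Example #7.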
Code Example #2
    def normal_evaluate(self, dataset='valid', train_step=None):
        num_batch = self.num_test_batch if dataset == 'test' else self.num_val_batch
        self.sess.run(tf.local_variables_initializer())
        hist = np.zeros((self.conf.num_cls, self.conf.num_cls))
        scan_num = 0
        for image_index in range(num_batch):
            data_x, data_y = self.data_reader.next_batch(num=scan_num, mode=dataset)
            depth = data_x.shape[0] * data_x.shape[-2]
            scan_input = np.zeros((self.conf.height, self.conf.width, depth, self.conf.channel))
            scan_mask = np.zeros((self.conf.height, self.conf.width, depth))
            scan_mask_pred = np.zeros((self.conf.height, self.conf.width, depth))
            for slice_num in range(data_x.shape[0]):  # for each slice of the 3D image
                feed_dict = {self.inputs_pl: np.expand_dims(data_x[slice_num], 0),
                             self.labels_pl: np.expand_dims(data_y[slice_num], 0),
                             self.is_training_pl: True,
                             self.with_dropout_pl: False,
                             self.keep_prob_pl: 1}
                self.sess.run([self.mean_loss_op, self.mean_accuracy_op], feed_dict=feed_dict)
                inputs, mask, mask_pred = self.sess.run([self.inputs_pl,
                                                         self.labels_pl,
                                                         self.y_pred], feed_dict=feed_dict)
                hist += get_hist(mask_pred.flatten(), mask.flatten(), num_cls=self.conf.num_cls)
                idx_d, idx_u = slice_num * self.conf.Dcut_size, (slice_num + 1) * self.conf.Dcut_size
                scan_input[:, :, idx_d:idx_u] = np.squeeze(inputs, axis=0)
                scan_mask[:, :, idx_d:idx_u] = np.squeeze(mask, axis=0)
                scan_mask_pred[:, :, idx_d:idx_u] = np.squeeze(mask_pred, axis=0)
            self.visualize_me(np.squeeze(scan_input), scan_mask, scan_mask_pred, train_step=train_step,
                              img_idx=image_index, mode='valid')
            scan_num += 1
        IOU, ACC = compute_iou(hist)
        mean_IOU = np.mean(IOU)
        loss, acc = self.sess.run([self.mean_loss, self.mean_accuracy])

        if dataset == "valid":  # save the summaries and improved model in validation mode
            summary_valid = self.sess.run(self.merged_summary, feed_dict=feed_dict)
            self.save_summary(summary_valid, train_step, is_train=False)
            if loss < self.best_validation_loss:
                self.best_validation_loss = loss
                print('>>>>>>>> model validation loss improved; saving the model......')
                self.save(train_step)

        print('After {0} training steps: val_loss = {1:.4f}, val_acc = {2:.01%}'.format(train_step, loss, acc))
        print('- IOU: bg={0:.01%}, liver={1:.01%}, spleen={2:.01%}, '
              'kidney={3:.01%}, bone={4:.01%}, vessel={5:.01%}, mean_IoU={6:.01%}'
              .format(IOU[0], IOU[1], IOU[2], IOU[3], IOU[4], IOU[5], mean_IOU))
        print('- ACC: bg={0:.01%}, liver={1:.01%}, spleen={2:.01%}, '
              'kidney={3:.01%}, bone={4:.01%}, vessel={5:.01%}'
              .format(ACC[0], ACC[1], ACC[2], ACC[3], ACC[4], ACC[5]))
        print('-' * 60)
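Code Example #2 hard-codes the six class names (bg, liver, spleen, kidney, bone, vessel) in its report. If self.conf.label_name carries the same names in order (as Code Example #1 assumes), the per-class lines can be generated from the config instead, which keeps the report in sync with num_cls:

        # Per-class report driven by the config, mirroring Code Example #1.
        for ii in range(self.conf.num_cls):
            print('     - {0:<15}: IoU={1:<5.01%}, ACC={2:<5.01%}'.format(
                self.conf.label_name[ii], IOU[ii], ACC[ii]))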
Code Example #3
    def MC_evaluate(self, dataset='valid', train_step=None):
        num_batch = self.num_test_batch if dataset == 'test' else self.num_val_batch
        hist = np.zeros((self.conf.num_cls, self.conf.num_cls))
        self.sess.run(tf.local_variables_initializer())
        all_inputs = np.zeros(
            (0, self.conf.height, self.conf.width, self.conf.channel))
        all_mask = np.zeros((0, self.conf.height, self.conf.width))
        all_pred = np.zeros((0, self.conf.height, self.conf.width))
        all_var = np.zeros((0, self.conf.height, self.conf.width))
        cls_uncertainty = np.zeros(
            (0, self.conf.height, self.conf.width, self.conf.num_cls))
        for step in tqdm(range(num_batch)):
            start = self.conf.val_batch_size * step
            end = self.conf.val_batch_size * (step + 1)
            data_x, data_y = self.data_reader.next_batch(start=start,
                                                         end=end,
                                                         mode=dataset)
            mask_pred_mc = [
                np.zeros((self.conf.val_batch_size, self.conf.height,
                          self.conf.width))
                for _ in range(self.conf.monte_carlo_simulations)
            ]
            mask_prob_mc = [
                np.zeros((self.conf.val_batch_size, self.conf.height,
                          self.conf.width, self.conf.num_cls))
                for _ in range(self.conf.monte_carlo_simulations)
            ]
            feed_dict = {
                self.inputs_pl: data_x,
                self.labels_pl: data_y,
                self.is_training_pl: True,
                self.with_dropout_pl: True,
                self.keep_prob_pl: self.conf.keep_prob
            }
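            # Monte Carlo sampling: dropout stays active at evaluation time
            # (with_dropout_pl=True, keep_prob < 1), and several stochastic
            # forward passes sample the predictive distribution.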
            for mc_iter in range(self.conf.monte_carlo_simulations):
                inputs, mask, mask_prob, mask_pred = self.sess.run(
                    [self.inputs_pl, self.labels_pl, self.y_prob, self.y_pred],
                    feed_dict=feed_dict)
                mask_prob_mc[mc_iter] = mask_prob
                mask_pred_mc[mc_iter] = mask_pred

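            # Aggregate the samples: the mean probability gives the prediction,
            # and the variance across samples (averaged over classes) gives the
            # per-pixel uncertainty map.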
            prob_mean = np.nanmean(mask_prob_mc, axis=0)
            prob_variance = np.var(mask_prob_mc, axis=0)
            pred = np.argmax(prob_mean, axis=-1)
            var_one = np.nanmean(prob_variance, axis=-1)
            # var_one = var_calculate_2d(pred, prob_variance)
            hist += get_hist(pred.flatten(),
                             mask.flatten(),
                             num_cls=self.conf.num_cls)

            # if all_inputs.shape[0] < 6:
            # ii = np.random.randint(self.conf.val_batch_size)
            # ii = 1
            all_inputs = np.concatenate(
                (all_inputs,
                 inputs.reshape(-1, self.conf.height, self.conf.width,
                                self.conf.channel)),
                axis=0)
            all_mask = np.concatenate(
                (all_mask, mask.reshape(-1, self.conf.height,
                                        self.conf.width)),
                axis=0)
            all_pred = np.concatenate(
                (all_pred, pred.reshape(-1, self.conf.height,
                                        self.conf.width)),
                axis=0)
            all_var = np.concatenate(
                (all_var, var_one.reshape(-1, self.conf.height,
                                          self.conf.width)),
                axis=0)
            cls_uncertainty = np.concatenate(
                (cls_uncertainty,
                 prob_variance.reshape(-1, self.conf.height, self.conf.width,
                                       self.conf.num_cls)),
                axis=0)
            # else:
        self.visualize(all_inputs,
                       all_mask,
                       all_pred,
                       all_var,
                       cls_uncertainty,
                       train_step=train_step)
        import h5py
        h5f = h5py.File(self.conf.run_name + '_bayes.h5', 'w')
        h5f.create_dataset('x', data=all_inputs)
        h5f.create_dataset('y', data=all_mask)
        h5f.create_dataset('y_pred', data=all_pred)
        h5f.create_dataset('y_var', data=all_var)
        h5f.create_dataset('cls_uncertainty', data=cls_uncertainty)
        h5f.close()

        uncertainty_measure = get_uncertainty_measure(all_inputs, all_mask,
                                                      all_pred, all_var)

        # break
        IOU, ACC = compute_iou(hist)
        mean_IOU = np.mean(IOU)
        print('****** IoU & ACC ******')
        print('Uncertainty Quality Measure = {}'.format(uncertainty_measure))
        print('Mean IoU = {0:.01%}'.format(mean_IOU))
        for ii in range(self.conf.num_cls):
            print('     - {0} class: IoU={1:.01%}, ACC={2:.01%}'.format(
                self.conf.label_name[ii], IOU[ii], ACC[ii]))
        print('-' * 20)
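Code Example #3 writes its Monte Carlo results to <run_name>_bayes.h5 with the datasets 'x', 'y', 'y_pred', 'y_var' and 'cls_uncertainty'. A minimal way to load them back for offline analysis (the same pattern Code Example #7 uses for its own file; the file name below is a placeholder for self.conf.run_name + '_bayes.h5'):

import h5py

with h5py.File('my_run_bayes.h5', 'r') as h5f:    # placeholder file name
    x = h5f['x'][:]                               # input slices
    y = h5f['y'][:]                               # ground-truth masks
    y_pred = h5f['y_pred'][:]                     # MC-averaged predictions
    y_var = h5f['y_var'][:]                       # per-pixel uncertainty map
    cls_uncertainty = h5f['cls_uncertainty'][:]   # per-class predictive variance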
Code Example #4
    def normal_evaluate(self, train_step=None):
        hist = np.zeros((self.conf.num_cls, self.conf.num_cls))
        plot_inputs = np.zeros(
            (0, self.conf.height, self.conf.width, self.conf.channel))
        plot_mask = np.zeros((0, self.conf.height, self.conf.width))
        plot_mask_pred = np.zeros((0, self.conf.height, self.conf.width))

        if self.conf.mode == 'train':
            self.sess.run(self.valid_iterator.initializer)
            self.handle_ = self.sess.run(self.valid_iterator.string_handle())
        elif self.conf.mode == 'test':
            self.sess.run(self.test_iterator.initializer)
            self.handle_ = self.sess.run(self.test_iterator.string_handle())

        while True:
            try:
                feed_dict = {
                    self.is_training_pl: True,
                    self.with_dropout_pl: False,
                    self.keep_prob_pl: 1,
                    self.handle: self.handle_
                }
                _, _, mask_pred, data_x, data_y, summary_valid = self.sess.run(
                    [
                        self.mean_loss_op, self.mean_accuracy_op, self.y_pred,
                        self.inputs_pl, self.labels_pl, self.merged_summary
                    ],
                    feed_dict=feed_dict)
                hist += get_hist(mask_pred.flatten(),
                                 data_y.flatten(),
                                 num_cls=self.conf.num_cls)
                # keep collecting slices to plot until ~100 accumulate
                if plot_inputs.shape[0] < 100:
                    # idx = np.random.randint(self.conf.batch_size)
                    plot_inputs = np.concatenate(
                        (plot_inputs,
                         data_x.reshape(-1, self.conf.height, self.conf.width,
                                        self.conf.channel)),
                        axis=0)
                    plot_mask = np.concatenate(
                        (plot_mask,
                         data_y.reshape(-1, self.conf.height,
                                        self.conf.width)),
                        axis=0)
                    plot_mask_pred = np.concatenate(
                        (plot_mask_pred,
                         mask_pred.reshape(-1, self.conf.height,
                                           self.conf.width)),
                        axis=0)
            except tf.errors.OutOfRangeError:
                # assert len(validation_predictions) == VALIDATION_SIZE
                IOU, ACC = compute_iou(hist)
                mean_IOU = np.mean(IOU)
                loss, acc = self.sess.run([self.mean_loss, self.mean_accuracy])
                if self.conf.mode == "train":  # save the summaries and improved model in validation mode
                    # summary_valid = self.sess.run(self.merged_summary, feed_dict=feed_dict)
                    self.save_summary(summary_valid,
                                      train_step,
                                      is_train=False)
                    if loss < self.best_validation_loss:
                        self.best_validation_loss = loss
                        print(
                            '>>>>>>>> model validation loss improved; saving the model......'
                        )
                        self.save(train_step)
                    elif mean_IOU > self.best_mean_IOU:
                        self.best_mean_IOU = mean_IOU
                        print(
                            '>>>>>>>> model mean IOU improved; saving the model......'
                        )
                        self.save(train_step)

                print('****** IoU & ACC ******')
                print('Mean IoU = {0:.01%}'.format(mean_IOU))
                for ii in range(self.conf.num_cls):
                    print('     - {0:<15}: IoU={1:<5.01%}, ACC={2:<5.01%}'.
                          format(self.conf.label_name[ii], IOU[ii], ACC[ii]))
                print('-' * 20)
                self.visualize(plot_inputs,
                               plot_mask,
                               plot_mask_pred,
                               train_step=train_step)
                break
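Code Example #4 feeds an iterator string handle (self.handle / self.handle_) instead of numpy batches, so the graph must be wired with TF1's feedable-iterator pattern. That setup is not shown in the snippet; a rough sketch of what it likely looks like (valid_dataset and test_dataset stand in for the project's tf.data.Dataset objects):

import tensorflow as tf  # TF1-style graph code, as in the snippets above

# A string-handle placeholder selects which dataset feeds the model at run time.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, valid_dataset.output_types, valid_dataset.output_shapes)
inputs_pl, labels_pl = iterator.get_next()

# One initializable iterator per split; its string handle is evaluated once
# and then fed through feed_dict, exactly as Code Example #4 does.
valid_iterator = valid_dataset.make_initializable_iterator()
test_iterator = test_dataset.make_initializable_iterator()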
Code Example #5
    def normal_evaluate(self, dataset='valid', train_step=None):
        valiter = CityscapesDataset(which_set='val',
                                    batch_size=self.conf.val_batch_size,
                                    seq_per_subset=0,
                                    seq_length=0,
                                    return_one_hot=False,
                                    return_01c=True,
                                    use_threads=True,
                                    return_list=True,
                                    nthreads=8,
                                    infinite_iterator=False)
        num_batch = self.num_test_batch if dataset == 'test' else 500
        self.sess.run(tf.local_variables_initializer())
        hist = np.zeros((self.conf.num_cls, self.conf.num_cls))
        plot_inputs = np.zeros(
            (0, self.conf.height, self.conf.width, self.conf.channel))
        plot_mask = np.zeros((0, self.conf.height, self.conf.width))
        plot_mask_pred = np.zeros((0, self.conf.height, self.conf.width))
        for step in range(num_batch):
            valid_data = next(valiter)
            feed_dict = {
                self.inputs_pl: valid_data[0],
                self.labels_pl: valid_data[1],
                self.is_training_pl: True,
                self.with_dropout_pl: False,
                self.keep_prob_pl: 1
            }
            self.sess.run([self.mean_loss_op, self.mean_accuracy_op],
                          feed_dict=feed_dict)
            mask_pred = self.sess.run(self.y_pred, feed_dict=feed_dict)
            hist += get_hist(mask_pred.flatten(),
                             valid_data[1].flatten(),
                             num_cls=self.conf.num_cls)
            # keep collecting slices to plot and save until ~20 accumulate
            if plot_inputs.shape[0] < 20:
                # idx = np.random.randint(self.conf.batch_size)
                plot_inputs = np.concatenate(
                    (plot_inputs, valid_data[0].reshape(
                        -1, self.conf.height, self.conf.width,
                        self.conf.channel)),
                    axis=0)
                plot_mask = np.concatenate((plot_mask, valid_data[1].reshape(
                    -1, self.conf.height, self.conf.width)),
                                           axis=0)
                plot_mask_pred = np.concatenate(
                    (plot_mask_pred,
                     mask_pred.reshape(-1, self.conf.height, self.conf.width)),
                    axis=0)
                # self.visualize(plot_inputs, plot_mask, plot_mask_pred, train_step=train_step, mode='valid')

        IOU, ACC = compute_iou(hist)
        mean_IOU = np.mean(IOU)
        loss, acc = self.sess.run([self.mean_loss, self.mean_accuracy])
        if dataset == "valid":  # save the summaries and improved model in validation mode
            summary_valid = self.sess.run(self.merged_summary,
                                          feed_dict=feed_dict)
            self.save_summary(summary_valid, train_step, is_train=False)
            if loss < self.best_validation_loss:
                self.best_validation_loss = loss
                if mean_IOU > self.best_mean_IOU:
                    self.best_mean_IOU = mean_IOU
                    print(
                        '>>>>>>>> Both model validation loss and mean IOU improved; saving the model......'
                    )
                else:
                    print(
                        '>>>>>>>> model validation loss improved; saving the model......'
                    )
                self.save(train_step)
            elif mean_IOU > self.best_mean_IOU:
                self.best_mean_IOU = mean_IOU
                print(
                    '>>>>>>>> model mean IOU improved; saving the model......')
                self.save(train_step)

        print('****** IoU & ACC ******')
        print('Mean IoU = {0:.01%}, valid_loss = {1:.4f}'.format(
            mean_IOU, loss))
        for ii in range(self.conf.num_cls):
            print('     - {0:<15}: IoU={1:<5.01%}, ACC={2:<5.01%}'.format(
                self.conf.label_name[ii], IOU[ii], ACC[ii]))
        print('-' * 20)
        self.visualize(plot_inputs,
                       plot_mask,
                       plot_mask_pred,
                       train_step=train_step,
                       mode='valid')
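Code Example #5 requests a fixed 500 validation batches from a loader built with infinite_iterator=False. If the loader can run out of batches before that, the per-step fetch can be guarded; a minimal sketch for the inside of the loop, assuming the loader follows the standard Python iterator protocol:

            # Stop cleanly if the loader is exhausted before num_batch iterations.
            try:
                valid_data = next(valiter)
            except StopIteration:
                break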
Code Example #6
File: base_model.py  Project: ypy516478793/OS
    def MC_evaluate(self, dataset='valid', train_step=None):
        num_batch = self.num_test_batch if dataset == 'test' else self.num_val_batch
        hist = np.zeros((self.conf.num_cls, self.conf.num_cls))
        self.sess.run(tf.local_variables_initializer())
        all_inputs = np.zeros(
            (0, self.conf.height, self.conf.width, self.conf.channel))
        all_mask = np.zeros((0, self.conf.height, self.conf.width))
        all_pred = np.zeros((0, self.conf.height, self.conf.width))
        all_var = np.zeros((0, self.conf.height, self.conf.width))
        cls_uncertainty = np.zeros(
            (0, self.conf.height, self.conf.width, self.conf.num_cls))
        mask_prob_mc_list = []
        mask_prob_mean_list = []
        for step in tqdm(range(num_batch)):
            start = self.conf.val_batch_size * step
            end = self.conf.val_batch_size * (step + 1)
            data_x, data_y = self.data_reader.next_batch(start=start,
                                                         end=end,
                                                         mode=dataset)
            # mask_pred_mc = [np.zeros((self.conf.val_batch_size, self.conf.height, self.conf.width))
            #                 for _ in range(self.conf.monte_carlo_simulations)]
            mask_prob_mc = [
                np.zeros((self.conf.val_batch_size, self.conf.height,
                          self.conf.width, self.conf.num_cls))
                for _ in range(self.conf.monte_carlo_simulations)
            ]
            feed_dict = {
                self.inputs_pl: data_x,
                self.labels_pl: data_y,
                self.is_training_pl: True,
                self.with_dropout_pl: True,
                self.keep_prob_pl: self.conf.keep_prob
            }
            for mc_iter in range(self.conf.monte_carlo_simulations):
                inputs, mask, mask_prob = self.sess.run(
                    [self.inputs_pl, self.labels_pl, self.y_prob],
                    feed_dict=feed_dict)
                mask_prob_mc[mc_iter] = mask_prob
                # mask_pred_mc[mc_iter] = mask_pred

            mask_prob_mc_list.append(mask_prob_mc)
            prob_mean = np.nanmean(mask_prob_mc, axis=0)
            prob_variance = np.var(mask_prob_mc, axis=0)
            pred = np.argmax(prob_mean, axis=-1)
            # var_one = np.nanmean(prob_variance, axis=-1)
            # var_one = var_calculate_2d(pred, prob_variance)
            # var_one = predictive_entropy(prob_mean)
            var_one = mutual_info(prob_mean, mask_prob_mc)
            mask_prob_mean_list.append([pred, var_one])
            hist += get_hist(pred.flatten(),
                             mask.flatten(),
                             num_cls=self.conf.num_cls)

            if all_inputs.shape[0] < 1000:
                all_inputs = np.concatenate(
                    (all_inputs,
                     inputs.reshape(-1, self.conf.height, self.conf.width,
                                    self.conf.channel)),
                    axis=0)
                all_mask = np.concatenate(
                    (all_mask,
                     mask.reshape(-1, self.conf.height, self.conf.width)),
                    axis=0)
                all_pred = np.concatenate(
                    (all_pred,
                     pred.reshape(-1, self.conf.height, self.conf.width)),
                    axis=0)
                all_var = np.concatenate(
                    (all_var,
                     var_one.reshape(-1, self.conf.height, self.conf.width)),
                    axis=0)

                cls_uncertainty = np.concatenate(
                    (cls_uncertainty,
                     prob_variance.reshape(-1, self.conf.height,
                                           self.conf.width,
                                           self.conf.num_cls)),
                    axis=0)
        # savePickle("dropconnect_pred_1.pkl", mask_prob_mc_list[:25])
        # savePickle("dropconnect_pred_mean_1.pkl", mask_prob_mean_list[:25])
        # savePickle("dropconnect_pred_2.pkl", mask_prob_mc_list[25:])
        # savePickle("dropconnect_pred_mean_2.pkl", mask_prob_mean_list[25:])
        self.visualize(all_inputs,
                       all_mask,
                       all_pred,
                       all_var,
                       cls_uncertainty,
                       train_step=train_step,
                       mode='test')
        IOU, ACC = compute_iou(hist)
        mean_IOU = np.mean(IOU)
        print('****** IoU & ACC ******')
        print('Mean IoU = {0:.01%}'.format(mean_IOU))
        for ii in range(self.conf.num_cls):
            print('     - {0} class: IoU={1:.01%}, ACC={2:.01%}'.format(
                self.conf.label_name[ii], IOU[ii], ACC[ii]))
        print('-' * 20)
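Code Examples #3 and #6 summarize the Monte Carlo samples with different per-pixel uncertainty measures: mean class variance, predictive entropy, and mutual information (the call var_one = mutual_info(prob_mean, mask_prob_mc) in Code Example #6). The project's predictive_entropy and mutual_info are not shown; the standard definitions, matching the shapes used above (prob_mean of shape (batch, height, width, num_cls), mask_prob_mc a list of per-pass probability maps), are roughly:

import numpy as np

EPS = 1e-12  # avoids log(0)


def predictive_entropy(prob_mean):
    # Entropy of the MC-averaged predictive distribution, per pixel.
    return -np.sum(prob_mean * np.log(prob_mean + EPS), axis=-1)


def mutual_info(prob_mean, mask_prob_mc):
    # BALD-style epistemic uncertainty: H[mean_t p_t] - mean_t H[p_t].
    probs = np.asarray(mask_prob_mc)  # (T, batch, height, width, num_cls)
    expected_entropy = -np.mean(np.sum(probs * np.log(probs + EPS), axis=-1), axis=0)
    return predictive_entropy(prob_mean) - expected_entropy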
Code Example #7
import h5py
import numpy as np
from utils.eval_utils import get_hist

run_name = 'dropconnect_MI_uncertainty'
num_cls = 6


def compute_metrics(hist):
    intersection = np.diag(hist)
    ground_truth_set = hist.sum(axis=1)
    predicted_set = hist.sum(axis=0)
    union = ground_truth_set + predicted_set - intersection
    mIoU = np.mean(intersection / union.astype(np.float32))
    p_acc = np.sum(np.diag(hist)) / np.sum(hist)
    m_acc = (1 / hist.shape[0]) * np.sum(np.diag(hist) / np.sum(hist, axis=1))
    return mIoU, p_acc, m_acc


h5f = h5py.File(run_name + '.h5', 'r')
y = h5f['y'][:]
y_pred = h5f['y_pred'][:]
h5f.close()

hist = get_hist(y_pred.astype(int), y.astype(int), num_cls)
mean_iou, pixel_acc, mean_acc = compute_metrics(hist)

print('mean_IoU = {0:.01%}, pixel_acc = {1:.01%}, mean_acc = {2:.01%}'.format(
    mean_iou, pixel_acc, mean_acc))