Ejemplo n.º 1
0
    def grad_cam(self, step_num):
        """Generate Grad-CAM and Guided Backpropagation visualizations for the test set.

        Reloads the checkpoint saved at ``step_num``, builds the gradient ops
        needed for Grad-CAM and guided backprop, then runs them batch by batch
        over the test set and hands the results to ``visualize``.

        Args:
            step_num (int): checkpoint step number to reload.
        """
        # Cross-entropy-style cost used for guided backpropagation. The small
        # epsilon guards tf.log against prob == 0, which would otherwise yield
        # -inf cost and NaN gradients.
        cost = (-1) * tf.reduce_sum(tf.multiply(self.y, tf.log(self.prob + 1e-10)), axis=1)
        # Gradient for partial linearization. We only care about the target
        # visualization class, so select the logit of the true class.
        y_c = tf.reduce_sum(tf.multiply(self.logits, self.y), axis=1)   # outputs before softmax
        # Last convolutional layer activations and their gradient w.r.t. the
        # target-class score, used to build the Grad-CAM heat map.
        target_conv_layer = self.net_grad   # e.g. pool5; presumably (batch, 7, 7, 512) -- confirm against the model
        target_conv_layer_grad = tf.gradients(y_c, target_conv_layer)[0]
        # Guided backpropagation all the way back to the input layer.
        gb_grad = tf.gradients(cost, self.x)[0]

        self.sess.run(tf.local_variables_initializer())
        self.reload(step_num)
        from DataLoaders.ApoptosisLoader import DataLoader
        self.data_reader = DataLoader(self.conf)
        self.data_reader.get_data(mode='test')
        self.num_test_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode='test')

        for step in range(self.num_test_batch):
            start = step * self.conf.batch_size
            end = (step + 1) * self.conf.batch_size
            x_test, y_test = self.data_reader.next_batch(start, end, mode='test')
            prob, gb_grad_value, target_conv_layer_value, target_conv_layer_grad_value = self.sess.run(
                [self.prob, gb_grad, target_conv_layer, target_conv_layer_grad],
                feed_dict={self.x: x_test, self.y: y_test})

            # One figure per batch, named img_0, img_1, ...
            visualize(x_test, target_conv_layer_value, target_conv_layer_grad_value, gb_grad_value,
                      prob, y_test, img_size=self.conf.height, fig_name='img_' + str(step))
Ejemplo n.º 2
0
 def test(self, step_num):
     """Evaluate the trained model on the test set.

     Reloads the checkpoint saved at ``step_num``, streams the test set
     batch by batch while accumulating the streaming loss/accuracy metrics,
     then prints overall loss/accuracy, a confusion matrix, and
     precision/recall statistics.

     Args:
         step_num (int): checkpoint step number to reload.

     Raises:
         ValueError: if ``self.conf.data`` names an unknown dataset.
     """
     self.sess.run(tf.local_variables_initializer())
     self.reload(step_num)
     # Pick the data loader matching the configured dataset; fail loudly on
     # an unknown name instead of dying later with a NameError on DataLoader.
     if self.conf.data == 'mnist':
         from DataLoaders.MNISTLoader import DataLoader
     elif self.conf.data == 'nodule':
         from DataLoaders.DataLoader import DataLoader
     elif self.conf.data == 'cifar10':
         from DataLoaders.CIFARLoader import DataLoader
     elif self.conf.data == 'apoptosis':
         from DataLoaders.ApoptosisLoader import DataLoader
     else:
         raise ValueError('Unknown dataset: {}'.format(self.conf.data))
     self.data_reader = DataLoader(self.conf)
     self.data_reader.get_data(mode='test')
     self.num_test_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode='test')
     self.is_train = False
     # Reset the streaming metric accumulators before the evaluation pass.
     self.sess.run(tf.local_variables_initializer())
     y_pred = np.zeros((self.data_reader.y_test.shape[0]))
     y_prob = np.zeros((self.data_reader.y_test.shape[0], self.conf.num_cls))
     for step in range(self.num_test_batch):
         start = step * self.conf.batch_size
         end = (step + 1) * self.conf.batch_size
         x_test, y_test = self.data_reader.next_batch(start, end, mode='test')
         feed_dict = {self.x: x_test, self.y: y_test, self.is_training: False}
         # Running the *_op update ops folds this batch into the streaming metrics.
         yp, yprob, _, _ = self.sess.run([self.y_pred, self.prob, self.mean_loss_op, self.mean_accuracy_op],
                                         feed_dict=feed_dict)
         y_pred[start:end] = yp
         y_prob[start:end] = yprob
     test_loss, test_acc = self.sess.run([self.mean_loss, self.mean_accuracy])
     print('-' * 18 + 'Test Completed' + '-' * 18)
     print('test_loss= {0:.4f}, test_acc={1:.01%}'.format(test_loss, test_acc))
     print(confusion_matrix(np.argmax(self.data_reader.y_test, axis=1), y_pred))
     print('-' * 50)
     # NOTE(review): y_prob[:, 1] assumes a binary problem -- confirm num_cls == 2.
     Precision, Recall, thresholds = precision_recall_curve(np.argmax(self.data_reader.y_test, axis=1), y_prob[:, 1])
     precision_recall(np.argmax(self.data_reader.y_test, axis=1), y_pred)
Ejemplo n.º 3
0
    def get_features(self, step_num):
        """Extract per-frame features for the train and test sets and save them to HDF5.

        Reloads the checkpoint saved at ``step_num``, runs a full evaluation
        pass over the test set and then the training set, collecting the
        512-dim feature vectors (``self.features``) for every time step, and
        writes the reshaped sequences to ``features.h5``.

        Args:
            step_num (int): checkpoint step number to reload.
        """
        self.sess.run(tf.local_variables_initializer())
        self.reload(step_num)
        from DataLoaders.Sequential_ApoptosisLoader import DataLoader
        self.data_reader = DataLoader(self.conf)
        self.data_reader.get_data(mode='train')
        self.data_reader.get_data(mode='test')
        self.num_train_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode='train')
        self.num_test_batch = self.data_reader.count_num_batch(self.conf.batch_size, mode='test')
        self.is_train = False

        # ---------------- test-set pass ----------------
        self.sess.run(tf.local_variables_initializer())
        # Predictions/features are flattened over time: one row per frame.
        y_pred = np.zeros((self.data_reader.y_test.shape[0]) * self.conf.max_time)
        features = np.zeros((self.data_reader.y_test.shape[0] * self.conf.max_time, 512))
        for step in range(self.num_test_batch):
            start = step * self.conf.batch_size
            end = (step + 1) * self.conf.batch_size
            x_test, y_test = self.data_reader.next_batch(start, end, mode='test')
            feed_dict = {self.x: x_test, self.y: y_test, self.is_training: False}
            yp, feats, _, _ = self.sess.run([self.y_pred, self.features, self.mean_loss_op, self.mean_accuracy_op],
                                            feed_dict=feed_dict)
            # Each batch covers batch_size sequences of max_time frames.
            y_pred[start * self.conf.max_time:end * self.conf.max_time] = yp
            features[start * self.conf.max_time:end * self.conf.max_time] = feats
        test_features = np.reshape(features, [-1, self.conf.max_time, 512])
        test_loss, test_acc = self.sess.run([self.mean_loss, self.mean_accuracy])
        print('-' * 18 + 'Test Completed' + '-' * 18)
        print('test_loss= {0:.4f}, test_acc={1:.01%}'.format(test_loss, test_acc))
        y_true = np.reshape(np.argmax(self.data_reader.y_test, axis=-1), [-1])
        print(confusion_matrix(y_true, y_pred))
        print('-' * 50)

        # ---------------- train-set pass ----------------
        self.sess.run(tf.local_variables_initializer())
        y_pred = np.zeros((self.data_reader.y_train.shape[0]) * self.conf.max_time)
        features = np.zeros((self.data_reader.y_train.shape[0] * self.conf.max_time, 512))
        # BUGFIX: iterate over the number of *train* batches (was num_test_batch,
        # which skipped or overran the training data).
        for step in range(self.num_train_batch):
            start = step * self.conf.batch_size
            end = (step + 1) * self.conf.batch_size
            x_train, y_train = self.data_reader.next_batch(start, end, mode='train')
            feed_dict = {self.x: x_train, self.y: y_train, self.is_training: False}
            yp, feats, _, _ = self.sess.run([self.y_pred, self.features, self.mean_loss_op, self.mean_accuracy_op],
                                            feed_dict=feed_dict)
            y_pred[start * self.conf.max_time:end * self.conf.max_time] = yp
            features[start * self.conf.max_time:end * self.conf.max_time] = feats
        train_features = np.reshape(features, [-1, self.conf.max_time, 512])
        train_loss, train_acc = self.sess.run([self.mean_loss, self.mean_accuracy])
        # These figures come from the training set; label them accordingly.
        print('-' * 18 + 'Train Completed' + '-' * 18)
        print('train_loss= {0:.4f}, train_acc={1:.01%}'.format(train_loss, train_acc))
        y_true = np.reshape(np.argmax(self.data_reader.y_train, axis=-1), [-1])
        print(confusion_matrix(y_true, y_pred))
        print('-' * 50)

        import h5py
        data_dir = '/home/cougarnet.uh.edu/amobiny/Desktop/Apoptosis_Project/data/'
        h5f = h5py.File(data_dir + 'features.h5', 'w')
        h5f.create_dataset('X_train', data=train_features)
        h5f.create_dataset('Y_train', data=self.data_reader.y_train)
        # NOTE(review): test features are stored as both valid and test splits;
        # there is no separate validation pass here -- confirm this is intended.
        h5f.create_dataset('X_valid', data=test_features)
        h5f.create_dataset('Y_valid', data=self.data_reader.y_test)
        h5f.create_dataset('X_test', data=test_features)
        h5f.create_dataset('Y_test', data=self.data_reader.y_test)
        h5f.close()
Ejemplo n.º 4
0
    def train(self):
        """Run the training loop for ``conf.max_epoch`` epochs.

        Loads the dataset named by ``conf.data``, optionally resumes from
        ``conf.reload_step``, then trains batch by batch, logging summaries
        every ``conf.SUMMARY_FREQ`` steps and evaluating/saving once per epoch.

        Raises:
            ValueError: if ``self.conf.data`` names an unknown dataset.
        """
        self.sess.run(tf.local_variables_initializer())
        # Pick the data loader matching the configured dataset; fail loudly on
        # an unknown name instead of dying later with a NameError on DataLoader.
        if self.conf.data == 'mnist':
            from DataLoaders.MNISTLoader import DataLoader
        elif self.conf.data == 'nodule':
            from DataLoaders.DataLoader import DataLoader
        else:
            raise ValueError('Unknown dataset: {}'.format(self.conf.data))

        self.data_reader = DataLoader(self.conf)
        self.data_reader.get_validation()
        if self.conf.reload_step > 0:
            self.reload(self.conf.reload_step)
            print('*' * 50)
            print('----> Continue Training from step #{}'.format(
                self.conf.reload_step))
            print('*' * 50)
        else:
            print('*' * 50)
            print('----> Start Training')
            print('*' * 50)
        # Partial trailing batches are dropped by the integer division.
        self.num_train_batch = int(self.data_reader.y_train.shape[0] /
                                   self.conf.batch_size)
        self.num_val_batch = int(self.data_reader.y_valid.shape[0] /
                                 self.conf.val_batch_size)
        for epoch in range(self.conf.max_epoch):
            # Reshuffle the training data at the start of every epoch.
            self.data_reader.randomize()
            for train_step in range(self.num_train_batch):
                start = train_step * self.conf.batch_size
                end = (train_step + 1) * self.conf.batch_size
                global_step = epoch * self.num_train_batch + train_step
                x_batch, y_batch = self.data_reader.next_batch(start, end)
                feed_dict = {
                    self.x: x_batch,
                    self.y: y_batch,
                    self.mask_with_labels: True
                }
                if train_step % self.conf.SUMMARY_FREQ == 0:
                    # Every SUMMARY_FREQ steps also fetch summaries and print
                    # the current streaming loss/accuracy.
                    _, _, _, summary = self.sess.run([
                        self.train_op, self.mean_loss_op,
                        self.mean_accuracy_op, self.merged_summary
                    ],
                                                     feed_dict=feed_dict)
                    loss, acc = self.sess.run(
                        [self.mean_loss, self.mean_accuracy])
                    self.save_summary(summary, global_step, mode='train')
                    print(
                        'step: {0:<6}, train_loss= {1:.4f}, train_acc={2:.01%}'
                        .format(train_step, loss, acc))
                else:
                    self.sess.run([
                        self.train_op, self.mean_loss_op, self.mean_accuracy_op
                    ],
                                  feed_dict=feed_dict)

            # Validate and checkpoint once per epoch.
            self.evaluate(epoch)
            self.save(epoch)
Ejemplo n.º 5
0
 def train(self):
     """Prepare data for the configured dataset and delegate to ``train_loop``.

     Raises:
         ValueError: if ``self.conf.data`` names an unknown dataset.
     """
     self.sess.run(tf.local_variables_initializer())
     # Track the best validation accuracy seen so far (used by train_loop).
     self.best_validation_accuracy = 0
     # Pick the data loader matching the configured dataset; fail loudly on
     # an unknown name instead of dying later with a NameError on DataLoader.
     if self.conf.data == 'mnist':
         from DataLoaders.MNISTLoader import DataLoader
     elif self.conf.data == 'nodule':
         from DataLoaders.DataLoader import DataLoader
     elif self.conf.data == 'cifar10':
         from DataLoaders.CIFARLoader import DataLoader
     elif self.conf.data == 'apoptosis':
         from DataLoaders.ApoptosisLoader import DataLoader
     else:
         raise ValueError('Unknown dataset: {}'.format(self.conf.data))
     self.data_reader = DataLoader(self.conf)
     self.data_reader.get_data(mode='train')
     self.data_reader.get_data(mode='valid')
     self.train_loop()