Code Example #1
File: deploy.py Project: Mulugeta/DLTK
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = sorted(
        [os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path, '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc, time.time() - t0, output_fn))
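All of these examples score their segmentations with `metrics.dice` from DLTK, which returns one score per class with the background at index 0. As a reference for what that call computes, here is a minimal per-class Dice sketch in plain NumPy; the function name `dice_per_class` and the NaN handling for absent classes are illustrative assumptions, not DLTK's actual implementation.

import numpy as np

def dice_per_class(predictions, labels, num_classes):
    # Per-class Dice: 2*|intersection| / (|pred| + |label|) on integer
    # label maps of identical shape
    dscs = np.zeros(num_classes)
    for c in range(num_classes):
        pred_c = (predictions == c)
        lbl_c = (labels == c)
        denom = pred_c.sum() + lbl_c.sum()
        # A class absent from both maps gives 0/0; report NaN so that
        # np.nanmean (as used above) can skip it
        dscs[c] = 2.0 * np.logical_and(pred_c, lbl_c).sum() / denom \
            if denom > 0 else np.nan
    return dscs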
Code Example #2
def eval_mrbrains_metrics(y_, y):
    """
        Re-map the labels to CSF, GM, WM and evaluate the metrics for MRBrainS13:
        
        from: 
        1. Cortical gray matter
        2. Basal ganglia
        3. White matter
        4. White matter lesions
        5. Cerebrospinal fluid in the extracerebral space
        6. Ventricles
        7. Cerebellum
        8. Brainstem

        to:
        
        1. CSF
        2. GM
        3. WM

        Parameters
        ----------
        y_ : np array
            a prediction label map

        y : np array
            a validation label map

        Returns
        -------
        dscs : np array
            per-class Dice similarity coefficients of the mapped label maps

        avds : np array
            per-class absolute volume differences of the mapped label maps
    """

    y_mapped_ = np.zeros_like(y_)
    y_mapped_[y_ == 1] = 2
    y_mapped_[y_ == 2] = 2
    y_mapped_[y_ == 3] = 3
    y_mapped_[y_ == 4] = 3
    y_mapped_[y_ == 5] = 1
    y_mapped_[y_ == 6] = 1

    y_mapped = np.zeros_like(y)
    y_mapped[y == 1] = 2
    y_mapped[y == 2] = 2
    y_mapped[y == 3] = 3
    y_mapped[y == 4] = 3
    y_mapped[y == 5] = 1
    y_mapped[y == 6] = 1

    # remove voxel locations that are Cerebellum or Brainstem from evaluation
    y_mapped_[y == 7] = 0
    y_mapped_[y == 8] = 0

    # compute metrics between the mapped labelmaps:
    dscs = metrics.dice(y_mapped_, y_mapped, num_classes=4)
    avds = metrics.abs_vol_difference(y_mapped_, y_mapped, num_classes=4)

    return dscs, avds
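A quick synthetic check of the remapping above, assuming DLTK's metrics module is importable; the volume shape and random labels are made-up values for illustration only.

import numpy as np

# Two random label maps with the original eight MRBrainS13 classes,
# standing in for a prediction and a validation volume
rng = np.random.RandomState(42)
y_pred = rng.randint(0, 9, size=(48, 48, 48))
y_val = rng.randint(0, 9, size=(48, 48, 48))

dscs, avds = eval_mrbrains_metrics(y_pred, y_val)
print('Dice (bg, CSF, GM, WM):', dscs)
print('Abs. volume difference:', avds)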
Code Example #3
def infer(args):
    s = tf.Session()

    filenames = pd.read_csv(args.csv, dtype=str).values

    inputs, outputs = ResNetFCN.load(args.model_path, s)

    r = reader.OrgansReader([tf.float32, tf.int32],
                            [[None, None, None, 1], [None, None, None]])  # ,name='val_queue')

    # Collect per-subject Dice scores; num_classes is assumed to be
    # defined at module level
    DSC_all = []

    for f in filenames:

        x, y = r._read_sample([f], is_training=False)

        sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])

        # Allocate the prediction output and a counter for averaging probabilities
        y_prob = np.zeros(y.shape + (num_classes,))
        y_pred_count = np.zeros_like(y_prob)
        for slicer in sw:
            y_sw = s.run(outputs['y_prob'], feed_dict={inputs[0]: x[slicer]})
            y_prob[slicer] += y_sw
            y_pred_count[slicer] += 1

        y_prob /= y_pred_count
        
        y_ = np.argmax(y_prob, axis=-1)

        dscs = metrics.dice(y_, y, num_classes)
        
        print(f[0] + ';  mean DSC = {:.3f}\n\t'.format(np.mean(dscs[1:]))
              + ', '.join(['DSC {}: {:.3f}'.format(i, dsc) for i, dsc in enumerate(dscs)]))

        y_ = np.squeeze(y_, axis=0)

        itk_prediction = sitk.GetImageFromArray(y_)
        ds = np.transpose(dscs)
        DSC_all.append(ds)

    np.save('DSC_MR.npy', DSC_all)
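The accumulate-and-divide pattern above (add each window's probabilities into y_prob, count every voxel's coverage in y_pred_count, then divide) is how predictions from overlapping windows get averaged. A 1-D toy version with made-up window and stride sizes shows the idea:

import numpy as np

signal_len, window, stride = 10, 4, 2
prob = np.zeros(signal_len)
count = np.zeros(signal_len)

for start in range(0, signal_len - window + 1, stride):
    sl = slice(start, start + window)
    prob[sl] += 1.0   # stand-in for one window of network probabilities
    count[sl] += 1

prob /= count          # voxels covered by two windows are averaged
print(count)           # [1. 1. 2. 2. 2. 2. 2. 2. 1. 1.]

In the example above the stride equals the window size, so every count is 1 and the division is a no-op; the pattern only starts averaging once the stride is smaller than the window.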
Code Example #4
    def calculate_dice(self, predict_nii):
        """
        根据传预测输出的 nii 和 Label 计算 Dice
        :param predict_nii: 预测出来用于计算 Dice 的 nii
        :return: 保存各类 Dice 值的数组 -> [背景Dice, 灰质Dice, 白质Dice, 脑脊液Dice]
        """
        if self.label_path:
            print('------------>> step7: calculate dice...')
            label_data = image.load_img(self.label_path).get_fdata()

            # Some odd label files contain non-integer values, e.g. a label of
            # 2 stored as 1.999999, so round to the nearest integer here
            label_data = np.rint(label_data)

            predict_data = image.load_img(predict_nii).get_fdata()
            num_classes = len(np.unique(label_data))
            print(np.unique(label_data))
            print(num_classes)
            dict_arr = dice(predictions=predict_data,
                            labels=label_data,
                            num_classes=num_classes)
            print(dict_arr)
            return dict_arr
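The rounding step above matters because nearest-integer rounding, unlike the truncation an int cast performs, recovers labels stored with floating-point error; a two-line check:

import numpy as np

noisy = np.array([1.999999, 3.000001])
print(noisy.astype(int))   # [1 3] - truncation corrupts the 2 label
print(np.rint(noisy))      # [2. 3.] - rounding recovers it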
Code Example #5
def validate(ops, session, supervisor, name, v_all=True):
    """
        Run an inference on a validation dataset

        Parameters
        ----------
        ops : dict
            a dictionary containing all validation ops

        session : tf.Session object

        supervisor : tf.train.Supervisor object

        name : string
            prefix for the generated tensorboard summaries

        v_all : bool
            if True, validate on all datasets, otherwise only on the first 4

        Returns
        -------
        val_summaries : list
            image and scalar summaries of the validation run
    """

    # Pick num_validation_examples datasets to validate on
    if v_all:
        num_validation_examples = len(ops['filenames'])
    else:
        num_validation_examples = 4

    val_idx = range(num_validation_examples)

    # Track loss and Dice similarity coefficients as validation metrics
    val_loss = []
    val_dscs = []
#    val_orig_dscs = []

    # Iterate through the datasets and perform a sliding window inference
    for f in ops['filenames'][val_idx]:

        # Read a validation image and label of arbitrary dimensions
        val_x, val_y = ops['read_func']([f])

#        pid = os.path.basename(f[-1]).split('_')[0]
        pid = 'Subj.' + f[0].split('p/')[1][:2]

        y_prob = sliding_window_segmentation_inference(session, [ops['y_prob']], {ops['x']: val_x}, batch_size=16)[0]

        y_ = np.argmax(y_prob, axis=-1)

        # Compute the performance metrics for the dataset
        dscs = metrics.dice(y_, val_y, num_classes)
        loss = metrics.crossentropy(y_prob, np.eye(num_classes)[val_y.astype(int)], logits=False)
#        print(pid + '; CE= {:.6f}; DSC: l1 = {:.3f}'.format(loss, dscs[1]))
        print(pid + '; CE= {:.6f}; DSC: LVR = {:.3f}, SPL = {:.3f}, RKDN = {:.3f}, LKDN = {:.3f}'.format(
            loss, dscs[1], dscs[2], dscs[3], dscs[4]))
        # Collect the metrics over the validation data
        val_loss.append(loss)
        val_dscs.append(dscs)

    np.save(args.save_dscs, val_dscs) 
    mean_dscs = np.mean(val_dscs, axis=0)
    mean_loss = np.mean(val_loss, axis=0)

    print('Mean; CE= {:.6f}; DSC: l1 = {:.3f}'.format(mean_loss, mean_dscs[1]))

    # Add the last computed dataset as an numpy image summary
    img_summaries = [modules.image_summary(val_x[0], name + '_img'),
                     modules.image_summary(val_y[0, :, :, :, np.newaxis] / num_classes, name + '_lbl'),
                     modules.image_summary(y_[0, :, :, :, np.newaxis] / num_classes, name + '_pred')]

    metrics_summaries = [modules.scalar_summary(mean_loss, name + '/ce'),
                         modules.scalar_summary(mean_dscs.mean(), name + '/dsc'),
                         modules.scalar_summary(mean_dscs[1:].mean(), name + '/dsc_wo_bg'),
                        ] + [modules.scalar_summary(mean_dscs[i + 1], name + '/dsc_lbl{}'.format(i + 1))
                             for i in range(num_classes - 1)]

    val_summaries = img_summaries + metrics_summaries
    return val_summaries
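The np.eye(num_classes)[val_y.astype(int)] expression in the loop above is the usual NumPy idiom for one-hot encoding an integer label map, which a cross-entropy computation needs so it can be compared voxel-wise against the probability volume. A minimal sketch with a hypothetical 3-class map and a hand-rolled cross entropy standing in for metrics.crossentropy:

import numpy as np

num_classes = 3
labels = np.array([[0, 2], [1, 1]])       # integer label map, shape (2, 2)
onehot = np.eye(num_classes)[labels]      # one-hot volume, shape (2, 2, 3)

probs = np.full(labels.shape + (num_classes,), 1.0 / num_classes)
ce = -np.mean(np.sum(onehot * np.log(probs + 1e-8), axis=-1))
print(ce)   # ~log(3) = 1.0986 for a uniform prediction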
Code Example #6
def train(args):
    """
        Complete training and validation script. Additionally, saves inference model, trained weights and summaries.

        Parameters
        ----------
        args : argparse.Namespace
            contains all necessary command line arguments

        Returns
        -------
    """

    if not args.resume:
        os.system("rm -rf %s" % args.save_path)
        os.system("mkdir -p %s" % args.save_path)
    else:
        print('Resuming training')

    g = tf.Graph()
    with g.as_default():

        # Set a seed
        np.random.seed(1337)
        tf.set_random_seed(1337)

        # Build the network graph
        net = ResNetFCN(num_classes,
                        num_residual_units=3,
                        filters=[16, 32, 64, 128, 256],
                        strides=[[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [1, 1, 1]])

        # I/O ops via a custom reader with queueing
        print('Loading training file names from %s' % args.train_csv)
        train_ops = create_ops(net, mode='train')

        train_ops['filenames'] = pd.read_csv(args.train_csv, dtype=object).values
        train_ops['reader'] = reader.OrgansReader([64, 64, 64], name='train_queue')
        train_ops['inputs'] = train_ops['reader'](
            train_ops['filenames'],
            batch_size=tps.batch_size, n_examples=32,
            min_queue_examples=tps.batch_size * 3,
            capacity=tps.batch_size * 8, num_readers=4)
        train_ops['read_func'] = lambda x: train_ops['reader']._read_sample(x, is_training=False)

        print('Loading training file names from %s' % args.train_csv2)
        train_ops['filenames2'] = pd.read_csv(args.train_csv2, dtype=object).values
        train_ops['reader2'] = reader.OrgansReader([64, 64, 64], name='train_queue2')
        train_ops['inputs2'] = train_ops['reader2'](
            train_ops['filenames2'],
            batch_size=tps.batch_size, n_examples=32,
            min_queue_examples=tps.batch_size * 3,
            capacity=tps.batch_size * 8, num_readers=4)
        train_ops['read_func2'] = lambda x: train_ops['reader2']._read_sample(x, is_training=False)
        
        if args.run_validation:
            print('Loading validation file names from %s' % args.val_csv)
            val_ops = create_ops(net, mode='val')

            val_ops['filenames'] = pd.read_csv(args.val_csv, dtype=str).values
            val_ops['reader'] = reader.OrgansReader([None, None, None],
                                                    name='val_queue')
            # Get the read function with is_training=False to prevent augmentation
            val_ops['read_func'] = lambda x: val_ops['reader']._read_sample(x, is_training=False)

        # Define and set up a training supervisor, handling queues and logging for tensorboard
        net.save_metagraph(os.path.join(args.save_path, 'saves'), is_training=False)
        sv = tf.train.Supervisor(logdir=args.save_path,
                                 is_chief=True,
                                 summary_op=None,
                                 save_summaries_secs=tps.save_summary_sec,
                                 save_model_secs=tps.save_model_sec,
                                 global_step=train_ops['global_step'])

        s = sv.prepare_or_wait_for_session(config=tf.ConfigProto())

        # Main training loop
        step = s.run(train_ops['global_step']) if args.resume else 0
        while not sv.should_stop():

            # Alternate batches between the two training queues
            if step % 2 == 0:
                x, y = s.run(train_ops['inputs'])
            else:
                x, y = s.run(train_ops['inputs2'])

            feed_dict = {train_ops['x']: x, train_ops['y']: y}
            (_, train_loss, train_y_) = s.run(
                [train_ops['optimiser'], train_ops['loss_all'], train_ops['y_']],
                feed_dict=feed_dict)

            # Evaluation of training and validation data
            if step % tps.steps_eval == 0:

                # Save the complete model
                net.save_model(os.path.join(args.save_path, 'saves'), s)

                # Compute the loss and summaries and save them to tensorboard
                (train_loss, train_y_, train_summaries) = s.run([train_ops['loss_all'], train_ops['y_'],
                                                                 train_ops['summaries']], feed_dict=feed_dict)
                sv.summary_computed(s, train_summaries, global_step=step)

                print("\nEval step= {:d}".format(step))
                print("Train: Loss= {:.6f} {:.6f}".format(train_loss, 1 - np.mean(metrics.dice(train_y_, y, num_classes))))

                # Run inference on validation data and save results to tensorboard
                if args.run_validation:
                    val_summaries = validate(ops=val_ops, session=s, supervisor=sv, name='val')
                    for v in val_summaries:
                        sv.summary_computed(s, v, global_step=step)

#                    train_val_summaries = validate(ops=train_ops, session=s, supervisor=sv, name='train_val', v_all=False)
#                    [sv.summary_computed(s, v, global_step=step) for v in train_val_summaries]

            # Stopping condition
            if step >= tps.max_steps and tps.max_steps > 0:
                print('Ran %d of %d steps - stopping now' % (step, tps.max_steps))
                net.save_model(os.path.join(args.save_path, 'saves'), s)
                break
            step += 1
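The step % 2 branch in the training loop above simply alternates batches between two input queues. Detached from TensorFlow, the same round-robin pattern over any number of batch sources looks like this (the source callables are made up for illustration):

import itertools

def round_robin_batches(*sources):
    # Yield one batch from each batch-producing callable in turn, forever
    for source in itertools.cycle(sources):
        yield source()

batches = round_robin_batches(lambda: 'batch from queue 1',
                              lambda: 'batch from queue 2')
for _, b in zip(range(4), batches):
    print(b)   # queue 1, queue 2, queue 1, queue 2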
Code Example #7
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = sorted(
        [os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))
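Several of these examples locate the newest SavedModel export by taking the last entry of a listing of numerically named subdirectories. os.listdir returns entries in arbitrary order, so the candidates have to be compared numerically; a reusable sketch of that lookup (the helper name is an illustrative choice):

import os

def latest_export_dir(model_path):
    # SavedModel exports live in numerically named (timestamp) folders;
    # return the one with the largest number
    candidates = [
        os.path.join(model_path, o) for o in os.listdir(model_path)
        if os.path.isdir(os.path.join(model_path, o)) and o.isdigit()
    ]
    if not candidates:
        raise IOError('no numeric export directories in %s' % model_path)
    return max(candidates, key=lambda p: int(os.path.basename(p)))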
Code Example #8
def infer(args):
    s = tf.Session()

    filenames = pd.read_csv(args.csv, dtype=str).values
    filenames2 = pd.read_csv(args.csv2, dtype=str).values

    inputs, outputs = DualStreamFCN_v4.load(args.model_path, s)

    r = reader.OrgansReader(
        [tf.float32, tf.int32],
        [[None, None, None, 1], [None, None, None]])  #,name='val_queue')

    # Collect per-subject Dice scores across both modalities; num_classes
    # is assumed to be defined at module level
    DSC_all = []

    for f in filenames:

        x, y = r._read_sample([f], is_training=False)

        sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])

        # Allocate the prediction output and a counter for averaging probabilities
        y_prob = np.zeros(y.shape + (num_classes, ))
        y_pred_count = np.zeros_like(y_prob)
        for slicer in sw:
            y_sw = s.run(outputs['y_prob'],
                         feed_dict={
                             inputs[0]: x[slicer],
                             inputs[1]: 0
                         })  # TODO fix inputs[1]: 0

            y_prob[slicer] += y_sw
            y_pred_count[slicer] += 1

        y_prob /= y_pred_count

        y_ = np.argmax(y_prob, axis=-1)

        dscs = metrics.dice(y_, y, num_classes)

        print(f[0] + ';  mean DSC = {:.3f}\n\t'.format(np.mean(dscs[1:])) +
              ', '.join([
                  'DSC {}: {:.3f}'.format(i, dsc) for i, dsc in enumerate(dscs)
              ]))

        y_ = np.squeeze(y_, axis=0)
        pid = f[0].split('p/')[1][:2]

        np.save(os.path.join(args.output_path, 'Seg_MR_%s.npy' % pid),
                np.asanyarray(y_))
        itk_prediction = sitk.GetImageFromArray(y_)

        ds = np.transpose(dscs)
        DSC_all.append(ds)

        sitk.WriteImage(
            itk_prediction,
            os.path.join(args.output_path, '%s_segmentation.nii.gz' % (pid)))

    for f in filenames2:

        x, y = r._read_sample([f], is_training=False)

        sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])

        # Allocate the prediction output and a counter for averaging probabilities
        y_prob = np.zeros(y.shape + (num_classes, ))
        y_pred_count = np.zeros_like(y_prob)
        for slicer in sw:
            y_sw = s.run(outputs['y_prob'],
                         feed_dict={
                             inputs[0]: x[slicer],
                             inputs[1]: 1
                         })  # TODO fix inputs[1]: 0

            y_prob[slicer] += y_sw
            y_pred_count[slicer] += 1

        y_prob /= y_pred_count

        y_ = np.argmax(y_prob, axis=-1)

        dscs = metrics.dice(y_, y, num_classes)

        print(f[0] + ';  mean DSC = {:.3f}\n\t'.format(np.mean(dscs[1:])) +
              ', '.join([
                  'DSC {}: {:.3f}'.format(i, dsc) for i, dsc in enumerate(dscs)
              ]))

        y_ = np.squeeze(y_, axis=0)
        pid = f[0].split('p/')[1][:2]

        np.save(os.path.join(args.output_path, 'Seg_CT_%s.npy' % pid),
                np.asanyarray(y_))
        itk_prediction = sitk.GetImageFromArray(y_)

        ds = np.transpose(dscs)
        DSC_all.append(ds)

        sitk.WriteImage(
            itk_prediction,
            os.path.join(args.output_path, '%s_segmentation.nii.gz' % (pid)))

    np.save('DSC_dualstream_v1_set1.npy', DSC_all)
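The two loops above differ only in the filename list, the modality flag fed to inputs[1], and the output prefix. A sketch of how the shared body could be factored into one helper, written against the names defined in the example above (the helper name, its parameters, and the MR/CT prefixes are illustrative assumptions):

def infer_file_list(filenames, modality_flag, prefix):
    # Sliding-window inference for one modality; relies on r, s, inputs,
    # outputs, num_classes, args and DSC_all from the enclosing scope
    for f in filenames:
        x, y = r._read_sample([f], is_training=False)
        sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])
        y_prob = np.zeros(y.shape + (num_classes,))
        y_pred_count = np.zeros_like(y_prob)
        for slicer in sw:
            y_prob[slicer] += s.run(outputs['y_prob'],
                                    feed_dict={inputs[0]: x[slicer],
                                               inputs[1]: modality_flag})
            y_pred_count[slicer] += 1
        y_ = np.argmax(y_prob / y_pred_count, axis=-1)
        DSC_all.append(np.transpose(metrics.dice(y_, y, num_classes)))
        pid = f[0].split('p/')[1][:2]
        np.save(os.path.join(args.output_path, 'Seg_%s_%s.npy' % (prefix, pid)),
                np.squeeze(y_, axis=0))

# infer_file_list(filenames, 0, 'MR'); infer_file_list(filenames2, 1, 'CT')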
Code Example #9
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    #    file_names = file_names[-N_VALIDATION_SUBJECTS:]
    #
    #    print('filenames', file_names)
    file_names = file_names[0:48]
    #    file_names = file_names[1:48:2]
    #    print('filenames', file_names)
    #3fold, test A
    #    file_names = file_names[30:47]
    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = sorted(
        [os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)
    #    for o in os.listdir(args.model_path):
    #        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit():
    #            print(o)
    #    print('Loading from {}'.format(export_dir))
    #    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Collect per-subject Dice scores across all files
    DSC_all = []

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        print('Id={}'.format(output['subject_id']))

        # Do a sliding window inference with our DLTK wrapper
        prob = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=64)[0]

        newpath = r"data_fcn_weighted/" + output['subject_id']
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        for c in range(0, 19):
            output_pm = "data_fcn_weighted/" + output[
                'subject_id'] + "/WB_prob_" + str(c) + ".nii.gz"
            #        output_pm = os.path.join(args.model_path, '{}_prob.nii.gz'.format(output['subject_id']))

            probmap = sitk.GetImageFromArray(prob[0, :, :, :,
                                                  c].astype(np.float32))
            probmap.CopyInformation(output['sitk'])
            #        print('size prob', np.size(prob))
            #            print('size pmap', probmap.GetSize())
            #            print('size pmap'+str(c), probmap.GetSize())
            sitk.WriteImage(probmap, output_pm)

        # Calculate the prediction from the probabilities
        pred = np.argmax(prob, -1)

        # Calculate the Dice coefficients once (index 0 is the background class)
        dscs = metrics.dice(pred, lbl, num_classes)
        dsc = dscs[1:].mean()
        ds = list(dscs[1:19])

        DSC_all.append(ds)
        print(np.shape(DSC_all))
        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        #        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
        #            output['subject_id'], dsc, time.time() - t0, output_fn))
        print(
            'Id={}; Dice adrnl= {:0.4f}; glbd= {:0.4f}; panc = {:0.4f}; rectum = {:0.4f}; output_path={};'
            .format(output['subject_id'], dscs[5], dscs[6], dscs[10], dscs[17], output_fn))
    np.save(args.save_npy, DSC_all)
Code Example #10
def predict(args):
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = sorted(
        [os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # EDIT: Fetch the feature vector op of the trained network
    logits = my_predictor._fetch_tensors['logits']

    results = []
    print("Preparing to predict")
    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()
        print("Predicting on an entry")
        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=16)[0]

        print("Prediction: " + str(pred.shape))

        #features = sliding_window_segmentation_inference(
        #   session=my_predictor.session,
        #  ops_list=[logits],
        # sample_dict={my_predictor._feed_tensors['x']: img},
        #batch_size=16)[0]

        class_confidences = pred

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Calculate the cross entropy coeff (computation disabled above, so a
        # placeholder string is recorded in the results instead)
        #cross_ent = metrics.crossentropy(features, lbl)
        cross_ent = "error"

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Save the feature vector file as a .nii.gz using header info from original sitk
        #print("Features: " + str(features.shape))
        #feature_sitk = sitk.GetImageFromArray(features[0])
        #feature_sitk.CopyInformation(output['sitk'])
        #sitk.WriteImage(feature_sitk, os.path.join(args.model_path, 'ALout', '{}_feat.nii.gz'.format(output['subject_id'])))

        ## Save the confidence vector file as a .nii.gz using header info from original stack
        #print("Confidences: " + str(class_confidences.shape))
        #conf_sitk = sitk.GetImageFromArray(class_confidences[0])
        #conf_sitk.CopyInformation(output['sitk'])
        #sitk.WriteImage(conf_sitk, os.path.join(args.model_path, 'ALout', '{}_conf.nii.gz'.format(output['subject_id'])))

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))
        res_row = [
            output['subject_id'], dsc, cross_ent,
            time.time() - t0, output_fn
        ]
        results.append(res_row)

    df = pd.DataFrame(
        results,
        columns=["ID", "Dice", "Cross Entropy", "Time", "Segmentation Path"])
    df.to_csv(os.path.join(args.model_path, 'results_baseline_cgm.csv'),
              index=False)
Code Example #11
def predict(args):

    # Read List of Validation/Testing Samples
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # Load Pre-Trained Model
    export_dir01 = sorted(
        [os.path.join(args.model01_path, o)
         for o in os.listdir(args.model01_path)
         if os.path.isdir(os.path.join(args.model01_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    export_dir02 = sorted(
        [os.path.join(args.model02_path, o)
         for o in os.listdir(args.model02_path)
         if os.path.isdir(os.path.join(args.model02_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    export_dir03 = sorted(
        [os.path.join(args.model03_path, o)
         for o in os.listdir(args.model03_path)
         if os.path.isdir(os.path.join(args.model03_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    export_dir04 = sorted(
        [os.path.join(args.model04_path, o)
         for o in os.listdir(args.model04_path)
         if os.path.isdir(os.path.join(args.model04_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]

    predictor01 = predictor.from_saved_model(export_dir01)
    predictor02 = predictor.from_saved_model(export_dir02)
    predictor03 = predictor.from_saved_model(export_dir03)
    predictor04 = predictor.from_saved_model(export_dir04)

    print('Pre-Trained Models Loaded.')

    # Fetch Output Probability of Trained Network
    y_prob01 = predictor01._fetch_tensors['y_prob']
    y_prob02 = predictor02._fetch_tensors['y_prob']
    y_prob03 = predictor03._fetch_tensors['y_prob']
    y_prob04 = predictor04._fetch_tensors['y_prob']

    num_classes = y_prob01.get_shape().as_list()[-1]

    if EXECUTION_MODE == 'TEST':
        KEY = tf.estimator.ModeKeys.PREDICT
    elif EXECUTION_MODE == 'VAL':
        KEY = tf.estimator.ModeKeys.EVAL

    # Iterate through Files, Predict on Full Volumes and Compute Dice Coefficient
    for output in read_fn(file_references=file_names,
                          mode=KEY,
                          params=READER_PARAMS):
        t0 = time.time()

        img = np.expand_dims(output['features']['x'], axis=0)

        # Sliding Window Inference with DLTK Wrapper
        pred01 = sliding_window_segmentation_inference(
            session=predictor01.session,
            ops_list=[y_prob01],
            sample_dict={predictor01._feed_tensors['x']: img},
            batch_size=BATCH_SIZE)[0]
        pred02 = sliding_window_segmentation_inference(
            session=predictor02.session,
            ops_list=[y_prob02],
            sample_dict={predictor02._feed_tensors['x']: img},
            batch_size=BATCH_SIZE)[0]
        pred03 = sliding_window_segmentation_inference(
            session=predictor03.session,
            ops_list=[y_prob03],
            sample_dict={predictor03._feed_tensors['x']: img},
            batch_size=BATCH_SIZE)[0]
        pred04 = sliding_window_segmentation_inference(
            session=predictor04.session,
            ops_list=[y_prob04],
            sample_dict={predictor04._feed_tensors['x']: img},
            batch_size=BATCH_SIZE)[0]

        # Calculate Prediction from Probabilities
        if ENSEMBLE_MODE == 'weighted_mean':
            pred = (0.20 * pred01 + 0.35 * pred02 + 0.30 * pred03 +
                    0.15 * pred04)
        elif ENSEMBLE_MODE == 'maxconf':
            pred = np.maximum(np.maximum(pred01, pred02),
                              np.maximum(pred03, pred04))

        pred = np.argmax(pred, -1)

        # Save Ensemble Prediction
        output_fn = os.path.join(args.output_path, 'ensemble',
                                 '{}_seg.nii.gz'.format(output['img_id']))
        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        # Save Member Predictions
        pred01_op = np.argmax(pred01, -1)
        output_fn = os.path.join(args.output_path, 'resfcn32-350E',
                                 '{}_seg.nii.gz'.format(output['img_id']))
        new_sitk = sitk.GetImageFromArray(pred01_op[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        pred02_op = np.argmax(pred02, -1)
        output_fn = os.path.join(args.output_path, 'resfcn96-350E',
                                 '{}_seg.nii.gz'.format(output['img_id']))
        new_sitk = sitk.GetImageFromArray(pred02_op[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        pred03_op = np.argmax(pred03, -1)
        output_fn = os.path.join(args.output_path, 'resfcn112-350E',
                                 '{}_seg.nii.gz'.format(output['img_id']))
        new_sitk = sitk.GetImageFromArray(pred03_op[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        pred04_op = np.argmax(pred04, -1)
        output_fn = os.path.join(args.output_path, 'resunet112-250E',
                                 '{}_seg.nii.gz'.format(output['img_id']))
        new_sitk = sitk.GetImageFromArray(pred04_op[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        if EXECUTION_MODE == 'VAL':
            # Calculate Dice Coefficient
            lbl = np.expand_dims(output['labels']['y'], axis=0)
            dsc = metrics.dice(pred, lbl, num_classes)[1:]
            dsc1 = metrics.dice(pred01_op, lbl, num_classes)[1:]
            dsc2 = metrics.dice(pred02_op, lbl, num_classes)[1:]
            dsc3 = metrics.dice(pred03_op, lbl, num_classes)[1:]
            dsc4 = metrics.dice(pred04_op, lbl, num_classes)[1:]
            avd = metrics.abs_vol_difference(pred, lbl, num_classes)[1:]
            avd1 = metrics.abs_vol_difference(pred01_op, lbl, num_classes)[1:]
            avd2 = metrics.abs_vol_difference(pred02_op, lbl, num_classes)[1:]
            avd3 = metrics.abs_vol_difference(pred03_op, lbl, num_classes)[1:]
            avd4 = metrics.abs_vol_difference(pred04_op, lbl, num_classes)[1:]
            print('ID=' + str(output['img_id']))
            print('Dice Score:')
            print(dsc1)
            print(dsc2)
            print(dsc3)
            print(dsc4)
            print(dsc)
            print('Absolute Volume Difference:')
            print(avd1)
            print(avd2)
            print(avd3)
            print(avd4)
            print(avd)
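The two ensemble modes above reduce to a weighted average versus an element-wise maximum over the member probability maps, taken before the argmax. A toy three-member, two-class voxel (made-up numbers) shows how they can disagree:

import numpy as np

p1 = np.array([0.99, 0.01])   # one member is very confident in class 0
p2 = np.array([0.20, 0.80])
p3 = np.array([0.20, 0.80])

mean = (p1 + p2 + p3) / 3.0                    # 'weighted_mean' style, equal weights
maxconf = np.maximum(np.maximum(p1, p2), p3)   # 'maxconf' style

print(np.argmax(mean))      # 1 - the majority opinion wins
print(np.argmax(maxconf))   # 0 - the single most confident member wins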
Code Example #12
def predict(args):
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # Select the test split: rows flagged with '1' in column 9 of the csv
    test_filenames = []
    for row in file_names:
        if row[9] == '1':
            test_filenames.append(row)
    print('testing on {} entries'.format(len(test_filenames)))
    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = sorted(
        [os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()],
        key=lambda p: int(os.path.basename(p)))[-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    mod = import_module('readers.slice_reader')
    # mod = import_module(module_name)
    read_fn = vars(mod)['read_fn']
    reader_params = {'extract_examples': False}

    results = []
    print("Preparing to predict")
    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=test_filenames,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=reader_params):
        print("Predicting on an entry")
        t0 = time.time()
        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=16)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs;'.format(
            output['subject_id'], dsc,
            time.time() - t0))
        res_row = [output['subject_id'], dsc, time.time() - t0]
        results.append(res_row)

    df = pd.DataFrame(results, columns=["ID", "Dice", "Time"])
    df.to_csv(os.path.join(args.model_path, 'test_results.csv'), index=False)
Code Example #13
File: deploy.py Project: jiapei100/DLTKModelZoo
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    print('Loading from {}'.format(args.model_path))
    my_predictor = predictor.from_saved_model(args.model_path)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    print('Got y_prob as {}'.format(y_prob))
    num_classes = y_prob.get_shape().as_list()[-1]

    mode = (tf.estimator.ModeKeys.PREDICT
            if args.predict_only else tf.estimator.ModeKeys.EVAL)

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=mode,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)

        print('running inference on {} with img {} and op {}'.format(
            my_predictor._feed_tensors['x'], img.shape, y_prob))
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        if not args.predict_only:
            lbl = np.expand_dims(output['labels']['y'], axis=0)
            # Calculate the Dice coefficient
            dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.export_path,
                                 '{}_seg.nii.gz'.format(output['img_name']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        if args.predict_only:
            print('Id={}; time={:0.2} secs; output_path={};'.format(
                output['img_name'],
                time.time() - t0, output_fn))
        else:
            # Print outputs
            print(
                'Id={}; Dice={:0.4f} time={:0.2} secs; output_path={};'.format(
                    output['img_name'], dsc,
                    time.time() - t0, output_fn))