Example #1
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
                  if os.path.isdir(os.path.join(args.model_path, o)) and
                  o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path, '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc, time.time() - t0, output_fn))
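
A caveat on the export-directory lookup used here and in most examples below: os.listdir() returns entries in arbitrary order, so taking [-1] only happens to select the newest export on some filesystems. A minimal helper, added here purely for illustration, sorts the numbered SavedModel export directories numerically instead:

import os

def latest_export_dir(model_path):
    # SavedModel exports are named by global step; compare them as integers
    exports = [o for o in os.listdir(model_path)
               if os.path.isdir(os.path.join(model_path, o)) and o.isdigit()]
    return os.path.join(model_path, max(exports, key=int))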
Example #2
def test_sw_inference():

    inp = tf.placeholder(tf.float32, [1, 1, 2, 1])
    op = tf.ones([1, 1, 2, 1], tf.float32)
    np_inp = np.ones([1, 4, 4, 1])

    with tf.Session() as s:
        out = sliding_window_segmentation_inference(s, [op], {inp: np_inp})[0]
        assert np.isclose(out, np_inp).all(), \
            'Got {} but expected {}'.format(out, np_inp)
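
The test above feeds a [1, 4, 4, 1] volume of ones through an op whose output window is [1, 1, 2, 1], so the wrapper has to tile the window across the volume and stitch the crops back to the input shape. A simplified numpy sketch of that stitching (ignoring the overlap and striding the real DLTK implementation supports) is:

import numpy as np

def naive_sliding_window(volume, window_shape, op):
    # Slide a non-overlapping window over the spatial dims, run op on each
    # crop, and write the result back into an output of the input's shape
    out = np.zeros_like(volume, dtype=np.float32)
    _, h, w, _ = volume.shape
    wh, ww = window_shape
    for y in range(0, h, wh):
        for x in range(0, w, ww):
            out[:, y:y + wh, x:x + ww, :] = op(volume[:, y:y + wh, x:x + ww, :])
    return out

vol = np.ones([1, 4, 4, 1], np.float32)
res = naive_sliding_window(vol, (1, 2), lambda crop: np.ones_like(crop))
assert np.allclose(res, vol)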
Example #3
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names_list = pd.read_csv(args.csv,
                                  dtype=object,
                                  keep_default_na=False,
                                  na_values=[]).values

    # We trained on the last 15 subjects, so we predict on the rest
    file_names = file_names_list[:3]

    # From the model_path, parse the latest saved model and restore a predictor from it
    export_dir = [
        os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice coefficient
    from collections import defaultdict
    total_dice = defaultdict(list)
    total_hd = defaultdict(list)

    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=1)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Save the file as .nii.gz using the header information from the original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))
        new_sitk = sitk.GetImageFromArray(pred[0, :, :, :].astype(np.int32))
        sitk.WriteImage(new_sitk, output_fn)

        # Compute the per-class Dice and Hausdorff distance arrays once per image
        dice_all = metrics2.dice(pred, lbl, num_classes)
        hd_all = metrics2.hd(pred[0], lbl[0], num_classes)

        # Average over the foreground classes (1..14) for this image
        dsc = np.nanmean(dice_all[1:15])
        hd = np.nanmean(hd_all[1:15])

        # Record and print each per-class Dice coefficient for this image
        for idx, i in enumerate(
                [14, 13, 6, 5, 12, 11, 10, 9, 8, 7, 4, 3, 2, 1]):
            dsc_tmp = dice_all[i]
            total_dice.setdefault("dsc_{}".format(idx), []).append(dsc_tmp)
            print('Id={}; Dice_{}={:0.4f}; time={:0.2} secs;'.format(
                output['subject_id'], idx, dsc_tmp,
                time.time() - t0))

        total_dice.setdefault("total_mean_dsc", []).append(dsc)
        print('Id={}; AVG Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))

        # Record and print each per-class Hausdorff distance for this image
        for idx, i in enumerate(
                [14, 13, 6, 5, 12, 11, 10, 9, 8, 7, 4, 3, 2, 1]):
            hd_tmp = hd_all[i]
            total_hd.setdefault("hd_{}".format(idx), []).append(hd_tmp)
            print('Id={}; hd_{}={:0.4f}; time={:0.2} secs;'.format(
                output['subject_id'], idx, hd_tmp,
                time.time() - t0))

        total_hd.setdefault("total_mean_hd", []).append(hd)
        print('Id={}; AVG HD={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], hd,
            time.time() - t0, output_fn))

    print("\n")
    print(
        "~~~~~~~~~~~~~~~~~~~~~~ Dice Results on All Test Cases ~~~~~~~~~~~~~~~~~~~~~~"
    )

    all_dice = []
    for k, v in total_dice.items():
        all_dice.append(np.mean(v))
        print(k, "%.3f" % (np.mean(v)), "±", "%.3f" % (np.std(v)))

    print("\n")
    print(
        "~~~~~~~~~~~~~~~~~~~~~~ HD Results (mean, std) on All Test Cases ~~~~~~~~~~~~~~~~~~~~~~"
    )

    all_hd = []
    for k, v in total_hd.items():
        v = [i for i in v if i != 0]
        print(k, "%.2f" % (np.mean(v)), "±", "%.2f" % (np.std(v)))
        all_hd.append(np.mean(v))
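
metrics2.dice and metrics2.hd are external to this snippet. For reference, a hedged sketch of a per-class hard Dice over integer label maps (the project's implementation may handle empty classes differently) could be:

import numpy as np

def dice_per_class(pred, lbl, num_classes):
    scores = np.zeros(num_classes)
    for c in range(num_classes):
        p = (pred == c)
        l = (lbl == c)
        denom = p.sum() + l.sum()
        # NaN for classes absent from both volumes, so np.nanmean skips them
        scores[c] = 2.0 * np.logical_and(p, l).sum() / denom if denom > 0 else np.nan
    return scores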
Example #4
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))
Example #5
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # Predict on the first 48 subjects; alternative splits (e.g. the 3-fold
    # test A split, file_names[30:47]) are kept here for reference
    file_names = file_names[0:48]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        print('Id={}'.format(output['subject_id']))

        # Do a sliding window inference with our DLTK wrapper
        prob = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=64)[0]

        # Write a probability map for each of the 19 classes
        newpath = os.path.join("data_fcn_weighted", output['subject_id'])
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        for c in range(19):
            output_pm = os.path.join(newpath, "WB_prob_{}.nii.gz".format(c))
            probmap = sitk.GetImageFromArray(prob[0, :, :, :, c].astype(np.float32))
            probmap.CopyInformation(output['sitk'])
            sitk.WriteImage(probmap, output_pm)

        # Calculate the prediction from the probabilities
        pred = np.argmax(prob, -1)

        # Calculate the Dice coefficients (compute once, then index per class)
        dice_all = metrics.dice(pred, lbl, num_classes)
        dsc = dice_all[1:].mean()
        ds = list(dice_all[1:19])

        # DSC_all is a list defined outside this function that collects the
        # per-subject, per-class scores for np.save() below
        DSC_all.append(ds)
        print(np.shape(DSC_all))
        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs for a few organs of interest
        print(
            'Id={}; Dice adrnl={:0.4f}; glbd={:0.4f}; panc={:0.4f}; rectum={:0.4f}; output_path={};'
            .format(output['subject_id'], dice_all[5], dice_all[6],
                    dice_all[10], dice_all[17], output_fn))
    np.save(args.save_npy, DSC_all)
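
As a hypothetical follow-up (the file name below stands in for whatever args.save_npy was set to), the saved [n_subjects, 18] Dice matrix can be summarized per class:

import numpy as np

scores = np.load('dsc_all.npy')  # assumed value of args.save_npy
mean_per_class = np.nanmean(scores, axis=0)
std_per_class = np.nanstd(scores, axis=0)
for c, (m, s) in enumerate(zip(mean_per_class, std_per_class), start=1):
    print('class {:2d}: {:.3f} ± {:.3f}'.format(c, m, s))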
Example #6
def predict(args):
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # EDIT: Fetch the feature vector op of the trained network
    logits = my_predictor._fetch_tensors['logits']

    results = []
    print("Preparing to predict")
    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()
        print("Predicting on an entry")
        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=16)[0]

        print("Prediction: " + str(pred.shape))

        # features = sliding_window_segmentation_inference(
        #     session=my_predictor.session,
        #     ops_list=[logits],
        #     sample_dict={my_predictor._feed_tensors['x']: img},
        #     batch_size=16)[0]

        class_confidences = pred

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Calculate the cross-entropy (currently disabled; a placeholder string
        # is recorded in the results CSV instead)
        # cross_ent = metrics.crossentropy(features, lbl)
        cross_ent = "error"

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Save the feature vector file as a .nii.gz using header info from original sitk
        #print("Features: " + str(features.shape))
        #feature_sitk = sitk.GetImageFromArray(features[0])
        #feature_sitk.CopyInformation(output['sitk'])
        #sitk.WriteImage(feature_sitk, os.path.join(args.model_path, 'ALout', '{}_feat.nii.gz'.format(output['subject_id'])))

        ## Save the confidence vector file as a .nii.gz using header info from original stack
        #print("Confidences: " + str(class_confidences.shape))
        #conf_sitk = sitk.GetImageFromArray(class_confidences[0])
        #conf_sitk.CopyInformation(output['sitk'])
        #sitk.WriteImage(conf_sitk, os.path.join(args.model_path, 'ALout', '{}_conf.nii.gz'.format(output['subject_id'])))

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))
        res_row = [
            output['subject_id'], dsc, cross_ent,
            time.time() - t0, output_fn
        ]
        results.append(res_row)

    df = pd.DataFrame(
        results,
        columns=["ID", "Dice", "Cross Entropy", "Time", "Segmentation Path"])
    df.to_csv(os.path.join(args.model_path, 'results_baseline_cgm.csv'),
              index=False)
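
The cross-entropy computation in this example is stubbed out. If one wanted to fill it in, a minimal numpy sketch of a mean voxelwise cross-entropy from class probabilities (shape [1, D, H, W, num_classes]) and integer labels (shape [1, D, H, W]) could look like the following; note that the original metrics.crossentropy may have a different signature:

import numpy as np

def mean_crossentropy(probs, labels, eps=1e-7):
    # Gather the predicted probability of the true class at every voxel
    true_class_probs = np.take_along_axis(
        probs, labels[..., np.newaxis].astype(np.int64), axis=-1).squeeze(-1)
    return float(-np.log(np.clip(true_class_probs, eps, 1.0)).mean())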
Example #7
def predict():
    # fn = request.data['filename']
    # TODO: use the filename to set the config and run the model
    cuda_devices = '0'
    conf = "config_spm_tissue.json"
    csv = "val.csv"

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.logging.set_verbosity(tf.logging.ERROR)

    # GPU allocation options
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_devices

    # Parse the run config
    with open(conf) as f:
        config = json.load(f)

    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # From the model model_path, parse the latest saved estimator model
    # and restore a predictor from it
    export_dir = [os.path.join(config["model_path"], o) for o in os.listdir(config["model_path"])
                  if os.path.isdir(os.path.join(config["model_path"], o)) and o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    protocols = config["protocols"]
    # Fetch the output probability ops of the trained network
    y_probs = [my_predictor._fetch_tensors['y_prob_{}'.format(p)] for p in protocols]

    # Iterate through the files, predict on the full volumes and
    #  compute a Dice similarity coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.PREDICT,
                          params={'extract_examples': False,
                                  'protocols': protocols}):

        print('Running file {}'.format(output['img_id']))
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension
        #  as required
        img = np.expand_dims(output['features']['x'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        preds = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=y_probs,
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=2)

        # Calculate the prediction from the probabilities
        preds = [np.squeeze(np.argmax(pred, -1), axis=0) for pred in preds]

        # Map the consecutive integer label ids back to the original ones
        for i in range(len(protocols)):
            preds[i] = map_labels(preds[i],
                                  protocol=protocols[i],
                                  convert_to_protocol=True)

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        out_folder = os.path.join(config["out_segm_path"], '{}'.format(output['img_id']))

        os.makedirs(out_folder, exist_ok=True)

        for i in range(len(protocols)):
            # One output file per protocol; a fixed name would overwrite
            # earlier protocols on each iteration
            output_fn = os.path.join(out_folder, protocols[i] + '.nii.gz')
            new_sitk = sitk.GetImageFromArray(preds[i].astype(np.int32))
            new_sitk.CopyInformation(output['sitk'])
            try:
                os.remove(output_fn)
            except OSError:
                pass
            sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('ID={}; input_dim={}; time={};'.format(
            output['img_id'], img.shape, time.time() - t0))

    return "{result:'done'}"
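
The request reference and the JSON-ish return value suggest this handler is meant to be served by a web framework. Assuming Flask (an assumption; the framework is not shown in the snippet), the wiring might look like:

from flask import Flask

app = Flask(__name__)

# Register the predict() handler defined above on a POST route
app.add_url_rule('/predict', 'predict', predict, methods=['POST'])

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)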
Example #8
def predict(args):

    # Read List of Validation/Testing Samples
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # Load Pre-Trained Models: pick the latest numbered export per model dir,
    # sorting numerically since os.listdir() order is arbitrary
    def latest_export_dir(model_path):
        exports = sorted(
            [o for o in os.listdir(model_path)
             if os.path.isdir(os.path.join(model_path, o)) and o.isdigit()],
            key=int)
        return os.path.join(model_path, exports[-1])

    predictor01 = predictor.from_saved_model(latest_export_dir(args.model01_path))
    predictor02 = predictor.from_saved_model(latest_export_dir(args.model02_path))
    predictor03 = predictor.from_saved_model(latest_export_dir(args.model03_path))
    predictor04 = predictor.from_saved_model(latest_export_dir(args.model04_path))

    print('Pre-Trained Models Loaded.')

    # Fetch Output Probability of Trained Network
    y_prob01 = predictor01._fetch_tensors['y_prob']
    y_prob02 = predictor02._fetch_tensors['y_prob']
    y_prob03 = predictor03._fetch_tensors['y_prob']
    y_prob04 = predictor04._fetch_tensors['y_prob']

    num_classes = y_prob01.get_shape().as_list()[-1]

    if EXECUTION_MODE == 'TEST':
        KEY = tf.estimator.ModeKeys.PREDICT
    elif EXECUTION_MODE == 'VAL':
        KEY = tf.estimator.ModeKeys.EVAL

    # Iterate through Files, Predict on Full Volumes and Compute Dice Coefficient
    for output in read_fn(file_references=file_names,
                          mode=KEY,
                          params=READER_PARAMS):
        t0 = time.time()

        img = np.expand_dims(output['features']['x'], axis=0)

        # Sliding Window Inference with DLTK Wrapper (one pass per member model)
        member_probs = []
        for p, y in zip([predictor01, predictor02, predictor03, predictor04],
                        [y_prob01, y_prob02, y_prob03, y_prob04]):
            member_probs.append(
                sliding_window_segmentation_inference(
                    session=p.session,
                    ops_list=[y],
                    sample_dict={p._feed_tensors['x']: img},
                    batch_size=BATCH_SIZE)[0])
        pred01, pred02, pred03, pred04 = member_probs

        # Calculate Prediction from Probabilities
        if ENSEMBLE_MODE == 'weighted_mean':
            pred = (0.20 * pred01 + 0.35 * pred02 + 0.30 * pred03 +
                    0.15 * pred04)
        elif ENSEMBLE_MODE == 'maxconf':
            pred = np.maximum(np.maximum(pred01, pred02),
                              np.maximum(pred03, pred04))

        pred = np.argmax(pred, -1)

        # Save Ensemble Prediction
        output_fn = os.path.join(args.output_path, 'ensemble',
                                 '{}_seg.nii.gz'.format(output['img_id']))
        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])
        sitk.WriteImage(new_sitk, output_fn)

        # Save Member Predictions (one sub-directory per member model)
        member_ops = []
        for member_pred, subdir in zip(
                [pred01, pred02, pred03, pred04],
                ['resfcn32-350E', 'resfcn96-350E', 'resfcn112-350E',
                 'resunet112-250E']):
            member_op = np.argmax(member_pred, -1)
            member_ops.append(member_op)
            output_fn = os.path.join(args.output_path, subdir,
                                     '{}_seg.nii.gz'.format(output['img_id']))
            new_sitk = sitk.GetImageFromArray(member_op[0].astype(np.int32))
            new_sitk.CopyInformation(output['sitk'])
            sitk.WriteImage(new_sitk, output_fn)
        pred01_op, pred02_op, pred03_op, pred04_op = member_ops

        if EXECUTION_MODE == 'VAL':
            # Calculate Dice Coefficient
            lbl = np.expand_dims(output['labels']['y'], axis=0)
            dsc = metrics.dice(pred, lbl, num_classes)[1:]
            dsc1 = metrics.dice(pred01_op, lbl, num_classes)[1:]
            dsc2 = metrics.dice(pred02_op, lbl, num_classes)[1:]
            dsc3 = metrics.dice(pred03_op, lbl, num_classes)[1:]
            dsc4 = metrics.dice(pred04_op, lbl, num_classes)[1:]
            avd = metrics.abs_vol_difference(pred, lbl, num_classes)[1:]
            avd1 = metrics.abs_vol_difference(pred01_op, lbl, num_classes)[1:]
            avd2 = metrics.abs_vol_difference(pred02_op, lbl, num_classes)[1:]
            avd3 = metrics.abs_vol_difference(pred03_op, lbl, num_classes)[1:]
            avd4 = metrics.abs_vol_difference(pred04_op, lbl, num_classes)[1:]
            print('ID=' + str(output['img_id']))
            print('Dice Score:')
            print(dsc1)
            print(dsc2)
            print(dsc3)
            print(dsc4)
            print(dsc)
            print('Absolute Volume Difference:')
            print(avd1)
            print(avd2)
            print(avd3)
            print(avd4)
            print(avd)
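
The two fusion modes can be illustrated on dummy per-class probability maps (the weights are the example's own):

import numpy as np

p1, p2, p3, p4 = [np.random.dirichlet(np.ones(4), size=(2, 2))
                  for _ in range(4)]

weighted = 0.20 * p1 + 0.35 * p2 + 0.30 * p3 + 0.15 * p4
maxconf = np.maximum(np.maximum(p1, p2), np.maximum(p3, p4))

# Both fused maps reduce to a label map the same way the example does
print(np.argmax(weighted, -1))
print(np.argmax(maxconf, -1))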
Example #9
def predict(args):
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # We want to predict only on unannotated subjects
    ua_fn = []
    for i, row in enumerate(file_names):
        if row[10] == '1':
            ua_fn.append(row)
    print('selecting from', len(ua_fn), 'image stacks')
    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # EDIT: Fetch the feature vector op of the trained network
    logits = my_predictor._fetch_tensors['logits']

    print("Preparing to predict")
    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    app_json = get_config_for_app()
    # module_name = 'contributions.applications.AL_framework.applications.app' + str(app_json['id']) + '.readers.'
    module_name = 'readers.'
    if app_json['reader_type'] == "Patch":
        module_name = module_name + 'patch_reader'
    elif app_json['reader_type'] == "Slice":
        module_name = module_name + 'slice_reader'
    elif app_json['reader_type'] == "Stack":
        module_name = module_name + 'stack_reader'
    else:
        print("Unsupported reader type: please specify a new one")
        return

    # mod = import_module('readers.stack_reader')
    mod = import_module(module_name)
    read_fn = vars(mod)['read_fn']
    reader_params = {'extract_examples': False}
    for output in read_fn(file_references=ua_fn,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=reader_params):
        print("Predicting on an entry")
        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=app_json['batch_size'])[0]

        print("Prediction: " + str(pred.shape))

        features = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[logits],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=app_json['batch_size'])[0]

        class_confidences = pred

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Save the file as .nii.gz using the header information from the
        # original sitk
        subj_path = output['path']
        output_fn = os.path.join(
            subj_path, '{}bronze_seg.nii.gz'.format(output['prefix']))
        #new_stack = np.zeros(sitk.GetArrayFromImage(output['sitk']).shape)
        #new_stack[output['slice'], :, :] = pred[0]
        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        print('pred unique:', np.unique(np.array(pred[0]).flatten()))
        #info_sitk = sitk.GetArrayFromImage(output['sitk'])
        #info_sitk = info_sitk[output['slice_index'],:,:]
        #info_sitk = sitk.GetImageFromArray(info_sitk)
        #new_sitk.CopyInformation(info_sitk)
        sitk.WriteImage(new_sitk, output_fn)

        # Save the feature vector file as a .nii.gz using header info from original sitk
        print("Features: " + str(features.shape))
        feature_sitk = sitk.GetImageFromArray(features[0])
        #feature_sitk.CopyInformation(info_sitk)
        sitk.WriteImage(
            feature_sitk,
            os.path.join(subj_path, '{}feat.nii.gz'.format(output['prefix'])))

        # Save the confidence vector file as a .nii.gz using header info from original stack
        print("Confidences: " + str(class_confidences.shape))
        print(np.unique(np.array(class_confidences).flatten()))
        conf_sitk = sitk.GetImageFromArray(class_confidences[0])
        #conf_sitk.CopyInformation(info_sitk)
        sitk.WriteImage(
            conf_sitk,
            os.path.join(subj_path, '{}conf.nii.gz'.format(output['prefix'])))

    # Now perform patch selection with the saved outputs
    select_patch_batch(args, app_json)
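
select_patch_batch() is defined elsewhere in the project. One common uncertainty score such a selector might rank patches by (an assumption, not necessarily this repository's actual criterion) is the voxelwise entropy of the saved class confidences:

import numpy as np

def entropy_map(class_confidences, eps=1e-7):
    # class_confidences: [..., num_classes] probabilities summing to one;
    # higher entropy marks voxels the model is least certain about
    p = np.clip(class_confidences, eps, 1.0)
    return -(p * np.log(p)).sum(axis=-1)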
Example #10
def predict(args):
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # Select the subjects flagged for testing (csv column 9 == '1')
    test_filenames = []
    for i, row in enumerate(file_names):
        if row[9] == '1':
            test_filenames.append(row)
    print('testing on : ', len(test_filenames), ' entries')
    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o) for o in os.listdir(args.model_path)
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    mod = import_module('readers.slice_reader')
    # mod = import_module(module_name)
    read_fn = vars(mod)['read_fn']
    reader_params = {'extract_examples': False}

    results = []
    print("Preparing to predict")
    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=test_filenames,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=reader_params):
        print("Predicting on an entry")
        t0 = time.time()
        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=16)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs;'.format(
            output['subject_id'], dsc,
            time.time() - t0))
        res_row = [output['subject_id'], dsc, time.time() - t0]
        results.append(res_row)

    df = pd.DataFrame(results, columns=["ID", "Dice", "Time"])
    df.to_csv(os.path.join(args.model_path, 'test_results.csv'), index=False)
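
A hypothetical follow-up (the directory name is assumed): read the written results file back and report the aggregate Dice over the test set.

import os
import pandas as pd

df = pd.read_csv(os.path.join('model_dir', 'test_results.csv'))  # assumed path
print('mean Dice over {} subjects: {:.4f}'.format(len(df), df['Dice'].mean()))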
Example #11
def predict(args, config):

    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    # From the model model_path, parse the latest saved estimator model
    # and restore a predictor from it
    export_dir = [
        os.path.join(config["model_path"], o)
        for o in os.listdir(config["model_path"]) if
        os.path.isdir(os.path.join(config["model_path"], o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    protocols = config["protocols"]
    # Fetch the output probability ops of the trained network
    y_probs = [
        my_predictor._fetch_tensors['y_prob_{}'.format(p)] for p in protocols
    ]

    # Iterate through the files, predict on the full volumes and
    #  compute a Dice similarity coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.PREDICT,
                          params={
                              'extract_examples': False,
                              'protocols': protocols
                          }):

        print('Running file {}'.format(output['img_id']))
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension
        #  as required
        img = np.expand_dims(output['features']['x'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        preds = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=y_probs,
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=2)

        # Calculate the prediction from the probabilities
        preds = [np.squeeze(np.argmax(pred, -1), axis=0) for pred in preds]

        # Map the consecutive integer label ids back to the original ones
        for i in range(len(protocols)):
            preds[i] = map_labels(preds[i],
                                  protocol=protocols[i],
                                  convert_to_protocol=True)

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        out_folder = os.path.join(config["out_segm_path"],
                                  '{}'.format(output['img_id']))
        os.makedirs(out_folder, exist_ok=True)

        for i in range(len(protocols)):
            output_fn = os.path.join(out_folder, protocols[i] + '.nii.gz')
            new_sitk = sitk.GetImageFromArray(preds[i].astype(np.int32))
            new_sitk.CopyInformation(output['sitk'])
            # Write into out_folder rather than the working directory
            sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('ID={}; input_dim={}; time={};'.format(output['img_id'],
                                                     img.shape,
                                                     time.time() - t0))
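
map_labels() is imported from the project and maps the consecutive integer training ids back to each protocol's original label values. A plausible lookup-table sketch (assumed, not the project's code) is:

import numpy as np

def map_labels_sketch(label_map, original_ids):
    # original_ids[i] is the protocol label that training id i stands for
    lut = np.asarray(original_ids)
    return lut[label_map]

# e.g. training ids 0, 1, 2 standing for protocol labels 0, 2, 41
seg = map_labels_sketch(np.array([[0, 1], [2, 0]]), [0, 2, 41])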
Example #12
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).values

    print('Loading from {}'.format(args.model_path))
    my_predictor = predictor.from_saved_model(args.model_path)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    print('Got y_prob as {}'.format(y_prob))
    num_classes = y_prob.get_shape().as_list()[-1]

    mode = (tf.estimator.ModeKeys.PREDICT
            if args.predict_only else tf.estimator.ModeKeys.EVAL)

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=mode,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)

        print('running inference on {} with img {} and op {}'.format(
            my_predictor._feed_tensors['x'], img.shape, y_prob))
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        if not args.predict_only:
            lbl = np.expand_dims(output['labels']['y'], axis=0)
            # Calculate the Dice coefficient
            dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.export_path,
                                 '{}_seg.nii.gz'.format(output['img_name']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        if args.predict_only:
            print('Id={}; time={:0.2} secs; output_path={};'.format(
                output['img_name'],
                time.time() - t0, output_fn))
        else:
            # Print outputs
            print(
                'Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
                    output['img_name'], dsc,
                    time.time() - t0, output_fn))
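
For completeness, hypothetical command-line wiring for the predict() above, with flag names matching the attributes it reads (args.csv, args.model_path, args.export_path, args.predict_only):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Full-volume segmentation inference')
    parser.add_argument('--csv', required=True,
                        help='csv listing the files to predict on')
    parser.add_argument('--model_path', required=True,
                        help='SavedModel export directory')
    parser.add_argument('--export_path', required=True,
                        help='where to write the *_seg.nii.gz outputs')
    parser.add_argument('--predict_only', action='store_true',
                        help='skip Dice computation when no labels exist')
    predict(parser.parse_args())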