Example #1
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [os.path.join(args.model_path, o) for o in sorted(
        os.listdir(args.model_path)) if os.path.isdir(
        os.path.join(args.model_path, o)) and o.isdigit()][-1]

    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # mean absolute error
    mae = []
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # We know that the training input shape of [64, 96, 96] will work with
        # our model strides, so we collect several crops of the test image and
        # average the predictions. Alternatively, we could pad or crop the input
        # to any shape that is compatible with the resolution scales of the
        # model:

        num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['logits'],
            feed_dict={my_predictor._feed_tensors['x']: crop_batch})

        # Average the predictions on the cropped test inputs:
        y_ = np.mean(y_)

        # Calculate the absolute error for this subject
        mae.append(np.abs(y_ - lbl))

        # Print outputs
        print('id={}; pred={:0.2f} yrs; true={:0.2f} yrs; run time={:0.2f} s; '
              ''.format(test_id, y_, lbl[0], time.time() - t0))
    print('mean absolute err={:0.3f} yrs'.format(np.mean(mae)))
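Note on the export-directory lookup used in Example #1 (and repeated in most of the examples below): sorting the directory names lexicographically only picks the newest export because the integer timestamps happen to have equal length. A minimal helper sketch, assuming the exports are the integer-named sub-directories written by the estimator's SavedModel export; the name latest_export_dir is hypothetical:

import os

def latest_export_dir(model_path):
    # Integer-named sub-directories are the individual SavedModel exports.
    candidates = [o for o in os.listdir(model_path)
                  if o.isdigit() and os.path.isdir(os.path.join(model_path, o))]
    if not candidates:
        raise ValueError('No SavedModel export found in {}'.format(model_path))
    # Compare numerically rather than lexicographically so the newest timestamp wins.
    return os.path.join(model_path, max(candidates, key=int))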
Example #2
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(
        args.csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [os.path.join(args.model_path, o) for o in sorted(os.listdir(args.model_path))
                  if os.path.isdir(os.path.join(args.model_path, o)) and
                  o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path, '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc, time.time() - t0, output_fn))
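Example #2 reports np.nanmean(metrics.dice(pred, lbl, num_classes)[1:]), i.e. the mean Dice score over the foreground classes. A minimal NumPy sketch of a per-class Dice computation under the usual convention (class 0 is background, classes absent from both volumes yield NaN); this is an illustrative stand-in, not the DLTK metrics.dice implementation:

import numpy as np

def dice_per_class(pred, lbl, num_classes):
    # pred, lbl: integer label volumes of identical shape.
    scores = np.full(num_classes, np.nan)
    for c in range(num_classes):
        p = (pred == c)
        l = (lbl == c)
        denom = p.sum() + l.sum()
        if denom > 0:
            scores[c] = 2.0 * np.logical_and(p, l).sum() / denom
    return scores

# Mean foreground Dice, ignoring classes missing from both prediction and label:
# np.nanmean(dice_per_class(pred, lbl, num_classes)[1:])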
Example #3
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = \
        [os.path.join(args.model_path, o) for o in sorted(os.listdir(args.model_path))
         if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # classification accuracy
    accuracy = []
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # We know that the training input shape of [64, 96, 96] will work with
        # our model strides, so we collect several crops of the test image and
        # average the predictions. Alternatively, we could pad or crop the input
        # to any shape that is compatible with the resolution scales of the
        # model:

        num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['y_prob'],
            feed_dict={my_predictor._feed_tensors['x']: crop_batch})

        # Average the predictions on the cropped test inputs:
        y_ = np.mean(y_, axis=0)
        predicted_class = np.argmax(y_)

        # Calculate the accuracy for this subject
        accuracy.append(predicted_class == lbl)

        # Print outputs
        print('id={}; pred={}; true={}; run time={:0.2f} s; '
              ''.format(test_id, predicted_class, lbl[0],
                        time.time() - t0))
    print('accuracy={}'.format(np.mean(accuracy)))
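In Example #3 the class decision is taken by averaging the per-crop softmax probabilities before the argmax, rather than voting on per-crop argmaxes. A tiny sketch of that step, assuming y_ has shape [num_crops, num_classes]:

import numpy as np

y_ = np.array([[0.6, 0.4],
               [0.4, 0.6],
               [0.7, 0.3],
               [0.8, 0.2]])          # per-crop class probabilities

mean_prob = np.mean(y_, axis=0)      # average over crops -> [num_classes]
predicted_class = np.argmax(mean_prob)
# Averaging first preserves each crop's confidence; voting on per-crop argmaxes
# would weight a 0.51 crop the same as a 0.99 crop.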
Example #4
def visulaize(args):

    export_dir = os.path.join(args.model_path, 'best/1556221214/')
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)
    print(my_predictor)

    test_df = pd.read_csv(args.test_csv)

    for output in read_fn(test_df,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):

        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']
        img = np.expand_dims(img,0)
        #y_, predictions, logits = my_predictor.session.run(fetches=[my_predictor._fetch_tensors['y_prob'],my_predictor._fetch_tensors['y_'],my_predictor._fetch_tensors['logits']], feed_dict={my_predictor._feed_tensors['x']: img})
        #op = my_predictor.session.graph.get_operations()
        layer, logits, prediction = my_predictor.session.run(
            fetches=[my_predictor.graph.get_tensor_by_name(
                         'unit_4_1/sub_unit1/conv3d/Conv3D:0'),
                     my_predictor._fetch_tensors['logits'],
                     my_predictor._fetch_tensors['y_']],
            feed_dict={my_predictor._feed_tensors['x']: img})
        gradcam(layer, logits, prediction)
Example #5
def predict(args):

    test_df = pd.read_csv(args.test_csv)

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    #export_dir = \
    #[os.path.join(args.model_path, o) for o in sorted(os.listdir(args.model_path))
    #if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()][-1]
    export_dir = os.path.join(args.model_path, 'best/1557349239/')
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Iterate through the files, predict on the full volumes and compute the
    # mean absolute error
    mae = []
    err = np.empty([], dtype=np.float32)
    labels = np.empty([], dtype=int)
    pred = np.empty([], dtype=int)
    for output in read_fn(test_df,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = output['features']['x']
        lbl = output['labels']['y']
        test_id = output['img_id']

        # The crop-and-average strategy of the original example (crops of the
        # training shape [64, 96, 96]) is kept below for reference but commented
        # out; here we predict on the full volume, adding the dummy batch
        # dimension required by the model:
        img = np.expand_dims(img, 0)
        '''num_crop_predictions = 4
        crop_batch = extract_random_example_array(
            image_list=img,
            example_size=[64, 96, 96],
            n_examples=num_crop_predictions)'''

        y_ = my_predictor.session.run(
            fetches=my_predictor._fetch_tensors['logits'],
            feed_dict={my_predictor._feed_tensors['x']: img})

        # Reduce the logits to a single scalar age prediction:
        y_ = np.mean(y_)
        labels = np.append(labels, lbl)
        pred = np.append(pred, y_)

        # Calculate the absolute error for this subject
        mae.append(np.abs(y_ - lbl))
        e = y_ - lbl
        err = np.append(err, e)

        # Print outputs
        print('id={}; pred={:0.2f} yrs; true={:0.2f} yrs; run time={:0.2f} s; '
              ''.format(test_id, y_, lbl[0],
                        time.time() - t0))
    print('mean absolute err={:0.3f} yrs'.format(np.mean(mae)))
    print('r2 score:{}'.format(r2_score(labels[1:], pred[1:])))
    dic = {'age': labels[1:], 'error': err[1:]}
    df = pd.DataFrame(dic)
    df.to_csv(
        '/data/agelgazzar/Work/AgePrediction/3DResnet/code/csvfiles/test_error_groupnet.csv'
    )

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(labels[1:], err[1:], 'ro')
    ax.set_ylabel('Error')
    ax.set_xlabel('Age')
    fig.savefig('/data_local/deeplearning/pacage_error.png')
    plt.show()
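Example #5 seeds labels, pred and err with np.empty([]), which leaves one uninitialised leading element and forces the [1:] slicing before r2_score and the plot. A sketch of the same bookkeeping with plain Python lists (variable names are illustrative):

import numpy as np

labels, preds, errs = [], [], []

# ... inside the per-subject loop:
#     labels.append(float(lbl[0]))
#     preds.append(float(y_))
#     errs.append(float(y_ - lbl[0]))

labels = np.asarray(labels, dtype=np.float32)
preds = np.asarray(preds, dtype=np.float32)
errs = np.asarray(errs, dtype=np.float32)
# r2_score(labels, preds) and the age-vs-error plot can then use the full arrays,
# with no dummy first element to slice away.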
Example #6
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o)
        for o in sorted(os.listdir(args.model_path))
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = np.nanmean(metrics.dice(pred, lbl, num_classes)[1:])

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))
Example #7
def predict(args):
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # We trained on the first 4 subjects, so we predict on the rest
    file_names = file_names[-N_VALIDATION_SUBJECTS:]

    # From the model_path, parse the latest saved model and restore a
    # predictor from it
    export_dir = [
        os.path.join(args.model_path, o)
        for o in sorted(os.listdir(args.model_path))
        if os.path.isdir(os.path.join(args.model_path, o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    num_classes = y_prob.get_shape().as_list()[-1]

    # EDIT: Fetch the feature vector op of the trained network
    logits = my_predictor._fetch_tensors['logits']

    results = []
    print("Preparing to predict")
    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.EVAL,
                          params=READER_PARAMS):
        t0 = time.time()
        print("Predicting on an entry")
        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)
        lbl = np.expand_dims(output['labels']['y'], axis=0)
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=16)[0]

        print("Prediction: " + str(pred.shape))

        #features = sliding_window_segmentation_inference(
        #   session=my_predictor.session,
        #  ops_list=[logits],
        # sample_dict={my_predictor._feed_tensors['x']: img},
        #batch_size=16)[0]

        class_confidences = pred

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        # Calculate the Dice coefficient
        dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Calculate the cross entropy coeff
        #cross_ent = metrics.crossentropy(features, lbl)
        cross_ent = "error"

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.model_path,
                                 '{}_seg.nii.gz'.format(output['subject_id']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        # Save the feature vector file as a .nii.gz using header info from original sitk
        #print("Features: " + str(features.shape))
        #feature_sitk = sitk.GetImageFromArray(features[0])
        #feature_sitk.CopyInformation(output['sitk'])
        #sitk.WriteImage(feature_sitk, os.path.join(args.model_path, 'ALout', '{}_feat.nii.gz'.format(output['subject_id'])))

        ## Save the confidence vector file as a .nii.gz using header info from original stack
        #print("Confidences: " + str(class_confidences.shape))
        #conf_sitk = sitk.GetImageFromArray(class_confidences[0])
        #conf_sitk.CopyInformation(output['sitk'])
        #sitk.WriteImage(conf_sitk, os.path.join(args.model_path, 'ALout', '{}_conf.nii.gz'.format(output['subject_id'])))

        # Print outputs
        print('Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
            output['subject_id'], dsc,
            time.time() - t0, output_fn))
        res_row = [
            output['subject_id'], dsc, cross_ent,
            time.time() - t0, output_fn
        ]
        results.append(res_row)

    df = pd.DataFrame(
        results,
        columns=["ID", "Dice", "Cross Entropy", "Time", "Segmentation Path"])
    df.to_csv(os.path.join(args.model_path, 'results_baseline_cgm.csv'),
              index=False)
Example #8
def predict():
    #fn = request.data['filename']
    # TODO: use the filename to set the config and run the model

    cuda_devices = '0'
    conf = "config_spm_tissue.json"
    csv = "val.csv"

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.logging.set_verbosity(tf.logging.ERROR)

    # GPU allocation options
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_devices

    # Parse the run config
    with open(conf) as f:
        config = json.load(f)

    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # From the model model_path, parse the latest saved estimator model
    # and restore a predictor from it
    export_dir = [os.path.join(config["model_path"], o) for o in sorted(os.listdir(config["model_path"]))
                  if os.path.isdir(os.path.join(config["model_path"], o)) and o.isdigit()][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    protocols = config["protocols"]
    # Fetch the output probability ops of the trained network
    y_probs = [my_predictor._fetch_tensors['y_prob_{}'.format(p)] for p in protocols]

    # Iterate through the files, predict on the full volumes and
    #  write out the resulting segmentations
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.PREDICT,
                          params={'extract_examples': False,
                                  'protocols': protocols}):

        print('Running file {}'.format(output['img_id']))
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension
        #  as required
        img = np.expand_dims(output['features']['x'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        preds = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=y_probs,
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=2)

        # Calculate the prediction from the probabilities
        preds = [np.squeeze(np.argmax(pred, -1), axis=0) for pred in preds]

        # Map the consecutive integer label ids back to the original ones
        for i in range(len(protocols)):
            preds[i] = map_labels(preds[i],
                                  protocol=protocols[i],
                                  convert_to_protocol=True)

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        out_folder = os.path.join(config["out_segm_path"], '{}'.format(output['img_id']))

        os.system('mkdir -p {}'.format(out_folder))

        for i in range(len(protocols)):
            #output_fn = os.path.join(out_folder, protocols[i] + '.nii.gz')
            output_fn = os.path.join(out_folder, 'test_seg.nii.gz')
            new_sitk = sitk.GetImageFromArray(preds[i].astype(np.int32))
            new_sitk.CopyInformation(output['sitk'])
            try:
                os.remove(output_fn)
            except OSError:
                pass   
            sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('ID={}; input_dim={}; time={};'.format(
            output['img_id'], img.shape, time.time() - t0))

    return "{result:'done'}"
Example #9
def visulaize(args):
    '''export_dir = os.path.join(args.model_path, 'best/1550815178/')
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)'''

    with tf.Session(graph=tf.Graph()) as sess:
        path_to_model = os.path.join(args.model_path, 'best/1551780240')
        tf.saved_model.loader.load(sess,
                                   [tf.saved_model.tag_constants.SERVING],
                                   path_to_model)
        #op = sess.graph.get_operations()
        #([print(m.values())for m in op])

        test_df = pd.read_csv(args.test_csv)

        for output in read_fn(test_df,
                              mode=tf.estimator.ModeKeys.EVAL,
                              params=READER_PARAMS):

            img = output['features']['x']
            lbl = output['labels']['y']
            test_id = output['img_id']
            img = np.expand_dims(img, 0)
            #x = tf.placeholder(tf.float32,[None,None,None,None,1])

            x = sess.graph.get_tensor_by_name('Placeholder:0')

            #layer = sess.graph.get_tensor_by_name('unit_4_1/sub_unit1/conv3d/Conv3D:0')
            #layer = sess.graph.get_tensor_by_name('unit_4_0/sub_unit_add/add:0')
            layer = sess.graph.get_tensor_by_name(
                'pool/batch_normalization/batchnorm/mul_1:0')

            logits = sess.graph.get_tensor_by_name(
                'last/hidden_units/MatMul:0')
            pred = sess.graph.get_tensor_by_name('pred/ArgMax:0')
            '''if lbl == 0:
                pred = tf.constant(1)
            elif lbl == 1:
                pred = tf.constant(0)'''

            one_hot = tf.sparse_to_dense(pred, [2], 1.0)
            signal = tf.multiply(logits, one_hot)
            loss = tf.reduce_mean(signal)

            grads = tf.gradients(loss, layer)[0]
            # Normalizing the gradients
            norm_grads = tf.div(
                grads,
                tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))

            output, grads_val = sess.run([layer, norm_grads],
                                         feed_dict={x: img})
            output = output[0]  # [6, 7, 6, 256]
            grads_val = grads_val[0]  # [6, 7, 6, 256]

            weights = np.mean(grads_val, axis=(0, 1, 2))  # [256]
            cam = np.ones(output.shape[0:3], dtype=np.float32)  # [6, 7, 6]

            # Taking a weighted average
            for i, w in enumerate(weights):
                cam += w * output[:, :, :, i]

            # Passing through ReLU
            cam = np.maximum(cam, 0)
            cam = cam / np.max(cam)
            cam = resize(cam, (91, 109, 91))
            img = nib.Nifti1Image(cam, np.eye(4) * 2)
            nib.save(
                img,
                os.path.join(args.save_dir,
                             '{}_{}.nii.gz'.format(test_id, lbl)))
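The weighted-average loop over channels in Example #9 can be written as a single tensor contraction. A sketch of the equivalent vectorised step, keeping the np.ones initialisation of the original (i.e. the constant offset of 1) so the result matches the loop; the shapes follow the comments in the example:

import numpy as np

# fmap: feature maps of shape [D, H, W, C]; weights: per-channel weights of shape [C]
fmap = np.random.rand(6, 7, 6, 256).astype(np.float32)
weights = np.random.rand(256).astype(np.float32)

cam = 1.0 + np.einsum('dhwc,c->dhw', fmap, weights)  # ones + sum_i weights[i] * fmap[..., i]
cam = np.maximum(cam, 0)   # ReLU
cam = cam / np.max(cam)    # normalise to [0, 1]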
Example #10
def predict(args, config):

    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    # From the model model_path, parse the latest saved estimator model
    # and restore a predictor from it
    export_dir = [
        os.path.join(config["model_path"], o)
        for o in sorted(os.listdir(config["model_path"])) if
        os.path.isdir(os.path.join(config["model_path"], o)) and o.isdigit()
    ][-1]
    print('Loading from {}'.format(export_dir))
    my_predictor = predictor.from_saved_model(export_dir)

    protocols = config["protocols"]
    # Fetch the output probability ops of the trained network
    y_probs = [
        my_predictor._fetch_tensors['y_prob_{}'.format(p)] for p in protocols
    ]

    # Iterate through the files, predict on the full volumes and
    #  write out the resulting segmentations
    for output in read_fn(file_references=file_names,
                          mode=tf.estimator.ModeKeys.PREDICT,
                          params={
                              'extract_examples': False,
                              'protocols': protocols
                          }):

        print('Running file {}'.format(output['img_id']))
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension
        #  as required
        img = np.expand_dims(output['features']['x'], axis=0)

        # Do a sliding window inference with our DLTK wrapper
        preds = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=y_probs,
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=2)

        # Calculate the prediction from the probabilities
        preds = [np.squeeze(np.argmax(pred, -1), axis=0) for pred in preds]

        # Map the consecutive integer label ids back to the original ones
        for i in range(len(protocols)):
            preds[i] = map_labels(preds[i],
                                  protocol=protocols[i],
                                  convert_to_protocol=True)

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        out_folder = os.path.join(config["out_segm_path"],
                                  '{}'.format(output['img_id']))
        os.system('mkdir -p {}'.format(out_folder))

        for i in range(len(protocols)):
            output_fn = os.path.join(out_folder, protocols[i] + '.nii.gz')
            new_sitk = sitk.GetImageFromArray(preds[i].astype(np.int32))
            new_sitk.CopyInformation(output['sitk'])
            sitk.WriteImage(new_sitk, output_fn)

        # Print outputs
        print('ID={}; input_dim={}; time={};'.format(output['img_id'],
                                                     img.shape,
                                                     time.time() - t0))
Example #11
0
def predict(args):
    # Read in the csv with the file names you would want to predict on
    file_names = pd.read_csv(args.csv,
                             dtype=object,
                             keep_default_na=False,
                             na_values=[]).as_matrix()

    print('Loading from {}'.format(args.model_path))
    my_predictor = predictor.from_saved_model(args.model_path)

    # Fetch the output probability op of the trained network
    y_prob = my_predictor._fetch_tensors['y_prob']
    print('Got y_prob as {}'.format(y_prob))
    num_classes = y_prob.get_shape().as_list()[-1]

    mode = (tf.estimator.ModeKeys.PREDICT
            if args.predict_only else tf.estimator.ModeKeys.EVAL)

    # Iterate through the files, predict on the full volumes and compute a Dice
    # coefficient
    for output in read_fn(file_references=file_names,
                          mode=mode,
                          params=READER_PARAMS):
        t0 = time.time()

        # Parse the read function output and add a dummy batch dimension as
        # required
        img = np.expand_dims(output['features']['x'], axis=0)

        print('running inference on {} with img {} and op {}'.format(
            my_predictor._feed_tensors['x'], img.shape, y_prob))
        # Do a sliding window inference with our DLTK wrapper
        pred = sliding_window_segmentation_inference(
            session=my_predictor.session,
            ops_list=[y_prob],
            sample_dict={my_predictor._feed_tensors['x']: img},
            batch_size=32)[0]

        # Calculate the prediction from the probabilities
        pred = np.argmax(pred, -1)

        if not args.predict_only:
            lbl = np.expand_dims(output['labels']['y'], axis=0)
            # Calculate the Dice coefficient
            dsc = metrics.dice(pred, lbl, num_classes)[1:].mean()

        # Save the file as .nii.gz using the header information from the
        # original sitk image
        output_fn = os.path.join(args.export_path,
                                 '{}_seg.nii.gz'.format(output['img_name']))

        new_sitk = sitk.GetImageFromArray(pred[0].astype(np.int32))
        new_sitk.CopyInformation(output['sitk'])

        sitk.WriteImage(new_sitk, output_fn)

        if args.predict_only:
            print('Id={}; time={:0.2} secs; output_path={};'.format(
                output['img_name'],
                time.time() - t0, output_fn))
        else:
            # Print outputs
            print(
                'Id={}; Dice={:0.4f}; time={:0.2} secs; output_path={};'.format(
                    output['img_name'], dsc,
                    time.time() - t0, output_fn))