Example #1
def run_seg_qc(args):
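    # thin wrapper: forward the CLI-style argument list to the seg_qc entry point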
    seg_qc.main(args)
Example #2
# Imports inferred from usage in this excerpt; project-internal helpers
# (parsefn, parse_inputs, check_orient, threshold_img, standard_img, resample,
# run_test_case, get_largest_two_comps, trim_img_to_size, reslice_like,
# split_seg_sides, seg_qc, biascorr, trim_like, endstatement) come from the
# surrounding package.
import os
from datetime import datetime
from pathlib import Path

import nibabel as nib
import numpy as np
from nilearn.image import math_img, resample_to_img
from termcolor import colored


def main(args):
    """
    Segment hippocampus using a trained CNN
    :param args: subj_dir, subj, t1, out, bias, force
    :return: prediction (segmentation file)
    """
    parser = parsefn()
    subj_dir, subj, t1, out, bias, ign_ort, num_mc, force = parse_inputs(
        parser, args)
    pred_name = 'T1acq_hipp_pred' if hasattr(args, 'subj') else 'hipp_pred'

    if out is None:
        prediction = os.path.join(subj_dir, "%s_%s.nii.gz" % (subj, pred_name))
    else:
        prediction = out

    if os.path.exists(prediction) and force is False:
        print("\n %s already exists" % prediction)

    else:
        start_time = datetime.now()

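        # locate the repository root (two directories above this script's
        # folder), where the bundled model files live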
        hfb = os.path.realpath(__file__)
        hyper_dir = str(Path(hfb).parents[2])

        model_json = os.path.join(hyper_dir, 'models', 'hipp_model.json')
        model_weights = os.path.join(hyper_dir, 'models',
                                     'hipp_model_weights.h5')

        assert os.path.exists(
            model_weights
        ), "%s model does not exist ... please download and rerun script" % model_weights

        # pred preprocess dir
        pred_dir = os.path.join(os.path.abspath(subj_dir), 'pred_process')
        if not os.path.exists(pred_dir):
            os.mkdir(pred_dir)

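        # the model operates on a single T1-weighted input modality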
        training_mod = "t1"

        if bias is True:
            t1_bias = os.path.join(
                subj_dir, "%s_nu.nii.gz" % os.path.basename(t1).split('.')[0])
            biascorr.main(["-i", "%s" % t1, "-o", "%s" % t1_bias])
            in_ort = t1_bias
        else:
            in_ort = t1

        # check orientation
        r_orient = 'RPI'
        l_orient = 'LPI'
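        # path for a reoriented copy of the input; check_orient below writes
        # it only when the volume needs reorienting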
        t1_ort = os.path.join(
            subj_dir,
            "%s_std_orient.nii.gz" % os.path.basename(t1).split('.')[0])

        if ign_ort is False:
            check_orient(in_ort, r_orient, l_orient, t1_ort)

        # threshold at 10 percentile of non-zero voxels
        thresh_file = os.path.join(
            pred_dir,
            "%s_thresholded.nii.gz" % os.path.basename(t1).split('.')[0])
        # use the reoriented copy if one was written, else the (possibly
        # bias-corrected) input
        in_thresh = t1_ort if os.path.exists(t1_ort) else in_ort
        threshold_img(in_thresh, training_mod, 10, thresh_file)

        # standardize
        std_file = os.path.join(
            pred_dir, "%s_thresholded_standardized.nii.gz" %
            os.path.basename(t1).split('.')[0])
        standard_img(thresh_file, std_file)

        # resample to the first network's fixed input grid (160 x 160 x 128)
        t1_img = nib.load(std_file)
        res = resample(t1_img, [160, 160, 128])
        res_file = os.path.join(
            pred_dir, "%s_thresholded_resampled.nii.gz" %
            os.path.basename(t1).split('.')[0])
        res.to_filename(res_file)

        std = nib.load(res_file)
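        # pack the volume into the (batch, channel, x, y, z) layout the model expects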
        test_data = np.zeros((1, 1, 160, 160, 128),
                             dtype=t1_img.get_data_dtype())
        # get_fdata() replaces nibabel's deprecated (now removed) get_data()
        test_data[0, 0, :, :, :] = std.get_fdata()

        print(
            colored("\n predicting initial hippocampus segmentation", 'green'))

        pred = run_test_case(test_data=test_data,
                             model_json=model_json,
                             model_weights=model_weights,
                             affine=res.affine,
                             output_label_map=True,
                             labels=1)

        # resample the prediction back onto the standardized T1 grid and binarize at 0.5
        pred_res = resample_to_img(pred, t1_img)
        pred_th = math_img('img > 0.5', img=pred_res)

        # keep the two largest connected components (one per hemisphere)
        init_pred_name = os.path.join(pred_dir,
                                      "%s_hipp_init_pred.nii.gz" % subj)
        get_largest_two_comps(pred_th, init_pred_name)

        # trim seg to size
        trim_seg = os.path.join(pred_dir,
                                "%s_hipp_init_pred_trimmed.nii.gz" % subj)
        trim_img_to_size(init_pred_name, trim_seg)

        # crop the thresholded T1 to the trimmed segmentation's extent
        t1_zoom = os.path.join(pred_dir, "%s_hipp_region.nii.gz" % subj)
        trim_like.main(
            ['-i', '%s' % thresh_file,
             '-r', '%s' % trim_seg,
             '-o', '%s' % t1_zoom])

        # --------------
        # 2nd model
        # --------------
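        # a second, zoomed-in network refines the segmentation within the
        # cropped hippocampal region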

        pred_shape = [112, 112, 64]

        t1_zoom_img = nib.load(t1_zoom)
        test_zoom_data = np.zeros(
            (1, 1, pred_shape[0], pred_shape[1], pred_shape[2]),
            dtype=t1_zoom_img.get_data_dtype())

        # standardize
        std_file_trim = os.path.join(
            pred_dir, "%s_trimmed_thresholded_standardized.nii.gz" %
            os.path.basename(t1).split('.')[0])
        standard_img(t1_zoom, std_file_trim)

        # resample the crop to the second network's input grid (112 x 112 x 64)
        t1_img = nib.load(std_file_trim)
        res_zoom = resample(t1_img, pred_shape)
        res_file = os.path.join(
            pred_dir,
            "%s_trimmed_resampled.nii.gz" % os.path.basename(t1).split('.')[0])
        res_zoom.to_filename(res_file)

        test_zoom_data[0, 0, :, :, :] = res_zoom.get_fdata()

        model_zoom_json = os.path.join(hyper_dir, 'models',
                                       'hipp_zoom_mcdp_model.json')
        model_zoom_weights = os.path.join(hyper_dir, 'models',
                                          'hipp_zoom_mcdp_model_weights.h5')

        assert os.path.exists(
            model_zoom_weights
        ), "%s model does not exist ... please download and rerun script" % model_zoom_weights

        print(
            colored(
                "\n predicting hippocampus segmentation using MC Dropout with %s samples"
                % num_mc, 'green'))

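        # Monte Carlo dropout: dropout stays active at inference, so each of
        # the num_mc forward passes below draws a different sample; averaging
        # them gives a mean probability map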
        pred_zoom_s = np.zeros(
            (num_mc, pred_shape[0], pred_shape[1], pred_shape[2]),
            dtype=res_zoom.get_data_dtype())

        for sample_id in range(num_mc):
            pred = run_test_case(test_data=test_zoom_data,
                                 model_json=model_zoom_json,
                                 model_weights=model_zoom_weights,
                                 affine=res_zoom.affine,
                                 output_label_map=True,
                                 labels=1)
            pred_zoom_s[sample_id, :, :, :] = pred.get_fdata()
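            # keep each individual MC sample on disk for later inspection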
            nib.save(pred,
                     os.path.join(pred_dir, "hipp_pred_%s.nii.gz" % sample_id))

        # average the MC samples into a mean probability map
        pred_zoom_mean = pred_zoom_s.mean(axis=0)
        # pred_zoom_mean = np.median(pred_zoom_s, axis=0)  # robust alternative
        pred_zoom = nib.Nifti1Image(pred_zoom_mean, res_zoom.affine)

        # resample back
        pred_zoom_res = resample_to_img(pred_zoom, t1_zoom_img)
        pred_zoom_name = os.path.join(
            pred_dir, "%s_trimmed_hipp_pred_mean_prob.nii.gz" % subj)
        nib.save(pred_zoom_res, pred_zoom_name)

        # reslice the mean probability map onto the original (or reoriented) T1 grid
        t1_ref = t1_ort if os.path.exists(t1_ort) else t1
        pred_zoom_res_t1 = os.path.join(
            pred_dir, "%s_%s_hipp_pred_mean.nii.gz" % (subj, pred_name))
        reslice_like(pred_zoom_name, t1_ref, pred_zoom_res_t1)

        # threshold the mean probability map at 0.5
        pred_zoom_res_t1_img = nib.load(pred_zoom_res_t1)
        pred_zoom_th = math_img('img > 0.5', img=pred_zoom_res_t1_img)

        # largest 2 conn comp
        bin_prediction = os.path.join(subj_dir,
                                      "%s_%s_bin.nii.gz" % (subj, pred_name))
        get_largest_two_comps(pred_zoom_th, bin_prediction)

        # split the binary mask into left and right labels
        split_seg_sides(bin_prediction, prediction)

        print(colored("\n generating mosaic image for qc", 'green'))

        seg_qc.main([
            '-i', '%s' % t1_ref,
            '-s', '%s' % prediction,
            '-d', '1', '-g', '3'
        ])

        endstatement.main(
            'Hippocampus prediction (Using MC Dropout) and mosaic generation',
            '%s' % (datetime.now() - start_time))