Code example #1
def run_main(args):
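    # Compute MTsat and T1 maps from the MT, PD and T1 input images, then
    # print the command needed to open the results in a viewer.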
    import sct_utils as sct
    from spinalcordtoolbox.mtsat import mtsat

    sct.start_stream_logger()

    fname_mtsat, fname_t1map = mtsat.compute_mtsat_from_file(
        args.mt,
        args.pd,
        args.t1,
        args.trmt,
        args.trpd,
        args.trt1,
        args.famt,
        args.fapd,
        args.fat1,
        fname_b1map=args.b1map,
        fname_mtsat=args.omtsat,
        fname_t1map=args.ot1map,
        verbose=1)

    sct.display_viewer_syntax([fname_mtsat, fname_t1map],
                              colormaps=['gray', 'gray'],
                              minmax=['-10,10', '0, 3'],
                              opacities=['1', '1'],
                              verbose=args.v)
Code example #2
# ==========================================================================================
def main(args=None):
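    # Parse the -di, -da and -db diameters from the command line, compute the
    # MSCC index and print the result.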

    # initialization
    verbose = 1

    # check user arguments
    if not args:
        args = sys.argv[1:]

    # Get parser info
    parser = get_parser()
    arguments = parser.parse(args)
    di = arguments['-di']
    da = arguments['-da']
    db = arguments['-db']

    # Compute MSCC
    MSCC = mscc(di, da, db)

    # Display results
    sct.printv('\nMSCC = ' + str(MSCC) + '\n', verbose, 'info')


# START PROGRAM
# ==========================================================================================
if __name__ == "__main__":
    sct.start_stream_logger()
    # call main function
    main()
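The mscc() helper called above is not shown in this snippet. As a minimal sketch of the usual ratio-based formulation: assuming di is the anteroposterior cord diameter at the compression level and da/db the diameters above and below it, the index expresses the relative narrowing as a percentage. This is a hypothetical stand-in for illustration, not necessarily the exact implementation used by the toolbox.

def mscc(di, da, db):
    # Maximal spinal cord compression, as a percentage: how far the diameter
    # at the compression level falls below the mean of the diameters above
    # and below it.
    return (1.0 - float(di) / ((float(da) + float(db)) / 2.0)) * 100.0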
Code example #3
def main(args):
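    # Load the Dice results stored in pickled sct_pipeline DataFrames, compute
    # pass/fail statistics for each file and render everything as a violin plot.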
    import io
    import sct_utils as sct
    import pickle
    import numpy as np
    import matplotlib.pyplot as plt

    sct.start_stream_logger()

    # make sure number of inputs and labels are the same
    if len(args.input) != len(args.label):
        raise RuntimeError("Mismatch between # of files and labels")

    # fs = 10  # font size
    nb_plots = len(args.input)

    list_data = []
    text_results = []  # numerical results to display inside the figure
    for fname_pickle in args.input:
        df = pickle.load(io.open(fname_pickle, "rb"))
        # filter lines based on status. For status definition, see sct_pipeline
        # Note: the > 0 test is to filter out NaN
        df_dice = df.query(
            "(status != 200) & (status != 201) & (dice > 0)")["dice"]
        list_data.append(df_dice.values)
        # compute statistics
        count_passed = df.status[df.status == 0].count()
        count_failed = df.status[df.status == 99].count()
        count_crashed_run = df.status[df.status == 1].count()
        count_crashed_integrity = df.status[df.status == 2].count()
        count_total = count_passed + count_failed + count_crashed_run + count_crashed_integrity
        text_results.append('\n'.join([
            "PASS: {}/{}".format(count_passed,
                                 count_total), "FAIL: {}".format(count_failed),
            "CRASH_RUN: " + str(count_crashed_run),
            "CRASH_INTEGRITY: " + str(count_crashed_integrity)
        ]))

    pos = np.arange(nb_plots)

    # plot fig
    fig, ax = plt.subplots(1)

    plt.violinplot(list_data,
                   pos,
                   points=100,
                   widths=0.8,
                   showmeans=True,
                   showextrema=True,
                   showmedians=True,
                   bw_method=0.5)
    plt.grid(axis='y')
    plt.ylabel('Dice coefficient')
    plt.xticks(pos, args.label)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ylim = ax.get_ylim()
    for i in range(nb_plots):
        plt.text(i + 0.02,
                 ylim[0] + 0.01,
                 text_results[i],
                 horizontalalignment='left',
                 verticalalignment='bottom')
    plt.savefig('violin_plot.png')
Code example #4
def run_main():
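    # Detect the spinal cord centerline, either automatically with OptiC or
    # from points clicked manually in the viewer, and save it as a NIfTI
    # volume, a text file and, optionally, a ROI file.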
    sct.start_stream_logger()
    parser = get_parser()
    args = sys.argv[1:]
    arguments = parser.parse(args)

    # Input filename
    fname_input_data = arguments["-i"]
    fname_data = os.path.abspath(fname_input_data)

    # Method used
    method = 'optic'
    if "-method" in arguments:
        method = arguments["-method"]

    # Contrast type
    contrast_type = ''
    if "-c" in arguments:
        contrast_type = arguments["-c"]
    if method == 'optic' and not contrast_type:
        # Contrast is mandatory when using the OptiC method
        error = 'ERROR: -c is a mandatory argument when using the OptiC method.'
        sct.printv(error, type='error')
        return

    # Gap between slices
    interslice_gap = 10.0
    if "-gap" in arguments:
        interslice_gap = float(arguments["-gap"])

    # Output folder
    if "-ofolder" in arguments:
        folder_output = sct.slash_at_the_end(arguments["-ofolder"], slash=1)
    else:
        folder_output = './'

    # Remove temporary files
    remove_temp_files = True
    if "-r" in arguments:
        remove_temp_files = bool(int(arguments["-r"]))

    # Outputs a ROI file
    output_roi = False
    if "-roi" in arguments:
        output_roi = bool(int(arguments["-roi"]))

    # Verbosity
    verbose = 0
    if "-v" in arguments:
        verbose = int(arguments["-v"])

    if method == 'viewer':
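        # Manual mode: let the user click centerline points in the viewer,
        # then fit and smooth a centerline through them.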
        path_data, file_data, ext_data = sct.extract_fname(fname_data)

        # create temporary folder
        temp_folder = sct.TempFolder()
        temp_folder.copy_from(fname_data)
        temp_folder.chdir()

        # make sure image is in SAL orientation, as it is the orientation used by the viewer
        image_input = Image(fname_data)
        image_input_orientation = orientation(image_input,
                                              get=True,
                                              verbose=False)
        reoriented_image_filename = sct.add_suffix(file_data + ext_data,
                                                   "_SAL")
        cmd_image = 'sct_image -i "%s" -o "%s" -setorient SAL -v 0' % (
            fname_data, reoriented_image_filename)
        sct.run(cmd_image, verbose=False)

        # extract points manually using the viewer
        fname_points = viewer_centerline(image_fname=reoriented_image_filename,
                                         interslice_gap=interslice_gap,
                                         verbose=verbose)

        if fname_points is not None:
            image_points_RPI = sct.add_suffix(fname_points, "_RPI")
            cmd_image = 'sct_image -i "%s" -o "%s" -setorient RPI -v 0' % (
                fname_points, image_points_RPI)
            sct.run(cmd_image, verbose=False)

            image_input_reoriented = Image(image_points_RPI)

            # fit centerline, smooth it and return the first derivative (in physical space)
            x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(
                image_points_RPI,
                algo_fitting='nurbs',
                nurbs_pts_number=3000,
                phys_coordinates=True,
                verbose=verbose,
                all_slices=False)
            centerline = Centerline(x_centerline_fit, y_centerline_fit,
                                    z_centerline, x_centerline_deriv,
                                    y_centerline_deriv, z_centerline_deriv)

            # average centerline coordinates over slices of the image
            x_centerline_fit_rescorr, y_centerline_fit_rescorr, z_centerline_rescorr, x_centerline_deriv_rescorr, y_centerline_deriv_rescorr, z_centerline_deriv_rescorr = centerline.average_coordinates_over_slices(
                image_input_reoriented)

            # compute z_centerline in image coordinates for usage in vertebrae mapping
            voxel_coordinates = image_input_reoriented.transfo_phys2pix([[
                x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i],
                z_centerline_rescorr[i]
            ] for i in range(len(z_centerline_rescorr))])
            x_centerline_voxel = [coord[0] for coord in voxel_coordinates]
            y_centerline_voxel = [coord[1] for coord in voxel_coordinates]
            z_centerline_voxel = [coord[2] for coord in voxel_coordinates]

            # compute z_centerline in image coordinates with continuous precision
            voxel_coordinates = image_input_reoriented.transfo_phys2continuouspix(
                [[
                    x_centerline_fit_rescorr[i], y_centerline_fit_rescorr[i],
                    z_centerline_rescorr[i]
                ] for i in range(len(z_centerline_rescorr))])
            x_centerline_voxel_cont = [coord[0] for coord in voxel_coordinates]
            y_centerline_voxel_cont = [coord[1] for coord in voxel_coordinates]
            z_centerline_voxel_cont = [coord[2] for coord in voxel_coordinates]

            # Create an image with the centerline
            image_input_reoriented.data *= 0
            min_z_index, max_z_index = int(round(
                min(z_centerline_voxel))), int(round(max(z_centerline_voxel)))
            # if an index is out of bounds here: either the segmentation has
            # holes or labels have been added to the file
            for iz in range(min_z_index, max_z_index + 1):
                image_input_reoriented.data[
                    int(round(x_centerline_voxel[iz - min_z_index])),
                    int(round(y_centerline_voxel[iz - min_z_index])),
                    iz] = 1

            # Write the centerline image
            sct.printv('\nWrite NIFTI volumes...', verbose)
            fname_centerline_oriented = file_data + '_centerline' + ext_data
            image_input_reoriented.setFileName(fname_centerline_oriented)
            image_input_reoriented.changeType('uint8')
            image_input_reoriented.save()

            sct.printv('\nSet to original orientation...', verbose)
            sct.run('sct_image -i ' + fname_centerline_oriented +
                    ' -setorient ' + image_input_orientation + ' -o ' +
                    fname_centerline_oriented)

            # create a txt file with the centerline
            fname_centerline_oriented_txt = file_data + '_centerline.txt'
            file_results = open(fname_centerline_oriented_txt, 'w')
            for i in range(min_z_index, max_z_index + 1):
                file_results.write(
                    str(int(i)) + ' ' +
                    str(round(x_centerline_voxel_cont[i - min_z_index], 2)) +
                    ' ' +
                    str(round(y_centerline_voxel_cont[i - min_z_index], 2)) +
                    '\n')
            file_results.close()

            fname_centerline_oriented_roi = optic.centerline2roi(
                fname_image=fname_centerline_oriented,
                folder_output='./',
                verbose=verbose)

            # return to initial folder
            temp_folder.chdir_undo()

            # copy result to output folder
            shutil.copy(temp_folder.get_path() + fname_centerline_oriented,
                        folder_output)
            shutil.copy(temp_folder.get_path() + fname_centerline_oriented_txt,
                        folder_output)
            if output_roi:
                shutil.copy(
                    temp_folder.get_path() + fname_centerline_oriented_roi,
                    folder_output)
            centerline_filename = folder_output + fname_centerline_oriented

        else:
            centerline_filename = 'error'

        # delete temporary folder
        if remove_temp_files:
            temp_folder.cleanup()

    else:
        # condition on verbose when using OptiC
        if verbose == 1:
            verbose = 2

        # OptiC models
        path_script = os.path.dirname(__file__)
        path_sct = os.path.dirname(path_script)
        optic_models_path = os.path.join(path_sct, 'data/optic_models',
                                         '{}_model'.format(contrast_type))

        # Execute OptiC binary
        _, centerline_filename = optic.detect_centerline(
            image_fname=fname_data,
            contrast_type=contrast_type,
            optic_models_path=optic_models_path,
            folder_output=folder_output,
            remove_temp_files=remove_temp_files,
            output_roi=output_roi,
            verbose=verbose)

    sct.printv('\nDone! To view results, type:', verbose)
    sct.printv(
        "fslview " + fname_input_data + " " + centerline_filename +
        " -l Red -b 0,1 -t 0.7 &\n", verbose, 'info')
Code example #5
                              fname_sc=fname_sc,
                              fname_ref=fname_ref,
                              path_template=path_template,
                              path_ofolder=path_results,
                              verbose=verbose)

    # run the analysis
    lesion_obj.analyze()

    # remove tmp_dir
    if rm_tmp:
        shutil.rmtree(lesion_obj.tmp_dir)

    printv(
        '\nDone! To view the labeled lesion file (one value per lesion), type:',
        verbose)
    if fname_ref is not None:
        printv(
            'fslview ' + fname_mask + ' ' + path_results +
            lesion_obj.fname_label + ' -l Red-Yellow -t 0.7 & \n', verbose,
            'info')
    else:
        printv(
            'fslview ' + path_results + lesion_obj.fname_label +
            ' -l Red-Yellow -t 0.7 & \n', verbose, 'info')


if __name__ == "__main__":
    start_stream_logger()
    main()
Code example #6
def test(path_data='', parameters=''):
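    # Integration test for sct_register_to_template: run the registration,
    # warp the spinal cord segmentations in both directions and check that the
    # resulting Dice coefficients stay above dice_threshold.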
    verbose = 0
    dice_threshold = 0.9
    add_path_for_template = False  # if absolute path or no path to template is provided, then path to data should not be added.

    # initializations
    dice_template2anat = float('NaN')
    dice_anat2template = float('NaN')
    output = ''

    if not parameters:
        parameters = '-i t2/t2.nii.gz -l t2/labels.nii.gz -s t2/t2_seg.nii.gz ' \
                     '-param step=1,type=seg,algo=centermassrot,metric=MeanSquares:step=2,type=seg,algo=bsplinesyn,iter=5,metric=MeanSquares ' \
                     '-t template/ -r 0'
        add_path_for_template = True  # in this case, path to data should be added

    parser = sct_register_to_template.get_parser()
    dict_param = parser.parse(parameters.split(), check_file_exist=False)
    if add_path_for_template:
        dict_param_with_path = parser.add_path_to_file(deepcopy(dict_param),
                                                       path_data,
                                                       input_file=True)
    else:
        dict_param_with_path = parser.add_path_to_file(deepcopy(dict_param),
                                                       path_data,
                                                       input_file=True,
                                                       do_not_add_path=['-t'])
    param_with_path = parser.dictionary_to_string(dict_param_with_path)

    # Check if input files exist
    if not (os.path.isfile(dict_param_with_path['-i'])
            and os.path.isfile(dict_param_with_path['-l'])
            and os.path.isfile(dict_param_with_path['-s'])):
        status = 200
        output = 'ERROR: the file(s) provided to test function do not exist in folder: ' + path_data
        return status, output, DataFrame(data={
            'status': int(status),
            'output': output
        },
                                         index=[path_data])
        # return status, output, DataFrame(
        #     data={'status': status, 'output': output,
        #           'dice_template2anat': float('nan'), 'dice_anat2template': float('nan')},
        #     index=[path_data])

    # if template is not specified, use default
    # if not os.path.isdir(dict_param_with_path['-t']):
    #     status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    #     dict_param_with_path['-t'] = path_sct + default_template
    #     param_with_path = parser.dictionary_to_string(dict_param_with_path)

    # get contrast folder from -i option.
    # We assume it can be extracted as the first element when splitting with the '/' delimiter.
    contrast_folder = ''
    input_filename = ''
    if dict_param['-i'][0] == '/':
        dict_param['-i'] = dict_param['-i'][1:]
    input_split = dict_param['-i'].split('/')
    if len(input_split) == 2:
        contrast_folder = input_split[0] + '/'
        input_filename = input_split[1]
    else:
        input_filename = input_split[0]
    if not contrast_folder:  # if no contrast folder, send error.
        status = 201
        output = 'ERROR: when extracting the contrast folder from input file in command line: ' + dict_param[
            '-i'] + ' for ' + path_data
        return status, output, DataFrame(data={
            'status': int(status),
            'output': output
        },
                                         index=[path_data])
        # return status, output, DataFrame(
        #     data={'status': status, 'output': output, 'dice_template2anat': float('nan'), 'dice_anat2template': float('nan')}, index=[path_data])

    # create output path
    # TODO: create function for that
    import time, random
    subject_folder = path_data.split('/')
    if subject_folder[-1] == '' and len(subject_folder) > 1:
        subject_folder = subject_folder[-2]
    else:
        subject_folder = subject_folder[-1]
    path_output = sct.slash_at_the_end(
        'sct_register_to_template_' + subject_folder + '_' +
        time.strftime("%y%m%d%H%M%S") + '_' + str(random.randint(1, 1000000)),
        slash=1)
    param_with_path += ' -ofolder ' + path_output
    sct.create_folder(path_output)

    # log file
    # TODO: create function for that
    import sys
    fname_log = path_output + 'output.log'

    sct.pause_stream_logger()
    file_handler = sct.add_file_handler_to_logger(filename=fname_log,
                                                  mode='w',
                                                  log_format="%(message)s")
    #
    # stdout_log = file(fname_log, 'w')
    # redirect to log file
    # stdout_orig = sys.stdout
    # sys.stdout = stdout_log

    cmd = 'sct_register_to_template ' + param_with_path
    output += '\n====================================================================================================\n' + cmd + '\n====================================================================================================\n\n'  # copy command
    time_start = time.time()
    try:
        status, o = sct.run(cmd, verbose)
    except Exception:
        status, o = 1, 'ERROR: Function crashed!'
    output += o
    duration = time.time() - time_start

    # if command ran without error, test integrity
    if status == 0:
        # get filename_template_seg
        fname_template_seg = get_file_label(
            sct.slash_at_the_end(dict_param_with_path['-t'], 1) + 'template/',
            'spinal cord',
            output='filewithpath')
        # apply transformation to binary mask: template --> anat
        sct.run(
            'sct_apply_transfo -i ' + fname_template_seg + ' -d ' +
            dict_param_with_path['-s'] + ' -w ' + path_output +
            'warp_template2anat.nii.gz' + ' -o ' + path_output +
            'test_template2anat.nii.gz -x nn', verbose)
        # apply transformation to binary mask: anat --> template
        sct.run(
            'sct_apply_transfo -i ' + dict_param_with_path['-s'] + ' -d ' +
            fname_template_seg + ' -w ' + path_output +
            'warp_anat2template.nii.gz' + ' -o ' + path_output +
            'test_anat2template.nii.gz -x nn', verbose)
        # compute dice coefficient between template segmentation warped into anat and segmentation from anat
        cmd = 'sct_dice_coefficient -i ' + dict_param_with_path[
            '-s'] + ' -d ' + path_output + 'test_template2anat.nii.gz'
        status1, output1 = sct.run(cmd, verbose)
        # parse output and compare to acceptable threshold
        dice_template2anat = float(
            output1.split('3D Dice coefficient = ')[1].split('\n')[0])
        if dice_template2anat < dice_threshold:
            status1 = 99
        # compute dice coefficient between segmentation from anat warped into template and template segmentation
        # N.B. here we use -bmax because the FOV of the anat is smaller than the template
        cmd = 'sct_dice_coefficient -i ' + fname_template_seg + ' -d ' + path_output + 'test_anat2template.nii.gz -bmax 1'
        status2, output2 = sct.run(cmd, verbose)
        # parse output and compare to acceptable threshold
        dice_anat2template = float(
            output2.split('3D Dice coefficient = ')[1].split('\n')[0])
        if dice_anat2template < dice_threshold:
            status2 = 99
        # check if at least one integrity status was equal to 99
        if status1 == 99 or status2 == 99:
            status = 99

        # concatenate outputs
        output = output + output1 + output2

    # transform results into Pandas structure
    results = DataFrame(data={
        'status': int(status),
        'output': output,
        'dice_template2anat': dice_template2anat,
        'dice_anat2template': dice_anat2template,
        'duration [s]': duration
    },
                        index=[path_data])

    sct.log.info(output)
    sct.remove_handler(file_handler)
    sct.start_stream_logger()

    return status, output, results