Example #1
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    param = Param()
    param.fname_data = os.path.abspath(arguments.i)

    if arguments.p is not None:
        param.process = (arguments.p).split(',')
        if param.process[0] not in param.process_list:
            printv(
                parser.error('ERROR: Process ' + param.process[0] +
                             ' is not recognized.'))
    if arguments.size is not None:
        param.size = arguments.size
    if arguments.f is not None:
        param.shape = arguments.f
    if arguments.o is not None:
        param.fname_out = os.path.abspath(arguments.o)
    if arguments.r is not None:
        param.remove_temp_files = arguments.r

    # run main program
    create_mask(param)
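
Every entry point in this listing follows the same opening pattern: build the parser with get_parser(), parse the given argv (argparse falls back to sys.argv[1:] when argv is None), and call set_global_loglevel() before doing any real work. Below is a minimal, self-contained sketch of that skeleton; the flags are illustrative, and set_global_loglevel() is stubbed here with the standard logging module rather than the actual SCT helper.

import argparse
import logging


def set_global_loglevel(verbose):
    # Stand-in for the SCT helper of the same name: map the -v integer to a log level.
    logging.getLogger().setLevel(logging.DEBUG if verbose >= 2 else logging.INFO)


def get_parser():
    # Illustrative parser; each real script defines its own flags in get_parser().
    parser = argparse.ArgumentParser(description="SCT-style entry point (sketch)")
    parser.add_argument('-i', required=True, help="input image")
    parser.add_argument('-v', type=int, default=1, choices=[0, 1, 2], help="verbosity")
    return parser


def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)  # argv=None falls back to sys.argv[1:]
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)
    # ... script-specific work goes here ...


if __name__ == '__main__':
    main()
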
Example #2
def main(argv=None):
    """
    Entry point for sct_apply_transfo
    :param argv: list of input arguments.
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    input_filename = arguments.i
    fname_dest = arguments.d
    warp_filename = arguments.w
    warpinv_filename = arguments.winv

    transform = Transform(input_filename=input_filename,
                          fname_dest=fname_dest,
                          list_warp=warp_filename,
                          list_warpinv=warpinv_filename)

    transform.crop = arguments.crop
    transform.output_filename = arguments.o
    transform.interp = arguments.x
    transform.remove_temp_files = arguments.r
    transform.verbose = verbose

    transform.apply()
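
Because parse_args() receives argv directly, these main() functions can also be driven programmatically, e.g. from a test, instead of from the shell. A hypothetical call of the sct_apply_transfo entry point above, assuming the command-line flags are spelled like the attribute names used in the code (-i, -d, -w, -o) and using placeholder file names:

main(argv=['-i', 'anat.nii.gz',
           '-d', 'template.nii.gz',
           '-w', 'warp_anat2template.nii.gz',
           '-o', 'anat_reg.nii.gz'])
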
Example #3
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # create param objects
    param = Param()

    # set param arguments as inputted by user
    list_fname_src = arguments.i
    fname_dest = arguments.d
    list_fname_warp = arguments.w
    param.fname_out = arguments.o

    # if arguments.ofolder is not None
    #     path_results = arguments.ofolder
    if arguments.x is not None:
        param.interp = arguments.x
    if arguments.r is not None:
        param.rm_tmp = arguments.r

    # check if list of input files and warping fields have same length
    assert len(list_fname_src) == len(
        list_fname_warp), "ERROR: list of files are not of the same length"

    # merge src images to destination image
    try:
        merge_images(list_fname_src, fname_dest, list_fname_warp, param)
    except Exception as e:
        printv(str(e), 1, 'error')

    display_viewer_syntax([fname_dest, os.path.abspath(param.fname_out)])
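
The length check above uses assert, which is skipped entirely when Python runs with the -O flag. A sketch of the same check routed through the parser instead, as it would sit inside main() (assuming -i and -w are the flags behind arguments.i and arguments.w):

    if len(list_fname_src) != len(list_fname_warp):
        parser.error("Number of images passed to '-i' must match the number of warping fields passed to '-w'.")
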
Example #4
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # create param object
    param = Param()
    param_glcm = ParamGLCM()

    # set param arguments as inputted by user
    param.fname_im = arguments.i
    param.fname_seg = arguments.m

    if arguments.ofolder is not None:
        param.path_results = arguments.ofolder

    if not os.path.isdir(param.path_results) and os.path.exists(
            param.path_results):
        printv(
            "ERROR output directory %s is not a valid directory" %
            param.path_results, 1, 'error')
    if not os.path.exists(param.path_results):
        os.makedirs(param.path_results)

    if arguments.feature is not None:
        param_glcm.feature = arguments.feature
    if arguments.distance is not None:
        param_glcm.distance = arguments.distance
    if arguments.angle is not None:
        param_glcm.angle = arguments.angle

    if arguments.dim is not None:
        param.dim = arguments.dim
    if arguments.r is not None:
        param.rm_tmp = bool(arguments.r)

    # create the GLCM constructor
    glcm = ExtractGLCM(param=param, param_glcm=param_glcm)
    # run the extraction
    fname_out_lst = glcm.extract()

    # remove tmp_dir
    if param.rm_tmp:
        rmtree(glcm.tmp_dir)

    printv('\nDone! To view results, type:', param.verbose)
    printv(
        'fsleyes ' + arguments.i + ' ' +
        ' -cm red-yellow -a 70.0 '.join(fname_out_lst) +
        ' -cm Red-Yellow -a 70.0 & \n', param.verbose, 'info')
Example #5
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initialization
    param = ParamMoco(is_diffusion=True, group_size=3, metric='MI', smooth='1')

    # Fetch user arguments
    param.fname_data = arguments.i
    param.fname_bvecs = arguments.bvec
    param.fname_bvals = arguments.bval
    param.bval_min = arguments.bvalmin
    param.group_size = arguments.g
    param.fname_mask = arguments.m
    param.interp = arguments.x
    param.path_out = arguments.ofolder
    param.remove_temp_files = arguments.r
    if arguments.param is not None:
        param.update(arguments.param)

    path_qc = arguments.qc
    qc_fps = arguments.qc_fps
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject
    qc_seg = arguments.qc_seg

    mutually_inclusive_args = (path_qc, qc_seg)
    is_qc_none, is_seg_none = [arg is None for arg in mutually_inclusive_args]
    if not (is_qc_none == is_seg_none):
        raise parser.error(
            "Both '-qc' and '-qc-seg' are required in order to generate a QC report."
        )

    # run moco
    fname_output_image = moco_wrapper(param)

    set_global_loglevel(
        verbose)  # moco_wrapper changes verbose to 0, see issue #3341

    # QC report
    if path_qc is not None:
        generate_qc(fname_in1=fname_output_image,
                    fname_in2=param.fname_data,
                    fname_seg=qc_seg,
                    args=sys.argv[1:],
                    path_qc=os.path.abspath(path_qc),
                    fps=qc_fps,
                    dataset=qc_dataset,
                    subject=qc_subject,
                    process='sct_dmri_moco')

    display_viewer_syntax([fname_output_image, param.fname_data],
                          mode='ortho,ortho')
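
The QC handling above enforces a "both or neither" rule on '-qc' and '-qc-seg'. Written directly on the two variables, the same check reads as follows (a sketch using the names from the example, as it would sit inside main()):

    if (path_qc is None) != (qc_seg is None):
        parser.error("Both '-qc' and '-qc-seg' are required in order to generate a QC report.")
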
Example #6
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    param = Param()

    fname_src = arguments.d
    fname_transfo = arguments.w
    warp_atlas = arguments.a
    warp_spinal_levels = arguments.s
    folder_out = arguments.ofolder
    path_template = arguments.t
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject
    folder_template = param.folder_template
    folder_atlas = param.folder_atlas
    folder_spinal_levels = param.folder_spinal_levels
    file_info_label = param.file_info_label
    list_labels_nn = param.list_labels_nn

    # call main function
    w = WarpTemplate(fname_src, fname_transfo, warp_atlas, warp_spinal_levels, folder_out, path_template,
                     folder_template, folder_atlas, folder_spinal_levels, file_info_label, list_labels_nn, verbose)

    path_template = os.path.join(w.folder_out, w.folder_template)

    # Deal with QC report
    if path_qc is not None:
        try:
            fname_wm = os.path.join(
                w.folder_out, w.folder_template, spinalcordtoolbox.metadata.get_file_label(path_template, id_label=4))  # label = 'white matter mask (probabilistic)'
            generate_qc(
                fname_src, fname_seg=fname_wm, args=sys.argv[1:], path_qc=os.path.abspath(path_qc), dataset=qc_dataset,
                subject=qc_subject, process='sct_warp_template')
        # If label is missing, get_file_label() throws a RuntimeError
        except RuntimeError:
            printv("QC not generated since expected labels are missing from template", type="warning")

    # Deal with verbose
    try:
        display_viewer_syntax(
            [fname_src,
             spinalcordtoolbox.metadata.get_file_label(path_template, id_label=1, output="filewithpath"),  # label = 'T2-weighted template'
             spinalcordtoolbox.metadata.get_file_label(path_template, id_label=5, output="filewithpath"),  # label = 'gray matter mask (probabilistic)'
             spinalcordtoolbox.metadata.get_file_label(path_template, id_label=4, output="filewithpath")],  # label = 'white matter mask (probabilistic)'
            colormaps=['gray', 'gray', 'red-yellow', 'blue-lightblue'],
            opacities=['1', '1', '0.5', '0.5'],
            minmax=['', '0,4000', '0.4,1', '0.4,1'],
            verbose=verbose)
    # If label is missing, continue silently
    except RuntimeError:
        pass
Example #7
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    data_name = arguments.d
    if arguments.o is not None:
        dest_folder = arguments.o
    else:
        dest_folder = os.path.join(os.path.abspath(os.curdir), data_name)

    url = DICT_URL[data_name]
    install_data(url, dest_folder, keep=arguments.k)

    printv('Done!\n', verbose)
Example #8
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_input_data = os.path.abspath(arguments.i)
    img_input = Image(fname_input_data)
    img_seg = propseg(img_input, arguments)
    fname_seg = img_seg.absolutepath
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject
    if path_qc is not None:
        generate_qc(fname_in1=fname_input_data, fname_seg=fname_seg, args=arguments, path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset, subject=qc_subject, process='sct_propseg')
    display_viewer_syntax([fname_input_data, fname_seg], colormaps=['gray', 'red'], opacities=['', '1'])
Example #9
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # Initialization
    param = Param()
    input_t1 = arguments.t1
    input_fname_output = None
    input_tr_min = 500
    input_tr_max = 3500
    input_tr = None
    fname_output_file = arguments.o
    if arguments.ofig is not None:
        input_fname_output = arguments.ofig
    if arguments.b is not None:
        input_tr_min = arguments.b[0]
        input_tr_max = arguments.b[1]
    if arguments.tr is not None:
        input_tr = arguments.tr

    graph = ErnstAngle(input_t1, tr=input_tr, fname_output=input_fname_output)
    if input_tr is not None:
        printv("\nValue of the Ernst Angle with T1=" + str(graph.t1) +
               "ms and TR=" + str(input_tr) + "ms :",
               verbose=verbose,
               type='info')
        printv(str(graph.getErnstAngle(input_tr)))
        if input_tr > input_tr_max:
            input_tr_max = input_tr + 500
        elif input_tr < input_tr_min:
            input_tr_min = input_tr - 500
        # save text file
        try:
            f = open(fname_output_file, 'w')
            f.write(str(graph.getErnstAngle(input_tr)))
            f.close()
        except:
            printv('\nERROR: Cannot open file ' + fname_output_file, '1',
                   'error')

    if verbose == 2:
        graph.draw(input_tr_min, input_tr_max)
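
The text-file write near the end of this example opens, writes, and closes the file by hand inside a bare try/except. A context manager keeps the behaviour while narrowing the caught exception (same variable names as above; OSError is an assumption about what can realistically fail here):

        try:
            with open(fname_output_file, 'w') as f:
                f.write(str(graph.getErnstAngle(input_tr)))
        except OSError:
            printv('\nERROR: Cannot open file ' + fname_output_file, '1', 'error')
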
Example #10
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_mask = arguments.m
    fname_sc = arguments.s
    fname_ref = arguments.i

    # Path to template
    path_template = arguments.f
    # TODO: check this in the parser
    # if not os.path.isdir(path_template) and os.path.exists(path_template):
    #     path_template = None
    #     printv("ERROR output directory %s is not a valid directory" % path_template, 1, 'error')

    # Output Folder
    path_results = arguments.ofolder
    # if not os.path.isdir(path_results) and os.path.exists(path_results):
    #     printv("ERROR output directory %s is not a valid directory" % path_results, 1, 'error')
    if not os.path.exists(path_results):
        os.makedirs(path_results)

    # Remove temp folder
    if arguments.r is not None:
        rm_tmp = bool(arguments.r)
    else:
        rm_tmp = True

    # create the Lesion constructor
    lesion_obj = AnalyzeLeion(fname_mask=fname_mask,
                              fname_sc=fname_sc,
                              fname_ref=fname_ref,
                              path_template=path_template,
                              path_ofolder=path_results,
                              verbose=verbose)

    # run the analysis
    lesion_obj.analyze()

    # remove tmp_dir
    if rm_tmp:
        rmtree(lesion_obj.tmp_dir)

    printv(
        '\nDone! To view the labeled lesion file (one value per lesion), type:',
        verbose)
    if fname_ref is not None:
        printv(
            'fsleyes ' + fname_mask + ' ' +
            os.path.join(path_results, lesion_obj.fname_label) +
            ' -cm red-yellow -a 70.0 & \n', verbose, 'info')
    else:
        printv(
            'fsleyes ' + os.path.join(path_results, lesion_obj.fname_label) +
            ' -cm red-yellow -a 70.0 & \n', verbose, 'info')
Example #11
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    param = Param()
    if param.debug:
        printv('\n*** WARNING: DEBUG MODE ON ***\n')
    else:
        input_fname = arguments.i
        input_second_fname = ''
        output_fname = 'hausdorff_distance.txt'
        resample_to = 0.1

        if arguments.d is not None:
            input_second_fname = arguments.d
        if arguments.thinning is not None:
            param.thinning = bool(arguments.thinning)
        if arguments.resampling is not None:
            resample_to = arguments.resampling
        if arguments.o is not None:
            output_fname = arguments.o
        param.verbose = verbose

        tmp_dir = tmp_create()
        im1_name = "im1.nii.gz"
        copy(input_fname, os.path.join(tmp_dir, im1_name))
        if input_second_fname != '':
            im2_name = 'im2.nii.gz'
            copy(input_second_fname, os.path.join(tmp_dir, im2_name))
        else:
            im2_name = None

        curdir = os.getcwd()
        os.chdir(tmp_dir)

        # now = time.time()
        input_im1 = Image(
            resample_image(im1_name,
                           binary=True,
                           thr=0.5,
                           npx=resample_to,
                           npy=resample_to))
        input_im1.absolutepath = os.path.basename(input_fname)
        if im2_name is not None:
            input_im2 = Image(
                resample_image(im2_name,
                               binary=True,
                               thr=0.5,
                               npx=resample_to,
                               npy=resample_to))
            input_im2.absolutepath = os.path.basename(input_second_fname)
        else:
            input_im2 = None

        computation = ComputeDistances(input_im1, im2=input_im2, param=param)

        # TODO change back the orientation of the thinned image
        if param.thinning:
            computation.thinning1.thinned_image.save(
                os.path.join(
                    curdir,
                    add_suffix(os.path.basename(input_fname), '_thinned')))
            if im2_name is not None:
                computation.thinning2.thinned_image.save(
                    os.path.join(
                        curdir,
                        add_suffix(os.path.basename(input_second_fname),
                                   '_thinned')))

        os.chdir(curdir)

        res_fic = open(output_fname, 'w')
        res_fic.write(computation.res)
        res_fic.write('\n\nInput 1: ' + input_fname)
        res_fic.write('\nInput 2: ' + input_second_fname)
        res_fic.close()
Example #12
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = complete_test = arguments.complete
    set_global_loglevel(verbose=verbose)

    print("SCT info:")
    print("- version: {}".format(__version__))
    print("- path: {0}".format(__sct_dir__))

    # initialization
    install_software = 0
    e = 0
    os_running = 'not identified'

    # complete test
    if complete_test:
        print(run_proc('date', verbose))
        print(run_proc('whoami', verbose))
        print(run_proc('pwd', verbose))
        bash_profile = os.path.expanduser("~/.bash_profile")
        if os.path.isfile(bash_profile):
            with io.open(bash_profile, "r") as f:
                print(f.read())
        bashrc = os.path.expanduser("~/.bashrc")
        if os.path.isfile(bashrc):
            with io.open(bashrc, "r") as f:
                print(f.read())

    # check OS
    platform_running = sys.platform
    if platform_running.find('darwin') != -1:
        os_running = 'osx'
    elif platform_running.find('linux') != -1:
        os_running = 'linux'

    print('OS: ' + os_running + ' (' + platform.platform() + ')')
    print('CPU cores: Available: {}, Used by ITK functions: {}'.format(
        psutil.cpu_count(),
        int(os.getenv('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS', 0))))

    ram = psutil.virtual_memory()
    factor_MB = 1024 * 1024
    print('RAM: Total: {}MB, Used: {}MB, Available: {}MB'.format(
        ram.total // factor_MB, ram.used // factor_MB,
        ram.available // factor_MB))

    if arguments.short:
        sys.exit()

    # check if Python path is within SCT path
    print_line('Check Python executable')
    path_python = sys.executable
    if __sct_dir__ in path_python:
        print_ok()
        print('  Using bundled python {} at {}'.format(sys.version,
                                                       path_python))
    else:
        print_warning()
        print('  Using system python which is unsupported: {}'.format(
            path_python))

    # check if data folder is empty
    print_line('Check if data are installed')
    if os.path.isdir(__data_dir__):
        print_ok()
    else:
        print_fail()

    for dep_pkg, dep_ver_spec in get_dependencies():
        if dep_ver_spec is None:
            print_line('Check if %s is installed' % (dep_pkg))
        else:
            print_line('Check if %s (%s) is installed' %
                       (dep_pkg, dep_ver_spec))

        try:
            module_name, suppress_stderr = resolve_module(dep_pkg)
            module = module_import(module_name, suppress_stderr)
            version = get_version(module)

            if dep_ver_spec is not None and version is not None and dep_ver_spec != version:
                print_warning(more=(" (%s != %s mandated version)" %
                                    (version, dep_ver_spec)))
            elif version is not None:
                print_ok(more=(" (%s)" % version))
            else:
                print_ok()

        except ImportError as err:
            print_fail()
            print(err)
            install_software = 1

    print_line('Check if spinalcordtoolbox is installed')
    try:
        importlib.import_module('spinalcordtoolbox')
        print_ok()
    except ImportError:
        print_fail()
        install_software = 1

    # Check ANTs integrity
    print_line('Check ANTs compatibility with OS ')
    cmd = 'isct_test_ants'
    status, output = run_proc(cmd, verbose=0, raise_exception=False)
    if status == 0:
        print_ok()
    else:
        print_fail()
        print(output)
        e = 1
    if complete_test:
        print('>> ' + cmd)
        print((status, output), '\n')

    # check PropSeg compatibility with OS
    print_line('Check PropSeg compatibility with OS ')
    status, output = run_proc('isct_propseg',
                              verbose=0,
                              raise_exception=False,
                              is_sct_binary=True)
    if status in (0, 1):
        print_ok()
    else:
        print_fail()
        print(output)
        e = 1
    if complete_test:
        print((status, output), '\n')

    print_line('Check if DISPLAY variable is set')
    try:
        os.environ['DISPLAY']
        print_ok()

        # Further check with PyQt specifically
        print_line('Check if figure can be opened with PyQt')
        from PyQt5.QtWidgets import QApplication, QLabel
        try:
            app = QApplication([])
            label = QLabel('Hello World!')
            label.show()
            label.close()
            print_ok()
        except Exception as err:
            print_fail()
            print(err)

    except KeyError:
        print_fail()

    print('')
    sys.exit(e + install_software)
Example #13
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # Initialization
    param = Param()
    start_time = time.time()

    fname_anat = arguments.i
    fname_centerline = arguments.s
    param.algo_fitting = arguments.algo_fitting

    if arguments.smooth is not None:
        sigmas = arguments.smooth
    remove_temp_files = arguments.r
    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = extract_fname(fname_anat)[1] + '_smooth.nii'

    # Display arguments
    printv('\nCheck input arguments...')
    printv('  Volume to smooth .................. ' + fname_anat)
    printv('  Centerline ........................ ' + fname_centerline)
    printv('  Sigma (mm) ........................ ' + str(sigmas))
    printv('  Verbose ........................... ' + str(verbose))

    # Check that input is 3D:
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_anat).dim
    dim = 4  # by default, will be adjusted later
    if nt == 1:
        dim = 3
    if nz == 1:
        dim = 2
    if dim == 4:
        printv(
            'WARNING: the input image is 4D, please split your image to 3D before smoothing spinalcord using :\n'
            'sct_image -i ' + fname_anat + ' -split t -o ' + fname_anat,
            verbose, 'warning')
        printv('4D images not supported, aborting ...', verbose, 'error')

    # Extract path/file/extension
    path_anat, file_anat, ext_anat = extract_fname(fname_anat)
    path_centerline, file_centerline, ext_centerline = extract_fname(
        fname_centerline)

    path_tmp = tmp_create(basename="smooth_spinalcord")

    # Copying input data to tmp folder
    printv('\nCopying input data to tmp folder and convert to nii...', verbose)
    copy(fname_anat, os.path.join(path_tmp, "anat" + ext_anat))
    copy(fname_centerline, os.path.join(path_tmp,
                                        "centerline" + ext_centerline))

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # convert to nii format
    im_anat = convert(Image('anat' + ext_anat))
    im_anat.save('anat.nii', mutable=True, verbose=verbose)
    im_centerline = convert(Image('centerline' + ext_centerline))
    im_centerline.save('centerline.nii', mutable=True, verbose=verbose)

    # Change orientation of the input image into RPI
    printv('\nOrient input volume to RPI orientation...')

    img_anat_rpi = Image("anat.nii").change_orientation("RPI")
    fname_anat_rpi = add_suffix(img_anat_rpi.absolutepath, "_rpi")
    img_anat_rpi.save(path=fname_anat_rpi, mutable=True)

    # Change orientation of the centerline into RPI
    printv('\nOrient centerline to RPI orientation...')

    img_centerline_rpi = Image("centerline.nii").change_orientation("RPI")
    fname_centerline_rpi = add_suffix(img_centerline_rpi.absolutepath, "_rpi")
    img_centerline_rpi.save(path=fname_centerline_rpi, mutable=True)

    # Straighten the spinal cord
    # straighten segmentation
    printv('\nStraighten the spinal cord using centerline/segmentation...',
           verbose)
    cache_sig = cache_signature(
        input_files=[fname_anat_rpi, fname_centerline_rpi],
        input_params={"x": "spline"})
    cachefile = os.path.join(curdir, "straightening.cache")
    if cache_valid(cachefile, cache_sig) and os.path.isfile(
            os.path.join(
                curdir, 'warp_curve2straight.nii.gz')) and os.path.isfile(
                    os.path.join(
                        curdir,
                        'warp_straight2curve.nii.gz')) and os.path.isfile(
                            os.path.join(curdir, 'straight_ref.nii.gz')):
        # if they exist, copy them into current folder
        printv('Reusing existing warping field which seems to be valid',
               verbose, 'warning')
        copy(os.path.join(curdir, 'warp_curve2straight.nii.gz'),
             'warp_curve2straight.nii.gz')
        copy(os.path.join(curdir, 'warp_straight2curve.nii.gz'),
             'warp_straight2curve.nii.gz')
        copy(os.path.join(curdir, 'straight_ref.nii.gz'),
             'straight_ref.nii.gz')
        # apply straightening
        run_proc([
            'sct_apply_transfo', '-i', fname_anat_rpi, '-w',
            'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o',
            'anat_rpi_straight.nii', '-x', 'spline'
        ], verbose)
    else:
        run_proc([
            'sct_straighten_spinalcord', '-i', fname_anat_rpi, '-o',
            'anat_rpi_straight.nii', '-s', fname_centerline_rpi, '-x',
            'spline', '-param', 'algo_fitting=' + param.algo_fitting
        ], verbose)
        cache_save(cachefile, cache_sig)
        # move warping fields locally (to use caching next time)
        copy('warp_curve2straight.nii.gz',
             os.path.join(curdir, 'warp_curve2straight.nii.gz'))
        copy('warp_straight2curve.nii.gz',
             os.path.join(curdir, 'warp_straight2curve.nii.gz'))

    # Smooth the straightened image along z
    printv('\nSmooth the straightened image...')

    img = Image("anat_rpi_straight.nii")
    out = img.copy()

    if len(sigmas) == 1:
        sigmas = [sigmas[0] for i in range(len(img.data.shape))]
    elif len(sigmas) != len(img.data.shape):
        raise ValueError(
            "-smooth need the same number of inputs as the number of image dimension OR only one input"
        )

    sigmas = [sigmas[i] / img.dim[i + 4] for i in range(3)]
    out.data = smooth(out.data, sigmas)
    out.save(path="anat_rpi_straight_smooth.nii")

    # Apply the reversed warping field to get back the curved spinal cord
    printv(
        '\nApply the reversed warping field to get back the curved spinal cord...'
    )
    run_proc([
        'sct_apply_transfo', '-i', 'anat_rpi_straight_smooth.nii', '-o',
        'anat_rpi_straight_smooth_curved.nii', '-d', 'anat.nii', '-w',
        'warp_straight2curve.nii.gz', '-x', 'spline'
    ], verbose)

    # replace zeroed voxels by original image (issue #937)
    printv('\nReplace zeroed voxels by original image...', verbose)
    nii_smooth = Image('anat_rpi_straight_smooth_curved.nii')
    data_smooth = nii_smooth.data
    data_input = Image('anat.nii').data
    indzero = np.where(data_smooth == 0)
    data_smooth[indzero] = data_input[indzero]
    nii_smooth.data = data_smooth
    nii_smooth.save('anat_rpi_straight_smooth_curved_nonzero.nii')

    # come back
    os.chdir(curdir)

    # Generate output file
    printv('\nGenerate output file...')
    generate_output_file(
        os.path.join(path_tmp, "anat_rpi_straight_smooth_curved_nonzero.nii"),
        fname_out)

    # Remove temporary files
    if remove_temp_files == 1:
        printv('\nRemove temporary files...')
        rmtree(path_tmp)

    # Display elapsed time
    elapsed_time = time.time() - start_time
    printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) +
           's\n')

    display_viewer_syntax([fname_anat, fname_out], verbose=verbose)
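
One easily misread step in this example is the sigma rescaling: the -smooth values are given in millimetres, while Image.dim[4:7] holds the voxel sizes (px, py, pz), so the division converts the sigmas to voxel units before calling smooth(). A small worked example with made-up numbers:

sigmas_mm = [3.0, 3.0, 3.0]     # smoothing requested in mm
voxel_size = [0.5, 0.5, 0.5]    # px, py, pz, i.e. Image.dim[4:7] (hypothetical values)
sigmas_vox = [s / v for s, v in zip(sigmas_mm, voxel_size)]
print(sigmas_vox)               # [6.0, 6.0, 6.0] -> 6 voxels of smoothing per axis
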
Example #14
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    input_filename = arguments.i
    centerline_file = arguments.s

    sc_straight = SpinalCordStraightener(input_filename, centerline_file)

    if arguments.dest is not None:
        sc_straight.use_straight_reference = True
        sc_straight.centerline_reference_filename = str(arguments.dest)

    if arguments.ldisc_input is not None:
        if not sc_straight.use_straight_reference:
            printv(
                'Warning: discs position are not taken into account if reference is not provided.'
            )
        else:
            sc_straight.discs_input_filename = str(arguments.ldisc_input)
            sc_straight.precision = 4.0
    if arguments.ldisc_dest is not None:
        if not sc_straight.use_straight_reference:
            printv(
                'Warning: discs position are not taken into account if reference is not provided.'
            )
        else:
            sc_straight.discs_ref_filename = str(arguments.ldisc_dest)
            sc_straight.precision = 4.0

    # Handling optional arguments
    sc_straight.remove_temp_files = arguments.r
    sc_straight.interpolation_warp = arguments.x
    sc_straight.output_filename = arguments.o
    sc_straight.path_output = arguments.ofolder
    path_qc = arguments.qc
    sc_straight.verbose = verbose

    # if arguments.cpu_nb is not None:
    #     sc_straight.cpu_number = arguments.cpu-nb)
    if arguments.disable_straight2curved:
        sc_straight.straight2curved = False
    if arguments.disable_curved2straight:
        sc_straight.curved2straight = False

    if arguments.speed_factor:
        sc_straight.speed_factor = arguments.speed_factor

    if arguments.xy_size:
        sc_straight.xy_size = arguments.xy_size

    sc_straight.param_centerline = ParamCenterline(
        algo_fitting=arguments.centerline_algo,
        smooth=arguments.centerline_smooth)
    if arguments.param is not None:
        params_user = arguments.param
        # update registration parameters
        for param in params_user:
            param_split = param.split('=')
            if param_split[0] == 'precision':
                sc_straight.precision = float(param_split[1])
            if param_split[0] == 'threshold_distance':
                sc_straight.threshold_distance = float(param_split[1])
            if param_split[0] == 'accuracy_results':
                sc_straight.accuracy_results = int(param_split[1])
            if param_split[0] == 'template_orientation':
                sc_straight.template_orientation = int(param_split[1])

    fname_straight = sc_straight.straighten()

    printv("\nFinished! Elapsed time: {} s".format(sc_straight.elapsed_time),
           verbose)

    # Generate QC report
    if path_qc is not None:
        path_qc = os.path.abspath(path_qc)
        qc_dataset = arguments.qc_dataset
        qc_subject = arguments.qc_subject
        generate_qc(fname_straight,
                    args=arguments,
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    process=os.path.splitext(os.path.basename(__file__))[0])

    display_viewer_syntax([fname_straight], verbose=verbose)
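
The '-param' handling above dispatches each key=value pair through repeated if statements. An equivalent, slightly more compact sketch that maps keys to conversion functions (same attribute names as in the example; this is only a rewrite of the loop, not part of the toolbox API):

    if arguments.param is not None:
        converters = {
            'precision': float,
            'threshold_distance': float,
            'accuracy_results': int,
            'template_orientation': int,
        }
        for param in arguments.param:
            key, _, value = param.partition('=')
            if key in converters:
                setattr(sc_straight, key, converters[key](value))
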
Example #15
def main(argv=None):
    """Main function."""
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_image = os.path.abspath(arguments.i)
    contrast_type = arguments.c

    ctr_algo = arguments.centerline

    if arguments.brain is None:
        if contrast_type in ['t2s', 'dwi']:
            brain_bool = False
        if contrast_type in ['t1', 't2']:
            brain_bool = True
    else:
        brain_bool = bool(arguments.brain)

    if bool(arguments.brain) and ctr_algo == 'svm':
        printv('Please only use the flag "-brain 1" with "-centerline cnn".',
               1, 'warning')
        sys.exit(1)

    kernel_size = arguments.kernel
    if kernel_size == '3d' and contrast_type == 'dwi':
        kernel_size = '2d'
        printv(
            '3D kernel model for dwi contrast is not available. 2D kernel model is used instead.',
            type="warning")

    if ctr_algo == 'file' and arguments.file_centerline is None:
        printv(
            'Please use the flag -file_centerline to indicate the centerline filename.',
            1, 'warning')
        sys.exit(1)

    if arguments.file_centerline is not None:
        manual_centerline_fname = arguments.file_centerline
        ctr_algo = 'file'
    else:
        manual_centerline_fname = None

    if arguments.o is not None:
        fname_out = arguments.o
    else:
        path, file_name, ext = extract_fname(fname_image)
        fname_out = file_name + '_seg' + ext

    threshold = arguments.thr

    if threshold is not None:
        if threshold > 1.0 or (threshold < 0.0 and threshold != -1.0):
            raise SyntaxError(
                "Threshold should be between 0 and 1, or equal to -1 (no threshold)"
            )

    remove_temp_files = arguments.r

    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject
    output_folder = arguments.ofolder

    # check if input image is 2D or 3D
    check_dim(fname_image, dim_lst=[2, 3])

    # Segment image

    im_image = Image(fname_image)
    # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function
    im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp = \
        deep_segmentation_spinalcord(im_image.copy(), contrast_type, ctr_algo=ctr_algo,
                                     ctr_file=manual_centerline_fname, brain_bool=brain_bool, kernel_size=kernel_size,
                                     threshold_seg=threshold, remove_temp_files=remove_temp_files, verbose=verbose)

    # Save segmentation
    fname_seg = os.path.abspath(os.path.join(output_folder, fname_out))
    im_seg.save(fname_seg)

    # Generate QC report
    if path_qc is not None:
        generate_qc(fname_image,
                    fname_seg=fname_seg,
                    args=sys.argv[1:],
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    process='sct_deepseg_sc')
    display_viewer_syntax([fname_image, fname_seg],
                          colormaps=['gray', 'red'],
                          opacities=['', '0.7'])
Example #16
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initializations
    output_type = None
    dim_list = ['x', 'y', 'z', 't']

    fname_in = arguments.i

    im_in_list = [Image(fname) for fname in fname_in]
    if len(im_in_list) > 1 and arguments.concat is None and arguments.omc is None:
        parser.error("Multi-image input is only supported for the '-concat' and '-omc' arguments.")

    # Apply initialization steps to all input images first
    if arguments.set_sform_to_qform:
        [im.set_sform_to_qform() for im in im_in_list]
    elif arguments.set_qform_to_sform:
        [im.set_qform_to_sform() for im in im_in_list]

    # Most sct_image options don't accept multi-image input, so here we simply separate out the first image
    # TODO: Extend the options so that they iterate through the list of images (to support multi-image input)
    im_in = im_in_list[0]

    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = None

    # Run command
    # Arguments are sorted alphabetically (not according to the usage order)
    if arguments.concat is not None:
        dim = arguments.concat
        assert dim in dim_list
        dim = dim_list.index(dim)
        im_out = [concat_data(im_in_list, dim)]

    elif arguments.copy_header is not None:
        if fname_out is None:
            raise ValueError("Need to specify output image with -o!")
        im_dest = Image(arguments.copy_header)
        im_dest_new = im_in.copy()
        im_dest_new.data = im_dest.data.copy()
        # im_dest.header = im_in.header
        im_dest_new.absolutepath = im_dest.absolutepath
        im_out = [im_dest_new]

    elif arguments.display_warp:
        visualize_warp(im_warp=im_in, im_grid=None, step=3, rm_tmp=True)
        im_out = None

    elif arguments.getorient:
        orient = im_in.orientation
        im_out = None

    elif arguments.keep_vol is not None:
        index_vol = (arguments.keep_vol).split(',')
        for iindex_vol, vol in enumerate(index_vol):
            index_vol[iindex_vol] = int(vol)
        im_out = [remove_vol(im_in, index_vol, todo='keep')]

    elif arguments.mcs:
        if len(im_in.data.shape) != 5:
            printv(parser.error('ERROR: -mcs input need to be a multi-component image'))
        im_out = multicomponent_split(im_in)

    elif arguments.omc:
        im_ref = im_in_list[0]
        for im in im_in_list:
            if im.data.shape != im_ref.data.shape:
                printv(parser.error('ERROR: -omc inputs need to have all the same shapes'))
            del im
        im_out = [multicomponent_merge(im_in_list=im_in_list)]

    elif arguments.pad is not None:
        ndims = len(im_in.data.shape)
        if ndims != 3:
            printv('ERROR: you need to specify a 3D input file.', 1, 'error')
            return

        pad_arguments = arguments.pad.split(',')
        if len(pad_arguments) != 3:
            printv('ERROR: you need to specify 3 padding values.', 1, 'error')

        padx, pady, padz = pad_arguments
        padx, pady, padz = int(padx), int(pady), int(padz)
        im_out = [pad_image(im_in, pad_x_i=padx, pad_x_f=padx, pad_y_i=pady,
                            pad_y_f=pady, pad_z_i=padz, pad_z_f=padz)]

    elif arguments.pad_asym is not None:
        ndims = len(im_in.data.shape)
        if ndims != 3:
            printv('ERROR: you need to specify a 3D input file.', 1, 'error')
            return

        pad_arguments = arguments.pad_asym.split(',')
        if len(pad_arguments) != 6:
            printv('ERROR: you need to specify 6 padding values.', 1, 'error')

        padxi, padxf, padyi, padyf, padzi, padzf = pad_arguments
        padxi, padxf, padyi, padyf, padzi, padzf = int(padxi), int(padxf), int(padyi), int(padyf), int(padzi), int(padzf)
        im_out = [pad_image(im_in, pad_x_i=padxi, pad_x_f=padxf, pad_y_i=padyi, pad_y_f=padyf, pad_z_i=padzi, pad_z_f=padzf)]

    elif arguments.remove_vol is not None:
        index_vol = (arguments.remove_vol).split(',')
        for iindex_vol, vol in enumerate(index_vol):
            index_vol[iindex_vol] = int(vol)
        im_out = [remove_vol(im_in, index_vol, todo='remove')]

    elif arguments.setorient is not None:
        printv(im_in.absolutepath)
        im_out = [change_orientation(im_in, arguments.setorient)]

    elif arguments.setorient_data is not None:
        im_out = [change_orientation(im_in, arguments.setorient_data, data_only=True)]

    elif arguments.header is not None:
        header = im_in.header
        # Necessary because of https://github.com/nipy/nibabel/issues/480#issuecomment-239227821
        if hasattr(im_in, "im_file"):
            header.structarr['scl_slope'] = im_in.im_file.dataobj.slope
            header.structarr['scl_inter'] = im_in.im_file.dataobj.inter
        printv(create_formatted_header_string(header=header, output_format=arguments.header), verbose=verbose)
        im_out = None

    elif arguments.split is not None:
        dim = arguments.split
        assert dim in dim_list
        dim = dim_list.index(dim)
        im_out = split_data(im_in, dim)

    elif arguments.type is not None:
        output_type = arguments.type
        im_out = [im_in]

    elif arguments.to_fsl is not None:
        space_files = arguments.to_fsl
        if len(space_files) > 2 or len(space_files) < 1:
            printv(parser.error('ERROR: -to-fsl expects 1 or 2 arguments'))
            return
        spaces = [Image(s) for s in space_files]
        if len(spaces) < 2:
            spaces.append(None)
        im_out = [displacement_to_abs_fsl(im_in, spaces[0], spaces[1])]

    # If these arguments are used standalone, simply pass the input image to the output (the affines were set earlier)
    elif arguments.set_sform_to_qform or arguments.set_qform_to_sform:
        im_out = [im_in]

    else:
        im_out = None
        printv(parser.error('ERROR: you need to specify an operation to do on the input image'))

    # in case fname_out is not defined, use first element of input file name list
    if fname_out is None:
        fname_out = fname_in[0]

    # Write output
    if im_out is not None:
        printv('Generate output files...', verbose)
        # if only one output
        if len(im_out) == 1 and arguments.split is None:
            im_out[0].save(fname_out, dtype=output_type, verbose=verbose)
            display_viewer_syntax([fname_out], verbose=verbose)
        if arguments.mcs:
            # use input file name and add _X, _Y _Z. Keep the same extension
            l_fname_out = []
            for i_dim in range(3):
                l_fname_out.append(add_suffix(fname_out or fname_in[0], '_' + dim_list[i_dim].upper()))
                im_out[i_dim].save(l_fname_out[i_dim], verbose=verbose)
            display_viewer_syntax(fname_out)
        if arguments.split is not None:
            # use input file name and add _"DIM+NUMBER". Keep the same extension
            l_fname_out = []
            for i, im in enumerate(im_out):
                l_fname_out.append(add_suffix(fname_out or fname_in[0], '_' + dim_list[dim].upper() + str(i).zfill(4)))
                im.save(l_fname_out[i])
            display_viewer_syntax(l_fname_out)

    elif arguments.getorient:
        printv(orient)

    elif arguments.display_warp:
        printv('Warping grid generated.', verbose, 'info')
Example #17
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # Set param arguments as inputted by user
    fname_in = arguments.i
    contrast = arguments.c

    # Segmentation or centerline
    if arguments.s is not None:
        fname_seg = arguments.s
        if not os.path.isfile(fname_seg):
            fname_seg = None
            printv(
                'WARNING: -s input file: "' + arguments.s +
                '" does not exist.\nDetecting PMJ without using segmentation information',
                1, 'warning')
    else:
        fname_seg = None

    # Output Folder
    if arguments.ofolder is not None:
        path_results = arguments.ofolder
        if not os.path.isdir(path_results) and os.path.exists(path_results):
            printv(
                "ERROR output directory %s is not a valid directory" %
                path_results, 1, 'error')
        if not os.path.exists(path_results):
            os.makedirs(path_results)
    else:
        path_results = '.'
    if arguments.o is not None:
        fname_o = arguments.o
    else:
        fname_o = extract_fname(fname_in)[1] + '_pmj.nii.gz'

    path_qc = arguments.qc

    # Remove temp folder
    rm_tmp = bool(arguments.r)

    # Initialize DetectPMJ
    detector = DetectPMJ(fname_im=fname_in,
                         contrast=contrast,
                         fname_seg=fname_seg,
                         path_out=path_results,
                         verbose=verbose,
                         fname_out=fname_o)

    # run the extraction
    fname_out, tmp_dir = detector.apply()

    # Remove tmp_dir
    if rm_tmp:
        rmtree(tmp_dir)

    # View results
    if fname_out is not None:
        if path_qc is not None:
            from spinalcordtoolbox.reports.qc import generate_qc
            generate_qc(fname_in,
                        fname_seg=fname_out,
                        args=sys.argv[1:],
                        path_qc=os.path.abspath(path_qc),
                        process='sct_detect_pmj')

        display_viewer_syntax([fname_in, fname_out], colormaps=['gray', 'red'])
Example #18
def main(argv=None):
    """Main function."""
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_image = arguments.i
    contrast_type = arguments.c

    ctr_algo = arguments.centerline

    brain_bool = bool(arguments.brain)
    if arguments.brain is None and contrast_type in ['t2s', 't2_ax']:
        brain_bool = False

    output_folder = arguments.ofolder

    if ctr_algo == 'file' and arguments.file_centerline is None:
        printv(
            'Please use the flag -file_centerline to indicate the centerline filename.',
            1, 'error')
        sys.exit(1)

    if arguments.file_centerline is not None:
        manual_centerline_fname = arguments.file_centerline
        ctr_algo = 'file'
    else:
        manual_centerline_fname = None

    remove_temp_files = arguments.r

    algo_config_stg = '\nMethod:'
    algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo)
    algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(
        brain_bool) + '\n'
    printv(algo_config_stg)

    # Segment image
    from spinalcordtoolbox.image import Image
    from spinalcordtoolbox.deepseg_lesion.core import deep_segmentation_MSlesion
    im_image = Image(fname_image)
    im_seg, im_labels_viewer, im_ctr = deep_segmentation_MSlesion(
        im_image,
        contrast_type,
        ctr_algo=ctr_algo,
        ctr_file=manual_centerline_fname,
        brain_bool=brain_bool,
        remove_temp_files=remove_temp_files,
        verbose=verbose)

    # Save segmentation
    fname_seg = os.path.abspath(
        os.path.join(
            output_folder,
            extract_fname(fname_image)[1] + '_lesionseg' +
            extract_fname(fname_image)[2]))
    im_seg.save(fname_seg)

    if ctr_algo == 'viewer':
        # Save labels
        fname_labels = os.path.abspath(
            os.path.join(
                output_folder,
                extract_fname(fname_image)[1] + '_labels-centerline' +
                extract_fname(fname_image)[2]))
        im_labels_viewer.save(fname_labels)

    if verbose == 2:
        # Save ctr
        fname_ctr = os.path.abspath(
            os.path.join(
                output_folder,
                extract_fname(fname_image)[1] + '_centerline' +
                extract_fname(fname_image)[2]))
        im_ctr.save(fname_ctr)

    display_viewer_syntax([fname_image, fname_seg],
                          colormaps=['gray', 'red'],
                          opacities=['', '0.7'])
Example #19
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initializations
    initz = ''
    initcenter = ''
    fname_initlabel = ''
    file_labelz = 'labelz.nii.gz'
    param = Param()

    fname_in = os.path.abspath(arguments.i)
    fname_seg = os.path.abspath(arguments.s)
    contrast = arguments.c
    path_template = os.path.abspath(arguments.t)
    scale_dist = arguments.scale_dist
    path_output = arguments.ofolder
    param.path_qc = arguments.qc
    if arguments.discfile is not None:
        fname_disc = os.path.abspath(arguments.discfile)
    else:
        fname_disc = None
    if arguments.initz is not None:
        initz = arguments.initz
        if len(initz) != 2:
            raise ValueError(
                '--initz takes two arguments: position in superior-inferior direction, label value'
            )
    if arguments.initcenter is not None:
        initcenter = arguments.initcenter
    # if user provided text file, parse and overwrite arguments
    if arguments.initfile is not None:
        file = open(arguments.initfile, 'r')
        initfile = ' ' + file.read().replace('\n', '')
        arg_initfile = initfile.split(' ')
        for idx_arg, arg in enumerate(arg_initfile):
            if arg == '-initz':
                initz = [int(x) for x in arg_initfile[idx_arg + 1].split(',')]
                if len(initz) != 2:
                    raise ValueError(
                        '--initz takes two arguments: position in superior-inferior direction, label value'
                    )
            if arg == '-initcenter':
                initcenter = int(arg_initfile[idx_arg + 1])
    if arguments.initlabel is not None:
        # get absolute path of label
        fname_initlabel = os.path.abspath(arguments.initlabel)
    if arguments.param is not None:
        param.update(arguments.param[0])
    remove_temp_files = arguments.r
    clean_labels = arguments.clean_labels
    laplacian = arguments.laplacian

    path_tmp = tmp_create(basename="label_vertebrae")

    # Copying input data to tmp folder
    printv('\nCopying input data to tmp folder...', verbose)
    Image(fname_in).save(os.path.join(path_tmp, "data.nii"))
    Image(fname_seg).save(os.path.join(path_tmp, "segmentation.nii"))

    # Go to temp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # Straighten spinal cord
    printv('\nStraighten spinal cord...', verbose)
    # check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time)
    cache_sig = cache_signature(input_files=[fname_in, fname_seg])
    cachefile = os.path.join(curdir, "straightening.cache")
    if cache_valid(cachefile, cache_sig) and os.path.isfile(
            os.path.join(
                curdir, "warp_curve2straight.nii.gz")) and os.path.isfile(
                    os.path.join(
                        curdir,
                        "warp_straight2curve.nii.gz")) and os.path.isfile(
                            os.path.join(curdir, "straight_ref.nii.gz")):
        # if they exist, copy them into current folder
        printv('Reusing existing warping field which seems to be valid',
               verbose, 'warning')
        copy(os.path.join(curdir, "warp_curve2straight.nii.gz"),
             'warp_curve2straight.nii.gz')
        copy(os.path.join(curdir, "warp_straight2curve.nii.gz"),
             'warp_straight2curve.nii.gz')
        copy(os.path.join(curdir, "straight_ref.nii.gz"),
             'straight_ref.nii.gz')
        # apply straightening
        s, o = run_proc([
            'sct_apply_transfo', '-i', 'data.nii', '-w',
            'warp_curve2straight.nii.gz', '-d', 'straight_ref.nii.gz', '-o',
            'data_straight.nii'
        ])
    else:
        sct_straighten_spinalcord.main(argv=[
            '-i',
            'data.nii',
            '-s',
            'segmentation.nii',
            '-r',
            str(remove_temp_files),
            '-v',
            str(verbose),
        ])
        cache_save(cachefile, cache_sig)

    # resample to 0.5mm isotropic to match template resolution
    printv('\nResample to 0.5mm isotropic...', verbose)
    s, o = run_proc([
        'sct_resample', '-i', 'data_straight.nii', '-mm', '0.5x0.5x0.5', '-x',
        'linear', '-o', 'data_straightr.nii'
    ],
                    verbose=verbose)

    # Apply straightening to segmentation
    # N.B. Output is RPI
    printv('\nApply straightening to segmentation...', verbose)
    run_proc(
        'isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' %
        ('segmentation.nii', 'data_straightr.nii',
         'warp_curve2straight.nii.gz', 'segmentation_straight.nii', 'Linear'),
        verbose=verbose,
        is_sct_binary=True,
    )
    # Threshold segmentation at 0.5
    run_proc([
        'sct_maths', '-i', 'segmentation_straight.nii', '-thr', '0.5', '-o',
        'segmentation_straight.nii'
    ], verbose)

    # If disc label file is provided, label vertebrae using that file instead of automatically
    if fname_disc:
        # Apply straightening to disc-label
        printv('\nApply straightening to disc labels...', verbose)
        run_proc(
            'sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' %
            (fname_disc, 'data_straightr.nii', 'warp_curve2straight.nii.gz',
             'labeldisc_straight.nii.gz', 'label'),
            verbose=verbose)
        label_vert('segmentation_straight.nii',
                   'labeldisc_straight.nii.gz',
                   verbose=1)

    else:
        # create label to identify disc
        printv('\nCreate label to identify disc...', verbose)
        fname_labelz = os.path.join(path_tmp, file_labelz)
        if initz or initcenter:
            if initcenter:
                # find z centered in FOV
                nii = Image('segmentation.nii').change_orientation("RPI")
                nx, ny, nz, nt, px, py, pz, pt = nii.dim  # Get dimensions
                z_center = int(np.round(nz / 2))  # get z_center
                initz = [z_center, initcenter]

            im_label = create_labels_along_segmentation(
                Image('segmentation.nii'), [(initz[0], initz[1])])
            im_label.data = dilate(im_label.data, 3, 'ball')
            im_label.save(fname_labelz)

        elif fname_initlabel:
            Image(fname_initlabel).save(fname_labelz)

        else:
            # automatically finds C2-C3 disc
            im_data = Image('data.nii')
            im_seg = Image('segmentation.nii')
            if not remove_temp_files:  # because verbose is here also used for keeping temp files
                verbose_detect_c2c3 = 2
            else:
                verbose_detect_c2c3 = 0
            im_label_c2c3 = detect_c2c3(im_data,
                                        im_seg,
                                        contrast,
                                        verbose=verbose_detect_c2c3)
            ind_label = np.where(im_label_c2c3.data)
            if not np.size(ind_label) == 0:
                im_label_c2c3.data[ind_label] = 3
            else:
                printv(
                    'Automatic C2-C3 detection failed. Please provide manual label with sct_label_utils',
                    1, 'error')
                sys.exit()
            im_label_c2c3.save(fname_labelz)

        # dilate label so it is not lost when applying warping
        dilate(Image(fname_labelz), 3, 'ball').save(fname_labelz)

        # Apply straightening to z-label
        printv('\nAnd apply straightening to label...', verbose)
        run_proc(
            'isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' %
            (file_labelz, 'data_straightr.nii', 'warp_curve2straight.nii.gz',
             'labelz_straight.nii.gz', 'NearestNeighbor'),
            verbose=verbose,
            is_sct_binary=True,
        )
        # get z value and disc value to initialize labeling
        printv('\nGet z and disc values from straight label...', verbose)
        init_disc = get_z_and_disc_values_from_label('labelz_straight.nii.gz')
        printv('.. ' + str(init_disc), verbose)

        # apply laplacian filtering
        if laplacian:
            printv('\nApply Laplacian filter...', verbose)
            run_proc([
                'sct_maths', '-i', 'data_straightr.nii', '-laplacian', '1',
                '-o', 'data_straightr.nii'
            ], verbose)

        # detect vertebral levels on straight spinal cord
        init_disc[1] = init_disc[1] - 1
        vertebral_detection('data_straightr.nii',
                            'segmentation_straight.nii',
                            contrast,
                            param,
                            init_disc=init_disc,
                            verbose=verbose,
                            path_template=path_template,
                            path_output=path_output,
                            scale_dist=scale_dist)

    # un-straighten labeled spinal cord
    printv('\nUn-straighten labeling...', verbose)
    run_proc(
        'isct_antsApplyTransforms -d 3 -i %s -r %s -t %s -o %s -n %s' %
        ('segmentation_straight_labeled.nii', 'segmentation.nii',
         'warp_straight2curve.nii.gz', 'segmentation_labeled.nii',
         'NearestNeighbor'),
        verbose=verbose,
        is_sct_binary=True,
    )

    if clean_labels:
        # Clean labeled segmentation
        printv(
            '\nClean labeled segmentation (correct interpolation errors)...',
            verbose)
        clean_labeled_segmentation('segmentation_labeled.nii',
                                   'segmentation.nii',
                                   'segmentation_labeled.nii')

    # label discs
    printv('\nLabel discs...', verbose)
    printv('\nUn-straighten labeled discs...', verbose)
    run_proc(
        'sct_apply_transfo -i %s -d %s -w %s -o %s -x %s' %
        ('segmentation_straight_labeled_disc.nii', 'segmentation.nii',
         'warp_straight2curve.nii.gz', 'segmentation_labeled_disc.nii',
         'label'),
        verbose=verbose,
        is_sct_binary=True,
    )

    # come back
    os.chdir(curdir)

    # Generate output files
    path_seg, file_seg, ext_seg = extract_fname(fname_seg)
    fname_seg_labeled = os.path.join(path_output,
                                     file_seg + '_labeled' + ext_seg)
    printv('\nGenerate output files...', verbose)
    generate_output_file(os.path.join(path_tmp, "segmentation_labeled.nii"),
                         fname_seg_labeled)
    generate_output_file(
        os.path.join(path_tmp, "segmentation_labeled_disc.nii"),
        os.path.join(path_output, file_seg + '_labeled_discs' + ext_seg))
    # copy straightening files in case subsequent SCT functions need them
    generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"),
                         os.path.join(path_output,
                                      "warp_curve2straight.nii.gz"),
                         verbose=verbose)
    generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"),
                         os.path.join(path_output,
                                      "warp_straight2curve.nii.gz"),
                         verbose=verbose)
    generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"),
                         os.path.join(path_output, "straight_ref.nii.gz"),
                         verbose=verbose)

    # Remove temporary files
    if remove_temp_files == 1:
        printv('\nRemove temporary files...', verbose)
        rmtree(path_tmp)

    # Generate QC report
    if arguments.qc is not None:
        path_qc = os.path.abspath(arguments.qc)
        qc_dataset = arguments.qc_dataset
        qc_subject = arguments.qc_subject
        labeled_seg_file = os.path.join(path_output,
                                        file_seg + '_labeled' + ext_seg)
        generate_qc(fname_in,
                    fname_seg=labeled_seg_file,
                    args=argv,
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    process='sct_label_vertebrae')

    display_viewer_syntax([fname_in, fname_seg_labeled],
                          colormaps=['', 'subcortical'],
                          opacities=['1', '0.5'])
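
A minimal usage sketch for the entry point above: parse_args(argv) accepts an explicit argument list, so the script can be driven programmatically. The flag names and file names below are assumptions for illustration and may not match the actual parser.

# Hypothetical programmatic call of main() above; flags and file names are
# assumptions for illustration only.
main(['-i', 't2.nii.gz', '-s', 't2_seg.nii.gz', '-c', 't2', '-qc', 'qc'])
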
Example No. 20
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    if (arguments.list_tasks is False
            and arguments.install_task is None
            and (arguments.i is None or arguments.task is None)):
        parser.error("You must specify either '-list-tasks', '-install-task', or both '-i' + '-task'.")

    # Deal with task
    if arguments.list_tasks:
        deepseg.models.display_list_tasks()

    if arguments.install_task is not None:
        for name_model in deepseg.models.TASKS[arguments.install_task]['models']:
            deepseg.models.install_model(name_model)
        exit(0)

    # Deal with input/output
    for file in arguments.i:
        if not os.path.isfile(file):
            parser.error("This file does not exist: {}".format(file))

    # Verify if the task is part of the "official" tasks, or if it is pointing to paths containing custom models
    if len(arguments.task) == 1 and arguments.task[0] in deepseg.models.TASKS:
        # Check if all input images are provided
        required_contrasts = deepseg.models.get_required_contrasts(arguments.task[0])
        n_contrasts = len(required_contrasts)
        # Get pipeline model names
        name_models = deepseg.models.TASKS[arguments.task[0]]['models']
    else:
        n_contrasts = len(arguments.i)
        name_models = arguments.task

    if len(arguments.i) != n_contrasts:
        parser.error(
            "{} input files found. Please provide all required input files for the task {}, i.e. contrasts: {}."
            .format(len(arguments.i), arguments.task, ', '.join(required_contrasts)))

    # Check modality order
    if len(arguments.i) > 1 and arguments.c is None:
        parser.error(
            "Please specify the order in which you put the contrasts in the input images (-i) with flag -c, e.g., "
            "-c t1 t2")

    # Run pipeline by iterating through the models
    fname_prior = None
    output_filenames = None
    for name_model in name_models:
        # Check if this is an official model
        if name_model in list(deepseg.models.MODELS.keys()):
            # If it is, check if it is installed
            path_model = deepseg.models.folder(name_model)
            if not deepseg.models.is_valid(path_model):
                printv("Model {} is not installed. Installing it now...".format(name_model))
                deepseg.models.install_model(name_model)
        # If it is not, check if this is a path to a valid model
        else:
            path_model = os.path.abspath(name_model)
            if not deepseg.models.is_valid(path_model):
                parser.error("The input model is invalid: {}".format(path_model))

        # Order input images
        if arguments.c is not None:
            input_filenames = []
            for required_contrast in deepseg.models.MODELS[name_model]['contrasts']:
                for provided_contrast, input_filename in zip(arguments.c, arguments.i):
                    if required_contrast == provided_contrast:
                        input_filenames.append(input_filename)
        else:
            input_filenames = arguments.i

        # Call segment_nifti
        options = {**vars(arguments), "fname_prior": fname_prior}
        nii_lst, target_lst = imed_inference.segment_volume(path_model, input_filenames, options=options)

        # Delete intermediate outputs
        if fname_prior and os.path.isfile(fname_prior) and arguments.r:
            logger.info("Remove temporary files...")
            os.remove(fname_prior)

        output_filenames = []
        # Save output seg
        for nii_seg, target in zip(nii_lst, target_lst):
            if 'o' in options and options['o'] is not None:
                # To support if the user adds the extension or not
                extension = ".nii.gz" if ".nii.gz" in options['o'] else ".nii" if ".nii" in options['o'] else ""
                if extension == "":
                    fname_seg = options['o'] + target if len(target_lst) > 1 else options['o']
                else:
                    fname_seg = options['o'].replace(extension, target + extension) if len(target_lst) > 1 \
                        else options['o']
            else:
                fname_seg = ''.join([sct.image.splitext(input_filenames[0])[0], target + '.nii.gz'])

            # If output folder does not exist, create it
            path_out = os.path.dirname(fname_seg)
            if not (path_out == '' or os.path.exists(path_out)):
                os.makedirs(path_out)

            nib.save(nii_seg, fname_seg)
            output_filenames.append(fname_seg)

        # Use the result of the current model as additional input of the next model
        fname_prior = fname_seg

    for output_filename in output_filenames:
        display_viewer_syntax([arguments.i[0], output_filename], colormaps=['gray', 'red'], opacities=['', '0.7'])
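
The output-naming expression above (a user-supplied -o with or without an extension, plus a per-target suffix when a model produces several outputs) is dense. Here is a minimal re-expression as a standalone helper, written only to illustrate the rule; it is not the function used by the script.

def build_output_name(o_arg, target, multiple_outputs):
    """Illustrative sketch of the output-naming rule above (assumption, not SCT code)."""
    # Detect whether the user already supplied an extension
    extension = ".nii.gz" if ".nii.gz" in o_arg else ".nii" if ".nii" in o_arg else ""
    if extension == "":
        return o_arg + target if multiple_outputs else o_arg
    # Insert the per-target suffix just before the extension
    return o_arg.replace(extension, target + extension) if multiple_outputs else o_arg

# e.g. build_output_name('seg.nii.gz', '_lesion', True) -> 'seg_lesion.nii.gz'
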
Example No. 21
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    param_default = Param()

    overwrite = 0  # TODO: Not used. Why?
    fname_data = get_absolute_path(arguments.i)
    path_label = arguments.f
    method = arguments.method
    fname_output = arguments.o
    append_csv = arguments.append
    combine_labels = arguments.combine
    labels_user = arguments.l
    adv_param_user = arguments.param  # TODO: Not used. Why?
    slices = parse_num_list(arguments.z)
    levels = parse_num_list(arguments.vert)
    fname_vertebral_labeling = arguments.vertfile
    perslice = arguments.perslice
    perlevel = arguments.perlevel
    fname_normalizing_label = arguments.norm_file  # TODO: Not used. Why?
    normalization_method = arguments.norm_method  # TODO: Not used. Why?
    label_to_fix = arguments.fix_label  # TODO: Not used. Why?
    fname_output_metric_map = arguments.output_map  # TODO: Not used. Why?
    fname_mask_weight = arguments.mask_weighted  # TODO: Not used. Why?
    discard_negative_values = int(
        arguments.discard_neg_val)  # TODO: Not used. Why?

    # check if path_label is a file (e.g., single binary mask) instead of a folder (e.g., SCT atlas structure which
    # contains info_label.txt file)
    if os.path.isfile(path_label):
        # Label is a single file
        indiv_labels_ids = [0]
        indiv_labels_files = [path_label]
        combined_labels_ids = []
        label_struc = {
            0:
            LabelStruc(id=0,
                       name=extract_fname(path_label)[1],
                       filename=path_label)
        }
        # set path_label to empty string, because indiv_labels_files will replace it from now on
        path_label = ''
    elif os.path.isdir(path_label):
        # Labels is an SCT atlas folder structure
        # Parse labels according to the file info_label.txt
        # Note: the "combined_labels_*" is a list of single labels that are defined in the section defined by the keyword
        # "# Keyword=CombinedLabels" in info_label.txt.
        # TODO: redirect to appropriate Sphinx documentation
        # TODO: output Class instead of multiple variables.
        #   Example 1:
        #     label_struc[2].id = (2)
        #     label_struc[2].name = "left fasciculus cuneatus"
        #     label_struc[2].filename = "PAM50_atlas_02.nii.gz"
        #   Example 2:
        #     label_struc[51].id = (1, 2, 3, ..., 29)
        #     label_struc[51].name = "White Matter"
        #     label_struc[51].filename = ""  # no name because it is combined
        indiv_labels_ids, indiv_labels_names, indiv_labels_files, \
            combined_labels_ids, combined_labels_names, combined_labels_id_groups, map_clusters \
            = read_label_file(path_label, param_default.file_info_label)

        label_struc = {}
        # fill IDs for indiv labels
        for i_label in range(len(indiv_labels_ids)):
            label_struc[indiv_labels_ids[i_label]] = LabelStruc(
                id=indiv_labels_ids[i_label],
                name=indiv_labels_names[i_label],
                filename=indiv_labels_files[i_label],
                map_cluster=[
                    indiv_labels_ids[i_label] in map_cluster
                    for map_cluster in map_clusters
                ].index(True))
        # fill IDs for combined labels
        # TODO: problem for defining map_cluster: if labels overlap two regions, e.g. WM and GM (e.g. id=50),
        #  map_cluster will take value 0, which is wrong.
        for i_label in range(len(combined_labels_ids)):
            label_struc[combined_labels_ids[i_label]] = LabelStruc(
                id=combined_labels_id_groups[i_label],
                name=combined_labels_names[i_label],
                map_cluster=[
                    indiv_labels_ids[i_label] in map_cluster
                    for map_cluster in map_clusters
                ].index(True))
    else:
        raise RuntimeError(path_label + ' does not exist')

    # check syntax of labels asked by user
    labels_id_user = check_labels(indiv_labels_ids + combined_labels_ids,
                                  parse_num_list(labels_user))
    nb_labels = len(indiv_labels_files)

    # Load data and systematically reorient to RPI because we need the 3rd dimension to be z
    printv('\nLoad metric image...', verbose)
    input_im = Image(fname_data).change_orientation("RPI")

    data = Metric(data=input_im.data, label='')
    # Load labels
    labels_tmp = np.empty([nb_labels], dtype=object)
    for i_label in range(nb_labels):
        im_label = Image(os.path.join(
            path_label, indiv_labels_files[i_label])).change_orientation("RPI")
        labels_tmp[i_label] = np.expand_dims(
            im_label.data, 3)  # TODO: generalize to 2D input label
    labels = np.concatenate(labels_tmp[:], 3)  # labels: (x,y,z,label)
    # Load vertebral levels
    if levels:
        im_vertebral_labeling = Image(
            fname_vertebral_labeling).change_orientation("RPI")
    else:
        im_vertebral_labeling = None

    # Get dimensions of data and labels
    nx, ny, nz = data.data.shape
    nx_atlas, ny_atlas, nz_atlas, nt_atlas = labels.shape

    # Check dimensions consistency between atlas and data
    if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas):
        printv('\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.',
               1,
               type='error')

    # Combine individual labels for estimation
    if combine_labels:
        # Add entry with internal ID value (99) which corresponds to combined labels
        label_struc[99] = LabelStruc(id=labels_id_user,
                                     name=','.join(
                                         [str(i) for i in labels_id_user]),
                                     map_cluster=None)
        labels_id_user = [99]

    for id_label in labels_id_user:
        printv('Estimation for label: ' + label_struc[id_label].name, verbose)
        agg_metric = extract_metric(data,
                                    labels=labels,
                                    slices=slices,
                                    levels=levels,
                                    perslice=perslice,
                                    perlevel=perlevel,
                                    vert_level=im_vertebral_labeling,
                                    method=method,
                                    label_struc=label_struc,
                                    id_label=id_label,
                                    indiv_labels_ids=indiv_labels_ids)

        save_as_csv(agg_metric,
                    fname_output,
                    fname_in=fname_data,
                    append=append_csv)
        append_csv = True  # when looping across labels, need to append results in the same file
    display_open(fname_output)
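
The map_cluster assignment above uses a list-comprehension-plus-.index(True) idiom to find which cluster a label belongs to. A small self-contained illustration of that pattern (the IDs are made up):

# Toy illustration of the map_cluster lookup used above (label IDs are made up).
map_clusters = [[1, 2, 3], [30, 31], [36]]  # hypothetical groups of label IDs
label_id = 31
map_cluster = [label_id in cluster for cluster in map_clusters].index(True)
print(map_cluster)  # 1, i.e. the first cluster containing label_id
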
Example No. 22
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # See if there's a configuration file and import those options
    if arguments.config is not None:
        print('configuring')
        with open(arguments.config, 'r') as conf:
            _, ext = os.path.splitext(arguments.config)
            if ext == '.json':
                config = json.load(conf)
            if ext == '.yml' or ext == '.yaml':
                config = yaml.load(conf, Loader=yaml.Loader)

        # Warn people if they're overriding their config file
        if len(argv) > 2:
            warnings.warn(
                UserWarning(
                    'Using the `-config|-c` flag with additional arguments is discouraged'
                ))

        # Check for unsupported arguments
        orig_keys = set(vars(arguments).keys())
        config_keys = set(config.keys())
        if orig_keys != config_keys:
            for k in config_keys.difference(orig_keys):
                del config[k]  # Remove the unknown key
                warnings.warn(
                    UserWarning(
                        'Unknown key "{}" found in your configuration file, ignoring.'
                        .format(k)))

        # Update the default to match the config
        parser.set_defaults(**config)

        # Reparse the arguments
        arguments = parser.parse_args(argv)

    if arguments.script is None:
        parser.error(
            "The -script argument must be provided, either via command-line or via the -config/-c argument."
        )

    # Set up email notifications if desired
    do_email = arguments.email_to is not None
    if do_email:
        email_to = arguments.email_to
        if arguments.email_from is not None:
            email_from = arguments.email_from
        else:
            email_from = arguments.email_to

        smtp_host, smtp_port = arguments.email_host.split(":")
        smtp_port = int(smtp_port)
        email_pass = getpass('Please input your email password:\n')

        def send_notification(subject, message):
            send_email(email_to,
                       email_from,
                       subject=subject,
                       message=message,
                       passwd=email_pass,
                       smtp_host=smtp_host,
                       smtp_port=smtp_port)

        while True:
            send_test = input(
                'Would you like to send a test email to validate your settings? [Y/n]:\n'
            )
            if send_test.lower() in ['', 'y', 'n']:
                break
            else:
                print('Please input y or n')

        if send_test.lower() in ['', 'y']:
            send_notification('sct_run_batch: test notification',
                              'Looks good')

    # Set up output directories and create them if they don't already exist
    path_output = os.path.abspath(os.path.expanduser(arguments.path_output))
    path_results = os.path.join(path_output, 'results')
    path_data_processed = os.path.join(path_output, 'data_processed')
    path_log = os.path.join(path_output, 'log')
    path_qc = os.path.join(path_output, 'qc')
    path_segmanual = os.path.abspath(
        os.path.expanduser(arguments.path_segmanual))
    script = os.path.abspath(os.path.expanduser(arguments.script))
    path_data = os.path.abspath(os.path.expanduser(arguments.path_data))

    for pth in [
            path_output, path_results, path_data_processed, path_log, path_qc
    ]:
        os.makedirs(pth, exist_ok=True)

    # Check that the script can be found
    if not os.path.exists(script):
        raise FileNotFoundError(
            'Couldn\'t find the script at {}'.format(script))

    # Setup overall log
    batch_log = open(os.path.join(path_log, arguments.batch_log), 'w')

    # Duplicate init_sct message to batch_log
    print('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__),
          file=batch_log,
          flush=True)

    # Tee IO to batch_log and std(out/err)
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr

    sys.stdout = Tee(batch_log, orig_stdout)
    sys.stderr = Tee(batch_log, orig_stderr)

    def reset_streams():
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr

    # Display OS

    print("INFO SYSTEM")
    print("-----------")
    platform_running = sys.platform
    if platform_running.find('darwin') != -1:
        os_running = 'osx'
    elif platform_running.find('linux') != -1:
        os_running = 'linux'
    print('OS: ' + os_running + ' (' + platform.platform() + ')')

    # Display number of CPU cores
    print('CPU cores: Available: {} | Threads used by ITK Programs: {}'.format(
        multiprocessing.cpu_count(), arguments.itk_threads))

    # Display RAM available
    print("RAM: Total {} MB | Available {} MB | Used {} MB".format(
        int(psutil.virtual_memory().total / 1024 / 1024),
        int(psutil.virtual_memory().available / 1024 / 1024),
        int(psutil.virtual_memory().used / 1024 / 1024),
    ))

    # Log the current arguments (in yaml because it's cleaner)
    print('\nINPUT ARGUMENTS')
    print("---------------")
    print(yaml.dump(vars(arguments)))

    # Display script version info
    print("SCRIPT")
    print("------")
    print("git commit: {}".format(
        __get_commit(path_to_git_folder=os.path.dirname(script))))
    print("git origin: {}".format(
        __get_git_origin(path_to_git_folder=os.path.dirname(script))))
    print("Copying script to output folder...")
    try:
        # Copy the script and record the new location
        script_copy = os.path.abspath(
            shutil.copy(script, arguments.path_output))
        print("{} -> {}".format(script, script_copy))
        script = script_copy
    except shutil.SameFileError:
        print("Input and output folder are the same. Skipping copy.")
        pass
    except IsADirectoryError:
        print("Input folder is a directory (not a file). Skipping copy.")
        pass

    print("Setting execute permissions for script file {} ...".format(
        arguments.script))
    script_stat = os.stat(script)
    os.chmod(script, script_stat.st_mode | S_IEXEC)

    # Display data version info
    print("\nDATA")
    print("----")
    print("git commit: {}".format(__get_commit(path_to_git_folder=path_data)))
    print("git origin: {}\n".format(
        __get_git_origin(path_to_git_folder=path_data)))

    # Find subjects and process inclusion/exclusions
    subject_dirs = [
        f for f in os.listdir(path_data)
        if f.startswith(arguments.subject_prefix)
    ]

    # Handle inclusion lists
    assert not ((arguments.include is not None) and (arguments.include_list is not None)),\
        'Only one of `include` and `include-list` can be used'

    if arguments.include is not None:
        subject_dirs = [
            f for f in subject_dirs
            if re.search(arguments.include, f) is not None
        ]

    if arguments.include_list is not None:
        # TODO decide if we should warn users if one of their inclusions isn't around
        subject_dirs = [f for f in subject_dirs if f in arguments.include_list]

    # Handle exclusions
    assert not ((arguments.exclude is not None) and (arguments.exclude_list is not None)),\
        'Only one of `exclude` and `exclude-list` can be used'

    if arguments.exclude is not None:
        subject_dirs = [
            f for f in subject_dirs if re.search(arguments.exclude, f) is None
        ]

    if arguments.exclude_list is not None:
        subject_dirs = [
            f for f in subject_dirs if f not in arguments.exclude_list
        ]

    # Determine the number of jobs we can run simultaneously
    if arguments.jobs < 1:
        jobs = multiprocessing.cpu_count() + arguments.jobs
    else:
        jobs = arguments.jobs

    print("RUNNING")
    print("-------")
    print("Processing {} subjects in parallel. (Worker processes used: {}).".
          format(len(subject_dirs), jobs))

    # Run the jobs, recording start and end times
    start = datetime.datetime.now()

    # Trap errors to send an email if a script fails.
    try:
        with multiprocessing.Pool(jobs) as p:
            run_single_dir = functools.partial(
                run_single,
                script=script,
                script_args=arguments.script_args,
                path_segmanual=path_segmanual,
                path_data=path_data,
                path_data_processed=path_data_processed,
                path_results=path_results,
                path_log=path_log,
                path_qc=path_qc,
                itk_threads=arguments.itk_threads,
                continue_on_error=arguments.continue_on_error)
            results = list(p.imap(run_single_dir, subject_dirs))
    except Exception as e:
        if do_email:
            message = (
                'Oh no there has been the following error in your pipeline:\n\n'
                '{}'.format(e))
            try:
                # The multiprocessing error is more significant than a potential email
                # error; re-raising here ensures that the multiprocessing error is signalled.
                send_notification('sct_run_batch errored', message)
            except Exception:
                raise e

            raise e
        else:
            raise e

    end = datetime.datetime.now()

    # Check for failed subjects
    fails = [
        sd for (sd, ret) in zip(subject_dirs, results) if ret.returncode != 0
    ]

    if len(fails) == 0:
        status_message = '\nHooray! Your batch completed successfully :-)\n'
    else:
        status_message = (
            '\nYour batch completed, but some subjects may not have completed '
            'successfully; please consult the logs for:\n'
            '{}\n'.format('\n'.join(fails)))
    print(status_message)

    # Display timing
    duration = end - start
    timing_message = ('Started: {} | Ended: {} | Duration: {}\n'.format(
        start.strftime('%Hh%Mm%Ss'), end.strftime('%Hh%Mm%Ss'),
        (datetime.datetime.utcfromtimestamp(0) +
         duration).strftime('%Hh%Mm%Ss')))
    print(timing_message)

    if do_email:
        send_notification('sct_run_batch: Run completed',
                          status_message + timing_message)

    open_cmd = 'open' if sys.platform == 'darwin' else 'xdg-open'

    print(
        'To open the Quality Control (QC) report on a web-browser, run the following:\n'
        '{} {}/index.html'.format(open_cmd, path_qc))

    if arguments.zip:
        file_zip = 'sct_run_batch_{}'.format(time.strftime('%Y%m%d%H%M%S'))
        path_tmp = os.path.join(tempfile.mkdtemp(), file_zip)
        os.makedirs(os.path.join(path_tmp, file_zip))
        for folder in [path_log, path_qc, path_results]:
            shutil.copytree(
                folder,
                os.path.join(path_tmp, file_zip,
                             os.path.split(folder)[-1]))
        shutil.make_archive(os.path.join(path_output, file_zip), 'zip',
                            path_tmp)
        shutil.rmtree(path_tmp)
        print("\nOutput zip archive: {}.zip".format(
            os.path.join(path_output, file_zip)))

    reset_streams()
    batch_log.close()
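
The Tee object above duplicates stdout/stderr into the batch log. Its implementation is not part of this excerpt; a minimal sketch of what such a stream duplicator might look like, under that assumption:

class TeeSketch:
    """Minimal stand-in for the Tee used above (assumption, not SCT's actual class)."""

    def __init__(self, *streams):
        self.streams = streams

    def write(self, text):
        # Forward every write to all wrapped streams (e.g. log file and original stdout)
        for stream in self.streams:
            stream.write(text)

    def flush(self):
        for stream in self.streams:
            stream.flush()
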
Example No. 23
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initialize parameters
    param = Param()

    # Initialization
    fname_output = ''
    path_out = ''
    fname_src_seg = ''
    fname_dest_seg = ''
    fname_src_label = ''
    fname_dest_label = ''
    generate_warpinv = 1

    start_time = time.time()

    # get arguments
    fname_src = arguments.i
    fname_dest = arguments.d
    if arguments.iseg is not None:
        fname_src_seg = arguments.iseg
    if arguments.dseg is not None:
        fname_dest_seg = arguments.dseg
    if arguments.ilabel is not None:
        fname_src_label = arguments.ilabel
    if arguments.dlabel is not None:
        fname_dest_label = arguments.dlabel
    if arguments.o is not None:
        fname_output = arguments.o
    if arguments.ofolder is not None:
        path_out = arguments.ofolder
    if arguments.owarp is not None:
        fname_output_warp = arguments.owarp
    else:
        fname_output_warp = ''
    if arguments.initwarp is not None:
        fname_initwarp = os.path.abspath(arguments.initwarp)
    else:
        fname_initwarp = ''
    if arguments.initwarpinv is not None:
        fname_initwarpinv = os.path.abspath(arguments.initwarpinv)
    else:
        fname_initwarpinv = ''
    if arguments.m is not None:
        fname_mask = arguments.m
    else:
        fname_mask = ''
    padding = arguments.z
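    # Note: `paramregmulti` used below is assumed to be a ParamregMultiStep instance
    # created earlier in the full script (not shown in this excerpt).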
    if arguments.param is not None:
        paramregmulti_user = arguments.param
        # update registration parameters
        for paramStep in paramregmulti_user:
            paramregmulti.addStep(paramStep)
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject

    identity = arguments.identity
    interp = arguments.x
    remove_temp_files = arguments.r

    # printv(arguments)
    printv('\nInput parameters:')
    printv('  Source .............. ' + fname_src)
    printv('  Destination ......... ' + fname_dest)
    printv('  Init transfo ........ ' + fname_initwarp)
    printv('  Mask ................ ' + fname_mask)
    printv('  Output name ......... ' + fname_output)
    # printv('  Algorithm ........... '+paramregmulti.algo)
    # printv('  Number of iterations  '+paramregmulti.iter)
    # printv('  Metric .............. '+paramregmulti.metric)
    printv('  Remove temp files ... ' + str(remove_temp_files))
    printv('  Verbose ............. ' + str(verbose))

    # update param
    param.verbose = verbose
    param.padding = padding
    param.fname_mask = fname_mask
    param.remove_temp_files = remove_temp_files

    # Get if input is 3D
    printv('\nCheck if input data are 3D...', verbose)
    check_dim(fname_src, dim_lst=[3])
    check_dim(fname_dest, dim_lst=[3])

    # Check if user selected type=seg, but did not input segmentation data
    if 'paramregmulti_user' in locals():
        if True in ['type=seg' in paramregmulti_user[i] for i in range(len(paramregmulti_user))]:
            if fname_src_seg == '' or fname_dest_seg == '':
                printv('\nERROR: if you select type=seg you must specify -iseg and -dseg flags.\n', 1, 'error')

    # Put source into destination space using header (no estimation -- purely based on header)
    # TODO: Check if necessary to do that
    # TODO: use that as step=0
    # printv('\nPut source into destination space using header...', verbose)
    # run_proc('isct_antsRegistration -d 3 -t Translation[0] -m MI[dest_pad.nii,src.nii,1,16] -c 0 -f 1 -s 0 -o
    # [regAffine,src_regAffine.nii] -n BSpline[3]', verbose)
    # if segmentation, also do it for seg

    fname_src2dest, fname_dest2src, _, _ = \
        register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg=fname_src_seg,
                         fname_dest_seg=fname_dest_seg, fname_src_label=fname_src_label,
                         fname_dest_label=fname_dest_label, fname_mask=fname_mask, fname_initwarp=fname_initwarp,
                         fname_initwarpinv=fname_initwarpinv, identity=identity, interp=interp,
                         fname_output=fname_output,
                         fname_output_warp=fname_output_warp,
                         path_out=path_out)

    # display elapsed time
    elapsed_time = time.time() - start_time
    printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose)

    if path_qc is not None:
        if fname_dest_seg:
            generate_qc(fname_src2dest, fname_in2=fname_dest, fname_seg=fname_dest_seg, args=argv,
                        path_qc=os.path.abspath(path_qc), dataset=qc_dataset, subject=qc_subject,
                        process='sct_register_multimodal')
        else:
            printv('WARNING: Cannot generate QC because it requires destination segmentation.', 1, 'warning')

    if generate_warpinv:
        display_viewer_syntax([fname_src, fname_dest2src], verbose=verbose)
    display_viewer_syntax([fname_dest, fname_src2dest], verbose=verbose)
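
The `True in [...]` membership test above (checking whether any registration step uses type=seg) can equivalently be written with any(). A small illustrative sketch with hypothetical step strings:

# Equivalent, more direct form of the type=seg check above (step strings are hypothetical).
paramregmulti_user = ['step=1,type=seg,algo=centermass', 'step=2,type=im,algo=syn']
uses_seg = any('type=seg' in step for step in paramregmulti_user)
print(uses_seg)  # True
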
Example No. 24
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # Input filename
    fname_input_data = arguments.i
    fname_data = os.path.abspath(fname_input_data)

    # Method used
    method = arguments.method

    # Contrast type
    contrast_type = arguments.c
    if method == 'optic' and not contrast_type:
        # Contrast must be
        error = "ERROR: -c is a mandatory argument when using 'optic' method."
        printv(error, type='error')
        return

    # Gap between slices
    interslice_gap = arguments.gap

    param_centerline = ParamCenterline(algo_fitting=arguments.centerline_algo,
                                       smooth=arguments.centerline_smooth,
                                       minmax=True)

    # Output folder
    if arguments.o is not None:
        file_output = arguments.o
    else:
        path_data, file_data, ext_data = extract_fname(fname_data)
        file_output = os.path.join(path_data, file_data + '_centerline')

    if method == 'viewer':
        # Manual labeling of cord centerline
        im_labels = _call_viewer_centerline(Image(fname_data),
                                            interslice_gap=interslice_gap)
    elif method == 'fitseg':
        im_labels = Image(fname_data)
    elif method == 'optic':
        # Automatic detection of cord centerline
        im_labels = Image(fname_data)
        param_centerline.algo_fitting = 'optic'
        param_centerline.contrast = contrast_type
    else:
        printv(
            "ERROR: The selected method is not available: {}. Please look at the help."
            .format(method),
            type='error')
        return

    # Extrapolate and regularize (or detect if optic) cord centerline
    im_centerline, arr_centerline, _, _ = get_centerline(
        im_labels, param=param_centerline, verbose=verbose)

    # save centerline as nifti (discrete) and csv (continuous) files
    im_centerline.save(file_output + '.nii.gz')
    np.savetxt(file_output + '.csv', arr_centerline.transpose(), delimiter=",")

    display_viewer_syntax([fname_input_data, file_output + '.nii.gz'],
                          colormaps=['gray', 'red'],
                          opacities=['', '1'])

    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject

    # Generate QC report
    if path_qc is not None:
        generate_qc(fname_input_data,
                    fname_seg=file_output + '.nii.gz',
                    args=sys.argv[1:],
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    process='sct_get_centerline')
    display_viewer_syntax([fname_input_data, file_output + '.nii.gz'],
                          colormaps=['gray', 'red'],
                          opacities=['', '0.7'])
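
The centerline above is written both as a discrete NIfTI and as a comma-delimited CSV of the transposed coordinate array. A minimal sketch of reading that CSV back with numpy; the file name is hypothetical and the exact column layout is an assumption:

import numpy as np

# Illustrative read-back of the centerline CSV saved above (file name is hypothetical).
arr = np.loadtxt('t2_centerline.csv', delimiter=',')
print(arr.shape)  # one row per point, one column per coordinate (assumed layout)
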
Example No. 25
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # Initialization
    slices = ''
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = get_absolute_path(arguments.i)
    fname_vert_levels = ''
    if arguments.o is not None:
        file_out = os.path.abspath(arguments.o)
    else:
        file_out = ''
    if arguments.append is not None:
        append = arguments.append
    else:
        append = 0
    if arguments.vert is not None:
        vert_levels = arguments.vert
    else:
        vert_levels = ''
    remove_temp_files = arguments.r
    if arguments.vertfile is not None:
        fname_vert_levels = arguments.vertfile
    if arguments.perlevel is not None:
        perlevel = arguments.perlevel
    else:
        perlevel = None
    if arguments.z is not None:
        slices = arguments.z
    if arguments.perslice is not None:
        perslice = arguments.perslice
    else:
        perslice = None
    angle_correction = arguments.angle_corr
    param_centerline = ParamCenterline(
        algo_fitting=arguments.centerline_algo,
        smooth=arguments.centerline_smooth,
        minmax=True)
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    metrics, fit_results = compute_shape(fname_segmentation,
                                         angle_correction=angle_correction,
                                         param_centerline=param_centerline,
                                         verbose=verbose)
    for key in metrics:
        if key == 'length':
            # For computing cord length, slice-wise length needs to be summed across slices
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels), perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=(('SUM', func_sum),))
        else:
            # For other metrics, we compute the average and standard deviation across slices
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels), perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=group_funcs)
    metrics_agg_merged = merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)

    # QC report (only show CSA for clarity)
    if path_qc is not None:
        generate_qc(fname_segmentation, args=arguments, path_qc=os.path.abspath(path_qc), dataset=qc_dataset,
                    subject=qc_subject, path_img=_make_figure(metrics_agg_merged, fit_results),
                    process='sct_process_segmentation')

    display_open(file_out)
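
The group_funcs tuples above decide how per-slice values are reduced: cord length is summed, while the other shape metrics get a mean and standard deviation. A toy sketch of that (name, function) pattern with plain numpy; the real aggregate_per_slice_or_level also handles slice and vertebral-level grouping:

import numpy as np

# Toy illustration of applying (name, function) pairs to per-slice values (values made up).
per_slice_csa = [7.1, 7.4, 6.9, 7.2]
group_funcs_sketch = (('MEAN', np.mean), ('STD', np.std))
aggregated = {name: float(func(per_slice_csa)) for name, func in group_funcs_sketch}
print(aggregated)  # {'MEAN': 7.15, 'STD': ...}
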
Example No. 26
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initialize parameters
    param = Param()

    fname_data = arguments.i
    fname_bvecs = arguments.bvec
    average = arguments.a
    remove_temp_files = arguments.r
    path_out = arguments.ofolder

    fname_bvals = arguments.bval
    if arguments.bvalmin:
        param.bval_min = arguments.bvalmin

    # Initialization
    start_time = time.time()

    # printv(arguments)
    printv('\nInput parameters:', verbose)
    printv('  input file ............' + fname_data, verbose)
    printv('  bvecs file ............' + fname_bvecs, verbose)
    printv('  bvals file ............' + fname_bvals, verbose)
    printv('  average ...............' + str(average), verbose)

    # Get full path
    fname_data = os.path.abspath(fname_data)
    fname_bvecs = os.path.abspath(fname_bvecs)
    if fname_bvals:
        fname_bvals = os.path.abspath(fname_bvals)

    # Extract path, file and extension
    path_data, file_data, ext_data = extract_fname(fname_data)

    # create temporary folder
    path_tmp = tmp_create(basename="dmri_separate")

    # copy files into tmp folder and convert to nifti
    printv('\nCopy files into temporary folder...', verbose)
    ext = '.nii'
    dmri_name = 'dmri'
    b0_name = file_data + '_b0'
    b0_mean_name = b0_name + '_mean'
    dwi_name = file_data + '_dwi'
    dwi_mean_name = dwi_name + '_mean'

    if not convert(fname_data, os.path.join(path_tmp, dmri_name + ext)):
        printv('ERROR in convert.', 1, 'error')
    copy(fname_bvecs, os.path.join(path_tmp, "bvecs"), verbose=verbose)

    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)

    # Get size of data
    im_dmri = Image(dmri_name + ext)
    printv('\nGet dimensions data...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_dmri.dim
    printv(
        '.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt),
        verbose)

    # Identify b=0 and DWI images
    printv(fname_bvals)
    index_b0, index_dwi, nb_b0, nb_dwi = identify_b0(fname_bvecs, fname_bvals,
                                                     param.bval_min, verbose)

    # Split into T dimension
    printv('\nSplit along T dimension...', verbose)
    im_dmri_split_list = split_data(im_dmri, 3)
    for im_d in im_dmri_split_list:
        im_d.save()

    # Merge b=0 images
    printv('\nMerge b=0...', verbose)
    fname_in_list_b0 = []
    for it in range(nb_b0):
        fname_in_list_b0.append(dmri_name + '_T' + str(index_b0[it]).zfill(4) +
                                ext)
    im_in_list_b0 = [Image(fname) for fname in fname_in_list_b0]
    concat_data(im_in_list_b0, 3).save(b0_name + ext)

    # Average b=0 images
    if average:
        printv('\nAverage b=0...', verbose)
        img = Image(b0_name + ext)
        out = img.copy()
        dim_idx = 3
        if len(np.shape(img.data)) < dim_idx + 1:
            raise ValueError("Expecting image with 4 dimensions!")
        out.data = np.mean(out.data, dim_idx)
        out.save(path=b0_mean_name + ext)

    # Merge DWI
    fname_in_list_dwi = []
    for it in range(nb_dwi):
        fname_in_list_dwi.append(dmri_name + '_T' +
                                 str(index_dwi[it]).zfill(4) + ext)
    im_in_list_dwi = [Image(fname) for fname in fname_in_list_dwi]
    concat_data(im_in_list_dwi, 3).save(dwi_name + ext)

    # Average DWI images
    if average:
        printv('\nAverage DWI...', verbose)
        img = Image(dwi_name + ext)
        out = img.copy()
        dim_idx = 3
        if len(np.shape(img.data)) < dim_idx + 1:
            raise ValueError("Expecting image with 4 dimensions!")
        out.data = np.mean(out.data, dim_idx)
        out.save(path=dwi_mean_name + ext)

    # come back
    os.chdir(curdir)

    # Generate output files
    fname_b0 = os.path.abspath(os.path.join(path_out, b0_name + ext_data))
    fname_dwi = os.path.abspath(os.path.join(path_out, dwi_name + ext_data))
    fname_b0_mean = os.path.abspath(
        os.path.join(path_out, b0_mean_name + ext_data))
    fname_dwi_mean = os.path.abspath(
        os.path.join(path_out, dwi_mean_name + ext_data))
    printv('\nGenerate output files...', verbose)
    generate_output_file(os.path.join(path_tmp, b0_name + ext),
                         fname_b0,
                         verbose=verbose)
    generate_output_file(os.path.join(path_tmp, dwi_name + ext),
                         fname_dwi,
                         verbose=verbose)
    if average:
        generate_output_file(os.path.join(path_tmp, b0_mean_name + ext),
                             fname_b0_mean,
                             verbose=verbose)
        generate_output_file(os.path.join(path_tmp, dwi_mean_name + ext),
                             fname_dwi_mean,
                             verbose=verbose)

    # Remove temporary files
    if remove_temp_files == 1:
        printv('\nRemove temporary files...', verbose)
        rmtree(path_tmp, verbose=verbose)

    # display elapsed time
    elapsed_time = time.time() - start_time
    printv(
        '\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's',
        verbose)

    return fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean
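
identify_b0 above separates volumes into b=0 and DWI groups using the b-values (or bvecs norms) and the bval_min threshold. A toy sketch of the b-value branch, under that assumption:

import numpy as np

# Toy split of volume indices into b=0 vs DWI with a b-value threshold
# (assumption about identify_b0's behaviour; the values are made up).
bvals = np.array([0, 5, 1000, 1000, 0, 1000])
bval_min = 100
index_b0 = np.where(bvals <= bval_min)[0].tolist()   # [0, 1, 4]
index_dwi = np.where(bvals > bval_min)[0].tolist()   # [2, 3, 5]
nb_b0, nb_dwi = len(index_b0), len(index_dwi)
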
Example No. 27
0
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)
    param = Param()

    # Initialization
    fname_warp_final = ''  # concatenated transformations
    if arguments.o is not None:
        fname_warp_final = arguments.o
    fname_dest = arguments.d
    fname_warp_list = arguments.w
    warpinv_filename = arguments.winv

    # Parse list of warping fields
    printv('\nParse list of warping fields...', verbose)
    use_inverse = []
    fname_warp_list_invert = []
    for idx_warp, path_warp in enumerate(fname_warp_list):
        # Check if this transformation should be inverted
        if path_warp in warpinv_filename:
            use_inverse.append('-i')
            fname_warp_list_invert += [[
                use_inverse[idx_warp], fname_warp_list[idx_warp]
            ]]
        else:
            use_inverse.append('')
            fname_warp_list_invert += [[path_warp]]
        path_warp = fname_warp_list[idx_warp]
        if path_warp.endswith((".nii", ".nii.gz")) \
                and Image(fname_warp_list[idx_warp]).header.get_intent()[0] != 'vector':
            raise ValueError(
                "Displacement field in {} is invalid: should be encoded"
                " in a 5D file with vector intent code"
                " (see https://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h"
                .format(path_warp))

    # check if destination file is 3d
    check_dim(fname_dest, dim_lst=[3])

    # Reverse the warp list, because isct_ComposeMultiTransform concatenates the transforms in reverse order
    fname_warp_list_invert.reverse()
    fname_warp_list_invert = functools.reduce(lambda x, y: x + y,
                                              fname_warp_list_invert)

    # Check file existence
    printv('\nCheck file existence...', verbose)
    check_file_exist(fname_dest, verbose)
    for i in range(len(fname_warp_list)):
        check_file_exist(fname_warp_list[i], verbose)

    # Get output folder and file name
    if fname_warp_final == '':
        path_out, file_out, ext_out = extract_fname(param.fname_warp_final)
    else:
        path_out, file_out, ext_out = extract_fname(fname_warp_final)

    # Check dimension of destination data (cf. issue #1419, #1429)
    im_dest = Image(fname_dest)
    if im_dest.dim[2] == 1:
        dimensionality = '2'
    else:
        dimensionality = '3'

    cmd = [
        'isct_ComposeMultiTransform', dimensionality, 'warp_final' + ext_out,
        '-R', fname_dest
    ] + fname_warp_list_invert
    _, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)

    # check if output was generated
    if not os.path.isfile('warp_final' + ext_out):
        raise ValueError(f"Warping field was not generated! {output}")

    # Generate output files
    printv('\nGenerate output files...', verbose)
    generate_output_file('warp_final' + ext_out,
                         os.path.join(path_out, file_out + ext_out))
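
The reverse-then-reduce step above turns a list of per-warp argument lists into one flat argument list in the order expected by the compose command. A small illustration with hypothetical file names:

import functools

# Toy illustration of the reverse + flatten step above (file names are hypothetical).
fname_warp_list_invert = [['warp_a.nii.gz'], ['-i', 'warp_b.nii.gz']]
fname_warp_list_invert.reverse()
flat_args = functools.reduce(lambda x, y: x + y, fname_warp_list_invert)
print(flat_args)  # ['-i', 'warp_b.nii.gz', 'warp_a.nii.gz']
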
Example No. 28
0
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    # initializations
    output_type = None
    dim_list = ['x', 'y', 'z', 't']

    fname_in = arguments.i
    n_in = len(fname_in)

    if arguments.o is not None:
        fname_out = arguments.o
    else:
        fname_out = None

    # Run command
    # Arguments are sorted alphabetically (not according to the usage order)
    if arguments.concat is not None:
        dim = arguments.concat
        assert dim in dim_list
        dim = dim_list.index(dim)
        im_out = [concat_data(fname_in, dim)]  # TODO: adapt to fname_in

    elif arguments.copy_header is not None:
        if fname_out is None:
            raise ValueError("Need to specify output image with -o!")
        im_in = Image(fname_in[0])
        im_dest = Image(arguments.copy_header)
        im_dest_new = im_in.copy()
        im_dest_new.data = im_dest.data.copy()
        # im_dest.header = im_in.header
        im_dest_new.absolutepath = im_dest.absolutepath
        im_out = [im_dest_new]

    elif arguments.display_warp:
        im_in = fname_in[0]
        visualize_warp(im_in, fname_grid=None, step=3, rm_tmp=True)
        im_out = None

    elif arguments.getorient:
        im_in = Image(fname_in[0])
        orient = im_in.orientation
        im_out = None

    elif arguments.keep_vol is not None:
        index_vol = (arguments.keep_vol).split(',')
        for iindex_vol, vol in enumerate(index_vol):
            index_vol[iindex_vol] = int(vol)
        im_in = Image(fname_in[0])
        im_out = [remove_vol(im_in, index_vol, todo='keep')]

    elif arguments.mcs:
        im_in = Image(fname_in[0])
        if n_in != 1:
            printv(parser.error('ERROR: -mcs need only one input'))
        if len(im_in.data.shape) != 5:
            printv(
                parser.error(
                    'ERROR: -mcs input need to be a multi-component image'))
        im_out = multicomponent_split(im_in)

    elif arguments.omc:
        im_ref = Image(fname_in[0])
        for fname in fname_in:
            im = Image(fname)
            if im.data.shape != im_ref.data.shape:
                printv(
                    parser.error(
                        'ERROR: -omc inputs need to have all the same shapes'))
            del im
        im_out = [multicomponent_merge(fname_in)]  # TODO: adapt to fname_in

    elif arguments.pad is not None:
        im_in = Image(fname_in[0])
        ndims = len(im_in.data.shape)
        if ndims != 3:
            printv('ERROR: you need to specify a 3D input file.', 1, 'error')
            return

        pad_arguments = arguments.pad.split(',')
        if len(pad_arguments) != 3:
            printv('ERROR: you need to specify 3 padding values.', 1, 'error')

        padx, pady, padz = pad_arguments
        padx, pady, padz = int(padx), int(pady), int(padz)
        im_out = [
            pad_image(im_in,
                      pad_x_i=padx,
                      pad_x_f=padx,
                      pad_y_i=pady,
                      pad_y_f=pady,
                      pad_z_i=padz,
                      pad_z_f=padz)
        ]

    elif arguments.pad_asym is not None:
        im_in = Image(fname_in[0])
        ndims = len(im_in.data.shape)
        if ndims != 3:
            printv('ERROR: you need to specify a 3D input file.', 1, 'error')
            return

        pad_arguments = arguments.pad_asym.split(',')
        if len(pad_arguments) != 6:
            printv('ERROR: you need to specify 6 padding values.', 1, 'error')

        padxi, padxf, padyi, padyf, padzi, padzf = pad_arguments
        padxi, padxf, padyi, padyf, padzi, padzf = int(padxi), int(padxf), int(
            padyi), int(padyf), int(padzi), int(padzf)
        im_out = [
            pad_image(im_in,
                      pad_x_i=padxi,
                      pad_x_f=padxf,
                      pad_y_i=padyi,
                      pad_y_f=padyf,
                      pad_z_i=padzi,
                      pad_z_f=padzf)
        ]

    elif arguments.remove_vol is not None:
        index_vol = (arguments.remove_vol).split(',')
        for iindex_vol, vol in enumerate(index_vol):
            index_vol[iindex_vol] = int(vol)
        im_in = Image(fname_in[0])
        im_out = [remove_vol(im_in, index_vol, todo='remove')]

    elif arguments.setorient is not None:
        printv(fname_in[0])
        im_in = Image(fname_in[0])
        im_out = [change_orientation(im_in, arguments.setorient)]

    elif arguments.setorient_data is not None:
        im_in = Image(fname_in[0])
        im_out = [
            change_orientation(im_in, arguments.setorient_data, data_only=True)
        ]

    elif arguments.split is not None:
        dim = arguments.split
        assert dim in dim_list
        im_in = Image(fname_in[0])
        dim = dim_list.index(dim)
        im_out = split_data(im_in, dim)

    elif arguments.type is not None:
        output_type = arguments.type
        im_in = Image(fname_in[0])
        im_out = [im_in]  # TODO: adapt to fname_in

    elif arguments.to_fsl is not None:
        space_files = arguments.to_fsl
        if len(space_files) > 2 or len(space_files) < 1:
            printv(parser.error('ERROR: -to-fsl expects 1 or 2 arguments'))
            return
        spaces = [Image(s) for s in space_files]
        if len(spaces) < 2:
            spaces.append(None)
        im_out = [
            displacement_to_abs_fsl(Image(fname_in[0]), spaces[0], spaces[1])
        ]

    else:
        im_out = None
        printv(
            parser.error(
                'ERROR: you need to specify an operation to do on the input image'
            ))

    # in case fname_out is not defined, use first element of input file name list
    if fname_out is None:
        fname_out = fname_in[0]

    # Write output
    if im_out is not None:
        printv('Generate output files...', verbose)
        # if only one output
        if len(im_out) == 1 and arguments.split is None:
            im_out[0].save(fname_out, dtype=output_type, verbose=verbose)
            display_viewer_syntax([fname_out], verbose=verbose)
        if arguments.mcs:
            # use input file name and add _X, _Y _Z. Keep the same extension
            l_fname_out = []
            for i_dim in range(3):
                l_fname_out.append(
                    add_suffix(fname_out or fname_in[0],
                               '_' + dim_list[i_dim].upper()))
                im_out[i_dim].save(l_fname_out[i_dim], verbose=verbose)
            display_viewer_syntax(l_fname_out)
        if arguments.split is not None:
            # use input file name and add _"DIM+NUMBER". Keep the same extension
            l_fname_out = []
            for i, im in enumerate(im_out):
                l_fname_out.append(
                    add_suffix(fname_out or fname_in[0],
                               '_' + dim_list[dim].upper() + str(i).zfill(4)))
                im.save(l_fname_out[i])
            display_viewer_syntax(l_fname_out)

    elif arguments.getorient:
        printv(orient)

    elif arguments.display_warp:
        printv('Warping grid generated.', verbose, 'info')
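
The -pad branch above parses an 'x,y,z' string into three symmetric padding values. A tiny sketch of that parsing step with a hypothetical argument value:

# Illustrative parsing of a '-pad x,y,z' argument into integers (value is hypothetical).
pad_argument = '2,2,5'
padx, pady, padz = (int(v) for v in pad_argument.split(','))
print(padx, pady, padz)  # 2 2 5
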
Example No. 29
0
def main(argv=None):
    parser = get_parser()
    arguments = parser.parse_args(argv if argv else ['--help'])
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    fname_input1 = arguments.i
    fname_input2 = arguments.d

    tmp_dir = tmp_create()  # create tmp directory
    tmp_dir = os.path.abspath(tmp_dir)

    # copy input files to tmp directory
    # for fname in [fname_input1, fname_input2]:
    copy(fname_input1, tmp_dir)
    copy(fname_input2, tmp_dir)
    fname_input1 = ''.join(extract_fname(fname_input1)[1:])
    fname_input2 = ''.join(extract_fname(fname_input2)[1:])

    curdir = os.getcwd()
    os.chdir(tmp_dir)  # go to tmp directory

    if arguments.bin is not None:
        fname_input1_bin = add_suffix(fname_input1, '_bin')
        run_proc(['sct_maths', '-i', fname_input1, '-bin', '0', '-o', fname_input1_bin])
        fname_input1 = fname_input1_bin
        fname_input2_bin = add_suffix(fname_input2, '_bin')
        run_proc(['sct_maths', '-i', fname_input2, '-bin', '0', '-o', fname_input2_bin])
        fname_input2 = fname_input2_bin

    # copy header of im_1 to im_2
    from spinalcordtoolbox.image import Image
    im_1 = Image(fname_input1)
    im_2 = Image(fname_input2)
    im_2.header = im_1.header
    im_2.save()

    cmd = ['isct_dice_coefficient', fname_input1, fname_input2]

    if vars(arguments)["2d_slices"] is not None:
        cmd += ['-2d-slices', str(vars(arguments)["2d_slices"])]
    if arguments.b is not None:
        bounding_box = arguments.b
        cmd += ['-b'] + bounding_box
    if arguments.bmax is not None and arguments.bmax == 1:
        cmd += ['-bmax']
    if arguments.bzmax is not None and arguments.bzmax == 1:
        cmd += ['-bzmax']
    if arguments.o is not None:
        path_output, fname_output, ext = extract_fname(arguments.o)
        cmd += ['-o', fname_output + ext]

    rm_tmp = bool(arguments.r)

    # # Computation of Dice coefficient using Python implementation.
    # # commented for now as it does not cover all the feature of isct_dice_coefficient
    # #from spinalcordtoolbox.image import Image, compute_dice
    # #dice = compute_dice(Image(fname_input1), Image(fname_input2), mode='3d', zboundaries=False)
    # #printv('Dice (python-based) = ' + str(dice), verbose)

    status, output = run_proc(cmd, verbose, is_sct_binary=True)

    os.chdir(curdir)  # go back to original directory

    # copy output file into original directory
    if arguments.o is not None:
        copy(os.path.join(tmp_dir, fname_output + ext), os.path.join(path_output, fname_output + ext))

    # remove tmp_dir
    if rm_tmp:
        rmtree(tmp_dir)

    printv(output, verbose)
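
The commented-out block above mentions a Python-based Dice computation. For reference, a minimal numpy sketch of the Dice coefficient on two binary masks; this is an illustrative formula, not the compute_dice function from spinalcordtoolbox.image:

import numpy as np

def dice_sketch(mask1, mask2):
    """Dice = 2*|A ∩ B| / (|A| + |B|) for binary masks (illustrative only)."""
    mask1, mask2 = mask1.astype(bool), mask2.astype(bool)
    denominator = mask1.sum() + mask2.sum()
    if denominator == 0:
        return 1.0  # both masks empty: define Dice as 1 by convention
    return 2.0 * np.logical_and(mask1, mask2).sum() / denominator
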
Example No. 30
0
def main(argv=None):
    """
    Main function
    :param argv:
    :return:
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    dim_list = ['x', 'y', 'z', 't']

    fname_in = arguments.i
    fname_out = arguments.o
    output_type = arguments.type

    # Open file(s)
    im = Image(fname_in)
    data = im.data  # 3d or 4d numpy array
    dim = im.dim

    # run command
    if arguments.otsu is not None:
        param = arguments.otsu
        data_out = sct_math.otsu(data, param)

    elif arguments.adap is not None:
        param = arguments.adap
        data_out = sct_math.adap(data, param[0], param[1])

    elif arguments.otsu_median is not None:
        param = arguments.otsu_median
        data_out = sct_math.otsu_median(data, param[0], param[1])

    elif arguments.thr is not None:
        param = arguments.thr
        data_out = sct_math.threshold(data, param)

    elif arguments.percent is not None:
        param = arguments.percent
        data_out = sct_math.perc(data, param)

    elif arguments.bin is not None:
        bin_thr = arguments.bin
        data_out = sct_math.binarize(data, bin_thr=bin_thr)

    elif arguments.add is not None:
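        # get_data_or_scalar (not shown here) presumably loads the image data behind the given path or broadcasts a scalar to the shape of data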
        data2 = get_data_or_scalar(arguments.add, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.sum(data_concat, axis=3)

    elif arguments.sub is not None:
        data2 = get_data_or_scalar(arguments.sub, data)
        data_out = data - data2

    elif arguments.laplacian is not None:
        sigmas = arguments.laplacian
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(
                parser.error(
                    'ERROR: -laplacian needs the same number of inputs as the number of image dimensions OR only one input'
                ))
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.laplacian(data, sigmas)

    elif arguments.mul is not None:
        data2 = get_data_or_scalar(arguments.mul, data)
        data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
        data_out = np.prod(data_concat, axis=3)

    elif arguments.div is not None:
        data2 = get_data_or_scalar(arguments.div, data)
        data_out = np.divide(data, data2)

    elif arguments.mean is not None:
        dim = dim_list.index(arguments.mean)
        if dim + 1 > len(
                np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.mean(data, dim)

    elif arguments.rms is not None:
        dim = dim_list.index(arguments.rms)
        if dim + 1 > len(
                np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
        data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))

    elif arguments.std is not None:
        dim = dim_list.index(arguments.std)
        if dim + 1 > len(
                np.shape(data)):  # in case input volume is 3d and dim=t
            data = data[..., np.newaxis]
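        # ddof=1: divide by N-1 (sample standard deviation)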
        data_out = np.std(data, dim, ddof=1)

    elif arguments.smooth is not None:
        sigmas = arguments.smooth
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(
                parser.error(
                    'ERROR: -smooth needs the same number of inputs as the number of image dimensions OR only one input'
                ))
        # adjust sigma based on voxel size
        sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
        # smooth data
        data_out = sct_math.smooth(data, sigmas)

    elif arguments.dilate is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            printv(
                parser.error(
                    'ERROR: -dim is required for -dilate with 2D morphological kernel'
                ))
        data_out = sct_math.dilate(data,
                                   size=arguments.dilate,
                                   shape=arguments.shape,
                                   dim=arguments.dim)

    elif arguments.erode is not None:
        if arguments.shape in ['disk', 'square'] and arguments.dim is None:
            printv(
                parser.error(
                    'ERROR: -dim is required for -erode with 2D morphological kernel'
                ))
        data_out = sct_math.erode(data,
                                  size=arguments.erode,
                                  shape=arguments.shape,
                                  dim=arguments.dim)

    elif arguments.denoise is not None:
        # parse denoising arguments
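        # -denoise expects a string such as "p=1,b=5" (non-local-means patch radius and block radius)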
        p, b = 1, 5  # default arguments
        list_denoise = (arguments.denoise).split(",")
        for i in list_denoise:
            if 'p' in i:
                p = int(i.split('=')[1])
            if 'b' in i:
                b = int(i.split('=')[1])
        data_out = sct_math.denoise_nlmeans(data,
                                            patch_radius=p,
                                            block_radius=b)

    elif arguments.symmetrize is not None:
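        # average the image with its mirror along the first array axis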
        data_out = (data + data[list(range(data.shape[0] -
                                           1, -1, -1)), :, :]) / float(2)

    elif arguments.mi is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -mi
        im_2 = Image(arguments.mi)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='mi',
                           metric_full='Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.minorm is not None:
        im_2 = Image(arguments.minorm)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='minorm',
                           metric_full='Normalized Mutual information',
                           verbose=verbose)
        data_out = None

    elif arguments.corr is not None:
        # input 1 = from flag -i --> im
        # input 2 = from flag -corr
        im_2 = Image(arguments.corr)
        compute_similarity(im,
                           im_2,
                           fname_out,
                           metric='corr',
                           metric_full='Pearson correlation coefficient',
                           verbose=verbose)
        data_out = None

    # if no flag is set
    else:
        data_out = None
        printv(
            parser.error(
                'ERROR: you need to specify an operation to perform on the input image'
            ))

    if data_out is not None:
        # Write output
        nii_out = Image(fname_in)  # use header of input file
        nii_out.data = data_out
        nii_out.save(fname_out, dtype=output_type)
    # TODO: case of multiple outputs
    # assert len(data_out) == n_out
    # if n_in == n_out:
    #     for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
    #         im_in.data = d_out
    #         im_in.absolutepath = fn_out
    #         if arguments.w is not None:
    #             im_in.hdr.set_intent('vector', (), '')
    #         im_in.save()
    # elif n_out == 1:
    #     nii[0].data = data_out[0]
    #     nii[0].absolutepath = fname_out[0]
    #     if arguments.w is not None:
    #             nii[0].hdr.set_intent('vector', (), '')
    #     nii[0].save()
    # elif n_out > n_in:
    #     for dat_out, name_out in zip(data_out, fname_out):
    #         im_out = nii[0].copy()
    #         im_out.data = dat_out
    #         im_out.absolutepath = name_out
    #         if arguments.w is not None:
    #             im_out.hdr.set_intent('vector', (), '')
    #         im_out.save()
    # else:
    #     printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))

    # display message
    if data_out is not None:
        display_viewer_syntax([fname_out], verbose=verbose)
    else:
        printv('\nDone! File created: ' + fname_out, verbose, 'info')
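
A usage sketch only: the argument parser is not shown above, so the flag spellings -i, -mean and -o are assumptions inferred from the attribute names, and the file names are hypothetical placeholders. Averaging a 4D volume along t could then be driven programmatically like this:

# Illustrative call mirroring the arguments.mean branch above;
# 'fmri.nii.gz' is a hypothetical input file.
main(['-i', 'fmri.nii.gz', '-mean', 't', '-o', 'fmri_mean.nii.gz'])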