Example 1
        save_as_csv(agg_metric, fname_output, fname_in=fname_data, append=append_csv)
        append_csv = True  # when looping across labels, need to append results in the same file
    sct.display_open(fname_output)


if __name__ == "__main__":

    init_sct()

    param_default = Param()

    parser = get_parser()
    arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])

    overwrite = 0  # TODO: Not used. Why?
    fname_data = sct.get_absolute_path(arguments.i)
    path_label = arguments.f
    method = arguments.method
    fname_output = arguments.o
    append_csv = arguments.append
    combine_labels = arguments.combine
    labels_user = arguments.l
    adv_param_user = arguments.param  # TODO: Not used. Why?
    slices_of_interest = arguments.z
    vertebral_levels = arguments.vert
    fname_vertebral_labeling = arguments.vertfile
    perslice = arguments.perslice
    perlevel = arguments.perlevel
    fname_normalizing_label = arguments.norm_file  # TODO: Not used. Why?
    normalization_method = arguments.norm_method  # TODO: Not used. Why?
    label_to_fix = arguments.fix_label  # TODO: Not used. Why?
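
The `parse_args(args=None if sys.argv[1:] else ['--help'])` idiom used above makes the script print its help text and exit when it is launched without any arguments. A minimal, self-contained sketch of the same pattern; the parser and flags below are illustrative only, not SCT's actual options:

import argparse
import sys

def get_demo_parser():
    # Illustrative parser only; the real SCT parser defines many more options.
    parser = argparse.ArgumentParser(description="Demo of the 'help when no arguments' idiom")
    parser.add_argument('-i', help='input file')
    parser.add_argument('-o', default='out.csv', help='output CSV')
    return parser

if __name__ == '__main__':
    parser = get_demo_parser()
    # With no CLI arguments, fall back to ['--help'] so argparse prints usage and exits.
    arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
    print(arguments.i, arguments.o)
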
Example 2
def main(args):
    parser = get_parser()
    arguments = parser.parse(args)

    # Initialization
    slices = ''
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = None
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = None
    angle_correction = True  # default, so the variable is defined even if '-angle-corr' is absent
    if '-angle-corr' in arguments:
        if arguments['-angle-corr'] == '1':
            angle_correction = True
        elif arguments['-angle-corr'] == '0':
            angle_correction = False
    param_centerline = ParamCenterline(
        algo_fitting=arguments['-centerline-algo'],
        smooth=arguments['-centerline-smooth'],
        minmax=True)
    path_qc = arguments.get("-qc", None)
    qc_dataset = arguments.get("-qc-dataset", None)
    qc_subject = arguments.get("-qc-subject", None)

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    metrics, fit_results = process_seg.compute_shape(fname_segmentation,
                                                     angle_correction=angle_correction,
                                                     param_centerline=param_centerline,
                                                     verbose=verbose)
    for key in metrics:
        metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                        levels=parse_num_list(vert_levels), perslice=perslice,
                                                        perlevel=perlevel, vert_level=fname_vert_levels,
                                                        group_funcs=group_funcs)
    metrics_agg_merged = _merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)

    # QC report (only show CSA for clarity)
    if path_qc is not None:
        generate_qc(fname_segmentation, args=args, path_qc=os.path.abspath(path_qc), dataset=qc_dataset,
                    subject=qc_subject, path_img=_make_figure(metrics_agg_merged, fit_results),
                    process='sct_process_segmentation')

    sct.display_open(file_out)
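
The `group_funcs` tuples pair a column label with an aggregation callable, and `aggregate_per_slice_or_level` applies each callable to the metric values it groups. A toy sketch of that (label, callable) pattern, assuming simple stand-in functions rather than SCT's `func_wa` and `func_std`:

import statistics

# Hypothetical stand-ins for SCT's func_wa / func_std, just to show the (label, callable) pattern.
def demo_mean(values):
    return sum(values) / len(values)

def demo_std(values):
    return statistics.pstdev(values)

group_funcs = (('MEAN', demo_mean), ('STD', demo_std))

per_slice_csa = [71.2, 70.8, 69.9, 70.4]  # made-up per-slice values
aggregated = {label: func(per_slice_csa) for label, func in group_funcs}
print(aggregated)  # e.g. {'MEAN': 70.575, 'STD': ...}
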
def main():

    #Initialization
    directory = ""
    fname_template = ''
    n_l = 0
    verbose = param.verbose

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:t:n:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            directory = arg
        elif opt in ("-t"):
            fname_template = arg
        elif opt in ('-n'):
            n_l = int(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_template == '' or directory == '':
        usage()

    # check existence of input files
    print('\nCheck if file exists ...\n')
    sct.check_file_exist(fname_template)
    sct.check_folder_exist(directory)

    path_template, file_template, ext_template = sct.extract_fname(
        fname_template)
    template_absolute_path = sct.get_absolute_path(fname_template)

    os.chdir(directory)

    n_i = len([
        name for name in os.listdir('.')
        if (os.path.isfile(name) and name.endswith(".nii.gz")
            and name != 'template_landmarks.nii.gz')
    ])  # number of landmark images

    average = zeros((n_i, n_l))
    compteur = 0

    for file in os.listdir('.'):
        if file.endswith(".nii.gz") and file != 'template_landmarks.nii.gz':
            print(file)
            img = nibabel.load(file)
            data = img.get_data()
            X, Y, Z = (data > 0).nonzero()
            Z = [Z[i] for i in Z.argsort()]
            Z.reverse()

            for i in range(n_l):
                if i < len(Z):
                    average[compteur][i] = Z[i]

            compteur = compteur + 1

    average = array([
        int(round(mean([average[average[:, i] > 0, i]]))) for i in range(n_l)
    ])

    #print average

    print(template_absolute_path)
    print('\nGet dimensions of template...')
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(template_absolute_path)
    print('.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz))
    print('.. voxel size:  ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm')

    img = nibabel.load(template_absolute_path)
    data = img.get_data()
    hdr = img.get_header()
    data[:, :, :] = 0
    compteur = 1
    for i in average:
        print(int(round(nx / 2.0)), int(round(ny / 2.0)), int(round(i)), int(round(compteur)))
        data[int(round(nx / 2.0)),
             int(round(ny / 2.0)),
             int(round(i))] = int(round(compteur))
        compteur = compteur + 1

    print('\nSave volume ...')
    #hdr.set_data_dtype('float32') # set imagetype to uint8
    # save volume
    #data = data.astype(float32, copy =False)
    img = nibabel.Nifti1Image(data, None, hdr)
    file_name = 'template_landmarks.nii.gz'
    nibabel.save(img, file_name)
    print('\nFile created: ' + file_name)
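
The core of the landmark script above is: collect the z-coordinates of non-zero voxels in each image, order them from top to bottom, and average each landmark's z-coordinate across images. A minimal NumPy sketch of that step on synthetic arrays (it skips the NIfTI I/O and is not the original script):

import numpy as np

n_l = 3  # number of landmarks expected per image
volumes = [np.zeros((5, 5, 20)) for _ in range(2)]  # two synthetic landmark images
volumes[0][2, 2, [4, 10, 16]] = 1
volumes[1][2, 2, [6, 12, 18]] = 1

average = np.zeros((len(volumes), n_l))
for idx, data in enumerate(volumes):
    z = np.sort((data > 0).nonzero()[2])[::-1]  # z-coordinates of landmarks, top (largest) first
    average[idx, :len(z[:n_l])] = z[:n_l]

# Mean z per landmark, ignoring zero entries, rounded to the nearest slice.
mean_z = [int(round(average[average[:, i] > 0, i].mean())) for i in range(n_l)]
print(mean_z)  # [17, 11, 5]
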
def main(args):
    parser = get_parser()
    arguments = parser.parse(args)
    param = Param()

    # Initialization
    slices = param.slices
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = Param().perlevel
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = Param().perslice
    angle_correction = True  # default, so the variable is defined even if '-angle-corr' is absent
    if '-angle-corr' in arguments:
        if arguments['-angle-corr'] == '1':
            angle_correction = True
        elif arguments['-angle-corr'] == '0':
            angle_correction = False

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    metrics = process_seg.compute_shape(fname_segmentation,
                                        algo_fitting='bspline',
                                        angle_correction=angle_correction,
                                        verbose=verbose)
    for key in metrics:
        metrics_agg[key] = aggregate_per_slice_or_level(
            metrics[key],
            slices=parse_num_list(slices),
            levels=parse_num_list(vert_levels),
            perslice=perslice,
            perlevel=perlevel,
            vert_level=fname_vert_levels,
            group_funcs=group_funcs)
    metrics_agg_merged = _merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged,
                file_out,
                fname_in=fname_segmentation,
                append=append)
    sct.display_open(file_out)
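
`parse_num_list` is used throughout these examples to turn a string such as '2:4' or '1,3' into an explicit list of integers before aggregation. The helper below is an illustrative stand-in with assumed semantics, not SCT's implementation:

def demo_parse_num_list(text):
    """Toy parser: '' -> [], '2:4' -> [2, 3, 4], '1,3,5' -> [1, 3, 5] (assumed semantics)."""
    if not text:
        return []
    result = []
    for chunk in text.split(','):
        if ':' in chunk:
            start, stop = (int(v) for v in chunk.split(':'))
            result.extend(range(start, stop + 1))
        else:
            result.append(int(chunk))
    return result

print(demo_parse_num_list('2:4'))   # [2, 3, 4]
print(demo_parse_num_list(''))      # []
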
Example 6
def main(args):
    parser = get_parser()
    arguments = parser.parse(args)
    param = Param()

    # Initialization
    slices = param.slices
    angle_correction = True
    use_phys_coord = True
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    name_process = arguments['-p']
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = Param().perlevel
    if '-v' in arguments:
        verbose = int(arguments['-v'])
    else:
        verbose = param.verbose  # fall back to the Param default so 'verbose' is always defined
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = Param().perslice
    if '-a' in arguments:
        param.algo_fitting = arguments['-a']
    if '-no-angle' in arguments:
        if arguments['-no-angle'] == '1':
            angle_correction = False
        elif arguments['-no-angle'] == '0':
            angle_correction = True
    if '-use-image-coord' in arguments:
        if arguments['-use-image-coord'] == '1':
            use_phys_coord = False
        if arguments['-use-image-coord'] == '0':
            use_phys_coord = True

    # update fields
    param.verbose = verbose
    metrics_agg = {}
    if not file_out:
        file_out = name_process + '.csv'

    if name_process == 'centerline':
        process_seg.extract_centerline(fname_segmentation, verbose=param.verbose,
                                       algo_fitting=param.algo_fitting, use_phys_coord=use_phys_coord,
                                       file_out=file_out)

    if name_process == 'csa':
        metrics = process_seg.compute_csa(fname_segmentation, algo_fitting=param.algo_fitting,
                                          type_window=param.type_window, window_length=param.window_length,
                                          angle_correction=angle_correction, use_phys_coord=use_phys_coord,
                                          remove_temp_files=remove_temp_files, verbose=verbose)

        for key in metrics:
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels), perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=group_funcs)
        metrics_agg_merged = merge_dict(metrics_agg)
        save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)
        sct.printv('\nFile created: '+file_out, verbose=1, type='info')

    if name_process == 'label-vert':
        if '-discfile' in arguments:
            fname_discs = arguments['-discfile']
        else:
            sct.printv('\nERROR: Disc label file is mandatory (flag: -discfile).\n', 1, 'error')
        process_seg.label_vert(fname_segmentation, fname_discs, verbose=verbose)

    if name_process == 'shape':
        fname_discs = None
        if '-discfile' in arguments:
            fname_discs = arguments['-discfile']
        metrics = process_seg.compute_shape(fname_segmentation, remove_temp_files=remove_temp_files, verbose=verbose)
        for key in metrics:
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels), perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=group_funcs)
        metrics_agg_merged = merge_dict(metrics_agg)
        save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)
        sct.printv('\nFile created: ' + file_out, verbose=1, type='info')
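
Several of the older examples compare option values such as '-no-angle' and '-use-image-coord' against the strings '0' and '1'. One way to centralize that conversion is a small helper like the sketch below; the helper and the flag handling are illustrative, not part of SCT:

def str_flag_to_bool(arguments, flag, default):
    """Interpret a '0'/'1' string option as a boolean, with a fallback default (illustrative helper)."""
    value = arguments.get(flag)
    if value == '1':
        return True
    if value == '0':
        return False
    return default

# A plain dict stands in for the parsed arguments object:
demo_args = {'-no-angle': '1'}
angle_correction = not str_flag_to_bool(demo_args, '-no-angle', default=False)
print(angle_correction)  # False: '-no-angle' set to '1' disables angle correction
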
def main(args=None):
    parser = get_parser()
    if args:
        arguments = parser.parse_args(args)
    else:
        arguments = parser.parse_args(
            args=None if sys.argv[1:] else ['--help'])

    # Initialization
    slices = ''
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = sct.get_absolute_path(arguments.i)
    fname_vert_levels = ''
    if arguments.o is not None:
        file_out = os.path.abspath(arguments.o)
    else:
        file_out = ''
    if arguments.append is not None:
        append = arguments.append
    else:
        append = 0
    if arguments.vert is not None:
        vert_levels = arguments.vert
    else:
        vert_levels = ''
    remove_temp_files = arguments.r
    if arguments.vertfile is not None:
        fname_vert_levels = arguments.vertfile
    if arguments.perlevel is not None:
        perlevel = arguments.perlevel
    else:
        perlevel = None
    if arguments.z is not None:
        slices = arguments.z
    if arguments.perslice is not None:
        perslice = arguments.perslice
    else:
        perslice = None
    angle_correction = arguments.angle_corr
    param_centerline = ParamCenterline(algo_fitting=arguments.centerline_algo,
                                       smooth=arguments.centerline_smooth,
                                       minmax=True)
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject

    verbose = int(arguments.v)
    init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    metrics, fit_results = process_seg.compute_shape(
        fname_segmentation,
        angle_correction=angle_correction,
        param_centerline=param_centerline,
        verbose=verbose)
    for key in metrics:
        if key == 'length':
            # For computing cord length, slice-wise length needs to be summed across slices
            metrics_agg[key] = aggregate_per_slice_or_level(
                metrics[key],
                slices=parse_num_list(slices),
                levels=parse_num_list(vert_levels),
                perslice=perslice,
                perlevel=perlevel,
                vert_level=fname_vert_levels,
                group_funcs=(('SUM', func_sum), ))
        else:
            # For other metrics, we compute the average and standard deviation across slices
            metrics_agg[key] = aggregate_per_slice_or_level(
                metrics[key],
                slices=parse_num_list(slices),
                levels=parse_num_list(vert_levels),
                perslice=perslice,
                perlevel=perlevel,
                vert_level=fname_vert_levels,
                group_funcs=group_funcs)
    metrics_agg_merged = merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged,
                file_out,
                fname_in=fname_segmentation,
                append=append)

    # QC report (only show CSA for clarity)
    if path_qc is not None:
        generate_qc(fname_segmentation,
                    args=args,
                    path_qc=os.path.abspath(path_qc),
                    dataset=qc_dataset,
                    subject=qc_subject,
                    path_img=_make_figure(metrics_agg_merged, fit_results),
                    process='sct_process_segmentation')

    sct.display_open(file_out)
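
In the variant above, the 'length' metric is aggregated with SUM while the other shape metrics use MEAN and STD, because slice-wise cord lengths accumulate along the S-I axis whereas areas should be averaged. A toy illustration of the difference, using made-up numbers:

per_slice_length = [0.8, 0.81, 0.79, 0.8]   # made-up slice-wise cord lengths in mm
per_slice_csa = [71.2, 70.8, 69.9, 70.4]    # made-up slice-wise areas in mm^2

total_length = sum(per_slice_length)                 # SUM: slice-wise lengths add up along S-I
mean_csa = sum(per_slice_csa) / len(per_slice_csa)   # MEAN: areas are averaged across slices

print(total_length, mean_csa)  # roughly 3.2 and 70.6
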
        save_as_csv(agg_metric, fname_output, fname_in=fname_data, append=append)
        append = True  # when looping across labels, need to append results in the same file
    sct.display_open(fname_output)


if __name__ == "__main__":

    sct.init_sct()

    param_default = Param()

    parser = get_parser()
    arguments = parser.parse(sys.argv[1:])

    overwrite = 0
    fname_data = sct.get_absolute_path(arguments['-i'])
    path_label = arguments['-f']
    method = arguments['-method']
    fname_output = arguments['-o']
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-l' in arguments:
        labels_user = arguments['-l']
    else:
        labels_user = ''
    if '-param' in arguments:
        adv_param_user = arguments['-param']
    else:
        adv_param_user = ''
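
The `append` flag passed to `save_as_csv` lets successive labels, or successive runs, accumulate rows in a single CSV file instead of overwriting it. A minimal sketch of that write-or-append pattern with the standard library; the column names are made up and do not reflect SCT's CSV schema:

import csv
import os

def write_row(path, row, append):
    """Write a result row, adding a header only when creating a new file."""
    new_file = not (append and os.path.isfile(path))
    mode = 'w' if new_file else 'a'
    with open(path, mode, newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['Label', 'MEAN', 'STD'])
        if new_file:
            writer.writeheader()
        writer.writerow(row)

write_row('metrics.csv', {'Label': 'white matter', 'MEAN': 0.82, 'STD': 0.05}, append=False)
write_row('metrics.csv', {'Label': 'gray matter', 'MEAN': 0.71, 'STD': 0.04}, append=True)
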
def main(args):
    parser = get_parser()
    arguments = parser.parse(args)
    param = Param()

    # Initialization
    slices = param.slices
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = sct.get_absolute_path(arguments['-i'])
    fname_vert_levels = ''
    if '-o' in arguments:
        file_out = os.path.abspath(arguments['-o'])
    else:
        file_out = ''
    if '-append' in arguments:
        append = int(arguments['-append'])
    else:
        append = 0
    if '-vert' in arguments:
        vert_levels = arguments['-vert']
    else:
        vert_levels = ''
    if '-r' in arguments:
        remove_temp_files = arguments['-r']
    if '-vertfile' in arguments:
        fname_vert_levels = arguments['-vertfile']
    if '-perlevel' in arguments:
        perlevel = arguments['-perlevel']
    else:
        perlevel = Param().perlevel
    if '-z' in arguments:
        slices = arguments['-z']
    if '-perslice' in arguments:
        perslice = arguments['-perslice']
    else:
        perslice = Param().perslice
    angle_correction = True  # default, so the variable is defined even if '-angle-corr' is absent
    if '-angle-corr' in arguments:
        if arguments['-angle-corr'] == '1':
            angle_correction = True
        elif arguments['-angle-corr'] == '0':
            angle_correction = False

    verbose = int(arguments.get('-v'))
    sct.init_sct(log_level=verbose, update=True)  # Update log level

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    metrics = process_seg.compute_shape(fname_segmentation,
                                        algo_fitting='bspline',
                                        angle_correction=angle_correction,
                                        verbose=verbose)
    for key in metrics:
        metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                        levels=parse_num_list(vert_levels), perslice=perslice,
                                                        perlevel=perlevel, vert_level=fname_vert_levels,
                                                        group_funcs=group_funcs)
    metrics_agg_merged = _merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)
    sct.display_open(file_out)