def main():
    """Command-line entry point for the N4 bias field correction pipeline.

    Parses the command line, checks that the number of mask images (when
    provided) matches the number of input images, builds the correction
    workflow, then either renders the workflow graph (--graph) or runs it,
    optionally through qsub.
    """
    # Create the parser
    pipeline_description = textwrap.dedent('''
    Pipeline to perform a bias field correction on an input image
    or a list of input images.

    The pipeline uses the N4 algorithm, implemented within an Insight ToolKit framework.
    It takes a structural image as input, and optionally an input mask.

    The implementation comes from a contribution from Nicholas J. Tustison, James C. Gee
    in the Insight Journal paper: https://hdl.handle.net/10380/3053

    List of the binaries necessary for this pipeline:

    * FSL: fslmaths
    * niftyreg: reg_aladin, reg_resample
    * NifTK: niftkN4BiasCorrection

    ''')
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=pipeline_description)
    # Input images
    parser.add_argument('-i', '--img', dest='input_img',
                        type=str, nargs='+',
                        metavar='input_img',
                        help='Image file or list of input images',
                        required=True)
    parser.add_argument('-m', '--mask', dest='input_mask',
                        type=str, nargs='+',
                        metavar='input_mask',
                        help='Mask image or list of mask images (optional)',
                        required=False)
    # Output arguments
    parser.add_argument('-o', '--output_dir', dest='output_dir',
                        type=str, metavar='directory',
                        help='Output directory containing the registration result\n'
                             'Default is the current directory',
                        default=os.path.abspath('.'),
                        required=False)
    parser.add_argument('--output_pre', dest='output_pre',
                        type=str, metavar='prefix',
                        help='Output result prefix',
                        default='', required=False)
    parser.add_argument('--output_suf', dest='output_suf',
                        type=str, metavar='suffix',
                        help='Output result suffix',
                        default='', required=False)

    # Add the project-wide default arguments (presumably --graph, --use_qsub,
    # --openmp_core, ... judging by the uses below) to the parser.
    default_parser_argument(parser)

    # Parse the input arguments
    args = parser.parse_args()

    # A mask list, when given, must pair one-to-one with the input images.
    if args.input_mask is not None:
        if len(args.input_img) != len(args.input_mask):
            print('ERROR: The number of input and mask images are expected to be the same.')
            print(str(len(args.input_img)) + ' image(s) versus ' + str(len(args.input_mask)) + ' mask(s). Exit.')
            sys.exit(1)

    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    workflow = create_n4_bias_correction_workflow(
        [os.path.abspath(f) for f in args.input_img],
        result_dir,
        [os.path.abspath(f) for f in args.input_mask] if args.input_mask else None)

    # Only render the graph and stop when --graph was requested.
    if args.graph:
        generate_graph(workflow=workflow)
        sys.exit(0)

    # Edit the qsub arguments based on the input arguments: the per-slot
    # memory request shrinks when several OpenMP cores share one job,
    # floored at 0.95G.
    qsubargs_time = '05:00:00'
    qsubargs_mem = '2.9G'
    if args.use_qsub and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 2.9/args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    run_workflow(workflow=workflow,
                 qsubargs=qsubargs,
                 parser=args)
          # NOTE(review): this fragment begins mid-expression -- the opening
          # `r.connect(` of the b-value connection, and the enclosing script
          # that defines `r`, `merge_initial_dwis`, `args`, `result_dir` and
          # `do_susceptibility_correction`, were lost during extraction.
          r.get_node('input_node'), 'in_bval_file')
# Wire the merged b-vectors into the workflow input node.
r.connect(merge_initial_dwis, 'out_bvecs',
          r.get_node('input_node'), 'in_bvec_file')
# The T1 input is accepted either as a plain image or as a .zip archive.
if args.t1.lower().endswith('.zip'):
    # Unpack next to the results, dropping the '.zip' suffix for the dir name.
    zip_dir = os.path.join(result_dir, os.path.basename(args.t1)[:-4])
    if not os.path.exists(zip_dir):
        os.mkdir(zip_dir)
    # NOTE(review): os.system with interpolated paths breaks on spaces and is
    # shell-injectable -- consider subprocess.run([...], shell=False).
    os.system('unzip -d %s -j -o %s > /dev/null' % (zip_dir, args.t1))
    t1_unzipped = find_files(zip_dir, '.nii.gz')
    if not t1_unzipped:
        sys.stderr.write('ERROR: T1 files not found in the zip given.\n')
        sys.exit(1)
    # Use the first NIfTI file found in the archive.
    t1 = t1_unzipped[0]
else:
    t1 = os.path.abspath(args.t1)
r.inputs.input_node.in_t1_file = t1
# Optional T1 mask.
if args.t1_mask:
    r.inputs.input_node.in_t1_mask_file = os.path.abspath(args.t1_mask)
if do_susceptibility_correction:
    # Fieldmap magnitude/phase are only wired in for susceptibility correction.
    r.inputs.input_node.in_fm_magnitude_file = os.path.abspath(args.fieldmapmag)
    r.inputs.input_node.in_fm_phase_file = os.path.abspath(args.fieldmapphase)

# Only render the graph and stop when --graph was requested.
if args.graph is True:
    generate_graph(workflow=r)
    sys.exit(0)

qsubargs = '-l h_rt=02:00:00 -l tmem=2.9G -l h_vmem=2.9G -l vf=2.9G \
-l s_stack=10240 -j y -b y -S /bin/csh -V'
run_workflow(workflow=r,
             qsubargs=qsubargs)
# --- Ejemplo n.º 3 (score: 0) -- example separator left over from the original
# source listing; kept as a comment so it no longer reads as code ---
def main():
    """Command-line entry point for the spatio-temporal shape analysis pipeline.

    Parses the command line, builds the analysis workflow from the input
    images and parcellations, forwards the deformetrica XML settings, wires
    every workflow output into a data sink and either renders the workflow
    graph (--graph) or runs it, optionally through qsub.
    """
    help_message = "Help !"
    parser = argparse.ArgumentParser(description=help_message)
    # Input images
    parser.add_argument('-i', '--input_img', dest='input_img',
                        metavar='image', nargs='+',
                        help='Input structural image(s)')
    parser.add_argument('-p', '--input_par', dest='input_par',
                        metavar='par', nargs='+',
                        help='Input parcellation image(s) from GIF')
    parser.add_argument('-s', '--subject_ids', dest='subject_ids',
                        metavar='list', nargs='+',
                        help='list of the subject Ids')
    parser.add_argument('-a', '--ages', type=float, dest='ages',
                        metavar='list', nargs='+',
                        help='list of the subject ages')

    # Other inputs
    parser.add_argument('-l', '--label_val', dest='input_lab',
                        metavar='val', type=int, nargs='+',
                        help='Label index (indices) to consider for the refinement')
    parser.add_argument('--rigid_it', type=int, dest='rigid_iteration',
                        metavar='number',
                        help='Number of iteration to perform for the rigid step (default is 3)',
                        default=3)
    parser.add_argument('--affine_it', type=int, dest='affine_iteration',
                        metavar='number',
                        help='Number of iteration to perform for the affine step (default is 3)',
                        default=3)
    parser.add_argument('-r', '--reduc_rate', type=float, dest='reduct_rate',
                        metavar='number',
                        help='decimation rate for the mesh extraction method',
                        default=0.6)
    # Deformetrica XML configuration values, forwarded verbatim to the
    # workflow input node below.
    parser.add_argument('-xml_dkw', type=int, dest='xml_dkw',
                        metavar='number',
                        help='Diffeo Kernel width', default=11)
    parser.add_argument('-xml_dkt', dest='xml_dkt',
                        metavar='string',
                        help=' Diffeo Kernel type', default="Exact")
    parser.add_argument('-xml_dtp', type=int, dest='xml_dtp',
                        metavar='number',
                        help='Diffeo: number of time points', default=30)
    parser.add_argument('-xml_dsk', type=float, dest='xml_dsk',
                        metavar='number',
                        help='Diffeo: smoothing kernel width', default=0.5)
    parser.add_argument('-xml_dcps', type=int, dest='xml_dcps',
                        metavar='number',
                        help='Diffeos: Initial spacing for Control Points',
                        default=5)
    parser.add_argument('-xml_dcpp', dest='xml_dcpp',
                        metavar='number',
                        help="Diffeos: name of a file containing positions of control points. "
                             "In case of conflict with initial-cp-spacing, if a file name is given in "
                             "initial-cp-position and initial-cp-spacing is set, the latter is ignored and "
                             "control point positions in the file name are used.",
                        default='x')
    parser.add_argument('-xml_dfcp', dest='xml_dfcp',
                        metavar='On/Off',
                        help='Diffeos: Freeze the Control Points', default="Off")
    parser.add_argument('-xml_dmi', type=int, dest='xml_dmi',
                        metavar='number',
                        help='Diffeos: Maximum of descent iterations', default=100)
    parser.add_argument('-xml_dat', type=float, dest='xml_dat',
                        metavar='number',
                        help='Diffeos: adaptative tolerence for the gradient descent',
                        default=0.00005)
    parser.add_argument('-xml_dls', type=int, dest='xml_dls',
                        metavar='number',
                        help='Diffeos: Maximum line search iterations', default=20)
    parser.add_argument('-xml_ods', type=float, nargs='+', dest='xml_ods',
                        metavar='number',
                        help='Object: weight of the object in the fidelity-to-data term',
                        default=[0.5])
    parser.add_argument('-xml_okw', type=int, nargs='+', dest='xml_okw',
                        metavar='number',
                        help='Object: Kernel width', default=[5])
    parser.add_argument('-xml_ot', nargs='+', dest='xml_ot',
                        metavar='number',
                        help='Object type', default=["NonOrientedSurfaceMesh"])

    # Output directory. Note: with required=True a default is never used by
    # argparse, so the previous dead default ('test_workflow') was dropped.
    parser.add_argument('-o', '--output_dir', dest='output_dir',
                        metavar='output_dir',
                        help='Output directory where to save the results',
                        required=True)

    # Add the project-wide default arguments (--graph, --use_qsub, ...)
    default_parser_argument(parser)
    args = parser.parse_args()
    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    input_img = [os.path.abspath(f) for f in args.input_img]
    input_par = [os.path.abspath(f) for f in args.input_par]
    # Create the workflow
    workflow = create_spatio_temporal_analysis(labels=args.input_lab,
                                               reduction_rate=args.reduct_rate,
                                               scan_number=len(input_img))
    workflow.base_dir = result_dir
    workflow.inputs.input_node.input_images = input_img
    workflow.inputs.input_node.input_parcellations = input_par
    # The first image serves as the reference.
    workflow.inputs.input_node.input_ref = input_img[0]
    workflow.inputs.input_node.subject_ids = args.subject_ids
    workflow.inputs.input_node.ages = args.ages
    # Forward the deformetrica XML settings verbatim.
    workflow.inputs.input_node.xml_dkw = args.xml_dkw
    workflow.inputs.input_node.xml_dkt = args.xml_dkt
    workflow.inputs.input_node.xml_dtp = args.xml_dtp
    workflow.inputs.input_node.xml_dsk = args.xml_dsk
    workflow.inputs.input_node.xml_dcps = args.xml_dcps
    workflow.inputs.input_node.xml_dcpp = args.xml_dcpp
    workflow.inputs.input_node.xml_dfcp = args.xml_dfcp
    workflow.inputs.input_node.xml_dmi = args.xml_dmi
    workflow.inputs.input_node.xml_dat = args.xml_dat
    workflow.inputs.input_node.xml_dls = args.xml_dls
    workflow.inputs.input_node.xml_ods = args.xml_ods
    workflow.inputs.input_node.xml_okw = args.xml_okw
    workflow.inputs.input_node.xml_ot = args.xml_ot

    # Edit the qsub arguments based on the input arguments: the per-slot
    # memory request shrinks when several OpenMP cores share one job,
    # floored at 0.95G.
    qsubargs_time = '02:00:00'
    qsubargs_mem = '1.9G'
    if args.use_qsub and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 1.9 / args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    # Create a data sink and route every workflow output into it; each output
    # field lands in the sink under '@<field>'.
    ds = pe.Node(nio.DataSink(parameterization=False), name='data_sink')
    output_node = workflow.get_node('output_node')
    sink_fields = ['extracted_meshes', 'param_diffeo_file', 'param_object_file',
                   'out_AgeToOnsetNorm_file', 'out_centroid_mat_file',
                   'out_init_shape_vtk_file', 'out_vertices_centroid_file',
                   'transported_res_mom', 'transported_res_vect',
                   'out_file_CP', 'out_file_MOM']
    workflow.connect([(output_node, ds, [(field, '@' + field)])
                      for field in sink_fields])

    # Only render the graph (data sink included) and stop when requested.
    if args.graph:
        generate_graph(workflow=workflow)
        sys.exit(0)

    run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
    # NOTE(review): this fragment begins mid-call -- the `workflow = ...(`
    # line naming the workflow factory (and the enclosing script that defines
    # `args` and `result_dir`) was lost during extraction.
    os.path.abspath(args.input_img),
    os.path.abspath(args.input_coeff),
    result_dir,
    # Offsets along x/y/z, as given on the command line.
    offsets=[args.offset_x, args.offset_y, args.offset_z],
    scanner=args.scanner,
    radius=args.radius,
    interp=args.inter,
    throughplaneonly=args.throughplaneonly,
    inplaneonly=args.inplaneonly)

workflow.base_dir = result_dir
"""
output the graph if required
"""
if args.graph is True:
    generate_graph(workflow=workflow)
    sys.exit(0)
"""
Edit the qsub arguments based on the input arguments
"""
# Per-slot memory request shrinks when several OpenMP cores share one job,
# floored at 0.95G.
qsubargs_time = '01:00:00'
qsubargs_mem = '2.9G'
if args.use_qsub is True and args.openmp_core > 1:
    qsubargs_mem = str(max(0.95, 2.9 / args.openmp_core)) + 'G'

qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
def main():
    """Command-line entry point for hippocampal shape analysis in epilepsy.

    Right-sided segmentations are flipped so that all hippocampi share one
    orientation before the shape-preprocessing workflow runs. Parses the
    command line, builds the flipping/preprocessing workflow, sinks the
    extracted meshes and either renders the workflow graph (--graph) or runs
    it, optionally through qsub.
    """
    help_message = "perform used for shape analysis of hippocampi in epilepsia"
    parser = argparse.ArgumentParser(description=help_message)
    # Input images
    parser.add_argument('-i', '--input_img', dest='input_img',
                        metavar='image', nargs='+',
                        help='Input structural image(s)')
    parser.add_argument('-fseg', '--flip_seg', dest='flip_seg',
                        metavar='par', nargs='+',
                        help='Input segmention image(s) for right side')
    parser.add_argument('-nfseg', '--no_flip_seg', dest='no_flip_seg',
                        metavar='par', nargs='+',
                        help='Input segmention image(s) for left side')
    parser.add_argument('-s', '--subject_ids', dest='subject_ids',
                        metavar='list', nargs='+',
                        help='list of the subject Ids')
    parser.add_argument('-fi', '--right_ep_id', dest='flip_id',
                        metavar='list', nargs='+',
                        help='list of the subject which have to be flipped')
    parser.add_argument('-nfi', '--left_ep_id', dest='no_flip_id',
                        metavar='list', nargs='+',
                        help='list of the subject which doent need to be flipped')

    # Other inputs
    parser.add_argument('-l', '--label_val', dest='input_lab',
                        metavar='val', type=int, nargs='+',
                        help='Label index (indices) to consider for the refinement')
    parser.add_argument('--rigid_it', type=int, dest='rigid_iteration',
                        metavar='number',
                        help='Number of iteration to perform for the rigid step (default is 3)',
                        default=3)
    parser.add_argument('--affine_it', type=int, dest='affine_iteration',
                        metavar='number',
                        help='Number of iteration to perform for the affine step (default is 3)',
                        default=3)
    parser.add_argument('-r', '--reduc_rate', type=float, dest='reduct_rate',
                        metavar='number',
                        help='decimation rate for the mesh extraction method',
                        default=0.1)
    # Deformetrica XML configuration values.
    parser.add_argument('-xml_dkw', type=int, dest='xml_dkw',
                        metavar='number',
                        help='Diffeo Kernel width', default=11)
    parser.add_argument('-xml_dkt', dest='xml_dkt',
                        metavar='string',
                        help=' Diffeo Kernel type', default="Exact")
    parser.add_argument('-xml_dtp', type=int, dest='xml_dtp',
                        metavar='number',
                        help='Diffeo: number of time points', default=30)
    parser.add_argument('-xml_dsk', type=float, dest='xml_dsk',
                        metavar='number',
                        help='Diffeo: smoothing kernel width', default=0.5)
    parser.add_argument('-xml_dcps', type=int, dest='xml_dcps',
                        metavar='number',
                        help='Diffeos: Initial spacing for Control Points',
                        default=5)
    parser.add_argument('-xml_dcpp', dest='xml_dcpp',
                        metavar='number',
                        help="Diffeos: name of a file containing positions of control points. "
                             "In case of conflict with initial-cp-spacing, if a file name is given in "
                             "initial-cp-position and initial-cp-spacing is set, the latter is ignored and "
                             "control point positions in the file name are used.",
                        default='x')
    parser.add_argument('-xml_dfcp', dest='xml_dfcp',
                        metavar='On/Off',
                        help='Diffeos: Freeze the Control Points', default="Off")
    parser.add_argument('-xml_dmi', type=int, dest='xml_dmi',
                        metavar='number',
                        help='Diffeos: Maximum of descent iterations', default=100)
    parser.add_argument('-xml_dat', type=float, dest='xml_dat',
                        metavar='number',
                        help='Diffeos: adaptative tolerence for the gradient descent',
                        default=0.00005)
    parser.add_argument('-xml_dls', type=int, dest='xml_dls',
                        metavar='number',
                        help='Diffeos: Maximum line search iterations', default=20)
    parser.add_argument('-xml_ods', type=float, nargs='+', dest='xml_ods',
                        metavar='number',
                        help='Object: weight of the object in the fidelity-to-data term',
                        default=[0.5])
    parser.add_argument('-xml_okw', type=int, nargs='+', dest='xml_okw',
                        metavar='number',
                        help='Object: Kernel width', default=[5])
    parser.add_argument('-xml_ot', nargs='+', dest='xml_ot',
                        metavar='number',
                        help='Object type', default=["NonOrientedSurfaceMesh"])

    # Output directory. Note: with required=True a default is never used by
    # argparse, so the previous dead default ('test_workflow') was dropped.
    parser.add_argument('-o', '--output_dir', dest='output_dir',
                        metavar='output_dir',
                        help='Output directory where to save the results',
                        required=True)

    # Add the project-wide default arguments (--graph, --use_qsub, ...)
    default_parser_argument(parser)
    args = parser.parse_args()
    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    input_img = [os.path.abspath(f) for f in args.input_img]
    flip_seg = [os.path.abspath(f) for f in args.flip_seg]
    no_flip_seg = [os.path.abspath(f) for f in args.no_flip_seg]
    # Python 3 print calls (the original Python 2 print statements were a
    # syntax error under Python 3 and inconsistent with the rest of the file).
    print("nb segmentation to flip: " + str(len(args.flip_id)))
    print("nb segmentation to not flip: " + str(len(args.no_flip_id)))
    # One single-label list per scan. The flip/no-flip ID lists are not
    # checked against subject_ids: when the controls are flipped as well,
    # the sizes legitimately differ.
    labels = [[args.input_lab[0]]] * (len(args.flip_id) + len(args.no_flip_id))
    print(labels)
    # Create the workflow
    workflow = create_preprocessing_shape_analysis_epilepsy_flipping(
        labels=labels,
        reduction_rate=args.reduct_rate,
        scan_number=len(input_img))
    workflow.base_dir = result_dir
    workflow.inputs.input_node.input_images = input_img
    # The first image serves as the reference.
    workflow.inputs.input_node.input_ref = input_img[0]
    workflow.inputs.input_node.flip_id = args.flip_id
    workflow.inputs.input_node.no_flip_id = args.no_flip_id
    workflow.inputs.input_node.flip_seg = flip_seg
    workflow.inputs.input_node.no_flip_seg = no_flip_seg
    workflow.inputs.input_node.subject_ids = args.subject_ids

    # Edit the qsub arguments based on the input arguments: the per-slot
    # memory request shrinks when several OpenMP cores share one job,
    # floored at 0.95G.
    qsubargs_time = '02:00:00'
    qsubargs_mem = '1.9G'
    if args.use_qsub and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 1.9 / args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    # Create a data sink for the extracted meshes.
    ds = pe.Node(nio.DataSink(parameterization=False), name='data_sink')
    workflow.connect([
        (workflow.get_node('output_node'), ds,
         [('extracted_meshes', '@extracted_meshes')]),
    ])

    # Only render the graph and stop when --graph was requested.
    if args.graph:
        generate_graph(workflow=workflow)
        sys.exit(0)

    run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
def main():
    """Command-line entry point of the spatio-temporal longitudinal shape
    analysis pipeline.

    The structural images and GIF parcellations are given as flat lists
    ordered subject by subject; ``--nb_fup`` states, for each subject, how
    many time points (baseline included) belong to that subject.  The
    resulting nipype workflow is either drawn (``--graph``) or executed.
    """

    # Example invocation, printed at start-up for reference (kept verbatim
    # from the original script; `print()` works identically under Python 2
    # for a single string argument).
    print('perform_spatiotemporal_shape_longitudinal.py  -i /Users/clairec/DataAndResults/examples/longitudinal/images/imageS1_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS1_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS2_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS2_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS3_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS3_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS4_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS4_2.nii.gz -p /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS1_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS1_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS2_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS2_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS3_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS3_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS4_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS4_2.nii.gz -l 59 -s S1 S1 S2 S2 S3 S3 S4 S4 -a 0 1 1 2 2 3 3 4 --nb_fup 2 2 2 2 -o test_longitudinal_11nov')
    # perform_spatiotemporal_shape_longitudinal.py  -i /Users/clairec/DataAndResults/examples/longitudinal/images/imageS1_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS1_1b.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS1_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS2_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS2_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS3_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS3_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS4_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/images/imageS4_2.nii.gz -p /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS1_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS1_1b.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS1_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS2_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS2_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS3_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS3_2.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS4_1.nii.gz /Users/clairec/DataAndResults/examples/longitudinal/parcellations/parcellationS4_2.nii.gz -l 59 -s S1 S1 S1 S2 S2 S3 S3 S4 S4 -a 0 0.5 1 1 2 2 3 3 4 --nb_fup 3 2 2 2 -o test_longitudinal_17feb
    help_message = "Help !"
    parser = argparse.ArgumentParser(description=help_message)

    # Input images
    parser.add_argument(
        '-i',
        '--input_img',
        dest='input_img',
        metavar='image',
        nargs='+',
        help=
        'Input structural image(s). S1_BS S1_tp1 S1_tp2 ... S2_BS S2_tp1 S2_tp2 ...'
    )
    parser.add_argument('-p',
                        '--input_par',
                        dest='input_par',
                        metavar='par',
                        help='Input parcellation image(s) from GIF',
                        nargs='+')
    parser.add_argument('-s',
                        '--subject_ids',
                        dest='subject_ids',
                        metavar='list',
                        help='list of the subject Ids',
                        nargs='+')
    parser.add_argument('-a',
                        '--ages',
                        type=float,
                        dest='ages',
                        metavar='list',
                        help='list of the subject ages',
                        nargs='+')
    parser.add_argument(
        '--nb_fup',
        type=int,
        dest='nb_followup',
        metavar='list',
        help='list of numbers of follow up (including the baseline) ' +
        'per subject (including the baseline. if 1 follow up, put 2',
        nargs='+')

    # Other inputs
    parser.add_argument(
        '-l',
        '--label_val',
        dest='input_lab',
        metavar='val',
        type=int,
        nargs='+',
        help='Label index (indices) to consider for the refinement')
    parser.add_argument(
        '--rigid_it',
        type=int,
        dest='rigid_iteration',
        metavar='number',
        help='Number of iteration to perform for the rigid step (default is 3)',
        default=3)
    parser.add_argument(
        '--affine_it',
        type=int,
        dest='affine_iteration',
        metavar='number',
        help=
        'Number of iteration to perform for the affine step (default is 3)',
        default=3)
    parser.add_argument('-r',
                        '--reduc_rate',
                        type=float,
                        dest='reduct_rate',
                        metavar='number',
                        help='decimation rate for the mesh extraction method',
                        default=0.2)
    # Deformetrica XML settings (diffeomorphism model)
    parser.add_argument('-xml_dkw',
                        type=int,
                        dest='xml_dkw',
                        metavar='number',
                        help='Diffeo Kernel width',
                        default=10)
    parser.add_argument('-xml_dkt',
                        dest='xml_dkt',
                        metavar='string',
                        help=' Diffeo Kernel type',
                        default="Exact")
    parser.add_argument('-xml_dtp',
                        type=int,
                        dest='xml_dtp',
                        metavar='number',
                        help='Diffeo: number of time points',
                        default=30)
    parser.add_argument('-xml_dsk',
                        type=float,
                        dest='xml_dsk',
                        metavar='number',
                        help='Diffeo: smoothing kernel width',
                        default=0.5)
    parser.add_argument('-xml_dcps',
                        type=int,
                        dest='xml_dcps',
                        metavar='number',
                        help='Diffeos: Initial spacing for Control Points',
                        default=5)
    parser.add_argument(
        '-xml_dcpp',
        dest='xml_dcpp',
        metavar='number',
        help="Diffeos: name of a file containing positions of control points. "
        +
        "In case of conflict with initial-cp-spacing, if a file name is given in "
        +
        "initial-cp-position and initial-cp-spacing is set, the latter is ignored and "
        + "control point positions in the file name are used.",
        default='x')
    parser.add_argument('-xml_dfcp',
                        dest='xml_dfcp',
                        metavar='On/Off',
                        help='Diffeos: Freeze the Control Points',
                        default="Off")
    parser.add_argument('-xml_dmi',
                        type=int,
                        dest='xml_dmi',
                        metavar='number',
                        help='Diffeos: Maximum of descent iterations',
                        default=5)
    parser.add_argument(
        '-xml_dat',
        type=float,
        dest='xml_dat',
        metavar='number',
        help='Diffeos: adaptative tolerence for the gradient descent',
        default=0.00005)
    parser.add_argument('-xml_dls',
                        type=int,
                        dest='xml_dls',
                        metavar='number',
                        help='Diffeos: Maximum line search iterations',
                        default=20)
    # Deformetrica XML settings (object model)
    parser.add_argument(
        '-xml_ods',
        type=float,
        nargs='+',
        dest='xml_ods',
        metavar='number',
        help='Object: weight of the object in the fidelity-to-data term',
        default=[0.5])
    parser.add_argument('-xml_okw',
                        type=int,
                        nargs='+',
                        dest='xml_okw',
                        metavar='number',
                        help='Object: Kernel width',
                        default=[4])
    parser.add_argument('-xml_ot',
                        nargs='+',
                        dest='xml_ot',
                        metavar='number',
                        help='Object type',
                        default=["NonOrientedSurfaceMesh"])

    # Output directory.
    # NOTE(review): the default is dead code since the argument is required.
    parser.add_argument('-o',
                        '--output_dir',
                        dest='output_dir',
                        metavar='output_dir',
                        help='Output directory where to save the results',
                        required=True,
                        default='test_workflow')

    # Add the default (qsub/graph/...) arguments shared by the pipelines
    default_parser_argument(parser)
    args = parser.parse_args()

    # Create the output folder if it does not exist yet
    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    input_img = [os.path.abspath(f) for f in args.input_img]
    input_par = [os.path.abspath(f) for f in args.input_par]

    nb_fup = args.nb_followup

    # Group the flat per-scan lists subject by subject: for subject k the
    # next nb_fup[k] entries of the lists are its time points.
    # NOTE(review): the *_baselines / time_followups lists built here are
    # not passed to the workflow below — confirm whether they are still
    # needed.
    subjects_ids = args.subject_ids
    ages = args.ages
    img_baselines = []
    par_baselines = []
    time_baselines = []
    time_followups = []
    subject_ids_unique = []
    k = 0
    index = 0
    while index < len(subjects_ids):
        print(str(index) + " " + str(len(subjects_ids)))
        print(str(k) + " " + str(len(nb_fup)))
        print("nb of follow up for subject " + subjects_ids[index] +
              " is : " + str(nb_fup[k]))
        for f in range(nb_fup[k]):
            img_baselines.append(input_img[index])
            par_baselines.append(input_par[index])
            time_baselines.append(ages[index])
            subject_ids_unique.append(subjects_ids[index])
            index += 1
        if index == len(ages):
            break
        time_followups.append(ages[index])
        k += 1
        if k == len(nb_fup):
            break

    # Create the workflow
    workflow = create_spatio_temporal_longitudinal_analysis(
        labels=args.input_lab,
        nb_followup=nb_fup,
        scan_number=len(input_img),
    )
    workflow.base_dir = result_dir
    workflow.inputs.input_node.input_images = input_img
    workflow.inputs.input_node.input_parcellations = input_par
    workflow.inputs.input_node.input_ref = input_img[0]
    workflow.inputs.input_node.subject_ids = args.subject_ids
    workflow.inputs.input_node.ages = args.ages
    workflow.inputs.input_node.xml_dkw = args.xml_dkw
    workflow.inputs.input_node.xml_dkt = args.xml_dkt
    workflow.inputs.input_node.xml_dtp = args.xml_dtp
    workflow.inputs.input_node.xml_dsk = args.xml_dsk
    workflow.inputs.input_node.xml_dcps = args.xml_dcps
    workflow.inputs.input_node.xml_dcpp = args.xml_dcpp
    workflow.inputs.input_node.xml_dfcp = args.xml_dfcp
    workflow.inputs.input_node.xml_dmi = args.xml_dmi
    workflow.inputs.input_node.xml_dat = args.xml_dat
    workflow.inputs.input_node.xml_dls = args.xml_dls
    workflow.inputs.input_node.xml_ods = args.xml_ods
    workflow.inputs.input_node.xml_okw = args.xml_okw
    workflow.inputs.input_node.xml_ot = args.xml_ot

    # Edit the qsub arguments based on the input arguments: when several
    # OpenMP cores are requested, ask for less memory per slot (0.95G floor).
    qsubargs_time = '02:00:00'
    qsubargs_mem = '1.9G'
    if args.use_qsub is True and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 1.9 / args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    # Create a data sink and hook up every output of interest
    ds = pe.Node(nio.DataSink(parameterization=False,
                              base_directory=result_dir),
                 name='data_sink')

    workflow.connect([
        (workflow.get_node('output_node'), ds, [('extracted_meshes',
                                                 '@extracted_meshes')]),
        (workflow.get_node('output_node'), ds, [('xml_diffeo_global',
                                                 '@xml_diffeo_global')]),
        (workflow.get_node('output_node'), ds, [('xml_object_global',
                                                 '@xml_object_global')]),
        (workflow.get_node('output_node'), ds, [('b0_ageNorm_file',
                                                 '@b0_ageNorm_file')]),
        (workflow.get_node('output_node'), ds, [('centroid_b0_vtk_file',
                                                 '@centroid_b0_vtk_file')]),
        (workflow.get_node('output_node'), ds, [('xmlDiffeo_indiv',
                                                 '@xmlDiffeo_indiv')]),
        (workflow.get_node('output_node'), ds, [('CP_file_global',
                                                 '@CP_file_global')]),
        (workflow.get_node('output_node'), ds, [('MOM_file_global',
                                                 '@MOM_file_global')]),
        (workflow.get_node('output_node'), ds, [('struct_mat', '@struct_mat')
                                                ]),
        (workflow.get_node('output_node'), ds, [('global_traj_files_vtk',
                                                 '@global_traj_files_vtk')]),
        (workflow.get_node('output_node'), ds, [('transported_res_mom',
                                                 '@transported_res_mom')]),
        (workflow.get_node('output_node'), ds, [('transported_res_vect',
                                                 '@transported_res_vect')]),
    ])

    # Only draw the graph when requested, then stop
    if args.graph is True:
        generate_graph(workflow=workflow)
        sys.exit(0)

    run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
def main():
    """Run the complete FreeSurfer ``recon-all`` pipeline on T1w images,
    with optional matching T2w images and subject IDs.

    Builds a nipype workflow around a single ``ReconAll`` MapNode whose
    iteration fields depend on which optional inputs were provided, then
    either draws the graph (``--graph``) or runs the workflow.
    """

    pipeline_description = textwrap.dedent('''
    Pipeline to run the complete freesurfer pipeline on structural images
    ''')

    # Create the parser
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=pipeline_description)

    # Input images
    parser.add_argument('--input_t1w',
                        dest='input_t1w',
                        metavar='FILE',
                        help='List of T1w Nifti file(s) to process',
                        nargs='+',
                        required=True)
    parser.add_argument('--input_t2w',
                        dest='input_t2w',
                        metavar='FILE',
                        help='Optional list of T2w Nifti file(s) to process',
                        nargs='+')
    parser.add_argument('--input_sid',
                        dest='input_sid',
                        metavar='FILE',
                        help='Optional list subject ID',
                        nargs='+')

    # Output directory
    parser.add_argument('-o',
                        '--output_dir',
                        dest='output_dir',
                        metavar='DIR',
                        help='Output directory where to save the results',
                        default=os.getcwd())

    # Add the default (qsub/graph/...) arguments shared by the pipelines
    default_parser_argument(parser)

    # Parse the input arguments
    args = parser.parse_args()

    # Create the output folder if it does not exist yet
    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # Check the length of the input lists.
    # Compare with '!=' (value equality): the previous 'is not' compared
    # object identity of the ints, which is unreliable outside CPython's
    # small-integer cache.
    if args.input_t2w is not None:
        if len(args.input_t1w) != len(args.input_t2w):
            raise Exception('The numbers of T1w and T2w files differ')
    if args.input_sid is not None:
        if len(args.input_t1w) != len(args.input_sid):
            raise Exception('The numbers of T1w files and subject ID differ')

    # Create the workflow running FreeSurfer on all subjects
    workflow = pe.Workflow(name='freesurfer')
    workflow.base_output_dir = 'freesurfer'
    input_node = pe.Node(interface=niu.IdentityInterface(
        fields=['T1_files', 'T2_files', 'subject_id']),
                         name='input_node')

    input_node.inputs.T1_files = [os.path.abspath(f) for f in args.input_t1w]
    if args.input_t2w is not None:
        input_node.inputs.T2_files = [
            os.path.abspath(f) for f in args.input_t2w
        ]
    if args.input_sid is not None:
        input_node.inputs.subject_id = args.input_sid
    # The MapNode iteration fields depend on which optional inputs were given
    if args.input_t2w is not None and args.input_sid is not None:
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files', 'T2_file', 'subject_id'],
                           name='recon')
        workflow.connect(input_node, 'T2_files', recon, 'T2_file')
        workflow.connect(input_node, 'subject_id', recon, 'subject_id')
        recon.inputs.use_T2 = True
    elif args.input_t2w is not None:
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files', 'T2_file'],
                           name='recon')
        workflow.connect(input_node, 'T2_files', recon, 'T2_file')
        recon.inputs.use_T2 = True
    elif args.input_sid is not None:
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files', 'subject_id'],
                           name='recon')
        workflow.connect(input_node, 'subject_id', recon, 'subject_id')
    else:
        # Neither T2w files nor subject IDs were provided: map over the T1w
        # files only.  (Previously 'recon' stayed None here and the connect
        # call below raised an error.)
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files'],
                           name='recon')
    workflow.connect(input_node, 'T1_files', recon, 'T1_files')
    recon.inputs.subjects_dir = result_dir
    recon.inputs.openmp = args.openmp_core

    # Only draw the graph when requested, then stop
    if args.graph is True:
        generate_graph(workflow=workflow)
        sys.exit(0)

    # qsub resources: when several OpenMP cores are requested, ask for less
    # memory per slot (0.95G floor)
    qsubargs_time = '48:00:00'
    qsubargs_mem = '5.9G'
    if args.use_qsub is True and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 5.9 / args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    # Run the workflow
    run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
# Ejemplo n.º 8
# 0
def main():
    """Run the DTITK tensor groupwise registration and regional diffusion
    feature extraction pipeline.

    Parses the command line, creates the output directory, builds the
    workflow and either draws its graph (``--graph``) or runs it.
    """

    # Create the help message
    pipeline_description = textwrap.dedent('''
    Pipeline to perform a groupwise between tensor encoded files and extract subject level regional feature
    extraction.

    It uses the DTITK framework (http://dti-tk.sourceforge.net/) to perform rigid/affine/non-linear registration
    between tensor images, using the deviatoric part of the tensor as similarity measure.

    At a later stage, it uses the MNI JHU FA template in order to extract subject specific diffusion features
    (FA-MD-AD-RD) in White matter regions of interest. See the following citation for details about the atlas:
    Oishi, Kenichi, et al. MRI atlas of human white matter. Academic Press, 2010.

    List of the binaries necessary for this pipeline:

    * FSL: fslmaths, fslsplit
    * niftyreg: reg_aladin, reg_f3d, reg_resample
    * dtitk: [all]
    ''')

    # Create the parser
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=pipeline_description)

    # Input images
    parser.add_argument(
        '--input_img',
        dest='input_img',
        metavar='FILE',
        help='List of Nifti file(s) to include in the processing',
        nargs='+',
        required=True)

    # Other inputs
    parser.add_argument(
        '--rigid_it',
        type=int,
        dest='rigid_iteration',
        metavar='INT',
        help='Number of iteration to perform for the rigid step (default is 3)',
        default=3)
    parser.add_argument(
        '--affine_it',
        type=int,
        dest='affine_iteration',
        metavar='INT',
        help=
        'Number of iteration to perform for the affine step (default is 3)',
        default=3)
    parser.add_argument(
        '--nonrigid_it',
        type=int,
        dest='nonrigid_iteration',
        metavar='INT',
        help=
        'Number of iteration to perform for the nonrigid step (default is 6)',
        default=6)
    parser.add_argument(
        '--biomarkers',
        nargs='+',
        metavar='STRING',
        dest='biomarkers',
        default=['fa', 'tr', 'ad', 'rd'],
        help='Optional: indicate what biomarkers you want extracted, ' +
        'The choices are fa tr ad rd. Indicate as a list separated ' +
        'with space. e.g. --biomarkers fa md')
    # Output directory
    parser.add_argument('-o',
                        '--output_dir',
                        dest='output_dir',
                        metavar='DIR',
                        help='Output directory where to save the results',
                        default=os.getcwd())

    # Add the default (qsub/graph/...) arguments shared by the pipelines
    default_parser_argument(parser)

    # Parse the input arguments
    args = parser.parse_args()

    # Create the output folder if it does not exist yet
    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # Create the workflow that generates a cross sectional groupwise and
    # extracts diffusion features subject-wise
    workflow = create_tensor_groupwise_and_feature_extraction_workflow(
        [os.path.abspath(f) for f in args.input_img],
        result_dir,
        rig_iteration=args.rigid_iteration,
        aff_iteration=args.affine_iteration,
        nrr_iteration=args.nonrigid_iteration,
        biomarkers=args.biomarkers)

    # Only draw the graph when requested, then stop
    if args.graph is True:
        generate_graph(workflow=workflow)
        sys.exit(0)

    # qsub resources: when several OpenMP cores are requested, ask for less
    # memory per slot (0.95G floor)
    qsubargs_time = '01:00:00'
    qsubargs_mem = '1.9G'
    if args.use_qsub is True and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 1.9 / args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    # Run the workflow
    run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
# Ejemplo n.º 9
# 0
        '/Users/clairec/DataAndResults/Nipype_tests/test_symmetric_11Avril/spatio_temporal_analysis/gw_binary_to_meshes/extract_mesh/mapflow/_extract_mesh13/parcellationS3_2_swapDim_axe_z_merged_60_maths_maths_res_mesh.vtk'
    ],
    [
        '/Users/clairec/DataAndResults/Nipype_tests/test_symmetric_11Avril/spatio_temporal_analysis/gw_binary_to_meshes/extract_mesh/mapflow/_extract_mesh6/parcellationS4_1_merged_59_maths_maths_res_mesh.vtk',
        '/Users/clairec/DataAndResults/Nipype_tests/test_symmetric_11Avril/spatio_temporal_analysis/gw_binary_to_meshes/extract_mesh/mapflow/_extract_mesh14/parcellationS4_1_swapDim_axe_z_merged_60_maths_maths_res_mesh.vtk'
    ],
    [
        '/Users/clairec/DataAndResults/Nipype_tests/test_symmetric_11Avril/spatio_temporal_analysis/gw_binary_to_meshes/extract_mesh/mapflow/_extract_mesh7/parcellationS4_2_merged_59_maths_maths_res_mesh.vtk',
        '/Users/clairec/DataAndResults/Nipype_tests/test_symmetric_11Avril/spatio_temporal_analysis/gw_binary_to_meshes/extract_mesh/mapflow/_extract_mesh15/parcellationS4_2_swapDim_axe_z_merged_60_maths_maths_res_mesh.vtk'
    ]
]

# Wrap the atlas computation sub-workflow as a node and wire the inputs into
# it, then run the whole workflow with verbose nipype logging.
node_wf = atlas_computation(map_node_use=True, name='wf_as_node')
workflow.connect(input_node, 'input_vtk_meshes', node_wf,
                 'input_node.input_vtk_meshes')
workflow.connect(input_node, 'subject_ids', node_wf, 'input_node.subject_ids')
workflow.connect(input_node, 'subject_ids_2', node_wf,
                 'input_node.subject_ids_2')

# convertVTK2txt = pe.Node(interface=VTKPolyDataReader(),
#                          name='convertVTK2txt')
# workflow.connect(input_node, 'in_struct', convertVTK2txt, 'in_struct')
# workflow.connect(input_node, 'in_filenames', convertVTK2txt, 'in_filenames')

import logging
logger = logging.getLogger('workflow')
logger.setLevel(logging.DEBUG)
# No qsub arguments / parser here: run locally with defaults
run_workflow(workflow=workflow, qsubargs=None, parser=None)

# print() call (not a Python-2 print statement) so the script also runs
# under Python 3
print("the workflow finished to run")
# Ejemplo n.º 10
# 0
def main():
    """Run the cross-sectional Tract-Based Spatial Statistics (TBSS)
    pipeline.

    Parses the command line, optionally validates the randomise design
    files against the number of input images, builds the workflow and
    either draws its graph (``--graph``) or runs it.
    """

    pipeline_description = textwrap.dedent('''

    Cross-section Tract-Based Spatial Statistics (TBSS) pipeline as described in http://10.1371/journal.pone.0045996

    The required input are the tensor images. If the design matrix and contrast file are provided, the permutation
    test is performed, otherwise not.

    The input tensor images can be specified using the --input_img argument as a list.

    The design matrix (--mat) and contrast (--con) can be generated as described on
    this page: http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/GLM with the Glm_gui executable.
    Note that the file order has to match the design matrix.

    The user also need to specify an output directory (--output_dir) where all the
    result files will be saved.

    Using the --g argument you can generate the pipeline graph without running the
    actual pipeline.

    List of the binaries necessary for this pipeline:

    * FSL: fslmaths, fslsplit
    * niftyreg: reg_aladin, reg_f3d, reg_resample
    * dtitk: [all]
    ''')

    # Create the arguments parser
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=pipeline_description)

    # Input
    parser.add_argument('--input_img',
                        dest='input_img',
                        metavar='input_img',
                        help='List of Nifti file(s) to include in the analysis',
                        nargs='+',
                        required=True)
    parser.add_argument('--mat',
                        dest='design_mat',
                        metavar='design_mat',
                        help='Design matrix file to be used by randomised on the skeletonised FA maps.' +
                             ' Required to run the statistical analysis')
    parser.add_argument('--con',
                        dest='design_con',
                        metavar='design_con',
                        help='Design contrast file to be used by randomised on the skeletonised FA maps.' +
                             ' Required to run the statistical analysis')
    parser.add_argument('--skeleton_thr',
                        type=float,
                        dest='skeleton_threshold',
                        metavar='thr',
                        help='Threshold value to use to binarise the TBSS skeleton (default is 0.2)',
                        default=0.2)

    # Output directory
    parser.add_argument('-o', '--output_dir',
                        dest='output_dir',
                        metavar='output_dir',
                        help='Output directory where to save the results',
                        default=os.getcwd(),
                        required=False)

    # Others
    parser.add_argument('--no_randomise',
                        dest='run_randomise',
                        help='Do not perform the randomise test on the skeletonised FA maps (permutation test)',
                        action='store_false',
                        default=True)

    # Add the default (qsub/graph/...) arguments shared by the pipelines
    default_parser_argument(parser)

    # Parse the input arguments
    args = parser.parse_args()

    # Read the input images
    input_images = [os.path.abspath(f) for f in args.input_img]

    # Read and validate the design files when the permutation test is
    # requested
    if args.run_randomise is True:
        # Assign the design matrix variable
        if args.design_mat is None:
            print('No design matrix has been specified. Exit')
            sys.exit(1)  # exit with a non-zero status on error
        # Assign the design contrast variable
        design_matrix = os.path.abspath(args.design_mat)
        if args.design_con is None:
            print('No design contrast has been specified. Exit')
            sys.exit(1)  # exit with a non-zero status on error

        # Check that the number of files agrees with the design matrix.
        # Compare with '!=' (value equality): the previous 'is not' compared
        # object identity of the ints, which is unreliable outside CPython's
        # small-integer cache.
        with open(design_matrix, 'r') as f:
            for line in f:
                if '/NumPoints' in line:
                    if int(line.split()[1]) != len(input_images):
                        print('Incompatible image number and design matrix file')
                        print(line.split()[1]+' images are expected and '+str(len(input_images))+' are specified')
                        # sys.exit rather than the site-provided exit(),
                        # which is unavailable under 'python -S'
                        sys.exit(1)

    # Create the workflow
    workflow = create_cross_sectional_tbss_pipeline(in_files=input_images,
                                                    output_dir=args.output_dir,
                                                    name='tbss_cross_sectional',
                                                    skeleton_threshold=args.skeleton_threshold,
                                                    design_mat=args.design_mat,
                                                    design_con=args.design_con,
                                                    )

    # Only draw the graph when requested, then stop
    if args.graph is True:
        generate_graph(workflow=workflow)
        sys.exit(0)

    # qsub resources: when several OpenMP cores are requested, ask for less
    # memory per slot (0.95G floor)
    qsubargs_time = '01:00:00'
    qsubargs_mem = '1.9G'
    if args.use_qsub is True and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 1.9/args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem

    # Run the workflow
    run_workflow(workflow=workflow,
                 qsubargs=qsubargs,
                 parser=args)
def main():
    """Command-line entry point for the boundary shift integral (BSI) pipeline.

    Parses the command line, checks that the number of mask images (when
    given) matches the number of input images, builds the BSI workflow and
    either writes out its graph (``--graph``) or runs it, optionally through
    qsub with memory requests scaled by the number of OpenMP cores.
    """
    # Create the parser
    pipeline_description = textwrap.dedent('''
    Pipeline to perform a boundary shift integral (BSI) between two time points images
    or a list of different time point images.

    ''')
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=pipeline_description)
    # Input images
    parser.add_argument('-i', '--img', dest='input_img',
                        type=str, nargs='+',
                        metavar='input_img',
                        help='Image file or list of input images',
                        required=True)
    parser.add_argument('-m', '--mask', dest='input_mask',
                        type=str, nargs='+',
                        metavar='input_mask',
                        help='Mask image or list of mask images (optional)',
                        required=False)
    # Output argument
    parser.add_argument('-o', '--output_dir',
                        dest='output_dir',
                        type=str,
                        metavar='directory',
                        help='Output directory containing the BSI results\n' +
                        'Default is the current directory',
                        default=os.path.abspath('.'),
                        required=False)

    # Add default arguments in the parser
    default_parser_argument(parser)

    # Parse the input arguments
    args = parser.parse_args()

    # When masks are supplied, exactly one mask per input image is required
    if args.input_mask is not None and len(args.input_img) != len(args.input_mask):
        print('ERROR: The number of input and mask images are expected to be the same.')
        print(str(len(args.input_img))+' image(s) versus ' + str(len(args.input_mask)) + ' mask(s). Exit.')
        sys.exit(1)

    # exist_ok avoids the exists()/makedirs() race when several jobs start at once
    result_dir = os.path.abspath(args.output_dir)
    os.makedirs(result_dir, exist_ok=True)

    workflow = create_boundary_shift_integral([os.path.abspath(f) for f in args.input_img],
                                              result_dir,
                                              [os.path.abspath(f) for f in args.input_mask] if args.input_mask else None)

    # Output the graph if required, then stop without running the workflow
    if args.graph:
        generate_graph(workflow=workflow)
        sys.exit(0)

    # Edit the qsub arguments based on the input arguments:
    # divide the default 2.9G request across the OpenMP cores, floored at 0.95G
    qsubargs_time = '05:00:00'
    qsubargs_mem = '2.9G'
    if args.use_qsub and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 2.9/args.openmp_core)) + 'G'

    qsubargs = ('-l s_stack=10240 -j y -b y -S /bin/csh -V'
                + ' -l h_rt=' + qsubargs_time
                + ' -l tmem=' + qsubargs_mem
                + ' -l h_vmem=' + qsubargs_mem
                + ' -l vf=' + qsubargs_mem)

    run_workflow(workflow=workflow,
                 qsubargs=qsubargs,
                 parser=args)