Esempio n. 1
0
def test_iap():
    """Check that the introspective parser maps CLI inputs onto dummy_flow."""
    # Reset argv to just the program name before appending the test inputs.
    sys.argv = [sys.argv[0]]

    pos_keys = ['positional_str', 'positional_bool', 'positional_int',
                'positional_float']
    opt_keys = ['optional_str', 'optional_bool', 'optional_int',
                'optional_float']

    pos_results = ['test', 0, 10, 10.2]
    opt_results = ['opt_test', 1, 20, 20.2]

    # Optional arguments go first on the command line, then positionals.
    cli_inputs = inputs_from_results(opt_results, opt_keys)
    cli_inputs += inputs_from_results(pos_results)
    sys.argv.extend(cli_inputs)

    parser = IntrospectiveArgumentParser()
    parser.add_workflow(dummy_flow)
    args = parser.get_flow_args()

    expected = dict(zip(pos_keys + opt_keys, pos_results + opt_results))

    # Test if types and order are respected
    for key, value in expected.items():
        npt.assert_equal(args[key], value)

    # Test if **args really fits dummy_flow's arguments
    return_values = dummy_flow(**args)
    npt.assert_array_equal(return_values,
                           pos_results + opt_results + [2.0])
Esempio n. 2
0
def test_iap():
    """Verify the introspective parser feeds CLI values into TestFlow.run."""
    # Start from a clean argv containing only the program name.
    sys.argv = [sys.argv[0]]

    pos_keys = ['positional_str', 'positional_bool', 'positional_int',
                'positional_float']
    opt_keys = ['optional_str', 'optional_bool', 'optional_int',
                'optional_float']

    pos_results = ['test', 0, 10, 10.2]
    opt_results = ['opt_test', True, 20, 20.2]

    # Optionals are rendered with their '--' flags, positionals bare.
    cli = inputs_from_results(opt_results, opt_keys, optional=True)
    cli.extend(inputs_from_results(pos_results))
    sys.argv += cli

    parser = IntrospectiveArgumentParser()
    dummy_flow = TestFlow()
    parser.add_workflow(dummy_flow)
    parsed = parser.get_flow_args()

    # Test if types and order are respected
    for key, expected in zip(pos_keys + opt_keys, pos_results + opt_results):
        npt.assert_equal(parsed[key], expected)

    # Test if **parsed really fits dummy_flow's run arguments
    outputs = dummy_flow.run(**parsed)
    npt.assert_array_equal(outputs, pos_results + opt_results + [2.0])
Esempio n. 3
0
def test_optional_str():
    """Exercise the parser's handling of optional, variable-count str args."""

    def _fresh_parser(cli_inputs):
        # Build a new parser/workflow pair with argv set to the given inputs.
        sys.argv = [sys.argv[0]] + cli_inputs
        parser = IntrospectiveArgumentParser()
        flow = DummyWorkflowOptionalStr()
        parser.add_workflow(flow)
        return parser, flow

    # Test optional and variable str argument exists but does not have a value
    parser, flow = _fresh_parser(['--optional_str_1'])
    args = parser.get_flow_args()
    npt.assert_equal(args['optional_str_1'], [])
    return_values = flow.run(**args)
    npt.assert_array_equal(return_values, [[]] + ['default'])

    # Test optional and variable str argument exists and has a value
    parser, flow = _fresh_parser(['--optional_str_1', 'test'])
    args = parser.get_flow_args()
    npt.assert_equal(args['optional_str_1'], ['test'])
    return_values = flow.run(**args)
    npt.assert_array_equal(return_values, [['test']] + ['default'])

    # Test optional str empty arguments: a flag that requires a value but
    # got none must make argparse exit with code 2.
    parser, flow = _fresh_parser(['--optional_str_2'])
    with npt.assert_raises(SystemExit) as cm:
        parser.get_flow_args()
    npt.assert_equal(cm.exception.code, 2)
Esempio n. 4
0
def test_nargs():
    """Variable-count positional ints plus an optional int parse correctly."""
    variadic = [str(n) for n in range(1, 9)]
    # Fresh argv: program name, the eight positionals, then the optional.
    sys.argv = [sys.argv[0]] + variadic + ['--optional_int', '2']

    parser = IntrospectiveArgumentParser()
    parser.add_workflow(nargs_flow)
    flow_args = parser.get_flow_args()
    var_ints, _opt_int = nargs_flow(**flow_args)
    npt.assert_equal(len(var_ints), len(variadic))
Esempio n. 5
0
def test_nargs():
    """Check that every variadic positional int reaches the workflow."""
    sys.argv = [sys.argv[0]]
    positional_inputs = ['1', '2', '3', '4', '5', '6', '7', '8']
    optional_inputs = ['--optional_int', '2']
    sys.argv.extend(positional_inputs)
    sys.argv.extend(optional_inputs)

    parser = IntrospectiveArgumentParser()
    parser.add_workflow(nargs_flow)
    parsed = parser.get_flow_args()
    parsed_var_ints, _ = nargs_flow(**parsed)
    npt.assert_equal(len(parsed_var_ints), len(positional_inputs))
Esempio n. 6
0
def run_flow(flow):
    """ Wraps the process of building an argparser that reflects the workflow
    that we want to run along with some generic parameters like logging,
    force and output strategies. The resulting parameters are then fed to
    the workflow's run method.

    Parameters
    ----------
    flow : workflow instance
        The workflow whose run() signature is exposed on the command line.

    Returns
    -------
    Whatever ``flow.run(**args)`` returns.
    """
    parser = IntrospectiveArgumentParser()
    sub_flows_dicts = parser.add_workflow(flow)

    # Common workflow arguments
    parser.add_argument('--force',
                        dest='force',
                        action='store_true',
                        default=False,
                        help='Force overwriting output files.')

    parser.add_argument('--out_strat',
                        action='store',
                        dest='out_strat',
                        metavar='string',
                        required=False,
                        default='append',
                        help='Strategy to manage output creation.')

    parser.add_argument('--mix_names',
                        dest='mix_names',
                        action='store_true',
                        default=False,
                        help='Prepend mixed input names to output names.')

    # Add logging parameters common to all workflows
    msg = 'Log messages display level. Accepted options include CRITICAL,'
    msg += ' ERROR, WARNING, INFO, DEBUG and NOTSET (default INFO).'
    parser.add_argument('--log_level',
                        action='store',
                        dest='log_level',
                        metavar='string',
                        required=False,
                        default='INFO',
                        help=msg)

    parser.add_argument('--log_file',
                        action='store',
                        dest='log_file',
                        metavar='string',
                        required=False,
                        default='',
                        help='Log file to be saved.')

    args = parser.get_flow_args()

    logging.basicConfig(filename=args['log_file'],
                        format='%(levelname)s:%(message)s',
                        level=get_level(args['log_level']))

    # Output management parameters
    flow._force_overwrite = args['force']
    flow._output_strategy = args['out_strat']
    flow._mix_names = args['mix_names']

    # Keep only workflow related parameters
    for generic_key in ('force', 'log_level', 'log_file', 'out_strat',
                        'mix_names'):
        del args[generic_key]

    # Move subflow-related params out of args and rename them to their
    # original parameter names. Iterate over a snapshot of the keys:
    # the loop body inserts the short ('param') key while removing the
    # dotted ('subflow.param') one, and mutating a dict while iterating
    # it is undefined — the freshly inserted short key could be visited
    # and re-split, raising IndexError.
    for sub_flow, params_dict in iteritems(sub_flows_dicts):
        for key in list(params_dict):
            if key in args:
                # Rename dictionary key to the original param name
                params_dict[key.split('.')[1]] = args.pop(key)
                del params_dict[key]

    if sub_flows_dicts:
        flow.set_sub_flows_optionals(sub_flows_dicts)

    return flow.run(**args)
Esempio n. 7
0
def run_flow(flow):
    """ Wraps the process of building an argparser that reflects the workflow
    that we want to run along with some generic parameters like logging,
    force and output strategies. The resulting parameters are then fed to
    the workflow's run method.

    Parameters
    ----------
    flow : workflow instance
        The workflow whose run() signature is exposed on the command line.

    Returns
    -------
    Whatever ``flow.run(**args)`` returns.
    """
    parser = IntrospectiveArgumentParser()
    sub_flows_dicts = parser.add_workflow(flow)

    # Common workflow arguments
    parser.add_argument('--force', dest='force',
                        action='store_true', default=False,
                        help='Force overwriting output files.')

    parser.add_argument('--version', action='version',
                        version='DIPY {}'.format(dipy_version))

    parser.add_argument('--out_strat', action='store', dest='out_strat',
                        metavar='string', required=False, default='absolute',
                        help='Strategy to manage output creation.')

    parser.add_argument('--mix_names', dest='mix_names',
                        action='store_true', default=False,
                        help='Prepend mixed input names to output names.')

    # Add logging parameters common to all workflows
    msg = 'Log messages display level. Accepted options include CRITICAL,'
    msg += ' ERROR, WARNING, INFO, DEBUG and NOTSET (default INFO).'
    parser.add_argument('--log_level', action='store', dest='log_level',
                        metavar='string', required=False, default='INFO',
                        help=msg)

    parser.add_argument('--log_file', action='store', dest='log_file',
                        metavar='string', required=False, default='',
                        help='Log file to be saved.')

    args = parser.get_flow_args()

    logging.basicConfig(filename=args['log_file'],
                        format='%(levelname)s:%(message)s',
                        level=get_level(args['log_level']))

    # Output management parameters
    flow._force_overwrite = args['force']
    flow._output_strategy = args['out_strat']
    flow._mix_names = args['mix_names']

    # Keep only workflow related parameters
    for generic_key in ('force', 'log_level', 'log_file', 'out_strat',
                        'mix_names'):
        del args[generic_key]

    # Move subflow-related params out of args and rename them to their
    # original parameter names. Iterate over a snapshot of the keys:
    # the loop body inserts the short ('param') key while removing the
    # dotted ('subflow.param') one, and mutating a dict while iterating
    # it is undefined — the freshly inserted short key could be visited
    # and re-split, raising IndexError.
    for sub_flow, params_dict in iteritems(sub_flows_dicts):
        for key in list(params_dict):
            if key in args:
                # Rename dictionary key to the original param name
                params_dict[key.split('.')[1]] = args.pop(key)
                del params_dict[key]

    if sub_flows_dicts:
        flow.set_sub_flows_optionals(sub_flows_dicts)

    return flow.run(**args)
Esempio n. 8
0
            if hasattr(tractography_data, 'signal'):
                signal = tractography_data.signal.get_data()
                data = signal[:, :, :, 0]
                affine = np.eye(4)

        if f.endswith('.nii.gz') or f.endswith('.nii'):

            img = nib.load(f)
            data = img.get_data()
            affine = img.get_affine()
            if verbose:
                print(affine)

    # tmp save
    # tractogram = nib.streamlines.Tractogram(tractograms[0])
    # tractogram.apply_affine(img.affine)
    # nib.streamlines.save(tractogram, "tmp.tck")
    # exit()

    horizon(tractograms, data, affine, cluster, cluster_thr, random_colors,
            length_lt, length_gt, clusters_lt, clusters_gt)


# Build the CLI parser at import time so the workflow's signature is
# reflected into command-line options.
parser = IntrospectiveArgumentParser()
parser.add_workflow(horizon_flow)

if __name__ == '__main__':
    # Parse argv into a keyword dict matching horizon_flow's parameters
    # and invoke the workflow with it.
    args = parser.get_flow_args()
    horizon_flow(**args)