Example #1
def arg_parse_params():
    """ parse the input parameters

    :return dict: parameters
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--path_pattern_images',
                        type=str,
                        help='path (pattern) to the input images',
                        required=True)
    parser.add_argument('-l',
                        '--path_pattern_landmarks',
                        type=str,
                        help='path to the input landmarks',
                        required=True)
    parser.add_argument('-csv',
                        '--path_csv',
                        type=str,
                        required=True,
                        help='path to coordinate csv file')
    parser.add_argument('--mode',
                        type=str,
                        required=False,
                        help='type of combination of registration pairs',
                        default=OPTIONS_COMBINE[0],
                        choices=OPTIONS_COMBINE)
    args = parse_arg_params(parser, upper_dirs=['path_csv'])
    return args
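All of these examples hand the configured parser to a shared `parse_arg_params` helper, whose body is not shown on this page. As a rough, hypothetical sketch (the real helper in the source repository may differ), it presumably converts the parsed `argparse.Namespace` into a plain dict, expands path-like arguments, and checks that the parent folder exists for every name passed via `upper_dirs`:

import argparse
import os


def parse_arg_params(parser, upper_dirs=None):
    """ hypothetical sketch of the shared helper used by these examples

    :param parser: configured argparse.ArgumentParser
    :param list(str) upper_dirs: names whose parent folder must exist
    :return dict: parameters
    """
    args = vars(parser.parse_args())
    # expand user home and make path-like arguments absolute
    for name in list(args):
        if name.startswith('path') and args[name] is not None:
            args[name] = os.path.abspath(os.path.expanduser(args[name]))
    # for the selected names only the parent folder has to exist already
    for name in (upper_dirs or []):
        parent = os.path.dirname(args[name])
        assert os.path.isdir(parent), 'missing folder: %s' % parent
    return args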
Example #2
    def main(cls, params=None):
        """ run the Main of selected experiment

        :param cls: class of selected benchmark
        :param dict params: set of input parameters
        """
        if not params:
            arg_parser = create_basic_parser(cls.__name__)
            arg_parser = cls.extend_parse(arg_parser)
            params = parse_arg_params(arg_parser)

        logging.info('running...')
        benchmark = cls(params)
        benchmark.run()
        path_expt = benchmark.params['path_exp']
        logging.info('Done.')
        return params, path_expt
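Because `main` receives the class itself as `cls`, it is presumably declared as a classmethod on the benchmark base class, which lets a concrete benchmark be launched in one line. A minimal hypothetical usage, assuming `ImRegBenchmark` (seen in Example #6) is that base class and the import path below is correct:

from birl.benchmark import ImRegBenchmark  # assumed import path


class MyBenchmark(ImRegBenchmark):
    """ hypothetical benchmark that only customises the CLI parser """

    @classmethod
    def extend_parse(cls, arg_parser):
        # add benchmark-specific options on top of the basic parser
        arg_parser.add_argument('--my_option', type=int, default=1)
        return arg_parser


if __name__ == '__main__':
    # with params=None, main() builds and parses the CLI parser itself
    params, path_expt = MyBenchmark.main()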
Example #3
def arg_parse_params():
    """ argument parser from cmd

    :return dict:
    """
    # SEE: https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument('-a',
                        '--path_annots',
                        type=str,
                        required=False,
                        help='path to folder with annotations')
    parser.add_argument('-d',
                        '--path_dataset',
                        type=str,
                        required=False,
                        help='path to the output directory - dataset')
    parser.add_argument('--scales',
                        type=int,
                        required=False,
                        nargs='*',
                        help='generated scales for the dataset',
                        default=DEFAULT_SCALES)
    parser.add_argument('--nb_selected',
                        type=float,
                        required=False,
                        default=None,
                        help='number or ratio of selected landmarks')
    parser.add_argument('--nb_total',
                        type=int,
                        required=False,
                        default=None,
                        help='total number of generated landmarks')
    parser.add_argument('--nb_workers',
                        type=int,
                        required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    args = parse_arg_params(parser)
    if not is_iterable(args['scales']):
        args['scales'] = [args['scales']]
    return args
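The `is_iterable` check guards against `--scales` arriving as a single scalar (e.g., via a scalar default) rather than the list that `nargs='*'` would normally produce. The helper itself is not shown; a minimal sketch of what it presumably tests:

def is_iterable(var):
    """ hypothetical sketch: lists/tuples/sets count as iterable, strings do not """
    return isinstance(var, (list, tuple, set))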
Example #4
def arg_parse_params():
    """ parse the input parameters
    :return dict: parameters
    """
    # SEE: https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--path_image', type=str, required=True, help='path to the input image')
    parser.add_argument('-l', '--path_landmarks', type=str, required=True, help='path to the input landmarks')
    parser.add_argument('-o', '--path_out', type=str, required=True, help='path to the output folder')
    parser.add_argument(
        '-n', '--nb_samples', type=int, required=False, help='number of deformed images', default=NB_DEFORMATIONS
    )
    parser.add_argument(
        '--visual', action='store_true', required=False, default=False, help='visualise the landmarks in images'
    )
    parser.add_argument(
        '--nb_workers', type=int, required=False, default=NB_WORKERS, help='number of processes in parallel'
    )
    args = parse_arg_params(parser, upper_dirs=['path_out'])
    args['visual'] = bool(args['visual'])
    return args
Example #5
    def test_argparse(self):
        with patch('argparse._sys.argv', ['script.py']):
            args = parse_arg_params(argparse.ArgumentParser())
            self.assertIsInstance(args, dict)
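Patching `argparse._sys.argv` works because `argparse` imports `sys` under that private alias and reads `argv[1:]` when `parse_args()` is called with no arguments, so the parser sees an empty command line inside the test. The same trick lets you feed fake options to any script-level parser, as in this hypothetical stand-alone variant:

import argparse
from unittest.mock import patch


def test_parser_with_fake_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--nb_workers', type=int, default=1)
    # pretend the script was launched as `script.py --nb_workers 4`
    with patch('argparse._sys.argv', ['script.py', '--nb_workers', '4']):
        args = vars(parser.parse_args())
    assert args['nb_workers'] == 4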
Example #6
    logging.info('Compute landmarks statistic.')
    _compute_lnds_stat = partial(ImRegBenchmark.compute_registration_statistic,
                                 df_experiments=df_experiments,
                                 path_dataset=path_dataset,
                                 path_experiment=path_experiment,
                                 path_reference=path_reference)
    # NOTE: this has to run in SINGLE thread so there is SINGLE table instance
    list(
        iterate_mproc_map(_compute_lnds_stat,
                          df_experiments.iterrows(),
                          desc='Statistic',
                          nb_workers=1))

    name_results, _ = os.path.splitext(os.path.basename(path_results))
    path_results = os.path.join(path_output, name_results + '_NEW.csv')
    logging.debug('exporting CSV results: %s', path_results)
    df_experiments.to_csv(path_results)

    path_json = export_summary_json(df_experiments, path_experiment,
                                    path_output, min_landmarks, details)
    return path_json


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    arg_params = parse_arg_params(create_parser())
    logging.info('running...')
    main(**arg_params)
    logging.info('DONE')
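The `list(...)` wrapper above suggests `iterate_mproc_map` yields results lazily, and forcing `nb_workers=1` makes it degrade to plain sequential iteration so every update lands on the single shared table instance. A rough, hypothetical sketch of such a helper (the real one presumably also drives a progress bar via `desc`):

import multiprocessing as mproc


def iterate_mproc_map(func, iterable, desc=None, nb_workers=1):
    """ hypothetical sketch: lazily map `func`, in parallel when requested

    :param desc: progress-bar label in the real helper; unused here
    """
    if nb_workers > 1:
        with mproc.Pool(nb_workers) as pool:
            for result in pool.imap(func, iterable):
                yield result
    else:
        for item in iterable:
            yield func(item)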
Example #7
def main(params, cls_benchmark):
    """ run the Main of selected experiment

    :param {str: str|float} params: set of input parameters
    :param cls_benchmark: class of selected benchmark
    """
    logging.info('running...')
    logging.info(__doc__)
    benchmark = cls_benchmark(params)
    benchmark.run()
    path_expt = benchmark.params['path_exp']
    del benchmark
    logging.info('Done.')
    return path_expt


# RUN by given parameters
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    arg_parser = create_basic_parse()
    arg_parser = extend_parse(arg_parser)
    arg_params = parse_arg_params(arg_parser)
    path_expt = main(arg_params, BmTemplate)

    if arg_params.get('run_comp_benchmark', False):
        # from bm_experiments import bm_comp_perform
        # bm_comp_perform.main(path_expt)
        logging.info('Here you can call the separate benchmark'
                     ' to measure the performance of your computer.')