Example #1
0
def test(args):
    """Build test-job batch scripts for the selected models and preview them.

    Filters models by the regex patterns in ``args.models`` (if given),
    creates one batch script per model with a result entry, prints a preview
    of the last generated script plus the shell commands, and launches them
    only when ``--run`` is set.
    """
    # parse model-index.yml
    model_index_file = MMCLS_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})

    script_name = osp.join('tools', 'test.py')
    port = args.port

    commands = []
    if args.models:
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {
            name: info
            for name, info in models.items()
            if any(re.match(p, name) for p in patterns)
        }
        if not filter_models:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models

    # Keep the last non-empty script path so the preview shows a real file.
    preview_script = ''
    for model_info in models.values():
        # Models without published results have nothing to verify against.
        if model_info.results is None:
            continue
        script_path = create_test_job_batch(commands, model_info, args, port,
                                            script_name)
        preview_script = script_path or preview_script
        port += 1

    command_str = '\n'.join(commands)

    table = Table()
    table.add_column(str(preview_script))
    table.add_column('Shell command preview')
    table.add_row(
        Syntax.from_path(preview_script,
                         background_color='default',
                         line_numbers=True,
                         word_wrap=True),
        Syntax(command_str,
               'bash',
               background_color='default',
               line_numbers=True,
               word_wrap=True))
    console.print(table)

    if args.run:
        os.system(command_str)
    else:
        console.print('Please set "--run" to start the job')
Example #2
0
def check(filepath):
    """Check if the file syntax is valid"""
    if filepath == "":
        # Fall back to the conventional file name in the current directory.
        filepath = "model-index.yml"
        if not os.path.isfile(filepath):
            click.echo(
                "ERROR: model-index.yml not found in current directory.")
            return

    click.echo(f'Checking {filepath}')
    model_index = load(filepath)
    model_index.check()
Example #3
0
def summary(args):
    """Summarize test results against the metrics published in model-index.

    For each selected model, loads ``<work_dir>/<model>/result.pkl`` (if it
    exists), compares each expected metric from model-index against the
    measured value, prints the table and optionally saves it with ``--save``.
    """
    model_index_file = MMCLS_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})

    work_dir = Path(args.work_dir)

    if args.models:
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {
            name: info
            for name, info in models.items()
            if any(re.match(p, name) for p in patterns)
        }
        if not filter_models:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models

    summary_data = {}
    for model_name, model_info in models.items():
        if model_info.results is None:
            continue

        # Skip if not found result file.
        result_file = work_dir / model_name / 'result.pkl'
        if not result_file.exists():
            summary_data[model_name] = {}
            continue

        with open(result_file, 'rb') as file:
            results = pickle.load(file)
        # Use the result file's mtime as the test date.
        date = datetime.fromtimestamp(result_file.lstat().st_mtime)

        expect_metrics = model_info.results[0].metrics

        # extract metrics (local renamed to avoid shadowing this function)
        model_summary = {'date': date.strftime('%Y-%m-%d')}
        for key_yml, key_res in METRICS_MAP.items():
            if key_yml not in expect_metrics:
                continue
            assert key_res in results, \
                f'{model_name}: No metric "{key_res}"'
            model_summary[key_yml] = dict(
                expect=float(expect_metrics[key_yml]),
                result=float(results[key_res]))

        summary_data[model_name] = model_summary

    show_summary(summary_data)
    if args.save:
        save_summary(summary_data, models, work_dir)
def train(args):
    """Build train-job batch scripts for the selected models and preview them.

    Only models whose ``Months`` schedule (default: every month) includes the
    current month are queued. Prints a preview of the last generated batch
    script and the shell commands; launches them only when ``--run`` is set.
    """
    models_cfg = load(str(Path(__file__).parent / 'bench_train.yml'))
    models_cfg.build_models_with_collections()
    models = {model.name: model for model in models_cfg.models}

    script_name = osp.join('tools', 'train.py')
    port = args.port

    commands = []
    if args.models:
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {}
        for k, v in models.items():
            if any(re.match(pattern, k) for pattern in patterns):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models

    # BUG FIX: `script_path` was only assigned inside the loop branch, so the
    # preview below raised NameError when no model was scheduled for the
    # current month. Track the last generated path in an accumulator that is
    # always defined, matching how `test()` handles its preview script.
    preview_script = ''
    for model_info in models.values():
        months = model_info.data.get('Months', range(1, 13))
        if datetime.now().month in months:
            script_path = create_train_job_batch(commands, model_info, args,
                                                 port, script_name)
            preview_script = script_path or preview_script
            port += 1

    command_str = '\n'.join(commands)

    preview = Table()
    preview.add_column(str(preview_script))
    preview.add_column('Shell command preview')
    preview.add_row(
        Syntax.from_path(preview_script,
                         background_color='default',
                         line_numbers=True,
                         word_wrap=True),
        Syntax(command_str,
               'bash',
               background_color='default',
               line_numbers=True,
               word_wrap=True))
    console.print(preview)

    if args.run:
        os.system(command_str)
    else:
        console.print('Please set "--run" to start the job')
def summary(args):
    """Summarize training logs against the metrics in bench_train.yml.

    For each selected model, reads the newest ``*.log.json`` under
    ``<work_dir>/<model>``, extracts the last and best validation values for
    every expected metric, prints the table and optionally saves it with
    ``--save``.
    """
    models_cfg = load(str(Path(__file__).parent / 'bench_train.yml'))
    models = {model.name: model for model in models_cfg.models}

    work_dir = Path(args.work_dir)
    dir_map = {p.name: p for p in work_dir.iterdir() if p.is_dir()}

    if args.models:
        patterns = [re.compile(pattern) for pattern in args.models]
        filter_models = {
            name: info
            for name, info in models.items()
            if any(re.match(p, name) for p in patterns)
        }
        if not filter_models:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models

    summary_data = {}
    for model_name, model_info in models.items():
        # Skip if not found any log file.
        sub_dir = dir_map.get(model_name)
        if sub_dir is None:
            summary_data[model_name] = {}
            continue
        log_files = sorted(sub_dir.glob('*.log.json'))
        if not log_files:
            continue
        log_file = log_files[-1]

        # parse train log, keeping only validation-mode records
        with open(log_file) as f:
            json_logs = [json.loads(line) for line in f.readlines()]
        val_logs = [
            log for log in json_logs
            if 'mode' in log and log['mode'] == 'val'
        ]

        if not val_logs:
            continue

        expect_metrics = model_info.results[0].metrics

        # extract metrics (local renamed to avoid shadowing this function)
        entry = {'log_file': log_file}
        for key_yml, key_res in METRICS_MAP.items():
            if key_yml not in expect_metrics:
                continue
            assert key_res in val_logs[-1], \
                f'{model_name}: No metric "{key_res}"'
            expect_result = float(expect_metrics[key_yml])
            last = float(val_logs[-1][key_res])
            # Stable sort + [-1]: on ties, the later epoch wins.
            best_log = sorted(val_logs, key=lambda x: x[key_res])[-1]
            entry[key_yml] = dict(expect=expect_result,
                                  last=last,
                                  best=float(best_log[key_res]),
                                  best_epoch=int(best_log['epoch']))
        summary_data[model_name] = entry

    show_summary(summary_data)
    if args.save:
        save_summary(summary_data, models, work_dir)