Example 1
def _cmd_tune(args):
    """Autotune the model given by args.model_spec, then analyze the tuning log."""
    transport_launcher = device_util.DeviceTransportLauncher(
        {'use_tracker': True})
    log_util.config(['autotune', args.model_spec], logging.INFO)
    model_inst, _ = model.instantiate_from_spec(args.model_spec)
    tasks, log_file = tune_model(args, transport_launcher, model_inst)
    analyze(args, model_inst, tasks, log_file, promote=True)
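
All of the _cmd_* handlers in these examples take a parsed argparse namespace, but parse_args itself is not shown. A minimal sketch of how the dispatch might be wired up, assuming hypothetical subcommand names and a model_spec string of the form model_name:setting:config (the three fields the evaluation table in Example 4 prints):

import argparse


def parse_args():
    # Hypothetical wiring; the real parse_args is not part of these examples.
    parser = argparse.ArgumentParser(description='microTVM autotuning driver')
    subparsers = parser.add_subparsers(dest='command', required=True)

    tune = subparsers.add_parser('tune', help='autotune a model and analyze the log')
    tune.add_argument('model_spec',
                      help='model spec, e.g. "model_name:setting:config"')
    tune.set_defaults(func=_cmd_tune)

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    args.func(args)  # dispatch to the selected _cmd_* handler
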
Example 2
def _cmd_launch_transport(args):
    """Launch the device transport (plus tracker) and block until Ctrl+C."""
    log_util.config(['autotune', args.model_spec])
    transport_launcher = device_util.DeviceTransportLauncher(
        {'use_tracker': True})
    generate_config = not args.skip_writing_transport_config

    launch_kw = {'generate_config': generate_config}
    if generate_config:
        model_inst, _ = model.instantiate_from_spec(args.model_spec)
        index_and_task = None
        if args.task_index is not None:
            tasks = model_inst.extract_tunable_tasks(model_inst.build_model())
            index_and_task = (args.task_index, tasks[args.task_index])
        launch_kw['generate_config_func'] = (
            tvm.micro.device.arm.stm32f746xx.generate_config)
        launch_kw['generate_config_kw'] = {
            'section_constraints': model_inst.section_constraints(index_and_task),
        }

    with transport_launcher.launch(**launch_kw):
        print('Transport launched. Press Ctrl+C to terminate.')
        try:
            while True:
                time.sleep(10)
        except KeyboardInterrupt:
            print('Caught SIGINT; shutting down')
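
Two idioms in this command are worth isolating: keyword arguments are accumulated in a dict and splatted into launch() so that config-generation parameters are only passed when generation is actually enabled, and the process then idles until SIGINT. A self-contained sketch of the same pattern, with a stand-in context manager instead of the real DeviceTransportLauncher:

import contextlib
import time


@contextlib.contextmanager
def _fake_launch(**launch_kw):
    # Stand-in for transport_launcher.launch(); only prints what it was given.
    print('launching with', launch_kw)
    try:
        yield
    finally:
        print('transport torn down')


def serve(generate_config):
    launch_kw = {'generate_config': generate_config}
    if generate_config:
        # Only pass generation parameters when generation is enabled.
        launch_kw['generate_config_kw'] = {'section_constraints': {}}
    with _fake_launch(**launch_kw):
        print('Transport launched. Press Ctrl+C to terminate.')
        try:
            while True:
                time.sleep(10)
        except KeyboardInterrupt:
            print('Caught SIGINT; shutting down')
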
Example 3
def _cmd_rpc_dev_config(args):
    """Write OpenOCD and RPC server configs for the target device."""
    log_util.config([], logging.INFO, console_only=True)
    transport_launcher = device_util.DeviceTransportLauncher(
        {'use_tracker': True})
    model_inst, _ = model.instantiate_from_spec(args.model_spec)
    index_and_task = None
    if args.task_index is not None:
        tasks = model_inst.extract_tunable_tasks(model_inst.build_model())
        index_and_task = (args.task_index, tasks[args.task_index])

    transport_launcher.generate_rpc_server_configs(
        tvm.micro.device.arm.stm32f746xx.generate_config,
        {'section_constraints': model_inst.section_constraints(index_and_task)})
    transport_launcher.generate_openocd_configs()
    print('Wrote OpenOCD and RPC server configs underneath '
          f'{transport_launcher.work_dirtree_root}')
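
Examples 2 and 3 both rebuild the model and index into its tunable tasks to form index_and_task. A hypothetical helper (not in the original code) could factor that out and add a bounds check:

def _select_task(model_inst, task_index):
    # Hypothetical helper for the pattern shared by the two commands above;
    # returns None when no task index was requested.
    if task_index is None:
        return None
    tasks = model_inst.extract_tunable_tasks(model_inst.build_model())
    if not 0 <= task_index < len(tasks):
        raise IndexError(f'task_index {task_index} out of range '
                         f'(model has {len(tasks)} tunable tasks)')
    return (task_index, tasks[task_index])
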
Example 4
def main():
    """Evaluate each model spec over a shared dataset, optionally cross-validating."""
    args = parse_args()

    log_util.config(['eval', '-'.join(args.model_specs)], level=args.log_level)

    model_inst_setting = collections.OrderedDict()
    for spec in args.model_specs:
        assert spec not in model_inst_setting, f'spec {spec} given twice'
        model_inst_setting[spec] = model.instantiate_from_spec(spec)

    validate_against = None
    if args.validate_against:
        assert args.validate_against not in model_inst_setting, (
            f'--validate-against={args.validate_against} also given in model_specs '
            'command-line argument')
        validate_against = model.instantiate_from_spec(args.validate_against)
        model_inst_setting[args.validate_against] = validate_against

    dataset_generator_name = next(iter(
        model_inst_setting.values()))[0].dataset_generator_name()
    for spec, (m, _) in model_inst_setting.items():
        assert m.dataset_generator_name() == dataset_generator_name, (
            f'expected all models to share the same dataset, but {spec} has '
            f'{m.dataset_generator_name()}')
    dataset_gen = dataset.DatasetGenerator.instantiate(
        dataset_generator_name, {'shuffle': not validate_against})

    util.DEBUG_MODE = args.use_debug_executor

    samples = dataset_gen.generate(args.num_samples)
    results = {}
    with contextlib.ExitStack() as all_models_stack:
        if args.debug_micro_execution:
            _LOG.warning(
                'NOTE: to debug micro execution, compiled object files will be left in your '
                'temp directory at: %s',
                contrib_utils.TempDirectory._DEBUG_PARENT_DIR)
            _LOG.warning(
                'This is a limitation of the current microTVM compilation structure')

            all_models_stack.enter_context(
                contrib_utils.TempDirectory.set_keep_for_debug())

        for spec, (model_inst, setting) in model_inst_setting.items():
            with contextlib.ExitStack() as model_stack:
                if args.use_tuned_schedule:
                    if args.use_tuned_schedule == USE_DEFAULT_TUNED_SCHEDULE:
                        tuned_schedule = autotvm_log_util.get_default_log_path(
                            autotvm_log_util.compute_job_name(
                                spec, model_inst))
                        if not os.path.exists(tuned_schedule):
                            _LOG.warning(
                                'Tuned schedule for %s not found; proceeding without: %s',
                                spec, tuned_schedule)
                            tuned_schedule = None

                    else:
                        tuned_schedule = args.use_tuned_schedule

                    if tuned_schedule is not None:
                        model_stack.enter_context(
                            autotvm.apply_history_best(tuned_schedule))

                compiled = model_inst.build_model()
                results[spec] = globals()[f'eval_{setting}'](args, model_inst,
                                                             compiled, samples)

    if args.validate_against:
        for i in range(args.num_samples):
            allclose = {}
            for model_spec in args.model_specs:
                allclose[model_spec] = np.allclose(
                    results[model_spec][i]['label'].astype('float32'),
                    results[args.validate_against][i]['label'].astype('float32'))

            _LOG.info('Sample %d ---->', i)
            header = [['model_name', 'setting', 'config']] + list(range(10))
            rows = []

            def _add_row(model_spec, values, level):
                model_spec_parts = model_spec.split(':', 2)
                while len(model_spec_parts) < 3:
                    model_spec_parts.append('')
                rows.append((level, [model_spec_parts] + values))

            for model_spec in args.model_specs:
                if model_spec != args.validate_against:
                    # Log mismatching rows at ERROR so they stand out.
                    level = logging.INFO if allclose[model_spec] else logging.ERROR
                    _add_row(model_spec, list(results[model_spec][i]['label']),
                             level)

            _add_row(args.validate_against,
                     results[args.validate_against][i]['label'].tolist(),
                     logging.INFO)

            spacings = []
            for c in range(3):
                spacing = max([len(header[0][c])] +
                              [len(r[0][c]) for _, r in rows]) + 1
                spacings.append(f'{{0:{spacing}s}}')

            _LOG.info(''.join(
                [spacings[c].format(header[0][c]) for c in range(3)] +
                ['{0:5d}'.format(c) for c in header[1:]]))
            for level, r in rows:
                model_spec_parts = ''.join(
                    spacings[c].format(r[0][c]) for c in range(3))
                result_str = ''.join(' {0:+04d}'.format(y) for y in r[1:])
                _LOG.log(level, '%s%s', model_spec_parts, result_str)
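
The validation loop's core check is a float32 np.allclose between each model's output labels and the reference model's, with mismatches logged at ERROR. A self-contained sketch of just that comparison, using made-up label vectors and a hypothetical model spec:

import logging

import numpy as np

logging.basicConfig(level=logging.INFO)
_LOG = logging.getLogger('eval-sketch')


def compare_labels(candidate, reference, model_spec):
    # Cast both sides to float32 so integer (e.g. quantized) and float
    # outputs compare under the same tolerance, as main() does above.
    ok = np.allclose(np.asarray(candidate, dtype='float32'),
                     np.asarray(reference, dtype='float32'))
    _LOG.log(logging.INFO if ok else logging.ERROR,
             '%s: %s', model_spec, 'match' if ok else 'MISMATCH')
    return ok


# Toy usage with made-up label vectors:
compare_labels([1, 2, 3], [1.0, 2.0, 3.0], 'cifar10:micro_dev:quantized')
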
Example 5
def _cmd_analyze(args):
    """Analyze an existing autotuning log for args.model_spec."""
    log_util.config([], logging.INFO, console_only=True)
    model_inst, _ = model.instantiate_from_spec(args.model_spec)
    tasks = model_inst.extract_tunable_tasks(model_inst.build_model())
    analyze(args, model_inst, tasks, args.log_file, promote=args.promote)


def main():
    """Generate a standalone project for the first model spec given."""
    args = parse_args()
    model_inst, _ = model.instantiate_from_spec(args.model_specs[0])
    generate_project(model_inst, args.project_dir)