Example #1
def analyze(args, model_inst, tasks, tmp_log_file, promote=False):
    _LOG.info('Best configs:')
    # Load the tuning log once; query the best record for each task.
    dispatch_context = tvm.autotvm.apply_history_best(tmp_log_file)
    for i, task in enumerate(reversed(tasks)):
        best_config = dispatch_context.query(task.target, task.workload)
        _LOG.info(f'  task.target: {task.target}')
        _LOG.info(f'  task {i}: {best_config}')

    # store best record in a cache file
    best_log_file = os.path.splitext(tmp_log_file)[0]
    tvm.autotvm.record.pick_best(tmp_log_file, best_log_file)
    _LOG.info(f'Wrote best configs to {best_log_file}')
    if promote:
        job_name = autotvm_log_util.compute_job_name(args.model_spec,
                                                     model_inst)
        autotvm_log_util.promote(job_name, best_log_file)
        _LOG.info(
            f'Promoted {best_log_file} to the default tuning log for model spec {args.model_spec}'
        )
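
The best-record log written by analyze() is what a later build consumes. As a
hedged illustration (not code from this repository), the standard TVM pattern
for applying such a log looks like the sketch below; mod, params, and target
are assumed to be supplied by the caller:

from tvm import autotvm, relay

def build_with_tuned_log(mod, params, target, best_log_file):
    # apply_history_best installs a dispatch context, so relay.build picks up
    # the tuned schedules recorded in the log for any matching workloads.
    with autotvm.apply_history_best(best_log_file):
        return relay.build(mod, target=target, params=params)
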
Example #2
def main():
    args = parse_args()

    log_util.config(['eval', '-'.join(args.model_specs)], level=args.log_level)

    model_inst_setting = collections.OrderedDict()
    for spec in args.model_specs:
        assert spec not in model_inst_setting, f'spec {spec} given twice'
        model_inst_setting[spec] = model.instantiate_from_spec(spec)

    validate_against = None
    if args.validate_against:
        assert args.validate_against not in model_inst_setting, (
            f'--validate-against={args.validate_against} also given in model_specs '
            'command-line argument')
        validate_against = model.instantiate_from_spec(args.validate_against)
        model_inst_setting[args.validate_against] = validate_against

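    # All models must share a dataset generator; take its name from the
    # first model so one generator can produce samples for every evaluation.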
    dataset_generator_name = next(iter(
        model_inst_setting.values()))[0].dataset_generator_name()
    for spec, (m, _) in model_inst_setting.items():
        assert m.dataset_generator_name() == dataset_generator_name, (
            f'expected all models to share the same dataset, but {spec} has '
            f'{m.dataset_generator_name()}')
    dataset_gen = dataset.DatasetGenerator.instantiate(
        dataset_generator_name, {'shuffle': not validate_against})

    util.DEBUG_MODE = args.use_debug_executor

    samples = dataset_gen.generate(args.num_samples)
    results = {}
    with contextlib.ExitStack() as all_models_stack:
        if args.debug_micro_execution:
            _LOG.warning(
                'NOTE: to debug micro execution, compiled object files will be left in your '
                'temp directory at: %s',
                contrib_utils.TempDirectory._DEBUG_PARENT_DIR)
            _LOG.warning(
                'This is a limitation of the current microTVM compilation structure'
            )

            all_models_stack.enter_context(
                contrib_utils.TempDirectory.set_keep_for_debug())

        for spec, (model_inst, setting) in model_inst_setting.items():
            with contextlib.ExitStack() as model_stack:
                if args.use_tuned_schedule:
                    if args.use_tuned_schedule == USE_DEFAULT_TUNED_SCHEDULE:
                        tuned_schedule = autotvm_log_util.get_default_log_path(
                            autotvm_log_util.compute_job_name(
                                spec, model_inst))
                        if not os.path.exists(tuned_schedule):
                            _LOG.warning(
                                'Tuned schedule for %s not found; proceeding without: %s',
                                spec, tuned_schedule)
                            tuned_schedule = None

                    else:
                        tuned_schedule = args.use_tuned_schedule

                    if tuned_schedule is not None:
                        model_stack.enter_context(
                            autotvm.apply_history_best(tuned_schedule))

                compiled = model_inst.build_model()
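                # Dispatch to the module-level eval_<setting> function for
                # this model's setting.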
                results[spec] = globals()[f'eval_{setting}'](args, model_inst,
                                                             compiled, samples)

    if args.validate_against:
        for i in range(args.num_samples):
            allclose = {}
            for model_spec in args.model_specs:
                allclose[model_spec] = np.allclose(
                    results[model_spec][i]['label'].astype('float32'), results[
                        args.validate_against][i]['label'].astype('float32'))

            _LOG.info(f'Sample {i} ---->')
            rows = []
            row_levels = []
            rows.append([['model_name', 'setting', 'config']] +
                        list(range(10)))

            def _add_row(model_spec, values, level=logging.INFO):
                model_spec_parts = model_spec.split(':', 3)
                if len(model_spec_parts) == 2:
                    model_spec_parts.append('')

                rows.append([model_spec_parts] + values)
                row_levels.append(level)

            for model_spec in args.model_specs:
                if model_spec != args.validate_against:
                    # Log rows whose outputs diverge from the reference at
                    # ERROR so they stand out.
                    if not allclose[model_spec]:
                        level = logging.ERROR
                    else:
                        level = logging.INFO
                    _add_row(model_spec, list(results[model_spec][i]['label']),
                             level)

            _add_row(args.validate_against,
                     results[args.validate_against][i]['label'].tolist())

            spacings = []
            for c in range(0, 3):
                spacing = max(len(r[0][c]) + 1 for r in rows)
                spacings.append(f'{{0:{spacing}s}}')

            _LOG.info(''.join(
                [spacings[c].format(rows[0][0][c]) for c in range(0, 3)] +
                ['{0:5d}'.format(c) for c in rows[0][1:]]))
            for level, r in zip(row_levels, rows[1:]):
                model_spec_parts = ''.join(
                    [spacings[c].format(r[0][c]) for c in range(0, 3)])
                result_str = ''.join([' {0:+04d}'.format(y) for y in r[1:]])
                _LOG.log(level, '%s%s', model_spec_parts, result_str)
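
main() resolves each evaluator dynamically via globals()[f'eval_{setting}'],
so the module is expected to define one eval_<setting> function per supported
setting. The sketch below only illustrates the contract that main() relies on;
eval_example and model_inst.run_one are hypothetical names, not APIs from this
repository:

def eval_example(args, model_inst, compiled, samples):
    # Hypothetical setting-specific evaluator. main() only requires that the
    # return value is indexable per sample and that each entry carries a
    # 'label' array, because validation reads results[spec][i]['label'].
    results = []
    for sample in samples:
        label = model_inst.run_one(compiled, sample)  # hypothetical helper
        results.append({'label': label})
    return results
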
Example #3
def tune_model(args, transport_launcher, model_inst):
    compiled_model = model_inst.build_model()
    tasks = model_inst.extract_tunable_tasks(compiled_model)
    _LOG.info(f'extracted {len(tasks)} tasks: {tasks}')
    for i, t in enumerate(tasks):
        _LOG.info(f' * Task {i:d}: config space is {len(t.config_space)}')

    if args.single_task_index is not None:
        assert len(tasks) > args.single_task_index, (
            f'--single-task-index={args.single_task_index}, but extracted only {len(tasks)} tasks'
        )

        tasks = [tasks[args.single_task_index]]

    _LOG.info('[Tuning]')
    logging.getLogger('autotvm').setLevel(logging.INFO)

    # create tmp log file
    job_name = autotvm_log_util.compute_job_name(args.model_spec, model_inst)
    tuning_log = autotvm_log_util.gen_tuning_log_path(job_name)
    tmp_log_file = f'{tuning_log}.tmp'
    assert not os.path.exists(tmp_log_file)

    tmp_log_file_dir = os.path.dirname(tmp_log_file)
    if not os.path.isdir(tmp_log_file_dir):
        os.makedirs(tmp_log_file_dir)

    for i, task in enumerate(tasks):
        with contextlib.ExitStack() as exit_stack:
            section_constraints = model_inst.section_constraints(
                task_index_and_task=(i, task))
            if args.pre_launched_tracker_hostport:
                tracker_host, tracker_port = args.pre_launched_tracker_hostport.rsplit(
                    ':', 1)
                tracker_port = int(tracker_port)
                _LOG.warning(
                    'with pre-launched tracker, the micro.Session device config may become '
                    'out of sync with the device config used here to build models'
                )
                target_num_servers = 1
            else:
                tracker_host, tracker_port = transport_launcher.tracker_host_port_tuple
                exit_stack.enter_context(
                    transport_launcher.launch(
                        stm32f746xx.generate_config,
                        {'section_constraints': section_constraints}))
                target_num_servers = transport_launcher.num_instances

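            # Poll the tracker until the expected number of RPC servers have
            # registered under this key.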
            num_servers = 0
            while num_servers < target_num_servers:
                num_servers = get_num_devices(
                    tracker_host, tracker_port,
                    transport_launcher.rpc_tracker_key)
                if num_servers < target_num_servers:
                    _LOG.info(
                        f'Found {num_servers} RPC servers under key {transport_launcher.rpc_tracker_key}, '
                        f'waiting for {target_num_servers} total to become available'
                    )
            _LOG.info(
                f'Discovered {num_servers} available RPC servers for key '
                f'{transport_launcher.rpc_tracker_key}')
            assert num_servers > 0, (
                f'No servers available on the tracker for key {transport_launcher.rpc_tracker_key}'
            )

            dev_config = stm32f746xx.generate_config(
                tracker_host,
                tracker_port,
                section_constraints=section_constraints)

            measure_option = model_inst.get_autotvm_measure_option(
                num_servers, tracker_host, tracker_port,
                transport_launcher.rpc_tracker_key, dev_config, i, task)

            _tune_one_task(args, measure_option, i, len(tasks), tmp_log_file,
                           task)

    return tasks, tmp_log_file
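
tune_model() delegates the per-task work to _tune_one_task, which is not shown
in this example. A hedged sketch of what such a helper conventionally looks
like with the stock autotvm tuner API is given below; the tuner choice and the
n_trial/early_stopping values are illustrative, and the real helper may differ:

from tvm import autotvm
from tvm.autotvm.tuner import XGBTuner

def _tune_one_task(args, measure_option, i, num_tasks, tmp_log_file, task):
    # Conventional autotvm tuning loop: measured records are appended to
    # tmp_log_file, which analyze() later distills with
    # tvm.autotvm.record.pick_best().
    tuner = XGBTuner(task, loss_type='rank')
    n_trial = min(1024, len(task.config_space))
    tuner.tune(
        n_trial=n_trial,
        early_stopping=None,
        measure_option=measure_option,
        callbacks=[
            autotvm.callback.progress_bar(n_trial,
                                          prefix=f'[Task {i + 1}/{num_tasks}]'),
            autotvm.callback.log_to_file(tmp_log_file),
        ])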