Code Example #1
def test_check_correctness():
    task, target = get_sample_task()

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(check_correctness=True))

    def _callback_correct(tuner, measure_inputs, measure_results):
        for _, res in zip(measure_inputs, measure_results):
            assert res.error_no == 0

    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=2,
               measure_option=measure_option,
               callbacks=[_callback_correct])

    # a bad template
    n = 128
    target = tvm.target.Target("llvm -device=bad_device")
    task = autotvm.task.create("testing/bad_matmul",
                               args=(n, n, n, "float32"),
                               target=target)

    def _callback_wrong(tuner, measure_inputs, measure_results):
        for _, res in zip(measure_inputs, measure_results):
            assert res.error_no == MeasureErrorNo.WRONG_ANSWER

    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=2,
               measure_option=measure_option,
               callbacks=[_callback_wrong])
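Every example in this collection calls a get_sample_task() helper that lives in TVM's shared test module (tests/python/unittest/test_autotvm_common.py) rather than in the snippets themselves. The sketch below shows roughly what such a helper can look like, assuming a tiny "testing/matmul" template with two split knobs; the template body, knob names, and default size are illustrative, not the exact upstream code.

import tvm
from tvm import te, autotvm


@autotvm.template("testing/matmul")
def matmul(N, L, M, dtype):
    """A tiny tunable matmul template (illustrative sketch)."""
    A = te.placeholder((N, L), name="A", dtype=dtype)
    B = te.placeholder((L, M), name="B", dtype=dtype)
    k = te.reduce_axis((0, L), name="k")
    C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
    s = te.create_schedule(C.op)

    # Two simple tuning knobs: how to split the two spatial axes.
    y, x = s[C].op.axis
    cfg = autotvm.get_config()
    cfg.define_split("tile_y", y, num_outputs=2)
    cfg.define_split("tile_x", x, num_outputs=2)
    yo, yi = cfg["tile_y"].apply(s, C, y)
    xo, xi = cfg["tile_x"].apply(s, C, x)
    s[C].reorder(yo, xo, yi, xi)
    return s, [A, B, C]


def get_sample_task(n=128):
    """Return a small (task, target) pair for the tests in this collection (sketch)."""
    target = tvm.target.Target("llvm")
    task = autotvm.task.create("testing/matmul", args=(n, n, n, "float32"), target=target)
    return task, target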
Code Example #2
def test_file_io():
    temp = utils.tempdir()
    file_path = temp.relpath("temp.log")

    tsk, target = get_sample_task()
    inputs = [
        MeasureInput(target, tsk, tsk.config_space.get(i))
        for i in range(0, 10)
    ]
    results = [MeasureResult((i, ), 0, 0, 0) for i in range(0, 10)]

    invalid_inp = MeasureInput(target, tsk, tsk.config_space.get(10))
    invalid_res = MeasureResult((10, ), 0, 0, 0)

    # Erase the entity map to test if it will be ignored when loading back.
    invalid_inp.config._entity_map = {}

    with open(file_path, "w") as fo:
        cb = autotvm.callback.log_to_file(fo)
        cb(None, inputs, results)
        cb(None, [invalid_inp], [invalid_res])

    ref = zip(inputs, results)
    for x, y in zip(ref, autotvm.record.load_from_file(file_path)):
        assert x[1] == y[1]
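The log written by autotvm.callback.log_to_file is a plain text file, so it can also be post-processed outside of tests. A short, hedged sketch follows; "temp.log" stands for the file written above, and autotvm.record.pick_best is assumed to behave as in mainline TVM, keeping only the best record per workload.

from tvm import autotvm

# Read every record back as (MeasureInput, MeasureResult) pairs.
records = list(autotvm.record.load_from_file("temp.log"))

# Keep only the best record per workload in a second, smaller log.
autotvm.record.pick_best("temp.log", "temp_best.log")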
Code Example #3
def test_task_tuner_without_measurement():
    """test task and tuner without measurement"""
    task, target = get_sample_task()

    class DummyRunner(Runner):
        def __init__(self):
            super(DummyRunner, self).__init__(1, 1)

        def run(self, measure_inputs, build_results):
            return [MeasureResult((np.random.random(),), 0, 0.2, time.time())
                    for _ in range(len(measure_inputs))]

        def get_build_kwargs(self):
            return {}

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=DummyRunner()
    )

    logging.info("%s", task.config_space)

    for tuner_class in [autotvm.tuner.RandomTuner,
                        autotvm.tuner.GridSearchTuner,
                        autotvm.tuner.GATuner,
                        autotvm.tuner.XGBTuner]:
        tuner = tuner_class(task)
        tuner.tune(n_trial=10, measure_option=measure_option)
        assert tuner.best_flops > 1
Code Example #4
def test_check_hashmismatch():
    logging.info("test hash mismatch check")

    task, target = get_sample_task()

    ctx = tvm.context(str(target))
    if not ctx.exist:
        logging.warning(
            "Skip this test because there is no supported device for test")

    measure_option = autotvm.measure_option('local', do_fork=False)
    measure_batch = autotvm.measure.create_measure_batch(task, measure_option)

    inputs = list()
    cfg = task.config_space.get(np.random.randint(len(task.config_space)))
    # notvalidh is not a valid CRC32 hash (not hex)
    cfg.code_hash = 'notvalidh'
    inputs.append((MeasureInput(target, task, cfg)))

    try:
        results = measure_batch(inputs)
        assert False, "HashMismatchError should be raised"
    except HashMismatchError:
        pass

    del measure_batch
Code Example #5
def test_task_tuner_without_measurement():
    """test task and tuner without measurement"""
    task, target = get_sample_task()

    class DummyRunner(Runner):
        def __init__(self):
            super(DummyRunner, self).__init__(1, 1)

        def run(self, measure_inputs, build_results):
            return [
                MeasureResult((np.random.random(), ), 0, 0.2, time.time())
                for _ in range(len(measure_inputs))
            ]

        def get_build_kwargs(self):
            return {}

    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(),
                                            runner=DummyRunner())

    logging.info("%s", task.config_space)

    for tuner_class in [
            autotvm.tuner.RandomTuner, autotvm.tuner.GridSearchTuner,
            autotvm.tuner.GATuner, autotvm.tuner.XGBTuner
    ]:
        tuner = tuner_class(task)
        tuner.tune(n_trial=10, measure_option=measure_option)
        assert tuner.best_flops > 1
Code Example #6
def test_check_correctness():
    task, target = get_sample_task()

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(check_correctness=True)
    )

    def _callback_correct(tuner, measure_inputs, measure_results):
        for inp, res in zip(measure_inputs, measure_results):
            assert res.error_no == 0

    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=2, measure_option=measure_option,
               callbacks=[_callback_correct])

    # a bad template
    n = 128
    target = tvm.target.create("llvm -device=bad_device")
    task = autotvm.task.create(bad_matmul, args=(n, n, n, 'float32'), target=target)

    def _callback_wrong(tuner, measure_inputs, measure_results):
        for inp, res in zip(measure_inputs, measure_results):
            assert res.error_no == MeasureErrorNo.WRONG_ANSWER

    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=2, measure_option=measure_option,
               callbacks=[_callback_wrong])
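The wrong-answer branch above passes a bad_matmul template (registered under the name "testing/bad_matmul" in Code Example #1) that deliberately computes incorrect results. Below is a hedged sketch of what such a template could look like; the off-by-one reduction is the illustrative "bug" that makes check_correctness report MeasureErrorNo.WRONG_ANSWER.

import tvm
from tvm import te, autotvm


@autotvm.template("testing/bad_matmul")
def bad_matmul(N, L, M, dtype):
    """A deliberately wrong matmul: the reduction skips the last element (sketch)."""
    A = te.placeholder((N, L), name="A", dtype=dtype)
    B = te.placeholder((L, M), name="B", dtype=dtype)
    k = te.reduce_axis((0, L - 1), name="k")  # off-by-one on purpose -> wrong answer
    C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
    s = te.create_schedule(C.op)

    y, x = s[C].op.axis
    cfg = autotvm.get_config()
    cfg.define_split("tile_y", y, num_outputs=2)
    cfg.define_split("tile_x", x, num_outputs=2)
    return s, [A, B, C]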
Code Example #7
def test_update():
    task, target = get_sample_task()
    tuner = autotvm.tuner.XGBTuner(task)
    n_records = 5
    records = get_sample_records(n=n_records)
    tuner.update([inp for inp, _ in records], [res for _, res in records])
    assert len(tuner.xs) == n_records
    assert len(tuner.ys) == n_records
    assert len(tuner.visited) == n_records
    assert all(x in tuner.visited for x in tuner.xs)
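test_update, like test_fit and test_tuner further down, also depends on a get_sample_records(n) helper from the shared test module. A plausible sketch is shown below, assuming it simply pairs the first n configurations of the sample task with dummy measurement results (it reuses the get_sample_task sketch given earlier).

import time

from tvm.autotvm.measure import MeasureInput, MeasureResult


def get_sample_records(n):
    """Return n (MeasureInput, MeasureResult) pairs with dummy timings (sketch)."""
    task, target = get_sample_task()
    records = []
    for i in range(n):
        inp = MeasureInput(target, task, task.config_space.get(i))
        res = MeasureResult((i + 1,), 0, i, time.time())  # costs, error_no, all_cost, timestamp
        records.append((inp, res))
    return records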
Code Example #8
File: test_autotvm_record.py  Project: LANHUIYING/tvm
def test_apply_history_best():
    tsk, target = get_sample_task()

    records = [
        (MeasureInput(target, tsk, tsk.config_space.get(0)), MeasureResult((0.1,), 0, 2.3, 0)),
        (MeasureInput(target, tsk, tsk.config_space.get(1)), MeasureResult((0.3,), 0, 2.3, 0)),
        (MeasureInput(target, tsk, tsk.config_space.get(2)), MeasureResult((0.01,), 0, 2.3, 0)),
        (MeasureInput(target, tsk, tsk.config_space.get(4)), MeasureResult((0.4,), 0, 2.3, 0))
    ]
    hist_best = ApplyHistoryBest(records)
    x = hist_best.query(target, tsk.workload)
    assert str(x) == str(tsk.config_space.get(2))
Code Example #9
def test_db_save_replay():
    logging.info("test db save (from measure_batch) and replay ...")
    _db = database.DummyDatabase()
    _db.flush()

    task, target = get_sample_task()

    ctx = tvm.context(str(target))
    if not ctx.exist:
        logging.warning(
            "Skip this test because there is no supported device for test")

    measure_option = autotvm.measure_option('local',
                                            do_fork=False,
                                            timeout=2,
                                            replay_db=_db)
    measure_batch = autotvm.measure.create_measure_batch(task, measure_option)

    batch_size = 2

    ct = 0
    all_inputs = list()
    all_results = list()
    batches = list()
    tuner = autotvm.tuner.RandomTuner(task)
    while ct < TRIAL_LIMIT:
        inputs = list()
        for i in range(batch_size):
            cfg = tuner.next_batch(1)[0]
            inputs.append((MeasureInput(target, task, cfg)))
            all_inputs.append(inputs[-1])
        batches.append(inputs)
        results = measure_batch(inputs)
        all_results += results
        ct += 1
    callback = autotvm.callback.log_to_database(_db)
    callback(None, all_inputs, all_results)

    assert len(_db.db.keys()) == batch_size * TRIAL_LIMIT, \
        "%d vs %d" % (len(_db.db.keys()), batch_size * TRIAL_LIMIT)

    all_results_2 = measure_batch(all_inputs)
    all_results_3 = measure_batch(all_inputs)

    for i in range(len(all_results)):
        encr1 = encode(all_inputs[i], all_results[i])
        encr2 = encode(all_inputs[i], all_results_2[i])
        encr3 = encode(all_inputs[i], all_results_3[i])
        assert encr1 == encr2, "EXPECTED MATCH WITH SAVE REPLAY (first replay), got MISMATCH"
        assert encr2 == encr3, "EXPECTED MATCH WITH SAVE REPLAY (second replay), got MISMATCH"

    del measure_batch
Code Example #10
def test_fit():
    task, target = get_sample_task()
    records = get_sample_records(n=500)

    base_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
    base_model.fit_log(records, plan_size=32)

    upper_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
    upper_model.load_basemodel(base_model)

    xs = np.arange(10)
    ys = np.arange(10)

    upper_model.fit(xs, ys, plan_size=32)
Code Example #11
def test_fit():
    task, target = get_sample_task()
    records = get_sample_records(n=500)

    base_model = XGBoostCostModel(task, feature_type='itervar', loss_type='rank')
    base_model.fit_log(records, plan_size=32)

    upper_model = XGBoostCostModel(task, feature_type='itervar', loss_type='rank')
    upper_model.load_basemodel(base_model)

    xs = np.arange(10)
    ys = np.arange(10)

    upper_model.fit(xs, ys, plan_size=32)
Code Example #12
def test_apply_history_best():
    tsk, target = get_sample_task()

    records = [(MeasureInput(target, tsk, tsk.config_space.get(0)),
                MeasureResult((0.1, ), 0, 2.3, 0)),
               (MeasureInput(target, tsk, tsk.config_space.get(1)),
                MeasureResult((0.3, ), 0, 2.3, 0)),
               (MeasureInput(target, tsk, tsk.config_space.get(2)),
                MeasureResult((0.01, ), 0, 2.3, 0)),
               (MeasureInput(target, tsk, tsk.config_space.get(4)),
                MeasureResult((0.4, ), 0, 2.3, 0))]
    hist_best = ApplyHistoryBest(records)
    x = hist_best.query(target, tsk.workload)
    assert str(x) == str(tsk.config_space.get(2))
Code Example #13
File: test_autotvm_record.py  Project: LANHUIYING/tvm
def test_file_io():
    temp = util.tempdir()
    file_path = temp.relpath("temp.log")

    tsk, target = get_sample_task()
    inputs = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(0, 10)]
    results = [MeasureResult((i, ), 0, 0, 0) for i in range(0, 10)]

    with open(file_path, "w") as fo:
        cb = autotvm.callback.log_to_file(fo)
        cb(None, inputs, results)

    ref = zip(inputs, results)
    for x, y in zip(ref, autotvm.record.load_from_file(file_path)):
        assert x[1] == y[1]
Code Example #14
File: test_autotvm_record.py  Project: LANHUIYING/tvm
def test_load_dump():
    task, target = get_sample_task()

    inp = MeasureInput(target, task, task.config_space.get(0))
    result = MeasureResult((2.0, 2.23, 0.23, 0.123, 0.234, 0.123), MeasureErrorNo.NO_ERROR,
                           2.3, time.time())

    for protocol in ['json', 'pickle']:
        row = encode(inp, result, protocol=protocol)
        inp_2, result_2 = decode(row, protocol=protocol)

        assert measure_str_key(inp) == measure_str_key(inp_2), \
            "%s vs %s" % (measure_str_key(inp), measure_str_key(inp_2))
        assert result.costs == result_2.costs
        assert result.error_no == result_2.error_no
        assert result.timestamp == result_2.timestamp
Code Example #15
def test_load_dump():
    task, target = get_sample_task()

    inp = MeasureInput(target, task, task.config_space.get(0))
    result = MeasureResult((2.0, 2.23, 0.23, 0.123, 0.234, 0.123),
                           MeasureErrorNo.NO_ERROR, 2.3, time.time())

    for protocol in ['json', 'pickle']:
        row = encode(inp, result, protocol=protocol)
        inp_2, result_2 = decode(row, protocol=protocol)

        assert measure_str_key(inp) == measure_str_key(inp_2), \
            "%s vs %s" % (measure_str_key(inp), measure_str_key(inp_2))
        assert result.costs == result_2.costs
        assert result.error_no == result_2.error_no
        assert result.timestamp == result_2.timestamp
Code Example #16
def test_tuner():
    task, target = get_sample_task()
    records = get_sample_records(n=10)

    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=10)
    # Confirm that loading history successfully loaded a
    # base_model.
    assert tuner.cost_model.base_model is not None

    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=11)
    # Confirm that loading history did not load base_model
    # when not enough records according to `min_seed_records`
    # are provided
    assert tuner.cost_model.base_model is None
Code Example #17
def test_task_tuner_without_measurement():
    """test task and tuner without measurement"""
    task, _ = get_sample_task()

    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(),
                                            runner=DummyRunner())

    logging.info("%s", task.config_space)

    for tuner_class in [
            autotvm.tuner.RandomTuner, autotvm.tuner.GridSearchTuner,
            autotvm.tuner.GATuner, autotvm.tuner.XGBTuner
    ]:
        tuner = tuner_class(task)
        tuner.tune(n_trial=10, measure_option=measure_option)
        assert tuner.best_flops > 1
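Note that in this example, as well as in Code Examples #19 and #22 below, DummyRunner is not defined inline; it is the same stub runner class shown in Code Examples #3 and #5, presumably imported from the shared test helpers.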
Code Example #18
def test_min_repeat_ms():
    task, target = get_sample_task()

    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(),
                                            runner=autotvm.LocalRunner(
                                                number=1, min_repeat_ms=100))

    def _callback(tuner, measure_inputs, measure_results):
        for inp, res in zip(measure_inputs, measure_results):
            if res.error_no != 0:
                continue

            assert 1000 * np.mean(res.costs) * \
                   measure_option['runner'].cur_number >= 100

    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=5, measure_option=measure_option, callbacks=[_callback])
Code Example #19
def test_random_tuner():
    """Test RandomTuner"""

    task, _ = get_sample_task()
    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())

    tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15))
    assert tuner.range_length == 8
    assert tuner.index_offset == 8

    # Tuner should only focus on the specified range and should visit all indices
    tuner.tune(n_trial=8, measure_option=measure_option)
    assert tuner.counter == 8
    assert not tuner.has_next()
    visited = set()
    for idx in tuner.visited:
        assert idx not in visited
        assert 8 <= idx <= 15
        visited.add(idx)  # record the index so the duplicate check above is meaningful
Code Example #20
def test_file_io():
    temp = util.tempdir()
    file_path = temp.relpath("temp.log")

    tsk, target = get_sample_task()
    inputs = [
        MeasureInput(target, tsk, tsk.config_space.get(i))
        for i in range(0, 10)
    ]
    results = [MeasureResult((i, ), 0, 0, 0) for i in range(0, 10)]

    with open(file_path, "w") as fo:
        cb = autotvm.callback.log_to_file(fo)
        cb(None, inputs, results)

    ref = zip(inputs, results)
    for x, y in zip(ref, autotvm.record.load_from_file(file_path)):
        assert x[1] == y[1]
Code Example #21
def test_min_repeat_ms():
    task, target = get_sample_task()

    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(number=1, min_repeat_ms=100)
    )

    def _callback(tuner, measure_inputs, measure_results):
        for inp, res in zip(measure_inputs, measure_results):
            if res.error_no != 0:
                continue

            assert 1000 * np.mean(res.costs) * \
                   measure_option['runner'].cur_number >= 100

    tuner = autotvm.tuner.RandomTuner(task)
    tuner.tune(n_trial=5, measure_option=measure_option,
               callbacks=[_callback])
Code Example #22
def test_gridsearch_tuner():
    """Test GridSearchTuner"""

    task, _ = get_sample_task()
    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())

    # When no range index, range_length should be the length of config space
    tuner = autotvm.tuner.GridSearchTuner(task)
    assert tuner.range_length == len(task.config_space)
    assert tuner.index_offset == 0

    # With range index, range_length should be the length of the specified range
    tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15))
    assert tuner.range_length == 8
    assert tuner.index_offset == 8

    # Tuner should only focus on the specified range
    tuner.tune(n_trial=8, measure_option=measure_option)
    assert tuner.counter == 8
    assert not tuner.has_next()
Code Example #23
def test_db_filter():
    logging.info("test db filter ...")

    # Pick a GPU target because there are more likely to be failures/invalid configs
    task, target = get_sample_task()

    ctx = tvm.context(str(target))
    if not ctx.exist:
        logging.warning(
            "Skip this test because there is no supported device for test")

    batch_size = 2

    measure_option = autotvm.measure_option('local', do_fork=False, timeout=2)
    measure_batch = autotvm.measure.create_measure_batch(task, measure_option)

    ct = 0
    all_inputs = list()
    all_results = list()
    batches = list()
    tuner = autotvm.tuner.RandomTuner(task)
    while ct < TRIAL_LIMIT:
        inputs = list()
        for i in range(batch_size):
            cfg = tuner.next_batch(1)[0]
            inputs.append((MeasureInput(target, task, cfg)))
            all_inputs.append(inputs[-1])
        batches.append(inputs)
        results = measure_batch(inputs)
        all_results += results
        ct += 1

    del measure_batch

    db = database.DummyDatabase()
    db.flush()

    # First setting, memoize one input at a time, check that each is saved and replayed
    measure_option = autotvm.measure_option('local',
                                            do_fork=False,
                                            timeout=2,
                                            replay_db=db)
    measure_batch = autotvm.measure.create_measure_batch(task, measure_option)

    for i in range(len(all_inputs) + 1):
        db.flush()
        for j in range(i):
            db.save(all_inputs[j], all_results[j])

        for k in range(len(batches)):
            batch = batches[k]
            batch_result = measure_batch(batch)
            for l in range(batch_size):
                all_idx = k * batch_size + l
                assert batch_result[l] is not None
                if all_idx < i:
                    assert encode(batch[l], batch_result[l]) == encode(batch[l], all_results[all_idx]), \
                        "(no retry) EXPECTED MATCH, GOT MISMATCH"
                else:
                    assert encode(batch[l], batch_result[l]) != encode(batch[l], all_results[all_idx]), \
                        "(no retry) EXPECTED MISMATCH, GOT MATCH"

    del measure_batch
Code Example #24
def test_tuner():
    task, target = get_sample_task()
    records = get_sample_records(n=100)

    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records)