Example #1
0
def test_file_io():
    """Write measurement records to a log file and read them back.

    An extra record with its config entity map erased is appended; the
    original comment states loading should ignore it.
    """
    tmp_dir = utils.tempdir()
    log_path = tmp_dir.relpath("temp.log")

    tsk, target = get_sample_task()
    inputs = [MeasureInput(target, tsk, tsk.config_space.get(idx)) for idx in range(10)]
    results = [MeasureResult((idx,), 0, 0, 0) for idx in range(10)]

    invalid_inp = MeasureInput(target, tsk, tsk.config_space.get(10))
    invalid_res = MeasureResult((10,), 0, 0, 0)

    # Erase the entity map to test if it will be ignored when loading back.
    invalid_inp.config._entity_map = {}

    with open(log_path, "w") as out_file:
        log_cb = autotvm.callback.log_to_file(out_file)
        log_cb(None, inputs, results)
        log_cb(None, [invalid_inp], [invalid_res])

    # Compare each loaded (input, result) pair against what was written.
    expected_pairs = zip(inputs, results)
    for expected, loaded in zip(expected_pairs, autotvm.record.load_from_file(log_path)):
        assert expected[1] == loaded[1]
Example #2
0
def test_file_io():
    """Round-trip records through a log file, then load via ApplyHistoryBest.

    Also checks that ApplyHistoryBest accepts a list of log files.
    """
    tmp_dir = utils.tempdir()
    log_path = tmp_dir.relpath("temp.log")

    tsk, target = get_sample_task()
    inputs = [MeasureInput(target, tsk, tsk.config_space.get(idx)) for idx in range(10)]
    results = [MeasureResult((idx,), 0, 0, 0) for idx in range(10)]

    invalid_inp = MeasureInput(target, tsk, tsk.config_space.get(10))
    invalid_res = MeasureResult((10,), 0, 0, 0)

    # Erase the entity map to test if it will be ignored when loading back.
    invalid_inp.config._entity_map = {}

    with open(log_path, "w") as out_file:
        log_cb = autotvm.callback.log_to_file(out_file)
        log_cb(None, inputs, results)
        log_cb(None, [invalid_inp], [invalid_res])

    # Compare each loaded (input, result) pair against what was written.
    expected_pairs = zip(inputs, results)
    for expected, loaded in zip(expected_pairs, autotvm.record.load_from_file(log_path)):
        assert expected[1] == loaded[1]

    # Confirm functionality of multiple file loads
    hist_best = ApplyHistoryBest([log_path, log_path])
    best = hist_best.query(target, tsk.workload)
    assert str(best) == str(inputs[0][2])
def test_update():
    """Feed sample records to an XGBTuner and verify its bookkeeping state."""
    task, _target = get_sample_task()
    tuner = autotvm.tuner.XGBTuner(task)
    n_records = 5
    records = get_sample_records(n=n_records)

    # Split the (input, result) pairs into parallel lists for update().
    measure_inputs, measure_results = zip(*records)
    tuner.update(list(measure_inputs), list(measure_results))

    # One entry per record should land in xs/ys/visited, and every
    # sampled index must be marked as visited.
    assert len(tuner.xs) == n_records
    assert len(tuner.ys) == n_records
    assert len(tuner.visited) == n_records
    assert all(x in tuner.visited for x in tuner.xs)
def test_tuner():
    """Check when XGBTuner.load_history seeds a base cost model."""
    task, _target = get_sample_task()
    records = get_sample_records(n=10)

    # With min_seed_records <= number of records, a base_model is loaded.
    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=10)
    assert tuner.cost_model.base_model is not None

    # With min_seed_records > number of records, no base_model is loaded
    # because not enough seed records are available.
    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=11)
    assert tuner.cost_model.base_model is None
Example #5
0
def test_apply_history_best():
    """ApplyHistoryBest.query should return the lowest-cost config."""
    tsk, target = get_sample_task()

    # (config index, measured cost) pairs; index 2 carries the lowest cost.
    timings = [(0, 0.1), (1, 0.3), (2, 0.01), (4, 0.4)]
    records = [
        (
            MeasureInput(target, tsk, tsk.config_space.get(idx)),
            MeasureResult((cost,), 0, 2.3, 0),
        )
        for idx, cost in timings
    ]

    hist_best = ApplyHistoryBest(records)
    best = hist_best.query(target, tsk.workload)
    assert str(best) == str(tsk.config_space.get(2))
def test_task_tuner_without_measurement():
    """Run each tuner with a dummy runner so no real measurement occurs."""
    task, _ = get_sample_task()

    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())

    logging.info("%s", task.config_space)

    tuner_classes = (
        autotvm.tuner.RandomTuner,
        autotvm.tuner.GridSearchTuner,
        autotvm.tuner.GATuner,
        autotvm.tuner.XGBTuner,
    )
    # Every tuner flavor should produce a non-trivial best_flops even
    # with the dummy runner.
    for make_tuner in tuner_classes:
        tuner = make_tuner(task)
        tuner.tune(n_trial=10, measure_option=measure_option)
        assert tuner.best_flops > 1
def test_fit():
    """Fit an upper XGBoost cost model on top of a log-trained base model."""
    task, _target = get_sample_task()
    records = get_sample_records(n=500)

    # Train the base model directly from measurement records.
    base_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
    base_model.fit_log(records, plan_size=32)

    # Transfer the base model into a fresh cost model, then fit it
    # on a small set of config indices.
    upper_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
    upper_model.load_basemodel(base_model)

    sample_xs = np.arange(10)
    sample_ys = np.arange(10)

    upper_model.fit(sample_xs, sample_ys, plan_size=32)
Example #8
0
def test_load_dump():
    """Encode a measurement pair and check that decode restores it,
    for both the json and pickle record protocols."""
    task, target = get_sample_task()

    inp = MeasureInput(target, task, task.config_space.get(0))
    result = MeasureResult(
        (2.0, 2.23, 0.23, 0.123, 0.234, 0.123),
        MeasureErrorNo.NO_ERROR,
        2.3,
        time.time(),
    )

    for protocol in ["json", "pickle"]:
        encoded = encode(inp, result, protocol=protocol)
        decoded_inp, decoded_res = decode(encoded, protocol=protocol)

        key_before = measure_str_key(inp)
        key_after = measure_str_key(decoded_inp)
        assert key_before == key_after, "%s vs %s" % (key_before, key_after)
        assert result.costs == decoded_res.costs
        assert result.error_no == decoded_res.error_no
        assert result.timestamp == decoded_res.timestamp
Example #9
0
def test_random_tuner():
    """Test that RandomTuner honors a restricted index range and does
    not visit any index twice."""

    task, _ = get_sample_task()
    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(),
                                            runner=DummyRunner())

    tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15))
    assert tuner.range_length == 8
    assert tuner.index_offset == 8

    # Tuner should only focus on the specified range and should visit all indices
    tuner.tune(n_trial=8, measure_option=measure_option)
    assert tuner.counter == 8
    assert not tuner.has_next()
    visited = set()
    for idx in tuner.visited:
        # Each index must be unique and fall inside the configured range.
        # Bug fix: the original never added idx to `visited`, so the
        # duplicate check below could never fire.
        assert idx not in visited
        visited.add(idx)
        assert 8 <= idx <= 15
Example #10
0
def test_gridsearch_tuner():
    """GridSearchTuner should default to the full config space and
    respect an explicit index range."""

    task, _ = get_sample_task()
    measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(),
                                            runner=DummyRunner())

    # Without range_idx, the tuner spans the entire config space.
    full_tuner = autotvm.tuner.GridSearchTuner(task)
    assert full_tuner.range_length == len(task.config_space)
    assert full_tuner.index_offset == 0

    # With range_idx=(8, 15), only those eight indices are in play.
    ranged_tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15))
    assert ranged_tuner.range_length == 8
    assert ranged_tuner.index_offset == 8

    # Eight trials should exhaust the restricted range exactly.
    ranged_tuner.tune(n_trial=8, measure_option=measure_option)
    assert ranged_tuner.counter == 8
    assert not ranged_tuner.has_next()
def test_fit():
    """Fit an upper XGBoost cost model on a transferred base model and
    run predictions on inputs of differing lengths."""
    task, _target = get_sample_task()
    records = get_sample_records(n=500)

    # Train the base model directly from measurement records.
    base_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
    base_model.fit_log(records, plan_size=32)

    # Transfer the base model into a fresh cost model, then fit it
    # on a small set of config indices.
    upper_model = XGBoostCostModel(task, feature_type="itervar", loss_type="rank")
    upper_model.load_basemodel(base_model)

    sample_xs = np.arange(10)
    sample_ys = np.arange(10)

    upper_model.fit(sample_xs, sample_ys, plan_size=32)

    # feature lengths are not guaranteed to always be the same
    upper_model.predict(np.ones(12))
    upper_model.predict(np.ones(8))