Example #1
def test_named_timer(profiling_env):
    buf = StringIO()

    prof.start("foo")
    prof.stop("foo", file=buf)

    out = buf.getvalue()
    assert " foo " == re.search(" foo ", out).group(0)
Example #2
    def test_named_timer(self):
        buf = StringIO()

        prof.start("foo")
        prof.stop("foo", file=buf)

        out = buf.getvalue()
        self._test(" foo ", re.search(" foo ", out).group(0))
Example #3
def eval_classifier_instance(job, db, classifier, instance,
                             err_fn, training):
    # Get relevant values from instance.
    oracle = instance.get_string_value(instance.class_index)
    scenario = instance.get_string_value(0)

    # Get default value.
    try:
        baseline = training.default
    except AttributeError:
        training.default = get_one_r(db, training)
        baseline = training.default

    # Classify instance, and convert to params ID.
    prof.start()
    value = classifier.classify_instance(instance)
    elapsed = prof.elapsed()
    attr = instance.dataset.attribute(instance.class_index)
    predicted = attr.value(value)

    correct = 1 if predicted == oracle else 0
    if correct:
        illegal = 0
        refused = 0
        performance, speedup = perf_fn(db, scenario, predicted,
                                       oracle, baseline)
    else:
        # Determine whether the predicted workgroup size is valid. A
        # valid prediction is one whose workgroup size fits within the
        # max_wgsize for this particular instance.
        max_wgsize_attr = instance.dataset.attribute_by_name("kern_max_wg_size")
        max_wgsize_attr_index = max_wgsize_attr.index
        max_wgsize = int(instance.get_value(max_wgsize_attr_index))

        wg_c, wg_r = unhash_params(predicted)

        illegal = 0 if wg_c * wg_r < max_wgsize else 1
        if illegal:
            refused = 0
        else:
            try:
                db.runtime(scenario, predicted)
                refused = 0
            except lab.db.Error:
                refused = 1

        if illegal or refused:
            predicted = err_fn(instance, max_wgsize, wg_c, wg_r, baseline)

        performance, speedup = perf_fn(db, scenario, predicted,
                                       oracle, baseline)

    db.add_classification_result(job, classifier,
                                 err_fn, training, scenario,
                                 oracle, predicted, baseline, correct, illegal,
                                 refused, performance, speedup, elapsed)
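
Examples #3, #7, and #8 round-trip workgroup sizes through unhash_params and hash_params without defining them. Below is a minimal sketch of the pair, assuming a hypothetical "<columns>x<rows>" string encoding for the params ID; the actual encoding is not shown in these examples:

def hash_params(wg_c, wg_r):
    """Encode a (columns, rows) workgroup size as a params ID string."""
    return "{c}x{r}".format(c=wg_c, r=wg_r)


def unhash_params(params):
    """Decode a params ID string back into integer (columns, rows)."""
    wg_c, wg_r = params.split("x")
    return int(wg_c), int(wg_r)
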
Example #4
  def get_oclverified(self, s: session_t) -> bool:
    if self.oclverified is None:
      prof.start("dsmith oclgrind")

      testcase = s.query(Testcase) \
        .filter(Testcase.id == self.id) \
        .scalar()

      self.oclverified = oclgrind.verify_dsmith_testcase(testcase)
      s.commit()
      prof.stop("dsmith oclgrind")

    return self.oclverified
Example #5
def eval_classifiers(db, classifiers, err_fns, job, training, testing):
    """
    Cross-validate a set of classifiers and err_fns.
    """
    for classifier in classifiers:
        meta = Classifier(classifier)
        prof.start("train classifier")
        meta.build_classifier(training)
        prof.stop("train classifier")
        basename = ml.classifier_basename(classifier.classname)

        for err_fn in err_fns:
            io.debug(job, basename, err_fn.func.__name__, testing.num_instances)
            for instance in testing:
                eval_classifier_instance(job, db, meta, instance, err_fn,
                                         training)
            db.commit()
Example #6
  def get_gpuverified(self, s: session_t) -> bool:
    if self.gpuverified is None:
      prof.start("dsmith gpuverify")
      src = s.query(Program.src) \
        .join(Testcase) \
        .filter(Testcase.id == self.id) \
        .scalar()

      try:
        clgen.gpuverify(src, ["--local_size=64", "--num_groups=128"])
        self.gpuverified = True
      except clgen.GPUVerifyException:
        self.gpuverified = False
      s.commit()
      prof.stop("dsmith gpuverify")

    return self.gpuverified
Example #7
def eval_linear_models(db, models):
    rows = db.execute(
        "SELECT "
        "    scenario_stats.scenario, "
        "    kernels.max_wg_size, "
        "    scenario_stats.oracle_param "
        "FROM scenarios "
        "LEFT JOIN scenario_stats "
        "  ON scenarios.id=scenario_stats.scenario "
        "LEFT JOIN kernels "
        "  ON scenarios.kernel=kernels.id"
    ).fetchall()

    baseline = db.one_r()[0]

    prof.start("Linear models")
    for scenario, max_wgsize, oracle in rows:
        for model in models:
            wg_c, wg_r = model.predict(scenario, max_wgsize, oracle)

            try:
                prediction = hash_params(wg_c, wg_r)
                illegal = 0 if wg_c * wg_r < max_wgsize else 1
                correct = 1 if prediction == oracle else 0
                db.runtime(scenario, prediction)
                refused = 0

                reshape_param = prediction
                reshape_perf, reshape_speedup = perf_fn(db, scenario,
                                                        prediction, oracle,
                                                        baseline)
                baseline_perf, baseline_speedup = reshape_perf, reshape_speedup
                random_param = prediction
                random_perf, random_speedup = reshape_perf, reshape_speedup
            except lab.db.Error:
                refused = 0 if illegal else 1
                reshape_param = reshape(db, scenario, max_wgsize, wg_c, wg_r)
                reshape_perf, reshape_speedup = perf_fn(db, scenario,
                                                        reshape_param,
                                                        oracle, baseline)

                baseline_perf, baseline_speedup = perf_fn(db, scenario,
                                                          baseline, oracle,
                                                          baseline)

                random_param = random.choice(db.W_legal(scenario))
                random_perf, random_speedup = perf_fn(db, scenario,
                                                      random_param, oracle,
                                                      baseline)

            db.add_model_result(model.id(), "reshape_fn", scenario, oracle,
                                reshape_param, correct, illegal, refused,
                                reshape_perf, reshape_speedup)
            db.add_model_result(model.id(), "default_fn", scenario, oracle,
                                baseline, correct, illegal, refused,
                                baseline_perf, baseline_speedup)
            db.add_model_result(model.id(), "random_fn", scenario, oracle,
                                random_param, correct, illegal, refused,
                                random_perf, random_speedup)
    db.commit()
    prof.stop("Linear models")
Example #8
def eval_speedup_regressors(db, classifiers, baseline, rank_fn,
                            table, job, training, testing):
    maxwgsize_index = testing.attribute_by_name("kern_max_wg_size").index
    wg_c_index = testing.attribute_by_name("wg_c").index
    wg_r_index = testing.attribute_by_name("wg_r").index
    insert_str = ("INSERT INTO {} VALUES "
                  "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)".format(table))

    for classifier in classifiers:
        meta = Classifier(classifier)
        prof.start("train classifier")
        meta.build_classifier(training)
        prof.stop("train classifier")
        basename = ml.classifier_basename(classifier.classname)
        classifier_id = db.classifier_id(classifier)

        io.debug(job, basename, testing.num_instances)
        scenarios = set(instance.get_string_value(0)
                        for instance in testing)
        instances = zip(scenarios, [
            next(instance for instance in testing
                 if instance.get_string_value(0) == scenario)
            for scenario in scenarios
        ])

        for scenario, instance in instances:
            maxwgsize = int(instance.get_value(maxwgsize_index))
            wlegal = space.enumerate_wlegal_params(maxwgsize)
            predictions = []

            elapsed = 0
            for params in wlegal:
                wg_c, wg_r = unhash_params(params)

                instance.set_value(wg_c_index, wg_c)
                instance.set_value(wg_r_index, wg_r)

                # Predict the speedup for a particular set of
                # parameters.
                prof.start()
                predicted = meta.classify_instance(instance)
                elapsed += prof.elapsed()
                predictions.append((params, predicted))

            # Rank the predictions from highest to lowest speedup.
            predictions = sorted(predictions, key=lambda x: x[1], reverse=True)

            row = db.execute(
                "SELECT "
                "    oracle_param,"
                "    ("
                "        SELECT mean FROM runtime_stats "
                "        WHERE scenario=? AND params=?"
                "    ) * 1.0 / oracle_runtime  AS oracle_speedup,"
                "    worst_runtime / oracle_runtime AS actual_range "
                "FROM scenario_stats "
                "WHERE scenario=?",
                (scenario,baseline,scenario)).fetchone()
            actual = row[:2]

            predicted_range = predictions[-1][1] - predictions[0][1]
            actual_range = row[2] - row[1]

            num_attempts = 1
            while True:
                predicted = predictions.pop(0)

                try:
                    speedup = db.speedup(scenario, baseline, predicted[0])
                    perf = db.perf(scenario, predicted[0])

                    try:
                        speedup_he = db.speedup(scenario, HE_PARAM, predicted[0])
                    except Exception:
                        speedup_he = None

                    try:
                        speedup_mo = db.speedup(scenario, MO_PARAM, predicted[0])
                    except Exception:
                        speedup_mo = None

                    db.execute(insert_str,
                               (job, classifier_id, scenario, actual[0],
                                actual[1], predicted[0], predicted[1],
                                actual_range, predicted_range,
                                num_attempts,
                                1 if predicted[0] == actual[0] else 0,
                                perf, speedup, speedup_he, speedup_mo, elapsed))
                    break

                except _db.MissingDataError:
                    num_attempts += 1

            db.commit()
Example #9
def test_timers(profiling_env):
    x = len(list(prof.timers()))
    prof.start("new timer")
    assert len(list(prof.timers())) == x + 1
    prof.stop("new timer")
    assert len(list(prof.timers())) == x
Example #10
def test_stop_twice_error(profiling_env):
    prof.start("foo")
    prof.stop("foo")
    with pytest.raises(KeyError):
        prof.stop("foo")
Example #11
    def test_timers(self):
        x = len(list(prof.timers()))
        prof.start("new timer")
        self.assertEqual(len(list(prof.timers())), x + 1)
        prof.stop("new timer")
        self.assertEqual(len(list(prof.timers())), x)
Example #12
    def test_stop_twice_error(self):
        prof.start("foo")
        prof.stop("foo")
        with self.assertRaises(KeyError):
            prof.stop("foo")