Example 1
    def __init__(self, path, tables={}, enable_traces=True):
        """
        Arguments:
            path (str): The path to the database file.
            tables (dict of {str: tuple}, optional): A dictionary of
              {name: schema} pairs, where a schema is a list of
              (name, type) tuples.
            enable_traces (bool, optional): Enable traces for user
              defined functions and aggregates.
        """
        self.path = fs.path(path)

        # Create directory if needed.
        parent_dir = fs.dirname(path)
        if parent_dir:
            fs.mkdir(parent_dir)

        self.connection = sql.connect(self.path)

        for name, schema in six.iteritems(tables):
            self.create_table(name, schema)

        io.debug("Opened connection to '{0}'".format(self.path))

        # Register exit handler
        atexit.register(self.close)

        # Enable traces for user defined functions and aggregates. See:
        #
        # https://docs.python.org/2/library/sqlite3.html#sqlite3.enable_callback_tracebacks
        if enable_traces:
            sql.enable_callback_tracebacks(True)
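A minimal usage sketch of this constructor, assuming it belongs to the
Database class instantiated elsewhere in these examples as _db.Database;
the path and table name below are hypothetical:

# Hypothetical: one table whose schema is a tuple of (name, type) pairs,
# matching the {name: schema} format described in the docstring above.
tables = {
    "runtimes": (("scenario", "text"),
                 ("params", "text"),
                 ("runtime", "real")),
}
db = _db.Database("/tmp/example.db", tables=tables)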
Example 2
    def __init__(self, path, basecache=None):
        """
    Create a new JSON cache.

    Optionally supports populating the cache with values of an
    existing cache.

    Arguments:
       basecache (TransientCache, optional): Cache to populate this new
         cache with.
    """

        super(JsonCache, self).__init__()
        self.path = fs.abspath(path)

        if fs.exists(self.path) and fs.read_file(self.path):
            io.debug(("Loading cache '{0}'".format(self.path)))
            with open(self.path) as file:
                self._data = json.load(file)

        if basecache is not None:
            for key, val in basecache.items():
                self._data[key] = val

        # Register exit handler
        atexit.register(self.write)
Example 3
    def __init__(self, path, basecache=None):
        """
        Create a new JSON cache.

        Optionally supports populating the cache with values of an
        existing cache.

        Arguments:
           basecache (TransientCache, optional): Cache to populate this new
             cache with.
        """

        super(JsonCache, self).__init__()
        self.path = fs.abspath(path)

        if fs.exists(self.path):
            io.debug(("Loading cache '{0}'".format(self.path)))
            with open(self.path) as file:
                self._data = json.load(file)

        if basecache is not None:
            for key, val in basecache.items():
                self._data[key] = val

        # Register exit handler
        atexit.register(self.write)
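A short usage sketch (the path is hypothetical, and the dict-style item
access is an assumption about the TransientCache base class, suggested
by the use of self._data and basecache.items()):

cache = JsonCache("/tmp/example_cache.json")
cache["answer"] = 42   # assumed dict-style access via the base class
cache.write()          # or rely on the registered atexit handler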
Example 4
    def AddStencilRuntime(self, device_name, device_count,
                          north, south, east, west, data_width,
                          data_height, type_in, type_out, source,
                          max_wg_size, wg_c, wg_r, runtime):
        """
        Add a new stencil runtime.

        Args:

            device_name (str): The name of the execution device.
            device_count (int): The number of execution devices.
            north (int): The stencil shape north direction.
            south (int): The stencil shape south direction.
            east (int): The stencil shape east direction.
            west (int): The stencil shape west direction.
            data_width (int): The number of columns of data.
            data_height (int): The number of rows of data.
            type_in (str): The input data type.
            type_out (str): The output data type.
            source (str): The stencil kernel source code.
            max_wg_size (int): The maximum kernel workgroup size.
            wg_c (int): The workgroup size used (columns).
            wg_r (int): The workgroup size used (rows).
            runtime (double): The measured runtime in milliseconds.

        """
        # Parse arguments.
        device_name = util.parse_str(device_name)
        device_count = int(device_count)
        north = int(north)
        south = int(south)
        east = int(east)
        west = int(west)
        data_width = int(data_width)
        data_height = int(data_height)
        type_in = util.parse_str(type_in)
        type_out = util.parse_str(type_out)
        source = util.parse_str(source)
        max_wg_size = int(max_wg_size)
        wg_c = int(wg_c)
        wg_r = int(wg_r)
        runtime = float(runtime)

        # Lookup IDs
        device = self.db.device_id(device_name, device_count)
        kernel = self.db.kernel_id(north, south, east, west,
                                   max_wg_size, source)
        dataset = self.db.datasets_id(data_width, data_height,
                                      type_in, type_out)
        scenario = self.db.scenario_id(device, kernel, dataset)
        params = self.db.params_id(wg_c, wg_r)

        # Add entry into runtimes table.
        self.db.add_runtime(scenario, params, runtime)
        self.db.commit()

        io.debug(("AddStencilRuntime({scenario}, {params}, {runtime})"
                  .format(scenario=scenario[:8], params=params,
                          runtime=runtime)))
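A hypothetical invocation with made-up argument values matching the
documented types (server stands in for whatever object exposes this
method, and source for a kernel source string):

# Hypothetical call: a 1x1x1x1 stencil on 1024x768 float data.
server.AddStencilRuntime("GeForce GTX 590", 1,    # device name, count
                         1, 1, 1, 1,              # north, south, east, west
                         1024, 768,               # data width, height
                         "float", "float",        # type_in, type_out
                         source,                  # kernel source code
                         4096,                    # max workgroup size
                         32, 4,                   # wg_c, wg_r
                         1.234)                   # runtime in milliseconds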
Example 5
    def write(self):
        """
        Write contents of cache to disk.
        """
        io.debug("Storing cache '{0}'".format(self.path))
        with open(self.path, "w") as file:
            json.dump(self._data, file, sort_keys=True, indent=2,
                      separators=(',', ': '))
Example 7
    def RequestStencilParams(self, device_name, device_count,
                             north, south, east, west, data_width,
                             data_height, type_in, type_out, source,
                             max_wg_size):
        """
        Request parameter values for a SkelCL stencil operation.

        Determines the parameter values to use for a SkelCL stencil
        operation, using a machine learning classifier to predict the
        optimal parameter values given a set of features determined
        from the arguments.

        Args:

            device_name (str): The name of the execution device.
            device_count (int): The number of execution devices.
            north (int): The stencil shape north direction.
            south (int): The stencil shape south direction.
            east (int): The stencil shape east direction.
            west (int): The stencil shape west direction.
            data_width (int): The number of columns of data.
            data_height (int): The number of rows of data.
            type_in (str): The input data type.
            type_out (str): The output data type.
            source (str): The stencil kernel source code.
            max_wg_size (int): The maximum kernel workgroup size.

        Returns:
            A tuple of work group size values, e.g.

            (16, 32)
        """

        start_time = time.time()

        # Parse arguments.
        device_name = util.parse_str(device_name)
        device_count = int(device_count)
        north = int(north)
        south = int(south)
        east = int(east)
        west = int(west)
        data_width = int(data_width)
        data_height = int(data_height)
        source = util.parse_str(source)
        max_wg_size = int(max_wg_size)

        # TODO: Perform feature extraction & classification
        wg = (64, 32)

        end_time = time.time()

        io.debug(("RequestStencilParams() -> "
                  "({c}, {r}) [{t:.3f}s]"
                  .format(c=wg[0], r=wg[1], t=end_time - start_time)))

        return wg
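A hypothetical call (server again stands in for the object exposing the
method, and source for a kernel source string; the returned tuple
unpacks into column and row workgroup sizes):

wg_c, wg_r = server.RequestStencilParams("GeForce GTX 590", 1,
                                         1, 1, 1, 1, 1024, 768,
                                         "float", "float", source, 4096)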
Example 8
def get_instcount(opencl_path):
    io.debug("Reading file '%s'" % opencl_path)

    bitcode_path = fs.path("/tmp/temp.bc")

    host.system(clang_cmd(opencl_path, bitcode_path))
    instcount_output = host.check_output(instcount_cmd(bitcode_path))
    counts = parse_instcount(instcount_output)
    return counts
Example 9
def get_jobs():
    joblist = "jobs/{}.txt".format(system.HOSTNAME)

    io.debug(joblist)

    if fs.isfile(joblist):
        with open(joblist) as infile:
            return infile.readlines()
    else:
        return []
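The job list is a plain-text file named after the host, one entry per
line. A hypothetical consumer, assuming each line is a shell command
and that host.system accepts a command string:

# Hypothetical: run each job line as a shell command.
for job in get_jobs():
    host.system(job.strip())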
Example 10
    def test_dataset_folds(self):
        if not ml.MODULE_SUPPORTED:
            return
        dataset = ml.Dataset.load(self.arff)
        folds = dataset.folds(nfolds=10)

        self._test(10, len(folds))
        for training, testing in folds:
            io.debug("Training:", training.num_instances,
                     "Testing:", testing.num_instances)
            self._test(True, training.num_instances > testing.num_instances)
Example 11
    def write(self):
        """
        Write contents of cache to disk.
        """
        io.debug("Storing cache '{0}'".format(self.path))
        with open(self.path, "w") as file:
            json.dump(self._data,
                      file,
                      sort_keys=True,
                      indent=2,
                      separators=(',', ': '))
Example 12
def run_eval(db, dataset, eval_fn, eval_type="", nfolds=10):
    # Cross validation using both synthetic and real data.
    folds = dataset.folds(nfolds, seed=SEED)
    print()
    io.info("CROSS VALIDATION")
    io.info("Size of training set:", folds[0][0].num_instances)
    io.info("Size of testing set: ", folds[0][1].num_instances)

    for i, fold in enumerate(folds):
        training, testing = fold
        io.debug("Cross-validating", eval_type, "- fold", i + 1, "of", nfolds)
        eval_fn("xval", training, testing)
Example 13
def run_synthetic_benchmarks(iterations=250):
    """
    Sample the space of synthetic benchmarks.
    """
    allargs = list(experiment.SIMPLEBIG_ARGS)

    random.shuffle(allargs)

    for devargs in experiment.DEVARGS:
        for simplebigargs in allargs:
            args = labm8.flatten(simplebigargs + (devargs,))
            io.debug(" ".join(args))
            cmd_str = " ".join(args)

            sample_simplebig(args, iterations=iterations)
Example 14
def eval_classifiers(db, classifiers, err_fns, job, training, testing):
    """
    Cross validate a set of classifiers and err_fns.
    """
    for classifier in classifiers:
        meta = Classifier(classifier)
        prof.start("train classifier")
        meta.build_classifier(training)
        prof.stop("train classifier")
        basename = ml.classifier_basename(classifier.classname)

        for err_fn in err_fns:
            io.debug(job, basename, err_fn.func.__name__, testing.num_instances)
            for instance in testing:
                eval_classifier_instance(job, db, meta, instance, err_fn,
                                         training)
            db.commit()
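Since the debug line logs err_fn.func.__name__, the err_fns appear to
be functools.partial objects wrapping the underlying error functions.
A hypothetical construction (relative_err and its signature are made up
for illustration):

from functools import partial

# Hypothetical error function with a database argument bound up front,
# leaving whatever signature eval_classifier_instance expects.
def relative_err(db, expected, predicted):
    return abs(expected - predicted) / expected

err_fns = [partial(relative_err, db)]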
Example 15
    def test_debug(self):
        out = StringIO()
        io.debug("foo", file=out)
        self._test("DEBUG", re.search("DEBUG", out.getvalue()).group(0))
Example 16
def migrate_0_to_1(old):
  """
  SkelCL database migration script.

  Arguments:

      old (SkelCLDatabase): The database to migrate
  """

  def get_source(checksum):
    query = old.execute("SELECT source FROM kernels WHERE checksum = ?",
                        (checksum,))
    return query.fetchone()[0]

  def get_device_attr(device_id, name, count):
    query = old.execute("SELECT * FROM devices WHERE name = ?",
                        (name,))
    attr = query.fetchone()

    # Splice the device ID and count into the new attributes tuple.
    newattr = (device_id, attr[0], count) + attr[2:]
    return newattr

  def process_row(tmp, row):
    # Get column values from row.
    host = row[0]
    dev_name = row[1]
    dev_count = row[2]
    kern_checksum = row[3]
    north = row[4]
    south = row[5]
    east = row[6]
    west = row[7]
    data_width = row[8]
    data_height = row[9]
    max_wg_size = row[10]
    wg_c = row[11]
    wg_r = row[12]
    runtime = row[13]
    type_in = "float"
    type_out = "float"

    # Lookup source code.
    source = get_source(kern_checksum)
    user_source = get_user_source(source)

    kernel_id = hash_kernel(north, south, east, west, max_wg_size, source)
    device_id = hash_device(dev_name, dev_count)
    data_id = hash_data(data_width, data_height, type_in, type_out)
    scenario_id = hash_scenario(host, device_id, kernel_id, data_id)
    params_id = hash_workgroup_size(wg_c, wg_r)

    device_attr = get_device_attr(device_id, dev_name, dev_count)

    # Add database entries.
    tmp.execute("INSERT OR IGNORE INTO kernels VALUES (?,?,?,?,?,?,?)",
                (kernel_id, north, south, east, west, max_wg_size, user_source))

    placeholders = ",".join(["?"] * len(device_attr))
    tmp.execute("INSERT OR IGNORE INTO devices VALUES (" + placeholders + ")",
                device_attr)

    tmp.execute("INSERT OR IGNORE INTO data VALUES (?,?,?,?,?)",
                (data_id, data_width, data_height, type_in, type_out))

    tmp.execute("INSERT OR IGNORE INTO params VALUES (?,?,?)",
                (params_id, wg_c, wg_r))

    tmp.execute("INSERT OR IGNORE INTO scenarios VALUES (?,?,?,?,?)",
                (scenario_id, host, device_id, kernel_id, data_id))

    tmp.execute("INSERT INTO runtimes VALUES (?,?,?)",
                (scenario_id, params_id, runtime))

  # Create temporary database
  tmp = _db.Database("/tmp/omnitune.skelcl.migration.db")

  # Clear anything that's already in the database.
  for table in tmp.tables:
    tmp.drop_table(table)

  io.info("Migrating database to version 1.")

  backup_path = old.path + ".0"
  io.info("Creating backup of old database at '{0}'".format(backup_path))
  fs.cp(old.path, backup_path)

  io.debug("Migration: creating tables ...")

  # Create table: version
  tmp.create_table("version",
                   (("version", "integer"),))

  # Set database version
  tmp.execute("INSERT INTO version VALUES (1)")

  # Create table: kernels
  tmp.create_table("kernels",
                   (("id", "text primary key"),
                    ("north", "integer"),
                    ("south", "integer"),
                    ("east", "integer"),
                    ("west", "integer"),
                    ("max_wg_size", "integer"),
                    ("source", "text")))

  # Create table: devices
  tmp.create_table("devices",
                   (("id", "text primary key"),
                    ("name", "text"),
                    ("count", "integer"),
                    ("address_bits", "integer"),
                    ("double_fp_config", "integer"),
                    ("endian_little", "integer"),
                    ("execution_capabilities", "integer"),
                    ("extensions", "text"),
                    ("global_mem_cache_size", "integer"),
                    ("global_mem_cache_type", "integer"),
                    ("global_mem_cacheline_size", "integer"),
                    ("global_mem_size", "integer"),
                    ("host_unified_memory", "integer"),
                    ("image2d_max_height", "integer"),
                    ("image2d_max_width", "integer"),
                    ("image3d_max_depth", "integer"),
                    ("image3d_max_height", "integer"),
                    ("image3d_max_width", "integer"),
                    ("image_support", "integer"),
                    ("local_mem_size", "integer"),
                    ("local_mem_type", "integer"),
                    ("max_clock_frequency", "integer"),
                    ("max_compute_units", "integer"),
                    ("max_constant_args", "integer"),
                    ("max_constant_buffer_size", "integer"),
                    ("max_mem_alloc_size", "integer"),
                    ("max_parameter_size", "integer"),
                    ("max_read_image_args", "integer"),
                    ("max_samplers", "integer"),
                    ("max_work_group_size", "integer"),
                    ("max_work_item_dimensions", "integer"),
                    ("max_work_item_sizes_0", "integer"),
                    ("max_work_item_sizes_1", "integer"),
                    ("max_work_item_sizes_2", "integer"),
                    ("max_write_image_args", "integer"),
                    ("mem_base_addr_align", "integer"),
                    ("min_data_type_align_size", "integer"),
                    ("native_vector_width_char", "integer"),
                    ("native_vector_width_double", "integer"),
                    ("native_vector_width_float", "integer"),
                    ("native_vector_width_half", "integer"),
                    ("native_vector_width_int", "integer"),
                    ("native_vector_width_long", "integer"),
                    ("native_vector_width_short", "integer"),
                    ("preferred_vector_width_char", "integer"),
                    ("preferred_vector_width_double", "integer"),
                    ("preferred_vector_width_float", "integer"),
                    ("preferred_vector_width_half", "integer"),
                    ("preferred_vector_width_int", "integer"),
                    ("preferred_vector_width_long", "integer"),
                    ("preferred_vector_width_short", "integer"),
                    ("queue_properties", "integer"),
                    ("single_fp_config", "integer"),
                    ("type", "integer"),
                    ("vendor", "text"),
                    ("vendor_id", "text"),
                    ("version", "text")))

  # Create table: data
  tmp.create_table("data",
                   (("id", "text primary key"),
                    ("width", "integer"),
                    ("height", "integer"),
                    ("tin", "text"),
                    ("tout", "text")))

  # Create table: params
  tmp.create_table("params",
                   (("id", "text primary key"),
                    ("wg_c", "integer"),
                    ("wg_r", "integer")))

  # Create table: scenarios
  tmp.create_table("scenarios",
                   (("id", "text primary key"),
                    ("host", "text"),
                    ("device", "text"),
                    ("kernel", "text"),
                    ("data", "text")))

  # Create table: runtimes
  tmp.create_table("runtimes",
                   (("scenario", "text"),
                    ("params", "text"),
                    ("runtime", "real")))

  i = 0
  for row in old.execute("SELECT * from runtimes"):
    process_row(tmp, row)
    i += 1
    if not i % 2500:
      io.debug("Processed", i, "rows ...")
      if not i % 5000:
        tmp.commit()

  tmp.commit()

  old_path = old.path
  tmp_path = tmp.path

  # Copy migrated database over the original one.
  fs.cp(tmp_path, old_path)
  fs.rm(tmp_path)

  old.close()
  tmp.close()
  io.info("Migration completed.")
Example 17
def migrate_2_to_3(old):
  """
  SkelCL database migration script.

  Arguments:

      old (SkelCLDatabase): The database to migrate
  """

  def _old_kernel2new(old_id):
    kernel = old.execute("SELECT north,south,east,west,max_wg_size,source "
                         "FROM kernels WHERE id=?",
                         (old_id,)).fetchone()
    if kernel:
      return tmp.kernel_id(*kernel)

  def _old_scenario2new(old_id):
    device, old_kernel, dataset = old.execute("SELECT device,kernel,dataset "
                                              "FROM scenarios WHERE id=?",
                                              (old_id,)).fetchone()
    kernel = _old_kernel2new(old_kernel)
    return tmp.scenario_id(device, kernel, dataset)

  # TODO: Un-comment out code!

  # Create temporary database
  fs.rm("/tmp/omnitune.skelcl.migration.db")
  tmp = _db.Database("/tmp/omnitune.skelcl.migration.db")
  tmp.attach(old.path, "rhs")

  io.info("Migrating database to version 3.")

  backup_path = old.path + ".2"
  io.info("Creating backup of old database at '{0}'".format(backup_path))
  fs.cp(old.path, backup_path)

  tmp_path = tmp.path
  old_path = old.path

  tmp.run("create_tables")

  # Populate feature and lookup tables.
  for row in old.execute("SELECT * FROM devices"):
    features = row[1:]
    id = hash_device(*features)
    io.debug("Features extracted for device", id)
    row = (id,) + features
    tmp.execute("INSERT INTO devices VALUES " +
                placeholders(*row), row)

    row = (features[0], features[1], id)
    tmp.execute("INSERT INTO device_lookup VALUES " +
                placeholders(*row), row)
    tmp.commit()

  for row in old.execute("SELECT * FROM kernels"):
    args = row[1:]
    tmp.kernel_id(*args)

  for row in old.execute("SELECT * FROM datasets"):
    features = row[1:]
    id = hash_dataset(*features)
    io.debug("Features extracted for dataset", id)
    row = (id,) + features
    tmp.execute("INSERT INTO datasets VALUES " +
                placeholders(*row), row)

    row = features + (id,)
    tmp.execute("INSERT INTO dataset_lookup VALUES " +
                placeholders(*row), row)
    tmp.commit()

  # Populate kernel_names table.
  for row in old.execute("SELECT * FROM kernel_names"):
    old_id = row[0]
    synthetic, name = row[1:]

    kernel = _old_kernel2new(old_id)
    if kernel:
      row = (kernel, synthetic, name)
      tmp.execute("INSERT OR IGNORE INTO kernel_names VALUES " +
                  placeholders(*row), row)
  tmp.commit()

  # Populate scenarios table.
  for row in old.execute("SELECT * FROM scenarios"):
    old_id, _, device, old_kernel, dataset = row
    kernel = _old_kernel2new(old_kernel)
    new_id = hash_scenario(device, kernel, dataset)

    row = (new_id, device, kernel, dataset)
    tmp.execute("INSERT OR IGNORE INTO scenarios VALUES " +
                placeholders(*row), row)
  tmp.commit()

  # Populate params table.
  tmp.execute("INSERT INTO params SELECT * from rhs.params")
  tmp.commit()

  scenario_replacements = {
    row[0]: _old_scenario2new(row[0])
    for row in old.execute("SELECT * FROM scenarios")
  }

  tmp.execute("INSERT INTO runtimes SELECT * from rhs.runtimes")
  for old_id, new_id in scenario_replacements.iteritems():
    io.info("Runtimes", old_id, "->", new_id)
    tmp.execute("UPDATE runtimes SET scenario=? WHERE scenario=?",
                (new_id, old_id))
  tmp.commit()

  # Sanity checks
  bad = False
  for row in tmp.execute("SELECT DISTINCT scenario FROM runtimes"):
    count = tmp.execute("SELECT Count(*) FROM scenarios WHERE id=?",
                        (row[0],)).fetchone()[0]
    if count != 1:
      io.error("Bad scenario count:", row[0], count)
      bad = True

  if bad:
    io.fatal("Failed sanity check, aborting.")
  else:
    io.info("Passed sanity check.")

  # Copy migrated database over the original one.
  fs.cp(tmp_path, old_path)
  fs.rm(tmp_path)

  old.close()
  tmp.close()
  io.info("Migration completed.")
Example 18
def test_debug():
    out = StringIO()
    io.debug("foo", file=out)
    assert "DEBUG" == re.search("DEBUG", out.getvalue()).group(0)
Example 19
def eval_speedup_regressors(db, classifiers, baseline, rank_fn,
                            table, job, training, testing):
    maxwgsize_index = testing.attribute_by_name("kern_max_wg_size").index
    wg_c_index = testing.attribute_by_name("wg_c").index
    wg_r_index = testing.attribute_by_name("wg_r").index
    insert_str = ("INSERT INTO {} VALUES "
                  "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)".format(table))

    for classifier in classifiers:
        meta = Classifier(classifier)
        prof.start("train classifier")
        meta.build_classifier(training)
        prof.stop("train classifier")
        basename = ml.classifier_basename(classifier.classname)
        classifier_id = db.classifier_id(classifier)

        io.debug(job, basename, testing.num_instances)
        scenarios = set([instance.get_string_value(0)
                         for instance in testing])
        instances = zip(scenarios, [
            (instance for instance in testing if
             instance.get_string_value(0) == scenario).next()
            for scenario in scenarios
        ])

        for scenario, instance in instances:
            maxwgsize = int(instance.get_value(maxwgsize_index))
            wlegal = space.enumerate_wlegal_params(maxwgsize)
            predictions = []

            elapsed = 0
            for params in wlegal:
                wg_c, wg_r = unhash_params(params)

                instance.set_value(wg_c_index, wg_c)
                instance.set_value(wg_r_index, wg_r)

                # Predict the speedup for a particular set of
                # parameters.
                prof.start()
                predicted = meta.classify_instance(instance)
                elapsed += prof.elapsed()
                predictions.append((params, predicted))

            # Rank the predictions from highest to lowest speedup.
            predictions = sorted(predictions, key=lambda x: x[1], reverse=True)

            row = db.execute(
                "SELECT "
                "    oracle_param,"
                "    ("
                "        SELECT mean FROM runtime_stats "
                "        WHERE scenario=? AND params=?"
                "    ) * 1.0 / oracle_runtime  AS oracle_speedup,"
                "    worst_runtime / oracle_runtime AS actual_range "
                "FROM scenario_stats "
                "WHERE scenario=?",
                (scenario, baseline, scenario)).fetchone()
            actual = row[:2]

            predicted_range = predictions[-1][1] - predictions[0][1]
            actual_range = row[2] - row[1]

            num_attempts = 1
            while True:
                predicted = predictions.pop(0)

                try:
                    speedup = db.speedup(scenario, baseline, predicted[0])
                    perf = db.perf(scenario, predicted[0])

                    try:
                        speedup_he = db.speedup(scenario, HE_PARAM, predicted[0])
                    except:
                        speedup_he = None

                    try:
                        speedup_mo = db.speedup(scenario, MO_PARAM, predicted[0])
                    except:
                        speedup_mo = None

                    db.execute(insert_str,
                               (job, classifier_id, scenario, actual[0],
                                actual[1], predicted[0], predicted[1],
                                actual_range, predicted_range,
                                num_attempts,
                                1 if predicted[0] == actual[0] else 0,
                                perf, speedup, speedup_he, speedup_mo, elapsed))
                    break

                except _db.MissingDataError:
                    num_attempts += 1

            db.commit()
Example 20
def migrate_0_to_1(old):
    """
    SkelCL database migration script.

    Arguments:

        old (SkelCLDatabase): The database to migrate
    """
    def get_source(checksum):
        query = old.execute("SELECT source FROM kernels WHERE checksum = ?",
                            (checksum,))
        return query.fetchone()[0]

    def get_device_attr(device_id, name, count):
        query = old.execute("SELECT * FROM devices WHERE name = ?",
                            (name,))
        attr = query.fetchone()

        # Splice the device ID and count into the new attributes tuple.
        newattr = (device_id, attr[0], count) + attr[2:]
        return newattr

    def process_row(tmp, row):
        # Get column values from row.
        host = row[0]
        dev_name = row[1]
        dev_count = row[2]
        kern_checksum = row[3]
        north = row[4]
        south = row[5]
        east = row[6]
        west = row[7]
        data_width = row[8]
        data_height = row[9]
        max_wg_size = row[10]
        wg_c = row[11]
        wg_r = row[12]
        runtime = row[13]
        type_in = "float"
        type_out = "float"

        # Lookup source code.
        source = get_source(kern_checksum)
        user_source = get_user_source(source)

        kernel_id = hash_kernel(north, south, east, west, max_wg_size, source)
        device_id = hash_device(dev_name, dev_count)
        data_id = hash_data(data_width, data_height, type_in, type_out)
        scenario_id = hash_scenario(host, device_id, kernel_id, data_id)
        params_id = hash_workgroup_size(wg_c, wg_r)

        device_attr = get_device_attr(device_id, dev_name, dev_count)

        # Add database entries.
        tmp.execute("INSERT OR IGNORE INTO kernels VALUES (?,?,?,?,?,?,?)",
                    (kernel_id, north, south, east, west, max_wg_size,
                     user_source))

        placeholders = ",".join(["?"] * len(device_attr))
        tmp.execute("INSERT OR IGNORE INTO devices VALUES (" + placeholders + ")",
                    device_attr)

        tmp.execute("INSERT OR IGNORE INTO data VALUES (?,?,?,?,?)",
                    (data_id, data_width, data_height, type_in, type_out))

        tmp.execute("INSERT OR IGNORE INTO params VALUES (?,?,?)",
                    (params_id, wg_c, wg_r))

        tmp.execute("INSERT OR IGNORE INTO scenarios VALUES (?,?,?,?,?)",
                    (scenario_id, host, device_id, kernel_id, data_id))

        tmp.execute("INSERT INTO runtimes VALUES (?,?,?)",
                    (scenario_id, params_id, runtime))

    # Create temporary database
    tmp = _db.Database("/tmp/omnitune.skelcl.migration.db")

    # Clear anything that's already in the database.
    for table in tmp.tables:
        tmp.drop_table(table)

    io.info("Migrating database to version 1.")

    backup_path = old.path + ".0"
    io.info("Creating backup of old database at '{0}'".format(backup_path))
    fs.cp(old.path, backup_path)

    io.debug("Migration: creating tables ...")

    # Create table: version
    tmp.create_table("version",
                     (("version",                         "integer"),))

    # Set database version
    tmp.execute("INSERT INTO version VALUES (1)")

    # Create table: kernels
    tmp.create_table("kernels",
                     (("id",                              "text primary key"),
                      ("north",                           "integer"),
                      ("south",                           "integer"),
                      ("east",                            "integer"),
                      ("west",                            "integer"),
                      ("max_wg_size",                     "integer"),
                      ("source",                          "text")))

    # Create table: devices
    tmp.create_table("devices",
                     (("id",                              "text primary key"),
                      ("name",                            "text"),
                      ("count",                           "integer"),
                      ("address_bits",                    "integer"),
                      ("double_fp_config",                "integer"),
                      ("endian_little",                   "integer"),
                      ("execution_capabilities",          "integer"),
                      ("extensions",                      "text"),
                      ("global_mem_cache_size",           "integer"),
                      ("global_mem_cache_type",           "integer"),
                      ("global_mem_cacheline_size",       "integer"),
                      ("global_mem_size",                 "integer"),
                      ("host_unified_memory",             "integer"),
                      ("image2d_max_height",              "integer"),
                      ("image2d_max_width",               "integer"),
                      ("image3d_max_depth",               "integer"),
                      ("image3d_max_height",              "integer"),
                      ("image3d_max_width",               "integer"),
                      ("image_support",                   "integer"),
                      ("local_mem_size",                  "integer"),
                      ("local_mem_type",                  "integer"),
                      ("max_clock_frequency",             "integer"),
                      ("max_compute_units",               "integer"),
                      ("max_constant_args",               "integer"),
                      ("max_constant_buffer_size",        "integer"),
                      ("max_mem_alloc_size",              "integer"),
                      ("max_parameter_size",              "integer"),
                      ("max_read_image_args",             "integer"),
                      ("max_samplers",                    "integer"),
                      ("max_work_group_size",             "integer"),
                      ("max_work_item_dimensions",        "integer"),
                      ("max_work_item_sizes_0",           "integer"),
                      ("max_work_item_sizes_1",           "integer"),
                      ("max_work_item_sizes_2",           "integer"),
                      ("max_write_image_args",            "integer"),
                      ("mem_base_addr_align",             "integer"),
                      ("min_data_type_align_size",        "integer"),
                      ("native_vector_width_char",        "integer"),
                      ("native_vector_width_double",      "integer"),
                      ("native_vector_width_float",       "integer"),
                      ("native_vector_width_half",        "integer"),
                      ("native_vector_width_int",         "integer"),
                      ("native_vector_width_long",        "integer"),
                      ("native_vector_width_short",       "integer"),
                      ("preferred_vector_width_char",     "integer"),
                      ("preferred_vector_width_double",   "integer"),
                      ("preferred_vector_width_float",    "integer"),
                      ("preferred_vector_width_half",     "integer"),
                      ("preferred_vector_width_int",      "integer"),
                      ("preferred_vector_width_long",     "integer"),
                      ("preferred_vector_width_short",    "integer"),
                      ("queue_properties",                "integer"),
                      ("single_fp_config",                "integer"),
                      ("type",                            "integer"),
                      ("vendor",                          "text"),
                      ("vendor_id",                       "text"),
                      ("version",                         "text")))

    # Create table: data
    tmp.create_table("data",
                     (("id",                              "text primary key"),
                      ("width",                           "integer"),
                      ("height",                          "integer"),
                      ("tin",                             "text"),
                      ("tout",                            "text")))

    # Create table: params
    tmp.create_table("params",
                     (("id",                              "text primary key"),
                      ("wg_c",                            "integer"),
                      ("wg_r",                            "integer")))

    # Create table: scenarios
    tmp.create_table("scenarios",
                     (("id",                              "text primary key"),
                      ("host",                            "text"),
                      ("device",                          "text"),
                      ("kernel",                          "text"),
                      ("data",                            "text")))

    # Create table: runtimes
    tmp.create_table("runtimes",
                     (("scenario",                        "text"),
                      ("params",                          "text"),
                      ("runtime",                         "real")))

    i = 0
    for row in old.execute("SELECT * from runtimes"):
        process_row(tmp, row)
        i += 1
        if not i % 2500:
            io.debug("Processed", i, "rows ...")
            if not i % 5000:
                tmp.commit()

    tmp.commit()

    old_path = old.path
    tmp_path = tmp.path

    # Copy migrated database over the original one.
    fs.cp(tmp_path, old_path)
    fs.rm(tmp_path)

    old.close()
    tmp.close()
    io.info("Migration completed.")
Example 21
def migrate_2_to_3(old):
    """
    SkelCL database migration script.

    Arguments:

        old (SkelCLDatabase): The database to migrate
    """
    def _old_kernel2new(old_id):
        kernel = old.execute("SELECT north,south,east,west,max_wg_size,source "
                             "FROM kernels WHERE id=?",
                             (old_id,)).fetchone()
        if kernel:
            return tmp.kernel_id(*kernel)

    def _old_scenario2new(old_id):
        device, old_kernel, dataset = old.execute("SELECT device,kernel,dataset "
                                                  "FROM scenarios WHERE id=?",
                                                  (old_id,)).fetchone()
        kernel = _old_kernel2new(old_kernel)
        return tmp.scenario_id(device, kernel, dataset)

    # TODO: Un-comment out code!

    # Create temporary database
    fs.rm("/tmp/omnitune.skelcl.migration.db")
    tmp = _db.Database("/tmp/omnitune.skelcl.migration.db")
    tmp.attach(old.path, "rhs")

    io.info("Migrating database to version 3.")

    backup_path = old.path + ".2"
    io.info("Creating backup of old database at '{0}'".format(backup_path))
    fs.cp(old.path, backup_path)

    tmp_path = tmp.path
    old_path = old.path

    tmp.run("create_tables")

    # Populate feature and lookup tables.
    for row in old.execute("SELECT * FROM devices"):
        features = row[1:]
        id = hash_device(*features)
        io.debug("Features extracted for device", id)
        row = (id,) + features
        tmp.execute("INSERT INTO devices VALUES " +
                    placeholders(*row), row)

        row = (features[0], features[1], id)
        tmp.execute("INSERT INTO device_lookup VALUES " +
                    placeholders(*row), row)
        tmp.commit()

    for row in old.execute("SELECT * FROM kernels"):
        args = row[1:]
        tmp.kernel_id(*args)

    for row in old.execute("SELECT * FROM datasets"):
        features = row[1:]
        id = hash_dataset(*features)
        io.debug("Features extracted for dataset", id)
        row = (id,) + features
        tmp.execute("INSERT INTO datasets VALUES " +
                    placeholders(*row), row)

        row = features + (id,)
        tmp.execute("INSERT INTO dataset_lookup VALUES " +
                    placeholders(*row), row)
        tmp.commit()

    # Populate kernel_names table.
    for row in old.execute("SELECT * FROM kernel_names"):
        old_id = row[0]
        synthetic, name = row[1:]

        kernel = _old_kernel2new(old_id)
        if kernel:
            row = (kernel, synthetic, name)
            tmp.execute("INSERT OR IGNORE INTO kernel_names VALUES " +
                        placeholders(*row), row)
    tmp.commit()

    # Populate scenarios table.
    for row in old.execute("SELECT * FROM scenarios"):
        old_id, _, device, old_kernel, dataset = row
        kernel = _old_kernel2new(old_kernel)
        new_id = hash_scenario(device, kernel, dataset)

        row = (new_id, device, kernel, dataset)
        tmp.execute("INSERT OR IGNORE INTO scenarios VALUES " +
                    placeholders(*row), row)
    tmp.commit()

    # Populate params table.
    tmp.execute("INSERT INTO params SELECT * from rhs.params")
    tmp.commit()

    scenario_replacements = {
        row[0]: _old_scenario2new(row[0])
        for row in old.execute("SELECT * FROM scenarios")
    }

    tmp.execute("INSERT INTO runtimes SELECT * from rhs.runtimes")
    for old_id, new_id in scenario_replacements.iteritems():
        io.info("Runtimes", old_id, "->", new_id)
        tmp.execute("UPDATE runtimes SET scenario=? WHERE scenario=?",
                    (new_id, old_id))
    tmp.commit()

    # Sanity checks
    bad = False
    for row in tmp.execute("SELECT DISTINCT scenario FROM runtimes"):
        count = tmp.execute("SELECT Count(*) FROM scenarios WHERE id=?",
                            (row[0],)).fetchone()[0]
        if count != 1:
            io.error("Bad scenario count:", row[0], count)
            bad = True

    if bad:
        io.fatal("Failed sanity check, aborting.")
    else:
        io.info("Passed sanity check.")

    # Copy migrated database over the original one.
    fs.cp(tmp_path, old_path)
    fs.rm(tmp_path)

    old.close()
    tmp.close()
    io.info("Migration completed.")