Example #1
def unpack_directory_if_needed(path: str) -> str:
    """
    If path is a tarball, unpack it. If path doesn't exist but there is a
    tarball with the same name, unpack it.

    Parameters
    ----------
    path : str
        Path to directory or tarball.

    Returns
    -------
    str
        Path to directory.

    Raises
    ------
    clgen.InternalError
        If unable to extract archive.
    """
    if fs.isdir(path):
        return path

    if fs.isfile(path) and path.endswith(".tar.bz2"):
        log.info("unpacking '{}'".format(path))
        tar.unpack_archive(path)
        return re.sub(r'\.tar\.bz2$', '', path)

    if fs.isfile(path + ".tar.bz2"):
        log.info("unpacking '{}'".format(path + ".tar.bz2"))
        tar.unpack_archive(path + ".tar.bz2")
        return path

    raise clgen.InternalError("cannot interpret archive '{path}'"
                              .format(**vars()))
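
A minimal standalone sketch of the same check-then-unpack idiom using only the Python standard library; shutil and os.path stand in for the labm8 helpers, and the function name and error type here are illustrative rather than clgen's:

import os
import shutil


def unpack_if_needed(path: str) -> str:
    """Return a directory path, unpacking a sibling '.tar.bz2' archive if needed."""
    if os.path.isdir(path):
        return path
    if os.path.isfile(path) and path.endswith(".tar.bz2"):
        # unpack next to the archive and return the path with the suffix stripped
        shutil.unpack_archive(path, os.path.dirname(path) or ".")
        return path[:-len(".tar.bz2")]
    if os.path.isfile(path + ".tar.bz2"):
        shutil.unpack_archive(path + ".tar.bz2", os.path.dirname(path) or ".")
        return path
    raise FileNotFoundError(f"cannot interpret archive '{path}'")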
Example #2
def preprocess_inplace(paths: List[str],
                       max_num_workers: int = cpu_count(),
                       attempt: int = 1) -> None:
    """
    Preprocess a list of files in place.

    Arguments:
        paths (List[str]): List of paths.
        max_num_workers (int, optional): Number of processes to spawn.
    """
    if attempt >= MAX_OS_RETRIES:
        raise clgen.InternalError("Failed to process files")

    num_workers = min(len(paths), max_num_workers)

    try:
        log.info('spawned', num_workers, 'worker threads to process',
                 len(paths), 'files ...')
        with clgen.terminating(Pool(num_workers)) as pool:
            pool.map(_preprocess_inplace_worker, paths)
    except OSError as e:
        log.error(e)

        # Try again with fewer threads.
        # See: https://github.com/ChrisCummins/clgen/issues/64
        max_num_workers = max(int(max_num_workers / 2), 1)
        preprocess_inplace(paths,
                           max_num_workers=max_num_workers,
                           attempt=attempt + 1)
Example #3
    def cache(self, model: clgen.Model):
        """
        Return sampler cache.

        Parameters
        ----------
        model : clgen.Model
            CLgen model.

        Returns
        -------
        labm8.FSCache
            Cache.
        """
        sampler_model_hash = crypto.sha1_str(self.hash + model.hash)

        cache = clgen.mkcache("sampler", sampler_model_hash)

        # validate metadata against cache
        self.stats = {
            "time": 0,
            "progress": 0
        }
        meta = deepcopy(self.to_json())
        if cache.get("META"):
            cached_meta = jsonutil.read_file(cache["META"])

            if "stats" in cached_meta:
                self.stats = cached_meta["stats"]
                del cached_meta["stats"]

            if "created" in cached_meta["sampler"]:
                del cached_meta["sampler"]["created"]
            del meta["sampler"]["created"]

            if "min_samples" in cached_meta["sampler"]:
                del cached_meta["sampler"]["min_samples"]
            del meta["sampler"]["min_samples"]

            if "min_kernels" in cached_meta["sampler"]:
                del cached_meta["sampler"]["min_kernels"]
            del meta["sampler"]["min_kernels"]

            if meta != cached_meta:
                raise clgen.InternalError("sampler metadata mismatch")
        else:
            self._flush_meta(cache)

        return cache
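
The metadata check above follows a general pattern: strip volatile keys (timestamps, counters, limits) from both the cached and the freshly computed metadata, then require the remainder to match exactly. A minimal sketch of that pattern with plain dicts; the helper and key names are illustrative, not clgen's:

from copy import deepcopy


def validate_meta(fresh: dict, cached: dict, volatile=("created", "stats")) -> None:
    """Raise if two metadata dicts differ outside of their volatile keys."""
    fresh, cached = deepcopy(fresh), deepcopy(cached)
    for key in volatile:
        fresh.pop(key, None)
        cached.pop(key, None)
    if fresh != cached:
        raise ValueError("metadata mismatch")


# differs only in the volatile "created" and "stats" keys, so this passes
validate_meta({"created": 1, "opts": {"seed": 0}, "stats": {}},
              {"created": 2, "opts": {"seed": 0}})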
Example #4
def gpuverify(src: str,
              args: list,
              id: str = 'anon',
              timeout: int = 60) -> str:
    """
    Run GPUverify over kernel.

    Parameters
    ----------
    src : str
        OpenCL source.
    args : list
        Arguments appended to the GPUVerify command line.
    id : str, optional
        OpenCL source name.
    timeout : int, optional
        Seconds to wait before killing GPUVerify.

    Returns
    -------
    str
        OpenCL source.

    Raises
    ------
    GPUVerifyException
        If GPUverify finds a bug.
    InternalError
        If GPUverify fails.
    """
    # FIXME: GPUVerify support on macOS.
    from labm8 import system
    if not system.is_linux():
        raise clgen.InternalError("GPUVerify only supported on Linux!")

    # GPUverify can't read from stdin.
    with NamedTemporaryFile('w', suffix='.cl') as tmp:
        tmp.write(src)
        tmp.flush()
        cmd = ['timeout', '-s9',
               str(timeout), native.GPUVERIFY, tmp.name] + args

        process = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()

    if process.returncode == -9:  # timeout signal
        raise GPUVerifyTimeoutException(
            f"GPUveryify failed to complete with {timeout} seconds")
    elif process.returncode != 0:
        raise GPUVerifyException(stderr.decode('utf-8'))

    return src
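
The same write-to-a-temporary-file-and-verify idiom can be expressed with subprocess.run and its built-in timeout instead of wrapping the call in the external timeout command. A sketch under the assumption of a hypothetical checker binary on PATH; the names here are illustrative:

import subprocess
from tempfile import NamedTemporaryFile


def check_source(src: str, checker: str = "my-checker", timeout: int = 60) -> str:
    """Write src to a temporary .cl file and run an external checker over it."""
    with NamedTemporaryFile("w", suffix=".cl") as tmp:
        tmp.write(src)
        tmp.flush()  # ensure the checker sees the complete file
        try:
            proc = subprocess.run([checker, tmp.name], capture_output=True,
                                  timeout=timeout)
        except subprocess.TimeoutExpired as e:
            raise RuntimeError(
                f"{checker} failed to complete within {timeout} seconds") from e
    if proc.returncode != 0:
        raise RuntimeError(proc.stderr.decode("utf-8"))
    return src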
Example #5
def weighted_pick(weights, temperature, vocab_size):
    """
    Requires that all probabilities are >= 0, i.e.:
      assert all(x >= 0 for x in weights)
    See: https://github.com/ChrisCummins/clgen/issues/120
    """
    # assert all(x >= 0 for x in weights)
    weights -= weights.min()  # shift so the smallest weight is zero
    t = np.cumsum(weights)
    s = np.sum(weights)
    for _ in range(10):
        pick = int(np.searchsorted(t, np.random.rand(1) * s))
        if pick < vocab_size:
            return pick
    raise clgen.InternalError(
        "Weighted pick kept selecting invalid character")
Example #6
def preprocess_inplace(paths: List[str],
                       max_num_workers: int = cpu_count(),
                       max_attempts: int = 100,
                       attempt: int = 1) -> None:
    """
    Preprocess a list of files in place.

    Parameters
    ----------
    paths : List[str]
        List of paths.
    max_num_workers : int, optional
        Number of processes to spawn.
    max_attempts : int, optional
        In case of an OSError or TimeoutError, this number of attempts will be
        made.
    """
    if attempt > max_attempts:
        raise clgen.InternalError(
            f"Failed to process files after {max_attempts} attempts")
    elif attempt > 1:
        log.warning("preprocess attempt #.", attempt)

    num_workers = min(len(paths), max_num_workers)

    try:
        log.info('spawned', num_workers, 'worker threads to process',
                 len(paths), 'files ...')
        with clgen.terminating(Pool(num_workers)) as pool:
            pool.map(_preprocess_inplace_worker, paths)
    except (OSError, TimeoutError) as e:
        log.error(e)

        # Try again with fewer threads.
        # See: https://github.com/ChrisCummins/clgen/issues/64
        max_num_workers = max(int(max_num_workers / 2), 1)
        preprocess_inplace(paths,
                           max_num_workers=max_num_workers,
                           attempt=attempt + 1,
                           max_attempts=max_attempts)
Example #7
    def __init__(self, contentid: str, path: str = None, **opts):
        """
        Instantiate a corpus.

        If this is a new corpus, a number of files will be created, which may
        take some time.

        Parameters
        ----------
        contentid : str
            ID of corpus content.
        path : str, optional
            Path to corpus.
        **opts
            Keyword options.
        """
        # Validate options
        for key in opts.keys():
            if key not in DEFAULT_CORPUS_OPTS:
                raise clgen.UserError(
                    "Unsupported corpus option '{}'. Valid keys: {}".format(
                        key, ','.join(sorted(DEFAULT_CORPUS_OPTS.keys()))))

        self.opts = deepcopy(DEFAULT_CORPUS_OPTS)
        types.update(self.opts, opts)
        self.opts["id"] = contentid

        # check that contentid exists
        self.language = clgen.Language.from_str(opts.get("language"))
        if (path is None and
            not fs.isdir(clgen.cachepath("contentfiles", f"{self.language}-{contentid}"))):
            raise clgen.UserError("corpus {self.language}-{contentid} not found"
                                  .format(**vars()))

        self.contentid = contentid
        self.contentcache = clgen.mkcache("contentfiles", f"{self.language}-{contentid}")
        self.kernels_db = self.contentcache.keypath('kernels.db')

        self.hash = self._hash(contentid, self.opts)
        self.cache = clgen.mkcache("corpus", f"{self.language}-{self.hash}")

        log.debug("contentfiles {self.contentid}".format(**vars()))
        log.debug("corpus {hash}".format(hash=self.hash))

        # validate metadata against cache
        self.stats = {
            "preprocess_time": 0
        }
        meta = deepcopy(self.to_json())
        if self.cache.get("META"):
            cached_meta = jsonutil.read_file(self.cache["META"])
            self.stats = cached_meta["stats"]  # restore stats

            if "created" in cached_meta:
                del cached_meta["created"]
            del meta["created"]

            if "stats" in cached_meta:
                del cached_meta["stats"]
            del meta["stats"]

            if meta != cached_meta:
                raise clgen.InternalError("corpus metadata mismatch")
        else:
            self._flush_meta()

        with self.lock.acquire(replace_stale=True):
            self._create_files(path)
Example #8
def _preprocess_db(db_path: str,
                   max_num_workers: int = cpu_count(),
                   max_attempts: int = 100,
                   attempt: int = 1,
                   **preprocess_opts) -> None:
    """
    Preprocess OpenCL dataset.

    Parameters
    ----------
    db_path : str
        OpenCL kernels dataset.
    max_num_workers : int, optional
        Number of processes to spawn.
    max_attempts : int, optional
        In case of an OSError or TimeoutError, this number of attempts will be
        made.
    """
    if attempt > max_attempts:
        raise clgen.InternalError(
            f"failed to preprocess files after {max_attempts} attempts")

    log.verbose("determining jobs")

    contentfiles = set(dbutil.kernel_ids(db_path, "ContentFiles"))
    preprocessedfiles = set(dbutil.kernel_ids(db_path, "PreprocessedFiles"))

    ncontentfiles = len(contentfiles)
    npreprocessedfiles = len(preprocessedfiles)

    todo = contentfiles - preprocessedfiles
    ntodo = len(todo)

    # check we have something to do
    if not ntodo:
        return

    todo_ratio = ntodo / ncontentfiles

    log.info("{ntodo} ({todo_ratio:.1%}) samples need preprocessing".format(
        **vars()))

    log.verbose("creating jobs")

    # Determine if we need to inline kernels when creating jobs
    db = sqlite3.connect(db_path)
    c = db.cursor()
    c.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='ContentMeta';"
    )
    meta_table = c.fetchone()
    c.close()
    db.close()
    if meta_table:
        get_kernel = lambda kid: dbutil.get_inlined_kernel(
            db_path, kid, lang=preprocess_opts["lang"])
    else:
        get_kernel = lambda kid: dbutil.get_kernel(
            db_path, kid, table="ContentFiles")

    # create jobs
    jobs = [{
        "id": kid,
        "src": get_kernel(kid),
        "preprocess_opts": preprocess_opts,
    } for kid in todo]

    random.shuffle(jobs)

    # split size
    worker_njobs = math.ceil(ntodo / max_num_workers)

    # producer-consumer queue
    queue = Queue(maxsize=128)

    log.verbose(f"assigning {ntodo} jobs to {max_num_workers} threads")

    # Track consumer progress so the error handler below can tell whether any
    # results were processed before a failure.
    i = 0

    try:
        # our worker threads. these busy little bees will do the heavy lifting
        # of preprocessing the contentfiles, pushing their results onto
        # the queue
        producers = [
            PreprocessWorker(jobs[i:i + worker_njobs], queue)
            for i in range(0, ntodo, worker_njobs)
        ]

        # fly, my pretties, fly!
        for producer in producers:
            producer.start()

        # consume the results from the worker threads from the main thread
        for i in progressbar.ProgressBar()(range(ntodo)):
            # pull a fresh result from the queue (block if necessary)
            try:
                result = queue.get(timeout=90)
            except QueueEmpty as e:
                raise TimeoutError('failed to fetch result after 90 seconds. '
                                   'something went wrong') from e

            # insert result into database
            db = dbutil.connect(db_path)
            c = db.cursor()
            c.execute("INSERT INTO PreprocessedFiles VALUES(?,?,?)",
                      (result["id"], result["status"], result["contents"]))
            c.close()
            db.commit()
            db.close()

        for producer in producers:
            producer.join()

    except (OSError, TimeoutError) as e:
        log.error(e)

        if attempt > 2 and not i:
            log.warning("no progress has been made since previous attempt. "
                        "I'm not going to try another attempt.")
            return

        # Try again with fewer threads.
        # See: https://github.com/ChrisCummins/clgen/issues/64
        max_num_workers = max(int(max_num_workers / 2), 1)
        _preprocess_db(db_path,
                       max_num_workers=max_num_workers,
                       attempt=attempt + 1,
                       max_attempts=max_attempts,
                       **preprocess_opts)
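
Stripped of the database plumbing, the loop above is a standard producer-consumer arrangement: worker threads push results onto a bounded queue while the main thread drains it and treats a long silence as a timeout. A minimal sketch with the standard library; the per-job work is a placeholder:

import queue
import threading

jobs = list(range(100))
results = queue.Queue(maxsize=16)  # bounded, so producers block if the consumer stalls


def worker(chunk):
    for job in chunk:
        results.put(job * job)  # placeholder for real preprocessing


chunks = [jobs[i:i + 25] for i in range(0, len(jobs), 25)]
producers = [threading.Thread(target=worker, args=(c,)) for c in chunks]
for p in producers:
    p.start()

consumed = []
for _ in range(len(jobs)):
    try:
        consumed.append(results.get(timeout=90))  # block, but not forever
    except queue.Empty as e:
        raise TimeoutError("no result within 90 seconds") from e

for p in producers:
    p.join()
print(len(consumed), "results consumed")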
Example #9
    def __init__(self, corpus: clgen.Corpus, **opts):
        """
        Instantiate model.

        Parameters
        ----------
        corpus : clgen.Corpus
            Corpus instance.
        **opts
            Training options.
        """
        assert(isinstance(corpus, clgen.Corpus))

        def _hash(corpus: clgen.Corpus, opts: dict) -> str:
            """ compute model hash """
            hashopts = deepcopy(opts)
            del hashopts["created"]
            del hashopts["train_opts"]["epochs"]
            return crypto.sha1_list(corpus.hash, *types.dict_values(hashopts))

        # Validate options
        for key in opts:
            if key not in DEFAULT_MODEL_OPTS:
                raise clgen.UserError(
                    "Unsupported model option '{}'. Valid keys: {}".format(
                        key, ','.join(sorted(DEFAULT_MODEL_OPTS.keys()))))

        # set properties
        self.opts = types.update(deepcopy(DEFAULT_MODEL_OPTS), opts)
        self.corpus = corpus
        self.hash = _hash(self.corpus, self.opts)
        self.cache = clgen.mkcache("model", f"{corpus.language}-{self.hash}")

        log.debug("model", self.hash)

        # validate metadata against cache, and restore stats
        self.stats = {
            "epoch_times": [],
            "epoch_costs": [],
            "epoch_batches": []
        }
        meta = deepcopy(self.to_json())
        if self.cache.get("META"):
            cached_meta = jsonutil.read_file(self.cache["META"])
            self.stats = cached_meta["stats"]  # restore stats

            if "created" in cached_meta:
                del cached_meta["created"]
            del meta["created"]

            if "created" in cached_meta["corpus"]:
                del cached_meta["corpus"]["created"]
            del meta["corpus"]["created"]

            if "stats" in cached_meta:
                del cached_meta["stats"]
            del meta["stats"]

            if "epochs" in cached_meta["train_opts"]:
                del cached_meta["train_opts"]["epochs"]
            del meta["train_opts"]["epochs"]

            if meta != cached_meta:
                log.error("Computed META:", jsonutil.format_json(meta))
                raise clgen.InternalError(
                    "metadata mismatch in model %s" % self.cache["META"])
        else:
            self._flush_meta()
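
The _hash helper above illustrates a common caching trick: derive the cache key from everything that affects the trained result (the corpus hash plus the model options) while excluding fields that may change without invalidating it, such as the creation timestamp. A minimal sketch with hashlib; the helper and option names are illustrative:

import hashlib
import json
from copy import deepcopy


def options_hash(corpus_hash: str, opts: dict, exclude=("created",)) -> str:
    """SHA-1 of the corpus hash plus the option values that matter."""
    opts = deepcopy(opts)
    for key in exclude:
        opts.pop(key, None)
    payload = corpus_hash + json.dumps(opts, sort_keys=True)
    return hashlib.sha1(payload.encode("utf-8")).hexdigest()


print(options_hash("abc123", {"created": 1700000000, "rnn_size": 512, "layers": 2}))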
Example #10
def preprocess_contentfiles(db_path: str,
                            max_num_workers: int = cpu_count(),
                            attempt: int = 1) -> None:
    """
    Preprocess OpenCL dataset.

    Arguments:
        db_path (str): OpenCL kernels dataset.
        max_num_workers (int, optional): Number of processes to spawn.
    """
    def _finalize(db_path, cache):
        """Tidy up after worker threads finish"""
        log.debug("worker finalize")

        db = dbutil.connect(db_path)
        c = db.cursor()

        # import results from worker threads
        for outpath in fs.ls(cache.path, abspaths=True):
            with open(outpath) as infile:
                for line in infile:
                    c.execute(
                        'INSERT OR REPLACE INTO PreprocessedFiles '
                        'VALUES(?,?,?)', json.loads(line))

        # write changes to database and remove cache
        db.commit()
        db.close()
        cache.empty()

    if attempt >= MAX_OS_RETRIES:
        raise clgen.InternalError("failed to preprocess files")

    num_contentfiles = dbutil.num_rows_in(db_path, 'ContentFiles')
    num_preprocessedfiles = dbutil.num_rows_in(db_path, 'PreprocessedFiles')
    log.info("{n} ({r:.1%}) files need preprocessing".format(
        n=num_contentfiles - num_preprocessedfiles,
        r=(num_contentfiles - num_preprocessedfiles) / num_contentfiles))

    # split into multiple jobs of a maximum size
    jobsize = min(512, num_contentfiles)
    numjobs = math.ceil(num_contentfiles / jobsize)
    for j, offset in enumerate(range(0, num_contentfiles, jobsize)):
        num_preprocessedfiles = dbutil.num_rows_in(db_path,
                                                   'PreprocessedFiles')
        num_workers = min(num_contentfiles, max_num_workers)
        files_per_worker = math.ceil(jobsize / num_workers)

        # temporary cache used for worker thread results
        cache = Cache("{pid}.preprocess".format(pid=os.getpid()))
        # each worker thread receives a range of database indices to preprocess,
        # and a JSON file to write results into
        jobs = [{
            "db_in": db_path,
            "db_index_range": (offset + i * files_per_worker,
                               offset + i * files_per_worker + files_per_worker),
            "json_out": fs.path(cache.path, "{i}.json".format(i=i))
        } for i in range(num_workers)]

        # spool up worker threads then finalize
        log.info('job {j} of {numjobs}: spawning {num_workers} worker threads '
                 'to process {jobsize} files ...'.format(**vars()))
        try:
            with clgen.terminating(Pool(num_workers)) as pool:
                pool.map(_preprocess_db_worker, jobs)
        except OSError as e:
            _finalize(db_path, cache)
            log.error(e)

            # Try again with fewer threads.
            # See: https://github.com/ChrisCummins/clgen/issues/64
            max_num_workers = max(int(max_num_workers / 2), 1)
            preprocess_contentfiles(db_path,
                                    max_num_workers=max_num_workers,
                                    attempt=attempt + 1)
        except Exception as e:
            _finalize(db_path, cache)
            raise e
        _finalize(db_path, cache)