def main():
    import sys

    log.init(verbose=True)

    # Load the model described by the JSON file given on the command line.
    m = model.from_json(clgen.load_json_file(sys.argv[1]))

    # Sampler configuration: fixed kernel argument types, a single batch of
    # 1000 samples, no static or dynamic checking.
    s = sampler.from_json({
        "kernels": {
            "args": [
                "__global float*",
                "__global float*",
                "__global float*",
                "const int"
            ],
            "max_length": 5000,
            "temperature": 1
        },
        "sampler": {
            "batch_size": 1000,
            "max_batches": 1,
            "static_checker": False,
            "dynamic_checker": False
        }
    })

    print("Corpus size:", m.corpus.size)
    print("Vocab size: ", m.corpus.vocab_size)
    print()
    clgen.platform_info()
    print()

    # Evaluate the model with the sampler and write the results to file.
    outpath = "./benchmark-" + fs.basename(sys.argv[1])
    info = evaluate(m, s)
    clgen.write_file(outpath, clgen.format_json(info))

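# Invocation sketch (assumed; the script name below is hypothetical, not
# taken from this file). The script expects a model description JSON as its
# only argument:
#
#   $ python ./benchmark.py model.json
#
# which, per the code above, writes the results to ./benchmark-model.json.
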
def main(): log.init(verbose=True) m = model.from_json(clgen.load_json_file(sys.argv[1])) c = corpus.Corpus.from_json({"path": "~/data/github"}) print("CLgen: ", clgen.version()) print("Corpus size:", c.size) print("Vocab size: ", c.vocab_size) m.train() p, _ = corpus.most_common_prototypes(c, 20) for i, row in enumerate(p): outpath = "./inference-p" + str(i + 1) + "-" + fs.basename(sys.argv[1]) if fs.exists(outpath): continue _, prototype = row argspec = [' '.join(x.split()[:-1]) for x in prototype.split(',')] print("argspec", ','.join([str(x) for x in argspec])) s = sampler.from_json({ "kernels": { "args": argspec, "max_length": 5000 }, "sampler": { "batch_size": 2000, "max_batches": 1, "static_checker": False, "dynamic_checker": False } }) info = evaluate(m, s) clgen.write_file(outpath, clgen.format_json(info))
def to_dist(self, distpath: str, author: str = None) -> str:
    """
    Create a dist file.

    Arguments:
        distpath (str): Path to dist file.
        author (str, optional): Author name.

    Returns:
        str: Path to generated distfile.
    """
    outpath = fs.abspath(distpath) + ".tar.bz2"
    if fs.exists(outpath):
        raise DistError("file {} exists".format(outpath))

    meta = self.meta
    if author is not None:
        meta["author"] = author
    log.debug(clgen.format_json(meta))

    try:
        tar = tarfile.open(outpath, 'w:bz2')

        # write meta
        metapath = mktemp(prefix="clgen-", suffix=".json")
        clgen.write_file(metapath, clgen.format_json(meta))
        log.debug("metafile:", metapath)

        # create tarball
        tar.add(metapath, arcname="meta.json")

        # pack contents:
        for path in meta["contents"]:
            abspath = fs.path(cache.ROOT, path)
            log.verbose("packing", abspath)
            tar.add(abspath, arcname=fs.path("contents", path))

        # tidy up
        fs.rm(metapath)
        tar.close()
    except Exception as e:
        tar.close()
        fs.rm(metapath)
        fs.rm(outpath)
        raise e

    return outpath

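# A minimal usage sketch for to_dist(), assuming `self` is a cached CLgen
# object exposing a `meta` dict with a "contents" list of cached paths; the
# `Model` name and paths below are hypothetical, not taken from this file:
#
#   m = Model(...)
#   dist = m.to_dist("./my-model", author="Jane Doe")
#   # dist == "/abs/path/to/my-model.tar.bz2"; a DistError is raised if the
#   # archive already exists.
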
def main(): log.init(verbose=True) m = model.from_json(clgen.load_json_file(sys.argv[1])) c = corpus.Corpus.from_json({"path": "~/data/github"}) print("CLgen: ", clgen.version()) print("Corpus size:", c.size) print("Vocab size: ", c.vocab_size) m.train() p, _ = corpus.most_common_prototypes(c, 20) for i, row in enumerate(p): outpath = "./inference-p" + str(i + 1) + "-" + fs.basename(sys.argv[1]) if fs.exists(outpath): print("skipped result for", outpath) continue else: print("starting result for", outpath) _, prototype = row argspec = [' '.join(x.split()[:-1]) for x in prototype.split(',')] print("argspec", ','.join([str(x) for x in argspec])) s = sampler.from_json({ "kernels": { "args": argspec, "max_length": 5000 }, "sampler": { "batch_size": 2000, "max_batches": 1, "static_checker": False, "dynamic_checker": False } }) info = evaluate(m, s) clgen.write_file(outpath, clgen.format_json(info))
def write_log(log, logpath):
    clgen.write_file(logpath, clgen.format_json(log))

def __repr__(self) -> str:
    """
    String representation.
    """
    return "{hash}: {data}".format(hash=self.hash,
                                   data=clgen.format_json(self.opts))