def _main(model_file: TextIO, sampler_file: TextIO) -> None:
    """Train a model, then draw samples from it using the given sampler."""
    model_json = jsonutil.loads(model_file.read())
    model = clgen.Model.from_json(model_json)

    sampler_json = jsonutil.loads(sampler_file.read())
    sampler = clgen.Sampler.from_json(sampler_json)

    model.train()
    sampler.sample(model)
def _main(model_file: TextIO, sampler_file: TextIO) -> None:
    """Print the cached files of a model and, if given, a sampler."""
    model_json = jsonutil.loads(model_file.read())
    model = clgen.Model.from_json(model_json)

    caches = [model.corpus.cache, model.cache]
    if sampler_file:
        sampler_json = jsonutil.loads(sampler_file.read())
        sampler = clgen.Sampler.from_json(sampler_json)
        caches.append(sampler.cache(model))

    files = sorted(
        types.flatten(c.ls(abspaths=True, recursive=True) for c in caches))
    print('\n'.join(files))
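# The listing above relies on types.flatten to collapse the per-cache file
# listings into one flat sequence before sorting. A minimal sketch of such a
# helper, assuming only one level of nesting is needed (the real clgen
# types.flatten may be more general):
from typing import Iterable, List, TypeVar

T = TypeVar("T")


def flatten(nested: Iterable[Iterable[T]]) -> List[T]:
    """Collapse an iterable of iterables into a single flat list."""
    return [item for inner in nested for item in inner]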
def test_loads():
    a_str = """{
        "a": 1,  // this has comments
        "b": [1, 2, 3]
    }  # end comment
    // begin with comment
    """
    a = jsonutil.loads(a_str)
    assert a == {'a': 1, 'b': [1, 2, 3]}
def test_loads(self):
    a_str = """{
        "a": 1,  // this has comments
        "b": [1, 2, 3]
    }  # end comment
    // begin with comment
    """
    a = jsonutil.loads(a_str)
    self.assertEqual(a["a"], 1)
    self.assertEqual(a["b"], [1, 2, 3])
    self.assertNotIn("c", a)
def test_loads_malformed(self):
    a_str = """bad json {asd,,}"""
    with self.assertRaises(ValueError):
        jsonutil.loads(a_str)
def main(self, args: List[str] = sys.argv[1:]):
    """
    A deep learning program generator for the OpenCL programming language.

    The core operations of CLgen are:

       1. OpenCL files are collected from a model specification file.
       2. These files are preprocessed into an OpenCL kernel database.
       3. A training corpus is generated from the input files.
       4. A machine learning model is trained on the corpus of files.
       5. The trained model is sampled for new kernels.
       6. The samples are tested for compilability.

    This program automates the execution of all six stages of the pipeline.
    The pipeline can be interrupted and resumed at any time. Results are
    cached across runs. If installed with CUDA support, NVIDIA GPUs will be
    used to improve performance where possible.
    """
    parser = ArgumentParser(
        prog="clgen",
        description=inspect.getdoc(self),
        epilog="""
For information about a specific command, run `clgen <command> --help`.
""" + __help_epilog__,
        formatter_class=RawDescriptionHelpFormatter)

    # TODO:
    # parser.add_argument(
    #     "-l", "--lang", metavar="<language>",
    #     help="programming language (default: OpenCL)")

    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="increase output verbosity")
    parser.add_argument(
        "--version", action="store_true",
        help="show version information and exit")
    parser.add_argument(
        "--debug", action="store_true",
        help="in case of error, print debugging information")
    parser.add_argument(
        "--profile", action="store_true",
        help=("enable internal API profiling. When combined with --verbose, "
              "prints a complete profiling trace"))
    parser.add_argument(
        "--corpus-dir", metavar="<corpus>", type=FileType("r"),
        help="print path to corpus cache")
    parser.add_argument(
        "--model-dir", metavar="<model>", type=FileType("r"),
        help="print path to model cache")
    parser.add_argument(
        "--sampler-dir", metavar=("<model>", "<sampler>"),
        type=FileType("r"), nargs=2,
        help="print path to sampler cache")

    subparser = parser.add_subparsers(title="available commands")

    subparsers = [
        _register_test_parser,
        _register_train_parser,
        _register_sample_parser,
        _register_db_parser,
        _register_fetch_parser,
        _register_ls_parser,
        _register_preprocess_parser,
        _register_features_parser,
        _register_atomize_parser,
        _register_cache_parser,
    ]

    for register_fn in subparsers:
        register_fn(subparser)

    args = parser.parse_args(args)

    # set log level
    log.init(args.verbose)

    # set debug option
    if args.debug:
        os.environ["DEBUG"] = "1"

    # set profile option
    if args.profile:
        prof.enable()

    # options which override the normal argument parsing process.
    if args.version:
        version = clgen.version()
        print(f"clgen {version} made with \033[1;31m♥\033[0;0m by "
              "Chris Cummins <*****@*****.**>.")
    elif args.corpus_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.corpus_dir.read()))
        print(model.corpus.cache.path)
    elif args.model_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.model_dir.read()))
        print(model.cache.path)
    elif args.sampler_dir:
        model = clgen.Model.from_json(
            jsonutil.loads(args.sampler_dir[0].read()))
        sampler = clgen.Sampler.from_json(
            jsonutil.loads(args.sampler_dir[1].read()))
        print(sampler.cache(model).path)
    else:
        # strip the arguments from the top-level parser
        dispatch_func = args.dispatch_func
        opts = vars(args)
        del opts["version"]
        del opts["verbose"]
        del opts["debug"]
        del opts["profile"]
        del opts["corpus_dir"]
        del opts["model_dir"]
        del opts["sampler_dir"]
        del opts["dispatch_func"]

        run(dispatch_func, **opts)
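# main() assumes that every _register_*_parser function binds a dispatch_func
# default on its subcommand parser; that is what the final
# run(dispatch_func, **opts) call invokes once the top-level options have been
# stripped. A hypothetical sketch of that convention for a `train` subcommand
# (the real registration functions and their options may differ):
from argparse import FileType


def _register_train_parser_sketch(subparser) -> None:
    """Illustrative registration of a `train` subcommand."""
    parser = subparser.add_parser("train", help="train a model")
    parser.add_argument(
        "model_file", metavar="<model>", type=FileType("r"),
        help="path to model specification file")
    # Each subcommand points dispatch_func at its own command entry point,
    # e.g. the train _main(model_file) shown below.
    parser.set_defaults(dispatch_func=_main)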
def _main(model_file: TextIO) -> None:
    """Train a model from a model specification file."""
    model_json = jsonutil.loads(model_file.read())
    model = clgen.Model.from_json(model_json)
    model.train()
    log.info("done.")
def test_loads_malformed():
    a_str = """bad json {asd,,}"""
    with pytest.raises(ValueError):
        jsonutil.loads(a_str)
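# The jsonutil tests above pin down the behaviour of jsonutil.loads: it
# accepts both `//` and `#` line comments and raises ValueError on malformed
# input. A minimal sketch of a comment-stripping loads(), assuming comment
# markers never appear inside string values (the real clgen jsonutil is more
# robust than this):
import json
import re

_LINE_COMMENT_RE = re.compile(r'\s*(#|//).*$')


def loads_sketch(text: str):
    """Parse JSON after stripping `//` and `#` line comments."""
    stripped = '\n'.join(
        _LINE_COMMENT_RE.sub('', line) for line in text.splitlines())
    # json.JSONDecodeError subclasses ValueError, so malformed input raises
    # ValueError, as test_loads_malformed() expects.
    return json.loads(stripped)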