Example #1
def test_enable_disable():
    assert not prof.is_enabled()
    prof.disable()
    assert not prof.is_enabled()
    prof.enable()
    assert prof.is_enabled()
    prof.disable()
    assert not prof.is_enabled()
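The enable/disable calls in Example #1 toggle a single global flag, so a test that flips it should restore the prior state. Below is a minimal sketch of such a cleanup, assuming pytest and that the profiling helper shown in these examples is importable as `prof`; the fixture name is illustrative, not part of the original code.

import pytest

import prof  # assumption: the profiling module used throughout these examples


@pytest.fixture
def restore_prof_state():
    # Snapshot the global profiling flag, run the test, then put it back.
    was_enabled = prof.is_enabled()
    yield
    if was_enabled:
        prof.enable()
    else:
        prof.disable()


def test_toggle(restore_prof_state):
    prof.enable()
    assert prof.is_enabled()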
Example #2
def DoFlagsAction():
    """Do the action requested by the command line flags."""
    if not FLAGS.config:
        raise app.UsageError("Missing required argument: '--config'")
    config_path = pathlib.Path(FLAGS.config)
    if not config_path.is_file():
        raise app.UsageError(f"File not found: '{config_path}'")
    config = pbutil.FromFile(config_path, clgen_pb2.Instance())
    os.environ['PWD'] = str(config_path.parent)

    if FLAGS.clgen_profiling:
        prof.enable()

    instance = Instance(config)
    with instance.Session():
        if FLAGS.print_cache_path == 'corpus':
            print(instance.model.corpus.cache.path)
            return
        elif FLAGS.print_cache_path == 'model':
            print(instance.model.cache.path)
            return
        elif FLAGS.print_cache_path == 'sampler':
            print(instance.model.SamplerCache(instance.sampler))
            return
        elif FLAGS.print_cache_path:
            raise app.UsageError(
                f"Invalid --print_cache_path argument: '{FLAGS.print_cache_path}'"
            )

        if FLAGS.print_preprocessed:
            print(instance.model.corpus.GetTextCorpus(shuffle=False))
            return

        # The default action is to sample the model.
        if FLAGS.stop_after == 'corpus':
            instance.model.corpus.Create()
        elif FLAGS.stop_after == 'train':
            instance.model.Train()
            logging.info('Model: %s', instance.model.cache.path)
        elif FLAGS.stop_after:
            raise app.UsageError(
                f"Invalid --stop_after argument: '{FLAGS.stop_after}'")
        elif FLAGS.export_model:
            instance.model.Train()
            export_dir = pathlib.Path(FLAGS.export_model)
            for path in instance.model.InferenceManifest():
                relpath = pathlib.Path(
                    os.path.relpath(path, instance.model.cache.path))
                (export_dir / relpath.parent).mkdir(parents=True,
                                                    exist_ok=True)
                shutil.copyfile(path, export_dir / relpath)
                print(export_dir / relpath)
        else:
            instance.model.Sample(instance.sampler, FLAGS.min_samples)
Example #3
def DoFlagsAction():
  """Do the action requested by the command line flags."""
  config = ConfigFromFlags()

  if FLAGS.clgen_profiling:
    prof.enable()

  instance = Instance(config)
  with instance.Session():
    if FLAGS.print_cache_path == 'corpus':
      print(instance.model.corpus.cache.path)
      return
    elif FLAGS.print_cache_path == 'model':
      print(instance.model.cache.path)
      return
    elif FLAGS.print_cache_path == 'sampler':
      print(instance.model.SamplerCache(instance.sampler))
      return
    elif FLAGS.print_cache_path:
      raise app.UsageError(
          f"Invalid --print_cache_path argument: '{FLAGS.print_cache_path}'")

    # The default action is to sample the model.
    if FLAGS.stop_after == 'corpus':
      instance.model.corpus.Create()
    elif FLAGS.stop_after == 'train':
      instance.model.Train()
      app.Log(1, 'Model: %s', instance.model.cache.path)
    elif FLAGS.stop_after:
      raise app.UsageError(
          f"Invalid --stop_after argument: '{FLAGS.stop_after}'")
    elif FLAGS.export_model:
      instance.ExportPretrainedModel(pathlib.Path(FLAGS.export_model))
    else:
      sample_observers = SampleObserversFromFlags()
      instance.Sample(sample_observers)
Example #4
def main(self, args: List[str] = sys.argv[1:]):
    """
    A deep learning program generator for the OpenCL programming language.

    The core operations of CLgen are:

       1. OpenCL files are collected from a model specification file.
       2. These files are preprocessed into an OpenCL kernel database.
       3. A training corpus is generated from the input files.
       4. A machine learning model is trained on the corpus of files.
       5. The trained model is sampled for new kernels.
       6. The samples are tested for compilability.

    This program automates the execution of all six stages of the pipeline.
    The pipeline can be interrupted and resumed at any time. Results are cached
    across runs. If installed with CUDA support, NVIDIA GPUs will be used to
    improve performance where possible.
    """
    parser = ArgumentParser(
        prog="clgen",
        description=inspect.getdoc(self),
        epilog="""
For information about a specific command, run `clgen <command> --help`.

""" + __help_epilog__,
        formatter_class=RawDescriptionHelpFormatter)

    # TODO:
    # parser.add_argument(
    #     "-l", "--lang", metavar="<language>",
    #     help="programming language (default: OpenCL)")
    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="increase output verbosity")
    parser.add_argument(
        "--version", action="store_true",
        help="show version information and exit")
    parser.add_argument(
        "--debug", action="store_true",
        help="in case of error, print debugging information")
    parser.add_argument(
        "--profile", action="store_true",
        help=("enable internal API profiling. When combined with --verbose, "
              "prints a complete profiling trace"))

    parser.add_argument(
        "--corpus-dir", metavar="<corpus>",
        type=FileType("r"),
        help="print path to corpus cache")
    parser.add_argument(
        "--model-dir", metavar="<model>",
        type=FileType("r"),
        help="print path to model cache")
    parser.add_argument(
        "--sampler-dir", metavar=("<model>", "<sampler>"),
        type=FileType("r"), nargs=2,
        help="print path to sampler cache")

    subparser = parser.add_subparsers(title="available commands")

    subparsers = [
        _register_test_parser,
        _register_train_parser,
        _register_sample_parser,
        _register_db_parser,
        _register_fetch_parser,
        _register_ls_parser,
        _register_preprocess_parser,
        _register_features_parser,
        _register_atomize_parser,
        _register_cache_parser,
    ]

    for register_fn in subparsers:
        register_fn(subparser)

    args = parser.parse_args(args)

    # set log level
    log.init(args.verbose)

    # set debug option
    if args.debug:
        os.environ["DEBUG"] = "1"

    # set profile option
    if args.profile:
        prof.enable()

    # options which override the normal argument parsing process.
    if args.version:
        version = clgen.version()
        print(f"clgen {version} made with \033[1;31m♥\033[0;0m by "
              "Chris Cummins <*****@*****.**>.")
    elif args.corpus_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.corpus_dir.read()))
        print(model.corpus.cache.path)
    elif args.model_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.model_dir.read()))
        print(model.cache.path)
    elif args.sampler_dir:
        model = clgen.Model.from_json(jsonutil.loads(args.sampler_dir[0].read()))
        sampler = clgen.Sampler.from_json(jsonutil.loads(args.sampler_dir[1].read()))
        print(sampler.cache(model).path)
    else:
        # strip the arguments from the top-level parser
        dispatch_func = args.dispatch_func
        opts = vars(args)
        del opts["version"]
        del opts["verbose"]
        del opts["debug"]
        del opts["profile"]
        del opts["corpus_dir"]
        del opts["model_dir"]
        del opts["sampler_dir"]
        del opts["dispatch_func"]

        run(dispatch_func, **opts)
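The docstring in Example #4 describes CLgen's six-stage pipeline; those stages correspond to the calls that appear in Examples #2 and #3. A minimal sketch of driving the full pipeline through that API, assuming `config` is already a parsed clgen_pb2.Instance proto as in Example #2, might look like this:

# Sketch only: mirrors the calls used in Examples #2 and #3.
instance = Instance(config)
with instance.Session():
    instance.model.corpus.Create()  # stages 1-3: collect, preprocess, build the corpus
    instance.model.Train()          # stage 4: train the model on the corpus
    instance.model.Sample(instance.sampler, FLAGS.min_samples)  # stage 5: sample new kernels
    # stage 6 (compilability checks on the samples) is not shown in these examples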
Example #5
def test_enable_disable(self):
    self.assertTrue(prof.is_enabled())
    prof.disable()
    self.assertFalse(prof.is_enabled())
    prof.enable()
    self.assertTrue(prof.is_enabled())
Example #6
def main(self, args: List[str] = sys.argv[1:]):
  """
  Compiler fuzzing through deep learning.
  """
  parser = ArgumentParser(
      prog="dsmith",
      description=inspect.getdoc(self),
      epilog=__help_epilog__,
      formatter_class=RawDescriptionHelpFormatter)

  parser.add_argument(
      "--config", metavar="<path>", type=FileType("r"), dest="rc_path",
      help=f"path to configuration file (default: '{dsmith.RC_PATH}')")
  parser.add_argument(
      "-v", "--verbose", action="store_true",
      help="increase output verbosity")
  parser.add_argument(
      "--debug", action="store_true",
      help="debugging output verbosity")
  parser.add_argument(
      "--db-debug", action="store_true",
      help="additional database debugging output")
  parser.add_argument(
      "--version", action="store_true",
      help="show version information and exit")
  parser.add_argument(
      "--profile", action="store_true",
      help=("enable internal API profiling. When combined with --verbose, "
            "prints a complete profiling trace"))
  parser.add_argument("command", metavar="<command>", nargs="*",
                      help=("command to run. If not given, run an "
                            "interactive prompt"))

  args = parser.parse_args(args)

  # set log level
  if args.debug:
    loglvl = logging.DEBUG
    os.environ["DEBUG"] = "1"

    # verbose stack traces. see: https://pymotw.com/2/cgitb/
    import cgitb
    cgitb.enable(format='text')
  elif args.verbose:
    loglvl = logging.INFO
  else:
    loglvl = logging.WARNING

  # set database log level
  if args.db_debug:
    os.environ["DB_DEBUG"] = "1"

  # configure logger
  logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                      level=loglvl)

  # set profile option
  if args.profile:
    prof.enable()

  # load custom config:
  if args.rc_path:
    path = fs.abspath(args.rc_path.name)
    logging.debug(
        f"loading configuration file '{Colors.BOLD}{path}{Colors.END}'")
    dsmith.init_globals(args.rc_path.name)

  # options which override the normal argument parsing process.
  if args.version:
    print(dsmith.__version_str__)
  else:
    if len(args.command):
      # if a command was given, run it
      run_command(" ".join(args.command))
    else:
      # no command was given, fallback to interactive prompt
      repl()