Example #1
def run_synthesis_eval(cmd_args):
    benchmarks: Dict[str, Type[Benchmark]] = discover_benchmarks()
    path_matcher: Pattern = re.compile(cmd_args.path_regex)
    results = []

    model_store: Optional[ModelStore] = None
    if not cmd_args.load_models_on_demand:
        logger.info("Loading models ahead of time")
        path_map = {'function-model': cmd_args.function_model_dir}
        # Argument models are laid out on disk as
        # <arg_model_dir>/<func_name>/<arg_name>/model_best.pickle
        arg_model_paths = glob.glob(cmd_args.arg_model_dir +
                                    '/*/*/model_best.pickle')
        for path in arg_model_paths:
            func_name, arg_name = path.split('/')[-3:-1]
            path_map[func_name, arg_name] = os.path.dirname(path)

        model_store = ModelStore(path_map)
        logger.info("Loaded models")

    for qual_name, benchmark_cls in benchmarks.items():
        if not path_matcher.match(qual_name):
            continue

        try:
            logger.info("Running benchmark {}".format(qual_name))
            with SignalTimeout(seconds=cmd_args.timeout):
                evaluator = NeuralSynthesisEvaluator(benchmark_cls(),
                                                     cmd_args,
                                                     model_store=model_store)
                result = evaluator.run(qual_name)

            results.append(result)
            logger.info("Result for {} : {}".format(qual_name, result))

        except TimeoutError:
            logger.info("Timed out for {}".format(qual_name))
            result = {
                'benchmark': qual_name,
                'num_seqs_explored': {},
                'num_candidates_generated': {},
                'solution_found': False,
                'time': cmd_args.timeout
            }

            results.append(result)

        except Exception as e:
            logger.warning("Failed for {}".format(qual_name))
            logger.exception(e)

    if not cmd_args.load_models_on_demand:
        model_store.close()

    results = pd.DataFrame(results)
    print(results)
    with open(cmd_args.outfile, 'w') as f:
        results.to_csv(f)
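
Example #1 relies on a SignalTimeout context manager that is not defined in this excerpt. A minimal sketch of how such a helper is commonly implemented on Unix with SIGALRM (an assumption about the real implementation, which may differ):

import signal

class SignalTimeout:
    """Raise TimeoutError in the managed block after `seconds` (hypothetical sketch).

    SIGALRM is delivered only to the main thread of a Unix process, and the
    pending alarm is cancelled on exit, so timers do not nest.
    """

    def __init__(self, seconds: int):
        self.seconds = seconds

    def _handle_alarm(self, signum, frame):
        raise TimeoutError("timed out after {}s".format(self.seconds))

    def __enter__(self):
        self._old_handler = signal.signal(signal.SIGALRM, self._handle_alarm)
        signal.alarm(self.seconds)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, self._old_handler)
        return False  # let TimeoutError propagate to the caller's except block
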
Example #2
def run_synthesis_eval(cmd_args):
    benchmarks: Dict[str, Type[Benchmark]] = discover_benchmarks()
    path_matcher: Pattern = re.compile(cmd_args.path_regex)
    results = []

    for qual_name, benchmark_cls in benchmarks.items():
        if not path_matcher.match(qual_name):
            continue

        try:
            logger.info("Running benchmark {}".format(qual_name))
            result = call_with_timeout(run_synthesis_for_benchmark,
                                       qual_name,
                                       cmd_args,
                                       timeout=cmd_args.timeout)
            results.append(result)
            logger.info("Result for {} : {}".format(qual_name, result))

        except TimeoutError:
            logger.info("Timed out for {}".format(qual_name))
            result = {
                'benchmark': qual_name,
                'num_seqs_explored': {},
                'num_candidates_generated': {},
                'solution_found': False,
                'time': cmd_args.timeout
            }

            results.append(result)

        except Exception as e:
            logger.warning("Failed for {}".format(qual_name))
            logger.exception(e)

    results = pd.DataFrame(results)
    print(results)
    with open(cmd_args.outfile, 'w') as f:
        results.to_csv(f)
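
Example #2 delegates the deadline to a call_with_timeout helper that is likewise not defined here. A plausible process-based sketch matching the call site (hypothetical; the project's actual helper may work differently): running the benchmark in a child process lets even non-cooperative code be interrupted, at the cost of requiring the callable and its arguments to be picklable.

import multiprocessing as mp

def _invoke(queue, fn, args):
    # Runs in the child process: ship back either the result or the exception.
    try:
        queue.put(('ok', fn(*args)))
    except Exception as e:
        queue.put(('err', e))

def call_with_timeout(fn, *args, timeout=None):
    queue = mp.Queue()
    proc = mp.Process(target=_invoke, args=(queue, fn, args))
    proc.start()
    proc.join(timeout)
    if proc.is_alive():
        proc.terminate()  # the child ignored the deadline; kill it
        proc.join()
        raise TimeoutError("timed out after {}s".format(timeout))
    status, payload = queue.get()  # caveat: blocks if the child died without reporting
    if status == 'err':
        raise payload
    return payload
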
Example #3
def run_generator_model_eval(cmd_args: ArgNamespace):
    benchmarks: Dict[str, Type[Benchmark]] = discover_benchmarks()
    path_matcher: Pattern = re.compile(cmd_args.path_regex)
    results = []
    for qual_name, benchmark_cls in benchmarks.items():
        if not path_matcher.match(qual_name):
            continue

        try:
            logger.info("Running benchmark {}".format(qual_name))
            benchmark = benchmark_cls()
            evaluator = GeneratorModelEvaluator(benchmark, cmd_args)
            results.append(evaluator.run(qual_name))
            logger.info("Result for {} : {}".format(qual_name, results[-1]))

        except Exception as e:
            logger.warning("Failed for {}".format(qual_name))
            logger.exception(e)

    results = pd.DataFrame(results)
    print(results)
    with open(cmd_args.outfile, 'w') as f:
        results.to_csv(f)
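
All three drivers serialize their per-benchmark result dicts through a pandas DataFrame and write them with to_csv, so a finished run can be reloaded for analysis. A small usage sketch (the file name is an assumption, and solution_found is the column the synthesis runs of Examples #1 and #2 record):

import pandas as pd

df = pd.read_csv('results.csv', index_col=0)  # index_col=0: to_csv wrote the row index
print(df[df['solution_found']])               # benchmarks where a solution was found
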
Example #4
def run_synthesis_for_benchmark(qual_name: str, cmd_args: ArgNamespace):
    benchmarks: Dict[str, Type[Benchmark]] = discover_benchmarks()
    benchmark_cls = benchmarks[qual_name]
    benchmark = benchmark_cls()
    evaluator = NeuralSynthesisEvaluator(benchmark, cmd_args)
    return evaluator.run(qual_name)
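
Example #4 is the unit of work that Example #2 hands to call_with_timeout; keeping it a module-level function matters when the timeout helper runs it in a child process, since multiprocessing has to pickle the callable. A hypothetical invocation (the qualified benchmark name is invented for illustration):

result = call_with_timeout(run_synthesis_for_benchmark,
                           'benchmarks.pandas_ops.SortValues',  # hypothetical name
                           cmd_args,
                           timeout=60)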