def run(self, qual_name: str):
    """Evaluate the function-sequence prediction model on one benchmark.

    Builds an IOSpec from the benchmark, featurizes the input/output
    example into a relation graph, asks the model for its top-k predicted
    function sequences, and reports the rank of the ground truth.

    Args:
        qual_name: Fully qualified benchmark name, used only for reporting.

    Returns:
        Dict with the benchmark name, the ground-truth sequence string
        (function names joined by ':'), and its 1-based rank among the
        predictions, or -1 if the ground truth is not in the top-k.
    """
    inputs, output, funcs, seqs = self.benchmark.unwrap()
    iospec: IOSpec = IOSpec(inputs, output)
    iospec.funcs = funcs
    iospec.seqs = seqs

    model = RelGraphInterface.from_model_dir(self.model_dir)

    # The featurization scheme changed between versions; pick the one
    # matching the model being evaluated.
    if self.cmd_args.use_old_featurization:
        from autopandas_v2.ml.featurization_old.featurizer import RelationGraph
        from autopandas_v2.ml.featurization_old.options import GraphOptions
    else:
        from autopandas_v2.ml.featurization.featurizer import RelationGraph
        from autopandas_v2.ml.featurization.options import GraphOptions

    options = GraphOptions()
    graph: RelationGraph = RelationGraph(options)
    graph.from_input_output(inputs, output)
    encoding = graph.get_encoding(get_mapping=False)

    # Probabilities are not needed here, only the ranked sequence strings
    # (the original bound them to an unused local).
    str_seqs, _ = zip(*model.predict_graphs([encoding], top_k=self.top_k)[0])

    # NOTE(review): only the first ground-truth sequence (seqs[0]) is used
    # when computing the rank — confirm this matches intent when a
    # benchmark admits multiple ground-truth sequences.
    ground_truth = ':'.join(funcs[i] for i in seqs[0])

    # 1-based rank of the ground truth among the predictions; -1 if absent.
    pos = next((idx for idx, pred in enumerate(str_seqs, 1)
                if pred == ground_truth), -1)

    return {
        'benchmark': qual_name,
        'ground_truth': ground_truth,
        'rank': pos
    }
def run(self, qual_name: str):
    """Run the neural synthesis engine on one benchmark and collect stats.

    Args:
        qual_name: Fully qualified benchmark name, used only for reporting.

    Returns:
        Dict with the benchmark name, search statistics, whether a
        solution was found, and wall-clock time spent searching.
    """
    inputs, output, funcs, seqs = self.benchmark.unwrap()

    spec: IOSpec = IOSpec(inputs, output)
    spec.funcs = funcs
    spec.seqs = seqs

    engine: NeuralEngine = NeuralEngine(spec, self.function_model_dir,
                                        top_k=self.cmd_args.top_k_function)
    # Bound the search by the longest known ground-truth sequence.
    engine.max_depth = max(map(len, seqs))
    engine.stop_first_solution = True
    engine.use_spec_funcs = True
    engine.use_spec_seqs = True
    engine.argument_engine = 'beam-search'
    engine.arg_model_dir = self.arg_model_dir
    engine.arg_top_k = self.cmd_args.top_k_args
    engine.use_old_featurization = self.cmd_args.use_old_featurization

    started = time.time()
    found = engine.search()
    elapsed = time.time() - started

    return {
        'benchmark': qual_name,
        'num_seqs_explored': engine.stats.num_seqs_explored,
        'num_candidates_generated': dict(engine.stats.num_cands_generated),
        'solution_found': found,
        'time': elapsed
    }
def __init__(self, func_seq: List[BaseGenerator], cmd_args: ArgNamespace = None):
    """Initialize the engine for a fixed, known generator sequence.

    An empty IOSpec (no inputs, no output) is passed to the base class;
    the search depth is pinned to the length of the given sequence.

    Args:
        func_seq: The exact sequence of generators to explore.
        cmd_args: Optional parsed command-line arguments.
    """
    super().__init__(IOSpec(inputs=[], output=None), cmd_args)
    self.func_seq = func_seq
    self.max_depth = len(func_seq)
    self.stats = None
def check(self, inputs: List[Any], output: Any, funcs: List[str],
          seqs: List[List[int]], model_dir: str):
    """Configure a BFS engine for the given I/O spec and validate it.

    Args:
        inputs: Input values of the example.
        output: Expected output value.
        funcs: Function names referenced by `seqs`.
        seqs: Ground-truth sequences as lists of indices into `funcs`.
        model_dir: Directory of the argument-prediction model used by the
            beam-search argument engine.

    Returns:
        Whatever `self.check_engine` reports for the configured engine.
    """
    iospec: IOSpec = IOSpec(inputs, output)
    iospec.funcs = funcs
    iospec.seqs = seqs

    engine: BFSEngine = BFSEngine(iospec)
    # Bound the search by the longest known ground-truth sequence.
    engine.max_depth = max(len(i) for i in seqs)
    engine.silent = True
    engine.stop_first_solution = True
    engine.use_spec_funcs = True
    engine.use_spec_seqs = True
    engine.argument_engine = 'beam-search'
    engine.arg_model_dir = model_dir

    # (Removed a block of commented-out try/except code around
    # engine.search(); check_engine is responsible for running the engine.)
    return self.check_engine(engine)
def run(self, qual_name: str):
    """Run the (silent) BFS synthesis engine on one benchmark.

    Args:
        qual_name: Fully qualified benchmark name, used only for reporting.

    Returns:
        Dict with the benchmark name, candidate-generation statistics, and
        whether a solution was found.
    """
    inputs, output, funcs, seqs = self.benchmark.unwrap()

    spec: IOSpec = IOSpec(inputs, output)
    spec.funcs = funcs
    spec.seqs = seqs

    engine: BFSEngine = BFSEngine(spec)
    # Bound the search by the longest known ground-truth sequence.
    engine.max_depth = max(map(len, seqs))
    engine.silent = True
    engine.stop_first_solution = True
    engine.use_spec_funcs = True
    engine.use_spec_seqs = True
    engine.argument_engine = 'beam-search'
    engine.arg_model_dir = self.model_dir

    found = engine.search()

    return {
        'benchmark': qual_name,
        'num_candidates_generated': dict(engine.stats.num_cands_generated),
        'solution_found': found
    }
def run(self, qual_name: str):
    """Run the configured synthesis engine on one benchmark.

    Currently only the 'neural' engine is supported; any other value of
    `cmd_args.engine` raises.

    Args:
        qual_name: Fully qualified benchmark name, used only for reporting.

    Returns:
        Dict with the benchmark name, ground-truth depth, search
        statistics, whether a solution was found, the first solution's
        repr (empty string if none), and wall-clock search time.

    Raises:
        Exception: If `cmd_args.engine` is not recognized.
    """
    inputs, output, funcs, seqs = self.benchmark.unwrap()

    spec: IOSpec = IOSpec(inputs, output)
    spec.funcs = funcs
    spec.seqs = seqs

    # Guard clause: bail out on any engine other than 'neural'.
    if self.cmd_args.engine != 'neural':
        raise Exception("Engine {} not recognized".format(
            self.cmd_args.engine))

    engine: NeuralEngine = NeuralEngine(
        spec, self.function_model_dir, top_k=self.cmd_args.top_k_function)
    # Bound the search by the longest known ground-truth sequence.
    engine.max_depth = max(map(len, seqs))
    engine.stop_first_solution = True
    engine.use_spec_funcs = True
    engine.use_spec_seqs = True
    engine.argument_engine = 'beam-search'
    engine.arg_model_dir = self.arg_model_dir
    engine.arg_top_k = self.cmd_args.top_k_args
    engine.use_old_featurization = self.cmd_args.use_old_featurization
    engine.model_store = self.model_store

    started = time.time()
    found = engine.search()
    elapsed = time.time() - started

    return {
        'benchmark': qual_name,
        'ground_truth_depth': min(map(len, seqs)),
        'num_seqs_explored': engine.stats.num_seqs_explored,
        'num_candidates_generated': dict(engine.stats.num_cands_generated),
        'solution_found': found,
        'solution': repr(str(engine.solutions[0])) if found else '',
        'time': elapsed
    }