def train(self, output_dir):
    # `train_dataset` and `test_dataset` are defined at module scope in this test file.
    with tempfile.TemporaryDirectory() as tmpdir:
        loss_fn = nn.Sequential(OrderedDict([
            ("loss",
             Apply(
                 module=Loss(),
                 in_keys=[
                     "rule_probs",
                     "token_probs",
                     "reference_probs",
                     "ground_truth_actions",
                 ],
                 out_key="action_sequence_loss",
             )),
            ("pick", mlprogram.nn.Function(Pick("action_sequence_loss")))
        ]))
        collate = Collate(
            word_nl_query=CollateOptions(True, 0, -1),
            nl_query_features=CollateOptions(True, 0, -1),
            reference_features=CollateOptions(True, 0, -1),
            actions=CollateOptions(True, 0, -1),
            previous_actions=CollateOptions(True, 0, -1),
            previous_action_rules=CollateOptions(True, 0, -1),
            history=CollateOptions(False, 1, 0),
            hidden_state=CollateOptions(False, 0, 0),
            state=CollateOptions(False, 0, 0),
            ground_truth_actions=CollateOptions(True, 0, -1)
        ).collate
        qencoder, aencoder = self.prepare_encoder(train_dataset, Parser())
        transform = Map(self.transform_cls(qencoder, aencoder, Parser()))
        model = self.prepare_model(qencoder, aencoder)
        optimizer = self.prepare_optimizer(model)
        train_supervised(
            tmpdir, output_dir,
            train_dataset, model, optimizer, loss_fn,
            EvaluateSynthesizer(
                test_dataset,
                self.prepare_synthesizer(model, qencoder, aencoder),
                {"accuracy": Accuracy()},
                top_n=[5]),
            "accuracy@5",
            lambda x: collate(transform(x)),
            1, Epoch(100),
            evaluation_interval=Epoch(100),
            snapshot_interval=Epoch(100),
            threshold=1.0)
    return qencoder, aencoder
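# Sketch of the batch pipeline handed to train_supervised above (the variable
# names are illustrative): Map applies the per-example transform to each
# Environment in a mini-batch, and collate pads and stacks the results.
#
#   batch_fn = lambda x: collate(transform(x))
#   batch = batch_fn(list_of_environments)  # padded tensor batch for the model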
def prepare_synthesizer(self, model, qencoder, aencoder):
    transform_input = Compose(OrderedDict([
        ("extract_reference",
         Apply(module=mlprogram.nn.Function(tokenize),
               in_keys=[["text_query", "str"]],
               out_key="reference")),
        ("encode_query",
         Apply(module=EncodeWordQuery(qencoder),
               in_keys=["reference"],
               out_key="word_nl_query"))
    ]))
    transform_action_sequence = Compose(OrderedDict([
        ("add_previous_action",
         Apply(
             module=AddPreviousActions(aencoder, n_dependent=1),
             in_keys=["action_sequence", "reference"],
             constants={"train": False},
             out_key="previous_actions",
         )),
        ("add_action",
         Apply(
             module=AddActions(aencoder, n_dependent=1),
             in_keys=["action_sequence", "reference"],
             constants={"train": False},
             out_key="actions",
         )),
        ("add_state", AddState("state")),
        ("add_hidden_state", AddState("hidden_state")),
        ("add_history", AddState("history"))
    ]))
    collate = Collate(
        word_nl_query=CollateOptions(True, 0, -1),
        nl_query_features=CollateOptions(True, 0, -1),
        reference_features=CollateOptions(True, 0, -1),
        actions=CollateOptions(True, 0, -1),
        previous_actions=CollateOptions(True, 0, -1),
        previous_action_rules=CollateOptions(True, 0, -1),
        history=CollateOptions(False, 1, 0),
        hidden_state=CollateOptions(False, 0, 0),
        state=CollateOptions(False, 0, 0),
        ground_truth_actions=CollateOptions(True, 0, -1)
    )
    # Beam search over action sequences: beam size 5, at most 20 decoding steps.
    return BeamSearch(
        5, 20,
        ActionSequenceSampler(aencoder, is_subtype, transform_input,
                              transform_action_sequence, collate, model))
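# Hypothetical usage sketch of the synthesizer returned above. Synthesizers in
# this codebase are callables that take an Environment and yield candidate
# Results; the query string and variable names below are illustrative only.
#
#   synthesizer = self.prepare_synthesizer(model, qencoder, aencoder)
#   for result in synthesizer(Environment({"text_query": "reverse the list x"})):
#       print(result.output)  # one candidate program per Result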
def test_rescore(self):
    def transform(state: str) -> Environment:
        return Environment({"x": torch.tensor([int(state)])})

    collate = Collate(x=CollateOptions(False, 0, 0))
    sampler = SamplerWithValueNetwork(MockSampler(), transform, collate,
                                      MockValueNetwork())
    zero = SamplerState(0, sampler.initialize(0))
    samples = list(sampler.batch_k_samples([zero], [3]))
    assert samples == [
        DuplicatedSamplerState(SamplerState(0, "00"), 1),
        DuplicatedSamplerState(SamplerState(1, "01"), 1),
        DuplicatedSamplerState(SamplerState(2, "02"), 1),
    ]
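# In plain terms: MockSampler proposes the successor states "00", "01", and
# "02" from the initial state, and SamplerWithValueNetwork replaces each
# proposal's score with the value network's output on transform(state).
# The mock value network here presumably returns "x" unchanged, so state "0k"
# is rescored to k, matching the expected scores 0, 1, and 2.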
def test_REINFORCESynthesizer():
    synthesizer = MockSynthesizer()
    # Fix the mock model's initial weight so the test is deterministic.
    synthesizer.model.model.weight.data[:] = 10.0
    optimizer = torch.optim.SGD(synthesizer.model.parameters(), 0.1)
    synthesizer = REINFORCESynthesizer(
        synthesizer=synthesizer,
        model=synthesizer.model,
        optimizer=optimizer,
        loss_fn=Loss(),
        reward=Reward(),
        collate=Collate(x=CollateOptions(False, 0, 0)).collate,
        n_rollout=1,
        device=torch.device("cpu"),
        baseline_momentum=0.9,
        max_try_num=1,
    )
    input = Environment({"x": torch.tensor(1.0)})
    # The REINFORCE updates should drive the synthesizer to the desired
    # output within 100 samples.
    for i, x in enumerate(synthesizer(input)):
        assert i < 100
        if x.output == 1:
            break
def prepare_synthesizer(self, model, qencoder, cencoder, aencoder):
    transform_input = Compose(OrderedDict([
        ("extract_reference",
         Apply(module=mlprogram.nn.Function(tokenize),
               in_keys=[["text_query", "str"]],
               out_key="reference")),
        ("encode_word_query",
         Apply(module=EncodeWordQuery(qencoder),
               in_keys=["reference"],
               out_key="word_nl_query")),
        ("encode_char",
         Apply(module=EncodeCharacterQuery(cencoder, 10),
               in_keys=["reference"],
               out_key="char_nl_query"))
    ]))
    transform_action_sequence = Compose(OrderedDict([
        ("add_previous_action",
         Apply(
             module=AddPreviousActions(aencoder),
             in_keys=["action_sequence", "reference"],
             constants={"train": False},
             out_key="previous_actions",
         )),
        ("add_previous_action_rule",
         Apply(
             module=AddPreviousActionRules(aencoder, 4),
             in_keys=["action_sequence", "reference"],
             constants={"train": False},
             out_key="previous_action_rules",
         )),
        ("add_tree",
         Apply(
             module=AddActionSequenceAsTree(aencoder),
             in_keys=["action_sequence", "reference"],
             constants={"train": False},
             out_key=["adjacency_matrix", "depthes"],
         )),
        ("add_query",
         Apply(
             module=AddQueryForTreeGenDecoder(aencoder, 4),
             in_keys=["action_sequence", "reference"],
             constants={"train": False},
             out_key="action_queries",
         ))
    ]))
    collate = Collate(
        word_nl_query=CollateOptions(True, 0, -1),
        char_nl_query=CollateOptions(True, 0, -1),
        nl_query_features=CollateOptions(True, 0, -1),
        reference_features=CollateOptions(True, 0, -1),
        previous_actions=CollateOptions(True, 0, -1),
        previous_action_rules=CollateOptions(True, 0, -1),
        depthes=CollateOptions(False, 1, 0),
        adjacency_matrix=CollateOptions(False, 0, 0),
        action_queries=CollateOptions(True, 0, -1),
        ground_truth_actions=CollateOptions(True, 0, -1)
    )
    return BeamSearch(
        5, 20,
        ActionSequenceSampler(aencoder, is_subtype, transform_input,
                              transform_action_sequence, collate, model))
def reinforce(self, train_dataset, encoder, output_dir):
    with tempfile.TemporaryDirectory() as tmpdir:
        interpreter = self.interpreter()
        collate = Collate(
            test_case_tensor=CollateOptions(False, 0, 0),
            variables_tensor=CollateOptions(True, 0, 0),
            previous_actions=CollateOptions(True, 0, -1),
            hidden_state=CollateOptions(False, 0, 0),
            state=CollateOptions(False, 0, 0),
            ground_truth_actions=CollateOptions(True, 0, -1),
            reward=CollateOptions(False, 0, 0)
        )
        collate_fn = Sequence(OrderedDict([
            ("to_episode", Map(self.to_episode(encoder, interpreter))),
            ("flatten", Flatten()),
            ("transform", Map(self.transform(encoder, interpreter, Parser()))),
            ("collate", collate.collate)
        ]))
        model = self.prepare_model(encoder)
        optimizer = self.prepare_optimizer(model)
        train_REINFORCE(
            # Input (pretrained snapshot), workspace, and output directories;
            # reading from output_dir matches use_pretrained_model=True below.
            output_dir, tmpdir, output_dir,
            train_dataset,
            self.prepare_synthesizer(model, encoder, interpreter),
            model, optimizer,
            torch.nn.Sequential(OrderedDict([
                # Policy head: per-sample action-sequence loss weighted by reward.
                ("policy", torch.nn.Sequential(OrderedDict([
                    ("loss", Apply(
                        module=mlprogram.nn.action_sequence.Loss(
                            reduction="none"
                        ),
                        in_keys=[
                            "rule_probs",
                            "token_probs",
                            "reference_probs",
                            "ground_truth_actions",
                        ],
                        out_key="action_sequence_loss",
                    )),
                    ("weight_by_reward", Apply(
                        [("reward", "lhs"), ("action_sequence_loss", "rhs")],
                        "action_sequence_loss",
                        mlprogram.nn.Function(Mul())))
                ]))),
                # Value head: regress the predicted value toward the observed
                # reward, down-weighted by 1e-2.
                ("value", torch.nn.Sequential(OrderedDict([
                    ("reshape_reward", Apply(
                        [("reward", "x")],
                        "value_loss_target",
                        Reshape([-1, 1]))),
                    ("BCE", Apply(
                        [("value", "input"), ("value_loss_target", "target")],
                        "value_loss",
                        torch.nn.BCELoss(reduction="sum"))),
                    ("reweight", Apply(
                        [("value_loss", "lhs")],
                        "value_loss",
                        mlprogram.nn.Function(Mul()),
                        constants={"rhs": 1e-2})),
                ]))),
                ("aggregate", Apply(
                    ["action_sequence_loss", "value_loss"],
                    "loss",
                    AggregatedLoss())),
                ("normalize", Apply(
                    [("loss", "lhs")],
                    "loss",
                    mlprogram.nn.Function(Div()),
                    constants={"rhs": 1})),
                ("pick", mlprogram.nn.Function(Pick("loss")))
            ])),
            EvaluateSynthesizer(
                train_dataset,
                self.prepare_synthesizer(model, encoder, interpreter,
                                         rollout=False),
                {}, top_n=[]),
            "generation_rate",
            metrics.use_environment(
                metric=metrics.TestCaseResult(
                    interpreter=interpreter,
                    metric=metrics.use_environment(
                        metric=metrics.Iou(),
                        in_keys=["actual", "expected"],
                        value_key="actual",
                    )
                ),
                in_keys=["test_cases", "actual"],
                value_key="actual",
                transform=Threshold(threshold=0.9, dtype="float"),
            ),
            collate_fn,
            1, 1, Epoch(10),
            evaluation_interval=Epoch(10),
            snapshot_interval=Epoch(10),
            use_pretrained_model=True,
            use_pretrained_optimizer=True,
            threshold=1.0)
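# The composite loss assembled above, written out as a formula (sketch):
#
#   loss = AggregatedLoss(reward * action_sequence_loss,
#                         1e-2 * BCELoss(value, reward))
#
# i.e. a REINFORCE-style reward-weighted policy loss plus a down-weighted
# value-network regression toward the observed rewards; the trailing Div with
# rhs=1 is a no-op normalization placeholder (rhs stands in for the batch size).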
def pretrain(self, output_dir):
    dataset = Dataset(4, 1, 2, 1, 45, seed=0)
    train_dataset = ListDataset([
        Environment(
            {"ground_truth": Circle(1)},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Rectangle(1, 2)},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Rectangle(1, 1)},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Rotation(45, Rectangle(1, 1))},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Translation(1, 1, Rectangle(1, 1))},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Difference(Circle(1), Circle(1))},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Union(Rectangle(1, 2), Circle(1))},
            set(["ground_truth"]),
        ),
        Environment(
            {"ground_truth": Difference(Rectangle(1, 1), Circle(1))},
            set(["ground_truth"]),
        ),
    ])
    with tempfile.TemporaryDirectory() as tmpdir:
        interpreter = self.interpreter()
        # Derive test cases by executing each ground-truth program.
        train_dataset = data_transform(
            train_dataset,
            Apply(
                module=AddTestCases(interpreter),
                in_keys=["ground_truth"],
                out_key="test_cases",
                is_out_supervision=False,
            ))
        encoder = self.prepare_encoder(dataset, Parser())
        collate = Collate(
            test_case_tensor=CollateOptions(False, 0, 0),
            variables_tensor=CollateOptions(True, 0, 0),
            previous_actions=CollateOptions(True, 0, -1),
            hidden_state=CollateOptions(False, 0, 0),
            state=CollateOptions(False, 0, 0),
            ground_truth_actions=CollateOptions(True, 0, -1)
        )
        collate_fn = Sequence(OrderedDict([
            ("to_episode", Map(self.to_episode(encoder, interpreter))),
            ("flatten", Flatten()),
            ("transform", Map(self.transform(encoder, interpreter, Parser()))),
            ("collate", collate.collate)
        ]))
        model = self.prepare_model(encoder)
        optimizer = self.prepare_optimizer(model)
        train_supervised(
            tmpdir, output_dir,
            train_dataset, model, optimizer,
            torch.nn.Sequential(OrderedDict([
                ("loss", Apply(
                    module=Loss(reduction="sum"),
                    in_keys=[
                        "rule_probs",
                        "token_probs",
                        "reference_probs",
                        "ground_truth_actions",
                    ],
                    out_key="action_sequence_loss",
                )),
                ("normalize",  # divide by the batch size (1 here)
                 Apply(
                     [("action_sequence_loss", "lhs")],
                     "loss",
                     mlprogram.nn.Function(Div()),
                     constants={"rhs": 1})),
                ("pick", mlprogram.nn.Function(Pick("loss")))
            ])),
            None, "score", collate_fn, 1, Epoch(100),
            evaluation_interval=Epoch(10),
            snapshot_interval=Epoch(100)
        )
    return encoder, train_dataset
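# Hypothetical chaining of the two phases (call site is illustrative):
# pretraining returns the encoder and the augmented dataset, which
# reinforce() then fine-tunes against synthesized rollouts.
#
#   encoder, train_dataset = self.pretrain(output_dir)
#   self.reinforce(train_dataset, encoder, output_dir)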
def prepare_synthesizer(self, model, encoder, interpreter, rollout=True):
    collate = Collate(
        test_case_tensor=CollateOptions(False, 0, 0),
        input_feature=CollateOptions(False, 0, 0),
        test_case_feature=CollateOptions(False, 0, 0),
        reference_features=CollateOptions(True, 0, 0),
        variables_tensor=CollateOptions(True, 0, 0),
        previous_actions=CollateOptions(True, 0, -1),
        hidden_state=CollateOptions(False, 0, 0),
        state=CollateOptions(False, 0, 0),
        ground_truth_actions=CollateOptions(True, 0, -1)
    )
    subsampler = ActionSequenceSampler(
        encoder, IsSubtype(),
        Sequence(OrderedDict([
            ("tinput",
             Apply(
                 module=TransformInputs(),
                 in_keys=["test_cases"],
                 out_key="test_case_tensor",
             )),
            ("tvariable",
             Apply(
                 module=TransformVariables(),
                 in_keys=["variables", "test_case_tensor"],
                 out_key="variables_tensor"
             )),
        ])),
        Compose(OrderedDict([
            ("add_previous_actions",
             Apply(
                 module=AddPreviousActions(encoder, n_dependent=1),
                 in_keys=["action_sequence", "reference"],
                 out_key="previous_actions",
                 constants={"train": False},
             )),
            ("add_state", AddState("state")),
            ("add_hidden_state", AddState("hidden_state"))
        ])),
        collate, model,
        rng=np.random.RandomState(0))
    subsampler = mlprogram.samplers.transform(subsampler, Parser().unparse)
    subsynthesizer = SMC(
        5, 1, subsampler,
        max_try_num=1,
        to_key=Pick("action_sequence"),
        rng=np.random.RandomState(0)
    )
    sampler = SequentialProgramSampler(
        subsynthesizer,
        Apply(
            module=TransformInputs(),
            in_keys=["test_cases"],
            out_key="test_case_tensor",
        ),
        collate,
        model.encode_input,
        interpreter=interpreter,
        expander=Expander(),
        rng=np.random.RandomState(0))
    if rollout:
        # Rollout mode: keep only samples whose test-case metric reaches
        # the 1.0 threshold.
        sampler = FilteredSampler(
            sampler,
            metrics.use_environment(
                metric=metrics.TestCaseResult(
                    interpreter,
                    metric=metrics.use_environment(
                        metric=metrics.Iou(),
                        in_keys=["actual", "expected"],
                        value_key="actual",
                    )
                ),
                in_keys=["test_cases", "actual"],
                value_key="actual"
            ),
            1.0
        )
        return SMC(3, 1, sampler,
                   rng=np.random.RandomState(0),
                   to_key=Pick("interpreter_state"),
                   max_try_num=1)
    else:
        # Evaluation mode: rescore partial programs with the value network
        # and bound the search with a timeout.
        sampler = SamplerWithValueNetwork(
            sampler,
            Sequence(OrderedDict([
                ("tinput",
                 Apply(
                     module=TransformInputs(),
                     in_keys=["test_cases"],
                     out_key="test_case_tensor",
                 )),
                ("tvariable",
                 Apply(
                     module=TransformVariables(),
                     in_keys=["variables", "test_case_tensor"],
                     out_key="variables_tensor"
                 )),
            ])),
            collate,
            torch.nn.Sequential(OrderedDict([
                ("encoder", model.encoder),
                ("value", model.value),
                ("pick", mlprogram.nn.Function(Pick("value")))
            ])))
        synthesizer = SynthesizerWithTimeout(
            SMC(3, 1, sampler,
                rng=np.random.RandomState(0),
                to_key=Pick("interpreter_state"),
                max_try_num=1),
            1
        )
        return FilteredSynthesizer(
            synthesizer,
            metrics.use_environment(
                metric=metrics.TestCaseResult(
                    interpreter,
                    metric=metrics.use_environment(
                        metric=metrics.Iou(),
                        in_keys=["actual", "expected"],
                        value_key="actual",
                    )
                ),
                in_keys=["test_cases", "actual"],
                value_key="actual"
            ),
            1.0
        )
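# Hypothetical call sites for the two modes (names illustrative):
# rollout=True builds the filtered SMC sampler used to collect REINFORCE
# rollouts, while rollout=False adds value-network rescoring plus a timeout
# for evaluation-time synthesis.
#
#   rollout_synthesizer = self.prepare_synthesizer(model, encoder, interpreter)
#   eval_synthesizer = self.prepare_synthesizer(
#       model, encoder, interpreter, rollout=False)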
def is_subtype(arg0, arg1):
    if arg0 == arg1:
        return True
    if arg0 == "Ysub" and arg1 == "Y":
        return True
    return False


def create_encoder():
    return ActionSequenceEncoder(
        Samples([Root2X, Root2Y, X2Y_list, Ysub2Str],
                [R, X, Y, Ysub, Y_list, Str],
                [("Str", "x"), ("Int", "1")]),
        0)


collate = Collate(input=CollateOptions(False, 0, -1),
                  length=CollateOptions(False, 0, -1))


def create_transform_input(reference: List[Token[str, str]]):
    def transform_input(env):
        env["reference"] = reference
        env["input"] = torch.zeros((1,))
        return env
    return transform_input


def transform_action_sequence(kwargs):
    kwargs["length"] = torch.tensor(
        len(kwargs["action_sequence"].action_sequence))
    return kwargs
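# A minimal sanity check of the subtype relation above (illustrative): every
# type is a subtype of itself, and "Ysub" is additionally a subtype of "Y".
assert is_subtype("Y", "Y")
assert is_subtype("Ysub", "Y")
assert not is_subtype("X", "Y")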
# Method of the MockSynthesizer test double used by the fixture below; the
# rest of the class definition is omitted here.
def __call__(self, input, n_required_output=None):
    n_required_output = n_required_output or 1
    for _ in range(n_required_output):
        yield Result(input["value"], 0, True, 1)


@pytest.fixture
def synthesizer(model):
    return MockSynthesizer(model)


def reward(sample, output):
    return sample["value"] == output


collate = Collate(output=CollateOptions(False, 0, 0),
                  value=CollateOptions(False, 0, 0),
                  reward=CollateOptions(False, 0, 0),
                  ground_truth=CollateOptions(False, 0, 0))


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.m = nn.Linear(1, 1)

    def forward(self, kwargs):
        # Pass the "value" entry through the 1x1 linear layer, in place.
        kwargs["value"] = self.m(kwargs["value"].float())
        return kwargs
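# Hypothetical sketch of DummyModel applied to a collated batch; the input
# tensor and this use of collate.collate are illustrative, not part of the test.
#
#   batch = collate.collate([Environment({"value": torch.tensor([1])})])
#   out = DummyModel()(batch)  # "value" now holds the linear layer's output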