def test_state():
    """A freshly built state starts as TRUE and only terminates once source == target."""
    non_terminal = State.new("E=MC2", "Matter and energy are interchangable")
    assert non_terminal.truth == Truth.TRUE
    assert not non_terminal.isEnd()

    # An identity pair is immediately terminal and keeps the TRUE truth value.
    terminal = State.new("E=MC2", "E=MC2")
    assert terminal.isEnd()
    assert terminal.truth == Truth.TRUE
def test_lexical_model_predict(self, backend, word_map):
    """predict() on a compiled LexicalModel yields a (1, 3) class distribution."""
    lexical = LexicalModel.build(word_map)
    lexical.compile(
        optimizer='adagrad',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    sentence = "we eat bananas for lunch ."
    prediction = lexical.predict(State.new(sentence, sentence))
    assert prediction.shape == (1, 3)
def test_agent_act():
    """With top_k=1 the agent returns exactly one (score, action) pair."""
    agent = Agent(MockActorModel(), top_k=1)
    state = State.new("E=MC2", "Matter and energy are interchangable")
    chosen = agent.act(state, [GiveUpAction()])
    assert len(chosen) == 1
    _score, picked = chosen[0]
    assert isinstance(picked, GiveUpAction)
def test_lexical_model_update(self, backend, word_map):
    """After training on queued examples, the model predicts TRUE for an identity pair."""
    lexical = LexicalModel.build(word_map)
    lexical.compile(
        optimizer='adagrad',
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    source = "we eat bananas for lunch ."
    # Each (target, truth) example is enqueued ten times, in this order.
    examples = [
        ("we eat bananas for lunch .", Truth.TRUE),
        ("we don't eat bananas for lunch .", Truth.FALSE),
        ("we eat bananas .", Truth.TRUE),
        ("we eat lunch .", Truth.TRUE),
    ]
    for _round in range(10):
        for target, truth in examples:
            lexical.enqueue((State.new(source, target), truth))
    lexical.update()

    prediction = lexical.predict(State.new(source, source, Truth.TRUE))
    assert prediction.shape == (1, 3)
    assert prediction.argmax() == Truth.TRUE.value
def test_agent_feedback():
    """incorporate_feedback queues a single ((state, action), reward) segment."""
    agent = Agent(MockActorModel(), top_k=1)
    state = State.new("E=MC2", "Matter and energy are interchangable")
    action = GiveUpAction()
    reward = 0
    agent.incorporate_feedback(state, action, reward)

    queue = agent.actor_model.queue
    assert len(queue) == 1
    segment = queue[0]
    assert len(segment) == 2
    (queued_state, queued_action), queued_reward = segment
    assert queued_state == state
    assert queued_action == action
    assert queued_reward == reward
def make_state(row, do_annotate=False):
    """Build a State from an SNLI-style row.

    The row's gold_label string is mapped onto a Truth value. With
    do_annotate set, the state is created from the raw sentence strings via
    State.new; otherwise both sentences are parsed with make_sentence and
    the State is constructed directly with an initial truth of TRUE.
    """
    label_to_value = {
        "entailment": 2,
        "neutral": 1,
        "contradiction": 0,
    }
    gold_truth = Truth(label_to_value[row.gold_label])

    if do_annotate:
        return State.new(row.sentence1, row.sentence2, gold_truth)

    source = make_sentence(row.sentence1, row.sentence1_parse)
    target = make_sentence(row.sentence2, row.sentence2_parse)
    return State(source, target, Truth.TRUE, gold_truth)
def do_rte(args):
    """Convert an RTE XML pair file into newline-delimited State JSON.

    Reads <pair> elements from args.input, maps each pair's entailment
    attribute onto a Truth value, and writes one JSON-encoded state per
    pair to args.output. Pairs whose State construction fails its internal
    assertions are logged as warnings and skipped.
    """
    tree = ElementTree(file=args.input)
    label_map = {
        "YES": Truth.TRUE,
        "NO": Truth.FALSE,
        "UNKNOWN": Truth.NEUTRAL,
    }
    for pair in tqdm(tree.findall("pair")):
        entailment = pair.get("entailment")
        assert entailment in label_map
        source = pair.findtext("t")
        target = pair.findtext("h")
        try:
            state = State.new(source, target, gold_truth=label_map[entailment])
            args.output.write(json.dumps(state.json) + "\n")
        except AssertionError as e:
            # Best-effort conversion: report the failing pair and move on.
            logging.warning(e.args)
def test_agenda_environment():
    """An episode whose only action is GiveUp ends NEUTRAL with a negative critic reward."""
    agent = Agent(MockActorModel(), top_k=1)
    agenda = AgendaEnvironment(
        agent,
        MockCriticModel(),
        action_generator=lambda _: [GiveUpAction()],
        reward_fn=mock_reward_fn)
    start = State.new("E=MC2", "Matter and energy are interchangable", Truth.TRUE)

    final = agenda.run_episode(start)
    assert final.isEnd()
    assert final.truth == Truth.NEUTRAL

    # The actor's queued feedback still carries a zero reward at this point.
    assert len(agent.actor_model.queue) == 1
    _, actor_reward = agent.actor_model.queue[0]
    assert actor_reward == 0

    # The critic, however, records the penalty for giving up.
    assert len(agenda.critic_model.queue) == 1
    _, critic_reward = agenda.critic_model.queue[0]
    assert critic_reward < 0