Example #1
def test_read_annotated_subset(src_filename):
    # `annotations_home` (directory holding the annotation files) and `mnli`
    # (the loaded MultiNLI dataset, keyed by split) are assumed to be defined
    # elsewhere in the test module.
    src_filename = os.path.join(annotations_home, src_filename)
    # Pair the annotation file with the corresponding dev split.
    if 'mismatched' in src_filename:
        split = 'validation_mismatched'
    else:
        split = 'validation_matched'
    data = nli.read_annotated_subset(src_filename, mnli[split])
    # Each annotation file covers 495 dev examples.
    assert len(data) == 495
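# A minimal sketch of how `src_filename` might be supplied via pytest
# parametrization (the 'mismatched' filename is an assumption mirroring the
# 'matched' annotation file used later in this document):
#
# @pytest.mark.parametrize("src_filename", [
#     "multinli_1.0_matched_annotations.txt",
#     "multinli_1.0_mismatched_annotations.txt",
# ])
# def test_read_annotated_subset(src_filename):
#     ...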
Example #2
# (Tail of the call to `nli.experiment`; its earlier arguments are omitted
# from this excerpt.)
                                         assess_reader=None,
                                         random_state=42,
                                         vectorize=False)

# The return value of `nli.experiment` contains the information we need to make predictions on new examples.
#
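# For example, the pieces of that dictionary relied on below (a sketch; the
# exact contents depend on `nli.experiment`):
#
#     rnn_multinli_experiment['model']  # the fitted classifier
#     rnn_multinli_experiment['phi']    # the feature function applied to premise/hypothesis parses
#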
# Next, we load in the 'matched' condition annotations ('mismatched' would work as well):

# In[31]:

matched_ann_filename = os.path.join(ANNOTATIONS_HOME,
                                    "multinli_1.0_matched_annotations.txt")

# In[32]:

matched_ann = nli.read_annotated_subset(matched_ann_filename, MULTINLI_HOME)
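
# As a quick sanity check (this sketch assumes, as in the accompanying tests
# for `nli.read_annotated_subset`, that each annotation file covers 495 dev
# examples):

assert len(matched_ann) == 495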

# The following function uses `rnn_multinli_experiment` to make predictions on annotated examples, and harvests some other information that is useful for error analysis:

# In[33]:


def predict_annotated_example(ann, experiment_results):
    model = experiment_results['model']
    phi = experiment_results['phi']
    ex = ann['example']
    prem = ex.sentence1_parse
    hyp = ex.sentence2_parse
    feats = phi(prem, hyp)
    pred = model.predict([feats])[0]
    gold = ex.gold_label
    # Record the example's annotation tags along with its gold label, the
    # model's prediction, and whether the prediction was correct:
    data = {ann_cat: True for ann_cat in ann['annotations']}
    data.update({'gold': gold, 'predicted': pred, 'correct': gold == pred})
    return data
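
# As a usage sketch (assuming `rnn_multinli_experiment` is the dictionary
# returned by the `nli.experiment` call above, and that `matched_ann` maps
# identifiers to annotation dicts of the form `predict_annotated_example`
# expects), we can score every annotated example and collect the per-example
# records into a DataFrame for error analysis. The helper name below is
# illustrative, not part of `nli`:

import pandas as pd

def get_predictions_for_annotated_data(anns, experiment_results):
    # One row per annotated example: annotation tags, gold label,
    # prediction, and correctness.
    data = []
    for ann in anns.values():
        data.append(predict_annotated_example(ann, experiment_results))
    return pd.DataFrame(data)

# analysis_df = get_predictions_for_annotated_data(
#     matched_ann, rnn_multinli_experiment)
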
def test_read_annotated_subset(src_filename):
    # `annotations_home` and `multinli_home` are assumed to be module-level
    # paths to the annotation files and the MultiNLI corpus, respectively.
    src_filename = os.path.join(annotations_home, src_filename)
    data = nli.read_annotated_subset(src_filename, multinli_home)
    assert len(data) == 495