Example #1
    # optionally freeze everything but the top layers before fine-tuning
    # for param in list(model.parameters())[:-4]:
    #     param.requires_grad = False
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      iterator=iterator,
                      validation_iterator=iterator,
                      train_dataset=train_dataset,
                      validation_dataset=validation_dataset,
                      patience=3,
                      num_epochs=1,
                      cuda_device=-1)  # -1 runs on CPU
    trainer.train()

    # precision, recall, f-score on validation set
    validation_list = read_json(cached_path(VALIDATION_PATH))
    claim_predictor = ClaimCrfPredictor(model, dataset_reader=reader)
    y_pred, y_true = [], []
    for val in validation_list:
        pred = claim_predictor.predict_json(val)
        # decode the best tag sequence with Viterbi; viterbi_tags returns one
        # (tag_sequence, score) pair per batch element
        logits = torch.FloatTensor(pred['logits']).unsqueeze(0)
        mask = torch.LongTensor(pred['mask']).unsqueeze(0)
        best_paths = model.crf.viterbi_tags(logits, mask)
        predicted_labels = best_paths[0][0]
        y_pred.extend(predicted_labels)
        y_true.extend(val['labels'])
    y_true = np.array(y_true).astype(int)
    y_pred = np.array(y_pred).astype(int)
    print(precision_recall_fscore_support(y_true, y_pred, average='binary'))
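
This snippet is a fragment; the surrounding imports are not shown. A minimal sketch of what it relies on (ClaimCrfPredictor and read_json are project-specific helpers assumed to be importable; the AllenNLP paths match the 0.x releases these examples use):

    import numpy as np
    import torch
    from sklearn.metrics import precision_recall_fscore_support
    from allennlp.common.file_utils import cached_path
    from allennlp.training.trainer import Trainer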
Example #2
    # rebuild the classifier head for binary claim tagging
    num_classes = 2
    constraints = None
    include_start_end_transitions = False
    model.classifier_feedforward._linear_layers = ModuleList([
        torch.nn.Linear(2 * EMBEDDING_DIM, EMBEDDING_DIM),
        torch.nn.Linear(EMBEDDING_DIM, num_classes)
    ])
    model.crf = ConditionalRandomField(
        num_classes,
        constraints,
        include_start_end_transitions=include_start_end_transitions)
    model.label_projection_layer = TimeDistributed(
        torch.nn.Linear(2 * EMBEDDING_DIM, num_classes))
    # load the fine-tuned weights (WEIGHT_PATH is defined elsewhere)
    model.load_state_dict(torch.load(cached_path(WEIGHT_PATH)))

    reader = CrfPubmedRCTReader()
    claim_predictor = ClaimCrfPredictor(model, dataset_reader=reader)

    fixture_path = os.path.join('..', 'pubmed-rct', 'PubMed_200k_RCT',
                                'fixtures_crf.json')
    examples = read_json(fixture_path)
    pred_list = []
    for example in examples:
        # split the abstract into sentences (nltk's sent_tokenize)
        sentences = sent_tokenize(example['abstract'])
        instance = reader.text_to_instance(sents=sentences)
        pred = claim_predictor.predict_instance(instance)
        # Viterbi-decode the best tag sequence, as in Example #1
        logits = torch.FloatTensor(pred['logits']).unsqueeze(0)
        mask = torch.LongTensor(pred['mask']).unsqueeze(0)
        best_paths = model.crf.viterbi_tags(logits, mask)
        pred_list.append(best_paths[0][0])
    print(pred_list)
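
pred_list holds integer tag indices. If human-readable tags are needed, the model vocabulary can map them back; a sketch, assuming the tags live in AllenNLP's standard 'labels' namespace:

    for tag_sequence in pred_list:
        labels = [model.vocab.get_token_from_index(i, namespace='labels')
                  for i in tag_sequence]
        print(labels)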
Example #3
"""
Example of discourse prediction using ``discourse`` library
created using AllenNLP
"""
import os
import sys
sys.path.insert(0, '..')

from discourse.predictors import DiscourseClassifierPredictor
from discourse.dataset_readers import PubmedRCTReader
from discourse.models import DiscourseClassifier
from discourse import read_json

from allennlp.models.archival import load_archive
from allennlp.common.file_utils import cached_path
from allennlp.service.predictors import Predictor

MODEL_PATH = 'https://s3-us-west-2.amazonaws.com/pubmed-rct/model.tar.gz'
archive = load_archive(cached_path(MODEL_PATH))
predictor = Predictor.from_archive(archive, 'discourse_predictor')

if __name__ == '__main__':
    fixture_path = os.path.join('..', 'pubmed-rct', 'PubMed_200k_RCT',
                                'fixtures.json')
    json_sentences = read_json(fixture_path)
    for json_sentence in json_sentences:
        output = predictor.predict_json(json_sentence)
        output['sentence'] = json_sentence['sentence']
        print(output)
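
Each output dict includes a 'class_probabilities' field (also used in the next example). A sketch of recovering the most probable class index from one output, assuming numpy is available:

    import numpy as np
    probs = output['class_probabilities']
    predicted_index = int(np.argmax(probs))  # index of the most likely discourse label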

Example #4

def flatten_dataset(df):
    """
    Flatten Gold Standard JSON data for Claim Extraction
    """
    sentence_data = []
    for _, r in df.iterrows():
        sentence_data.extend(list(zip(r.sentences, r.labels)))
    flatten_df = pd.DataFrame(sentence_data, columns=['sentence', 'label'])
    return flatten_df
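
A minimal illustration of flatten_dataset on a hypothetical one-abstract frame:

    example_df = pd.DataFrame({'sentences': [['A claim.', 'A method.']],
                               'labels': [[1, 0]]})
    print(flatten_dataset(example_df))
    #     sentence  label
    # 0   A claim.      1
    # 1  A method.      0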


if __name__ == '__main__':
    # read datasets (TRAIN_PATH, VALIDATION_PATH and TEST_PATH are project
    # constants defined elsewhere)
    train_df = pd.DataFrame(read_json(cached_path(TRAIN_PATH)))
    validation_df = pd.DataFrame(read_json(cached_path(VALIDATION_PATH)))
    test_df = pd.DataFrame(read_json(cached_path(TEST_PATH)))

    train_df = flatten_dataset(train_df)
    validation_df = flatten_dataset(validation_df)
    test_df = flatten_dataset(test_df)

    # prepare features: discourse class probabilities plus sentence vectors
    # (get_sentence_vector and ft_model are defined elsewhere)
    train_df['class_probability'] = train_df.sentence.map(
        lambda x: predictor.predict_json({'sentence': x})['class_probabilities'])
    train_df['sentence_vector'] = train_df.sentence.map(
        lambda x: get_sentence_vector(x, ft_model))
    X_train = np.hstack((np.vstack(train_df['sentence_vector'].values),
                         np.vstack(train_df['class_probability'].values)))
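
get_sentence_vector and ft_model are not defined in this fragment. A plausible sketch, assuming ft_model is a model loaded with the fasttext package and the helper simply averages word vectors:

    import numpy as np

    def get_sentence_vector(sentence, ft_model):
        # average the fastText word vectors; fall back to a zero vector
        # for empty input
        words = sentence.split()
        if not words:
            return np.zeros(ft_model.get_dimension())
        return np.mean([ft_model.get_word_vector(w) for w in words], axis=0)

A natural next step, not shown in the original, is to fit any standard classifier on X_train against the labels produced by flatten_dataset, e.g.:

    from sklearn.linear_model import LogisticRegression
    clf = LogisticRegression()
    clf.fit(X_train, train_df['label'].values)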