def test(embeddingsFile, existingModel, predictionsFile,
         documents_are_sequences, sentenceCNN, charCNN, uniLSTM, useBERT):
    """Evaluate a saved event-detection model on the test folder.

    Loads word embeddings and the test annotations, rebuilds the model
    architecture, restores the weights from ``existingModel``, writes one
    tab-separated prediction line per token to ``predictionsFile``
    (metadata, predicted label, gold label, probability), and prints
    precision/recall/F1.

    Relies on module-level globals: vocab_size, word_embedding_dim,
    test_folder — TODO confirm they are set before this is called.
    """
    embeddings, vocab = event_reader.load_embeddings(embeddingsFile,
                                                     vocab_size,
                                                     word_embedding_dim)

    testSentences, testBookIndex = event_reader.prepare_annotations_from_folder(
        test_folder, documents_are_sequences, useBERT)

    testC, testX, testP, testW, testY, testL = transform_examples(
        testSentences, vocab, useBERT)

    test_generator = single_generator(testC, testX, testP, testW, testY, testL)

    test_metadata = event_reader.convert_to_index(testSentences)

    model = event_cnn(embeddings, sentenceCNN, charCNN, uniLSTM, useBERT)
    model.load_weights(existingModel)

    gold = []
    preds = []
    c = 0  # running index into test_metadata across all batches
    # `with` guarantees the predictions file is closed even if prediction
    # fails part-way through (the original left it open on error).
    with open(predictionsFile, "w", encoding="utf-8") as out:
        for _ in range(len(testL)):
            batch, y = next(test_generator)

            probs = model.predict_on_batch(batch)

            _, length, _ = y.shape
            for i in range(length):
                # Single consistent decision threshold.  The original wrote
                # int(p > 0.5) to the file but appended (p >= 0.5) to preds,
                # so a probability of exactly 0.5 was scored differently in
                # the file vs. the printed F1.
                pred = int(probs[0][i][0] > 0.5)
                out.write("%s\t%s\t%s\t%.20f\n" %
                          ('\t'.join([str(x) for x in test_metadata[c]]),
                           pred, y[0][i][0], probs[0][i][0]))

                preds.append(pred)
                gold.append(y[0][i][0])
                c += 1

    # (The original called check_f1_two_lists a second time at the end and
    # discarded the result; the duplicate call has been removed.)
    f, p, r, correct, trials, trues = event_eval.check_f1_two_lists(
        gold, preds)

    print("precision: %.3f %s/%s" % (p, correct, trials))
    print("recall: %.3f %s/%s" % (r, correct, trues))
    print("F: %.3f" % f)
def train(embeddingsFile, writePath, documents_are_sequences, sentenceCNN,
          charCNN, pad, uniLSTM, useBERT):
    """Train the event CNN and checkpoint the best model to <writePath>.hdf5.

    Loads embeddings plus train/dev annotations, builds the model, and
    fits either on fully padded dense tensors (pad=True) or one example
    at a time via generators (pad=False).  Checkpointing keeps only the
    weights with the lowest validation loss; early stopping halts after
    15 epochs without improvement.

    Relies on module-level globals: vocab_size, word_embedding_dim,
    train_folder, dev_folder, num_epochs, batch_size — TODO confirm they
    are set before this is called.
    """
    # Dev tensors are published as module globals, presumably so the
    # BatchF1/GeneratorF1 callbacks defined elsewhere in this file can
    # read them during training — TODO confirm.
    global devC, devX, devP, devW, devY, devL

    embeddings, vocab = event_reader.load_embeddings(embeddingsFile,
                                                     vocab_size,
                                                     word_embedding_dim)

    trainSentences, trainBookIndex = event_reader.prepare_annotations_from_folder(
        train_folder, documents_are_sequences, useBERT)
    devSentences, devBookIndex = event_reader.prepare_annotations_from_folder(
        dev_folder, documents_are_sequences, useBERT)

    devC, devX, devP, devW, devY, devL = transform_examples(
        devSentences, vocab, useBERT)
    trainC, trainX, trainP, trainW, trainY, trainL = transform_examples(
        trainSentences, vocab, useBERT)

    # Longest sequence across train and dev, used as the padding length.
    # (Replaces two manual max-tracking loops with builtin max(); the
    # default=0 matches the original's zero initialization.)
    max_sequence_length = max(max(trainL, default=0), max(devL, default=0))

    print("Max length: %s" % max_sequence_length)

    model = event_cnn(embeddings, sentenceCNN, charCNN, uniLSTM, useBERT)

    tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
    # Keep only the best weights, judged by lowest validation loss.
    checkpoint = ModelCheckpoint("%s.hdf5" % writePath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    # Stop after 15 epochs without any val_loss improvement.
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=15,
                                   verbose=0,
                                   mode='auto')

    if pad:
        # Pad every example to the global max length so the data can be
        # fed as dense tensors in mini-batches.
        trainC, trainX, trainP, trainW, trainY, trainL = pad_all(
            trainC, trainX, trainP, trainW, trainY, trainL,
            max_sequence_length, useBERT)
        devC, devX, devP, devW, devY, devL = pad_all(devC, devX, devP, devW,
                                                     devY, devL,
                                                     max_sequence_length,
                                                     useBERT)

        batchF1 = BatchF1()

        model.fit([trainC, trainW, trainX, trainP],
                  trainY,
                  validation_data=([devC, devW, devX, devP], devY),
                  epochs=num_epochs,
                  batch_size=batch_size,
                  callbacks=[batchF1, tensorboard, checkpoint, early_stopping])

    else:
        # Variable-length path: one example per step via generators.
        train_generator = single_generator(trainC, trainX, trainP, trainW,
                                           trainY, trainL)
        dev_generator = single_generator(devC, devX, devP, devW, devY, devL)
        generatorF1 = GeneratorF1()

        # NOTE(review): fit_generator is deprecated in newer Keras/TF
        # (model.fit accepts generators directly); kept for compatibility
        # with the version this project appears to pin.
        model.fit_generator(
            train_generator,
            steps_per_epoch=len(trainL),
            validation_data=dev_generator,
            validation_steps=len(devL),
            epochs=num_epochs,
            callbacks=[generatorF1, tensorboard, checkpoint, early_stopping])
import event_eval
import event_reader

if __name__ == "__main__":

    # Output path for the final predictions file.
    outputFile = sys.argv[1]

    # Load spaCy for POS tagging only.
    # NOTE(review): disable=['ner,parser'] passes the single malformed
    # string "ner,parser" rather than two pipe names, so the explicit
    # remove_pipe calls below are what actually drop the pipes — confirm.
    nlp = spacy.load('en', disable=['ner,parser'])
    nlp.remove_pipe('ner')
    nlp.remove_pipe('parser')

    train_folder = "../data/bert/train"
    dev_folder = "../data/bert/dev"
    test_folder = "../data/bert/test"

    # Gold test annotations; metadata maps each token back to its
    # document/sentence/token index for the output file.
    testSentences, _ = event_reader.prepare_annotations_from_folder(
        test_folder)
    test_metadata = event_reader.convert_to_index(testSentences)

    golds = []
    preds = []

    # Verb-POS baseline: predict "event" (1) for any token whose
    # fine-grained tag starts with "V", else 0.
    for sentence in testSentences:
        tokens_list = [word[0] for word in sentence]
        tokens = nlp.tokenizer.tokens_from_list(tokens_list)
        nlp.tagger(tokens)
        for idx, token in enumerate(tokens):
            pred = 0
            if token.tag_.startswith("V"):
                pred = 1
            preds.append(pred)
            # NOTE(review): `label` is computed but never appended to
            # `golds`, so `golds` stays empty — a golds.append(label)
            # appears to be missing here.
            label = sentence[idx][1]
    # NOTE(review): the tab-indented lines below reference names never
    # defined in this file (bestModel, test_X) and mix tabs with the
    # 4-space indentation above — this fragment appears to belong to a
    # different script and will not parse/run as-is.
	print(bestModel)
	final_test_preds=bestModel.predict(test_X)

	out=open(outputFile, "w", encoding="utf-8")
	for idx, pred in enumerate(final_test_preds):
		out.write("%s\t%s\n" % ('\t'.join([str(x) for x in test_metadata[idx]]), pred))
	out.close()



if __name__ == "__main__":

	read_countability("../data/esl.cd")
	read_embeddings("../data/guten.vectors.txt")

	trainSentences, _ = event_reader.prepare_annotations_from_folder(train_folder)
	devSentences, _ = event_reader.prepare_annotations_from_folder(dev_folder)
	testSentences, _ = event_reader.prepare_annotations_from_folder(test_folder)

	test_metadata=event_reader.convert_to_index(testSentences)

	functions=[get_word, get_context, get_dep, get_lemma, get_wordnet, get_pos, get_nsubj_features, get_embedding]
	train_eval(trainSentences, devSentences, testSentences, "../results/featurized.preds.txt", functions)

	functions=[get_context, get_dep, get_lemma, get_wordnet, get_pos, get_nsubj_features, get_embedding]
	train_eval(trainSentences, devSentences, testSentences, "../results/featurized.word_ablation.preds.txt", functions)

	functions=[get_word, get_context, get_dep, get_wordnet, get_pos, get_nsubj_features, get_embedding]
	train_eval(trainSentences, devSentences, testSentences, "../results/featurized.lemma_ablation.preds.txt", functions)

	functions=[get_word, get_context, get_dep, get_lemma, get_wordnet, get_nsubj_features, get_embedding]