print("---------------------------------") accuracy = correct / (correct + incorrect) print('Tagging accuracy for test set of %s sentences: %.4f' % (test_size, accuracy)) # Print answers for 4b, 5 and 6 bad_tags, good_tags, answer4b = answer_question4b() print('\nA tagged-by-your-model version of a sentence:') print(bad_tags) print('The tagged version of this sentence from the corpus:') print(good_tags) print('\nDiscussion of the difference:') print(answer4b[:280]) answer5 = answer_question5() print('\nFor Q5:') print(answer5[:500]) answer6 = answer_question6() print('\nFor Q6:') print(answer6[:500]) if __name__ == '__main__': if len(sys.argv) > 1 and sys.argv[1] == '--answers': import adrive2_embed from autodrive_embed import run, carefulBind with open("userErrs.txt", "w") as errlog: run(globals(), answers, adrive2_embed.a2answers, errlog) else: answers()
# NOTE(review): whitespace-mangled fragment — an entire chunk collapsed onto one
# physical line. It begins MID-EXPRESSION: inside a carefulBind([...]) list whose
# opening bracket, earlier tuples, and enclosing function header (apparently the
# tail of an a2answers()-style grader, which returns (ans, errs)) are outside
# this view, so it cannot be safely reformatted here; left byte-identical.
# Visible content, in order:
#   * the remaining ('a4e'/'a5'/'a6', expr-string) bindings passed to
#     carefulBind along with globals() and errlog, with cerrs added to errs;
#   * a guarded model.initialise('attack') that deliberately ignores
#     NotImplementedError but counts and logs any other exception (message plus
#     traceback via traceback.print_tb) to errlog;
#   * a second carefulBind for 'a3c'/'a3d' (Viterbi value / backpointer for
#     "VERB" at position 0), whose results are merged into ans;
#   * return (ans, errs);
#   * a __main__ guard that imports the autodrive harness and runs it,
#     writing user errors to "userErrs.txt".
), ('a4e', 'list(ttags)'), ('a5', 'answer5'), ('a6', 'answer6') ], globals(), errlog) errs += cerrs try: model.initialise('attack') except NotImplementedError: pass except Exception as e: errs += 1 print("Exception in initialising model in adrive2:\n%s" % repr(e), file=errlog) traceback.print_tb(sys.exc_info()[2], None, errlog) (cerrs, nans) = carefulBind([('a3c', 'model.get_viterbi_value("VERB",0)'), ('a3d', 'model.get_backpointer_value("VERB",0)')], globals(), errlog) ans.update(nans) errs += cerrs return (ans, errs) if __name__ == '__main__': from autodrive_embed import run, answers, HMM, carefulBind with open("userErrs.txt", "w") as errlog: run(answers, a2answers, errlog)