Example #1
optparser.add_argument('-p', dest='doprint', action='store_true',
                       help='Print to png/pdf files')
optparser.add_argument('--prefix', default='', help='Plot file name prefix')
optparser.add_argument('--mpl', dest='usempl', action='store_true',
                       help='Use Matplotlib')
optparser.add_argument('-b', '--batch', action='store_true', help='Batch mode')
optparser.add_argument('-c', dest='clnameglob', metavar='classifier',
                       help='Only plot matching classifiers (globs allowed)')
options = optparser.parse_args()
locals().update(_import_args(options))


import sys

from utils import plot_conf
rfiles = plot_conf(options.yamlfile, options.schema, options.files)
if not rfiles:
    sys.exit('Config parsing error.')

prefix = 'plots/{}'.format(options.prefix)

from config import classifiers, sessions

if options.clnameglob:
    # only process matching classifiers
    from fnmatch import fnmatchcase
    # iterate over a copy: deleting from the dict while iterating it raises RuntimeError
    for key in list(classifiers):
        if not fnmatchcase(key, options.clnameglob):
            del classifiers[key]

from fixes import ROOT
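
The snippet above accesses options.yamlfile, options.schema, and options.files, but the creation of optparser is not shown in the excerpt. A minimal sketch of the missing argparse preamble, assuming those argument names (the real parser definition may differ):

# Hypothetical preamble for Example #1; argument names are inferred from the
# options accessed later in the snippet and are assumptions.
import argparse

optparser = argparse.ArgumentParser(description='Plot classifier results')
optparser.add_argument('yamlfile', help='YAML plot config file')   # assumed positional
optparser.add_argument('--schema', help='Config schema')           # assumed option
optparser.add_argument('files', nargs='*', help='ROOT files to plot')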
Example #2
optparser.add_argument('--title', action='store_true',
                       help='Add title to plots')
optparser.add_argument('--dump', default=None, type=str,
                       help='Dump correlation plots matching glob')
options = optparser.parse_args()


from pprint import pprint
import sys
if options.verbose and not options.lcorrns:
    print('Error: verbose depends on linear correlations!')
    optparser.print_help()
    sys.exit(1)

from utils import plot_conf
rfiles = plot_conf(options.yamlfile, options.schema, options.files)
if not rfiles:
    sys.exit('Config parsing error.')

from rplot.rdir import Rdir
fnames = [rfile[0]['file'] for rfile in rfiles]
pathtool = Rdir(fnames)

# FIXME: only processes first file
rfileconf = rfiles[0]

# guess session from file name
from utils import session_from_path
session = session_from_path(rfileconf[0]['file'])
prefix = 'plots/{}'.format(session)
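
Example #2 guesses the session name from the ROOT file path via session_from_path. A minimal sketch of what such a helper could look like, assuming the session is encoded in the file name; this is an illustrative assumption, not the actual utils implementation:

import os

def session_from_path(path):
    """Guess the session name from a ROOT file path.

    Assumes file names of the form <prefix>_<session>.root; this layout is
    an assumption for illustration only.
    """
    stem = os.path.splitext(os.path.basename(path))[0]
    return stem.split('_')[-1]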
Example #3
                opts["save_model_path"]),
                                    "a",
                                    encoding="utf8")
            for (option, value) in opts.items():
                hyperParams_file.write("{}: {}\n".format(option, value))
            hyperParams_file.close()

            print("Number of Epochs", opts["epochs"])

            # Train model
            trainer.train(train_dataloader, dev_dataloader, opts)

            # Evaluate model by computing F-score on test set
            precisions = compute_precision(ner_model, test_dataloader,
                                           label_vocabulary,
                                           encoded_test_labels, opts)
            curr_f1 = precisions["f1"]
            print("\n\nF-1 score: {}\n\n".format(curr_f1))
            print("Confusion matrix\n", precisions["confusion_matrix"])

            # Print, plot, and save the confusion matrix
            file_name = "TestSet_Confusion_Matrix"
            plot_conf(file_name, precisions["confusion_matrix"],
                      opts["save_model_path"])

            # Add F-score obtained by this model to dict storing all F-scores
            F1_scores_dict.update({opts["save_model_path"]: curr_f1})

# Find the model that achieved the highest F-score
best = max(F1_scores_dict.items(), key=lambda x: x[1])
print("Best model in {} scord F1 of {}".format(best[0], best[1]))
Example #4
trainer.train(train_dataloader, dev_dataloader, opts)

# Create a file to record all the model hyperparameters and training configuration
hyperParams_file = open("{}/hyper-parameters.txt".format(
    opts["save_model_path"]),
                        "a",
                        encoding="utf8")
for (option, value) in opts.items():
    hyperParams_file.write("{}: {}\n".format(option, value))
hyperParams_file.close()

# Evaluate the model: precision, recall, F-score, and confusion matrix on the dev set
precisions = compute_precision(ner_model, dev_dataloader, label_vocabulary,
                               encoded_dev_labels, opts)
per_class_precision = precisions["per_class_precision"]
print("Micro Precision: {}\nMacro Precision: {}".format(
    precisions["micro_precision"], precisions["macro_precision"]))
print("Per class Precision:")
for idx_class, precision in sorted(enumerate(per_class_precision),
                                   key=lambda elem: -elem[1]):
    label = int_to_label[idx_class]
    print(label, precision)
print("Rcall: {}\n\nF-1 score: {}\n\n".format(precisions["recall"],
                                              precisions["f1"]))

# Print, plot, and save the confusion matrix
print("Confusion matrix\n", precisions["confusion_matrix"])
plot_conf("DevSet_Confusion_Matrix", precisions["confusion_matrix"],
          opts["save_model_path"])