Example #1
import torch

def parse_example(parser, sentence):
	# load dataset
	print('Loading embeddings and ids for parsing')
	dataset = load_datasets()
	config = dataset.model_config
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# Put the parser in evaluation mode so that layers such as dropout are disabled
	parser.eval()

	parse_sentence(sentence, parser, device, dataset)
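
A note on parser.eval(): PyTorch modules such as nn.Dropout behave differently in training and evaluation mode, which is why the example above switches to eval mode before parsing. A minimal standalone sketch (separate from the parser code; the layer and tensor here are illustrative):

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)
x = torch.ones(4)

drop.train()   # training mode: roughly half the entries are zeroed, the rest scaled by 1/(1-p) = 2
print(drop(x))

drop.eval()    # evaluation mode: dropout is a no-op and the input passes through unchanged
print(drop(x))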
Example #2
import torch

def test(parser):

    # load dataset
    print('Loading data for testing')
    dataset = load_datasets()
    config = dataset.model_config
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    # Put the parser in evaluation mode so that layers such as dropout are disabled
    parser.eval()

    # Compute UAS (unlabeled attachment score), the standard evaluation metric for dependency parsers
    compute_dependencies(parser, device, dataset.test_data, dataset)
    valid_UAS = get_UAS(dataset.test_data)
    print("- test UAS: {:.2f}".format(valid_UAS * 100.0))

    test_string = "I shot an elephant with a banana"
    parse_sentence(test_string, parser, device, dataset)
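
get_UAS here is project-specific, but the metric it reports is simple: UAS is the fraction of tokens whose predicted head matches the gold head, ignoring dependency labels. A minimal sketch of that computation, assuming each sentence's heads are given as a list of head indices per token (uas_sketch and its arguments are illustrative names, not part of the code above):

def uas_sketch(predicted_heads, gold_heads):
    # A token counts as correct if its predicted head index equals the gold head index.
    correct = sum(p == g for p, g in zip(predicted_heads, gold_heads))
    return correct / len(gold_heads)

# A 5-token sentence where 4 of the 5 heads are attached correctly:
print(uas_sketch([2, 0, 2, 5, 2], [2, 0, 2, 5, 3]))  # 0.8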
Example #3
import torch

def test(parser):
	# load dataset
	print('Loading data for testing')
	dataset = load_datasets()
	config = dataset.model_config
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	# Put the parser in evaluation mode so that layers such as dropout are disabled
	parser.eval()

	# Compute UAS (unlabeled attachment score), the standard evaluation metric for dependency parsers.
	#
	# For details see
	# http://www.morganclaypool.com/doi/abs/10.2200/S00169ED1V01Y200901HLT002
	# Chapter 6.1
	compute_dependencies(parser, device, dataset.test_data, dataset)
	valid_UAS = get_UAS(dataset.test_data)
	print("- test UAS: {:.2f}".format(valid_UAS * 100.0))

	test_string = "I shot an elephant with a banana"
	parse_sentence(test_string, parser, device, dataset)