Example no. 1
0
def answer_conversation(questions, tagmeToken, hyperparameters, number_of_frontier_nodes):
	"""Answer every question of a conversation in order and return the list of answers.

	The first question is answered stand-alone via answer_complete_question and its
	context seeds a fresh QA graph; each follow-up question is answered against the
	evolving graph, which answer_follow_up_question returns updated for the next turn.
	"""
	first = answer_complete_question(questions[0], tagmeToken)
	# seed a new graph from the first question's context (qa=True marks QA usage)
	graph = gp.expand_context_with_statements(None, [first['context']], qa=True)
	answers = [first['answers']]
	# follow-up turns are numbered starting at 2 (turn 1 was the complete question)
	for turn, question in enumerate(questions[1:], start=2):
		answer, graph = answer_follow_up_question(question, turn, graph, hyperparameters, number_of_frontier_nodes)
		answers.append(answer)
	return answers
Example no. 2
0
def _context_relevance(temp_context, node_label, entity_nodes, turn):
	"""Mean turn-weighted inverse graph distance from node_label to the QA nodes.

	Each QA node contributes 1/distance, weighted by turn_rating_higher_better on
	the node's turn; the sum is averaged over all QA nodes.

	NOTE(review): the original comment claimed distance is "increased by 1 to
	avoid zero division", but no +1 appears here — presumably gp.get_distance
	already returns a strictly positive value; confirm against gp.
	"""
	if not entity_nodes:
		# no QA nodes in the context: relevance is defined as 0 (avoids division by zero)
		return 0.0
	total_weighted_distance = 0
	for node, data in entity_nodes:
		distance = gp.get_distance(temp_context, node_label, node)
		total_weighted_distance += float(1/float(distance)) * turn_rating_higher_better(data['turn'], turn)
	return total_weighted_distance / float(len(entity_nodes))

def determine_attributes(candidates, context, turn):
	"""Score each candidate in place with context relevance and priors.

	For every candidate, a temporary copy of the context is expanded with the
	candidate's statement; the candidate's graph node label and prior function
	are chosen by candidate['type'], and candidate['score'] is set to
	{'context_relevance': ..., 'priors': ...}. Returns the (mutated) candidates list.
	"""
	for candidate in candidates:
		# create a temporal context and include the candidate's statement there
		temp_context = context.copy()
		temp_context = gp.expand_context_with_statements(temp_context, [candidate['statement']])
		entity_nodes = gp.get_all_qa_nodes(temp_context)
		candidate_type = candidate['type']
		if candidate_type == 'entity':
			node_label = candidate['entity']
			priors = priors_of_entity(candidate['entity'])
		elif candidate_type == 'qualifier_object':
			node_label = candidate['qualifier_object']
			priors = priors_of_entity(candidate['qualifier_object'])
		elif candidate_type == 'predicate':
			# every predicate label is made unique with a counter suffix; the
			# predicate should already have been inserted into the context
			node_label = candidate['predicate'] + "-" + str(gp.predicate_nodes[candidate['predicate']]-1)
			priors = priors_of_predicate(candidate['predicate'])
		elif candidate_type == 'qualifier_predicate':
			# same unique-label scheme for qualifier predicates
			node_label = candidate['qualifier_predicate'] + "-" + str(gp.qualifier_predicate_nodes[candidate['qualifier_predicate']]-1)
			priors = priors_of_predicate(candidate['qualifier_predicate'])
		context_relevance = _context_relevance(temp_context, node_label, entity_nodes, turn)
		candidate['score'] = {'context_relevance': context_relevance , 'priors': priors}
	return candidates