def demonstrate_source_lookup(source_uri):
    """Demonstrate the LookUp source API for a single source URI.

    :param source_uri: ConceptNet source URI to look up.
    """
    # Converted Py2 print statements to print() calls, consistent with the
    # Py3-style blocks elsewhere in this file; print('') behaves the same
    # as a bare Py2 'print' (blank separator line).
    print('Demonstrating LookUp source API')
    lookup = LookUp()
    data = lookup.search_source(source_uri)
    r = Result(data)
    r.print_raw_result()
    print('')
def get_all_tuples_of_relations(self):
    '''
    Returns list of tuples of relations which connects 'self.concepts'
    sequence in the path.
    '''
    relations_tuples = []
    for index, (concept1, concept2) in enumerate(pairwise(self.concepts)):
        search = Search(start=concept1, end=concept2)
        data = search.search()
        result = Result(data)
        if result.get_num_found() > 0:
            edges = result.parse_all_edges()
            if index == 0:
                # Seed the partial tuples with every distinct first relation.
                for e in edges:
                    if [e.rel] not in relations_tuples:
                        relations_tuples.append([e.rel])
            else:
                # Extend every partial tuple of length 'index' by each
                # relation found for this concept pair.
                for relation_tuple in relations_tuples:
                    if len(relation_tuple) == index:
                        for e in edges:
                            relation_tuple_copy = copy.deepcopy(relation_tuple)
                            relation_tuple_copy.append(e.rel)
                            if relation_tuple_copy not in relations_tuples:
                                relations_tuples.append(relation_tuple_copy)
    # BUG FIX: the original removed items from 'relations_tuples' while
    # iterating over it, which skips elements and can leave incomplete
    # tuples in the returned list. Build a filtered list instead.
    return [t for t in relations_tuples
            if len(t) == len(self.concepts) - 1]
def get_all_tuples_of_relations(self):
    '''
    Returns list of tuples of relations which connects 'self.concepts'
    sequence in the path.
    '''
    relations_tuples = []
    for index, (concept1, concept2) in enumerate(pairwise(self.concepts)):
        search = Search(start=concept1, end=concept2)
        data = search.search()
        result = Result(data)
        if result.get_num_found() > 0:
            edges = result.parse_all_edges()
            if index == 0:
                # Seed the partial tuples with every distinct first relation.
                for e in edges:
                    if [e.rel] not in relations_tuples:
                        relations_tuples.append([e.rel])
            else:
                # Extend every partial tuple of length 'index' by each
                # relation found for this concept pair.
                for relation_tuple in relations_tuples:
                    if len(relation_tuple) == index:
                        for e in edges:
                            relation_tuple_copy = copy.deepcopy(relation_tuple)
                            relation_tuple_copy.append(e.rel)
                            if relation_tuple_copy not in relations_tuples:
                                relations_tuples.append(relation_tuple_copy)
    # BUG FIX: the original removed items from 'relations_tuples' while
    # iterating over it, which skips elements and can leave incomplete
    # tuples in the returned list. Build a filtered list instead.
    return [t for t in relations_tuples
            if len(t) == len(self.concepts) - 1]
def printRelations(self, term):
    """Print every non-self-referencing edge found for 'term', one per line."""
    client = LookUp(limit=50)
    raw = client.search_concept(term)
    parsed = Result(raw)
    for e in parsed.parse_all_edges(clean_self_ref=True):
        print("%s --> %s --> %s" % (e.start, e.rel, e.end))
def countRelations(self, term):
    """Return the number of non-self-referencing edges found for 'term'.

    :param term: concept term to look up (at most 50 results requested).
    :returns: int count of parsed edges.
    """
    lookup = LookUp(limit=50)
    response = lookup.search_concept(term)
    response = Result(response)
    edges = response.parse_all_edges(clean_self_ref=True)
    # Idiom: count with sum() instead of a manual counter loop; works
    # whether parse_all_edges returns a list or any other iterable.
    return sum(1 for _ in edges)
def makeConceptMap(self, term, modifier):
    """Build a {edge.start: edge.end} map for edges matching a relation.

    :param term: concept term to look up (at most 500 results requested).
    :param modifier: relation name, resolved to a URI via self.getRelationUri().
    :returns: dict mapping edge start to edge end; when several edges share
        a start, the last one wins (same as the original loop).
    """
    lookup = LookUp(limit=500)
    response = Result(lookup.search_concept(term))
    edges = response.parse_all_edges(clean_self_ref=True)
    # Hoist the loop-invariant relation-URI lookup out of the loop.
    relation_uri = self.getRelationUri(modifier)
    return {edge.start: edge.end
            for edge in edges if edge.rel == relation_uri}
def get_all_tuples_of_concepts(self):
    '''
    Returns list of tuples of concepts connected by 'self.relations'
    sequence in the path.
    '''
    # Build a directed graph of every edge found for each relation in the
    # path; each graph edge is tagged with the relation that produced it.
    graph = nx.DiGraph()
    for relation in self.relations:
        search = Search(rel=relation, limit=1000)
        data = search.search()
        result = Result(data)
        edges = result.parse_all_edges()
        for e in edges:
            graph.add_node(e.start)
            graph.add_node(e.end)
            graph.add_edge(e.start, e.end)
            graph[e.start][e.end]['relation'] = relation
    concepts_tuples = []
    for index, r in enumerate(self.relations):
        for edge in graph.edges(data=True):
            start = edge[0]
            end = edge[1]
            relation = edge[2]['relation']
            if relation == r:
                if index == 0:
                    # Seed with the two concepts of this first-relation edge.
                    concepts_tuples.append([start, end])
                else:
                    # BUG FIX: the original removed from and appended to
                    # 'concepts_tuples' while iterating it directly, which
                    # skips elements and can leave unextended tuples in the
                    # result. Iterate over a snapshot instead.
                    for concept_tuple in list(concepts_tuples):
                        if len(concept_tuple) == index + 1:
                            concepts_tuples.remove(concept_tuple)
                            # Collect concepts reachable from the tuple's
                            # last concept via the current relation.
                            next_candidates = []
                            for next_edge in graph.edges(data=True):
                                if (next_edge[2]['relation'] == r and
                                        concept_tuple[-1] == next_edge[0]):
                                    next_candidates.append(next_edge[1])
                            for candidate in next_candidates:
                                concept_tuple_copy = copy.deepcopy(concept_tuple)
                                concept_tuple_copy.append(candidate)
                                concepts_tuples.append(concept_tuple_copy)
    return concepts_tuples
def does_exist(self, print_where_breaks=False):
    '''
    Checks whether this path exists in ConceptNet5.

    :param print_where_breaks: prints the assertion, if the assertion
        does not exist in the path.
    :returns: True when every assertion in the path is found, else False.
    '''
    for assertion in self.assertions:
        search = Search(start=assertion.start, rel=assertion.relation,
                        end=assertion.end, limit=1000)
        data = search.search()
        result = Result(data)
        # No results means this assertion (and hence the path) is missing.
        if result.get_num_found() == 0:
            # Idiom fix: test the flag's truthiness instead of '== True'.
            if print_where_breaks:
                print('Assertion breaking the path: [ %s --> (%s) --> %s) ]' % (
                    assertion.start, assertion.relation, assertion.end))
            return False
    return True
def demonstrate_association():
    """Demonstrate the Association API: similarity to a single concept,
    then similarity against a list of terms."""
    # Converted Py2 print statements to print() calls; print('') behaves
    # the same as a bare Py2 'print' (blank separator line).
    print('Demonstrating Association API')
    a = Association(filter='/c/en/dog', limit=1)
    data = a.get_similar_concepts('cat')
    r = Result(data)
    r.print_raw_result()
    print('')
    a = Association()
    data = a.get_similar_concepts_by_term_list(
        ['toast', 'cereal', 'juice', 'egg'])
    r = Result(data)
    r.print_raw_result()
    print('')
    print(r.parse_all_edges())
    print('')
def demonstrate_lookup(concept):
    """Demonstrate the LookUp concept API.

    :param concept: concept term/URI to look up.
    """
    # Converted Py2 print statements to print() calls; print('') behaves
    # the same as a bare Py2 'print' (blank separator line).
    print('Demonstrating LookUp concept API')
    lookup = LookUp(offset=1, limit=1)
    data = lookup.search_concept(concept)
    r = Result(data)
    edges = r.parse_all_edges()
    for edge in edges:
        edge.print_edge()
        edge.print_all_attrs()
    print('')
    print('Demonstrating LookUp concept API cleaning self referencing edges')
    # NOTE(review): despite the message above, no clean_self_ref flag is
    # passed in this second lookup -- confirm whether one was intended.
    lookup = LookUp(offset=1, limit=1)
    data = lookup.search_concept(concept)
    r = Result(data)
    r.print_raw_result()
    print('')
def get_all_tuples_of_concepts(self):
    '''
    Returns list of tuples of concepts connected by 'self.relations'
    sequence in the path.
    '''
    # Build a directed graph of every edge found for each relation in the
    # path; each graph edge is tagged with the relation that produced it.
    graph = nx.DiGraph()
    for relation in self.relations:
        search = Search(rel=relation, limit=1000)
        data = search.search()
        result = Result(data)
        edges = result.parse_all_edges()
        for e in edges:
            graph.add_node(e.start)
            graph.add_node(e.end)
            graph.add_edge(e.start, e.end)
            graph[e.start][e.end]['relation'] = relation
    concepts_tuples = []
    for index, r in enumerate(self.relations):
        for edge in graph.edges(data=True):
            start = edge[0]
            end = edge[1]
            relation = edge[2]['relation']
            if relation == r:
                if index == 0:
                    # Seed with the two concepts of this first-relation edge.
                    concepts_tuples.append([start, end])
                else:
                    # BUG FIX: the original removed from and appended to
                    # 'concepts_tuples' while iterating it directly, which
                    # skips elements and can leave unextended tuples in the
                    # result. Iterate over a snapshot instead.
                    for concept_tuple in list(concepts_tuples):
                        if len(concept_tuple) == index + 1:
                            concepts_tuples.remove(concept_tuple)
                            # Collect concepts reachable from the tuple's
                            # last concept via the current relation.
                            next_candidates = []
                            for next_edge in graph.edges(data=True):
                                if (next_edge[2]['relation'] == r and
                                        concept_tuple[-1] == next_edge[0]):
                                    next_candidates.append(next_edge[1])
                            for candidate in next_candidates:
                                concept_tuple_copy = copy.deepcopy(concept_tuple)
                                concept_tuple_copy.append(candidate)
                                concepts_tuples.append(concept_tuple_copy)
    return concepts_tuples
def does_exist(self, print_where_breaks=False):
    '''
    Checks whether this path exists in ConceptNet5.

    :param print_where_breaks: prints the assertion, if the assertion
        does not exist in the path.
    :returns: True when every assertion in the path is found, else False.
    '''
    for assertion in self.assertions:
        search = Search(start=assertion.start, rel=assertion.relation,
                        end=assertion.end, limit=1000)
        data = search.search()
        result = Result(data)
        # No results means this assertion (and hence the path) is missing.
        if result.get_num_found() == 0:
            # Converted Py2 print statement to a print() call and replaced
            # the '== True' anti-idiom with a plain truthiness test, to
            # match the other variant of this method in the codebase.
            if print_where_breaks:
                print('Assertion breaking the path: [ %s --> (%s) --> %s) ]' % (
                    assertion.start, assertion.relation, assertion.end))
            return False
    return True
def demonstrate_search():
    """Demonstrate the Search API: search by relation, then by several
    text parameters (including an arbitrary 'something' keyword)."""
    # Converted Py2 print statements to print() calls; print('') behaves
    # the same as a bare Py2 'print' (blank separator line).
    print('Demonstrating Search API')
    s = Search(rel='/c/en/be_often_compare_to')
    data = s.search()
    r = Result(data)
    r.print_raw_result()
    print('')
    s = Search(text='mariah carey', surfaceText='dion', something='anything')
    data = s.search()
    r = Result(data)
    r.print_raw_result()
    print('')
def demonstrate_association():
    """Demonstrate the Association API: similarity to a single concept,
    then similarity against a list of terms."""
    # Converted Py2 print statements to print() calls; print('') behaves
    # the same as a bare Py2 'print' (blank separator line).
    print('Demonstrating Association API')
    a = Association(filter='/c/en/dog', limit=1)
    data = a.get_similar_concepts('cat')
    r = Result(data)
    r.print_raw_result()
    print('')
    a = Association()
    data = a.get_similar_concepts_by_term_list(
        ['toast', 'cereal', 'juice', 'egg'])
    r = Result(data)
    r.print_raw_result()
    print('')
    print(r.parse_all_edges())
    print('')
from conceptnet5_client.web.api import Search
from conceptnet5_client.web.api import Association
from conceptnet5_client.utils.result import Result

# How similar are cats and dogs?
assoc = Association(filter='/c/en/dog', limit=1)
raw = assoc.get_similar_concepts('cat')
res = Result(raw)
# Dump the response in key = value format.
res.print_raw_result()

# Similarity of a whole term list.
assoc = Association()
raw = assoc.get_similar_concepts_by_term_list(['toast', 'cereal', 'juice', 'egg'])
res = Result(raw)
# Dump the response in key = value format.
res.print_raw_result()
# NOTE(review): this chunk is truncated -- the final 'prob = [...' statement
# continues beyond this view, and the original indentation was collapsed, so
# the nesting under the 'if' below is reconstructed; confirm against the
# full file. 'prenlp', 'q' and 'computeScore' are defined elsewhere.
# Preprocess the question and each answer choice.
ques = prenlp.preprocess(q.question)
ans_a = prenlp.preprocess(q.a)
ans_b = prenlp.preprocess(q.b)
ans_c = prenlp.preprocess(q.c)
ans_d = prenlp.preprocess(q.d)
print "Question: " + str(q.question)
print "A: " + str(q.a)
print "B: " + str(q.b)
print "C: " + str(q.c)
print "D: " + str(q.d)
# Generate Semantic Graph from Question
a = Association(filter="/c/en", limit=30)
semnet = a.get_similar_concepts_by_term_list(ques)
r = Result(semnet)
# Parse Similarity
similar = r.get_similar()
if len(similar) > 0:
    # Splice Leading API Directory
    # (drops the first 6 characters of each similar-concept URI)
    for word in similar:
        word[0] = word[0][6:]
    print "\n"
    print similar
    print "\n"
# Compute Score Probabilities
# NOTE(review): statement truncated in this chunk -- continues beyond view.
prob = [str(computeScore(similar, ans_a)),
# NOTE(review): this chunk starts mid-script -- 'ques', 'prenlp' and 'q' are
# defined earlier in the full file.
# Preprocess each answer choice. Converted Py2 print statements to print()
# calls; print('') behaves the same as a bare Py2 'print'.
ans_a = prenlp.preprocess(q.a)
ans_b = prenlp.preprocess(q.b)
ans_c = prenlp.preprocess(q.c)
ans_d = prenlp.preprocess(q.d)
print("[Question]: " + str(ques) + "\n")
print("[Answer Choices]")
print("A: " + str(ans_a))
print("B: " + str(ans_b))
print("C: " + str(ans_c))
print("D: " + str(ans_d))
print('')
print("[Correct Answer]: " + str(q.ans) + "\n")
# Perform Semantic Association Search on Question
a = Association()
data = a.get_similar_concepts_by_term_list(ques)
r = Result(data)
# Obtain Associative Edges (return value is discarded here)
r.parse_all_edges()
print('')
# Perform an Exhaustive Entity Frequency Search
# Check to see which one contains the most amount of relevant entities.
#print "[Predicted Answer]: "
#print "[Correct Answer]: " + str(q.ans) + "\n"
# NOTE(review): 'prenlp' and 'q' are defined elsewhere in the full file.
# Preprocess the question and each answer choice. Converted Py2 print
# statements to print() calls; print('') behaves the same as a bare Py2
# 'print'.
ques = prenlp.preprocess(q.question)
ans_a = prenlp.preprocess(q.a)
ans_b = prenlp.preprocess(q.b)
ans_c = prenlp.preprocess(q.c)
ans_d = prenlp.preprocess(q.d)
print("[Question]: " + str(ques) + "\n")
print("[Answer Choices]")
print("A: " + str(ans_a))
print("B: " + str(ans_b))
print("C: " + str(ans_c))
print("D: " + str(ans_d))
print('')
print("[Correct Answer]: " + str(q.ans) + "\n")
# Perform Semantic Association Search on Question
a = Association()
data = a.get_similar_concepts_by_term_list(ques)
r = Result(data)
# Obtain Associative Edges (return value is discarded here)
r.parse_all_edges()
print('')
# Perform an Exhaustive Entity Frequency Search
# Check to see which one contains the most amount of relevant entities.
#print "[Predicted Answer]: "
#print "[Correct Answer]: " + str(q.ans) + "\n"
# NOTE(review): this chunk is truncated -- the final 'prob = [' statement
# continues beyond this view, and the original indentation was collapsed, so
# the nesting under the 'if' below is reconstructed; confirm against the
# full file. 'prenlp' and 'q' are defined elsewhere.
# Preprocess the question and each answer choice.
ques = prenlp.preprocess(q.question)
ans_a = prenlp.preprocess(q.a)
ans_b = prenlp.preprocess(q.b)
ans_c = prenlp.preprocess(q.c)
ans_d = prenlp.preprocess(q.d)
print "Question: " + str(q.question)
print "A: " + str(q.a)
print "B: " + str(q.b)
print "C: " + str(q.c)
print "D: " + str(q.d)
# Generate Semantic Graph from Question
a = Association(filter="/c/en", limit=30)
semnet = a.get_similar_concepts_by_term_list(ques)
r = Result(semnet)
# Parse Similarity
similar = r.get_similar()
if len(similar) > 0:
    # Splice Leading API Directory
    # (drops the first 6 characters of each similar-concept URI)
    for word in similar:
        word[0] = word[0][6:]
    print "\n"
    print similar
    print "\n"
# Compute Score Probabilities
# NOTE(review): statement truncated in this chunk -- continues beyond view.
prob = [