def it_should_search_test(self):
    vectorSpace = VectorSpace(self.documents)
    eq_(vectorSpace.search(["cat"]), [0.14487566959813258,
                                      0.1223402602604157,
                                      0.07795622058966725,
                                      0.05586504042763477])
import operator

from semanticpy.vector_space import VectorSpace


def run(data, queries, max_response=10):
    # Collect the raw text of every document
    all_documents = []
    for entry in data:
        all_documents.append(entry["raw_data"])
    vector_space = VectorSpace(all_documents)

    # Score every document against the query string
    indexed_result = {}
    result = vector_space.search([queries])
    index = 0
    for entry in result:
        indexed_result[index] = entry
        index += 1

    # Rank documents by similarity, highest score first, and keep the top results
    sorted_resp = sorted(indexed_result.items(), key=operator.itemgetter(1), reverse=True)
    sorted_resp = sorted_resp[:int(max_response)]

    # Map rank -> original data entry
    response = {}
    rank = 1
    for entry in sorted_resp:
        data_index = entry[0]
        response[rank] = data[data_index]
        rank += 1
    return response
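A minimal usage sketch for run: the example documents and the "title" field are invented here, but "raw_data" is the one key the function actually reads, and queries is a single query string.

# Hypothetical input: each entry only needs a "raw_data" field holding the document text
documents = [
    {"raw_data": "the cat sat on the mat", "title": "doc-1"},
    {"raw_data": "dogs chase cats around the yard", "title": "doc-2"},
    {"raw_data": "a treatise on vector space search", "title": "doc-3"},
]
top_hits = run(documents, "cat", max_response=2)
# top_hits maps rank -> original entry, e.g. {1: {...}, 2: {...}}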
def it_should_find_related_test(self):
    vector_space = VectorSpace(self.documents)
    eq_(vector_space.related(0), [1.0000000000000002, 0.9999999999999998, 0.0])
def it_should_search_test(self):
    vector_space = VectorSpace(self.documents, transforms=[])
    eq_(vector_space.search(["cat"]), [1.0, 0.7071067811865475, 0.0])
def it_should_find_return_similarity_rating_test(self):
    vectorSpace = VectorSpace(self.documents)
    eq_(vectorSpace.related(0), [1.0, 0.9922455760198575, 0.08122814162371816, 0.0762173599906487])
import pickle

from semanticpy.vector_space import VectorSpace

queries = pickle.load(open("QueryStrings.p", "rb"))
print "total queries"
print len(queries)
print "loaded queries"

documents = pickle.load(open("documentContentList2.p", "rb"))
print "loaded documents"

docIds = pickle.load(open("docIdList.p", "rb"))
print len(docIds)
print "loaded doc ids"

documents = list(documents)
print "loaded documents"
print len(documents)

#documents = documents[:100]
#docIds = docIds[:100]
#queries = queries[:10]

vector_space = VectorSpace(documents)
print "finished conversion"

print "load user click file"
userQueriesAndClicks = pickle.load(
    open("user_specific_positive_negative_examples_dic_test", "rb"))
print "finished loading user click file"

#queryResults = dict([(x[0], (x[1], x[2])) for x in userQueriesAndClicks_strict[userID]])


# Given a query and a ranking, this function provides the relevanceJudgements list
# as required by averagePrecision
def turnIntoBinaryRelevanceThing(query, ranking, relevantDocuments):
    #rel = self.relevantDocuments[query]
    binarized = []
# Create the corpus
file_content_all = []
corpus = 'AspectJ'
creator = sourceCorpusCreator()
sourcepath = "E:\PhD\LSI\Repo\\" + corpus + "\SourceAndBugData244\\"
keywordsfilepath = 'E:\PhD\LSI\Repo\\' + corpus + '\data\keyword-documents.txt'
#querypath = "E:\PhD\LSI\Repo\\" + corpus + "\BugData\\"

# Gather the .java source files into a corpus dictionary
source_content_all = {}
source_content_all = creator.CorpusCreatorDict(sourcepath, '.java')
print('Total files in corpus ')
print(len(source_content_all))
print(source_content_all)

# Build the vector space model over the corpus
vector_space = VectorSpace(source_content_all)
file_path_all = vector_space.get_file_path_all()
print(file_path_all)
document_ID_file_info_mapping = vector_space.get_document_ID_file_info_mapping()
print(document_ID_file_info_mapping)

# Write the keyword-to-index mapping out to the keywords file
keywords_docs_string = str(vector_space.vector_index_to_keyword_mapping)
file_read_write = FileReadWrite(sourcepath)
file_read_write.writeFiles(keywordsfilepath, keywords_docs_string)
print(len(vector_space.vector_index_to_keyword_mapping))

#import pdb
#pdb.set_trace()

print("Keywords-document vector/matrix")
print('length of vector_space.collection_of_document_term_vectors')
print(len(vector_space.collection_of_document_term_vectors))
document_term_matrix = vector_space.collection_of_document_term_vectors
import pandas as pd
import pickle

from semanticpy.vector_space import VectorSpace

data = pd.read_json('cc_jokes_valid.json')
df = pd.DataFrame(data)
dfList = df['content'].tolist()

# Build the vector space model and save it to a pickle (takes a long time)
vector_space = VectorSpace(dfList)
with open('vsm.obj', 'wb') as filehandler:
    pickle.dump(vector_space, filehandler)
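A possible follow-up sketch, assuming the VectorSpace object unpickles cleanly: reload vsm.obj and query it with the same search API used in the other snippets; the query string is illustrative.

# Reload the pickled model and score the corpus against a query
# (file name matches the dump above)
with open('vsm.obj', 'rb') as filehandler:
    vsm = pickle.load(filehandler)
scores = vsm.search(["cat"])  # one similarity score per entry in dfList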
def vector_space_mapping(self):
    v = VectorSpace(self.documents)
    matrix = v.collection_of_document_term_vectors
    return matrix