コード例 #1
0
    def more_like_this2(self, item_doc, result_num):
        """Find questions similar to *item_doc* in the question index.

        Builds a Lucene query from the item's underlying document, searches
        the "term" field, and collects up to *result_num* question ids.

        :param item_doc: result item whose ``doc`` attribute holds the source
            Lucene document; may be None/falsy, in which case no search runs.
        :param result_num: maximum number of hits to retrieve.
        :return: list of ``question_id`` values (possibly empty).
        """
        similar_questions = []

        # Fix: the original appended a placeholder ResultItem when item_doc
        # was falsy, which crashed either way (None has no .append; a list
        # has no .doc). Simply skip query building when there is no usable
        # document — the empty query then short-circuits the search below.
        query = ""
        if item_doc and item_doc.doc:
            query += self.document_to_query(item_doc.doc)

        query = remove_unified_stop_lists(query)
        queryparser = QueryParser(Version.LUCENE_CURRENT, "term",
                                  self.analyzer)

        if query:
            try:
                like_query = queryparser.parse(query)
                hits = self.searcher.search(like_query, result_num).scoreDocs

                for i, hit in enumerate(hits):
                    doc = self.searcher.doc(hit.doc)
                    similar_questions.append(doc.get("question_id"))

            except Exception as e:
                print "Question Searcher: Error: %s" % e
                # write_search_log("Question Searcher: Error: %s" % e + "\n")
                print(traceback.format_exc())

        return similar_questions

    # def release(self, searcher):
コード例 #2
0
ファイル: Anil_Code_Query.py プロジェクト: pombredanne/facoy
def Generator(code):
    file_content = code
    print '1. Origianl Query : ', file_content
    ast = parse(file_content, resolve=False)
    query = add_code_keyword_into_document(file_content, ast)
    print "2. Right after alternation & before the removing stop words : ", query
    query = remove_unified_stop_lists(query)
    print '3. Right after the stop words removing : ', query
    return query
コード例 #3
0
    def more_like_this2(self, limit, item_doc, score_logs_for_each, user_query,
                        flag):  #flag = UQ(1) or not(0)
        """Search the benchmark index with either the user query or a doc query.

        :param limit: maximum number of hits to retrieve.
        :param item_doc: result item providing ``doc`` when flag == 0.
        :param score_logs_for_each: accumulated score-log string; extended here
            with one "<name>\\t<score>" line per hit and returned.
        :param user_query: raw user query text, used when flag == 1.
        :param flag: 1 = use user_query (UQ), 0 = derive query from item_doc.
        :return: (list of BenchResultItem, updated score_logs_for_each).
        """
        bench_result = []
        query = ""
        if flag == 1:
            query += user_query
            # item_doc = ResultItem(None, 0.0, "No Title", 'None','None', None)

        if flag == 0 and item_doc.doc:
            query += self.document_to_query(item_doc.doc)

        query = remove_unified_stop_lists(query)

        queryparser = QueryParser(Version.LUCENE_CURRENT, "typed_method_call",
                                  self.analyzer)
        if query:
            try:
                parsed_query = queryparser.parse(query)
                hits = self.searcher.search(parsed_query, limit).scoreDocs
                temp = 1
                for i, hit in enumerate(hits):
                    doc = self.searcher.doc(hit.doc)
                    # NOTE(review): assumes indexed 'file' paths always have at
                    # least 10 '/'-separated components — confirm with indexer.
                    matched = doc.get('file').split('/')[9].split('.')[0]
                    score_logs_for_each += str(matched) + '\t' + str(
                        round(hit.score, 2)) + '\n'
                    matched_terms = self.get_matched_keywords2(
                        parsed_query, hit.doc)

                    temp += 1

                    file_path = doc.get("file")
                    content = None
                    # Best-effort read of the matched file; skip it when the
                    # path is missing or unreadable. Fix: was a bare
                    # ``except: pass`` which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    try:
                        with open(file_path) as f:
                            content = f.read()
                    except (IOError, OSError, TypeError):
                        pass

                    if content:
                        item = BenchResultItem(doc.get("file"), content,
                                               matched_terms,
                                               hit.score, item_doc,
                                               doc.get("line_numbers"),
                                               hit.doc)
                        bench_result.append(item)

            except Exception as e:
                print "BenchSearcher Error: %s" % e
                print(traceback.format_exc())

        return bench_result, score_logs_for_each
コード例 #4
0
    def more_like_this3(self, limit, score_logs_for_each, user_query):
        """Search the benchmark index using only the user's query string.

        :param limit: maximum number of hits to retrieve.
        :param score_logs_for_each: accumulated score-log string; extended here
            with one rounded score per hit and returned.
        :param user_query: raw user query text.
        :return: (list of BenchResultItem_UQ, updated score_logs_for_each).
        """
        query = ""
        bench_result = []

        query += user_query
        query = remove_unified_stop_lists(query)

        queryparser = QueryParser(Version.LUCENE_CURRENT, "typed_method_call",
                                  self.analyzer)
        if query:
            try:
                parsed_query = queryparser.parse(query)
                hits = self.searcher.search(parsed_query, limit).scoreDocs
                temp = 1
                for i, hit in enumerate(hits):
                    score_logs_for_each += str(round(hit.score, 2)) + '\n'
                    doc = self.searcher.doc(hit.doc)
                    matched_terms = self.get_matched_keywords2(
                        parsed_query, hit.doc)

                    temp += 1

                    file_path = doc.get("file")
                    content = None
                    # Best-effort read of the matched file. Fix: narrowed from
                    # a bare ``except: pass`` that also swallowed
                    # KeyboardInterrupt/SystemExit.
                    try:
                        with open(file_path) as f:
                            content = f.read()
                    except (IOError, OSError, TypeError):
                        pass

                    if content:
                        item = BenchResultItem_UQ(doc.get("file"), content,
                                                  matched_terms, hit.score,
                                                  doc.get("line_numbers"),
                                                  hit.doc)
                        bench_result.append(item)

            except Exception as e:
                print "BenchSearcher Error: %s" % e
                print(traceback.format_exc())

        return bench_result, score_logs_for_each
コード例 #5
0
def Generator(code):
    """Convert raw Java source *code* into a stop-word-filtered query string."""
    src = code
    # Parse the Java code (original note: uses the newJavaParser), then fold
    # its keywords into a query document.
    tree = parse(src, resolve=False)
    raw_query = add_code_keyword_into_document(src, tree)
    # Strip unified stop words before returning the final query.
    return remove_unified_stop_lists(raw_query)
コード例 #6
0
    def more_like_this2(self, limit, item_doc, user_query,
                        flag):  #flag = UQ(1) or not(0)
        """Search the GitHub index with either the user query or a doc query.

        :param limit: maximum number of hits to retrieve.
        :param item_doc: result item providing ``doc`` when flag == 0; also
            attached to each returned GithubResultItem.
        :param user_query: raw user query text, used when flag == 1.
        :param flag: 1 = use user_query (UQ), 0 = derive query from item_doc.
        :return: list of GithubResultItem (possibly empty).
        """
        results = []
        query = ""
        if flag == 1:
            query += user_query
            # item_doc = ResultItem(None, 0.0, "No Title", 'None','None', None)

        if flag == 0 and item_doc.doc:
            query += self.document_to_query(item_doc.doc)

        query = remove_unified_stop_lists(query)

        queryparser = QueryParser(Version.LUCENE_CURRENT, "typed_method_call",
                                  self.analyzer)
        if query:
            try:
                parsed_query = queryparser.parse(query)
                hits = self.searcher.search(parsed_query, limit).scoreDocs
                temp = 1
                for i, hit in enumerate(hits):
                    doc = self.searcher.doc(hit.doc)
                    matched_terms = self.get_matched_keywords2(
                        parsed_query, hit.doc)
                    temp += 1
                    file_path = doc.get("file")
                    content = None
                    # Best-effort read of the matched file. Fix: narrowed from
                    # a bare ``except: pass`` that also swallowed
                    # KeyboardInterrupt/SystemExit.
                    try:
                        with open(file_path) as f:
                            content = f.read()
                    except (IOError, OSError, TypeError):
                        pass

                    if content:
                        item = GithubResultItem(doc.get("file"), content,
                                                matched_terms, hit.score,
                                                item_doc,
                                                doc.get("line_numbers"),
                                                hit.doc)
                        results.append(item)

            except Exception as e:
                print "GitHub Searcher Error: %s" % e
                print(traceback.format_exc())

        return results
コード例 #7
0
	def more_like_this2(self, limit, score_logs_for_each, user_query, flag):
		"""Search the benchmark index with the user's query (when flag == 1).

		:param limit: maximum number of hits to retrieve.
		:param score_logs_for_each: accumulated score-log string; extended
			here with one "<name>\\t<score>" line per hit and returned.
		:param user_query: raw user query text, used only when flag == 1.
		:param flag: 1 = use user_query; otherwise the query stays empty and
			no search runs.
		:return: (list of BenchResultItem_UQ, updated score_logs_for_each).
		"""
		bench_result = []
		query = ""
		if flag == 1:
			query += user_query

		query = remove_unified_stop_lists(query)
		queryparser = QueryParser(Version.LUCENE_CURRENT, "typed_method_call", self.analyzer)
		if query:
			try:
				parsed_query = queryparser.parse(query)
				hits = self.searcher.search(parsed_query, limit).scoreDocs
				temp = 1
				for i, hit in enumerate(hits):
					doc = self.searcher.doc(hit.doc)
					# NOTE(review): assumes indexed 'file' paths always have at
					# least 10 '/'-separated components — confirm with indexer.
					matched = doc.get('file').split('/')[9].split('.')[0]
					score_logs_for_each += str(matched) + '\t' + str(round(hit.score, 2)) + '\n'
					matched_terms = self.get_matched_keywords2(parsed_query, hit.doc)
					temp += 1

					file_path = doc.get("file")
					content = None
					# Best-effort read of the matched file. Fix: narrowed from
					# a bare ``except: pass`` that also swallowed
					# KeyboardInterrupt/SystemExit.
					try:
						with open(file_path) as f:
							content = f.read()
					except (IOError, OSError, TypeError):
						pass

					if content:
						item = BenchResultItem_UQ(doc.get("file"), content, matched_terms, hit.score, doc.get("line_numbers"), hit.doc)
						bench_result.append(item)

			except Exception as e:
				print "BenchSearcher Error: %s" % e
				print(traceback.format_exc())

		return bench_result, score_logs_for_each
コード例 #8
0
    def more_like_this2(
        self, item_doc, result_num
    ):
        """Find questions similar to *item_doc* in the question index.

        Builds the unified query (already tokenized/stemmed by the time it is
        parsed), searches the "term" field, and collects the top *result_num*
        similar question ids.

        :param item_doc: result item whose ``doc`` attribute holds the source
            Lucene document; may be None/falsy, in which case no search runs.
        :param result_num: maximum number of hits to retrieve.
        :return: list of ``question_id`` values (possibly empty).
        """
        similar_questions = []

        # Fix: the original appended a placeholder ResultItem when item_doc
        # was falsy, which crashed either way (None has no .append; a list
        # has no .doc). Skip query building instead — an empty query
        # short-circuits the search below.
        query = ""
        if item_doc and item_doc.doc:
            query += self.document_to_query(item_doc.doc)

        query = remove_unified_stop_lists(query)
        queryparser = QueryParser(Version.LUCENE_CURRENT, "term",
                                  self.analyzer)

        if query:
            try:
                like_query = queryparser.parse(query)
                hits = self.searcher.search(like_query,
                                            result_num).scoreDocs

                for i, hit in enumerate(hits):
                    doc = self.searcher.doc(hit.doc)
                    similar_questions.append(doc.get("question_id"))

            except Exception as e:
                print "Question Searcher: Error: %s" % e
                # write_search_log("Question Searcher: Error: %s" % e + "\n")
                print(traceback.format_exc())

        return similar_questions
コード例 #9
0
ファイル: Project_Searcher.py プロジェクト: pombredanne/facoy
    def more_like_this2(self, limit, item_doc, user_query):
        github_result = []
        if not item_doc:
            item_doc.append(ResultItem(None, 1.0, "No Title", 0, 0))

        query = ""
        if item_doc.doc:
            query += self.document_to_query(item_doc.doc)
        query += user_query
        query = remove_unified_stop_lists(query)
        print '................................................................................................'
        print "Project Searcher Unified Query :", query
        print '................................................................................................'
        write_search_log(
            "................................................................................................\n"
            + "Project Searcher Unified Query : " +
            str(query.encode('utf-8')) + "\n" +
            "................................................................................................\n"
        )
        queryparser = QueryParser(Version.LUCENE_CURRENT, "typed_method_call",
                                  self.analyzer)
        if query:
            try:
                like_query = queryparser.parse(query)
                hits = self.searcher.search(like_query,
                                            limit).scoreDocs  #answer 1개당 10개씩
                temp = 1
                for i, hit in enumerate(hits):
                    doc = self.searcher.doc(hit.doc)
                    matched_terms = self.get_matched_keywords2(
                        like_query, hit.doc)
                    #print "Matched Terms : ", matched_terms

                    print("File %s" % temp, doc.get("file"), "//",
                          doc.get("file_content")
                          )  #, "line_numbers", doc.get("line_numbers"))
                    write_search_log("File " + str(temp) +
                                     str(doc.get("file")) + "//" +
                                     str(doc.get("file_content")) + "\n")
                    temp += 1

                    file_path = doc.get("file")
                    print 'file_path = ', file_path
                    content = None
                    try:
                        with open(file_path) as f:
                            content = f.read()
                    except:
                        print "CAN'T OPEN THE FILE"
                        pass

                    if content:
                        item = GithubResultItem(doc.get("file"), content,
                                                matched_terms, hit.score,
                                                item_doc,
                                                doc.get("line_numbers"),
                                                hit.doc)
                        # print item.score
                        github_result.append(item)

            except Exception as e:
                print "GitSearcher Error: %s" % e
                print(traceback.format_exc())

        #sorted(github_result, key=attrgetter())

            print 'github_result : ', github_result
        return github_result