Beispiel #1
0
    def btn_search(self):
        """Run a search for the text in the search box and display results.

        Reads the query from ``self.searchBox``, invokes ``searcher`` with
        argparse-built arguments, and writes the newline-joined results into
        ``self.myLabel``. On an HTTP error, prints the error and returns
        without touching the label.
        """
        quary = self.searchBox.text()

        # Build the parser arguments for the searcher.
        # NOTE(review): argparser is shared module state — a second click will
        # re-register the same options and argparse raises on duplicates;
        # confirm whether the parser should be rebuilt per call.
        argparser.add_argument("--q", default=quary)
        argparser.add_argument("--max-results", help="Max results", default=10)
        argparser.add_argument("--closedCaption",
                               help="closed caption",
                               default="true")
        args = argparser.parse_args()

        # Try to get results; return early on failure so we never reference
        # an undefined `result` below (the original fell through and crashed).
        try:
            result = searcher(args)
        except HttpError as err:
            # Original message had an unfilled %d placeholder; report the
            # actual error instead.
            print("An HTTP error occurred: %s" % err)
            return

        # Number of items returned.
        print(len(result))
        # Join instead of repeated += (linear instead of quadratic build).
        formattedList = "".join("\n" + item for item in result)

        self.myLabel.setText(formattedList)
Beispiel #2
0
def wiki_searcher(request):
    """Build the default full-text searcher over wiki Documents."""
    s = searcher(request)(Document)
    s = s.query_fields("title__text", "content__text", "summary__text", "keywords__text")
    # Title matches dominate; keywords and summary are mid-weight.
    return s.weight(title=6, content=1, keywords=4, summary=2)
Beispiel #3
0
def question_searcher(request):
    """Build the default searcher over Question documents."""
    base = searcher(request)(Question)
    fielded = base.query_fields(
        "title__text", "question_content__text", "answer_content__text"
    )
    weighted = fielded.weight(title=4, question_content=3, answer_content=3)
    grouped = weighted.group_by("question_id", "-@group")  # nop in elasticutils
    return grouped.highlight(
        before_match="<b>",
        after_match="</b>",
        limit=settings.SEARCH_SUMMARY_LENGTH,
    )
Beispiel #4
0
def main():
    """Index the OHSUMED documents, run the queries, and report timings.

    Prints the search-only running time and the total running time.
    """
    starttime = time.time()
    # Context managers guarantee the files are closed even if indexing or
    # searching raises (the original leaked both files on error).
    with open('ohsumed.88-91', 'r') as documents_file, \
         open('query.ohsu.1-63', 'r') as queries_file:
        # Record documents on the RAM directory.
        directory, analyzer = index.indexer(documents_file)
        # Search queries in the documents, timing only the search phase.
        startsearchtime = time.time()
        search.searcher(directory, analyzer, queries_file)
        searchtime = time.time() - startsearchtime
        print("Running time of only search:", searchtime)
    running_time = time.time() - starttime
    print("Running time of the algorithm using the HW data:", running_time)
Beispiel #5
0
def question_searcher(request):
    """Return a question searcher configured with the default field weights."""
    qs = searcher(request)(Question)
    qs = qs.query_fields('title__text',
                         'question_content__text',
                         'answer_content__text')
    qs = qs.weight(title=4, question_content=3, answer_content=3)
    # nop in elasticutils
    qs = qs.group_by('question_id', '-@group')
    return qs.highlight(before_match='<b>',
                        after_match='</b>',
                        limit=settings.SEARCH_SUMMARY_LENGTH)
Beispiel #6
0
def question_searcher(request):
    """Construct the default searcher over Question documents."""
    fields = ('title__text', 'question_content__text', 'answer_content__text')
    s = searcher(request)(Question).query_fields(*fields)
    s = s.weight(title=4, question_content=3, answer_content=3)
    s = s.group_by('question_id', '-@group')  # nop in elasticutils
    return s.highlight(before_match='<b>', after_match='</b>',
                       limit=settings.SEARCH_SUMMARY_LENGTH)
Beispiel #7
0
def discussion_searcher(request):
    """Return a forum searcher with default parameters."""
    if waffle.flag_is_active(request, 'elasticsearch'):
        index_model = Thread
    else:
        # The index is on Post but carries the Thread.title of the related
        # Thread. We base the S off Post because we need to excerpt content.
        index_model = Post

    s = searcher(request)(index_model)
    s = s.weight(title=2, content=1)
    s = s.group_by('thread_id', '-@group')
    s = s.query_fields('title__text', 'content__text')
    return s.order_by('created')
Beispiel #8
0
def discussion_searcher(request):
    """Build the default forum searcher."""
    if waffle.flag_is_active(request, 'elasticsearch'):
        model = Thread
    else:
        # The index is on Post but with the Thread.title for the Thread
        # related to the Post; we base the S off Post because we need to
        # excerpt content.
        model = Post

    return (searcher(request)(model)
            .weight(title=2, content=1)
            .group_by('thread_id', '-@group')
            .query_fields('title__text', 'content__text')
            .order_by('created'))
Beispiel #9
0
def wiki_searcher(request):
    """Return a wiki document searcher with default parameters."""
    fields = ('title__text', 'content__text', 'summary__text', 'keywords__text')
    wiki_s = searcher(request)(Document).query_fields(*fields)
    # Title is weighted highest, then keywords, summary, and raw content.
    return wiki_s.weight(title=6, content=1, keywords=4, summary=2)
Beispiel #10
0
def main():
    """Open the 'Tech.db' search index and run a sample query."""
    db_searcher = search.searcher('Tech.db')
    db_searcher.query('google amazon')  # enter the words to search for
Beispiel #11
0
import pprint
from search import searcher

# Build the searcher and run a search for the query term.
obj = searcher()
query = 'note9'
# sum_op='text' with SENTENCES_COUNT=2 — presumably requests a two-sentence
# textual summary of each result; confirm against the search module.
result = obj.search(query, sum_op='text', SENTENCES_COUNT=2)
# print(pprint.pprint(result))
Beispiel #12
0
from search import youtube_search as searcher
from apiclient.errors import HttpError
from oauth2client.tools import argparser


# Prompt the user for the song to look up on YouTube.
quary = input("Enter a song to search for")

# Build the CLI arguments consumed by the YouTube searcher.
argparser.add_argument("--q", help="Search term", default=quary)
argparser.add_argument("--max-results", help="Max results", default=1)
argparser.add_argument("--closedCaption", help="closed caption", default="true")
args = argparser.parse_args()

try:
    result = searcher(args)
except HttpError as err:
    # Original message had an unfilled %d placeholder and then fell through
    # to reference an undefined `result`; report the actual error instead.
    print("An HTTP error occurred: %s" % err)
else:
    print("get results")
    print(args)

    print(result)