Example #1
def search_(search_query):
    search_results = search(search_query)

    and_ids = search_results['ands']
    or_ids = search_results['ors']

    #Each key in 'ands' and 'ors' has a list of integers that represent
    #the ids of the meteorites.

    ands = {'countries': [], 'meteorites': [], 'classifications': []}
    ors = {'countries': [], 'meteorites': [], 'classifications': []}

    for model, ids in and_ids.items():
        for id in ids:
            m_info = requests.get('http://meteorite-landings.me/api/' + model + '/' + str(id)).json()
            ands[model].append(m_info)


    for values in or_ids.values():
        for model, ids in values.items():
            for id in ids:
                m_info = requests.get('http://meteorite-landings.me/api/' + model + '/' + str(id)).json()
                ors[model].append(m_info)

    return jsonify(ors = ors, ands = ands)
Example #2
def search_with_start(start_ind):
    t_routes = []
    this_start = start_ind[0]
    this_end = start_ind[1]
    print(start_ind)
    start_time = time.time()
    for ind in range(this_start, this_end + 1):
        print('Process ' + str(start_ind[2]) + ': start number ' + str(ind))
        a = len(t_routes)
        search.search(search.normalOrders.index[ind])
        t_nodes, t_times = search.resultsNodes, search.resultsTime
        t_routes += merge.format_transform(t_nodes, t_times, search.orders)
        print('Process ' + str(start_ind[2]) + ': end number ' + str(ind) +
              ' with start length ' + str(a) + ' end length ' + str(len(t_routes)) +
              ' add ' + str(len(t_nodes)))
        clear_search_results()
        now_time = time.time()
        if now_time - start_time >= 1200.0:
            with open('temp_res/ori_routes' + str(start_ind[2]), 'wb') as f1:
                pickle.dump((ind, this_end, t_routes), f1)
            start_time = now_time
    with open('temp_res/ori_routes_C' + str(start_ind[2]), 'wb') as f1:
        pickle.dump(t_routes, f1)
    return t_routes
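The timed pickle dump above doubles as a crash checkpoint. Below is a minimal resume sketch, assuming the (ind, this_end, t_routes) tuple layout written by search_with_start; resume_from_checkpoint is a hypothetical helper, not part of the original code.

import pickle

def resume_from_checkpoint(proc_id):
    # Load the last periodic checkpoint written by search_with_start
    # (assumed layout: (last_completed_index, end_index, routes_so_far)).
    with open('temp_res/ori_routes' + str(proc_id), 'rb') as f:
        last_ind, this_end, t_routes = pickle.load(f)
    # The next unprocessed order index follows the last completed one.
    return last_ind + 1, this_end, t_routes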
Example #3
 def test_search(self):
     search.search(base.get_dbo(), FakeSession(), "test")
     keywords = [ "os", "notforadoption", "notmicrochipped", "hold", "quarantine", "deceased", 
         "forpublish", "people", "vets", "retailers", "staff", "fosterers", "volunteers", "shelters",
         "aco", "homechecked", "homecheckers", "members", "donors", "reservenohomecheck",
         "overduedonations", "activelost", "activefound" ]
     for k in keywords:
         search.search(base.get_dbo(), FakeSession(), k)
Example #4
    def do_GET(self):
        global squid_hostname
        global squid_port
        global google_domain
        global yahoo_domain
        global keyword
        
        parts = self.path.split("?") #Extract requested file and get parameters from path
        path = parts[0]
        
        #Extract variables from get parameters
        try:
            arguments = {}
            arguments["q"] = None #Variable for search request. Default None to prevent errors if no search request was started
            if (len(parts) > 1):
                raw_arguments = parts[1].split("&")
                for raw_argument in raw_arguments[:]:
                    argument = raw_argument.split("=", 1)
                    arguments[argument[0]] = argument[1]
                    if (argument[0] == "p"): # Yahoo uses search?p= so lets copy that to q=, which is what Google uses.
                        arguments["q"] = argument[1]
        except:
            print ("No get parameters")
        
        print (path)
        
        #Decide whether a search or the style.css was requested
        if (path == "/style.css"):
            self.document = open('style.css', 'r').read()
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(bytes(self.document, "utf-8"))
        elif (path == "/proxy.pac"):
            self.document = open('proxy.pac', 'r').read()
            self.document = self.document.replace('<keyword>', keyword.lower(), 1)
            self.document = self.document.replace('<google_domain>', google_domain, 1)
            self.document = self.document.replace('<yahoo_domain>', yahoo_domain, 1)
            self.document = self.document.replace('<squid_host>', squid_hostname, 2)
            self.document = self.document.replace('<squid_port>', str(squid_port), 2)
            self.send_response(200)
            self.send_header('Content-type', 'x-ns-proxy-autoconfig')
            self.end_headers()
            self.wfile.write(bytes(self.document, "utf-8"))
        elif (arguments["q"] != None):
            arguments["q"] = arguments["q"].replace(keyword + '+', '', 1)
            arguments["q"] = arguments["q"].replace('+', ' ')
            arguments["q"] = arguments["q"].replace('! ', '')
            command = commands(self)
            search(command).search(arguments["q"])
        else:
            self.send_response(404)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(bytes('Not found. Please visit <a href="https://github.com/HcDevel/Siri-API/wiki/_pages">https://github.com/HcDevel/Siri-API/wiki/_pages</a>', "utf-8"))

        return
Example #5
def main():
    opts = read_opts()

    if not opts.c_backend:
        print "WARNING: training in pure python. Run with -c option to enable the (much faster) C++ backend"

    feature_descriptors = faces.list_feature_descriptors((16, 16))
    data = []
    print "loading faces..."
    faces.load_data_dir("Face16", 1, feature_descriptors, data, opts.num_faces, opts.c_backend)
    faces.load_data_dir("Nonface16", -1, feature_descriptors, data, opts.num_other, opts.c_backend)

    print "suffling..."
    random.shuffle(data)
    if opts.sample_size:
        train_data = data[: opts.sample_size]
        validation_data = data[opts.sample_size : opts.sample_size + opts.validate_size]
    elif opts.validate_size:
        train_data = []
        validation_data = data[: opts.validate_size]
    else:
        train_data = data
        validation_data = []

    if opts.load_classifier:
        with open(opts.load_classifier) as in_file:
            classifier = serializer.load(in_file)
    else:
        print "training boosted classifier..."
        if not train_data:
            print "specify some training data with the -s flag."
            exit(1)
        classifier = boost.train_classifier(train_data, opts.num_iterations)
        print classifier

    if train_data:
        print "training error:"
        classify(classifier, train_data)

    if validation_data:
        print "validation error:"
        if opts.each_iter:
            classify_with_all_iterations(classifier, validation_data, opts.plot_iters)
        else:
            classify(classifier, validation_data)

    if opts.plot_features:
        plot_features(classifier, feature_descriptors)

    if opts.search_image:
        search.search(classifier, opts.search_image, feature_descriptors, opts.c_backend)

    if opts.save_classifier:
        with open(opts.save_classifier, "w") as out_file:
            serializer.dump(classifier, out_file)
Example #6
 def search(self, item):
     print "SEARCH"
     add_here(history_list[self.window_index][here[self.window_index][0]][0], self.window_index)
     if history_list[self.window_index][here[self.window_index][0]][0] != "*\\*":
         result = search(str(item), history_list[self.window_index][here[self.window_index][0]][0])
     else:
         result = search(str(item), history_list[self.window_index][here[self.window_index][0] - 1][0])
     print result
     if result:
         self.ui.listView.clear()
         listView(result, self.ui.listView)
         add_here("*\\*", self.window_index)
Example #7
 def testDateSearch(self):
     self.assertEquals([["# 0001-01-01\n", "{0}   \n".format(write.time)
                         + write.prepareContent("Start of datesearch")],
                        ["# 0015-01-01\n", "{0}   \n".format(write.time)
                         + write.prepareContent("Middle of datesearch")]],
                        search.search("", "0001-01-01", "0015-01-01"))
     self.assertEquals([["# 0001-01-01\n", "{0}   \n".format(write.time)
                         + write.prepareContent("Start of datesearch")],
                        ["# 0015-01-01\n", "{0}   \n".format(write.time)
                         + write.prepareContent("Middle of datesearch")],
                        ["# 0031-01-01\n", "{0}   \n".format(write.time)
                         + write.prepareContent("End of datesearch")]],
                        search.search("", "0001-01-01", "0031-01-01"))
Example #8
	def do_GET(self):
		parsed_path = urlparse.urlparse(self.path)
		# res = []
		res = {}

		if parsed_path.path == '/analyze':
			url = parsed_path.query.split('=')[1]
			while isShorten(url) == True:
				_, url = get(url)
			res['url'] = url
			prot, domain, path = parseURL(url)
			res['evil'] = isEvil(domain)
			num, title, content = search(url)
			res['num'] = num
			res['title'] = title
			res['content'] = content
			if prot != 'https':
				res['grade'] = 'F'
			else:
				res['grade'] = grading(url)
		elif parsed_path.path == '/expand':
			url = parsed_path.query.split('=')[1]
			while isShorten(url) == True:
				_, url = get(url)
			res['url'] = url
		elif parsed_path.path == '/check':
			url = parsed_path.query.split('=')[1]
			_, domain, _=parseURL(url)
			res['evil'] = isEvil(domain)
		elif parsed_path.path == '/grade':
			url = parsed_path.query.split('=')[1]
			while isShorten(url) == True:
				_, url = get(url)
			print('URL:', url)
			grade = grading(url)
			res['grade'] = grade
			print('Grade:', grade)
		elif parsed_path.path == '/search':
			url = parsed_path.query.split('=')[1]
			num, title, content = search(url)
			res['num'] = num
			res['title'] = title
			res['content'] = content
			# print('Content:', content.decode('utf-8'))
		
		self.send_response(200)
		self.end_headers()
		result = makeHTML(json.dumps(res))
		self.wfile.write(result.encode('utf-8'))
		return
Example #9
    def search(self, searchParam={}):
        """
        search is the main function: it is processing the request and put the element in the queue
        """
        #self.initVoteThreads()
        searchResult = []
        pageLimit = self.constants["BASE_CONFIG"]["search_page_limit"]
        if "searchParams" in self.enviroment_variables["configValues"]:
            if "pageLimit" in self.enviroment_variables["configValues"]["searchParams"]:
                pageLimit = self.enviroment_variables["configValues"]["searchParams"]["pageLimit"]

        # put the request in a queue, so we can have multithread searching for multiaccount
        for page in xrange(1, pageLimit+1):
            self.logger.info("Request for " + searchParam["term"] + " type: " + searchParam["typeSearch"] + " sort: " + searchParam["sort"] + " page: " + str(page))
            tmpSearchArray = search(self, term=searchParam["term"], typeSearch=searchParam["typeSearch"], sort=searchParam["sort"], categories=searchParam["categories"], page=page)
            if len(tmpSearchArray) > 0 and searchParam["typeSearch"] in tmpSearchArray and len(tmpSearchArray[searchParam["typeSearch"]]) > 0:
                for item in tmpSearchArray[searchParam["typeSearch"]]:
                    # check if i can vote and if i can comment and i can follow
                    # need to check in config
                    # put it in a function...
                    self.constants["QUEUE_VOTE"].put(item)
                    self.constants["QUEUE_COMMENT"].put(item)
                    # self.constants["QUEUE_FOLLOW"].put(self.follow(item))
                
                searchResult = searchResult + tmpSearchArray[searchParam["typeSearch"]]
            
            time.sleep(randint(15, 35)) # I need to wait some seconds to do another search
        
        # add result in all active queue and|or in file

        return searchResult
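One caveat on the signature above: searchParam={} is evaluated once at definition time, so the same dict instance is shared across calls. A minimal sketch of the usual safer pattern (the Searcher class name is hypothetical):

class Searcher(object):
    def search(self, searchParam=None):
        # Build a fresh dict per call instead of sharing one default
        # instance across every invocation of the method.
        if searchParam is None:
            searchParam = {}
        return searchParam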
Example #10
    def DO_PLUGIN_SEARCH(self, args, criterion, exact):
        search_terms = args
        url = "http://sourcemod.net/plugins.php?search=1&%s=%s" % (criterion, search_terms)

        db_search_terms = search_terms.replace("%", "\\%").replace("*", "%")
        if not exact:
            db_search_terms = "%" + db_search_terms + "%"

        search_args = {criterion: db_search_terms}
        plugins = search.search(**search_args)

        length = len(plugins)
        if length == 0:
            # No results found
            return "No results found for \x02%s\x02" % (args)
        elif length == 1:
            plugin = plugins[0]
            return "\x02%s\x02, by %s: %s  " "( http://forums.alliedmods.net/showthread.php?p=%s )" % (
                plugin["title"],
                plugin["author"],
                plugin["description"],
                plugin["postid"],
            )
        elif length < 7:
            return "Displaying \x02%d\x02 results: %s ( %s )" % (
                length,
                ",".join(map(lambda o: o["title"], plugins)),
                url,
            )
        else:
            return "First \x026\x02 results of \x02%d\x02: %s ( %s )" % (
                length,
                ", ".join(map(lambda o: o["title"], plugins[:6])),
                url,
            )
Example #11
def duenote(index):
	target = list_of_readable_files[int(index)]
	text = documents.get_text_from_file(target)

	textSnippets = search.search(text)
	data = []
	for text in textSnippets:
		labels = classify(text)
		data.append(labels)

	scheduling, todo = parseJSON(data)

	google_calendar = GoogleCalendar()
	google_task = GoogleTask()

	for t in todo:
		print ("Sending task\n")
		google_task.add_task(t)
		t.due_date = datetime.strftime(parser.parse(t.due_date), '%m/%d/%y')

	for s in scheduling:
		print ("Sending cal\n")
		google_calendar.send_to_google_calendar(s)
		s.start_time = datetime.strftime(parser.parse(s.start_time), '%m/%d/%y')

	return render_template('duenote.html', scheduling=scheduling,todo=todo)
Example #12
def search_results(page, mentee_topic_choice=None):
    mentee_topic_choice = mentee_topic_choice or request.form.get("searchtopics")
    print "~~~~~~~~~~~~~~~~mentee_topic_choice"
    print mentee_topic_choice
    mentor_data = search.search(mentee_topic_choice)
    if mentor_data:

        start_index = (page - 1) * (PER_PAGE)
        end_index = (page) * (PER_PAGE)

        ment_count = len(mentor_data)
        users = mentor_data[start_index:end_index]
        # users = mentor_data.paginate(page, PER_PAGE, False)

        if not users and page != 1:
            abort(404)
        pagination_per_page = pagination.Pagination(page, PER_PAGE, ment_count)
        search_topic = search.search_topic_display(mentee_topic_choice)
        return render_template(
            "searchresults.html",
            search_topic_display=search_topic,
            pagination=pagination_per_page,
            users=users,
            mentee_topic_choice=mentee_topic_choice,
        )
    messages = flash("Sorry! There are no mentors under this search topic")
    return redirect(url_for("index"))
Example #13
def find():
    from search import search

    fromEnvVars = FromEnvVars( name, description, hints = "CCTBX_ROOT" )

    paths = search( [fromEnvVars] )
    return paths
Example #14
    def DO_PLUGIN_SEARCH(self, args, criterion, exact):
        search_terms = args.replace(" ", "+")
        url = "http://sourcemod.net/plugins.php?search=1&%s=%s" % (criterion, search_terms)

        db_search_terms = search_terms.replace('%', '\\%').replace('*', '%')
        if not exact:
            db_search_terms = '%' + db_search_terms + '%'

        search_args = { criterion: db_search_terms }
        plugins = search.search(**search_args)

        length = len(plugins)
        if length == 0:
            # No results found
            return "No results found for \x02%s\x02" % (args)
        elif length == 1:
            plugin = plugins[0]
            return "\x02%s\x02, by %s: %s  "\
                "( http://forums.alliedmods.net/showthread.php?p=%s )" % (plugin['title'], plugin['author'],
                                                                          plugin['description'], plugin['postid'])
        elif length < 7:
            return "Displaying \x02%d\x02 results: %s ( %s )" % (length, ",".join(map(lambda o: o['title'], plugins)),
                                                                 url)
        else:
            return "First \x026\x02 results of \x02%d\x02: %s ( %s )" % (length, ", ".join(map(lambda o: o['title'],
                                                                                               plugins[:6])), url)
Example #15
    def __init__(self, proxies={'http': 'http://127.0.0.1:8080',
        'https': 'http://127.0.0.1:8080'}):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.
           
        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
        """
        self.__proxies = proxies
        
        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.users = users(self)
Example #16
    def test_bridge_has_shab_cookie(self):
        """
        When bridge has shab cookie, abtest_manager.reload should be called and
        shab cookie should be removed from bridge response headers.
        """
        # Set bridge response text to be good JSON.
        mock_bridge_response = MockBridgeResponseWithShabCookie()
        mock_bridge_response.text = '{"search_result": {"results_good": true, "primary_parametric_fields": ["good"]}}'

        mock_requests_get = mock.MagicMock(
            name="mock_requests_get", return_value=mock_bridge_response)
        mock_get_http_headers = mock.MagicMock(
            name='mock_get_http_headers', return_value={})

        with mock.patch('requests.get', mock_requests_get):
            with mock.patch('serpng.lib.http_utils.get_http_headers', mock_get_http_headers):
                mock_request = MockRequest()
                mock_request.abtest_manager.reload_cookie = mock.MagicMock(name='mock_reload_cookie')

                # pylint: disable=W0612
                # (result and user_data are not tested in this test case)

                # result_sj added for SJ Ads A/B test. 
                response_headers, result, result_sj, user_data = search.search(
                    request=mock_request, query=MockQuery())

                mock_request.abtest_manager.reload_cookie.assert_called_with('shab_val', True)
                self.assertEqual(response_headers, {'set-cookie': 'has_comma has_comma, no_comma no_comma'})
Example #17
def search_search():
    query = request.args.get('q')
    start = request.args.get('from')
    size = request.args.get('size')
    # return render_template('search.html.jinja2', results=search.search('scrapi', query, start, size))
    results, count = search.search('scrapi', query, start, size)
    return json.dumps(results)
Example #18
def search(request):
    if request.GET.has_key('q'):
        try:
            q = request.GET['q']
            results = s.search(q)
            gene_ids = []
            for result in results:
                if result['doc_type'] == 'GENE':
                    gene_ids.append(result['id'])

            gene_objs = Gene.objects.filter(pk__in=gene_ids)
            species_genes = {}
            species_names = {}
            genes = []
            for gene_obj in gene_objs:
                species_names[gene_obj.species.id] = gene_obj.species.name
                bicluster_ids = [b.id for b in gene_obj.bicluster_set.all()]
                regulates = Bicluster.objects.filter(influences__name__contains=gene_obj.name)
                _, influence_biclusters = get_influence_biclusters(gene_obj)

                if not species_genes.has_key(gene_obj.species.id):
                    species_genes[gene_obj.species.id] = []
                genes = species_genes[gene_obj.species.id]

                genes.append(GeneResultEntry(gene_obj.id, gene_obj.name,
                                             gene_obj.species.id,
                                             gene_obj.description,
                                             bicluster_ids,
                                             influence_biclusters,
                                             regulates))
        except Exception as e:
            error_message = str(e)
    return render_to_response('search.html', locals())
Example #19
def solve(totals, goal, init=None):
    '''
    Search for shortest path to solve the pouring problem.

    totals -- a tuple of capacities (numbers) of glasses

    goal -- a number indicating the volume level we want 
            to have any one of the glasses contain

    init -- optional tuple of initial levels for each glass

    If init is not specified, we set the starting levels of each
    glass to zero.

    We start the search at the start state and follow paths of 
    successor states (generated by `next`) until we reach the goal.

    After reaching the goal state, we return the shortest path
    found, a sequence of states from start to goal state.

    '''
    done = lambda state: state.contains(goal)
    init = init or tuple(0 for t in totals)
    glasses = (Glass(i,t) for i,t in zip(init, totals))
    start = State(Glasses(glasses), action=None)
    return search(start, next, done)
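A hypothetical call, with made-up capacities and goal; the exact shape of the returned path depends on the State and Glasses classes, which are not shown here:

# Illustrative only: look for a pouring sequence that leaves 6 units in
# one of two initially empty glasses with capacities 4 and 9.
path = solve(totals=(4, 9), goal=6)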
Example #20
    def test_good_bridge_response(self):
        """When the bridge response is good, good response_header, result and user_data should be constructed and returned."""
        # Set bridge response text to be good JSON.
        mock_bridge_response = MockBridgeResponse()
        mock_bridge_response.text = '{"search_result": {"results_good": true, "primary_parametric_fields": ["good"]}}'

        mock_requests_get = mock.MagicMock(
            name="mock_requests_get", return_value=mock_bridge_response)
        mock_get_http_headers = mock.MagicMock(
            name='mock_get_http_headers', return_value={})

        with mock.patch('requests.get', mock_requests_get):
            with mock.patch('serpng.lib.http_utils.get_http_headers', mock_get_http_headers):
                # result_sj added for SJ Ads A/B test.
                response_headers, result, result_sj, user_data = search.search(
                    request=MockRequest(), query=MockQuery())

                expected_search_result = MockSearchResult(
                    request=MockRequest(),
                    search_result_json={'results_good': True, 'primary_parametric_fields': ['good']},
                    bridge_search_query=''
                )
                self.assertEqual(expected_search_result, result)

                expected_user_data = MockUserData(
                    json_response={'search_result': {'results_good': True, 'primary_parametric_fields': ['good']}}
                )
                self.assertEqual(expected_user_data, user_data)

                self.assertEqual(response_headers, {'a': 'b'})
Example #21
def websearch(bot, data):
    """Search the web for a query.

    use ! to send result to channel, and @ to receive as personal message
    """
    if data["message"][0] == "!":
        query = data["message"].replace("!search ", "")
        destination = "to"
    else:
        query = data["message"].replace("@search ", "")
        destination = "from"

    try:
        results = search.search(query)
    except URLError:
        bot.send("Sorry, I dun goofed", channel=data[destination])
        return

    if results == []:
        bot.send("No search results for \"{}\"".format(query),
                 channel=data[destination])
        return

    bot.send("Web results for \"{}\":".format(query), channel=data[destination])

    for result in results:
        bot.send("* {}: {}".format(result[0], result[1]), channel=data[destination])
Example #22
    def do_GET(self):
        start = time.time()
        out = codecs.getwriter("utf-8")(self.wfile, 'xmlcharrefreplace')

        self.send_response(200, "OK")
        self.send_header("Content-Type", "text/xml")
        self.end_headers()

        out.write('<?xml version="1.0" encoding="utf-8" ?>')

        query = urlparse.urlparse(self.path).query
        params = urlparse.parse_qs(query)

        search.search(params, self.wfile)
        end = time.time()
        print >>sys.stderr, "Elapsed:", end - start
Example #23
def searchView(request):
  """
  Renders search.html using the search results of a query entered
  into the search bar

  @type request: string
  @param request: An HTML request

  @rtype: HTML page
  @return: Rendered version of search.html
  """

  sform = SearchForm(request.POST)

  # Ensure form is valid
  if not sform.is_valid():
    return index(request)

  user_query = sform.cleaned_data['search_query']

  # Get search results. Format: [ MatchObject, MatchObject, ...]
  search_result = search(user_query)

  # Construct a dictionary from search results to pass along to html template
  search_dict = {"results" : [], "query" : ''}
  for match in search_result:
    # Make list for each MatchObject in search_result.
    # List format = [MatchObject, type of object, name of object, [LiObject, LiObject, ...] ]
    result_list = [match]
    result_list.extend(getTypeNameImage(match.idref))

    search_dict["results"].append(result_list)
  search_dict["query"] = sform.cleaned_data['search_query']

  return render(request, 'wcdb/search.html', search_dict)
Example #24
def main():
    arguments = docopt(doc, version='Python Package Manager 0.1')
    if arguments['install']:
        if arguments['<package>']:
            install(arguments['<package>'])
        else:
            #find requirements.txt
            #find package.json
            #load dependence list
            #call install for all deps
            pass
    elif arguments['search']:
        if arguments['<package>']:
            search(arguments['<package>'])

    return 1
Example #25
def my_form_post():
    keyword = request.form['text']
    keyword = keyword.upper().lower()
    data_type = get_id_type(request.form['platform'])
    resultSearch = []
    start_download = False
    word_id = 0

    print "adresse ip : ", request.remote_addr
    conn = manage_database.connect_database()
    manage_database.add_connection_user(conn, request.remote_addr)
    if keyword is None or keyword == "":
        return (render_template("index.html", type=data_type, dataResults=[]))
        
    data_ret = manage_database.search_word(conn, keyword, request.form['platform'])

    if data_ret is None:
        start_download = True
        cursor = conn.cursor()
        cursor.execute('''INSERT INTO keyword (word, nb) VALUES(?, ?)''', (keyword, 1,))
        word_id = cursor.lastrowid
        conn.commit()
        """
        search_results = search(keyword)
        image_search = search_image(keyword)
        news_results = searchNews(keyword)
        videos_search = search_video(keyword)
        torrent_search = search_torrent(keyword)
        manage_database.fill_new_entry(conn, keyword, search_results, image_search, \
                                       news_results, videos_search, torrent_search)
        """
    else:
        resultSearch = data_ret

    thread_download = threading.Thread(target=optimise_request.download, args=(keyword, word_id))
    if request.form['platform'] == "search":
        if data_ret == None:
            resultSearch = search(keyword)
    elif request.form['platform'] == "images":
        if data_ret == None:
            resultSearch = search_image(keyword)
    elif request.form['platform'] == "news":
        if data_ret == None:
            resultSearch = searchNews(keyword)
    elif request.form['platform'] == "videos":
        if data_ret == None:
            resultSearch = search_video(keyword)
    elif request.form['platform'] == "torrent":
        if data_ret == None:
            resultSearch = search_torrent(keyword)

    if start_download:
        thread_download.start()

    conn.close()

    if resultSearch is not None:
        return (render_template("index.html", type=data_type, dataResults=resultSearch))
    return (render_template("index.html", type=data_type, dataResults=[]))
Example #26
def find_ancestors(z, arcs_from):
    initial_paths = [SearchPath(0, vertex) for vertex in z]
    expand = make_expand(arcs_from, feasible_sign=-1)
    extract_state = lambda path : path.extract_state()
    result = search(initial_paths, expand, has_reached_goal, extract_state)
    closed = result['closed']
    z_ancestors = set(closed.keys())
    return z_ancestors
Example #27
def update_result_search(conn, keyword):
    search_results = search(keyword)
    image_search = search_image(keyword)
    news_results = searchNews(keyword)
    videos_search = search_video(keyword)
    torrent_search = search_torrent(keyword)
    manage_database.update_row(conn, keyword, search_results, image_search,\
                               news_results, videos_search, torrent_search)
Example #28
def analyse_args(args):
    try:
        option, value = split_arg(args[0])
    except IndexError:
        return create_session()

    allowed_opts = {
        # these don't take any value, and don't respond if an invalid value is passed
        'help': 'print (help_string)',
        'configure': 'Session(is_bare = True).reconfigure()',
        'change-pass': '******',
        'random': 'random(create_session())',
        # these demand one value (`backup` requires a valid location, while the other three require a datetime format)
        'write': 'Story(create_session(), when = value, is_write = True).write()',
        'view': 'Story(create_session(), when = value, check_path = True).view()',
        'backup': 'backup(create_session(), backup_loc = value)',
        'encrypt': 'Story(create_session(), when = value, check_path = True).encrypt()',
    }

    try:
        if option == 'search':      # special handling for `search`
            args.extend(['lang=None', 'start=start', 'end=end', 'grep'])
            options, values = zip(*map(split_arg, args))
            grep_val = '0' if 'ugly' in options else values[options.index('grep')]
            search(session = create_session(),
                   word = value,
                   lang = values[options.index('lang')],
                   start = values[options.index('start')],
                   end = values[options.index('end')],
                   grep = int(grep_val) if grep_val and grep_val.isdigit() else 7)      # '7' is rather smooth
        elif option == 'stats':     # ... and `stats`
            args.extend(['lang=None'])
            options, values = zip(*map(split_arg, args))
            stats(session = create_session(),
                  speed = int(value) if value and value.isdigit() else None,
                  lang = values[options.index('lang')])
        else:
            exec(allowed_opts[option])
        exit('')

    except KeyError:
        print ERROR, 'Invalid arguments! Continuing with the default...'
        return create_session()
    except (KeyboardInterrupt, EOFError):
        sleep(CAPTURE_WAIT)
        exit('\nGoodbye...\n')
Example #29
 def GET(self):
     i = web.input(q="", page=1)
     page = int(i.page)
     nmatched, results = search.search(i.q, page=page-1)
     if len(results) == 1 and page == 1:
         raise web.seeother(results[0].id)
     else:
         return render.search(results, nmatched, page)
Example #30
def is_d_connected(x, y, z, arcs_from):
    initial_paths = [SearchPath(0, vertex, vertex, 0, 0) for vertex in x]
    z_ancestors = find_ancestors(z, arcs_from)
    is_blocked = make_blocked_predicate(z, z_ancestors)
    expand = make_expand(arcs_from, is_blocked)
    has_reached_goal = make_goal_test(y)
    extract_state = lambda path : path.extract_state()
    result = search(initial_paths, expand, has_reached_goal, extract_state)
    return result['reached_goal']
Example #31
def test_search_for_fail_on_no_connection():
    """
    Test that search does not crash if the database does not exist.
    """
    test_engine = create_engine('sqlite:///')
    Base.metadata.create_all(test_engine)
    test_session = sessionmaker(bind=test_engine)
    test_session = test_session()

    result = search.search("DEMO123", session=test_session)

    print("Result is:\n", result)

    expected_result = json.dumps({"status": "OK", "results": []})

    print("Expected:\n", expected_result)

    assert result == expected_result
Example #32
def get_collection(collection: str):
    model = model_selector(collection)

    query = request.args.get("query", None, type=str)
    populate = request.args.get("populate", False, type=json.loads)

    if query is None:
        return make_response({"error": "No query provided."}, 400)


    resp: list = search(collection, query, populate)

    if len(resp) > 20:
        return make_response({"error": "To many results. Consider being more " +
                              "specific.", "matches": len(resp)}, 400)

    resp = json.dumps(resp, ensure_ascii=False, indent=4)
    return resp
Example #33
    def do_GET(self):
        self.do_HEAD()

        _GET = parse_qs(urlparse(self.path).query)

        if 'q' in _GET:
            result = search.search(_GET['q'][0])

            self.wfile.write("<html><head><title>Search result</title></head>")
            self.wfile.write("<body>")
            self.wfile.write("<p>Result in " + str(result['time']) +
                             " seconds</p>")
            for page in result['pages']:
                self.wfile.write("<p>" + page[1] + "</p>")
            self.wfile.write("<p>You accessed path: %s</p>" % self.path)
            self.wfile.write("</body></html>")

        self.finish()
Example #34
def dosearch():
    query = request.args['query']
    qtype = request.args['query_type']
    page_num = int(request.args['page_num'])
    results = 0
    search_results = search.search(query, qtype, page_num)

    if len(search_results) > 0:
        if isinstance(search_results[-1][0], int):
            results = search_results[-1][0]
        del search_results[-1]

    return render_template('results.html',
                           query=query,
                           results=results,
                           search_results=search_results,
                           qtype=qtype,
                           pageNum=page_num)
Example #35
    def search(self, searchParam={}):
        """
        search is the main function: it is processing the request and put the element in the queue
        """
        #self.initVoteThreads()
        searchResult = []
        pageLimit = self.constants["BASE_CONFIG"]["search_page_limit"]
        if "searchParams" in self.enviroment_variables["configValues"]:
            if "pageLimit" in self.enviroment_variables["configValues"][
                    "searchParams"]:
                pageLimit = self.enviroment_variables["configValues"][
                    "searchParams"]["pageLimit"]

        # put the request in a queue, so we can have multithread searching for multiaccount
        for page in xrange(1, pageLimit + 1):
            self.logger.info("Request for " + searchParam["term"] + " type: " +
                             searchParam["typeSearch"] + " sort: " +
                             searchParam["sort"] + " page: " + str(page))
            tmpSearchArray = search(self,
                                    term=searchParam["term"],
                                    typeSearch=searchParam["typeSearch"],
                                    sort=searchParam["sort"],
                                    categories=searchParam["categories"],
                                    page=page)
            if len(tmpSearchArray) > 0 and searchParam[
                    "typeSearch"] in tmpSearchArray and len(
                        tmpSearchArray[searchParam["typeSearch"]]) > 0:
                for item in tmpSearchArray[searchParam["typeSearch"]]:
                    # check if i can vote and if i can comment and i can follow
                    # need to check in config
                    # put it in a function...
                    self.constants["QUEUE_VOTE"].put(item)
                    self.constants["QUEUE_COMMENT"].put(item)
                    # self.constants["QUEUE_FOLLOW"].put(self.follow(item))

                searchResult = searchResult + tmpSearchArray[
                    searchParam["typeSearch"]]

            time.sleep(randint(
                15, 35))  # I need to wait some seconds to do another search

        # add result in all active queue and|or in file

        return searchResult
Example #36
def smart_update_package():
    # init count of packages to be updated
    package_update_count = 0

    # get packages installed with their version number
    get_package_with_ver = subprocess.Popen("pacman -Qm > ./ver_packages.txt",
                                            shell=True)
    get_package_with_ver.wait()

    with open(directory + "/ver_packages.txt", "r") as packages:
        installed_packages_ver = [package.strip() for package in packages]
    rm_file = subprocess.Popen("rm -rf ./ver_packages.txt", shell=True)
    rm_file.wait()

    # get packages installed without version number
    get_package = subprocess.Popen("pacman -Qqm > ./packages.txt", shell=True)
    get_package.wait()

    with open(directory + "/packages.txt", "r") as packages:
        installed_packages = [package.strip() for package in packages]
    rm_file = subprocess.Popen("rm -rf ./packages.txt", shell=True)
    rm_file.wait()

    # really loopy way to download packages. explanation below
    # check if the package with the version in the user system is equal to the package with the version available in the aur
    # if it is, update; else, break the loop
    for package_name in installed_packages:
        package_data = search.search(package_name, "name")
        for result in package_data["results"]:
            # check if the package name is exactly the same. lemonbar matches only to lemonbar and not lemonbar-xft-git
            if result["Name"] == package_name:
                aur_package_with_ver = "%s %s" % (result["Name"],
                                                  result["Version"])
                for package_with_ver in installed_packages_ver:
                    # if the version of the package installed in the system is not equal to the version in the aur, update
                    if package_with_ver != aur_package_with_ver:
                        retrieve_file(package_name)
                        extract_tar(package_name)
                        package_update_count += 1
                        # exit out of loop after we download package
                        break

    if not package_update_count:
        print("all packages are up to date")
Example #37
    def parse_result(self, connstream, r):
        if r is not None and isinstance(r, Result):
            if r.data:
                #print("Source:\n" + r.source + "\nData:\n" + str(r.data))
                searchhits, regexhits = search().apply_terms(r.data)
                print("Searchhits: " + str(searchhits))
                print("Regexhits: " + str(regexhits))
                esr = es_result()
                esr.source = r.source
                esr.referrer = r.referrer
                esr.dataHash = r.dataHash
                esr.dataBytes = r.dataBytes

                if len(regexhits) > 0:
                    esr.regex_hit = 1
                    esr.regex_hits = "\n".join(regexhits)
                else:
                    esr.regex_hit = 0
                if len(searchhits) > 0:
                    esr.searchterm_hit = 1
                    esr.searchterm_hits = "\n".join(searchhits)
                else:
                    esr.searchterm_hit = 0

                if esr.searchterm_hit or esr.regex_hit:
                    esr.data = base64.b64encode(r.data)
                else:
                    esr.data = ""

                esr.timeStart = r.timeStart
                esr.timeEnd = r.timeEnd
                c = r.crawlerConfig
                esr.config_name = c.name
                esr.config_location = c.location
                esr.config_protocol = c.protocol
                esr.config_speed = c.speed
                esr.config_depth = c.depth
                esr.config_maxDepth = c.maxDepth
                esr.config_options = c.options
                print("Saving to elasticsearch.")
                esr.save()
                print("Save complete.")
            return True
        return False
Example #38
def toollist(request):
    request.encoding = "utf-8"
    _GG("Log").d("toollist GET :", request.GET, "; POST :", request.POST,
                 "; FILES :", request.FILES)
    # Get the tool-list key
    tlkey = request.GET.get("k", "all")
    if tlkey not in TlKeyMap:
        tlkey = "all"
    # Validate the submitted data
    if "searchText" in request.POST:
        # Check whether this is the "all tools" module
        if tlkey == "all":
            return search(request)
        # Search the other modules
        searchText = request.POST["searchText"]
        title = TlKeyMap[tlkey]
        result = {
            "MAIN_HOME_TITLE": settings.MAIN_HOME_TITLE,
            "MAIN_HOME_URL": settings.MAIN_HOME_URL,
            "RESOURCE_URL": settings.RESOURCE_URL,
            "HOME_TITLE": settings.HOME_TITLE,
            "HOME_URL": settings.HOME_URL,
            "HEAD_TITLE": f"搜索{title}工具",
            "tlkey": tlkey,
            "searchText": searchText,
            "isSearchNone": False,
            "toolInfoList": [],
        }
        # Search the tool info list by searchText
        result["toolInfoList"].extend(serachToolListByName(tlkey, searchText))
        # Check whether the search returned any results
        if searchText:
            result["isSearchNone"] = len(result["toolInfoList"]) == 0
        return render(request, "toollist_item.html", result)
    return render(
        request, "toollist.html", {
            "MAIN_HOME_TITLE": settings.MAIN_HOME_TITLE,
            "MAIN_HOME_URL": settings.MAIN_HOME_URL,
            "RESOURCE_URL": settings.RESOURCE_URL,
            "HOME_TITLE": settings.HOME_TITLE,
            "HOME_URL": settings.HOME_URL,
            "HEAD_TITLE": f"工具列表",
            "tlkey": tlkey,
        })
Example #39
def query(self, args):
    task_id = self.request.id
    slogger('query', 'query in progress, task_id={}'.format(task_id))
    # Don't touch this:
    self.update_state(state='PROGRESS')
    time.sleep(1.5) # a short dwell is necessary for other async processes to catch-up
    # Change all of this to whatever you want:
    results = search(args)
    slogger('query', 'check results and process if necessary')
    # Only write an Excel file for download if there were actual results
    if len(results) > 0:
        # Save locally in Excel format then copy the file to S3 because any local
        # files store locally in the container are highly volatile and
        # will likely be deleted before the user has a chance to download
        reports_folder = os.path.join(os.path.dirname(
            os.path.realpath(__file__)), 'reports')
        # Use the Celery task id as the filename (we change it to something more userfriendly later)
        excel_filename = '{}.xlsx'.format(task_id)
        local_excel_path = os.path.join(reports_folder, excel_filename)
        slogger('query', 'saving full report locally as {}'.format(local_excel_path))
        excel_writer = pd.ExcelWriter(local_excel_path, engine="xlsxwriter")
        # Here you can customize the name of the Excel sheet:
        pd.DataFrame(results).to_excel(excel_writer, sheet_name="BoilerplateResults", index=False)
        excel_writer.save()
        # Copy to S3 if enabled
        if not config.DISABLE_S3:
            bucket_name = os.environ['S3_BUCKET_NAME']
            # We prefix the S3 key with the name of the app - you can change this if you want:
            s3_excel_key = '{}/{}'.format(config.DASH_APP_NAME, excel_filename)
            slogger('query', 'copying {} to S3 bucket {} with key {}'.format(local_excel_path, bucket_name, s3_excel_key))
            client = boto3.client('s3', 
                                    aws_access_key_id=os.environ['S3_ACCESS_KEY_ID'], 
                                    aws_secret_access_key=os.environ['S3_SECRET_ACCESS_KEY'])
            with open(local_excel_path, 'rb') as body:
                client.put_object(Bucket=bucket_name, Key=s3_excel_key, Body=body)
        else:
            slogger('query', 'caution - S3 is disabled so the Download Excel link will be broken!')
    else:
        slogger('query', 'empty results - nothing was saved')
    
    # Return results for display
    slogger('query', 'return results')
    return results
Example #40
def dosearch():
    """
    TODO:
    Use request.args to extract other information
    you may need for pagination.
    """

    query = request.args['query']
    qtype = request.args['query_type']
    """print("Page number: {page}".format(page = request.args.get('page')))"""
    page = request.args.get('page', 1)
    page = int(page)
    offset = (page - 1) * 20

    next = page + 1
    previous = page - 1
    if previous < 1:
        previous = 1
    """if len(tokens) > 1:
        for x in range(1, len(query)):
            tokens = tokens + "+{next_tk}".format(next_tk = query[x])"""

    url = "/search?query_type={x}&query={y}".format(x=qtype, y=query)

    search_results = search.search(qtype, offset, query)

    length = int(search_results[-1][0])
    del search_results[-1]

    lower_bound = offset + 1
    upper_bound = lower_bound + 19
    if upper_bound > length:
        upper_bound = length

    return render_template('results.html',
                           query=query,
                           query_type=qtype,
                           results=length,
                           search_results=search_results,
                           x=lower_bound,
                           y=upper_bound,
                           page=page,
                           url=url)
Example #41
def dosearch_old(page_num, page_turns):
    query = ""
    qtype = ""
    page_num = int(page_num)
    page_turns = int(page_turns)

    is_new_query = False
    page_num = page_num + page_turns

    """
    TODO:
    Use request.args to extract other information
    you may need for pagination.
    """

    search_results = search.search(query, qtype, is_new_query, page_num)
    for result in search_results:
        song, author, score = result
        print(song)
Example #42
def dosearch():
    query = request.args['query']
    qtype = request.args['query_type']
    page = request.args['p']
    """
    TODO:
    Use request.args to extract other information
    you may need for pagination.
    # """

    if page == "1":
        search_results = search.search(query, qtype)
    else:
        search_results = search.search_view(query, qtype, page)

    return render_template('results.html',
                           query=query,
                           results=len(search_results),
                           search_results=search_results)
Example #43
def search_component(query):
    """
    Index page w/ search results

    :return: rendered Page
    """
    # text = request.form['search_text']
    text = query

    # !!!!!!
    # WARNING: no text sanitation done here. Expected to be done in search!
    # !!!!!!

    search_results = search(text)
    json_results = json.loads(search_results)

    return render_template('index.html',
                           searchResult=json_results,
                           searchComponent=text)
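Given the warning above, a minimal escape step before the query reaches the template could look like the sketch below; the sanitation search actually performs is not shown, so sanitize_query is purely illustrative.

import html

def sanitize_query(text):
    # Trim whitespace and escape HTML metacharacters so the raw query is
    # safe to echo back into a rendered template.
    return html.escape(text.strip())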
Example #44
def test_search_substring():
    starter()
    # one user logs in, creates a channel, sends one message, and searches for a substring of it
    message_list1 = []
    user = auth.auth_login('*****@*****.**', 'youshallpass')
    token = user.get('token')

    ch = channels.channels_create(token, '1234', True)

    #user sends message hey to channel
    m_id = message.message_send(token, ch.get('channel_id'), 'hey')
    message_details = message.get_message_details(
        m_id.get('message_id')).get('message')

    message_list1.append(message_details)
    # search for the substring 'ey' of 'hey'
    message_list2 = search.search(token, 'ey')

    assert message_list1 == message_list2
Example #45
 def test_exact_string_search_3(self):
     searcher = search()
     inputString = "Hello. My name is Eric. I am writing this simple test to check to see how well my parsing algorithm is performing. hello again, dont forget my name: Eric. That is all."
     queries = ['hello','LASDGLKGSVLIUGAEOUGSVLUHwe;ofrw.k?bwri;hqf.IBALIUGqleiugwKUGwrliugwrgOUGFW ;OURW;U','Eric','my name is Eric!','my name is Eric']
     self.assertEqual(
     [0,115],
     searcher.exact_query(queries[0],inputString))
     self.assertEqual(
     [],
     searcher.exact_query(queries[1],inputString))
     self.assertEqual(
     [18,149],
     searcher.exact_query(queries[2],inputString))
     self.assertEqual(
     [],
     searcher.exact_query(queries[3],inputString))
     self.assertEqual(
     [7],
     searcher.exact_query(queries[4],inputString))
Example #46
 def test_exact_string_search_1(self):
     searcher = search()
     inputString = "Hello world!"
     queries = ['hello','LASDGLKGSVLIUGAEOUGSVLUHwe;ofrw.k?bwri;hqf.IBALIUGqleiugwKUGwrliugwrgOUGFW ;OURW;U','Eric','my name is Eric!','my name is Eric']
     self.assertEqual(
     [0],
     searcher.exact_query(queries[0],inputString))
     self.assertEqual(
     [],
     searcher.exact_query(queries[1],inputString))
     self.assertEqual(
     [],
     searcher.exact_query(queries[2],inputString))
     self.assertEqual(
     [],
     searcher.exact_query(queries[3],inputString))
     self.assertEqual(
     [],
     searcher.exact_query(queries[4],inputString))
Example #47
    def execute(self, filename, searchMethod, save):
        self.initialize(filename)

        if self.maze is None:
            print("No maze created")
            raise SystemExit

        start_time = time.perf_counter()
        path, statesExplored = search(self.maze, searchMethod)
        time_taken = time.perf_counter() - start_time

        pygame.init()
        self.displaySurface = pygame.display.set_mode(
            (self.windowWidth, self.windowHeight), pygame.HWSURFACE)
        self.displaySurface.fill((255, 255, 255))
        pygame.display.flip()
        pygame.display.set_caption(self.windowTitle)

        print("Results")
        print("Path Length:", len(path))
        print("States Explored:", statesExplored)
        print("Time taken:", time_taken)
        print(path)
        self.drawPath(path)

        self.drawMaze()
        self.drawStart()
        self.drawObjective()

        pygame.display.flip()
        if save is not None:
            pygame.image.save(self.displaySurface, save)
            self.running = False

        clock = pygame.time.Clock()

        while self.running:
            pygame.event.pump()
            keys = pygame.key.get_pressed()
            clock.tick(self.fps)

            if keys[K_ESCAPE]:
                raise SystemExit
Example #48
def test_example_unit_tests():
    # Example tests, these do not count as your tests

    # Basic search, function #1
    assert search('dog') == DOG

    # Advanced search option 1, function #2
    expected = [['Mexican dog-faced bat', 'AnomieBOT', 1255316429, 1138],
                ['Guide dog', 'Sarranduin', 1165601603, 7339]]
    assert article_length(8000, DOG.copy()) == expected

    # Advanced search option 2, function #3
    expected = [['Black dog (ghost)', 'SmackBot', 1220471117, 14746],
                ['Mexican dog-faced bat', 'AnomieBOT', 1255316429, 1138],
                ['Dalmatian (dog)', 'J. Spencer', 1207793294, 26582]]
    assert article_count(3, DOG.copy()) == expected

    # Advanced search option 3, function #4
    expected = ['Guide dog', 'Sarranduin', 1165601603, 7339]
    assert random_article(3, DOG.copy()) == expected

    # Advanced search option 4, function #5
    #assert favorite_author('J. Spencer', DOG.copy()) == True

    # Advanced search option 5, function #6
    expected = [['Black dog (ghost)', 'SmackBot'],
                ['Mexican dog-faced bat', 'AnomieBOT'],
                ['Dalmatian (dog)', 'J. Spencer'], ['Guide dog', 'Sarranduin'],
                ['Sun dog', 'Hellbus']]
    assert title_author(DOG.copy()) == expected

    # Advanced search option 6, function #7
    expected = [['Black dog (ghost)', 'SmackBot', 1220471117, 14746],
                ['Mexican dog-faced bat', 'AnomieBOT', 1255316429, 1138],
                ['Dalmatian (dog)', 'J. Spencer', 1207793294, 26582],
                ['Guide dog', 'Sarranduin', 1165601603, 7339],
                ['Sun dog', 'Hellbus', 1208969289, 18050],
                [
                    'Spain national beach soccer team', 'Pegship', 1233458894,
                    1526
                ], ['Will Johnson (soccer)', 'Mayumashu', 1218489712, 3562],
                ['Steven Cohen (soccer)', 'Scouselad10', 1237669593, 2117]]
    assert multiple_keywords('soccer', DOG.copy()) == expected
Example #49
def get_author_or_tag_results(path):
    results = search(path)
    dirs = list(results.pop('books'))

    # If there's only 1 book, the combinations of tags/authors
    # doesn't add any extra info, so dead end here
    if len(dirs) == 1:
        return dirs

    # this ensures that we don't have author or tag directories if there are none
    for key in ['authors', 'tags']:
        key_results = list(results[key])
        vals_in_path = get_search_terms_of_type(key, path)
        filtered = [val for val in key_results if val not in vals_in_path]

        if len(filtered) > 0:
            dirs.append(key)

    return dirs
Example #50
def dosearch():
    pageNum = request.args.get('page', -1)
    query = request.args['query']
    qtype = request.args['query_type']
    pageNum = int(pageNum)

    search_results = search.search(query, qtype, pageNum)

    pageNum = search_results[0]
    rows = search_results[1]
    numResults = search_results[2]

    return render_template('results.html',
                           query=query,
                           query_type=qtype,
                           results=len(rows),
                           totalResults=numResults,
                           search_results=rows,
                           page=pageNum)
Example #51
    def __init__(self, proxies=None, apikey=None):
        """
        Creates an instance of the ZAP api client.

        :Parameters:
           - `proxies`: dictionary of ZAP proxies to use.

        Note that all of the other classes in this directory are generated;
        new ones will need to be manually added to this file.
        """
        self.__proxies = proxies or {
            'http': 'http://127.0.0.1:8080',
            'https': 'http://127.0.0.1:8080'
        }
        self.__apikey = apikey

        self.acsrf = acsrf(self)
        self.ajaxSpider = ajaxSpider(self)
        self.ascan = ascan(self)
        self.authentication = authentication(self)
        self.authorization = authorization(self)
        self.autoupdate = autoupdate(self)
        self.brk = brk(self)
        self.context = context(self)
        self.core = core(self)
        self.forcedUser = forcedUser(self)
        self.httpsessions = httpSessions(self)
        self.importLogFiles = importLogFiles(self)
        self.params = params(self)
        self.pnh = pnh(self)
        self.pscan = pscan(self)
        self.reveal = reveal(self)
        self.script = script(self)
        self.search = search(self)
        self.selenium = selenium(self)
        self.sessionManagement = sessionManagement(self)
        self.spider = spider(self)
        self.stats = stats(self)
        self.users = users(self)

        # not very nice, but prevents warnings when accessing the ZAP API via https
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
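
For context, this constructor is the entry point of the ZAP Python client; a minimal usage sketch, assuming the class is exposed as ZAPv2 (as in the python-owasp-zap packages) and that a local ZAP instance is listening on 127.0.0.1:8080:

# Sketch only: assumes the zapv2 package and a running ZAP proxy.
from zapv2 import ZAPv2

zap = ZAPv2(apikey='changeme',  # placeholder; set to the key configured in ZAP
            proxies={'http': 'http://127.0.0.1:8080',
                     'https': 'http://127.0.0.1:8080'})
print(zap.core.version)  # query ZAP through the generated core API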
Example #52
0
def test_unit_test_soccer():
    soccer_search_results = [
        'Spain national beach soccer team', 'Will Johnson (soccer)',
        'Steven Cohen (soccer)', 'Craig Martin (soccer)',
        "United States men's national soccer team 2009 results",
        'China national soccer team', "Wake Forest Demon Deacons men's soccer"
    ]

    # Basic search, function #1
    assert search('soccer') == soccer_search_results

    # Advanced search option 1, function #2
    expected = [
        'Will Johnson (soccer)', 'Steven Cohen (soccer)',
        'Craig Martin (soccer)'
    ]
    assert title_length(21, soccer_search_results.copy()) == expected

    # Advanced search option 2, function #3
    assert article_count(4, soccer_search_results.copy()) == [
        'Spain national beach soccer team', 'Will Johnson (soccer)',
        'Steven Cohen (soccer)', 'Craig Martin (soccer)'
    ]

    # Advanced search option 3, function #4
    assert random_article(6, soccer_search_results.copy()
                          ) == "Wake Forest Demon Deacons men's soccer"

    # Advanced search option 4, function #5
    assert favorite_article('Will Johnson (soccer)',
                            soccer_search_results.copy()) == True

    # Advanced search option 5, function #6
    expected = [
        'Spain national beach soccer team', 'Will Johnson (soccer)',
        'Steven Cohen (soccer)', 'Craig Martin (soccer)',
        "United States men's national soccer team 2009 results",
        'China national soccer team', "Wake Forest Demon Deacons men's soccer",
        'USC Trojans volleyball', 'Mets de Guaynabo (volleyball)'
    ]
    assert multiple_keywords('volleyball',
                             soccer_search_results.copy()) == expected
Example #53
0
def search_page(request):
    if "query" not in request.GET:
        return render(request, "web_search/index.html")
    else:
        query = request.GET["query"]
        ranker = int(request.GET["r"]) if "r" in request.GET else 1
        results_all = search(query, index_table, doc_info, ranker)
        results = results_all[0:20]
        for i in range(len(results)):
            r = results[i]
            url = "http://" + doc_url[r[0]]
            results[i] = (url, r[0] + ": " + url, r[1])  # url, anchor, score
        # Compute the NDCG
        service = build("customsearch", "v1", developerKey=API_KEY)
        res = service.cse().list(q=query + " site:ics.uci.edu", cx=CX,
                                 num=10).execute()
        urls = [result['formattedUrl'] for result in res['items']]
        valid_urls = dict()
        i = 0
        for url in urls:
            print(url)
            if i > 4:
                break
            parsed = urlparse(url)
            if parsed.scheme:
                url = url[len(parsed.scheme) + 3:]
            if url[-1] == "/":
                url = url[0:-1]
            if url in url_doc:
                valid_urls[url] = i
                i += 1
        print(valid_urls)
        dcg = 0
        for i in range(5):
            url = results[i][0][7:]
            if url in valid_urls:
                dcg += (5 - valid_urls[url]) / (math.log(i +
                                                         1, 2) if i > 0 else 1)
        ndcg = dcg / (5 + 4 + 3 / math.log(3, 2) + 2 / math.log(4, 2) +
                      1 / math.log(5, 2))
        print(ndcg)
        return render(request, "web_search/results.html", {"results": results})
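
The NDCG@5 computation above hard-codes the graded relevances 5 down to 1; the same metric as a small generic sketch (assumed helper, not part of the original view):

import math

def ndcg_at_k(relevances, k=5):
    """NDCG@k with 1-indexed ranks: rel_1 + sum of rel_i / log2(i) for i >= 2."""
    def dcg(rels):
        return sum(rel / (math.log(i + 1, 2) if i > 0 else 1)
                   for i, rel in enumerate(rels[:k]))
    ideal = dcg(sorted(relevances, reverse=True))
    return dcg(relevances) / ideal if ideal else 0.0

# A ranking already in ideal order scores 1.0, matching the hand-built
# denominator 5 + 4 + 3/log2(3) + 2/log2(4) + 1/log2(5) above.
assert ndcg_at_k([5, 4, 3, 2, 1]) == 1.0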
Example #54
0
    def do_GET(s):
        """Respond to a GET request."""
        print(s.path)
        getParams = parse_qs(urlparse(s.path).query)
        print(getParams)
        ans = None
        query = getParams
        query_str = getParams['text'][0]
        request = getParams['request'][0]
        if request == '1':
            ans = get_suggestions(query_str)
            print(ans[0])
            print(json.dumps(ans[0]))
        else:
            ans = search(query)
        s.send_response(200)
        s.send_header("Content-type", "application/json")
        s.send_header("Access-Control-Allow-Origin", "*")
        s.end_headers()
        s.wfile.write(bytes(json.dumps(ans), 'utf-8'))
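
Note that parse_qs maps each parameter to a list of values, which is why the handler indexes [0]; a quick standard-library check:

from urllib.parse import urlparse, parse_qs

params = parse_qs(urlparse('/search?text=hello&request=1').query)
print(params)             # {'text': ['hello'], 'request': ['1']}
print(params['text'][0])  # 'hello'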
Example #55
0
    def get_new_word(self, token, example):
        word_info = search(token)
        answer = "No"
        if word_info is not None:
            self.update_dict(word_info)
            # Asks (in Japanese): "I didn't know {token}. I looked it up, so is this correct?"
            answer = self.ask_func(f"{token}は分かりませんでした。検査したので、これは正解ですか?",
                                   ["Yes", "No"])

        if answer == "No":
            # Asks (in Japanese): "What type of word is <token>?"
            token_type = self.ask_func(
                f"<{token}>って言う言葉はどのタイプですか?",
                [a.name for a in self.manager.word_types] + ["Skip"])
            if token_type != "Skip":
                return self.manager.learn_word(token, token_type, example)
            else:
                # Replies (in Japanese): "Sorry, better to talk about something else. Let's try chatting again."
                self.add_reply("すみません、別の話の方がね。もう一度チャットしてみよう")
        else:
            return self.manager.learn_word(token, word_info.types[0], example)
        return None
Example #56
0
def perform_command(to_update, phone, command, body):
    """

    Args:
        to_update:
        phone:
        command:
        body:

    Returns:

    """
    results, array_resp, to_search = "", [], body[:25]
    if command.lower() in ALLOWED_COMMAND:
        if "#google" in command.lower():
            print("[+] Search for :", to_search)
            results = search(to_search, 3)

        if "#wiki" in command.lower():
            print("[+] Wikipedia for :", to_search)
            results = wikki(to_search)

        print("[+] results: ", results)
        if len(results) > 5:
            chunks = string_divide(results, 200)
            for chunk in chunks:
                print("[+] Sending :", chunk)
                try_send_sms(phone, chunk)
        else:
            try_send_sms(phone, "No relevant results, try another search!")
            print("[+] No relevant results, try another search!")

        print("[+]Update Made in the database !")

        to_update["command"]["status"] = True
        print("to_update:", to_update)
        Sms().update({
            "from_number": phone,
            "command.label": command,
            "command.body": body
        }, to_update)
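
string_divide is not defined in this snippet; a plausible sketch, assuming it splits text into fixed-size chunks for the 200-character SMS limit used above (hypothetical helper, not the original):

def string_divide(text, size):
    """Split text into consecutive chunks of at most `size` characters."""
    return [text[i:i + size] for i in range(0, len(text), size)]

assert string_divide('abcdefg', 3) == ['abc', 'def', 'g']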
Example #57
0
def update_kd(node_root, data, point):
    # format given data
    data = data.replace(" ", "").split(",")
    for i in range(len(data)):
        data[i] = data[i].split("=")

    node = search(node_root, point)
    if node is False:
        return False
    temp_point = point.copy()
    change = 0
    # Check the given attributes for changes to the coordinate axes (if an
    # axis changes, the node must be deleted and re-inserted); any other
    # attribute is updated in place on the existing node.
    for i in range(len(data)):
        attribute = data[i][0]
        value = data[i][1]
        if attribute == "Latitude":
            change = 1
            temp_point[0] = float(value)
        elif attribute == "Longitude":
            change = 1
            temp_point[1] = float(value)
        elif attribute == "Altitude":
            change = 1
            temp_point[2] = float(value)
        else:
            # setattr is a safer equivalent of the original exec-based assignment
            setattr(node, attribute, value)

    # call delete for removing node and use insert for updating the data in a new node
    if change == 1:
        temp = node
        delete_kd(node_root, point)
        res = insert_kd(node_root, temp_point, [
            temp.Name, temp.City, temp.Country, temp.IATA, temp.ICAO,
            temp_point[0], temp_point[1], temp_point[2], temp.Timezone,
            temp.DST, temp.Tz_database_time_zone, temp.Type, temp.Source
        ], temp.Airport_ID)
        return res

    return node
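
A usage sketch under the assumptions visible above: points are [latitude, longitude, altitude] lists, and the tree root was built with insert_kd. All names and values here are illustrative only:

# Illustrative only. Changing Latitude moves the point, so the node is
# deleted and re-inserted; changing just Name would update it in place.
updated = update_kd(root, "Latitude=40.65, Name=JFK", [40.6413, -73.7781, 13.0])
if updated is False:
    print("point not found in the tree")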
Example #58
0
    def Find(self, request, context):
        stop_event = threading.Event()

        def on_rpc_done():
            _LOGGER.debug("Attempting to regain servicer thread.")
            stop_event.set()

        context.add_callback(on_rpc_done)
        candidates = []
        try:
            candidates = list(
                search.search(request.desired_name,
                              request.ideal_hamming_distance, stop_event,
                              self._maximum_hashes))
        except search.ResourceLimitExceededError:
            _LOGGER.info("Cancelling RPC due to exhausted resources.")
            context.cancel()
        _LOGGER.debug("Servicer thread returning.")
        if not candidates:
            return hash_name_pb2.HashNameResponse()
        return candidates[-1]
Example #59
0
    def do_text(self, arg):
        """
        sends text messages to people entered in Contacts.
        Usage: text <name> -m <message>...
        """
        
        # Searching Contacts for the recipient's number
        found = search.search(arg, Contacts, session)

        # If the recipient's number is not in Contacts, report that back
        if found[0] == 1:
            print(found[1])
        # Sending the text if the recipient's number is in Contacts
        elif found[0] == 2:
            receiver_number = found[2]
            print("Texting %s ......" % arg['<name>'])
            feedback = sms.text(receiver_number, ' '.join(arg['<message>']))
            print(feedback[0]['status'])  # Report the result of sending the text

        else:
            pass
Example #60
0
    def FindRange(self, request, context):
        stop_event = threading.Event()

        def on_rpc_done():
            _LOGGER.debug("Attempting to regain servicer thread.")
            stop_event.set()

        context.add_callback(on_rpc_done)
        secret_generator = search.search(
            request.desired_name,
            request.ideal_hamming_distance,
            stop_event,
            self._maximum_hashes,
            interesting_hamming_distance=request.interesting_hamming_distance)
        try:
            for candidate in secret_generator:
                yield candidate
        except search.ResourceLimitExceededError:
            _LOGGER.info("Cancelling RPC due to exhausted resources.")
            context.cancel()
        _LOGGER.debug("Regained servicer thread.")