Example #1
  def get(self):
    search_terms = self.request.get('q')
    parms = {'q': search_terms}
    i_page = unicode(self.request.get('page'))
    if i_page:
      parms['page'] = i_page 
    query_string = urllib.urlencode(parms)
    url = 'http://www.citeulike.org/search/all?'
    socket = urllib.urlopen(url+query_string)
    txt = socket.read()
    socket.close()
    entries, pages = citeulike.search_metadata(txt, i_page)

    attrs = {
        'entries': entries, 
        'query': 'citeulike.org/search/all?'+query_string.lower(),
        'search_pages': pages}
    template = Template(open('views/search.html').read())
    s = template.render_unicode(attributes=attrs)
    self.response.out.write(s)
    
    # Record the search
    search = Search(query=search_terms, 
                    encoded=query_string,
                    searched_at=datetime.now())
    search.put()
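
Example #1 above targets Python 2 and App Engine (`urllib.urlopen`, `unicode`, a Mako-style template). For reference, a minimal Python 3 sketch of just the fetch step, keeping the CiteULike URL and leaving the App Engine pieces out:

from urllib.parse import urlencode
from urllib.request import urlopen

def fetch_search_page(search_terms, page=None):
    """Fetch raw HTML for a CiteULike search (Python 3 sketch of Example #1)."""
    params = {'q': search_terms}
    if page:
        params['page'] = page
    url = 'http://www.citeulike.org/search/all?' + urlencode(params)
    with urlopen(url) as socket:  # the context manager replaces the explicit close()
        return socket.read()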
Example #2
async def on_message(message):
    # we do not want the bot to reply to itself
    if message.author == client.user:
        return

    if message.content.startswith("!suggest "):
        string = clean_content(message.content, "!suggest")
        hobj = hunspell.HunSpell("dictionaries/en_US.dic", "dictionaries/en_US.aff")
        if not hobj.spell(string):
            await message.channel.send(
                'Did you maybe mean "' + hobj.suggest(string)[0] + '"?'
            )
        else:
            await message.channel.send("Seems fine to me.")

    if message.content.startswith("!search "):
        string = clean_content(message.content, "!search")
        hobj = hunspell.HunSpell("dictionaries/en_US.dic", "dictionaries/en_US.aff")
        if not hobj.spell(string):
            data = Search(hobj.suggest(string)[0])
        else:
            data = Search(string)
        await message.channel.send("", embed=data.performSearch())

    if message.content.startswith("!build ") or message.content.startswith("!builds "):
        await message.channel.send(**BuildResponder(message).getReply())

    if message.content.startswith("!skill ") or message.content.startswith("!skills "):
        await message.channel.send(**SkillResponder(message).getReply())

    if message.content.startswith("!github"):
        await message.channel.send("https://github.com/rbridge/discord-divinity-bot")

    if message.content.startswith("!help"):
        await message.channel.send(**HelpResponder(message).getReply())
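
Example #2 constructs a fresh `hunspell.HunSpell` object, re-reading both dictionary files, on every `!suggest` and `!search` message. A small refactor sketch, assuming the same `dictionaries/en_US.*` files, that loads the checker once and also guards against an empty suggestion list (the original would raise IndexError on `suggest(...)[0]`):

import hunspell

# Load the dictionary once at import time instead of once per message.
HOBJ = hunspell.HunSpell("dictionaries/en_US.dic", "dictionaries/en_US.aff")

def spellcheck_reply(text):
    """Return the bot's reply for a spellcheck request."""
    if HOBJ.spell(text):
        return "Seems fine to me."
    suggestions = HOBJ.suggest(text)
    if suggestions:
        return 'Did you maybe mean "' + suggestions[0] + '"?'
    return "No suggestions found."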
Example #3
  def test_search_model(self):
    """ Does basic model work? """

    testsearch = Search(
      location='test place',
      date='2020-01-01',
      dates='6-10-20',
      deaths='100',
      cases='200',
      change_cases='10',
      change_deaths='1',
      created_at=datetime.now(),
      description='Testing testing 123'
    )

    testsearch.id = 1234
    db.session.add(testsearch)
    db.session.commit()

    # Search should have no user ID.
    self.assertIsNone(testsearch.user_id)

    # Test user relationship
    self.u.searches.append(testsearch)
    db.session.commit()

    # Search should now be attached to the user (setUp already added one search).
    self.assertEqual(len(self.u.searches), 2)
Example #4
def search_box(request):
	if request.method == 'GET':
		recent_search = Search(search_term=request.GET['search_term'].replace(" ",""),attribute=request.GET['attr'])
		recent_search.save()
		if request.GET['attr']=='movies':
			search_movie = request.GET['search_term'].replace(" ","")
			return HttpResponseRedirect('/moviesearch/'+search_movie+'/')
		else:
			search_actor = request.GET['search_term'].replace(" ","")
			return HttpResponseRedirect('/actorsearch/'+search_actor+'/')
Example #5
def home():
    if 'username' in session:
        user = User().get(session['username'])
        if request.method == "POST":
            search = Search(request.form['search'])
            result_dict = search.get_result()
            return render_template('home.html', books=result_dict, user=user)
        else:
            return render_template('home.html', books={}, user=user)
    else:
        return "You must login"
Example #6
def save_notes(user_id):
    g.user = User.query.get_or_404(user_id)
    word, pos, user_id, note = [request.json[k] for k in ('word', 'pos', 'user_id', 'note')]
    if len(Search.query.filter(Search.word==word, Search.user_id == user_id).all()) == 0:
        search = Search(word=word, pos=pos, note=note, user_id=user_id)
        db.session.add(search)
        db.session.commit()
        return 'Note created'
    else:
        search = Search.query.filter(Search.word==word, Search.user_id == user_id).one()
        search.note = note
        db.session.commit()
        return 'Note saved'
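
Example #6's existence check pulls every matching row with `.all()` just to count them. A sketch of the same create-or-update logic using SQLAlchemy's `one_or_none()`, under the same `Search` model and `db` session assumptions:

def upsert_note(word, pos, note, user_id):
    # Fetch the single matching search, or None if absent.
    search = Search.query.filter(
        Search.word == word, Search.user_id == user_id).one_or_none()
    if search is None:
        search = Search(word=word, pos=pos, note=note, user_id=user_id)
        db.session.add(search)
        message = 'Note created'
    else:
        search.note = note
        message = 'Note saved'
    db.session.commit()
    return message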
Example #7
 def get(self, request, format=None):
     try:
         query = request.GET.get('query')
         try:
             if isinstance(request.user, AnonymousUser):
                 raise User.DoesNotExist()
             search = Search(user=request.user, query=query)
             search.save()
         except User.DoesNotExist:
             print "Anonymous Search"
         songs = search_songs(query)
         serial = SongSerializer(songs, many=True)
         return Response(serial.data, status=status.HTTP_200_OK)
     except MultiValueDictKeyError:
         return Response(request.data, status=status.HTTP_400_BAD_REQUEST)
Example #8
def get_matches(es, context_terms):
    """Read search terms from the current request from and return relevant documents from elasticsearch index.

    Arguments:
        - es: elasticsearch client connection
        - context_terms (str): semicolon-delimited terms indicating the context under which to calculate violence-type relevance
    """
    # save search to db
    _search = Search('unauthenticated', context_terms)
    db.session.add(_search)
    db.session.commit()

    # search elasticsearch index
    query = {"query": {"bool": {
        "should": [
            {"match": {"search_tags": {
                'query': s.strip(),
                "fuzziness": "AUTO",
                "minimum_should_match": "50%"}}}
            for s in context_terms.split(';')]}},
        "size": max_docs}
    response = es.search(
        'nssd', 'doc', query,
        _source_include=["violence_tags"])

    # return only the hits, removing search meta data
    return response['hits']['hits']
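
A hypothetical call site for `get_matches`, assuming an `Elasticsearch` client pointed at a local cluster holding the `nssd` index and the pre-7.x `es.search(index, doc_type, body)` signature used above; note that `context_terms` is one semicolon-delimited string, not a list:

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])  # assumed local cluster
hits = get_matches(es, 'domestic violence;intimate partner')
for hit in hits:
    print(hit['_source']['violence_tags'])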
Example #9
  def test_create(self):
    testsearch = Search.create({
      'location': 'Test place',
      'date': '2020-07-07',
      'dates': '6-10-20',
      'deaths': '402',
      'cases': '6916',
      'change_deaths': '1',
      'change_cases': '10',
      'description': 'Testing description'
    })

    testsearch.id = 1234
    db.session.add(testsearch)
    db.session.commit()

    testsearch = Search.query.get(1234)

    self.assertIsNotNone(testsearch)
    self.assertEqual(testsearch.location, 'Test place')
    self.assertEqual(testsearch.date, '2020-07-07')
    self.assertEqual(testsearch.dates, '6-10-20')
    self.assertEqual(testsearch.deaths, '402')
    self.assertEqual(testsearch.cases, '6916')
    self.assertEqual(testsearch.description, 'Testing description')
    self.assertIsInstance(testsearch.created_at, datetime)
Example #10
    def response(self):
        search = Search(self.event, self.search_key)
        db.session.add(search)
        db.session.commit()

        response = get_response_and_update(search, 5)
        return response
Example #11
    def search_tag(self, instance):

        tag = instance.text
        search = Search(searchterm=tag)
        db.add(search)
        db.commit()
        App.get_running_app().root.next_screen("front_screen")
Example #12
  def setUp(self):
    db.drop_all()
    db.create_all()
    
    u = User.signup('testuser', '*****@*****.**', 'testpwd')
    uid = 1111
    u.id = uid

    db.session.commit()

    s = Search.create({
      'location': 'Test location',
      'date': '2020-07-07',
      'dates': '6-10-20,6-11-20,6-12-20,6-13-20,6-14-20',
      'deaths': '402,406,410,416,422',
      'cases': '6916,6985,7051,7107,7151',
      'change_deaths': '1,2,3,4,5',
      'change_cases': '10,20,30,40,50',
      'created_at': datetime.now(),
      'description': 'Test description'
      })
    
    sid = 2222
    s.id = sid

    u.searches.append(s)
    db.session.commit()

    self.u = u
    self.s = s
Example #13
    def setUp(self):

        db.drop_all()
        db.create_all()

        self.client = app.test_client()
        self.testuser = User.signup(username='******',
                                    email='*****@*****.**',
                                    password='******')
        self.testuser_id = 1000
        self.testuser.id = self.testuser_id

        self.testsearch = Search.create({
            'location': 'Test location',
            'date': '2020-07-07',
            'dates': '6-10-20,6-11-20,6-12-20,6-13-20,6-14-20',
            'deaths': '402,406,410,416,422',
            'cases': '6916,6985,7051,7107,7151',
            'change_deaths': '1,2,3,4,5',
            'change_cases': '10,20,30,40,50',
            'created_at': datetime.now(),
            'description': 'Test description'
        })

        self.testsearch_id = 2000
        self.testsearch.id = self.testsearch_id

        db.session.commit()

        self.testuser.searches.append(self.testsearch)

        db.session.commit()
Example #14
def index(request):
    if request.method == 'GET' and request.GET.get('query'):
        query = request.GET.get('query')
        #Sort by sort url parameter:        
        sort = request.GET.get('sort', SITE_SEARCH_DEFAULT_SORT)
        query_kwargs = {'sort': sort}
        sorts_list = get_sorts_list(request)
        
        ctype_filters = get_ctype_filters(request)
        #Filter by content type if URL param is set:        
        ctype_filter = request.GET.get('content_type')
        if ctype_filter and ctype_filter in [c['id'] for c in ctype_filters]:
            app_label, model = ctype_filter.split(".")
            query_kwargs['app_label'] = app_label
            query_kwargs['model'] = model
        results = Search.query(query, **query_kwargs)
        #FIXME: paginate results
        extra_context = {
            'results': results,
            'sorts': sorts_list,
            'filters': ctype_filters,
            'query_string': query            
        }
    else:
        extra_context = {'results': None}
    return simple.direct_to_template(request, 
        template="site_search/index.html",
        extra_context=extra_context
    )
Example #15
def search():
    form = SearchForm()

    if form.validate_on_submit():
        match_id = form.query.data
        error = False

        search_log = Search(current_user.get_id(), match_id, request.access_route[0])

        # Trim whitespace chars
        match_id = match_id.strip()

        # Normalize input (in case of unicode variants of chars; can break at urllib.urlencode level later on without this)
        match_id = unicodedata.normalize('NFKC', match_id)

        # If not a decimal input, let's try to pull a match id from inputs we recognise
        if not unicode.isdecimal(match_id):
            # Pull out any numbers in the search query and interpret as a match id.
            search = re.search(r'([0-9]+)', match_id)
            if search is not None:
                match_id = search.group(1)

        if unicode.isdecimal(match_id):
            _replay = Replay.query.filter(Replay.id == match_id).first()

            # If we don't have this match_id in our database, tell the user and bail out.
            if not _replay:
                flash('Sorry, we do not have any replay stored for match ID {}'.format(match_id), 'danger')
                return redirect(request.referrer or url_for("index"))

            if _replay:
                search_log.replay_id = _replay.id
                search_log.success = True
                db.session.add(search_log)
                db.session.commit()
                return redirect(url_for("replays.replay", _id=match_id))

        # We only get this far if there was an error or the matchid is invalid.
        if error:
            flash("Replay {} was not on our database, and we encountered errors trying to add it.  Please try again later.".format(match_id), "warning")
        else:
            flash("Invalid match id.  If this match id corresponds to a practice match it is also interpreted as invalid - Dotabank is unable to access practice lobby replays.", "danger")

        search_log.success = False
        db.session.add(search_log)
        db.session.commit()
    return redirect(request.referrer or url_for("index"))
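
Examples #15, #38, #43 and #45 repeat the same strip-normalize-extract dance on the submitted match id. A standalone Python 3 sketch of that extraction logic (in Python 3, `isdecimal` is a plain `str` method, so the `unicode` calls disappear):

import re
import unicodedata

def extract_match_id(raw):
    """Return a numeric match-id string pulled from user input, or None."""
    # Trim whitespace and normalize unicode variants (NFKC), as in the originals.
    candidate = unicodedata.normalize('NFKC', raw.strip())
    if candidate.isdecimal():
        return candidate
    # Fall back to the first run of digits (dotabuff links, protocol URLs, etc.).
    found = re.search(r'([0-9]+)', candidate)
    return found.group(1) if found else None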
Example #16
def get_previous_searches():
    '''Build a list of tuples (url, keywords) describing the 30 most
    recent searches stored by the application.'''
    previous_searches = []

    for search in Search.query_by_most_recent().fetch(30):
        previous_searches.append((search.url, search.keywords))

    return previous_searches
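
Example #16 assumes a datastore model that exposes `query_by_most_recent()`. A minimal App Engine `ndb` sketch consistent with the fields used (`url`, `keywords`); the `created_at` ordering property is an assumption:

from google.appengine.ext import ndb

class Search(ndb.Model):
    url = ndb.StringProperty()
    keywords = ndb.StringProperty()
    created_at = ndb.DateTimeProperty(auto_now_add=True)  # assumed sort key

    @classmethod
    def query_by_most_recent(cls):
        return cls.query().order(-cls.created_at)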
Example #17
 def post(self):
     result_template = the_jinja_env.get_template('templates/result.html')
     age = int(self.request.get("age"))
     mode_of_transportation = self.request.get("mode of travel")
     range = int(self.request.get("distance"))
     zip_code = int(self.request.get("zip code"))
     data = Search(age=age,
                   mode_of_transportation=mode_of_transportation,
                   range=range,
                   zip_code=zip_code)
     data.put()
     search_data = {
         "age": age,
         "transportation": mode_of_transportation,
         "range": range,
         "location": zip_code
     }
     self.response.write(result_template.render(search_data))
Example #18
def create_user(fbid):
    """
    TODO: write description
    """
    user = get_user_by_id(fbid)
    if user is None:
        user = User(user_id=fbid,
                    current_search=Search(criteria=[], user_storyline=None),
                    searches=[])
        user.save()
Example #19
def get_results_for_queries(queries, include_stack_overflow, search_id, api_key):

    # Create a new fetch index.
    last_fetch_index = Search.select(fn.Max(Search.fetch_index)).scalar() or 0
    fetch_index = last_fetch_index + 1

    for query in queries:
        get_results(
            query['query'], query['package'], include_stack_overflow,
            fetch_index, search_id, api_key)
Example #20
    def save_to_db(self, const):
        # store in db, uses self.data Extract objects, iterate through and generate the appropriate injections for the db

        if const is "search_term":
            s_db = Search(date=timezone.now(), term=self.data[0].search_term)
            print "Adding %s data into db." % s_db
            s_db.save()
            for q in self.data:
                print q
                # save data around Search term for each Extract object in self.data
                # each Extract object has multiple links, get them all and associate to the created search term
                try:
                    for url in q.job_urls:
                        l_db = Links(search=s_db, link=url)
                        l_db.save()
                    # each Extract object has a single location, get it and associate it to search term
                    if q.loc != "":
                        loc_db = Location(city=q.city, state=q.state)
                        loc_db.save()
                    # each Extract object has a summary attribute that has all the data, modify the data pool to fit the parameters specified by user
                    # and store the data in a Results table associated to its Search table
                    summary = q.pool_summary(
                        pos=self.pos, with_filter=self.with_filter, lower=self.lower, with_bigrams=self.with_bigrams
                    )
                    data = summary[("Word", "Word_Count", "POS_Tag")]
                    for tup in data:
                        w = str(tup[0])
                        c = tup[1]
                        try:
                            p = str(tup[2])
                        except IndexError:
                            p = ""
                        r_db = Results(
                            search=s_db, location=loc_db, word=w, count=c, pos=p, is_bigram=self.with_bigrams
                        )
                        r_db.save()
                except Exception:
                    if q.loc != "":
                        loc_db = Location(city=q.city, state=q.state)
                        loc_db.save()
                    r_db = Results(search=s_db, location=loc_db, word="N/A", count=0, pos="", is_bigram=False)
                    r_db.save()
Example #21
def close_search(fbid):
    """
    TODO: write description
    """
    user = get_user_by_id(fbid)
    if user is not None:
        if user.current_search is not None:
            user.searches.append(user.current_search)
            user.current_search = Search(criteria=[], user_storyline=None)
            user.update(current_search=user.current_search)
            user.save()
Example #22
def search():
    searchterms = request.args['query']
    searchterms = searchterms.split()
    searchterms = ["%"+searchterm+"%" for searchterm in searchterms]
    ## need to search the following tables: restaurants, cuisine, food items
    ## not the most efficient way to do it, but it'll work for now
    s = Search(searchterms)
    restaurants_ranked = s.search_db()
    result = []
    for restaurant in restaurants_ranked:
        rdict={}
        rdict['id'] = restaurant.id
        rdict['name'] = restaurant.name
        rdict['address_city'] = restaurant.get_address().city
        rdict['cuisine'] = [c.name for c in restaurant.get_cuisines()]
        rdict['food_items'] = [fi.name for fi in restaurant.get_food_items()]
        rdict['top_food_items'] = [tfi.name for tfi in restaurant.get_top_food_items()]
        rdict['deliveries'] = [(d.id, d.order_time) for d in restaurant.get_deliveries()]
        result.append(rdict)
    print result
    return jsonify(results=result)  
Example #23
def search():
    keyword = request.args.get('keyword')
    tweets = Search().search(keyword)

    if len(tweets) > 0:
        if Search.less_than_5minutes(tweets[0].creation_time):
            tweets = [{'text': x.text} for x in tweets]
        else:
            print("insert new tweets in db")
            search_result = Twitter().search_tweets(keyword)
            tweets = search_result['statuses']
            for item in tweets:
                tweet = Search(keyword=keyword, text=item['text'])
                Search().add_search(tweet)
    # Keyword isn't in the DB
    else:
        print("!!!!!!!!!!!NOT FOUND IN DATABASE")
        search_result = Twitter().search_tweets(keyword)
        tweets = search_result['statuses']
        for item in tweets:
            tweet = Search(keyword=keyword, text=item['text'])
            Search().add_search(tweet)

    return render_template("index.html", tweets=tweets, keyword=keyword)
Example #24
def save_search():
	""" If user is logged in, save search to user, else save to 
	session and redirect user to login 
	"""

	if current_user.is_authenticated:
		s = Search.create(request.json)
		current_user.searches.append(s)
		db.session.commit()
		return 'saved'
	else:
		session['search'] = serialize(request.json)
		return 'login'
Example #25
def search_post():
    form = SearchForm()
    posts = None
    count = 0
    limit = 5
    try:
        page = int(request.args.get('page', 1))
    except ValueError:
        page = 1

    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                posts, count = Search.search_post(form.searchtext.data, limit=limit, page=page)
                session['search_query'] = form.searchtext.data
            except Exception:
                flash(gettext('Error while searching, please retry later'), 'error')
        else:
            flash(gettext('Invalid submission, please check the message below'), 'error')
    else:
        if 'search_query' in session:
            form.searchtext.data = session['search_query']
            try:
                posts, count = Search.search_post(form.searchtext.data, limit=limit, page=page)
            except Exception:
                flash(gettext('Error while searching, please retry later'), 'error')
    
    pagination = Pagination(page=page,
        per_page=limit,
        total=count,
        record_name=gettext('posts'),
        alignment='right',
        bs_version=3)

    return render_template("blog/post-search.html",
        title=gettext('Search'),
        form=form,
        posts=posts,
        pagination=pagination)
Example #26
 def save_to_db(self,const):
     # store in db, uses self.data Extract objects, iterate through and generate the appropriate injections for the db
     
     if const == "search_term":
         s_db = Search(date=timezone.now(), term=self.data[0].search_term)
         print "Adding %s data into db." % s_db
         s_db.save()
         for q in self.data:
             print q
             # save data around Search term for each Extract object in self.data
             # each Extract object has multiple links, get them all and associate to the created search term
             try:
                 for url in q.job_urls:
                     l_db = Links(search=s_db, link=url)
                     l_db.save()
                 # each Extract object has a single location, get it and associate it to search term
                 if q.loc != "":
                     loc_db = Location(city=q.city,state=q.state)
                     loc_db.save()
                 # each Extract object has a summary attribute that has all the data, modify the data pool to fit the parameters specified by user
                 # and store the data in a Results table associated to its Search table
                 summary = q.pool_summary(pos=self.pos, with_filter=self.with_filter, lower=self.lower, with_bigrams=self.with_bigrams)
                 data = summary[('Word', 'Word_Count', 'POS_Tag')]
                 for tup in data:
                     w = str(tup[0])
                     c = tup[1]
                     try:
                         p = str(tup[2])
                     except IndexError:
                         p = ""
                     r_db = Results(search=s_db,location=loc_db,word=w,count=c,pos=p,is_bigram=self.with_bigrams)
                     r_db.save()
             except Exception:
                 if q.loc != "":
                     loc_db = Location(city=q.city,state=q.state)
                     loc_db.save()
                 r_db = Results(search=s_db,location=loc_db,word="N/A",count=0,pos="",is_bigram=False)
                 r_db.save()
Example #27
 def create_search_normal(file):
     f = load_json(file)
     nonlocal searchID
     for (k, val) in f.items():
         name = val['name']['name-USen']
         category = file.split(".")[0]
         id = val['id']
         new = Search(name=name,
                      category=category,
                      id=id,
                      searchID=searchID)
         db.session.add(new)
         db.session.commit()
         searchID += 1
Example #28
    def post(self):
        query = self.request.get('q', '')
        page_token = self.request.get('page', '')

        # The user must enter a valid search query. If not, redirect to
        # index, providing an error which will determine the response
        # generated by IndexHandler
        if not query:
            return self.redirect(build_url('/', [
                ('error', 'empty_query')
            ]))

        # Save search query to the High Replication Datastore
        search = Search(parent=search_group_key())
        search.keywords = urllib.unquote(query)
        search.url = build_url('/search', [
            ('q', query)
        ])
        search.put()

        # Redirect to a URL that will trigger the YouTube API request
        # and render the response.
        return self.redirect(search.url)
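
Example #28 leans on a `build_url` helper that is not shown. A plausible Python 2 sketch, assuming it does nothing more than URL-encode the given pairs onto the path:

import urllib

def build_url(path, params):
    """Hypothetical helper: append URL-encoded (key, value) pairs to path."""
    if not params:
        return path
    return '%s?%s' % (path, urllib.urlencode(params))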
Example #29
def searches():
    if not request.args or request.args.get('key') != IOS_API_KEY:
        abort(401)
    if request.method == "POST":
        if not request.json or not 'google_places_id' in request.json or not 'ios_device_id' in request.json:
            abort(400)
        search = Search(
            google_places_id=request.json['google_places_id'],
            ios_device_id=request.json['ios_device_id'])
        if 'name' in request.json:
            search.populate(name=request.json['name'])
        if 'vicinity' in request.json:
            search.populate(vicinity=request.json['vicinity'])
        try:
            search.put()
            return jsonify(search.to_dict())
        except CapabilityDisabledError:
            abort(400)
    elif request.method == "GET":
        if not request.args.get('ios_device_id'):
            logging.error("request.json: %s", request.json)
            logging.error("request.args[ios_device_id]: %s", request.args.get('ios_device_id'))
            abort(400)
        ios_device_id = request.args['ios_device_id']
        searches = Search.query(Search.ios_device_id==ios_device_id).order(-Search.created_at).fetch(20)
        searchesDeduped = []
        searchesGooglePlacesIds = set()
        for search in searches:
            if search.google_places_id not in searchesGooglePlacesIds:
                if search.name is not None and search.vicinity is not None:
                    searchesDeduped.append(search)
                    searchesGooglePlacesIds.add(search.google_places_id)
        logging.info("searchesGooglePlacesIds: %s", searchesGooglePlacesIds)
        return jsonify({"searches": [search.to_dict() for search in searchesDeduped]})
    else:
        abort(401)
Example #30
    def post(self, request, **kwargs):
        category = request.POST.get('category')
        location = request.POST.get('location')

        context = self.get_context_data(**kwargs)
        context['map'] = True

        museums = []  # ensure defined even when neither category nor location is given
        if category:
            search = Search(text='Category: {0}'.format(category))
            search.save()
            museums = Museum.objects.filter(types__code=category)
            geojson = self.get_geojson(**{'name': category, 'museums': museums})
            context["jsonfile"] = category
        elif location:
            search = Search(text=location)
            search.save()
            # Inputs: If just a state/abbrev given, show all items for that state only, no radius
            # Otherwise, geocode the result, run the vicenty distance
            if location.lower() in self.states_and_abbrevs:
                if len(location) != 2:
                    location = STATES_NORMALIZED.get(location.lower())
                context["jsonfile"] = location
                # TEMPORARY: EXCLUDE 11K GENERAL MUSEUMS FOR NOW -- Can always add them back later
                museums = Museum.objects.filter(state=location).exclude(types__code='GMU')
                if museums.count() > 0:
                    geojson = self.get_geojson(**{'name': location, 'museums': museums})
                    # By this point, location is always a two-letter abbreviation
                    address, (latitude, longitude) = self.geolocator.geocode(''.join([state_tuple[1] for state_tuple in US_STATES if state_tuple[0] == location]))
            else:
                try:
                    museums = []
                    address, (latitude, longitude) = self.geolocator.geocode(location)
                except Exception:
                    context["jsonfile"] = ""
                else:
                    if latitude and longitude:
                        all_museums = Museum.objects.exclude(types__code='GMU')

                        for museum in all_museums:
                            dist = vincenty(
                                (museum.latitude, museum.longitude), 
                                (latitude, longitude)
                            ).miles

                            if dist <= RADIUS:
                                museums.append(museum)

                        context["jsonfile"] = hashlib.sha256(location).hexdigest()[:8]
                        geojson = self.get_geojson(**{'name': context["jsonfile"], 'museums': museums})
                        context["latitude"] = latitude
                        context["longitude"] = longitude

        # context["geojson_path"] = PATH_PREFIX
        context['museums'] = museums

        return render(request, self.template_name, context)
Example #31
 def post(self):
     result_template = the_jinja_env.get_template('templates/result.html')
     zip_code = self.request.get("zip_code")
     address = self.request.get("address")
     activity = self.request.get("activity")
     state = self.request.get("state")
     city = self.request.get("city")
     data = Search(zip_code=zip_code, address=address, activity=activity)
     data.put()  # without this the Search entity is created but never stored
     search_data = {
         "address": address,
         "zip_code": zip_code,
         "activity": activity,
         "state": state,
         "city": city
     }
     self.response.write(result_template.render(search_data))
Example #32
 def create_search_items(files):
     index = 1
     nonlocal searchID
     for string in files:
         housewares = load_json(string)
         for (k, item) in housewares.items():
             name = item[0]['name']['name-USen']
             category = 'items'
             id = index
             new_item = Search(name=name,
                               category=category,
                               id=id,
                               searchID=searchID)
             db.session.add(new_item)
             db.session.commit()
             index += 1
             searchID += 1
Example #33
def search_results(message):
    srchquery = []
    covers = []
    search_provider = g.user.searchapi
    try:
        if search_provider == "lastfm":
            lastfm_search(message, srchquery, covers)
        elif search_provider == "discogs":
            discogs_search(message, srchquery)
    except (IndexError, pylast.WSError):
        flash("Couldn't find any albums", "warning")
        return redirect(url_for('index'))
    db.session.add(Search(search_term=message, user=g.user))
    g.user.searches_num += 1
    db.session.commit()
    return render_template("search.html",
                           srchquery=srchquery,
                           search_type=search_provider)
Example #34
def login():
    """ Show login page with login form """

    # If user has been redirected to save a search, show flash message.
    if request.args.get('saveSearch') and request.method == 'GET':
        flash(
            Markup(
                'Please log in to save search. Don\'t have an account? Register <a href="/signup">here</a>'
            ), 'danger')

    if current_user.is_authenticated:
        return redirect(url_for('index'))

    form = LoginForm()

    if form.validate_on_submit():
        username = form.username.data
        password = form.password.data
        user = User.authenticate(username, password)

        if user:
            login_user(user)

            # If user has saved search in session, save it to database and delete session.
            if 'search' in session:
                s = Search.create(session['search'])
                user.searches.append(s)
                db.session.commit()
                del session['search']
                flash("Search saved", 'success')
                return redirect(f'/user/{user.username}/searches')

            return redirect(url_for('index'))

        flash("Invalid credentials.", 'danger')

    return render_template('/login.html',
                           form=form,
                           btnText="Log in",
                           cancel='index',
                           color="#99d3FF")
Example #35
 def __init__(self, **kwargs):
     super(SadpandaApp, self).__init__(**kwargs)
     Window.bind(on_keyboard=self.onBackBtn)
     # Makes sure only non-h is the default.
     existfilters = db.query(Filters).order_by(Filters.id.desc()).first()
     if not existfilters:
         clearstart = Filters(nonh=1,
                              doujinshi=0,
                              manga=0,
                              artistcg=0,
                              gamecg=0,
                              western=0,
                              imageset=0,
                              cosplay=0,
                              asianporn=0,
                              misc=0)
         db.add(clearstart)
         db.commit()
     clearsearch = Search(searchterm=" ")
     db.add(clearsearch)
     db.commit()
Example #36
def search():
    try:
        myTwitter = Twitter({
            'key': app.config['TWITTER_API_CLIENT_KEY'],
            'secret': app.config['TWITTER_API_CLIENT_SECRET']
        })
        keyword = request.args.get('keyword')
        tweets = Search().search(keyword)

        if len(tweets) > 0:
            if Search.less_than_5minutes(tweets[0].creation_time):
                tweets = [{'text': x.text} for x in tweets]
            else:
                print("insert new tweets in db")
                search_result = myTwitter.search_tweets(keyword)
                tweets = search_result['statuses']
                for item in tweets:
                    tweet = Search(keyword=keyword, text=item['text'])
                    Search().add_search(tweet)
        # Keyword isn't in the DB
        else:
            print("!!!!!!!!!!!NOT FOUND IN DATABASE")
            search_result = myTwitter.search_tweets(keyword)
            tweets = search_result['statuses']
            for item in tweets:
                tweet = Search(keyword=keyword, text=item['text'])
                Search().add_search(tweet)

        if len(tweets) <= 0:
            flash('No results were found.', 'warning')
        else:
            flash('{} results were found.'.format(len(tweets)), 'success')

        return render_template("index.html",
                               tweets=tweets,
                               keyword=keyword,
                               tweetsy=json.dumps(tweets))
    except Exception:
        flash(app.config['ERROR_MSG'].format('Could not get search results'),
              'danger')
        return render_template("index.html",
                               keyword=request.args.get('keyword'))
Example #37
def index(request):
    if request.method == 'POST':  # If the form has been submitted...
        form = SearchForm(request.POST)  # A form bound to the POST data
        if form.is_valid():  # All validation rules pass

            valor = Search()

            valor.sdata = form.cleaned_data['sdata']
            valor.sdate = form.cleaned_data['sdate']

            valor.save()

            return render(request, 'form.html', {
                'form': form,
            })
    else:
        form = SearchForm()  # An unbound form

    return render(request, 'form.html', {
        'form': form,
    })
Example #38
def search():
    form = SearchForm()

    if form.validate_on_submit():
        match_id = form.query.data
        error = False

        search_log = Search(current_user.get_id(), match_id,
                            request.access_route[0])

        # Trim whitespace chars
        match_id = match_id.strip()

        # Normalize input (in case of unicode variants of chars; can break at urllib.urlencode level later on without this)
        match_id = unicodedata.normalize('NFKC', match_id)

        # If not a decimal input, let's try to pull a match id from inputs we recognise
        if not unicode.isdecimal(match_id):
            # Pull out any numbers in the search query and interpret as a match id.
            search = re.search(r'([0-9]+)', match_id)
            if search is not None:
                match_id = search.group(1)

        if unicode.isdecimal(match_id):
            _replay = Replay.query.filter(Replay.id == match_id).first()

            # If we don't have match_id in database, check if it's a valid match via the WebAPI and if so add it to DB.
            if not _replay:
                try:
                    # Only continue if the WebAPI doesn't throw an error for this match ID, and if the match ID for the
                    # info returned matches the match_id we sent (Fixes edge-case bug that downed Dotabank once, where
                    # a user searched 671752079671752079 and the WebAPI returned details for 368506255).
                    match_data = steam.api.interface(
                        "IDOTA2Match_570").GetMatchDetails(
                            match_id=match_id).get("result")
                    if "error" not in match_data.keys() and int(
                            match_data.get("match_id")) == int(match_id):
                        # Use get_or_create in case of race-hazard where another request (e.g. double submit) has already processed this replay while we were waiting for match_data.
                        # DOESN'T FIX A FOOKIN THINGA
                        _replay, created = Replay.get_or_create(
                            id=match_id, skip_webapi=True)

                        if created:
                            _replay._populate_from_webapi(match_data)
                            db.session.add(_replay)
                            queued = Replay.add_gc_job(_replay,
                                                       skip_commit=True)
                            if queued:
                                flash(
                                    "Replay {} was not in our database, so we've added it to the job queue to be parsed!"
                                    .format(match_id), "info")
                                try:
                                    db.session.commit()
                                except IntegrityError:
                                    db.session.rollback()
                                    pass  # F*****g piece of shit.
                            else:
                                db.session.rollback()
                                error = True
                except steam.api.HTTPError:
                    error = True

            if _replay:
                search_log.replay_id = _replay.id
                search_log.success = True
                db.session.add(search_log)
                db.session.commit()
                return redirect(url_for("replays.replay", _id=match_id))

        # We only get this far if there was an error or the matchid is invalid.
        if error:
            flash(
                "Replay {} was not on our database, and we encountered errors trying to add it.  Please try again later."
                .format(match_id), "warning")
        else:
            flash(
                "Invalid match id.  If this match id corresponds to a practice match it is also interpreted as invalid - Dotabank is unable to access practice lobby replays.",
                "danger")

        search_log.success = False
        db.session.add(search_log)
        db.session.commit()
    return redirect(request.referrer or url_for("index"))
Example #39
def get_results(query, package, include_stack_overflow, fetch_index, search_id, api_key):

    # Make request for search results
    params = DEFAULT_PARAMS.copy()
    params['key'] = api_key
    params['cx'] = search_id
    params['q'] = query
    if not include_stack_overflow:
        params['siteSearch'] = 'stackoverflow.com'
        params['siteSearchFilter'] = 'e'  # 'e' for 'exclude'
    response = make_request(default_requests_session.get, SEARCH_URL, params=params)

    # Pause so that we don't bombard the server with requests
    time.sleep(REQUEST_DELAY)

    # If request resulted in error, the response is null.  Skip over this query.
    if response is None:
        return

    # Parse search results
    soup = BeautifulSoup(response.content, 'html.parser')
    url = soup.find('opensearch:Url')
    entry_count = len(soup.find_all('entry'))

    # The Atom spec for the search API
    # (https://developers.google.com/custom-search/json-api/v1/reference/cse/list#response)
    # mentions that the estimated results count may be a long integer.
    # To my knowledge, peewee (our ORM) doesn't support long integer fields.
    # So, I cast this to an integer instead and cross my fingers there is no overflow.
    search = Search.create(
        fetch_index=fetch_index,
        query=query,
        page_index=0,
        requested_count=REQUESTED_RESULT_COUNT,
        result_count_on_page=entry_count,
        estimated_results_count=int(
            soup.find('cse:searchinformation').find('cse:totalresults').text),
        package=package,
    )

    # Fetch the first "entry" or search result
    entry = soup.entry

    # Save all of the search results from first to last.
    # Maintaining consistency with our query scraping, ranking starts at 1.
    for rank in range(1, entry_count + 1):

        # Extract fields from the entry
        updated_datetime_without_milliseconds = re.sub(r'\.\d\d\dZ', 'Z', entry.updated.text)
        updated_datetime = datetime.datetime.strptime(
            updated_datetime_without_milliseconds,
            "%Y-%m-%dT%H:%M:%SZ"
        )
        link = entry.link['href']
        snippet = entry.summary.string
        title = entry.title.text
        url = entry.id.text

        # Create a record for this search result
        SearchResult.create(
            search=search,
            title=title,
            snippet=snippet,
            link=link,
            url=url,
            updated_date=updated_datetime,
            rank=rank,
        )

        # To my knowledge, this is the only method for which it is strongly implied in
        # the BeautifulSoup documentation that you are fetching the next result
        # in the sequence.  I also assume that the search API is returning results
        # in the order of decreasing relevance, such that rank increases (gets bigger)
        # with each successive entry visited.
        entry = entry.find_next('entry')
Example #40
 def goto_front(self):
     blanksearch = Search(searchterm=" ")
     db.add(blanksearch)
     db.commit()
     self.next_screen("front_screen")
Example #41
 def savesearch(self):
     newsearch = Search(searchterm=self.ids.searchstring.text)
     db.add(newsearch)
     db.commit()
     self.dismiss()
Example #42
 def get(self):
    
   attrs = { 'searches': Search.all().order('-searched_at').fetch(5) }
   template = Template(open('views/index.html').read()) 
   s = template.render_unicode(attributes=attrs)
   self.response.out.write(s)
Example #43
def search():
    form = SearchForm()

    if form.validate_on_submit():
        match_id = form.query.data
        error = False

        search_log = Search(current_user.get_id(), match_id, request.access_route[0])

        # Trim whitespace chars
        match_id = match_id.strip()

        # If not a decimal input, let's try to pull a match id from inputs we recognise
        if not unicode.isdecimal(unicode(match_id)):
            # Dota 2 protocol or dotabuff links
            search = re.search(r'(?:matchid=|matches\/)([0-9]+)', match_id)
            if search is not None:
                match_id = search.group(1)

        if unicode.isdecimal(unicode(match_id)):
            _replay = Replay.query.filter(Replay.id == match_id).first()

            # If we don't have match_id in database, check if it's a valid match via the WebAPI and if so add it to DB.
            if not _replay:
                try:
                    match_data = steam.api.interface("IDOTA2Match_570").GetMatchDetails(match_id=match_id).get("result")
                    if "error" not in match_data.keys():
                        # Use get_or_create in case of race-hazard where another request (e.g. double submit) has already processed this replay while we were waiting for match_data.
                        # DOESN'T FIX A FOOKIN THINGA
                        _replay, created = Replay.get_or_create(id=match_id, skip_webapi=True)

                        if created:
                            _replay._populate_from_webapi(match_data)
                            db.session.add(_replay)
                            queued = Replay.add_gc_job(_replay, skip_commit=True)
                            if queued:
                                flash("Replay {} was not in our database, so we've added it to the job queue to be parsed!".format(match_id), "info")
                                try:
                                    db.session.commit()
                                except IntegrityError:
                                    db.session.rollback()
                                    pass  # F*****g piece of shit.
                            else:
                                db.session.rollback()
                                error = True
                except steam.api.HTTPError:
                    error = True

            if _replay:
                search_log.replay_id = _replay.id
                search_log.success = True
                db.session.add(search_log)
                db.session.commit()
                return redirect(url_for("replays.replay", _id=match_id))

        # We only get this far if there was an error or the matchid is invalid.
        if error:
            flash("Replay {} was not on our database, and we encountered errors trying to add it.  Please try again later.".format(match_id), "warning")
        else:
            flash("Invalid match id.  If this match id corresponds to a practice match it is also interpreted as invalid - Dotabank is unable to access practice lobby replays.", "danger")

        search_log.success = False
        db.session.add(search_log)
        db.session.commit()
    return redirect(request.referrer or url_for("index"))
Example #44
 def __init__(self, event):
     self.event = event
     self.last_search = Search.last(event)
Example #45
def search():
    form = SearchForm()

    if form.validate_on_submit():
        match_id = form.query.data
        error = False

        search_log = Search(current_user.get_id(), match_id, request.access_route[0])

        # Trim whitespace chars
        match_id = match_id.strip()

        # Normalize input (in case of unicode variants of chars; can break at urllib.urlencode level later on without this)
        match_id = unicodedata.normalize('NFKC', match_id)

        # If not a decimal input, let's try to pull a match id from inputs we recognise
        if not unicode.isdecimal(match_id):
            # Pull out any numbers in the search query and interpret as a match id.
            search = re.search(r'([0-9]+)', match_id)
            if search is not None:
                match_id = search.group(1)

        if unicode.isdecimal(match_id):
            _replay = Replay.query.filter(Replay.id == match_id).first()

            # If we don't have match_id in database, check if it's a valid match via the WebAPI and if so add it to DB.
            if not _replay:
                try:
                    # Only continue if the WebAPI doesn't throw an error for this match ID, and if the match ID for the
                    # info returned matches the match_id we sent (Fixes edge-case bug that downed Dotabank once, where
                    # a user searched 671752079671752079 and the WebAPI returned details for 368506255).
                    match_data = steam.api.interface("IDOTA2Match_570").GetMatchDetails(match_id=match_id).get("result")
                    if "error" not in match_data.keys() and int(match_data.get("match_id")) == int(match_id):
                        # Use get_or_create in case of race-hazard where another request (e.g. double submit) has already processed this replay while we were waiting for match_data.
                        # DOESN'T FIX A FOOKIN THINGA
                        _replay, created = Replay.get_or_create(id=match_id, skip_webapi=True)

                        if created:
                            _replay._populate_from_webapi(match_data)
                            db.session.add(_replay)
                            queued = Replay.add_gc_job(_replay, skip_commit=True)
                            if queued:
                                flash("Replay {} was not in our database, so we've added it to the job queue to be parsed!".format(match_id), "info")
                                try:
                                    db.session.commit()
                                except IntegrityError:
                                    db.session.rollback()
                                    pass  # F*****g piece of shit.
                            else:
                                db.session.rollback()
                                error = True
                except steam.api.HTTPError:
                    error = True

            if _replay:
                search_log.replay_id = _replay.id
                search_log.success = True
                db.session.add(search_log)
                db.session.commit()
                return redirect(url_for("replays.replay", _id=match_id))

        # We only get this far if there was an error or the matchid is invalid.
        if error:
            flash("Replay {} was not on our database, and we encountered errors trying to add it.  Please try again later.".format(match_id), "warning")
        else:
            flash("Invalid match id.  If this match id corresponds to a practice match it is also interpreted as invalid - Dotabank is unable to access practice lobby replays.", "danger")

        search_log.success = False
        db.session.add(search_log)
        db.session.commit()
    return redirect(request.referrer or url_for("index"))
Example #46
def create_search():
    searchID = 1

    def create_search_normal(file):
        f = load_json(file)
        nonlocal searchID
        for (k, val) in f.items():
            name = val['name']['name-USen']
            category = file.split(".")[0]
            id = val['id']
            new = Search(name=name,
                         category=category,
                         id=id,
                         searchID=searchID)
            db.session.add(new)
            db.session.commit()
            searchID += 1

    def create_search_items(files):
        index = 1
        nonlocal searchID
        for string in files:
            housewares = load_json(string)
            for (k, item) in housewares.items():
                name = item[0]['name']['name-USen']
                category = 'items'
                id = index
                new_item = Search(name=name,
                                  category=category,
                                  id=id,
                                  searchID=searchID)
                db.session.add(new_item)
                db.session.commit()
                index += 1
                searchID += 1

    db.session.query(Search).delete()
    #normal add
    files = [
        'villagers.json', 'songs.json', 'sea.json', 'fish.json', 'bugs.json',
        'art.json'
    ]
    for file in files:
        create_search_normal(file)
    #fossil add
    fossils = load_json('fossils.json')
    index = 1
    for (k, fossil) in fossils.items():
        name = fossil['name']['name-USen']
        category = 'fossils'
        id = index
        new = Search(name=name, category=category, id=id, searchID=searchID)
        db.session.add(new)
        db.session.commit()
        index += 1
        searchID += 1
    #items add
    item_files = ['houseware.json', 'misc.json', 'wallmounted.json']
    create_search_items(item_files)
    #construction add
    construction = load_json('construction.json')
    index = 1
    for cons in construction:
        name = cons['name']
        category = 'construction'
        id = index
        new = Search(name=name, category=category, id=id, searchID=searchID)
        db.session.add(new)
        db.session.commit()
        index += 1
        searchID += 1
    #recipe add
    recipes = load_json('recipes.json')
    index = 1
    for recipe in recipes:
        name = recipe['name']
        category = 'recipes'
        id = index
        new_item = Search(name=name,
                          category=category,
                          id=id,
                          searchID=searchID)
        db.session.add(new_item)
        db.session.commit()
        index += 1
        searchID += 1
    #reactions add
    reactions = load_json('reactions.json')
    index = 1
    for reaction in reactions:
        name = reaction['name']
        category = 'reactions'
        id = index
        new_item = Search(name=name,
                          category=category,
                          id=id,
                          searchID=searchID)
        db.session.add(new_item)
        db.session.commit()
        index += 1
        searchID += 1
    #clothing add
    clothes_list = [
        'accessories.json', 'bags.json', 'bottoms.json', 'clothing_other.json',
        'dress_up.json', 'headwear.json', 'shoes.json', 'socks.json',
        'tops.json', 'umbrellas.json'
    ]
    index = 1
    for clothes in clothes_list:
        cloth = load_json(clothes)
        for c in cloth:
            name = c['name']
            category = 'clothes'
            id = index
            new_item = Search(name=name,
                              category=category,
                              id=id,
                              searchID=searchID)
            db.session.add(new_item)
            db.session.commit()
            index += 1
            searchID += 1
    #tools add
    tools = load_json('tools.json')
    index = 1
    for tool in tools:
        name = tool['name']
        category = 'tools'
        id = index
        new_tool = Search(name=name,
                          category=category,
                          id=id,
                          searchID=searchID)
        db.session.add(new_tool)
        db.session.commit()
        index += 1
        searchID += 1
    #floor add
    floors = load_json('floors.json')
    index = 1
    for floor in floors:
        name = floor['name']
        category = 'floors'
        id = index
        new_floor = Search(name=name,
                           category=category,
                           id=id,
                           searchID=searchID)
        db.session.add(new_floor)
        db.session.commit()
        index += 1
        searchID += 1
    #wallpaper add
    wallpapers = load_json('wallpapers.json')
    index = 1
    for wallpaper in wallpapers:
        name = wallpaper['name']
        category = 'wallpapers'
        id = index
        new_wallpaper = Search(name=name,
                               category=category,
                               id=id,
                               searchID=searchID)
        db.session.add(new_wallpaper)
        db.session.commit()
        index += 1
        searchID += 1
    #rug add
    rugs = load_json('rugs.json')
    index = 1
    for rug in rugs:
        name = rug['name']
        category = 'rugs'
        id = index
        new_rug = Search(name=name,
                         category=category,
                         id=id,
                         searchID=searchID)
        db.session.add(new_rug)
        db.session.commit()
        index += 1
        searchID += 1
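
Example #46 repeats the same add-and-commit loop for every category. A compact sketch of a table-driven version covering the flat `{'name': ...}` lists (same `Search` model and `load_json` helper assumed; it batches each category into a single commit rather than committing per row):

def add_category(entries, category, search_id):
    """Insert name-only records; per-category ids start at 1."""
    for index, entry in enumerate(entries, start=1):
        db.session.add(Search(name=entry['name'], category=category,
                              id=index, searchID=search_id))
        search_id += 1
    db.session.commit()
    return search_id

# Usage sketch for the flat-list files:
# searchID = add_category(load_json('tools.json'), 'tools', searchID)
# searchID = add_category(load_json('floors.json'), 'floors', searchID)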
Example #47
 def create_search_blacklist(item):
     search = Search()
     search.blacklist = True
     search.request   = item['request']
     return search