def home(request):
    """Render the search landing page.

    Validates the ``q`` GET parameter with ``forms.SearchForm``; when valid,
    runs the event search (honoring per-user privacy rules), tries to
    recognize ``tag:``/``channel:`` prefixes in the query, paginates the
    results and optionally logs the search.  Without a query it renders an
    empty search page.

    :param request: Django HttpRequest
    :return: rendered ``search/home.html`` response, or
             HttpResponseBadRequest for an invalid ``page`` parameter
    """
    context = {
        'q': None,
        'events_found': None,
        'search_error': None,
        'tags': None,
        'possible_tags': None,
        'channels': None,
        'possible_channels': None,
    }
    if request.GET.get('q'):
        form = forms.SearchForm(request.GET)
    else:
        form = forms.SearchForm()

    if request.GET.get('q') and form.is_valid():
        context['q'] = form.cleaned_data['q']
        privacy_filter = {}
        privacy_exclude = {}
        qs = Event.objects.scheduled_or_processing()
        if request.user.is_active:
            # contributors must not see company-private events; other
            # signed-in users see everything
            if is_contributor(request.user):
                privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        else:
            # anonymous visitors only see public, approved events
            privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
            qs = qs.approved()

        extra = {}
        rest, params = split_search(context['q'], ('tag', 'channel'))
        if params.get('tag'):
            tags = Tag.objects.filter(name__iexact=params['tag'])
            if tags:
                context['q'] = rest
                context['tags'] = extra['tags'] = tags
        else:
            # is the search term possibly a tag?
            all_tag_names = Tag.objects.all().values_list('name', flat=True)
            tags_regex = re.compile(
                r'\b(%s)\b' % (
                    '|'.join(re.escape(x) for x in all_tag_names),
                ),
                re.I
            )
            # next we need to turn all of these into a Tag QuerySet
            # because we can't do `filter(name__in=tags_regex.findall(...))`
            # because that's case sensitive.
            tag_ids = []
            for match in tags_regex.findall(rest):
                tag_ids.extend(
                    Tag.objects.filter(name__iexact=match)
                    .values_list('id', flat=True)
                )
            possible_tags = Tag.objects.filter(
                id__in=tag_ids
            )
            for tag in possible_tags:
                # build the suggested "did you mean tag: X?" query string
                regex = re.compile(re.escape(tag.name), re.I)
                tag._query_string = regex.sub(
                    '',
                    context['q'],
                )
                tag._query_string += ' tag: %s' % tag.name
                # reduce all excess whitespace into 1
                # (raw string: '\s' is an invalid escape in a plain literal)
                tag._query_string = re.sub(
                    r'\s\s+',
                    ' ',
                    tag._query_string
                )
                tag._query_string = tag._query_string.strip()
            context['possible_tags'] = possible_tags

        if params.get('channel'):
            channels = Channel.objects.filter(name__iexact=params['channel'])
            if channels:
                context['q'] = rest
                context['channels'] = extra['channels'] = channels
        else:
            # is the search term possibly a channel?
            all_channel_names = (
                Channel.objects.all().values_list('name', flat=True)
            )
            channels_regex = re.compile(
                r'\b(%s)\b' % (
                    '|'.join(re.escape(x) for x in all_channel_names),
                ),
                re.I
            )
            channel_ids = []
            for match in channels_regex.findall(rest):
                channel_ids.extend(
                    Channel.objects
                    .filter(name__iexact=match).values_list('id', flat=True)
                )
            possible_channels = Channel.objects.filter(
                id__in=channel_ids
            )
            for channel in possible_channels:
                # build the suggested "did you mean channel: X?" query string
                regex = re.compile(re.escape(channel.name), re.I)
                channel._query_string = regex.sub(
                    '',
                    context['q'],
                )
                channel._query_string += ' channel: %s' % channel.name
                # reduce all excess whitespace into 1
                channel._query_string = re.sub(
                    r'\s\s+',
                    ' ',
                    channel._query_string
                )
                channel._query_string = channel._query_string.strip()
            context['possible_channels'] = possible_channels

        events = _search(
            qs,
            context['q'],
            privacy_filter=privacy_filter,
            privacy_exclude=privacy_exclude,
            sort=request.GET.get('sort'),
            **extra
        )
        if not events.count() and utils.possible_to_or_query(context['q']):
            # nothing found; retry with a fuzzy (OR) search
            events = _search(
                qs,
                context['q'],
                privacy_filter=privacy_filter,
                privacy_exclude=privacy_exclude,
                sort=request.GET.get('sort'),
                fuzzy=True
            )

        try:
            page = int(request.GET.get('page', 1))
            if page < 1:
                raise ValueError
        except ValueError:
            return http.HttpResponseBadRequest('Invalid page')

        # we use the paginator() function to get the Paginator
        # instance so we can avoid calling `events.count()` for the
        # header of the page where it says "XX events found"
        try:
            with transaction.atomic():
                pager, events_paged = paginator(events, page, 10)
            _database_error_happened = False
        except DatabaseError:
            _database_error_happened = True
            # don't feed the trolls, just return nothing found
            pager, events_paged = paginator(Event.objects.none(), 1, 10)

        next_page_url = prev_page_url = None

        def url_maker(page):
            # encode to utf-8 since urllib.urlencode can't handle
            # non-ASCII unicode values
            querystring = {'q': context['q'].encode('utf-8'), 'page': page}
            querystring = urllib.urlencode(querystring)
            return '%s?%s' % (reverse('search:home'), querystring)

        if events_paged.has_next():
            next_page_url = url_maker(events_paged.next_page_number())
        if events_paged.has_previous():
            prev_page_url = url_maker(events_paged.previous_page_number())

        context['events_paged'] = events_paged
        context['next_page_url'] = next_page_url
        context['prev_page_url'] = prev_page_url
        context['events_found'] = pager.count
        # NOTE: this overwrites any 'channels' set by the channel-parsing
        # branch above with the channels of the events actually shown
        context['channels'] = get_event_channels(events_paged)

        log_searches = settings.LOG_SEARCHES and '_nolog' not in request.GET
        if (
            log_searches and
            not _database_error_happened and
            request.GET['q'].strip()
        ):
            logged_search = LoggedSearch.objects.create(
                term=request.GET['q'][:200],
                results=events.count(),
                page=page,
                user=(
                    request.user
                    if request.user.is_authenticated()
                    else None
                )
            )
            request.session['logged_search'] = (
                logged_search.pk,
                time.time()
            )
    elif request.GET.get('q'):
        context['search_error'] = form.errors['q']
    else:
        context['events'] = []

    context['form'] = form
    return render(request, 'search/home.html', context)
def home(request):
    """Render the search landing page (simple variant).

    Validates the ``q`` GET parameter with ``forms.SearchForm``; when valid,
    runs the event search with per-user privacy rules applied and paginates
    the results.  Without a query it renders an empty search page.

    :param request: Django HttpRequest
    :return: rendered ``search/home.html`` response, or
             HttpResponseBadRequest for an invalid ``page`` parameter
    """
    context = {
        'q': None,
        'events_found': None,
        'search_error': None,
    }
    if request.GET.get('q'):
        form = forms.SearchForm(request.GET)
    else:
        form = forms.SearchForm()

    if request.GET.get('q') and form.is_valid():
        # use the cleaned value, not the raw GET parameter, since the
        # form has just been validated
        context['q'] = form.cleaned_data['q']
        privacy_filter = {}
        privacy_exclude = {}
        if request.user.is_active:
            # contributors must not see company-private events
            if is_contributor(request.user):
                privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        else:
            # anonymous visitors only see public events
            privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}

        events = _search(
            context['q'],
            privacy_filter=privacy_filter,
            privacy_exclude=privacy_exclude,
            sort=request.GET.get('sort'),
        )

        try:
            page = int(request.GET.get('page', 1))
            if page < 1:
                raise ValueError
        except ValueError:
            return http.HttpResponseBadRequest('Invalid page')

        # we use the paginator() function to get the Paginator
        # instance so we can avoid calling `events.count()` for the
        # header of the page where it says "XX events found"
        pager, events_paged = paginator(events, page, 10)
        next_page_url = prev_page_url = None

        def url_maker(page):
            # encode to utf-8 since urllib.urlencode raises
            # UnicodeEncodeError on non-ASCII unicode values
            querystring = {'q': context['q'].encode('utf-8'), 'page': page}
            querystring = urllib.urlencode(querystring)
            return '%s?%s' % (reverse('search:home'), querystring)

        if events_paged.has_next():
            next_page_url = url_maker(events_paged.next_page_number())
        if events_paged.has_previous():
            prev_page_url = url_maker(events_paged.previous_page_number())

        context['events_paged'] = events_paged
        context['next_page_url'] = next_page_url
        context['prev_page_url'] = prev_page_url
        context['events_found'] = pager.count
    elif request.GET.get('q'):
        context['search_error'] = form.errors['q']
    else:
        context['events'] = []

    context['form'] = form
    return render(request, 'search/home.html', context)
def home(request):
    """Render the search landing page.

    Validates the ``q`` GET parameter with ``forms.SearchForm``; when valid,
    runs the event search (honoring per-user privacy rules), tries to
    recognize ``tag:``/``channel:`` prefixes in the query, paginates the
    results and optionally logs the search.  Without a query it renders an
    empty search page.

    :param request: Django HttpRequest
    :return: rendered ``search/home.html`` response, or
             HttpResponseBadRequest for an invalid ``page`` parameter
    """
    context = {
        'q': None,
        'events_found': None,
        'search_error': None,
        'tags': None,
        'possible_tags': None,
        'channels': None,
        'possible_channels': None,
    }
    if request.GET.get('q'):
        form = forms.SearchForm(request.GET)
    else:
        form = forms.SearchForm()

    if request.GET.get('q') and form.is_valid():
        context['q'] = form.cleaned_data['q']
        privacy_filter = {}
        privacy_exclude = {}
        qs = Event.objects.scheduled_or_processing()
        if request.user.is_active:
            # contributors must not see company-private events; other
            # signed-in users see everything
            if is_contributor(request.user):
                privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        else:
            # anonymous visitors only see public, approved events
            privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
            qs = qs.approved()

        extra = {}
        rest, params = split_search(context['q'], ('tag', 'channel'))
        if params.get('tag'):
            tags = Tag.objects.filter(name__iexact=params['tag'])
            if tags:
                context['q'] = rest
                context['tags'] = extra['tags'] = tags
        else:
            # is the search term possibly a tag?
            all_tag_names = Tag.objects.all().values_list('name', flat=True)
            tags_regex = re.compile(
                r'\b(%s)\b' % (
                    '|'.join(re.escape(x) for x in all_tag_names),
                ), re.I)
            # next we need to turn all of these into a Tag QuerySet
            # because we can't do `filter(name__in=tags_regex.findall(...))`
            # because that's case sensitive.
            tag_ids = []
            for match in tags_regex.findall(rest):
                tag_ids.extend(
                    Tag.objects.filter(name__iexact=match).values_list(
                        'id', flat=True))
            possible_tags = Tag.objects.filter(id__in=tag_ids)
            for tag in possible_tags:
                # build the suggested "did you mean tag: X?" query string
                regex = re.compile(re.escape(tag.name), re.I)
                tag._query_string = regex.sub(
                    '',
                    context['q'],
                )
                tag._query_string += ' tag: %s' % tag.name
                # reduce all excess whitespace into 1
                # (raw string: '\s' is an invalid escape in a plain literal)
                tag._query_string = re.sub(r'\s\s+', ' ', tag._query_string)
                tag._query_string = tag._query_string.strip()
            context['possible_tags'] = possible_tags

        if params.get('channel'):
            channels = Channel.objects.filter(name__iexact=params['channel'])
            if channels:
                context['q'] = rest
                context['channels'] = extra['channels'] = channels
        else:
            # is the search term possibly a channel?
            all_channel_names = (
                Channel.objects.all().values_list('name', flat=True))
            channels_regex = re.compile(
                r'\b(%s)\b' % (
                    '|'.join(re.escape(x) for x in all_channel_names),
                ), re.I)
            channel_ids = []
            for match in channels_regex.findall(rest):
                channel_ids.extend(
                    Channel.objects.filter(name__iexact=match).values_list(
                        'id', flat=True))
            possible_channels = Channel.objects.filter(id__in=channel_ids)
            for channel in possible_channels:
                # build the suggested "did you mean channel: X?" query string
                regex = re.compile(re.escape(channel.name), re.I)
                channel._query_string = regex.sub(
                    '',
                    context['q'],
                )
                channel._query_string += ' channel: %s' % channel.name
                # reduce all excess whitespace into 1
                channel._query_string = re.sub(
                    r'\s\s+', ' ', channel._query_string)
                channel._query_string = channel._query_string.strip()
            context['possible_channels'] = possible_channels

        events = _search(qs,
                         context['q'],
                         privacy_filter=privacy_filter,
                         privacy_exclude=privacy_exclude,
                         sort=request.GET.get('sort'),
                         **extra)
        if not events.count() and utils.possible_to_or_query(context['q']):
            # nothing found; retry with a fuzzy (OR) search
            events = _search(qs,
                             context['q'],
                             privacy_filter=privacy_filter,
                             privacy_exclude=privacy_exclude,
                             sort=request.GET.get('sort'),
                             fuzzy=True)

        try:
            page = int(request.GET.get('page', 1))
            if page < 1:
                raise ValueError
        except ValueError:
            return http.HttpResponseBadRequest('Invalid page')

        # we use the paginator() function to get the Paginator
        # instance so we can avoid calling `events.count()` for the
        # header of the page where it says "XX events found"
        try:
            with transaction.atomic():
                pager, events_paged = paginator(events, page, 10)
            _database_error_happened = False
        except DatabaseError:
            _database_error_happened = True
            # don't feed the trolls, just return nothing found
            pager, events_paged = paginator(Event.objects.none(), 1, 10)

        next_page_url = prev_page_url = None

        def url_maker(page):
            # encode to utf-8 since urllib.urlencode can't handle
            # non-ASCII unicode values
            querystring = {'q': context['q'].encode('utf-8'), 'page': page}
            querystring = urllib.urlencode(querystring)
            return '%s?%s' % (reverse('search:home'), querystring)

        if events_paged.has_next():
            next_page_url = url_maker(events_paged.next_page_number())
        if events_paged.has_previous():
            prev_page_url = url_maker(events_paged.previous_page_number())

        context['events_paged'] = events_paged
        context['next_page_url'] = next_page_url
        context['prev_page_url'] = prev_page_url
        context['events_found'] = pager.count
        # NOTE: this overwrites any 'channels' set by the channel-parsing
        # branch above with the channels of the events actually shown
        context['channels'] = get_event_channels(events_paged)

        log_searches = settings.LOG_SEARCHES and '_nolog' not in request.GET
        if (log_searches and not _database_error_happened
                and request.GET['q'].strip()):
            logged_search = LoggedSearch.objects.create(
                term=request.GET['q'][:200],
                results=events.count(),
                page=page,
                user=(
                    request.user
                    if request.user.is_authenticated()
                    else None
                ))
            request.session['logged_search'] = (logged_search.pk, time.time())
    elif request.GET.get('q'):
        context['search_error'] = form.errors['q']
    else:
        context['events'] = []

    context['form'] = form
    return render(request, 'search/home.html', context)
def home(request):
    """Render the search landing page (fuzzy-retry variant).

    Validates the ``q`` GET parameter with ``forms.SearchForm``; when valid,
    runs the event search with per-user privacy rules applied, retries with
    a fuzzy (OR) search when nothing is found, and paginates the results.
    Without a query it renders an empty search page.

    :param request: Django HttpRequest
    :return: rendered ``search/home.html`` response, or
             HttpResponseBadRequest for an invalid ``page`` parameter
    """
    context = {
        'q': None,
        'events_found': None,
        'search_error': None,
    }
    if request.GET.get('q'):
        form = forms.SearchForm(request.GET)
    else:
        form = forms.SearchForm()

    if request.GET.get('q') and form.is_valid():
        context['q'] = form.cleaned_data['q']
        privacy_filter = {}
        privacy_exclude = {}
        if request.user.is_active:
            # contributors must not see company-private events
            if is_contributor(request.user):
                privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        else:
            # anonymous visitors only see public events
            privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}

        events = _search(
            context['q'],
            privacy_filter=privacy_filter,
            privacy_exclude=privacy_exclude,
            sort=request.GET.get('sort'),
        )
        if not events.count() and utils.possible_to_or_query(context['q']):
            # nothing found; retry with a fuzzy (OR) search
            events = _search(
                context['q'],
                privacy_filter=privacy_filter,
                privacy_exclude=privacy_exclude,
                sort=request.GET.get('sort'),
                fuzzy=True
            )

        try:
            page = int(request.GET.get('page', 1))
            if page < 1:
                raise ValueError
        except ValueError:
            return http.HttpResponseBadRequest('Invalid page')

        # we use the paginator() function to get the Paginator
        # instance so we can avoid calling `events.count()` for the
        # header of the page where it says "XX events found"
        try:
            pager, events_paged = paginator(events, page, 10)
        except DatabaseError:
            # If the fulltext SQL causes a low-level Postgres error,
            # Django re-wraps the exception as a django.db.utils.DatabaseError
            # exception and then unfortunately you can't simply do
            # django.db.transaction.rollback() because the connection is dirty
            # deeper down.
            # Thanks http://stackoverflow.com/a/7753748/205832
            # This is supposedly fixed in Django 1.6
            connection._rollback()
            # don't feed the trolls, just return nothing found
            pager, events_paged = paginator(Event.objects.none(), 1, 10)

        next_page_url = prev_page_url = None

        def url_maker(page):
            # encode to utf-8 since urllib.urlencode raises
            # UnicodeEncodeError on non-ASCII unicode values
            querystring = {'q': context['q'].encode('utf-8'), 'page': page}
            querystring = urllib.urlencode(querystring)
            return '%s?%s' % (reverse('search:home'), querystring)

        if events_paged.has_next():
            next_page_url = url_maker(events_paged.next_page_number())
        if events_paged.has_previous():
            prev_page_url = url_maker(events_paged.previous_page_number())

        context['events_paged'] = events_paged
        context['next_page_url'] = next_page_url
        context['prev_page_url'] = prev_page_url
        context['events_found'] = pager.count
    elif request.GET.get('q'):
        context['search_error'] = form.errors['q']
    else:
        context['events'] = []

    context['form'] = form
    return render(request, 'search/home.html', context)