def meeting_main():
    """Render the meetings page.

    Query args: ``year``, ``date`` ('yyyy-MM-dd') and ``type``
    ('list' or 'calendar', default 'list').  The list view short-circuits;
    the calendar view resolves a year/date and shows the meetings of the
    selected day plus one calendar entry per distinct meeting date.
    """
    year = request.args.get('year')
    date_ = request.args.get('date')
    view_type = request.args.get('type', 'list')
    if view_type == 'list':
        return render_template('meetings-list.html')
    # find the right calendar
    if not year:
        if date_:
            # derive the year from the requested date
            year = format_date(date(*map(int, date_.split('-'))), 'yyyy')
        else:
            year = date.today().year
    if not date_:
        # find the latest meeting date for the selected year
        d = latest_meeting_date(year)
        if d:
            # redirect so the URL always carries an explicit date
            return redirect(
                url_for('meeting_main', type='calendar',
                        date=format_date(d, 'yyyy-MM-dd')))
    # meetings of the day (optional)
    meetings_of_the_day = None
    if date_:
        if not date_re.match(date_):
            abort(404)
        date_ = date(*map(int, date_.split('-')))
        meetings_of_the_day = Meeting.query.filter_by(date=date_)
    # meetings of the year
    meetings_of_the_year =\
        db_session.query(Meeting.date)\
        .filter(Meeting.year == year)\
        .group_by(Meeting.date)
    # lazily build one calendar entry per distinct meeting date
    meetings_of_the_year = ({
        'date': meeting_date,
        'url': url_for('meeting_main', type='calendar',
                       date=format_date(meeting_date, 'yyyy-MM-dd'))
    } for (meeting_date, ) in meetings_of_the_year)
    return render_template(
        'meetings-calendar.html',
        year=int(year),
        date=date_,
        meetings_of_the_year=meetings_of_the_year,
        meetings_of_the_day=meetings_of_the_day,
    )
def meeting_main():
    """Render the meetings page (list or calendar view).

    Query args: ``year``, ``date`` ('yyyy-MM-dd') and ``type``
    ('list' or 'calendar', default 'list').
    """
    year = request.args.get('year')
    date_ = request.args.get('date')
    view_type = request.args.get('type', 'list')
    if view_type == 'list':
        return render_template('meetings-list.html')
    # find the right calendar
    if not year:
        if date_:
            # derive the year from the requested date
            year = format_date(date(*map(int, date_.split('-'))), 'yyyy')
        else:
            year = date.today().year
    if not date_:
        # find the latest meeting date for the selected year
        d = latest_meeting_date(year)
        if d:
            # redirect so the URL always carries an explicit date
            return redirect(url_for('meeting_main', type='calendar',
                                    date=format_date(d, 'yyyy-MM-dd')))
    # meetings of the day (optional)
    meetings_of_the_day = None
    if date_:
        if not date_re.match(date_):
            abort(404)
        date_ = date(*map(int, date_.split('-')))
        meetings_of_the_day = Meeting.query.filter_by(date=date_)
    # meetings of the year
    meetings_of_the_year =\
        db_session.query(Meeting.date)\
        .filter(Meeting.year == year)\
        .group_by(Meeting.date)
    # lazily build one calendar entry per distinct meeting date
    meetings_of_the_year = (
        {
            'date': meeting_date,
            'url': url_for('meeting_main', type='calendar',
                           date=format_date(meeting_date, 'yyyy-MM-dd'))
        }
        for (meeting_date,) in meetings_of_the_year
    )
    return render_template('meetings-calendar.html',
                           year=int(year),
                           date=date_,
                           meetings_of_the_year=meetings_of_the_year,
                           meetings_of_the_day=meetings_of_the_day,
                           )
def meeting_main():
    """Meetings page: a year overview plus, optionally, one day's meetings."""
    year = request.args.get('year', date.today().year)
    requested_day = request.args.get('date')

    # meetings of the requested day, if a syntactically valid date was given
    meetings_of_the_day = None
    if requested_day:
        if not date_re.match(requested_day):
            abort(404)
        requested_day = date(*map(int, requested_day.split('-')))
        meetings_of_the_day = Meeting.query.filter_by(date=requested_day)

    # one distinct meeting date per row for the selected year
    distinct_dates = (db_session.query(Meeting.date)
                      .filter(Meeting.year == year)
                      .group_by(Meeting.date))
    meetings_of_the_year = (
        {'date': meeting_date,
         'url': url_for('meeting_main',
                        date=format_date(meeting_date, 'yyyy-MM-dd'))}
        for (meeting_date,) in distinct_dates)

    return render_template('meetings.html',
                           year=int(year),
                           date=requested_day,
                           meetings_of_the_year=meetings_of_the_year,
                           meetings_of_the_day=meetings_of_the_day,
                           )
def jinja2_filter_format_date(date_str, format='long'):
    """Jinja filter: parse a date string and render it with babel.

    Falsy input yields the empty string.
    """
    if not date_str:
        return ""
    # drop any timezone so babel formats the naive value as-is
    parsed = dateutil.parser.parse(date_str).replace(tzinfo=None)
    return format_date(parsed, format=format)
def inject_context():
    """Template context processor: expose event settings and app helpers."""
    registration_open = mdl.Setting.get('registration_open', default=False)
    event_date = mdl.Setting.get('event_date', None)
    location_display = mdl.Setting.get('event_location', '')
    coords = mdl.Setting.get('location_coords', '')

    upcoming = event_date and event_date >= datetime.now().date()
    if upcoming:
        date_display = format_date(event_date, format='long')
    else:
        # past or unset event: hide the date and the location together
        date_display = ''
        location_display = ''
        coords = ''

    return dict(
        Role=mdl.Role,
        Setting=mdl.Setting,
        __version__=__version__,
        comment_prefix=COMMENT_PREFIX,
        date_display=date_display,
        localconf=current_app.localconf,
        location_coords=coords,
        location_display=location_display,
        mdtemplate=mdtemplate,
        registration_open=registration_open,
        tabular_prefix=TABULAR_PREFIX,
    )
def macro(self, content, arguments, page_url, alternative):
    """Render a formatted date; with no arguments, the current UTC time."""
    if arguments is None:
        tm = time.time()  # always UTC
    else:
        # XXX looks like args are split at ':' ->
        # <Arguments([u'2010-12-31T23', u'59', u'00'], {})>
        tm = self.parse_time(arguments[0])
    return format_date(datetime.utcfromtimestamp(tm))
def local_date(datestamp):
    """Return a babel-formatted local date, or None for a falsy datestamp."""
    # flask.ext.* was a deprecated alias removed in Flask 1.0;
    # import the extension package directly
    from flask_babel import format_date
    if datestamp:
        return format_date(datestamp)
def _value(self):
    """Value shown in the form field: raw input if present, else the
    stored date rendered with a numeric-month locale pattern."""
    if self.raw_data:
        return ' '.join(self.raw_data)
    if not self.data:
        return ''
    pattern = get_locale().date_formats['short'].pattern
    # force numerical months
    pattern = pattern.replace('MMMM', 'MM').replace('MMM', 'MM')
    return format_date(self.data, pattern)
def test_macro(self):
    """Exercise Macro.macro with and without an explicit timestamp."""
    macro_obj = Macro()

    # when arguments is None the macro formats "now" in UTC
    actual = macro_obj.macro('content', None, 'page_url', 'alternative')
    expected_now = format_date(datetime.utcfromtimestamp(time.time()))
    assert actual == expected_now

    # explicit ISO timestamp; extra arguments are ignored
    arguments = ['2011-08-07T11:11:11+0533', 'argument2']
    result = macro_obj.macro('content', arguments, 'page_url', 'alternative')
    assert result == u'Aug 7, 2011'
def prettyDate(dateStr, forceYear=False, addPrefix=False):
    """Render a 'Y-M-D' string as a friendly, localized date.

    Zero-valued components mark unknown parts: year 0 means the whole
    date is unknown, month 0 keeps only the year, day 0 keeps year and
    month.  Dates within a week of today use relative wording
    ('tonight', 'tomorrow night', ...).

    :param dateStr: date as 'Y-M-D'; falsy input yields 'date.unknown'
    :param forceYear: include the year even when it is the current year
    :param addPrefix: wrap the result in an 'in'/'on' preposition phrase
    """
    if not dateStr:
        return gettext('date.unknown')
    year, month, day = [int(component) for component in dateStr.split('-')]
    if year == 0:
        return gettext('date.unknown')
    if month == 0:
        # only the year is known
        parsedDate = date(year, 1, 1)
        format = gettext('date.format.year_only')
        if addPrefix:
            format = gettext('date.in_year_%(year)s', year=format)
    elif day == 0:
        # year and month are known, the day is not
        parsedDate = date(year, month, 1)
        format = gettext('date.format.year_month')
        if addPrefix:
            format = gettext('date.in_month_%(month)s', month=format)
    else:
        parsedDate = date(year, month, day)
        # drop the year for current-year dates unless forced
        format = gettext(
            'date.format.date_with_year') if forceYear or year != date.today(
        ).year else gettext('date.format.date_without_year')
        if addPrefix:
            format = gettext('date.on_day_%(day)s', day=format)
    daysDiff = (parsedDate - date.today()).days
    # relative wording for dates close to today
    if daysDiff == 0:
        return gettext('date.tonight')
    elif daysDiff == -1:
        return gettext('date.yesterday_night')
    elif daysDiff == 1:
        return gettext('date.tomorrow_night')
    elif daysDiff > 0 and daysDiff < 7:
        format = gettext('date.format.next_day')
    elif daysDiff < 0 and daysDiff > -7:
        format = gettext('date.format.previous_day')
    formattedDate = format_date(parsedDate, format)
    return formattedDate
def format_date(item):
    """Human-readable date for an item dict.

    Falls back to the bare year when the date is approximate ("circa")
    or when month/day components are missing.
    """
    year = item.get("year")
    if item.get("year_circa"):
        # approximate dates are shown as the year only
        return year
    # year_from == year_to == int(year) below
    year_from = item.get("year_from")
    month = item.get("month")
    day = item.get("day")
    if all([year_from, month, day]):
        full_date = datetime.date(year_from, month, day)
        return babel.format_date(full_date, format="d MMMM y")
    if all([year_from, month]):
        # babel is unable to format month correctly for Russian language
        # using own implementation here
        return translate_month(month) + ", " + year
    return year
def format_date(item):
    """Human-readable date for an item dict; bare year for approximate or
    incomplete dates."""
    year = item.get("year")
    if item.get("year_circa"):
        return year
    # year_from == year_to == int(year) below
    year_from, month, day = (item.get(key)
                             for key in ("year_from", "month", "day"))
    if year_from and month and day:
        return babel.format_date(datetime.date(year_from, month, day),
                                 format="d MMMM y")
    elif year_from and month:
        # babel is unable to format month correctly for Russian language
        # using own implementation here
        return translate_month(month) + ", " + year
    else:
        return year
def prettyDate(dateStr, forceYear=False, addPrefix=False):
    """Render a 'Y-M-D' string as a friendly, localized date.

    Zero-valued components mark unknown parts: year 0 means the whole
    date is unknown, month 0 keeps only the year, day 0 keeps year and
    month.  Dates within a week of today use relative wording
    ('tonight', 'tomorrow night', ...).

    :param dateStr: date as 'Y-M-D'; falsy input yields 'date.unknown'
    :param forceYear: include the year even when it is the current year
    :param addPrefix: wrap the result in an 'in'/'on' preposition phrase
    """
    if not dateStr:
        return gettext('date.unknown')
    year, month, day = [int(component) for component in dateStr.split('-')]
    if year == 0:
        return gettext('date.unknown')
    if month == 0:
        # only the year is known
        parsedDate = date(year, 1, 1)
        format = gettext('date.format.year_only')
        if addPrefix:
            format = gettext('date.in_year_%(year)s', year=format)
    elif day == 0:
        # year and month are known, the day is not
        parsedDate = date(year, month, 1)
        format = gettext('date.format.year_month')
        if addPrefix:
            format = gettext('date.in_month_%(month)s', month=format)
    else:
        parsedDate = date(year, month, day)
        # drop the year for current-year dates unless forced
        format = gettext('date.format.date_with_year') if forceYear or year != date.today().year else gettext('date.format.date_without_year')
        if addPrefix:
            format = gettext('date.on_day_%(day)s', day=format)
    daysDiff = (parsedDate - date.today()).days
    # relative wording for dates close to today
    if daysDiff == 0:
        return gettext('date.tonight')
    elif daysDiff == -1:
        return gettext('date.yesterday_night')
    elif daysDiff == 1:
        return gettext('date.tomorrow_night')
    elif daysDiff > 0 and daysDiff < 7:
        format = gettext('date.format.next_day')
    elif daysDiff < 0 and daysDiff > -7:
        format = gettext('date.format.previous_day')
    formattedDate = format_date(parsedDate, format)
    return formattedDate
def index():
    """Render the search/index page.

    Without query parameters the plain index page is returned.  Otherwise
    the query is executed and every result is post-processed (optional
    HTTPS rewriting, highlighting, url shortening, date prettifying).

    Supported outputs: html, json, csv, rss.
    """
    if not request.args and not request.form:
        return render(
            'index.html',
        )

    try:
        search = Search(request)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; any bad query still falls back to the index
        return render(
            'index.html',
        )

    search.results, search.suggestions,\
        search.answers, search.infoboxes = search.search(request)

    for result in search.results:

        if not search.paging and engines[result['engine']].paging:
            search.paging = True

        # check if HTTPS rewrite is required
        if settings['server']['https_rewrite']\
           and result['parsed_url'].scheme == 'http':

            skip_https_rewrite = False

            # check if HTTPS rewrite is possible
            for target, rules, exclusions in https_rules:

                # check if target regex match with url
                if target.match(result['url']):
                    # process exclusions
                    for exclusion in exclusions:
                        # check if exclusion match with url
                        if exclusion.match(result['url']):
                            skip_https_rewrite = True
                            break

                    # skip https rewrite if required
                    if skip_https_rewrite:
                        break

                    # process rules
                    for rule in rules:
                        try:
                            # TODO, precompile rule
                            p = re.compile(rule[0])

                            # rewrite url if possible
                            new_result_url = p.sub(rule[1], result['url'])
                        except Exception:
                            # broken rewrite rule: stop processing this
                            # target (was a bare "except:")
                            break

                        # parse new url
                        new_parsed_url = urlparse(new_result_url)

                        # continiue if nothing was rewritten
                        if result['url'] == new_result_url:
                            continue

                        # get domainname from result
                        # TODO, does only work correct with TLD's like
                        #  asdf.com, not for asdf.com.de
                        # TODO, using publicsuffix instead of this rewrite rule
                        old_result_domainname = '.'.join(
                            result['parsed_url'].hostname.split('.')[-2:])
                        new_result_domainname = '.'.join(
                            new_parsed_url.hostname.split('.')[-2:])

                        # check if rewritten hostname is the same,
                        # to protect against wrong or malicious rewrite rules
                        if old_result_domainname == new_result_domainname:
                            # set new url
                            result['url'] = new_result_url

                    # target has matched, do not search over the other rules
                    break

        if search.request_data.get('format', 'html') == 'html':
            if 'content' in result:
                result['content'] = highlight_content(
                    result['content'], search.query.encode('utf-8'))  # noqa
            result['title'] = highlight_content(
                result['title'], search.query.encode('utf-8'))
        else:
            if 'content' in result:
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(html_to_text(result['title'])
                                       .strip().split())

        # shorten overly long urls for display
        if len(result['url']) > 74:
            url_parts = result['url'][:35], result['url'][-35:]
            result['pretty_url'] = u'{0}[...]{1}'.format(*url_parts)
        else:
            result['pretty_url'] = result['url']

        for engine in result['engines']:
            if engine in favicons:
                result['favicon'] = engine

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            if result['publishedDate'].replace(tzinfo=None)\
               >= datetime.now() - timedelta(days=1):
                # fresh result: show relative age instead of a date
                timedifference = datetime.now() - result['publishedDate']\
                    .replace(tzinfo=None)
                minutes = int((timedifference.seconds / 60) % 60)
                hours = int(timedifference.seconds / 60 / 60)
                if hours == 0:
                    result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)  # noqa
                else:
                    result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
            else:
                result['pubdate'] = result['publishedDate']\
                    .strftime('%a, %d %b %Y %H:%M:%S %z')
                result['publishedDate'] = format_date(result['publishedDate'])

    if search.request_data.get('format') == 'json':
        return Response(json.dumps({'query': search.query,
                                    'results': search.results}),
                        mimetype='application/json')
    elif search.request_data.get('format') == 'csv':
        csv = UnicodeWriter(cStringIO.StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score')
        if search.results:
            csv.writerow(keys)
            for row in search.results:
                row['host'] = row['parsed_url'].netloc
                csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response
    elif search.request_data.get('format') == 'rss':
        response_rss = render(
            'opensearch_response_rss.xml',
            results=search.results,
            q=search.request_data['q'],
            number_of_results=len(search.results),
            base_url=get_base_url()
        )
        return Response(response_rss, mimetype='text/xml')

    return render(
        'results.html',
        results=search.results,
        q=search.request_data['q'],
        selected_categories=search.categories,
        paging=search.paging,
        pageno=search.pageno,
        base_url=get_base_url(),
        suggestions=search.suggestions,
        answers=search.answers,
        infoboxes=search.infoboxes,
        theme=get_current_theme_name()
    )
def date_format(value, format="EEEE d. MMMM yyyy"):
    """Jinja filter: render *value* with a CLDR date pattern
    (default e.g. 'Friday 6. June 2014')."""
    formatted = format_date(value, format)
    return formatted
def date(d, format='short'):
    """Jinja filter: babel-formatted date, 'short' style by default."""
    rendered = format_date(d, format)
    return rendered
def date(value, format="EE, d MMMM y"):
    """Format *value* as a date; non-date values are converted via
    local_dt() first (presumably timestamps — TODO confirm against callers)."""
    # datetime.datetime is a subclass of datetime.date, so both dates and
    # datetimes take the direct branch
    target = value if isinstance(value, datetime.date) else local_dt(value)
    return babel.format_date(target, format)
def render_view(self, field):
    """Read-only rendering: the formatted date, or empty for no data."""
    if not field.object_data:
        return u''
    return format_date(field.object_data)
def format_time(value, format='full'):
    """Format a time value with babel.

    Strings are parsed as 'HH:MM:SS' first; any other value is handed to
    babel directly.

    :param value: a str/unicode 'HH:MM:SS' or a time-like object
    :param format: babel format name ('full', 'long', 'medium', 'short')
    """
    # isinstance instead of "type(...) is": also accepts str subclasses;
    # unicode is the Python 2 text type
    if isinstance(value, (str, unicode)):
        parsed = datetime.datetime.strptime(value, '%H:%M:%S')
        # BUG FIX: this branch previously called babel.format_date on a
        # time object; times must be rendered with babel.format_time
        return babel.format_time(parsed.time(), format)
    return babel.format_time(value, format)
def filter_date(value):
    """Jinja filter: render a date like 'Mon 01 Jan 2001'."""
    return format_date(value, "EE dd MMM y")
def date(datetime, ref=current_user, year=False):
    """Format *datetime* in *ref*'s timezone using a per-locale pattern.

    :param year: append ' yyyy' to the pattern when true
    """
    pattern = date_formats.get(get_locale().language, 'EEE, dd MMM')
    if year:
        pattern += ' yyyy'
    local_day = in_timezone_of(datetime, ref).date()
    return format_date(local_day, pattern, rebase=False)
def filter_date_from_unix(value):
    """Jinja filter: render a unix timestamp like 'Mon 01 Jan 2001'.

    Note: fromtimestamp converts using the server's local timezone.
    """
    as_datetime = datetime.datetime.fromtimestamp(value)
    return format_date(as_datetime, "EE dd MMM y")
def index():
    """Render the search/index page.

    Runs the search through the plugin hooks (pre_search / post_search /
    on_result) and post-processes each result for display.

    Supported outputs: html, json, csv, rss.
    """
    if not request.args and not request.form:
        return render(
            'index.html',
        )

    try:
        search = Search(request)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; a bad query still falls back to the index
        return render(
            'index.html',
        )

    if plugins.call('pre_search', request, locals()):
        search.search(request)

    plugins.call('post_search', request, locals())

    for result in search.result_container.get_ordered_results():

        plugins.call('on_result', request, locals())
        if not search.paging and engines[result['engine']].paging:
            search.paging = True

        if search.request_data.get('format', 'html') == 'html':
            if 'content' in result:
                result['content'] = highlight_content(result['content'],
                                                      search.query.encode('utf-8'))  # noqa
            result['title'] = highlight_content(result['title'],
                                                search.query.encode('utf-8'))
        else:
            if result.get('content'):
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(html_to_text(result['title'])
                                       .strip().split())

        result['pretty_url'] = prettify_url(result['url'])

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
            if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
                # fresh result: show relative age instead of a date
                timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)
                minutes = int((timedifference.seconds / 60) % 60)
                hours = int(timedifference.seconds / 60 / 60)
                if hours == 0:
                    result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)
                else:
                    result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
            else:
                result['publishedDate'] = format_date(result['publishedDate'])

    if search.request_data.get('format') == 'json':
        return Response(json.dumps({'query': search.query,
                                    'results': search.result_container.get_ordered_results()}),
                        mimetype='application/json')
    elif search.request_data.get('format') == 'csv':
        csv = UnicodeWriter(cStringIO.StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score')
        csv.writerow(keys)
        for row in search.result_container.get_ordered_results():
            row['host'] = row['parsed_url'].netloc
            csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response
    elif search.request_data.get('format') == 'rss':
        response_rss = render(
            'opensearch_response_rss.xml',
            results=search.result_container.get_ordered_results(),
            q=search.request_data['q'],
            number_of_results=search.result_container.results_length(),
            base_url=get_base_url()
        )
        return Response(response_rss, mimetype='text/xml')

    return render(
        'results.html',
        results=search.result_container.get_ordered_results(),
        q=search.request_data['q'],
        selected_categories=search.categories,
        paging=search.paging,
        pageno=search.pageno,
        base_url=get_base_url(),
        suggestions=search.result_container.suggestions,
        answers=search.result_container.answers,
        infoboxes=search.result_container.infoboxes,
        theme=get_current_theme_name(),
        favicons=global_favicons[themes.index(get_current_theme_name())]
    )
def local_date(datestamp):
    """Returns a babel formatted local date (None for falsy input)."""
    if not datestamp:
        return None
    return format_date(datestamp)
def created_on(self):
    """Creation date of the wrapped model, babel-formatted."""
    created = self.model.created_on
    return format_date(created)
def render_view(self, field):
    """Read-only rendering of the field's stored date."""
    value = field.object_data
    return format_date(value)
def _jinja2_filter_date(date):
    """Jinja filter: default babel date formatting."""
    formatted = format_date(date)
    return formatted
def index():
    """Render the search/index page.

    Supported outputs: html, json, csv, rss.
    """
    if not request.args and not request.form:
        return render('index.html')

    try:
        search = Search(request)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; a bad query still falls back to the index
        return render('index.html')

    # TODO moar refactor - do_search integration into Search class
    search.results, search.suggestions = do_search(search.query,
                                                   request,
                                                   search.engines,
                                                   search.pageno,
                                                   search.lang)

    for result in search.results:

        if not search.paging and engines[result['engine']].paging:
            search.paging = True

        if search.request_data.get('format', 'html') == 'html':
            if 'content' in result:
                result['content'] = highlight_content(result['content'],
                                                      search.query.encode('utf-8'))  # noqa
            result['title'] = highlight_content(result['title'],
                                                search.query.encode('utf-8'))
        else:
            if 'content' in result:
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(html_to_text(result['title'])
                                       .strip().split())

        # shorten overly long urls for display
        if len(result['url']) > 74:
            url_parts = result['url'][:35], result['url'][-35:]
            result['pretty_url'] = u'{0}[...]{1}'.format(*url_parts)
        else:
            result['pretty_url'] = result['url']

        for engine in result['engines']:
            if engine in favicons:
                result['favicon'] = engine

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            if result['publishedDate'].replace(tzinfo=None)\
               >= datetime.now() - timedelta(days=1):
                # fresh result: show relative age instead of a date
                timedifference = datetime.now() - result['publishedDate']\
                    .replace(tzinfo=None)
                minutes = int((timedifference.seconds / 60) % 60)
                hours = int(timedifference.seconds / 60 / 60)
                if hours == 0:
                    result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)  # noqa
                else:
                    result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
            else:
                result['pubdate'] = result['publishedDate']\
                    .strftime('%a, %d %b %Y %H:%M:%S %z')
                result['publishedDate'] = format_date(result['publishedDate'])

    if search.request_data.get('format') == 'json':
        return Response(json.dumps({'query': search.query,
                                    'results': search.results}),
                        mimetype='application/json')
    elif search.request_data.get('format') == 'csv':
        csv = UnicodeWriter(cStringIO.StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score')
        if search.results:
            csv.writerow(keys)
            for row in search.results:
                row['host'] = row['parsed_url'].netloc
                csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response
    elif search.request_data.get('format') == 'rss':
        response_rss = render(
            'opensearch_response_rss.xml',
            results=search.results,
            q=search.request_data['q'],
            number_of_results=len(search.results),
            base_url=get_base_url()
        )
        return Response(response_rss, mimetype='text/xml')

    return render(
        'results.html',
        results=search.results,
        q=search.request_data['q'],
        selected_categories=search.categories,
        paging=search.paging,
        pageno=search.pageno,
        base_url=get_base_url(),
        suggestions=search.suggestions
    )
def format_date(value, format='full'):
    """Template filter: babel date formatting, 'full' style by default."""
    return babel.format_date(value, format)
def date_filter(value, format="short"):
    """Jinja filter: babel-formatted date, 'short' style by default."""
    rendered = format_date(value, format=format)
    return rendered
def index():
    """Render the search/index page.

    Supported outputs: html, json, csv, rss.
    """
    if not request.args and not request.form:
        return render('index.html', )

    try:
        search = Search(request)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; a bad query still falls back to the index
        return render('index.html', )

    # TODO moar refactor - do_search integration into Search class
    search.results, search.suggestions = do_search(search.query, request,
                                                   search.engines,
                                                   search.pageno, search.lang)

    for result in search.results:
        if not search.paging and engines[result['engine']].paging:
            search.paging = True
        if search.request_data.get('format', 'html') == 'html':
            if 'content' in result:
                result['content'] = highlight_content(
                    result['content'], search.query.encode('utf-8'))  # noqa
            result['title'] = highlight_content(result['title'],
                                                search.query.encode('utf-8'))
        else:
            if 'content' in result:
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(
                html_to_text(result['title']).strip().split())
        # shorten overly long urls for display
        if len(result['url']) > 74:
            url_parts = result['url'][:35], result['url'][-35:]
            result['pretty_url'] = u'{0}[...]{1}'.format(*url_parts)
        else:
            result['pretty_url'] = result['url']

        for engine in result['engines']:
            if engine in favicons:
                result['favicon'] = engine

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            if result['publishedDate'].replace(tzinfo=None)\
               >= datetime.now() - timedelta(days=1):
                # fresh result: show relative age instead of a date
                timedifference = datetime.now() - result['publishedDate']\
                    .replace(tzinfo=None)
                minutes = int((timedifference.seconds / 60) % 60)
                hours = int(timedifference.seconds / 60 / 60)
                if hours == 0:
                    result['publishedDate'] = gettext(
                        u'{minutes} minute(s) ago').format(
                            minutes=minutes)  # noqa
                else:
                    result['publishedDate'] = gettext(
                        u'{hours} hour(s), {minutes} minute(s) ago').format(
                            hours=hours, minutes=minutes)  # noqa
            else:
                result['pubdate'] = result['publishedDate']\
                    .strftime('%a, %d %b %Y %H:%M:%S %z')
                result['publishedDate'] = format_date(result['publishedDate'])

    if search.request_data.get('format') == 'json':
        return Response(json.dumps({
            'query': search.query,
            'results': search.results
        }),
                        mimetype='application/json')
    elif search.request_data.get('format') == 'csv':
        csv = UnicodeWriter(cStringIO.StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score')
        if search.results:
            csv.writerow(keys)
            for row in search.results:
                row['host'] = row['parsed_url'].netloc
                csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response
    elif search.request_data.get('format') == 'rss':
        response_rss = render('opensearch_response_rss.xml',
                              results=search.results,
                              q=search.request_data['q'],
                              number_of_results=len(search.results),
                              base_url=get_base_url())
        return Response(response_rss, mimetype='text/xml')

    return render('results.html',
                  results=search.results,
                  q=search.request_data['q'],
                  selected_categories=search.categories,
                  paging=search.paging,
                  pageno=search.pageno,
                  base_url=get_base_url(),
                  suggestions=search.suggestions)
def serialize_date(self, value, key_prefix, date_format=LOCAL_DATE_FORMAT):
    """Serialize *value* with the given date format (no rebasing).

    ``key_prefix`` is accepted for interface symmetry but unused here.
    """
    return format_date(value, date_format, rebase=False)
def date(value):
    """Template filter: render a date like 'Mon, 1 January 2001'."""
    return babel.format_date(value, "EE, d MMMM y")
def index():
    """Render the search/index page.

    Runs the search through the plugin hooks (pre_search / post_search /
    on_result) and post-processes each result for display.

    Supported outputs: html, json, csv, rss.
    """
    if not request.args and not request.form:
        return render('index.html', )

    try:
        search = Search(request)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; a bad query still falls back to the index
        return render('index.html', )

    if plugins.call('pre_search', request, locals()):
        search.search(request)

    plugins.call('post_search', request, locals())

    for result in search.result_container.get_ordered_results():

        plugins.call('on_result', request, locals())
        if not search.paging and engines[result['engine']].paging:
            search.paging = True

        if search.request_data.get('format', 'html') == 'html':
            if 'content' in result:
                result['content'] = highlight_content(
                    result['content'], search.query.encode('utf-8'))  # noqa
            result['title'] = highlight_content(result['title'],
                                                search.query.encode('utf-8'))
        else:
            if result.get('content'):
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(
                html_to_text(result['title']).strip().split())

        result['pretty_url'] = prettify_url(result['url'])

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            result['pubdate'] = result['publishedDate'].strftime(
                '%Y-%m-%d %H:%M:%S%z')
            if result['publishedDate'].replace(
                    tzinfo=None) >= datetime.now() - timedelta(days=1):
                # fresh result: show relative age instead of a date
                timedifference = datetime.now(
                ) - result['publishedDate'].replace(tzinfo=None)
                minutes = int((timedifference.seconds / 60) % 60)
                hours = int(timedifference.seconds / 60 / 60)
                if hours == 0:
                    result['publishedDate'] = gettext(
                        u'{minutes} minute(s) ago').format(minutes=minutes)
                else:
                    result['publishedDate'] = gettext(
                        u'{hours} hour(s), {minutes} minute(s) ago').format(
                            hours=hours, minutes=minutes)  # noqa
            else:
                result['publishedDate'] = format_date(result['publishedDate'])

    if search.request_data.get('format') == 'json':
        return Response(json.dumps({
            'query': search.query,
            'results': search.result_container.get_ordered_results()
        }),
                        mimetype='application/json')
    elif search.request_data.get('format') == 'csv':
        csv = UnicodeWriter(cStringIO.StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score')
        csv.writerow(keys)
        for row in search.result_container.get_ordered_results():
            row['host'] = row['parsed_url'].netloc
            csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response
    elif search.request_data.get('format') == 'rss':
        response_rss = render(
            'opensearch_response_rss.xml',
            results=search.result_container.get_ordered_results(),
            q=search.request_data['q'],
            number_of_results=search.result_container.results_length(),
            base_url=get_base_url())
        return Response(response_rss, mimetype='text/xml')

    return render('results.html',
                  results=search.result_container.get_ordered_results(),
                  q=search.request_data['q'],
                  selected_categories=search.categories,
                  paging=search.paging,
                  pageno=search.pageno,
                  base_url=get_base_url(),
                  suggestions=search.result_container.suggestions,
                  answers=search.result_container.answers,
                  infoboxes=search.result_container.infoboxes,
                  theme=get_current_theme_name(),
                  favicons=global_favicons[themes.index(
                      get_current_theme_name())])
def format_as_month(value):
    """Render the month name, capitalized (some locales use lowercase)."""
    month_name = format_date(value, "MMMM")
    return month_name.capitalize()
def index():
    """Render the search/index page.

    Runs the search through the plugin hooks (pre_search / post_search /
    on_result) and post-processes each result for display.

    Supported outputs: html, json, csv, rss.
    """
    if not request.args and not request.form:
        return render("index.html")

    try:
        search = Search(request)
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not swallowed; a bad query still falls back to the index
        return render("index.html")

    if plugins.call("pre_search", request, locals()):
        search.search(request)

    plugins.call("post_search", request, locals())

    for result in search.result_container.get_ordered_results():

        plugins.call("on_result", request, locals())
        if not search.paging and engines[result["engine"]].paging:
            search.paging = True

        if search.request_data.get("format", "html") == "html":
            if "content" in result:
                result["content"] = highlight_content(result["content"], search.query.encode("utf-8"))  # noqa
            result["title"] = highlight_content(result["title"], search.query.encode("utf-8"))
        else:
            if result.get("content"):
                result["content"] = html_to_text(result["content"]).strip()
            # removing html content and whitespace duplications
            result["title"] = " ".join(html_to_text(result["title"]).strip().split())

        result["pretty_url"] = prettify_url(result["url"])

        # TODO, check if timezone is calculated right
        if "publishedDate" in result:
            try:  # test if publishedDate >= 1900 (datetime module bug)
                result["pubdate"] = result["publishedDate"].strftime("%Y-%m-%d %H:%M:%S%z")
            except ValueError:
                result["publishedDate"] = None
            else:
                if result["publishedDate"].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
                    # fresh result: show relative age instead of a date
                    timedifference = datetime.now() - result["publishedDate"].replace(tzinfo=None)
                    minutes = int((timedifference.seconds / 60) % 60)
                    hours = int(timedifference.seconds / 60 / 60)
                    if hours == 0:
                        result["publishedDate"] = gettext(u"{minutes} minute(s) ago").format(minutes=minutes)
                    else:
                        result["publishedDate"] = gettext(u"{hours} hour(s), {minutes} minute(s) ago").format(
                            hours=hours, minutes=minutes
                        )  # noqa
                else:
                    result["publishedDate"] = format_date(result["publishedDate"])

    if search.request_data.get("format") == "json":
        return Response(
            json.dumps({"query": search.query, "results": search.result_container.get_ordered_results()}),
            mimetype="application/json",
        )
    elif search.request_data.get("format") == "csv":
        csv = UnicodeWriter(cStringIO.StringIO())
        keys = ("title", "url", "content", "host", "engine", "score")
        csv.writerow(keys)
        for row in search.result_container.get_ordered_results():
            row["host"] = row["parsed_url"].netloc
            csv.writerow([row.get(key, "") for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype="application/csv")
        cont_disp = "attachment;Filename=searx_-_{0}.csv".format(search.query.encode("utf-8"))
        response.headers.add("Content-Disposition", cont_disp)
        return response
    elif search.request_data.get("format") == "rss":
        response_rss = render(
            "opensearch_response_rss.xml",
            results=search.result_container.get_ordered_results(),
            q=search.request_data["q"],
            number_of_results=search.result_container.results_length(),
            base_url=get_base_url(),
        )
        return Response(response_rss, mimetype="text/xml")

    return render(
        "results.html",
        results=search.result_container.get_ordered_results(),
        q=search.request_data["q"],
        selected_categories=search.categories,
        paging=search.paging,
        pageno=search.pageno,
        base_url=get_base_url(),
        suggestions=search.result_container.suggestions,
        answers=search.result_container.answers,
        infoboxes=search.result_container.infoboxes,
        theme=get_current_theme_name(),
        favicons=global_favicons[themes.index(get_current_theme_name())],
    )
def format_date(date):
    """'Month day', with the year appended once the date is over a year old."""
    as_datetime = datetime.combine(date, datetime.min.time())
    older_than_a_year = (datetime.utcnow() - as_datetime).days > 365
    pattern = "MMMM d y" if older_than_a_year else "MMMM d"
    return babel.format_date(date, pattern)
def format_date(date):
    """'Month day', with the year appended once the date is over a year old."""
    pattern = 'MMMM d'
    midnight = datetime.combine(date, datetime.min.time())
    if (datetime.utcnow() - midnight).days > 365:
        pattern = pattern + ' y'
    return babel.format_date(date, pattern)