def _stale_datasets_for_user(self, data):

    def frequency_to_timedelta(frequency):
        frequency_periods = {
            # "daily" maps to 2 days so maintainers are not flagged every day
            "daily": dt.timedelta(days=2),
            "weekly": dt.timedelta(days=7),
            "fortnightly": dt.timedelta(days=14),
            "monthly": dt.timedelta(days=30),
            "quarterly": dt.timedelta(days=91),
            "annually": dt.timedelta(days=365),
        }
        if frequency:
            return frequency_periods[frequency]
        return None

    stale_datasets = []
    if data:
        for pkg in data:
            if 'frequency' not in pkg:
                continue
            pkg['metadata_created'] = h.date_str_to_datetime(
                pkg['metadata_created'])
            pkg['metadata_modified'] = h.date_str_to_datetime(
                pkg['metadata_modified'])
            pkg['frequency'] = pkg.get('frequency', '')
            if (pkg['frequency']
                    and pkg['frequency'] not in ('irregular', 'notPlanned')
                    and pkg['metadata_modified'].date()
                    != pkg['metadata_created'].date()):
                diff = dt.datetime.now() - pkg['metadata_modified']
                if diff > frequency_to_timedelta(pkg['frequency']):
                    stale_datasets.append(pkg)
    return stale_datasets
def get_raw_new_datasets(index):
    raw_new_datasets = []
    week_queue = Queue.Queue()
    rev_stats = stats_lib.RevisionStats()
    new_packages_by_week = rev_stats.get_by_week('new_packages')
    package_revisions_by_week = rev_stats.get_by_week('package_revisions')
    for week_date, revs, num_revisions, cumulative_num_revisions in package_revisions_by_week:
        week_queue.put(week_date)
    for week_date, pkgs, num_packages, cumulative_num_packages in new_packages_by_week:
        revision_week_date = week_queue.get()
        while revision_week_date != week_date:
            raw_new_datasets.append({
                'date': h.date_str_to_datetime(revision_week_date),
                'new_packages': 0})
            revision_week_date = week_queue.get()
        raw_new_datasets.append({
            'date': h.date_str_to_datetime(week_date),
            'new_packages': num_packages})
    while not week_queue.empty():
        revision_week_date = week_queue.get()
        raw_new_datasets.append({
            'date': h.date_str_to_datetime(revision_week_date),
            'new_packages': 0})
    return raw_new_datasets[index]['new_packages']
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get('ckan.feeds.author_name', '').strip() or \ config.get('ckan.site_id', '').strip() author_link = config.get('ckan.feeds.author_link', '').strip() or \ config.get('ckan.site_url', '').strip() # TODO language feed_class = None for plugin in plugins.PluginImplementations(plugins.IFeed): if hasattr(plugin, 'get_feed_class'): feed_class = plugin.get_feed_class() if not feed_class: feed_class = _FixedAtom1Feed feed = feed_class( feed_title, feed_link, feed_description, language=u'en', author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls['previous'], next_page=navigation_urls['next'], first_page=navigation_urls['first'], last_page=navigation_urls['last'], ) for pkg in results: additional_fields = {} for plugin in plugins.PluginImplementations(plugins.IFeed): if hasattr(plugin, 'get_item_additional_fields'): additional_fields = plugin.get_item_additional_fields(pkg) feed.add_item( title=pkg.get('title', ''), link=self.base_url + h.url_for(controller='package', action='read', id=pkg['id']), description=pkg.get('notes', ''), updated=h.date_str_to_datetime(pkg.get('metadata_modified')), published=h.date_str_to_datetime(pkg.get('metadata_created')), unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get('author', ''), author_email=pkg.get('author_email', ''), categories=[t['name'] for t in pkg.get('tags', [])], enclosure=webhelpers.feedgenerator.Enclosure( self.base_url + h.url_for(controller='api', register='package', action='show', id=pkg['name'], ver='2'), unicode(len(json.dumps(pkg))), # TODO fix this u'application/json'), **additional_fields) response.content_type = feed.mime_type return feed.writeString('utf-8')
def index(self): c = p.toolkit.c stats = stats_lib.Stats() rev_stats = stats_lib.RevisionStats() c.top_rated_packages = stats.top_rated_packages() c.most_edited_packages = stats.most_edited_packages() c.largest_groups = stats.largest_groups() c.top_tags = stats.top_tags() c.top_package_creators = stats.top_package_creators() c.new_packages_by_week = rev_stats.get_by_week('new_packages') c.deleted_packages_by_week = rev_stats.get_by_week('deleted_packages') c.num_packages_by_week = rev_stats.get_num_packages_by_week() c.package_revisions_by_week = rev_stats.get_by_week('package_revisions') c.raw_packages_by_week = [] for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week: c.raw_packages_by_week.append({'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_packages}) c.all_package_revisions = [] c.raw_all_package_revisions = [] for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week: c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions)) c.raw_all_package_revisions.append({'date': h.date_str_to_datetime(week_date), 'total_revisions': num_revisions}) c.new_datasets = [] c.raw_new_datasets = [] for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week: c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages)) c.raw_new_datasets.append({'date': h.date_str_to_datetime(week_date), 'new_packages': num_packages}) return p.toolkit.render('ckanext/stats/index.html')
def command(self):
    self._load_config()
    log = logging.getLogger('ckanext.opendatani')
    log.info('Daily dataset update frequency check started...')

    def frequency_to_timedelta(frequency):
        frequency_periods = {
            # 2 so as not to spam dataset maintainers every day
            "daily": dt.timedelta(days=2),
            "weekly": dt.timedelta(days=7),
            "fortnightly": dt.timedelta(days=14),
            "monthly": dt.timedelta(days=30),
            "quarterly": dt.timedelta(days=91),
            "annually": dt.timedelta(days=365),
        }
        if frequency:
            return frequency_periods[frequency]
        return None

    data = toolkit.get_action('package_search')(
        {'ignore_auth': True},
        {'include_private': True, 'rows': 10000000})

    if data['results']:
        for pkg in data['results']:
            if 'frequency' not in pkg:
                continue
            pkg['metadata_created'] = h.date_str_to_datetime(
                pkg['metadata_created'])
            pkg['metadata_modified'] = h.date_str_to_datetime(
                pkg['metadata_modified'])
            pkg['frequency'] = pkg.get('frequency', '')
            if (pkg['frequency']
                    and pkg['frequency'] not in ('irregular', 'notPlanned')
                    and pkg['metadata_modified'].date()
                    != pkg['metadata_created'].date()):
                diff = dt.datetime.now() - pkg['metadata_modified']
                if diff > frequency_to_timedelta(pkg['frequency']):
                    try:
                        content = (
                            "Dataset " + pkg['title'] +
                            " has not been updated in its planned"
                            " update frequency (" + pkg['frequency'] + ").")
                        to = pkg['contact_email']
                        subject = "Open Data NI: Update dataset notification"
                        emailer.send_email(content, to, subject)
                        log.info(
                            'Package "' + pkg['title'] +
                            '" has not been updated.'
                            ' Sending e-mail to maintainer.')
                    except Exception as e:
                        log.error(e)

    log.info('Daily dataset update frequency check completed.')
def output_feed(results: list[dict[str, Any]], feed_title: str, feed_description: str, feed_link: str, feed_url: str, navigation_urls: dict[str, str], feed_guid: str) -> Response: author_name = config.get_value(u'ckan.feeds.author_name').strip() or \ config.get_value(u'ckan.site_id').strip() def remove_control_characters(s: str): if not s: return "" return "".join(ch for ch in s if unicodedata.category(ch)[0] != "C") # TODO: language feed_class: PFeedFactory = CKANFeed for plugin in plugins.PluginImplementations(plugins.IFeed): if hasattr(plugin, u'get_feed_class'): feed_class = plugin.get_feed_class() feed = feed_class( feed_title, feed_link, feed_description, language=u'en', author_name=author_name, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls[u'previous'], next_page=navigation_urls[u'next'], first_page=navigation_urls[u'first'], last_page=navigation_urls[u'last'], ) for pkg in results: additional_fields: dict[str, Any] = {} for plugin in plugins.PluginImplementations(plugins.IFeed): if hasattr(plugin, u'get_item_additional_fields'): additional_fields = plugin.get_item_additional_fields(pkg) feed.add_item( title=pkg.get(u'title', u''), link=h.url_for(u'api.action', logic_function=u'package_show', id=pkg['id'], ver=3, _external=True), description=remove_control_characters(pkg.get(u'notes', u'')), updated=h.date_str_to_datetime(pkg.get(u'metadata_modified', '')), published=h.date_str_to_datetime(pkg.get(u'metadata_created', '')), unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get(u'author', u''), author_email=pkg.get(u'author_email', u''), categories=[t[u'name'] for t in pkg.get(u'tags', [])], enclosure=_enclosure(pkg), **additional_fields) resp = make_response(feed.writeString(u'utf-8'), 200) resp.headers['Content-Type'] = u'application/atom+xml' return resp
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get('ckan.feeds.author_name', '').strip() or \ config.get('ckan.site_id', '').strip() author_link = config.get('ckan.feeds.author_link', '').strip() or \ config.get('ckan.site_url', '').strip() # TODO language feed = _FixedAtom1Feed( title=feed_title, link=feed_link, description=feed_description, language=u'en', author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls['previous'], next_page=navigation_urls['next'], first_page=navigation_urls['first'], last_page=navigation_urls['last'], ) for pkg in results: updated_date = pkg.get('metadata_modified') if updated_date: updated_date = h.date_str_to_datetime(updated_date) published_date = pkg.get('metadata_created') if published_date: published_date = h.date_str_to_datetime(published_date) feed.add_item( title=pkg.get('title', ''), link=urlparse.urljoin( self.base_url, h.url_for(controller='package', action='read', id=pkg['id'])), description=pkg.get('notes', ''), updated=updated_date, published=published_date, unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get('author', ''), author_email=pkg.get('author_email', ''), categories=[t['name'] for t in pkg.get('tags', [])], enclosure=webhelpers.feedgenerator.Enclosure( urlparse.urljoin( self.base_url, h.url_for(controller='api', register='package', action='show', id=pkg['name'], ver='2')), unicode(len(json.dumps(pkg))), # TODO fix this u'application/json')) response.content_type = feed.mime_type return feed.writeString('utf-8')
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get('ckan.feeds.author_name', '').strip() or \ config.get('ckan.site_id', '').strip() author_link = config.get('ckan.feeds.author_link', '').strip() or \ config.get('ckan.site_url', '').strip() feed = _FixedAtom1Feed( title=feed_title, link=feed_link, description=feed_description, language=u'en', author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls['previous'], next_page=navigation_urls['next'], first_page=navigation_urls['first'], last_page=navigation_urls['last'], ) if c.language == 'fr': def lx(x): return x + '_fra' else: def lx(x): return x for pkg in results: feed.add_item( title=pkg.get(lx('title'), ''), link=self.base_url + url(str( '/api/action/package_show?id=%s' % pkg['name']) ), description=pkg.get(lx('notes'), ''), updated=date_str_to_datetime(pkg.get('metadata_modified')), published=date_str_to_datetime(pkg.get('metadata_created')), unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get('author', ''), author_email=pkg.get('author_email', ''), categories=''.join(e['value'] for e in pkg.get('extras', []) if e['key'] == lx('keywords')).split(','), enclosure=webhelpers.feedgenerator.Enclosure( self.base_url + url(str( '/api/action/package_show?id=%s' % pkg['name'])), unicode(len(json.dumps(pkg))), # TODO fix this u'application/json') ) response.content_type = feed.mime_type return feed.writeString('utf-8')
def index(): stats = stats_lib.Stats() extra_vars: dict[str, Any] = { 'largest_groups': stats.largest_groups(), 'top_tags': stats.top_tags(), 'top_package_creators': stats.top_package_creators(), 'most_edited_packages': stats.most_edited_packages(), 'new_packages_by_week': stats.get_by_week('new_packages'), 'deleted_packages_by_week': stats.get_by_week('deleted_packages'), 'num_packages_by_week': stats.get_num_packages_by_week(), 'package_revisions_by_week': stats.get_by_week('package_revisions') } extra_vars['raw_packages_by_week'] = [] for week_date, num_packages, cumulative_num_packages\ in stats.get_num_packages_by_week(): extra_vars['raw_packages_by_week'].append({ 'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_packages }) extra_vars['raw_all_package_revisions'] = [] for week_date, _revs, num_revisions, _cumulative_num_revisions\ in stats.get_by_week('package_revisions'): extra_vars['raw_all_package_revisions'].append({ 'date': h.date_str_to_datetime(week_date), 'total_revisions': num_revisions }) extra_vars['raw_new_datasets'] = [] for week_date, _pkgs, num_packages, _cumulative_num_revisions\ in stats.get_by_week('new_packages'): extra_vars['raw_new_datasets'].append({ 'date': h.date_str_to_datetime(week_date), 'new_packages': num_packages }) extra_vars['raw_deleted_datasets'] = [] for week_date, _pkgs, num_packages, cumulative_num_packages\ in stats.get_by_week('deleted_packages'): extra_vars['raw_deleted_datasets'].append({ 'date': h.date_str_to_datetime(week_date), 'deleted_packages': num_packages }) return render(u'ckanext/stats/index.html', extra_vars)
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get('ckan.feeds.author_name', '').strip() or \ config.get('ckan.site_id', '').strip() author_link = config.get('ckan.feeds.author_link', '').strip() or \ config.get('ckan.site_url', '').strip() feed = _FixedAtom1Feed( title=feed_title, link=feed_link, description=feed_description, language=u'en', author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls['previous'], next_page=navigation_urls['next'], first_page=navigation_urls['first'], last_page=navigation_urls['last'], ) if c.language == 'fr': def lx(x): return x + '_fra' else: def lx(x): return x for pkg in results: feed.add_item( title=pkg.get(lx('title'), ''), link=self.base_url + url(str('/api/action/package_show?id=%s' % pkg['name'])), description=pkg.get(lx('notes'), ''), updated=date_str_to_datetime(pkg.get('metadata_modified')), published=date_str_to_datetime(pkg.get('metadata_created')), unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get('author', ''), author_email=pkg.get('author_email', ''), categories=''.join(e['value'] for e in pkg.get('extras', []) if e['key'] == lx('keywords')).split(','), enclosure=webhelpers.feedgenerator.Enclosure( self.base_url + url(str('/api/action/package_show?id=%s' % pkg['name'])), unicode(len(json.dumps(pkg))), # TODO fix this u'application/json')) response.content_type = feed.mime_type return feed.writeString('utf-8')
def validate_date_inputs(field, key, data, extras, errors, context): date_error = _('Date format incorrect') time_error = _('Time format incorrect') date = None def get_input(suffix): inpt = key[0] + '_' + suffix new_key = (inpt,) + tuple(x for x in key if x != key[0]) key_value = extras.get(inpt) data[new_key] = key_value errors[new_key] = [] if key_value: del extras[inpt] if field.get('required'): not_empty(new_key, data, errors, context) return new_key, key_value date_key, value = get_input('date') value_full = '' if value: try: value_full = value date = h.date_str_to_datetime(value) except (TypeError, ValueError) as e: errors[date_key].append(date_error) time_key, value = get_input('time') if value: if not value_full: errors[date_key].append( _('Date is required when a time is provided')) else: try: value_full += ' ' + value date = h.date_str_to_datetime(value_full) except (TypeError, ValueError) as e: errors[time_key].append(time_error) tz_key, value = get_input('tz') if value: if value not in pytz.all_timezones: errors[tz_key].append('Invalid timezone') else: if isinstance(date, datetime.datetime): date = pytz.timezone(value).localize(date) return date
def output_feed(results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get(u'ckan.feeds.author_name', u'').strip() or \ config.get(u'ckan.site_id', u'').strip() # TODO: language feed_class = CKANFeed for plugin in plugins.PluginImplementations(plugins.IFeed): if hasattr(plugin, u'get_feed_class'): feed_class = plugin.get_feed_class() feed = feed_class( feed_title, feed_link, feed_description, language=u'en', author_name=author_name, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls[u'previous'], next_page=navigation_urls[u'next'], first_page=navigation_urls[u'first'], last_page=navigation_urls[u'last'], ) for pkg in results: additional_fields = {} for plugin in plugins.PluginImplementations(plugins.IFeed): if hasattr(plugin, u'get_item_additional_fields'): additional_fields = plugin.get_item_additional_fields(pkg) feed.add_item( title=pkg.get(u'title', u''), link=h.url_for(u'api.action', logic_function=u'package_read', id=pkg['id'], ver=3, _external=True), description=pkg.get(u'notes', u''), updated=h.date_str_to_datetime(pkg.get(u'metadata_modified')), published=h.date_str_to_datetime(pkg.get(u'metadata_created')), unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get(u'author', u''), author_email=pkg.get(u'author_email', u''), categories=[t[u'name'] for t in pkg.get(u'tags', [])], enclosure=_enclosure(pkg), **additional_fields) resp = make_response(feed.writeString(u'utf-8'), 200) resp.headers['Content-Type'] = u'application/atom+xml' return resp
def validator(key, data, errors, context): """ 1. a JSON with dates, eg. {"1": {"to": "2016-05-28T00:00:00", "from": "2016-05-11T00:00:00"}} 2. separate fields per date and time (for form submissions): fieldname-to-date-1 = "2012-09-11" fieldname-to-time-1 = "11:00" fieldname-from-date-2 = "2014-03-03" fieldname-from-time-2 = "09:45" """ # just in case there was an error before that validator if errors[key]: return value = data[key] # 1. json if value is not missing: if isinstance(value, basestring): try: value = json.loads(value) except ValueError, e: errors[key].append( _('Invalid field structure, it is not a valid JSON')) return if not isinstance(value, dict): errors[key].append(_('Expecting valid JSON value')) return out = {} for element in sorted(value): dates = value.get(element) with_date = False #if dates['from']: if 'from' in dates: try: date = h.date_str_to_datetime(dates['from']) with_date = True except (TypeError, ValueError), e: errors[key].append( _('From value: Date format incorrect')) continue #if dates['to']: if 'to' in dates: try: date = h.date_str_to_datetime(dates['to']) with_date = True except (TypeError, ValueError), e: errors[key].append( _('To value: Date format incorrect')) continue
def index(self): c = p.toolkit.c try: context = {'model': model, 'user': c.user, 'auth_user_obj': c.userobj} logic.check_access('sysadmin', context) except logic.NotAuthorized: base.abort(403, _('Not authorized to see this page')) stats = stats_lib.Stats() rev_stats = stats_lib.RevisionStats() c.top_rated_packages = stats.top_rated_packages() c.most_edited_packages = stats.most_edited_packages() export_datasets_most_edited_to_excel(c, stats) c.largest_groups = stats.largest_groups() export_largest_groups_to_excel(c, stats) c.top_tags = stats.top_tags() export_tags_to_excel(c, stats) c.top_package_creators = stats.top_package_creators() export_top_users_to_excel(c, stats) c.new_packages_by_week = rev_stats.get_by_week('new_packages') c.deleted_packages_by_week = rev_stats.get_by_week('deleted_packages') c.num_packages_by_week = rev_stats.get_num_packages_by_week() c.package_revisions_by_week = rev_stats.get_by_week('package_revisions') c.modified_resources = stats.modified_resources() export_modified_resources_to_excel(c, stats) c.raw_packages_by_week = [] for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week: c.raw_packages_by_week.append({'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_packages}) c.all_package_revisions = [] c.raw_all_package_revisions = [] for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week: c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions)) c.raw_all_package_revisions.append({'date': h.date_str_to_datetime(week_date), 'total_revisions': num_revisions}) c.new_datasets = [] c.raw_new_datasets = [] for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week: c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages)) c.raw_new_datasets.append({'date': h.date_str_to_datetime(week_date), 'new_packages': num_packages}) return p.toolkit.render('ckanext/stats/index.html')
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get("ckan.feeds.author_name", "").strip() or config.get("ckan.site_id", "").strip() author_link = config.get("ckan.feeds.author_link", "").strip() or config.get("ckan.site_url", "").strip() feed = _FixedAtom1Feed( title=feed_title, link=feed_link, description=feed_description, language=u"en", author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls["previous"], next_page=navigation_urls["next"], first_page=navigation_urls["first"], last_page=navigation_urls["last"], ) if c.language == "fr": def lx(x): return x + "_fra" else: def lx(x): return x for pkg in results: feed.add_item( title=pkg.get(lx("title"), ""), link=self.base_url + url(str("/api/action/package_show?id=%s" % pkg["name"])), description=pkg.get(lx("notes"), ""), updated=date_str_to_datetime(pkg.get("metadata_modified")), published=date_str_to_datetime(pkg.get("metadata_created")), unique_id=_create_atom_id(u"/dataset/%s" % pkg["id"]), author_name=pkg.get("author", ""), author_email=pkg.get("author_email", ""), categories="".join(e["value"] for e in pkg.get("extras", []) if e["key"] == lx("keywords")).split(","), enclosure=webhelpers.feedgenerator.Enclosure( self.base_url + url(str("/api/action/package_show?id=%s" % pkg["name"])), unicode(len(json.dumps(pkg))), # TODO fix this u"application/json", ), ) response.content_type = feed.mime_type return feed.writeString("utf-8")
def index(self): """Render the KPI index page.""" c = p.toolkit.c c.show_graphs = show_graphs c.kpi_goals = kpi_goals usage_stats = stats_lib.UsageStats() monthly_users = usage_stats.get_monthly_user_counts('all') c.num_users_by_month = [{ 'date': h.date_str_to_datetime(month_date), 'users': users, 'percent_complete': percentage } for month_date, users, percentage in monthly_users] if not c.num_users_by_month: c.num_users_by_month = [{ 'date': DUMMY_DATE, 'users': 0, 'percent_complete': 0 }] weekly_datasets = usage_stats.get_dataset_counts('dataset') c.raw_packages_by_week = [{ 'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_hits, 'percent_complete': percentage } for week_date, cumulative_num_hits, percentage in weekly_datasets] if not c.raw_packages_by_week: c.raw_packages_by_week = [{ 'date': DUMMY_DATE, 'total_packages': 0, 'percent_complete': 0 }] weekly_harvesters = usage_stats.get_dataset_counts('harvest') c.raw_harvesters_by_week = [{ 'date': h.date_str_to_datetime(week_date), 'total_packages': sources, 'percent_complete': percentage } for week_date, sources, percentage in weekly_harvesters] if not c.raw_harvesters_by_week: c.raw_harvesters_by_week = [{ 'date': DUMMY_DATE, 'total_packages': 0, 'percent_complete': 0 }] return p.toolkit.render('ckanext/kpis/index.html')
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get('ckan.feeds.author_name', '').strip() or \ config.get('ckan.site_id', '').strip() author_link = config.get('ckan.feeds.author_link', '').strip() or \ config.get('ckan.site_url', '').strip() # TODO language feed = _FixedAtom1Feed( title=feed_title, link=feed_link, description=feed_description, language=u'en', author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls['previous'], next_page=navigation_urls['next'], first_page=navigation_urls['first'], last_page=navigation_urls['last'], ) for pkg in results: feed.add_item( title=pkg.get('title', ''), link=self.base_url + url_for(controller='package', action='read', id=pkg['id']), description=pkg.get('notes', ''), updated=date_str_to_datetime(pkg.get('metadata_modified')), published=date_str_to_datetime(pkg.get('metadata_created')), unique_id=_create_atom_id(u'/dataset/%s' % pkg['id']), author_name=pkg.get('author', ''), author_email=pkg.get('author_email', ''), categories=[t['name'] for t in pkg.get('tags', [])], enclosure=webhelpers.feedgenerator.Enclosure( self.base_url + url_for(controller='api', register='package', action='show', id=pkg['name'], ver='2'), unicode(len(json.dumps(pkg))), # TODO fix this u'application/json') ) response.content_type = feed.mime_type return feed.writeString('utf-8')
def search(self, ver=None, register=None):
    log.debug('search %s params: %r' % (register, request.params))
    if register == 'revision':
        since_time = None
        if 'since_id' in request.params:
            id = request.params['since_id']
            if not id:
                return self._finish_bad_request(
                    _(u'No revision specified'))
            rev = model.Session.query(model.Revision).get(id)
            if rev is None:
                return self._finish_not_found(
                    _(u'There is no revision with id: %s') % id)
            since_time = rev.timestamp
        elif 'since_time' in request.params:
            since_time_str = request.params['since_time']
            try:
                since_time = h.date_str_to_datetime(since_time_str)
            except ValueError, inst:
                return self._finish_bad_request('ValueError: %s' % inst)
        else:
            return self._finish_bad_request(
                _("Missing search term ('since_id=UUID' or " +
                  " 'since_time=TIMESTAMP')"))
        revs = model.Session.query(model.Revision).\
            filter(model.Revision.timestamp > since_time).\
            limit(50)  # reasonable enough for a page
        return self._finish_ok([rev.id for rev in revs])
def search(self, ver=None, register=None):
    log.debug('search %s params: %r' % (register, request.params))
    if register == 'revision':
        since_time = None
        if request.params.has_key('since_id'):
            id = request.params['since_id']
            if not id:
                return self._finish_bad_request(
                    gettext(u'No revision specified'))
            rev = model.Session.query(model.Revision).get(id)
            if rev is None:
                return self._finish_not_found(
                    gettext(u'There is no revision with id: %s') % id)
            since_time = rev.timestamp
        elif request.params.has_key('since_time'):
            since_time_str = request.params['since_time']
            try:
                since_time = h.date_str_to_datetime(since_time_str)
            except ValueError, inst:
                return self._finish_bad_request('ValueError: %s' % inst)
        else:
            return self._finish_bad_request(
                gettext("Missing search term ('since_id=UUID' or 'since_time=TIMESTAMP')"))
        revs = model.Session.query(model.Revision).filter(
            model.Revision.timestamp > since_time)
        return self._finish_ok([rev.id for rev in revs])
def format_resource_items(items):
    ''' Take a resource item list and format nicely with blacklisting etc. '''
    blacklist = ['name', 'description', 'url', 'tracking_summary',
                 'format', 'position', 'is_local_resource',
                 'datastore_active', 'on_same_domain', 'mimetype',
                 'state', 'url_type', 'has_views']
    output = []
    # regular expressions for detecting types in strings
    reg_ex_datetime = '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?$'
    reg_ex_int = '^-?\d{1,}$'
    reg_ex_float = '^-?\d{1,}\.\d{1,}$'
    for key, value in items:
        if not value or key in blacklist:
            continue
        # size is treated specially as we want to show in MiB etc
        if key == 'size':
            try:
                value = formatters.localised_filesize(int(value))
            except ValueError:
                # Sometimes values that can't be converted to ints can sneak
                # into the db. In this case, just leave them as they are.
                pass
        elif isinstance(value, basestring):
            # check if strings are actually datetime/number etc
            if re.search(reg_ex_datetime, value):
                datetime_ = date_str_to_datetime(value)
                value = formatters.localised_nice_date(datetime_)
            elif re.search(reg_ex_float, value):
                value = formatters.localised_number(float(value))
            elif re.search(reg_ex_int, value):
                value = formatters.localised_number(int(value))
        elif ((isinstance(value, int) or isinstance(value, float))
                and value not in (True, False)):
            value = formatters.localised_number(value)
        key = key.replace('_', ' ')
        output.append((key, value))
    return sorted(output, key=lambda x: x[0])
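# A small illustrative sketch (not part of the original source) of the string-type
# detection used by format_resource_items above: plain re.search against the same
# three patterns decides whether a value is rendered as a date, float or int.
import re

_reg_ex_datetime = r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?$'
_reg_ex_int = r'^-?\d{1,}$'
_reg_ex_float = r'^-?\d{1,}\.\d{1,}$'

assert re.search(_reg_ex_datetime, '2018-03-05T10:00:00')
assert re.search(_reg_ex_int, '-42')
assert re.search(_reg_ex_float, '3.14')
assert not re.search(_reg_ex_datetime, '2018-03-05')  # a bare date without a time part does not match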
def gmd(self, id): format = 'html' # response.headers['Content-Type'] = ctype response.headers['Content-Type'] = 'application/vnd.iso.19139+xml; charset=utf-8'.encode("ISO-8859-1") response.headers["Content-Disposition"] = ("attachment; filename=" + id + ".xml").encode("ISO-8859-1") package_type = self._get_package_type(id.split('@')[0]) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'for_view': True, 'auth_user_obj': c.userobj} data_dict = {'id': id} # interpret @<revision_id> or @<date> suffix split = id.split('@') if len(split) == 2: data_dict['id'], revision_ref = split if model.is_id(revision_ref): context['revision_id'] = revision_ref else: try: date = h.date_str_to_datetime(revision_ref) context['revision_date'] = date except TypeError, e: base.abort(400, _('Invalid revision format: %r') % e.args) except ValueError, e: base.abort(400, _('Invalid revision format: %r') % e.args)
def revisions(self):
    '''
    Similar to the revision search API, lists all revisions for which a
    dataset or group changed in some way.

    URL Params:
      since-revision-id
      since-timestamp (utc)
      in-the-last-x-minutes
    '''
    # parse options
    rev_id = request.params.get('since-revision-id')
    since_timestamp = request.params.get('since-timestamp')
    in_the_last_x_minutes = request.params.get('in-the-last-x-minutes')
    now = datetime.datetime.utcnow()
    if rev_id is not None:
        rev = model.Session.query(model.Revision).get(rev_id)
        if not rev:
            abort(400, 'Revision ID "%s" does not exist' % rev_id)
        since_timestamp = rev.timestamp
    elif since_timestamp is not None:
        try:
            since_timestamp = date_str_to_datetime(since_timestamp)
        except (ValueError, TypeError), inst:
            example = now.strftime('%Y-%m-%d%%20%H:%M')  # e.g. 2013-11-30%2023:15
            abort(400, 'Could not parse timestamp "%s": %s. Must be UTC. '
                       'Example: since-time=%s'
                  % (since_timestamp, inst, example))
def read(self, id, format='html'): if not format == 'html': ctype, extension, loader = \ self._content_type_from_extension(format) if not ctype: # An unknown format, we'll carry on in case it is a # revision specifier and re-constitute the original id id = "%s.%s" % (id, format) ctype, format, loader = "text/html; charset=utf-8", "html", \ MarkupTemplate else: ctype, format, loader = self._content_type_from_accept() response.headers['Content-Type'] = ctype package_type = self._get_package_type(id.split('@')[0]) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'for_view': True, 'auth_user_obj': c.userobj} data_dict = {'id': id} # interpret @<revision_id> or @<date> suffix split = id.split('@') if len(split) == 2: data_dict['id'], revision_ref = split if model.is_id(revision_ref): context['revision_id'] = revision_ref else: try: date = h.date_str_to_datetime(revision_ref) context['revision_date'] = date except TypeError, e: abort(400, _('Invalid revision format: %r') % e.args) except ValueError, e: abort(400, _('Invalid revision format: %r') % e.args)
def search(self, ver=None, register=None):
    log.debug('search %s params: %r', register, request.params)
    if register == 'revision':
        since_time = None
        if 'since_id' in request.params:
            id = request.params['since_id']
            if not id:
                return self._finish_bad_request(
                    _(u'No revision specified'))
            rev = model.Session.query(model.Revision).get(id)
            if rev is None:
                return self._finish_not_found(
                    _(u'There is no revision with id: %s') % id)
            since_time = rev.timestamp
        elif 'since_time' in request.params:
            since_time_str = request.params['since_time']
            try:
                since_time = h.date_str_to_datetime(since_time_str)
            except ValueError, inst:
                return self._finish_bad_request('ValueError: %s' % inst)
        else:
            return self._finish_bad_request(
                _("Missing search term ('since_id=UUID' or " +
                  " 'since_time=TIMESTAMP')"))
        revs = model.Session.query(model.Revision) \
            .filter(model.Revision.timestamp > since_time) \
            .order_by(model.Revision.timestamp) \
            .limit(50)  # reasonable enough for a page
        return self._finish_ok([rev.id for rev in revs])
def read(self, id, format='html'): # Check we know the content type, if not then it is likely a revision # and therefore we should merge the format onto the end of id ctype,extension,loader = self._content_type_for_format(format) if not ctype: # Reconstitute the ID if we don't know what content type to use ctype = "text/html; charset=utf-8" id = "%s.%s" % (id, format) format = 'html' else: format = extension response.headers['Content-Type'] = ctype package_type = self._get_package_type(id.split('@')[0]) context = {'model': model, 'session': model.Session, 'user': c.user or c.author, 'extras_as_string': True, 'for_view': True} data_dict = {'id': id} # interpret @<revision_id> or @<date> suffix split = id.split('@') if len(split) == 2: data_dict['id'], revision_ref = split if model.is_id(revision_ref): context['revision_id'] = revision_ref else: try: date = date_str_to_datetime(revision_ref) context['revision_date'] = date except TypeError, e: abort(400, _('Invalid revision format: %r') % e.args) except ValueError, e: abort(400, _('Invalid revision format: %r') % e.args)
def search(self, ver=None, register=None): log.debug("search %s params: %r" % (register, request.params)) if register == "revision": since_time = None if "since_id" in request.params: id = request.params["since_id"] if not id: return self._finish_bad_request(_(u"No revision specified")) rev = model.Session.query(model.Revision).get(id) if rev is None: return self._finish_not_found(_(u"There is no revision with id: %s") % id) since_time = rev.timestamp elif "since_time" in request.params: since_time_str = request.params["since_time"] try: since_time = h.date_str_to_datetime(since_time_str) except ValueError, inst: return self._finish_bad_request("ValueError: %s" % inst) else: return self._finish_bad_request( _("Missing search term ('since_id=UUID' or " + " 'since_time=TIMESTAMP')") ) revs = model.Session.query(model.Revision).filter(model.Revision.timestamp > since_time) return self._finish_ok([rev.id for rev in revs])
def read(self, id):
    package_type = self._get_package_type(id.split('@')[0])
    context = {
        'model': model,
        'session': model.Session,
        'user': c.user or c.author,
        'extras_as_string': True,
        'for_view': True
    }
    data_dict = {'id': id}

    # interpret @<revision_id> or @<date> suffix
    split = id.split('@')
    if len(split) == 2:
        data_dict['id'], revision_ref = split
        if model.is_id(revision_ref):
            context['revision_id'] = revision_ref
        else:
            try:
                date = date_str_to_datetime(revision_ref)
                context['revision_date'] = date
            except TypeError, e:
                abort(400, _('Invalid revision format: %r') % e.args)
            except ValueError, e:
                abort(400, _('Invalid revision format: %r') % e.args)
def search(self, ver=None, register=None): log.debug('search %s params: %r' % (register, request.params)) ver = ver or '1' # i.e. default to v1 if register == 'revision': since_time = None if request.params.has_key('since_id'): id = request.params['since_id'] if not id: return self._finish_bad_request( gettext(u'No revision specified')) rev = model.Session.query(model.Revision).get(id) if rev is None: return self._finish_not_found( gettext(u'There is no revision with id: %s') % id) since_time = rev.timestamp elif request.params.has_key('since_time'): since_time_str = request.params['since_time'] try: since_time = date_str_to_datetime(since_time_str) except ValueError, inst: return self._finish_bad_request('ValueError: %s' % inst) else: return self._finish_bad_request( gettext( "Missing search term ('since_id=UUID' or 'since_time=TIMESTAMP')" )) revs = model.Session.query( model.Revision).filter(model.Revision.timestamp > since_time) return self._finish_ok([rev.id for rev in revs])
def gmd(self, id): format = 'html' # response.headers['Content-Type'] = ctype response.headers[ 'Content-Type'] = 'application/vnd.iso.19139+xml; charset=utf-8'.encode( "ISO-8859-1") response.headers["Content-Disposition"] = ("attachment; filename=" + id + ".xml").encode("ISO-8859-1") package_type = self._get_package_type(id.split('@')[0]) context = { 'model': model, 'session': model.Session, 'user': c.user or c.author, 'for_view': True, 'auth_user_obj': c.userobj } data_dict = {'id': id} # interpret @<revision_id> or @<date> suffix split = id.split('@') if len(split) == 2: data_dict['id'], revision_ref = split if model.is_id(revision_ref): context['revision_id'] = revision_ref else: try: date = h.date_str_to_datetime(revision_ref) context['revision_date'] = date except TypeError, e: base.abort(400, _('Invalid revision format: %r') % e.args) except ValueError, e: base.abort(400, _('Invalid revision format: %r') % e.args)
def validate_date_inputs(field, key, data, extras, errors, context):
    date_error = _('Date format incorrect')
    time_error = _('Time format incorrect')

    date = None

    def get_input(suffix):
        inpt = key[0] + '_' + suffix
        new_key = (inpt,) + tuple(x for x in key if x != key[0])
        value = extras.get(inpt)
        data[new_key] = value
        errors[new_key] = []

        if value:
            del extras[inpt]

        if field.get('required'):
            not_empty(new_key, data, errors, context)

        return (new_key, value)

    date_key, value = get_input('date')
    value_full = ''

    if value:
        try:
            value_full = value
            date = h.date_str_to_datetime(value)
        except (TypeError, ValueError), e:
            errors[date_key].append(date_error)
def setup_template_variables(self, context, data_dict): """Setup variables available to templates""" # log.debug(pprint.pprint(data_dict)) hdl = HandleService() # Author name author_name = data_dict['package'].get('citation_info', '') if not author_name: author_name = 'Author name' # Publication year publication_year = data_dict['package'].get('iso_pubDate', '') if not publication_year: publication_year = "Publication year" else: publication_year = h.date_str_to_datetime(publication_year).year res_name = data_dict['resource'].get('name', '') res_id = tk.get_or_bust(data_dict['resource'], 'id') ver_number = tk.get_action('resource_version_number')(context, { 'id': res_id }) res_pid = data_dict['resource'].get(hdl.resource_field, '') access_date = datetime.datetime.now() tpl_variables = { 'author_name': author_name, 'publication_year': publication_year, 'res_name': res_name, 'ver_number': ver_number, 'res_pid': res_pid, 'access_date': access_date } return tpl_variables
def validate_date_inputs(field, key, data, extras, errors, context):
    date_error = _('Date format incorrect')
    time_error = _('Time format incorrect')

    date = None

    def get_input(suffix):
        inpt = key[0] + '_' + suffix
        new_key = (inpt, ) + tuple(x for x in key if x != key[0])
        value = extras.get(inpt)
        data[new_key] = value
        errors[new_key] = []

        if value:
            del extras[inpt]

        if field.get('required'):
            not_empty(new_key, data, errors, context)

        return (new_key, value)

    date_key, value = get_input('date')
    value_full = ''

    if value:
        try:
            value_full = value
            date = h.date_str_to_datetime(value)
        except (TypeError, ValueError), e:
            errors[date_key].append(date_error)
def index(self): c = p.toolkit.c stats = stats_lib.Stats() rev_stats = stats_lib.RevisionStats() c.top_rated_packages = stats.top_rated_packages() c.most_edited_packages = stats.most_edited_packages() c.largest_groups = stats.largest_groups() # keywords c.top_keywords = stats.top_keywords() # theme c.top_themes = stats.top_themes() # language c.top_languages = stats.top_languages() # counties c.top_countries = stats.top_countries() # subject (eurovoc concepts c.top_subjects = stats.top_subjects() #c.top_tags = stats.top_tags() c.top_package_owners = stats.top_package_owners() c.new_packages_by_week = rev_stats.get_by_week('new_packages') c.deleted_packages_by_week = rev_stats.get_by_week('deleted_packages') c.num_packages_by_week = rev_stats.get_num_packages_by_week() c.package_revisions_by_week = rev_stats.get_by_week('package_revisions') # Used in the legacy CKAN templates. c.packages_by_week = [] # Used in new CKAN templates gives more control to the templates for formatting. c.raw_packages_by_week = [] for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week: c.packages_by_week.append('[new Date(%s), %s]' % (week_date.replace('-', ','), cumulative_num_packages)) c.raw_packages_by_week.append({'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_packages}) c.all_package_revisions = [] c.raw_all_package_revisions = [] for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week: c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions)) c.raw_all_package_revisions.append({'date': h.date_str_to_datetime(week_date), 'total_revisions': num_revisions}) c.new_datasets = [] c.raw_new_datasets = [] for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week: c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages)) c.raw_new_datasets.append({'date': h.date_str_to_datetime(week_date), 'new_packages': num_packages}) return p.toolkit.render('stats/index.html')
def index(self): c = p.toolkit.c stats = stats_lib.Stats() rev_stats = stats_lib.RevisionStats() c.top_rated_packages = stats.top_rated_packages() c.most_edited_packages = stats.most_edited_packages() c.largest_groups = stats.largest_groups() c.top_package_owners = stats.top_package_owners() c.summary_stats = stats.summary_stats() c.activity_counts = stats.activity_counts() c.by_org = stats.by_org() c.by_proj = stats.by_proj() c.by_data_type = stats.by_data_type() c.res_by_org = stats.res_by_org() c.res_by_format = stats.res_by_format() c.top_active_orgs = stats.top_active_orgs() c.user_access_list = stats.user_access_list() c.recent_datasets = stats.recent_datasets() c.new_packages_by_week = rev_stats.get_by_week('new_packages') c.deleted_packages_by_week = rev_stats.get_by_week('deleted_packages') c.num_packages_by_week = rev_stats.get_num_packages_by_week() c.package_revisions_by_week = rev_stats.get_by_week('package_revisions') # Used in the legacy CKAN templates. c.packages_by_week = [] # Used in new CKAN templates gives more control to the templates for formatting. c.raw_packages_by_week = [] for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week: c.packages_by_week.append('[new Date(%s), %s]' % (week_date.replace('-', ','), cumulative_num_packages)) c.raw_packages_by_week.append({'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_packages}) c.all_package_revisions = [] c.raw_all_package_revisions = [] for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week: c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions)) c.raw_all_package_revisions.append({'date': h.date_str_to_datetime(week_date), 'total_revisions': num_revisions}) c.new_datasets = [] c.raw_new_datasets = [] for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week: c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages)) c.raw_new_datasets.append({'date': h.date_str_to_datetime(week_date), 'new_packages': num_packages}) return p.toolkit.render('ckanext/stats/index.html')
def isodate(value, context):
    if isinstance(value, datetime.datetime):
        return value
    if value == '':
        return None
    try:
        date = h.date_str_to_datetime(value)
    except (TypeError, ValueError), e:
        raise Invalid(_('Date format incorrect'))
def raw_new_datasets(cls):
    new_packages_by_week = RevisionStats.get_by_week('new_packages')
    new_datasets = []
    raw_new_datasets = []
    for week_date, pkgs, num_packages, cumulative_num_packages in new_packages_by_week:
        new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages))
        raw_new_datasets.append({'date': h.date_str_to_datetime(week_date),
                                 'new_packages': num_packages})
    return raw_new_datasets
def index(self): c = p.toolkit.c stats = stats_lib.Stats() rev_stats = stats_lib.RevisionStats() c.top_rated_packages = self.timed(stats.top_rated_packages) c.most_edited_packages = self.timed(stats.most_edited_packages) c.largest_groups = self.timed(stats.largest_groups) c.top_package_owners = self.timed(stats.top_package_owners) c.summary_stats = self.timed(stats.summary_stats) c.activity_counts = self.timed(stats.activity_counts) c.by_org = self.timed(stats.by_org) c.users_by_organisation = self.timed(stats.users_by_organisation) c.res_by_org = self.timed(stats.res_by_org) c.top_active_orgs = self.timed(stats.top_active_orgs) c.user_access_list = self.timed(stats.user_access_list) c.recent_created_datasets = self.timed(stats.recent_created_datasets) c.recent_updated_datasets = self.timed(stats.recent_updated_datasets) c.new_packages_by_week = self.timed(rev_stats.get_by_week,'new_packages') c.num_packages_by_week = self.timed(rev_stats.get_num_packages_by_week) c.package_revisions_by_week = self.timed(rev_stats.get_by_week,'package_revisions') c.recent_period = stats.recent_period # Used in the legacy CKAN templates. c.packages_by_week = [] # Used in new CKAN templates gives more control to the templates for formatting. c.raw_packages_by_week = [] for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week: c.packages_by_week.append('[new Date(%s), %s]' % (week_date.replace('-', ','), cumulative_num_packages)) c.raw_packages_by_week.append({'date': h.date_str_to_datetime(week_date), 'total_packages': cumulative_num_packages}) c.all_package_revisions = [] c.raw_all_package_revisions = [] for week_date, revs, num_revisions, cumulative_num_revisions in c.package_revisions_by_week: c.all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions)) c.raw_all_package_revisions.append({'date': h.date_str_to_datetime(week_date), 'total_revisions': num_revisions}) c.new_datasets = [] c.raw_new_datasets = [] for week_date, pkgs, num_packages, cumulative_num_packages in c.new_packages_by_week: c.new_datasets.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_packages)) c.raw_new_datasets.append({'date': h.date_str_to_datetime(week_date), 'new_packages': num_packages}) return p.toolkit.render('ckanext/stats/index.html')
def raw_all_package_revisions(cls):
    package_revisions_by_week = RevisionStats.get_by_week('package_revisions')
    all_package_revisions = []
    raw_all_package_revisions = []
    for week_date, revs, num_revisions, cumulative_num_revisions in package_revisions_by_week:
        all_package_revisions.append('[new Date(%s), %s]' % (week_date.replace('-', ','), num_revisions))
        raw_all_package_revisions.append(
            {'date': h.date_str_to_datetime(week_date),
             'total_revisions': num_revisions})
    return raw_all_package_revisions
def isodate(value, context):
    if isinstance(value, datetime.datetime):
        return value
    if value == '':
        return None
    try:
        return helpers.date_str_to_datetime(value)
    except (TypeError, ValueError):
        raise Invalid(_('Date format incorrect - isodate') + ": {}".format(value))
def raw_packages_by_week(cls):
    raw_packages_by_week = []
    for (
            week_date, num_packages, cumulative_num_packages
    ) in RevisionStats.get_num_packages_by_week():
        raw_packages_by_week.append({
            u'date': h.date_str_to_datetime(week_date),
            u'total_packages': cumulative_num_packages
        })
    return raw_packages_by_week
def dataset_footnote(pkg_dict):
    dataset_url = url_for(controller='package', action='read',
                          id=pkg_dict.get('id'), qualified=True)
    source_str = 'Source: {0}'.format(dataset_url)
    dataset_date = date_str_to_datetime(pkg_dict.get('metadata_modified'))
    date_str = 'Last updated at {0} : {1}'.format(
        url_for(controller='home', action='index', qualified=True),
        render_datetime(dataset_date, '%Y-%m-%d'))
    return '\n\n{0} \r\n{1}'.format(source_str, date_str)
def validator(key, data, errors, context):
    value = data[key]
    date = None

    if value:
        if isinstance(value, datetime.datetime):
            return value
        else:
            try:
                date = h.date_str_to_datetime(value)
            except (TypeError, ValueError), e:
                raise Invalid(_('Date format incorrect'))
def raw_packages_by_week():
    rev_stats = RevisionStats()
    c.num_packages_by_week = rev_stats.get_num_packages_by_week()
    c.raw_packages_by_week = []
    c.packages_by_week = []
    for week_date, num_packages, cumulative_num_packages in c.num_packages_by_week:
        c.packages_by_week.append('[new Date(%s), %s]' % (week_date.replace('-', ','), cumulative_num_packages))
        c.raw_packages_by_week.append({'date': h.date_str_to_datetime(week_date),
                                       'total_packages': cumulative_num_packages})
    return c.raw_packages_by_week
def validate_date(value, context):
    if value == '':
        return value
    elif isinstance(value, dt.datetime):
        return value.strftime('%Y-%m-%d')
    try:
        date = h.date_str_to_datetime(value)
        return date.strftime('%Y-%m-%d')
    except (TypeError, ValueError) as e:
        raise tk.ValidationError(
            {'constraints': ['Please provide the date in YYYY-MM-DD format']})
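# A hedged usage sketch for validate_date above (assumed behaviour, not taken from the
# original source): anything date_str_to_datetime can parse is normalised to
# YYYY-MM-DD, empty strings pass through unchanged, and unparseable input raises
# tk.ValidationError.
def _validate_date_examples():
    assert validate_date('', {}) == ''
    assert validate_date('2019-07-01T12:30:00', {}) == '2019-07-01'
    try:
        validate_date('not-a-date', {})
    except tk.ValidationError:
        pass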
def output_feed(self, results, feed_title, feed_description, feed_link, feed_url, navigation_urls, feed_guid): author_name = config.get("ckan.feeds.author_name", "").strip() or config.get("ckan.site_id", "").strip() author_link = config.get("ckan.feeds.author_link", "").strip() or config.get("ckan.site_url", "").strip() # TODO language feed = _FixedAtom1Feed( title=feed_title, link=feed_link, description=feed_description, language=u"en", author_name=author_name, author_link=author_link, feed_guid=feed_guid, feed_url=feed_url, previous_page=navigation_urls["previous"], next_page=navigation_urls["next"], first_page=navigation_urls["first"], last_page=navigation_urls["last"], ) for pkg in results: feed.add_item( title=pkg.get("title", ""), link=self.base_url + h.url_for(controller="package", action="read", id=pkg["id"]), description=pkg.get("notes", ""), updated=h.date_str_to_datetime(pkg.get("metadata_modified")), published=h.date_str_to_datetime(pkg.get("metadata_created")), unique_id=_create_atom_id(u"/dataset/%s" % pkg["id"]), author_name=pkg.get("author", ""), author_email=pkg.get("author_email", ""), categories=[t["name"] for t in pkg.get("tags", [])], enclosure=webhelpers.feedgenerator.Enclosure( self.base_url + h.url_for(controller="api", register="package", action="show", id=pkg["name"], ver="2"), unicode(len(json.dumps(pkg))), # TODO fix this u"application/json", ), ) response.content_type = feed.mime_type return feed.writeString("utf-8")
def validator(key, data, errors, context):
    value = data[key]
    date_error = _('Date format incorrect')
    time_error = _('Time format incorrect')

    date = None

    if isinstance(value, datetime.datetime):
        return value

    if value is not missing:
        try:
            date = h.date_str_to_datetime(value)
        except (TypeError, ValueError), e:
            raise Invalid(date_error)
def metameta_convert_to_local_timestamp(str_timestamp):
    if not str_timestamp:
        return ''
    # calculate past time by considering utc
    utc_datetime = date_str_to_datetime(str_timestamp)
    # get local time
    tz_code = config.get('ckan.timezone', 'Australia/Melbourne')
    local = timezone(tz_code)
    if _is_naive(utc_datetime):
        utc_datetime = _make_aware(utc_datetime, pytz.utc)
    local_datetime = utc_datetime.astimezone(local)
    return local_datetime.strftime(DATETIME_FORMAT)
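# A minimal sketch of how metameta_convert_to_local_timestamp above could be exercised
# (assumed values; 'ckan.timezone', DATETIME_FORMAT, _is_naive and _make_aware come
# from the surrounding module and configuration, and are not defined here):
def _local_timestamp_examples():
    # empty input is passed through as an empty string
    assert metameta_convert_to_local_timestamp('') == ''
    # a naive UTC string is made timezone-aware and shifted to the configured zone
    # (Australia/Melbourne by default) before being formatted with DATETIME_FORMAT
    print(metameta_convert_to_local_timestamp('2020-01-01T00:00:00'))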
def publish(self):
    lc = LocalCKAN(username=c.user)

    publish_date = date_str_to_datetime(
        request.str_POST['publish_date']
    ).strftime("%Y-%m-%d %H:%M:%S")

    # get a list of package id's from the form POST data
    for key, package_id in request.str_POST.iteritems():
        if key == 'publish':
            lc.action.package_patch(
                id=package_id,
                portal_release_date=publish_date,
            )

    # return us to the publishing interface
    redirect(h.url_for('ckanadmin_publish'))
def publish(self):
    lc = LocalCKAN(username=c.user)
    publish_date = date_str_to_datetime(
        request.str_POST['publish_date']).strftime("%Y-%m-%d %H:%M:%S")

    # get a list of package id's from the form POST data
    for key, package_id in request.str_POST.iteritems():
        if key == 'publish':
            old = lc.action.package_show(id=package_id)
            lc.call_action('package_update',
                           dict(old, portal_release_date=publish_date))

    # return us to the publishing interface
    url = h.url_for(controller='ckanext.canada.controller:PublishController',
                    action='search')
    redirect(url)
def read(self, id):
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author,
               'extras_as_string': True,
               'schema': self._form_to_db_schema()}
    data_dict = {'id': id}

    # interpret @<revision_id> or @<date> suffix
    split = id.split('@')
    if len(split) == 2:
        data_dict['id'], revision_ref = split
        if model.is_id(revision_ref):
            context['revision_id'] = revision_ref
        else:
            try:
                date = date_str_to_datetime(revision_ref)
                context['revision_date'] = date
            except TypeError, e:
                abort(400, _('Invalid revision format: %r') % e.args)
            except ValueError, e:
                abort(400, _('Invalid revision format: %r') % e.args)
def read(self, id): custom_base.g_analitics() context = {'model': model, 'session': model.Session, 'user': c.user, 'for_view': True, 'auth_user_obj': c.userobj} data_dict = {'id': id, 'include_tracking': True} # interpret @<revision_id> or @<date> suffix split = id.split('@') if len(split) == 2: data_dict['id'], revision_ref = split if model.is_id(revision_ref): context['revision_id'] = revision_ref else: try: date = h.date_str_to_datetime(revision_ref) context['revision_date'] = date except TypeError, e: abort(400, _('Invalid revision format: %r') % e.args) except ValueError, e: abort(400, _('Invalid revision format: %r') % e.args)
def read(self, id, format="html"): # Check we know the content type, if not then it is likely a revision # and therefore we should merge the format onto the end of id ctype, extension, loader = self._content_type_for_format(format) if not ctype: # Reconstitute the ID if we don't know what content type to use ctype = "text/html; charset=utf-8" id = "%s.%s" % (id, format) format = "html" else: format = extension response.headers["Content-Type"] = ctype package_type = self._get_package_type(id.split("@")[0]) context = { "model": model, "session": model.Session, "user": c.user or c.author, "extras_as_string": True, "for_view": True, } data_dict = {"id": id} # interpret @<revision_id> or @<date> suffix split = id.split("@") if len(split) == 2: data_dict["id"], revision_ref = split if model.is_id(revision_ref): context["revision_id"] = revision_ref else: try: date = date_str_to_datetime(revision_ref) context["revision_date"] = date except TypeError, e: abort(400, _("Invalid revision format: %r") % e.args) except ValueError, e: abort(400, _("Invalid revision format: %r") % e.args)
def publish(self):
    packages = list()

    # open a new revision, so we can publish everything in one clean activity
    model.repo.new_revision()

    publish_date = date_str_to_datetime(
        request.str_POST['publish_date']).strftime("%Y-%m-%d %H:%M:%S")

    # get a list of package id's from the form POST data
    for key, package_id in request.str_POST.iteritems():
        if key == 'publish':
            package_instance = model.Package.get(package_id)
            # change portal release date
            package_instance.extras['portal_release_date'] = publish_date

    # close the revision, commit to database
    model.Session.commit()

    # return us to the publishing interface
    url = h.url_for(controller='ckanext.canada.controller:PublishController',
                    action='search')
    redirect(url)
def read(self, id, format="html"): if not format == "html": ctype, extension, loader = self._content_type_from_extension(format) if not ctype: # An unknown format, we'll carry on in case it is a # revision specifier and re-constitute the original id id = "%s.%s" % (id, format) ctype, format, loader = "text/html; charset=utf-8", "html", MarkupTemplate else: ctype, format, loader = self._content_type_from_accept() response.headers["Content-Type"] = ctype package_type = self._get_package_type(id.split("@")[0]) context = { "model": model, "session": model.Session, "user": c.user or c.author, "extras_as_string": True, "for_view": True, } data_dict = {"id": id} # interpret @<revision_id> or @<date> suffix split = id.split("@") if len(split) == 2: data_dict["id"], revision_ref = split if model.is_id(revision_ref): context["revision_id"] = revision_ref else: try: date = date_str_to_datetime(revision_ref) context["revision_date"] = date except TypeError, e: abort(400, _("Invalid revision format: %r") % e.args) except ValueError, e: abort(400, _("Invalid revision format: %r") % e.args)
def history(self, id): group_type = self._ensure_controller_matches_group_type(id) if 'diff' in request.params or 'selected1' in request.params: try: params = {'id': request.params.getone('group_name'), 'diff': request.params.getone('selected1'), 'oldid': request.params.getone('selected2'), } except KeyError: if 'group_name' in dict(request.params): id = request.params.getone('group_name') c.error = \ _('Select two revisions before doing the comparison.') else: params['diff_entity'] = 'group' h.redirect_to(controller='revision', action='diff', **params) context = {'model': model, 'session': model.Session, 'user': c.user, 'schema': self._db_to_form_schema()} data_dict = {'id': id} try: c.group_dict = self._action('group_show')(context, data_dict) c.group_revisions = self._action('group_revision_list')(context, data_dict) # TODO: remove # Still necessary for the authz check in group/layout.html c.group = context['group'] except (NotFound, NotAuthorized): abort(404, _('Group not found')) format = request.params.get('format', '') if format == 'atom': # Generate and return Atom 1.0 document. from webhelpers.feedgenerator import Atom1Feed feed = Atom1Feed( title=_(u'CKAN Group Revision History'), link=self._url_for_this_controller( action='read', id=c.group_dict['name']), description=_(u'Recent changes to CKAN Group: ') + c.group_dict['display_name'], language=unicode(get_lang()), ) for revision_dict in c.group_revisions: revision_date = h.date_str_to_datetime( revision_dict['timestamp']) try: dayHorizon = int(request.params.get('days')) except: dayHorizon = 30 dayAge = (datetime.datetime.now() - revision_date).days if dayAge >= dayHorizon: break if revision_dict['message']: item_title = u'%s' % revision_dict['message'].\ split('\n')[0] else: item_title = u'%s' % revision_dict['id'] item_link = h.url_for(controller='revision', action='read', id=revision_dict['id']) item_description = _('Log message: ') item_description += '%s' % (revision_dict['message'] or '') item_author_name = revision_dict['author'] item_pubdate = revision_date feed.add_item( title=item_title, link=item_link, description=item_description, author_name=item_author_name, pubdate=item_pubdate, ) feed.content_type = 'application/atom+xml' return feed.writeString('utf-8') return render(self._history_template(group_type), extra_vars={'group_type': group_type})
def date_str_to_datetime_or_none(date_str):
    from ckan.lib.helpers import date_str_to_datetime
    if date_str:
        return date_str_to_datetime(date_str)
    return None
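# A hedged usage note (an assumption, not from the original source): the helper above
# is a thin guard around CKAN's date_str_to_datetime so callers can pass empty or
# missing values straight through instead of handling the error themselves.
def _date_str_to_datetime_or_none_examples():
    assert date_str_to_datetime_or_none(None) is None
    assert date_str_to_datetime_or_none('') is None
    assert date_str_to_datetime_or_none('2016-05-28T00:00:00').year == 2016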