def image_upload(self):
    filedata = request.POST.get('file-0')
    data_dict = request.POST.get('dict')
    resource_id = request.POST.get('resource_id')
    context = {'model': model, 'session': model.Session, 'user': c.user}
    try:
        check_access('resource_view_update', context, {'id': resource_id})
    except NotAuthorized:
        abort(403, _('Unauthorized to upload image'))

    data_dict = {}
    data_dict['file'] = filedata
    data_dict['image_url'] = ''
    data_dict['clear_upload'] = ''

    upload = uploader.get_uploader('gallery/' + resource_id + "/")
    try:
        upload.update_data_dict(data_dict, 'image_url', 'file', 'clear_upload')
        upload.upload()
        return data_dict['image_url']
    except ValidationError:
        pass

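# The snippets in this collection all follow the same CKAN upload flow; the
# function below is a minimal sketch of that flow, assuming CKAN >= 2.5 where
# ckan.lib.uploader.get_uploader() is available. The directory name and the
# 'image_url'/'image_upload'/'clear_upload' field names are illustrative only,
# not tied to any one extension.
import ckan.lib.uploader as uploader


def save_uploaded_image(data_dict, subdirectory='example_images'):
    # get_uploader() returns the configured Upload implementation: the default
    # filesystem uploader, or whatever an IUploader plugin provides.
    upload = uploader.get_uploader(subdirectory)
    # Pull the file object out of the form fields and pick the stored filename.
    upload.update_data_dict(data_dict, 'image_url', 'image_upload', 'clear_upload')
    # Write the file; get_max_image_size() reads ckan.max_image_size (in MB).
    upload.upload(uploader.get_max_image_size())
    # update_data_dict() rewrote 'image_url' to the stored filename.
    return data_dict.get('image_url')
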
def showcase_create(context, data_dict):
    '''Upload the image and continue with package creation.'''
    # force type to 'showcase'
    data_dict['type'] = 'showcase'

    # If get_uploader is available (introduced for IUploader in CKAN 2.5), use
    # it, otherwise use the default uploader.
    # https://github.com/ckan/ckan/pull/2510
    try:
        upload = uploader.get_uploader('showcase')
    except AttributeError:
        upload = uploader.Upload('showcase')

    # schema images
    imgs = ['icon', 'featured_image', 'image_1', 'image_2', 'image_3']
    for image in imgs:
        if data_dict[image]:
            upload.update_data_dict(data_dict, image,
                                    image + '_upload',
                                    'clear_' + image + '_upload')
            upload.upload(uploader.get_max_image_size())

    pkg = toolkit.get_action('package_create')(context, data_dict)

    return pkg

def image_uploaded():
    '''View function to handle uploading arbitrary images for the home page.

    Passes `'image_url': 'submitted-image-path'` to template/view function.
    '''
    data_dict = {}
    image_url = ''
    try:
        # Cleanup the data_dict for the uploader.
        req = request.form.copy()
        req.update(request.files.to_dict())
        data_dict = logic.clean_dict(
            dict_fns.unflatten(
                logic.tuplize_dict(
                    logic.parse_params(req, ignore_keys=CACHE_PARAMETERS))))

        # Upload the image.
        upload = uploader.get_uploader('home')
        upload.update_data_dict(data_dict, 'image_url',
                                'image_upload', 'clear_upload')
        upload.upload(uploader.get_max_image_size())

        # Build and return the image url.
        for key, value in data_dict.iteritems():
            if key == 'image_url' and value and not value.startswith('http')\
                    and not value.startswith('/'):
                image_path = 'uploads/home/'
                value = h.url_for_static('{0}{1}'.format(image_path, value))
                image_url = value
    except Exception as e:
        log.error(e)

    return h.redirect_to(u'ontario_theme.image_uploader',
                         image_url=image_url)

def showcase_create(context, data_dict):
    '''Upload the image and continue with package creation.'''
    # force type to 'showcase'
    data_dict['type'] = 'showcase'

    # If get_uploader is available (introduced for IUploader in CKAN 2.5), use
    # it, otherwise use the default uploader.
    # https://github.com/ckan/ckan/pull/2510
    try:
        upload = uploader.get_uploader('showcase')
    except AttributeError:
        upload = uploader.Upload('showcase')

    if 'image_upload' in data_dict:
        # mimetype is needed before uploading to AWS S3
        upload.mimetype = getattr(data_dict['image_upload'], 'type',
                                  'application/octet-stream')

    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    pkg = toolkit.get_action('package_create')(context, data_dict)

    return pkg

def manage_footer_logos(self):
    data = dict(toolkit.request.POST)

    if 'save' in data:
        try:
            del data['save']

            upload = uploader.get_uploader('admin')

            # Upload footer logos
            for i in range(1, 6):
                upload.update_data_dict(data,
                                        'footer_logo_{0}_image_url'.format(i),
                                        'footer_logo_{0}_upload'.format(i),
                                        'clear_footer_logo_{0}_upload'.format(i))
                upload.upload(uploader.get_max_image_size())

            data = toolkit.get_action('config_option_update')({}, data)
        except toolkit.ValidationError, e:
            errors = e.error_dict
            error_summary = e.error_summary
            vars = {'data': data, 'errors': errors,
                    'error_summary': error_summary}

            return toolkit.render('admin/tayside_manage_footer_logos.html',
                                  extra_vars=vars)

    ctrl = 'ckanext.tayside.controllers.admin:AdminController'

    toolkit.redirect_to(controller=ctrl, action='manage_footer_logos')

def upload_archived_resource(resource_id_dir, filename, saved_file):
    '''
    Uploads the resources to s3filestore in directory
    <S3FILESTORE__AWS_BUCKET_NAME>/<S3FILESTORE__AWS_STORAGE_PATH>/archived_resources/
    '''
    storage_path = config.get('ckanext.s3filestore.aws_storage_path')
    if not storage_path:
        log.warning('Not saved to filestore because no value for '
                    'ckanext.s3filestore.aws_storage_path in config')
        raise ArchiveError(
            _('No value for ckanext.s3filestore.aws_storage_path in config'))

    with open(saved_file, 'rb') as save_file:
        upload = uploader.get_uploader('archived_resources')
        upload.upload_file = save_file
        upload.filename = filename
        upload.filepath = os.path.join(storage_path, 'archived_resources',
                                       resource_id_dir, filename)
        upload.id = filename
        upload.clear = False
        upload.upload(uploader.get_max_resource_size())

    return upload, upload.filepath

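# A hedged usage sketch for upload_archived_resource() above. The temporary
# file, the payload and the value passed as resource_id_dir are assumptions
# made for illustration only; the helper itself just needs its three arguments
# to name an existing file on disk.
import os
import tempfile


def archive_temp_copy(resource_id, payload):
    # Write the payload to a temporary file, then hand it to the helper.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp:
        tmp.write(payload)
        saved_file = tmp.name
    try:
        upload, filepath = upload_archived_resource(
            resource_id_dir=resource_id,
            filename=os.path.basename(saved_file),
            saved_file=saved_file)
        return filepath
    finally:
        os.remove(saved_file)
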
def pages_upload(context, data_dict):
    try:
        p.toolkit.check_access('ckanext_pages_upload', context, data_dict)
    except p.toolkit.NotAuthorized:
        p.toolkit.abort(401, p.toolkit._('Not authorized to see this page'))

    if p.toolkit.check_ckan_version(min_version='2.5'):
        upload = uploader.get_uploader('page_images')
    else:
        upload = uploader.Upload('page_images')

    upload.update_data_dict(data_dict, 'image_url', 'upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())
    image_url = data_dict.get('image_url')

    if not image_mime_type_validator(upload.filepath):
        raise ValidationError(tk._('You can upload image file only'))

    if image_url:
        image_url = h.url_for_static('uploads/page_images/%s' % image_url,
                                     qualified=True)

    return image_url

def organogram_admin(self):
    if toolkit.request.method == 'POST':
        data = dict(toolkit.request.POST)

        if isinstance(data.get('organogram_file_upload'), cgi.FieldStorage):
            upload = uploader.get_uploader('organogram',
                                           data['organogram_file_url'])
            upload.update_data_dict(data,
                                    'organogram_file_url',
                                    'organogram_file_upload',
                                    'clear_upload')
            upload.upload(uploader.get_max_image_size())
            organogram_file_url = upload.filename
        else:
            organogram_file_url = data.get('organogram_file_url')

        toolkit.get_action('config_option_update')({}, {
            'ckanext.organogram.file_url': organogram_file_url
        })

    organogram_file_url = toolkit.get_action('config_option_show')({}, {
        'key': 'ckanext.organogram.file_url'
    })

    extra_vars = {
        'data': {
            'organogram_file_url': organogram_file_url
        },
        'errors': {}
    }

    return toolkit.render('admin/organogram_config.html', extra_vars)

def querytool_visualizations_update(context, data_dict):
    '''
    Create new query tool visualizations

    :param name
    :param visualizations
    :param
    '''
    session = context['session']

    # data, errors = df.validate(data_dict, schema.querytool_schema(),
    #                            context)
    # if errors:
    #     raise toolkit.ValidationError(errors)

    querytool = CkanextQueryTool.get(name=data_dict['name'])
    visualizations = CkanextQueryToolVisualizations.get(name=data_dict['name'])

    images = []
    if visualizations:
        items = json.loads(visualizations.visualizations)
        for image in items:
            if image['type'] == 'image':
                images.append(image)

    new_items = json.loads(data_dict['visualizations'])
    if new_items:
        new_images = []
        for new in new_items:
            if new['type'] == 'image':
                new_images.append(new['url'])

        if new_images or images:
            for old in images:
                old_img_url = old['url']
                if old_img_url not in new_images:
                    upload = uploader.get_uploader('vs', old_img_url)
                    new_data = {
                        'image_url': old_img_url,
                        'image_upload': 'true',
                        'clear_upload': 'true'
                    }
                    upload.update_data_dict(new_data, 'image_url',
                                            'image_upload', 'clear_upload')
                    # cap the upload at the configured max image size
                    upload.upload(uploader.get_max_image_size())

    if not visualizations:
        visualizations = CkanextQueryToolVisualizations()

    visualizations.name = data_dict['name']
    visualizations.visualizations = data_dict['visualizations']
    visualizations.y_axis_column = data_dict['y_axis_column']
    visualizations.ckanext_querytool_id = querytool.id
    visualizations.save()
    session.add(visualizations)
    session.commit()

def showcase_update(context, data_dict):
    upload = uploader.get_uploader('showcase', data_dict['image_url'])
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    pkg = toolkit.get_action('package_update')(context, data_dict)

    return pkg

def archive_download(id, resource_id, filename=None):
    """
    Provides a direct download by either redirecting the user to the url
    stored or downloading an uploaded file directly.
    """
    context = {
        'model': model,
        'session': model.Session,
        'user': c.user,
        'auth_user_obj': c.userobj
    }

    try:
        resource = toolkit.get_action('resource_show')(context, {
            'id': resource_id
        })
        # Quick auth check to ensure you can access this resource
        toolkit.check_access('package_show', context, {'id': id})
    except (toolkit.ObjectNotFound, toolkit.NotAuthorized):
        return toolkit.abort(404, _('Resource not found'))

    # Archived files are only links not uploads
    if resource.get('url_type') != 'upload':
        # Return the key used for this resource in storage.
        #
        # Keys are in the form:
        # <uploaderpath>/<upload_to>/<2 char from resource id >/<resource id>/<filename>
        #
        # e.g.:
        # my_storage_path/archive/16/165900ba-3c60-43c5-9e9c-9f8acd0aa93f/data.csv
        relative_archive_path = os.path.join(resource['id'][:2],
                                             resource['id'])

        # try to get a file name from the url
        parsed_url = urlparse.urlparse(resource.get('url'))
        try:
            file_name = parsed_url.path.split('/')[-1] or 'resource'
            file_name = file_name.strip()  # trailing spaces cause problems
            file_name = file_name.encode('ascii', 'ignore')  # e.g. u'\xa3' signs
        except Exception:
            file_name = "resource"

        try:
            upload = uploader.get_uploader(
                os.path.join('archive', relative_archive_path))
            return upload.download(file_name)
        except OSError:
            # includes FileNotFoundError
            return toolkit.abort(404, _('Resource data not found'))

    return toolkit.abort(404, _('No download is available'))

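# A quick illustration of the archive key layout documented in the comments
# above; the resource id comes from the docstring example, and the rest is the
# same os.path.join() arithmetic the view performs.
import os


def archive_key(resource_id, file_name):
    # <upload_to>/<2 chars of resource id>/<resource id>/<filename>
    return os.path.join('archive', resource_id[:2], resource_id, file_name)


# archive_key('165900ba-3c60-43c5-9e9c-9f8acd0aa93f', 'data.csv')
# -> 'archive/16/165900ba-3c60-43c5-9e9c-9f8acd0aa93f/data.csv'
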
def validate_banner_image(key, flattened_data, errors, context):
    """
    Validates banner image, save the file and update the field.
    """
    # Get previous file, so we can remove it.
    banner_image = toolkit.config.get('ckanext.qdes.banner_image', '') or ''

    # Upload image.
    upload = uploader.get_uploader('qdes-admin', banner_image)
    upload.update_data_dict(flattened_data,
                            ('ckanext.qdes.banner_image', ),
                            ('banner_image_upload', ),
                            ('clear_banner_image_upload', ))
    upload.upload(uploader.get_max_image_size())

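# A sketch of how a validator with this (key, flattened_data, errors, context)
# signature could be exposed to CKAN, assuming the extension registers it via
# the IValidators interface; the plugin class name here is hypothetical.
import ckan.plugins as plugins


class QdesAdminConfigPlugin(plugins.SingletonPlugin):
    plugins.implements(plugins.IValidators)

    def get_validators(self):
        # Makes 'validate_banner_image' usable by name in a schema.
        return {'validate_banner_image': validate_banner_image}
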
def showcase_create(context, data_dict):
    '''Upload the image and continue with package creation.'''
    # force type to 'showcase'
    data_dict['type'] = 'showcase'

    upload = uploader.get_uploader('showcase')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    pkg = toolkit.get_action('package_create')(context, data_dict)

    return pkg

def _upload_chart_icon(chart_type, data):
    if '{0}_chart_upload'.format(chart_type) in data:
        image_upload = data.get('{0}_chart_upload'.format(chart_type))

        if isinstance(image_upload, ALLOWED_UPLOAD_TYPES):
            upload = uploader.get_uploader(
                'chart_icons', data.get('{0}_chart'.format(chart_type)))
            upload.update_data_dict(
                data,
                '{0}_chart'.format(chart_type),
                '{0}_chart_upload'.format(chart_type),
                '{0}_chart_clear_upload'.format(chart_type))
            upload.upload(uploader.get_max_image_size())

            return upload.filename
        else:
            return data.get('{0}_chart'.format(chart_type))

def showcase_upload(context, data_dict):
    ''' Uploads images to be used in showcase content. '''
    toolkit.check_access('ckanext_showcase_upload', context, data_dict)

    upload = uploader.get_uploader('showcase_image')
    upload.update_data_dict(data_dict, 'image_url', 'upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    image_url = data_dict.get('image_url')
    if image_url and image_url[0:6] not in {'http:/', 'https:'}:
        image_url = h.url_for_static(
            'uploads/showcase_image/{}'.format(image_url),
            qualified=True)

    return {'url': image_url}

def experience_update(context, data_dict):
    # If get_uploader is available (introduced for IUploader in CKAN 2.5), use
    # it, otherwise use the default uploader.
    # https://github.com/ckan/ckan/pull/2510
    try:
        upload = uploader.get_uploader('experience', data_dict['image_url'])
    except AttributeError:
        upload = uploader.Upload('experience', data_dict['image_url'])

    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    pkg = toolkit.get_action('package_update')(context, data_dict)

    return pkg

def pages_upload(context, data_dict):
    try:
        p.toolkit.check_access('ckanext_pages_upload', context, data_dict)
    except p.toolkit.NotAuthorized:
        p.toolkit.abort(401, p.toolkit._('Not authorized to see this page'))

    if p.toolkit.check_ckan_version(min_version='2.5'):
        upload = uploader.get_uploader('page_images')
    else:
        upload = uploader.Upload('page_images')

    upload.update_data_dict(data_dict, 'image_url', 'upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())
    image_url = data_dict.get('image_url')
    if image_url:
        image_url = h.url_for_static('uploads/page_images/%s' % image_url,
                                     qualified=True)

    return {'url': image_url}

def showcase_update(context, data_dict):
    # If get_uploader is available (introduced for IUploader in CKAN 2.5), use
    # it, otherwise use the default uploader.
    # https://github.com/ckan/ckan/pull/2510
    try:
        upload = uploader.get_uploader('showcase', data_dict['image_url'])
    except AttributeError:
        upload = uploader.Upload('showcase', data_dict['image_url'])

    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    pkg = toolkit.get_action('package_update')(context, data_dict)

    return pkg

def config_option_update(context, data_dict):
    # https://github.com/ckan/ckan/blob/master/ckan/logic/action/update.py#L1198

    # Handle featured image
    if 'ckanext.lacounts.featured_image' in data_dict:
        upload = uploader.get_uploader('admin')
        upload.update_data_dict(data_dict,
                                'ckanext.lacounts.featured_image',
                                'featured_image_upload',
                                'clear_featured_image_upload')
        upload.upload(uploader.get_max_image_size())
        value = data_dict['ckanext.lacounts.featured_image']
        if value \
                and not value.startswith('http') \
                and not value.startswith('/'):
            image_path = 'uploads/admin/'
            value = h.url_for_static('{0}{1}'.format(image_path, value))
        data_dict['ckanext.lacounts.featured_image'] = value

    return update_core.config_option_update(context, data_dict)

def showcase_upload(context, data_dict):
    ''' Uploads images to be used in showcase content. '''
    toolkit.check_access('ckanext_showcase_upload', context, data_dict)

    try:
        upload = uploader.get_uploader('showcase_image')
    except AttributeError:
        upload = uploader.Upload('showcase_image')

    upload.update_data_dict(data_dict, 'image_url', 'upload', 'clear_upload')
    upload.upload(uploader.get_max_image_size())

    image_url = data_dict.get('image_url')
    if image_url:
        image_url = h.url_for_static(
            'uploads/showcase_image/{}'.format(image_url),
            qualified=True)

    return {'url': image_url}

def app_add(self):
    if c.userobj is None:
        tk.redirect_to(
            tk.url_for(controller='user', action='login',
                       came_from=full_current_url()))

    form = CreateAppForm(tk.request.POST)
    data_dict = clean_dict(
        dict_fns.unflatten(tuplize_dict(parse_params(tk.request.params))))
    upload = uploader.get_uploader('apps')

    if tk.request.POST:
        if form.validate():
            # Upload image
            upload.update_data_dict(data_dict, 'image_url',
                                    'image_upload', 'clear_upload')
            try:
                upload.upload(uploader.get_max_image_size())
            except logic.ValidationError as err:
                flash_error(err.error_dict['image_upload'][0])
            else:
                app = App()
                form.populate_obj(app)
                app.author_id = c.userobj.id
                app.content = strip_tags(app.content)
                app.status = "pending"
                app.image_url = data_dict.get('image_url')
                app.save()
                log.debug("App data is valid. Content: %s",
                          strip_tags(app.name))
                flash_success(tk._('You successfully create app'))
                jobs.enqueue(
                    send_notifications_on_change_app_status,
                    [app, 'pending', tk.request.environ.get('CKAN_LANG')])
                tk.redirect_to(app.get_absolute_url())
        else:
            flash_error(tk._('You have errors in form'))
            log.info("Validate errors: %s", form.errors)

    context = {'form': form, 'active_boards': Board.filter_active()}
    log.debug('ForumController.thread_add context: %s', context)

    return self.__render('create_app.html', context)

def _upload_authority_file(data_dict, is_required=False):
    if is_required and data_dict.get('authority_file_url') == '':
        raise ValidationError({_('authority'): [_('Missing value')]})

    if is_flask_request():
        authority_file_upload = request.files.get('authority_file_upload')
        is_upload = authority_file_upload
    else:
        authority_file_upload = data_dict.get('authority_file_upload')
        is_upload = isinstance(authority_file_upload, cgi.FieldStorage)

    if is_upload:
        max_authority_size = \
            int(config.get('ckanext.datagovmk.authority_file_max_size', 10))
        data_dict['authority_file_upload'] = authority_file_upload
        upload = uploader.get_uploader(
            'authorities',
            data_dict['authority_file_url']
        )
        upload.update_data_dict(
            data_dict,
            'authority_file_url',
            'authority_file_upload',
            'clear_upload'
        )
        try:
            upload.upload(max_size=max_authority_size)
        except toolkit.ValidationError:
            data_dict['authority_file_url'] = authority_file_upload.filename
            raise ValidationError({_('authority'): [_('Uploaded authority file is too large. Maximum allowed size is {size}MB.').format(size=max_authority_size)]})

        authority_file = upload.filename
        data_dict['authority_file_url'] = authority_file
    else:
        authority_file = data_dict.get('authority_file_url')

    return authority_file

def pages_upload(context, data_dict):
    try:
        p.toolkit.check_access('ckanext_pages_upload', context, data_dict)
    except p.toolkit.NotAuthorized:
        p.toolkit.abort(401, p.toolkit._('Not authorized to see this page'))

    if p.toolkit.check_ckan_version(min_version='2.5'):
        upload = uploader.get_uploader('page_images')
    else:
        upload = uploader.Upload('page_images')

    upload.update_data_dict(data_dict, 'image_url', 'upload', 'clear_upload')
    upload.upload()
    image_url = data_dict.get('image_url')
    if image_url:
        image_url = h.url_for_static(
            'uploads/page_images/%s' % image_url,
            qualified=True
        )

    return {'url': image_url}

def _group_or_org_update(context, data_dict, is_org=False):
    model = context['model']
    user = context['user']
    session = context['session']
    id = _get_or_bust(data_dict, 'id')

    group = model.Group.get(id)
    context["group"] = group
    if group is None:
        raise NotFound('Group was not found.')

    data_dict['type'] = group.type

    # get the schema
    group_plugin = lib_plugins.lookup_group_plugin(group.type)
    try:
        schema = group_plugin.form_to_db_schema_options({
            'type': 'update',
            'api': 'api_version' in context,
            'context': context
        })
    except AttributeError:
        schema = group_plugin.form_to_db_schema()

    upload = uploader.get_uploader('group', group.image_url)
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')

    if is_org:
        _check_access('organization_update', context, data_dict)
    else:
        _check_access('group_update', context, data_dict)

    if 'api_version' not in context:
        # old plugins do not support passing the schema so we need
        # to ensure they still work
        try:
            group_plugin.check_data_dict(data_dict, schema)
        except TypeError:
            group_plugin.check_data_dict(data_dict)

    data, errors = lib_plugins.plugin_validate(
        group_plugin, context, data_dict, schema,
        'organization_update' if is_org else 'group_update')
    log.debug('group_update validate_errs=%r user=%s group=%s data_dict=%r',
              errors, context.get('user'),
              context.get('group').name if context.get('group') else '',
              data_dict)

    if errors:
        session.rollback()
        raise ValidationError(errors)

    rev = model.repo.new_revision()
    rev.author = user

    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Update object %s') % data.get("name")

    group = model_save.group_dict_save(data, context,
                                       prevent_packages_update=is_org)

    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController

    for item in plugins.PluginImplementations(plugin_type):
        item.edit(group)

    if is_org:
        activity_type = 'changed organization'
    else:
        activity_type = 'changed group'

    activity_dict = {
        'user_id': model.User.by_name(user.decode('utf8')).id,
        'object_id': group.id,
        'activity_type': activity_type,
    }
    # Handle 'deleted' groups.
    # When the user marks a group as deleted this comes through here as
    # a 'changed' group activity. We detect this and change it to a 'deleted'
    # activity.
    if group.state == u'deleted':
        if session.query(ckan.model.Activity).filter_by(
                object_id=group.id, activity_type='deleted').all():
            # A 'deleted group' activity for this group has already been
            # emitted.
            # FIXME: What if the group was deleted and then activated again?
            activity_dict = None
        else:
            # We will emit a 'deleted group' activity.
            activity_dict['activity_type'] = 'deleted group'

    if activity_dict is not None:
        activity_dict['data'] = {
            'group': dictization.table_dictize(group, context)
        }
        activity_create_context = {
            'model': model,
            'user': user,
            'defer_commit': True,
            'ignore_auth': True,
            'session': session
        }
        _get_action('activity_create')(activity_create_context, activity_dict)
        # TODO: Also create an activity detail recording what exactly changed
        # in the group.

    upload.upload(uploader.get_max_image_size())

    if not context.get('defer_commit'):
        model.repo.commit()

    return model_dictize.group_dictize(group, context)

def orgportals_subdashboards_edit(self, org_name, subdashboard=None,
                                  data=None, errors=None, error_summary=None):
    if subdashboard:
        subdashboard = subdashboard[1:]

    data_dict = {
        'org_name': org_name,
        'subdashboard_name': subdashboard
    }

    _subdashboard = get_action('orgportals_subdashboards_show')({}, data_dict)

    if _subdashboard is None and len(subdashboard) > 0:
        p.toolkit.abort(404, _('Subdashboard not found.'))

    if _subdashboard is None:
        _subdashboard = {}

    if p.toolkit.request.method == 'POST' and not data:
        data = dict(p.toolkit.request.POST)

        media_items = []

        for k, v in data.items():
            item = {}

            if k.startswith('media_type'):
                id = k.split('_')[-1]

                if data['media_type_{}'.format(id)] == 'chart':
                    item['order'] = int(id)
                    item['media_type'] = data['media_type_{}'.format(id)]
                    item['media_size'] = data['media_size_{}'.format(id)]
                    item['chart_resourceview'] = data['chart_resourceview_{}'.format(id)]
                    item['chart_subheader'] = data['chart_subheader_{}'.format(id)]

                    media_items.append(item)
                elif data['media_type_{}'.format(id)] == 'youtube_video':
                    item['order'] = int(id)
                    item['media_type'] = data['media_type_{}'.format(id)]
                    item['video_source'] = data['video_source_{}'.format(id)]
                    item['video_title'] = data['video_title_{}'.format(id)]
                    item['video_size'] = data['video_size_{}'.format(id)]
                    item['video_title_url'] = data.get(
                        'video_title_url_{}'.format(id), item['video_source'])

                    media_items.append(item)
                elif data['media_type_{}'.format(id)] == 'image':
                    item['order'] = int(id)
                    item['media_type'] = data['media_type_{}'.format(id)]
                    item['image_title'] = data['media_image_title_{}'.format(id)]
                    item['image_size'] = data.get(
                        'media_image_size_{}'.format(id), 'single')
                    item['image_title_url'] = data.get(
                        'media_image_title_url_{}'.format(id), '')

                    image_url = data['media_image_url_{}'.format(id)]

                    # Upload images for topics
                    if h.uploads_enabled():
                        image_upload = data['media_image_upload_{}'.format(id)]
                        if isinstance(image_upload, cgi.FieldStorage):
                            upload = uploader.get_uploader('portal', image_url)
                            upload.update_data_dict(
                                data,
                                'media_image_url_{}'.format(id),
                                'media_image_upload_{}'.format(id),
                                'image_clear_upload_{}'.format(id))
                            upload.upload(uploader.get_max_image_size())
                            image_url = upload.filename

                    item['image_url'] = image_url

                    media_items.append(item)

        _subdashboard['media'] = json.dumps(media_items)

        _subdashboard['map'] = []
        _subdashboard['map_main_property'] = []

        for k, v in sorted(data.items()):
            if k.startswith('map_main_property'):
                _subdashboard['map_main_property'].append(v)
            elif k.startswith('map_') and not k.startswith('map_enabled'):
                _subdashboard['map'].append(v)

        _subdashboard['map'] = ';'.join(_subdashboard['map'])
        _subdashboard['map_main_property'] = ';'.join(
            _subdashboard['map_main_property'])

        _subdashboard.update(data)
        _subdashboard['org_name'] = org_name
        _subdashboard['subdashboard_name'] = subdashboard

        try:
            junk = p.toolkit.get_action('orgportals_subdashboards_update')(
                data_dict=_subdashboard
            )
        except p.toolkit.ValidationError, e:
            errors = e.error_dict
            error_summary = e.error_summary
            return self.orgportals_subdashboards_edit(
                org_name, '/' + subdashboard, data, errors, error_summary)

        p.toolkit.redirect_to(p.toolkit.url_for(
            'orgportals_subdashboards_index', org_name=org_name))

def config_option_update(context, data_dict):
    '''
    .. versionadded:: 2.4

    Allows to modify some CKAN runtime-editable config options

    It takes arbitrary key, value pairs and checks the keys against the
    config options update schema. If some of the provided keys are not present
    in the schema a :py:class:`~ckan.plugins.logic.ValidationError` is raised.

    The values are then validated against the schema, and if validation is
    passed, for each key, value config option:

    * It is stored on the ``system_info`` database table
    * The Pylons ``config`` object is updated.
    * The ``app_globals`` (``g``) object is updated (this only happens for
      options explicitly defined in the ``app_globals`` module.

    The following lists a ``key`` parameter, but this should be replaced by
    whichever config options want to be updated, eg::

        get_action('config_option_update')({}, {
            'ckan.site_title': 'My Open Data site',
            'ckan.homepage_layout': 2,
        })

    :param key: a configuration option key (eg ``ckan.site_title``). It must
        be present on the ``update_configuration_schema``
    :type key: string

    :returns: a dictionary with the options set
    :rtype: dictionary

    .. note:: You can see all available runtime-editable configuration options
        calling the :py:func:`~ckan.logic.action.get.config_option_list`
        action

    .. note:: Extensions can modify which configuration options are
        runtime-editable.
        For details, check :doc:`/extensions/remote-config-update`.

    .. warning:: You should only add config options that you are comfortable
        they can be edited during runtime, such as ones you've added in your
        own extension, or have reviewed the use of in core CKAN.

    '''
    model = context['model']

    _check_access('config_option_update', context, data_dict)

    schema = schema_.update_configuration_schema()

    available_options = schema.keys()

    provided_options = data_dict.keys()

    unsupported_options = set(provided_options) - set(available_options)

    if unsupported_options:
        msg = 'Configuration option(s) \'{0}\' can not be updated'.format(
            ' '.join(list(unsupported_options)))

        raise ValidationError(msg, error_summary={'message': msg})

    upload = uploader.get_uploader('admin')
    upload.update_data_dict(data_dict, 'ckan.site_logo',
                            'logo_upload', 'clear_logo_upload')
    upload.upload(uploader.get_max_image_size())

    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    for key, value in data.iteritems():

        # Set full Logo url
        if key == 'ckan.site_logo' and value and not value.startswith('http'):
            value = h.url_for_static('uploads/admin/{0}'.format(value))

        # Save value in database
        model.set_system_info(key, value)

        # Update CKAN's `config` object
        config[key] = value

        # Only add it to the app_globals (`g`) object if explicitly defined
        # there
        globals_keys = app_globals.app_globals_from_config_details.keys()
        if key in globals_keys:
            app_globals.set_app_global(key, value)

    # Update the config update timestamp
    model.set_system_info('ckan.config_update', str(time.time()))

    log.info('Updated config options: {0}'.format(data))

    return data

def edit_visualizations(self, querytool=None, data=None,
                        errors=None, error_summary=None):
    '''
    Create or edit visualizations for the querytool

    :return: query edit template page
    '''
    if querytool:
        querytool = querytool[1:]

    data_dict = {
        'name': querytool
    }

    context = _get_context()

    try:
        check_access('querytool_update', context, data_dict)
    except NotAuthorized:
        abort(403, _('Not authorized to see this page'))

    _querytool = _get_action('querytool_get', data_dict)

    if _querytool is None and len(querytool) > 0:
        abort(404, _('Report not found.'))

    # Check if the data for this querytool still exists
    if _querytool['dataset_name']:
        try:
            _get_action('package_show',
                        {'id': _querytool['dataset_name']})
        except NotFound:
            abort(404, _('The data used for creating this '
                         'report has been removed by '
                         'the administrator.'))

    _visualization_items = \
        _get_action('querytool_get_visualizations', data_dict)

    if _visualization_items is None:
        _visualization_items = {
            'name': querytool
        }

    if toolkit.request.method == 'POST' and not data:
        data = dict(toolkit.request.POST)
        visualizations = []
        text_boxes = []
        images = []
        maps = []
        tables = []
        break_lines = []

        for k, v in data.items():
            '''
            TODO: save visualizations with key value e.g {'charts': [],
            'images': []} for easier iteration
            '''
            if k.startswith('chart_field_graph_'):
                visualization = {}
                id = k.split('_')[-1]
                visualization['type'] = 'chart'
                visualization['order'] = int(id)
                visualization['graph'] = \
                    data.get('chart_field_graph_{}'.format(id))
                visualization['x_axis'] = \
                    data.get('chart_field_axis_x_{}'.format(id))
                visualization['y_axis'] = \
                    data.get('chart_field_axis_y_{}'.format(id))
                visualization['color'] = \
                    data.get('chart_field_color_{}'.format(id))
                visualization['color_type'] = \
                    data.get('chart_field_color_type_{}'.format(id))
                visualization['seq_color'] = \
                    data.get('chart_field_seq_color_{}'.format(id))
                visualization['title'] = \
                    data.get('chart_field_title_{}'.format(id))
                visualization['x_text_rotate'] = \
                    data.get('chart_field_x_text_rotate_{}'.format(id))
                visualization['tooltip_name'] = \
                    data.get('chart_field_tooltip_name_{}'.format(id))
                visualization['data_format'] = \
                    data.get('chart_field_data_format_{}'.format(id))
                visualization['y_tick_format'] = \
                    data.get('chart_field_y_ticks_format_{}'.format(id))
                visualization['x_tick_format'] = \
                    data.get('chart_field_x_ticks_format_{}'.format(id))
                visualization['padding_bottom'] = \
                    data.get('chart_field_padding_bottom_{}'.format(id))
                visualization['padding_top'] = \
                    data.get('chart_field_padding_top_{}'.format(id))
                visualization['tick_count'] = \
                    data.get('chart_field_tick_count_{}'.format(id))
                visualization['y_label'] = \
                    data.get('chart_field_y_label_{}'.format(id))
                visualization['x_label'] = \
                    data.get('chart_field_x_label_{}'.format(id))
                visualization['size'] = \
                    data.get('chart_field_size_{}'.format(id))
                # visualization['chart_padding_left'] = \
                #     data.get('chart_field_chart_padding_left_{}'.format(id))
                visualization['chart_padding_bottom'] = \
                    data.get('chart_field_chart_padding_bottom_{}'.format(id))
                visualization['static_reference_columns'] = \
                    toolkit.request.POST.getall(
                        'chart_field_static_reference_columns_%s' % id)
                visualization['static_reference_label'] = \
                    data.get('chart_field_static_reference_label_%s' % id)
                visualization['dynamic_reference_type'] = \
                    data.get('chart_field_dynamic_reference_type_%s' % id)
                visualization['dynamic_reference_factor'] = \
                    data.get('chart_field_dynamic_reference_factor_%s' % id)
                visualization['dynamic_reference_label'] = \
                    data.get('chart_field_dynamic_reference_label_%s' % id)
                visualization['sort'] = \
                    data.get('chart_field_sort_{}'.format(id))
                visualization['additional_description'] = \
                    data.get('chart_field_desc_{}'.format(id))
                visualization['plotly'] = \
                    data.get('chart_field_plotly_{}'.format(id))
                visualization['bar_width'] = \
                    data.get('chart_field_bar_width_{}'.format(id))
                visualization['donut_hole'] = \
                    data.get('chart_field_donut_hole_{}'.format(id))

                if 'chart_field_x_text_multiline_{}'.format(id) in data:
                    visualization['x_text_multiline'] = 'true'
                else:
                    visualization['x_text_multiline'] = 'false'

                visualization['x_tick_culling_max'] = \
                    data.get('chart_field_x_tick_culling_max_{}'.format(id))

                if 'chart_field_legend_{}'.format(id) in data:
                    visualization['show_legend'] = 'true'
                else:
                    visualization['show_legend'] = 'false'

                if 'chart_field_show_annotations_{}'.format(id) in data:
                    visualization['show_annotations'] = 'true'
                else:
                    visualization['show_annotations'] = 'false'

                if 'chart_field_labels_{}'.format(id) in data:
                    visualization['show_labels'] = 'true'
                else:
                    visualization['show_labels'] = 'false'

                if 'chart_field_y_label_hide_{}'.format(id) in data:
                    visualization['y_label_hide'] = 'true'
                else:
                    visualization['y_label_hide'] = 'false'

                if 'chart_field_x_label_hide_{}'.format(id) in data:
                    visualization['x_label_hide'] = 'true'
                else:
                    visualization['x_label_hide'] = 'false'

                if 'chart_field_show_labels_as_percentages_{}'.format(id) in data:
                    visualization['show_labels_as_percentages'] = 'true'
                else:
                    visualization['show_labels_as_percentages'] = 'false'

                if 'chart_field_y_from_zero_{}'.format(id) in data:
                    visualization['y_from_zero'] = 'true'
                else:
                    visualization['y_from_zero'] = 'false'

                if 'chart_field_x_from_zero_{}'.format(id) in data:
                    visualization['x_from_zero'] = 'true'
                else:
                    visualization['x_from_zero'] = 'false'

                if data['chart_field_filter_name_{}'.format(id)]:
                    visualization['filter_name'] = \
                        data['chart_field_filter_name_{}'.format(id)]
                    visualization['filter_value'] = \
                        data['chart_field_filter_value_{}'.format(id)]
                    visualization['filter_alias'] = \
                        data['chart_field_filter_alias_{}'.format(id)]
                    visualization['filter_visibility'] = \
                        data['chart_field_filter_visibility_{}'.format(id)]
                else:
                    visualization['filter_name'] = ''
                    visualization['filter_value'] = ''
                    visualization['filter_alias'] = ''
                    visualization['filter_visibility'] = ''

                if 'chart_field_category_name_{}'.format(id) in data:
                    visualization['category_name'] = \
                        data['chart_field_category_name_{}'.format(id)]
                else:
                    visualization['category_name'] = ''

                print data
                visualizations.append(visualization)

            if k.startswith('text_box_description_'):
                text_box = {}
                id = k.split('_')[-1]
                text_box['type'] = 'text_box'
                text_box['order'] = int(id)
                text_box['description'] = \
                    data['text_box_description_{}'.format(id)]
                text_box['column_width'] = \
                    data.get('text_box_column_width_{}'.format(id), 'Half')

                text_boxes.append(text_box)

            if k.startswith('line_break_'):
                break_line = {}
                id = k.split('_')[-1]
                break_line['type'] = 'break_line'
                break_line['order'] = int(id)

                break_lines.append(break_line)

            if k.startswith('media_image_url_'):
                image = {}
                id = k.split('_')[-1]
                image['type'] = 'image'
                image['order'] = int(id)
                image_url = data['media_image_url_{}'.format(id)]

                if h.uploads_enabled():
                    image_upload = data['media_image_upload_{}'.format(id)]
                    if isinstance(image_upload, cgi.FieldStorage):
                        upload = uploader.get_uploader('vs', image_url)
                        upload.update_data_dict(
                            data,
                            'media_image_url_{}'.format(id),
                            'media_image_upload_{}'.format(id),
                            'False')
                        # cap the upload at the configured max image size
                        upload.upload(uploader.get_max_image_size())
                        image_url = upload.filename

                image['url'] = image_url
                images.append(image)

            if k.startswith('map_resource_'):
                map_item = {}
                id = k.split('_')[-1]
                map_item['type'] = 'map'
                map_item['order'] = int(id)
                map_item['map_resource'] = \
                    data['map_resource_{}'.format(id)]
                map_item['map_title_field'] = \
                    data['map_title_field_{}'.format(id)]
                map_item['map_key_field'] = \
                    data['map_key_field_{}'.format(id)]
                map_item['data_key_field'] = \
                    data['map_data_key_field_{}'.format(id)]
                map_item['map_color_scheme'] = \
                    data['map_color_scheme_{}'.format(id)]
                map_item['size'] = \
                    data.get('map_size_{}'.format(id))

                if data.get('map_field_filter_name_{}'.format(id)):
                    map_item['filter_name'] = \
                        data['map_field_filter_name_{}'.format(id)]
                    map_item['filter_value'] = \
                        data['map_field_filter_value_{}'.format(id)]
                    map_item['filter_alias'] = \
                        data['map_field_filter_alias_{}'.format(id)]
                    map_item['filter_visibility'] = \
                        data['map_field_filter_visibility_{}'.format(id)]
                else:
                    map_item['filter_name'] = ''
                    map_item['filter_value'] = ''
                    map_item['filter_alias'] = ''
                    map_item['filter_visibility'] = ''

                maps.append(map_item)

            if k.startswith('table_field_title_'):
                table_item = {}
                id = k.split('_')[-1]
                table_item['type'] = 'table'
                table_item['order'] = int(id)
                table_item['y_axis'] = \
                    data['choose_y_axis_column']
                table_item['main_value'] = \
                    data['table_main_value_{}'.format(id)]
                table_item['title'] = \
                    data['table_field_title_{}'.format(id)]
                table_item['data_format'] = \
                    data['table_data_format_{}'.format(id)]

                if data['table_field_filter_name_{}'.format(id)]:
                    table_item['filter_name'] = \
                        data['table_field_filter_name_{}'.format(id)]
                    table_item['filter_value'] = \
                        data['table_field_filter_value_{}'.format(id)]
                    table_item['filter_alias'] = \
                        data['table_field_filter_alias_{}'.format(id)]
                    table_item['filter_visibility'] = \
                        data['table_field_filter_visibility_{}'.format(id)]
                else:
                    table_item['filter_name'] = ''
                    table_item['filter_value'] = ''
                    table_item['filter_alias'] = ''
                    table_item['filter_visibility'] = ''

                if data['table_category_name_{}'.format(id)]:
                    table_item['category_name'] = \
                        data['table_category_name_{}'.format(id)]
                else:
                    table_item['category_name'] = ''

                tables.append(table_item)

        vis = visualizations + text_boxes + images + maps + tables + break_lines
        _visualization_items['visualizations'] = json.dumps(vis)

        if 'choose_y_axis_column' in data:
            _visualization_items['y_axis_column'] = \
                data['choose_y_axis_column']
        else:
            _visualization_items['y_axis_column'] = ''

        try:
            junk = _get_action('querytool_visualizations_update',
                               _visualization_items)
            h.flash_success(_('Visualizations Successfully updated.'))
        except ValidationError, e:
            errors = e.error_dict
            error_summary = e.error_summary
            return self.querytool_edit('/' + querytool, data,
                                       errors, error_summary)

        if 'save-edit-data' in data.keys():
            # redirect to edit data
            url = h.url_for('querytool_edit',
                            querytool='/' + _querytool['name'])
        else:
            h.redirect_to('/' + h.lang() + '/group/' +
                          _querytool['group'] + '/reports')

        h.redirect_to(url)

def orgportals_pages_edit(self, org_name, page=None, data=None,
                          errors=None, error_summary=None):
    if page:
        page = page[1:]

    data_dict = {
        'org_name': org_name,
        'page_name': page
    }

    _page = get_action('orgportals_pages_show')({}, data_dict)

    if _page is None and len(page) > 0:
        p.toolkit.abort(404, _('Page not found.'))

    if _page is None:
        _page = {}

    if p.toolkit.request.method == 'POST' and not data:
        if 'type' not in _page:
            _page['type'] = 'custom'

        data = dict(p.toolkit.request.POST)

        # Upload images for portal pages
        if 'image_upload' in dict(p.toolkit.request.params):
            image_upload = dict(p.toolkit.request.params)['image_upload']
            if isinstance(image_upload, cgi.FieldStorage):
                upload = uploader.get_uploader('portal', data['image_url'])
                upload.update_data_dict(data, 'image_url',
                                        'image_upload', 'clear_upload')
                upload.upload(uploader.get_max_image_size())
                image_url = upload.filename
            else:
                image_url = data['image_url']
        else:
            image_url = None

        if 'type' in _page and _page['type'] == 'data':
            _page['map'] = []
            _page['map_main_property'] = []

            for k, v in sorted(data.items()):
                if k.startswith('map_main_property'):
                    _page['map_main_property'].append(v)
                elif k.startswith('map_') and not k.startswith('map_enabled'):
                    _page['map'].append(v)

            _page['map'] = ';'.join(_page['map'])
            _page['map_main_property'] = ';'.join(_page['map_main_property'])

        topics = []

        for k, v in data.items():
            item = {}

            if k.startswith('topic_title'):
                id = k[-1]
                item['title'] = data['topic_title_{}'.format(id)]
                item['enabled'] = data['topic_enabled_{}'.format(id)]
                item['subdashboard'] = data['topic_subdashboard_{}'.format(id)]
                item['order'] = data['topic_order_{}'.format(id)]

                image_url = data['topic_image_url_{}'.format(id)]

                # Upload images for topics
                if h.uploads_enabled():
                    image_upload = data['topic_image_upload_{}'.format(id)]
                    if isinstance(image_upload, cgi.FieldStorage):
                        upload = uploader.get_uploader('portal', image_url)
                        upload.update_data_dict(
                            data,
                            'topic_image_url_{}'.format(id),
                            'topic_image_upload_{}'.format(id),
                            'topic_clear_upload_{}'.format(id))
                        upload.upload(uploader.get_max_image_size())
                        image_url = upload.filename

                item['image_url'] = image_url

                topics.append(item)

        _page['topics'] = json.dumps(topics)

        _page.update(data)
        _page['org_name'] = org_name
        _page['id'] = org_name
        _page['page_name'] = page
        _page['image_url'] = image_url

        try:
            junk = p.toolkit.get_action('orgportals_pages_update')(
                {'user': p.toolkit.c.user or p.toolkit.c.author},
                data_dict=_page
            )
        except p.toolkit.ValidationError, e:
            errors = e.error_dict
            error_summary = e.error_summary
            return self.orgportals_pages_edit(org_name, '/' + page, data,
                                              errors, error_summary)

        p.toolkit.redirect_to(p.toolkit.url_for('orgportals_pages_index',
                                                org_name=org_name))

def querytool_update(context, data_dict):
    '''
    Create new query tool

    :param title
    :param description
    :param dataset
    :param filters
    :param created
    :param map_resource
    :param chart_resource
    :param y_axis_columns
    :param selection_label
    :param report_caption
    '''
    # we need the querytool name in the context for name validation
    context['querytool'] = data_dict['querytool']
    session = context['session']

    data, errors = df.validate(data_dict, schema.querytool_schema(),
                               context)

    if errors:
        raise toolkit.ValidationError(errors)

    querytool = CkanextQueryTool.get(name=data_dict['querytool'])

    visualizations = \
        CkanextQueryToolVisualizations.get(name=data_dict['querytool'])

    # if name is not changed don't insert in visualizations table
    is_changed = False
    if visualizations:
        is_changed = (querytool.name == visualizations.name)

    if visualizations and is_changed:
        visualizations.name = data.get('name')
        visualizations.save()
        session.add(visualizations)
        session.commit()

    if not querytool:
        querytool = CkanextQueryTool()

    items = [
        'title', 'description', 'name', 'private', 'type', 'group',
        'dataset_name', 'owner_org', 'icon', 'image_url',
        'image_display_url', 'filters', 'sql_string', 'related_querytools',
        'chart_resource', 'y_axis_columns', 'additional_description',
        'selection_label', 'report_caption'
    ]

    dataset_name = data.get('dataset_name')
    dataset = _get_action('package_show')(context, {'id': dataset_name})
    dataset['groups'] = [{'name': str(data.get('group'))}]
    _get_action('package_patch')(context, dataset)

    image_url = data_dict['image_url']

    if h.uploads_enabled():
        image_upload = data_dict['image_upload']
        if isinstance(image_upload, cgi.FieldStorage):
            upload = uploader.get_uploader('querytool', image_url)
            upload.update_data_dict(data_dict, 'image_url',
                                    'image_upload', 'clear_upload')
            # cap the upload at the configured max image size
            upload.upload(uploader.get_max_image_size())
            data_dict['image_display_url'] = upload.filename
            data['image_display_url'] = upload.filename
        else:
            data['image_display_url'] = querytool.image_display_url

    for item in items:
        setattr(querytool, item, data.get(item))

    querytool.modified = datetime.datetime.utcnow()
    querytool.save()
    session.add(querytool)
    session.commit()

def config_option_update(context, data_dict):
    '''
    .. versionadded:: 2.4

    Allows to modify some CKAN runtime-editable config options

    It takes arbitrary key, value pairs and checks the keys against the
    config options update schema. If some of the provided keys are not present
    in the schema a :py:class:`~ckan.plugins.logic.ValidationError` is raised.

    The values are then validated against the schema, and if validation is
    passed, for each key, value config option:

    * It is stored on the ``system_info`` database table
    * The Pylons ``config`` object is updated.
    * The ``app_globals`` (``g``) object is updated (this only happens for
      options explicitly defined in the ``app_globals`` module.

    The following lists a ``key`` parameter, but this should be replaced by
    whichever config options want to be updated, eg::

        get_action('config_option_update')({}, {
            'ckan.site_title': 'My Open Data site',
            'ckan.homepage_layout': 2,
        })

    :param key: a configuration option key (eg ``ckan.site_title``). It must
        be present on the ``update_configuration_schema``
    :type key: string

    :returns: a dictionary with the options set
    :rtype: dictionary

    .. note:: You can see all available runtime-editable configuration options
        calling the :py:func:`~ckan.logic.action.get.config_option_list`
        action

    .. note:: Extensions can modify which configuration options are
        runtime-editable.
        For details, check :doc:`/extensions/remote-config-update`.

    .. warning:: You should only add config options that you are comfortable
        they can be edited during runtime, such as ones you've added in your
        own extension, or have reviewed the use of in core CKAN.

    '''
    model = context['model']

    _check_access('config_option_update', context, data_dict)

    schema = schema_.update_configuration_schema()

    available_options = schema.keys()

    provided_options = data_dict.keys()

    unsupported_options = set(provided_options) - set(available_options)

    if unsupported_options:
        msg = 'Configuration option(s) \'{0}\' can not be updated'.format(
            ' '.join(list(unsupported_options)))

        raise ValidationError(msg, error_summary={'message': msg})

    upload = uploader.get_uploader('admin')
    upload.update_data_dict(data_dict, 'ckan.site_logo',
                            'logo_upload', 'clear_logo_upload')
    upload.upload(uploader.get_max_image_size())

    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    for key, value in data.iteritems():

        # Set full Logo url
        if key == 'ckan.site_logo' and value and not value.startswith('http')\
                and not value.startswith('/'):
            image_path = 'uploads/admin/'
            value = h.url_for_static('{0}{1}'.format(image_path, value))

        # Save value in database
        model.set_system_info(key, value)

        # Update CKAN's `config` object
        config[key] = value

        # Only add it to the app_globals (`g`) object if explicitly defined
        # there
        globals_keys = app_globals.app_globals_from_config_details.keys()
        if key in globals_keys:
            app_globals.set_app_global(key, value)

    # Update the config update timestamp
    model.set_system_info('ckan.config_update', str(time.time()))

    log.info('Updated config options: {0}'.format(data))

    return data

def user_update(context, data_dict):
    '''Update a user account.

    Normal users can only update their own user accounts. Sysadmins can
    update any user account. Can not modify existing user's name.

    .. note:: Update methods may delete parameters not explicitly provided in
        the data_dict. If you want to edit only a specific attribute use
        `user_patch` instead.

    For further parameters see
    :py:func:`~ckan.logic.action.create.user_create`.

    :param id: the name or id of the user to update
    :type id: string

    :returns: the updated user account
    :rtype: dictionary

    '''
    model = context['model']
    user = author = context['user']
    session = context['session']
    schema = context.get('schema') or schema_.default_update_user_schema()
    id = _get_or_bust(data_dict, 'id')

    user_obj = model.User.get(id)
    context['user_obj'] = user_obj
    if user_obj is None:
        raise NotFound('User was not found.')

    _check_access('user_update', context, data_dict)

    upload = uploader.get_uploader('user')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')

    data, errors = _validate(data_dict, schema, context)
    if errors:
        session.rollback()
        raise ValidationError(errors)

    # user schema prevents non-sysadmins from providing password_hash
    if 'password_hash' in data:
        data['_password'] = data.pop('password_hash')

    user = model_save.user_dict_save(data, context)

    activity_dict = {
        'user_id': user.id,
        'object_id': user.id,
        'activity_type': 'changed user',
    }
    activity_create_context = {
        'model': model,
        'user': author,
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    _get_action('activity_create')(activity_create_context, activity_dict)
    # TODO: Also create an activity detail recording what exactly changed in
    # the user.

    upload.upload(uploader.get_max_image_size())

    if not context.get('defer_commit'):
        model.repo.commit()

    author_obj = model.User.get(context.get('user'))
    include_plugin_extras = False
    if author_obj:
        include_plugin_extras = author_obj.sysadmin and 'plugin_extras' in data

    user_dict = model_dictize.user_dictize(
        user, context, include_plugin_extras=include_plugin_extras)

    return user_dict

def _group_or_org_update(
        context: Context, data_dict: DataDict, is_org: bool = False):
    model = context['model']
    session = context['session']
    id = _get_or_bust(data_dict, 'id')

    group = model.Group.get(id)
    if group is None:
        raise NotFound('Group was not found.')
    context["group"] = group
    data_dict['type'] = group.type

    # get the schema
    group_plugin = lib_plugins.lookup_group_plugin(group.type)
    try:
        schema = group_plugin.form_to_db_schema_options({
            'type': 'update',
            'api': 'api_version' in context,
            'context': context})
    except AttributeError:
        schema = group_plugin.form_to_db_schema()

    upload = uploader.get_uploader('group')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')

    if is_org:
        _check_access('organization_update', context, data_dict)
    else:
        _check_access('group_update', context, data_dict)

    if 'api_version' not in context:
        # old plugins do not support passing the schema so we need
        # to ensure they still work
        try:
            group_plugin.check_data_dict(data_dict, schema)
        except TypeError:
            group_plugin.check_data_dict(data_dict)

    data, errors = lib_plugins.plugin_validate(
        group_plugin, context, data_dict, schema,
        'organization_update' if is_org else 'group_update')

    group = context.get('group')
    log.debug('group_update validate_errs=%r user=%s group=%s data_dict=%r',
              errors, context.get('user'),
              group.name if group else '', data_dict)

    if errors:
        session.rollback()
        raise ValidationError(errors)

    contains_packages = 'packages' in data_dict

    group = model_save.group_dict_save(
        data, context,
        prevent_packages_update=is_org or not contains_packages
    )

    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController

    for item in plugins.PluginImplementations(plugin_type):
        item.edit(group)

    upload.upload(uploader.get_max_image_size())

    if not context.get('defer_commit'):
        model.repo.commit()

    return model_dictize.group_dictize(group, context)

def user_update(context: Context,
                data_dict: DataDict) -> ActionResult.UserUpdate:
    '''Update a user account.

    Normal users can only update their own user accounts. Sysadmins can
    update any user account. Can not modify existing user's name.

    .. note:: Update methods may delete parameters not explicitly provided in
        the data_dict. If you want to edit only a specific attribute use
        `user_patch` instead.

    For further parameters see
    :py:func:`~ckan.logic.action.create.user_create`.

    :param id: the name or id of the user to update
    :type id: string

    :returns: the updated user account
    :rtype: dictionary

    '''
    model = context['model']
    user = context['user']
    session = context['session']
    schema = context.get('schema') or schema_.default_update_user_schema()
    id = _get_or_bust(data_dict, 'id')

    user_obj = model.User.get(id)
    if user_obj is None:
        raise NotFound('User was not found.')
    context['user_obj'] = user_obj

    _check_access('user_update', context, data_dict)

    upload = uploader.get_uploader('user')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')

    data, errors = _validate(data_dict, schema, context)
    if errors:
        session.rollback()
        raise ValidationError(errors)

    # user schema prevents non-sysadmins from providing password_hash
    if 'password_hash' in data:
        data['_password'] = data.pop('password_hash')

    user = model_save.user_dict_save(data, context)

    upload.upload(uploader.get_max_image_size())

    if not context.get('defer_commit'):
        with logic.guard_against_duplicated_email(data_dict['email']):
            model.repo.commit()

    author_obj = model.User.get(context.get('user'))
    include_plugin_extras = False
    if author_obj:
        include_plugin_extras = author_obj.sysadmin and 'plugin_extras' in data

    user_dict = model_dictize.user_dictize(
        user, context, include_plugin_extras=include_plugin_extras)

    return user_dict
