Example #1
def workflow_revert_state_validator(key, data, errors, context):
    """
    Checks that the revert state specified in the data would not cause
    a loop in the workflow state graph.
    """
    workflow_state = context.get('workflow_state')
    if not workflow_state:
        # it's a new state - no other state reverts to this one, and no transitions
        # involving this state exist, yet
        return

    revert_state_id = _convert_missing(
        data.get(key[:-1] + ('revert_state_id', )))
    if not revert_state_id:
        return

    if revert_state_id == workflow_state.id:
        raise tk.Invalid(_("A workflow state cannot revert to itself"))

    if ckanext_model.WorkflowState.revert_path_exists(revert_state_id,
                                                      workflow_state.id):
        raise tk.Invalid(_("Revert loop in workflow state graph"))

    if ckanext_model.WorkflowTransition.path_exists(workflow_state.id,
                                                    revert_state_id):
        raise tk.Invalid(_("Forward revert in workflow state graph"))
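`WorkflowState.revert_path_exists` and `WorkflowTransition.path_exists` are model helpers that are not shown here; presumably they perform a reachability check over the state graph. A minimal self-contained sketch of that idea (an assumption, not the actual ckanext_model implementation):

def path_exists(edges, start, target):
    """Depth-first reachability over a {state_id: [next_state_id, ...]} map."""
    seen = set()
    stack = [start]
    while stack:
        state = stack.pop()
        if state == target:
            return True
        if state in seen:
            continue
        seen.add(state)
        stack.extend(edges.get(state, []))
    return False

# With an existing revert edge B -> A, the validator above would refuse
# to let A revert to B, since that would close a loop:
assert path_exists({'B': ['A']}, 'B', 'A')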
Example #2
def user_email_validator(key, data, errors, context):
    '''HDX validator for emails as identifiers.'''
    model = context['model']

    email = data[key]
    if not isinstance(email, basestring):
        raise tk.Invalid(tk._('Email addresses must be strings'))

    # Convert email to lowercase
    email = email.lower()
    data[key] = email

    from validate_email import validate_email
    if not validate_email(email, check_mx=False, verify=False):
        raise tk.Invalid(tk._('Email address is not valid'))

    users = model.User.by_email(email)
    if users:
        # A user with this email address already exists in the database.
        user_obj_from_context = context.get('user_obj')
        for user in users:
            if user_obj_from_context and user_obj_from_context.id == user.id:
                # If there's a user_obj in context with the same id as the
                # user found in the db, then we must be doing a user_update
                # and not changing the email address, so don't return an
                # error.
                return
        errors[key].append(
            tk._('The email address is already registered on HDX. '
                 'Please use the sign in screen below.'))
Example #3
def dataset_state(key, data, errors, context):
    """If a dataset does not have any resources, it must be a draft"""
    data_dict = df.unflatten(data)

    if data[key] == "active":
        if "resources" not in data_dict or len(data_dict["resources"]) == 0:
            # The user wants to activate the dataset although it does not
            # contain any resources. This is not possible!
            raise toolkit.Invalid(
                "Cannot set state of dataset to 'active', because it does not "
                "contain any resources!")
        else:
            # Do not allow activating a dataset without at least one valid
            # .rtdc resource.
            # Note that DCOR-Aid first checks whether resource upload is
            # complete before uploading. If someone writes their own script
            # for uploading, they also have to use package_revise *after*
            # uploading the resources to set the state to "active".
            for res in data_dict["resources"]:
                if res["mimetype"] == "RT-DC":
                    rp = dcor_shared.get_resource_path(res["id"])
                    try:
                        with dclab.IntegrityChecker(rp) as ic:
                            insane = ic.sanity_check()
                            if not insane:
                                break
                    except ValueError:
                        # Unknown file format
                        pass
            else:
                raise toolkit.Invalid(
                    "Before activating a dataset, make sure that it contains "
                    "a valid .rtdc resource!")
Example #4
def update_field_index_validator(key, data, errors, context):

    index = data[key]

    try:
        index = int(index)
    except (ValueError, TypeError):
        raise toolkit.Invalid(toolkit._("index must be an int"))

    if index < 0:
        raise toolkit.Invalid(toolkit._("You can't have a negative index"))

    # Make sure the resource has a field with this index.
    resource_id = data[('resource_id', )]
    # We're assuming that resource_id has already been validated and is valid,
    # so resource_schema_show() won't raise an exception here.
    schema = toolkit.get_action('resource_schema_show')(
        context, {
            'resource_id': resource_id
        })
    matching_fields = []
    for field in schema.get('fields', []):
        if field['index'] == index:
            matching_fields.append(field)
    if len(matching_fields) == 0:
        raise toolkit.Invalid(
            toolkit._("There's no field with the given "
                      "index"))
    if len(matching_fields) > 1:
        raise toolkit.Invalid(
            toolkit._("There's more than one field with the "
                      "given index (this shouldn't happen, "
                      "something has gone wrong)"))

    data[key] = index
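Tuple keys such as ('resource_id', ) come from CKAN's flattened data dicts (ckan.lib.navl.dictization_functions): nested dicts and lists are flattened into tuple-keyed entries before validators run. A quick sketch; the flattened shapes in the comments are from memory of CKAN's behaviour, so treat them as an assumption:

from ckan.lib.navl.dictization_functions import flatten_dict, unflatten

data_dict = {'resource_id': 'abc', 'fields': [{'index': 0}, {'index': 1}]}
flat = flatten_dict(data_dict)
# flat == {('resource_id',): 'abc',
#          ('fields', 0, 'index'): 0,
#          ('fields', 1, 'index'): 1}
assert unflatten(flat) == data_dict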
Example #5
def create_field_index_validator(key, data, errors, context):

    index = data[key]

    try:
        index = int(index)
    except (ValueError, TypeError):
        raise toolkit.Invalid(toolkit._("index must be an int"))

    if index < 0:
        raise toolkit.Invalid(toolkit._("You can't have a negative index"))

    # Make sure the resource doesn't already have a field with this index.
    resource_id = data[('resource_id', )]
    schema = toolkit.get_action('resource_schema_show')(
        context, {
            'resource_id': resource_id
        })
    for field in schema.get('fields', []):
        if field['index'] == index:
            raise toolkit.Invalid(
                toolkit._("You can't have two fields with the same index"))

    # TODO: Here we could also prevent creating fields with indexes
    # corresponding to columns that don't actually exist in the resource's CSV
    # file.

    data[key] = index
Example #6
def valid_plotly(value):
    # validator for plotly configuration; won't cover everything, but hits the high points...

    try:
        config = json.loads(value)

    except json.JSONDecodeError as e:
        raise toolkit.Invalid(
            u'Invalid JSON string near line %d column %d, %s' %
            (e.lineno, e.colno, e.msg))

    if not isinstance(config, dict):
        raise toolkit.Invalid(
            u'Incorrect plot configuration, expected object containing "traces", "layout" and/or "frames"'
        )

    if 'traces' in config:

        if not isinstance(config['traces'], list):
            raise toolkit.Invalid(
                u'Incorrect traces configuration, expecting list of objects')

        for t in config['traces']:
            if not isinstance(t, dict):
                raise toolkit.Invalid(
                    u'Incorrect traces configuration, expecting list of objects'
                )

    if 'layout' in config and not isinstance(config['layout'], dict):
        raise toolkit.Invalid(
            u'Incorrect layout configuration, expecting object of plot axes')

    return value
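A quick usage sketch for the validator above; the configuration shape is an assumption based only on the checks it performs, not on the full Plotly schema:

import json

good = json.dumps({
    "traces": [{"x": [1, 2, 3], "y": [4, 1, 9]}],
    "layout": {"title": "demo"},
})
valid_plotly(good)   # passes; returns the original JSON string

# valid_plotly('{"traces": {"x": []}}')   # raises Invalid: traces must be a list
# valid_plotly('[1, 2]')                  # raises Invalid: top level must be an object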
Example #7
def network_config_validator(value):
    #value = data.pop(key, None)
    print("network_config_validator", value)
    if value == "invalid":
        raise tk.Invalid(u"Network config file is invalid. Please check!")
    elif value == "empty":
        raise tk.Invalid(u"Missing value")
    return value
Example #8
def file_idl_validator(value):
    #value = data.pop(key, None)
    print("file_idl_validator", value)
    if value == "invalid":
        raise tk.Invalid(u"IDL file is invalid. Please check!")
    elif value == "empty":
        raise tk.Invalid(u"Missing value")
    return value
Example #9
def resource_name(key, data, errors, context):
    """Check resource names

    - no weird characters
    - only allowed file extensions
    - unique resource names
    """
    assert key[0] == "resources"
    assert key[2] == "name"
    user = context.get('user')
    ignore_auth = context.get('ignore_auth')
    if ignore_auth or (user and authz.is_sysadmin(user)):
        # Admins know what they are doing (e.g. figshare import)
        return

    name = data[key]

    # check suffix
    if name.count("."):
        suffix = "." + name.rsplit(".", 1)[1]
    else:
        suffix = None
    if suffix not in RESOURCE_EXTS:
        raise toolkit.Invalid(
            "Unsupported file extension '{}'. ".format(suffix) +
            "Allowed file extensions are {}.".format(RESOURCE_EXTS))

    # check that filename contains valid characters
    invalid_chars = []
    for char in name:
        if char not in RESOURCE_CHARS:
            invalid_chars.append(char)
    if invalid_chars:
        raise toolkit.Invalid(u"Invalid characters in file name: {}".format(
            u"".join(invalid_chars)))

    # do not allow adding resources that exist already
    package = context.get('package')
    if package:
        package_id = package.id
    else:
        package_id = data.get(key[:-1] + ('id', ))
    pkg_dict = logic.get_action('package_show')(
        dict(context, return_type='dict'), {'id': package_id})

    ress = pkg_dict.get("resources", [])
    if ress:
        # check name
        for item in ress:
            # Since this function is called for each and every
            # resource all the time, we have to make sure that
            # the positions are not matching.
            if key[1] != item["position"] and item["name"] == name:
                raise toolkit.Invalid(
                    "Resource with name '{}' already exists!".format(name))
Example #10
def topic_name_validator(value):
    #value = data.pop(key, None)
    if value == "":
        raise tk.Invalid(u"Missing value")
    elif isinstance(value, str):
        # The original pattern `[_a-zA-z][_a-zA-z0-9]*` used the range `A-z`,
        # which also matches '[', '\\', ']', '^' and '`', and it was not
        # anchored, so it only checked a prefix of the name.
        if re.match(r'[_a-zA-Z][_a-zA-Z0-9]*\Z', value) is None:
            raise tk.Invalid(u"Topic name is invalid. Please check!")
    return value
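The two pitfalls fixed above, shown in isolation:

import re

# `A-z` spans the ASCII codes between 'A' and 'z', which also include
# '[', '\\', ']', '^' and '`':
assert re.match(r'[_a-zA-z]', '^') is not None   # accepted by the old pattern
assert re.match(r'[_a-zA-Z]', '^') is None       # rejected by the fixed one

# Unanchored re.match only checks a prefix:
assert re.match(r'[_a-zA-Z][_a-zA-Z0-9]*', 'bad name!') is not None
assert re.match(r'[_a-zA-Z][_a-zA-Z0-9]*\Z', 'bad name!') is None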
Example #11
    def callable_(key, data, errors, context):
        # `in_schema` and `schema` are closure variables supplied by the
        # enclosing validator factory (see the sketch after this example).
        value = data.get(key)
        parts = value.split('/')
        if len(parts) < 2 or not parts[1]:
            raise tk.Invalid(_("Invalid path"))
        element = parts[1]
        if (in_schema and element not in schema) \
                or (not in_schema and element in schema):
            raise tk.Invalid(_("The specified key cannot be used"))
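Since `in_schema` and `schema` are free variables, this validator is presumably produced by a factory that closes over them; a hypothetical shape for illustration:

def make_path_key_validator(schema, in_schema):
    """Hypothetical factory (assumption) producing the closure above."""
    def callable_(key, data, errors, context):
        ...  # body as shown above, closing over `schema` and `in_schema`
    return callable_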
Example #12
def foreign_key_reference_validator(key, data, errors, context):
    referenced_resource_id = data[('referenced_resource_id', )]

    try:
        referenced_resource = toolkit.get_action('resource_show')(
            context, {
                'id': referenced_resource_id
            })
        referenced_schema = toolkit.get_action('resource_schema_show')(
            context, {
                'resource_id': referenced_resource_id
            })
    except toolkit.ValidationError:
        raise toolkit.Invalid(toolkit._("referenced resource id invalid"))

    # check that the field and the referenced field are in the same data
    # package
    resource_id = data[('resource_id', )]
    package_id = context['resource'].get_package_id()
    package = toolkit.get_action('package_show')(context, {'id': package_id})
    resource_ids = set(r['id'] for r in package['resources'])

    if resource_id not in resource_ids:
        raise toolkit.Invalid(
            toolkit._("referenced resource not in same dataset"))

    referenced_field = data[key]
    if isinstance(referenced_field, basestring):
        referenced_field = [referenced_field]

    # check that the length of the foreign key is the same as the referenced field
    foreign_key_field = data[('field', )]
    if isinstance(foreign_key_field, basestring):
        foreign_key_field = [foreign_key_field]

    try:
        foreign_key_field_len = len(foreign_key_field)
    except TypeError:
        raise toolkit.Invalid(toolkit._('fkey field is invalid type'))

    if len(referenced_field) != foreign_key_field_len:
        raise toolkit.Invalid(
            toolkit._("field and referenced_fields must be the same length"))

    # check that the referenced field name is valid
    referenced_field_names = [
        f['name'] for f in referenced_schema.get('fields', [])
    ]

    for field in referenced_field:
        if field not in referenced_field_names:
            raise toolkit.Invalid(toolkit._("No field with that name"))

    # add the resource name to the data dict
    data[('referenced_resource', )] = referenced_resource['name']
    data[('referenced_resource_id', )] = referenced_resource['id']
Example #13
def _validate(obj_type, value, context):

    if not value:
        return

    # Accept comma- or space-separated lists. A space-separated string is
    # only split if its first token looks like a URI or a numeric ID;
    # otherwise the whole string is treated as a single value.
    if ',' in value:
        values = value.split(',')
    elif ' ' in value:
        values = value.split(' ')
        if not values[0].startswith('http'):
            try:
                int(values[0])
            except ValueError:
                values = [value]
    else:
        values = [value]

    values = [v.strip() for v in values]

    new_values = []
    for esd_uri in values:
        try:
            esd_obj = toolkit.get_action('esd_{0}_show'.format(obj_type))(
                context, {
                    'id': esd_uri
                })
            new_values.append(esd_obj['uri'])
        except toolkit.ObjectNotFound:
            raise toolkit.Invalid('ESD {0} not known: {1}'.format(
                obj_type.title(), esd_uri))

    value = ' '.join(new_values)

    return value
Example #14
def org_id_or_name_exists(reference, context):
    model = context['model']
    result = model.Group.get(reference)
    if not result or result.type != 'organization':
        raise toolkit.Invalid(
            toolkit._('That organization name or ID does not exist.'))
    return reference
Example #15
def object_id_validator(
    key: FlattenKey,
    activity_dict: FlattenDataDict,
    errors: FlattenErrorDict,
    context: Context,
) -> Any:
    """Validate the 'object_id' value of an activity_dict.

    Uses the object_id_validators dict (above) to find and call an 'object_id'
    validator function for the given activity_dict's 'activity_type' value.

    Raises Invalid if the model given in context contains no object of the
    correct type (according to the 'activity_type' value of the activity_dict)
    with the given ID.

    Raises Invalid if there is no object_id_validator for the activity_dict's
    'activity_type' value.

    """
    activity_type = activity_dict[("activity_type", )]
    if activity_type in object_id_validators:
        object_id = activity_dict[("object_id", )]
        name = object_id_validators[activity_type]
        validator = cast(ContextValidator, tk.get_validator(name))
        return validator(object_id, context)
    else:
        raise tk.Invalid(
            'There is no object_id validator for activity type "%s"' %
            activity_type)
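The object_id_validators dict referenced above maps activity types to validator names; its shape in CKAN's activity code is roughly as follows (recalled from memory, treat as an assumption):

object_id_validators = {
    "new package": "package_id_exists",
    "changed package": "package_id_exists",
    "deleted package": "package_id_exists",
    "new user": "user_id_exists",
    "changed user": "user_id_exists",
    "new group": "group_id_exists",
    "changed group": "group_id_exists",
}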
Example #16
def convert_numeric(value, context):
    if not value:
        return ''
    try:
        return float(value)
    except ValueError:
        raise toolkit.Invalid(_('Please enter a numeric value'))
Example #17
    def validate(cls, data_dict):
        data_dict = super(AgentQuery, cls).validate(data_dict)
        valid_agent_types = ['person', 'org', 'other']
        agent_type = toolkit.get_or_bust(data_dict, 'agent_type')
        if agent_type not in valid_agent_types:
            raise toolkit.Invalid('Agent type must be one of {0}'.format(
                ', '.join(valid_agent_types)))

        valid_params = {
            'person': dict(required=['family_name', 'given_names'],
                           optional=['given_names_first']),
            'org': dict(required=['name'], optional=['location']),
            'other': dict(required=[], optional=[]),
        }
        required = ['agent_type'] + valid_params[agent_type]['required']
        optional = (['user_id', 'external_id', 'external_id_scheme'] +
                    valid_params[agent_type]['optional'])
        for k in required:
            if k not in data_dict:
                raise toolkit.ValidationError(
                    '{0} is a required field.'.format(k))
        if 'external_id' in data_dict and 'external_id_scheme' not in data_dict:
            raise toolkit.ValidationError(
                'external_id_scheme is a required field when external_id is set.'
            )
        all_fields = required + optional
        # Null out (rather than delete) any unexpected fields.
        for k in data_dict:
            if k not in all_fields:
                data_dict[k] = None
        return data_dict
Example #18
def jsonpatch_operation_validator(operation_dict):
    """
    Checks that the supplied value is a valid JSON patch operation dictionary.
    """
    valid = (type(operation_dict) is dict) and \
            ({'op', 'path'} <= set(operation_dict.keys())) and \
            (operation_dict['op'] in ('add', 'remove', 'replace', 'move', 'copy', 'test'))

    if valid:
        try:
            jsonpointer.JsonPointer(operation_dict['path'])
        except jsonpointer.JsonPointerException:
            valid = False

    if valid and (operation_dict['op'] in ('move', 'copy')):
        valid = 'from' in operation_dict
        if valid:
            try:
                jsonpointer.JsonPointer(operation_dict['from'])
            except jsonpointer.JsonPointerException:
                valid = False

    if valid and (operation_dict['op'] in ('add', 'replace', 'test')):
        valid = 'value' in operation_dict
        if valid:
            try:
                json.dumps(operation_dict['value'])
            except (TypeError, ValueError):
                valid = False

    if not valid:
        raise tk.Invalid(_("Invalid JSON patch operation"))

    return operation_dict
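Examples of the RFC 6902 operation shapes the validator above accepts and rejects:

ok_ops = [
    {"op": "add", "path": "/fields/0", "value": {"name": "id"}},
    {"op": "remove", "path": "/fields/1"},
    {"op": "move", "path": "/a", "from": "/b"},
]
for op in ok_ops:
    jsonpatch_operation_validator(op)   # returns op unchanged

# Rejected: 'move' without 'from', and a pointer missing its leading '/':
# jsonpatch_operation_validator({"op": "move", "path": "/a"})
# jsonpatch_operation_validator({"op": "remove", "path": "a"})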
Example #19
def _generate_resource_name(context, key, converted_data, package):

    # Get the filename of the uploaded or linked-to file from the resource's
    # URL.
    name = converted_data.get(('resources', key[1], 'url'))

    if name.endswith('/'):
        name = name[:-1]
    name = name.split('/')[-1]

    if _is_name_unique(context, name, key, converted_data, package):
        return name
    else:
        name, extension = os.path.splitext(name)
        for i in range(2, 1000):
            new_name = "{0}_{1}{2}".format(name, i, extension)
            if _is_name_unique(context, new_name, key, converted_data,
                               package):
                return new_name

        # If we get here then 2...999 were all taken.
        raise toolkit.Invalid(toolkit._(
            "I got bored trying to come up with a unique name for this "
            "file, you'll have to supply one yourself"))
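The suffixing strategy in isolation, as a self-contained sketch:

import os

def unique_name(name, taken):
    if name not in taken:
        return name
    stem, ext = os.path.splitext(name)
    for i in range(2, 1000):
        candidate = "{0}_{1}{2}".format(stem, i, ext)
        if candidate not in taken:
            return candidate
    raise ValueError("names {0}_2 through {0}_999 are all taken".format(stem))

print(unique_name("data.csv", {"data.csv", "data_2.csv"}))   # -> data_3.csv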
Example #20
def user_name_validator(key, data, errors, context):
    '''Validate a new user name.

    Copy of the validator with the same name from CKAN core, BUT allows the username to be changed.

    :raises ckan.lib.navl.dictization_functions.Invalid: if ``data[key]`` is
        not a string
    :rtype: None

    '''
    model = context['model']
    new_user_name = data[key]

    if not isinstance(new_user_name, string_types):
        raise tk.Invalid(tk._('User names must be strings'))

    user = model.User.get(new_user_name)
    user_obj_from_context = context.get('user_obj')
    if user is not None:
        # A user with new_user_name already exists in the database.
        if user_obj_from_context and user_obj_from_context.id == user.id:
            # If there's a user_obj in context with the same id as the user
            # found in the db, then we must be doing a user_update and not
            # updating the user name, so don't return an error.
            return
        else:
            # Otherwise return an error: there's already another user with
            # that name, so you cannot create a new user with that name or
            # update an existing user's name to that name.
            errors[key].append(tk._('That login name is not available.'))
Example #21
def is_valid_sort(filter_string, context):
    '''takes a string, validates and returns an IssueFilter enum'''
    try:
        return issuemodel.IssueFilter[filter_string]
    except KeyError:
        msg_str = 'Cannot apply filter. "{0}" is not a valid filter'
        raise toolkit.Invalid(toolkit._(msg_str.format(filter_string)))
Example #22
def access_request_update(context, data_dict):
    user = context.get('user')
    request_id = toolkit.get_or_bust(data_dict, "id")
    request = model.Session.query(AccessRequest).get(request_id)
    if not request:
        raise toolkit.ObjectNotFound("Access Request not found")
    if request.object_type not in ['organization', 'package', 'user']:
        raise toolkit.Invalid("Unknown Object Type")

    if request.object_type == 'package':
        package = toolkit.get_action('package_show')(
            context, {'id': request.object_id})
        org_id = package['owner_org']
        return {
            'success': has_user_permission_for_group_or_org(
                org_id, user, 'admin')
        }
    elif request.object_type == 'organization':
        org_id = request.object_id
        return {
            'success': has_user_permission_for_group_or_org(
                org_id, user, 'admin')
        }
    elif request.object_type == 'user':
        data_dict = {
            'id': request.object_id,
            'renew_expiry_date': request.data.get(
                'user_request_type',
                USER_REQUEST_TYPE_NEW) == USER_REQUEST_TYPE_RENEWAL,
        }
        return external_user_update_state(context, data_dict)
Example #23
def is_valid_abuse_status(filter_string, context):
    '''takes a string, validates and returns an AbuseStatus enum'''
    try:
        return issuemodel.AbuseStatus[filter_string]
    except KeyError:
        msg_str = 'Cannot apply filter. "{0}" is not a valid abuse status'
        raise toolkit.Invalid(toolkit._(msg_str.format(filter_string)))
Example #24
def metadata_template_json_path_validator(key, data, errors, context):
    """
    Checks that the supplied JSON path is valid for the metadata template JSON of
    the supplied metadata standard. For use with the '__after' schema key in the
    metadata_json_attr_map_* schemas.
    """
    json_path = _convert_missing(data.get(key[:-1] + ('json_path', )), '')
    metadata_standard_id = _convert_missing(
        data.get(key[:-1] + ('metadata_standard_id', )))
    if not metadata_standard_id:
        id_ = data.get(key[:-1] + ('id', ))
        mapping_obj = ckanext_model.MetadataJSONAttrMap.get(id_)
        if mapping_obj:
            metadata_standard_id = mapping_obj.metadata_standard_id

    metadata_standard = ckanext_model.MetadataStandard.get(
        metadata_standard_id)
    if metadata_standard:
        metadata_template_dict = json.loads(
            metadata_standard.metadata_template_json)
        try:
            jsonpointer.resolve_pointer(metadata_template_dict, json_path)
        except jsonpointer.JsonPointerException:
            raise tk.Invalid(_(
                "The supplied JSON path is not valid for the metadata "
                "template of the supplied metadata standard"))
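resolve_pointer comes from the jsonpointer package (python-json-pointer); a quick self-contained illustration of the call the validator relies on:

import jsonpointer

template = {"identificationInfo": {"title": ""}}
jsonpointer.resolve_pointer(template, "/identificationInfo/title")   # -> ""

# An unknown path raises JsonPointerException, which the validator
# above converts into tk.Invalid:
# jsonpointer.resolve_pointer(template, "/abstract")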
Example #25
def is_valid_status(value, context):
    if value in issuemodel.ISSUE_STATUS:
        return value
    else:
        raise toolkit.Invalid(toolkit._(
            '{0} is not a valid status'.format(value))
        )
Example #26
def metadata_schema_unique_standard_organization_infrastructure(
        key, data, errors, context):
    """
    Checks the uniqueness of metadata_standard-organization-infrastructure for a metadata schema.
    For use with the '__after' schema key; group names should already have been converted to group ids.
    """
    metadata_standard_id = data.get(key[:-1] + ('metadata_standard_id', ))
    organization_id = data.get(key[:-1] + ('organization_id', ))
    infrastructure_id = data.get(key[:-1] + ('infrastructure_id', ))

    id_ = _convert_missing(data.get(key[:-1] + ('id', )))
    obj = ckanext_model.MetadataSchema.get(id_) if id_ else None

    # if we're updating, missing value(s) in the input data imply a partial update, so get the
    # existing value(s) and check that the updated key does not violate uniqueness
    metadata_standard_id = _convert_missing(
        metadata_standard_id, obj.metadata_standard_id if obj else None)
    organization_id = _convert_missing(organization_id,
                                       obj.organization_id if obj else None)
    infrastructure_id = _convert_missing(
        infrastructure_id, obj.infrastructure_id if obj else None)

    metadata_schema = ckanext_model.MetadataSchema.lookup(
        metadata_standard_id, organization_id, infrastructure_id)
    if metadata_schema and metadata_schema.state != 'deleted' and metadata_schema.id != id_:
        raise tk.Invalid(
            _("Unique constraint violation: %s") %
            '(metadata_standard_id, organization_id, infrastructure_id)')
Example #27
def owner_org_owns_metadata_collection(key, data, errors, context):
    """
    Checks that the owner_org specified for a metadata record is the same organization that
    owns the metadata collection to which the record is being added.
    For use with the '__after' schema key; group names should already have been converted to group ids.
    """
    model = context['model']
    session = context['session']

    organization_id = data.get(key[:-1] + ('owner_org', ))
    metadata_collection_id = _convert_missing(
        data.get(key[:-1] + ('metadata_collection_id', )))

    id_ = _convert_missing(data.get(key[:-1] + ('id', )))
    obj = model.Package.get(id_) if id_ else None

    # if we're updating, missing value(s) in the input data imply a partial update, so get the
    # existing value(s) and check that the updated combination satisfies our condition
    organization_id = _convert_missing(organization_id,
                                       obj.owner_org if obj else None)
    if obj and not metadata_collection_id:
        metadata_collection_id = session.query(model.PackageExtra.value) \
            .filter_by(package_id=id_, key='metadata_collection_id').scalar()

    metadata_collection_organization_id = session.query(model.GroupExtra.value) \
        .filter_by(group_id=metadata_collection_id, key='organization_id').scalar()

    if organization_id != metadata_collection_organization_id:
        raise tk.Invalid(_(
            "owner_org must be the same organization that owns "
            "the metadata collection"))
Example #28
def workflow_transition_graph_validator(key, data, errors, context):
    """
    Checks that the specified workflow transition would not cause
    a loop in the workflow state graph.
    """
    from_state_id = _convert_missing(data.get(key[:-1] + ('from_state_id', )))
    to_state_id = _convert_missing(data.get(key[:-1] + ('to_state_id', )))

    if ckanext_model.WorkflowTransition.path_exists(to_state_id,
                                                    from_state_id):
        raise tk.Invalid(_("Transition loop in workflow state graph"))

    if ckanext_model.WorkflowState.revert_path_exists(from_state_id,
                                                      to_state_id):
        raise tk.Invalid(_(
            "Backward transition in workflow state graph "
            "(check revert states)"))
Example #29
def issue_comment_exists(issue_comment_id, context):
    issue_comment_id = is_positive_integer(issue_comment_id, context)
    result = issuemodel.IssueComment.get(issue_comment_id,
                                         session=context['session'])
    if not result:
        raise toolkit.Invalid(
            toolkit._('Issue Comment not found') + ': %s' % issue_comment_id)
    return issue_comment_id
Example #30
    def callable_(key, data, errors, context):
        # `model_class` and `model_desc` are closure variables from the
        # enclosing validator factory (cf. Example #11).
        object_id_or_name = data.get(key)
        if not object_id_or_name:
            return None

        result = model_class.get(object_id_or_name)
        if result:
            raise tk.Invalid('%s: %s' % (_('Already exists'), _(model_desc)))