def prefix_validator(key, data, errors, context):
    """Gather all ``prefix``-named fields from extras/junk, run each value
    through the configured validators, and store the surviving values as a
    single dict under ``(prefix,)`` in ``data`` (keys have the prefix
    stripped).
    """
    collected = {}
    extras = data.get(('__extras',), {})
    # values passed as lists of dicts will have been flattened into __junk
    junk = df.unflatten(data.get(('__junk',), {}))

    # Promote prefixed junk entries into extras so a single loop below can
    # handle values from either source.
    for name in junk:
        if name.startswith(prefix):
            extras[name] = junk[name]

    # Iterate over a snapshot of the keys: matching entries are popped out
    # of extras as they are consumed.
    for name in list(extras):
        if not name.startswith(prefix):
            continue
        # Stage the value under a flattened key so df.convert can see it.
        data[(name,)] = extras.pop(name)
        for validator_fn in validator_fns:
            try:
                df.convert(validator_fn, (name,), data, errors, context)
            except df.StopOnError:
                # A validator signalled that processing of this value
                # should stop here.
                break
        # Record the (possibly converted) value under the unprefixed name
        # and drop the scratch entry from data.
        collected[name[len(prefix):]] = data.pop((name,))

    data[(prefix,)] = collected
def subfields_validator(key, data, errors, context):
    """Validate repeating/simple subfield values for ``field``.

    Accepts the value either directly (API call), as a JSON string, or
    unrolled into ``__junk`` (form submission), runs every subfield's
    validators over each entry, surfaces subfield errors on the parent
    key, and re-serializes the result into ``data[key]`` as JSON.
    """
    # If the field is coming from the API the value will be set directly.
    value = data.get(key)
    if not value:
        # ... otherwise, it's a form submission so our values are stuck
        # unrolled in __extras.
        # If we're working on a package field, the key will look like:
        #   (<field name>,)
        # and if we're working on a resource it'll be:
        #   ('resources', <resource #>, <field name>)
        _junk = data.get(key[:-1] + ('__junk',), {})
        # Group our unrolled fields by their index.
        values = defaultdict(dict)
        # Iterate over a snapshot of the keys: we pop from _junk inside the
        # loop, and mutating a dict while iterating its live key view raises
        # RuntimeError on Python 3.
        for k in list(_junk):
            if k[0] == key[0]:
                name = k[2]
                index = k[1]
                # Always pop, we don't want handled values to remain in
                # __extras or they'll end up on the model.
                values[index][name] = _junk.pop(k)
        # ... then turn it back into an ordered list.
        value = [v for k, v in sorted(values.items())]
    elif isinstance(value, six.string_types):
        value = json.loads(value)

    if not isinstance(value, list):
        # We treat all subfields as repeatable when processing, even
        # when they aren't defined that way in the schema.
        value = [value]

    for subfield in field.get('repeating_subfields',
                              field.get('simple_subfields')):
        validators = _field_create_validators(subfield, schema, False)
        for entry in value:
            # This right here is why we recommend globally unique field
            # names, else you risk trampling values from the top-level
            # schema. Some validators like require_when_published require
            # other top-level fields.
            entry_as_data = {(k,): v for k, v in entry.items()}
            entry_as_data.update(data)
            entry_errors = defaultdict(list)
            for v in validators:
                convert(
                    v,
                    (subfield['field_name'],),
                    entry_as_data,
                    entry_errors,
                    context
                )
            # Any subfield errors should be added as errors to the parent
            # since this is the only way we have to let other plugins know
            # of issues.
            # NOTE: dict.itervalues() is Python 2 only; six.itervalues()
            # works on both interpreters this module supports.
            errors[key].extend(
                itertools.chain.from_iterable(
                    six.itervalues(entry_errors)
                )
            )
            # Pull our potentially modified fields back. What if validators
            # modified other fields such as a top-level field? Is this
            # "allowed" in CKAN validators? We might have to replace
            # entry_as_data with a write-tracing dict to capture all
            # changes.
            for k in entry.keys():
                entry[k] = entry_as_data[(k,)]

    # It would be preferable to just always store as a list, but some plugins
    # such as ckanext-restricted make assumptions on how values are stored.
    if 'repeating_subfields' in field:
        data[key] = json.dumps(value)
    elif value:
        data[key] = json.dumps(value[0])