def find_translatable_fields(cls):
    for dtype in cls.packages:
        md_cls = class_for_metadata(dtype)
        flattened_fields = md_cls.get_flattened_fields()
        for kp, uf in flattened_fields.items():
            if uf.queryTaggedValue('translatable'):
                cls.translatable_fields[dtype].append(kp)
    def after_search(self, search_results, search_params):
        '''Try to replace displayed fields with their translations (if any).
        '''
        
        from ckanext.publicamundi.lib.metadata import fields, bound_field
        from ckanext.publicamundi.lib.metadata import class_for_metadata, translator_for
        
        uf = fields.TextField()
        lang = self.target_language()

        for pkg in search_results['results']:
            source_lang = pkg.get('language')
            if not source_lang or (source_lang == lang):
                continue # no need to translate
            dtype = pkg['dataset_type']
            md = class_for_metadata(dtype)(identifier=pkg['id']) 
            translator = translator_for(md, source_lang)
            # Lookup translations in the context of this package
            translated = False
            for k in ('title', 'notes'):
                tr = translator.get_field_translator(bound_field(uf, (k,), pkg[k]))
                yf = tr.get(lang) if tr else None
                if yf:
                    pkg[k] = yf.context.value
                    translated = True
            # If at least one translation was found, mark as translated
            if translated:
                pkg['translated_to_language'] = lang
        
        return search_results
def find_translatable_fields(cls):
    for dtype in cls.packages:
        md_cls = class_for_metadata(dtype)
        flattened_fields = md_cls.get_flattened_fields()
        for kp, uf in flattened_fields.items():
            if uf.queryTaggedValue('translatable'):
                cls.translatable_fields[dtype].append(kp)
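
The check `uf.queryTaggedValue('translatable')` reads a zope.interface tagged value. Below is a minimal, self-contained sketch of how a schema field can be flagged so the loop above picks it up; the IFooMetadata interface and its title field are invented for illustration:

import zope.interface
import zope.schema

class IFooMetadata(zope.interface.Interface):
    title = zope.schema.TextLine(title=u'Title', required=True)

# zope.schema fields inherit tagged-value support from zope.interface.Attribute,
# so a field can be marked as translatable like this:
IFooMetadata.get('title').setTaggedValue('translatable', True)

assert IFooMetadata.get('title').queryTaggedValue('translatable') is True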
Example #4
    def after_search(self, search_results, search_params):
        '''Try to replace displayed fields with their translations (if any).
        '''

        from ckanext.publicamundi.lib.metadata import fields, bound_field
        from ckanext.publicamundi.lib.metadata import class_for_metadata, translator_for

        uf = fields.TextField()
        lang = self.target_language()

        for pkg in search_results['results']:
            source_lang = pkg.get('language')
            if not source_lang or (source_lang == lang):
                continue  # no need to translate
            dtype = pkg['dataset_type']
            md = class_for_metadata(dtype)(identifier=pkg['id'])
            translator = translator_for(md, source_lang)
            # Lookup translations in the context of this package
            translated = False
            for k in ('title', 'notes'):
                tr = translator.get_field_translator(
                    bound_field(uf, (k, ), pkg[k]))
                yf = tr.get(lang) if tr else None
                if yf:
                    pkg[k] = yf.context.value
                    translated = True
            # If at least one translation was found, mark as translated
            if translated:
                pkg['translated_to_language'] = lang

        return search_results
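
For context, after_search is an IPackageController hook. Below is a rough sketch (not from the source) of how such a plugin might be wired up; the class name and the hard-coded target language are assumptions made only for illustration:

import ckan.plugins as p

class TranslatedSearchPlugin(p.SingletonPlugin):
    p.implements(p.IPackageController, inherit=True)

    def target_language(self):
        # Assumption: the real plugin resolves this from the active request locale.
        return 'el'

    def after_search(self, search_results, search_params):
        # ... replace displayed fields with translations, as in the example above ...
        return search_results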
Example #5
def postprocess_dataset_for_edit(key, data, errors, context):
    assert key[0] == '__after', (
        'This validator can only be invoked in the __after stage')

    def debug(msg):
        logger.debug('Post-processing dataset for editing: %s' % (msg))

    # The state we are moving to
    state = data.get(('state', ), '')

    # The previous state (if exists)
    pkg = context.get('package')
    prev_state = pkg.state if pkg else ''

    requested_with_api = 'api_version' in context
    is_new = not pkg

    if is_new and not requested_with_api:
        return  # only core metadata are expected

    key_prefix = dtype = data[('dataset_type', )]
    if dtype not in ext_metadata.dataset_types:
        raise Invalid('Unknown dataset-type: %s' % (dtype))

    # 1. Build metadata object

    cls = ext_metadata.class_for_metadata(dtype)
    md = cls.from_converted_data(data, for_edit=True)

    if not md:
        return  # failed to create (in resources form ?)

    data[(key_prefix, )] = md

    # 2. Validate as an object

    if 'skip_validation' not in context:
        validation_errors = md.validate(dictize_errors=True)
        # Fixme Map validation_errors to errors
        #assert not validation_errors

    # 3. Convert fields to extras

    extras_list = data[('extras', )]
    extras_list.extend(({'key': k, 'value': v} for k, v in md.to_extras()))

    # 4. Compute next state

    if 'skip_validation' in context:
        state = data[('state', )] = 'invalid'
        #data[('private',)] = True

    if not state:
        if prev_state == 'invalid':
            state = data[('state', )] = 'active'

    return
def postprocess_dataset_for_edit(key, data, errors, context):
    assert key[0] == '__after', (
        'This validator can only be invoked in the __after stage')
     
    def debug(msg):
        logger.debug('Post-processing dataset for editing: %s' %(msg))
    
    # The state we are moving to
    state = data.get(('state',), '') 
    
    # The previous state (if exists)
    pkg = context.get('package')
    prev_state = pkg.state if pkg else ''

    requested_with_api = 'api_version' in context
    is_new = not pkg

    if is_new and not requested_with_api:
        return # only core metadata are expected

    key_prefix = dtype = data[('dataset_type',)]
    if dtype not in ext_metadata.dataset_types:
        raise Invalid('Unknown dataset-type: %s' % (dtype))
    
    # 1. Build metadata object

    cls = ext_metadata.class_for_metadata(dtype)
    md = cls.from_converted_data(data, for_edit=True)

    if not md:
        return # failed to create (in resources form ?)

    data[(key_prefix,)] = md
    
    # 2. Validate as an object

    if 'skip_validation' not in context:
        validation_errors = md.validate(dictize_errors=True)
        # Fixme Map validation_errors to errors
        #assert not validation_errors
   
    # 3. Convert fields to extras
    
    extras_list = data[('extras',)]
    extras_list.extend(({'key': k, 'value': v} for k, v in md.to_extras()))
    
    # 4. Compute next state
    
    if 'skip_validation' in context:
        state = data[('state',)] = 'invalid' 
        #data[('private',)] = True
    
    if not state:
        if prev_state == 'invalid':
            state = data[('state',)] = 'active'
    
    return
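
The tuple keys used above, such as ('state',), ('dataset_type',) and ('extras', <n>, 'key'), are produced by CKAN's navl validation machinery, which flattens the package dict before validators run. A small illustration (the dataset values are made up):

from ckan.lib.navl.dictization_functions import flatten_dict

data = flatten_dict({
    'name': 'my-dataset',
    'dataset_type': 'datacite',
    'extras': [{'key': 'datacite.creator', 'value': u'Someone'}],
})
# data == {
#     ('name',): 'my-dataset',
#     ('dataset_type',): 'datacite',
#     ('extras', 0, 'key'): 'datacite.creator',
#     ('extras', 0, 'value'): u'Someone',
# }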
    def _check_result_for_edit(self, data, result):
        key_prefix = dtype = result.get('dataset_type')
        obj_cls = class_for_metadata(dtype)

        keys = data.keys()
        core_keys = set(keys) & self.core_keys
        for key in core_keys:
            assert data[key] == result[key]

        if data.get('tags') and result.get('tags'):
            tags = set(map(lambda t: t['name'], data['tags']))
            result_tags = set(map(lambda t: t['name'], result['tags']))
            assert tags == result_tags

        dt_keys = filter(lambda t: t.startswith(key_prefix + '.'), keys)

        # Note The input data may be in either flat or nested format

        expected_obj = obj_cls()
        if key_prefix in data:
            # Load from nested input data
            expected_obj.from_dict(data[key_prefix],
                                   is_flat=0,
                                   opts={'unserialize-values': 'default'})
        else:
            # Load from flattened input data
            expected_obj.from_dict(data,
                                   is_flat=1,
                                   opts={
                                       'unserialize-keys': True,
                                       'key-prefix': key_prefix,
                                       'unserialize-values': 'default'
                                   })

        result_obj = result[key_prefix]

        # Check if expected and result objects differ. The only acceptable
        # changes are empty fields linked to (non-empty) core CKAN metadata.
        linked_fields = {
            '.'.join(map(str, k)): k1
            for k, f, k1 in obj_cls.iter_linked_fields()
        }
        for change, key, (initial_value, result_value) in dictdiffer.diff(
                expected_obj.to_dict(), result_obj.to_dict()):
            if (key in linked_fields) and (not initial_value) and \
                    (result_value == result[linked_fields[key]]):
                pass  # is a linked field
            else:
                assert False, 'Unexpected change for key %s' % (key)
        pass
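
The comparison above relies on the dictdiffer package. For reference, a 'change' entry yielded by dictdiffer.diff unpacks exactly as in the loop above; the two dictionaries below are invented:

import dictdiffer

old = {'title': u'', 'baz': u'Baobab'}
new = {'title': u'A title', 'baz': u'Baobab'}

changes = list(dictdiffer.diff(old, new))
# changes == [('change', 'title', (u'', u'A title'))]
for change, key, (initial_value, result_value) in changes:
    assert (change, key) == ('change', 'title')
    assert (initial_value, result_value) == (u'', u'A title')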
Example #8
    def show_package_schema(self):
        schema = super(DatasetForm, self).show_package_schema()

        # Don't show vocab tags mixed in with normal 'free' tags
        # (e.g. on dataset pages, or on the search page)
        schema['tags']['__extras'].append(
            toolkit.get_converter('free_tags_only'))

        check_not_empty = toolkit.get_validator('not_empty')
        ignore_missing = toolkit.get_validator('ignore_missing')
        convert_from_extras = toolkit.get_converter('convert_from_extras')

        schema['dataset_type'] = [convert_from_extras, check_not_empty]

        # Add package field-level converters

        get_field_processor = ext_validators.get_field_read_processor

        for dtype in self._dataset_types:
            cls1 = ext_metadata.class_for_metadata(dtype)
            opts1 = {'serialize-keys': True, 'key-prefix': dtype}
            for field_name, field in cls1.get_flattened_fields(
                    opts=opts1).items():
                schema[field_name] = [
                    convert_from_extras, ignore_missing,
                    get_field_processor(field)
                ]

        # Add before/after package-level processors

        preprocess_dataset = ext_validators.preprocess_dataset_for_read
        postprocess_dataset = ext_validators.postprocess_dataset_for_read

        schema['__before'].insert(-1, preprocess_dataset)

        if '__after' not in schema:
            schema['__after'] = []
        schema['__after'].append(postprocess_dataset)

        # Add extra top-level fields (i.e. not under a schema)

        for field_name in self._extra_fields:
            schema[field_name] = [convert_from_extras, ignore_missing]

        # Done, return updated schema

        return schema
    def show_package_schema(self):
        schema = super(DatasetForm, self).show_package_schema()

        # Don't show vocab tags mixed in with normal 'free' tags
        # (e.g. on dataset pages, or on the search page)
        schema['tags']['__extras'].append(toolkit.get_converter('free_tags_only'))
        
        check_not_empty = toolkit.get_validator('not_empty')
        ignore_missing = toolkit.get_validator('ignore_missing')
        convert_from_extras = toolkit.get_converter('convert_from_extras')
        
        schema['dataset_type'] = [convert_from_extras, check_not_empty]
       
        # Add package field-level converters
        
        get_field_processor = ext_validators.get_field_read_processor

        for dtype in self._dataset_types:
            cls1 = ext_metadata.class_for_metadata(dtype)  
            opts1 = {'serialize-keys': True, 'key-prefix': dtype}
            for field_name, field in cls1.get_flattened_fields(opts=opts1).items():
                schema[field_name] = [
                    convert_from_extras, ignore_missing, get_field_processor(field)]
          
        # Add before/after package-level processors
        
        preprocess_dataset = ext_validators.preprocess_dataset_for_read
        postprocess_dataset = ext_validators.postprocess_dataset_for_read

        schema['__before'].insert(-1, preprocess_dataset)
        
        if '__after' not in schema:
            schema['__after'] = []
        schema['__after'].append(postprocess_dataset)
        
        # Add extra top-level fields (i.e. not under a schema)
        
        for field_name in self._extra_fields:
            schema[field_name] = [convert_from_extras, ignore_missing]

        # Done, return updated schema

        return schema
    def _check_result_for_edit(self, data, result):
        key_prefix = dtype = result.get("dataset_type")
        obj_cls = class_for_metadata(dtype)

        keys = data.keys()
        core_keys = set(keys) & self.core_keys
        for key in core_keys:
            assert data[key] == result[key]

        if data.get("tags") and result.get("tags"):
            tags = set(map(lambda t: t["name"], data["tags"]))
            result_tags = set(map(lambda t: t["name"], result["tags"]))
            assert tags == result_tags

        dt_keys = filter(lambda t: t.startswith(key_prefix + "."), keys)

        # Note The input data may be in either flat or nested format

        expected_obj = obj_cls()
        if key_prefix in data:
            # Load from nested input data
            expected_obj.from_dict(data[key_prefix], is_flat=0, opts={"unserialize-values": "default"})
        else:
            # Load from flattened input data
            expected_obj.from_dict(
                data,
                is_flat=1,
                opts={"unserialize-keys": True, "key-prefix": key_prefix, "unserialize-values": "default"},
            )

        result_obj = result[key_prefix]

        # Check if expected and result objects differ. The only acceptable
        # changes are empty fields linked to (non-empty) core CKAN metadata.
        linked_fields = {".".join(map(str, k)): k1 for k, f, k1 in obj_cls.iter_linked_fields()}
        for change, key, (initial_value, result_value) in dictdiffer.diff(expected_obj.to_dict(), result_obj.to_dict()):
            if (key in linked_fields) and (not initial_value) and (result_value == result[linked_fields[key]]):
                pass  # is a linked field
            else:
                assert False, "Unexpected change for key %s" % (key)
        pass
Example #11
def dataset_translation_update(context, data_dict):
    '''Translate a dataset into the active language.

    The accepted format of data_dict is the same as the one passed to core
    `package_update`.

    An additional parameter, `translate_to_language`, determines the target
    language. If not supplied, the active language (from the Pylons request)
    will be used.

    All non-translatable fields will be ignored.

    All fields that are not present (or are empty) in the source package will
    also be ignored.

    :param id: the name or id of the package.
    :type id: string

    :param translate_to_language: the target language
    :type translate_to_language: string

    :rtype: dict
    '''

    # Determine target language

    lang = _target_language(data_dict)

    # Fetch package in source language

    context.update({'translate': False, 'return_json': True})
    pkg = _get_action('package_show')(context, {'id': data_dict['id']})
    dtype = pkg['dataset_type']

    source_lang = pkg['language']
    if lang == source_lang:
        msg = 'The target language is the same as the source language (%s)' % (lang)
        raise Invalid({'translate_to_language': msg})

    md = class_for_metadata(dtype)()
    md.from_json(pkg[dtype])

    # Check authorization

    _check_access('package_translation_update', context,
                  {'org': pkg['owner_org']})

    # Translate structured metadata

    translator = translator_for(md, source_lang)
    md = translator.translate(lang, data_dict[dtype])

    pkg[dtype] = md.to_json(return_string=False)

    # Translate core CKAN metadata

    field_translator = translator.get_field_translator
    uf = fields.TextField()
    for k in ('title', 'notes'):
        v = data_dict.get(k)
        if not (v and pkg.get(k)):
            continue  # nothing to translate
        tr = field_translator(bound_field(uf, (k, ), pkg[k]))
        if not tr:
            continue
        yf = tr.translate(lang, v)
        pkg[k] = v

    # Return translated view of this package

    pkg['translated_to_language'] = lang
    return pkg
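
Assuming this function is registered as an API action under the same name, a hypothetical call through CKAN's action machinery could look as follows; the dataset id, target language and translated values are invented, and the nested 'foo' entry stands for the dataset-type-specific part of the schema:

from ckan.plugins import toolkit

context = {'user': 'editor'}
data_dict = {
    'id': 'my-dataset',                   # name or id of the package
    'translate_to_language': 'el',        # optional; defaults to the active language
    'title': u'A translated title',       # translated core field (optional)
    'foo': {'baz': u'A translated baz'},  # translated values for the dataset-type schema
}
pkg = toolkit.get_action('dataset_translation_update')(context, data_dict)
assert pkg['translated_to_language'] == 'el'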
    data = foo.deduce_fields('notes')
    assert set(data) == {'notes'}
    assert data['notes'] == foo.description


def test_deduce_fields_foo():

    yield _test_deduce_fields_foo, 'foo1'
    yield _test_deduce_fields_foo, 'foo2'


if __name__ == '__main__':

    x = fixtures.foo1

    #field1 = x.get_schema().get('contact_info')

    #fc1 = x.get_field_factory(key='contact_info')
    #fc2 = x.get_field_factory(field=field1)
    #fc3 = x.get_field_factory('contact_info')
    #fc4 = x.get_field_factory(field=field1)

    #_test_schema_for_metadata('foo1')
    #_test_equality('foo1')
    #_test_inequality('foo1', 'foo2')
    test_field_accessors_with_ifoo()
    #_test_deduce_fields_foo('foo1')

    from ckanext.publicamundi.lib.metadata import (class_for, class_for_object,
                                                   class_for_metadata)
    cls1 = class_for_metadata('foo')
Example #13
def postprocess_dataset_for_edit(key, data, errors, context):

    #logger.debug('\nPOSTPROCESS EDIT START IS %s\n', data )  
    assert key[0] == '__after', (
        'This validator can only be invoked in the __after stage')
     
    def debug(msg):
        logger.debug('Post-processing dataset for editing: %s' %(msg))
    
    # The state we are moving to
    state = data.get(('state',), '') 
    
    # The previous state (if exists)
    pkg = context.get('package')
    prev_state = pkg.state if pkg else ''

    requested_with_api = 'api_version' in context
    is_new = not pkg

    #if is_new and not requested_with_api:
    #    return # only core metadata are expected

    key_prefix = dtype = data[('dataset_type',)]
    if dtype not in ext_metadata.dataset_types:
        raise Invalid('Unknown dataset-type: %s' % (dtype))
    
    #logger.debug('DATA IS [%s]' % ', '.join(map(str, data)) ) 
    # 1. Build metadata object

    cls = ext_metadata.class_for_metadata(dtype)
    md = cls.from_converted_data(data, for_edit=True)


    if not md:
        return # failed to create (in resources form ?)

    data[(key_prefix,)] = md
    
    #logger.debug('MD IS %s', md )
    # 2. Validate as an object

    if 'skip_validation' not in context:
        validation_errors = md.validate(dictize_errors=True)
        #logger.debug("\n\n VALIDATION ERRORS IS: %s ,errors is %s, type of errors is %s \n\n", validation_errors, errors, type(validation_errors) )
        #errors[('datacite.related_publication',)] = 'Missing Value'
        #logger.debug("\n\n validation 4 is:: %s type is %s \n\n", validation_errors['creator'],  type(validation_errors['creator']), )
        # Map validation_errors to errors
        for key, value in validation_errors.items():
            #logger.debug("\n\n validation is %s, type is %s, value is %s, type is %s\n", key, type(key), value, type(value) )
            #key = "('datacite.{}',)".format(key)
            if not isinstance(value, list):
                # Fix key-value for classes like Creator (contains multiple fields)
                k = key + '.' + next(iter(value))
                # Make key compatible with errors dict (tuple)
                k = tuple([str.encode("('datacite.%s',)" % k)])
                v = value[next(iter(value))]
                #logger.debug("\n\n key[0] is %s, value[0] is %s \n", k, v)
                if v[0][0] == 'R':  # RequiredMissing
                    errors[k] = u'Missing value'
            else:
                # make key compatible with errors dict (tuple)
                #key = tuple([str.encode("('datacite.%s',)" % key)])
                # fix error message displayed
                #logger.debug("\n\n value in validation is value[0] %s, type is %s, key is %s\n", value[0], type(value[0]), key )
                if value[0][:8] == 'Required':
                    key = tuple([str.encode("('datacite.%s',)" % key)])
                    errors[key] = u'Missing value'
                elif value[0][:7] == 'related':
                    # Remove duplicate error (for wrong value)
                    key_to_remove = tuple([str.encode('datacite.%s' % key)])
                    key = tuple([str.encode("('datacite.%s',)" % key)])
                    errors[key] = u'Invalid DOI value'
                    errors[key_to_remove] = []

        #for k, v in errors.items():
        #    logger.debug("K: %s,type: %s v: %s,type %s ", k,type(k), v, type(v))      
        # Fixme Map validation_errors to errors  ! ! ! ! 
        #assert not validation_errors
   
    # 3. Convert fields to extras
    
    #logger.debug("\n MD IS: %s \n", md )

    # add datacite fields after ckan extras
    index = 7
    for k, v in md.to_extras():
        #logger.debug("\n Key is: %s, value is %s \n", k, v )
        data[('extras', index, 'key')] = k
        data[('extras', index, 'value')] = v
        index = index + 1
    
    # 4. Compute next state
    
    if 'skip_validation' in context:
        state = data[('state',)] = 'invalid' 
        #data[('private',)] = True
    
    #add extra value manually
    #data['extras', 6, 'key'] = 'datacite.funder'
    #data['extras', 6, 'value'] = 'NIK'

    if not state:
        if prev_state == 'invalid':
            state = data[('state',)] = 'active'
   
    #logger.debug('\nPOSTPROCESS EDIT END Data IS %s', data )
    return
def test_factories():
    
    # Test with IFooMetadata

    f1 = factory_for(schemata.IFooMetadata)
    o1 = f1()
    verifyObject(schemata.IFooMetadata, o1)
    assert isinstance(o1, types.FooMetadata)
    
    f1 = factory_for_metadata('foo')
    o1 = f1()
    verifyObject(schemata.IFooMetadata, o1)
    assert isinstance(o1, types.FooMetadata)
    
    c1 = class_for(schemata.IFooMetadata)
    verifyObject(IIntrospective, c1, tentative=1)
    o1 = c1()
    verifyObject(schemata.IFooMetadata, o1)
    assert c1 is types.FooMetadata

    c1 = class_for_metadata('foo')
    verifyObject(IIntrospective, c1, tentative=1)
    o1 = c1()
    verifyObject(schemata.IFooMetadata, o1)
    assert c1 is types.FooMetadata
    
    # Test with AnotherFoo
    
    def check_foo1_after_init(o):
        return (
            o.baz == u'Baobab' and 
            o.temporal_extent is None and
            isinstance(o.contact_info, types.ContactInfo))
       
    f2 = factory_for(schemata.IFooMetadata, name='foo-1')
    o2 = f2()
    verifyObject(schemata.IFooMetadata, o2)
    assert isinstance(o2, AnotherFoo)
    assert check_foo1_after_init(o2)

    f2 = factory_for_metadata('foo.1')
    o2 = f2()
    verifyObject(schemata.IFooMetadata, o2)
    assert isinstance(o2, AnotherFoo)
    assert check_foo1_after_init(o2)

    c2 = class_for(schemata.IFooMetadata, 'foo-1')
    verifyObject(IIntrospective, c2, tentative=1)
    o2 = c2()
    verifyObject(schemata.IFooMetadata, o2)
    assert c2 is AnotherFoo
    assert check_foo1_after_init(o2)

    c2 = class_for_metadata('foo.1')
    verifyObject(IIntrospective, c2, tentative=1)
    o2 = c2()
    verifyObject(schemata.IFooMetadata, o2)
    assert c2 is AnotherFoo
    assert check_foo1_after_init(o2)

    # Test with non-registered names

    try:
        f3 = factory_for(schemata.IFooMetadata, name='a-non-existing-name')
    except (ValueError, LookupError) as ex:
        pass
    else:
        assert False, 'This should have failed'

    try:
        f3 = factory_for_metadata('')
    except ValueError as ex:
        pass
    else:
        assert False, 'This should have failed (a name is required!)'
   
    try:
        f3 = factory_for_metadata('foo.9')
    except (ValueError, LookupError) as ex:
        pass
    else:
        assert False, 'This should have failed'
def test_factories():

    # Test with IFooMetadata

    f1 = factory_for(schemata.IFooMetadata)
    o1 = f1()
    verifyObject(schemata.IFooMetadata, o1)
    assert isinstance(o1, types.FooMetadata)

    f1 = factory_for_metadata('foo')
    o1 = f1()
    verifyObject(schemata.IFooMetadata, o1)
    assert isinstance(o1, types.FooMetadata)

    c1 = class_for(schemata.IFooMetadata)
    verifyObject(IIntrospective, c1, tentative=1)
    o1 = c1()
    verifyObject(schemata.IFooMetadata, o1)
    assert c1 is types.FooMetadata

    c1 = class_for_metadata('foo')
    verifyObject(IIntrospective, c1, tentative=1)
    o1 = c1()
    verifyObject(schemata.IFooMetadata, o1)
    assert c1 is types.FooMetadata

    # Test with AnotherFoo

    def check_foo1_after_init(o):
        return (o.baz == u'Baobab' and o.temporal_extent is None
                and isinstance(o.contact_info, types.ContactInfo))

    f2 = factory_for(schemata.IFooMetadata, name='foo-1')
    o2 = f2()
    verifyObject(schemata.IFooMetadata, o2)
    assert isinstance(o2, AnotherFoo)
    assert check_foo1_after_init(o2)

    f2 = factory_for_metadata('foo.1')
    o2 = f2()
    verifyObject(schemata.IFooMetadata, o2)
    assert isinstance(o2, AnotherFoo)
    assert check_foo1_after_init(o2)

    c2 = class_for(schemata.IFooMetadata, 'foo-1')
    verifyObject(IIntrospective, c2, tentative=1)
    o2 = c2()
    verifyObject(schemata.IFooMetadata, o2)
    assert c2 is AnotherFoo
    assert check_foo1_after_init(o2)

    c2 = class_for_metadata('foo.1')
    verifyObject(IIntrospective, c2, tentative=1)
    o2 = c2()
    verifyObject(schemata.IFooMetadata, o2)
    assert c2 is AnotherFoo
    assert check_foo1_after_init(o2)

    # Test with non-registered names

    try:
        f3 = factory_for(schemata.IFooMetadata, name='a-non-existing-name')
    except (ValueError, LookupError) as ex:
        pass
    else:
        assert False, 'This should have failed'

    try:
        f3 = factory_for_metadata('')
    except ValueError as ex:
        pass
    else:
        assert False, 'This should have failed (a name is required!)'

    try:
        f3 = factory_for_metadata('foo.9')
    except (ValueError, LookupError) as ex:
        pass
    else:
        assert False, 'This should have failed'
Example #16
    def __modify_package_schema(self, schema):
        '''Define modify schema for both create/update operations.
        '''

        check_not_empty = toolkit.get_validator('not_empty')
        ignore_missing = toolkit.get_validator('ignore_missing')
        ignore_empty = toolkit.get_validator('ignore_empty')
        convert_to_extras = toolkit.get_converter('convert_to_extras')
        default = toolkit.get_validator('default')
        
        # Add dataset-type, the field that distinguishes metadata formats

        is_dataset_type = ext_validators.is_dataset_type
        schema['dataset_type'] = [
            default('ckan'), convert_to_extras, is_dataset_type,
        ]
       
        # Add package field-level validators/converters
        
        # Note We provide a union of fields for all supported schemata.
        # Of course, not all of them will be present in a specific dataset,
        # so any "required" constraint cannot be applied here.

        get_field_processor = ext_validators.get_field_edit_processor
        
        for dtype in self._dataset_types:
            cls1 = ext_metadata.class_for_metadata(dtype)  
            opts1 = {'serialize-keys': True, 'key-prefix': dtype}
            for field_name, field in cls1.get_flattened_fields(opts=opts1).items():
                # Build chain of processors for field
                schema[field_name] = [
                    ignore_missing, get_field_processor(field)]
        
        # Add before/after package-level processors

        preprocess_dataset = ext_validators.preprocess_dataset_for_edit
        postprocess_dataset = ext_validators.postprocess_dataset_for_edit
        
        schema['__before'].insert(-1, preprocess_dataset)

        if '__after' not in schema:
            schema['__after'] = []
        schema['__after'].append(postprocess_dataset)
        
        # Add extra top-level fields (i.e. not bound to a schema)
        
        for field_name in self._extra_fields:
            schema[field_name] = [ignore_empty, convert_to_extras]
        
        # Add or replace resource field-level validators/converters

        guess_resource_type = ext_validators.guess_resource_type_if_empty

        schema['resources'].update({
            'resource_type': [
                guess_resource_type, string.lower, unicode],
            'format': [
                check_not_empty, string.lower, unicode],
        })

        # Done, return updated schema

        return schema
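
__modify_package_schema is a private helper; in CKAN IDatasetForm plugins it is typically shared by both schema hooks. A sketch of the assumed wiring (the rest of the class body, including __modify_package_schema itself as defined above, is omitted):

import ckan.plugins as p
import ckan.plugins.toolkit as toolkit

class DatasetForm(p.SingletonPlugin, toolkit.DefaultDatasetForm):
    p.implements(p.IDatasetForm, inherit=True)

    def create_package_schema(self):
        schema = super(DatasetForm, self).create_package_schema()
        return self.__modify_package_schema(schema)

    def update_package_schema(self):
        schema = super(DatasetForm, self).update_package_schema()
        return self.__modify_package_schema(schema)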
    data = foo.deduce_fields('notes')
    assert set(data) == {'notes'}
    assert data['notes'] == foo.description

def test_deduce_fields_foo():
    
    yield _test_deduce_fields_foo, 'foo1' 
    yield _test_deduce_fields_foo, 'foo2' 

if __name__ == '__main__':
     
    x = fixtures.foo1
    
    #field1 = x.get_schema().get('contact_info')

    #fc1 = x.get_field_factory(key='contact_info')
    #fc2 = x.get_field_factory(field=field1)
    #fc3 = x.get_field_factory('contact_info')
    #fc4 = x.get_field_factory(field=field1)

    #_test_schema_for_metadata('foo1')
    #_test_equality('foo1')
    #_test_inequality('foo1', 'foo2')
    test_field_accessors_with_ifoo()
    #_test_deduce_fields_foo('foo1')

    from ckanext.publicamundi.lib.metadata import (
        class_for, class_for_object, class_for_metadata)
    cls1 = class_for_metadata('foo')
    
Example #18
    def after_show(self, context, pkg_dict, view=None):
        '''Hook into the validated data dict after the package is ready for display. 
        
        The main tasks here are:
         * Convert dataset_type-related parts of pkg_dict to a nested dict or an object.

        This hook is for reading purposes only, i.e. for template variables, API results,
        form initial values etc. It should *not* affect the way the read schema is used:
        schema items declared at read_package_schema() should not be removed (though their
        values can be changed!).
        ''' 
        c = toolkit.c
        rr = c.environ['pylons.routes_dict'] if c.environ else {}

        is_validated = context.get('validate', True)
        if not is_validated:
            return # noop: extras are not yet promoted to 1st-level fields
    
        for_view = context.get('for_view', False)
        for_edit = ( # is this package prepared for edit ?
            (rr.get('controller') == 'package' and rr.get('action') == 'edit') or
            (rr.get('controller') == 'api' and rr.get('action') == 'action' and
                rr.get('logic_function') in DatasetForm.after_show._api_edit_actions))
        return_json = ( # do we need to return a json-friendly result ?
            context.get('return_json', False) or
            (rr.get('controller') == 'api' and rr.get('action') == 'action' and
                rr.get('logic_function') in DatasetForm.after_show._api_actions))

        log1.info(
            'Package %s is shown: for-view=%s for-edit=%s api=%s', 
            pkg_dict.get('name'), for_view, for_edit, context.get('api_version'))

        # Determine dataset_type-related parameters for this package
        
        key_prefix = dtype = pkg_dict.get('dataset_type')
        if not dtype:
            return # noop: unknown dataset-type (pkg_dict has raw extras?)
 
        # Note Do not attempt to pop() flat keys here (e.g. to replace them by a 
        # nested structure), because resource forms will clear all extra fields !!

        # Turn to an object
        
        md = class_for_metadata(dtype).from_converted_data(pkg_dict)

        # Provide a different view, if not editing
        
        if (not for_edit) and view and callable(view):
            try:
                md = view(md)
            except Exception as ex:
                log1.warn('Cannot build view %r for package %r: %s',
                    view, pkg_dict.get('name'), str(ex))
                pass # noop: keep the original view
        
        pkg_dict[key_prefix] = md
        
        # Fix for json-friendly results (so json.dumps can handle them)

        if return_json:
            # Remove flat field values (won't be needed anymore)
            key_prefix_1 = key_prefix + '.'
            for k in (y for y in pkg_dict.keys() if y.startswith(key_prefix_1)):
                pkg_dict.pop(k)
            pkg_dict[key_prefix] = md.to_json(return_string=False)
         
        return pkg_dict
Example #19
    def __modify_package_schema(self, schema):
        '''Define modify schema for both create/update operations.
        '''

        check_not_empty = toolkit.get_validator('not_empty')
        ignore_missing = toolkit.get_validator('ignore_missing')
        ignore_empty = toolkit.get_validator('ignore_empty')
        convert_to_extras = toolkit.get_converter('convert_to_extras')
        default = toolkit.get_validator('default')

        # Add dataset-type, the field that distinguishes metadata formats

        is_dataset_type = ext_validators.is_dataset_type
        schema['dataset_type'] = [
            default('ckan'),
            convert_to_extras,
            is_dataset_type,
        ]

        # Add package field-level validators/converters

        # Note We provide a union of fields for all supported schemata.
        # Of course, not all of them will be present in a specific dataset,
        # so any "required" constraint cannot be applied here.

        get_field_processor = ext_validators.get_field_edit_processor

        for dtype in self._dataset_types:
            cls1 = ext_metadata.class_for_metadata(dtype)
            opts1 = {'serialize-keys': True, 'key-prefix': dtype}
            for field_name, field in cls1.get_flattened_fields(
                    opts=opts1).items():
                # Build chain of processors for field
                schema[field_name] = [
                    ignore_missing, get_field_processor(field)
                ]

        # Add before/after package-level processors

        preprocess_dataset = ext_validators.preprocess_dataset_for_edit
        postprocess_dataset = ext_validators.postprocess_dataset_for_edit

        schema['__before'].insert(-1, preprocess_dataset)

        if '__after' not in schema:
            schema['__after'] = []
        schema['__after'].append(postprocess_dataset)

        # Add extra top-level fields (i.e. not bound to a schema)

        for field_name in self._extra_fields:
            schema[field_name] = [ignore_empty, convert_to_extras]

        # Add or replace resource field-level validators/converters

        guess_resource_type = ext_validators.guess_resource_type_if_empty

        schema['resources'].update({
            'resource_type': [guess_resource_type, string.lower, unicode],
            'format': [check_not_empty, string.lower, unicode],
        })

        # Done, return updated schema

        return schema
def dataset_translation_update(context, data_dict):
    '''Translate a dataset into the active language.

    The accepted format of data_dict is the same as the one passed to core
    `package_update`.

    An additional parameter, `translate_to_language`, determines the target
    language. If not supplied, the active language (from the Pylons request)
    will be used.

    All non-translatable fields will be ignored.

    All fields that are not present (or are empty) in the source package will
    also be ignored.

    :param id: the name or id of the package.
    :type id: string

    :param translate_to_language: the target language
    :type translate_to_language: string

    :rtype: dict
    '''
     
    # Determine target language
    
    lang = _target_language(data_dict)

    # Fetch package in source language

    context.update({
        'translate': False,
        'return_json': True
    })
    pkg = _get_action('package_show')(context, {'id': data_dict['id']})
    dtype = pkg['dataset_type']

    source_lang = pkg['language']
    if lang == source_lang:
        msg = 'The target language is the same as the source language (%s)' % (lang)
        raise Invalid({'translate_to_language': msg})
 
    md = class_for_metadata(dtype)()
    md.from_json(pkg[dtype])
   
    # Check authorization

    _check_access(
        'package_translation_update', context, {'org': pkg['owner_org']})
    
    # Translate structured metadata
    
    translator = translator_for(md, source_lang)
    md = translator.translate(lang, data_dict[dtype])
    
    pkg[dtype] = md.to_json(return_string=False)

    # Translate core CKAN metadata

    field_translator = translator.get_field_translator
    uf = fields.TextField()
    for k in ('title', 'notes'):
        v = data_dict.get(k)
        if not (v and pkg.get(k)):
            continue # nothing to translate
        tr = field_translator(bound_field(uf, (k,), pkg[k]))
        if not tr:
            continue 
        yf = tr.translate(lang, v)
        pkg[k] = v

    # Return translated view of this package

    pkg['translated_to_language'] = lang
    return pkg
Example #21
    def after_show(self, context, pkg_dict, view=None):
        '''Hook into the validated data dict after the package is ready for display. 
        
        The main tasks here are:
         * Convert dataset_type-related parts of pkg_dict to a nested dict or an object.

        This hook is for reading purposes only, i.e. for template variables, API results,
        form initial values etc. It should *not* affect the way the read schema is used:
        schema items declared at read_package_schema() should not be removed (though their
        values can be changed!).
        '''
        c = toolkit.c
        rr = c.environ['pylons.routes_dict'] if c.environ else {}

        is_validated = context.get('validate', True)
        if not is_validated:
            return  # noop: extras are not yet promoted to 1st-level fields

        for_view = context.get('for_view', False)
        for_edit = (  # is this package prepared for edit ?
            (rr.get('controller') == 'package' and rr.get('action') == 'edit')
            or (rr.get('controller') == 'api' and rr.get('action') == 'action'
                and rr.get('logic_function')
                in DatasetForm.after_show._api_edit_actions))
        return_json = (  # do we need to return a json-friendly result ?
            context.get('return_json', False) or
            (rr.get('controller') == 'api' and rr.get('action') == 'action' and
             rr.get('logic_function') in DatasetForm.after_show._api_actions))

        log1.info('Package %s is shown: for-view=%s for-edit=%s api=%s',
                  pkg_dict.get('name'), for_view, for_edit,
                  context.get('api_version'))

        # Determine dataset_type-related parameters for this package

        key_prefix = dtype = pkg_dict.get('dataset_type')
        if not dtype:
            return  # noop: unknown dataset-type (pkg_dict has raw extras?)

        # Note Do not attempt to pop() flat keys here (e.g. to replace them by a
        # nested structure), because resource forms will clear all extra fields !!

        # Turn to an object

        md = class_for_metadata(dtype).from_converted_data(pkg_dict)

        # Provide a different view, if not editing

        if (not for_edit) and view and callable(view):
            try:
                md = view(md)
            except Exception as ex:
                log1.warn('Cannot build view %r for package %r: %s', view,
                          pkg_dict.get('name'), str(ex))
                pass  # noop: keep the original view

        pkg_dict[key_prefix] = md

        # Fix for json-friendly results (so json.dumps can handle them)

        if return_json:
            # Remove flat field values (won't be needed anymore)
            key_prefix_1 = key_prefix + '.'
            for k in (y for y in pkg_dict.keys()
                      if y.startswith(key_prefix_1)):
                pkg_dict.pop(k)
            pkg_dict[key_prefix] = md.to_json(return_string=False)

        return pkg_dict