Example #1
 def test_empty(self):
     cc = CollectionChangeset(self.orig_col, self.new_col)
     expected = {
         "new": OrderedDict(),
         "same": OrderedDict(),
         "changed": OrderedDict(),
         "deleted": OrderedDict(),
     }
     self.assertEqual(expected, cc.changes)
     expected_summary = ""
     self.assertEqual(expected_summary, cc.summarize())
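These tests rely on self.orig_col and self.new_col fixtures that are not shown in the snippets. A minimal setUp along these lines (an assumption, not part of the source) would provide two empty collections to diff:

 def setUp(self):
     # Hypothetical fixture: two empty collections, using the same
     # Collection class that the later examples instantiate directly.
     self.orig_col = Collection()
     self.new_col = Collection()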
Example #3
 def test_empty(self):
     cc = CollectionChangeset(self.orig_col, self.new_col)
     expected = {
         'new': OrderedDict(),
         'same': OrderedDict(),
         'changed': OrderedDict(),
         'deleted': OrderedDict(),
     }
     self.assertEqual(expected, cc.changes)
     expected_summary = ''
     self.assertEqual(expected_summary, cc.summarize())
Example #5
 def test_new_browser_with_ordered_versions(self):
     """When order matters, creation order is sort order."""
     browser = Browser(id='_b',
                       slug='browser',
                       versions=['u', '1.0', '2.0'])
     v_unknown = Version(id='u', version=None, browser='_b')
     v_1 = Version(id='1.0', version='1.0', browser='_b')
     v_2 = Version(id='2.0', version='2.0', browser='_b')
     self.new_col.add(browser)
     self.new_col.add(v_1)
     self.new_col.add(v_unknown)
     self.new_col.add(v_2)
     cc = CollectionChangeset(self.orig_col, self.new_col)
     expected_order = OrderedDict([
         (('browsers', 'browser'), browser),
         (('versions', 'browser', ''), v_unknown),
         (('versions', 'browser', '1.0'), v_1),
         (('versions', 'browser', '2.0'), v_2),
     ])
     expected = {
         'new': expected_order,
         'same': OrderedDict(),
         'changed': OrderedDict(),
         'deleted': OrderedDict(),
     }
     self.assertEqual(expected_order.keys(), cc.changes['new'].keys())
     self.assertEqual(expected, cc.changes)
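The 'new' bucket is an OrderedDict whose keys are tuples starting with the resource type, so the changes can be consumed in the creation order that CollectionChangeset computed. A small sketch (not from the source) of walking that ordering:

# Walk the ordered 'new' bucket; keys are tuples such as
# ('browsers', 'browser') or ('versions', 'browser', '1.0').
for key, resource in cc.changes['new'].items():
    resource_type = key[0]
    print(resource_type, key[1:], resource)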
Example #6
 def setup_new(self):
     browser = Browser(id='_chrome', slug='chrome')
     version = Version(version='2.0', browser='_chrome')
     self.new_col.add(browser)
     self.new_col.add(version)
     resources = (browser, version)
     return resources, CollectionChangeset(self.orig_col, self.new_col)
Example #7
    def test_new_versions_to_existing_browser(self):
        """When order matters, new items update the parent item."""
        browser = Browser(id='_b', slug='browser', versions=['u'])
        v_1 = Version(id='1.0', version='1.0', browser='_b')
        self.orig_col.add(browser)
        self.orig_col.add(v_1)

        browser_new = Browser(id='_b',
                              slug='browser',
                              versions=['u', '1.0', '2.0'])
        v_unknown = Version(id='u', version=None, browser='_b')
        v_1_same = Version(id='1.0', version='1.0', browser='_b')
        v_2 = Version(id='2.0', version='2.0', browser='_b')
        self.new_col.add(browser_new)
        self.new_col.add(v_unknown)
        self.new_col.add(v_1_same)
        self.new_col.add(v_2)
        cc = CollectionChangeset(self.orig_col, self.new_col)
        expected_order = OrderedDict([
            (('versions', 'browser', ''), v_unknown),
            (('versions', 'browser', '2.0'), v_2),
        ])
        expected = {
            'new': expected_order,
            'same': OrderedDict([
                (('versions', 'browser', '1.0'), v_1_same),
            ]),
            'changed': OrderedDict([
                (('browsers', 'browser'), browser_new),
            ]),
            'deleted': OrderedDict(),
        }
        self.assertEqual(expected_order.keys(), cc.changes['new'].keys())
        self.assertEqual(expected, cc.changes)
Example #8
 def test_new_reference_with_dependencies(self):
     feature = Feature(id='feature',
                       slug='feature',
                       name={'en': 'Feature'},
                       sections=['section'])
     reference = Reference(id='reference',
                           feature='feature',
                           section='section')
     section = Section(id='section', specification='spec')
     spec = Specification(id='spec',
                          mdn_key='SPEC',
                          slug='spec',
                          maturity='maturity')
     maturity = Maturity(id='maturity', slug='mat')
     self.new_col.add(feature)
     self.new_col.add(reference)
     self.new_col.add(section)
     self.new_col.add(spec)
     self.new_col.add(maturity)
     cc = CollectionChangeset(self.orig_col, self.new_col)
     expected_order = OrderedDict([
         (('maturities', 'mat'), maturity),
         (('specifications', 'SPEC'), spec),
         (('sections', 'SPEC', ''), section),
         (('features', 'feature'), feature),
         (('references', 'feature', 'SPEC', ''), reference),
     ])
     expected = {
         'new': expected_order,
         'same': OrderedDict(),
         'changed': OrderedDict(),
         'deleted': OrderedDict(),
     }
     self.assertEqual(expected_order.keys(), cc.changes['new'].keys())
     self.assertEqual(expected, cc.changes)
Example #9
 def setup_deleted(self, skip_deletes=False):
     browser = Browser(id='_chrome', slug='chrome')
     version = Version(id='_chrome_2', version='2.0', browser='_chrome')
     self.orig_col.add(browser)
     self.orig_col.add(version)
     resources = (browser, version)
     return resources, CollectionChangeset(self.orig_col, self.new_col,
                                           skip_deletes)
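A test built on setup_deleted would presumably find both resources in the 'deleted' bucket, since the new collection is empty. A hedged sketch, with the key layout inferred from the pattern in Example #5:

 def test_deleted(self):
     resources, cc = self.setup_deleted()
     browser, version = resources
     # Key tuples are assumed to follow the (type, slug[, version]) pattern
     # shown in Example #5; deletion order is not asserted here.
     self.assertEqual(
         {('browsers', 'chrome'), ('versions', 'chrome', '2.0')},
         set(cc.changes['deleted'].keys()))
     self.assertFalse(cc.changes['new'])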
Example #10
 def test_features_root(self):
     root = Feature(id='root', slug='root', parent=None)
     self.new_col.add(root)
     cc = CollectionChangeset(self.orig_col, self.new_col)
     expected = {
         'new': OrderedDict([
             (('features', 'root'), root),
         ]),
         'changed': OrderedDict(),
         'deleted': OrderedDict(),
         'same': OrderedDict(),
     }
     self.assertEqual(expected, cc.changes)
Example #11
    def setup_new_with_dependencies(self):
        parent = Feature(id='parent', slug='parent')
        child1 = Feature(id='child1', slug='child1', parent='parent')
        child2 = Feature(id='child2', slug='child2', parent='parent')
        child3 = Feature(id='child3', slug='child3', parent='parent')
        grandchild = Feature(id='gchild', slug='grandchild', parent='child2')

        self.new_col.add(child1)
        self.new_col.add(parent)
        self.new_col.add(child2)
        self.new_col.add(child3)
        self.new_col.add(grandchild)
        resources = (parent, child1, child2, child3, grandchild)
        return resources, CollectionChangeset(self.orig_col, self.new_col)
    def setup_matched(self):
        browser = Browser(id='1', slug='chrome')
        version = Version(
            id='1', version='2.0', browser='1', note={'en': 'Second Version'})
        self.orig_col.add(browser)
        self.orig_col.add(version)

        browser_same = Browser(id='_chrome', slug='chrome')
        version_diff = Version(
            id='_version', version='2.0', browser='_chrome',
            note=OrderedDict((
                ('en', 'Second Version'),
                ('es', 'Segunda Versión'))))
        self.new_col.add(version_diff)
        self.new_col.add(browser_same)
        resources = (version, browser_same, version_diff)
        return resources, CollectionChangeset(self.orig_col, self.new_col)
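Given setup_matched, a follow-up test would presumably report the version with the added Spanish note as changed, and nothing as new or deleted, since both collections describe the same browser and version. A hedged sketch, with the key tuple inferred from the earlier examples:

    def test_matched(self):
        resources, cc = self.setup_matched()
        version, browser_same, version_diff = resources
        # The version gained an 'es' note, so it should show up as changed.
        self.assertIn(('versions', 'chrome', '2.0'), cc.changes['changed'])
        self.assertFalse(cc.changes['new'])
        self.assertFalse(cc.changes['deleted'])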
Example #13
 def test_new_section_with_dependencies(self):
     section = Section(id='section', specification='spec')
     spec = Specification(id='spec',
                          mdn_key='SPEC',
                          slug='spec',
                          maturity='maturity')
     maturity = Maturity(id='maturity', slug='mat')
     self.new_col.add(section)
     self.new_col.add(spec)
     self.new_col.add(maturity)
     cc = CollectionChangeset(self.orig_col, self.new_col)
     expected_order = OrderedDict([
         (('maturities', 'mat'), maturity),
         (('specifications', 'SPEC'), spec),
         (('sections', 'SPEC', ''), section),
     ])
     expected = {
         'new': expected_order,
         'same': OrderedDict(),
         'changed': OrderedDict(),
         'deleted': OrderedDict(),
     }
     self.assertEqual(expected_order.keys(), cc.changes['new'].keys())
     self.assertEqual(expected, cc.changes)
class FeatureExtra(object):
    """Handle new and updated data in a view_feature update"""
    def __init__(self, data, feature, context):
        self.data = data
        self.feature = feature
        self.context = context

    def is_valid(self):
        """Validate the linked data"""
        self.errors = {}
        self._process_data()
        self._validate_changes()
        return not self.errors

    def load_resource(self, resource_cls, data):
        """Load a resource, converting data to look like wire data

        Conversions:
        - Stringify IDs (5 -> "5")
        - Convert Date to ISO 8601 (2015-02-17)
        """
        rdata = {}
        wlinks = getattr(resource_cls, '_writeable_link_fields', {})
        rlinks = getattr(resource_cls, '_readonly_link_fields', {})
        link_names = set(['id'] + list(wlinks.keys()) + list(rlinks.keys()))
        for key, value in data.items():
            if key in link_names:
                if isinstance(value, list):
                    raw_ids = value
                    unlist = False
                else:
                    raw_ids = [value]
                    unlist = True
                ids = []
                for i in raw_ids:
                    if i is None:
                        ids.append(None)
                    else:
                        ids.append(str(i))
                if unlist:
                    rdata[key] = ids[0]
                else:
                    rdata[key] = ids
            else:
                rdata[key] = value
        return resource_cls(**rdata)

    def _process_data(self):
        """Load the linked data and compare to current data."""
        assert not hasattr(self, 'changes')
        assert hasattr(self, 'errors')
        r_by_t = Collection.resource_by_type

        # Create and load collection of new data
        new_collection = Collection()
        for rtype, items in self.data.items():
            resource_cls = r_by_t.get(rtype)
            if resource_cls:
                for seq, json_api_item in enumerate(items):
                    item = json_api_item.copy()
                    links = item.pop('links', {})
                    item.update(links)
                    resource = self.load_resource(resource_cls, item)
                    resource._seq = seq
                    new_collection.add(resource)

        # Create native representation of current feature data
        current_collection = Collection(DjangoResourceClient())
        feature_serializer = ViewFeatureSerializer(context=self.context)
        current_feature = feature_serializer.to_representation(self.feature)
        current_extra = current_feature.pop('_view_extra')
        del current_extra['meta']

        # Load feature into new and current collection
        current_feature_resource = self.load_resource(
            r_by_t['features'], current_feature)
        current_collection.add(current_feature_resource)
        current_feature.update(self.feature._in_extra)
        current_feature['id'] = str(current_feature['id'])
        resource_feature = self.load_resource(
            r_by_t['features'], current_feature)
        resource_feature._seq = None
        new_collection.add(resource_feature)

        # Populate collection of current data
        for rtype, items in current_extra.items():
            resource_cls = r_by_t[rtype]
            for item in items:
                resource = self.load_resource(resource_cls, item)
                current_collection.add(resource)

        # Add existing items not explicit in PUT content
        # This avoids 'delete' changes
        new_items = new_collection.get_all_by_data_id()
        for data_id, item in current_collection.get_all_by_data_id().items():
            if data_id not in new_items:
                resource = r_by_t[item._resource_type]()
                resource.from_json_api(item.to_json_api())
                resource._seq = None
                new_collection.add(resource)

        # Add existing items used in new collection to current collection
        # This avoids incorrect 'new' changes
        existing_items = current_collection.get_all_by_data_id()
        for data_id, item in new_collection.get_all_by_data_id().items():
            if item.id:
                item_id = item.id.id
                int_id = None
                existing_item = existing_items.get(data_id)
                try:
                    int_id = int(item_id)
                except ValueError:
                    pass
                if int_id and (existing_item is None):
                    rtype = item._resource_type
                    resource_cls = r_by_t[rtype]
                    model_cls, serializer_cls = view_cls_by_name[rtype]
                    obj = model_cls.objects.get(id=int_id)
                    serializer = serializer_cls()
                    data = serializer.to_representation(obj)
                    resource = self.load_resource(resource_cls, data)
                    current_collection.add(resource)

        # Load the diff
        self.changeset = CollectionChangeset(
            current_collection, new_collection)
        assert not self.changeset.changes.get('deleted')

    def add_error(self, resource_type, seq, error_dict):
        """Add a validation error for a linked resource."""
        self.errors.setdefault(
            resource_type, {}).setdefault(seq, {}).update(error_dict)

    def _validate_changes(self):
        """Validate the changes.

        Validation includes:
        - Field validation of properties
        - Disallow adding features outside of the target feature's subtree
        - Disallow additions of maturities

        Validation of links is not attempted, since most validation errors
        will be relations to new resources.  This may miss links to
        "existing" resources that aren't in the database, but those will
        be DoesNotExist exceptions in _process_data.
        """
        assert hasattr(self, 'changeset')
        assert hasattr(self, 'errors')
        assert not self.errors

        new_collection = self.changeset.new_collection
        resource_feature = new_collection.get('features', str(self.feature.id))

        # Validate with DRF serializers
        for data_id, item in new_collection.get_all_by_data_id().items():
            rtype = item._resource_type
            model_cls, serializer_cls = view_cls_by_name[rtype]
            seq = getattr(item, '_seq')
            if seq is None:
                continue

            # Does the ID imply an existing instance?
            int_id = None
            instance = None
            assert item.id
            item_id = item.id.id
            try:
                int_id = int(item_id)
            except ValueError:
                pass
            else:
                instance = model_cls.objects.get(id=int_id)

            # Validate the data with DRF serializer
            data = item.to_json_api()[rtype]
            links = data.pop('links', {})
            data.update(links)
            serializer = serializer_cls(instance=instance, data=data)
            if not serializer.is_valid():
                errors = {}
                # Discard errors in link fields, for now
                for fieldname, error in serializer.errors.items():
                    if fieldname not in links:
                        errors[fieldname] = error
                if errors:
                    self.add_error(rtype, seq, errors)

        # Validate that features are in the feature tree
        target_id = resource_feature.id.id
        for feature in new_collection.get_resources('features'):
            if feature.id.id == target_id:
                continue

            f = feature
            while (f and f.parent is not None and
                    f.parent.id != target_id):
                f = new_collection.get('features', f.parent.id)

            if f is None or f.parent.id is None:
                error = (
                    "Feature must be a descendant of feature %s." % target_id)
                self.add_error('features', feature._seq, {'parent': error})

        # Validate that "expert" objects are not added
        expert_resources = set((
            'maturities', 'specifications', 'versions', 'browsers'))
        add_error = (
            'Resource can not be created as part of this update. Create'
            ' first, and try again.')
        for item in self.changeset.changes['new'].values():
            if item._resource_type in expert_resources:
                self.add_error(
                    item._resource_type, item._seq, {'id': add_error})

        # Validate that "expert" objects are not changed
        change_err = (
            'Field can not be changed from %s to %s as part of this update.'
            ' Update the resource by itself, and try again.')
        for item in self.changeset.changes['changed'].values():
            if item._resource_type in expert_resources:
                rtype = item._resource_type
                new_json = dict(item.to_json_api()[rtype])
                new_json.update(new_json.pop('links', {}))
                orig_json = dict(item._original.to_json_api()[rtype])
                orig_json.update(orig_json.pop('links', {}))
                for key, value in orig_json.items():
                    if value != new_json.get(key, "(missing)"):
                        err = change_err % (dumps(value), dumps(new_json[key]))
                        self.add_error(rtype, item._seq, {key: err})

    def save(self, **kwargs):
        """Commit changes to linked data"""
        self.changeset.change_original_collection()

        # Adding sub-features will change the MPTT tree through direct SQL.
        # Load the new tree data from the database before parent serializer
        # overwrites it with old values.
        tree_attrs = ['lft', 'rght', 'tree_id', 'level', 'parent']
        db_feature = Feature.objects.only(*tree_attrs).get(id=self.feature.id)
        for attr in tree_attrs:
            setattr(self.feature, attr, getattr(db_feature, attr))
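The class above is driven through is_valid() and save(). A minimal usage sketch; the helper and its caller are hypothetical, not from the source:

def apply_view_extra(data, feature, context):
    """Hypothetical helper showing the intended call sequence."""
    extra = FeatureExtra(data, feature, context)
    if not extra.is_valid():
        # extra.errors is keyed by resource type, then item sequence,
        # then field name.
        return extra.errors
    extra.save()
    return None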
Example #15
class FeatureExtra(object):
    """Handle new and updated data in a view_feature update."""
    def __init__(self, data, feature, context):
        self.data = data
        self.feature = feature
        self.context = context

    def is_valid(self):
        """Validate the linked data."""
        self.errors = {}
        self._process_data()
        self._validate_changes()
        return not self.errors

    def load_resource(self, resource_cls, data):
        """Load a resource, converting data to look like wire data.

        Conversions:
        - Stringify IDs (5 -> "5")
        - Convert Date to ISO 8601 (2015-02-17)
        """
        rdata = {}
        wlinks = getattr(resource_cls, '_writeable_link_fields', {})
        rlinks = getattr(resource_cls, '_readonly_link_fields', {})
        link_names = set(['id'] + list(wlinks.keys()) + list(rlinks.keys()))
        for key, value in data.items():
            if key in link_names:
                if isinstance(value, list):
                    raw_ids = value
                    unlist = False
                else:
                    raw_ids = [value]
                    unlist = True
                ids = []
                for i in raw_ids:
                    if i is None:
                        ids.append(None)
                    else:
                        ids.append(str(i))
                if unlist:
                    rdata[key] = ids[0]
                else:
                    rdata[key] = ids
            else:
                rdata[key] = value
        return resource_cls(**rdata)

    def _process_data(self):
        """Load the linked data and compare to current data."""
        assert not hasattr(self, 'changes'), '_process_data called twice.'
        assert hasattr(self, 'errors'), (
            '_process_data not called by is_valid().')
        r_by_t = Collection.resource_by_type

        # Create and load collection of new data
        new_collection = Collection()
        for rtype, items in self.data.items():
            resource_cls = r_by_t.get(rtype)
            if resource_cls:
                for seq, json_api_item in enumerate(items):
                    item = json_api_item.copy()
                    links = item.pop('links', {})
                    item.update(links)
                    resource = self.load_resource(resource_cls, item)
                    resource._seq = seq
                    new_collection.add(resource)

        # Create native representation of current feature data
        current_collection = Collection(DjangoResourceClient())
        feature_serializer = ViewFeatureSerializer(context=self.context)
        current_feature = feature_serializer.to_representation(self.feature)
        current_extra = current_feature.pop('_view_extra')
        del current_extra['meta']

        # Load feature into new and current collection
        current_feature_resource = self.load_resource(r_by_t['features'],
                                                      current_feature)
        current_collection.add(current_feature_resource)
        current_feature.update(self.feature._in_extra)
        current_feature['id'] = str(current_feature['id'])
        resource_feature = self.load_resource(r_by_t['features'],
                                              current_feature)
        resource_feature._seq = None
        new_collection.add(resource_feature)

        # Populate collection of current data
        for rtype, items in current_extra.items():
            resource_cls = r_by_t[rtype]
            for item in items:
                resource = self.load_resource(resource_cls, item)
                current_collection.add(resource)

        # Add existing items not explicit in PUT content
        # This avoids 'delete' changes
        new_items = new_collection.get_all_by_data_id()
        for data_id, item in current_collection.get_all_by_data_id().items():
            if data_id not in new_items:
                rtype = item._resource_type
                resource = r_by_t[rtype]()
                json_api_rep = item.to_json_api()
                json_api_rep[rtype]['id'] = item.id.id
                resource.from_json_api(json_api_rep)
                resource._seq = None
                new_collection.add(resource)

        # Add existing items used in new collection to current collection
        # This avoids incorrect 'new' changes
        existing_items = current_collection.get_all_by_data_id()
        for data_id, item in new_collection.get_all_by_data_id().items():
            if item.id:
                item_id = item.id.id
                int_id = None
                existing_item = existing_items.get(data_id)
                try:
                    int_id = int(item_id)
                except ValueError:
                    pass
                if int_id and (existing_item is None):
                    rtype = item._resource_type
                    resource_cls = r_by_t[rtype]
                    model_cls, serializer_cls = view_cls_by_name[rtype]
                    obj = model_cls.objects.get(id=int_id)
                    serializer = serializer_cls()
                    data = serializer.to_representation(obj)
                    resource = self.load_resource(resource_cls, data)
                    current_collection.add(resource)

        # Load the diff
        self.changeset = CollectionChangeset(current_collection,
                                             new_collection)
        assert not self.changeset.changes.get('deleted'), (
            'Existing items were not added, so deletions found:\n%s' %
            self.changeset.changes['deleted'])

    def add_error(self, resource_type, seq, attr_name, error):
        """Add a validation error for a linked resource."""
        resource_errors = self.errors.setdefault(resource_type, {})
        seq_errors = resource_errors.setdefault(seq, {})
        attr_errors = seq_errors.setdefault(attr_name, [])
        attr_errors.append(error)

    def _validate_changes(self):
        """Validate the changes.

        Validation includes:
        - Field validation of properties
        - Disallow adding features outside of the target feature's subtree
        - Disallow additions of maturities

        Validation of links is not attempted, since most validation errors
        will be relations to new resources.  This may miss links to
        "existing" resources that aren't in the database, but those will
        be DoesNotExist exceptions in _process_data.
        """
        assert hasattr(self, 'changeset'), (
            '_validate_changes called before _process_data')
        assert hasattr(self, 'errors'), (
            '_validate_changes called outside of is_valid')
        assert not self.errors, '_validate_changes called twice.'

        new_collection = self.changeset.new_collection
        resource_feature = new_collection.get('features', str(self.feature.id))

        # Validate with DRF serializers
        for data_id, item in new_collection.get_all_by_data_id().items():
            rtype = item._resource_type
            model_cls, serializer_cls = view_cls_by_name[rtype]
            seq = getattr(item, '_seq')
            if seq is None:
                continue

            # Does the ID imply an existing instance?
            int_id = None
            instance = None
            assert item.id, ('ID not set for data_id "%s", item "%s".' %
                             (data_id, item))
            item_id = item.id.id
            try:
                int_id = int(item_id)
            except ValueError:
                pass
            else:
                instance = model_cls.objects.get(id=int_id)

            # Validate the data with DRF serializer
            data = item.to_json_api()[rtype]
            links = data.pop('links', {})
            data.update(links)
            serializer = serializer_cls(instance=instance, data=data)
            if not serializer.is_valid():
                # Discard errors in link fields, for now
                for fieldname, errors in serializer.errors.items():
                    if fieldname not in links:
                        for error in errors:
                            self.add_error(rtype, seq, fieldname, error)

        # Validate that features are in the feature tree
        target_id = resource_feature.id.id
        for feature in new_collection.get_resources('features'):
            if feature.id.id == target_id:
                continue

            f = feature
            while (f and f.parent is not None and f.parent.id != target_id):
                f = new_collection.get('features', f.parent.id)

            if f is None or f.parent.id is None:
                error = ('Feature must be a descendant of feature %s.' %
                         target_id)
                self.add_error('features', feature._seq, 'parent', error)

        # Validate that "expert" objects are not added
        expert_resources = set(
            ('maturities', 'specifications', 'versions', 'browsers'))
        create_error = (
            'Resource can not be created as part of this update. Create'
            ' first, and try again.')
        for item in self.changeset.changes['new'].values():
            if item._resource_type in expert_resources:
                self.add_error(item._resource_type, item._seq, 'id',
                               create_error)

        # Validate that "expert" objects are not changed
        change_err = (
            'Field can not be changed from %s to %s as part of this update.'
            ' Update the resource by itself, and try again.')
        for item in self.changeset.changes['changed'].values():
            if item._resource_type in expert_resources:
                rtype = item._resource_type
                new_json = dict(item.to_json_api()[rtype])
                new_json.update(new_json.pop('links', {}))
                orig_json = dict(item._original.to_json_api()[rtype])
                orig_json.update(orig_json.pop('links', {}))
                for key, value in orig_json.items():
                    if value != new_json.get(key, '(missing)'):
                        err = change_err % (dumps(value), dumps(new_json[key]))
                        self.add_error(rtype, item._seq, key, err)

    def save(self, **kwargs):
        """Commit changes to linked data."""
        self.changeset.change_original_collection()

        # Adding sub-features will change the MPTT tree through direct SQL.
        # Load the new tree data from the database before parent serializer
        # overwrites it with old values.
        tree_attrs = ('lft', 'rght', 'tree_id', 'level', 'parent')
        db_feature = Feature.objects.only(*tree_attrs).get(id=self.feature.id)
        for attr in tree_attrs:
            setattr(self.feature, attr, getattr(db_feature, attr))

        # Adding sub-features will make cached properties invalid
        cached_params = ('row_descendant_pks', 'descendant_pks',
                         'descendant_count', 'row_children',
                         'row_children_pks', 'page_children_pks',
                         '_child_pks_and_is_page')
        for attr in cached_params:
            try:
                delattr(self.feature, attr)
            except AttributeError:
                pass  # cached_property was not accessed during serialization
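The delattr loop above works because Django's cached_property stores each computed value as an instance attribute, so deleting that attribute forces recomputation on the next access. A standalone illustration (not project code):

from django.utils.functional import cached_property


class Node(object):
    @cached_property
    def answer(self):
        print('computing')
        return 42


node = Node()
node.answer              # prints 'computing' and caches 42 on the instance
node.answer              # served from the cache, nothing printed
delattr(node, 'answer')  # drop the cached value, as save() does above
node.answer              # prints 'computing' again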
Example #17
    def _process_data(self):
        """Load the linked data and compare to current data."""
        assert not hasattr(self, 'changes'), '_process_data called twice.'
        assert hasattr(self, 'errors'), (
            '_process_data not called by is_valid().')
        r_by_t = Collection.resource_by_type

        # Create and load collection of new data
        new_collection = Collection()
        for rtype, items in self.data.items():
            resource_cls = r_by_t.get(rtype)
            if resource_cls:
                for seq, json_api_item in enumerate(items):
                    item = json_api_item.copy()
                    links = item.pop('links', {})
                    item.update(links)
                    resource = self.load_resource(resource_cls, item)
                    resource._seq = seq
                    new_collection.add(resource)

        # Create native representation of current feature data
        current_collection = Collection(DjangoResourceClient())
        feature_serializer = ViewFeatureSerializer(context=self.context)
        current_feature = feature_serializer.to_representation(self.feature)
        current_extra = current_feature.pop('_view_extra')
        del current_extra['meta']

        # Load feature into new and current collection
        current_feature_resource = self.load_resource(r_by_t['features'],
                                                      current_feature)
        current_collection.add(current_feature_resource)
        current_feature.update(self.feature._in_extra)
        current_feature['id'] = str(current_feature['id'])
        resource_feature = self.load_resource(r_by_t['features'],
                                              current_feature)
        resource_feature._seq = None
        new_collection.add(resource_feature)

        # Populate collection of current data
        for rtype, items in current_extra.items():
            resource_cls = r_by_t[rtype]
            for item in items:
                resource = self.load_resource(resource_cls, item)
                current_collection.add(resource)

        # Add existing items not explicit in PUT content
        # This avoids 'delete' changes
        new_items = new_collection.get_all_by_data_id()
        for data_id, item in current_collection.get_all_by_data_id().items():
            if data_id not in new_items:
                rtype = item._resource_type
                resource = r_by_t[rtype]()
                json_api_rep = item.to_json_api()
                json_api_rep[rtype]['id'] = item.id.id
                resource.from_json_api(json_api_rep)
                resource._seq = None
                new_collection.add(resource)

        # Add existing items used in new collection to current collection
        # This avoids incorrect 'new' changes
        existing_items = current_collection.get_all_by_data_id()
        for data_id, item in new_collection.get_all_by_data_id().items():
            if item.id:
                item_id = item.id.id
                int_id = None
                existing_item = existing_items.get(data_id)
                try:
                    int_id = int(item_id)
                except ValueError:
                    pass
                if int_id and (existing_item is None):
                    rtype = item._resource_type
                    resource_cls = r_by_t[rtype]
                    model_cls, serializer_cls = view_cls_by_name[rtype]
                    obj = model_cls.objects.get(id=int_id)
                    serializer = serializer_cls()
                    data = serializer.to_representation(obj)
                    resource = self.load_resource(resource_cls, data)
                    current_collection.add(resource)

        # Load the diff
        self.changeset = CollectionChangeset(current_collection,
                                             new_collection)
        assert not self.changeset.changes.get('deleted'), (
            'Existing items were not added, so deletions found:\n%s' %
            self.changeset.changes['deleted'])