Example #1
    def on_create(self, docs):
        on_create_item(docs)
        for doc in docs:
            resolve_document_version(doc, ARCHIVE, 'POST')
            self.update_times(doc)
            self.update_stage(doc)
            convert_task_attributes_to_objectId(doc)
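All of the examples below share the same core flow: build an updates dict, call resolve_document_version against the latest stored copy of the document so the version counter (config.VERSION) is advanced, persist the change, and then call insert_into_versions so the new version is recorded. A minimal sketch of that flow follows; the patch_with_version wrapper is hypothetical, and the import paths are assumptions based on the Superdesk/Eve code shown in the examples:

from eve.versioning import resolve_document_version  # assumed import path

from superdesk import get_resource_service  # assumed import path
from apps.archive.common import ARCHIVE, insert_into_versions  # assumed import path


def patch_with_version(item_id, updates):
    """Hypothetical helper: bump the version, persist the patch, then snapshot it."""
    archive_service = get_resource_service(ARCHIVE)
    original = archive_service.find_one(req=None, _id=item_id)
    # sets config.VERSION on the updates based on the latest stored document
    resolve_document_version(updates, ARCHIVE, 'PATCH', original)
    archive_service.patch(item_id, updates)
    # record the new version in the versions collection
    insert_into_versions(id_=item_id)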
Example #2
    def move_content(self, id, doc):
        archive_service = get_resource_service(ARCHIVE)
        archived_doc = archive_service.find_one(req=None, _id=id)

        if not archived_doc:
            raise SuperdeskApiError.notFoundError('Failed to find item with guid: %s' % id)

        current_stage_of_item = archived_doc.get('task', {}).get('stage')
        if current_stage_of_item and str(current_stage_of_item) == str(doc.get('task', {}).get('stage')):
            raise SuperdeskApiError.preconditionFailedError(message='Move is not allowed within the same stage.')

        if not is_workflow_state_transition_valid('submit_to_desk', archived_doc[config.CONTENT_STATE]):
            raise InvalidStateTransitionError()

        original = dict(archived_doc)

        send_to(archived_doc, doc.get('task', {}).get('desk'), doc.get('task', {}).get('stage'))

        if archived_doc[config.CONTENT_STATE] not in ['published', 'scheduled', 'killed']:
            archived_doc[config.CONTENT_STATE] = 'submitted'

        resolve_document_version(archived_doc, ARCHIVE, 'PATCH', original)

        del archived_doc['_id']
        archive_service.update(original['_id'], archived_doc, original)

        insert_into_versions(id_=original['_id'])

        return archived_doc
Example #3
    def link_as_next_take(self, target, link):
        """
        # check if target has an associated takes package
        # if not, create it and add target as a take
        # check if the target is the last take, if not, resolve the last take
        # copy metadata from the target and add it as the next take
        # return the updated link item
        """
        takes_package_id = self.get_take_package_id(target)
        archive_service = get_resource_service(ARCHIVE)
        takes_package = archive_service.find_one(req=None, _id=takes_package_id) if takes_package_id else {}

        if not link.get('_id'):
            self.__copy_metadata__(target, link, takes_package)
            archive_service.post([link])

        if not takes_package_id:
            takes_package_id = self.package_story_as_a_take(target, takes_package, link)
        else:
            self.__link_items__(takes_package, target, link)
            del takes_package['_id']
            resolve_document_version(takes_package, ARCHIVE, 'PATCH', takes_package)
            archive_service.patch(takes_package_id, takes_package)

        insert_into_versions(id_=takes_package_id)

        return link
Example #4
    def create(self, docs, **kwargs):
        guid_of_item_to_be_moved = request.view_args['guid']

        guid_of_moved_items = []

        for doc in docs:
            archive_service = get_resource_service(ARCHIVE)

            archived_doc = archive_service.find_one(req=None, _id=guid_of_item_to_be_moved)
            if not archived_doc:
                raise SuperdeskApiError.notFoundError('Failed to find item with guid: %s' %
                                                      guid_of_item_to_be_moved)

            current_stage_of_item = archived_doc.get('task', {}).get('stage')
            if current_stage_of_item and str(current_stage_of_item) == str(doc.get('stage')):
                raise SuperdeskApiError.preconditionFailedError(message='Move is not allowed within the same stage.')

            if not is_workflow_state_transition_valid('submit_to_desk', archived_doc[config.CONTENT_STATE]):
                raise InvalidStateTransitionError()

            original = dict(archived_doc)

            send_to(archived_doc, doc.get('desk'), doc.get('stage'))
            archived_doc[config.CONTENT_STATE] = 'submitted'
            resolve_document_version(archived_doc, ARCHIVE, 'PATCH', original)

            del archived_doc['_id']
            archive_service.update(original['_id'], archived_doc, original)

            insert_into_versions(guid=original['_id'])

            guid_of_moved_items.append(archived_doc['guid'])

        return guid_of_moved_items
Example #5
    def restore_version(self, id, doc, original):
        item_id = id
        old_version = int(doc.get("old_version", 0))
        last_version = int(doc.get("last_version", 0))
        if not all([item_id, old_version, last_version]):
            return None

        old = get_resource_service("archive_versions").find_one(
            req=None, _id_document=item_id, _current_version=old_version
        )
        if old is None:
            raise SuperdeskApiError.notFoundError("Invalid version %s" % old_version)

        curr = get_resource_service(SOURCE).find_one(req=None, _id=item_id)
        if curr is None:
            raise SuperdeskApiError.notFoundError("Invalid item id %s" % item_id)

        if curr[config.VERSION] != last_version:
            raise SuperdeskApiError.preconditionFailedError("Invalid last version %s" % last_version)
        old["_id"] = old["_id_document"]
        old["_updated"] = old["versioncreated"] = utcnow()
        set_item_expiry(old, doc)
        del old["_id_document"]

        resolve_document_version(old, "archive", "PATCH", curr)

        remove_unwanted(old)
        super().replace(id=item_id, document=old, original=curr)

        del doc["old_version"]
        del doc["last_version"]
        doc.update(old)
        return item_id
Example #6
    def restore_version(self, id, doc):
        item_id = id
        old_version = int(doc.get('old_version', 0))
        last_version = int(doc.get('last_version', 0))
        if not all([item_id, old_version, last_version]):
            return None

        old = get_resource_service('archive_versions').find_one(req=None, _id_document=item_id, _version=old_version)
        if old is None:
            raise SuperdeskApiError.notFoundError('Invalid version %s' % old_version)

        curr = get_resource_service(SOURCE).find_one(req=None, _id=item_id)
        if curr is None:
            raise SuperdeskApiError.notFoundError('Invalid item id %s' % item_id)

        if curr[config.VERSION] != last_version:
            raise SuperdeskApiError.preconditionFailedError('Invalid last version %s' % last_version)
        old['_id'] = old['_id_document']
        old['_updated'] = old['versioncreated'] = utcnow()
        set_item_expiry(old, doc)
        del old['_id_document']

        resolve_document_version(old, 'archive', 'PATCH', curr)

        remove_unwanted(old)
        res = super().replace(id=item_id, document=old)

        del doc['old_version']
        del doc['last_version']
        doc.update(old)
        return res
Example #7
    def move_content(self, id, doc):
        archive_service = get_resource_service(ARCHIVE)
        archived_doc = archive_service.find_one(req=None, _id=id)

        if not archived_doc:
            raise SuperdeskApiError.notFoundError('Failed to find item with guid: %s' % id)

        current_stage_of_item = archived_doc.get('task', {}).get('stage')
        if current_stage_of_item and str(current_stage_of_item) == str(doc.get('task', {}).get('stage')):
            raise SuperdeskApiError.preconditionFailedError(message='Move is not allowed within the same stage.')

        if not is_workflow_state_transition_valid('submit_to_desk', archived_doc[ITEM_STATE]):
            raise InvalidStateTransitionError()

        original = dict(archived_doc)
        user = get_user()

        send_to(doc=archived_doc, desk_id=doc.get('task', {}).get('desk'), stage_id=doc.get('task', {}).get('stage'),
                user_id=user.get(config.ID_FIELD))

        if archived_doc[ITEM_STATE] not in {CONTENT_STATE.PUBLISHED, CONTENT_STATE.SCHEDULED, CONTENT_STATE.KILLED}:
            archived_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED
        archived_doc[ITEM_OPERATION] = ITEM_MOVE

        set_sign_off(archived_doc, original=original)
        resolve_document_version(archived_doc, ARCHIVE, 'PATCH', original)

        del archived_doc[config.ID_FIELD]
        archive_service.update(original[config.ID_FIELD], archived_doc, original)

        insert_into_versions(id_=original[config.ID_FIELD])

        return archived_doc
Example #8
    def duplicate_item(self, original_doc, state=None, extra_fields=None, operation=None):
        """Duplicates an item.

        Duplicates the 'original_doc' including its version history. If the article being duplicated is contained
        in a desk then the article state is changed to Submitted.

        :return: guid of the duplicated article
        """

        new_doc = original_doc.copy()
        self.remove_after_copy(new_doc, extra_fields)
        on_duplicate_item(new_doc, original_doc, operation)
        resolve_document_version(new_doc, SOURCE, 'PATCH', new_doc)

        if original_doc.get('task', {}).get('desk') is not None and new_doc.get(ITEM_STATE) != CONTENT_STATE.SUBMITTED:
            new_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED

        if state:
            new_doc[ITEM_STATE] = state

        convert_task_attributes_to_objectId(new_doc)
        get_model(ItemModel).create([new_doc])
        self._duplicate_versions(original_doc['_id'], new_doc)
        self._duplicate_history(original_doc['_id'], new_doc)
        app.on_archive_item_updated({'duplicate_id': new_doc['guid']}, original_doc, operation or ITEM_DUPLICATE)
        app.on_archive_item_updated({'duplicate_id': original_doc['_id']}, new_doc, operation or ITEM_DUPLICATED_FROM)

        return new_doc['guid']
Example #9
    def remove_refs_in_package(self, package, ref_id_to_remove, processed_packages=None):
        """Removes residRef referenced by ref_id_to_remove from the package associations and returns the package id.

        Before removing checks if the package has been processed. If processed the package is skipped.

        :return: package[config.ID_FIELD]
        """
        groups = package[GROUPS]

        if processed_packages is None:
            processed_packages = []

        sub_package_ids = [ref['guid'] for group in groups
                           for ref in group[REFS] if ref.get('type') == CONTENT_TYPE.COMPOSITE]
        for sub_package_id in sub_package_ids:
            if sub_package_id not in processed_packages:
                sub_package = self.find_one(req=None, _id=sub_package_id)
                return self.remove_refs_in_package(sub_package, ref_id_to_remove)

        new_groups = self.remove_group_ref(package, ref_id_to_remove)
        updates = {config.LAST_UPDATED: utcnow(), GROUPS: new_groups}

        resolve_document_version(updates, ARCHIVE, 'PATCH', package)
        get_resource_service(ARCHIVE).patch(package[config.ID_FIELD], updates)
        app.on_archive_item_updated(updates, package, ITEM_UNLINK)
        insert_into_versions(id_=package[config.ID_FIELD])

        sub_package_ids.append(package[config.ID_FIELD])
        return sub_package_ids
Example #10
    def restore_version(self, id, doc):
        item_id = id
        old_version = int(doc.get('old_version', 0))
        last_version = int(doc.get('last_version', 0))
        if not all([item_id, old_version, last_version]):
            return None

        old = app.data.find_one('archive_versions', req=None, _id_document=item_id, _version=old_version)
        if old is None:
            raise SuperdeskError(payload='Invalid version %s' % old_version)

        curr = app.data.find_one('archive', req=None, _id=item_id)
        if curr is None:
            raise SuperdeskError(payload='Invalid item id %s' % item_id)

        if curr['_version'] != last_version:
            raise SuperdeskError(payload='Invalid last version %s' % last_version)
        old['_id'] = old['_id_document']
        old['_updated'] = old['versioncreated'] = utcnow()
        del old['_id_document']

        resolve_document_version(old, 'archive', 'PATCH', curr)
        res = super().replace(id=item_id, document=old)
        del doc['old_version']
        del doc['last_version']
        doc.update(old)
        return res
Example #11
def build_response_document(
        document, resource, embedded_fields, latest_doc=None):
    """ Prepares a document for response including generation of ETag and
    metadata fields.

    :param document: the document to embed other documents into.
    :param resource: the resource name.
    :param embedded_fields: the list of fields we are allowed to embed.
    :param latest_doc: the latest version of the document.

    .. versionadded:: 0.4
    """
    # need to update the document field since the etag must be computed on the
    # same document representation that might have been used in the collection
    # 'get' method
    document[config.DATE_CREATED] = date_created(document)
    document[config.LAST_UPDATED] = last_updated(document)
    # TODO: last_update could include consideration for embedded documents

    # generate ETag
    if config.IF_MATCH:
        document[config.ETAG] = document_etag(document)

    # hateoas links
    if config.DOMAIN[resource]['hateoas'] and config.ID_FIELD in document:
        document[config.LINKS] = {'self':
                                  document_link(resource,
                                                document[config.ID_FIELD])}

    # add version numbers
    resolve_document_version(document, resource, 'GET', latest_doc)

    # media and embedded documents
    resolve_media_files(document, resource)
    resolve_embedded_documents(document, resource, embedded_fields)
Example #12
    def __publish_package_items(self, package, last_updated):
        """
        Publishes items of a package recursively
        """

        items = [ref.get('residRef') for group in package.get('groups', [])
                 for ref in group.get('refs', []) if 'residRef' in ref]

        if items:
            for guid in items:
                doc = super().find_one(req=None, _id=guid)
                original = copy(doc)
                try:
                    if doc['type'] == 'composite':
                        self.__publish_package_items(doc, last_updated)

                    resolve_document_version(document=doc, resource=ARCHIVE, method='PATCH', latest_doc=doc)
                    doc[config.CONTENT_STATE] = self.published_state
                    doc[config.LAST_UPDATED] = last_updated
                    doc[config.ETAG] = document_etag(doc)
                    self.backend.update(self.datasource, guid, {config.CONTENT_STATE: doc[config.CONTENT_STATE],
                                                                config.ETAG: doc[config.ETAG],
                                                                config.VERSION: doc[config.VERSION],
                                                                config.LAST_UPDATED: doc[config.LAST_UPDATED]},
                                        original)
                    insert_into_versions(doc=doc)
                except KeyError:
                    raise SuperdeskApiError.badRequestError("A non-existent content id is requested to publish")
Example #13
    def remove_refs_in_package(self, package, ref_id_to_remove, processed_packages=None):
        """
        Removes residRef referenced by ref_id_to_remove from the package associations and returns the package id.
        Before removing checks if the package has been processed. If processed the package is skipped.
        In case of takes package, sequence is decremented and last_take field is updated.
        If sequence is zero then the takes package is deleted.
        :return: package[config.ID_FIELD]
        """
        groups = package[GROUPS]

        if processed_packages is None:
            processed_packages = []

        sub_package_ids = [ref['guid'] for group in groups
                           for ref in group[ASSOCIATIONS] if ref.get('type') == CONTENT_TYPE.COMPOSITE]
        for sub_package_id in sub_package_ids:
            if sub_package_id not in processed_packages:
                sub_package = self.find_one(req=None, _id=sub_package_id)
                return self.remove_refs_in_package(sub_package, ref_id_to_remove)

        new_groups = [{GROUP_ID: group[GROUP_ID], ROLE: group.get(ROLE),
                       ASSOCIATIONS: [ref for ref in group[ASSOCIATIONS] if ref.get('guid') != ref_id_to_remove]}
                      for group in groups]
        new_root_refs = [{ID_REF: group[GROUP_ID]} for group in new_groups if group[GROUP_ID] != ROOT_GROUP]

        for group in new_groups:
            if group[GROUP_ID] == ROOT_GROUP:
                group[ASSOCIATIONS] = new_root_refs
                break

        updates = {config.LAST_UPDATED: utcnow(), GROUPS: new_groups}

        # if takes package then adjust the reference.
        # safe to do this as take can only be in one takes package.
        delete_package = False
        if package.get(PACKAGE_TYPE) == TAKES_PACKAGE:
            new_sequence = package[SEQUENCE] - 1
            if new_sequence == 0:
                # remove the takes package.
                get_resource_service(ARCHIVE).delete_action({config.ID_FIELD: package[config.ID_FIELD]})
                delete_package = True
            else:
                updates[SEQUENCE] = new_sequence
                last_take_group = next(reference for reference in
                                       next(new_group.get(ASSOCIATIONS) for new_group in new_groups if
                                            new_group[GROUP_ID] == MAIN_GROUP)
                                       if reference.get(SEQUENCE) == new_sequence)

                if last_take_group:
                    updates[LAST_TAKE] = last_take_group.get(ITEM_REF)

        if not delete_package:
            resolve_document_version(updates, ARCHIVE, 'PATCH', package)
            get_resource_service(ARCHIVE).patch(package[config.ID_FIELD], updates)
            insert_into_versions(id_=package[config.ID_FIELD])

        sub_package_ids.append(package[config.ID_FIELD])
        return sub_package_ids
Example #14
    def __update_state(self, updates, original):
        if self.__is_content_assigned_to_new_desk(original, updates):
            # check if the preconditions for the action are in place
            original_state = original[config.CONTENT_STATE]
            if not is_workflow_state_transition_valid('move', original_state):
                raise InvalidStateTransitionError()

            updates[config.CONTENT_STATE] = 'draft' if self.__is_content_moved_from_desk(updates) else 'submitted'
            resolve_document_version(updates, ARCHIVE, 'PATCH', original)
Example #15
def enqueue_item(published_item):
    """
    Creates the corresponding entries in the publish queue for the given item
    """
    published_item_id = ObjectId(published_item[config.ID_FIELD])
    published_service = get_resource_service(PUBLISHED)
    archive_service = get_resource_service(ARCHIVE)
    published_update = {QUEUE_STATE: PUBLISH_STATE.IN_PROGRESS, 'last_queue_event': utcnow()}
    try:
        logger.info('Queueing item with id: {} and item_id: {}'.format(published_item_id, published_item['item_id']))

        published_item = published_service.find_one(req=None, _id=published_item_id)
        if published_item.get(QUEUE_STATE) != PUBLISH_STATE.PENDING:
            logger.info('Queue State is not pending for published item {}. It is in {}'.
                        format(published_item_id, published_item.get(QUEUE_STATE)))
            return

        if published_item.get(ITEM_STATE) == CONTENT_STATE.SCHEDULED:
            # if scheduled then change the state to published
            # change the `version` and `versioncreated` for the item
            # in archive collection and published collection.
            versioncreated = utcnow()
            item_updates = {'versioncreated': versioncreated, ITEM_STATE: CONTENT_STATE.PUBLISHED}
            resolve_document_version(document=item_updates, resource=ARCHIVE,
                                     method='PATCH',
                                     latest_doc={config.VERSION: published_item[config.VERSION]})

            # update the archive collection
            archive_item = archive_service.find_one(req=None, _id=published_item['item_id'])
            archive_service.system_update(published_item['item_id'], item_updates, archive_item)
            # insert into version.
            insert_into_versions(published_item['item_id'], doc=None)
            # import to legal archive
            import_into_legal_archive.apply_async(countdown=3, kwargs={'item_id': published_item['item_id']})
            logger.info('Modified the version of scheduled item: {}'.format(published_item_id))

            logger.info('Publishing scheduled item_id: {}'.format(published_item_id))
            # update the published collection
            published_update.update(item_updates)
            published_item.update({'versioncreated': versioncreated,
                                   ITEM_STATE: CONTENT_STATE.PUBLISHED,
                                   config.VERSION: item_updates[config.VERSION]})

        published_service.patch(published_item_id, published_update)
        queued = get_enqueue_service(published_item[ITEM_OPERATION]).enqueue_item(published_item)
        # if the item is queued in the publish_queue then the state is "queued"
        # else the queue state is "queued_not_transmitted"
        queue_state = PUBLISH_STATE.QUEUED if queued else PUBLISH_STATE.QUEUED_NOT_TRANSMITTED
        published_service.patch(published_item_id, {QUEUE_STATE: queue_state})
        logger.info('Queued item with id: {} and item_id: {}'.format(published_item_id, published_item['item_id']))
    except KeyError:
        published_service.patch(published_item_id, {QUEUE_STATE: PUBLISH_STATE.PENDING})
        logger.exception('No enqueue service found for operation %s', published_item[ITEM_OPERATION])
    except:
        published_service.patch(published_item_id, {QUEUE_STATE: PUBLISH_STATE.PENDING})
        raise
Example #16
    def __update_state(self, updates, original):
        if self.__is_content_assigned_to_new_desk(original, updates):
            # check if the preconditions for the action are in place
            original_state = original[ITEM_STATE]
            if not is_workflow_state_transition_valid('move', original_state):
                raise InvalidStateTransitionError()

            updates[ITEM_STATE] = CONTENT_STATE.DRAFT if self.__is_content_moved_from_desk(updates) \
                else CONTENT_STATE.SUBMITTED
            resolve_document_version(updates, ARCHIVE, 'PATCH', original)
Example #17
    def _set_version_last_modified_and_state(self, original, updates, last_updated):
        """
        Sets config.VERSION, config.LAST_UPDATED, config.CONTENT_STATE in updates document.
        """

        self.set_state(original, updates)
        updates[config.LAST_UPDATED] = last_updated

        if original[config.VERSION] == updates.get(config.VERSION, original[config.VERSION]):
            resolve_document_version(document=updates, resource=ARCHIVE, method='PATCH', latest_doc=original)
Example #18
    def link_as_next_take(self, target, link):
        """Makes next take to target from given link.

        Check if target has an associated takes package. If not, create it and add target as a take.
        Check if the target is the last take, if not, resolve the last take. Copy metadata from the target and add it
        as the next take and return the updated link item

        :return: the updated link item
        """

        takes_package_id = self.get_take_package_id(target)
        archive_service = get_resource_service(ARCHIVE)
        takes_package = archive_service.find_one(req=None, _id=takes_package_id) if takes_package_id else {}

        if not takes_package:
            # setting the sequence to 1 for target.
            updates = {SEQUENCE: 1}
            if target[ITEM_STATE] in [CONTENT_STATE.SPIKED, CONTENT_STATE.KILLED,
                                      CONTENT_STATE.SCHEDULED, CONTENT_STATE.INGESTED]:
                raise SuperdeskApiError.forbiddenError("Item isn't in a valid state for creating takes.")
            else:
                archive_service.system_update(target.get(config.ID_FIELD), updates, target)

        link_updates = {}

        if not link.get(config.ID_FIELD):
            # A new story to be linked
            self.__copy_metadata__(target, link, takes_package, set_state=True)
            archive_service.post([link])
        else:
            self.__copy_metadata__(target, link_updates, takes_package, set_state=False)

        link.update(link_updates)

        if not takes_package_id:
            takes_package_id = self.package_story_as_a_take(target, takes_package, link)
        else:
            self.__link_items__(takes_package, target, link)
            del takes_package[config.ID_FIELD]
            takes_package.pop('unique_id', None)
            takes_package.pop('unique_name', None)
            takes_package.pop(PUBLISH_SCHEDULE, None)
            takes_package.pop(SCHEDULE_SETTINGS, None)

            resolve_document_version(takes_package, ARCHIVE, 'PATCH', takes_package)
            archive_service.patch(takes_package_id, takes_package)
            get_resource_service('archive_broadcast').on_broadcast_master_updated(ITEM_CREATE, target,
                                                                                  takes_package_id=takes_package_id)

        if link.get(SEQUENCE):
            link_updates.update({SEQUENCE: link[SEQUENCE]})
            archive_service.system_update(link[config.ID_FIELD], link_updates, link)

        insert_into_versions(id_=takes_package_id)
        return link
Example #19
    def on_update(self, updates, original):
        self.update_times(updates)
        if is_assigned_to_a_desk(updates):
            self.__update_state(updates, original)
        new_stage_id = str(updates.get('task', {}).get('stage', ''))
        old_stage_id = str(original.get('task', {}).get('stage', ''))
        new_user_id = updates.get('task', {}).get('user', '')
        if new_stage_id and new_stage_id != old_stage_id:
            updates[ITEM_OPERATION] = ITEM_SEND
            send_to(doc=original, update=updates, desk_id=None, stage_id=new_stage_id, user_id=new_user_id)
            resolve_document_version(updates, ARCHIVE, 'PATCH', original)
        update_version(updates, original)
Example #20
    def unlock(self, item_filter, user_id, session_id, etag):
        item_model = get_model(ItemModel)
        item = item_model.find_one(item_filter)

        if not item:
            raise SuperdeskApiError.notFoundError()

        if not item.get(LOCK_USER):
            raise SuperdeskApiError.badRequestError(message="Item is not locked.")

        can_user_unlock, error_message = self.can_unlock(item, user_id)

        if can_user_unlock:
            self.app.on_item_unlock(item, user_id)
            updates = {}

            # delete the item if nothing is saved so far
            # version 0 created on lock item
            if item.get(config.VERSION, 0) == 0 and item[ITEM_STATE] == CONTENT_STATE.DRAFT:
                if item.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE:
                    # if item is composite then update referenced items in package.
                    PackageService().update_groups({}, item)

                superdesk.get_resource_service('archive').delete_action(lookup={'_id': item['_id']})
                push_content_notification([item])
            else:
                updates = {LOCK_USER: None, LOCK_SESSION: None, 'lock_time': None,
                           'lock_action': None, 'force_unlock': True}
                autosave = superdesk.get_resource_service('archive_autosave').find_one(req=None, _id=item['_id'])
                if autosave and item[ITEM_STATE] not in PUBLISH_STATES:
                    if not hasattr(flask.g, 'user'):  # user is not set when session expires
                        flask.g.user = superdesk.get_resource_service('users').find_one(req=None, _id=user_id)
                    autosave.update(updates)
                    resolve_document_version(autosave, 'archive', 'PATCH', item)
                    superdesk.get_resource_service('archive').patch(item['_id'], autosave)
                    item = superdesk.get_resource_service('archive').find_one(req=None, _id=item['_id'])
                    insert_versioning_documents('archive', item)
                else:
                    item_model.update(item_filter, updates)
                    item = item_model.find_one(item_filter)
                self.app.on_item_unlocked(item, user_id)

            push_notification('item:unlock',
                              item=str(item_filter.get(config.ID_FIELD)),
                              item_version=str(item.get(config.VERSION)),
                              state=item.get(ITEM_STATE),
                              user=str(user_id), lock_session=str(session_id),
                              _etag=item.get(config.ETAG))
        else:
            raise SuperdeskApiError.forbiddenError(message=error_message)

        return item
Example #21
    def on_update(self, updates, original):
        self.update_times(updates)
        if is_assigned_to_a_desk(updates):
            self.__update_state(updates, original)
        new_stage_id = str(updates.get("task", {}).get("stage", ""))
        old_stage_id = str(original.get("task", {}).get("stage", ""))
        new_user_id = updates.get("task", {}).get("user", "")
        if new_stage_id and new_stage_id != old_stage_id:
            updates[ITEM_OPERATION] = ITEM_SEND
            send_to(doc=original, update=updates, desk_id=None, stage_id=new_stage_id, user_id=new_user_id)
            resolve_document_version(updates, ARCHIVE, "PATCH", original)
        convert_task_attributes_to_objectId(updates)
        update_version(updates, original)
Example #22
    def remove_refs_in_package(self, package, ref_id_to_remove, processed_packages=None):
        """
        Removes residRef referenced by ref_id_to_remove from the package associations and returns the package id.
        Before removing checks if the package has been processed. If processed the package is skipped.
        :return: package[config.ID_FIELD]
        """
        groups = package['groups']

        if processed_packages is None:
            processed_packages = []

        sub_package_ids = [ref['guid'] for group in groups for ref in group['refs'] if ref.get('type') == 'composite']
        for sub_package_id in sub_package_ids:
            if sub_package_id not in processed_packages:
                sub_package = self.find_one(req=None, _id=sub_package_id)
                return self.remove_refs_in_package(sub_package, ref_id_to_remove)

        new_groups = [{'id': group['id'], 'role': group.get('role'),
                       'refs': [ref for ref in group['refs'] if ref.get('guid') != ref_id_to_remove]}
                      for group in groups]
        new_root_refs = [{'idRef': group['id']} for group in new_groups if group['id'] != 'root']

        for group in new_groups:
            if group['id'] == 'root':
                group['refs'] = new_root_refs
                break

        updates = {config.LAST_UPDATED: utcnow(), 'groups': new_groups}

        # if takes package then adjust the reference.
        # safe to do this as take can only be in one takes package.
        if package.get(PACKAGE_TYPE) == TAKES_PACKAGE:
            new_sequence = package[SEQUENCE] - 1
            updates[SEQUENCE] = new_sequence
            last_take_group = next(reference for reference in
                                   next(new_group.get('refs') for new_group in new_groups if new_group['id'] == 'main')
                                   if reference.get(SEQUENCE) == new_sequence)

            if last_take_group:
                updates[LAST_TAKE] = last_take_group.get(ITEM_REF)

        resolve_document_version(updates, ARCHIVE, 'PATCH', package)

        get_resource_service(ARCHIVE).patch(package[config.ID_FIELD], updates)
        insert_into_versions(id_=package[config.ID_FIELD])

        sub_package_ids.append(package[config.ID_FIELD])
        return sub_package_ids
Example #23
    def kill_item(self, updates, original):
        """
        Kill the item after applying the kill template.
        :param dict updates: updates for the item, including the body_html that triggered the kill
        :param dict original: original item to be killed
        """
        # apply the kill template
        original_copy = deepcopy(original)
        updates_data = self._apply_kill_template(original_copy)
        updates_data['body_html'] = updates.get('body_html', '')
        # resolve the document version
        resolve_document_version(document=updates_data, resource=ARCHIVE, method='PATCH', latest_doc=original)
        # kill the item
        self.patch(original.get(config.ID_FIELD), updates_data)
        # insert into versions
        insert_into_versions(id_=original[config.ID_FIELD])
Example #24
    def create(self, docs):
        service = get_resource_service(SOURCE)
        item_id = request.view_args['item_id']
        item = service.find_one(req=None, _id=item_id)
        doc = docs[0]

        self._valid_broadcast_item(item)

        desk_id = doc.get('desk')
        desk = None

        if desk_id:
            desk = get_resource_service('desks').find_one(req=None, _id=desk_id)

        doc.pop('desk', None)
        doc['task'] = {}
        if desk:
            doc['task']['desk'] = desk.get(config.ID_FIELD)
            doc['task']['stage'] = desk.get('working_stage')

        doc['task']['user'] = get_user().get('_id')
        genre_list = get_resource_service('vocabularies').find_one(req=None, _id='genre') or {}
        broadcast_genre = [{'qcode': genre.get('qcode'), 'name': genre.get('name')}
                           for genre in genre_list.get('items', [])
                           if genre.get('qcode') == BROADCAST_GENRE and genre.get('is_active')]

        if not broadcast_genre:
            raise SuperdeskApiError.badRequestError(message="Cannot find the {} genre.".format(BROADCAST_GENRE))

        doc['broadcast'] = {
            'status': '',
            'master_id': item_id,
            'takes_package_id': self.takesService.get_take_package_id(item),
            'rewrite_id': item.get('rewritten_by')
        }

        doc['genre'] = broadcast_genre
        doc['family_id'] = item.get('family_id')

        for key in FIELDS_TO_COPY:
            doc[key] = item.get(key)

        resolve_document_version(document=doc, resource=SOURCE, method='POST')
        service.post(docs)
        insert_into_versions(id_=doc[config.ID_FIELD])
        build_custom_hateoas(CUSTOM_HATEOAS, doc)
        return [doc[config.ID_FIELD]]
Example #25
    def kill_item(self, item):
        """
        Kill the item after applying the template.
        :param dict item: Item
        """
        # get the kill template
        template = get_resource_service("content_templates").get_template_by_name("kill")
        if not template:
            SuperdeskApiError.badRequestError(message="Kill Template missing.")

        # apply the kill template
        updates = render_content_template(item, template)
        # resolve the document version
        resolve_document_version(document=updates, resource=ARCHIVE, method="PATCH", latest_doc=item)
        # kill the item
        self.patch(item.get(config.ID_FIELD), updates)
        # insert into versions
        insert_into_versions(id_=item[config.ID_FIELD])
Example #26
def build_response_document(
        document, resource, embedded_fields, latest_doc=None):
    """ Prepares a document for response including generation of ETag and
    metadata fields.

    :param document: the document to embed other documents into.
    :param resource: the resource name.
    :param embedded_fields: the list of fields we are allowed to embed.
    :param latest_doc: the latest version of the document.

    .. versionchanged:: 0.5
       Only compute ETAG if necessary (#369).
       Add version support (#475).

    .. versionadded:: 0.4
    """
    # need to update the document field since the etag must be computed on the
    # same document representation that might have been used in the collection
    # 'get' method
    document[config.DATE_CREATED] = date_created(document)
    document[config.LAST_UPDATED] = last_updated(document)
    # TODO: last_update could include consideration for embedded documents

    # Up to v0.4 etags were not stored with the documents.
    if config.IF_MATCH and config.ETAG not in document:
        document[config.ETAG] = document_etag(document)

    # hateoas links
    if config.DOMAIN[resource]['hateoas'] and config.ID_FIELD in document:
        version = None
        if config.DOMAIN[resource]['versioning'] is True \
                and request.args.get(config.VERSION_PARAM):
            version = document[config.VERSION]
        document[config.LINKS] = {'self':
                                  document_link(resource,
                                                document[config.ID_FIELD],
                                                version)}

    # add version numbers
    resolve_document_version(document, resource, 'GET', latest_doc)

    # media and embedded documents
    resolve_media_files(document, resource)
    resolve_embedded_documents(document, resource, embedded_fields)
Example #27
    def _set_updates(self, original, updates, last_updated, preserve_state=False):
        """Sets config.VERSION, config.LAST_UPDATED, ITEM_STATE in updates document.

        If item is being published and embargo is available then append Editorial Note with 'Embargoed'.

        :param dict original: original document
        :param dict updates: updates related to the original document
        :param datetime last_updated: datetime of the updates.
        """
        if not preserve_state:
            self.set_state(original, updates)
        updates.setdefault(config.LAST_UPDATED, last_updated)

        if original[config.VERSION] == updates.get(config.VERSION, original[config.VERSION]):
            resolve_document_version(document=updates, resource=ARCHIVE, method='PATCH', latest_doc=original)

        user = get_user()
        if user and user.get(config.ID_FIELD):
            updates['version_creator'] = user[config.ID_FIELD]
Example #28
    def _set_updates(self, original, updates, last_updated):
        """
        Sets config.VERSION, config.LAST_UPDATED, ITEM_STATE in updates document.
        If item is being published and embargo is available then append Editorial Note with 'Embargoed'.

        :param dict original: original document
        :param dict updates: updates related to the original document
        :param datetime last_updated: datetime of the updates.
        """

        self.set_state(original, updates)
        updates.setdefault(config.LAST_UPDATED, last_updated)

        if original[config.VERSION] == updates.get(config.VERSION, original[config.VERSION]):
            resolve_document_version(document=updates, resource=ARCHIVE, method='PATCH', latest_doc=original)

        if updates.get(EMBARGO, original.get(EMBARGO)) \
                and updates.get('ednote', original.get('ednote', '')).find('Embargo') == -1:
            updates['ednote'] = '{} {}'.format(original.get('ednote', ''), 'Embargoed.').strip()
Example #29
    def _move(self, archived_doc, doc):
        archive_service = get_resource_service(ARCHIVE)
        original = deepcopy(archived_doc)
        user = get_user()
        send_to(doc=archived_doc, desk_id=doc.get('task', {}).get('desk'), stage_id=doc.get('task', {}).get('stage'),
                user_id=user.get(config.ID_FIELD))
        if archived_doc[ITEM_STATE] not in {CONTENT_STATE.PUBLISHED, CONTENT_STATE.SCHEDULED, CONTENT_STATE.KILLED}:
            archived_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED
        archived_doc[ITEM_OPERATION] = ITEM_MOVE
        # set the change in desk type when content is moved.
        self.set_change_in_desk_type(archived_doc, original)
        archived_doc.pop(SIGN_OFF, None)
        set_sign_off(archived_doc, original=original)
        convert_task_attributes_to_objectId(archived_doc)
        resolve_document_version(archived_doc, ARCHIVE, 'PATCH', original)
        del archived_doc[config.ID_FIELD]
        archive_service.update(original[config.ID_FIELD], archived_doc, original)
        insert_into_versions(id_=original[config.ID_FIELD])
        push_item_move_notification(original, archived_doc)
Example #30
    def move_content(self, id, doc):
        archive_service = get_resource_service(ARCHIVE)
        archived_doc = archive_service.find_one(req=None, _id=id)

        if not archived_doc:
            raise SuperdeskApiError.notFoundError('Failed to find item with guid: %s' % id)

        current_stage_of_item = archived_doc.get('task', {}).get('stage')
        if current_stage_of_item and str(current_stage_of_item) == str(doc.get('task', {}).get('stage')):
            raise SuperdeskApiError.preconditionFailedError(message='Move is not allowed within the same stage.')

        if not is_workflow_state_transition_valid('submit_to_desk', archived_doc[ITEM_STATE]):
            raise InvalidStateTransitionError()

        original = deepcopy(archived_doc)
        user = get_user()

        send_to(doc=archived_doc, desk_id=doc.get('task', {}).get('desk'), stage_id=doc.get('task', {}).get('stage'),
                user_id=user.get(config.ID_FIELD))

        if archived_doc[ITEM_STATE] not in {CONTENT_STATE.PUBLISHED, CONTENT_STATE.SCHEDULED, CONTENT_STATE.KILLED}:
            archived_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED
        archived_doc[ITEM_OPERATION] = ITEM_MOVE

        # set the change in desk type when content is moved.
        self.set_change_in_desk_type(archived_doc, original)
        archived_doc.pop(SIGN_OFF, None)
        set_sign_off(archived_doc, original=original)
        convert_task_attributes_to_objectId(archived_doc)
        resolve_document_version(archived_doc, ARCHIVE, 'PATCH', original)

        del archived_doc[config.ID_FIELD]
        archive_service.update(original[config.ID_FIELD], archived_doc, original)

        insert_into_versions(id_=original[config.ID_FIELD])

        push_content_notification([archived_doc, original])

        # finally apply any on stage rules/macros
        apply_onstage_rule(archived_doc, original[config.ID_FIELD])

        return archived_doc
Example #31
    def spike_item(self, original):
        """If Original item is re-write then it will remove the reference from the broadcast item.

        :param: dict original: original document
        """
        broadcast_items = [item for item in self.get_broadcast_items_from_master_story(original)
                           if item.get(ITEM_STATE) not in PUBLISH_STATES]
        spike_service = get_resource_service('archive_spike')

        for item in broadcast_items:
            id_ = item.get(config.ID_FIELD)
            try:
                self.packageService.remove_spiked_refs_from_package(id_)
                updates = {ITEM_STATE: CONTENT_STATE.SPIKED}
                resolve_document_version(updates, SOURCE, 'PATCH', item)
                spike_service.patch(id_, updates)
                insert_into_versions(id_=id_)
            except Exception:
                logger.exception("Failed to spike the related broadcast item {}.".format(id_))

        if original.get('rewrite_of') and original.get(ITEM_STATE) not in PUBLISH_STATES:
            self.remove_rewrite_refs(original)
Example #32
    def link_as_next_take(self, target, link):
        """
        Check if target has an associated takes package. If not, create it and add target as a take.
        Check if the target is the last take, if not, resolve the last take. Copy metadata from the target and add it
        as the next take and return the updated link item

        :return: the updated link item
        """

        takes_package_id = self.get_take_package_id(target)
        archive_service = get_resource_service(ARCHIVE)
        takes_package = archive_service.find_one(req=None, _id=takes_package_id) if takes_package_id else {}

        if not takes_package:
            # setting the sequence to 1 for target.
            updates = {SEQUENCE: 1}
            resolve_document_version(updates, ARCHIVE, 'PATCH', target)
            archive_service.patch(target.get(config.ID_FIELD), updates)

        if not link.get(config.ID_FIELD):
            self.__copy_metadata__(target, link, takes_package)
            archive_service.post([link])

        if not takes_package_id:
            takes_package_id = self.package_story_as_a_take(target, takes_package, link)
        else:
            self.__link_items__(takes_package, target, link)
            del takes_package[config.ID_FIELD]
            resolve_document_version(takes_package, ARCHIVE, 'PATCH', takes_package)
            archive_service.patch(takes_package_id, takes_package)
            get_resource_service('archive_broadcast').on_broadcast_master_updated(ITEM_CREATE, target,
                                                                                  takes_package_id=takes_package_id)

        if link.get(SEQUENCE):
            archive_service.patch(link[config.ID_FIELD], {SEQUENCE: link[SEQUENCE]})

        insert_into_versions(id_=takes_package_id)

        return link
Example #33
    def _remove_and_set_kill_properties(self, article, articles_to_kill, updates):
        """Removes the irrelevant properties from the given article and sets the properties for kill operation.

        :param article: article from the archived repo
        :type article: dict
        :param articles_to_kill: list of articles which were about to kill from dusty archive
        :type articles_to_kill: list
        :param updates: updates to be applied on the article before saving
        :type updates: dict
        """

        article.pop('archived_id', None)
        article.pop('_type', None)
        article.pop('_links', None)
        article.pop('queue_state', None)
        article.pop(config.ETAG, None)

        for field in ['headline', 'abstract', 'body_html']:
            article[field] = updates.get(field, article.get(field, ''))

        article[ITEM_STATE] = CONTENT_STATE.KILLED
        article[ITEM_OPERATION] = ITEM_KILL
        article['pubstatus'] = PUB_STATUS.CANCELED
        article[config.LAST_UPDATED] = utcnow()

        user = get_user()
        article['version_creator'] = str(user[config.ID_FIELD])

        resolve_document_version(article, ARCHIVE, 'PATCH', article)

        if article[ITEM_TYPE] == CONTENT_TYPE.COMPOSITE:
            package_service = PackageService()
            item_refs = package_service.get_item_refs(article)
            for ref in item_refs:
                item_in_package = [item for item in articles_to_kill
                                   if item.get('item_id', item.get(config.ID_FIELD)) == ref[RESIDREF]]
                ref['location'] = ARCHIVE
                ref[config.VERSION] = item_in_package[0][config.VERSION]
Example #34
    def __publish_package_items(self, package, last_updated):
        """
        Publishes items of a package recursively

        :return: True if all the items of a package have been published successfully. False otherwise.
        """

        items = [
            ref.get('residRef') for group in package.get('groups', [])
            for ref in group.get('refs', []) if 'residRef' in ref
        ]

        if items:
            for guid in items:
                doc = super().find_one(req=None, _id=guid)
                original = copy(doc)
                try:
                    if doc['type'] == 'composite':
                        self.__publish_package_items(doc, last_updated)

                    resolve_document_version(document=doc,
                                             resource=ARCHIVE,
                                             method='PATCH',
                                             latest_doc=doc)
                    doc[config.CONTENT_STATE] = 'published'
                    doc[config.LAST_UPDATED] = last_updated
                    doc[config.ETAG] = document_etag(doc)
                    self.backend.update(
                        self.datasource, guid, {
                            config.CONTENT_STATE: doc[config.CONTENT_STATE],
                            config.ETAG: doc[config.ETAG],
                            config.VERSION: doc[config.VERSION],
                            config.LAST_UPDATED: doc[config.LAST_UPDATED]
                        }, original)
                    insert_into_versions(doc=doc)
                except KeyError:
                    raise SuperdeskApiError.badRequestError(
                        "A non-existent content id is requested to publish")
Example #35
    def restore_version(self, id, doc, original):
        item_id = id
        old_version = int(doc.get('old_version', 0))
        last_version = int(doc.get('last_version', 0))
        if not all([item_id, old_version, last_version]):
            return None

        old = get_resource_service('archive_versions').find_one(req=None, _id_document=item_id,
                                                                _current_version=old_version)
        if old is None:
            raise SuperdeskApiError.notFoundError(_('Invalid version {old_version}').format(old_version=old_version))

        curr = get_resource_service(SOURCE).find_one(req=None, _id=item_id)
        if curr is None:
            raise SuperdeskApiError.notFoundError(_('Invalid item id {item_id}').format(item_id=item_id))

        if curr[config.VERSION] != last_version:
            raise SuperdeskApiError.preconditionFailedError(
                _('Invalid last version {last_version}').format(last_version=last_version))

        old['_id'] = old['_id_document']
        old['_updated'] = old['versioncreated'] = utcnow()
        set_item_expiry(old, doc)
        old.pop('_id_document', None)
        old.pop(SIGN_OFF, None)
        old[ITEM_OPERATION] = ITEM_RESTORE

        resolve_document_version(old, SOURCE, 'PATCH', curr)
        remove_unwanted(old)
        set_sign_off(updates=old, original=curr)

        super().replace(id=item_id, document=old, original=curr)

        old.pop('old_version', None)
        old.pop('last_version', None)

        doc.update(old)
        return item_id
Example #36
    def _set_updates(self, original, updates, last_updated, preserve_state=False):
        """
        Sets config.VERSION, config.LAST_UPDATED, ITEM_STATE in updates document.
        If item is being published and embargo is available then append Editorial Note with 'Embargoed'.

        :param dict original: original document
        :param dict updates: updates related to the original document
        :param datetime last_updated: datetime of the updates.
        """
        if not preserve_state:
            self.set_state(original, updates)
        updates.setdefault(config.LAST_UPDATED, last_updated)

        if original[config.VERSION] == updates.get(config.VERSION, original[config.VERSION]):
            resolve_document_version(document=updates, resource=ARCHIVE, method='PATCH', latest_doc=original)

        if updates.get(EMBARGO, original.get(EMBARGO)) \
                and updates.get('ednote', original.get('ednote', '')).find('Embargo') == -1:
            updates['ednote'] = '{} {}'.format(original.get('ednote', ''), 'Embargoed.').strip()

        user = get_user()
        if user and user.get(config.ID_FIELD):
            updates['version_creator'] = user[config.ID_FIELD]
Example #37
    def _duplicate_item(self, original_doc):
        """
        Duplicates the 'original_doc' including its version history. If the article being duplicated is contained
        in a desk then the article state is changed to Submitted.

        :return: guid of the duplicated article
        """

        new_doc = original_doc.copy()
        self._remove_after_copy(new_doc)
        on_duplicate_item(new_doc)
        resolve_document_version(new_doc, SOURCE, 'PATCH', new_doc)

        if original_doc.get('task',
                            {}).get('desk') is not None and new_doc.get(
                                ITEM_STATE) != CONTENT_STATE.SUBMITTED:
            new_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED

        convert_task_attributes_to_objectId(new_doc)
        get_model(ItemModel).create([new_doc])
        self._duplicate_versions(original_doc['guid'], new_doc)

        return new_doc['guid']
Example #38
    def _duplicate_item(self, original_doc):
        """
        Duplicates the 'original_doc' including its version history. If the article being duplicated is contained
        in a desk then the article state is changed to Submitted.

        :return: guid of the duplicated article
        """

        new_doc = original_doc.copy()
        self._remove_after_copy(new_doc)

        new_doc[ITEM_OPERATION] = ITEM_DUPLICATE
        item_model = get_model(ItemModel)

        on_duplicate_item(new_doc)
        resolve_document_version(new_doc, SOURCE, 'PATCH', new_doc)
        if original_doc.get('task', {}).get(
                'desk') is not None and new_doc.get('state') != 'submitted':
            new_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED
        item_model.create([new_doc])
        self._duplicate_versions(original_doc['guid'], new_doc)

        return new_doc['guid']
Example #39
def build_response_document(document,
                            resource,
                            embedded_fields,
                            latest_doc=None):
    """ Prepares a document for response including generation of ETag and
    metadata fields.

    :param document: the document to embed other documents into.
    :param resource: the resource name.
    :param embedded_fields: the list of fields we are allowed to embed.
    :param latest_doc: the latest version of the document.

    .. versionadded:: 0.4
    """
    # need to update the document field since the etag must be computed on the
    # same document representation that might have been used in the collection
    # 'get' method
    document[config.DATE_CREATED] = date_created(document)
    document[config.LAST_UPDATED] = last_updated(document)
    # TODO: last_update could include consideration for embedded documents

    # generate ETag
    if config.IF_MATCH:
        document[config.ETAG] = document_etag(document)

    # hateoas links
    if config.DOMAIN[resource]['hateoas'] and config.ID_FIELD in document:
        document[config.LINKS] = {
            'self': document_link(resource, document[config.ID_FIELD])
        }

    # add version numbers
    resolve_document_version(document, resource, 'GET', latest_doc)

    # media and embedded documents
    resolve_media_files(document, resource)
    resolve_embedded_documents(document, resource, embedded_fields)
Example #40
    def create(self, docs, **kwargs):
        doc = docs[0] if len(docs) > 0 else {}
        original_id = request.view_args['original_id']
        update_document = doc.get('update')

        archive_service = get_resource_service(ARCHIVE)
        original = archive_service.find_one(req=None, _id=original_id)
        self._validate_rewrite(original, update_document)

        rewrite = self._create_rewrite_article(original,
                                               existing_item=update_document,
                                               desk_id=doc.get('desk_id'))

        if update_document:
            # process the existing story
            archive_service.patch(update_document[config.ID_FIELD], rewrite)
            app.on_archive_item_updated(rewrite, update_document, ITEM_LINK)
            rewrite[config.ID_FIELD] = update_document[config.ID_FIELD]
            ids = [update_document[config.ID_FIELD]]
        else:
            # Set the version.
            resolve_document_version(rewrite, ARCHIVE, "POST")
            ids = archive_service.post([rewrite])
            insert_into_versions(doc=rewrite)
            build_custom_hateoas(CUSTOM_HATEOAS, rewrite)

            app.on_archive_item_updated(
                {'rewrite_of': rewrite.get('rewrite_of')}, rewrite, ITEM_LINK)

        self._add_rewritten_flag(original, rewrite)
        get_resource_service('archive_broadcast').on_broadcast_master_updated(
            ITEM_CREATE, item=original, rewrite_id=ids[0])

        doc.clear()
        doc.update(rewrite)
        return ids
Example #41
    def duplicate_item(self,
                       original_doc,
                       state=None,
                       extra_fields=None,
                       operation=None):
        """Duplicates an item.

        Duplicates the 'original_doc' including its version history. If the article being duplicated is contained
        in a desk then the article state is changed to Submitted.

        :return: guid of the duplicated article
        """

        new_doc = original_doc.copy()
        self.remove_after_copy(new_doc, extra_fields)
        on_duplicate_item(new_doc, original_doc, operation)
        resolve_document_version(new_doc, SOURCE, 'PATCH', new_doc)

        if original_doc.get('task',
                            {}).get('desk') is not None and new_doc.get(
                                ITEM_STATE) != CONTENT_STATE.SUBMITTED:
            new_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED

        if state:
            new_doc[ITEM_STATE] = state

        convert_task_attributes_to_objectId(new_doc)
        get_model(ItemModel).create([new_doc])
        self._duplicate_versions(original_doc['_id'], new_doc)
        self._duplicate_history(original_doc['_id'], new_doc)
        app.on_archive_item_updated({'duplicate_id': new_doc['guid']},
                                    original_doc, operation or ITEM_DUPLICATE)
        app.on_archive_item_updated({'duplicate_id': original_doc['_id']},
                                    new_doc, operation or ITEM_DUPLICATED_FROM)

        return new_doc['guid']
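A minimal usage sketch for the helper above (not from the Superdesk sources): the 'archive' service name, the lookup and the wrapper function are assumptions; only find_one() and duplicate_item() come from the examples in this document.

from superdesk import get_resource_service

def duplicate_archive_item(item_id):
    # Hedged sketch: drive duplicate_item() from Example #41. The 'archive'
    # service name and the item_id lookup are illustrative assumptions.
    archive_service = get_resource_service('archive')
    original = archive_service.find_one(req=None, _id=item_id)
    if original is None:
        raise ValueError('item not found: %s' % item_id)
    # returns the guid of the newly created copy; the version history is
    # duplicated inside duplicate_item() itself
    return archive_service.duplicate_item(original)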
Example #42
 def _move(self, archived_doc, doc):
     archive_service = get_resource_service(ARCHIVE)
     original = deepcopy(archived_doc)
     user = get_user()
     send_to(doc=archived_doc,
             desk_id=doc.get('task', {}).get('desk'),
             stage_id=doc.get('task', {}).get('stage'),
             user_id=user.get(config.ID_FIELD))
     if archived_doc[ITEM_STATE] not in \
             {CONTENT_STATE.PUBLISHED, CONTENT_STATE.SCHEDULED, CONTENT_STATE.KILLED, CONTENT_STATE.RECALLED}:
         archived_doc[ITEM_STATE] = CONTENT_STATE.SUBMITTED
     archived_doc[ITEM_OPERATION] = ITEM_MOVE
     # set the change in desk type when content is moved.
     self.set_change_in_desk_type(archived_doc, original)
     archived_doc.pop(SIGN_OFF, None)
     set_sign_off(archived_doc, original=original)
     convert_task_attributes_to_objectId(archived_doc)
     resolve_document_version(archived_doc, ARCHIVE, 'PATCH', original)
     del archived_doc[config.ID_FIELD]
     archive_service.update(original[config.ID_FIELD], archived_doc,
                            original)
     insert_into_versions(id_=original[config.ID_FIELD])
     push_item_move_notification(original, archived_doc)
     app.on_archive_item_updated(archived_doc, original, ITEM_MOVE)
Example #43
    def setUp(self):
        try:
            from apps.legal_archive.commands import ImportLegalArchiveCommand
        except ImportError:
            self.fail("Could not import class under test (ImportLegalArchiveCommand).")
        else:
            self.class_under_test = ImportLegalArchiveCommand
            self.app.data.insert("desks", self.desks)
            self.app.data.insert("users", self.users)
            self.validators = [
                {"schema": {}, "type": "text", "act": "publish", "_id": "publish_text"},
                {"schema": {}, "type": "text", "act": "correct", "_id": "correct_text"},
                {"schema": {}, "type": "text", "act": "kill", "_id": "kill_text"},
            ]

            self.products = [
                {"_id": "1", "name": "prod1"},
                {"_id": "2", "name": "prod2", "codes": "abc,def"},
                {"_id": "3", "name": "prod3", "codes": "xyz"},
            ]

            self.subscribers = [
                {
                    "name": "Test",
                    "is_active": True,
                    "subscriber_type": "wire",
                    "email": "*****@*****.**",
                    "sequence_num_settings": {"max": 9999, "min": 1},
                    "products": ["1"],
                    "destinations": [
                        {
                            "name": "test",
                            "delivery_type": "email",
                            "format": "nitf",
                            "config": {"recipients": "*****@*****.**"},
                        }
                    ],
                }
            ]
            self.app.data.insert("validators", self.validators)
            self.app.data.insert("products", self.products)
            self.app.data.insert("subscribers", self.subscribers)
            self.class_under_test = ImportLegalArchiveCommand
            self.archive_items = [
                {
                    "task": {"desk": self.desks[0]["_id"], "stage": self.desks[0]["incoming_stage"], "user": "******"},
                    "_id": "item1",
                    "state": "in_progress",
                    "headline": "item 1",
                    "type": "text",
                    "slugline": "item 1 slugline",
                    "_current_version": 1,
                    "_created": utcnow() - timedelta(minutes=3),
                    "expired": utcnow() - timedelta(minutes=30),
                },
                {
                    "task": {"desk": self.desks[0]["_id"], "stage": self.desks[0]["incoming_stage"], "user": "******"},
                    "_id": "item2",
                    "state": "in_progress",
                    "headline": "item 2",
                    "type": "text",
                    "slugline": "item 2 slugline",
                    "_current_version": 1,
                    "_created": utcnow() - timedelta(minutes=2),
                    "expired": utcnow() - timedelta(minutes=30),
                },
                {
                    "task": {"desk": self.desks[0]["_id"], "stage": self.desks[0]["incoming_stage"], "user": "******"},
                    "_id": "item3",
                    "state": "in_progress",
                    "headline": "item 2",
                    "type": "text",
                    "slugline": "item 2 slugline",
                    "_current_version": 1,
                    "_created": utcnow() - timedelta(minutes=1),
                    "expired": utcnow() - timedelta(minutes=30),
                },
            ]

            get_resource_service(ARCHIVE).post(self.archive_items)
            for item in self.archive_items:
                resolve_document_version(item, ARCHIVE, "POST")
                insert_into_versions(id_=item["_id"])
Example #44
def deleteitem_internal(resource,
                        concurrency_check=False,
                        suppress_callbacks=False,
                        original=None,
                        **lookup):
    """ Intended for internal delete calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Deletes a resource item.

    :param resource: name of the resource to which the item(s) belong.
    :param concurrency_check: concurrency check switch (bool)
    :param original: original document if already fetched from the database
    :param **lookup: item lookup query.

    .. versionchanged:: 0.6
       Support for soft delete.

    .. versionchanged:: 0.5
       Return 204 NoContent instead of 200.
       Push updates to OpLog.
       Original deleteitem() has been split into deleteitem() and
       deleteitem_internal().

    .. versionchanged:: 0.4
       Fix #284: If you have a media field, and set datasource projection to
       0 for that field, the media will not be deleted.
       Support for document versioning.
       'on_delete_item' events raised before performing the delete.
       'on_deleted_item' events raised after performing the delete.

    .. versionchanged:: 0.3
       Delete media files as needed.
       Pass the explicit query filter to the data driver, as it does not
       support the id argument anymore.

    .. versionchanged:: 0.2
       Raise pre_<method> event.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.5
      Pass current resource to ``parse_request``, allowing for proper
      processing of new configuration settings: `filters`, `sorting`, `paging`.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.
    """
    resource_def = config.DOMAIN[resource]
    soft_delete_enabled = resource_def["soft_delete"]
    original = get_document(resource,
                            concurrency_check,
                            original,
                            force_auth_field_projection=soft_delete_enabled,
                            **lookup)
    if not original or (soft_delete_enabled
                        and original.get(config.DELETED) is True):
        return all_done()

    # notify callbacks
    if not suppress_callbacks:
        getattr(app, "on_delete_item")(resource, original)
        getattr(app, "on_delete_item_%s" % resource)(original)

    if soft_delete_enabled:
        # Instead of removing the document from the db, just mark it as deleted
        marked_document = copy.deepcopy(original)

        # Set DELETED flag and update metadata
        last_modified = datetime.utcnow().replace(microsecond=0)
        marked_document[config.DELETED] = True
        marked_document[config.LAST_UPDATED] = last_modified

        if config.IF_MATCH:
            resolve_document_etag(marked_document, resource)

        resolve_document_version(marked_document, resource, "DELETE", original)

        # Update document in database (including version collection if needed)
        id = original[resource_def["id_field"]]
        try:
            app.data.replace(resource, id, marked_document, original)
        except app.data.OriginalChangedError:
            if concurrency_check:
                abort(412, description="Client and server etags don't match")

        # create previous version if it wasn't already there
        late_versioning_catch(original, resource)
        # and add deleted version
        insert_versioning_documents(resource, marked_document)
        # update oplog if needed
        oplog_push(resource, marked_document, "DELETE", id)

    else:
        # Delete the document for real

        # media cleanup
        media_fields = app.config["DOMAIN"][resource]["_media"]

        # document might miss one or more media fields because of datasource
        # and/or client projection.
        missing_media_fields = [f for f in media_fields if f not in original]
        if missing_media_fields:
            # retrieve the whole document so we have all media fields available
            # Should be a very rare occurrence. We can't get rid of the
            # get_document() call since it also deals with etag matching, which
            # is still needed. Also, this lookup should never fail.
            # TODO not happy with this hack. Not at all. Is there a better way?
            original = app.data.find_one_raw(resource, **lookup)

        for field in media_fields:
            if field in original:
                media_field = original[field]
                if isinstance(media_field, list):
                    for file_id in media_field:
                        app.media.delete(file_id, resource)
                else:
                    app.media.delete(original[field], resource)

        id = original[resource_def["id_field"]]
        app.data.remove(resource, lookup)

        # TODO: should attempt to delete version collection even if setting is
        # off
        if app.config["DOMAIN"][resource]["versioning"] is True:
            app.data.remove(
                resource + config.VERSIONS,
                {
                    versioned_id_field(resource_def):
                    original[resource_def["id_field"]]
                },
            )

        # update oplog if needed
        oplog_push(resource, original, "DELETE", id)

    if not suppress_callbacks:
        getattr(app, "on_deleted_item")(resource, original)
        getattr(app, "on_deleted_item_%s" % resource)(original)

    return all_done()
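As a usage illustration (not part of the Eve sources above): only the deleteitem_internal signature comes from the code; the 'items' resource, the Eve app setup and the _id value are assumptions.

from eve import Eve
from eve.methods.delete import deleteitem_internal

app = Eve()  # assumes a settings file declaring an 'items' resource

with app.test_request_context(method='DELETE'):
    # not rate limited and no auth check; honours soft_delete if the
    # 'items' resource has it enabled
    deleteitem_internal('items', concurrency_check=False, _id='doc-to-remove')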
Example #45
def build_response_document(document,
                            resource,
                            embedded_fields,
                            latest_doc=None):
    """ Prepares a document for response including generation of ETag and
    metadata fields.

    :param document: the document to embed other documents into.
    :param resource: the resource name.
    :param embedded_fields: the list of fields we are allowed to embed.
    :param latest_doc: the latest version of the document.

    .. versionchanged:: 0.5
       Only compute ETAG if necessary (#369).
       Add version support (#475).

    .. versionadded:: 0.4
    """
    resource_def = config.DOMAIN[resource]

    # need to update the document field since the etag must be computed on the
    # same document representation that might have been used in the collection
    # 'get' method
    document[config.DATE_CREATED] = date_created(document)
    document[config.LAST_UPDATED] = last_updated(document)

    # Up to v0.4 etags were not stored with the documents.
    if config.IF_MATCH and config.ETAG not in document:
        ignore_fields = resource_def['etag_ignore_fields']
        document[config.ETAG] = document_etag(document,
                                              ignore_fields=ignore_fields)

    # hateoas links
    if resource_def['hateoas'] and resource_def['id_field'] in document:
        version = None
        if resource_def['versioning'] is True \
                and request.args.get(config.VERSION_PARAM):
            version = document[config.VERSION]

        self_dict = {
            'self':
            document_link(resource, document[resource_def['id_field']],
                          version)
        }
        if config.LINKS not in document:
            document[config.LINKS] = self_dict
        elif 'self' not in document[config.LINKS]:
            document[config.LINKS].update(self_dict)

    # add version numbers
    resolve_document_version(document, resource, 'GET', latest_doc)

    # resolve media
    resolve_media_files(document, resource)

    # resolve soft delete
    if resource_def['soft_delete'] is True:
        if document.get(config.DELETED) is None:
            document[config.DELETED] = False
        elif document[config.DELETED] is True:
            # Soft deleted documents are sent without expansion of embedded
            # documents. Return before resolving them.
            return

    # resolve embedded documents
    resolve_embedded_documents(document, resource, embedded_fields)
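A short sketch of how this helper is typically driven after a write, mirroring its use inside put_internal/patch_internal later in this document; the 'items' resource, the Eve app setup, the in-memory document and the empty embedded-field list are assumptions.

from eve import Eve
from eve.methods.common import build_response_document, marshal_write_response

app = Eve()  # assumes a settings file declaring an 'items' resource

with app.test_request_context('/items'):
    document = {'_id': 'some-id', 'name': 'example'}   # illustrative document
    build_response_document(document, 'items', [], latest_doc=document)
    # document now carries DATE_CREATED / LAST_UPDATED, an ETag (if IF_MATCH),
    # HATEOAS links and version numbers, ready to be marshalled
    response = marshal_write_response(document, 'items')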
Example #46
    def unlock(self, item_filter, user_id, session_id, etag):
        item_model = get_model(ItemModel)
        item = item_model.find_one(item_filter)

        if not item:
            raise SuperdeskApiError.notFoundError()

        if not item.get(LOCK_USER):
            raise SuperdeskApiError.badRequestError(
                message="Item is not locked.")

        can_user_unlock, error_message = self.can_unlock(item, user_id)

        if can_user_unlock:
            self.app.on_item_unlock(item, user_id)
            updates = {}

            # delete the item if nothing is saved so far
            # version 0 created on lock item
            if item.get(config.VERSION,
                        0) == 0 and item[ITEM_STATE] == CONTENT_STATE.DRAFT:
                if item.get(ITEM_TYPE) == CONTENT_TYPE.COMPOSITE:
                    # if item is composite then update referenced items in package.
                    PackageService().update_groups({}, item)

                superdesk.get_resource_service('archive').delete_action(
                    lookup={'_id': item['_id']})
                push_content_notification([item])
            else:
                updates = {
                    LOCK_USER: None,
                    LOCK_SESSION: None,
                    LOCK_TIME: None,
                    LOCK_ACTION: None,
                    'force_unlock': True
                }
                autosave = superdesk.get_resource_service(
                    'archive_autosave').find_one(req=None, _id=item['_id'])
                if autosave and item[ITEM_STATE] not in PUBLISH_STATES:
                    if not hasattr(
                            flask.g,
                            'user'):  # user is not set when session expires
                        flask.g.user = superdesk.get_resource_service(
                            'users').find_one(req=None, _id=user_id)
                    autosave.update(updates)
                    resolve_document_version(autosave, 'archive', 'PATCH',
                                             item)
                    superdesk.get_resource_service('archive').patch(
                        item['_id'], autosave)
                    item = superdesk.get_resource_service('archive').find_one(
                        req=None, _id=item['_id'])
                    insert_versioning_documents('archive', item)
                else:
                    item_model.update(item_filter, updates)
                    item = item_model.find_one(item_filter)
                self.app.on_item_unlocked(item, user_id)

            push_unlock_notification(item, user_id, session_id)
        else:
            raise SuperdeskApiError.forbiddenError(message=error_message)

        return item
Example #47
def put_internal(resource,
                 payload=None,
                 concurrency_check=False,
                 skip_validation=False,
                 **lookup):
    """ Intended for internal put calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document replacement.
    Updates are first validated against the resource schema. If validation
    passes, the document is replaced and an OK status update is returned.
    If validation fails, a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling put() from your own code
                    you can provide an alternative payload. This can be useful,
                    for example, when you have a callback function hooked to a
                    certain endpoint, and want to perform additional put()
                    calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6
       Create document if it does not exist. Closes #634.
       Allow restoring soft deleted documents via PUT

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependencies even when some have default values. See
       #353.
       Original put() has been split into put() and put_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)

    if payload is None:
        payload = payload_()

    original = get_document(resource, concurrency_check, **lookup)
    if not original:
        if config.UPSERT_ON_PUT:
            id = lookup[resource_def['id_field']]
            # this guard avoids a bson dependency, which would be needed if we
            # wanted to use 'isinstance'. Should also be slightly faster.
            if schema[resource_def['id_field']].get('type', '') == 'objectid':
                id = str(id)
            payload[resource_def['id_field']] = id
            return post_internal(resource, payl=payload)
        else:
            abort(404)

    last_modified = None
    etag = None
    issues = {}
    object_id = original[resource_def['id_field']]

    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        document = parse(payload, resource)
        resolve_sub_resource_path(document, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_replace(document, object_id,
                                                    original)
            # Apply coerced values
            document = validator.document

        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            # update meta
            last_modified = datetime.utcnow().replace(microsecond=0)
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]
            if resource_def['soft_delete'] is True:
                # PUT with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                document[config.DELETED] = False

            # id_field not in document means it is not being automatically
            # handled (it has been set to a field which exists in the
            # resource schema.
            if resource_def['id_field'] not in document:
                document[resource_def['id_field']] = object_id

            resolve_user_restricted_access(document, resource)
            resolve_default_values(document, resource_def['defaults'])
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, 'PUT', original)

            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)

            resolve_document_etag(document, resource)

            # write to db
            try:
                app.data.replace(resource, object_id, document, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412,
                          description='Client and server etags don\'t match')

            # update oplog if needed
            oplog_push(resource, document, 'PUT')

            insert_versioning_documents(resource, document)

            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)

            # build the full response document
            build_response_document(document, resource, embedded_fields,
                                    document)
            response = document
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues['validator exception'] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400,
              description=debug_error_message('An exception occurred: %s' % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
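A usage sketch for put_internal: the signature and the return tuple come from the code above, while the 'items' resource, the Eve app setup, the payload and the lookup id are assumptions.

from eve import Eve
from eve.methods.put import put_internal

app = Eve()  # assumes a settings file declaring an 'items' resource

with app.test_request_context(method='PUT'):
    # a request context must be available when passing an alternative payload
    response, last_modified, etag, status = put_internal(
        'items',
        payload={'name': 'replacement document'},
        concurrency_check=False,
        _id='existing-id')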
Example #48
def put(resource, **lookup):
    """ Perform a document replacement. Updates are first validated against
    the resource schema. If validation passes, the document is replaced and
    an OK status update is returned. If validation fails, a set of validation
    issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param **lookup: document lookup query.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       Resolve default values before validation is performed. See #353.
       Raise 'on_replace' instead of 'on_insert'. The callback function gets
       the document (as opposed to a list of just 1 document) as an argument.
       Support for document versioning.
       Raise `on_replaced` after the document has been replaced

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise pre_<method> event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.
       Item-identifier wrapper stripped from both request and response payload.

    .. versionadded:: 0.1.0
    """
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)

    payload = payload_()
    original = get_document(resource, **lookup)
    if not original:
        # not found
        abort(404)

    last_modified = None
    etag = None
    issues = {}
    object_id = original[config.ID_FIELD]

    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        document = parse(payload, resource)
        resolve_default_values(document, resource_def['defaults'])
        validation = validator.validate_replace(document, object_id)
        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            # update meta
            last_modified = datetime.utcnow().replace(microsecond=0)
            document[config.LAST_UPDATED] = last_modified
            document[config.DATE_CREATED] = original[config.DATE_CREATED]

            # ID_FIELD not in document means it is not being automatically
            # handled (it has been set to a field which exists in the resource
            # schema.
            if config.ID_FIELD not in document:
                document[config.ID_FIELD] = object_id

            resolve_user_restricted_access(document, resource)
            store_media_files(document, resource, original)
            resolve_document_version(document, resource, 'PUT', original)

            # notify callbacks
            getattr(app, "on_replace")(resource, document, original)
            getattr(app, "on_replace_%s" % resource)(document, original)

            # write to db
            app.data.replace(resource, object_id, document)
            insert_versioning_documents(resource, document)

            # notify callbacks
            getattr(app, "on_replaced")(resource, document, original)
            getattr(app, "on_replaced_%s" % resource)(document, original)

            # build the full response document
            build_response_document(document, resource, embedded_fields,
                                    document)
            response = document
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues['validator exception'] = str(e)
    except (exceptions.InternalServerError, exceptions.Unauthorized,
            exceptions.Forbidden, exceptions.NotFound) as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        abort(400,
              description=debug_error_message('An exception occurred: %s' % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
    else:
        response[config.STATUS] = config.STATUS_OK

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, 200
Example #49
def post_internal(resource, payl=None, skip_validation=False):
    """
    Intended for internal post calls, this method is not rate limited,
    authentication is not checked and pre-request events are not raised.
    Adds one or more documents to a resource. Each document is validated
    against the domain schema. If validation passes the document is inserted
    and ID_FIELD, LAST_UPDATED and DATE_CREATED along with a link to the
    document are returned. If validation fails, a list of validation issues
    is returned.

    :param resource: name of the resource involved.
    :param payl: alternative payload. When calling post() from your own code
                 you can provide an alternative payload. This can be useful,
                 for example, when you have a callback function hooked to a
                 certain endpoint, and want to perform additional post() calls
                 from there.

                 Please be advised that in order to successfully use this
                 option, a request context must be available.

                 See https://github.com/pyeve/eve/issues/74 for a
                 discussion, and a typical use case.
    :param skip_validation: skip payload validation before write (bool)

    .. versionchanged:: 0.7
       Add support for Location header. Closes #795.

    .. versionchanged:: 0.6
       Fix: since v0.6, skip_validation = True causes a 422 response (#726).

    .. versionchanged:: 0.6
       Initialize DELETED field when soft_delete is enabled.

    .. versionchanged:: 0.5
       Back to resolving default values after validation as now the validator
       can properly validate dependencies even when some have default values. See
       #353.
       Push updates to the OpLog.
       Original post() has been split into post() and post_internal().
       ETAGS are now stored with documents (#369).

    .. versionchanged:: 0.4
       Resolve default values before validation is performed. See #353.
       Support for document versioning.

    .. versionchanged:: 0.3
       Return 201 if at least one document has been successfully inserted.
       Fix #231 auth field not set if resource level authentication is set.
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.
       Added ``on_inserted*`` events after the database insert

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.

    .. versionchanged:: 0.1.0
       More robust handling of auth_field.
       Support for optional HATEOAS.

    .. versionchanged:: 0.0.9
       Event hooks renamed to be more robust and consistent: 'on_posting'
       renamed to 'on_insert'.
       You can now pass a pre-defined custom payload to the function.

    .. versionchanged:: 0.0.9
       Storing self.app.auth.userid in auth_field when 'user-restricted
       resource access' is enabled.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.
       Support for 'extra_response_fields'.

       'on_posting' and 'on_posting_<resource>' events are raised before the
       documents are inserted into the database. This allows callback functions
       to arbitrarily edit/update the documents being stored.

    .. versionchanged:: 0.0.6
       Support for bulk inserts.

       Please note: validation constraints are checked against the database,
       and not between the payload documents themselves. This causes an
       interesting corner case: in the event of a multiple documents payload
       where two or more documents carry the same value for a field where the
       'unique' constraint is set, the payload will validate successfully, as
       there are no duplicates in the database (yet). If this is an issue, the
       client can always send the documents one at a time for insertion, or
       validate locally before submitting the payload to the API.

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.
       Support for 'user-restricted resource access'.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """

    date_utc = datetime.utcnow().replace(microsecond=0)
    resource_def = app.config["DOMAIN"][resource]
    schema = resource_def["schema"]
    validator = (
        None
        if skip_validation
        else app.validator(
            schema, resource=resource, allow_unknown=resource_def["allow_unknown"]
        )
    )

    documents = []
    results = []
    failures = 0
    id_field = resource_def["id_field"]

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    # validation, and additional fields
    if payl is None:
        payl = payload()

    if isinstance(payl, dict):
        payl = [payl]

    if not payl:
        # empty bulk insert
        abort(400, description=debug_error_message("Empty bulk insert"))

    if len(payl) > 1 and not config.DOMAIN[resource]["bulk_enabled"]:
        abort(400, description=debug_error_message("Bulk insert not allowed"))

    for value in payl:
        document = []
        doc_issues = {}
        try:
            document = parse(value, resource)
            resolve_sub_resource_path(document, resource)
            if skip_validation:
                validation = True
            else:
                validation = validator.validate(document)
            if validation:  # validation is successful
                # validator might be not available if skip_validation. #726.
                if validator:
                    # Apply coerced values
                    document = validator.document

                # Populate meta and default fields
                document[config.LAST_UPDATED] = document[config.DATE_CREATED] = date_utc

                if config.DOMAIN[resource]["soft_delete"] is True:
                    document[config.DELETED] = False

                resolve_user_restricted_access(document, resource)
                store_media_files(document, resource)
                resolve_document_version(document, resource, "POST")
            else:
                # validation errors added to list of document issues
                doc_issues = validator.errors
        except DocumentError as e:
            doc_issues["validation exception"] = str(e)
        except Exception as e:
            # most likely a problem with the incoming payload, report back to
            # the client as if it was a validation issue
            app.logger.exception(e)
            doc_issues["exception"] = str(e)

        if len(doc_issues):
            document = {config.STATUS: config.STATUS_ERR, config.ISSUES: doc_issues}
            failures += 1

        documents.append(document)

    if failures:
        # If at least one document has issues, the whole request fails and a
        # ``422 Bad Request`` status is returned.
        for document in documents:
            if (
                config.STATUS in document
                and document[config.STATUS] == config.STATUS_ERR
            ):
                results.append(document)
            else:
                results.append({config.STATUS: config.STATUS_OK})

        return_code = config.VALIDATION_ERROR_STATUS
    else:
        # notify callbacks
        getattr(app, "on_insert")(resource, documents)
        getattr(app, "on_insert_%s" % resource)(documents)

        # compute etags here as documents might have been updated by callbacks.
        resolve_document_etag(documents, resource)

        # bulk insert
        ids = app.data.insert(resource, documents)

        # update oplog if needed
        oplog_push(resource, documents, "POST")

        # assign document ids
        for document in documents:
            # either return the custom ID_FIELD or the id returned by
            # data.insert().
            id_ = document.get(id_field, ids.pop(0))
            document[id_field] = id_

            # build the full response document
            result = document
            build_response_document(result, resource, embedded_fields, document)

            # add extra write meta data
            result[config.STATUS] = config.STATUS_OK

            # limit what actually gets sent to minimize bandwidth usage
            result = marshal_write_response(result, resource)
            results.append(result)

        # insert versioning docs
        insert_versioning_documents(resource, documents)

        # notify callbacks
        getattr(app, "on_inserted")(resource, documents)
        getattr(app, "on_inserted_%s" % resource)(documents)
        # request was received and accepted; at least one document passed
        # validation and was accepted for insertion.

        return_code = 201

    if len(results) == 1:
        response = results.pop(0)
    else:
        response = {
            config.STATUS: config.STATUS_ERR if failures else config.STATUS_OK,
            config.ITEMS: results,
        }

    if failures:
        response[config.ERROR] = {
            "code": return_code,
            "message": "Insertion failure: %d document(s) contain(s) error(s)"
            % failures,
        }

    location_header = (
        None
        if return_code != 201 or not documents
        else [("Location", "%s/%s" % (resource_link(), documents[0][id_field]))]
    )

    return response, None, None, return_code, location_header
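A usage sketch for post_internal: the signature and the five-element return tuple come from the code above, while the 'items' resource, the Eve app setup and the payload are assumptions; as the docstring notes, a request context must be available.

from eve import Eve
from eve.methods.post import post_internal

app = Eve()  # assumes a settings file declaring an 'items' resource

with app.test_request_context(method='POST'):
    response, _, _, status, headers = post_internal(
        'items', payl={'name': 'created internally'})
    # status is 201 when the document was inserted, otherwise the
    # configured VALIDATION_ERROR_STATUS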
Example #50
def enqueue_item(published_item):
    """
    Creates the corresponding entries in the publish queue for the given item
    """
    published_item_id = ObjectId(published_item[config.ID_FIELD])
    published_service = get_resource_service(PUBLISHED)
    archive_service = get_resource_service(ARCHIVE)
    published_update = {
        QUEUE_STATE: PUBLISH_STATE.IN_PROGRESS,
        'last_queue_event': utcnow()
    }
    try:
        logger.info('Queueing item with id: {} and item_id: {}'.format(
            published_item_id, published_item['item_id']))

        published_item = published_service.find_one(req=None,
                                                    _id=published_item_id)
        if published_item.get(QUEUE_STATE) != PUBLISH_STATE.PENDING:
            logger.info(
                'Queue State is not pending for published item {}. It is in {}'
                .format(published_item_id, published_item.get(QUEUE_STATE)))
            return

        if published_item.get(ITEM_STATE) == CONTENT_STATE.SCHEDULED:
            # if scheduled then change the state to published
            # change the `version` and `versioncreated` for the item
            # in archive collection and published collection.
            versioncreated = utcnow()
            item_updates = {
                'versioncreated': versioncreated,
                ITEM_STATE: CONTENT_STATE.PUBLISHED
            }
            resolve_document_version(
                document=item_updates,
                resource=ARCHIVE,
                method='PATCH',
                latest_doc={config.VERSION: published_item[config.VERSION]})
            # update the archive collection
            archive_service.patch(published_item['item_id'], item_updates)
            # insert into version.
            insert_into_versions(published_item['item_id'], doc=None)
            # import to legal archive
            import_into_legal_archive.apply_async(
                countdown=3, kwargs={'item_id': published_item['item_id']})
            logger.info('Modified the version of scheduled item: {}'.format(
                published_item_id))

            logger.info(
                'Publishing scheduled item_id: {}'.format(published_item_id))
            # update the published collection
            published_update.update(item_updates)
            published_item.update({
                'versioncreated': versioncreated,
                ITEM_STATE: CONTENT_STATE.PUBLISHED,
                config.VERSION: item_updates[config.VERSION]
            })

        published_service.patch(published_item_id, published_update)
        queued = get_enqueue_service(
            published_item[ITEM_OPERATION]).enqueue_item(published_item)
        # if the item is queued in the publish_queue then the state is "queued"
        # else the queue state is "queued_not_transmitted"
        queue_state = PUBLISH_STATE.QUEUED if queued else PUBLISH_STATE.QUEUED_NOT_TRANSMITTED
        published_service.patch(published_item_id, {QUEUE_STATE: queue_state})
        logger.info('Queued item with id: {} and item_id: {}'.format(
            published_item_id, published_item['item_id']))
    except KeyError:
        published_service.patch(published_item_id,
                                {QUEUE_STATE: PUBLISH_STATE.PENDING})
        logger.exception('No enqueue service found for operation %s',
                         published_item[ITEM_OPERATION])
    except:
        published_service.patch(published_item_id,
                                {QUEUE_STATE: PUBLISH_STATE.PENDING})
        raise
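A sketch of how enqueue_item() above might be driven for pending published items. It reuses PUBLISHED, QUEUE_STATE, PUBLISH_STATE, get_resource_service and enqueue_item from the example above; the get_from_mongo query helper and the overall loop are assumptions, not part of the original code.

def enqueue_pending_items():
    # Hedged sketch: iterate over published items still waiting to be queued
    # and enqueue them one by one. The lookup and query helper are assumed.
    published_service = get_resource_service(PUBLISHED)
    pending = published_service.get_from_mongo(
        req=None, lookup={QUEUE_STATE: PUBLISH_STATE.PENDING})
    for item in pending:
        enqueue_item(item)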
Example #51
    def link_as_next_take(self, target, link):
        """Makes next take to target from given link.

        Check if target has an associated takes package. If not, create it and add target as a take.
        Check if the target is the last take, if not, resolve the last take. Copy metadata from the target and add it
        as the next take and return the update link item

        :return: the updated link item
        """

        takes_package_id = self.get_take_package_id(target)
        archive_service = get_resource_service(ARCHIVE)
        takes_package = archive_service.find_one(
            req=None, _id=takes_package_id) if takes_package_id else {}

        if not takes_package:
            # setting the sequence to 1 for target.
            updates = {SEQUENCE: 1}
            if target[ITEM_STATE] in [
                    CONTENT_STATE.SPIKED, CONTENT_STATE.KILLED,
                    CONTENT_STATE.SCHEDULED, CONTENT_STATE.INGESTED
            ]:
                raise SuperdeskApiError.forbiddenError(
                    "Item isn't in a valid state for creating takes.")
            else:
                archive_service.system_update(target.get(config.ID_FIELD),
                                              updates, target)

        link_updates = {}

        if not link.get(config.ID_FIELD):
            # A new story to be linked
            self.__copy_metadata__(target, link, takes_package, set_state=True)
            link[ITEM_OPERATION] = ITEM_CREATE
            archive_service.post([link])
        else:
            self.__copy_metadata__(target,
                                   link_updates,
                                   takes_package,
                                   set_state=False)

        link.update(link_updates)

        if not takes_package_id:
            takes_package_id = self.package_story_as_a_take(
                target, takes_package, link)
        else:
            original_takes_package = deepcopy(takes_package)
            self.__link_items__(takes_package, target, link)
            del takes_package[config.ID_FIELD]
            takes_package.pop('unique_id', None)
            takes_package.pop('unique_name', None)
            takes_package.pop(PUBLISH_SCHEDULE, None)
            takes_package.pop(SCHEDULE_SETTINGS, None)

            resolve_document_version(takes_package, ARCHIVE, 'PATCH',
                                     takes_package)
            archive_service.patch(takes_package_id, takes_package)
            app.on_archive_item_updated(link_updates, original_takes_package,
                                        ITEM_LINK)
            get_resource_service(
                'archive_broadcast').on_broadcast_master_updated(
                    ITEM_CREATE, target, takes_package_id=takes_package_id)

        if link.get(SEQUENCE):
            link_updates.update({SEQUENCE: link[SEQUENCE]})
            archive_service.system_update(link[config.ID_FIELD], link_updates,
                                          link)
            app.on_archive_item_updated({'linked_to': target[config.ID_FIELD]},
                                        link, ITEM_LINK)

        insert_into_versions(id_=takes_package_id)

        if RE_OPENS.lower() in link.get('anpa_take_key', '').lower():
            app.on_archive_item_updated({'new_take_id': link[config.ID_FIELD]},
                                        target, ITEM_REOPEN)
        else:
            app.on_archive_item_updated({'new_take_id': link[config.ID_FIELD]},
                                        target, ITEM_TAKE)

        return link
Example #52
 def on_create(self, docs):
     on_create_item(docs)
     for doc in docs:
         resolve_document_version(doc, ARCHIVE, 'POST')
         self.update_times(doc)
         self.update_stage(doc)
Example #53
    def remove_refs_in_package(self,
                               package,
                               ref_id_to_remove,
                               processed_packages=None):
        """
        Removes the residRef referenced by ref_id_to_remove from the package associations.
        Before removing, checks whether the package has already been processed; if so, the package is skipped.
        In case of a takes package, the sequence is decremented and the last_take field is updated.
        If the sequence reaches zero, the takes package is deleted.
        :return: list of processed package ids (sub-package ids plus package[config.ID_FIELD])
        """
        groups = package[GROUPS]

        if processed_packages is None:
            processed_packages = []

        sub_package_ids = [
            ref['guid'] for group in groups for ref in group[REFS]
            if ref.get('type') == CONTENT_TYPE.COMPOSITE
        ]
        for sub_package_id in sub_package_ids:
            if sub_package_id not in processed_packages:
                sub_package = self.find_one(req=None, _id=sub_package_id)
                return self.remove_refs_in_package(sub_package,
                                                   ref_id_to_remove)

        new_groups = [{
            GROUP_ID: group[GROUP_ID],
            ROLE: group.get(ROLE),
            REFS: [ref for ref in group[REFS] if ref.get('guid') != ref_id_to_remove]
        } for group in groups]
        new_root_refs = [{ID_REF: group[GROUP_ID]}
                         for group in new_groups if group[GROUP_ID] != ROOT_GROUP]

        for group in new_groups:
            if group[GROUP_ID] == ROOT_GROUP:
                group[REFS] = new_root_refs
                break

        updates = {config.LAST_UPDATED: utcnow(), GROUPS: new_groups}

        # if takes package then adjust the reference.
        # safe to do this as take can only be in one takes package.
        delete_package = False
        if package.get(PACKAGE_TYPE) == TAKES_PACKAGE:
            new_sequence = package[SEQUENCE] - 1
            if new_sequence == 0:
                # remove the takes package.
                get_resource_service(ARCHIVE).delete_action(
                    {config.ID_FIELD: package[config.ID_FIELD]})
                delete_package = True
            else:
                updates[SEQUENCE] = new_sequence
                last_take_group = next(
                    reference for reference in next(
                        new_group.get(REFS) for new_group in new_groups
                        if new_group[GROUP_ID] == MAIN_GROUP)
                    if reference.get(SEQUENCE) == new_sequence)

                if last_take_group:
                    updates[LAST_TAKE] = last_take_group.get(RESIDREF)

        if not delete_package:
            resolve_document_version(updates, ARCHIVE, 'PATCH', package)
            get_resource_service(ARCHIVE).patch(package[config.ID_FIELD],
                                                updates)
            insert_into_versions(id_=package[config.ID_FIELD])

        sub_package_ids.append(package[config.ID_FIELD])
        return sub_package_ids
Example #54
def patch_internal(resource,
                   payload=None,
                   concurrency_check=False,
                   skip_validation=False,
                   mongo_options=None,
                   **lookup):
    """Intended for internal patch calls, this method is not rate limited,
    authentication is not checked, pre-request events are not raised, and
    concurrency checking is optional. Performs a document patch/update.
    Updates are first validated against the resource schema. If validation
    passes, the document is updated and an OK status update is returned.
    If validation fails, a set of validation issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param payload: alternative payload. When calling patch() from your own
                    code you can provide an alternative payload. This can be
                    useful, for example, when you have a callback function
                    hooked to a certain endpoint, and want to perform
                    additional patch() calls from there.

                    Please be advised that in order to successfully use this
                    option, a request context must be available.
    :param concurrency_check: concurrency check switch (bool)
    :param skip_validation: skip payload validation before write (bool)
    :param mongo_options: options to pass to PyMongo. e.g. ReadConcern of the initial get.
    :param **lookup: document lookup query.

    .. versionchanged:: 0.6.2
       Fix: validator is not set when skip_validation is true.

    .. versionchanged:: 0.6
       on_updated returns the updated document (#682).
       Allow restoring soft deleted documents via PATCH

    .. versionchanged:: 0.5
       Updating nested document fields does not overwrite the nested document
       itself (#519).
       Push updates to the OpLog.
       Original patch() has been split into patch() and patch_internal().
       You can now pass a pre-defined custom payload to the function.
       ETAG is now stored with the document (#369).
       Catching all HTTPExceptions and returning them to the caller, allowing
       for eventual flask.abort() invocations in callback functions to go
       through. Fixes #395.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       'on_update' raised before performing the update on the database.
       Support for document versioning.
       'on_updated' raised after performing the update on the database.

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.

    .. versionchanged:: 0.1.1
       Item-identifier wrapper stripped from both request and response payload.

    .. versionchanged:: 0.1.0
       Support for optional HATEOAS.
       Re-raises `exceptions.Unauthorized`, this could occur if the
       `auth_field` condition fails

    .. versionchanged:: 0.0.9
       More informative error messages.
       Support for Python 3.3.

    .. versionchanged:: 0.0.8
       Let ``werkzeug.exceptions.InternalServerError`` go through as they have
       probably been explicitly raised by the data driver.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.6
       ETag is now computed without the need of an additional db lookup

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """
    if payload is None:
        payload = payload_()

    original = get_document(resource, concurrency_check, mongo_options,
                            **lookup)
    if not original:
        # not found
        abort(404)

    resource_def = app.config["DOMAIN"][resource]
    schema = resource_def["schema"]
    normalize_document = resource_def.get("normalize_on_patch")
    validator = app.validator(schema,
                              resource=resource,
                              allow_unknown=resource_def["allow_unknown"])

    object_id = original[resource_def["id_field"]]
    last_modified = None
    etag = None

    issues = {}
    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        updates = parse(payload, resource)
        if skip_validation:
            validation = True
        else:
            validation = validator.validate_update(updates, object_id,
                                                   original,
                                                   normalize_document)
            # Apply coerced values
            updates = validator.document

        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            store_media_files(updates, resource, original)
            resolve_document_version(updates, resource, "PATCH", original)

            # some datetime precision magic
            updates[config.LAST_UPDATED] = datetime.utcnow().replace(
                microsecond=0)

            if resource_def["soft_delete"] is True:
                # PATCH with soft delete enabled should always set the DELETED
                # field to False. We are either carrying through un-deleted
                # status, or restoring a soft deleted document
                updates[config.DELETED] = False

            # the mongo driver has a different precision than the python
            # datetime. since we don't want to reload the document once it
            # has been updated, and we still have to provide an updated
            # etag, we're going to update the local version of the
            # 'original' document, and we will use it for the etag
            # computation.
            updated = deepcopy(original)

            # notify callbacks
            getattr(app, "on_update")(resource, updates, original)
            getattr(app, "on_update_%s" % resource)(updates, original)

            if resource_def["merge_nested_documents"]:
                updates = resolve_nested_documents(updates, updated)

            if mongo_options:
                updated.with_options(mongo_options).update(updates)
            else:
                updated.update(updates)

            if config.IF_MATCH:
                resolve_document_etag(updated, resource)
                # now storing the (updated) ETAG with every document (#453)
                updates[config.ETAG] = updated[config.ETAG]
            try:
                app.data.update(resource, object_id, updates, original)
            except app.data.OriginalChangedError:
                if concurrency_check:
                    abort(412,
                          description="Client and server etags don't match")

            # update oplog if needed
            oplog_push(resource, updates, "PATCH", object_id)

            insert_versioning_documents(resource, updated)

            # notify callbacks
            getattr(app, "on_updated")(resource, updates, original)
            getattr(app, "on_updated_%s" % resource)(updates, original)

            updated.update(updates)

            # build the full response document
            build_response_document(updated, resource, embedded_fields,
                                    updated)
            response = updated
            if config.IF_MATCH:
                etag = response[config.ETAG]
        else:
            issues = validator.errors
    except DocumentError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues["validator exception"] = str(e)
    except exceptions.HTTPException as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        app.logger.exception(e)
        abort(400,
              description=debug_error_message("An exception occurred: %s" % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
        status = config.VALIDATION_ERROR_STATUS
    else:
        response[config.STATUS] = config.STATUS_OK
        status = 200

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, status
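
A minimal usage sketch for the entry point above, assuming it is exposed as Eve's patch_internal and accepts the keyword arguments visible in its body (payload, concurrency_check, skip_validation, mongo_options, **lookup). The resource name and field below are illustrative, and a request context must already be active, for example inside an event hook:

def deactivate_account(account_id):
    # all names in this sketch are assumptions; only the call shape mirrors the code above
    response, last_modified, etag, status = patch_internal(
        'accounts',                     # resource name (illustrative)
        payload={'is_active': False},   # alternative payload instead of the Flask request body
        skip_validation=False,          # still validate the update against the schema
        concurrency_check=False,        # skip the If-Match/etag comparison
        _id=account_id)                 # **lookup forwarded to get_document()
    return status == 200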
Example #55
0
def patch(resource, **lookup):
    """ Perform a document patch/update. Updates are first validated against
    the resource schema. If validation passes, the document is updated and
    an OK status update is returned. If validation fails, a set of validation
    issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param **lookup: document lookup query.

    .. versionchanged:: 0.4
       Allow abort() to be invoked by callback functions.
       'on_update' raised before performing the update on the database.
       Support for document versioning.
       'on_updated' raised after performing the update on the database.

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.

    .. versionchanged:: 0.1.1
       Item-identifier wrapper stripped from both request and response payload.

    .. versionchanged:: 0.1.0
       Support for optional HATEOAS.
       Re-raises `exceptions.Unauthorized`, which could occur if the
       `auth_field` condition fails.

    .. versionchanged:: 0.0.9
       More informative error messages.
       Support for Python 3.3.

    .. versionchanged:: 0.0.8
       Let ``werkzeug.exceptions.InternalServerError`` go through, as it has
       probably been explicitly raised by the data driver.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.6
       ETag is now computed without the need of an additional db lookup

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """
    payload = payload_()
    original = get_document(resource, **lookup)
    if not original:
        # not found
        abort(404)

    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)

    object_id = original[config.ID_FIELD]
    last_modified = None
    etag = None

    issues = {}
    response = {}

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    try:
        updates = parse(payload, resource)
        validation = validator.validate_update(updates, object_id)
        if validation:
            # sneak in a shadow copy if it wasn't already there
            late_versioning_catch(original, resource)

            store_media_files(updates, resource, original)
            resolve_document_version(updates, resource, 'PATCH', original)

            # some datetime precision magic
            updates[config.LAST_UPDATED] = \
                datetime.utcnow().replace(microsecond=0)

            # the mongo driver has a different precision than the python
            # datetime. since we don't want to reload the document once it has
            # been updated, and we still have to provide an updated etag,
            # we're going to update the local version of the 'original'
            # document, and we will use it for the etag computation.
            updated = original.copy()

            # notify callbacks
            getattr(app, "on_update")(resource, updates, original)
            getattr(app, "on_update_%s" % resource)(updates, original)

            updated.update(updates)

            app.data.update(resource, object_id, updates)
            insert_versioning_documents(resource, updated)

            # notify callbacks
            getattr(app, "on_updated")(resource, updates, original)
            getattr(app, "on_updated_%s" % resource)(updates, original)

            # build the full response document
            build_response_document(updated, resource, embedded_fields,
                                    updated)
            response = updated

        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues['validator exception'] = str(e)
    except (exceptions.InternalServerError, exceptions.Unauthorized,
            exceptions.Forbidden, exceptions.NotFound) as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        abort(400,
              description=debug_error_message('An exception occurred: %s' % e))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
    else:
        response[config.STATUS] = config.STATUS_OK

    # limit what actually gets sent to minimize bandwidth usage
    response = marshal_write_response(response, resource)

    return response, last_modified, etag, 200
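
This version of patch() fires the update events through getattr(app, "on_update...") as seen above, so per-resource callbacks can be registered for them; a short sketch (the resource name 'contacts' and the 'last_editor' field are illustrative):

from eve import Eve

app = Eve()

def before_contacts_update(updates, original):
    # runs just before app.data.update(); may mutate `updates` in place
    updates['last_editor'] = 'system'

def after_contacts_update(updates, original):
    # runs after the database update has been applied
    app.logger.info('contact %s updated', original.get('_id'))

app.on_update_contacts += before_contacts_update
app.on_updated_contacts += after_contacts_update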
Example #56
0
    def setUp(self):
        super().setUp()
        try:
            from apps.legal_archive.commands import ImportLegalArchiveCommand
        except ImportError:
            self.fail(
                'Could not import class under test (ImportLegalArchiveCommand).'
            )
        else:
            self.class_under_test = ImportLegalArchiveCommand
            self.app.data.insert('desks', self.desks)
            self.app.data.insert('users', self.users)
            self.validators = [{
                'schema': {},
                'type': 'text',
                'act': 'publish',
                '_id': 'publish_text'
            }, {
                'schema': {},
                'type': 'text',
                'act': 'correct',
                '_id': 'correct_text'
            }, {
                'schema': {},
                'type': 'text',
                'act': 'kill',
                '_id': 'kill_text'
            }]

            self.products = [{
                '_id': '1',
                'name': 'prod1'
            }, {
                '_id': '2',
                'name': 'prod2',
                'codes': 'abc,def'
            }, {
                '_id': '3',
                'name': 'prod3',
                'codes': 'xyz'
            }]

            self.subscribers = [{
                'name': 'Test',
                'is_active': True,
                'subscriber_type': 'wire',
                'email': '*****@*****.**',
                'sequence_num_settings': {'max': 9999, 'min': 1},
                'products': ['1'],
                'destinations': [{
                    'name': 'test',
                    'delivery_type': 'email',
                    'format': 'nitf',
                    'config': {'recipients': '*****@*****.**'}
                }]
            }]
            self.app.data.insert('validators', self.validators)
            self.app.data.insert('products', self.products)
            self.app.data.insert('subscribers', self.subscribers)
            self.class_under_test = ImportLegalArchiveCommand
            self.archive_items = [{
                'task': {
                    'desk': self.desks[0]['_id'],
                    'stage': self.desks[0]['incoming_stage'],
                    'user': '******'
                },
                '_id': 'item1',
                'state': 'in_progress',
                'headline': 'item 1',
                'type': 'text',
                'slugline': 'item 1 slugline',
                '_current_version': 1,
                '_created': utcnow() - timedelta(minutes=3),
                'expired': utcnow() - timedelta(minutes=30)
            }, {
                'task': {
                    'desk': self.desks[0]['_id'],
                    'stage': self.desks[0]['incoming_stage'],
                    'user': '******'
                },
                '_id': 'item2',
                'state': 'in_progress',
                'headline': 'item 2',
                'type': 'text',
                'slugline': 'item 2 slugline',
                '_current_version': 1,
                '_created': utcnow() - timedelta(minutes=2),
                'expired': utcnow() - timedelta(minutes=30)
            }, {
                'task': {
                    'desk': self.desks[0]['_id'],
                    'stage': self.desks[0]['incoming_stage'],
                    'user': '******'
                },
                '_id': 'item3',
                'state': 'in_progress',
                'headline': 'item 2',
                'type': 'text',
                'slugline': 'item 2 slugline',
                '_current_version': 1,
                '_created': utcnow() - timedelta(minutes=1),
                'expired': utcnow() - timedelta(minutes=30)
            }]

            get_resource_service(ARCHIVE).post(self.archive_items)
            for item in self.archive_items:
                resolve_document_version(item, ARCHIVE, 'POST')
                insert_into_versions(id_=item['_id'])
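
A hypothetical follow-up test, not part of the source, showing how this fixture could be exercised: it runs the command under test and checks that the expired archive items were copied across. The 'legal_archive' resource name and the run() signature are assumptions.

    def test_expired_items_are_imported(self):
        # run the command prepared in setUp; the page_size argument is assumed
        self.class_under_test().run(page_size=10)

        legal_items = list(get_resource_service('legal_archive').get(req=None, lookup={}))
        self.assertEqual(len(legal_items), len(self.archive_items))
        self.assertSetEqual({item['_id'] for item in legal_items},
                            {'item1', 'item2', 'item3'})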
Example #57
0
    def enqueue_item(self, published_item):
        """
        Creates the corresponding entries in the publish queue for the given item
        """
        published_item_id = ObjectId(published_item[config.ID_FIELD])
        published_service = get_resource_service(PUBLISHED)
        archive_service = get_resource_service(ARCHIVE)
        published_update = {
            QUEUE_STATE: PUBLISH_STATE.IN_PROGRESS,
            'last_queue_event': utcnow()
        }
        try:
            logger.info('Queueing item with id: {} and item_id: {}'.format(
                published_item_id, published_item['item_id']))

            published_item = published_service.find_one(req=None,
                                                        _id=published_item_id)
            if published_item.get(QUEUE_STATE) != PUBLISH_STATE.PENDING:
                logger.info(
                    'Queue State is not pending for published item {}. It is in {}'
                    .format(published_item_id,
                            published_item.get(QUEUE_STATE)))
                return

            if published_item.get(ITEM_STATE) == CONTENT_STATE.SCHEDULED:
                # if scheduled then change the state to published
                # change the `version` and `versioncreated` for the item
                # in archive collection and published collection.
                versioncreated = utcnow()
                item_updates = {
                    'versioncreated': versioncreated,
                    ITEM_STATE: CONTENT_STATE.PUBLISHED
                }
                resolve_document_version(
                    document=item_updates,
                    resource=ARCHIVE,
                    method='PATCH',
                    latest_doc={config.VERSION: published_item[config.VERSION]})

                # update the archive collection
                archive_item = archive_service.find_one(
                    req=None, _id=published_item['item_id'])
                archive_service.system_update(published_item['item_id'],
                                              item_updates, archive_item)
                # insert into version.
                insert_into_versions(published_item['item_id'], doc=None)
                # update archive history
                app.on_archive_item_updated(item_updates, archive_item,
                                            ITEM_PUBLISH)
                # import to legal archive
                import_into_legal_archive.apply_async(
                    countdown=3, kwargs={'item_id': published_item['item_id']})
                logger.info(
                    'Modified the version of scheduled item: {}'.format(
                        published_item_id))

                logger.info('Publishing scheduled item_id: {}'.format(
                    published_item_id))
                # update the published collection
                published_update.update(item_updates)
                published_item.update({
                    'versioncreated': versioncreated,
                    ITEM_STATE: CONTENT_STATE.PUBLISHED,
                    config.VERSION: item_updates[config.VERSION]
                })
                # send a notification to the clients
                push_content_notification([{
                    '_id': str(published_item['item_id']),
                    'task': published_item.get('task', None)
                }])
                #  apply internal destinations
                signals.item_published.send(self,
                                            item=archive_service.find_one(
                                                req=None,
                                                _id=published_item['item_id']))

            published_service.patch(published_item_id, published_update)
            # queue the item for publishing
            try:
                queued = get_enqueue_service(
                    published_item[ITEM_OPERATION]).enqueue_item(
                        published_item, None)
            except KeyError as key_error:
                error_updates = {
                    QUEUE_STATE: PUBLISH_STATE.ERROR,
                    ERROR_MESSAGE: str(key_error)
                }
                published_service.patch(published_item_id, error_updates)
                logger.exception('No enqueue service found for operation %s',
                                 published_item[ITEM_OPERATION])
                raise

            # if the item is queued in the publish_queue then the state is "queued"
            # else the queue state is "queued_not_transmitted"
            queue_state = PUBLISH_STATE.QUEUED if queued else PUBLISH_STATE.QUEUED_NOT_TRANSMITTED
            published_service.patch(published_item_id,
                                    {QUEUE_STATE: queue_state})
            logger.info('Queued item with id: {} and item_id: {}'.format(
                published_item_id, published_item['item_id']))
        except ConnectionTimeout as error:  # recoverable, set state to pending and retry next time
            error_updates = {
                QUEUE_STATE: PUBLISH_STATE.PENDING,
                ERROR_MESSAGE: str(error)
            }
            published_service.patch(published_item_id, error_updates)
            raise
        except SoftTimeLimitExceeded as error:
            error_updates = {
                QUEUE_STATE: PUBLISH_STATE.PENDING,
                ERROR_MESSAGE: str(error)
            }
            published_service.patch(published_item_id, error_updates)
            raise
        except Exception as error:
            error_updates = {
                QUEUE_STATE: PUBLISH_STATE.ERROR,
                ERROR_MESSAGE: str(error)
            }
            published_service.patch(published_item_id, error_updates)
            raise
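
An illustrative caller, not part of the source: a periodic task could feed pending published items to enqueue_item() and rely on recoverable failures resetting QUEUE_STATE to PENDING so the item is retried on the next run. The EnqueueContent class name and the get_from_mongo() lookup are assumptions.

def enqueue_pending_items():
    published_service = get_resource_service(PUBLISHED)
    pending = published_service.get_from_mongo(
        req=None, lookup={QUEUE_STATE: PUBLISH_STATE.PENDING})
    for item in pending:
        try:
            EnqueueContent().enqueue_item(item)
        except (ConnectionTimeout, SoftTimeLimitExceeded):
            # enqueue_item() has already reset QUEUE_STATE to PENDING,
            # so the item will simply be retried on the next run
            continue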
Example #58
0
def post(resource, payl=None):
    """ Adds one or more documents to a resource. Each document is validated
    against the domain schema. If validation passes, the document is inserted
    and ID_FIELD, LAST_UPDATED and DATE_CREATED along with a link to the
    document are returned. If validation fails, a list of validation issues
    is returned.

    :param resource: name of the resource involved.
    :param payl: alternative payload. When calling post() from your own code
                 you can provide an alternative payload. This can be useful,
                 for example, when you have a callback function hooked to a
                 certain endpoint, and want to perform additional post() calls
                 from there.

                 Please be advised that in order to successfully use this
                 option, a request context must be available.

                 See https://github.com/nicolaiarocci/eve/issues/74 for a
                 discussion, and a typical use case.

    .. versionchanged:: 0.4
       Resolve default values before validation is performed. See #353.
       Support for document versioning.

    .. versionchanged:: 0.3
       Return 201 if at least one document has been successfully inserted.
       Fix #231 auth field not set if resource level authentication is set.
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.
       Explicitly resolve default values instead of letting them be resolved
       by common.parse. This avoids a validation error when a read-only field
       also has a default value.
       Added ``on_inserted*`` events after the database insert.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.

    .. versionchanged:: 0.1.0
       More robust handling of auth_field.
       Support for optional HATEOAS.

    .. versionchanged:: 0.0.9
       Event hooks renamed to be more robust and consistent: 'on_posting'
       renamed to 'on_insert'.
       You can now pass a pre-defined custom payload to the function.

    .. versionchanged:: 0.0.9
       Storing self.app.auth.userid in auth_field when 'user-restricted
       resource access' is enabled.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.
       Support for 'extra_response_fields'.

       'on_posting' and 'on_posting_<resource>' events are raised before the
       documents are inserted into the database. This allows callback functions
       to arbitrarily edit/update the documents being stored.

    .. versionchanged:: 0.0.6
       Support for bulk inserts.

       Please note: validation constraints are checked against the database,
       and not between the payload documents themselves. This causes an
       interesting corner case: in the event of a multiple documents payload
       where two or more documents carry the same value for a field where the
       'unique' constraint is set, the payload will validate successfully, as
       there are no duplicates in the database (yet). If this is an issue, the
       client can always send the documents once at a time for insertion, or
       validate locally before submitting the payload to the API.

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.
       Support for 'user-restricted resource access'.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superfluous ``response`` container removed.
    """

    date_utc = datetime.utcnow().replace(microsecond=0)
    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)
    documents = []
    results = []
    failures = 0

    if config.BANDWIDTH_SAVER is True:
        embedded_fields = []
    else:
        req = parse_request(resource)
        embedded_fields = resolve_embedded_fields(resource, req)

    # validation, and additional fields
    if payl is None:
        payl = payload()

    if isinstance(payl, dict):
        payl = [payl]

    for value in payl:
        document = []
        doc_issues = {}
        try:
            document = parse(value, resource)
            resolve_default_values(document, resource_def['defaults'])
            validation = validator.validate(document)
            if validation:
                # validation is successful
                document[config.LAST_UPDATED] = \
                    document[config.DATE_CREATED] = date_utc

                resolve_user_restricted_access(document, resource)
                resolve_sub_resource_path(document, resource)
                store_media_files(document, resource)
                resolve_document_version(document, resource, 'POST')
            else:
                # validation errors added to list of document issues
                doc_issues = validator.errors
        except ValidationError as e:
            doc_issues['validation exception'] = str(e)
        except Exception as e:
            # most likely a problem with the incoming payload, report back to
            # the client as if it was a validation issue
            doc_issues['exception'] = str(e)

        if len(doc_issues):
            document = {
                config.STATUS: config.STATUS_ERR,
                config.ISSUES: doc_issues,
            }
            failures += 1

        documents.append(document)

    if failures:
        # If at least one document has issues, the whole request fails and a
        # ``400 Bad Request`` status is returned.
        for document in documents:
            if config.STATUS in document \
               and document[config.STATUS] == config.STATUS_ERR:
                results.append(document)
            else:
                results.append({config.STATUS: config.STATUS_OK})

        return_code = 400
    else:
        # notify callbacks
        getattr(app, "on_insert")(resource, documents)
        getattr(app, "on_insert_%s" % resource)(documents)

        # bulk insert
        ids = app.data.insert(resource, documents)

        # assign document ids
        for document in documents:
            # either return the custom ID_FIELD or the id returned by
            # data.insert().
            document[config.ID_FIELD] = \
                document.get(config.ID_FIELD, ids.pop(0))

            # build the full response document
            result = document
            build_response_document(
                result, resource, embedded_fields, document)

            # add extra write meta data
            result[config.STATUS] = config.STATUS_OK

            # limit what actually gets sent to minimize bandwidth usage
            result = marshal_write_response(result, resource)
            results.append(result)

        # insert versioning docs
        insert_versioning_documents(resource, documents)

        # notify callbacks
        getattr(app, "on_inserted")(resource, documents)
        getattr(app, "on_inserted_%s" % resource)(documents)
        # request was received and accepted; at least one document passed
        # validation and was accepted for insertion.

        return_code = 201

    if len(results) == 1:
        response = results.pop(0)
    else:
        response = {
            config.STATUS: config.STATUS_ERR if failures else config.STATUS_OK,
            config.ITEMS: results,
        }

    if failures:
        response[config.ERROR] = {
            "code": return_code,
            "message": "Insertion failure: %d document(s) contain(s) error(s)"
            % failures,
        }

    return response, None, None, return_code
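
A minimal sketch of the alternative-payload option described in the docstring: post() can be called from an event callback, where a request context is already available. The 'audit' resource, its fields, and the callback registration are assumptions; the guard avoids re-triggering the hook for the audit inserts themselves.

def record_audit_event(resource, documents):
    if resource == 'audit':
        return  # avoid recursing when the audit documents themselves are inserted
    payload = [{'resource': resource, 'inserted': len(documents)}]
    response, _, _, return_code = post('audit', payl=payload)
    assert return_code == 201

# `app` is assumed to be the Eve application object
app.on_inserted += record_audit_event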