def create_scheduled_content(now=None):
    lock_name = get_lock_id("Template", "Schedule")
    if not lock(lock_name, expire=130):
        logger.info('Task: {} is already running.'.format(lock_name))
        return

    try:
        if now is None:
            now = utcnow()
        templates = get_scheduled_templates(now)
        production = superdesk.get_resource_service(ARCHIVE)
        items = []
        for template in templates:
            set_template_timestamps(template, now)
            item = get_item_from_template(template)
            item[config.VERSION] = 1
            production.post([item])
            insert_into_versions(doc=item)
            try:
                apply_onstage_rule(item, item.get(config.ID_FIELD))
            except Exception:
                logger.exception('Failed to apply on stage rule while scheduling template.')
            items.append(item)
        return items
    except Exception as e:
        logger.exception('Task: {} failed with error {}.'.format(lock_name, str(e)))
    finally:
        unlock(lock_name)
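
Every example on this page repeats the same shape: acquire the lock, bail out if it is already held, do the work in a try block, and release in finally. As a minimal sketch (not part of Superdesk, assuming the same lock()/unlock() helpers used above), the pattern can be folded into a context manager:

from contextlib import contextmanager

@contextmanager
def task_lock(lock_name, expire):
    """Yield True if the lock was acquired, False otherwise; release on exit."""
    acquired = lock(lock_name, expire=expire)
    try:
        yield acquired
    finally:
        if acquired:
            unlock(lock_name)

# Usage sketch:
#     with task_lock(get_lock_id("Template", "Schedule"), expire=130) as acquired:
#         if acquired:
#             ...  # do the scheduled work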
Example #2
    def run(self, page_size=None):
        logger.info("Import to Legal Archive")
        lock_name = get_lock_id("legal_archive", "import_to_legal_archive")
        page_size = int(page_size) if page_size else self.default_page_size
        if not lock(lock_name, "", expire=1800):
            return
        try:
            legal_archive_import = LegalArchiveImport()
            publish_queue = get_resource_service("publish_queue")
            for items in self.get_expired_items(page_size):
                for item in items:
                    try:
                        legal_archive_import.upsert_into_legal_archive(item.get("item_id"))
                        req = ParsedRequest()
                        req.where = json.dumps({"item_id": item["item_id"]})
                        queue_items = list(publish_queue.get(req=req, lookup=None))
                        if queue_items:
                            try:
                                logger.info("Import to Legal Publish Queue")
                                legal_archive_import.process_queue_items(queue_items, True)
                            except Exception:
                                logger.exception(
                                    "Failed to import into legal publish queue "
                                    "via command {}.".format(item.get("item_id"))
                                )
                    except Exception:
                        logger.exception(
                            "Failed to import into legal archive via command {}.".format(item.get("item_id"))
                        )

        except Exception:
            logger.exception("Failed to import into legal archive.")
        finally:
            unlock(lock_name, "")
Example #3
def report():
    """Check all saved_searches with subscribers, and publish reports"""
    if not lock(LOCK_NAME, expire=REPORT_SOFT_LIMIT + 10):
        return
    try:
        saved_searches = get_resource_service('saved_searches')
        subscribed_searches = saved_searches.find({"subscribers": {"$exists": 1}})
        tz = pytz.timezone(superdesk.app.config['DEFAULT_TIMEZONE'])
        now = datetime.now(tz=tz)
        for search in subscribed_searches:
            do_update = False

            subscribed_users = search['subscribers'].get('user_subscriptions', [])
            do_update = process_subscribers(subscribed_users, search, now, False) or do_update

            subscribed_desks = search['subscribers'].get('desk_subscriptions', [])
            do_update = process_subscribers(subscribed_desks, search, now, True) or do_update

            if do_update:
                updates = {'subscribers': search['subscribers']}
                saved_searches.update(search['_id'], updates, search)
    except Exception as e:
        logger.error("Can't report saved searched: {reason}".format(reason=e))
    finally:
        unlock(LOCK_NAME)
Example #4
    def run(self, expiry_days=None):
        if expiry_days:
            self.expiry_days = int(expiry_days)
        elif app.settings.get('CONTENT_API_EXPIRY_DAYS'):
            self.expiry_days = app.settings['CONTENT_API_EXPIRY_DAYS']

        if self.expiry_days == 0:
            logger.info('Expiry days is set to 0, therefore no items will be removed.')
            return

        now = utcnow()
        self.log_msg = 'Expiry Time: {}'.format(now)
        logger.info('{} Starting to remove expired content_api items.'.format(self.log_msg))

        lock_name = get_lock_id('content_api', 'remove_expired')
        if not lock(lock_name, expire=600):
            logger.info('{} Remove expired content_api items task is already running'.format(self.log_msg))
            return

        try:
            num_items_removed = self._remove_expired_items(now, self.expiry_days)
        finally:
            unlock(lock_name)

        if num_items_removed == 0:
            logger.info('{} Completed but no items were removed'.format(self.log_msg))
        else:
            logger.info('{} Completed removing {} expired content_api items'.format(self.log_msg, num_items_removed))
Example #5
def publish():
    """
    Fetches items from publish queue as per the configuration,
    calls the transmit function.
    """
    lock_name = get_lock_id("Transmit", "Articles")
    if not lock(lock_name, '', expire=1800):
        logger.info('Task: {} is already running.'.format(lock_name))
        return

    try:
        # Query any outstanding transmit requests
        items = list(get_queue_items())
        if len(items) > 0:
            transmit_items(items)

        # Query any outstanding retry attempts
        retry_items = list(get_queue_items(True))
        if len(retry_items) > 0:
            transmit_items(retry_items)

    except Exception:
        logger.exception('Task: {} failed.'.format(lock_name))
    finally:
        unlock(lock_name, '')
Example #6
def transmit_subscriber_items(self, queue_items, subscriber):
    # Attempt to obtain a lock for transmissions to the subscriber
    lock_name = get_lock_id("Subscriber", "Transmit", subscriber)

    if not lock(lock_name, expire=610):
        return

    for queue_item in queue_items:
        publish_queue_service = get_resource_service(PUBLISH_QUEUE)
        log_msg = (
            "_id: {_id}  item_id: {item_id}  state: {state} "
            "item_version: {item_version} headline: {headline}".format(**queue_item)
        )
        try:
            # check the status of the queue item
            queue_item = publish_queue_service.find_one(req=None, _id=queue_item[config.ID_FIELD])
            if queue_item.get("state") not in [QueueState.PENDING.value, QueueState.RETRYING.value]:
                logger.info(
                    "Transmit State is not pending/retrying for queue item: {}. It is in {}".format(
                        queue_item.get(config.ID_FIELD), queue_item.get("state")
                    )
                )
                continue

            # update the status of the item to in-progress
            queue_update = {"state": "in-progress", "transmit_started_at": utcnow()}
            publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
            logger.info("Transmitting queue item {}".format(log_msg))

            destination = queue_item["destination"]
            transmitter = superdesk.publish.registered_transmitters[destination.get("delivery_type")]
            transmitter.transmit(queue_item)
            logger.info("Transmitted queue item {}".format(log_msg))
        except Exception as e:
            logger.exception("Failed to transmit queue item {}".format(log_msg))

            max_retry_attempt = app.config.get("MAX_TRANSMIT_RETRY_ATTEMPT")
            retry_attempt_delay = app.config.get("TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES")
            try:
                orig_item = publish_queue_service.find_one(req=None, _id=queue_item["_id"])
                updates = {config.LAST_UPDATED: utcnow()}

                if orig_item.get("retry_attempt", 0) < max_retry_attempt and not isinstance(
                    e, PublishHTTPPushClientError
                ):

                    updates["retry_attempt"] = orig_item.get("retry_attempt", 0) + 1
                    updates["state"] = QueueState.RETRYING.value
                    updates["next_retry_attempt_at"] = utcnow() + timedelta(minutes=retry_attempt_delay)
                else:
                    # all retry attempts exhausted marking the item as failed.
                    updates["state"] = QueueState.FAILED.value

                publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
            except Exception:
                logger.error("Failed to set the state for failed publish queue item {}.".format(queue_item["_id"]))

    # Release the lock for the subscriber
    unlock(lock_name)
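
The error branch above (and in the near-identical variants below) applies one rule: increment retry_attempt until MAX_TRANSMIT_RETRY_ATTEMPT is reached, then mark the queue item failed. A condensed sketch of that decision, as a hypothetical helper reusing the QueueState and PublishHTTPPushClientError names from the example:

from datetime import timedelta

def updates_for_failed_attempt(orig_item, error, max_attempts, delay_minutes, now):
    """Return the patch to apply to a queue item after a failed transmission."""
    attempts = orig_item.get("retry_attempt", 0)
    if attempts < max_attempts and not isinstance(error, PublishHTTPPushClientError):
        # schedule another attempt after a fixed delay
        return {
            "retry_attempt": attempts + 1,
            "state": QueueState.RETRYING.value,
            "next_retry_attempt_at": now + timedelta(minutes=delay_minutes),
        }
    # retries exhausted, or a client error that retrying cannot fix
    return {"state": QueueState.FAILED.value}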
Example #7
    def run(self):
        logger.info('Importing Legal Publish Queue')
        lock_name = get_lock_id('legal_archive', 'import_legal_publish_queue')
        if not lock(lock_name, '', expire=600):
            return
        try:
            LegalArchiveImport().import_legal_publish_queue()
        finally:
            unlock(lock_name, '')
Example #8
    def run(self):
        logger.info("Import to Legal Publish Queue")
        lock_name = get_lock_id("legal_archive", "import_legal_publish_queue")
        if not lock(lock_name, "", expire=600):
            return
        try:
            LegalArchiveImport().import_legal_publish_queue()
        finally:
            unlock(lock_name, "")
Example #9
    def run(self, page_size=None):
        logger.info('Import to Legal Publish Queue')
        lock_name = get_lock_id('legal_archive', 'import_legal_publish_queue')
        page_size = int(page_size) if page_size else self.default_page_size
        if not lock(lock_name, expire=310):
            return
        try:
            LegalArchiveImport().import_legal_publish_queue(page_size=page_size)
        finally:
            unlock(lock_name)
Example #10
def update_provider(self, provider, rule_set=None, routing_scheme=None):
    """
    Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
    updates the provider.

    :param provider: Ingest Provider Details
    :type provider: dict :py:class:`superdesk.io.ingest_provider_model.IngestProviderResource`
    :param rule_set: Translation Rule Set if one is associated with Ingest Provider.
    :type rule_set: dict :py:class:`apps.rules.rule_sets.RuleSetsResource`
    :param routing_scheme: Routing Scheme if one is associated with Ingest Provider.
    :type routing_scheme: dict :py:class:`apps.rules.routing_rules.RoutingRuleSchemeResource`
    """

    lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
    host_name = get_host_id(self)

    if not lock(lock_name, host_name, expire=1800):
        return

    try:
        feeding_service = registered_feeding_services[provider['feeding_service']]
        feeding_service = feeding_service.__class__()

        update = {LAST_UPDATED: utcnow()}

        for items in feeding_service.update(provider):
            ingest_items(items, provider, feeding_service, rule_set, routing_scheme)
            stats.incr('ingest.ingested_items', len(items))
            if items:
                update[LAST_ITEM_UPDATE] = utcnow()

        # Some Feeding Services update the collection and by this time the _etag might have been changed.
        # So it's necessary to fetch it once again. Otherwise, OriginalChangedError is raised.
        ingest_provider_service = superdesk.get_resource_service('ingest_providers')
        provider = ingest_provider_service.find_one(req=None, _id=provider[superdesk.config.ID_FIELD])
        ingest_provider_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)

        if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
            admins = superdesk.get_resource_service('users').get_users_by_user_type('administrator')
            notify_and_add_activity(
                ACTIVITY_EVENT,
                'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
                resource='ingest_providers', user_list=admins, name=provider.get('name'),
                last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))

        logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))

        if LAST_ITEM_UPDATE in update:  # Only push a notification if there has been an update
            push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
    finally:
        unlock(lock_name, host_name)
Example #11
    def init_elastic(self, app):
        """Init elastic index.

        It will create index and put mapping. It should run only once so locks are in place.
        Thus mongo must be already setup before running this.
        """
        with app.app_context():
            if lock('elastic', expire=10):
                try:
                    self.elastic.init_index(app)
                finally:
                    unlock('elastic')
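
None of these examples show lock() and unlock() themselves; they come from Superdesk's lock utilities. Purely as an illustration of how a distributed lock with an expiry can be built (a hypothetical, simplified version, not Superdesk's actual implementation; owner checks and the remove flag are omitted), an atomic Redis SET with NX and EX via redis-py does the job:

import uuid

import redis

redis_client = redis.Redis()  # assumed connection, for illustration only

def lock(name, value=None, expire=60):
    """Try to acquire the named lock; True on success, False if already held."""
    token = value or uuid.uuid4().hex
    # SET key value NX EX: atomic acquire with a TTL, so a crashed worker
    # cannot hold the lock forever.
    return bool(redis_client.set('lock:' + name, token, nx=True, ex=expire))

def unlock(name, value=None):
    """Release the named lock."""
    redis_client.delete('lock:' + name)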
Example #12
def transmit_subscriber_items(queue_items, subscriber):
    lock_name = get_lock_id('Subscriber', 'Transmit', subscriber)
    publish_queue_service = get_resource_service(PUBLISH_QUEUE)

    if not lock(lock_name, expire=610):
        return

    try:
        for queue_item in queue_items:
            log_msg = '_id: {_id}  item_id: {item_id}  state: {state} ' \
                      'item_version: {item_version} headline: {headline}'.format(**queue_item)
            try:
                # check the status of the queue item
                queue_item = publish_queue_service.find_one(req=None, _id=queue_item[config.ID_FIELD])
                if queue_item.get('state') not in [QueueState.PENDING.value, QueueState.RETRYING.value]:
                    logger.info('Transmit State is not pending/retrying for queue item: {}. It is in {}'.
                                format(queue_item.get(config.ID_FIELD), queue_item.get('state')))
                    continue

                # update the status of the item to in-progress
                queue_update = {'state': 'in-progress', 'transmit_started_at': utcnow()}
                publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
                logger.info('Transmitting queue item {}'.format(log_msg))

                destination = queue_item['destination']
                transmitter = superdesk.publish.registered_transmitters[destination.get('delivery_type')]
                transmitter.transmit(queue_item)
                logger.info('Transmitted queue item {}'.format(log_msg))
            except Exception as e:
                logger.exception('Failed to transmit queue item {}'.format(log_msg))

                max_retry_attempt = app.config.get('MAX_TRANSMIT_RETRY_ATTEMPT')
                retry_attempt_delay = app.config.get('TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES')
                try:
                    orig_item = publish_queue_service.find_one(req=None, _id=queue_item['_id'])
                    updates = {config.LAST_UPDATED: utcnow()}

                    if orig_item.get('retry_attempt', 0) < max_retry_attempt and \
                            not isinstance(e, PublishHTTPPushClientError):

                        updates['retry_attempt'] = orig_item.get('retry_attempt', 0) + 1
                        updates['state'] = QueueState.RETRYING.value
                        updates['next_retry_attempt_at'] = utcnow() + timedelta(minutes=retry_attempt_delay)
                    else:
                        # all retry attempts exhausted marking the item as failed.
                        updates['state'] = QueueState.FAILED.value

                    publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
                except Exception:
                    logger.error('Failed to set the state for failed publish queue item {}.'.format(queue_item['_id']))
    finally:
        unlock(lock_name)
Example #13
def update_provider(provider, rule_set=None, routing_scheme=None):
    """Fetch items from ingest provider, ingest them into Superdesk and update the provider.

    :param provider: Ingest Provider data
    :param rule_set: Translation Rule Set if one is associated with Ingest Provider.
    :param routing_scheme: Routing Scheme if one is associated with Ingest Provider.
    """
    lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])

    if not lock(lock_name, expire=1810):
        return

    try:
        feeding_service = registered_feeding_services[provider['feeding_service']]
        feeding_service = feeding_service.__class__()

        update = {LAST_UPDATED: utcnow()}

        for items in feeding_service.update(provider, update):
            ingest_items(items, provider, feeding_service, rule_set, routing_scheme)
            if items:
                last_item_update = max(
                    [item['versioncreated'] for item in items if item.get('versioncreated')],
                    default=utcnow()
                )
                if not update.get(LAST_ITEM_UPDATE) or update[LAST_ITEM_UPDATE] < last_item_update:
                    update[LAST_ITEM_UPDATE] = last_item_update

        # Some Feeding Services update the collection and by this time the _etag might have been changed.
        # So it's necessary to fetch it once again. Otherwise, OriginalChangedError is raised.
        ingest_provider_service = superdesk.get_resource_service('ingest_providers')
        provider = ingest_provider_service.find_one(req=None, _id=provider[superdesk.config.ID_FIELD])
        ingest_provider_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)

        if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
            admins = superdesk.get_resource_service('users').get_users_by_user_type('administrator')
            notify_and_add_activity(
                ACTIVITY_EVENT,
                'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
                resource='ingest_providers', user_list=admins, name=provider.get('name'),
                last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))

        logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))

        if LAST_ITEM_UPDATE in update:  # Only push a notification if there has been an update
            push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
    except Exception as e:
        logger.error("Failed to ingest file: {error}".format(error=e))
        raise IngestFileError(3000, e, provider)
    finally:
        unlock(lock_name)
Example #14
    def create(self, docs, **kwargs):
        guid_of_item_to_be_moved = request.view_args['guid']
        guid_of_moved_items = []

        # set the lock_id per item
        lock_id = "item_move {}".format(guid_of_item_to_be_moved)

        if not lock(lock_id, expire=5):
            raise SuperdeskApiError.forbiddenError(message="Item is locked for move by another user.")

        try:
            # doc represents the target desk and stage
            doc = docs[0]
            moved_item = self.move_content(guid_of_item_to_be_moved, doc)
            guid_of_moved_items.append(moved_item.get(config.ID_FIELD))

            if moved_item.get('type', None) == 'composite' and doc.get('allPackageItems', False):
                try:
                    item_lock_id = None
                    for item_id in (ref[RESIDREF] for group in moved_item.get(GROUPS, [])
                                    for ref in group.get(REFS, []) if RESIDREF in ref):
                        item_lock_id = "item_move {}".format(item_id)
                        if lock(item_lock_id, expire=5):
                            item = self.move_content(item_id, doc)
                            guid_of_moved_items.append(item.get(config.ID_FIELD))
                            unlock(item_lock_id, remove=True)
                            item_lock_id = None
                        else:
                            item_lock_id = None
                finally:
                    if item_lock_id:
                        unlock(item_lock_id, remove=True)

            return guid_of_moved_items
        finally:
            unlock(lock_id, remove=True)
Example #15
    def run(self):
        """Fetches items from publish queue as per the configuration, calls the transmit function.
        """
        lock_name = get_lock_id('publish', 'enqueue_published')
        if not lock(lock_name, expire=310):
            logger.info('Enqueue Task: {} is already running.'.format(lock_name))
            return

        try:
            items = get_published_items()

            if len(items) > 0:
                enqueue_items(items)
        finally:
            unlock(lock_name)
Example #16
    def remove_expired(self, provider):
        lock_name = 'ingest:gc'

        if not lock(lock_name, expire=300):
            return

        try:
            remove_expired_data(provider)
            push_notification('ingest:cleaned')
        except Exception as err:
            logger.exception(err)
            raise ProviderError.expiredContentError(err, provider)
        finally:
            unlock(lock_name)
Example #17
    def run(self):
        now = utcnow()
        expiry_time_log_msg = 'Expiry Time: {}.'.format(now)
        logger.info('{} Starting to remove expired content.'.format(expiry_time_log_msg))
        lock_name = get_lock_id('archive', 'remove_expired')
        if not lock(lock_name, '', expire=600):
            logger.info('{} Remove expired content task is already running.'.format(expiry_time_log_msg))
            return
        try:
            logger.info('{} Removing expired content.'.format(expiry_time_log_msg))
            self._remove_expired_items(now, expiry_time_log_msg)
        finally:
            unlock(lock_name, '')

        push_notification('content:expired')
        logger.info('{} Completed remove expired content.'.format(expiry_time_log_msg))
Example #18
    def lock(self, item_filter, user_id, session_id, action):
        item_model = get_model(ItemModel)
        item = item_model.find_one(item_filter)

        if not item:
            raise SuperdeskApiError.notFoundError()

        # set the lock_id per item
        lock_id = "item_lock {}".format(item.get(config.ID_FIELD))

        # if the lock cannot be acquired, raise a forbidden error
        if not lock(lock_id, expire=5):
            raise SuperdeskApiError.forbiddenError(message="Item is locked by another user.")

        try:
            can_user_lock, error_message = self.can_lock(item, user_id, session_id)

            if can_user_lock:
                self.app.on_item_lock(item, user_id)
                updates = {LOCK_USER: user_id, LOCK_SESSION: session_id, 'lock_time': utcnow()}
                if action:
                    updates['lock_action'] = action

                item_model.update(item_filter, updates)

                if item.get(TASK):
                    item[TASK]['user'] = user_id
                else:
                    item[TASK] = {'user': user_id}

                superdesk.get_resource_service('tasks').assign_user(item[config.ID_FIELD], item[TASK])
                self.app.on_item_locked(item, user_id)
                item = item_model.find_one(item_filter)
                push_notification('item:lock',
                                  item=str(item.get(config.ID_FIELD)),
                                  item_version=str(item.get(config.VERSION)),
                                  user=str(user_id), lock_time=updates['lock_time'],
                                  lock_session=str(session_id),
                                  _etag=item.get(config.ETAG))
            else:
                raise SuperdeskApiError.forbiddenError(message=error_message)

            item = item_model.find_one(item_filter)
            return item
        finally:
            # unlock the lock :)
            unlock(lock_id, remove=True)
Example #19
def update_provider(self, provider, rule_set=None, routing_scheme=None):
    """
    Fetches items from ingest provider as per the configuration, ingests them into Superdesk and
    updates the provider.
    """
    if provider.get('type') == 'search':
        return

    if not is_updatable(provider):
        return

    lock_name = get_lock_id('ingest', provider['name'], provider[superdesk.config.ID_FIELD])
    host_name = get_host_id(self)

    if not lock(lock_name, host_name, expire=1800):
        return

    try:
        update = {
            LAST_UPDATED: utcnow()
        }

        for items in providers[provider.get('type')].update(provider):
            ingest_items(items, provider, rule_set, routing_scheme)
            stats.incr('ingest.ingested_items', len(items))
            if items:
                update[LAST_ITEM_UPDATE] = utcnow()
        ingest_service = superdesk.get_resource_service('ingest_providers')
        ingest_service.system_update(provider[superdesk.config.ID_FIELD], update, provider)

        if LAST_ITEM_UPDATE not in update and get_is_idle(provider):
            notify_and_add_activity(
                ACTIVITY_EVENT,
                'Provider {{name}} has gone strangely quiet. Last activity was on {{last}}',
                resource='ingest_providers',
                user_list=ingest_service._get_administrators(),
                name=provider.get('name'),
                last=provider[LAST_ITEM_UPDATE].replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%c"))

        logger.info('Provider {0} updated'.format(provider[superdesk.config.ID_FIELD]))
        # Only push a notification if there has been an update
        if LAST_ITEM_UPDATE in update:
            push_notification('ingest:update', provider_id=str(provider[superdesk.config.ID_FIELD]))
    finally:
        unlock(lock_name, host_name)
Example #20
    def create(self, docs, **kwargs):
        guid_of_item_to_be_moved = request.view_args['guid']
        guid_of_moved_items = []

        # set the lock_id per item
        lock_id = "item_move {}".format(guid_of_item_to_be_moved)

        if not lock(lock_id, expire=5):
            raise SuperdeskApiError.forbiddenError(message="Item is locked for move by another user.")

        try:
            # doc represents the target desk and stage
            doc = docs[0]
            moved_item = self.move_content(guid_of_item_to_be_moved, doc)
            guid_of_moved_items.append(moved_item.get(config.ID_FIELD))
            return guid_of_moved_items
        finally:
            unlock(lock_id, remove=True)
Example #21
    def run(self, page_size=None):
        logger.info('Starting to fix expired content.')

        if app.settings.get('PUBLISHED_CONTENT_EXPIRY_MINUTES'):
            self.expiry_minutes = app.settings['PUBLISHED_CONTENT_EXPIRY_MINUTES']

        if page_size:
            self.default_page_size = int(page_size)

        lock_name = get_lock_id('archive', 'fix_expired_content')
        if not lock(lock_name, expire=610):
            logger.info('Fix expired content task is already running.')
            return
        try:
            self.fix_items_expiry()
        finally:
            unlock(lock_name)

        logger.info('Completed fixing expired content.')
Example #22
    def run(self, page_size=None):
        if not is_legal_archive_enabled():
            return
        logger.info('Import to Legal Archive')
        lock_name = get_lock_id('legal_archive', 'import_to_legal_archive')
        page_size = int(page_size) if page_size else self.default_page_size
        if not lock(lock_name, expire=1810):
            return
        try:
            legal_archive_import = LegalArchiveImport()
            # publish_queue = get_resource_service('publish_queue')
            # move the publish item to legal archive.
            expired_items = set()
            for items in self.get_expired_items(page_size):
                for item in items:
                    self._move_to_legal(item.get('item_id'), item.get(config.VERSION), expired_items)

            # get the invalid items from archive.
            for items in get_resource_service(ARCHIVE).get_expired_items(utcnow(), invalid_only=True):
                for item in items:
                    self._move_to_legal(item.get(config.ID_FIELD), item.get(config.VERSION), expired_items)

            # if publish item is moved but publish_queue item is not.
            if expired_items:
                try:
                    for items in legal_archive_import.get_publish_queue_items(page_size, list(expired_items)):
                        legal_archive_import.process_queue_items(items, True)
                except Exception:
                    logger.exception('Failed to import into legal publish queue via command')

            # reset the expiry status
            archive_service = get_resource_service(ARCHIVE)
            for item_id in expired_items:
                try:
                    item = archive_service.find_one(req=None, _id=item_id)
                    if item:
                        archive_service.system_update(item_id, {'expiry_status': ''}, item)
                except Exception:
                    logger.exception('Failed to reset expiry status for item id: {}.'.format(item_id))
        except Exception:
            logger.exception('Failed to import into legal archive.')
        finally:
            unlock(lock_name)
Example #23
    def run(self, input_date, days_to_process, page_size):

        lock_name = 'legal_archive:consistency'
        self.default_page_size = int(page_size)
        days_to_process = int(days_to_process)
        if not lock(lock_name, expire=610):
            logger.warning("Task: {} is already running.".format(lock_name))
            return

        try:
            logger.info('Input Date: {}  ---- Days to Process: {}'.format(input_date, days_to_process))
            self.check_legal_archive_consistency(input_date, days_to_process)
            self.check_legal_archive_version_consistency()
            self.check_legal_archive_queue_consistency()
            logger.info('Completed the legal archive consistency check.')
        except Exception:
            logger.exception("Failed to execute LegalArchiveConsistencyCheckCommand")
        finally:
            unlock(lock_name)
Example #24
    def run(self):
        lock_name = 'schema:migrate'

        if not lock(lock_name, expire=1800):
            return

        try:
            app_schema_version = get_schema_version()
            superdesk_schema_version = app.config.get('SCHEMA_VERSION', superdesk.SCHEMA_VERSION)
            if app_schema_version < superdesk_schema_version:
                print('Updating schema from version {} to {}.'.format(
                    app_schema_version, superdesk_schema_version
                ))
                update_schema()
                set_schema_version(superdesk_schema_version)
            else:
                print('App already at version ({}).'.format(app_schema_version))
        finally:
            unlock(lock_name)
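
get_schema_version() and set_schema_version() are not shown on this page. Assuming the version is kept in a mongo settings collection (a hypothetical sketch, not Superdesk's actual helpers), the bookkeeping could look like:

from pymongo import MongoClient

db = MongoClient().superdesk  # assumed database handle, for illustration only

def get_schema_version():
    doc = db.system_settings.find_one({'_id': 'schema_version'})
    return doc['version'] if doc else 0

def set_schema_version(version):
    db.system_settings.update_one(
        {'_id': 'schema_version'}, {'$set': {'version': version}}, upsert=True)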
Example #25
def publish():
    """
    Fetches items from publish queue as per the configuration,
    calls the transmit function.
    """
    lock_name = get_lock_id("Transmit", "Articles")
    if not lock(lock_name, "", expire=1800):
        logger.info("Task: {} is already running.".format(lock_name))
        return

    try:
        items = list(get_queue_items())

        if len(items) > 0:
            transmit_items(items)

    except Exception:
        logger.exception("Task: {} failed.".format(lock_name))
    finally:
        unlock(lock_name, "")
Example #26
def send_email(self, subject, sender, recipients, text_body, html_body, cc=None, bcc=None):
    _id = get_activity_digest({
        'subject': subject,
        'recipients': recipients,
        'text_body': text_body,
        'html_body': html_body,
        'cc': cc,
        'bcc': bcc,
    })

    lock_id = 'email:%s' % _id
    if not lock(lock_id, expire=120):
        return

    try:
        msg = SuperdeskMessage(subject, sender=sender, recipients=recipients, cc=cc, bcc=bcc,
                               body=text_body, html=html_body)
        return app.mail.send(msg)
    finally:
        unlock(lock_id, remove=True)
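
Here the lock id is derived from a digest of the message itself, so an identical email triggered twice within the 120-second expiry window is sent only once. get_activity_digest is not shown on this page; a hypothetical sketch of such a helper:

import hashlib
import json

def get_activity_digest(data):
    """Stable hash of the payload: identical messages yield the same lock id."""
    payload = json.dumps(data, sort_keys=True, default=str)
    return hashlib.sha1(payload.encode('utf-8')).hexdigest()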
Example #27
def transmit_subscriber_items(self, queue_items, subscriber):
    # Attempt to obtain a lock for transmissions to the subscriber
    lock_name = get_lock_id("Subscriber", "Transmit", subscriber)
    if not lock(lock_name, expire=300):
        return

    for queue_item in queue_items:
        publish_queue_service = get_resource_service(PUBLISH_QUEUE)
        log_msg = '_id: {_id}  item_id: {item_id}  state: {state} ' \
                  'item_version: {item_version} headline: {headline}'.format(**queue_item)
        try:
            # update the status of the item to in-progress
            logger.info('Transmitting queue item {}'.format(log_msg))
            queue_update = {'state': 'in-progress', 'transmit_started_at': utcnow()}
            publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
            destination = queue_item['destination']
            transmitter = superdesk.publish.registered_transmitters[destination.get('delivery_type')]
            transmitter.transmit(queue_item)
            logger.info('Transmitted queue item {}'.format(log_msg))
        except Exception:
            logger.exception('Failed to transmit queue item {}'.format(log_msg))
            max_retry_attempt = app.config.get('MAX_TRANSMIT_RETRY_ATTEMPT')
            retry_attempt_delay = app.config.get('TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES')
            try:
                orig_item = publish_queue_service.find_one(req=None, _id=queue_item['_id'])
                updates = {config.LAST_UPDATED: utcnow()}
                if orig_item.get('retry_attempt', 0) < max_retry_attempt:
                    updates['retry_attempt'] = orig_item.get('retry_attempt', 0) + 1
                    updates['state'] = QueueState.RETRYING.value
                    updates['next_retry_attempt_at'] = utcnow() + timedelta(minutes=retry_attempt_delay)
                else:
                    # all retry attempts exhausted marking the item as failed.
                    updates['state'] = QueueState.FAILED.value

                publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
            except Exception:
                logger.error('Failed to set the state for failed publish queue item {}.'.format(queue_item['_id']))

    # Release the lock for the subscriber
    unlock(lock_name)
Example #28
    def run(self):
        now = utcnow()
        self.log_msg = 'Expiry Time: {}.'.format(now)
        logger.info('{} Starting to remove expired content.'.format(self.log_msg))
        lock_name = get_lock_id('archive', 'remove_expired')

        if not lock(lock_name, expire=610):
            logger.info('{} Remove expired content task is already running.'.format(self.log_msg))
            return

        logger.info('{} Removing expired content.'.format(self.log_msg))
        # both functions should be called even if the first one throws an exception,
        # so they are wrapped with log_exception
        self._remove_expired_publish_queue_items()
        self._remove_expired_items(now)
        unlock(lock_name)

        push_notification('content:expired')
        logger.info('{} Completed remove expired content.'.format(self.log_msg))

        remove_locks()
Example #29
    def run(self, desk=None):
        if desk:
            self.default_desk = desk

        logger.info('Starting to export {} desk legal archive content to archived'.format(self.default_desk))

        lock_name = get_lock_id('legal_archive', 'export_to_archived')
        if not lock(lock_name, expire=610):
            logger.info('Export legal archive to archived task is already running.')
            return

        try:
            list_ids = self._export_to_archived()
        finally:
            unlock(lock_name)

        if list_ids:
            logger.info('Completed exporting {} {} desk documents from legal archive to text archived'.format(
                len(list_ids),
                self.default_desk)
            )
        else:
            logger.info('Completed but nothing was exported...')