Example #1
    def test_no_job_on_revise(self):
        page = create(Builder('page').in_state(EXAMPLE_WF_PUBLISHED))
        Plone().login().visit(page)
        Workflow().do_transition('revise')

        queue = IQueue(self.portal)
        self.assertEquals(0, queue.countJobs())
Example #2
    def test_no_job_when_object_has_no_publisher_workflow(self, browser):
        queue = IQueue(self.portal)
        page = create(Builder('page'))
        browser.login().visit(page, view='delete_confirmation')
        self.assertEquals(0, queue.countJobs())
        browser.click_on('Delete')
        self.assertEquals(0, queue.countJobs())
Example #3
    def test_delete(self):
        page = create(Builder('page').providing(IPreventPublishing))
        queue = IQueue(self.portal)
        self.assertEquals(0, queue.countJobs())

        page.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(0, queue.countJobs())
Example #4
    def test_no_job_on_submit(self):
        page = create(Builder('page'))
        Plone().login().visit(page)
        Workflow().do_transition('submit')

        queue = IQueue(self.portal)
        self.assertEquals(0, queue.countJobs())
Example #5
    def test_no_job_when_object_has_no_publisher_workflow(self):
        queue = IQueue(self.portal)
        page = create(Builder('page'))
        Plone().login().visit(page, 'delete_confirmation')
        self.assertEquals(0, queue.countJobs())
        self.click_delete()
        self.assertEquals(0, queue.countJobs())
Example #6
def upgrade_executed_jobs_storage(portal_setup):
    """The executed jobs storage has changed from PersistentList
    to IOBTree storage, so we need to migrate the storage.

    """
    portal = portal_setup.portal_url.getPortalObject()
    queue = IQueue(portal)
    annotations = IAnnotations(portal)

    if 'publisher-executed' not in annotations:
        # No data to migrate.
        return

    # get jobs directly from the annotations - accessing with
    # queue methods is not possible yet
    jobs = list(annotations.get('publisher-executed', []))

    # drop the current list
    del annotations['publisher-executed']

    # add every single job with the new methods
    for job in jobs:
        queue.append_executed_job(job)

    # check if it worked
    assert len(jobs) == queue.get_executed_jobs_length()
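
Note: the migration above moves executed jobs from a PersistentList into an IOBTree. As a rough illustration of why that matters, here is a minimal, hypothetical sketch of an IOBTree-backed executed-jobs container consistent with the behaviour the storage tests further down exercise (integer keys starting at 1, returned by append_executed_job); it is not the actual ftw.publisher implementation.

from BTrees.IOBTree import IOBTree

class ExecutedJobsStorage(object):
    """Hypothetical IOBTree-backed storage sketched from the queue API
    used in these examples; not the real package implementation."""

    def __init__(self):
        self._jobs = IOBTree()

    def append_executed_job(self, job):
        # Keys are monotonically increasing integers starting at 1.
        key = self._jobs.maxKey() + 1 if len(self._jobs) else 1
        self._jobs[key] = job
        return key

    def get_executed_jobs(self):
        return self._jobs.items()

    def get_executed_jobs_length(self):
        return len(self._jobs)

    def get_executed_job_by_key(self, key):
        return self._jobs[key]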
Example #7
    def test_delete(self):
        page = create(Builder('page').providing(IPreventPublishing))
        queue = IQueue(self.portal)
        self.assertEquals(0, queue.countJobs())

        page.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(0, queue.countJobs())
Example #8
    def test_blocks_are_published_with_contentpage(self):
        page = create(Builder(self.page_builder))
        create(Builder(self.textblock_builder).within(page))

        Plone().login().visit(page)
        Workflow().do_transition("publish")

        queue = IQueue(self.portal)
        self.assertEquals(2, queue.countJobs(), "Expected the page and the text block to be in the queue.")
Example #9
    def __call__(self, event, no_response=False, msg=None):
        if IPreventPublishing.providedBy(self.context):
            return 'prevented'

        if publisher_jobs_are_disabled():
            return 'disabled'

        self.logger = getLogger()
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted():
            self.logger.warning(
                'Could not create move job for blacklisted object (%s at %s)' %
                (self.context.Title(), '/'.join(
                    self.context.getPhysicalPath())))
            if not no_response:
                return self.request.RESPONSE.redirect('./view')
            return False

        # This View should not be executed at the PloneSiteRoot
        if IPloneSiteRoot.providedBy(self.context):
            raise Exception('Not allowed on PloneSiteRoot')

        # get username
        user = self.context.portal_membership.getAuthenticatedMember()
        username = user.getUserName()

        # create Job
        portal = self.context.portal_url.getPortalObject()
        queue = IQueue(portal)

        additional_data = {
            'move_data': {
                'newName': event.newName,
                'newParent': get_site_relative_path(event.newParent),
                'newTitle': event.object.Title().decode('utf-8'),
                'oldName': event.oldName,
                'oldParent': get_site_relative_path(event.oldParent),
            }
        }

        queue.createJob('move',
                        self.context,
                        username,
                        additional_data=additional_data)
        self.logger.debug('Created "%s" Job for "%s" at %s' % (
            'move',
            self.context.Title(),
            '/'.join(self.context.getPhysicalPath()),
        ))

        # status message
        if msg is None:
            msg = _(u'Object move/rename action has been added to the queue.')

        IStatusMessage(self.request).addStatusMessage(msg, type='info')
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
Example #10
    def __call__(self):
        """
        Sets up logging and then calls the execute() method.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag
        if self.config.locking_enabled(
        ) and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except Exception:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log
Example #11
    def test_delete_job_create_when_we_have_a_publisher_workflow(self):
        queue = IQueue(self.portal)
        folder = create(Builder('folder'))
        Plone().login().visit(folder, 'delete_confirmation')
        self.assertEquals(0, queue.countJobs())
        self.click_delete()
        self.assertEquals(1, queue.countJobs())

        job = queue.getJobs()[0]
        self.assertEquals('delete', job.action)
        self.assertEquals('/plone/folder', job.objectPath)
Example #12
    def test_blocks_are_published_with_contentpage(self, browser):
        page = create(Builder(self.page_builder))
        create(Builder(self.textblock_builder).within(page))

        browser.login().visit(page)
        Workflow().do_transition('publish')

        queue = IQueue(self.portal)
        self.assertEquals(
            2, queue.countJobs(),
            'Expected the page and the text block to be in the queue.')
Example #13
    def test_publish_plonesite_enqueues_job(self, browser):
        self.grant('Manager')
        self.assertEquals(0, IQueue(self.portal).countJobs())
        browser.login().open(self.portal, view='@@publisher.publish')
        statusmessages.assert_message('This object has been added to the queue.')
        transaction.begin()

        self.assertEquals(1, IQueue(self.portal).countJobs())
        job = IQueue(self.portal).getJobs()[0]
        self.assertEquals('push', job.action)
        self.assertEquals(self.portal, job.getObject(self.portal))
Example #14
    def __call__(self, no_response=False, msg=None, *args, **kwargs):
        """
        The __call__ method is used to execute the BrowserView. It creates and
        adds a "PUSH"-Job on the current context to the queue.
        @param args:    list of unnamed arguments
        @type args:     list
        @param kwargs:  dict of named keyword-arguments
        @type kwargs:   dict
        @return:        Redirect to the object's default view
        """

        if IPreventPublishing.providedBy(self.context):
            return 'prevented'

        if publisher_jobs_are_disabled():
            return 'disabled'

        self.logger = getLogger()
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted():
            self.logger.warning('Could not create push job for blacklisted '+\
                                    'object (%s at %s)' % (
                    self.context.Title(),
                    '/'.join(self.context.getPhysicalPath())))
            if not no_response:
                return self.request.RESPONSE.redirect('./view')
            return False

        # mle: it is now possible to execute this view on the PloneSiteRoot
        # This View should not be executed at the PloneSiteRoot
        #if IPloneSiteRoot.providedBy(self.context):
        #    raise Exception('Not allowed on PloneSiteRoot')
        # get username
        user = self.context.portal_membership.getAuthenticatedMember()
        username = user.getUserName()
        # create Job
        portal = self.context.portal_url.getPortalObject()
        queue = IQueue(portal)
        queue.createJob('push', self.context, username)
        self.logger.debug('Created "%s" Job for "%s" at %s' % (
                'push',
                self.context.Title(),
                '/'.join(self.context.getPhysicalPath()),
                ))

        # status message
        if msg is None:
            msg = _(u'This object has been added to the queue.')
        IStatusMessage(self.request).addStatusMessage(
            msg,
            type='info'
            )
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
Example #15
    def test_delete(self):
        page = create(Builder('page'))
        queue = IQueue(self.portal)
        self.assertEquals(0, queue.countJobs())

        with publisher_jobs_disabled():
            page.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(0, queue.countJobs())

        page.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(1, queue.countJobs())
Example #16
    def test_delete(self):
        page = create(Builder('page'))
        queue = IQueue(self.portal)
        self.assertEquals(0, queue.countJobs())

        with publisher_jobs_disabled():
            page.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(0, queue.countJobs())

        page.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(1, queue.countJobs())
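
Note: the two tests above toggle job creation with publisher_jobs_disabled(), the counterpart of the publisher_jobs_are_disabled() check used in the views. A minimal sketch of how such a pair could be wired up, assuming a simple module-level flag (the actual package may use a different mechanism, e.g. a thread-local or request annotation):

from contextlib import contextmanager

_JOBS_DISABLED = False  # hypothetical module-level flag


def publisher_jobs_are_disabled():
    return _JOBS_DISABLED


@contextmanager
def publisher_jobs_disabled():
    # Temporarily suppress job creation and restore the previous state on exit.
    global _JOBS_DISABLED
    previous = _JOBS_DISABLED
    _JOBS_DISABLED = True
    try:
        yield
    finally:
        _JOBS_DISABLED = previous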
Example #17
    def test_push_job_in_publisher_queue_after_publishing(self):
        page = create(Builder('page'))
        Plone().login().visit(page)
        Workflow().do_transition('publish')

        queue = IQueue(self.portal)
        self.assertEquals(1, queue.countJobs())

        job = queue.getJobs()[0]
        self.assertEquals('push', job.action)
        self.assertEquals(page, job.getObject(self.portal))
Example #18
    def test_sl_listing_block_publishes_its_children(self):
        page = create(Builder(self.page_builder))
        listing_block = create(Builder(self.listingblock_builder).within(page))
        create(Builder("file").within(listing_block))

        Plone().login().visit(page)
        Workflow().do_transition("publish")

        queue = IQueue(self.portal)
        self.assertEquals(
            3, queue.countJobs(), "Expected the page, the listing block and the file to be" " in the queue."
        )
Example #19
    def test_renaming_object_via_folder_contents_rename_action(self, browser):

        self.grant('Manager')
        folder = create(Builder('folder').titled(u'Folder'))
        with freeze(datetime(2018, 1, 2, 3, 4, 5)):
            page = create(Builder('page').titled(u'The Page').within(folder))

            self.assertEquals(0, IQueue(self.portal).countJobs())

            form_data = {}
            form_data['_authenticator'] = createToken()
            form_data['UID_1'] = IUUID(page)
            form_data['newid_1'] = 'new_id'
            form_data['newtitle_1'] = u'Ch\xe4nged title'

            browser.login().visit(folder, view='@@fc-rename', data=form_data)

        self.assertEquals(1, IQueue(self.portal).countJobs())

        job, = IQueue(self.portal).getJobs()
        self.assertEquals('move', job.action)
        data = job.getData()
        self.assertTrue(data)

        self.maxDiff = None

        expected = {
            u'utf8:metadata': {
                u'utf8:UID': u'utf8:testrenamingobjectviafolde000002',
                u'utf8:action': u'utf8:move',
                u'utf8:id': u'utf8:new_id',
                u'utf8:modified': u'utf8:2018/01/02 03:04:05 GMT+1',
                u'utf8:physicalPath': u'utf8:/folder/new_id',
                u'utf8:portal_type': u'utf8:Document',
                u'utf8:review_state': u'utf8:',
                u'utf8:sibling_positions': {
                    u'utf8:new_id': 0
                }
            },
            u'utf8:move': {
                u'utf8:newName': u'utf8:new_id',
                u'utf8:newParent': u'utf8:/folder',
                u'utf8:newTitle': u'unicode:Ch\xe4nged title',
                u'utf8:oldName': u'utf8:the-page',
                u'utf8:oldParent': u'utf8:/folder'
            }
        }

        if IS_AT_LEAST_PLONE_5_1:
            expected[u'utf8:metadata'][u'utf8:modified'] = u'utf8:{}'.format(
                str(page.modified()).decode('utf-8'))

        self.assertEquals(expected, json.loads(data))
Example #20
    def test_publish_plonesite_enqueues_job(self, browser):
        self.grant('Manager')
        self.assertEquals(0, IQueue(self.portal).countJobs())
        browser.login().open(self.portal, view='@@publisher.publish')
        statusmessages.assert_message(
            'This object has been added to the queue.')
        transaction.begin()

        self.assertEquals(1, IQueue(self.portal).countJobs())
        job = IQueue(self.portal).getJobs()[0]
        self.assertEquals('push', job.action)
        self.assertEquals(self.portal, job.getObject(self.portal))
Example #21
    def test_sl_listing_block_publishes_its_children(self, browser):
        page = create(Builder(self.page_builder))
        listing_block = create(Builder(self.listingblock_builder).within(page))
        create(Builder('file').within(listing_block))

        browser.login().visit(page)
        Workflow().do_transition('publish')

        queue = IQueue(self.portal)
        self.assertEquals(
            3, queue.countJobs(),
            'Expected the page, the listing block and the file to be'
            ' in the queue.')
Example #22
    def test_sl_listing_block_publishes_its_children(self, browser):
        page = create(Builder(self.page_builder))
        listing_block = create(Builder(self.listingblock_builder).within(page))
        create(Builder('file').within(listing_block))

        browser.login().visit(page)
        Workflow().do_transition('publish')

        queue = IQueue(self.portal)
        self.assertEquals(
            3, queue.countJobs(),
            'Expected the page, the listing block and the file to be'
            ' in the queue.')
Example #23
    def test_delete_job_create_when_parent_has_publisher_workflow(self):
        queue = IQueue(self.portal)
        folder = create(Builder('folder'))
        page = create(Builder('page').within(folder))

        Plone().login().visit(page, 'delete_confirmation')
        self.assertEquals(0, queue.countJobs())
        self.click_delete()
        self.assertEquals(1, queue.countJobs())

        job = queue.getJobs()[0]
        self.assertEquals('delete', job.action)
        self.assertEquals('/plone/folder/document', job.objectPath)
Example #24
    def __call__(self, no_response=False, msg=None, *args, **kwargs):
        """
        Adds the current context as a delete job to the queue, creates a status
        message to inform the user, and returns to the default view.
        @param args:    list of unnamed arguments
        @type args:     list
        @param kwargs:  dict of named keyword-arguments
        @type kwargs:   dict
        @return:        Redirect to the object's default view
        """

        if IPreventPublishing.providedBy(self.context):
            return 'prevented'

        if publisher_jobs_are_disabled():
            return 'disabled'

        self.logger = getLogger()
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted():
            self.logger.warning('Could not create delete job for blacklisted '
                                'object (%s at %s)' % (
                    self.context.Title(),
                    '/'.join(self.context.getPhysicalPath())))
            if not no_response:
                return self.request.RESPONSE.redirect('./view')
            return False

        # This view should not be executed at the PloneSiteRoot
        if IPloneSiteRoot.providedBy(self.context):
            raise Exception('Not allowed on PloneSiteRoot')
        # get username
        user = self.context.portal_membership.getAuthenticatedMember()
        username = user.getUserName()
        # create Job
        portal = self.context.portal_url.getPortalObject()
        queue = IQueue(portal)
        queue.createJob('delete', self.context, username)
        self.logger.debug('Created "%s" Job for "%s" at %s' % (
                'delete',
                self.context.Title(),
                '/'.join(self.context.getPhysicalPath()),
                ))

        # status message
        if msg is None:
            msg = _(u'This object will be deleted at the remote sites.')
        add_transaction_aware_status_message(self.request, msg, type='info')

        if not no_response:
            return self.request.RESPONSE.redirect('./view')
Example #25
    def __call__(self, no_response=False, msg=None, recursive=True):
        if IPreventPublishing.providedBy(self.context):
            return 'prevented'

        if publisher_jobs_are_disabled():
            return 'disabled'

        self.logger = getLogger()
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted():
            self.logger.warning(
                'Could not create push job for blacklisted object (%s at %s)' %
                (self.context.Title(), '/'.join(
                    self.context.getPhysicalPath())))
            if not no_response:
                return self.request.RESPONSE.redirect('./view')
            return False

        event.notify(BeforePublishEvent(self.context))

        # mle: it is now possible to execute this view on the PloneSiteRoot
        # This View should not be executed at the PloneSiteRoot
        # if IPloneSiteRoot.providedBy(self.context):
        #    raise Exception('Not allowed on PloneSiteRoot')
        # get username
        user = self.context.portal_membership.getAuthenticatedMember()
        username = user.getUserName()

        # create Job
        portal = self.context.portal_url.getPortalObject()
        queue = IQueue(portal)
        queue.createJob('push', self.context, username)
        self.logger.debug('Created "%s" Job for "%s" at %s' % (
            'push',
            self.context.Title(),
            '/'.join(self.context.getPhysicalPath()),
        ))

        if recursive and base_hasattr(self.context, 'contentValues'):
            # Use contentValues for implicit ftw.trash compatibility.
            for obj in filter(belongs_to_parent, self.context.contentValues()):
                obj.restrictedTraverse('@@publisher.publish')(no_response=True,
                                                              msg=msg)

        # status message
        if msg is None:
            msg = _(u'This object has been added to the queue.')
        IStatusMessage(self.request).addStatusMessage(msg, type='info')
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
Example #26
    def test_delete_folder_enqueues_job(self, browser):
        self.grant('Manager')
        folder = create(Builder('folder'))
        transaction.commit()

        self.assertEquals(0, IQueue(self.portal).countJobs())
        browser.login().open(folder, view='@@publisher.delete')
        statusmessages.assert_message('This object will be deleted at the remote sites.')
        transaction.begin()

        self.assertEquals(1, IQueue(self.portal).countJobs())
        job = IQueue(self.portal).getJobs()[0]
        self.assertEquals('delete', job.action)
        self.assertEquals(folder, job.getObject(self.portal))
Example #27
    def test_renaming_object_via_object_rename_action(self, browser):

        self.grant('Manager')
        folder = create(Builder('folder').titled(u'Folder'))
        with freeze(datetime(2018, 1, 2, 3, 4, 5)):
            page = create(Builder('page').titled(u'The Page').within(folder))

            self.assertEquals(0, IQueue(self.portal).countJobs())
            browser.login().open(page).click_on('Rename')
            browser.fill({'form.widgets.new_id': 'new_id'}).submit()
            statusmessages.assert_message(
                'Object move/rename action has been added to the queue.')

        self.assertEquals(1, IQueue(self.portal).countJobs())

        job, = IQueue(self.portal).getJobs()
        self.assertEquals('move', job.action)
        data = job.getData()
        self.assertTrue(data)

        self.maxDiff = None

        expected = {
            u'utf8:metadata': {
                u'utf8:UID': u'utf8:testrenamingobjectviaobjec000002',
                u'utf8:action': u'utf8:move',
                u'utf8:id': u'utf8:new_id',
                u'utf8:modified': u'utf8:2018/01/02 03:04:05 GMT+1',
                u'utf8:physicalPath': u'utf8:/folder/new_id',
                u'utf8:portal_type': u'utf8:Document',
                u'utf8:review_state': u'utf8:',
                u'utf8:sibling_positions': {
                    u'utf8:new_id': 0
                }
            },
            u'utf8:move': {
                u'utf8:newName': u'utf8:new_id',
                u'utf8:newParent': u'utf8:/folder',
                u'utf8:newTitle': u'unicode:The Page',
                u'utf8:oldName': u'utf8:the-page',
                u'utf8:oldParent': u'utf8:/folder'
            }
        }

        if IS_AT_LEAST_PLONE_5_1:
            expected[u'utf8:metadata'][u'utf8:modified'] = u'utf8:{}'.format(
                str(page.modified()).decode('utf-8'))

        self.assertEquals(expected, json.loads(data))
Example #28
    def test_delete_folder_enqueues_job(self, browser):
        self.grant('Manager')
        folder = create(Builder('folder'))
        transaction.commit()

        self.assertEquals(0, IQueue(self.portal).countJobs())
        browser.login().open(folder, view='@@publisher.delete')
        statusmessages.assert_message(
            'This object will be deleted at the remote sites.')
        transaction.begin()

        self.assertEquals(1, IQueue(self.portal).countJobs())
        job = IQueue(self.portal).getJobs()[0]
        self.assertEquals('delete', job.action)
        self.assertEquals(folder, job.getObject(self.portal))
Example #29
    def setUp(self):
        super(TestEventhandler, self).setUp()

        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        self.folder = self.portal.get(self.portal.invokeFactory(
                'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)

        mtool = getToolByName(self.portal, 'portal_membership')
        self.user = mtool.getMemberById(TEST_USER_ID)

        self.config = IMonitorConfigurationSchema(self.portal)
        self.config.set_enabled(True)
        self.config.set_receivers_plain('*****@*****.**')

        self.notifier_class = self.stub_interface(IMonitorNotifier)
        self.notifier = self.mock_interface(IMonitorNotifier)
        self.expect(self.notifier_class(self.portal)).result(
            self.notifier)

        provideAdapter(factory=self.notifier_class,
                       provides=IMonitorNotifier,
                       adapts=(IPloneSiteRoot,))
Example #30
    def assert_jobs(self, *expected):
        """Example:
        self.assert_jobs(('push', 'page'), ('push', 'textblock'))
        """
        got = []
        for job in IQueue(self.portal).getJobs():
            got.append((job.action, job.getObject(self.portal).getId()))

        self.assertEqual(list(expected), list(got))
Example #31
    def __call__(self, no_response=False, msg=None):
        if IPreventPublishing.providedBy(self.context):
            return 'prevented'

        if publisher_jobs_are_disabled():
            return 'disabled'

        self.logger = getLogger()
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted():
            self.logger.warning('Could not create delete job for blacklisted '
                                'object (%s at %s)' %
                                (self.context.Title(), '/'.join(
                                    self.context.getPhysicalPath())))
            if not no_response:
                return self.request.RESPONSE.redirect('./view')
            return False

        # This view should not be executed at the PloneSiteRoot
        if IPloneSiteRoot.providedBy(self.context):
            raise Exception('Not allowed on PloneSiteRoot')

        # get username
        user = self.context.portal_membership.getAuthenticatedMember()
        username = user.getUserName()

        # create Job
        portal = self.context.portal_url.getPortalObject()
        queue = IQueue(portal)
        queue.createJob('delete', self.context, username)
        self.logger.debug('Created "%s" Job for "%s" at %s' % (
            'delete',
            self.context.Title(),
            '/'.join(self.context.getPhysicalPath()),
        ))

        # status message
        if msg is None:
            msg = _(u'This object will be deleted at the remote sites.')
        add_transaction_aware_status_message(self.request, msg, type='info')

        if not no_response:
            return self.request.RESPONSE.redirect('./view')
Example #32
    def execute_single_job(self, job):
        """ Executes a single job without calling the view
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)
Example #33
    def get_options(self):
        """Returns a `dict` of data needed for rendering the mail template.
        """
        config = self.get_configuration()
        last_date = get_last_notification_date()
        queue = IQueue(self.context)
        data = {'success': 0,
                'warning': 0,
                'error': 0,
                'total': 0,
                'jobs_in_queue': 0,
                'erroneous_jobs': [],
                'show_details': config.detailed_report,
                'subject': self.get_subject(),
                'portal': self.context}

        # count the jobs by group and total
        for _key, job in queue.get_executed_jobs():
            # get the runs
            runs = getattr(job, 'executed_list', None)
            if not runs or len(runs) == 0:
                continue

            # was it published since last notification?
            if last_date and runs[-1]['date'] < last_date:
                continue

            # count it
            state = job.get_latest_executed_entry()
            if isinstance(state, states.ErrorState):
                data['error'] += 1
                data['erroneous_jobs'].append(job)
            elif isinstance(state, states.WarningState):
                data['warning'] += 1
            if isinstance(state, states.SuccessState):
                data['success'] += 1
            data['total'] += 1

        # get the amount of jobs in the queue
        data['jobs_in_queue'] = queue.countJobs()

        return data
Example #34
    def __call__(self):
        """
        Sets up logging and then calls the execute() method.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag
        if self.config.locking_enabled() and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled(): self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled(): self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log
Example #35
    def test_moving_object_job_data(self, browser):
        self.grant('Manager')
        source = create(Builder('folder').titled(u'Source'))
        target = create(Builder('folder').titled(u'Target'))
        with freeze(datetime(2018, 1, 2, 3, 4, 5)):
            page = create(Builder('page').titled(u'The Page').within(source))

        self.assertEquals(0, IQueue(self.portal).countJobs())
        browser.login().open(page).click_on('Cut').open(target).click_on('Paste')
        statusmessages.assert_message(
            'Object move/rename action has been added to the queue.')

        self.assertEquals(1, IQueue(self.portal).countJobs())

        job, = IQueue(self.portal).getJobs()
        self.assertEquals('move', job.action)
        data = job.getData()
        self.assertTrue(data)

        self.maxDiff = None

        expected = {u'utf8:metadata': {
            u'utf8:UID': u'utf8:testmovingobjectjobdata000000003',
            u'utf8:action': u'utf8:move',
            u'utf8:id': u'utf8:the-page',
            u'utf8:modified': u'utf8:2018/01/02 03:04:05 GMT+1',
            u'utf8:physicalPath': u'utf8:/target/the-page',
            u'utf8:portal_type': u'utf8:Document',
            u'utf8:review_state': u'utf8:',
            u'utf8:sibling_positions': {u'utf8:the-page': 0}},
         u'utf8:move': {
             u'utf8:newName': u'utf8:the-page',
             u'utf8:newParent': u'utf8:/target',
             u'utf8:newTitle': u'unicode:The Page',
             u'utf8:oldName': u'utf8:the-page',
             u'utf8:oldParent': u'utf8:/source'}}

        self.assertEquals(
            expected,
            json.loads(data))
Example #36
    def test_no_delete_jobs_for_blocks(self):
        """When a block is deleted, we do not want to instantly delete
        the block on the receiver side.
        Instead the block is deleted automatically when the page is published
        because it is no longer listed in the page state.
        """

        page = create(Builder(self.page_builder))
        textblock = create(Builder(self.textblock_builder).within(page))

        self.assertEquals(0, IQueue(self.portal).countJobs())
        page.manage_delObjects([textblock.getId()])
        self.assertEquals(
            0,
            IQueue(self.portal).countJobs(),
            'Deleting an ftw.simplelayout block'
            ' should not publish a delete job.')

        self.portal.manage_delObjects([page.getId()])
        self.assertEquals(
            1,
            IQueue(self.portal).countJobs(),
            'Deleting an ftw.simplelayout page'
            ' should still add a delete job.')
Example #37
    def setUp(self):
        super(TestEmailNotification, self).setUp()

        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        self.folder = self.portal.get(self.portal.invokeFactory(
                'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)
        self.realm = Realm(True, 'http://*****:*****@user.com')
        self.notifier_config.set_interval('hourly')

        # configure mail settings
        properties_tool = getToolByName(self.portal, 'portal_properties')
        properties_tool.email_from_name = 'Plone'
        properties_tool.email_from_address = '*****@*****.**'

        # patch MailHost
        self.mail_host = self.stub()
        self.mock_tool(self.mail_host, 'MailHost')
        self.mails = []
        self.expect(self.mail_host.send(ARGS, KWARGS)).call(
            lambda *args, **kwargs: self.mails.append((args, kwargs)))
        self.expect(self.mail_host.secureSend(ARGS, KWARGS)).call(
            lambda *args, **kwargs: self.mails.append((args, kwargs)))

        # mock datetime.now
        self.now = datetime.datetime(2010, 1, 2, 3, 4, 5)

        self._ori_datetime = datetime.datetime
        dt = self.mocker.proxy(datetime.datetime, count=False)
        self.expect(dt.now()).call(lambda: self.now).count(0, None)
        datetime.datetime = dt

        self.replay()
Example #38
    def execute_single_job(self, job):
        """ Executes a single job without calling the view
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)
Example #39
    def __init__(self, *args, **kwargs):
        super(PublisherConfigletView, self).__init__(*args, **kwargs)
        self.config = IConfig(self.context)
        self.queue = IQueue(self.context)
Example #40
class TestEmailNotification(MockTestCase):

    layer = MAILREPORT_FUNCTIONAL_TESTING

    def setUp(self):
        super(TestEmailNotification, self).setUp()

        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        self.folder = self.portal.get(self.portal.invokeFactory(
                'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)
        self.realm = Realm(True, 'http://*****:*****@user.com')
        self.notifier_config.set_interval('hourly')

        # configure mail settings
        properties_tool = getToolByName(self.portal, 'portal_properties')
        properties_tool.email_from_name = 'Plone'
        properties_tool.email_from_address = '*****@*****.**'

        # patch MailHost
        self.mail_host = self.stub()
        self.mock_tool(self.mail_host, 'MailHost')
        self.mails = []
        self.expect(self.mail_host.send(ARGS, KWARGS)).call(
            lambda *args, **kwargs: self.mails.append((args, kwargs)))
        self.expect(self.mail_host.secureSend(ARGS, KWARGS)).call(
            lambda *args, **kwargs: self.mails.append((args, kwargs)))

        # mock datetime.now
        self.now = datetime.datetime(2010, 1, 2, 3, 4, 5)

        self._ori_datetime = datetime.datetime
        dt = self.mocker.proxy(datetime.datetime, count=False)
        self.expect(dt.now()).call(lambda: self.now).count(0, None)
        datetime.datetime = dt

        self.replay()

    def tearDown(self):
        datetime.datetime = self._ori_datetime
        setRoles(self.portal, TEST_USER_ID, ['Member'])

        super(TestEmailNotification, self).tearDown()

    def suppose_job_was_executed(self, successful=False, error=False,
                                 warning=False):
        """ Adds a job to the "executed" list
        """

        if error:
            response = UIDPathMismatchError()
        elif warning:
            response = ObjectNotFoundForMovingWarning()
        elif successful:
            response = ObjectUpdatedState()
        else:
            raise TypeError('suppose_job_was_executed expects one '
                            'positive keyword argument')

        job = Job('push', self.folder, self.user)
        job.executed_with_states({
                'date': datetime.datetime.now(),
                self.realm: response})
        self.queue.append_executed_job(job)
        return job

    def set_time(self, hour, minute=None):
        if minute is None:
            minute = hour
        self.now = datetime.datetime(2010, 12, 27, hour, minute)

    def get_normalize_statistics_table_from_message(self, message):
        message = pq(str(message))
        statistics_table = message('table:first').html()
        # "normalize" whitspace
        statistics_table = re.sub('\s{1,}', ' ', statistics_table)
        statistics_table = statistics_table.replace('> <', '><')
        return statistics_table

    def test_report_sent_after_executing_queue(self):
        self.set_time(1)
        utils.set_last_notification_date_to_now()

        self.set_time(2)
        self.suppose_job_was_executed(successful=True)
        self.suppose_job_was_executed(successful=True)
        self.suppose_job_was_executed(error=True)
        self.suppose_job_was_executed(warning=True)
        self.suppose_job_was_executed(warning=True)
        self.suppose_job_was_executed(warning=True)

        self.set_time(3)
        self.assertTrue(utils.is_interval_expired())

        self.portal.restrictedTraverse('@@publisher.executeQueue')()

        self.assertEqual(len(self.mails), 1)
        args, kwargs = self.mails.pop()

        self.assertEqual(kwargs.get('mfrom'), '*****@*****.**')
        self.assertEqual(kwargs.get('mto'), '*****@*****.**')
        self.assertEqual(kwargs.get('subject'),
                         u'Publisher report: Plone site')

        statistics_table = self.get_normalize_statistics_table_from_message(
            args[0])

        self.assertIn('<tr><th>Successfull jobs:</th><td>2</td></tr>',
                      statistics_table)

        self.assertIn('<tr><th>Jobs with warning:</th><td>3</td></tr>',
                      statistics_table)

        self.assertIn('<tr><th>Jobs with errors:</th><td>1</td></tr>',
                      statistics_table)

        self.assertIn('<tr><th>Total executed jobs:</th><td>6</td></tr>',
                      statistics_table)

    def test_report_does_only_contain_new_jobs(self):
        self.set_time(1)
        self.suppose_job_was_executed(successful=True)
        self.suppose_job_was_executed(successful=True)

        self.set_time(2)
        utils.set_last_notification_date_to_now()

        self.set_time(3)
        self.suppose_job_was_executed(successful=True)

        self.set_time(4)
        self.assertTrue(utils.is_interval_expired())
        self.portal.restrictedTraverse('@@publisher.executeQueue')()

        self.assertEqual(len(self.mails), 1)
        args, kwargs = self.mails.pop()

        statistics_table = self.get_normalize_statistics_table_from_message(
            args[0])

        self.assertIn('<tr><th>Total executed jobs:</th><td>1</td></tr>',
                      statistics_table)

    def test_report_is_sent_to_each_receivers(self):
        self.notifier_config.set_receivers_plain('\n'.join((
                    '*****@*****.**',
                    '*****@*****.**')))

        self.set_time(1)
        self.suppose_job_was_executed(successful=True)

        self.set_time(2)
        utils.set_last_notification_date_to_now()

        self.set_time(3)
        self.assertTrue(utils.is_interval_expired())
        self.portal.restrictedTraverse('@@publisher.executeQueue')()

        self.assertEqual(len(self.mails), 2)

        # we pop in reverse order, therefore we test in the opposite order
        # from how it is configured.
        args, kwargs = self.mails.pop()
        self.assertEqual(kwargs.get('mto'), '*****@*****.**')

        args, kwargs = self.mails.pop()
        self.assertEqual(kwargs.get('mto'), '*****@*****.**')
Example #41
    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)
Example #42
class TestStorage(FunctionalTestCase):

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(
            ['push'],
            [job.action for job in self.queue.getJobs()]
        )

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(
            'obj 5',
            self.queue.get_executed_job_by_key(6)
        )
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual(
            [(1, 'obj 0'), (2, 'obj 1')],
            list(self.queue.get_executed_jobs(start=0, end=2))
        )
        self.assertEqual(
            [(5, 'obj 4')],
            list(self.queue.get_executed_jobs(start=4, end=5))
        )
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(entry_to_delete not in tuple(self.queue.get_executed_jobs()))
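
Note: taken together, the storage tests above exercise most of the queue API used throughout these examples. The following interface sketch merely collects those method names for reference; it is inferred from the calls on this page and is not the canonical ftw.publisher declaration.

from zope.interface import Interface


class IQueueSketch(Interface):
    """Queue methods exercised by the examples on this page (inferred)."""

    def createJob(action, obj, username, additional_data=None):
        """Create a job for `obj` and append it to the pending queue."""

    def countJobs():
        """Return the number of pending jobs."""

    def getJobs():
        """Return the pending jobs."""

    def removeJob(job):
        """Remove a pending job."""

    def move_to_worker_queue():
        """Move pending jobs to the worker queue before execution."""

    def get_worker_queue():
        """Return the worker queue."""

    def popJob():
        """Pop the next job from the worker queue."""

    def append_executed_job(job):
        """Store an executed job and return its integer key."""

    def get_executed_jobs(start=None, end=None):
        """Iterate over (key, job) tuples, optionally batched."""

    def get_executed_jobs_length():
        """Return the number of stored executed jobs."""

    def get_executed_job_by_key(key):
        """Return the executed job stored under `key`."""

    def remove_executed_job(key_or_job):
        """Remove a single executed job."""

    def remove_executed_jobs_older_than(date):
        """Remove executed jobs whose last run is older than `date`."""

    def remove_jobs_by_filter(predicate):
        """Remove executed entries for which predicate(key, job) is true."""

    def clear_executed_jobs():
        """Remove all executed jobs."""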
Example #43
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """
    def execute_single_job(self, job):
        """ Executes a single job without calling the view
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Sets up logging and then calls the execute() method.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag
        if self.config.locking_enabled(
        ) and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except Exception:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms
        @rtype: list
        """
        if '_activeRealms' not in dir(self):
            self._activeRealms = [
                r for r in self.config.getRealms() if r.active
            ]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
        ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except Exception:
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
            ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                ))
            return False

        # get data from the cache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
            job.action,
            objTitle,
            job.objectPath,
            job.objectUID,
        ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (realm.url, ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                    ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent
        obj = uuidToObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))
Example #44
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """

    def execute_single_job(self, job):
        """ Executes a single job without calling the view
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Sets up logging and then calls the execute() method.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag
        if self.config.locking_enabled() and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled(): self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled(): self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms
        @rtype: list
        """
        if '_activeRealms' not in dir(self):
            self._activeRealms = [r for r in self.config.getRealms()
                                  if r.active]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
            ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except:
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                    ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                    ))
            return False

        # get data from the cache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
                ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (
                    realm.url,
                    ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                        ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent
        reference_catalog = getToolByName(self.context, 'reference_catalog')
        obj = reference_catalog.lookupObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))
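
The view above is normally triggered over HTTP, but it can also be run from trusted code, which is exactly what the monitoring tests further down do. A minimal sketch, assuming `portal` is the Plone site root (the view name and the disabled-publishing return value are taken from the code and tests in this document):

# Minimal sketch: trigger a queue run from trusted code and inspect the
# log text returned by ExecuteQueue.__call__().  Assumes `portal` is the
# Plone site root.
log = portal.unrestrictedTraverse('@@publisher.executeQueue')()
if 'PUBLISHING IS DISABLED' in log:
    # publishing_enabled() was False in the publisher configuration
    pass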
Example #45
0
class TestEventhandler(MockTestCase):

    layer = MONITOR_FUNCTIONAL_TESTING

    def setUp(self):
        super(TestEventhandler, self).setUp()

        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        self.folder = self.portal.get(self.portal.invokeFactory(
                'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)

        mtool = getToolByName(self.portal, 'portal_membership')
        self.user = mtool.getMemberById(TEST_USER_ID)

        self.config = IMonitorConfigurationSchema(self.portal)
        self.config.set_enabled(True)
        self.config.set_receivers_plain('*****@*****.**')

        self.notifier_class = self.stub_interface(IMonitorNotifier)
        self.notifier = self.mock_interface(IMonitorNotifier)
        self.expect(self.notifier_class(self.portal)).result(
            self.notifier)

        provideAdapter(factory=self.notifier_class,
                       provides=IMonitorNotifier,
                       adapts=(IPloneSiteRoot,))

    def tearDown(self):
        sm = getGlobalSiteManager()
        sm.unregisterAdapter(factory=self.notifier_class,
                             provided=IMonitorNotifier,
                             required=(IPloneSiteRoot,))
        super(TestEventhandler, self).tearDown()

    def stub_current_queue_length(self, amount_of_jobs):
        while self.queue.countJobs() > 0:
            self.queue.popJob()

        for _i in range(amount_of_jobs):
            # Remove acquisition wrapper from "self.user" in order to
            # prevent the following error:
            #   TypeError: Can't pickle objects in acquisition wrappers.
            self.queue.createJob('push', self.folder, aq_base(self.user))

    def test_eventhandler_calls_notifier(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)
        event = self.create_dummy(queue=self.queue)

        self.expect(self.notifier(ANY, ANY))
        self.replay()

        invoke_notification(self.portal, event)

    def test_adapter_called_after_queue_execution(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        self.expect(self.notifier(ANY, ANY))
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_not_called_when_monitoring_disabled(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        self.config.set_enabled(False)

        self.expect(self.notifier(ANY, ANY)).count(0)
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_not_called_when_threshold_not_reached(self):
        self.config.set_threshold(10)
        self.stub_current_queue_length(1)

        self.expect(self.notifier(ARGS, KWARGS)).count(0)
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_is_called_even_when_publishing_disabled(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        config = IConfig(self.portal)
        config.set_publishing_enabled(False)

        self.expect(self.notifier(ARGS, KWARGS))
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()
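
The mocks above only pin down the call shape: a notifier factory is adapted from the Plone site root and the resulting notifier is called with two positional arguments. A minimal sketch of a concrete notifier under those assumptions (treating the two arguments as the queue and the monitor configuration is an assumption, as is the logging body; neither is taken from the package):

class LoggingNotifier(object):
    """Sketch of a notifier matching the call shape asserted above."""

    def __init__(self, portal):
        # adapted from the Plone site root, as in the test registration
        self.portal = portal

    def __call__(self, queue, config):
        # argument names are assumptions; the tests only assert a
        # two-argument call such as notifier(ANY, ANY)
        print('Publisher queue currently holds %d job(s)' % queue.countJobs())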
Example #46
0
    def __call__(self, event, no_response=False, msg=None, *args, **kwargs):
        """
        Creates a "rename" job for the current item(s)
        @param args:    list of unnamed arguments
        @type args:     list
        @param kwargs:  dict of named keyword-arguments
        @type kwargs:   dict
        @return:        Redirect to object`s default view
        """

        if IPreventPublishing.providedBy(self.context):
            return 'prevented'

        if publisher_jobs_are_disabled():
            return 'disabled'

        self.logger = getLogger()
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted():
            self.logger.warning(
                'Could not create move job for blacklisted object (%s at %s)' % (
                    self.context.Title(),
                    '/'.join(self.context.getPhysicalPath())))
            if not no_response:
                return self.request.RESPONSE.redirect('./view')
            return False

        # This View should not be executed at the PloneSiteRoot
        if IPloneSiteRoot.providedBy(self.context):
            raise Exception('Not allowed on PloneSiteRoot')
        # get username
        user = self.context.portal_membership.getAuthenticatedMember()
        username = user.getUserName()
        # create Job
        portal = self.context.portal_url.getPortalObject()
        queue = IQueue(portal)

        additional_data = {'move_data': {
            'newName': event.newName,
            'newParent': get_site_relative_path(event.newParent),
            'newTitle': event.object.Title().decode('utf-8'),
            'oldName': event.oldName,
            'oldParent': get_site_relative_path(event.oldParent),
        }}

        queue.createJob('move', self.context, username,
                        additional_data=additional_data)
        self.logger.debug('Created "%s" Job for "%s" at %s' % (
            'move',
            self.context.Title(),
            '/'.join(self.context.getPhysicalPath()),
        ))
        # status message
        if msg is None:
            msg = _(u'Object move/rename action has been added to the queue.')

        IStatusMessage(self.request).addStatusMessage(
            msg,
            type='info'
        )
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
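
Because the handler accepts `no_response` and a custom `msg`, it can also be driven from other code without issuing the redirect. A minimal sketch under that assumption; `move_view` (an instance of this handler) and `event` (the corresponding object-moved event) are placeholders, not names from the package:

# Sketch: call the move handler programmatically, skipping the redirect.
# `move_view` and `event` are placeholders; only the signature and the
# False return value for blacklisted objects come from the code above.
result = move_view(event, no_response=True,
                   msg=u'Move has been queued for publishing.')
if result is False:
    # the object was blacklisted, so no 'move' job was created
    pass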
Example #47
0
 def getQueueSize(self):
     return IQueue(self.context).countJobs()
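
The same counter can be read anywhere the portal is at hand; a minimal sketch, assuming `portal` is the Plone site root and `IQueue` is imported as in the other snippets:

# Sketch: read the number of pending publisher jobs directly.
pending = IQueue(portal).countJobs()
print('%d job(s) waiting to be published' % pending)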
Example #48
0
class TestStorage(FunctionalTestCase):
    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(['push'],
                         [job.action for job in self.queue.getJobs()])

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual('obj 5', self.queue.get_executed_job_by_key(6))
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual([(1, 'obj 0'), (2, 'obj 1')],
                         list(self.queue.get_executed_jobs(start=0, end=2)))
        self.assertEqual([(5, 'obj 4')],
                         list(self.queue.get_executed_jobs(start=4, end=5)))
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(
            entry_to_delete not in tuple(self.queue.get_executed_jobs()))
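
The executed-jobs API exercised above also lends itself to simple housekeeping, for example dropping entries older than a cut-off date. A minimal sketch built only from the queue methods shown in these tests; the 30-day retention period is an arbitrary example and `portal` is assumed to be the Plone site root:

# Sketch: purge executed jobs older than 30 days, using only the queue
# methods exercised in the tests above.
from datetime import datetime, timedelta

queue = IQueue(portal)
cutoff = datetime.now() - timedelta(days=30)
queue.remove_executed_jobs_older_than(cutoff)
print('%d executed job(s) kept' % queue.get_executed_jobs_length())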
Example #49
0
 def test_delete_plonesite_is_not_allowed(self, browser):
     self.grant('Manager')
     self.assertEquals(0, IQueue(self.portal).countJobs())
     with browser.expect_http_error(500):
         browser.login().open(self.portal, view='@@publisher.delete')
Example #50
0
    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)