def test_delete(self):
        """Deleting content providing IPreventPublishing enqueues nothing."""
        job_queue = IQueue(self.portal)
        document = create(Builder('page').providing(IPreventPublishing))
        self.assertEquals(0, job_queue.countJobs())

        document.restrictedTraverse('@@publisher.delete')()
        # Publishing is prevented for this object, so the queue stays empty.
        self.assertEquals(0, job_queue.countJobs())
    def test_delete(self):
        """Deleting content providing IPreventPublishing enqueues nothing."""
        prevented_page = create(Builder('page').providing(IPreventPublishing))
        job_queue = IQueue(self.portal)
        self.assertEquals(0, job_queue.countJobs())

        prevented_page.restrictedTraverse('@@publisher.delete')()
        # Publishing is prevented for this object, so the queue stays empty.
        self.assertEquals(0, job_queue.countJobs())
示例#3
0
 def test_no_job_when_object_has_no_publisher_workflow(self):
     """Deleting content without a publisher workflow enqueues nothing."""
     document = create(Builder('page'))
     job_queue = IQueue(self.portal)

     Plone().login().visit(document, 'delete_confirmation')
     self.assertEquals(0, job_queue.countJobs())

     self.click_delete()
     # Still empty: the object is not managed by a publisher workflow.
     self.assertEquals(0, job_queue.countJobs())
 def test_no_job_when_object_has_no_publisher_workflow(self, browser):
     """Deleting content without a publisher workflow enqueues nothing."""
     document = create(Builder('page'))
     job_queue = IQueue(self.portal)

     browser.login().visit(document, view='delete_confirmation')
     self.assertEquals(0, job_queue.countJobs())

     browser.click_on('Delete')
     # Still empty: the object is not managed by a publisher workflow.
     self.assertEquals(0, job_queue.countJobs())
示例#5
0
    def test_delete_job_create_when_we_have_a_publisher_workflow(self):
        """Confirming deletion of workflow-managed content enqueues
        exactly one 'delete' job for the deleted object.
        """
        container = create(Builder('folder'))
        job_queue = IQueue(self.portal)

        Plone().login().visit(container, 'delete_confirmation')
        self.assertEquals(0, job_queue.countJobs())

        self.click_delete()
        self.assertEquals(1, job_queue.countJobs())

        job = job_queue.getJobs()[0]
        self.assertEquals('delete', job.action)
        self.assertEquals('/plone/folder', job.objectPath)
    def test_delete(self):
        """The delete view skips job creation while publisher jobs are
        disabled, and creates a job once they are enabled again.
        """
        queue = IQueue(self.portal)
        document = create(Builder('page'))
        self.assertEquals(0, queue.countJobs())

        # No job is queued while publisher jobs are disabled.
        with publisher_jobs_disabled():
            document.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(0, queue.countJobs())

        # Once enabled again, the same view call queues a delete job.
        document.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(1, queue.countJobs())
    def test_delete(self):
        """``publisher_jobs_disabled`` suppresses delete-job creation;
        outside the context manager the job is created normally.
        """
        content = create(Builder('page'))
        job_queue = IQueue(self.portal)
        self.assertEquals(0, job_queue.countJobs())

        # Inside the context manager no job may be created.
        with publisher_jobs_disabled():
            content.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(0, job_queue.countJobs())

        # Outside the context manager the delete view queues one job.
        content.restrictedTraverse('@@publisher.delete')()
        self.assertEquals(1, job_queue.countJobs())
示例#8
0
    def test_delete_job_create_when_parent_has_publisher_workflow(self):
        """A page inside a workflow-managed folder gets a 'delete' job
        when its deletion is confirmed.
        """
        container = create(Builder('folder'))
        document = create(Builder('page').within(container))
        job_queue = IQueue(self.portal)

        Plone().login().visit(document, 'delete_confirmation')
        self.assertEquals(0, job_queue.countJobs())

        self.click_delete()
        self.assertEquals(1, job_queue.countJobs())

        job = job_queue.getJobs()[0]
        self.assertEquals('delete', job.action)
        self.assertEquals('/plone/folder/document', job.objectPath)
示例#9
0
    def test_no_job_on_submit(self):
        """Submitting content for review must not create a publisher job."""
        document = create(Builder('page'))
        Plone().login().visit(document)
        Workflow().do_transition('submit')

        # 'submit' is not a publishing transition, so the queue is empty.
        self.assertEquals(0, IQueue(self.portal).countJobs())
示例#10
0
    def test_no_job_on_revise(self):
        """Revising published content must not create a publisher job."""
        document = create(Builder('page').in_state(EXAMPLE_WF_PUBLISHED))
        Plone().login().visit(document)
        Workflow().do_transition('revise')

        # 'revise' retracts the content without touching the queue.
        self.assertEquals(0, IQueue(self.portal).countJobs())
    def test_blocks_are_published_with_contentpage(self):
        """Publishing a content page also enqueues a job for each of
        its contained blocks.
        """
        content_page = create(Builder(self.page_builder))
        create(Builder(self.textblock_builder).within(content_page))

        Plone().login().visit(content_page)
        Workflow().do_transition('publish')

        self.assertEquals(
            2, IQueue(self.portal).countJobs(),
            'Expected the page and the text block to be in the queue.')
示例#12
0
    def test_blocks_are_published_with_contentpage(self, browser):
        """Publishing a content page also enqueues a job for each of
        its contained blocks.
        """
        content_page = create(Builder(self.page_builder))
        create(Builder(self.textblock_builder).within(content_page))

        browser.login().visit(content_page)
        Workflow().do_transition('publish')

        self.assertEquals(
            2, IQueue(self.portal).countJobs(),
            'Expected the page and the text block to be in the queue.')
示例#13
0
    def test_push_job_in_publisher_queue_after_publishing(self):
        """The 'publish' transition enqueues exactly one push job
        referencing the published object.
        """
        document = create(Builder('page'))
        Plone().login().visit(document)
        Workflow().do_transition('publish')

        job_queue = IQueue(self.portal)
        self.assertEquals(1, job_queue.countJobs())

        # The queued job is a push job for the published object.
        job = job_queue.getJobs()[0]
        self.assertEquals('push', job.action)
        self.assertEquals(document, job.getObject(self.portal))
    def test_sl_listing_block_publishes_its_children(self):
        """Publishing a page enqueues jobs for its listing block and
        for the files within the listing block.
        """
        content_page = create(Builder(self.page_builder))
        listing = create(Builder(self.listingblock_builder).within(content_page))
        create(Builder('file').within(listing))

        Plone().login().visit(content_page)
        Workflow().do_transition('publish')

        self.assertEquals(
            3, IQueue(self.portal).countJobs(),
            'Expected the page, the listing block and the file to be'
            ' in the queue.')
    def test_sl_listing_block_publishes_its_children(self, browser):
        """Publishing a page enqueues jobs for its listing block and
        for the files within the listing block.
        """
        content_page = create(Builder(self.page_builder))
        listing = create(Builder(self.listingblock_builder).within(content_page))
        create(Builder('file').within(listing))

        browser.login().visit(content_page)
        Workflow().do_transition('publish')

        self.assertEquals(
            3, IQueue(self.portal).countJobs(),
            'Expected the page, the listing block and the file to be'
            ' in the queue.')
示例#16
0
    def test_sl_listing_block_publishes_its_children(self, browser):
        """A published page, its listing block and the block's file
        all end up in the publisher queue.
        """
        page = create(Builder(self.page_builder))
        block = create(Builder(self.listingblock_builder).within(page))
        create(Builder('file').within(block))

        browser.login().visit(page)
        Workflow().do_transition('publish')

        job_queue = IQueue(self.portal)
        self.assertEquals(
            3, job_queue.countJobs(),
            'Expected the page, the listing block and the file to be'
            ' in the queue.')
    def get_options(self):
        """Returns a `dict` of data needed for rendering the mail template.

        Executed jobs are counted by their latest execution state
        (success / warning / error); jobs that ran before the last
        notification date are skipped. Erroneous jobs are additionally
        collected so the detailed report can list them.
        """
        config = self.get_configuration()
        last_date = get_last_notification_date()
        queue = IQueue(self.context)
        data = {'success': 0,
                'warning': 0,
                'error': 0,
                'total': 0,
                'jobs_in_queue': 0,
                'erroneous_jobs': [],
                'show_details': config.detailed_report,
                'subject': self.get_subject(),
                'portal': self.context}

        # count the jobs by group and total
        for _key, job in queue.get_executed_jobs():
            # get the runs; skip jobs which were never actually executed
            runs = getattr(job, 'executed_list', None)
            if not runs or len(runs) == 0:
                continue

            # was it published since last notification?
            if last_date and runs[-1]['date'] < last_date:
                continue

            # count it by the latest execution state
            state = job.get_latest_executed_entry()
            if isinstance(state, states.ErrorState):
                data['error'] += 1
                data['erroneous_jobs'].append(job)
            elif isinstance(state, states.WarningState):
                data['warning'] += 1
            # NOTE(review): this is ``if`` (not ``elif``) - a state that
            # is an instance of both WarningState and SuccessState would
            # be counted in both buckets. Confirm whether that is intended.
            if isinstance(state, states.SuccessState):
                data['success'] += 1
            data['total'] += 1

        # get the amount of jobs in the queue
        data['jobs_in_queue'] = queue.countJobs()

        return data
class TestStorage(FunctionalTestCase):
    """Integration tests for the publisher queue storage: the pending
    queue, the worker queue and the executed-jobs storage.
    """

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(
            ['push'],
            [job.action for job in self.queue.getJobs()]
        )

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        # Archiving returns the storage key of the executed job.
        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        # Executed jobs are stored as (key, job) tuples.
        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for _i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        # Storage keys are one-based: key 6 holds the sixth entry.
        self.assertEqual(
            'obj 5',
            self.queue.get_executed_job_by_key(6)
        )
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        # start/end are zero-based positions; the keys stay one-based.
        self.assertEqual(
            [(1, 'obj 0'), (2, 'obj 1')],
            list(self.queue.get_executed_jobs(start=0, end=2))
        )
        self.assertEqual(
            [(5, 'obj 4')],
            list(self.queue.get_executed_jobs(start=4, end=5))
        )
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs, one per day of January 2000.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for _i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        # The filter receives each (key, job) entry unpacked as *params.
        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(entry_to_delete not in tuple(self.queue.get_executed_jobs()))
示例#19
0
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """
    def execute_single_job(self, job):
        """ Executes a single job without calling the view

        The job is removed from whichever storage currently holds it
        (pending queue or executed list), executed against all active
        realms, its json file is moved to the executed folder, and the
        key under which it was archived is returned.
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue (it may be pending or already executed)
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list and return its storage key
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Handles logging purposes and calls execute() method.

        Returns the captured log output, or a short message when
        publishing is disabled or another execution holds the lock.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag; acquire(0) does not block, so a
        # concurrent execution bails out instead of waiting
        if self.config.locking_enabled(
        ) and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler (and release the lock) on failure!
        try:
            # execute queue
            self.execute()
        except Exception:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
        # The lock is stored on the class, so it is shared by all
        # instances (all requests within this process).
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms
        @rtype: list
        """
        # cached on first access for the lifetime of the view instance
        if '_activeRealms' not in dir(self):
            self._activeRealms = [
                r for r in self.config.getRealms() if r.active
            ]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
        ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            # skip jobs whose json data file has disappeared
            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                # let Zope retry the whole request
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except Exception:
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            # commit after each job so progress survives a later failure
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        @return: False when the object is blacklisted, None otherwise
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
            ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                ))
            return False

        # get data from chache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
            job.action,
            objTitle,
            job.objectPath,
            job.objectUID,
        ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (realm.url, ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                    ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent with the state of the last realm
        obj = uuidToObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))
示例#20
0
class TestStorage(FunctionalTestCase):
    """Integration tests for the publisher queue storage: the pending
    queue, the worker queue and the executed-jobs storage.
    """
    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(['push'],
                         [job.action for job in self.queue.getJobs()])

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        # Archiving returns the storage key of the executed job.
        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        # Executed jobs are stored as (key, job) tuples.
        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        # Storage keys are one-based: key 6 holds the sixth entry.
        self.assertEqual('obj 5', self.queue.get_executed_job_by_key(6))
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        # start/end are zero-based positions; the keys stay one-based.
        self.assertEqual([(1, 'obj 0'), (2, 'obj 1')],
                         list(self.queue.get_executed_jobs(start=0, end=2)))
        self.assertEqual([(5, 'obj 4')],
                         list(self.queue.get_executed_jobs(start=4, end=5)))
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        # The filter receives each (key, job) entry unpacked as *params.
        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(
            entry_to_delete not in tuple(self.queue.get_executed_jobs()))
示例#21
0
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """

    def execute_single_job(self, job):
        """ Executes a single job without calling the view

        The job is removed from whichever storage currently holds it
        (pending queue or executed list), executed against all active
        realms, its json file is moved to the executed folder, and the
        key under which it was archived is returned.
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue (it may be pending or already executed)
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list and return its storage key
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Handles logging purposes and calls execute() method.

        Returns the captured log output, or a short message when
        publishing is disabled or another execution holds the lock.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag; acquire(0) does not block, so a
        # concurrent execution bails out instead of waiting
        if self.config.locking_enabled() and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler (and release the lock) on failure!
        try:
            # execute queue
            self.execute()
        except Exception:
            # Narrowed from a bare "except:" so that SystemExit and
            # KeyboardInterrupt are no longer caught here.
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
        # The lock is stored on the class, so it is shared by all
        # instances (all requests within this process).
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms
        @rtype: list
        """
        # cached on first access for the lifetime of the view instance
        if '_activeRealms' not in dir(self):
            self._activeRealms = [r for r in self.config.getRealms()
                                  if r.active]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
        ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            # skip jobs whose json data file has disappeared
            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                # let Zope retry the whole request
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except Exception:
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            # commit after each job so progress survives a later failure
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        @return: False when the object is blacklisted, None otherwise
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
            ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                ))
            return False

        # get data from chache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
            job.action,
            objTitle,
            job.objectPath,
            job.objectUID,
        ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (realm.url, ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                    ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent with the state of the last realm
        reference_catalog = getToolByName(self.context, 'reference_catalog')
        obj = reference_catalog.lookupObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))
示例#22
0
class TestEventhandler(MockTestCase):
    """Tests that queue execution triggers the monitor notifier
    according to the configured threshold.
    """

    layer = MONITOR_FUNCTIONAL_TESTING

    def setUp(self):
        super(TestEventhandler, self).setUp()

        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        self.folder = self.portal.get(self.portal.invokeFactory(
                'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)

        mtool = getToolByName(self.portal, 'portal_membership')
        self.user = mtool.getMemberById(TEST_USER_ID)

        # Enable monitoring with a receiver so notifications would fire.
        self.config = IMonitorConfigurationSchema(self.portal)
        self.config.set_enabled(True)
        self.config.set_receivers_plain('*****@*****.**')

        # Replace the real notifier adapter with a mock so the tests can
        # assert whether (and how often) it gets called.
        self.notifier_class = self.stub_interface(IMonitorNotifier)
        self.notifier = self.mock_interface(IMonitorNotifier)
        self.expect(self.notifier_class(self.portal)).result(
            self.notifier)

        provideAdapter(factory=self.notifier_class,
                       provides=IMonitorNotifier,
                       adapts=(IPloneSiteRoot,))

    def tearDown(self):
        # Unregister the mock adapter so other tests see the real one.
        sm = getGlobalSiteManager()
        sm.unregisterAdapter(factory=self.notifier_class,
                             provided=IMonitorNotifier,
                             required=(IPloneSiteRoot,))
        super(TestEventhandler, self).tearDown()

    def stub_current_queue_length(self, amount_of_jobs):
        """Empty the queue, then fill it with ``amount_of_jobs`` push jobs."""
        while self.queue.countJobs() > 0:
            self.queue.popJob()

        for _i in range(amount_of_jobs):
            # Remove acquisition wrapper from "self.user" in order to
            # prevent the following error:
            #   TypeError: Can't pickle objects in acquisition wrappers.
            self.queue.createJob('push', self.folder, aq_base(self.user))

    def test_eventhandler_calls_notifier(self):
        # Queue length (3) exceeds the threshold (2), so the notifier
        # must be invoked once.
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)
        event = self.create_dummy(queue=self.queue)

        self.expect(self.notifier(ANY, ANY))
        self.replay()

        invoke_notification(self.portal, event)

    def test_adapter_called_after_queue_execution(self):
        # Executing the queue over-threshold triggers the notification.
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        self.expect(self.notifier(ANY, ANY))
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_not_called_when_monitoring_disabled(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        self.config.set_enabled(False)

        # count(0): the notifier must NOT be called while disabled.
        self.expect(self.notifier(ANY, ANY)).count(0)
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_not_called_when_threshold_not_reached(self):
        # Queue length (1) is below the threshold (10).
        self.config.set_threshold(10)
        self.stub_current_queue_length(1)

        self.expect(self.notifier(ARGS, KWARGS)).count(0)
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_is_called_even_when_publishing_disabled(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        # Disabling publishing stops execution, but monitoring still fires.
        config = IConfig(self.portal)
        config.set_publishing_enabled(False)

        self.expect(self.notifier(ARGS, KWARGS))
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()