# Example #1
def upgrade_executed_jobs_storage(portal_setup):
    """Migrate the executed jobs storage from the old PersistentList
    annotation to the new IOBTree-backed queue storage.

    """
    portal = portal_setup.portal_url.getPortalObject()
    queue = IQueue(portal)
    annotations = IAnnotations(portal)

    if 'publisher-executed' not in annotations:
        # Nothing stored under the legacy key, so there is no work to do.
        return

    # Read the legacy jobs straight out of the annotations; the queue
    # API cannot see them until the migration has happened.
    legacy_jobs = list(annotations.get('publisher-executed', []))

    # Drop the obsolete PersistentList entry.
    del annotations['publisher-executed']

    # Re-insert every job through the new storage API.
    for legacy_job in legacy_jobs:
        queue.append_executed_job(legacy_job)

    # Sanity check: every job must have made it into the new storage.
    assert len(legacy_jobs) == queue.get_executed_jobs_length()
class TestEmailNotification(MockTestCase):
    """Functional tests for the e-mail report sent after the publisher
    queue has been executed.

    NOTE(review): ``self.notifier_config`` and ``self.user`` are used
    below but never assigned in ``setUp`` — some setup lines appear to
    be missing (possibly redacted); confirm against the original module.
    """

    layer = MAILREPORT_FUNCTIONAL_TESTING

    def setUp(self):
        super(TestEmailNotification, self).setUp()

        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        # Content that the jobs created in these tests refer to.
        self.folder = self.portal.get(self.portal.invokeFactory(
                'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)
        self.realm = Realm(True, 'http://*****:*****@user.com')
        self.notifier_config.set_interval('hourly')

        # configure mail settings
        properties_tool = getToolByName(self.portal, 'portal_properties')
        properties_tool.email_from_name = 'Plone'
        properties_tool.email_from_address = '*****@*****.**'

        # patch MailHost: record every outgoing mail in self.mails
        # instead of actually sending anything.
        self.mail_host = self.stub()
        self.mock_tool(self.mail_host, 'MailHost')
        self.mails = []
        self.expect(self.mail_host.send(ARGS, KWARGS)).call(
            lambda *args, **kwargs: self.mails.append((args, kwargs)))
        self.expect(self.mail_host.secureSend(ARGS, KWARGS)).call(
            lambda *args, **kwargs: self.mails.append((args, kwargs)))

        # mock datetime.now so the tests can control the clock through
        # set_time() / self.now.
        self.now = datetime.datetime(2010, 1, 2, 3, 4, 5)

        self._ori_datetime = datetime.datetime
        dt = self.mocker.proxy(datetime.datetime, count=False)
        self.expect(dt.now()).call(lambda: self.now).count(0, None)
        datetime.datetime = dt

        self.replay()

    def tearDown(self):
        # Restore the real datetime class patched in setUp.
        datetime.datetime = self._ori_datetime
        setRoles(self.portal, TEST_USER_ID, ['Member'])

        super(TestEmailNotification, self).tearDown()

    def suppose_job_was_executed(self, successful=False, error=False,
                                 warning=False):
        """ Adds a job to the "executed" list

        Exactly one of ``successful``, ``error`` or ``warning`` must be
        truthy; it determines the state recorded for the realm.
        Returns the created job.
        """

        if error:
            response = UIDPathMismatchError()
        elif warning:
            response = ObjectNotFoundForMovingWarning()
        elif successful:
            response = ObjectUpdatedState()
        else:
            raise TypeError('suppose_job_was_executed expects one '
                            'positive keyword argument')

        job = Job('push', self.folder, self.user)
        job.executed_with_states({
                'date': datetime.datetime.now(),
                self.realm: response})
        self.queue.append_executed_job(job)
        return job

    def set_time(self, hour, minute=None):
        # Default the minute to the hour value so successive calls like
        # set_time(1), set_time(2) are strictly increasing.
        if minute is None:
            minute = hour
        self.now = datetime.datetime(2010, 12, 27, hour, minute)

    def get_normalize_statistics_table_from_message(self, message):
        """Extract the first table from the mail body with whitespace
        collapsed, so assertions can match exact markup."""
        message = pq(str(message))
        statistics_table = message('table:first').html()
        # "normalize" whitespace
        # NOTE(review): the pattern should be a raw string (r'\s{1,}')
        # to avoid an invalid-escape warning on newer Pythons.
        statistics_table = re.sub('\s{1,}', ' ', statistics_table)
        statistics_table = statistics_table.replace('> <', '><')
        return statistics_table

    def test_report_sent_after_executing_queue(self):
        self.set_time(1)
        utils.set_last_notification_date_to_now()

        self.set_time(2)
        self.suppose_job_was_executed(successful=True)
        self.suppose_job_was_executed(successful=True)
        self.suppose_job_was_executed(error=True)
        self.suppose_job_was_executed(warning=True)
        self.suppose_job_was_executed(warning=True)
        self.suppose_job_was_executed(warning=True)

        self.set_time(3)
        self.assertTrue(utils.is_interval_expired())

        self.portal.restrictedTraverse('@@publisher.executeQueue')()

        # Exactly one report mail must have been captured by the stub.
        self.assertEqual(len(self.mails), 1)
        args, kwargs = self.mails.pop()

        self.assertEqual(kwargs.get('mfrom'), '*****@*****.**')
        self.assertEqual(kwargs.get('mto'), '*****@*****.**')
        self.assertEqual(kwargs.get('subject'),
                         u'Publisher report: Plone site')

        statistics_table = self.get_normalize_statistics_table_from_message(
            args[0])

        self.assertIn('<tr><th>Successfull jobs:</th><td>2</td></tr>',
                      statistics_table)

        self.assertIn('<tr><th>Jobs with warning:</th><td>3</td></tr>',
                      statistics_table)

        self.assertIn('<tr><th>Jobs with errors:</th><td>1</td></tr>',
                      statistics_table)

        self.assertIn('<tr><th>Total executed jobs:</th><td>6</td></tr>',
                      statistics_table)

    def test_report_does_only_contain_new_jobs(self):
        # Two jobs executed BEFORE the last-notification timestamp must
        # not appear in the report; only the one executed after does.
        self.set_time(1)
        self.suppose_job_was_executed(successful=True)
        self.suppose_job_was_executed(successful=True)

        self.set_time(2)
        utils.set_last_notification_date_to_now()

        self.set_time(3)
        self.suppose_job_was_executed(successful=True)

        self.set_time(4)
        self.assertTrue(utils.is_interval_expired())
        self.portal.restrictedTraverse('@@publisher.executeQueue')()

        self.assertEqual(len(self.mails), 1)
        args, kwargs = self.mails.pop()

        statistics_table = self.get_normalize_statistics_table_from_message(
            args[0])

        self.assertIn('<tr><th>Total executed jobs:</th><td>1</td></tr>',
                      statistics_table)

    def test_report_is_sent_to_each_receivers(self):
        self.notifier_config.set_receivers_plain('\n'.join((
                    '*****@*****.**',
                    '*****@*****.**')))

        self.set_time(1)
        self.suppose_job_was_executed(successful=True)

        self.set_time(2)
        utils.set_last_notification_date_to_now()

        self.set_time(3)
        self.assertTrue(utils.is_interval_expired())
        self.portal.restrictedTraverse('@@publisher.executeQueue')()

        # One mail per configured receiver.
        self.assertEqual(len(self.mails), 2)

        # we pop it reversed, therfore we test in opposite order than
        # it is configured.
        args, kwargs = self.mails.pop()
        self.assertEqual(kwargs.get('mto'), '*****@*****.**')

        args, kwargs = self.mails.pop()
        self.assertEqual(kwargs.get('mto'), '*****@*****.**')
class TestStorage(FunctionalTestCase):
    """Integration tests for the queue's executed-jobs storage.

    Fixes over the previous revision: the stray extra indentation in
    ``test_get_batch_of_executed_jobs``'s loop body and two overlong
    lines are normalized to the file's style.
    """

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(
            ['push'],
            [job.action for job in self.queue.getJobs()]
        )

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        # Storage keys are 1-based and returned on insert.
        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        # Executed jobs are exposed as (key, job) pairs.
        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        # Keys start at 1, so key 6 maps to the sixth payload.
        self.assertEqual(
            'obj 5',
            self.queue.get_executed_job_by_key(6)
        )
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual(
            [(1, 'obj 0'), (2, 'obj 1')],
            list(self.queue.get_executed_jobs(start=0, end=2))
        )
        self.assertEqual(
            [(5, 'obj 4')],
            list(self.queue.get_executed_jobs(start=4, end=5))
        )
        # Batch boundaries outside the storage are rejected.
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs, one per day.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one
        # from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(
            entry_to_delete not in tuple(self.queue.get_executed_jobs()))
# Example #4
class TestStorage(FunctionalTestCase):
    """Exercises the executed-jobs storage exposed through IQueue."""

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        # A freshly adapted queue starts out empty.
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        # Both the iterator and the counter report nothing.
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # Exactly one job is queued now ...
        self.assertEqual(1, self.queue.countJobs())

        # ... and it is the one we just created.
        actions = [queued.action for queued in self.queue.getJobs()]
        self.assertEqual(['push'], actions)

    def test_queue_after_publishing(self):
        """Walk a single job through a complete publishing cycle."""
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        popped = self.queue.popJob()
        self.assertEqual('push', popped.action)

        # Appending returns the (1-based) storage key.
        storage_key = self.queue.append_executed_job(popped)
        self.assertEqual(1, storage_key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        # Entries come back as (key, job) pairs.
        key_and_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(storage_key, key_and_job[0])
        self.assertEqual('push', key_and_job[1].action)

        self.queue.remove_executed_job(storage_key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for _ in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        # Clearing wipes both the entries and the counter.
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for index in range(10):
            self.queue.append_executed_job('obj %i' % index)

        # Keys are 1-based, so key 6 holds the sixth payload.
        self.assertEqual('obj 5', self.queue.get_executed_job_by_key(6))
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for index in range(10):
            self.queue.append_executed_job('obj %i' % index)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        first_two = list(self.queue.get_executed_jobs(start=0, end=2))
        self.assertEqual([(1, 'obj 0'), (2, 'obj 1')], first_two)

        fifth_only = list(self.queue.get_executed_jobs(start=4, end=5))
        self.assertEqual([(5, 'obj 4')], fifth_only)

        # Batch ranges beyond the stored entries are rejected.
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        # Any batch of an empty storage is simply empty.
        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # One executed job per day for 19 consecutive days.
        for day in range(1, 20):
            executed = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            executed.executed_with_states({'date': date})
            self.queue.append_executed_job(executed)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Everything up to and including 2000-01-10 gets dropped.
        cutoff = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(cutoff)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for _ in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        # Pick the third entry and delete exactly that one.
        victim = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == victim)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(
            victim not in tuple(self.queue.get_executed_jobs()))
# Example #5
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """

    def execute_single_job(self, job):
        """Execute a single job without going through ``__call__``.

        The job is removed from whichever storage currently holds it
        (pending queue or executed list), executed against all active
        realms, its json payload archived, and then re-appended to the
        executed list.

        @param job: Job object to execute
        @return: the storage key assigned by ``append_executed_job``
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # Remove the job from its current storage so it is not
        # present twice; it is re-added to the executed list below.
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # archive the json payload with the other executed jobs
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)

    def __call__(self):
        """Execute the whole queue.

        Wraps ``execute()`` with locking and a temporary logging
        handler and returns the captured log output (or a short
        status string when publishing is disabled / already running).
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # Non-blocking acquire: if another request already holds the
        # lock we bail out immediately instead of queueing behind it.
        if (self.config.locking_enabled()
                and not self.get_lock_object().acquire(0)):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except BaseException:
            # Explicit BaseException (instead of the previous bare
            # "except:") so the handler/lock cleanup still runs for
            # SystemExit/KeyboardInterrupt before re-raising.
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
        """Return the class-wide re-entrant lock, creating it lazily."""
        # "is None" replaces the previous "== None": identity is the
        # correct comparison against the None singleton.
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms (cached on the view instance)
        @rtype: list
        """
        if '_activeRealms' not in dir(self):
            self._activeRealms = [realm for realm in self.config.getRealms()
                                  if realm.active]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
            ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            # A job whose json payload vanished cannot be executed.
            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                # let Zope handle database conflicts / request retries
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except Exception:
                # Narrowed from a bare "except:" so KeyboardInterrupt /
                # SystemExit abort the run instead of being logged away.
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            # persist progress after every processed job
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        @return: False if the object is blacklisted, otherwise None
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                    ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                    ))
            return False

        # get data from cache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
                ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (
                    realm.url,
                    ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                        ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent with the state of the last realm
        reference_catalog = getToolByName(self.context, 'reference_catalog')
        obj = reference_catalog.lookupObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))
# Example #6
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """
    def execute_single_job(self, job):
        """ Executes a single job without calling the view

        The job is removed from whichever storage currently holds it
        (pending queue or executed list), executed, its json payload
        archived, and the job re-appended to the executed list.
        Returns the new storage key from ``append_executed_job``.
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue (so it is not present twice; it is
        # re-added to the executed list at the end)
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file into the executed-jobs folder
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Handles logging purposes and calls execute() method.

        Returns the captured log output, or a short status string when
        publishing is disabled or another execution holds the lock.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag; acquire(0) is a non-blocking
        # attempt, so a concurrent run returns immediately
        if self.config.locking_enabled(
        ) and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except Exception:
            # clean up handler and lock, then re-raise unchanged
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
        # Lazily create one RLock shared by all instances of this view
        # (stored on the class, not the instance).
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms (computed once and cached on
                 the view instance)
        @rtype: list
        """
        if '_activeRealms' not in dir(self):
            self._activeRealms = [
                r for r in self.config.getRealms() if r.active
            ]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        # job count before moving, for the log line below
        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
        ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            # a job whose json payload is gone cannot be executed
            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                # let Zope handle database conflicts / request retries
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except Exception:
                # print the exception to the publisher error log; the
                # job is still recorded as executed (with the traceback
                # attached) below
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            # persist progress after every processed job
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        @return: False if the object is blacklisted, otherwise None
        """
        objTitle = job.objectTitle
        # Python 2: encode unicode titles for the %-formatted log lines
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
            ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                ))
            return False

        # get data from cache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
            job.action,
            objTitle,
            job.objectPath,
            job.objectUID,
        ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (realm.url, ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                    ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent with the state of the last realm
        obj = uuidToObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))