Code example #1
class TestStorage(FunctionalTestCase):

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(
            ['push'],
            [job.action for job in self.queue.getJobs()]
        )

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

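        # Keys start at 1, so 'obj 5' (the sixth entry) is stored under key 6.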
        self.assertEqual(
            'obj 5',
            self.queue.get_executed_job_by_key(6)
        )
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual(
            [(1, 'obj 0'), (2, 'obj 1')],
            list(self.queue.get_executed_jobs(start=0, end=2))
        )
        self.assertEqual(
            [(5, 'obj 4')],
            list(self.queue.get_executed_jobs(start=4, end=5))
        )
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(entry_to_delete not in tuple(self.queue.get_executed_jobs()))
Code example #2
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """

    def execute_single_job(self, job):
        """ Executes a single job without calling the view
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Sets up logging and locking, then calls the execute() method.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag
        if self.config.locking_enabled() and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled(): self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled(): self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
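        # Create the class-level lock lazily; all instances share it.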
        if getattr(self.__class__, '_lock', None) == None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms
        @rtype: list
        """
        if '_activeRealms' not in dir(self):
            self._activeRealms = [r for r in self.config.getRealms()
                                  if r.active]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
            ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except:
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                    ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                    ))
            return False

        # get data from cache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
                ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (
                    realm.url,
                    ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                        ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent
        reference_catalog = getToolByName(self.context, 'reference_catalog')
        obj = reference_catalog.lookupObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))
Code example #3
class TestStorage(FunctionalTestCase):
    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')

        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')

        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())

        # Make sure the job is there.
        self.assertEqual(['push'],
                         [job.action for job in self.queue.getJobs()])

    def test_queue_after_publishing(self):
        """
        Simulate an entire publishing cycle.
        """
        self.queue.createJob('push', self.folder, 'user')

        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)

        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()

        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

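        # Keys start at 1, so 'obj 5' (the sixth entry) is stored under key 6.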
        self.assertEqual('obj 5', self.queue.get_executed_job_by_key(6))
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job('obj %i' % i)

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual([(1, 'obj 0'), (2, 'obj 1')],
                         list(self.queue.get_executed_jobs(start=0, end=2)))
        self.assertEqual([(5, 'obj 4')],
                         list(self.queue.get_executed_jobs(start=4, end=5)))
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()

        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)

        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()

        for i in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))

        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())

        self.assertTrue(
            entry_to_delete not in tuple(self.queue.get_executed_jobs()))
Code example #4
class ExecuteQueue(BrowserView):
    """Executes the Queue and sends all Jobs to the target
    realms.

    """
    def execute_single_job(self, job):
        """ Executes a single job without calling the view
        """
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        portal = self.context.portal_url.getPortalObject()
        self.config = IConfig(portal)
        self.queue = IQueue(portal)
        # remove job from queue
        if job in self.queue.getJobs():
            self.queue.removeJob(job)
        elif job in self.queue.get_executed_jobs():
            self.queue.remove_executed_job(job)
        # execute it
        self.executeJob(job)
        # move json file
        job.move_jsonfile_to(self.config.get_executed_folder())
        # add to executed list
        return self.queue.append_executed_job(job)

    def __call__(self):
        """
        Sets up logging and locking, then calls the execute() method.
        """

        # get config and queue
        self.config = IConfig(self.context)
        portal = self.context.portal_url.getPortalObject()
        self.queue = IQueue(portal)
        event.notify(BeforeQueueExecutionEvent(portal, self.queue))
        # prepare logger
        self.logger = getLogger()
        self.error_logger = getErrorLogger()
        # is it allowed to publish?
        if not self.config.publishing_enabled():
            self.logger.warning('PUBLISHING IS DISABLED')
            return 'PUBLISHING IS DISABLED'

        if self.config.locking_enabled():
            self.logger.info('LOCKING IS ENABLED')
        else:
            self.logger.info('LOCKING IS DISABLED')

        # lock - check for locking flag
        if self.config.locking_enabled(
        ) and not self.get_lock_object().acquire(0):
            self.logger.warning('Already publishing')
            return 'Already publishing'

        # register our own logging handler for returning logs afterwards
        logStream = StringIO()
        logHandler = logging.StreamHandler(logStream)
        self.logger.addHandler(logHandler)
        # be sure to remove the handler!
        try:
            # execute queue
            self.execute()
        except Exception:
            self.logger.removeHandler(logHandler)
            if self.config.locking_enabled():
                self.get_lock_object().release()
            # re-raise exception
            raise
        # get logs
        self.logger.removeHandler(logHandler)
        logStream.seek(0)
        log = logStream.read()
        del logStream
        del logHandler

        # unlock
        if self.config.locking_enabled():
            self.get_lock_object().release()

        event.notify(QueueExecutedEvent(portal, log))
        return log

    def get_lock_object(self):
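        # Create the class-level lock lazily; all instances share it.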
        if getattr(self.__class__, '_lock', None) is None:
            self.__class__._lock = RLock()
        return self.__class__._lock

    def getActiveRealms(self):
        """
        @return: a list of active Realms
        @rtype: list
        """
        if '_activeRealms' not in dir(self):
            self._activeRealms = [
                r for r in self.config.getRealms() if r.active
            ]
        return self._activeRealms

    def execute(self):
        """
        Executes the jobs from the queue.
        @return: None
        """

        jobs = self.queue.countJobs()

        self.queue.move_to_worker_queue()

        self.logger.info('Executing Queue: %i of %i objects to %i realms' % (
            jobs,
            self.queue.countJobs(),
            len(self.getActiveRealms()),
        ))

        while len(self.queue.get_worker_queue()):
            job = self.queue.popJob()

            if not job.json_file_exists():
                continue
            try:
                # execute job
                self.executeJob(job)
            except (ConflictError, Retry):
                raise
            except URLError:
                raise
            except ReceiverTimeoutError:
                raise
            except Exception:
                # print the exception to the publisher error log
                exc = ''.join(traceback.format_exception(*sys.exc_info()))
                self.error_logger.error(exc)
                job.executed_exception = exc
            job.move_jsonfile_to(self.config.get_executed_folder())
            self.queue.append_executed_job(job)
            transaction.commit()

    def executeJob(self, job):
        """
        Executes a Job: sends the job to all available realms.
        @param job:     Job object to execute
        @type job:      Job
        """
        objTitle = job.objectTitle
        if isinstance(objTitle, unicode):
            objTitle = objTitle.encode('utf8')
        # is the object blacklisted?
        if IPathBlacklist(self.context).is_blacklisted(job.objectPath):
            self.logger.error('blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                job.action,
                objTitle,
                job.objectPath,
                job.objectUID,
            ))
            self.error_logger.error(
                'blacklisted: "%s" on "%s" (at %s | UID %s)' % (
                    job.action,
                    objTitle,
                    job.objectPath,
                    job.objectUID,
                ))
            return False

        # get data from cache file
        state = None
        json = job.getData()
        self.logger.info('-' * 100)
        self.logger.info('executing "%s" on "%s" (at %s | UID %s)' % (
            job.action,
            objTitle,
            job.objectPath,
            job.objectUID,
        ))
        self.logger.info('... request data length: %i' % len(json))
        state_entries = {'date': datetime.now()}
        for realm in self.getActiveRealms():
            self.logger.info('... to realm %s' % (realm.url, ))
            # send data to each realm
            state = sendJsonToRealm(json, realm, 'publisher.receive')
            if isinstance(state, states.ErrorState):
                self.logger.error('... got result: %s' % state.toString())
                self.error_logger.error(
                    'executing "%s" on "%s" (at %s | UID %s)' % (
                        job.action,
                        objTitle,
                        job.objectPath,
                        job.objectUID,
                    ))
                self.error_logger.error('... got result: %s' %
                                        state.toString())
            else:
                self.logger.info('... got result: %s' % state.toString())
            state_entries[realm] = state
        job.executed_with_states(state_entries)

        # fire AfterPushEvent
        obj = uuidToObject(job.objectUID)
        if state is not None:
            event.notify(AfterPushEvent(obj, state, job))