def __call__(self, event, no_response=False, msg=None):
    """Create a "move" job for the current context and add it to the queue.

    @param event: object-moved/renamed event carrying ``oldName``,
        ``newName``, ``oldParent`` and ``newParent``.
    @param no_response: when True, suppress the redirect (and return
        False on blacklist instead).
    @param msg: status message shown to the user; a default is used
        when None.
    @return: Redirect to object`s default view (unless no_response).
    """
    if IPreventPublishing.providedBy(self.context):
        return 'prevented'

    if publisher_jobs_are_disabled():
        return 'disabled'

    self.logger = getLogger()

    # is the object blacklisted?
    if IPathBlacklist(self.context).is_blacklisted():
        self.logger.warning(
            'Could not create move job for blacklisted object (%s at %s)'
            % (self.context.Title(),
               '/'.join(self.context.getPhysicalPath())))
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
        return False

    # This View should not be executed at the PloneSiteRoot
    if IPloneSiteRoot.providedBy(self.context):
        raise Exception('Not allowed on PloneSiteRoot')

    # get username
    user = self.context.portal_membership.getAuthenticatedMember()
    username = user.getUserName()

    # create Job
    portal = self.context.portal_url.getPortalObject()
    queue = IQueue(portal)
    additional_data = {
        'move_data': {
            'newName': event.newName,
            'newParent': get_site_relative_path(event.newParent),
            'newTitle': event.object.Title().decode('utf-8'),
            'oldName': event.oldName,
            'oldParent': get_site_relative_path(event.oldParent),
        }
    }
    queue.createJob('move', self.context, username,
                    additional_data=additional_data)
    self.logger.debug('Created "%s" Job for "%s" at %s' % (
        'move',
        self.context.Title(),
        '/'.join(self.context.getPhysicalPath()),
    ))

    # status message
    if msg is None:
        msg = _(u'Object move/rename action has been added to the queue.')
    IStatusMessage(self.request).addStatusMessage(msg, type='info')

    if not no_response:
        return self.request.RESPONSE.redirect('./view')
def __call__(self, no_response=False, msg=None, *args, **kwargs):
    """ The __call__ method is used to execute the BrowserView.
    It creates and adds a "PUSH"-Job on the current context to
    the queue.
    @param args: list of unnamed arguments
    @type args: list
    @param kwargs: dict of named keyword-arguments
    @type kwargs: dict
    @return: Redirect to object`s default view
    """
    if IPreventPublishing.providedBy(self.context):
        return 'prevented'

    if publisher_jobs_are_disabled():
        return 'disabled'

    self.logger = getLogger()

    # is the object blacklisted?
    if IPathBlacklist(self.context).is_blacklisted():
        # Formerly built with "'...' + '...%s' % args", which only worked
        # because % binds tighter than +; a single literal is safer and
        # produces the identical message.
        self.logger.warning(
            'Could not create push job for blacklisted object (%s at %s)'
            % (self.context.Title(),
               '/'.join(self.context.getPhysicalPath())))
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
        return False

    # mle: now its possible to execite this view on plonesiteroot
    # This View should not be executed at the PloneSiteRoot
    #if IPloneSiteRoot.providedBy(self.context):
    #    raise Exception('Not allowed on PloneSiteRoot')

    # get username
    user = self.context.portal_membership.getAuthenticatedMember()
    username = user.getUserName()

    # create Job
    portal = self.context.portal_url.getPortalObject()
    queue = IQueue(portal)
    queue.createJob('push', self.context, username)
    self.logger.debug('Created "%s" Job for "%s" at %s' % (
        'push',
        self.context.Title(),
        '/'.join(self.context.getPhysicalPath()),
    ))

    # status message
    if msg is None:
        msg = _(u'This object has been added to the queue.')
    IStatusMessage(self.request).addStatusMessage(msg, type='info')

    if not no_response:
        return self.request.RESPONSE.redirect('./view')
def __call__(self, no_response=False, msg=None, *args, **kwargs):
    """ Add the current context as delete-job to the queue, creates
    a status message to inform the user and returns to the default
    view.
    @param args: list of unnamed arguments
    @type args: list
    @param kwargs: dict of named keyword-arguments
    @type kwargs: dict
    @return: Redirect to object`s default view
    """
    if IPreventPublishing.providedBy(self.context):
        return 'prevented'
    if publisher_jobs_are_disabled():
        return 'disabled'

    self.logger = getLogger()

    # Blacklisted objects must never produce publisher jobs.
    if IPathBlacklist(self.context).is_blacklisted():
        self.logger.warning(
            'Could not create delete job for blacklisted '
            'object (%s at %s)' % (
                self.context.Title(),
                '/'.join(self.context.getPhysicalPath())))
        if no_response:
            return False
        return self.request.RESPONSE.redirect('./view')

    # Deleting the site root on remote sites is not supported.
    if IPloneSiteRoot.providedBy(self.context):
        raise Exception('Not allowed on PloneSiteRoot')

    # Determine the acting user's name for the job record.
    member = self.context.portal_membership.getAuthenticatedMember()
    author = member.getUserName()

    # Queue the delete job on the portal-wide queue.
    site = self.context.portal_url.getPortalObject()
    job_queue = IQueue(site)
    job_queue.createJob('delete', self.context, author)
    self.logger.debug('Created "%s" Job for "%s" at %s' % (
        'delete',
        self.context.Title(),
        '/'.join(self.context.getPhysicalPath()),
    ))

    # Inform the user about the queued deletion.
    if msg is None:
        msg = _(u'This object will be deleted at the remote sites.')
    add_transaction_aware_status_message(self.request, msg, type='info')

    if not no_response:
        return self.request.RESPONSE.redirect('./view')
def __call__(self, no_response=False, msg=None, recursive=True):
    """Queue "push" jobs for this object and, optionally, its children.

    @param no_response: when True, suppress the redirect.
    @param msg: status message to show; a default is used when None.
    @param recursive: also publish contained objects that belong to
        this parent.
    @return: Redirect to object`s default view (unless no_response).
    """
    if IPreventPublishing.providedBy(self.context):
        return 'prevented'
    if publisher_jobs_are_disabled():
        return 'disabled'

    self.logger = getLogger()

    # Blacklisted objects must never produce publisher jobs.
    if IPathBlacklist(self.context).is_blacklisted():
        self.logger.warning(
            'Could not create push job for blacklisted object (%s at %s)'
            % (self.context.Title(),
               '/'.join(self.context.getPhysicalPath())))
        if no_response:
            return False
        return self.request.RESPONSE.redirect('./view')

    event.notify(BeforePublishEvent(self.context))

    # NOTE: executing this view on the Plone site root is allowed
    # nowadays, hence no IPloneSiteRoot guard here (kept disabled):
    # if IPloneSiteRoot.providedBy(self.context):
    #     raise Exception('Not allowed on PloneSiteRoot')

    # Determine the acting user's name for the job record.
    member = self.context.portal_membership.getAuthenticatedMember()
    author = member.getUserName()

    # Queue the push job on the portal-wide queue.
    site = self.context.portal_url.getPortalObject()
    job_queue = IQueue(site)
    job_queue.createJob('push', self.context, author)
    self.logger.debug('Created "%s" Job for "%s" at %s' % (
        'push',
        self.context.Title(),
        '/'.join(self.context.getPhysicalPath()),
    ))

    if recursive and base_hasattr(self.context, 'contentValues'):
        # Use contentValues for implicit ftw.trash compatibility.
        for child in self.context.contentValues():
            if not belongs_to_parent(child):
                continue
            child.restrictedTraverse('@@publisher.publish')(
                no_response=True, msg=msg)

    # Inform the user that the object was queued.
    if msg is None:
        msg = _(u'This object has been added to the queue.')
    IStatusMessage(self.request).addStatusMessage(msg, type='info')

    if not no_response:
        return self.request.RESPONSE.redirect('./view')
def __call__(self, no_response=False, msg=None):
    """Queue a "delete" job for the current context.

    @param no_response: when True, suppress the redirect.
    @param msg: status message to show; a default is used when None.
    @return: Redirect to object`s default view (unless no_response).
    """
    if IPreventPublishing.providedBy(self.context):
        return 'prevented'
    if publisher_jobs_are_disabled():
        return 'disabled'

    self.logger = getLogger()

    # Blacklisted objects must never produce publisher jobs.
    if IPathBlacklist(self.context).is_blacklisted():
        self.logger.warning(
            'Could not create delete job for blacklisted '
            'object (%s at %s)' % (
                self.context.Title(),
                '/'.join(self.context.getPhysicalPath())))
        if no_response:
            return False
        return self.request.RESPONSE.redirect('./view')

    # Deleting the site root on remote sites is not supported.
    if IPloneSiteRoot.providedBy(self.context):
        raise Exception('Not allowed on PloneSiteRoot')

    # Determine the acting user's name for the job record.
    member = self.context.portal_membership.getAuthenticatedMember()
    author = member.getUserName()

    # Queue the delete job on the portal-wide queue.
    site = self.context.portal_url.getPortalObject()
    job_queue = IQueue(site)
    job_queue.createJob('delete', self.context, author)
    self.logger.debug('Created "%s" Job for "%s" at %s' % (
        'delete',
        self.context.Title(),
        '/'.join(self.context.getPhysicalPath()),
    ))

    # Inform the user about the queued deletion.
    if msg is None:
        msg = _(u'This object will be deleted at the remote sites.')
    add_transaction_aware_status_message(self.request, msg, type='info')

    if not no_response:
        return self.request.RESPONSE.redirect('./view')
class TestStorage(FunctionalTestCase):
    """Functional tests for the publisher job queue storage."""

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')
        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')
        # Exactly one job must be queued now ...
        self.assertEqual(1, self.queue.countJobs())
        # ... and it must be the push job we just created.
        actions = [job.action for job in self.queue.getJobs()]
        self.assertEqual(['push'], actions)

    def test_queue_after_publishing(self):
        """ Simulate an entire publishing cycle. """
        self.queue.createJob('push', self.folder, 'user')
        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)
        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        executed_job = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed_job[0])
        self.assertEqual('push', executed_job[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for _ in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))
        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()
        for number in range(10):
            self.queue.append_executed_job('obj %i' % number)

        # Keys are 1-based, so key 6 holds the sixth entry ('obj 5').
        self.assertEqual('obj 5', self.queue.get_executed_job_by_key(6))
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()
        for number in range(10):
            self.queue.append_executed_job('obj %i' % number)
        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual(
            [(1, 'obj 0'), (2, 'obj 1')],
            list(self.queue.get_executed_jobs(start=0, end=2)))
        self.assertEqual(
            [(5, 'obj 4')],
            list(self.queue.get_executed_jobs(start=4, end=5)))
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()
        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs, one per day in January 2000.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            date = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': date})
            self.queue.append_executed_job(job)
        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Remove old jobs (older than 2000-01-10, including the one
        # from 2000-01-10).
        tenth = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(tenth)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()
        for _ in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))
        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())
        self.assertTrue(
            entry_to_delete not in tuple(self.queue.get_executed_jobs()))
class TestStorage(FunctionalTestCase):
    """Exercises the queue storage: pending jobs, executed-job history,
    batching and cleanup operations."""

    def setUp(self):
        super(TestStorage, self).setUp()
        self.grant('Manager')
        self.folder = create(Builder('folder').titled(u'Foo'))
        self.queue = IQueue(self.portal)

    def _fill_executed(self, count):
        """Append ``count`` string placeholders as executed jobs."""
        for idx in range(count):
            self.queue.append_executed_job('obj %i' % idx)

    def test_queue_has_no_jobs_by_default(self):
        self.assertEqual(0, self.queue.countJobs())

    def test_queue_has_no_executed_jobs_by_default(self):
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_adding_a_job_to_the_queue(self):
        self.queue.createJob('push', self.folder, 'user')
        # The queue contains one job now.
        self.assertEqual(1, self.queue.countJobs())
        # Make sure the job is there.
        self.assertEqual(
            ['push'], [job.action for job in self.queue.getJobs()])

    def test_queue_after_publishing(self):
        """ Simulate an entire publishing cycle. """
        self.queue.createJob('push', self.folder, 'user')
        self.queue.move_to_worker_queue()
        job = self.queue.popJob()
        self.assertEqual('push', job.action)

        key = self.queue.append_executed_job(job)
        self.assertEqual(1, key)
        self.assertEqual(0, self.queue.countJobs())
        self.assertEqual(1, self.queue.get_executed_jobs_length())

        executed = list(self.queue.get_executed_jobs())[0]
        self.assertEqual(key, executed[0])
        self.assertEqual('push', executed[1].action)

        self.queue.remove_executed_job(key)
        self.assertEqual([], list(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_clear_executed_jobs(self):
        for idx in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))
        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.queue.clear_executed_jobs()
        self.assertEqual((), tuple(self.queue.get_executed_jobs()))
        self.assertEqual(0, self.queue.get_executed_jobs_length())

    def test_get_executed_job_by_key(self):
        self.queue.clear_executed_jobs()
        self._fill_executed(10)

        # Executed-job keys start at 1; key 6 is the sixth entry.
        self.assertEqual('obj 5', self.queue.get_executed_job_by_key(6))
        with self.assertRaises(KeyError):
            self.queue.get_executed_job_by_key(1000)

    def test_get_batch_of_executed_jobs(self):
        self.queue.clear_executed_jobs()
        self._fill_executed(10)
        self.assertEqual(10, self.queue.get_executed_jobs_length())

        self.assertEqual(
            [(1, 'obj 0'), (2, 'obj 1')],
            list(self.queue.get_executed_jobs(start=0, end=2)))
        self.assertEqual(
            [(5, 'obj 4')],
            list(self.queue.get_executed_jobs(start=4, end=5)))
        with self.assertRaises(ValueError):
            list(self.queue.get_executed_jobs(start=1000, end=1001))

    def test_get_batch_of_executed_jobs_on_empty_storage(self):
        self.queue.clear_executed_jobs()
        self.assertEqual(0, self.queue.get_executed_jobs_length())
        self.assertEqual((), tuple(self.queue.get_executed_jobs(0, 0)))
        self.assertEqual((), tuple(self.queue.get_executed_jobs(10, 2)))

    def test_remove_old_executed_jobs(self):
        self.queue.clear_executed_jobs()

        # Execute 19 jobs dated 2000-01-01 through 2000-01-19.
        for day in range(1, 20):
            job = Job('push', self.folder, 'user')
            stamp = datetime(year=2000, month=1, day=day, hour=12)
            job.executed_with_states({'date': stamp})
            self.queue.append_executed_job(job)
        self.assertEqual(19, self.queue.get_executed_jobs_length())

        # Purge everything up to and including 2000-01-10.
        cutoff = datetime(year=2000, month=1, day=10, hour=23)
        self.queue.remove_executed_jobs_older_than(cutoff)
        self.assertEqual(9, self.queue.get_executed_jobs_length())

    def test_remove_jobs_with_filter(self):
        self.queue.clear_executed_jobs()
        for idx in range(10):
            self.queue.append_executed_job(Job('push', self.folder, 'user'))
        self.assertEqual(10, self.queue.get_executed_jobs_length())

        entry_to_delete = tuple(self.queue.get_executed_jobs())[2]
        self.queue.remove_jobs_by_filter(
            lambda *params: params == entry_to_delete)

        self.assertEqual(9, self.queue.get_executed_jobs_length())
        self.assertNotIn(
            entry_to_delete, tuple(self.queue.get_executed_jobs()))
def __call__(self, event, no_response=False, msg=None, *args, **kwargs):
    """ Creates a "rename" job for the current item(s)
    @param args: list of unnamed arguments
    @type args: list
    @param kwargs: dict of named keyword-arguments
    @type kwargs: dict
    @return: Redirect to object`s default view
    """
    if IPreventPublishing.providedBy(self.context):
        return 'prevented'

    if publisher_jobs_are_disabled():
        return 'disabled'

    self.logger = getLogger()

    # is the object blacklisted?
    if IPathBlacklist(self.context).is_blacklisted():
        # Formerly built with "'...' + '...%s' % args", which only worked
        # because % binds tighter than +; a single literal is safer and
        # produces the identical message.
        self.logger.warning(
            'Could not create move job for blacklisted object (%s at %s)'
            % (self.context.Title(),
               '/'.join(self.context.getPhysicalPath())))
        if not no_response:
            return self.request.RESPONSE.redirect('./view')
        return False

    # This View should not be executed at the PloneSiteRoot
    if IPloneSiteRoot.providedBy(self.context):
        raise Exception('Not allowed on PloneSiteRoot')

    # get username
    user = self.context.portal_membership.getAuthenticatedMember()
    username = user.getUserName()

    # create Job
    portal = self.context.portal_url.getPortalObject()
    queue = IQueue(portal)
    additional_data = {'move_data': {
        'newName': event.newName,
        'newParent': get_site_relative_path(event.newParent),
        'newTitle': event.object.Title().decode('utf-8'),
        'oldName': event.oldName,
        'oldParent': get_site_relative_path(event.oldParent),
    }}
    queue.createJob('move', self.context, username,
                    additional_data=additional_data)
    self.logger.debug('Created "%s" Job for "%s" at %s' % (
        'move',
        self.context.Title(),
        '/'.join(self.context.getPhysicalPath()),
    ))

    # status message
    if msg is None:
        msg = _(u'Object move/rename action has been added to the queue.')
    IStatusMessage(self.request).addStatusMessage(msg, type='info')

    if not no_response:
        return self.request.RESPONSE.redirect('./view')
class TestEventhandler(MockTestCase):
    """Tests for the queue-monitor event handler and its notifier
    adapter lookup."""

    layer = MONITOR_FUNCTIONAL_TESTING

    def setUp(self):
        super(TestEventhandler, self).setUp()
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        login(self.portal, TEST_USER_NAME)

        self.folder = self.portal.get(self.portal.invokeFactory(
            'Folder', 'mailing-test', title='Mailing Test Folder'))
        self.queue = IQueue(self.portal)

        mtool = getToolByName(self.portal, 'portal_membership')
        self.user = mtool.getMemberById(TEST_USER_ID)

        self.config = IMonitorConfigurationSchema(self.portal)
        self.config.set_enabled(True)
        self.config.set_receivers_plain('*****@*****.**')

        # Register a mocked notifier adapter so the tests can assert
        # whether (and how often) the monitor sends notifications.
        self.notifier_class = self.stub_interface(IMonitorNotifier)
        self.notifier = self.mock_interface(IMonitorNotifier)
        self.expect(self.notifier_class(self.portal)).result(self.notifier)
        provideAdapter(factory=self.notifier_class,
                       provides=IMonitorNotifier,
                       adapts=(IPloneSiteRoot,))

    def tearDown(self):
        # Remove the mocked adapter again so other tests are unaffected.
        sm = getGlobalSiteManager()
        sm.unregisterAdapter(factory=self.notifier_class,
                             provided=IMonitorNotifier,
                             required=(IPloneSiteRoot,))
        super(TestEventhandler, self).tearDown()

    def stub_current_queue_length(self, amount_of_jobs):
        """Drain the queue, then fill it with exactly the given number
        of push jobs."""
        while self.queue.countJobs() > 0:
            self.queue.popJob()
        for _ in range(amount_of_jobs):
            # Remove acquisition wrapper from "self.user" in order to
            # prevent the following error:
            # TypeError: Can't pickle objects in acquisition wrappers.
            self.queue.createJob('push', self.folder, aq_base(self.user))

    def test_eventhandler_calls_notifier(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)
        event = self.create_dummy(queue=self.queue)

        self.expect(self.notifier(ANY, ANY))
        self.replay()

        invoke_notification(self.portal, event)

    def test_adapter_called_after_queue_execution(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)

        self.expect(self.notifier(ANY, ANY))
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_not_called_when_monitoring_disabled(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)
        self.config.set_enabled(False)

        self.expect(self.notifier(ANY, ANY)).count(0)
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_not_called_when_threshold_not_reached(self):
        self.config.set_threshold(10)
        self.stub_current_queue_length(1)

        self.expect(self.notifier(ARGS, KWARGS)).count(0)
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()

    def test_adapter_is_called_even_when_publishing_disabled(self):
        self.config.set_threshold(2)
        self.stub_current_queue_length(3)
        config = IConfig(self.portal)
        config.set_publishing_enabled(False)

        self.expect(self.notifier(ARGS, KWARGS))
        self.replay()

        self.portal.unrestrictedTraverse('@@publisher.executeQueue')()