Example No. 1
def send_notifications():
    notifications = Notification.objects.filter(sent_at=None, send=True).order_by('-created_at')

    for notification in notifications:
        if not notification.send:
            logging.info("Auto-send is disabled for notification %s." % notification.id)
            continue

        not_key = CHECK_NOTIFICATION_KEY % notification.id
        if memcache.get(not_key, False):
            # this means that the check is already running
            logging.critical("Notification id %s is already being sent" % notification.id)
            continue

        request = HttpRequest()
        request.META['HTTP_X_CELERY_CRON'] = 'true'
        memcache.set(not_key, notification, CELERY_CACHE_TIMEOUT)
        send_notification_task.apply_async((request, notification.id))

    return True


#class CheckNotifications(Job):
#    run_every = 120 #seconds
#
#
#    def job(self):
#        check_notifications(HttpRequest())

#cronScheduler.register(CheckPassiveHosts)
#cronScheduler.register(CheckNotifications)
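Example No. 1 relies on a cache-key template (CHECK_NOTIFICATION_KEY), a lock timeout (CELERY_CACHE_TIMEOUT) and a worker task (send_notification_task) defined elsewhere in the project. The sketch below is only an assumption of what the worker side might look like, reusing the same Notification model, memcache wrapper and datetime module the examples already use; the key format, timeout value and task body are illustrative, not the original code.

# Assumed supporting constants -- the real values live elsewhere in the project.
CHECK_NOTIFICATION_KEY = 'check-notification-%s'
CELERY_CACHE_TIMEOUT = 5 * 60  # seconds the "already running" lock is held

# Hypothetical worker body: deliver the notification, then release the lock
# so the cron job can reschedule it if delivery failed.
def _send_notification(notification_id):
    notification = Notification.objects.get(id=notification_id)
    try:
        # ... build and deliver the e-mail / message here ...
        notification.sent_at = datetime.datetime.now()
        notification.save()
    finally:
        memcache.delete(CHECK_NOTIFICATION_KEY % notification_id)

Releasing the key in a finally block means a failed delivery does not keep the notification locked for the full CELERY_CACHE_TIMEOUT.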
Example No. 2
def check_passive_url_monitors():
    request = HttpRequest()
    request.META["HTTP_X_CELERY_CRON"] = "true"

    modules = Module.objects.filter(module_type="url_check")

    sending_modules = []
    for i in xrange(len(modules)):
        module = modules[i]

        passive_key = CHECK_HOST_KEY % module.id
        if memcache.get(passive_key, False):
            # this means that the check is still running for this module
            logging.critical("Module id %s is still running" % module.id)
            continue

        memcache.set(passive_key, module, CELERY_CACHE_TIMEOUT)

        sending_modules.append(module.id)
        if (i != 0 or len(modules) == 1) and (
            ((i % settings.PASSIVE_URL_CHECK_BATCH) == 0) or (i == (len(modules) - 1))
        ):
            # Don't dispatch on the first iteration, unless there is only one module to monitor.
            # Dispatch in batch sizes defined by settings.PASSIVE_URL_CHECK_BATCH, and dispatch
            # whatever remains at the end of the loop even if the batch size isn't met.
            check_passive_url_task.apply_async((request, sending_modules))
            sending_modules = []

    return True
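The dispatch condition above is easier to follow with concrete numbers. A minimal standalone sketch, assuming a batch size of 3 (settings.PASSIVE_URL_CHECK_BATCH) and seven modules; both values are made up for illustration:

# Simulate the batching decision from check_passive_url_monitors().
BATCH = 3
module_ids = range(7)  # stand-ins for module.id values

batches, current = [], []
for i, module_id in enumerate(module_ids):
    current.append(module_id)
    if (i != 0 or len(module_ids) == 1) and (
        ((i % BATCH) == 0) or (i == (len(module_ids) - 1))
    ):
        batches.append(current)
        current = []

print(batches)  # [[0, 1, 2, 3], [4, 5, 6]]

Because the i != 0 guard skips the very first iteration, the opening batch carries one more id than the configured batch size; later batches hold at most BATCH ids, and whatever remains is flushed on the final iteration.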
Example No. 3
    def get_day_status(self, day):
        day_status = memcache.get(MODULE_DAY_STATUS_KEY % (day.month, day.day, self.id), False)

        if day_status:
            return day_status

        day_status = (DailyModuleStatus.objects
                      .filter(module=self)
                      .filter(created_at__gte=datetime.datetime(day.year, day.month, day.day, 0, 0, 0))
                      .filter(created_at__lte=datetime.datetime(day.year, day.month, day.day, 23, 59, 59, 999999))
                      .filter(site_config=self.site_config))

        if day_status:
            memcache.set(MODULE_DAY_STATUS_KEY % (day.month, day.day, self.id), day_status[0])
            return day_status[0]

        day_status = DailyModuleStatus()
        day_status.created_at = day
        day_status.updated_at = day
        day_status.module = self
        day_status.statuses = self.status
        day_status.status = self.status
        day_status.site_config = self.site_config
        day_status.save()

        memcache.set(MODULE_DAY_STATUS_KEY % (day.month, day.day, self.id), day_status)
        return day_status
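get_day_status() caches one DailyModuleStatus per (month, day, module id) key; note that the key template carries no year component. A hypothetical caller, assuming the method lives on the same Module model used in the other examples:

import datetime

# Illustration only: collect the last seven days of statuses for a module.
def last_week_statuses(module):
    today = datetime.date.today()
    return [module.get_day_status(today - datetime.timedelta(days=offset))
            for offset in xrange(7)]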
Example No. 4
def check_passive_url_monitors():
    modules = Module.objects.filter(module_type='url_check')

    request = HttpRequest()
    request.META['HTTP_X_CELERY_CRON'] = 'true'

    for module in modules:
        passive_key = CHECK_HOST_KEY % module.id
        if memcache.get(passive_key, False):
            # this means that the check is already running
            logging.critical("Module id %s is already running" % module.id)
            continue
        memcache.set(passive_key, module, CELERY_CACHE_TIMEOUT)
        check_passive_url_task.apply_async((request, module.id))
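Both passive-check variants stamp the request with an HTTP_X_CELERY_CRON header before handing it to the task. A hypothetical guard on the receiving view could check that header and reject anything that did not come through the cron path; the decorator below is an assumption about how the header might be consumed, not code from this project:

from functools import wraps

from django.http import HttpResponseForbidden

def cron_only(view):
    # Reject requests that were not marked by the cron/task machinery above.
    @wraps(view)
    def wrapper(request, *args, **kwargs):
        if request.META.get('HTTP_X_CELERY_CRON') != 'true':
            return HttpResponseForbidden('cron-only endpoint')
        return view(request, *args, **kwargs)
    return wrapper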
Example No. 5
 def test_notification_load(self):
     # TODO5: Expose these tests to have them run through the production server (admin only, of course)
     
     # This is the number of notifications the system should handle per minute.
     # 8400 per minute is equivalent to 140 notifications per second -- this should
     # be able to run on virtually any robust setup.
     # Outside App Engine this can scale even further, and even on App Engine it can
     # scale further if we make more than one cron request per minute (entirely possible).
     test_size = 50
     events = []
     notifications = []
     
     for i in xrange(test_size):
         events.append(self._create_open_event())
         
         # Setting the event back_at should create a notification 
         events[-1].back_at = datetime.datetime.now()
         events[-1].save()
         
         notifications.append(Notification.objects.get(notification_type='event',
                                                       target_id=events[-1].id,
                                                       sent_at=None))
     
     self.assertEqual(len(events), test_size)
     self.assertEqual(len(notifications), test_size)
     
     self._login_as_admin()
     
     # Now, we need to time the execution of the view.
     # If greater than 30 seconds, we're in danger of timeout
     start = datetime.datetime.now()
     response = self.client.get(reverse('check_notifications'))
     end = datetime.datetime.now()
     
     logging.critical('>>> Time to process %s: %s seconds' % (test_size, (end - start).seconds))
     
     # Check if it returned a 200. First indication that it succeeded
     self.assertEqual(response.status_code, 200)
     
     # Check that the time it took to process everything is less than 30 seconds
     self.assertTrue((end - start).seconds < 30)
     
     # Check if all tasks were scheduled properly
     for notification_event in notifications:
         not_key = CHECK_NOTIFICATION_KEY % notification_event.id
         self.assertTrue(memcache.get(not_key, False))
Example No. 6
def send_notifications():
    notifications = Notification.objects.filter(sent_at=None, send=True).order_by("-created_at")

    for notification in notifications:
        if not notification.send:
            logging.info("Auto-send is disabled for notification %s." % notification.id)
            continue

        not_key = CHECK_NOTIFICATION_KEY % notification.id
        if memcache.get(not_key, False):
            # this means that the check is already running
            logging.critical("Module id %s is already running" % notification.id)
            continue

        request = HttpRequest()
        request.META["HTTP_X_CELERY_CRON"] = "true"
        memcache.set(not_key, notification, CELERY_CACHE_TIMEOUT)
        send_notification_task.apply_async((request, notification.id))

    return True
Example No. 7
 def _test_subscription(self, email, notification_type, one_time):
     # TODO3: Gotta confirm the cron job can handle a huge number of notifications at a time (1000)
     # TODO4: Gotta confirm the time necessary to send one notification (task execution time) with a large number of recipients (100)
     # TODO5: Expose these tests to have them run through the production server (admin only, of course)
     
     response = None
     event = self._create_open_event()
     target_id = None
     
     if notification_type == 'system':
         response = self.client.post(reverse('system_subscribe'),
                                     {'email':email,
                                      'one_time':one_time})
     elif notification_type == 'event':
         target_id = event.id
         response = self.client.post(reverse('event_subscribe',
                                             kwargs={'event_id':target_id}),
                                     {'email':email,
                                      'one_time':one_time})
     elif notification_type == 'module':
         target_id = event.module.id
         response = self.client.post(reverse('module_subscribe',
                                             kwargs={'module_id':target_id}),
                                     {'email':email,
                                      'one_time':one_time})
     else:
         raise Exception('Unknown notification_type: %s' % notification_type)
     
     self.assertEqual(response.status_code, 200)
     self.assertTrue(re.findall('.*?(successfuly subscribed).*?', response.content))
     
     # Check that the subscriber exists and isn't duplicated
     subscriber = Subscriber.objects.get(email=email)
     
     notification = NotifyOnEvent.objects.get(notification_type=notification_type,
                                              target_id=target_id, one_time=one_time,
                                              site_config=self.site_config)
     self.assertTrue(subscriber.email in notification.list_emails)
     
     # Now, test that once the system is back user is notified
     event.back_at = datetime.datetime.now()
     event.save()
     
     notification_event = Notification.objects.get(notification_type='event',
                                                   target_id=event.id,
                                                   sent_at=None,
                                                   site_config=self.site_config)
     
     self._login_as_admin()
     response = self.client.get(reverse('check_notifications'))
     self.assertEqual(response.status_code, 200)
     
     # Check if task was scheduled
     not_key = CHECK_NOTIFICATION_KEY % notification_event.id
     self.assertTrue(memcache.get(not_key, False))
     
     # But since the stub won't work locally, we ought to call the task ourselves
     response = self.client.get(reverse('send_notification_task',
                                        kwargs={'notification_id':notification_event.id}))
     self.assertEqual(response.status_code, 200)
     
     notification = NotifyOnEvent.objects.get(notification_type=notification_type,
                                              one_time=one_time, target_id=target_id,
                                              site_config=self.site_config)
     
     if notification.last_notified is None:
         logging.critical('<<< Failing notification: %s' % notification)
     
     self.assertFalse(notification.last_notified is None)
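A few hypothetical callers of the helper above, one per supported notification_type (the e-mail address is a placeholder):

 def test_system_subscription(self):
     self._test_subscription('user@example.com', 'system', one_time=False)

 def test_event_subscription(self):
     self._test_subscription('user@example.com', 'event', one_time=True)

 def test_module_subscription(self):
     self._test_subscription('user@example.com', 'module', one_time=False)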