def test_pickle_retention(self):
        print self.conf.modules
        #get our modules
        mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(modconf, 'tmp/retention-test.dat')
        try :
            os.unlink(mod.path)
        except :
            pass

        sl = get_instance(mod)
        print "Instance", sl
        #Hack here :(
        sl.properties = {}
        sl.properties['to_queue'] = None
        sl.init()
        l = logger

        #updte the hosts and service in the scheduler in the retentino-file
        sl.hook_save_retention(self.sched)
        
        #Now we change thing
        svc = self.sched.hosts.find_by_name("test_host_0")
        self.assert_(svc.state == 'PENDING')
        print "State", svc.state
        svc.state = 'UP' #was PENDING in the save time
        
        r = sl.hook_load_retention(self.sched)
        self.assert_(r == True)
        
        #search if the host is not changed by the loading thing
        svc2 = self.sched.hosts.find_by_name("test_host_0")
        self.assert_(svc == svc2)
        
        self.assert_(svc.state == 'PENDING')

        #Ok, we can delete the retention file
        os.unlink(mod.path)


        # Now make real loops with notifications
        self.scheduler_loop(10, [[svc, 2, 'CRITICAL | bibi=99%']])
        #updte the hosts and service in the scheduler in the retentino-file
        sl.hook_save_retention(self.sched)

        r = sl.hook_load_retention(self.sched)
        self.assert_(r == True)
    def test_pickle_retention(self):
        print self.conf.modules
        # get our modules
        mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(modconf, "tmp/retention-test.dat")
        try:
            os.unlink(mod.path)
        except:
            pass

        sl = get_instance(mod)
        print "Instance", sl
        # Hack here :(
        sl.properties = {}
        sl.properties["to_queue"] = None
        sl.init()
        l = logger

        # updte the hosts and service in the scheduler in the retentino-file
        sl.hook_save_retention(self.sched)

        # Now we change thing
        svc = self.sched.hosts.find_by_name("test_host_0")
        self.assert_(svc.state == "PENDING")
        print "State", svc.state
        svc.state = "UP"  # was PENDING in the save time

        r = sl.hook_load_retention(self.sched)
        self.assert_(r == True)

        # search if the host is not changed by the loading thing
        svc2 = self.sched.hosts.find_by_name("test_host_0")
        self.assert_(svc == svc2)

        self.assert_(svc.state == "PENDING")

        # Ok, we can delete the retention file
        os.unlink(mod.path)

        # Now make real loops with notifications
        self.scheduler_loop(10, [[svc, 2, "CRITICAL | bibi=99%"]])
        # updte the hosts and service in the scheduler in the retentino-file
        sl.hook_save_retention(self.sched)

        r = sl.hook_load_retention(self.sched)
        self.assert_(r == True)
    def test_pickle_retention(self):
        print self.conf.modules
        now = time.time()
        #get our modules
        mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(modconf, 'tmp/retention-test.dat')
        try :
            os.unlink(mod.path)
        except :
            pass

        sl = get_instance(mod)
        print "Instance", sl
        #Hack here :(
        sl.properties = {}
        sl.properties['to_queue'] = None
        sl.init()
        l = logger

        in_the_future = now + 500
        #Now we change thing
        svc = self.sched.hosts.find_by_name("test_host_0")
        # We want it to go in the future
        svc.next_chk = in_the_future

        # By default in the conf, the active checsk are active
        self.assert_(svc.active_checks_enabled == True)
        # and passive one too
        self.assert_(svc.passive_checks_enabled == True)

        #updte the hosts and service in the scheduler in the retentino-file
        sl.hook_save_retention(self.sched)
        
        self.assert_(svc.state == 'PENDING')
        print "State", svc.state
        svc.state = 'UP' #was PENDING in the save time
        
        # We try to change active state change too
        svc.active_checks_enabled = False
        svc.passive_checks_enabled = False

        # now we try to change it
        svc.next_chk = now - 3000

        r = sl.hook_load_retention(self.sched)
        self.assert_(r == True)
        
        # Now look at checks active or not
        # Should be back as normal values :)
        self.assert_(svc.active_checks_enabled == True)
        # and passive one too
        self.assert_(svc.passive_checks_enabled == True)


        print "F**k after load, will go in", svc.next_chk - now

        # Should be ok, because we load it from retention
        self.assert_(svc.next_chk == in_the_future)
        
        #search if the host is not changed by the loading thing
        svc2 = self.sched.hosts.find_by_name("test_host_0")
        self.assert_(svc == svc2)
        
        self.assert_(svc.state == 'PENDING')

        #Ok, we can delete the retention file
        os.unlink(mod.path)

        # Lie about us in checking or not
        svc.in_checking = False
        diff = svc.next_chk - now
        print "F**k Ok go for a enw scheduling!", diff
        #should be near 500 seconds ahead
        self.assert_(499 < diff < 501)

        # Now we reschedule it, should be our time_to_go
        svc.schedule()
        print "F**k after a reschedule", svc.next_chk - now
        # should be the same value in the future, we want to keep it
        diff = svc.next_chk - now
        self.assert_(499 < diff < 501)

        # Now make real loops with notifications
        self.scheduler_loop(10, [[svc, 2, 'CRITICAL | bibi=99%']])
        #updte the hosts and service in the scheduler in the retentino-file
        save_notified_contacts = svc2.notified_contacts
        print "Save notif contacts", save_notified_contacts
        sl.hook_save_retention(self.sched)

        r = sl.hook_load_retention(self.sched)
        self.assert_(r == True)
        
        print "Notif?", svc2.notified_contacts
        # We should got our contacts, and still the true objects
        self.assert_(len(svc2.notified_contacts) > 0)
        for c in svc2.notified_contacts:
            self.assert_(c in save_notified_contacts)
    def test_pickle_retention(self):
        """Save/load the scheduler retention file and verify that the host
        state, check-enabled flags, next_chk scheduling time and notified
        contacts are all correctly restored from the retention data.
        """
        now = time.time()
        # Get our module instance, pointing at a temp retention file
        mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(
            modconf, 'tmp/retention-test.dat')
        # Remove any stale retention file left over from a previous run;
        # a missing file is fine, anything else should not be hidden.
        try:
            os.unlink(mod.path)
        except OSError:
            pass

        sl = get_instance(mod)
        # Hack here :( the module expects a properties dict with 'to_queue'
        sl.properties = {}
        sl.properties['to_queue'] = None
        sl.init()

        in_the_future = now + 500
        # Now we change things on a host
        svc = self.sched.hosts.find_by_name("test_host_0")
        # We want its next check to go in the future
        svc.next_chk = in_the_future

        # By default in the conf, both the active and passive checks
        # are enabled
        self.assertTrue(svc.active_checks_enabled)
        self.assertTrue(svc.passive_checks_enabled)

        # update the hosts and service in the scheduler in the retention-file
        sl.hook_save_retention(self.sched)

        self.assertEqual(svc.state, 'PENDING')
        svc.state = 'UP'  # was PENDING in the save time

        # We try to change the check-enabled flags too (after the save)
        svc.active_checks_enabled = False
        svc.passive_checks_enabled = False

        # now we try to change the next check time too
        svc.next_chk = now - 3000

        r = sl.hook_load_retention(self.sched)
        self.assertTrue(r)

        # Both the active and passive checks should be back as default
        # values (enabled).
        self.assertTrue(svc.active_checks_enabled)
        self.assertTrue(svc.passive_checks_enabled)

        # Should be ok, because we load it from retention
        self.assertEqual(svc.next_chk, in_the_future)

        # search if the host is not changed by the loading thing
        svc2 = self.sched.hosts.find_by_name("test_host_0")
        self.assertEqual(svc, svc2)
        self.assertEqual(svc.state, 'PENDING')

        # Ok, we can delete the retention file
        os.unlink(mod.path)

        # Lie about us in checking or not
        svc.in_checking = False
        diff = svc.next_chk - now
        # should be near 500 seconds ahead
        self.assertTrue(499 < diff < 501)

        # Now we reschedule it, should keep our time_to_go
        svc.schedule()
        # should be the same value in the future, we want to keep it
        diff = svc.next_chk - now
        self.assertTrue(499 < diff < 501)

        # Now make real loops with notifications
        self.scheduler_loop(10, [[svc, 2, 'CRITICAL | bibi=99%']])
        # update the hosts and service in the scheduler in the retention-file
        save_notified_contacts = svc2.notified_contacts
        sl.hook_save_retention(self.sched)

        r = sl.hook_load_retention(self.sched)
        self.assertTrue(r)

        # We should have got our contacts back, and still the true objects
        self.assertTrue(len(svc2.notified_contacts) > 0)
        for c in svc2.notified_contacts:
            self.assertIn(c, save_notified_contacts)
Example #5
0
    def test_pickle_retention(self):
        """Save/load the scheduler retention file and verify that the host
        state, check-enabled flags, next_chk scheduling time and notified
        contacts are all correctly restored from the retention data.
        """
        now = time.time()
        # Get our module instance, pointing at a temp retention file
        mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(
            modconf, 'tmp/retention-test.dat')
        # Remove any stale retention file left over from a previous run;
        # a missing file is fine, anything else should not be hidden.
        try:
            os.unlink(mod.path)
        except OSError:
            pass

        sl = get_instance(mod)
        # Hack here :( the module expects a properties dict with 'to_queue'
        sl.properties = {}
        sl.properties['to_queue'] = None
        sl.init()

        in_the_future = now + 500
        # Now we change things on a host
        svc = self.sched.hosts.find_by_name("test_host_0")
        # We want its next check to go in the future
        svc.next_chk = in_the_future

        # By default in the conf, both the active and passive checks
        # are enabled
        self.assertTrue(svc.active_checks_enabled)
        self.assertTrue(svc.passive_checks_enabled)

        # update the hosts and service in the scheduler in the retention-file
        sl.hook_save_retention(self.sched)

        self.assertEqual(svc.state, 'PENDING')
        svc.state = 'UP'  # was PENDING in the save time

        # We try to change the check-enabled flags too (after the save)
        svc.active_checks_enabled = False
        svc.passive_checks_enabled = False

        # now we try to change the next check time too
        svc.next_chk = now - 3000

        r = sl.hook_load_retention(self.sched)
        self.assertTrue(r)

        # Both the active and passive checks should be back as default
        # values (enabled).
        self.assertTrue(svc.active_checks_enabled)
        self.assertTrue(svc.passive_checks_enabled)

        # Should be ok, because we load it from retention
        self.assertEqual(svc.next_chk, in_the_future)

        # search if the host is not changed by the loading thing
        svc2 = self.sched.hosts.find_by_name("test_host_0")
        self.assertEqual(svc, svc2)
        self.assertEqual(svc.state, 'PENDING')

        # Ok, we can delete the retention file
        os.unlink(mod.path)

        # Lie about us in checking or not
        svc.in_checking = False
        diff = svc.next_chk - now
        # should be near 500 seconds ahead
        self.assertTrue(499 < diff < 501)

        # Now we reschedule it, should keep our time_to_go
        svc.schedule()
        # should be the same value in the future, we want to keep it
        diff = svc.next_chk - now
        self.assertTrue(499 < diff < 501)

        # Now make real loops with notifications
        self.scheduler_loop(10, [[svc, 2, 'CRITICAL | bibi=99%']])
        # update the hosts and service in the scheduler in the retention-file
        save_notified_contacts = svc2.notified_contacts
        sl.hook_save_retention(self.sched)

        r = sl.hook_load_retention(self.sched)
        self.assertTrue(r)

        # We should have got our contacts back, and still the true objects
        self.assertTrue(len(svc2.notified_contacts) > 0)
        for c in svc2.notified_contacts:
            self.assertIn(c, save_notified_contacts)