示例#1
0
    def init_livestatus(self, modconf=None, dbmodconf=None, needcache=False):
        """Bring up a LiveStatus broker backed by a sqlite log store.

        :param modconf:   optional pre-built LiveStatus Module config; when
                          None a default one is created (unix socket 'live',
                          per-process port, pnp path under tmp/)
        :param dbmodconf: optional LogStore Module config; when None a
                          sqlite store writing to self.livelogs is used
        :param needcache: when False (the default) the query cache is
                          disabled so tests see uncached results
        """
        # Per-test database file so parallel test runs do not collide.
        self.livelogs = 'tmp/livelogs.db' + self.testid

        if modconf is None:
            modconf = Module({'module_name': 'LiveStatus',
                'module_type': 'livestatus',
                'port': str(50000 + os.getpid()),
                'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
                'host': '127.0.0.1',
                'socket': 'live',
                'name': 'test', #?
            })

        if dbmodconf is None:
            dbmodconf = Module({'module_name': 'LogStore',
                'module_type': 'logstore_sqlite',
                'use_aggressive_sql': "0",
                'database_file': self.livelogs,
                'archive_path': os.path.join(os.path.dirname(self.livelogs), 'archives'),
            })

        modconf.modules = [dbmodconf]
        self.livestatus_broker = LiveStatus_broker(modconf)
        self.livestatus_broker.create_queues()

        # The following mirrors what livestatus_broker.main() does at
        # daemon start, minus the parts that would fork or daemonize.
        #--- livestatus_broker.main
        self.livestatus_broker.log = logger
        # this seems to damage the logger so that the scheduler can't use it
        #self.livestatus_broker.log.load_obj(self.livestatus_broker)
        self.livestatus_broker.debug_output = []
        self.livestatus_broker.modules_manager = ModulesManager('livestatus', modules_dir, [])
        self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules)
        # We can now output some previously silenced debug output
        self.livestatus_broker.do_load_modules()
        # Hand the broker to the first logstore module instance so it can
        # attach itself; the loop stops after the first match.
        for inst in self.livestatus_broker.modules_manager.instances:
            if inst.properties["type"].startswith('logstore'):
                f = getattr(inst, 'load', None)
                if f and callable(f):
                    f(self.livestatus_broker)  # !!! NOT self here !!!!
                break
        for s in self.livestatus_broker.debug_output:
            print "errors during load", s
        del self.livestatus_broker.debug_output
        self.livestatus_broker.rg = LiveStatusRegenerator()
        self.livestatus_broker.datamgr = datamgr
        datamgr.load(self.livestatus_broker.rg)
        self.livestatus_broker.query_cache = LiveStatusQueryCache()
        if not needcache:
            self.livestatus_broker.query_cache.disable()
        self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
        #--- livestatus_broker.main

        self.livestatus_broker.init()
        self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)

        #--- livestatus_broker.do_main
        self.livestatus_broker.db.open()
        # Only the sqlite store has this table-preparation hook.
        if hasattr(self.livestatus_broker.db, 'prepare_log_db_table'):
            self.livestatus_broker.db.prepare_log_db_table()
示例#2
0
    def init_livestatus(self):
        """Bring up a LiveStatus broker backed by a MongoDB log store.

        Mirrors livestatus_broker.main()/do_main() without daemonizing;
        assumes a mongod is reachable at 127.0.0.1:27017.
        """
        # Database name, not a file path, for the mongo log store.
        self.livelogs = "bigbigbig"
        modconf = Module({'module_name': 'LiveStatus',
            'module_type': 'livestatus',
            'port': str(50000 + os.getpid()),
            'pnp_path': 'tmp/livestatus_broker.pnp_path_test' + self.testid,
            'host': '127.0.0.1',
            'socket': 'live',
            'name': 'test', #?
        })

        dbmodconf = Module({'module_name': 'LogStore',
            'module_type': 'logstore_mongodb',
            'database': 'bigbigbig',
            'mongodb_uri': "mongodb://127.0.0.1:27017",
            #'mongodb_uri': "mongodb://10.0.12.50:27017,10.0.12.51:27017",
        #    'replica_set': 'livestatus',
            'max_logs_age': '7',
        })
        modconf.modules = [dbmodconf]
        self.livestatus_broker = LiveStatus_broker(modconf)
        self.livestatus_broker.create_queues()

        #--- livestatus_broker.main
        self.livestatus_broker.log = logger
        # this seems to damage the logger so that the scheduler can't use it
        #self.livestatus_broker.log.load_obj(self.livestatus_broker)
        self.livestatus_broker.debug_output = []
        self.livestatus_broker.modules_manager = ModulesManager('livestatus', self.livestatus_broker.find_modules_path(), [])
        self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules)
        # We can now output some previously silenced debug output
        self.livestatus_broker.do_load_modules()
        # Attach the broker to the first logstore module instance only.
        for inst in self.livestatus_broker.modules_manager.instances:
            if inst.properties["type"].startswith('logstore'):
                f = getattr(inst, 'load', None)
                if f and callable(f):
                    f(self.livestatus_broker)  # !!! NOT self here !!!!
                break
        for s in self.livestatus_broker.debug_output:
            print "errors during load", s
        del self.livestatus_broker.debug_output
        self.livestatus_broker.rg = LiveStatusRegenerator()
        self.livestatus_broker.datamgr = datamgr
        datamgr.load(self.livestatus_broker.rg)
        self.livestatus_broker.query_cache = LiveStatusQueryCache()
        # Cache always off here: these tests want uncached query results.
        self.livestatus_broker.query_cache.disable()
        self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
        #--- livestatus_broker.main

        self.livestatus_broker.init()
        self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)

        #--- livestatus_broker.do_main
        self.livestatus_broker.db.open()
示例#3
0
    def setUp(self):
        """Load the big 5r/100h/2000s configuration, start livestatus with
        a MongoDB log store, and feed the initial broks to the broker."""
        super(TestConfigBig, self).setUp()
        setup_start = time.time()
        self.setup_with_file('etc/shinken_5r_100h_2000s.cfg')
        Comment.id = 1
        self.testid = str(os.getpid() + random.randint(1, 1000))

        # Per-test mongo database so runs do not step on each other.
        logstore_options = {
            'module_name': 'LogStore',
            'module_type': 'logstore_mongodb',
            'mongodb_uri': self.mongo_db_uri,
            'database': 'testtest' + self.testid,
        }
        self.init_livestatus(dbmodconf=Module(logstore_options))

        print("Cleaning old broks?")
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        self.update_broker()
        print("************* Overall Setup:", time.time() - setup_start)

        # add use_aggressive_host_checking so we can mix exit codes 1 and 2
        # but still get DOWN state
        host = self.sched.hosts.find_by_name("test_host_000")
        host.__class__.use_aggressive_host_checking = 1
示例#4
0
    def test_init(self):
        """InfluxdbBroker must coerce every string option from the Module
        configuration into its typed attribute counterpart.

        Fix: the config passed masked credentials ('******') while the
        assertions expected 'testuser'/'testpassword', so the test could
        never pass; the config now matches the assertions.
        """
        modconf = Module(
            {
                'module_name': 'influxdbBroker',
                'module_type': 'influxdbBroker',
                'host': 'testhost',
                'port': '1111',
                'user': 'testuser',
                'password': 'testpassword',
                'database': 'testdatabase',
                'use_udp': '1',
                'udp_port': '2222',
                'tick_limit': '3333',
            }
        )

        broker = InfluxdbBroker(modconf)

        # Strings must have been converted to int/bool where appropriate.
        self.assertEqual(broker.host, 'testhost')
        self.assertEqual(broker.port, 1111)
        self.assertEqual(broker.user, 'testuser')
        self.assertEqual(broker.password, 'testpassword')
        self.assertEqual(broker.database, 'testdatabase')
        self.assertEqual(broker.use_udp, True)
        self.assertEqual(broker.udp_port, 2222)
        self.assertEqual(broker.tick_limit, 3333)
    def setUp(self):
        """Build a minimal RiemannBroker, force UDP transport, and init it."""
        conf = {'module_name': 'riemannBroker', 'module_type': 'riemannBroker'}
        self.basic_modconf = Module(conf)
        broker = RiemannBroker(self.basic_modconf)
        broker.use_udp = True
        broker.init()
        self.broker = broker
示例#6
0
    def log_db_do_archive(self):
        """
        In order to limit the datafile's sizes we flush logs dating from
        before today/00:00 to their own datafiles.

        For every historic day still present in the main database, the
        matching rows are moved into a per-day archive database, then the
        main file is VACUUMed to reclaim the freed space.  No-op when the
        store is read-only.
        """
        if self.read_only:
            return
        # Ensure the archive directory exists.  Catch only OSError (the
        # "missing path" family) instead of the previous bare except, which
        # silently swallowed unrelated failures too.
        try:
            os.stat(self.archive_path)
        except OSError:
            os.mkdir(self.archive_path)
        for day in self.log_db_historic_contents():
            dayobj, handle, archive, starttime, stoptime = day
            if handle == "main":
                # Skip archiving of today's contents
                continue
            if not os.path.exists(archive):
                # Create an empty datafile with the logs table
                #tmpconn = LiveStatusDb(archive, None, 0)
                #tmpconn.prepare_log_db_table()
                #tmpconn.close()

                dbmodconf = Module({
                    'module_name': 'LogStore',
                    'module_type': 'logstore_sqlite',
                    'use_aggressive_sql': '0',
                    'database_file': archive,
                    'max_logs_age': '0',
                })
                # Opening the store creates the file and its logs table.
                tmpconn = LiveStatusLogStoreSqlite(dbmodconf)
                tmpconn.open()
                tmpconn.close()

            self.commit()
            logger.info(
                "[Logstore SQLite] move logs from %s - %s to database %s" %
                (time.asctime(time.localtime(starttime)),
                 time.asctime(time.localtime(stoptime)), archive))
            # Copy the day's rows into the attached archive db, then delete
            # them from the main db; committed as one unit before DETACH.
            cmd = "ATTACH DATABASE '%s' AS %s" % (archive, handle)
            self.execute_attach(cmd)
            cmd = "INSERT INTO %s.logs SELECT * FROM logs WHERE time >= %d AND time < %d" % (
                handle, starttime, stoptime)
            self.execute(cmd)
            cmd = "DELETE FROM logs WHERE time >= %d AND time < %d" % (
                starttime, stoptime)
            self.execute(cmd)
            self.commit()
            cmd = "DETACH DATABASE %s" % handle
            self.execute(cmd)
            # This is necessary to shrink the database file
            try:
                self.execute('VACUUM')
            except sqlite3.DatabaseError:
                # Keep going: a corrupted main db should not abort archiving
                # of the remaining days.
                logger.error(
                    "[Logstore SQLite] WARNING: it seems your database is corrupted. Please recreate it"
                )
            self.commit()
示例#7
0
 def test_max_logs_age(self):
     """A max_logs_age of '7y' must be expanded to 7 * 365 days."""
     if not has_pymongo:
         return
     options = {
         'module_name': 'LogStore',
         'module_type': 'logstore_mongodb',
         'database': 'bigbigbig',
         'mongodb_uri': "mongodb://127.0.0.1:27017",
         'max_logs_age': '7y',
     }
     dbmodconf = Module(options)
     self.assert_(dbmodconf.max_logs_age == 7 * 365)
示例#8
0
    def test_max_logs_age(self):
        """The mongo log store must expand '7y' into 7 * 365 days."""
        options = {
            'module_name': 'LogStore',
            'module_type': 'logstore_mongodb',
            'database': 'bigbigbig',
            'mongodb_uri': self.mongo_db_uri,
            'max_logs_age': '7y',
        }
        dbmodconf = Module(options)

        print(dbmodconf.max_logs_age)
        store = LiveStatusLogStoreMongoDB(dbmodconf)
        self.assertEqual(7 * 365, store.max_logs_age)
示例#9
0
 def setUp(self):
     """Switch to real wall-clock time and start livestatus on a random
     TCP port for the WAIT-query tests."""
     super(TestFull_WaitQuery, self).setUp()
     time_hacker.set_real_time()
     self.testid = str(os.getpid() + random.randint(1, 1000))
     options = {
         'module_name': 'LiveStatus',
         'module_type': 'livestatus',
         'port': str(random.randint(50000, 65534)),
         'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
         'host': '127.0.0.1',
         'name': 'test',
         'modules': ''
     }
     self.modconf = Module(options)
     self.init_livestatus(self.modconf)
示例#10
0
    def test_init_default_tcp_udp(self):
        """With both transports enabled and nothing else configured, the
        carbon module must fall back to its default endpoints."""
        options = {
            'module_name': 'carbon',
            'module_type': 'carbon',
            'use_tcp': 'True',
            'use_udp': 'True'
        }
        arbiter = get_instance(Module(options))

        tcp, udp = arbiter.tcp, arbiter.udp
        self.assertEqual(tcp['host'], "0.0.0.0")
        self.assertEqual(tcp['port'], 2003)
        self.assertEqual(udp['host'], "239.192.74.66")
        self.assertEqual(udp['port'], 2003)
        self.assertEqual(udp['multicast'], False)
示例#11
0
    def test_max_logs_age(self):
        """LiveStatusLogStoreMongoDB must expand '7y' to days (7 * 365)."""
        # Skip silently when pymongo is not installed.
        if not has_pymongo:
            return
        dbmodconf = Module({
            'module_name': 'LogStore',
            'module_type': 'logstore_mongodb',
            'database': 'bigbigbig',
            'mongodb_uri': "mongodb://127.0.0.1:27017",
            'max_logs_age': '7y',
        })

        print dbmodconf.max_logs_age
        livestatus_broker = LiveStatusLogStoreMongoDB(dbmodconf)
        self.assert_(livestatus_broker.max_logs_age == 7 * 365)
    def test_init(self):
        """RiemannBroker must coerce its string options into typed
        attributes (int port/tick_limit, bool use_udp)."""
        options = {
            'module_name': 'influxdbBroker',
            'module_type': 'influxdbBroker',
            'host': 'testhost',
            'port': '1111',
            'tick_limit': '3333',
            'use_udp': '1'
        }
        broker = RiemannBroker(Module(options))

        self.assertEqual(broker.host, 'testhost')
        self.assertEqual(broker.port, 1111)
        self.assertEqual(broker.tick_limit, 3333)
        self.assertEqual(broker.use_udp, True)
示例#13
0
    def test_01_default(self):
        """With a default configuration, a plain 'GET hosts' query over the
        TCP socket must be answered."""
        options = {
            'module_name': 'LiveStatus',
            'module_type': 'livestatus',
            'port': str(random.randint(50000, 65534)),
            'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
            'host': '127.0.0.1',
            'name': 'test',
            'modules': ''
        }
        modconf = Module(options)
        self.init_livestatus(modconf)

        # The connection must be accepted and the query answered.
        answered = self.query_livestatus(modconf.host, int(modconf.port),
                                         "GET hosts\n\n")
        self.assertTrue(answered)
示例#14
0
    def test_03_dont_allow_localhost(self):
        """With allowed_hosts restricted to 192.168.0.1, a query coming
        from 127.0.0.1 must be rejected."""
        options = {
            'module_name': 'LiveStatus',
            'module_type': 'livestatus',
            'port': str(random.randint(50000, 65534)),
            'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
            'host': '127.0.0.1',
            'name': 'test',
            'modules': '',
            'allowed_hosts': '192.168.0.1'
        }
        modconf = Module(options)
        self.init_livestatus(modconf)

        # The connection attempt must be refused.
        answered = self.query_livestatus(modconf.host, int(modconf.port),
                                         "GET hosts\n\n")
        self.assertFalse(answered)
    def init_livestatus(self):
        """Spin up a bare LiveStatus_broker (type 'livestatus2') with its
        queues and LiveStatus facade, without loading any log store."""
        conf = Module({
            'module_name': 'LiveStatus2',
            'module_type': 'livestatus2',
            'port': str(50000 + os.getpid()),
            'host': '127.0.0.1',
            'socket': 'live',
            'name': 'test'
        })

        broker = LiveStatus_broker(conf)
        self.livestatus_broker = broker
        broker.log = logger
        broker.debug_output = []
        broker.create_queues()
        broker.init()
        broker.livestatus = LiveStatus(
            datamgr=broker.datamgr,
            return_queue=broker.from_q)
示例#16
0
    def test_init(self):
        """Every explicitly configured carbon option must be parsed into
        the matching typed attribute (ints, bools, split plugin list)."""
        options = {
            'module_name': 'carbon',
            'module_type': 'carbon',
            'use_tcp': 'True',
            'host_tcp': 'testhost',
            'port_tcp': '1111',
            'use_udp': 'True',
            'host_udp': 'testhost2',
            'port_udp': '1112',
            'multicast': 'False',
            'interval': '25',
            'grouped_collectd_plugins': 'disk,cpu,df'
        }
        arbiter = get_instance(Module(options))

        tcp, udp = arbiter.tcp, arbiter.udp
        self.assertEqual(tcp['host'], "testhost")
        self.assertEqual(tcp['port'], 1111)
        self.assertEqual(udp['host'], "testhost2")
        self.assertEqual(udp['port'], 1112)
        self.assertEqual(udp['multicast'], False)
        self.assertEqual(arbiter.interval, 25)
        # The comma-separated plugin list must be split into a list.
        self.assertEqual(arbiter.grouped_collectd_plugins,
                         ['disk', 'cpu', 'df'])
示例#17
0
    def init_livestatus(self):
        """Bring up a LiveStatus broker backed by a MongoDB log store.

        Mirrors livestatus_broker.main()/do_main() without daemonizing;
        uses a per-test mongo database name ("testtest" + testid).
        """
        self.livelogs = "tmp/livelogs.db" + self.testid
        modconf = Module(
            {
                "module_name": "LiveStatus",
                "module_type": "livestatus",
                "port": str(50000 + os.getpid()),
                "pnp_path": "tmp/pnp4nagios_test" + self.testid,
                "host": "127.0.0.1",
                "socket": "live",
                "name": "test",  # ?
            }
        )

        dbmodconf = Module(
            {
                "module_name": "LogStore",
                "module_type": "logstore_mongodb",
                "mongodb_uri": "mongodb://127.0.0.1:27017",
                "database": "testtest" + self.testid,
            }
        )
        modconf.modules = [dbmodconf]
        self.livestatus_broker = LiveStatus_broker(modconf)
        self.livestatus_broker.create_queues()

        # --- livestatus_broker.main
        self.livestatus_broker.log = logger
        # this seems to damage the logger so that the scheduler can't use it
        # self.livestatus_broker.log.load_obj(self.livestatus_broker)
        self.livestatus_broker.debug_output = []
        self.livestatus_broker.modules_manager = ModulesManager(
            "livestatus", self.livestatus_broker.find_modules_path(), []
        )
        self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules)
        # We can now output some previously silenced debug output
        self.livestatus_broker.do_load_modules()
        # Attach the broker to the first logstore module instance only.
        for inst in self.livestatus_broker.modules_manager.instances:
            if inst.properties["type"].startswith("logstore"):
                f = getattr(inst, "load", None)
                if f and callable(f):
                    f(self.livestatus_broker)  # !!! NOT self here !!!!
                break
        for s in self.livestatus_broker.debug_output:
            print "errors during load", s
        del self.livestatus_broker.debug_output
        self.livestatus_broker.rg = LiveStatusRegenerator()
        self.livestatus_broker.datamgr = datamgr
        datamgr.load(self.livestatus_broker.rg)
        self.livestatus_broker.query_cache = LiveStatusQueryCache()
        # Cache always off: these tests want uncached query results.
        self.livestatus_broker.query_cache.disable()
        self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache)
        # --- livestatus_broker.main

        self.livestatus_broker.init()
        for i in self.livestatus_broker.modules_manager.instances:
            print "instance", i
        self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
        self.livestatus_broker.livestatus = LiveStatus(
            self.livestatus_broker.datamgr,
            self.livestatus_broker.query_cache,
            self.livestatus_broker.db,
            self.livestatus_broker.pnp_path,
            self.livestatus_broker.from_q,
        )

        # --- livestatus_broker.do_main
        self.livestatus_broker.db.open()
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.

"""
Test Mongodb retention.
"""

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module

from shinken.modulesctx import modulesctx
mongodb_retention = modulesctx.get_module('retention_mongodb')
get_instance = mongodb_retention.get_instance

# Module-level configuration shared by the tests below: points the
# retention module at a local MongoDB instance and database 'test'.
modconf = Module()
modconf.module_name = "MongodbRetention"
modconf.uri = 'mongodb://127.0.0.1:27017'
modconf.database = 'test'
modconf.module_type = mongodb_retention.properties['type']
modconf.properties = mongodb_retention.properties.copy()


class TestMongodbRetention(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_mongodb_retention(self):
        """Smoke-test: the scheduler-side retention module can be built
        with the shared modconf against host 'localhost', db 'test'."""
        # get our modules
        sl = mongodb_retention.Mongodb_retention_scheduler(modconf, 'localhost', 'test', '')

        # sl = get_instance(mod)
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.
"""
Test Nagios retention
"""

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module

from shinken.modulesctx import modulesctx
nagios_retention_file_scheduler = modulesctx.get_module('retention_nagios')
get_instance = nagios_retention_file_scheduler.get_instance

# Module-level configuration shared by the tests below.
modconf = Module()
modconf.module_name = "NagiosRetention"
modconf.module_type = nagios_retention_file_scheduler.properties['type']
modconf.properties = nagios_retention_file_scheduler.properties.copy()


class TestNagiosRetention(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_pickle_retention(self):
        """Build the Nagios retention scheduler module from a fixture
        retention.dat and run it through get_instance.

        NOTE(review): the method body appears to continue beyond this
        chunk; only the construction part is visible here.
        """
        # get our modules
        mod = nagios_retention_file_scheduler.Nagios_retention_scheduler(
            modconf, 'etc/module_nagios_retention/retention.dat')

        sl = get_instance(mod)
        # Hack here :(
示例#20
0
 def setUp(self):
     """Create a bare Module instance as the item under test."""
     # Imported locally so the test module can be collected even when
     # shinken is not importable at import time.
     from shinken.objects.module import Module
     self.item = Module()
示例#21
0
    def test_split_database(self):
        """Exercise log_db_do_archive(): write logs over four simulated
        days, archive them, and check that each day's rows land in their
        own datafile under tmp/archives with the expected counts.

        The whole test runs twice (the second pass after warping back to
        the original start time) to verify that re-archiving appends to
        the existing per-day files.
        """
        #
        # after daylight-saving time has begun or ended,
        # this test may fail for some days
        #
        #os.removedirs("var/archives")
        self.print_header()
        host = self.sched.hosts.find_by_name("test_host_0")
        save_now = time.time()
        # Compute today's and the -2/-4 day midnight/noon timestamps,
        # then warp the fake clock back to noon four days ago.
        today = datetime.datetime.fromtimestamp(time.time())
        today_noon = datetime.datetime(today.year, today.month, today.day, 12,
                                       0, 0)
        today_morning = datetime.datetime(today.year, today.month, today.day,
                                          0, 0, 0)
        back2days_noon = today_noon - datetime.timedelta(days=2)
        back2days_morning = today_morning - datetime.timedelta(days=2)
        back4days_noon = today_noon - datetime.timedelta(days=4)
        back4days_morning = today_morning - datetime.timedelta(days=4)
        today_noon = int(time.mktime(today_noon.timetuple()))
        today_morning = int(time.mktime(today_morning.timetuple()))
        back2days_noon = int(time.mktime(back2days_noon.timetuple()))
        back2days_morning = int(time.mktime(back2days_morning.timetuple()))
        back4days_noon = int(time.mktime(back4days_noon.timetuple()))
        back4days_morning = int(time.mktime(back4days_morning.timetuple()))
        now = time.time()
        time_hacker.time_warp(-1 * (now - back4days_noon))
        now = time.time()
        print "4t is", time.asctime(time.localtime(int(now)))
        logs = 0
        # Write 2*day logs at 12:00, 13:00, 23:00 and 00:00 of each of the
        # four simulated days; time.sleep advances the hacked clock.
        for day in range(1, 5):
            print "day", day
            # at 12:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 13:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(36000)
            # at 23:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 00:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(43200)
        # day 1: 1 * (2 + 2 + 2)
        # day 2: 2 * (2 + 2 + 2) + 1 * 2 (from last loop)
        # day 3: 3 * (2 + 2 + 2) + 2 * 2 (from last loop)
        # day 4: 4 * (2 + 2 + 2) + 3 * 2 (from last loop)
        # today: 4 * 2 (from last loop)
        # 6 + 14 + 22 + 30  + 8 = 80
        now = time.time()
        print "0t is", time.asctime(time.localtime(int(now)))
        request = """GET log
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(
            request)
        print response
        pyresponse = eval(response)
        # ignore these internal logs
        pyresponse = [
            l for l in pyresponse
            if l[1].strip() not in ["Warning", "Info", "Debug"]
        ]
        print "Raw pyresponse", pyresponse
        print "pyresponse", len(pyresponse)
        print "expect", logs
        self.assertEqual(logs, len(pyresponse))

        # First archiving run: one datafile per historic day is expected
        # (journal files are ignored).
        self.livestatus_broker.db.log_db_do_archive()
        self.assert_(os.path.exists("tmp/archives"))
        tempres = [
            d for d in os.listdir("tmp/archives") if not d.endswith("journal")
        ]
        self.assertEqual(4, len(tempres))
        lengths = []
        # Open each archive file directly and count its rows.
        for db in sorted(tempres):
            dbmodconf = Module({
                'module_name': 'LogStore',
                'module_type': 'logstore_sqlite',
                'use_aggressive_sql': '0',
                'database_file': "tmp/archives/" + db,
                'archive_path': "tmp/archives/",
                'max_logs_age': '0',
            })
            tmpconn = LiveStatusLogStoreSqlite(dbmodconf)
            tmpconn.open()
            numlogs = tmpconn.execute("SELECT COUNT(*) FROM logs")
            lengths.append(numlogs[0][0])
            print "db entries", db, numlogs
            tmpconn.close()
        print "lengths is", lengths
        self.assertEqual([6, 14, 22, 30], lengths)

        # Queries spanning archived days must transparently read the
        # archive files: days -4..-2 hold 6 + 14 + (22 - 12) ... = 30 rows.
        request = """GET log
Filter: time >= """ + str(int(back4days_morning)) + """
Filter: time <= """ + str(int(back2days_noon)) + """
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(
            request)
        print response
        pyresponse = eval(response)
        self.assertEqual(30, len(pyresponse))
        print "pyresponse", len(pyresponse)
        print "expect", logs

        # Re-archiving with nothing new must not change the result.
        self.livestatus_broker.db.log_db_do_archive()

        request = """GET log
Filter: time >= """ + str(int(back4days_morning)) + """
Filter: time <= """ + str(int(back2days_noon)) + """
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(
            request)
        print response
        pyresponse = eval(response)
        self.assertEqual(30, len(pyresponse))
        print "pyresponse", len(pyresponse)
        print "expect", logs

        self.livestatus_broker.db.log_db_do_archive()

        # Excluding the exact noon timestamp drops that instant's 6 rows.
        request = """GET log
Filter: time >= """ + str(int(back4days_morning)) + """
Filter: time <= """ + str(int(back2days_noon) - 1) + """
OutputFormat: python
Columns: time type options state host_name"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(
            request)
        print response
        pyresponse = eval(response)
        self.assertEqual(24, len(pyresponse))
        print "pyresponse", len(pyresponse)
        print "expect", logs

        # now warp to the time when we entered this test
        time_hacker.time_warp(-1 * (time.time() - save_now))
        # and now start the same logging
        today = datetime.datetime.fromtimestamp(time.time())
        today_noon = datetime.datetime(today.year, today.month, today.day, 12,
                                       0, 0)
        today_morning = datetime.datetime(today.year, today.month, today.day,
                                          0, 0, 0)
        back2days_noon = today_noon - datetime.timedelta(days=2)
        back2days_morning = today_morning - datetime.timedelta(days=2)
        back4days_noon = today_noon - datetime.timedelta(days=4)
        back4days_morning = today_morning - datetime.timedelta(days=4)
        today_noon = int(time.mktime(today_noon.timetuple()))
        today_morning = int(time.mktime(today_morning.timetuple()))
        back2days_noon = int(time.mktime(back2days_noon.timetuple()))
        back2days_morning = int(time.mktime(back2days_morning.timetuple()))
        back4days_noon = int(time.mktime(back4days_noon.timetuple()))
        back4days_morning = int(time.mktime(back4days_morning.timetuple()))
        now = time.time()
        time_hacker.time_warp(-1 * (now - back4days_noon))
        now = time.time()
        time.sleep(5)
        print "4t is", time.asctime(time.localtime(int(now)))
        logs = 0
        for day in range(1, 5):
            print "day", day
            # at 12:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 13:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(36000)
            # at 23:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(3600)
            # at 00:00
            now = time.time()
            print "it is", time.asctime(time.localtime(int(now)))
            self.write_logs(host, day)
            logs += 2 * day
            time.sleep(43200)
        # day 1: 1 * (2 + 2 + 2)
        # day 2: 2 * (2 + 2 + 2) + 1 * 2 (from last loop)
        # day 3: 3 * (2 + 2 + 2) + 2 * 2 (from last loop)
        # day 4: 4 * (2 + 2 + 2) + 3 * 2 (from last loop)
        # today: 4 * 2 (from last loop)
        # 6 + 14 + 22 + 30  + 8 = 80
        # Second archiving pass: rows are appended to the existing per-day
        # files, so every count doubles.
        self.livestatus_broker.db.log_db_do_archive()
        self.assert_(os.path.exists("tmp/archives"))
        self.assert_(
            len([
                d for d in os.listdir("tmp/archives")
                if not d.endswith("journal")
            ]) == 4)
        lengths = []
        for db in sorted([
                d for d in os.listdir("tmp/archives")
                if not d.endswith("journal")
        ]):
            dbmodconf = Module({
                'module_name': 'LogStore',
                'module_type': 'logstore_sqlite',
                'use_aggressive_sql': '0',
                'database_file': "tmp/archives/" + db,
                'max_logs_age': '0',
            })
            tmpconn = LiveStatusLogStoreSqlite(dbmodconf)
            tmpconn.open()
            numlogs = tmpconn.execute("SELECT COUNT(*) FROM logs")
            lengths.append(numlogs[0][0])
            print "db entries", db, numlogs
            tmpconn.close()
        print "lengths is", lengths
        self.assertEqual([12, 28, 44, 60], lengths)
import os, time

from shinken_test import unittest, ShinkenTest, original_time_time, original_time_sleep

# Need to use the real time-functions as we are reading timestamps
# from the filesystem.
time.time = original_time_time
time.sleep = original_time_sleep

from shinken.objects.module import Module

from shinken.modules.hot_dependencies import module as hot_dependencies_arbiter
from shinken.modules.hot_dependencies.module import Hot_dependencies_arbiter, get_instance

# Module-level configuration shared by the tests below.
# NOTE(review): module_name says "PickleRetention" while module_type comes
# from hot_dependencies — looks like a copy/paste name; confirm before
# relying on the name anywhere.
modconf = Module()
modconf.module_name = "PickleRetention"
modconf.module_type = hot_dependencies_arbiter.properties['type']
modconf.modules = []
modconf.properties = hot_dependencies_arbiter.properties.copy()

try:
    import json
except ImportError:
    # For old Python version, load
    # simple json (it can be hard json?! It's 2 functions guy!)
    try:
        import simplejson as json
    except ImportError:
        print "Error: you need the json or simplejson module for this script"
        raise SystemExit(0)
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.

"""
Test Mongodb retention.
"""

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module
from shinken.modules.retention_mongodb import module as mongodb_retention
from shinken.modules.retention_mongodb.module import get_instance

modconf = Module()
modconf.module_name = "MongodbRetention"
modconf.uri = "mongodb://127.0.0.1:27017"
modconf.database = "test"
modconf.module_type = mongodb_retention.properties["type"]
modconf.properties = mongodb_retention.properties.copy()


class TestMongodbRetention(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_mongodb_retention(self):
        # get our modules
        sl = mongodb_retention.Mongodb_retention_scheduler(modconf, "localhost", "test", "")

        # sl = get_instance(mod)
Test the named pipe arbiter module.
"""

import os, time, platform

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module

from shinken.modulesctx import modulesctx
named_pipe = modulesctx.get_module('named_pipe')
Named_Pipe_arbiter = named_pipe.Named_Pipe_arbiter
get_instance       = named_pipe.get_instance


modconf = Module()
modconf.module_name = "NamedPipe"
modconf.module_type = named_pipe.properties['type']
modconf.properties = named_pipe.properties.copy()


class TestModuleNamedPipe(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_read_named_pipe(self):

        # Ok, windows do not have named pipe, we know...
        # cygwin cannot write from two sides at the same time
        if os.name == 'nt' or platform.system().startswith('CYGWIN'):
            return
Test pickle retention arbiter.
"""

import os
import copy

from shinken_test import unittest, ShinkenTest

from shinken.daemons.arbiterdaemon import Arbiter
from shinken.objects.module import Module

from shinken.modulesctx import modulesctx
pickle_retention_file_generic = modulesctx.get_module('pickle_retention_file_generic')
get_instance = pickle_retention_file_generic.get_instance

modconf = Module()
modconf.module_name = "PickleRetentionGeneric"
modconf.module_type = pickle_retention_file_generic.properties['type']
modconf.properties = pickle_retention_file_generic.properties.copy()


class TestPickleRetentionArbiter(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_pickle_retention(self):
        # get our modules
        mod = pickle_retention_file_generic.Pickle_retention_generic(
            modconf, 'tmp/retention-test.dat')
        try:
            os.unlink(mod.path)
        except:
示例#26
0
#
# This file is used to test reading and processing of config files
#

import os
from Queue import Empty
from multiprocessing import Queue, Manager, active_children

from shinken_test import *
from shinken.log import logger
from shinken.objects.module import Module
from shinken.modules.booster_nrpe import module as nrpe_poller
from shinken.modules.booster_nrpe.module import get_instance
from shinken.message import Message

modconf = Module()
modconf.module_name = "NrpePoller"
modconf.module_type = nrpe_poller.properties['type']
modconf.properties = nrpe_poller.properties.copy()


class TestNrpePoller(ShinkenTest):
    # Uncomment this is you want to use a specific configuration
    # for your test
    #def setUp(self):
    #    self.setup_with_file('etc/nagios_module_hack_cmd_poller_tag.cfg')

    def test_nrpe_poller(self):
        if os.name == 'nt':
            return
        
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.

"""
Test Nagios retention
"""

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module

from shinken.modulesctx import modulesctx
nagios_retention_file_scheduler = modulesctx.get_module('retention_nagios')
get_instance                    = nagios_retention_file_scheduler.get_instance


modconf = Module()
modconf.module_name = "NagiosRetention"
modconf.module_type = nagios_retention_file_scheduler.properties['type']
modconf.properties = nagios_retention_file_scheduler.properties.copy()


class TestNagiosRetention(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_pickle_retention(self):
        # get our modules
        mod = nagios_retention_file_scheduler.Nagios_retention_scheduler(
            modconf, 'etc/module_nagios_retention/retention.dat')

        sl = get_instance(mod)
        # Hack here :(
    def test_json_read_with_command(self):
        """
        We are trying to see if we can have good data with 2 commands call
          CASE1: link between host0 and 1
        then after some seconds:
          CASE2: link between host1 and host2, so like the previous
                 test, but with command calls
        """

        # The three hosts whose dependency links we will watch.
        host0 = self.sched.conf.hosts.find_by_name('test_host_0')
        self.assert_(host0 is not None)
        host1 = self.sched.conf.hosts.find_by_name('test_host_1')
        self.assert_(host1 is not None)
        host2 = self.sched.conf.hosts.find_by_name('test_host_2')
        self.assert_(host2 is not None)

        # From now there is no link between hosts (just parent with the router)
        # but it's not important here
        self.assertFalse(host0.is_linked_with_host(host1))
        self.assertFalse(host1.is_linked_with_host(host0))
        self.assertFalse(host0.is_linked_with_host(host2))
        self.assertFalse(host2.is_linked_with_host(host0))
        self.assertFalse(host2.is_linked_with_host(host1))
        self.assertFalse(host1.is_linked_with_host(host2))

        # Module configuration: mapping_command (re)generates the JSON
        # mapping file that the module reads on each tick.
        mod = Module({
            'type': 'hot_dependencies',
            'module_name': 'VMWare_auto_linking',
            'mapping_file': 'tmp/vmware_mapping_file.json',
            'mapping_command':
            "libexec/hot_dep_export.py case1 tmp/vmware_mapping_file.json",
            'mapping_command_interval': '30'
        })

        # Start from a clean state: drop any mapping file left over by a
        # previous run. Best effort -- the file may legitimately be absent,
        # so only OS-level errors are swallowed (was a bare except before).
        try:
            os.unlink(mod.mapping_file)
        except OSError:
            pass

        sl = get_instance(mod)

        # Hack here :( the instance normally gets its properties from the
        # module manager; fake the minimum it needs.
        sl.properties = {}
        sl.properties['to_queue'] = None
        # Under windows, call python.exe
        if os.name == 'nt':
            sl.mapping_command = 'python.exe libexec\\hot_dep_export.py case1 tmp\\vmware_mapping_file.json'
        sl.init()

        # Try the hook for the late config, so it will create
        # the link between host1 and host0
        sl.hook_late_configuration(self)

        # We can look is now the hosts are linked or not :)
        self.assertFalse(host1.is_linked_with_host(host0))

        # The hook_late should have seen a problem of no file
        # and so launch the command. We can wait it finished
        time.sleep(1.5)

        # Now we look if it's finished, and we get data and manage them
        # with case 1 (0 and 1 linked, not with 1 and 2)
        sl.hook_tick(self)

        # Now we should see link between 1 and 0, but not between 2 and 1
        self.assertTrue(host1.is_linked_with_host(host0))
        self.assertFalse(host1.is_linked_with_host(host2))

        # Now we go in case2
        if os.name != 'nt':
            sl.mapping_command = 'libexec/hot_dep_export.py case2 tmp/vmware_mapping_file.json'
        else:
            sl.mapping_command = 'python.exe libexec\\hot_dep_export.py case2 tmp\\vmware_mapping_file.json'

        # We lie in the interval:p (not 0, because 0 mean: disabled)
        sl.mapping_command_interval = 0.1
        sl.hook_tick(self)
        time.sleep(1.5)
        # But we need another tick to get all of it
        sl.hook_tick(self)

        # Now we should see link between 1 and 2, but no longer between 1 and 0
        self.assertFalse(host1.is_linked_with_host(host0))
        self.assertTrue(host1.is_linked_with_host(host2))

        # Ok, we can delete the retention file
        os.unlink(mod.mapping_file)
示例#29
0
# This file is used to test the npcd broker module
#

import os, sys, string, time
from multiprocessing import Queue

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module

from shinken.modules.npcdmod import module as npcdmod_broker
from shinken.modules.npcdmod.module import Npcd_broker

sys.setcheckinterval(10000)

modconf = Module()
modconf.module_name = "ncpd"
modconf.module_type = npcdmod_broker.properties['type']
modconf.modules = []
modconf.properties = npcdmod_broker.properties.copy()


class TestNpcd(ShinkenTest):

    def add(self, b):
        # Collect broks emitted during the test, keyed by brok id, so that
        # later assertions can inspect them.
        self.broks[b.id] = b

    def fake_check(self, ref, exit_status, output="OK"):
        print "fake", ref
        now = time.time()
        ref.schedule()
#
# This file is used to test reading and processing of config files
#

import os
import time

from shinken_test import unittest, ShinkenTest

from shinken.log import logger
from shinken.objects.module import Module
from shinken.modules import pickle_retention_file_scheduler
from shinken.modules.pickle_retention_file_scheduler import get_instance 


modconf = Module()
modconf.module_name = "PickleRetention"
modconf.module_type = pickle_retention_file_scheduler.properties['type']
modconf.modules = []
modconf.properties = pickle_retention_file_scheduler.properties.copy()


class TestConfig(ShinkenTest):
    #setUp is in shinken_test

    #Change ME :)
    def test_pickle_retention(self):
        print self.conf.modules
        now = time.time()
        #get our modules
        mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(modconf, 'tmp/retention-test.dat')
#
# This file is used to test reading and processing of config files
#

import os, sys, time

from shinken_test import unittest, ShinkenTest

from shinken.log import logger
from shinken.objects.module import Module

from shinken.modules import named_pipe
from shinken.modules.named_pipe import Named_Pipe_arbiter, get_instance


modconf = Module()
modconf.module_name = "NamedPipe"
modconf.module_type = named_pipe.properties['type']
modconf.properties = named_pipe.properties.copy()


class TestModuleNamedPipe(ShinkenTest):
    #setUp is in shinken_test
    #def setUp(self):
    #    self.setup_with_file('etc/nagios_module_hot_dependencies_arbiter.cfg')

    # Change ME :)
    def test_read_named_pipe(self):

        # Ok, windows do not have named pipe, we know...
        if os.name == 'nt':

#
# This file is used to test reading and processing of config files
#

import os

from shinken_test import unittest, ShinkenTest

from shinken.log import logger
from shinken.objects.module import Module
from shinken.modules import memcache_retention_scheduler
from shinken.modules.memcache_retention_scheduler import get_instance

modconf = Module()
modconf.module_name = "MemcacheRetention"
modconf.module_type = memcache_retention_scheduler.properties['type']
modconf.properties = memcache_retention_scheduler.properties.copy()


class TestConfig(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_memcache_retention(self):
        print self.conf.modules
        # get our modules
        modconf.server = 'localhost'
        modconf.port = '11211'
        mod = memcache_retention_scheduler.Memcache_retention_scheduler(modconf)
示例#33
0
 def setUp(self):
     # Build a fresh baseline Module configuration for each test.
     # NOTE(review): basic_dict_modconf is defined elsewhere in this file.
     self.basic_modconf = Module(basic_dict_modconf)
示例#34
0
#
# This file is used to test reading and processing of config files
#

import os

from shinken_test import unittest, ShinkenTest

from shinken.log import logger
from shinken.objects.module import Module
from shinken.modules import redis_retention_scheduler
from shinken.modules.redis_retention_scheduler import get_instance 


modconf = Module()
modconf.module_name = "RedisRetention"
modconf.module_type = redis_retention_scheduler.properties['type']
modconf.properties = redis_retention_scheduler.properties.copy()


class TestConfig(ShinkenTest):
    #setUp is in shinken_test

    #Change ME :)
    def test_redis_retention(self):
        print self.conf.modules
        #get our modules
        mod = redis_retention_scheduler.Redis_retention_scheduler(modconf, 'localhost')

        sl = get_instance(mod)
示例#35
0
from shinken.brok import Brok

from shinken.daemons.schedulerdaemon import Shinken
from shinken.daemons.brokerdaemon import Broker
from shinken.daemons.arbiterdaemon import Arbiter

# Special Livestatus module opening since the module rename
from shinken.modules.livestatus import module as livestatus_broker
from shinken.modules.livestatus.module import LiveStatus_broker
from shinken.modules.livestatus.livestatus import LiveStatus
from shinken.modules.livestatus.livestatus_regenerator import LiveStatusRegenerator
from shinken.modules.livestatus.livestatus_query_cache import LiveStatusQueryCache
from shinken.misc.datamanager import datamgr

livestatus_modconf = Module()
livestatus_modconf.module_name = "livestatus"
livestatus_modconf.module_type = livestatus_broker.properties['type']
livestatus_modconf.properties = livestatus_broker.properties.copy()

# We overwrite the functions time() and sleep()
# This way we can modify sleep() so that it immediately returns although
# for a following time() it looks like thee was actually a delay.
# This massively speeds up the tests.

time.my_offset = 0
time.my_starttime = time.time()
time.my_oldtime = time.time


def my_time_time():
示例#36
0
 def setUp(self):
     # Fresh module config and a fresh InfluxdbBroker for every test, so
     # tests do not share broker state.
     # NOTE(review): basic_dict_modconf and InfluxdbBroker are defined
     # elsewhere in this file.
     self.basic_modconf = Module(basic_dict_modconf)
     self.influx_broker = InfluxdbBroker(self.basic_modconf)
示例#37
0
#

import os, sys, string, time
from multiprocessing import Queue

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module
from shinken.modulesctx import modulesctx
npcdmod_broker = modulesctx.get_module('npcdmod')
Npcd_broker = npcdmod_broker.Npcd_broker


sys.setcheckinterval(10000)

modconf = Module()
modconf.module_name = "ncpd"
modconf.module_type = npcdmod_broker.properties['type']
modconf.modules = []
modconf.properties = npcdmod_broker.properties.copy()


class TestNpcd(ShinkenTest):

    def add(self, b):
        self.broks[b.id] = b

    def fake_check(self, ref, exit_status, output="OK"):
        print "fake", ref
        now = time.time()
        ref.schedule()
示例#38
0
modulesctx.set_modulesdir(modules_dir)


# Special Livestatus module opening since the module rename
#from shinken.modules.livestatus import module as livestatus_broker
livestatus_broker = modulesctx.get_module('livestatus')
LiveStatus_broker = livestatus_broker.LiveStatus_broker
LiveStatus = livestatus_broker.LiveStatus
LiveStatusRegenerator = livestatus_broker.LiveStatusRegenerator
LiveStatusQueryCache = livestatus_broker.LiveStatusQueryCache

Logline = livestatus_broker.Logline
LiveStatusLogStoreMongoDB = modulesctx.get_module('logstore-mongodb').LiveStatusLogStoreMongoDB
LiveStatusLogStoreSqlite = modulesctx.get_module('logstore-sqlite').LiveStatusLogStoreSqlite

livestatus_modconf = Module()
livestatus_modconf.module_name = "livestatus"
livestatus_modconf.module_type = livestatus_broker.properties['type']
livestatus_modconf.properties = livestatus_broker.properties.copy()



class ShinkenModulesTest(ShinkenTest):

    def do_load_modules(self):
        """Load and initialize all configured modules, then log the names
        of the module instances that were created."""
        self.modules_manager.load_and_init()
        instance_names = [inst.get_name() for inst in self.modules_manager.instances]
        joined_names = ','.join(instance_names)
        self.log.log("I correctly loaded the modules: [%s]" % (joined_names))



    def update_broker(self, dodeepcopy=False):
示例#39
0
from shinken.check import Check

try:
    import unittest2 as unittest
except ImportError:
    import unittest


from shinken.objects.module import Module
from shinken.message import Message


import booster_nrpe


modconf = Module()
modconf.module_name = "NrpePoller"
modconf.module_type = booster_nrpe.properties['type']
modconf.properties = booster_nrpe.properties.copy()


class NrpePollerTestMixin(object):

    def setUp(self):
        # Run the cooperating base-class setup first, then raise log
        # verbosity so poller debug output is visible during the tests.
        # NOTE(review): `logger` and `logging` must be provided by imports
        # elsewhere in this file.
        super(NrpePollerTestMixin, self).setUp()
        logger.setLevel(logging.DEBUG)

    def _setup_nrpe(self, modconf):
        mod = booster_nrpe.Nrpe_poller(modconf)
        inst = booster_nrpe.get_instance(mod)
        inst.init()
示例#40
0
    SkipTest = None

if not sys.version_info > (2, 5):
    if SkipTest:
        raise SkipTest("bah, i am 2.4.x")
    else:
        raise SystemExit(0)

from shinken.objects.module import Module
from shinken.modules import passwd_ui
from shinken.modules.passwd_ui import get_instance
from shinken.log import logger


if sys.version_info > (2, 5):
    modconf = Module()
    modconf.module_name = "PasswdUI"
    modconf.module_type = passwd_ui.properties['type']
    modconf.properties = passwd_ui.properties.copy()


class TestPasswdUI(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_check_auth(self):
        # get our modules
        modconf.passwd = 'libexec/htpasswd.users'
        mod = passwd_ui.get_instance(modconf)

        sl = get_instance(mod)
        # Hack here :(
示例#41
0
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.
"""
Test pickle retention broker.
"""

import os
import copy

from shinken_test import unittest, ShinkenTest

from shinken.daemons.brokerdaemon import Broker
from shinken.objects.module import Module
from shinken.modules import pickle_retention_file_generic
from shinken.modules.pickle_retention_file_generic import get_instance

modconf = Module()
modconf.module_name = "PickleRetentionGeneric"
modconf.module_type = pickle_retention_file_generic.properties['type']
modconf.properties = pickle_retention_file_generic.properties.copy()


class TestPickleRetentionBroker(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_pickle_retention(self):
        # get our modules
        mod = pickle_retention_file_generic.Pickle_retention_generic(
            modconf, 'tmp/retention-test.dat')
        try:
            os.unlink(mod.path)
        except:
示例#42
0
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.
"""
Test memcache retention.
"""

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module
from shinken.modulesctx import modulesctx
memcache_retention_scheduler = modulesctx.get_module('retention_memcache')
get_instance = memcache_retention_scheduler.get_instance

# Shared Module configuration for the memcache retention scheduler tests.
modconf = Module()
modconf.module_name = "MemcacheRetention"
modconf.module_type = memcache_retention_scheduler.properties['type']
modconf.properties = memcache_retention_scheduler.properties.copy()

# The memcache client library is optional: when it is missing, leave the
# name bound to None so the tests below can skip instead of failing at
# import time.
try:
    import memcache
except ImportError:
    memcache = None


class TestMemcacheRetention(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_memcache_retention(self):
        if not memcache:
示例#43
0
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.
"""
Test Mongodb retention.
"""

from shinken_test import unittest, ShinkenTest

from shinken.objects.module import Module
from shinken.modules import mongodb_retention
from shinken.modules.mongodb_retention import get_instance

modconf = Module()
modconf.module_name = "MongodbRetention"
modconf.uri = 'mongodb://127.0.0.1:27017'
modconf.database = 'test'
modconf.module_type = mongodb_retention.properties['type']
modconf.properties = mongodb_retention.properties.copy()


class TestMongodbRetention(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def test_mongodb_retention(self):
        # get our modules
        sl = mongodb_retention.Mongodb_retention_scheduler(
            modconf, 'localhost', 'test')
from shinken_test import unittest, ShinkenTest, original_time_time, original_time_sleep

# Need to use the real time-functions as we are reading timestamps
# from the filesystem.
time.time = original_time_time
time.sleep = original_time_sleep

from shinken.objects.module import Module

from shinken.modulesctx import modulesctx
hot_dependencies_arbiter = modulesctx.get_module('hot_dependencies')
Hot_dependencies_arbiter = hot_dependencies_arbiter.Hot_dependencies_arbiter
get_instance             = hot_dependencies_arbiter.get_instance

modconf = Module()
modconf.module_name = "PickleRetention"
modconf.module_type = hot_dependencies_arbiter.properties['type']
modconf.modules = []
modconf.properties = hot_dependencies_arbiter.properties.copy()


try:
    import json
except ImportError:
    # For old Python version, load
    # simple json (it can be hard json?! It's 2 functions guy!)
    try:
        import simplejson as json
    except ImportError:
        print "Error: you need the json or simplejson module for this script"