Example #1
class TestMonitorRemovesRecoveringMember(unittest.TestCase):
    # Members in STARTUP2 or RECOVERING states are shown in the primary's
    # isMaster response, but aren't secondaries and shouldn't be read from.
    # Verify that if a secondary goes into RECOVERING mode, the Monitor removes
    # it from the set of readers.

    def setUp(self):
        members = [{}, {'priority': 0}, {'priority': 0}]
        res = ha_tools.start_replica_set(members)
        self.seed, self.name = res

    def test_monitor_removes_recovering_member(self):
        self.c = MongoReplicaSetClient(self.seed,
                                       replicaSet=self.name,
                                       use_greenlets=use_greenlets)

        secondaries = ha_tools.get_secondaries()

        for mode in SECONDARY, SECONDARY_PREFERRED:
            partitioned_secondaries = [_partition_node(s) for s in secondaries]
            utils.assertReadFromAll(self, self.c, partitioned_secondaries,
                                    mode)

        secondary, recovering_secondary = secondaries
        ha_tools.set_maintenance(recovering_secondary, True)
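        # Give the monitor a couple of intervals to notice the state change.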
        sleep(2 * MONITOR_INTERVAL)

        for mode in SECONDARY, SECONDARY_PREFERRED:
            # Don't read from recovering member
            utils.assertReadFrom(self, self.c, _partition_node(secondary),
                                 mode)

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #2
    def test_alive(self):
        primary = ha_tools.get_primary()
        secondary = ha_tools.get_random_secondary()
        primary_cx = MongoClient(primary, use_greenlets=use_greenlets)
        secondary_cx = MongoClient(secondary, use_greenlets=use_greenlets)
        rsc = MongoReplicaSetClient(self.seed,
                                    replicaSet=self.name,
                                    use_greenlets=use_greenlets)

        try:
            self.assertTrue(primary_cx.alive())
            self.assertTrue(secondary_cx.alive())
            self.assertTrue(rsc.alive())

            ha_tools.kill_primary()
            time.sleep(0.5)

            self.assertFalse(primary_cx.alive())
            self.assertTrue(secondary_cx.alive())
            self.assertFalse(rsc.alive())

            ha_tools.kill_members([secondary], 2)
            time.sleep(0.5)

            self.assertFalse(primary_cx.alive())
            self.assertFalse(secondary_cx.alive())
            self.assertFalse(rsc.alive())
        finally:
            rsc.close()
Example #3
class TestMonitorRemovesRecoveringMember(unittest.TestCase):
    # Members in STARTUP2 or RECOVERING states are shown in the primary's
    # isMaster response, but aren't secondaries and shouldn't be read from.
    # Verify that if a secondary goes into RECOVERING mode, the Monitor removes
    # it from the set of readers.

    def setUp(self):
        members = [{}, {'priority': 0}, {'priority': 0}]
        res = ha_tools.start_replica_set(members)
        self.seed, self.name = res

    def test_monitor_removes_recovering_member(self):
        self.c = MongoReplicaSetClient(
            self.seed, replicaSet=self.name, use_greenlets=use_greenlets)

        secondaries = ha_tools.get_secondaries()

        for mode in SECONDARY, SECONDARY_PREFERRED:
            partitioned_secondaries = [_partition_node(s) for s in secondaries]
            utils.assertReadFromAll(self, self.c, partitioned_secondaries, mode)

        secondary, recovering_secondary = secondaries
        ha_tools.set_maintenance(recovering_secondary, True)
        sleep(2 * MONITOR_INTERVAL)

        for mode in SECONDARY, SECONDARY_PREFERRED:
            # Don't read from recovering member
            utils.assertReadFrom(self, self.c, _partition_node(secondary), mode)

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #4
class TestPassiveAndHidden(unittest.TestCase):
    def setUp(self):
        members = [{}, {
            'priority': 0
        }, {
            'arbiterOnly': True
        }, {
            'priority': 0,
            'hidden': True
        }, {
            'priority': 0,
            'slaveDelay': 5
        }]
        res = ha_tools.start_replica_set(members)
        self.seed, self.name = res

    def test_passive_and_hidden(self):
        self.c = MongoReplicaSetClient(self.seed,
                                       replicaSet=self.name,
                                       use_greenlets=use_greenlets)

        passives = ha_tools.get_passives()
        passives = [_partition_node(member) for member in passives]
        self.assertEqual(self.c.secondaries, set(passives))

        for mode in SECONDARY, SECONDARY_PREFERRED:
            utils.assertReadFromAll(self, self.c, passives, mode)

        ha_tools.kill_members(ha_tools.get_passives(), 2)
        sleep(2 * MONITOR_INTERVAL)
        utils.assertReadFrom(self, self.c, self.c.primary, SECONDARY_PREFERRED)

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #5
    def test_alive(self):
        primary = ha_tools.get_primary()
        secondary = ha_tools.get_random_secondary()
        primary_cx = MongoClient(primary, use_greenlets=use_greenlets)
        secondary_cx = MongoClient(secondary, use_greenlets=use_greenlets)
        rsc = MongoReplicaSetClient(
            self.seed, replicaSet=self.name, use_greenlets=use_greenlets)

        try:
            self.assertTrue(primary_cx.alive())
            self.assertTrue(secondary_cx.alive())
            self.assertTrue(rsc.alive())
    
            ha_tools.kill_primary()
            time.sleep(0.5)

            self.assertFalse(primary_cx.alive())
            self.assertTrue(secondary_cx.alive())
            self.assertFalse(rsc.alive())
            
            ha_tools.kill_members([secondary], 2)
            time.sleep(0.5)

            self.assertFalse(primary_cx.alive())
            self.assertFalse(secondary_cx.alive())
            self.assertFalse(rsc.alive())
        finally:
            rsc.close()
Example #6
class TestPassiveAndHidden(unittest.TestCase):

    def setUp(self):
        members = [{},
                   {'priority': 0},
                   {'arbiterOnly': True},
                   {'priority': 0, 'hidden': True},
                   {'priority': 0, 'slaveDelay': 5}
        ]
        res = ha_tools.start_replica_set(members)
        self.seed, self.name = res

    def test_passive_and_hidden(self):
        self.c = MongoReplicaSetClient(
            self.seed, replicaSet=self.name, use_greenlets=use_greenlets)

        passives = ha_tools.get_passives()
        passives = [_partition_node(member) for member in passives]
        self.assertEqual(self.c.secondaries, set(passives))

        for mode in SECONDARY, SECONDARY_PREFERRED:
            utils.assertReadFromAll(self, self.c, passives, mode)

        ha_tools.kill_members(ha_tools.get_passives(), 2)
        sleep(2 * MONITOR_INTERVAL)
        utils.assertReadFrom(self, self.c, self.c.primary, SECONDARY_PREFERRED)

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #7
class Connection(object):
	def __init__(self):
		self.connection = None

	def connect(self, hosts="localhost", replicaSet="", readPreference=ReadPreference.SECONDARY_PREFERRED):
		# If replicaSet is empty we use a plain connection; otherwise we use a
		# MongoReplicaSetClient, in which case we also set the read preference.
		# SECONDARY_PREFERRED by default, since we don't have big consistency
		# issues and it fails safe when the primary or a secondary goes down.
		if replicaSet == "":
			print("Using mongodb host " + hosts + " for read/writes.")
			self.connection = MongoConnection(hosts)
		else:
			print("Using mongodb hosts " + hosts + " with replica set " + replicaSet + " and read pref " + str(readPreference))
			self.connection = MongoReplicaSetClient(hosts, replicaSet=replicaSet, read_preference=readPreference)
			print("Replica set:")
			print("PRIMARY: " + str(self.connection.primary))
			print("SECONDARIES: " + str(self.connection.secondaries))
			print("ARBITERS: " + str(self.connection.arbiters))

	def close(self):
		self.connection.close()
		self.connection = None

	# \return the requested db setting the readPreference if set.
	def getDatabase(self, dbname, readPreference=None):
		db = self.connection[dbname]
		if readPreference is not None:
			db.read_preference = readPreference
		return db
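
A hedged usage sketch for the wrapper above; the hosts, replica set name, and
database name are made-up values for illustration:

# Hypothetical usage of the Connection wrapper defined above.
conn = Connection()
conn.connect(hosts="db1:27017,db2:27017", replicaSet="rs0")
db = conn.getDatabase("appdata", readPreference=ReadPreference.PRIMARY)
print(db.name)
conn.close()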
Example #8
class TestReplicaSetRequest(unittest.TestCase):
    def setUp(self):
        members = [{}, {}, {'arbiterOnly': True}]
        res = ha_tools.start_replica_set(members)
        self.c = MongoReplicaSetClient(res[0],
                                       replicaSet=res[1],
                                       use_greenlets=use_greenlets,
                                       auto_start_request=True)

    def test_request_during_failover(self):
        primary = _partition_node(ha_tools.get_primary())
        secondary = _partition_node(ha_tools.get_random_secondary())

        self.assertTrue(self.c.auto_start_request)
        self.assertTrue(self.c.in_request())

        primary_pool = self.c._MongoReplicaSetClient__members[primary].pool
        secondary_pool = self.c._MongoReplicaSetClient__members[secondary].pool

        # Trigger start_request on primary pool
        utils.assertReadFrom(self, self.c, primary, PRIMARY)
        self.assertTrue(primary_pool.in_request())

        # Fail over
        ha_tools.kill_primary()
        patience_seconds = 60
        for _ in range(patience_seconds):
            sleep(1)
            try:
                if ha_tools.ha_tools_debug:
                    print 'Waiting for failover'
                if ha_tools.get_primary():
                    # We have a new primary
                    break
            except ConnectionFailure:
                pass
        else:
            self.fail("Problem with test: No new primary after %s seconds" %
                      patience_seconds)

        try:
            # Trigger start_request on secondary_pool, which is becoming new
            # primary
            self.c.test.test.find_one()
        except AutoReconnect:
            # We've noticed the failover now
            pass

        # The old secondary is now primary
        utils.assertReadFrom(self, self.c, secondary, PRIMARY)
        self.assertTrue(self.c.in_request())
        self.assertTrue(secondary_pool.in_request())

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #9
class TestReplicaSetRequest(unittest.TestCase):
    def setUp(self):
        members = [{}, {}, {'arbiterOnly': True}]
        res = ha_tools.start_replica_set(members)
        self.c = MongoReplicaSetClient(res[0], replicaSet=res[1],
                                       use_greenlets=use_greenlets,
                                       auto_start_request=True)

    def test_request_during_failover(self):
        primary = _partition_node(ha_tools.get_primary())
        secondary = _partition_node(ha_tools.get_random_secondary())

        self.assertTrue(self.c.auto_start_request)
        self.assertTrue(self.c.in_request())

        primary_pool = self.c._MongoReplicaSetClient__members[primary].pool
        secondary_pool = self.c._MongoReplicaSetClient__members[secondary].pool

        # Trigger start_request on primary pool
        utils.assertReadFrom(self, self.c, primary, PRIMARY)
        self.assertTrue(primary_pool.in_request())

        # Fail over
        ha_tools.kill_primary()
        patience_seconds = 60
        for _ in range(patience_seconds):
            sleep(1)
            try:
                if ha_tools.ha_tools_debug:
                    print 'Waiting for failover'
                if ha_tools.get_primary():
                    # We have a new primary
                    break
            except ConnectionFailure:
                pass
        else:
            self.fail("Problem with test: No new primary after %s seconds"
                % patience_seconds)

        try:
            # Trigger start_request on secondary_pool, which is becoming new
            # primary
            self.c.test.test.find_one()
        except AutoReconnect:
            # We've noticed the failover now
            pass

        # The old secondary is now primary
        utils.assertReadFrom(self, self.c, secondary, PRIMARY)
        self.assertTrue(self.c.in_request())
        self.assertTrue(secondary_pool.in_request())

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #10
class TestReplicaSetAuth(unittest.TestCase):
    def setUp(self):
        members = [
            {},
            {
                'priority': 0
            },
            {
                'priority': 0
            },
        ]

        res = ha_tools.start_replica_set(members, auth=True)
        self.c = MongoReplicaSetClient(res[0],
                                       replicaSet=res[1],
                                       use_greenlets=use_greenlets)

        # Add an admin user to enable auth
        self.c.admin.add_user('admin', 'adminpass')
        self.c.admin.authenticate('admin', 'adminpass')

        self.db = self.c.pymongo_ha_auth
        self.db.add_user('user', 'userpass')
        self.c.admin.logout()

    def test_auth_during_failover(self):
        self.assertTrue(self.db.authenticate('user', 'userpass'))
        self.assertTrue(
            self.db.foo.insert({'foo': 'bar'}, safe=True, w=3, wtimeout=1000))
        self.db.logout()
        self.assertRaises(OperationFailure, self.db.foo.find_one)

        primary = '%s:%d' % self.c.primary
        ha_tools.kill_members([primary], 2)

        # Let monitor notice primary's gone
        sleep(2 * MONITOR_INTERVAL)

        # Make sure we can still authenticate
        self.assertTrue(self.db.authenticate('user', 'userpass'))
        # And still query.
        self.db.read_preference = PRIMARY_PREFERRED
        self.assertEqual('bar', self.db.foo.find_one()['foo'])

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #11
class TestReplicaSetAuth(unittest.TestCase):
    def setUp(self):
        members = [
            {},
            {'priority': 0},
            {'priority': 0},
        ]

        res = ha_tools.start_replica_set(members, auth=True)
        self.c = MongoReplicaSetClient(res[0], replicaSet=res[1],
                                       use_greenlets=use_greenlets)

        # Add an admin user to enable auth
        self.c.admin.add_user('admin', 'adminpass')
        self.c.admin.authenticate('admin', 'adminpass')

        self.db = self.c.pymongo_ha_auth
        self.db.add_user('user', 'userpass')
        self.c.admin.logout()

    def test_auth_during_failover(self):
        self.assertTrue(self.db.authenticate('user', 'userpass'))
        self.assertTrue(self.db.foo.insert({'foo': 'bar'},
                                           safe=True, w=3, wtimeout=1000))
        self.db.logout()
        self.assertRaises(OperationFailure, self.db.foo.find_one)

        primary = '%s:%d' % self.c.primary
        ha_tools.kill_members([primary], 2)

        # Let monitor notice primary's gone
        sleep(2 * MONITOR_INTERVAL)

        # Make sure we can still authenticate
        self.assertTrue(self.db.authenticate('user', 'userpass'))
        # And still query.
        self.db.read_preference = PRIMARY_PREFERRED
        self.assertEqual('bar', self.db.foo.find_one()['foo'])

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
Example #12
        sys.stderr.write("Operation %s" % e) 
        error_output_enabled = re.search("enabled",str(e))
        error_output_initialized = re.search("initialized",str(e))
        if error_output_enabled:
                print "sharding already enabled"
                pass

        if error_output_initialized:
                print "initialized"
                pass

try:
        # Initiate the replica set
        config = {'_id': replicaset, 'members': [
                {'_id': 0, 'host': servers[0]},
                {'_id': 1, 'host': servers[1]},
                {'_id': 2, 'host': servers[2]}]
        }

        mongod.admin.command("replSetInitiate", config)

except pymongo.errors.OperationFailure, e:
        error_output = re.search("initialized", str(e))
        if error_output:
                print "replica set already initialized"

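# Optional follow-up, a hedged sketch not in the original script: confirm the
# replica set came up via the standard replSetGetStatus admin command.
try:
        status = mongod.admin.command("replSetGetStatus")
        print "replica set state: %s" % status.get("myState")
except pymongo.errors.OperationFailure, e:
        print "status check failed: %s" % str(e)
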
mongod.close()
mongos.close()

Example #13
class TestReadPreference(unittest.TestCase):
    def setUp(self):
        members = [
            # primary
            {
                'tags': {
                    'dc': 'ny',
                    'name': 'primary'
                }
            },

            # secondary
            {
                'tags': {
                    'dc': 'la',
                    'name': 'secondary'
                },
                'priority': 0
            },

            # other_secondary
            {
                'tags': {
                    'dc': 'ny',
                    'name': 'other_secondary'
                },
                'priority': 0
            },
        ]

        res = ha_tools.start_replica_set(members)
        self.seed, self.name = res

        primary = ha_tools.get_primary()
        self.primary = _partition_node(primary)
        self.primary_tags = ha_tools.get_tags(primary)
        # Make sure priority worked
        self.assertEqual('primary', self.primary_tags['name'])

        self.primary_dc = {'dc': self.primary_tags['dc']}

        secondaries = ha_tools.get_secondaries()

        (secondary, ) = [
            s for s in secondaries
            if ha_tools.get_tags(s)['name'] == 'secondary'
        ]

        self.secondary = _partition_node(secondary)
        self.secondary_tags = ha_tools.get_tags(secondary)
        self.secondary_dc = {'dc': self.secondary_tags['dc']}

        (other_secondary, ) = [
            s for s in secondaries
            if ha_tools.get_tags(s)['name'] == 'other_secondary'
        ]

        self.other_secondary = _partition_node(other_secondary)
        self.other_secondary_tags = ha_tools.get_tags(other_secondary)
        self.other_secondary_dc = {'dc': self.other_secondary_tags['dc']}

        self.c = MongoReplicaSetClient(self.seed,
                                       replicaSet=self.name,
                                       use_greenlets=use_greenlets)
        self.db = self.c.pymongo_test
        self.w = len(self.c.secondaries) + 1
        self.db.test.remove({}, w=self.w)
        self.db.test.insert([{'foo': i} for i in xrange(10)], w=self.w)

        self.clear_ping_times()

    def set_ping_time(self, host, ping_time_seconds):
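        # Overrides pymongo's internal ping-time table
        # (Member._host_to_ping_time) so the test can simulate network
        # latency without real delays.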
        Member._host_to_ping_time[host] = ping_time_seconds

    def clear_ping_times(self):
        Member._host_to_ping_time.clear()

    def test_read_preference(self):
        # We pass through four states:
        #
        #       1. A primary and two secondaries
        #       2. Primary down
        #       3. Primary up, one secondary down
        #       4. Primary up, all secondaries down
        #
        # For each state, we verify the behavior of PRIMARY,
        # PRIMARY_PREFERRED, SECONDARY, SECONDARY_PREFERRED, and NEAREST
        c = MongoReplicaSetClient(self.seed,
                                  replicaSet=self.name,
                                  use_greenlets=use_greenlets)

        def assertReadFrom(member, *args, **kwargs):
            utils.assertReadFrom(self, c, member, *args, **kwargs)

        def assertReadFromAll(members, *args, **kwargs):
            utils.assertReadFromAll(self, c, members, *args, **kwargs)

        def unpartition_node(node):
            host, port = node
            return '%s:%s' % (host, port)

        # To make the code terser, copy hosts into local scope
        primary = self.primary
        secondary = self.secondary
        other_secondary = self.other_secondary

        bad_tag = {'bad': 'tag'}

        # 1. THREE MEMBERS UP -------------------------------------------------
        #       PRIMARY
        assertReadFrom(primary, PRIMARY)

        #       PRIMARY_PREFERRED
        # Trivial: mode and tags both match
        assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc)

        # Secondary matches but not primary, choose primary
        assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc)

        # Chooses primary, ignoring tag sets
        assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc)

        # Chooses primary, ignoring tag sets
        assertReadFrom(primary, PRIMARY_PREFERRED, bad_tag)
        assertReadFrom(primary, PRIMARY_PREFERRED, [bad_tag, {}])

        #       SECONDARY
        assertReadFromAll([secondary, other_secondary], SECONDARY)

        #       SECONDARY_PREFERRED
        assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED)

        # Multiple tags
        assertReadFrom(secondary, SECONDARY_PREFERRED, self.secondary_tags)

        # Fall back to primary if it's the only one matching the tags
        assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'primary'})

        # No matching secondaries
        assertReadFrom(primary, SECONDARY_PREFERRED, bad_tag)

        # Fall back from non-matching tag set to matching set
        assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED,
                          [bad_tag, {}])

        assertReadFrom(other_secondary, SECONDARY_PREFERRED,
                       [bad_tag, {
                           'dc': 'ny'
                       }])

        #       NEAREST
        self.clear_ping_times()

        assertReadFromAll([primary, secondary, other_secondary], NEAREST)

        assertReadFromAll([primary, other_secondary], NEAREST,
                          [bad_tag, {
                              'dc': 'ny'
                          }])

        self.set_ping_time(primary, 0)
        self.set_ping_time(secondary, .03)  # 30 ms
        self.set_ping_time(other_secondary, 10)

        # Nearest member, no tags
        assertReadFrom(primary, NEAREST)

        # Tags override nearness
        assertReadFrom(primary, NEAREST, {'name': 'primary'})
        assertReadFrom(secondary, NEAREST, self.secondary_dc)

        # Make secondary fast
        self.set_ping_time(primary, .03)  # 30 ms
        self.set_ping_time(secondary, 0)

        assertReadFrom(secondary, NEAREST)

        # Other secondary fast
        self.set_ping_time(secondary, 10)
        self.set_ping_time(other_secondary, 0)

        assertReadFrom(other_secondary, NEAREST)

        # High secondaryAcceptableLatencyMS, should read from all members
        assertReadFromAll([primary, secondary, other_secondary],
                          NEAREST,
                          secondary_acceptable_latency_ms=1000 * 1000)

        self.clear_ping_times()

        assertReadFromAll([primary, other_secondary], NEAREST, [{'dc': 'ny'}])

        # 2. PRIMARY DOWN -----------------------------------------------------
        killed = ha_tools.kill_primary()

        # Let monitor notice primary's gone
        sleep(2 * MONITOR_INTERVAL)

        #       PRIMARY
        assertReadFrom(None, PRIMARY)

        #       PRIMARY_PREFERRED
        # No primary, choose matching secondary
        assertReadFromAll([secondary, other_secondary], PRIMARY_PREFERRED)
        assertReadFrom(secondary, PRIMARY_PREFERRED, {'name': 'secondary'})

        # No primary or matching secondary
        assertReadFrom(None, PRIMARY_PREFERRED, bad_tag)

        #       SECONDARY
        assertReadFromAll([secondary, other_secondary], SECONDARY)

        # Only primary matches
        assertReadFrom(None, SECONDARY, {'name': 'primary'})

        # No matching secondaries
        assertReadFrom(None, SECONDARY, bad_tag)

        #       SECONDARY_PREFERRED
        assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED)

        # Mode and tags both match
        assertReadFrom(secondary, SECONDARY_PREFERRED, {'name': 'secondary'})

        #       NEAREST
        self.clear_ping_times()

        assertReadFromAll([secondary, other_secondary], NEAREST)

        # 3. PRIMARY UP, ONE SECONDARY DOWN -----------------------------------
        ha_tools.restart_members([killed])

        for _ in range(30):
            if ha_tools.get_primary():
                break
            sleep(1)
        else:
            self.fail("Primary didn't come back up")

        ha_tools.kill_members([unpartition_node(secondary)], 2)
        self.assertTrue(
            MongoClient(unpartition_node(primary),
                        use_greenlets=use_greenlets,
                        read_preference=PRIMARY_PREFERRED).admin.command(
                            'ismaster')['ismaster'])

        sleep(2 * MONITOR_INTERVAL)

        #       PRIMARY
        assertReadFrom(primary, PRIMARY)

        #       PRIMARY_PREFERRED
        assertReadFrom(primary, PRIMARY_PREFERRED)

        #       SECONDARY
        assertReadFrom(other_secondary, SECONDARY)
        assertReadFrom(other_secondary, SECONDARY, self.other_secondary_dc)

        # Only the down secondary matches
        assertReadFrom(None, SECONDARY, {'name': 'secondary'})

        #       SECONDARY_PREFERRED
        assertReadFrom(other_secondary, SECONDARY_PREFERRED)
        assertReadFrom(other_secondary, SECONDARY_PREFERRED,
                       self.other_secondary_dc)

        # The secondary matching the tag is down, use primary
        assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'})

        #       NEAREST
        assertReadFromAll([primary, other_secondary], NEAREST)
        assertReadFrom(other_secondary, NEAREST, {'name': 'other_secondary'})
        assertReadFrom(primary, NEAREST, {'name': 'primary'})

        # 4. PRIMARY UP, ALL SECONDARIES DOWN ---------------------------------
        ha_tools.kill_members([unpartition_node(other_secondary)], 2)
        self.assertTrue(
            MongoClient(unpartition_node(primary),
                        use_greenlets=use_greenlets,
                        read_preference=PRIMARY_PREFERRED).admin.command(
                            'ismaster')['ismaster'])

        #       PRIMARY
        assertReadFrom(primary, PRIMARY)

        #       PRIMARY_PREFERRED
        assertReadFrom(primary, PRIMARY_PREFERRED)
        assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc)

        #       SECONDARY
        assertReadFrom(None, SECONDARY)
        assertReadFrom(None, SECONDARY, self.other_secondary_dc)
        assertReadFrom(None, SECONDARY, {'dc': 'ny'})

        #       SECONDARY_PREFERRED
        assertReadFrom(primary, SECONDARY_PREFERRED)
        assertReadFrom(primary, SECONDARY_PREFERRED, self.secondary_dc)
        assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'})
        assertReadFrom(primary, SECONDARY_PREFERRED, {'dc': 'ny'})

        #       NEAREST
        assertReadFrom(primary, NEAREST)
        assertReadFrom(None, NEAREST, self.secondary_dc)
        assertReadFrom(None, NEAREST, {'name': 'secondary'})

        # Even if primary's slow, still read from it
        self.set_ping_time(primary, 100)
        assertReadFrom(primary, NEAREST)
        assertReadFrom(None, NEAREST, self.secondary_dc)

        self.clear_ping_times()

    def test_pinning(self):
        # To make the code terser, copy modes into local scope
        PRIMARY = ReadPreference.PRIMARY
        PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED
        SECONDARY = ReadPreference.SECONDARY
        SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED
        NEAREST = ReadPreference.NEAREST

        c = MongoReplicaSetClient(self.seed,
                                  replicaSet=self.name,
                                  use_greenlets=use_greenlets,
                                  auto_start_request=True)

        # Verify that changing the mode unpins the member. We'll try it for
        # every relevant change of mode.
        for mode0, mode1 in itertools.permutations(
            (PRIMARY, SECONDARY, SECONDARY_PREFERRED, NEAREST), 2):
            # Try reading and then changing modes and reading again, see if we
            # read from a different host
            for _ in range(1000):
                # pin to this host
                host = utils.read_from_which_host(c, mode0)
                # unpin?
                new_host = utils.read_from_which_host(c, mode1)
                if host != new_host:
                    # Reading with a different mode unpinned, hooray!
                    break
            else:
                self.fail("Changing from mode %s to mode %s never unpinned" %
                          (modes[mode0], modes[mode1]))

        # Now verify changing the tag_sets unpins the member.
        tags0 = [{'a': 'a'}, {}]
        tags1 = [{'a': 'x'}, {}]
        for _ in range(1000):
            host = utils.read_from_which_host(c, NEAREST, tags0)
            new_host = utils.read_from_which_host(c, NEAREST, tags1)
            if host != new_host:
                break
        else:
            self.fail("Changing from tags %s to tags %s never unpinned" %
                      (tags0, tags1))

        # Finally, verify changing the secondary_acceptable_latency_ms unpins
        # the member.
        for _ in range(1000):
            host = utils.read_from_which_host(c, SECONDARY, None, 15)
            new_host = utils.read_from_which_host(c, SECONDARY, None, 20)
            if host != new_host:
                break
        else:
            self.fail("Changing secondary_acceptable_latency_ms from 15 to 20"
                      " never unpinned")

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
        self.clear_ping_times()
Example #14
class TestReadPreference(unittest.TestCase):
    def setUp(self):
        members = [
            # primary
            {'tags': {'dc': 'ny', 'name': 'primary'}},

            # secondary
            {'tags': {'dc': 'la', 'name': 'secondary'}, 'priority': 0},

            # other_secondary
            {'tags': {'dc': 'ny', 'name': 'other_secondary'}, 'priority': 0},
        ]

        res = ha_tools.start_replica_set(members)
        self.seed, self.name = res

        primary = ha_tools.get_primary()
        self.primary = _partition_node(primary)
        self.primary_tags = ha_tools.get_tags(primary)
        # Make sure priority worked
        self.assertEqual('primary', self.primary_tags['name'])

        self.primary_dc = {'dc': self.primary_tags['dc']}

        secondaries = ha_tools.get_secondaries()

        (secondary, ) = [
            s for s in secondaries
            if ha_tools.get_tags(s)['name'] == 'secondary']

        self.secondary = _partition_node(secondary)
        self.secondary_tags = ha_tools.get_tags(secondary)
        self.secondary_dc = {'dc': self.secondary_tags['dc']}

        (other_secondary, ) = [
            s for s in secondaries
            if ha_tools.get_tags(s)['name'] == 'other_secondary']

        self.other_secondary = _partition_node(other_secondary)
        self.other_secondary_tags = ha_tools.get_tags(other_secondary)
        self.other_secondary_dc = {'dc': self.other_secondary_tags['dc']}

        self.c = MongoReplicaSetClient(
            self.seed, replicaSet=self.name, use_greenlets=use_greenlets)
        self.db = self.c.pymongo_test
        self.w = len(self.c.secondaries) + 1
        self.db.test.remove({}, w=self.w)
        self.db.test.insert(
            [{'foo': i} for i in xrange(10)], w=self.w)

        self.clear_ping_times()

    def set_ping_time(self, host, ping_time_seconds):
        Member._host_to_ping_time[host] = ping_time_seconds

    def clear_ping_times(self):
        Member._host_to_ping_time.clear()

    def test_read_preference(self):
        # We pass through four states:
        #
        #       1. A primary and two secondaries
        #       2. Primary down
        #       3. Primary up, one secondary down
        #       4. Primary up, all secondaries down
        #
        # For each state, we verify the behavior of PRIMARY,
        # PRIMARY_PREFERRED, SECONDARY, SECONDARY_PREFERRED, and NEAREST
        c = MongoReplicaSetClient(
            self.seed, replicaSet=self.name, use_greenlets=use_greenlets)

        def assertReadFrom(member, *args, **kwargs):
            utils.assertReadFrom(self, c, member, *args, **kwargs)

        def assertReadFromAll(members, *args, **kwargs):
            utils.assertReadFromAll(self, c, members, *args, **kwargs)

        def unpartition_node(node):
            host, port = node
            return '%s:%s' % (host, port)

        # To make the code terser, copy hosts into local scope
        primary = self.primary
        secondary = self.secondary
        other_secondary = self.other_secondary

        bad_tag = {'bad': 'tag'}

        # 1. THREE MEMBERS UP -------------------------------------------------
        #       PRIMARY
        assertReadFrom(primary, PRIMARY)

        #       PRIMARY_PREFERRED
        # Trivial: mode and tags both match
        assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc)

        # Secondary matches but not primary, choose primary
        assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc)

        # Chooses primary, ignoring tag sets
        assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc)

        # Chooses primary, ignoring tag sets
        assertReadFrom(primary, PRIMARY_PREFERRED, bad_tag)
        assertReadFrom(primary, PRIMARY_PREFERRED, [bad_tag, {}])

        #       SECONDARY
        assertReadFromAll([secondary, other_secondary], SECONDARY)

        #       SECONDARY_PREFERRED
        assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED)

        # Multiple tags
        assertReadFrom(secondary, SECONDARY_PREFERRED, self.secondary_tags)

        # Fall back to primary if it's the only one matching the tags
        assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'primary'})

        # No matching secondaries
        assertReadFrom(primary, SECONDARY_PREFERRED, bad_tag)

        # Fall back from non-matching tag set to matching set
        assertReadFromAll([secondary, other_secondary],
            SECONDARY_PREFERRED, [bad_tag, {}])

        assertReadFrom(other_secondary,
            SECONDARY_PREFERRED, [bad_tag, {'dc': 'ny'}])

        #       NEAREST
        self.clear_ping_times()

        assertReadFromAll([primary, secondary, other_secondary], NEAREST)

        assertReadFromAll([primary, other_secondary],
            NEAREST, [bad_tag, {'dc': 'ny'}])

        self.set_ping_time(primary, 0)
        self.set_ping_time(secondary, .03) # 30 ms
        self.set_ping_time(other_secondary, 10)

        # Nearest member, no tags
        assertReadFrom(primary, NEAREST)

        # Tags override nearness
        assertReadFrom(primary, NEAREST, {'name': 'primary'})
        assertReadFrom(secondary, NEAREST, self.secondary_dc)

        # Make secondary fast
        self.set_ping_time(primary, .03) # 30 ms
        self.set_ping_time(secondary, 0)

        assertReadFrom(secondary, NEAREST)

        # Other secondary fast
        self.set_ping_time(secondary, 10)
        self.set_ping_time(other_secondary, 0)

        assertReadFrom(other_secondary, NEAREST)

        # High secondaryAcceptableLatencyMS, should read from all members
        assertReadFromAll(
            [primary, secondary, other_secondary],
            NEAREST, secondary_acceptable_latency_ms=1000*1000)

        self.clear_ping_times()

        assertReadFromAll([primary, other_secondary], NEAREST, [{'dc': 'ny'}])

        # 2. PRIMARY DOWN -----------------------------------------------------
        killed = ha_tools.kill_primary()

        # Let monitor notice primary's gone
        sleep(2 * MONITOR_INTERVAL)

        #       PRIMARY
        assertReadFrom(None, PRIMARY)

        #       PRIMARY_PREFERRED
        # No primary, choose matching secondary
        assertReadFromAll([secondary, other_secondary], PRIMARY_PREFERRED)
        assertReadFrom(secondary, PRIMARY_PREFERRED, {'name': 'secondary'})

        # No primary or matching secondary
        assertReadFrom(None, PRIMARY_PREFERRED, bad_tag)

        #       SECONDARY
        assertReadFromAll([secondary, other_secondary], SECONDARY)

        # Only primary matches
        assertReadFrom(None, SECONDARY, {'name': 'primary'})

        # No matching secondaries
        assertReadFrom(None, SECONDARY, bad_tag)

        #       SECONDARY_PREFERRED
        assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED)

        # Mode and tags both match
        assertReadFrom(secondary, SECONDARY_PREFERRED, {'name': 'secondary'})

        #       NEAREST
        self.clear_ping_times()

        assertReadFromAll([secondary, other_secondary], NEAREST)

        # 3. PRIMARY UP, ONE SECONDARY DOWN -----------------------------------
        ha_tools.restart_members([killed])

        for _ in range(30):
            if ha_tools.get_primary():
                break
            sleep(1)
        else:
            self.fail("Primary didn't come back up")

        ha_tools.kill_members([unpartition_node(secondary)], 2)
        self.assertTrue(MongoClient(
            unpartition_node(primary), use_greenlets=use_greenlets,
            read_preference=PRIMARY_PREFERRED
        ).admin.command('ismaster')['ismaster'])

        sleep(2 * MONITOR_INTERVAL)

        #       PRIMARY
        assertReadFrom(primary, PRIMARY)

        #       PRIMARY_PREFERRED
        assertReadFrom(primary, PRIMARY_PREFERRED)

        #       SECONDARY
        assertReadFrom(other_secondary, SECONDARY)
        assertReadFrom(other_secondary, SECONDARY, self.other_secondary_dc)

        # Only the down secondary matches
        assertReadFrom(None, SECONDARY, {'name': 'secondary'})

        #       SECONDARY_PREFERRED
        assertReadFrom(other_secondary, SECONDARY_PREFERRED)
        assertReadFrom(
            other_secondary, SECONDARY_PREFERRED, self.other_secondary_dc)

        # The secondary matching the tag is down, use primary
        assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'})

        #       NEAREST
        assertReadFromAll([primary, other_secondary], NEAREST)
        assertReadFrom(other_secondary, NEAREST, {'name': 'other_secondary'})
        assertReadFrom(primary, NEAREST, {'name': 'primary'})

        # 4. PRIMARY UP, ALL SECONDARIES DOWN ---------------------------------
        ha_tools.kill_members([unpartition_node(other_secondary)], 2)
        self.assertTrue(MongoClient(
            unpartition_node(primary), use_greenlets=use_greenlets,
            read_preference=PRIMARY_PREFERRED
        ).admin.command('ismaster')['ismaster'])

        #       PRIMARY
        assertReadFrom(primary, PRIMARY)

        #       PRIMARY_PREFERRED
        assertReadFrom(primary, PRIMARY_PREFERRED)
        assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc)

        #       SECONDARY
        assertReadFrom(None, SECONDARY)
        assertReadFrom(None, SECONDARY, self.other_secondary_dc)
        assertReadFrom(None, SECONDARY, {'dc': 'ny'})

        #       SECONDARY_PREFERRED
        assertReadFrom(primary, SECONDARY_PREFERRED)
        assertReadFrom(primary, SECONDARY_PREFERRED, self.secondary_dc)
        assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'})
        assertReadFrom(primary, SECONDARY_PREFERRED, {'dc': 'ny'})

        #       NEAREST
        assertReadFrom(primary, NEAREST)
        assertReadFrom(None, NEAREST, self.secondary_dc)
        assertReadFrom(None, NEAREST, {'name': 'secondary'})

        # Even if primary's slow, still read from it
        self.set_ping_time(primary, 100)
        assertReadFrom(primary, NEAREST)
        assertReadFrom(None, NEAREST, self.secondary_dc)

        self.clear_ping_times()

    def test_pinning(self):
        # To make the code terser, copy modes into local scope
        PRIMARY = ReadPreference.PRIMARY
        PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED
        SECONDARY = ReadPreference.SECONDARY
        SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED
        NEAREST = ReadPreference.NEAREST

        c = MongoReplicaSetClient(
            self.seed, replicaSet=self.name, use_greenlets=use_greenlets,
            auto_start_request=True)

        # Verify that changing the mode unpins the member. We'll try it for
        # every relevant change of mode.
        for mode0, mode1 in itertools.permutations(
            (PRIMARY, SECONDARY, SECONDARY_PREFERRED, NEAREST), 2
        ):
            # Try reading and then changing modes and reading again, see if we
            # read from a different host
            for _ in range(1000):
                # pin to this host
                host = utils.read_from_which_host(c, mode0)
                # unpin?
                new_host = utils.read_from_which_host(c, mode1)
                if host != new_host:
                    # Reading with a different mode unpinned, hooray!
                    break
            else:
                self.fail(
                    "Changing from mode %s to mode %s never unpinned" % (
                        modes[mode0], modes[mode1]))

        # Now verify changing the tag_sets unpins the member.
        tags0 = [{'a': 'a'}, {}]
        tags1 = [{'a': 'x'}, {}]
        for _ in range(1000):
            host = utils.read_from_which_host(c, NEAREST, tags0)
            new_host = utils.read_from_which_host(c, NEAREST, tags1)
            if host != new_host:
                break
        else:
            self.fail(
                "Changing from tags %s to tags %s never unpinned" % (
                    tags0, tags1))

        # Finally, verify changing the secondary_acceptable_latency_ms unpins
        # the member.
        for _ in range(1000):
            host = utils.read_from_which_host(c, SECONDARY, None, 15)
            new_host = utils.read_from_which_host(c, SECONDARY, None, 20)
            if host != new_host:
                break
        else:
            self.fail(
                "Changing secondary_acceptable_latency_ms from 15 to 20"
                " never unpinned")

    def tearDown(self):
        self.c.close()
        ha_tools.kill_all_members()
        self.clear_ping_times()
Example #15
class MongodbBroker(BaseModule):
    
    def __init__(self, mod_conf):
        BaseModule.__init__(self, mod_conf)
        self._parse_conf(mod_conf)
        
        self.queue = Queue.Queue(self.queue_size)
        self.conn = None
        # service notification log broks. 
        # ref: service.raise_notification_log_entry
        self.service_notification = ('contact',
                                     'host',
                                     'service_description',
                                     'state',
                                     'command',
                                     'output'
                                     )
        # host notification log broks.
        # ref: host.raise_notification_log_entry
        self.host_notification = ('contact',
                                  'host',
                                  'state',
                                  'command',
                                  'output')
        
        self.timestamp_regex = re.compile(r'.*\[(?P<timestamp>\d+)\].*')
        
    def _parse_conf(self, mod_conf):
        self.high_availability = to_bool(getattr(mod_conf,
                                                 'high_availability', 'false'))
        if not self.high_availability:
            self.stand_alone = getattr(mod_conf, 'stand_alone', '')
            if not self.stand_alone:
                logger.error('[Mongodb-Notification-Broker] Mongodb is '
                             'configured with high_availability false, but '
                             'stand_alone is not configured')
                raise Exception('[Mongodb-Notification-Broker] Configuration '
                                'Error')
        else:
            replica_set_str = getattr(mod_conf, 'replica_set', '')
            self._set_replica_set(replica_set_str)
        self.database = getattr(mod_conf,
                                'database', 'shinken_broker_notification')
        self.username = getattr(mod_conf,
                                'username', 'shinken_broker_notification')
        self.password = getattr(mod_conf,
                                'password', 'shinken_broker_notification')
        self.url_options = getattr(mod_conf, 'url_options', '')
        
        try:
            self.retry_per_log = int(getattr(mod_conf, 'retry_per_log'))
        except Exception:
            self.retry_per_log = 5
        try:
            self.queue_size = int(getattr(mod_conf, 'queue_size'))
        except Exception:
            self.queue_size = 10000
        
    def _set_replica_set(self, replica_set_str):
        raw_members = replica_set_str.split(',')
        members = []
        for member in raw_members:
            members.append(member.strip())
        self.replica_set = members        
        
    def _set_mongodb_url(self):
        scheme = 'mongodb://'
        db_and_options = '/%s?%s' % (self.database, self.url_options) 
        credential = ':'.join((self.username, '%s@' % self.password))
        if not self.high_availability:
            address = self.stand_alone
            mongodb_url = ''.join((scheme, credential, address, db_and_options))
        else:
            address = ','.join(self.replica_set)
            mongodb_url = ''.join((scheme, credential, address, db_and_options))
        self.mongodb_url = mongodb_url
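        # Resulting shape (illustrative values, not from a real deployment):
        # mongodb://user:password@host1:27017,host2:27017/dbname?replicaSet=rs0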
        
    # Called by Broker to do init work
    def init(self):
        logger.info('[Mongodb-Notification-Broker] Initialization of '
                    'mongodb_notification_broker module')
        self._set_mongodb_url()
        logger.debug('[Mongodb-Notification-Broker] Mongodb connect url: %s' 
                     % self.mongodb_url)
        
        # In case notification broker process down occasionally, the self.conn 
        # object must be dropped cleanly in Broker daemon.
        self.do_stop()
        try:
            if not self.high_availability:
                self.conn = MongoClient(self.mongodb_url)
            else:
                self.conn = MongoReplicaSetClient(self.mongodb_url)
        except ConnectionFailure:
            logger.warn('[Mongodb-Notification-Broker] Cannot connect '
                        'to MongoDB')
            raise
            
        except (InvalidURI, ConfigurationError):
            logger.warn('[Mongodb-Notification-Broker] Mongodb connect url '
                        'error')
            logger.warn('[Mongodb-Notification-Broker] Mongodb connect url: %s' 
                        % self.mongodb_url)
            raise 
        self._get_collections()
        
    def _get_collections(self):
        db = self.conn[self.database]
        self.hosts = db['hosts']
        self.services = db['services']
        self.notifications = db['notifications']
    
    # Override the same function in basemodule.py for clean up 
    def do_stop(self):
        if self.conn:
            self.conn.close()
            self.conn = None
    
    # If we hit an AutoReconnect exception, we retry until the operation
    # succeeds. If any other exception is raised, we skip the operation and
    # move on to the next one.
    def _process_db_operation(self, operation, *param):
        reconnect_start = time.time()
        result = None        
        while True:
            try:
                result = operation(*param)
            except AutoReconnect:
                logger.warn('[Mongodb-Notification-Broker] Update error. '
                            'Retrying; %d seconds since first attempt'
                            % (time.time() - reconnect_start))
                # Back off so we don't hammer the server with retries.
                time.sleep(self.retry_per_log)
            except Exception:
                logger.warn('[Mongodb-Notification-Broker] Update error. '
                            'operation %s, param %s' % (operation, param))
                logger.warn('[Mongodb-Notification-Broker] %s' % traceback.format_exc())
                break
            else:
                logger.debug('[Mongodb-Notification-Broker] Update success. '
                             'Operation %s, param %s' % (operation, param))
                break
        return result    
    
    # main function to update mongodb database
    def _save(self, ref, ref_identity, notification):
        self._process_db_operation(self.notifications.insert, notification)
        if ref == 'service':
            _id = ','.join((ref_identity.get('host'),
                            ref_identity.get('service_description')))
            cursor = self._process_db_operation(self.services.find,
                                                {'_id': _id})
        elif ref == 'host':
            _id = ref_identity.get('host')
            cursor = self._process_db_operation(self.hosts.find, {'_id': _id})
        
        # If the host or service lookup failed, 'cursor' will be None, so we
        # cannot tell whether that host or service exists. To avoid corrupting
        # data, we stop here.
        if cursor:
            if not cursor.count():
                # If the notification insert failed, '_id' will be missing and
                # we store an empty id list.
                ref_identity.setdefault('notification_ids',
                                        [notification.get('_id')] if '_id' in notification else [])
                ref_identity.setdefault('_id', _id)
                
                if ref == 'service':
                    self._process_db_operation(self.services.insert, ref_identity)
                elif ref == 'host':
                    self._process_db_operation(self.hosts.insert, ref_identity)
            else:
                document = cursor[0]
                notification_ids = document.get('notification_ids')
                # If the notification insert failed, '_id' will be missing and
                # we skip linking the notification.
                if '_id' in notification:
                    notification_ids.append(notification.get('_id'))
                    if ref == 'service':
                        self._process_db_operation(self.services.update,
                                                   {'_id': _id},
                                                   {'$set': {'notification_ids': notification_ids}})
                    elif ref == 'host':
                        self._process_db_operation(self.hosts.update,
                                                   {'_id': _id},
                                                   {'$set': {'notification_ids': notification_ids}})    
        else:
            logger.warn('[Mongodb-Notification-Broker] Notification saved, '
                        'but linking it to its host or service failed.')
        logger.debug('[Mongodb-Notification-Broker] Update notification ends.')    
        
    
    # Pull broks from the broker and route 'log' notification broks into
    # self.queue via _manage_brok.
    def _do_loop_turn(self):
        while not self.interrupted:
            broks = self.to_q.get()
            for brok in broks:
                brok.prepare()
                self._manage_brok(brok)
    
    def _update_db(self):
        while True:
            # if self.queue is empty, get operation will be blocked.
            brok = self.queue.get()
            logger.debug('[Mongodb-Notification-Broker] '
                         'Update notification begins.')
            
            msg = brok.data['log']
            parts = msg.split(':', 1)
            if 'SERVICE' in parts[0]:
                service_identity, notification = self._process_notification_brok('service',
                                                                                 self.service_notification,
                                                                                 parts[0], parts[1])
                self._save('service', service_identity, notification)
            elif 'HOST' in parts[0]:
                host_identity, notification = self._process_notification_brok('host',
                                                                              self.host_notification,
                                                                              parts[0], parts[1])
                self._save('host', host_identity, notification)

    def _process_notification_brok(self, ref, keys, header, notification_info):
        elts = notification_info.split(';', len(keys))
        
        timestamp = ''
        match = self.timestamp_regex.match(header)
        if match:
            timestamp = match.group('timestamp')
        
        info_map = dict(zip(keys, elts))
        if ref == 'service':
            ref_identity = {'host': info_map.get('host'),
                            'service_description': info_map.get('service_description')
                            }
        elif ref == 'host':
            ref_identity = {'host': info_map.get('host')}
            
        notification = {'contact': info_map.get('contact'),
                        'command': info_map.get('command'),
                        'output': info_map.get('output'),
                        'timestamp': timestamp
                        }
        return ref_identity, notification
    
    def _manage_brok(self, brok):
        if brok.type == 'log' and 'NOTIFICATION' in brok.data['log']:
            try:
                self.queue.put_nowait(brok)
            except Queue.Full:
                logger.warn('[Mongodb-Notification-Broker] Queue full. '
                            'Dropping brok.')
        
    # invoked by basemodule._main        
    def main(self):
        logger.info('[Mongodb-Notification-Broker] Start main function.')
        worker = Thread(target=self._update_db)
        worker.setDaemon(True)
        worker.start()
        self._do_loop_turn()
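
For context, a sketch of the log line shape that _update_db and
_process_notification_brok expect; the sample line below is invented, but its
field order mirrors the keys in self.service_notification:

# Illustrative notification log line (made-up values). _update_db splits it
# once on ':'; the fields after the header then map onto
# ('contact', 'host', 'service_description', 'state', 'command', 'output').
sample = ("[1400000000] SERVICE NOTIFICATION: "
          "admin;web01;http;CRITICAL;notify-service-by-email;Connection refused")
header, info = sample.split(':', 1)
# header carries the bracketed timestamp matched by timestamp_regex;
# info carries the semicolon-separated notification fields.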