Example #1
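    # Verifies that configure() renders the mongos configuration file on disk.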
    def test_mongos_configure(self):
        server = MongosServer()
        server.my_hostname = 'node1.test.com'
        server.configure(self.env)

        self.assertTrue(
            os.path.exists(server.mongodb_config_file),
            "The config file for mongos instances does not exist")
    def test_get_cluster_status_with_several_hosts(self):
        self.several_hosts_setup_with_config_server()
        server = MongosServer()
        server.my_hostname = 'node1.test.com'

        clusterStatus = server.getClusterStatus(server.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result before starting the mongos is not right")
    def test_stopping_an_already_stopped_cluster(self):
        self.one_host_setup()
        server = MongosServer()
        server.my_hostname = 'node1.test.com'
        clusterStatus = server.getClusterStatus(server.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_one_host_stopped,
                         "The cluster status result before starting the mongos is not right")
        server.stop(self.env)
        clusterStatus = server.getClusterStatus(server.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_one_host_stopped,
                         "The cluster status result after stopping the mongos is not right")
    def test_must_not_start_if_all_config_servers_are_off(self):
        self.several_hosts_setup()

        server1 = MongosServer()
        server1.my_hostname = 'node1.test.com'

        clusterStatus = server1.getClusterStatus(server1.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result before starting the mongos is not right")

        server1.start(self.env)

        clusterStatus = server1.getClusterStatus(server1.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result after trying to start the mongos "
                         "with all config servers off is not right")
Example #5
    def cluster_setup(self):
        Script.config['clusterHostInfo'] = {
            'mongos_hosts': ['node1.test.com'],
            'mongodb_hosts': ['node1.test.com','node2.test.com','node3.test.com'],
            'mongodc_hosts': ['node1.test.com']
        }

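        # Shard layout: shards are separated by ';', members by ',', and '/arbiter' marks an arbiter instance.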
        params.mongod_cluster_definition = 'node1.test.com,node2.test.com/arbiter,node3.test.com,node2.test.com;' \
                                           'node2.test.com,node2.test.com,node3.test.com/arbiter'

        # Starting the required config server
        self.config_server = MongoConfigServer()
        self.config_server.my_hostname = 'node1.test.com'
        self.config_server.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        # Starting the mongos server
        self.mongos_server = MongosServer()
        self.mongos_server.my_hostname = 'node1.test.com'
        self.mongos_server.start(self.env)
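
    # For the one-host setup, getClusterData() should report two instances on node1 (ports 27017 and 27018).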
    def test_get_cluster_data_with_one_host(self):
        self.one_host_setup()
        server = MongosServer()
        server.my_hostname = 'node1.test.com'

        expectedClusterData = [('0', ['node1.test.com', 'node1.test.com'],
                                [InstanceConfig(shard_name='0',
                                                pid_file_name='/var/run/mongodb/node1_0_0.pid',
                                                final_db_path='/var/lib/mongodb/node1_0_0',
                                                log_file='/var/log/mongodb/node1_0_0.log',
                                                db_port='27017',
                                                host_name='node1.test.com',
                                                is_arbiter=False),
                                 InstanceConfig(shard_name='0',
                                                pid_file_name='/var/run/mongodb/node1_0_1.pid',
                                                final_db_path='/var/lib/mongodb/node1_0_1',
                                                log_file='/var/log/mongodb/node1_0_1.log',
                                                db_port='27018',
                                                host_name='node1.test.com',
                                                is_arbiter=False)])]
        clusterData = server.getClusterData()
        self.assertEqual(clusterData,expectedClusterData,"The cluster data for the mongos is not right")
    def setUp(self):
        self.as_super = super(IntegratedMongoConfTestCase, self)
        self.as_super.setUp()
        self.config_server = None
        params.try_interval = 4
        params.times_to_try = 10
        # Configuring and Installing mongo config dependencies
        server = MongoConfigServer()
        server.my_hostname = 'node1.test.com'
        server.configure(self.env)
        server.install(self.env)
        # Configuring and Installing mongos dependencies
        server = MongosServer()
        server.my_hostname = 'node1.test.com'
        server.configure(self.env)
        server.install(self.env)
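
    # With only the secondary config servers up (no primary), starting the mongos must leave the cluster stopped.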
    def test_must_not_start_if_no_config_servers_primary_on(self):
        self.several_hosts_setup()

        server1 = MongosServer()
        server1.my_hostname = 'node1.test.com'

        clusterStatus = server1.getClusterStatus(server1.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result before starting the mongos is not right")

        # Starting only the secondary config servers
        config_server2 = MongoConfigServer()
        config_server2.my_hostname = 'node2.test.com'
        config_server2.start(self.env)

        config_server3 = MongoConfigServer()
        config_server3.my_hostname = 'node3.test.com'
        config_server3.start(self.env)

        server1.start(self.env)

        clusterStatus = server1.getClusterStatus(server1.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result after trying to start the mongos "
                         "without a primary config server is not right")
Example #9
    def test_mongos_install(self):
        server = MongosServer()
        server.my_hostname = 'node1.test.com'
        server.install(self.env)
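
    # Full lifecycle over two hosts: start the node2 mongos, then node1, checking the reported status after
    # each step, then stop both and verify the status returns to the stopped baseline.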
    def test_mongos_with_several_hosts(self):
        self.several_hosts_setup_with_config_server()

        server2 = MongosServer()
        server2.my_hostname = 'node2.test.com'
        server1 = MongosServer()
        server1.my_hostname = 'node1.test.com'

        clusterStatus = server2.getClusterStatus(server2.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result before starting the mongos is not right")

        server2.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        server2.status(self.env)

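        # Only the node2 instance is expected to report is_started=True at this point.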
        expectedClusterStatusServer2On = [
        ('0',['node1.test.com','node2.test.com'], [
            InstanceStatus(shard_name='0',
                           pid_file_name='/var/run/mongodb/node1_0_0.pid',
                           final_db_path='/var/lib/mongodb/node1_0_0',
                           log_file='/var/log/mongodb/node1_0_0.log',
                           db_port='27017',
                           host_name='node1.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='0',
                           pid_file_name='/var/run/mongodb/node2_0_0.pid',
                           final_db_path='/var/lib/mongodb/node2_0_0',
                           log_file='/var/log/mongodb/node2_0_0.log',
                           db_port='27017',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None)])]

        clusterStatus = server2.getClusterStatus(server2.getClusterData())
        self.assertEqual(clusterStatus, expectedClusterStatusServer2On, "The cluster status result for a started node2"
                                                                        " in the mongos is not right")
        server1.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        server1.status(self.env)

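        # After starting node1 as well, both instances are expected to report is_started=True.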
        expectedClusterStatusServer1On = [
        ('0',['node1.test.com','node2.test.com'], [
            InstanceStatus(shard_name='0',
                           pid_file_name='/var/run/mongodb/node1_0_0.pid',
                           final_db_path='/var/lib/mongodb/node1_0_0',
                           log_file='/var/log/mongodb/node1_0_0.log',
                           db_port='27017',
                           host_name='node1.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None),
            InstanceStatus(shard_name='0',
                           pid_file_name='/var/run/mongodb/node2_0_0.pid',
                           final_db_path='/var/lib/mongodb/node2_0_0',
                           log_file='/var/log/mongodb/node2_0_0.log',
                           db_port='27017',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None)])]

        clusterStatus = server1.getClusterStatus(server1.getClusterData())
        self.assertEqual(clusterStatus, expectedClusterStatusServer1On, "The cluster status result for a started node1"
                                                                        " in the mongos is not right")

        server2.stop(self.env)
        with self.assertRaises(ComponentIsNotRunning):
            server2.status(self.env)

        server1.stop(self.env)
        with self.assertRaises(ComponentIsNotRunning):
            server1.status(self.env)

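        # After stopping both instances, the status should match the all-stopped baseline again.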
        clusterStatus = server2.getClusterStatus(server2.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_for_several_hosts_stopped,
                         "The cluster status result after stopping the mongos is not right")
Example #11
class IntegratedShardedClusterTestCase(IntegratedBaseTestCase):

    def setUp(self):
        self.as_super = super(IntegratedShardedClusterTestCase, self)
        self.as_super.setUp()
        self.config_server = None
        self.mongos_server = None
        params.try_interval = 4
        params.times_to_try = 10

        # Configuring and Installing mongodb dependencies
        server = MongoDBServer()
        server.my_hostname = 'node1.test.com'
        server.configure(self.env)
        server.install(self.env)
        # Configuring and Installing mongo config dependencies
        server = MongoConfigServer()
        server.my_hostname = 'node1.test.com'
        server.configure(self.env)
        server.install(self.env)
        # Configuring and Installing mongos dependencies
        server = MongosServer()
        server.my_hostname = 'node1.test.com'
        server.configure(self.env)
        server.install(self.env)

    def tearDown(self):
        self.as_super = super(IntegratedShardedClusterTestCase, self)
        self.as_super.tearDown()
        if self.config_server:
            self.config_server.stop(self.env)
        if self.mongos_server:
            self.mongos_server.stop(self.env)

    def cluster_setup(self):
        Script.config['clusterHostInfo'] = {
            'mongos_hosts': ['node1.test.com'],
            'mongodb_hosts': ['node1.test.com','node2.test.com','node3.test.com'],
            'mongodc_hosts': ['node1.test.com']
        }

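        # Shard layout: shards are separated by ';', members by ',', and '/arbiter' marks an arbiter instance.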
        params.mongod_cluster_definition = 'node1.test.com,node2.test.com/arbiter,node3.test.com,node2.test.com;' \
                                           'node2.test.com,node2.test.com,node3.test.com/arbiter'

        # Starting the required config server
        self.config_server = MongoConfigServer()
        self.config_server.my_hostname = 'node1.test.com'
        self.config_server.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        # Starting the mongos server
        self.mongos_server = MongosServer()
        self.mongos_server.my_hostname = 'node1.test.com'
        self.mongos_server.start(self.env)

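    # Baseline: every mongod instance in both shards is reported as stopped, with no replica set configured.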
    expected_cluster_status_stopped = [
        ('shard0',['node1.test.com','node2.test.com/arbiter','node3.test.com','node2.test.com'], [
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node1_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node1_shard0_0',
                           log_file='/var/log/mongodb/node1_shard0_0.log',
                           db_port='27025',
                           host_name='node1.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_0',
                           log_file='/var/log/mongodb/node2_shard0_0.log',
                           db_port='27025',
                           host_name='node2.test.com',
                           is_arbiter=True,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node3_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node3_shard0_0',
                           log_file='/var/log/mongodb/node3_shard0_0.log',
                           db_port='27025',
                           host_name='node3.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_1.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_1',
                           log_file='/var/log/mongodb/node2_shard0_1.log',
                           db_port='27030',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None)]),
        ('shard1', ['node2.test.com', 'node2.test.com', 'node3.test.com/arbiter'], [
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node2_shard1_2.pid',
                           final_db_path='/var/lib/mongodb/node2_shard1_2',
                           log_file='/var/log/mongodb/node2_shard1_2.log',
                           db_port='27031',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node2_shard1_3.pid',
                           final_db_path='/var/lib/mongodb/node2_shard1_3',
                           log_file='/var/log/mongodb/node2_shard1_3.log',
                           db_port='27032',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node3_shard1_1.pid',
                           final_db_path='/var/lib/mongodb/node3_shard1_1',
                           log_file='/var/log/mongodb/node3_shard1_1.log',
                           db_port='27030',
                           host_name='node3.test.com',
                           is_arbiter=True,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None)])
    ]

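    # Brings the mongod hosts up one by one (node3, node2, node1) and checks that the mongos
    # registers each shard as soon as its replica set becomes available.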
    def test_sharded_cluster(self):
        self.cluster_setup()

        server3 = MongoDBServer()
        server3.my_hostname = 'node3.test.com'
        server2 = MongoDBServer()
        server2.my_hostname = 'node2.test.com'
        server1 = MongoDBServer()
        server1.my_hostname = 'node1.test.com'

        clusterStatus = server3.getClusterStatus(server3.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_stopped,
                         "The cluster status result before starting the replicaset is not right")

        mongos_status, shard_list = server3.getMongosStatus('node1.test.com:27017')
        self.assertTrue(mongos_status,"Mongos MUST be running to execute this test!")
        self.assertEqual(len(shard_list),0,'The mongos must not know any shard at this first point!')

        server3.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        server3.status(self.env)

        mongos_status, shard_list = server3.getMongosStatus('node1.test.com:27017')
        self.assertTrue(mongos_status,"Mongos MUST be running to execute this test!")
        self.assertEqual(len(shard_list),0,'The mongos must not know any shard at this second point!')

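        # With only node3 up, its instances are started but no replica set is configured yet,
        # so the mongos still reports zero shards.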
        expectedClusterStatusServer3On = [
        ('shard0',['node1.test.com','node2.test.com/arbiter','node3.test.com','node2.test.com'], [
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node1_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node1_shard0_0',
                           log_file='/var/log/mongodb/node1_shard0_0.log',
                           db_port='27025',
                           host_name='node1.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_0',
                           log_file='/var/log/mongodb/node2_shard0_0.log',
                           db_port='27025',
                           host_name='node2.test.com',
                           is_arbiter=True,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node3_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node3_shard0_0',
                           log_file='/var/log/mongodb/node3_shard0_0.log',
                           db_port='27025',
                           host_name='node3.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_1.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_1',
                           log_file='/var/log/mongodb/node2_shard0_1.log',
                           db_port='27030',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None)]),
        ('shard1', ['node2.test.com', 'node2.test.com', 'node3.test.com/arbiter'], [
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node2_shard1_2.pid',
                           final_db_path='/var/lib/mongodb/node2_shard1_2',
                           log_file='/var/log/mongodb/node2_shard1_2.log',
                           db_port='27031',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node2_shard1_3.pid',
                           final_db_path='/var/lib/mongodb/node2_shard1_3',
                           log_file='/var/log/mongodb/node2_shard1_3.log',
                           db_port='27032',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node3_shard1_1.pid',
                           final_db_path='/var/lib/mongodb/node3_shard1_1',
                           log_file='/var/log/mongodb/node3_shard1_1.log',
                           db_port='27030',
                           host_name='node3.test.com',
                           is_arbiter=True,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None)])
        ]

        clusterStatus = server3.getClusterStatus(server3.getClusterData())
        self.assertEqual(clusterStatus, expectedClusterStatusServer3On, "The cluster status result for a started node3 "
                                                                        "in the replicaset is not right")
        server2.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        server2.status(self.env)

        mongos_status, shard_list = server3.getMongosStatus('node1.test.com:27017')
        self.assertTrue(mongos_status,"Mongos MUST be running to execute this test!")
        self.assertEqual(len(shard_list),1,'The mongos must know one shard at this point!')

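        # With node2 also up, shard1 (all of its members live on node2/node3) gets its replica set
        # configured and is registered in the mongos; shard0 still waits for node1.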
        expectedClusterStatusServer2On = [
        ('shard0',['node1.test.com','node2.test.com/arbiter','node3.test.com','node2.test.com'], [
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node1_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node1_shard0_0',
                           log_file='/var/log/mongodb/node1_shard0_0.log',
                           db_port='27025',
                           host_name='node1.test.com',
                           is_arbiter=False,
                           is_started=False,
                           is_repl_configurated=None,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_0',
                           log_file='/var/log/mongodb/node2_shard0_0.log',
                           db_port='27025',
                           host_name='node2.test.com',
                           is_arbiter=True,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node3_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node3_shard0_0',
                           log_file='/var/log/mongodb/node3_shard0_0.log',
                           db_port='27025',
                           host_name='node3.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_1.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_1',
                           log_file='/var/log/mongodb/node2_shard0_1.log',
                           db_port='27030',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=False,
                           repl_role=None)]),
        ('shard1', ['node2.test.com', 'node2.test.com', 'node3.test.com/arbiter'], [
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node2_shard1_2.pid',
                           final_db_path='/var/lib/mongodb/node2_shard1_2',
                           log_file='/var/log/mongodb/node2_shard1_2.log',
                           db_port='27031',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="PRIMARY"),
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node2_shard1_3.pid',
                           final_db_path='/var/lib/mongodb/node2_shard1_3',
                           log_file='/var/log/mongodb/node2_shard1_3.log',
                           db_port='27032',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="SECONDARY"),
            InstanceStatus(shard_name='shard1',
                           pid_file_name='/var/run/mongodb/node3_shard1_1.pid',
                           final_db_path='/var/lib/mongodb/node3_shard1_1',
                           log_file='/var/log/mongodb/node3_shard1_1.log',
                           db_port='27030',
                           host_name='node3.test.com',
                           is_arbiter=True,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="SECONDARY")])
        ]

        clusterStatus = server2.getClusterStatus(server2.getClusterData())
        self.assertEqual(clusterStatus, expectedClusterStatusServer2On, "The cluster status result for a started node2"
                                                                        " in the replicaset is not right")
        server1.start(self.env)
        sleep(self.SLEEP_INTERVAL_AFTER_START_A_INSTANCE)
        server1.status(self.env)

        mongos_status, shard_list = server3.getMongosStatus('node1.test.com:27017')
        self.assertTrue(mongos_status,"Mongos MUST be running to execute this test!")
        self.assertEqual(len(shard_list),2,'The mongos must know two shards at this point!')

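        # With node1 up as well, shard0 completes its replica set and the mongos now knows both shards.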
        expectedClusterStatusServer1On = [
        ('shard0',['node1.test.com','node2.test.com/arbiter','node3.test.com','node2.test.com'], [
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node1_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node1_shard0_0',
                           log_file='/var/log/mongodb/node1_shard0_0.log',
                           db_port='27025',
                           host_name='node1.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="PRIMARY"),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_0',
                           log_file='/var/log/mongodb/node2_shard0_0.log',
                           db_port='27025',
                           host_name='node2.test.com',
                           is_arbiter=True,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="SECONDARY"),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node3_shard0_0.pid',
                           final_db_path='/var/lib/mongodb/node3_shard0_0',
                           log_file='/var/log/mongodb/node3_shard0_0.log',
                           db_port='27025',
                           host_name='node3.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="SECONDARY"),
            InstanceStatus(shard_name='shard0',
                           pid_file_name='/var/run/mongodb/node2_shard0_1.pid',
                           final_db_path='/var/lib/mongodb/node2_shard0_1',
                           log_file='/var/log/mongodb/node2_shard0_1.log',
                           db_port='27030',
                           host_name='node2.test.com',
                           is_arbiter=False,
                           is_started=True,
                           is_repl_configurated=True,
                           repl_role="SECONDARY")]),
            ('shard1', ['node2.test.com', 'node2.test.com', 'node3.test.com/arbiter'], [
                InstanceStatus(shard_name='shard1',
                               pid_file_name='/var/run/mongodb/node2_shard1_2.pid',
                               final_db_path='/var/lib/mongodb/node2_shard1_2',
                               log_file='/var/log/mongodb/node2_shard1_2.log',
                               db_port='27031',
                               host_name='node2.test.com',
                               is_arbiter=False,
                               is_started=True,
                               is_repl_configurated=True,
                               repl_role="PRIMARY"),
                InstanceStatus(shard_name='shard1',
                               pid_file_name='/var/run/mongodb/node2_shard1_3.pid',
                               final_db_path='/var/lib/mongodb/node2_shard1_3',
                               log_file='/var/log/mongodb/node2_shard1_3.log',
                               db_port='27032',
                               host_name='node2.test.com',
                               is_arbiter=False,
                               is_started=True,
                               is_repl_configurated=True,
                               repl_role="SECONDARY"),
                InstanceStatus(shard_name='shard1',
                               pid_file_name='/var/run/mongodb/node3_shard1_1.pid',
                               final_db_path='/var/lib/mongodb/node3_shard1_1',
                               log_file='/var/log/mongodb/node3_shard1_1.log',
                               db_port='27030',
                               host_name='node3.test.com',
                               is_arbiter=True,
                               is_started=True,
                               is_repl_configurated=True,
                               repl_role="SECONDARY")])
        ]

        clusterStatus = server2.getClusterStatus(server2.getClusterData())
        self.assertEqual(clusterStatus, expectedClusterStatusServer1On, "The cluster status result for a started node1"
                                                                        " in the replicaset is not right")

        server2.stop(self.env)
        with self.assertRaises(ComponentIsNotRunning):
            server2.status(self.env)

        server1.stop(self.env)
        with self.assertRaises(ComponentIsNotRunning):
            server1.status(self.env)

        server3.stop(self.env)
        with self.assertRaises(ComponentIsNotRunning):
            server3.status(self.env)


        clusterStatus = server3.getClusterStatus(server3.getClusterData())
        self.assertEqual(clusterStatus, self.expected_cluster_status_stopped,
                         "The cluster status result after stopping the replicaset is not right")