def decommission_node_test(self):
        debug("decommission_node_test()")
        cluster = self.cluster

        cluster.populate(1)
        # create and add a new node; it must not be a seed, otherwise
        # we get schema disagreement issues for a while after decommissioning it.
        node2 = Node("node2", cluster, True, ("127.0.0.2", 9160), ("127.0.0.2", 7000), "7200", None)
        cluster.add(node2, False)

        [node1, node2] = cluster.nodelist()
        node1.start()
        node2.start()
        wait(2)

        cursor = self.cql_connection(node1).cursor()
        self.prepare_for_changes(cursor)

        node2.decommission()
        wait(30)

        self.validate_schema_consistent(node1)
        self.make_schema_changes(cursor, namespace="ns1")

        # create and add a new node
        node3 = Node("node3", cluster, True, ("127.0.0.3", 9160), ("127.0.0.3", 7000), "7300", None)

        cluster.add(node3, True)
        node3.start()

        wait(30)
        self.validate_schema_consistent(node1)
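The long positional argument lists in these Node(...) calls are easy to misread, and dropping one argument shifts every later value onto the wrong parameter. Spelled with keyword arguments (parameter names as in ccm's Node constructor; worth verifying against the ccm version in use), the node2 call above is equivalent to:

        node2 = Node('node2', cluster,
                     auto_bootstrap=True,
                     thrift_interface=('127.0.0.2', 9160),
                     storage_interface=('127.0.0.2', 7000),
                     jmx_port='7200',
                     remote_debug_port='0',  # '0' disables remote debugging
                     initial_token=None)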
Example #2
    def test_add_and_remove_node(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start()
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1,
                                    ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        logger.debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        session = self.patient_cql_connection(node1)
        # reduce RF of the system keyspaces to 1 so we don't require forceful decommission
        session.execute(
            "ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};"
        )
        session.execute(
            "ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};"
        )

        logger.debug("Adding second node...")
        node2 = Node('node2',
                     self.cluster,
                     True,
                     None, ('127.0.0.2', 7000),
                     '7200',
                     '0',
                     None,
                     binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start()
        logger.debug("Waiting for notifications from {}".format(
            waiter.address))
        notifications = waiter.wait_for_notifications(timeout=120.0,
                                                      num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
        assert "NEW_NODE" == notifications[0]["change_type"]
        assert "UP" == notifications[1]["change_type"]

        logger.debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        logger.debug("Waiting for notifications from {}".format(
            waiter.address))
        notifications = waiter.wait_for_notifications(timeout=120.0,
                                                      num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
        assert "REMOVED_NODE" == notifications[0]["change_type"]
        assert "DOWN" == notifications[1]["change_type"]
Example #3
    def decommission_node_test(self):
        debug("decommission_node_test()")
        cluster = self.cluster

        cluster.populate(1)
        # create and add a new node; it must not be a seed, otherwise
        # we get schema disagreement issues for a while after decommissioning it.
        node2 = Node('node2', cluster, True, ('127.0.0.2', 9160),
                     ('127.0.0.2', 7000), '7200', '0', None)
        cluster.add(node2, False)

        [node1, node2] = cluster.nodelist()
        node1.start()
        node2.start()
        wait(2)

        cursor = self.cql_connection(node1).cursor()
        self.prepare_for_changes(cursor)

        node2.decommission()
        wait(30)

        self.validate_schema_consistent(node1)
        self.make_schema_changes(cursor, namespace='ns1')

        # create and add a new node
        node3 = Node('node3', cluster, True, ('127.0.0.3', 9160),
                     ('127.0.0.3', 7000), '7300', '0', None)

        cluster.add(node3, True)
        node3.start()

        wait(30)
        self.validate_schema_consistent(node1)
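The wait() calls in these examples are not from the standard library; in context they read as a plain settling delay while gossip, schema propagation, or decommission streaming completes. A minimal sketch, assuming the helper is just a sleep wrapper:

import time

def wait(seconds):
    # Give the cluster a fixed settling period; polling for the actual
    # condition (schema agreement, node state) would be more robust, but
    # these examples use plain delays.
    time.sleep(seconds)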
Example #4
    def test_decommission_node(self):
        logger.debug("decommission_node_test()")
        cluster = self.cluster

        cluster.populate(1)
        # create and add a new node; it must not be a seed, otherwise
        # we get schema disagreement issues for a while after decommissioning it.
        node2 = Node('node2',
                     cluster,
                     True,
                     ('127.0.0.2', 9160),
                     ('127.0.0.2', 7000),
                     '7200',
                     '0',
                     None,
                     binary_interface=('127.0.0.2', 9042))
        cluster.add(node2, False)

        node1, node2 = cluster.nodelist()
        node1.start(wait_for_binary_proto=True)
        node2.start(wait_for_binary_proto=True)
        wait(2)

        session = self.patient_cql_connection(node1)
        self.prepare_for_changes(session)

        node2.decommission()
        wait(30)

        self.validate_schema_consistent(node1)
        self.make_schema_changes(session, namespace='ns1')

        # create and add a new node
        node3 = Node('node3',
                     cluster,
                     True,
                     ('127.0.0.3', 9160),
                     ('127.0.0.3', 7000),
                     '7300',
                     '0',
                     None,
                     binary_interface=('127.0.0.3', 9042))

        cluster.add(node3, True)
        node3.start(wait_for_binary_proto=True)

        wait(30)
        self.validate_schema_consistent(node1)
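prepare_for_changes() and make_schema_changes() are helpers on the test class; these snippets show only their call sites. A hedged sketch of what the schema-change step might look like, assuming prepare_for_changes() created a keyspace named ks (the names and DDL here are illustrative, not the dtest originals):

    def make_schema_changes(self, session, namespace='ns1'):
        # Issue a mix of DDL, tagging object names with the namespace so
        # repeated invocations do not collide.
        session.execute("CREATE TABLE ks.cf_%s (key int PRIMARY KEY, v text)" % namespace)
        session.execute("ALTER TABLE ks.cf_%s ADD v2 text" % namespace)
        session.execute("CREATE INDEX ix_%s ON ks.cf_%s (v)" % (namespace, namespace))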
Example #5
    def add_and_remove_node_test(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1,
                                    ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        debug("Adding second node...")
        node2 = Node('node2', self.cluster, True, ('127.0.0.2', 9160),
                     ('127.0.0.2', 7000), '7200', '0', None,
                     binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start(wait_other_notice=True)
        debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0,
                                                      num_notifications=2)
        self.assertEqual(2, len(notifications), notifications)
        for notification in notifications:
            self.assertEqual(self.get_ip_from_node(node2),
                             notification["address"][0])
        self.assertEqual("NEW_NODE", notifications[0]["change_type"])
        self.assertEqual("UP", notifications[1]["change_type"])

        debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0,
                                                      num_notifications=2)
        self.assertEqual(2, len(notifications), notifications)
        for notification in notifications:
            self.assertEqual(self.get_ip_from_node(node2),
                             notification["address"][0])
        self.assertEqual("REMOVED_NODE", notifications[0]["change_type"])
        self.assertEqual("DOWN", notifications[1]["change_type"])
Example #6
    def test_add_and_remove_node(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        logger.debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        session = self.patient_cql_connection(node1)
        # reduce RF of the system keyspaces to 1 so we don't require forceful decommission
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
        session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")

        logger.debug("Adding second node...")
        node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start(wait_other_notice=True)
        logger.debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
        assert "NEW_NODE" == notifications[0]["change_type"]
        assert "UP" == notifications[1]["change_type"]

        logger.debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        logger.debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
        assert "REMOVED_NODE" == notifications[0]["change_type"]
        assert "DOWN" == notifications[1]["change_type"]
Example #7
    def add_and_remove_node_test(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        debug("Adding second node...")
        node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start(wait_other_notice=True)
        debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        self.assertEqual(2, len(notifications), notifications)
        for notification in notifications:
            self.assertEqual(get_ip_from_node(node2), notification["address"][0])
        self.assertEqual("NEW_NODE", notifications[0]["change_type"])
        self.assertEqual("UP", notifications[1]["change_type"])

        debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        self.assertEqual(2, len(notifications), notifications)
        for notification in notifications:
            self.assertEqual(get_ip_from_node(node2), notification["address"][0])
        self.assertEqual("REMOVED_NODE", notifications[0]["change_type"])
        self.assertEqual("DOWN", notifications[1]["change_type"])
Example #8
    def decommission_node_schema_check_test(self):
        cluster = self.cluster

        cluster.populate(1)
        # create and add a non-seed node.
        node2 = Node('node2',
                     cluster,
                     True,
                     ('127.0.0.2', 9160),
                     ('127.0.0.2', 7000),
                     '7200',
                     '0',
                     None)
        cluster.add(node2, False)

        [node1, node2] = cluster.nodelist()
        node1.start()
        node2.start()
        time.sleep(2)

        node2.decommission()
        time.sleep(30)

        self.validate_schema_consistent(node1)
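validate_schema_consistent(), used throughout these examples, is another harness helper. One way to implement the same check, assuming node.nodetool() returns the command's stdout first (verify against the ccm version in use), is to parse `nodetool describecluster` and require a single schema version across live nodes:

import re

def validate_schema_consistent(node):
    # Hypothetical sketch: every live node should report the same schema
    # version in the "Schema versions" section of describecluster output.
    out = node.nodetool('describecluster')[0]
    schema_section = out[out.index('Schema versions:'):]
    versions = re.findall(r'([0-9a-f-]{36}): \[', schema_section)
    assert len(versions) == 1, 'Schema not in agreement:\n%s' % schema_section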