Example #1
    def test_add_and_remove_node(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start()
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1,
                                    ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        logger.debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        session = self.patient_cql_connection(node1)
        # reduce system_distributed and system_traces RF to 1 so we don't require forceful decommission
        session.execute(
            "ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};"
        )
        session.execute(
            "ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};"
        )

        logger.debug("Adding second node...")
        node2 = Node('node2',
                     self.cluster,
                     True,
                     None, ('127.0.0.2', 7000),
                     '7200',
                     '0',
                     None,
                     binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start()
        logger.debug("Waiting for notifications from {}".format(
            waiter.address))
        notifications = waiter.wait_for_notifications(timeout=120.0,
                                                      num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
            assert "NEW_NODE" == notifications[0]["change_type"]
            assert "UP" == notifications[1]["change_type"]

        logger.debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        logger.debug("Waiting for notifications from {}".format(
            waiter.address))
        notifications = waiter.wait_for_notifications(timeout=120.0,
                                                      num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
            assert "REMOVED_NODE" == notifications[0]["change_type"]
            assert "DOWN" == notifications[1]["change_type"]
Example #2
    def test_move_single_node(self):
        """
        @jira_ticket CASSANDRA-8516
        Moving a token should result in MOVED_NODE notifications.
        """
        self.cluster.populate(3).start()

        waiters = [
            NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
            for node in list(self.cluster.nodes.values())
        ]

        # The first node sends NEW_NODE for the other 2 nodes during startup; in case they are
        # late due to network delays, let's block a bit longer
        logger.debug("Waiting for unwanted notifications....")
        waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
        waiters[0].clear_notifications()

        logger.debug("Issuing move command....")
        node1 = list(self.cluster.nodes.values())[0]
        node1.move("123")

        for waiter in waiters:
            logger.debug("Waiting for notification from {}".format(
                waiter.address, ))
            notifications = waiter.wait_for_notifications(60.0)
            assert 1 == len(notifications), notifications
            notification = notifications[0]
            change_type = notification["change_type"]
            address, port = notification["address"]
            assert "MOVED_NODE" == change_type
            assert get_ip_from_node(node1) == address
Example #3
    def _create_session(self, node, keyspace, user, password, compression, protocol_version,
                        port=None, ssl_opts=None, execution_profiles=None, **kwargs):
        node_ip = get_ip_from_node(node)
        if not port:
            port = get_port_from_node(node)

        if protocol_version is None:
            protocol_version = get_eager_protocol_version(node.cluster.version())

        if user is not None:
            auth_provider = get_auth_provider(user=user, password=password)
        else:
            auth_provider = None

        profiles = {EXEC_PROFILE_DEFAULT: make_execution_profile(**kwargs)
                    } if not execution_profiles else execution_profiles

        cluster = PyCluster([node_ip],
                            auth_provider=auth_provider,
                            compression=compression,
                            protocol_version=protocol_version,
                            port=port,
                            ssl_options=ssl_opts,
                            connect_timeout=15,
                            allow_beta_protocol_version=True,
                            execution_profiles=profiles)
        session = cluster.connect(wait_for_all_pools=True)

        if keyspace is not None:
            session.set_keyspace(keyspace)

        self.connections.append(session)
        return session
Example #4
    def _create_session(self, node, keyspace, user, password, compression, protocol_version,
                        port=None, ssl_opts=None, execution_profiles=None, **kwargs):
        node_ip = get_ip_from_node(node)
        if not port:
            port = get_port_from_node(node)

        if protocol_version is None:
            protocol_version = get_eager_protocol_version(node.cluster.version())

        if user is not None:
            auth_provider = get_auth_provider(user=user, password=password)
        else:
            auth_provider = None

        profiles = {EXEC_PROFILE_DEFAULT: make_execution_profile(**kwargs)
                    } if not execution_profiles else execution_profiles

        cluster = PyCluster([node_ip],
                            auth_provider=auth_provider,
                            compression=compression,
                            protocol_version=protocol_version,
                            port=port,
                            ssl_options=ssl_opts,
                            connect_timeout=15,
                            allow_beta_protocol_version=True,
                            execution_profiles=profiles)
        session = cluster.connect(wait_for_all_pools=True)

        if keyspace is not None:
            session.set_keyspace(keyspace)

        self.connections.append(session)
        return session
Example #5
    def test_move_single_node(self):
        """
        @jira_ticket CASSANDRA-8516
        Moving a token should result in MOVED_NODE notifications.
        """
        self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True)

        waiters = [NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
                   for node in list(self.cluster.nodes.values())]

        # The first node sends NEW_NODE for the other 2 nodes during startup; in case they are
        # late due to network delays, let's block a bit longer
        logger.debug("Waiting for unwanted notifications....")
        waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
        waiters[0].clear_notifications()

        logger.debug("Issuing move command....")
        node1 = list(self.cluster.nodes.values())[0]
        node1.move("123")

        for waiter in waiters:
            logger.debug("Waiting for notification from {}".format(waiter.address,))
            notifications = waiter.wait_for_notifications(60.0)
            assert 1 == len(notifications), notifications
            notification = notifications[0]
            change_type = notification["change_type"]
            address, port = notification["address"]
            assert "MOVED_NODE" == change_type
            assert get_ip_from_node(node1) == address
Example #6
    def move_single_node_test(self):
        """
        @jira_ticket CASSANDRA-8516
        Moving a token should result in MOVED_NODE notifications.
        """
        self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True)

        waiters = [NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
                   for node in self.cluster.nodes.values()]

        # The first node sends NEW_NODE for the other 2 nodes during startup; in case they are
        # late due to network delays, let's block a bit longer
        debug("Waiting for unwanted notifications....")
        waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
        waiters[0].clear_notifications()

        debug("Issuing move command....")
        node1 = self.cluster.nodes.values()[0]
        node1.move("123")

        for waiter in waiters:
            debug("Waiting for notification from {}".format(waiter.address,))
            notifications = waiter.wait_for_notifications(60.0)
            self.assertEquals(1, len(notifications), notifications)
            notification = notifications[0]
            change_type = notification["change_type"]
            address, port = notification["address"]
            self.assertEquals("MOVED_NODE", change_type)
            self.assertEquals(get_ip_from_node(node1), address)
Example #7
    def test_add_and_remove_node(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        logger.debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        session = self.patient_cql_connection(node1)
        # reduce system_distributed and system_traces RF to 1 so we don't require forceful decommission
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
        session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")

        logger.debug("Adding second node...")
        node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start(wait_other_notice=True)
        logger.debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
            assert "NEW_NODE" == notifications[0]["change_type"]
            assert "UP" == notifications[1]["change_type"]

        logger.debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        logger.debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        assert 2 == len(notifications), notifications
        for notification in notifications:
            assert get_ip_from_node(node2) == notification["address"][0]
            assert "REMOVED_NODE" == notifications[0]["change_type"]
            assert "DOWN" == notifications[1]["change_type"]
Example #8
    def exclusive_cql_connection(self, node, keyspace=None, user=None,
                                 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None,
                                 **kwargs):

        node_ip = get_ip_from_node(node)
        wlrr = WhiteListRoundRobinPolicy([node_ip])

        return self._create_session(node, keyspace, user, password, compression,
                                    protocol_version, port=port, ssl_opts=ssl_opts, load_balancing_policy=wlrr,
                                    **kwargs)
Example #9
    def exclusive_cql_connection(self, node, keyspace=None, user=None,
                                 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None,
                                 **kwargs):

        node_ip = get_ip_from_node(node)
        wlrr = WhiteListRoundRobinPolicy([node_ip])

        return self._create_session(node, keyspace, user, password, compression,
                                    protocol_version, port=port, ssl_opts=ssl_opts, load_balancing_policy=wlrr,
                                    **kwargs)
Example #10
    def add_and_remove_node_test(self):
        """
        Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
        @jira_ticket CASSANDRA-11038
        """
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = self.cluster.nodelist()[0]

        waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below
        debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        debug("Adding second node...")
        node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
        self.cluster.add(node2, False)
        node2.start(wait_other_notice=True)
        debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        self.assertEquals(2, len(notifications), notifications)
        for notification in notifications:
            self.assertEquals(get_ip_from_node(node2), notification["address"][0])
            self.assertEquals("NEW_NODE", notifications[0]["change_type"])
            self.assertEquals("UP", notifications[1]["change_type"])

        debug("Removing second node...")
        waiter.clear_notifications()
        node2.decommission()
        node2.stop(gently=False)
        debug("Waiting for notifications from {}".format(waiter.address))
        notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
        self.assertEquals(2, len(notifications), notifications)
        for notification in notifications:
            self.assertEquals(get_ip_from_node(node2), notification["address"][0])
            self.assertEquals("REMOVED_NODE", notifications[0]["change_type"])
            self.assertEquals("DOWN", notifications[1]["change_type"])
Example #11
    def test_move_single_node_localhost(self):
        """
        Test that we don't get MOVED_NODE notifications from nodes other than the local one,
        when rpc_address is set to localhost (127.0.0.1) pre-4.0.
        Test that we get MOVED_NODE notifications from nodes other than the local one,
        when rpc_address is set to localhost (127.0.0.1) post-4.0.
        @jira_ticket  CASSANDRA-10052
        @jira_ticket  CASSANDRA-15677

        To set up this test we override the rpc_address to "localhost (127.0.0.1)" for all nodes, and
        therefore we must change the rpc port or else processes won't start.
        """
        cluster = self.cluster
        cluster.populate(3)

        self.change_rpc_address_to_localhost()

        cluster.start()

        waiters = [
            NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
            for node in list(self.cluster.nodes.values())
        ]

        # The first node sends NEW_NODE for the other 2 nodes during startup; in case they are
        # late due to network delays, let's block a bit longer
        logger.debug("Waiting for unwanted notifications...")
        waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
        waiters[0].clear_notifications()

        logger.debug("Issuing move command....")
        node1 = list(self.cluster.nodes.values())[0]
        node1.move("123")

        version = self.cluster.cassandra_version()
        for waiter in waiters:
            logger.debug("Waiting for notification from {}".format(
                waiter.address, ))
            notifications = waiter.wait_for_notifications(30.0)
            if version >= '4.0':
                # CASSANDRA-15677 Post 4.0 we'll get the notifications. Check that they are for the right node.
                assert 1 == len(notifications), notifications
                notification = notifications[0]
                change_type = notification["change_type"]
                address, port = notification["address"]
                assert "MOVED_NODE" == change_type
                assert get_ip_from_node(node1) == address
                assert get_port_from_node(node1) == port
            else:
                assert (1 if waiter.node is node1 else 0) == len(notifications), notifications
Example #12
    def test_restart_node_localhost(self):
        """
        Test that we don't get client notifications when rpc_address is set to localhost Pre 4.0.
        Test that we get correct client notifications when rpc_address is set to localhost Post 4.0.
        @jira_ticket  CASSANDRA-10052
        @jira_ticket  CASSANDRA-15677

        To set up this test we override the rpc_address to "localhost" for all nodes, and
        therefore we must change the rpc port or else processes won't start.
        """
        cluster = self.cluster
        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        self.change_rpc_address_to_localhost()

        cluster.start()

        # register for notification with node1
        waiter = NotificationWaiter(self, node1,
                                    ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # restart node 2
        version = self.cluster.cassandra_version()
        if version >= '4.0':
            # >=4.0 we wait for the NEW_NODE and UP notifications to reach us
            waiter.wait_for_notifications(timeout=30.0, num_notifications=2)
            waiter.clear_notifications()

        logger.debug("Restarting second node...")
        node2.stop(wait_other_notice=True)
        node2.start()

        # check that node1 did not send UP or DOWN notification for node2
        logger.debug("Waiting for notifications from {}".format(
            waiter.address, ))
        notifications = waiter.wait_for_notifications(timeout=30.0,
                                                      num_notifications=2)

        if version >= '4.0':
            # CASSANDRA-15677 Post 4.0 we'll get the notifications. Check that they are for the right node.
            for notification in notifications:
                address, port = notification["address"]
                assert get_ip_from_node(node2) == address
                assert get_port_from_node(node2) == port
            assert "DOWN" == notifications[0]["change_type"], notifications
            assert "UP" == notifications[1]["change_type"], notifications
        else:
            assert 0 == len(notifications), notifications
Example #13
    def test_restart_node(self):
        """
        @jira_ticket CASSANDRA-7816
        Restarting a node should generate exactly one DOWN and one UP notification
        """
        self.cluster.populate(2).start(wait_for_binary_proto=True,
                                       wait_other_notice=True)
        node1, node2 = self.cluster.nodelist()

        waiter = NotificationWaiter(self, node1,
                                    ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below.
        logger.debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        # On versions prior to 2.2, an additional NEW_NODE notification is sent when a node
        # is restarted. This bug was fixed in CASSANDRA-11038 (see also CASSANDRA-11360)
        version = self.cluster.cassandra_version()
        expected_notifications = 2 if version >= '2.2' else 3
        for i in range(5):
            logger.debug("Restarting second node...")
            node2.stop(wait_other_notice=True)
            node2.start(wait_other_notice=True)
            logger.debug("Waiting for notifications from {}".format(
                waiter.address))
            notifications = waiter.wait_for_notifications(
                timeout=60.0, num_notifications=expected_notifications)
            assert expected_notifications == len(notifications), notifications
            for notification in notifications:
                assert get_ip_from_node(node2) == notification["address"][0]
            assert "DOWN" == notifications[0]["change_type"]
            if version >= '2.2':
                assert "UP" == notifications[1]["change_type"]
            else:
                # pre 2.2, we'll receive both a NEW_NODE and an UP notification,
                # but the order is not guaranteed
                assert {"NEW_NODE", "UP"
                        } == set([n["change_type"] for n in notifications[1:]])

            waiter.clear_notifications()
Example #14
    def restart_node_test(self):
        """
        @jira_ticket CASSANDRA-7816
        Restarting a node should generate exactly one DOWN and one UP notification
        """

        self.cluster.populate(2).start(wait_for_binary_proto=True, wait_other_notice=True)
        node1, node2 = self.cluster.nodelist()

        waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])

        # need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
        # don't confuse the state below.
        debug("Waiting for unwanted notifications...")
        waiter.wait_for_notifications(timeout=30, num_notifications=2)
        waiter.clear_notifications()

        # On versions prior to 2.2, an additional NEW_NODE notification is sent when a node
        # is restarted. This bug was fixed in CASSANDRA-11038 (see also CASSANDRA-11360)
        version = self.cluster.cassandra_version()
        expected_notifications = 2 if version >= '2.2' else 3
        for i in range(5):
            debug("Restarting second node...")
            node2.stop(wait_other_notice=True)
            node2.start(wait_other_notice=True)
            debug("Waiting for notifications from {}".format(waiter.address))
            notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=expected_notifications)
            self.assertEquals(expected_notifications, len(notifications), notifications)
            for notification in notifications:
                self.assertEquals(get_ip_from_node(node2), notification["address"][0])
            self.assertEquals("DOWN", notifications[0]["change_type"])
            if version >= '2.2':
                self.assertEquals("UP", notifications[1]["change_type"])
            else:
                # pre 2.2, we'll receive both a NEW_NODE and an UP notification,
                # but the order is not guaranteed
                self.assertEquals({"NEW_NODE", "UP"}, set(map(lambda n: n["change_type"], notifications[1:])))

            waiter.clear_notifications()