Example #1
 def test_invalid_error_after_valid_error(self):
     err = """objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk
            /Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib.
            One of the two will be used. Which one is undefined.
            This string is no good and should fail."""
     with self.assertRaises(AssertionError):
         assert_stderr_clean(err)
Example #2
 def _launch_nodetool_cmd(self, node, cmd):
     """
     Launch a nodetool command, check that stderr is clean, and return the output.
     """
     out, err, _ = node.nodetool(cmd)
     assert_stderr_clean(err)
     return out
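A plausible call site for this helper, sketched under assumptions: the test name and the 'UN' status check are hypothetical, but the wrapper guarantees that every nodetool invocation is checked with assert_stderr_clean.

 def test_all_nodes_report_status(self):
     # hypothetical usage of _launch_nodetool_cmd: run 'status' on every node;
     # the helper already fails the test if stderr contains unexpected output
     for node in self.cluster.nodelist():
         out = self._launch_nodetool_cmd(node, 'status')
         assert 'UN' in out  # each live node should appear as Up/Normal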
Example #3
 def test_invalid_error_before_valid_errors(self):
     err = """This string is not good and should fail. WARN  14:08:15,018 JNA link failure, one or more native method will be unavailable.objc[36358]:
              Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java
              and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
              """
     with self.assertRaises(AssertionError):
         assert_stderr_clean(err)
Example #4
    def launch_standalone_scrub(self,
                                ks,
                                cf,
                                reinsert_overflowed_ttl=False,
                                no_validate=False):
        """
        Launch the standalone scrub
        """
        node1 = self.cluster.nodelist()[0]
        env = common.make_cassandra_env(node1.get_install_cassandra_root(),
                                        node1.get_node_cassandra_root())
        scrub_bin = node1.get_tool('sstablescrub')
        logger.debug(scrub_bin)

        args = [scrub_bin]
        if reinsert_overflowed_ttl:
            args += ['--reinsert-overflowed-ttl']
        if no_validate:
            args += ['--no-validate']
        args += [ks, cf]
        p = subprocess.Popen(args,
                             env=env,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        logger.debug(out.decode("utf-8"))
        # if we have less than 64G free space, we get this warning - ignore it
        if err and "Consider adding more capacity" not in err.decode("utf-8"):
            logger.debug(err.decode("utf-8"))
            assert_stderr_clean(err.decode("utf-8"))
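A sketch of how this launcher might be invoked; the keyspace and table names are hypothetical (they match the cassandra-stress defaults used elsewhere in these examples):

        # hypothetical invocation once the node has flushed some SSTables
        self.launch_standalone_scrub('keyspace1', 'standard1',
                                     reinsert_overflowed_ttl=False,
                                     no_validate=True)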
Example #5
 def test_invalid_error_after_valid_error(self):
     err = """objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk
            /Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib.
            One of the two will be used. Which one is undefined.
            This string is no good and should fail."""
     with pytest.raises(AssertionError):
         assert_stderr_clean(err)
Example #6
    def test_assassination_of_unknown_node(self):
        """
        @jira_ticket CASSANDRA-16588
        Test that a non-seed node can come back online after assassinating an
        unknown node.
        """
        cluster = self.cluster

        # Create a 5-node cluster
        cluster.populate(5)
        node1 = cluster.nodelist()[0]
        node3 = cluster.nodelist()[2]

        self.cluster.set_configuration_options({
            'seed_provider': [{
                'class_name':
                'org.apache.cassandra.locator.SimpleSeedProvider',
                'parameters': [{
                    'seeds': node1.address()
                }]
            }]
        })

        cluster.start()

        logger.debug("Shutting down node {}".format(node3.address()))
        node3.stop()

        logger.debug("Assassinating unknown node 11.1.1.1")
        out, err, _ = node1.nodetool("assassinate 11.1.1.1")
        assert_stderr_clean(err)

        logger.debug("Starting node {}".format(node3.address()))
        node3.start()
Example #7
    def test_correct_dc_rack_in_nodetool_info(self):
        """
        @jira_ticket CASSANDRA-10382

        Test that nodetool info returns the correct rack and dc
        """

        cluster = self.cluster
        cluster.populate([2, 2])
        cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch'})

        for i, node in enumerate(cluster.nodelist()):
            with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file:
                for line in ["dc={}".format(node.data_center), "rack=rack{}".format(i % 2)]:
                    snitch_file.write(line + os.linesep)

        cluster.start()

        for i, node in enumerate(cluster.nodelist()):
            out, err, _ = node.nodetool('info')
            assert_stderr_clean(err)
            out_str = out
            if isinstance(out, (bytes, bytearray)):
                out_str = out.decode("utf-8")
            logger.debug(out_str)
            for line in out_str.split(os.linesep):
                if line.startswith('Data Center'):
                    assert line.endswith(node.data_center), \
                        "Expected dc {} for {} but got {}".format(node.data_center, node.address(), line.rsplit(None, 1)[-1])
                elif line.startswith('Rack'):
                    rack = "rack{}".format(i % 2)
                    assert line.endswith(rack), \
                        "Expected rack {} for {} but got {}".format(rack, node.address(), line.rsplit(None, 1)[-1])
Example #8
 def test_invalid_error_before_valid_errors(self):
     err = """This string is not good and should fail. WARN  14:08:15,018 JNA link failure, one or more native method will be unavailable.objc[36358]:
              Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java
              and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
              """
     with pytest.raises(AssertionError):
         assert_stderr_clean(err)
Example #9
 def test_valid_and_invalid_errors_same_line(self):
     err = (
         "This string is no good and should fail.objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk"
         "/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib."
         "One of the two will be used. Which one is undefined.")
     with self.assertRaises(AssertionError):
         assert_stderr_clean(err)
Example #10
    def test_local_quorum_bootstrap(self):
        """
        Test that CL local_quorum works while a node is bootstrapping.
        @jira_ticket CASSANDRA-8058
        """
        cluster = self.cluster
        cluster.populate([1, 1])
        cluster.start()

        node1 = cluster.nodes['node1']
        yaml_config = """
        # Create the keyspace and table
        keyspace: keyspace1
        keyspace_definition: |
          CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};
        table: users
        table_definition:
          CREATE TABLE users (
            username text,
            first_name text,
            last_name text,
            email text,
            PRIMARY KEY(username)
          ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};
        insert:
          partitions: fixed(1)
          batchtype: UNLOGGED
        queries:
          read:
            cql: select * from users where username = ?
            fields: samerow
        """
        with tempfile.NamedTemporaryFile(mode='w+') as stress_config:
            stress_config.write(yaml_config)
            stress_config.flush()
            node1.stress([
                'user', 'profile=' + stress_config.name, 'n=200K', 'no-warmup',
                'ops(insert=1)', '-rate', 'threads=10'
            ])

            node3 = new_node(cluster, data_center='dc2')
            node3.start(jvm_args=["-Dcassandra.write_survey=true"],
                        no_wait=True)
            time.sleep(5)

            ntout = node1.nodetool('status').stdout
            assert re.search(r'UJ\s+' + node3.ip_addr, ntout), ntout
            out, err, _ = node1.stress([
                'user', 'profile=' + stress_config.name, 'ops(insert=1)',
                'n=10k', 'no-warmup', 'cl=LOCAL_QUORUM', '-rate', 'threads=10',
                '-errors', 'retries=2'
            ])
            ntout = node1.nodetool('status').stdout
            assert re.search(r'UJ\s+' + node3.ip_addr, ntout), ntout

        logger.debug(out)
        assert_stderr_clean(err)
        regex = re.compile("Operation.+error inserting key.+Exception")
        failure = regex.search(str(out))
        assert failure is None, "Error during stress while bootstrapping"
Example #11
    def local_quorum_bootstrap_test(self):
        """
        Test that CL local_quorum works while a node is bootstrapping.
        @jira_ticket CASSANDRA-8058
        """

        cluster = self.cluster
        cluster.populate([1, 1])
        cluster.start()

        node1 = cluster.nodes['node1']
        yaml_config = """
        # Create the keyspace and table
        keyspace: keyspace1
        keyspace_definition: |
          CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};
        table: users
        table_definition:
          CREATE TABLE users (
            username text,
            first_name text,
            last_name text,
            email text,
            PRIMARY KEY(username)
          ) WITH compaction = {'class':'SizeTieredCompactionStrategy'};
        insert:
          partitions: fixed(1)
          batchtype: UNLOGGED
        queries:
          read:
            cql: select * from users where username = ?
            fields: samerow
        """
        with tempfile.NamedTemporaryFile(mode='w+') as stress_config:
            stress_config.write(yaml_config)
            stress_config.flush()
            node1.stress(['user', 'profile=' + stress_config.name, 'n=2M', 'no-warmup',
                          'ops(insert=1)', '-rate', 'threads=50'])

            node3 = new_node(cluster, data_center='dc2')
            node3.start(no_wait=True)
            time.sleep(3)

            out, err, _ = node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)',
                                        'n=500K', 'no-warmup', 'cl=LOCAL_QUORUM',
                                        '-rate', 'threads=5',
                                        '-errors', 'retries=2'])

        debug(out)
        assert_stderr_clean(err)
        regex = re.compile("Operation.+error inserting key.+Exception")
        failure = regex.search(out)
        self.assertIsNone(failure, "Error during stress while bootstrapping")
Example #12
    def test_jobs_option_warning(self):
        """
        Verify that the nodetool -j/--jobs option warning is raised depending on the value of `concurrent_compactors` in the
        target node, independently of where the tool is run.

        Before CASSANDRA-16104 the warning was based on the local value of `concurrent_compactors`, not on the value
        used in the target node, which is obtained through JMX.

        Since 4.0, a JUnit test is in place that supersedes this test.

        @jira_ticket CASSANDRA-16104
        """

        # set up a cluster with a different value for concurrent_compactors in each node
        cluster = self.cluster
        cluster.populate(2)
        node1, node2 = cluster.nodelist()
        node1.set_configuration_options(values={'concurrent_compactors': '1'})
        node2.set_configuration_options(values={'concurrent_compactors': '10'})
        cluster.start()

        # we will always invoke nodetool from node1's environment
        tool = node1.get_tool('nodetool')
        env = node1.get_env()
        warning = 'jobs (10) is bigger than configured concurrent_compactors (1)'

        def nodetool(node):
            cmd = [
                tool, '-h', 'localhost', '-p',
                str(node.jmx_port), 'upgradesstables', '-j', '10'
            ]
            p = subprocess.Popen(cmd,
                                 env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True)
            return ccmlib.node.handle_external_tool_process(p, cmd)

        # from node1 environment, connect to node1 and verify that the warning is raised
        out, err, _ = nodetool(node1)
        assert_stderr_clean(err)
        assert warning in out

        # from node1 environment, connect to node2 and verify that the warning is not raised
        out, err, _ = nodetool(node2)
        assert_stderr_clean(err)
        assert warning not in out
Example #13
    def launch_standalone_scrub(self, ks, cf):
        """
        Launch the standalone scrub
        """
        node1 = self.cluster.nodelist()[0]
        env = common.make_cassandra_env(node1.get_install_cassandra_root(), node1.get_node_cassandra_root())
        scrub_bin = node1.get_tool('sstablescrub')
        debug(scrub_bin)

        args = [scrub_bin, ks, cf]
        p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        debug(out)
        # if we have less than 64G free space, we get this warning - ignore it
        if err and "Consider adding more capacity" not in err:
            debug(err)
            assert_stderr_clean(err)
Example #14
    def test_decommission_after_drain_is_invalid(self):
        """
        @jira_ticket CASSANDRA-8741

        Running a decommission after a drain should generate
        an unsupported operation message and exit with an error
        code (which we receive as a ToolError exception).
        """
        cluster = self.cluster
        cluster.populate([3]).start()

        node = cluster.nodelist()[0]
        node.drain(block_on_log=True)

        try:
            node.decommission()
            assert not "Expected nodetool error"
        except ToolError as e:
            assert_stderr_clean(e.stderr)
            assert 'Unsupported operation' in e.stdout
Example #15
    def test_nodetool_timeout_commands(self):
        """
        @jira_ticket CASSANDRA-10953

        Test that nodetool gettimeout and settimeout work at a basic level
        """
        cluster = self.cluster
        cluster.populate([1]).start()
        node = cluster.nodelist()[0]

        types = ['read', 'range', 'write', 'counterwrite', 'cascontention',
                 'truncate', 'misc']
        if cluster.version() < '4.0':
            types.append('streamingsocket')

        # read all of the timeouts, make sure we get a sane response
        for timeout_type in types:
            out, err, _ = node.nodetool('gettimeout {}'.format(timeout_type))
            assert_stderr_clean(err)
            logger.debug(out)
            assert re.search(r'.* \d+ ms', out)

        # set all of the timeouts to 123
        for timeout_type in types:
            _, err, _ = node.nodetool('settimeout {} 123'.format(timeout_type))
            assert_stderr_clean(err)

        # verify that they're all reported as 123
        for timeout_type in types:
            out, err, _ = node.nodetool('gettimeout {}'.format(timeout_type))
            assert_stderr_clean(err)
            logger.debug(out)
            assert re.search(r'.* 123 ms', out)
Example #16
    def test_meaningless_notice_in_status(self):
        """
        @jira_ticket CASSANDRA-10176

        nodetool status doesn't return ownership when there is more than one user keyspace
        defined (since they likely have different replication settings, making ownership
        meaningless in general) and shows a helpful notice as to why it does that.
        This test checks that said notice is only printed if there is indeed more than
        one user keyspace.
        """
        cluster = self.cluster
        cluster.populate([3]).start()

        node = cluster.nodelist()[0]

        notice_message = r'effective ownership information is meaningless'

        # First try without any user keyspace; we shouldn't get the notice
        out, err, _ = node.nodetool('status')
        assert_stderr_clean(err)
        assert not re.search(notice_message, out)

        session = self.patient_cql_connection(node)
        session.execute(
            "CREATE KEYSPACE ks1 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':1}"
        )

        # With 1 keyspace, we should still not get the notice
        out, err, _ = node.nodetool('status')
        assert_stderr_clean(err)
        assert not re.search(notice_message, out)

        session.execute(
            "CREATE KEYSPACE ks2 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':1}"
        )

        # With 2 keyspaces with the same settings, we should not get the notice
        out, err, _ = node.nodetool('status')
        assert_stderr_clean(err)
        assert not re.search(notice_message, out)

        session.execute(
            "CREATE KEYSPACE ks3 WITH replication = { 'class':'SimpleStrategy', 'replication_factor':3}"
        )

        # With a keyspace without the same replication factor, we should get the notice
        out, err, _ = node.nodetool('status')
        assert_stderr_clean(err)
        assert re.search(notice_message, out)
Example #17
    def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_validate=False):
        """
        Launch the standalone scrub
        """
        node1 = self.cluster.nodelist()[0]
        env = common.make_cassandra_env(node1.get_install_cassandra_root(), node1.get_node_cassandra_root())
        scrub_bin = node1.get_tool('sstablescrub')
        logger.debug(scrub_bin)

        args = [scrub_bin]
        if reinsert_overflowed_ttl:
            args += ['--reinsert-overflowed-ttl']
        if no_validate:
            args += ['--no-validate']
        args += [ks, cf]
        p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        logger.debug(out.decode("utf-8"))
        # if we have less than 64G free space, we get this warning - ignore it
        if err and "Consider adding more capacity" not in err.decode("utf-8"):
            logger.debug(err.decode("utf-8"))
            assert_stderr_clean(err.decode("utf-8"))
Example #18
    def test_assassinate_valid_node(self):
        """
        @jira_ticket CASSANDRA-16588
        Test that after taking two non-seed nodes down and assassinating
        one of them, the other can come back up.
        """
        cluster = self.cluster

        cluster.populate(5).start()
        node1 = cluster.nodelist()[0]
        node3 = cluster.nodelist()[2]

        self.cluster.set_configuration_options({
            'seed_provider': [{
                'class_name':
                'org.apache.cassandra.locator.SimpleSeedProvider',
                'parameters': [{
                    'seeds': node1.address()
                }]
            }]
        })

        non_seed_nodes = cluster.nodelist()[-2:]
        for node in non_seed_nodes:
            node.stop()

        assassination_target = non_seed_nodes[0]
        logger.debug("Assassinating non-seed node {}".format(
            assassination_target.address()))
        out, err, _ = node1.nodetool("assassinate {}".format(
            assassination_target.address()))
        assert_stderr_clean(err)

        logger.debug("Starting non-seed nodes")
        for node in non_seed_nodes:
            node.start()
Example #19
 def test_valid_error(self):
     err = ("objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk"
            "/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib."
            "One of the two will be used. Which one is undefined.")
     assert_stderr_clean(err)
Example #20
 def test_invalid_error(self):
     err = "This string is no good and should fail."
     with self.assertRaises(AssertionError):
         assert_stderr_clean(err)
Example #21
 def _describe(self, node):
     node_describe, err, _ = node.nodetool('describecluster')
     assert_stderr_clean(err)
     out_sorted = node_describe.split()
     out_sorted.sort()
     return (node_describe, out_sorted)
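A plausible (hypothetical) use of this helper: since _describe returns the sorted tokens of the describecluster output, the views of all nodes can be compared directly.

 def test_describecluster_is_consistent(self):
     # hypothetical check built on _describe: every node should report
     # the same sorted describecluster output
     views = [self._describe(node)[1] for node in self.cluster.nodelist()]
     for view in views[1:]:
         assert view == views[0], "Nodes disagree on describecluster output"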
Example #22
 def test_invalid_error(self):
     err = "This string is no good and should fail."
     with pytest.raises(AssertionError):
         assert_stderr_clean(err)
Example #23
 def test_valid_error_with_whitespace(self):
     err = """WARN  14:08:15,018 JNA link failure, one or more native method will be unavailable.objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
              """
     assert_stderr_clean(err)
Example #24
 def test_valid_error(self):
     err = (
         "objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk"
         "/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib."
         "One of the two will be used. Which one is undefined.")
     assert_stderr_clean(err)
Example #25
 def test_multiple_valid_errors(self):
     err = """objc[65696]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
              WARN  15:28:24,788 JNA link failure, one or more native method will be unavailable."""
     assert_stderr_clean(err)
Example #26
 def test_empty_string(self):
     err = ''
     assert_stderr_clean(err)
Example #27
 def test_valid_error_with_whitespace(self):
     err = """WARN  14:08:15,018 JNA link failure, one or more native method will be unavailable.objc[36358]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
              """
     assert_stderr_clean(err)
Example #28
 def test_multiple_valid_errors(self):
     err = """objc[65696]: Class JavaLaunchHelper is implemented in both /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/bin/java and /Library/Java/JavaVirtualMachines/jdk1.8.0_91.jdk/Contents/Home/jre/lib/libinstrument.dylib. One of the two will be used. Which one is undefined.
              WARN  15:28:24,788 JNA link failure, one or more native method will be unavailable."""
     assert_stderr_clean(err)
Example #29
 def test_empty_string(self):
     err = ''
     assert_stderr_clean(err)
Example #30
    def test_prefer_local_reconnect_on_listen_address(self):
        """
        @jira_ticket CASSANDRA-9748
        @jira_ticket CASSANDRA-8084

        Test that it's possible to connect over the broadcast_address when
        listen_on_broadcast_address=true and that GossipingPropertyFileSnitch
        reconnects via listen_address when prefer_local=true.
        """

        NODE1_LISTEN_ADDRESS = '127.0.0.1'
        NODE1_BROADCAST_ADDRESS = '127.0.0.3'

        NODE1_LISTEN_FMT_ADDRESS = '/127.0.0.1'
        NODE1_BROADCAST_FMT_ADDRESS = '/127.0.0.3'

        NODE1_40_LISTEN_ADDRESS = '127.0.0.1:7000'
        NODE1_40_BROADCAST_ADDRESS = '127.0.0.3:7000'

        NODE1_40_LISTEN_FMT_ADDRESS = '/127.0.0.1:7000'
        NODE1_40_BROADCAST_FMT_ADDRESS = '/127.0.0.3:7000'

        NODE2_LISTEN_ADDRESS = '127.0.0.2'
        NODE2_BROADCAST_ADDRESS = '127.0.0.4'

        NODE2_LISTEN_FMT_ADDRESS = '/127.0.0.2'
        NODE2_BROADCAST_FMT_ADDRESS = '/127.0.0.4'

        NODE2_40_LISTEN_ADDRESS = '127.0.0.2:7000'
        NODE2_40_BROADCAST_ADDRESS = '127.0.0.4:7000'

        NODE2_40_LISTEN_FMT_ADDRESS = '/127.0.0.2:7000'
        NODE2_40_BROADCAST_FMT_ADDRESS = '/127.0.0.4:7000'

        STORAGE_PORT = 7000

        cluster = self.cluster
        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        running40 = node1.get_base_cassandra_version() >= 4.0

        cluster.seeds = [NODE1_BROADCAST_ADDRESS]
        cluster.set_configuration_options(
            values={
                'endpoint_snitch':
                'org.apache.cassandra.locator.GossipingPropertyFileSnitch',
                'listen_on_broadcast_address': 'true'
            })
        node1.set_configuration_options(
            values={'broadcast_address': NODE1_BROADCAST_ADDRESS})
        node2.auto_bootstrap = True
        node2.set_configuration_options(
            values={'broadcast_address': NODE2_BROADCAST_ADDRESS})

        for node in cluster.nodelist():
            with open(
                    os.path.join(node.get_conf_dir(),
                                 'cassandra-rackdc.properties'),
                    'w') as snitch_file:
                snitch_file.write("dc=dc1" + os.linesep)
                snitch_file.write("rack=rack1" + os.linesep)
                snitch_file.write("prefer_local=true" + os.linesep)

        node1.start(wait_for_binary_proto=True)
        if running40:
            node1.watch_log_for("Listening on address: \({}:{}\)".format(
                NODE1_40_LISTEN_FMT_ADDRESS[:-5], STORAGE_PORT),
                                timeout=60)
            node1.watch_log_for("Listening on address: \({}:{}\)".format(
                NODE1_40_BROADCAST_FMT_ADDRESS[:-5], STORAGE_PORT),
                                timeout=60)
        else:
            node1.watch_log_for("Starting Messaging Service on {}:{}".format(
                NODE1_LISTEN_FMT_ADDRESS, STORAGE_PORT),
                                timeout=60)
            node1.watch_log_for("Starting Messaging Service on {}:{}".format(
                NODE1_BROADCAST_FMT_ADDRESS, STORAGE_PORT),
                                timeout=60)

        self._test_connect(NODE1_LISTEN_ADDRESS, STORAGE_PORT)
        self._test_connect(NODE1_BROADCAST_ADDRESS, STORAGE_PORT)

        # write some data to node1
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

        session = self.patient_cql_connection(node1)
        stress_table = 'keyspace1.standard1'
        original_rows = list(
            session.execute("SELECT * FROM {}".format(stress_table)))

        node2.start(wait_for_binary_proto=True, wait_other_notice=False)
        if running40:
            node2.watch_log_for("Listening on address: \({}:{}\)".format(
                NODE2_40_LISTEN_FMT_ADDRESS[:-5], STORAGE_PORT),
                                timeout=60)
            node2.watch_log_for("Listening on address: \({}:{}\)".format(
                NODE2_40_BROADCAST_FMT_ADDRESS[:-5], STORAGE_PORT),
                                timeout=60)
        else:
            node2.watch_log_for("Starting Messaging Service on {}:{}".format(
                NODE2_LISTEN_FMT_ADDRESS, STORAGE_PORT),
                                timeout=60)
            node2.watch_log_for("Starting Messaging Service on {}:{}".format(
                NODE2_BROADCAST_FMT_ADDRESS, STORAGE_PORT),
                                timeout=60)

        self._test_connect(NODE2_LISTEN_ADDRESS, STORAGE_PORT)
        self._test_connect(NODE2_BROADCAST_ADDRESS, STORAGE_PORT)

        # Intiated -> Initiated typo was fixed in 3.10
        reconnectFmtString = "Ini?tiated reconnect to an Internal IP {} for the {}"
        if node1.get_base_cassandra_version() >= 3.10:
            reconnectFmtString = "Initiated reconnect to an Internal IP {} for the {}"
        node1.watch_log_for(reconnectFmtString.format(
            NODE2_40_LISTEN_FMT_ADDRESS if running40 else
            NODE2_LISTEN_FMT_ADDRESS, NODE2_40_BROADCAST_FMT_ADDRESS
            if running40 else NODE2_BROADCAST_FMT_ADDRESS),
                            filename='debug.log',
                            timeout=60)
        node2.watch_log_for(reconnectFmtString.format(
            NODE1_40_LISTEN_FMT_ADDRESS if running40 else
            NODE1_LISTEN_FMT_ADDRESS, NODE1_40_BROADCAST_FMT_ADDRESS
            if running40 else NODE1_BROADCAST_FMT_ADDRESS),
                            filename='debug.log',
                            timeout=60)

        # read data from node2 just to make sure data and connectivity are OK
        session = self.patient_exclusive_cql_connection(node2)
        new_rows = list(
            session.execute("SELECT * FROM {}".format(stress_table)))
        assert original_rows == new_rows

        out, err, _ = node1.nodetool('gossipinfo')
        assert_stderr_clean(err)
        logger.debug(out)

        assert "/{}".format(NODE1_BROADCAST_ADDRESS) in out
        assert "INTERNAL_IP:{}:{}".format('9' if running40 else '6',
                                          NODE1_LISTEN_ADDRESS) in out
        assert "/{}".format(NODE2_BROADCAST_ADDRESS) in out
        assert "INTERNAL_IP:{}:{}".format('9' if running40 else '6',
                                          NODE2_LISTEN_ADDRESS) in out
        if running40:
            assert "INTERNAL_ADDRESS_AND_PORT:7:{}".format(
                NODE1_40_LISTEN_ADDRESS) in out
            assert "INTERNAL_ADDRESS_AND_PORT:7:{}".format(
                NODE2_40_LISTEN_ADDRESS) in out
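Taken together, the small unit tests above (Examples #19 through #29) pin down the contract of assert_stderr_clean: the objc JavaLaunchHelper duplicate-class message and the timestamped JNA link-failure warning are acceptable noise, surrounding whitespace is ignored, the empty string passes, and anything else must raise AssertionError. The following is a minimal self-contained sketch consistent with those tests, not the actual dtest implementation; the whitelist regexes are assumptions reverse-engineered from the test strings.

    import re

    # Assumed whitelist of benign stderr messages, per the unit tests above.
    ACCEPTABLE_ERRORS = [
        # objc duplicate JavaLaunchHelper warning, possibly wrapped across lines
        r"objc\[\d+\]: Class JavaLaunchHelper is implemented in both"
        r".*?One of the two will be used\. Which one is undefined\.",
        # JNA link-failure warning preceded by a HH:MM:SS,mmm timestamp
        r"WARN\s+\d{2}:\d{2}:\d{2},\d+ JNA link failure,"
        r" one or more native method will be unavailable\.",
    ]

    def assert_stderr_clean(err):
        """Fail unless stderr is empty once whitelisted messages are removed."""
        remaining = err
        for pattern in ACCEPTABLE_ERRORS:
            remaining = re.sub(pattern, '', remaining, flags=re.DOTALL)
        assert remaining.strip() == '', \
            "Unexpected output on stderr: {!r}".format(remaining.strip())

Run against the strings in the tests above, this sketch accepts every valid-error case and raises AssertionError for every invalid one.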