Example #1
    def setup(self, request):
        self.use_vnodes = request.config.getoption("--use-vnodes")
        self.use_off_heap_memtables = request.config.getoption("--use-off-heap-memtables")
        self.num_tokens = request.config.getoption("--num-tokens")
        self.data_dir_count = request.config.getoption("--data-dir-count-per-instance")
        self.force_execution_of_resource_intensive_tests = request.config.getoption("--force-resource-intensive-tests")
        self.skip_resource_intensive_tests = request.config.getoption("--skip-resource-intensive-tests")
        if request.config.getoption("--cassandra-dir") is not None:
            self.cassandra_dir = os.path.expanduser(request.config.getoption("--cassandra-dir"))
        self.cassandra_version = request.config.getoption("--cassandra-version")

        # There are times when we want to know the C* version we're testing against
        # before we set up any cluster. In the general case, we can't know that -- the
        # test method could use any version it wants for self.cluster. However, we can
        # get the version from build.xml in the C* repository specified by
        # CASSANDRA_VERSION or CASSANDRA_DIR.
        if self.cassandra_version is not None:
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(self.cassandra_version)
            self.cassandra_version_from_build = get_version_from_build(ccm_repo_cache_dir)
        elif self.cassandra_dir is not None:
            self.cassandra_version_from_build = get_version_from_build(self.cassandra_dir)

        self.delete_logs = request.config.getoption("--delete-logs")
        self.execute_upgrade_tests = request.config.getoption("--execute-upgrade-tests")
        self.disable_active_log_watching = request.config.getoption("--disable-active-log-watching")
        self.keep_test_dir = request.config.getoption("--keep-test-dir")
        self.enable_jacoco_code_coverage = request.config.getoption("--enable-jacoco-code-coverage")
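
The options read above only exist if they have been registered with pytest first. A minimal sketch of such a registration hook, assuming a standard pytest_addoption in conftest.py (the defaults and help strings are illustrative, not the project's actual values):

def pytest_addoption(parser):
    # Option names mirror the getoption() calls in setup() above; everything else is assumed.
    parser.addoption("--use-vnodes", action="store_true", default=False,
                     help="Run test clusters with vnodes enabled")
    parser.addoption("--num-tokens", action="store", default=256,
                     help="Tokens per node when vnodes are enabled")
    parser.addoption("--cassandra-dir", action="store", default=None,
                     help="Path to a local Cassandra checkout to test against")
    parser.addoption("--cassandra-version", action="store", default=None,
                     help="Cassandra version slug to fetch and build via ccm")
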
def fixture_since(request, fixture_dtest_setup):
    if request.node.get_closest_marker('since'):
        max_version_str = request.node.get_closest_marker('since').kwargs.get('max_version', None)
        max_version = None
        if max_version_str:
            max_version = LooseVersion(max_version_str)

        since_str = request.node.get_closest_marker('since').args[0]
        since = LooseVersion(since_str)
        # For upgrade tests don't run the test if any of the involved versions
        # are excluded by the annotation
        if hasattr(request.cls, "UPGRADE_PATH"):
            upgrade_path = request.cls.UPGRADE_PATH
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(upgrade_path.starting_meta.version)
            starting_version = get_version_from_build(ccm_repo_cache_dir)
            skip_msg = _skip_msg(starting_version, since, max_version)
            if skip_msg:
                pytest.skip(skip_msg)
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(upgrade_path.upgrade_meta.version)
            ending_version = get_version_from_build(ccm_repo_cache_dir)
            skip_msg = _skip_msg(ending_version, since, max_version)
            if skip_msg:
                pytest.skip(skip_msg)
        else:
            # For regular tests the value in the current cluster actually means something so we should
            # use that to check.
            # Use cassandra_version_from_build as it's guaranteed to be a LooseVersion
            # whereas cassandra_version may be a string if set in the cli options
            current_running_version = fixture_dtest_setup.dtest_config.cassandra_version_from_build
            skip_msg = _skip_msg(current_running_version, since, max_version)
            if skip_msg:
                pytest.skip(skip_msg)
Example #3
def fixture_since(request, fixture_dtest_setup):
    if request.node.get_closest_marker('since'):
        max_version_str = request.node.get_closest_marker('since').kwargs.get('max_version', None)
        max_version = None
        if max_version_str:
            max_version = LooseVersion(max_version_str)

        since_str = request.node.get_closest_marker('since').args[0]
        since = LooseVersion(since_str)
        # For upgrade tests don't run the test if any of the involved versions
        # are excluded by the annotation
        if hasattr(request.cls, "UPGRADE_PATH"):
            upgrade_path = request.cls.UPGRADE_PATH
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(upgrade_path.starting_meta.version)
            starting_version = get_version_from_build(ccm_repo_cache_dir)
            skip_msg = _skip_msg(starting_version, since, max_version)
            if skip_msg:
                pytest.skip(skip_msg)
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(upgrade_path.upgrade_meta.version)
            ending_version = get_version_from_build(ccm_repo_cache_dir)
            skip_msg = _skip_msg(ending_version, since, max_version)
            if skip_msg:
                pytest.skip(skip_msg)
        else:
            # For regular tests the value in the current cluster actually means something so we should
            # use that to check.
            # Use cassandra_version_from_build as it's guaranteed to be a LooseVersion
            # whereas cassandra_version may be a string if set in the cli options
            current_running_version = fixture_dtest_setup.dtest_config.cassandra_version_from_build
            skip_msg = _skip_msg(current_running_version, since, max_version)
            if skip_msg:
                pytest.skip(skip_msg)
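
As a usage illustration, a test opts into this version gating by carrying the marker the fixture inspects; the test name and version bounds below are hypothetical:

# Hypothetical dtest: skipped on clusters older than 3.0 or newer than the max_version bound.
@pytest.mark.since('3.0', max_version='4')
def test_new_feature(self):
    ...
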
Example #4
def fixture_ported_to_in_jvm(request, fixture_dtest_setup):
    marker = request.node.get_closest_marker('ported_to_in_jvm')
    if marker and not request.config.getoption("--use-vnodes"):

        if not marker.args:
            pytest.skip("ported to in-jvm")

        from_str = marker.args[0]
        ported_from_version = LooseVersion(from_str)

        # For upgrade tests don't run the test if any of the involved versions
        # are excluded by the annotation
        if hasattr(request.cls, "UPGRADE_PATH"):
            upgrade_path = request.cls.UPGRADE_PATH
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(upgrade_path.starting_meta.version)
            starting_version = get_version_from_build(ccm_repo_cache_dir)
            skip_msg = _skip_ported_msg(starting_version, ported_from_version)
            if skip_msg:
                pytest.skip(skip_msg)
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(upgrade_path.upgrade_meta.version)
            ending_version = get_version_from_build(ccm_repo_cache_dir)
            skip_msg = _skip_ported_msg(ending_version, ported_from_version)
            if skip_msg:
                pytest.skip(skip_msg)
        else:
            # For regular tests the value in the current cluster actually means something so we should
            # use that to check.
            # Use cassandra_version_from_build as it's guaranteed to be a LooseVersion
            # whereas cassandra_version may be a string if set in the cli options
            current_running_version = fixture_dtest_setup.dtest_config.cassandra_version_from_build
            skip_msg = _skip_ported_msg(current_running_version, ported_from_version)
            if skip_msg:
                pytest.skip(skip_msg)
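
Similarly, a hedged illustration of the marker this fixture reacts to (test name and version are made up). When the fixture's guard applies, a bare marker skips outright; with an argument, _skip_ported_msg() decides based on the version from which the equivalent in-jvm test exists:

# Hypothetical dtest that has an in-jvm equivalent since 4.0.
@pytest.mark.ported_to_in_jvm('4.0')
def test_feature_covered_by_in_jvm_dtest(self):
    ...
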
 def get_version_from_build(self):
     # There are times when we want to know the C* version we're testing against
     # before we set up any cluster. In the general case, we can't know that -- the
     # test method could use any version it wants for self.cluster. However, we can
     # get the version from build.xml in the C* repository specified by
     # CASSANDRA_VERSION or CASSANDRA_DIR.
     if self.cassandra_version is not None:
         ccm_repo_cache_dir, _ = ccmlib.repository.setup(
             self.cassandra_version)
         return get_version_from_build(ccm_repo_cache_dir)
     elif self.cassandra_dir is not None:
         return get_version_from_build(self.cassandra_dir)
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            exit(1)
        if not options.version:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)

            if common.get_dse_version(options.install_dir) is not None:
                common.assert_jdk_valid_for_cassandra_version(common.get_dse_cassandra_version(options.install_dir))
            else:
                common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))

        if common.is_win() and os.path.exists(r'c:\windows\system32\java.exe'):
            print_(r"""WARN: c:\windows\system32\java.exe exists.
                This may cause registry issues, and jre7 to be used, despite jdk8 being installed.
                """)
Example #7
    def validate(self, parser, options, args):
        if options.scylla and not options.install_dir:
            parser.error("must specify install_dir using scylla")
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            sys.exit(1)
        if self.options.snitch and \
            (not isinstance(self.nodes, list) or
             not (self.options.snitch == 'org.apache.cassandra.locator.PropertyFileSnitch' or
                  self.options.snitch == 'org.apache.cassandra.locator.GossipingPropertyFileSnitch')):
            parser.print_help()
            sys.exit(1)

        if not options.version:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)

            common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))
        if common.is_win() and os.path.exists(r'c:\windows\system32\java.exe'):
            print_(r"""WARN: c:\windows\system32\java.exe exists.
                This may cause registry issues, and jre7 to be used, despite jdk8 being installed.
                """)
Example #8
def compile_version_elassandra(version, target_dir, verbose=False, elassandra_version=None):
    assert_jdk_valid_for_cassandra_version(get_version_from_build(target_dir))

    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Elassandra {} ...".format(version))
    logger.info("--- Elassandra Build -------------------\n")
    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        if elassandra_version is None:
            logger.info("elassandra version is not set, trying with 2.4.2")
            elassandra_version = "2.4.2"
        targz_file = "distribution/tar/target/releases/elassandra-%s.tar.gz" % elassandra_version
        target_install_dir = os.path.join(target_dir, "elassandra-%s" % elassandra_version)
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`mvn package -DskipTests` failed. Retry #%s...\n\n" % attempt)
            process = subprocess.Popen([platform_binary('mvn'), 'package', '-DskipTests'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val == 0:
                process = subprocess.Popen([platform_binary('tar'), '-xzf', targz_file], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError('Error compiling Elassandra. See {logfile} or run '
                           '"ccm showlastlog" for details'.format(logfile=logfile))
    except OSError as e:
        raise CCMError("Error compiling Elassandra. Is maven installed? See %s for details" % logfile)
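
The mvn loop above (run, log, retry up to three times) is the same pattern used by the ant-based compile_version shown later; a small sketch of that pattern factored into a helper, not part of ccm and purely illustrative:

def run_build_with_retries(cmd, cwd, logger, attempts=3):
    # Run a build command up to `attempts` times, logging output via log_info(),
    # and return the exit code of the last attempt.
    ret_val = 1
    for attempt in range(attempts):
        if attempt > 0:
            logger.info("`%s` failed. Retry #%s...\n" % (' '.join(cmd), attempt))
        process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        ret_val, _, _ = log_info(process, logger)
        if ret_val == 0:
            break
    return ret_val
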
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            exit(1)
        if not options.version:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)

            if common.get_dse_version(options.install_dir) is not None:
                common.assert_jdk_valid_for_cassandra_version(common.get_dse_cassandra_version(options.install_dir))
            else:
                common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))

        if common.is_win() and os.path.exists(r'c:\windows\system32\java.exe'):
            print_(r"""WARN: c:\windows\system32\java.exe exists.
                This may cause registry issues, and jre7 to be used, despite jdk8 being installed.
                """)
Example #10
def compile_version(version, target_dir, verbose=False):
    assert_jdk_valid_for_cassandra_version(get_version_from_build(target_dir))

    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Cassandra {} ...".format(version))
    logger.info("--- Cassandra Build -------------------\n")

    default_build_properties = os.path.join(common.get_default_path(), 'build.properties.default')
    if os.path.exists(default_build_properties):
        target_build_properties = os.path.join(target_dir, 'build.properties')
        logger.info("Copying %s to %s\n" % (default_build_properties, target_build_properties))
        shutil.copyfile(default_build_properties, target_build_properties)

    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`ant jar` failed. Retry #%s...\n\n" % attempt)
            process = subprocess.Popen([platform_binary('ant'), 'jar'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError('Error compiling Cassandra. See {logfile} or run '
                           '"ccm showlastlog" for details'.format(logfile=logfile))
    except OSError as e:
        raise CCMError("Error compiling Cassandra. Is ant installed? See %s for details" % logfile)

    logger.info("\n\n--- cassandra/stress build ------------\n")
    stress_dir = os.path.join(target_dir, "tools", "stress") if (
        version >= "0.8.0") else \
        os.path.join(target_dir, "contrib", "stress")

    build_xml = os.path.join(stress_dir, 'build.xml')
    if os.path.exists(build_xml):  # building stress separately is only necessary pre-1.1
        try:
            # set permissions correctly, seems to not always be the case
            stress_bin_dir = os.path.join(stress_dir, 'bin')
            for f in os.listdir(stress_bin_dir):
                full_path = os.path.join(stress_bin_dir, f)
                os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

            process = subprocess.Popen([platform_binary('ant'), 'build'], cwd=stress_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val != 0:
                process = subprocess.Popen([platform_binary('ant'), 'stress-build'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
                if ret_val != 0:
                    raise CCMError("Error compiling Cassandra stress tool.  "
                                   "See %s for details (you will still be able to use ccm "
                                   "but not the stress related commands)" % logfile)
        except IOError as e:
            raise CCMError("Error compiling Cassandra stress tool: %s (you will "
                           "still be able to use ccm but not the stress related commands)" % str(e))
Example #11
    def do_upgrade(self, session):
        """
        Upgrades the first node in the cluster and returns a list of
        (is_upgraded, Session) tuples.  If `is_upgraded` is true, the
        Session is connected to the upgraded node.
        """
        session.cluster.shutdown()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        # stop the nodes
        node1.drain()
        node1.stop(gently=True)

        # Ignore errors before upgrade on Windows
        # We ignore errors from 2.1, because windows 2.1
        # support is only beta. There are frequent log errors,
        # related to filesystem interactions that are a direct result
        # of the lack of full functionality on 2.1 Windows, and we don't
        # want these to pollute our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))
        switch_jdks(self.UPGRADE_PATH.upgrade_meta.java_version)

        node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)

        # this is a bandaid; after refactoring, upgrades should account for protocol version
        new_version_from_build = get_version_from_build(node1.get_install_dir())
        if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
            self.skip('Protocol version {} incompatible '
                      'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
        node1.set_log_level("DEBUG" if DEBUG else "INFO")
        node1.set_configuration_options(values={'internode_compression': 'none'})
        node1.start(wait_for_binary_proto=True, wait_other_notice=True)

        sessions = []
        session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version)
        session.set_keyspace('ks')
        sessions.append((True, session))

        # open a second session with the node on the old version
        session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version)
        session.set_keyspace('ks')
        sessions.append((False, session))

        if self.CL:
            for is_upgraded, session in sessions:
                session.default_consistency_level = self.CL

        # Let the nodes settle briefly before yielding connections in turn (on the upgraded and non-upgraded alike)
        # CASSANDRA-11396 was the impetus for this change, wherein some apparent perf noise was preventing
        # CL.ALL from being reached. The newly upgraded node needs to settle because it has just barely started, and each
        # non-upgraded node needs a chance to settle as well, because the entire cluster (or isolated nodes) may have been doing resource intensive activities
        # immediately before.
        for s in sessions:
            time.sleep(5)
            yield s
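
For context, a hedged sketch of how an upgrade test typically consumes the generator above; the test name and schema are illustrative, and the keyspace 'ks' is the one do_upgrade() sets on each session:

    def upgrade_smoke_test(self):
        # Hypothetical caller of do_upgrade(): exercise the same table on the upgraded
        # node and on the node still running the old version, as flagged by is_upgraded.
        session = self.prepare()
        session.execute("CREATE TABLE t (k int PRIMARY KEY, v int)")
        for is_upgraded, session in self.do_upgrade(session):
            debug("querying {} node".format("upgraded" if is_upgraded else "old"))
            session.execute("INSERT INTO t (k, v) VALUES (0, 0)")
            assert len(list(session.execute("SELECT * FROM t"))) == 1
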
 def __init__(self, *args, **kwargs):
     CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
     if get_version_from_build(CASSANDRA_DIR) >= '3.0':
         kwargs['cluster_options'] = {'enable_user_defined_functions': 'true',
                                      'enable_scripted_user_defined_functions': 'true'}
     else:
         kwargs['cluster_options'] = {'enable_user_defined_functions': 'true'}
     Tester.__init__(self, *args, **kwargs)
def set_version_family():
    """
    Detects the version family (line) using dtest.py:CASSANDRA_VERSION_FROM_BUILD
    """
    # todo CASSANDRA-14421
    # current_version = CASSANDRA_VERSION_FROM_BUILD
    # There are times when we want to know the C* version we're testing against
    # before we call Tester.setUp. In the general case, we can't know that -- the
    # test method could use any version it wants for self.cluster. However, we can
    # get the version from build.xml in the C* repository specified by
    # CASSANDRA_VERSION or CASSANDRA_DIR. This should use the same resolution
    # strategy as the actual checkout code in Tester.setUp; if it does not, that is
    # a bug.
    cassandra_version_slug = CONFIG.getoption("--cassandra-version")
    cassandra_dir = CONFIG.getoption("--cassandra-dir") or CONFIG.getini(
        "cassandra_dir")
    # Prefer --cassandra-version if it was provided. If not, use --cassandra-dir (or the cassandra_dir ini setting)
    if cassandra_version_slug:
        # fetch but don't build the specified C* version
        ccm_repo_cache_dir, _ = ccmlib.repository.setup(cassandra_version_slug)
        current_version = get_version_from_build(ccm_repo_cache_dir)
    else:
        current_version = get_version_from_build(cassandra_dir)

    # TODO add a new item whenever Cassandra is branched
    if current_version.vstring.startswith('2.0'):
        version_family = CASSANDRA_2_0
    elif current_version.vstring.startswith('2.1'):
        version_family = CASSANDRA_2_1
    elif current_version.vstring.startswith('2.2'):
        version_family = CASSANDRA_2_2
    elif current_version.vstring.startswith('3.0'):
        version_family = CASSANDRA_3_0
    elif current_version.vstring.startswith('3.11'):
        version_family = CASSANDRA_3_11
    elif current_version.vstring.startswith('4.0'):
        version_family = CASSANDRA_4_0
    else:
        # when this occurs, it's time to update this manifest a bit!
        raise RuntimeError(
            "Testing upgrades from/to version %s is not supported. Please use a custom manifest (see upgrade_manifest.py)"
            % current_version.vstring)

    global VERSION_FAMILY
    VERSION_FAMILY = version_family
    logger.info("Setting version family to %s\n" % VERSION_FAMILY)
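
The prefix matching above works because LooseVersion keeps the raw version string in .vstring; a small worked illustration:

# LooseVersion('3.11.4').vstring == '3.11.4', so:
#   LooseVersion('3.11.4').vstring.startswith('3.11')  -> True   (maps to CASSANDRA_3_11)
#   LooseVersion('3.1.0').vstring.startswith('3.11')   -> False  (falls through to the RuntimeError)
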
 def __init__(self, *args, **kwargs):
     CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
     if get_version_from_build(CASSANDRA_DIR) >= '3.0':
         kwargs['cluster_options'] = {'enable_user_defined_functions': 'true',
                                      'enable_scripted_user_defined_functions': 'true'}
     else:
         kwargs['cluster_options'] = {'enable_user_defined_functions': 'true'}
     Tester.__init__(self, *args, **kwargs)
    def do_upgrade(self, session):
        """
        Upgrades the first node in the cluster and returns a list of
        (is_upgraded, Session) tuples.  If `is_upgraded` is true, the
        Session is connected to the upgraded node.
        """
        session.cluster.shutdown()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        # stop the nodes
        node1.drain()
        node1.stop(gently=True)

        # Ignore errors before upgrade on Windows
        # We ignore errors from 2.1, because windows 2.1
        # support is only beta. There are frequent log errors,
        # related to filesystem interactions that are a direct result
        # of the lack of full functionality on 2.1 Windows, and we don't
        # want these to pollute our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))

        node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)

        # this is a bandaid; after refactoring, upgrades should account for protocol version
        new_version_from_build = get_version_from_build(node1.get_install_dir())
        if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
            self.skip('Protocol version {} incompatible '
                      'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
        node1.set_log_level("DEBUG" if DEBUG else "INFO")
        node1.set_configuration_options(values={'internode_compression': 'none'})
        node1.start(wait_for_binary_proto=True, wait_other_notice=True)

        sessions = []
        session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version)
        session.set_keyspace('ks')
        sessions.append((True, session))

        # open a second session with the node on the old version
        session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version)
        session.set_keyspace('ks')
        sessions.append((False, session))

        if self.CL:
            for is_upgraded, session in sessions:
                session.default_consistency_level = self.CL

        # Let the nodes settle briefly before yielding connections in turn (on the upgraded and non-upgraded alike)
        # CASSANDRA-11396 was the impetus for this change, wherein some apparent perf noise was preventing
        # CL.ALL from being reached. The newly upgraded node needs to settle because it has just barely started, and each
        # non-upgraded node needs a chance to settle as well, because the entire cluster (or isolated nodes) may have been doing resource intensive activities
        # immediately before.
        for s in sessions:
            time.sleep(5)
            yield s
Example #16
    def _clean_win_jmx(self):
        if common.get_version_from_build(node_path=self.get_path()) >= '2.1':
            sh_file = os.path.join(common.CASSANDRA_CONF_DIR, common.CASSANDRA_WIN_ENV)
            dst = os.path.join(self.get_path(), sh_file)
            common.replace_in_file(dst, "JMX_PORT=", "    $JMX_PORT=\"" + self.jmx_port + "\"")

            # properly use single and double quotes to account for single quotes in the CASSANDRA_CONF path
            common.replace_in_file(dst,'CASSANDRA_PARAMS=','    $env:CASSANDRA_PARAMS=\'-Dcassandra' +    # -Dcassandra
              ' -Dlogback.configurationFile=/"\' + "$env:CASSANDRA_CONF" + \'/logback.xml"\'' +            # -Dlogback.configurationFile=/"$env:CASSANDRA_CONF/logback.xml"
              ' + \' -Dcassandra.config=file:"\' + "///$env:CASSANDRA_CONF" + \'/cassandra.yaml"\'')        # -Dcassandra.config=file:"///$env:CASSANDRA_CONF/cassandra.yaml"
Example #17
 def _clean_win_jmx(self):
     if common.get_version_from_build(node_path=self.get_path()) >= "2.1":
         sh_file = os.path.join(common.CASSANDRA_CONF_DIR, common.CASSANDRA_WIN_ENV)
         dst = os.path.join(self.get_path(), sh_file)
         common.replace_in_file(dst, "JMX_PORT=", '    $JMX_PORT="' + self.jmx_port + '"')
         common.replace_in_file(
             dst,
             "CASSANDRA_PARAMS=",
             '    $env:CASSANDRA_PARAMS="-Dcassandra -Dlogback.configurationFile=/$env:CASSANDRA_CONF/logback.xml -Dcassandra.config=file:/$env:CASSANDRA_CONF/cassandra.yaml"',
         )
Example #18
    def _clean_win_jmx(self):
        if common.get_version_from_build(node_path=self.get_path()) >= '2.1':
            sh_file = os.path.join(common.CASSANDRA_CONF_DIR, common.CASSANDRA_WIN_ENV)
            dst = os.path.join(self.get_path(), sh_file)
            common.replace_in_file(dst, "JMX_PORT=", "    $JMX_PORT=\"" + self.jmx_port + "\"")

            # properly use single and double quotes to account for single quotes in the CASSANDRA_CONF path
            common.replace_in_file(dst,'CASSANDRA_PARAMS=','    $env:CASSANDRA_PARAMS=\'-Dcassandra' +    # -Dcassandra
              ' -Dlogback.configurationFile=/"\' + "$env:CASSANDRA_CONF" + \'/logback.xml"\'' +            # -Dlogback.configurationFile=/"$env:CASSANDRA_CONF/logback.xml"
              ' + \' -Dcassandra.config=file:"\' + "///$env:CASSANDRA_CONF" + \'/cassandra.yaml"\'')        # -Dcassandra.config=file:"///$env:CASSANDRA_CONF/cassandra.yaml"
def set_version_family():
    """
    Detects the version family (line) using dtest.py:CASSANDRA_VERSION_FROM_BUILD
    """
    # todo CASSANDRA-14421
    # current_version = CASSANDRA_VERSION_FROM_BUILD
    # There are times when we want to know the C* version we're testing against
    # before we call Tester.setUp. In the general case, we can't know that -- the
    # test method could use any version it wants for self.cluster. However, we can
    # get the version from build.xml in the C* repository specified by
    # CASSANDRA_VERSION or CASSANDRA_DIR. This should use the same resolution
    # strategy as the actual checkout code in Tester.setUp; if it does not, that is
    # a bug.
    cassandra_version_slug = CONFIG.getoption("--cassandra-version")
    cassandra_dir = CONFIG.getoption("--cassandra-dir")
    # Prefer --cassandra-version if it was provided. If not, use --cassandra-dir
    if cassandra_version_slug:
        # fetch but don't build the specified C* version
        ccm_repo_cache_dir, _ = ccmlib.repository.setup(cassandra_version_slug)
        current_version = get_version_from_build(ccm_repo_cache_dir)
    else:
        current_version = get_version_from_build(cassandra_dir)

    if current_version.vstring.startswith('2.0'):
        version_family = '2.0.x'
    elif current_version.vstring.startswith('2.1'):
        version_family = '2.1.x'
    elif current_version.vstring.startswith('2.2'):
        version_family = '2.2.x'
    elif current_version.vstring.startswith('3.0'):
        version_family = '3.0.x'
    elif '3.1' <= current_version < '4.0':
        version_family = '3.x'
    elif '4.0' <= current_version < '4.1':
        version_family = 'trunk'
    else:
        # when this occurs, it's time to update this manifest a bit!
        raise RuntimeError("4.1+ not yet supported on upgrade tests!")

    global VERSION_FAMILY
    VERSION_FAMILY = version_family
    logger.info("Setting version family to %s\n" % VERSION_FAMILY)
Example #20
    def prepare(self, ordered=False, create_keyspace=True, use_cache=False, nodes=2, rf=1, protocol_version=None, **kwargs):
        assert nodes >= 2, "backwards compatibility tests require at least two nodes"
        assert not self._preserve_cluster, "preserve_cluster cannot be True for upgrade tests"

        self.protocol_version = protocol_version

        cluster = self.cluster

        if (ordered):
            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")

        if (use_cache):
            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})

        start_rpc = kwargs.pop('start_rpc', False)
        if start_rpc:
            cluster.set_configuration_options(values={'start_rpc': True})

        cluster.set_configuration_options(values={'internode_compression': 'none'})
        if not cluster.nodelist():
            cluster.populate(nodes)
            node1 = cluster.nodelist()[0]
            self.original_install_dir = node1.get_install_dir()
            self.original_git_branch = cassandra_git_branch()
            self.original_version = get_version_from_build(node_path=node1.get_path())
            if OLD_CASSANDRA_DIR:
                cluster.set_install_dir(install_dir=OLD_CASSANDRA_DIR)
            else:
                # upgrade from 3.0 to current install dir if we're running from trunk
                if self.original_git_branch == 'trunk':
                    cluster.set_install_dir(version='git:cassandra-3.0')
            cluster.start(wait_for_binary_proto=True)
            debug('starting from {}'.format(get_version_from_build(node1.get_install_dir())))

        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        session = self.patient_cql_connection(node1, protocol_version=protocol_version)
        if create_keyspace:
            self.create_ks(session, 'ks', rf)

        return session
Example #21
    def run(self):
        version_from_nodetool = self.node.nodetool('version')[0].strip()
        version_from_build = common.get_version_from_build(self.node.get_install_dir())

        if version_from_nodetool and (version_from_nodetool != version_from_build):
            print_('nodetool reports Cassandra version {ntv}; '
                   'version from build.xml is {bv}'.format(ntv=version_from_nodetool,
                                                           bv=version_from_build),
                   file=sys.stderr)

        print_(version_from_build)
Example #22
    def upgrade_with_index_creation_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.16")
        cluster.populate(2).start()

        [node1, node2] = cluster.nodelist()

        cli = node1.cli()
        cli.do(
            "create keyspace test with placement_strategy = 'SimpleStrategy' and strategy_options = {replication_factor : 2} and durable_writes = true"
        )
        cli.do("use test")
        cli.do(
            "create column family sc_test with column_type = 'Super' and comparator = 'UTF8Type' and subcomparator = 'UTF8Type' and default_validation_class = 'UTF8Type' and key_validation_class = 'UTF8Type'"
        )

        for i in range(0, 2):
            for j in range(0, 2):
                cli.do("set sc_test['k0']['sc%d']['c%d'] = 'v'" % (i, j))

        assert not cli.has_errors(), cli.errors()
        cli.close()

        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            #Upgrade nodes to 2.0.
            #See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0")
            time.sleep(.5)

        # Upgrade node 1
        node1.flush()
        time.sleep(.5)
        node1.stop(wait_other_notice=True)
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True)
        time.sleep(.5)

        cli = node1.cli()
        cli.do("use test")
        cli.do("consistencylevel as quorum")

        # Check we can still get data properly
        cli.do("get sc_test['k0']")
        assert_scs(cli, ['sc0', 'sc1'])
        assert_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['k0']['sc1']")
        assert_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['k0']['sc1']['c1']")
        assert_columns(cli, ['c1'])
Example #23
    def run(self):
        version_from_nodetool = self.node.nodetool('version')[0].strip()
        version_from_build = common.get_version_from_build(self.node.get_install_dir())

        if version_from_nodetool and (version_from_nodetool != version_from_build):
            print_('nodetool reports Cassandra version {ntv}; '
                   'version from build.xml is {bv}'.format(ntv=version_from_nodetool,
                                                           bv=version_from_build),
                   file=sys.stderr)

        print_(version_from_build)
Example #24
def compile_version(version, target_dir, verbose=False):
    assert_jdk_valid_for_cassandra_version(get_version_from_build(target_dir))

    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Cassandra {} ...".format(version))
    logger.info("--- Cassandra Build -------------------\n")
    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`ant jar` failed. Retry #%s...\n\n" % attempt)
            process = subprocess.Popen([platform_binary('ant'), 'jar'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError('Error compiling Cassandra. See {logfile} or run '
                           '"ccm showlastlog" for details'.format(logfile=logfile))
    except OSError as e:
        raise CCMError("Error compiling Cassandra. Is ant installed? See %s for details" % logfile)

    logger.info("\n\n--- cassandra/stress build ------------\n")
    stress_dir = os.path.join(target_dir, "tools", "stress") if (
        version >= "0.8.0") else \
        os.path.join(target_dir, "contrib", "stress")

    build_xml = os.path.join(stress_dir, 'build.xml')
    if os.path.exists(build_xml):  # building stress separately is only necessary pre-1.1
        try:
            # set permissions correctly, seems to not always be the case
            stress_bin_dir = os.path.join(stress_dir, 'bin')
            for f in os.listdir(stress_bin_dir):
                full_path = os.path.join(stress_bin_dir, f)
                os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

            process = subprocess.Popen([platform_binary('ant'), 'build'], cwd=stress_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val != 0:
                process = subprocess.Popen([platform_binary('ant'), 'stress-build'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
                if ret_val != 0:
                    raise CCMError("Error compiling Cassandra stress tool.  "
                                   "See %s for details (you will still be able to use ccm "
                                   "but not the stress related commands)" % logfile)
        except IOError as e:
            raise CCMError("Error compiling Cassandra stress tool: %s (you will "
                           "still be able to use ccm but not the stress related commands)" % str(e))
    def do_upgrade(self, session):
        """
        Upgrades the first node in the cluster and returns a list of
        (is_upgraded, Session) tuples.  If `is_upgraded` is true, the
        Session is connected to the upgraded node.
        """
        session.cluster.shutdown()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        # stop the nodes
        node1.drain()
        node1.stop(gently=True)

        # Ignore errors before upgrade on Windows
        # We ignore errors from 2.1, because windows 2.1
        # support is only beta. There are frequent log errors,
        # related to filesystem interactions that are a direct result
        # of the lack of full functionality on 2.1 Windows, and we don't
        # want these to pollute our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))

        node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)

        # this is a bandaid; after refactoring, upgrades should account for protocol version
        new_version_from_build = get_version_from_build(node1.get_install_dir())
        if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
            self.skip('Protocol version {} incompatible '
                      'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
        node1.set_log_level("DEBUG" if DEBUG else "INFO")
        node1.set_configuration_options(values={'internode_compression': 'none'})
        node1.start(wait_for_binary_proto=True, wait_other_notice=True)

        sessions = []
        session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version)
        session.set_keyspace('ks')
        sessions.append((True, session))

        # open a second session with the node on the old version
        session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version)
        session.set_keyspace('ks')
        sessions.append((False, session))

        if self.CL:
            for is_upgraded, session in sessions:
                session.default_consistency_level = self.CL

        return sessions
    def upgrade_with_index_creation_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.16")
        cluster.populate(2).start()

        [node1, node2] = cluster.nodelist()

        cli = node1.cli()
        cli.do("create keyspace test with placement_strategy = 'SimpleStrategy' and strategy_options = {replication_factor : 2} and durable_writes = true")
        cli.do("use test")
        cli.do("create column family sc_test with column_type = 'Super' and comparator = 'UTF8Type' and subcomparator = 'UTF8Type' and default_validation_class = 'UTF8Type' and key_validation_class = 'UTF8Type'")

        for i in range(0, 2):
            for j in range(0, 2):
                cli.do("set sc_test['k0']['sc%d']['c%d'] = 'v'" % (i, j))

        assert not cli.has_errors(), cli.errors()
        cli.close()

        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            #Upgrade nodes to 2.0.
            #See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0")
            time.sleep(.5)

        # Upgrade node 1
        node1.flush()
        time.sleep(.5)
        node1.stop(wait_other_notice=True)
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True)
        time.sleep(.5)

        cli = node1.cli()
        cli.do("use test")
        cli.do("consistencylevel as quorum")

        # Check we can still get data properly
        cli.do("get sc_test['k0']")
        assert_scs(cli, ['sc0', 'sc1'])
        assert_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['k0']['sc1']")
        assert_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['k0']['sc1']['c1']")
        assert_columns(cli, ['c1'])
Example #27
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            exit(1)
        if not options.version:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)

            common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))
Example #28
    def validate(self, parser, options, args):
        if options.scylla and not options.install_dir:
            parser.error("must specify install_dir using scylla")
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            sys.exit(1)
        if self.options.snitch and \
            (not isinstance(self.nodes, list) or
             not (self.options.snitch == 'org.apache.cassandra.locator.PropertyFileSnitch' or
                  self.options.snitch == 'org.apache.cassandra.locator.GossipingPropertyFileSnitch')):
            parser.print_help()
            sys.exit(1)

        if not options.version and not options.docker_image:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)

            common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))
        if common.is_win() and os.path.exists(r'c:\windows\system32\java.exe'):
            print_(r"""WARN: c:\windows\system32\java.exe exists.
                This may cause registry issues, and jre7 to be used, despite jdk8 being installed.
                """)

        if options.scylla_core_package_uri:
            os.environ['SCYLLA_CORE_PACKAGE'] = options.scylla_core_package_uri
        if options.scylla_tools_java_package_uri:
            os.environ['SCYLLA_TOOLS_JAVA_PACKAGE'] = options.scylla_tools_java_package_uri
            # TODO: remove this export eventually, it's for backward
            # compatibility with the previous name
            os.environ['SCYLLA_JAVA_TOOLS_PACKAGE'] = options.scylla_tools_java_package_uri
        if options.scylla_jmx_package_uri:
            os.environ['SCYLLA_JMX_PACKAGE'] = options.scylla_jmx_package_uri
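
For orientation, parse_populate_count() accepts either a single node count or a colon-separated per-datacenter list, which is why the snitch check above requires a list; the return shapes below are inferred from those checks rather than quoted from ccm:

# Inferred behaviour of parse_populate_count() (illustrative only):
#   parse_populate_count("3")    -> 3         a single-DC cluster of three nodes
#   parse_populate_count("2:3")  -> [2, 3]    two datacenters, hence the isinstance(..., list) check
#   parse_populate_count(None)   -> None      nothing to populate in this command
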
Example #29
    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" %
                         (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.options.vnodes and self.nodes is None:
            print_(
                "Can't set --vnodes if not populating cluster in this command."
            )
            parser.print_help()
            exit(1)
        if not options.version:
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error(
                    "%s is not a valid cassandra directory. You must define a cassandra dir or version."
                    % options.install_dir)

            common.assert_jdk_valid_for_cassandra_version(
                common.get_version_from_build(options.install_dir))
Example #30
 def __get_version_from_build(self):
     return common.get_version_from_build(self.get_cassandra_dir())
Example #31
            raise


# There are times when we want to know the C* version we're testing against
# before we call Tester.setUp. In the general case, we can't know that -- the
# test method could use any version it wants for self.cluster. However, we can
# get the version from build.xml in the C* repository specified by
# CASSANDRA_VERSION or CASSANDRA_DIR. This should use the same resolution
# strategy as the actual checkout code in Tester.setUp; if it does not, that is
# a bug.
_cassandra_version_slug = os.environ.get('CASSANDRA_VERSION')
# Prefer CASSANDRA_VERSION if it's set in the environment. If not, use CASSANDRA_DIR
if _cassandra_version_slug:
    # fetch but don't build the specified C* version
    ccm_repo_cache_dir, _ = ccmlib.repository.setup(_cassandra_version_slug)
    CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(ccm_repo_cache_dir)
    CASSANDRA_GITREF = get_sha(
        ccm_repo_cache_dir)  # will be set None when not a git repo
else:
    CASSANDRA_VERSION_FROM_BUILD = LooseVersion("4.0")  # todo kjkjkj
    CASSANDRA_GITREF = ""
    #CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(self.dtest_config.cassandra_dir)
    #CASSANDRA_GITREF = get_sha(dtest_config.cassandra_dir)

# copy the initial environment variables so we can reset them later:
initial_environment = copy.deepcopy(os.environ)


class DtestTimeoutError(Exception):
    pass
Example #32
    def do_upgrade(self, session):
        """
        Upgrades the first node in the cluster and returns a list of
        (is_upgraded, Session) tuples.  If `is_upgraded` is true, the
        Session is connected to the upgraded node.
        """
        session.cluster.shutdown()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        if UPGRADE_MODE not in ('normal', 'all', 'none'):
            raise Exception("UPGRADE_MODE should be one of 'normal', 'all', or 'none'")

        # stop the nodes
        if UPGRADE_MODE != "none":
            node1.drain()
            node1.stop(gently=True)

        # Ignore errors before upgrade on Windows
        # We ignore errors from 2.1, because windows 2.1
        # support is only beta. There are frequent log errors,
        # related to filesystem interactions that are a direct result
        # of the lack of full functionality on 2.1 Windows, and we don't
        # want these to pollute our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        if UPGRADE_MODE == "all":
            node2.drain()
            node2.stop(gently=True)
            if is_win() and self.cluster.version() <= '2.2':
                node2.mark_log_for_errors()

        # choose version to upgrade to
        if UPGRADE_TO_DIR:
            install_kwargs = {'install_dir': UPGRADE_TO_DIR}
        elif UPGRADE_TO:
            install_kwargs = {'version': UPGRADE_TO}
        elif self.upgrade_path.upgrade_version:
            install_kwargs = {'version': self.upgrade_path.upgrade_version}
        else:
            install_kwargs = {'install_dir': self.original_install_dir}

        debug('upgrading to {}'.format(install_kwargs))

        # start them again
        if UPGRADE_MODE != "none":
            node1.set_install_dir(**install_kwargs)
            # this is a bandaid; after refactoring, upgrades should account for protocol version
            new_version_from_build = get_version_from_build(node1.get_install_dir())
            if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
                self.skip('Protocol version {} incompatible '
                          'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
            node1.set_log_level("DEBUG" if DEBUG else "INFO")
            node1.set_configuration_options(values={'internode_compression': 'none'})
            node1.start(wait_for_binary_proto=True)

        if UPGRADE_MODE == "all":
            node2.set_install_dir(**install_kwargs)
            # this is a bandaid; after refactoring, upgrades should account for protocol version
            new_version_from_build = get_version_from_build(node2.get_install_dir())
            if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
                self.skip('Protocol version {} incompatible '
                          'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
            node2.set_log_level("DEBUG" if DEBUG else "INFO")
            node2.set_configuration_options(values={'internode_compression': 'none'})
            node2.start(wait_for_binary_proto=True)

        sessions = []
        if QUERY_UPGRADED:
            session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version)
            session.set_keyspace('ks')
            sessions.append((True, session))
        if QUERY_OLD:
            # open a second session with the node on the old version
            session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version)
            session.set_keyspace('ks')
            sessions.append((False, session))

        if self.CL:
            for is_upgraded, session in sessions:
                session.default_consistency_level = self.CL

        return sessions
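
For reference, a hedged sketch of how the UPGRADE_MODE, QUERY_UPGRADED and QUERY_OLD knobs used above are typically taken from the environment; this is an assumption about the surrounding module, not verified source:

# Assumed module-level configuration; the real parsing may differ.
UPGRADE_MODE = os.environ.get('UPGRADE_MODE', 'normal').lower()
QUERY_UPGRADED = os.environ.get('QUERY_UPGRADED', 'true').lower() in ('yes', 'true')
QUERY_OLD = os.environ.get('QUERY_OLD', 'true').lower() in ('yes', 'true')
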
Example #33
 def __get_version_from_build(self):
     return common.get_version_from_build(self.get_cassandra_dir())
Example #34
    def prepare(self, ordered=False, create_keyspace=True, use_cache=False,
                nodes=None, rf=None, protocol_version=None, cl=None, **kwargs):
        nodes = self.NODES if nodes is None else nodes
        rf = self.RF if rf is None else rf

        cl = self.CL if cl is None else cl
        self.CL = cl  # store for later use in do_upgrade

        assert nodes >= 2, "backwards compatibility tests require at least two nodes"
        assert not self._preserve_cluster, "preserve_cluster cannot be True for upgrade tests"

        self.protocol_version = protocol_version

        cluster = self.cluster

        if (ordered):
            cluster.set_partitioner("org.apache.cassandra.dht.ByteOrderedPartitioner")

        if (use_cache):
            cluster.set_configuration_options(values={'row_cache_size_in_mb': 100})

        start_rpc = kwargs.pop('start_rpc', False)
        if start_rpc:
            cluster.set_configuration_options(values={'start_rpc': True})

        cluster.set_configuration_options(values={'internode_compression': 'none'})
        if not cluster.nodelist():
            cluster.populate(nodes)
            node1 = cluster.nodelist()[0]
            self.original_install_dir = node1.get_install_dir()
            self.original_version = get_version_from_build(node_path=node1.get_path())
            self.upgrade_path = get_default_upgrade_path(self.original_version, cdir=self.original_install_dir)
            if OLD_CASSANDRA_DIR:
                cluster.set_install_dir(install_dir=OLD_CASSANDRA_DIR)
                debug('running C* from {}'.format(OLD_CASSANDRA_DIR))
            elif OLD_CASSANDRA_VERSION:
                cluster.set_install_dir(version=OLD_CASSANDRA_VERSION)
                debug('installed C* {}'.format(OLD_CASSANDRA_VERSION))
            elif self.upgrade_path.starting_version:
                try:
                    cluster.set_install_dir(version=self.upgrade_path.starting_version)
                except Exception:
                    if self.upgrade_path.starting_version.startswith('binary'):
                        debug('Exception while downloading {}; falling back to source'.format(
                            self.upgrade_path.starting_version))
                        version_number = self.upgrade_path.starting_version.split(':')[-1]
                        source_ccm_id = 'git:cassandra-' + version_number
                        debug('Source identifier: {}'.format(source_ccm_id))
                        cluster.set_install_dir(version=source_ccm_id)

            # in other cases, just use the existing install directory
            cluster.start(wait_for_binary_proto=True)
            debug('starting from {}'.format(get_version_from_build(node1.get_install_dir())))

        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        session = self.patient_cql_connection(node1, protocol_version=protocol_version)
        if create_keyspace:
            self.create_ks(session, 'ks', rf)

        if cl:
            session.default_consistency_level = cl

        return session
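
A hedged sketch of the test-class shape that feeds the NODES/RF/CL defaults consumed above; the base-class and subclass names and the values are illustrative assumptions:

from cassandra import ConsistencyLevel

class ExampleUpgradeTest(UpgradeTester):  # base class name assumed
    NODES = 2
    RF = 1
    CL = ConsistencyLevel.ALL  # later applied to every session handed out by do_upgrade()

    def simple_upgrade_test(self):
        session = self.prepare(protocol_version=3)
        ...
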
Example #35
def pytest_collection_modifyitems(items, config):
    """
    This function is called upon during the pytest test collection phase and allows for modification
    of the test items within the list
    """
    collect_only = config.getoption("--collect-only")
    cassandra_dir, cassandra_version = cassandra_dir_and_version(config)
    if not collect_only and cassandra_dir is None:
        if cassandra_version is None:
            raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
                            "or --cassandra-version. You can also set 'cassandra_dir' in pytest.ini. "
                            "Refer to the documentation or invoke the help with --help.")

    # Either cassandra_version or cassandra_dir is defined, so figure out the version
    CASSANDRA_VERSION = cassandra_version or get_version_from_build(cassandra_dir)

    # Check that use_off_heap_memtables is supported in this c* version
    if config.getoption("--use-off-heap-memtables") and ("3.0" <= CASSANDRA_VERSION < "3.4"):
        raise Exception("The selected Cassandra version %s doesn't support the provided option "
                        "--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 "
                        "for details" % CASSANDRA_VERSION)


    selected_items = []
    deselected_items = []

    sufficient_system_resources_resource_intensive = sufficient_system_resources_for_resource_intensive_tests()
    logger.debug("has sufficient resources? %s" % sufficient_system_resources_resource_intensive)

    for item in items:
        deselect_test = False

        if config.getoption("--execute-upgrade-tests-only"):
            deselect_test = not item.get_closest_marker("upgrade_test")
            if deselect_test:
                logger.info("SKIP: Deselecting non-upgrade test %s because of --execute-upgrade-tests-only" % item.name)

        if item.get_closest_marker("resource_intensive") and not collect_only:
            force_resource_intensive = config.getoption("--force-resource-intensive-tests")
            skip_resource_intensive = config.getoption("--skip-resource-intensive-tests")
            if not force_resource_intensive:
                if skip_resource_intensive:
                    deselect_test = True
                    logger.info("SKIP: Deselecting test %s as test marked resource_intensive. To force execution of "
                          "this test re-run with the --force-resource-intensive-tests command line argument" % item.name)
                if not sufficient_system_resources_resource_intensive:
                    deselect_test = True
                    logger.info("SKIP: Deselecting resource_intensive test %s due to insufficient system resources" % item.name)

        if not item.get_closest_marker("resource_intensive") and not collect_only:
            only_resource_intensive = config.getoption("--only-resource-intensive-tests")
            if only_resource_intensive:
                deselect_test = True
                logger.info("SKIP: Deselecting non resource_intensive test %s as --only-resource-intensive-tests specified" % item.name)

        if item.get_closest_marker("no_vnodes"):
            if config.getoption("--use-vnodes"):
                deselect_test = True
                logger.info("SKIP: Deselecting test %s as the test requires vnodes to be disabled. To run this test, "
                      "re-run without the --use-vnodes command line argument" % item.name)

        if item.get_closest_marker("vnodes"):
            if not config.getoption("--use-vnodes"):
                deselect_test = True
                logger.info("SKIP: Deselecting test %s as the test requires vnodes to be enabled. To run this test, "
                            "re-run with the --use-vnodes command line argument" % item.name)

        for test_item_class in inspect.getmembers(item.module, inspect.isclass):
            if not hasattr(test_item_class[1], "pytestmark"):
                continue

            for module_pytest_mark in test_item_class[1].pytestmark:
                if module_pytest_mark.name == "upgrade_test":
                    deselect_test = not _upgrade_testing_enabled(config)

        if item.get_closest_marker("upgrade_test"):
            deselect_test = not _upgrade_testing_enabled(config)

        if item.get_closest_marker("no_offheap_memtables"):
            if config.getoption("use_off_heap_memtables"):
                deselect_test = True

        # deselect cqlsh tests that depend on fixing a driver behavior
        if item.get_closest_marker("depends_driver"):
            deselect_test = True

        if deselect_test:
            deselected_items.append(item)
        else:
            selected_items.append(item)

    config.hook.pytest_deselected(items=deselected_items)
    items[:] = selected_items
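The markers this collection hook inspects are ordinary pytest markers declared on the tests themselves. A hedged sketch of a marked test; the class and test names are illustrative:

import pytest

class TestCompactionExample(object):

    @pytest.mark.resource_intensive
    @pytest.mark.vnodes
    def test_large_compaction(self):
        # deselected by the hook above unless --use-vnodes is passed and the machine
        # has sufficient resources (or --force-resource-intensive-tests is given)
        pass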
Example #36
0
 def __get_version_from_build(self):
     return common.get_version_from_build(self.get_install_dir())
    def upgrade_with_index_creation_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.16")
        cluster.populate(2).start()

        [node1, node2] = cluster.nodelist()

        # wait for the rpc server to start
        session = self.patient_exclusive_cql_connection(node1)

        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()

        ksdef = KsDef()
        ksdef.name = 'test'
        ksdef.strategy_class = 'SimpleStrategy'
        ksdef.strategy_options = {'replication_factor': '2'}
        ksdef.durable_writes = True
        ksdef.cf_defs = []

        client.system_add_keyspace(ksdef)
        client.set_keyspace('test')

        # create a super column family with UTF8 for all types
        cfdef = CfDef()
        cfdef.keyspace = 'test'
        cfdef.name = 'sc_test'
        cfdef.column_type = 'Super'
        cfdef.comparator_type = 'UTF8Type'
        cfdef.subcomparator_type = 'UTF8Type'
        cfdef.key_validation_class = 'UTF8Type'
        cfdef.default_validation_class = 'UTF8Type'
        cfdef.caching = 'rows_only'

        client.system_add_column_family(cfdef)

        session.cluster.control_connection.wait_for_schema_agreement()

        for i in range(2):
            supercol_name = 'sc%d' % i
            for j in range(2):
                col_name = 'c%d' % j
                column = Column(name=col_name, value='v', timestamp=100)
                client.batch_mutate(
                    {'k0': {'sc_test': [Mutation(ColumnOrSuperColumn(super_column=SuperColumn(supercol_name, [column])))]}},
                    ThriftConsistencyLevel.ONE)

        session.cluster.shutdown()
        client.transport.close()

        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            # Upgrade nodes to 2.0.
            # See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0")
            time.sleep(.5)

        # Upgrade node 1
        node1.flush()
        time.sleep(.5)
        node1.stop(wait_other_notice=True)
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True)
        time.sleep(.5)

        # wait for the RPC server to start
        session = self.patient_exclusive_cql_connection(node1)

        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('test')

        # fetch all supercolumns
        column_parent = ColumnParent(column_family='sc_test')
        predicate = SlicePredicate(slice_range=SliceRange("", "", False, 100))
        super_columns = client.get_slice('k0', column_parent, predicate, ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(super_columns))
        for i in range(2):
            super_column = super_columns[i].super_column
            self.assertEqual('sc%d' % i, super_column.name)
            self.assertEqual(2, len(super_column.columns))
            for j in range(2):
                column = super_column.columns[j]
                self.assertEqual('c%d' % j, column.name)
                self.assertEqual('v', column.value)

        # fetch a single supercolumn
        column_parent = ColumnParent(column_family='sc_test', super_column='sc1')
        columns = client.get_slice('k0', column_parent, predicate, ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(columns))
        for j in range(2):
            column = columns[j].column
            self.assertEqual('c%d' % j, column.name)
            self.assertEqual('v', column.value)

        # fetch a single subcolumn
        predicate = SlicePredicate(column_names=['c1'])
        columns = client.get_slice('k0', column_parent, predicate, ThriftConsistencyLevel.QUORUM)
        self.assertEqual(1, len(columns))
        column = columns[0].column
        self.assertEqual('c%d' % j, column.name)
        self.assertEqual('v', column.value)
    def upgrade_with_counters_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.19")
        cluster.populate(3).start()

        node1, node2, node3 = cluster.nodelist()

        # wait for the rpc server to start
        session = self.patient_exclusive_cql_connection(node1)

        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()

        ksdef = KsDef()
        ksdef.name = 'test'
        ksdef.strategy_class = 'SimpleStrategy'
        ksdef.strategy_options = {'replication_factor': '2'}
        ksdef.durable_writes = True
        ksdef.cf_defs = []

        client.system_add_keyspace(ksdef)
        client.set_keyspace('test')

        # create a super column family with UTF8 for all types except for the
        # values, which are counters
        cfdef = CfDef()
        cfdef.keyspace = 'test'
        cfdef.name = 'sc_test'
        cfdef.column_type = 'Super'
        cfdef.comparator_type = 'UTF8Type'
        cfdef.subcomparator_type = 'UTF8Type'
        cfdef.key_validation_class = 'UTF8Type'
        cfdef.default_validation_class = 'CounterColumnType'

        client.system_add_column_family(cfdef)

        session.cluster.control_connection.wait_for_schema_agreement()

        for i in range(2):
            supercol_name = 'sc%d' % i
            column_parent = ColumnParent(column_family='sc_test', super_column=supercol_name)
            for j in range(2):
                col_name = 'c%d' % j
                column = CounterColumn(name=col_name, value=1)
                for k in range(20):
                    client.add('Counter1', column_parent, column, ThriftConsistencyLevel.ONE)

        # If we are on 2.1 or any higher version upgrade to 2.0.latest.
        # Otherwise, we must be on a 2.0.x, so we should be upgrading to that version.
        # This will let us test upgrading from 1.2.19 to each of the 2.0 minor releases.
        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            # Upgrade nodes to 2.0.
            # See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node1])
            time.sleep(.5)
        else:
            node1.drain()
            node1.watch_log_for("DRAINED")
            node1.stop(wait_other_notice=False)
            self.set_node_to_current_version(node1)
            node1.start(wait_other_notice=True)

        # wait for the RPC server to start
        session = self.patient_exclusive_cql_connection(node1)

        for node in (node1, node2, node3):
            host, port = node.network_interfaces['thrift']
            client = get_thrift_client(host, port)
            client.transport.open()
            client.set_keyspace('test')
            for i in range(2):
                supercol_name = 'sc%d' % i
                column_parent = ColumnParent(column_family='sc_test', super_column=supercol_name)
                for j in range(2):
                    col_name = 'c%d' % j
                    column = CounterColumn(name=col_name, value=1)
                    for k in range(50):
                        client.add('Counter1', column_parent, column, ThriftConsistencyLevel.ONE)

            client.transport.close()

        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            # Upgrade nodes to 2.0.
            # See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node2, node3])
            time.sleep(.5)
        else:
            node2.drain()
            node3.drain()
            node2.watch_log_for("DRAINED")
            node3.watch_log_for("DRAINED")
            node2.stop(wait_other_notice=False)
            node3.stop(wait_other_notice=False)
            self.set_node_to_current_version(node2)
            self.set_node_to_current_version(node3)
            node2.start(wait_other_notice=True)
            node3.start(wait_other_notice=True)

        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('test')

        column_parent = ColumnParent(column_family='sc_test')
        predicate = SlicePredicate(slice_range=SliceRange("", "", False, 100))
        super_columns = client.get_slice('Counter1', column_parent, predicate, ThriftConsistencyLevel.QUORUM)
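        # Each counter column was incremented 20 times before the upgrade and
        # 50 more times from each of the three nodes afterwards, so every
        # counter is expected to read 20 + 3 * 50 = 170.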
        self.assertEqual(2, len(super_columns))
        for i in range(2):
            super_column = super_columns[i].counter_super_column
            self.assertEqual('sc%d' % i, super_column.name)
            self.assertEqual(2, len(super_column.columns))
            for j in range(2):
                column = super_column.columns[j]
                self.assertEqual('c%d' % j, column.name)
                self.assertEqual(170, column.value)

        # fetch a single supercolumn
        column_parent = ColumnParent(column_family='sc_test', super_column='sc1')
        columns = client.get_slice('Counter1', column_parent, predicate, ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(columns))
        for j in range(2):
            column = columns[j].counter_column
            self.assertEqual('c%d' % j, column.name)
            self.assertEqual(170, column.value)

        # fetch a single subcolumn
        predicate = SlicePredicate(column_names=['c1'])
        columns = client.get_slice('Counter1', column_parent, predicate, ThriftConsistencyLevel.QUORUM)
        self.assertEqual(1, len(columns))
        column = columns[0].counter_column
        self.assertEqual('c%d' % j, column.name)
        self.assertEqual(170, column.value)
    def upgrade_with_counters_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.19")
        cluster.populate(3).start()

        node1, node2, node3 = cluster.nodelist()

        cli = node1.cli()
        cli.do("create keyspace test with placement_strategy = 'SimpleStrategy' and strategy_options = {replication_factor : 2} and durable_writes = true")
        cli.do("use test")
        cli.do("create column family sc_test with column_type = 'Super' and default_validation_class = 'CounterColumnType' AND key_validation_class=UTF8Type AND comparator=UTF8Type")

        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(20):
                    cli.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" % (i, j))

        assert not cli.has_errors(), cli.errors()
        cli.close()


        # If we are on 2.1 or any higher version, upgrade to 2.0.latest.
        # Otherwise, we must be on a 2.0.x, so we should be upgrading to that version.
        # This will let us test upgrading from 1.2.19 to each of the 2.0 minor releases.
        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            #Upgrade nodes to 2.0.
            #See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node1])
            time.sleep(.5)
        else:
            node1.drain()
            node1.watch_log_for("DRAINED")
            node1.stop(wait_other_notice=False)
            self.set_node_to_current_version(node1)
            node1.start(wait_other_notice=True)

        cli = node1.cli()
        cli.do("use test")
        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(50):
                    cli.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" % (i, j))

        cli2 = node2.cli()
        cli2.do("use test")
        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(50):
                    cli2.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" % (i, j))

        cli3 = node3.cli()
        cli3.do("use test")
        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(50):
                    cli3.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" % (i, j))

        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            #Upgrade nodes to 2.0.
            #See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node2, node3])
            time.sleep(.5)
        else:
            node2.drain()
            node3.drain()
            node2.watch_log_for("DRAINED")
            node3.watch_log_for("DRAINED")
            node2.stop(wait_other_notice=False)
            node3.stop(wait_other_notice=False)
            self.set_node_to_current_version(node2)
            self.set_node_to_current_version(node3)
            node2.start(wait_other_notice=True)
            node3.start(wait_other_notice=True)

        cli = node1.cli()
        cli.do("use test")
        cli.do("consistencylevel as quorum")

        # Check we can still get data properly
        cli.do("get sc_test['Counter1']")
        assert_scs(cli, ['sc0', 'sc1'])
        assert_counter_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['Counter1']['sc1']")
        assert_counter_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['Counter1']['sc1']['c1']")
        assert_counter_columns(cli, ['c1'])

        assert not cli.has_errors(), cli.errors()
        cli.close()
Example #40
0
    def run(self, argv):
        parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog,
                                                                                                             max_help_position=100,
                                                                                                             width=200))

        # this is a bit ugly: all of our command line arguments are added and configured as part
        # of pytest. however, we also have this wrapper script to make it easier for those who
        # aren't comfortable calling pytest directly. To avoid duplicating code (e.g. have the options
        # in two separate places) we directly use the pytest_addoption fixture from conftest.py. Unfortunately,
        # pytest wraps ArgumentParser, so, first we add the options to a pytest Parser, and then we pull
        # all of those custom options out and add them to the unwrapped ArgumentParser we want to use
        # here inside of run_dtests.py.
        #
        # So NOTE: to add a command line argument, if you're trying to do so by adding it here, you're doing it wrong!
        # add it to conftest.py:pytest_addoption
        pytest_parser = Parser()
        pytest_addoption(pytest_parser)

        # add all of the options from the pytest Parser we created, and add them into our ArgumentParser instance
        pytest_custom_opts = pytest_parser._anonymous
        for opt in pytest_custom_opts.options:
            parser.add_argument(opt._long_opts[0], action=opt._attrs['action'],
                                default=opt._attrs.get('default', None),
                                help=opt._attrs.get('help', None))

        parser.add_argument("--dtest-enable-debug-logging", action="store_true", default=False,
                            help="Enable debug logging (for this script, pytest, and during execution "
                                 "of test functions)")
        parser.add_argument("--dtest-print-tests-only", action="store_true", default=False,
                            help="Print list of all tests found eligible for execution given the provided options.")
        parser.add_argument("--dtest-print-tests-output", action="store", default=False,
                            help="Path to file where the output of --dtest-print-tests-only should be written to")
        parser.add_argument("--pytest-options", action="store", default=None,
                            help="Additional command line arguments to proxy directly thru when invoking pytest.")
        parser.add_argument("--dtest-tests", action="store", default=None,
                            help="Comma separated list of test files, test classes, or test methods to execute.")

        args = parser.parse_args()

        if not args.dtest_print_tests_only:
            if args.cassandra_dir is None and args.cassandra_version is None:
                raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
                                "or --cassandra-version. Refer to the documentation or invoke the help with --help.")

            # Either cassandra_version or cassandra_dir is defined, so figure out the version
            CASSANDRA_VERSION = args.cassandra_version or get_version_from_build(args.cassandra_dir)

            if args.use_off_heap_memtables and ("3.0" <= CASSANDRA_VERSION < "3.4"):
                raise Exception("The selected Cassandra version %s doesn't support the provided option "
                                "--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 "
                                "for details" % CASSANDRA_VERSION)

        if args.dtest_enable_debug_logging:
            logging.root.setLevel(logging.DEBUG)
            logger.setLevel(logging.DEBUG)


        # Get dictionaries corresponding to each point in the configuration matrix
        # we want to run, then generate a config object for each of them.
        logger.debug('Generating configurations from the following matrix:\n\t{}'.format(args))

        args_to_invoke_pytest = []
        if args.pytest_options:
            for arg in args.pytest_options.split(" "):
                args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))

        for arg in argv:
            if arg.startswith("--pytest-options") or arg.startswith("--dtest-"):
                continue
            args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))

        if args.dtest_print_tests_only:
            args_to_invoke_pytest.append("'--collect-only'")

        if args.dtest_tests:
            for test in args.dtest_tests.split(","):
                args_to_invoke_pytest.append("'{test_name}'".format(test_name=test))

        original_raw_cmd_args = ", ".join(args_to_invoke_pytest)

        logger.debug("args to call with: [%s]" % original_raw_cmd_args)

        # the original run_dtests.py script did it like this to hack around nosetest
        # limitations -- I'm not sure whether they still apply in a pytest world,
        # but for now we leave it as is because it does the job (although the code
        # is still fairly complicated and feels a bit hacky)
        to_execute = (
                "import pytest\n" +
                (
                "pytest.main([{options}])\n").format(options=original_raw_cmd_args)
        )
        temp = NamedTemporaryFile(dir=getcwd())
        logger.debug('Writing the following to {}:'.format(temp.name))

        logger.debug('```\n{to_execute}```\n'.format(to_execute=to_execute))
        temp.write(to_execute.encode("utf-8"))
        temp.flush()

        # We pass nose_argv as options to the python call to maintain
        # compatibility with the nosetests command. Arguments passed in via the
        # command line are treated one way, args passed in as
        # nose.main(argv=...) are treated another. Compare with the options
        # -xsv for an example.
        cmd_list = [sys.executable, temp.name]
        logger.debug('subprocess.call-ing {cmd_list}'.format(cmd_list=cmd_list))

        sp = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())

        if args.dtest_print_tests_only:
            stdout, stderr = sp.communicate()

            if stderr:
                print(stderr.decode("utf-8"))
                result = sp.returncode
                exit(result)

            all_collected_test_modules = collect_test_modules(stdout)
            joined_test_modules = "\n".join(all_collected_test_modules)
            #print("Collected %d Test Modules" % len(all_collected_test_modules))
            if args.dtest_print_tests_output is not None:
                collected_tests_output_file = open(args.dtest_print_tests_output, "w")
                collected_tests_output_file.write(joined_test_modules)
                collected_tests_output_file.close()

            print(joined_test_modules)
        else:
            while True:
                stdout_output = sp.stdout.readline()
                stdout_output_str = stdout_output.decode("utf-8")
                if stdout_output_str == '' and sp.poll() is not None:
                    break
                if stdout_output_str:
                    print(stdout_output_str.strip())

                stderr_output = sp.stderr.readline()
                stderr_output_str = stderr_output.decode("utf-8")
                if stderr_output_str == '' and sp.poll() is not None:
                    break
                if stderr_output_str:
                    print(stderr_output_str.strip())

        exit(sp.returncode)
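For illustration, the temporary script handed to the subprocess ends up being a tiny pytest driver along these lines; the exact option list depends on what was passed through, and the path shown is purely illustrative:

import pytest
pytest.main(['--cassandra-dir=/path/to/cassandra', '--collect-only'])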
Example #41
0
    def upgrade_with_counters_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.19")
        cluster.populate(3).start()

        node1, node2, node3 = cluster.nodelist()

        cli = node1.cli()
        cli.do(
            "create keyspace test with placement_strategy = 'SimpleStrategy' and strategy_options = {replication_factor : 2} and durable_writes = true"
        )
        cli.do("use test")
        cli.do(
            "create column family sc_test with column_type = 'Super' and default_validation_class = 'CounterColumnType' AND key_validation_class=UTF8Type AND comparator=UTF8Type"
        )

        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(20):
                    cli.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" %
                           (i, j))

        assert not cli.has_errors(), cli.errors()
        cli.close()

        # If we are on 2.1 or any higher version, upgrade to 2.0.latest.
        # Otherwise, we must be on a 2.0.x, so we should be upgrading to that version.
        # This will let us test upgrading from 1.2.19 to each of the 2.0 minor releases.
        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            #Upgrade nodes to 2.0.
            #See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node1])
            time.sleep(.5)
        else:
            node1.drain()
            node1.watch_log_for("DRAINED")
            node1.stop(wait_other_notice=False)
            self.set_node_to_current_version(node1)
            node1.start(wait_other_notice=True)

        cli = node1.cli()
        cli.do("use test")
        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(50):
                    cli.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" %
                           (i, j))

        cli2 = node2.cli()
        cli2.do("use test")
        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(50):
                    cli2.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" %
                            (i, j))

        cli3 = node3.cli()
        cli3.do("use test")
        for i in xrange(2):
            for j in xrange(2):
                for k in xrange(50):
                    cli3.do("incr sc_test['Counter1']['sc%d']['c%d'] by 1" %
                            (i, j))

        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            #Upgrade nodes to 2.0.
            #See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node2, node3])
            time.sleep(.5)
        else:
            node2.drain()
            node3.drain()
            node2.watch_log_for("DRAINED")
            node3.watch_log_for("DRAINED")
            node2.stop(wait_other_notice=False)
            node3.stop(wait_other_notice=False)
            self.set_node_to_current_version(node2)
            self.set_node_to_current_version(node3)
            node2.start(wait_other_notice=True)
            node3.start(wait_other_notice=True)

        cli = node1.cli()
        cli.do("use test")
        cli.do("consistencylevel as quorum")

        # Check we can still get data properly
        cli.do("get sc_test['Counter1']")
        assert_scs(cli, ['sc0', 'sc1'])
        assert_counter_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['Counter1']['sc1']")
        assert_counter_columns(cli, ['c0', 'c1'])

        cli.do("get sc_test['Counter1']['sc1']['c1']")
        assert_counter_columns(cli, ['c1'])

        assert not cli.has_errors(), cli.errors()
        cli.close()
Example #42
0
# set python-driver log level to INFO by default for dtest
logging.getLogger('cassandra').setLevel(logging.INFO)

# There are times when we want to know the C* version we're testing against
# before we call Tester.setUp. In the general case, we can't know that -- the
# test method could use any version it wants for self.cluster. However, we can
# get the version from build.xml in the C* repository specified by
# CASSANDRA_VERSION or CASSANDRA_DIR. This should use the same resolution
# strategy as the actual checkout code in Tester.setUp; if it does not, that is
# a bug.
_cassandra_version_slug = os.environ.get('CASSANDRA_VERSION')
# Prefer CASSANDRA_VERSION if it's set in the environment. If not, use CASSANDRA_DIR
if _cassandra_version_slug:
    # fetch but don't build the specified C* version
    ccm_repo_cache_dir, _ = ccmlib.repository.setup(_cassandra_version_slug)
    CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(ccm_repo_cache_dir)
else:
    CASSANDRA_VERSION_FROM_BUILD = get_version_from_build(CASSANDRA_DIR)


class expect_control_connection_failures(object):
    """
    We're just using a class here as a one-off object with a filter method, for
    use as a filter object in the driver logger. It's frustrating that we can't
    just pass in a function, but we need an object with a .filter method. Oh
    well, I guess that's what old stdlib libraries are like.
    """
    @staticmethod
    def filter(record):
        expected_strings = [
            'Control connection failed to connect, shutting down Cluster:',
def pytest_collection_modifyitems(items, config):
    """
    This function is called upon during the pytest test collection phase and allows for modification
    of the test items within the list
    """
    collect_only = config.getoption("--collect-only")
    cassandra_dir = config.getoption("--cassandra-dir")
    cassandra_version = config.getoption("--cassandra-version")
    if not collect_only and cassandra_dir is None:
        if cassandra_version is None:
            raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
                            "or --cassandra-version. Refer to the documentation or invoke the help with --help.")

    # Either cassandra_version or cassandra_dir is defined, so figure out the version
    CASSANDRA_VERSION = cassandra_version or get_version_from_build(cassandra_dir)

    # Check that use_off_heap_memtables is supported in this c* version
    if config.getoption("--use-off-heap-memtables") and ("3.0" <= CASSANDRA_VERSION < "3.4"):
        raise Exception("The selected Cassandra version %s doesn't support the provided option "
                        "--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 "
                        "for details" % CASSANDRA_VERSION)


    selected_items = []
    deselected_items = []

    sufficient_system_resources_resource_intensive = sufficient_system_resources_for_resource_intensive_tests()
    logger.debug("has sufficient resources? %s" % sufficient_system_resources_resource_intensive)

    for item in items:
        deselect_test = False

        if item.get_closest_marker("resource_intensive") and not collect_only:
            force_resource_intensive = config.getoption("--force-resource-intensive-tests")
            skip_resource_intensive = config.getoption("--skip-resource-intensive-tests")
            if not force_resource_intensive:
                if skip_resource_intensive:
                    deselect_test = True
                    logger.info("SKIP: Deselecting test %s as test marked resource_intensive. To force execution of "
                          "this test re-run with the --force-resource-intensive-tests command line argument" % item.name)
                if not sufficient_system_resources_resource_intensive:
                    deselect_test = True
                    logger.info("SKIP: Deselecting resource_intensive test %s due to insufficient system resources" % item.name)

        if item.get_closest_marker("no_vnodes"):
            if config.getoption("--use-vnodes"):
                deselect_test = True
                logger.info("SKIP: Deselecting test %s as the test requires vnodes to be disabled. To run this test, "
                      "re-run without the --use-vnodes command line argument" % item.name)

        if item.get_closest_marker("vnodes"):
            if not config.getoption("--use-vnodes"):
                deselect_test = True
                logger.info("SKIP: Deselecting test %s as the test requires vnodes to be enabled. To run this test, "
                            "re-run with the --use-vnodes command line argument" % item.name)

        for test_item_class in inspect.getmembers(item.module, inspect.isclass):
            if not hasattr(test_item_class[1], "pytestmark"):
                continue

            for module_pytest_mark in test_item_class[1].pytestmark:
                if module_pytest_mark.name == "upgrade_test":
                    if not config.getoption("--execute-upgrade-tests"):
                        deselect_test = True

        if item.get_closest_marker("upgrade_test"):
            if not config.getoption("--execute-upgrade-tests"):
                deselect_test = True

        if item.get_closest_marker("no_offheap_memtables"):
            if config.getoption("use_off_heap_memtables"):
                deselect_test = True

        # temporarily deselect tests in cqlsh_copy_tests that depend on cqlshlib,
        # until cqlshlib is Python 3 compatible
        if item.get_marker("depends_cqlshlib"):
            deselect_test = True

        if deselect_test:
            deselected_items.append(item)
        else:
            selected_items.append(item)

    config.hook.pytest_deselected(items=deselected_items)
    items[:] = selected_items
Example #44
0
 def __get_version_from_build(self):
     return common.get_version_from_build(self.get_install_dir(),
                                          repo_dir=self.elassandra_repo_dir)
Example #45
0
 def run(self):
     print_(common.get_version_from_build(self.node.get_install_dir()))
    def upgrade_with_index_creation_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.16")
        if "memtable_allocation_type" in cluster._config_options:
            cluster._config_options.__delitem__("memtable_allocation_type")
        cluster.populate(2).start()

        node1, node2 = cluster.nodelist()

        # wait for the rpc server to start
        session = self.patient_exclusive_cql_connection(node1)

        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()

        ksdef = KsDef()
        ksdef.name = 'test'
        ksdef.strategy_class = 'SimpleStrategy'
        ksdef.strategy_options = {'replication_factor': '2'}
        ksdef.durable_writes = True
        ksdef.cf_defs = []

        client.system_add_keyspace(ksdef)

        session.cluster.control_connection.wait_for_schema_agreement()

        client.set_keyspace('test')

        # create a super column family with UTF8 for all types
        cfdef = CfDef()
        cfdef.keyspace = 'test'
        cfdef.name = 'sc_test'
        cfdef.column_type = 'Super'
        cfdef.comparator_type = 'UTF8Type'
        cfdef.subcomparator_type = 'UTF8Type'
        cfdef.key_validation_class = 'UTF8Type'
        cfdef.default_validation_class = 'UTF8Type'
        cfdef.caching = 'rows_only'

        client.system_add_column_family(cfdef)

        session.cluster.control_connection.wait_for_schema_agreement()

        for i in range(2):
            supercol_name = 'sc%d' % i
            for j in range(2):
                col_name = 'c%d' % j
                column = Column(name=col_name, value='v', timestamp=100)
                client.batch_mutate(
                    {
                        'k0': {
                            'sc_test': [
                                Mutation(
                                    ColumnOrSuperColumn(
                                        super_column=SuperColumn(
                                            supercol_name, [column])))
                            ]
                        }
                    }, ThriftConsistencyLevel.ONE)

        session.cluster.shutdown()
        client.transport.close()

        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            # Upgrade nodes to 2.0.
            # See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0")
            time.sleep(.5)

        # Upgrade node 1
        node1.flush()
        time.sleep(.5)
        node1.stop(wait_other_notice=True)
        self.set_node_to_current_version(node1)
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        time.sleep(.5)

        # wait for the RPC server to start
        session = self.patient_exclusive_cql_connection(node1)

        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('test')

        # fetch all supercolumns
        column_parent = ColumnParent(column_family='sc_test')
        predicate = SlicePredicate(slice_range=SliceRange("", "", False, 100))
        super_columns = client.get_slice('k0', column_parent, predicate,
                                         ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(super_columns))
        for i in range(2):
            super_column = super_columns[i].super_column
            self.assertEqual('sc%d' % i, super_column.name)
            self.assertEqual(2, len(super_column.columns))
            for j in range(2):
                column = super_column.columns[j]
                self.assertEqual('c%d' % j, column.name)
                self.assertEqual('v', column.value)

        # fetch a single supercolumn
        column_parent = ColumnParent(column_family='sc_test',
                                     super_column='sc1')
        columns = client.get_slice('k0', column_parent, predicate,
                                   ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(columns))
        for j in range(2):
            column = columns[j].column
            self.assertEqual('c%d' % j, column.name)
            self.assertEqual('v', column.value)

        # fetch a single subcolumn
        predicate = SlicePredicate(column_names=['c1'])
        columns = client.get_slice('k0', column_parent, predicate,
                                   ThriftConsistencyLevel.QUORUM)
        self.assertEqual(1, len(columns))
        column = columns[0].column
        self.assertEqual('c%d' % j, column.name)
        self.assertEqual('v', column.value)
    def do_upgrade(self, session, use_thrift=False, return_nodes=False, **kwargs):
        """
        Upgrades the first node in the cluster and yields
        (is_upgraded, Session) tuples. If `is_upgraded` is true, the
        Session is connected to the upgraded node. If `return_nodes`
        is True, (is_upgraded, Session, Node) tuples are yielded
        instead.
        """
        session.cluster.shutdown()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        # stop the nodes, this can fail due to https://issues.apache.org/jira/browse/CASSANDRA-8220 on MacOS
        # for the tests that run against 2.0. You will need to run those in Linux.
        node1.drain()
        node1.stop(gently=True)

        # Ignore errors before upgrade on Windows.
        # We ignore errors from 2.1 because Windows support on 2.1 is only
        # beta. There are frequent log errors related to filesystem
        # interactions that are a direct result of the lack of full
        # functionality on 2.1 Windows, and we don't want these to pollute
        # our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        logger.debug('upgrading node1 to {}'.format(self.UPGRADE_PATH.upgrade_version))
        switch_jdks(self.UPGRADE_PATH.upgrade_meta.java_version)

        node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)

        # this is a bandaid; after refactoring, upgrades should account for protocol version
        new_version_from_build = get_version_from_build(node1.get_install_dir())

        # Check if a since annotation with a max_version was set on this test.
        # The since decorator can only check the starting version of the upgrade,
        # so here we check the new version of the upgrade as well.
        if hasattr(self, 'max_version') and self.max_version is not None and new_version_from_build >= self.max_version:
            pytest.skip("Skipping test, new version {} is equal to or higher than "
                        "max version {}".format(new_version_from_build, self.max_version))

        if (new_version_from_build >= '3' and self.protocol_version is not None and self.protocol_version < 3):
            pytest.skip('Protocol version {} incompatible '
                        'with Cassandra version {}'.format(self.protocol_version, new_version_from_build))
        node1.set_log_level(logging.getLevelName(logging.root.level))
        node1.set_configuration_options(values={'internode_compression': 'none'})

        if use_thrift and node1.get_cassandra_version() < '4':
            node1.set_configuration_options(values={'start_rpc': 'true'})

        if self.fixture_dtest_setup.enable_for_jolokia:
            remove_perf_disable_shared_mem(node1)

        node1.start(wait_for_binary_proto=True, wait_other_notice=True)

        sessions_and_meta = []
        if self.CL:
            session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version, consistency_level=self.CL, **kwargs)
        else:
            session = self.patient_exclusive_cql_connection(node1, protocol_version=self.protocol_version, **kwargs)
        session.set_keyspace('ks')

        if return_nodes:
            sessions_and_meta.append((True, session, node1))
        else:
            sessions_and_meta.append((True, session))

        # open a second session with the node on the old version
        if self.CL:
            session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version, consistency_level=self.CL, **kwargs)
        else:
            session = self.patient_exclusive_cql_connection(node2, protocol_version=self.protocol_version, **kwargs)
        session.set_keyspace('ks')

        if return_nodes:
            sessions_and_meta.append((False, session, node2))
        else:
            sessions_and_meta.append((False, session))

        # Let the nodes settle briefly before yielding connections in turn (on the upgraded and non-upgraded alike)
        # CASSANDRA-11396 was the impetus for this change, wherein some apparent perf noise was preventing
        # CL.ALL from being reached. The newly upgraded node needs to settle because it has just barely started, and each
        # non-upgraded node needs a chance to settle as well, because the entire cluster (or isolated nodes) may have been doing resource intensive activities
        # immediately before.
        for s in sessions_and_meta:
            time.sleep(5)
            yield s
    def upgrade_with_counters_test(self):
        cluster = self.cluster

        # Forcing cluster version on purpose
        cluster.set_install_dir(version="1.2.19")
        if "memtable_allocation_type" in cluster._config_options:
            cluster._config_options.__delitem__("memtable_allocation_type")
        cluster.populate(3).start()

        node1, node2, node3 = cluster.nodelist()

        # wait for the rpc server to start
        session = self.patient_exclusive_cql_connection(node1)

        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()

        ksdef = KsDef()
        ksdef.name = 'test'
        ksdef.strategy_class = 'SimpleStrategy'
        ksdef.strategy_options = {'replication_factor': '2'}
        ksdef.durable_writes = True
        ksdef.cf_defs = []

        client.system_add_keyspace(ksdef)
        client.set_keyspace('test')

        # create a super column family with UTF8 for all types except for the
        # values, which are counters
        cfdef = CfDef()
        cfdef.keyspace = 'test'
        cfdef.name = 'sc_test'
        cfdef.column_type = 'Super'
        cfdef.comparator_type = 'UTF8Type'
        cfdef.subcomparator_type = 'UTF8Type'
        cfdef.key_validation_class = 'UTF8Type'
        cfdef.default_validation_class = 'CounterColumnType'

        client.system_add_column_family(cfdef)

        session.cluster.control_connection.wait_for_schema_agreement()

        for i in range(2):
            supercol_name = 'sc%d' % i
            column_parent = ColumnParent(column_family='sc_test',
                                         super_column=supercol_name)
            for j in range(2):
                col_name = 'c%d' % j
                column = CounterColumn(name=col_name, value=1)
                for k in range(20):
                    client.add('Counter1', column_parent, column,
                               ThriftConsistencyLevel.ONE)

        # If we are on 2.1 or any higher version upgrade to 2.0.latest.
        # Otherwise, we must be on a 2.0.x, so we should be upgrading to that version.
        # This will let us test upgrading from 1.2.19 to each of the 2.0 minor releases.
        CASSANDRA_DIR = os.environ.get('CASSANDRA_DIR')
        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            # Upgrade nodes to 2.0.
            # See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node1])
            time.sleep(.5)
        else:
            node1.drain()
            node1.watch_log_for("DRAINED")
            node1.stop(wait_other_notice=False)
            self.set_node_to_current_version(node1)
            node1.start(wait_other_notice=True)

        # wait for the RPC server to start
        session = self.patient_exclusive_cql_connection(node1)

        for node in (node1, node2, node3):
            host, port = node.network_interfaces['thrift']
            client = get_thrift_client(host, port)
            client.transport.open()
            client.set_keyspace('test')
            for i in range(2):
                supercol_name = 'sc%d' % i
                column_parent = ColumnParent(column_family='sc_test',
                                             super_column=supercol_name)
                for j in range(2):
                    col_name = 'c%d' % j
                    column = CounterColumn(name=col_name, value=1)
                    for k in range(50):
                        client.add('Counter1', column_parent, column,
                                   ThriftConsistencyLevel.ONE)

            client.transport.close()

        if get_version_from_build(CASSANDRA_DIR) >= '2.1':
            # Upgrade nodes to 2.0.
            # See CASSANDRA-7008
            self.upgrade_to_version("git:cassandra-2.0", [node2, node3])
            time.sleep(.5)
        else:
            node2.drain()
            node3.drain()
            node2.watch_log_for("DRAINED")
            node3.watch_log_for("DRAINED")
            node2.stop(wait_other_notice=False)
            node3.stop(wait_other_notice=False)
            self.set_node_to_current_version(node2)
            self.set_node_to_current_version(node3)
            node2.start(wait_other_notice=True)
            node3.start(wait_other_notice=True)

        host, port = node1.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('test')

        column_parent = ColumnParent(column_family='sc_test')
        predicate = SlicePredicate(slice_range=SliceRange("", "", False, 100))
        super_columns = client.get_slice('Counter1', column_parent, predicate,
                                         ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(super_columns))
        for i in range(2):
            super_column = super_columns[i].counter_super_column
            self.assertEqual('sc%d' % i, super_column.name)
            self.assertEqual(2, len(super_column.columns))
            for j in range(2):
                column = super_column.columns[j]
                self.assertEqual('c%d' % j, column.name)
                self.assertEqual(170, column.value)

        # fetch a single supercolumn
        column_parent = ColumnParent(column_family='sc_test',
                                     super_column='sc1')
        columns = client.get_slice('Counter1', column_parent, predicate,
                                   ThriftConsistencyLevel.QUORUM)
        self.assertEqual(2, len(columns))
        for j in range(2):
            column = columns[j].counter_column
            self.assertEqual('c%d' % j, column.name)
            self.assertEqual(170, column.value)

        # fetch a single subcolumn
        predicate = SlicePredicate(column_names=['c1'])
        columns = client.get_slice('Counter1', column_parent, predicate,
                                   ThriftConsistencyLevel.QUORUM)
        self.assertEqual(1, len(columns))
        column = columns[0].counter_column
        self.assertEqual('c%d' % j, column.name)
        self.assertEqual(170, column.value)
    def do_upgrade(self,
                   session,
                   use_thrift=False,
                   return_nodes=False,
                   **kwargs):
        """
        Upgrades the first node in the cluster and yields
        (is_upgraded, Session) tuples. If `is_upgraded` is true, the
        Session is connected to the upgraded node. If `return_nodes`
        is True, (is_upgraded, Session, Node) tuples are yielded
        instead.
        """
        session.cluster.shutdown()
        node1 = self.cluster.nodelist()[0]
        node2 = self.cluster.nodelist()[1]

        # stop the nodes, this can fail due to https://issues.apache.org/jira/browse/CASSANDRA-8220 on MacOS
        # for the tests that run against 2.0. You will need to run those in Linux.
        node1.drain()
        node1.stop(gently=True)

        # Ignore errors before upgrade on Windows.
        # We ignore errors from 2.1 because Windows support on 2.1 is only
        # beta. There are frequent log errors related to filesystem
        # interactions that are a direct result of the lack of full
        # functionality on 2.1 Windows, and we don't want these to pollute
        # our results.
        if is_win() and self.cluster.version() <= '2.2':
            node1.mark_log_for_errors()

        logger.debug('upgrading node1 to {}'.format(
            self.UPGRADE_PATH.upgrade_version))
        switch_jdks(self.UPGRADE_PATH.upgrade_meta.java_version)

        node1.set_install_dir(version=self.UPGRADE_PATH.upgrade_version)

        # this is a bandaid; after refactoring, upgrades should account for protocol version
        new_version_from_build = get_version_from_build(
            node1.get_install_dir())

        # Check if a since annotation with a max_version was set on this test.
        # The since decorator can only check the starting version of the upgrade,
        # so here we check the new version of the upgrade as well.
        if hasattr(
                self, 'max_version'
        ) and self.max_version is not None and new_version_from_build >= self.max_version:
            pytest.skip(
                "Skipping test, new version {} is equal to or higher than "
                "max version {}".format(new_version_from_build,
                                        self.max_version))

        if (new_version_from_build >= '3' and self.protocol_version is not None
                and self.protocol_version < 3):
            pytest.skip('Protocol version {} incompatible '
                        'with Cassandra version {}'.format(
                            self.protocol_version, new_version_from_build))
        node1.set_log_level(logging.getLevelName(logging.root.level))
        node1.set_configuration_options(
            values={'internode_compression': 'none'})

        if use_thrift and node1.get_cassandra_version() < '4':
            node1.set_configuration_options(values={'start_rpc': 'true'})

        if self.fixture_dtest_setup.enable_for_jolokia:
            remove_perf_disable_shared_mem(node1)

        node1.start(wait_for_binary_proto=True, wait_other_notice=True)

        sessions_and_meta = []
        if self.CL:
            session = self.patient_exclusive_cql_connection(
                node1,
                protocol_version=self.protocol_version,
                consistency_level=self.CL,
                **kwargs)
        else:
            session = self.patient_exclusive_cql_connection(
                node1, protocol_version=self.protocol_version, **kwargs)
        session.set_keyspace('ks')

        if return_nodes:
            sessions_and_meta.append((True, session, node1))
        else:
            sessions_and_meta.append((True, session))

        # open a second session with the node on the old version
        if self.CL:
            session = self.patient_exclusive_cql_connection(
                node2,
                protocol_version=self.protocol_version,
                consistency_level=self.CL,
                **kwargs)
        else:
            session = self.patient_exclusive_cql_connection(
                node2, protocol_version=self.protocol_version, **kwargs)
        session.set_keyspace('ks')

        if return_nodes:
            sessions_and_meta.append((False, session, node2))
        else:
            sessions_and_meta.append((False, session))

        # Let the nodes settle briefly before yielding connections in turn (on the upgraded and non-upgraded alike)
        # CASSANDRA-11396 was the impetus for this change, wherein some apparent perf noise was preventing
        # CL.ALL from being reached. The newly upgraded node needs to settle because it has just barely started, and each
        # non-upgraded node needs a chance to settle as well, because the entire cluster (or isolated nodes) may have been doing resource intensive activities
        # immediately before.
        for s in sessions_and_meta:
            time.sleep(5)
            yield s
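A hedged sketch of how a test might consume the sessions yielded by do_upgrade; the schema and query are illustrative, and the 'ks' keyspace is assumed to have been created by a prepare() call like the ones shown earlier:

    def upgrade_read_smoke_test(self):
        session = self.prepare()
        session.execute("CREATE TABLE t (k int PRIMARY KEY, v int)")  # illustrative schema
        session.execute("INSERT INTO t (k, v) VALUES (0, 0)")

        for is_upgraded, session, node in self.do_upgrade(session, return_nodes=True):
            logger.debug("reading from %s (%s node)" % (node.name, "upgraded" if is_upgraded else "old"))
            rows = list(session.execute("SELECT v FROM t WHERE k = 0"))
            assert rows and rows[0].v == 0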
Example #50
0
 def run(self):
     print_(common.get_version_from_build(self.node.get_install_dir()))
Example #51
0
    def run(self, argv):
        parser = argparse.ArgumentParser(
            formatter_class=lambda prog: argparse.
            ArgumentDefaultsHelpFormatter(
                prog, max_help_position=100, width=200))

        # This is a bit ugly: all of our command line arguments are added and configured as part
        # of pytest. However, we also have this wrapper script to make it easier for those who
        # aren't comfortable calling pytest directly. To avoid duplicating code (e.g. having the
        # options defined in two separate places) we reuse the pytest_addoption hook from
        # conftest.py. Unfortunately, pytest wraps ArgumentParser, so first we add the options
        # to a pytest Parser, and then we pull all of those custom options out and add them to
        # the unwrapped ArgumentParser we want to use here inside of run_dtests.py.
        #
        # NOTE: if you're trying to add a command line argument by adding it here, you're doing
        # it wrong! Add it to conftest.py:pytest_addoption instead.
        pytest_parser = Parser()
        pytest_addoption(pytest_parser)

        # pull all of the custom options out of the pytest Parser we created and add them to our ArgumentParser instance
        pytest_custom_opts = pytest_parser._anonymous
        for opt in pytest_custom_opts.options:
            parser.add_argument(opt._long_opts[0],
                                action=opt._attrs['action'],
                                default=opt._attrs.get('default', None),
                                help=opt._attrs.get('help', None))

        parser.add_argument(
            "--dtest-enable-debug-logging",
            action="store_true",
            default=False,
            help=
            "Enable debug logging (for this script, pytest, and during execution "
            "of test functions)")
        parser.add_argument(
            "--dtest-print-tests-only",
            action="store_true",
            default=False,
            help=
            "Print list of all tests found eligible for execution given the provided options."
        )
        parser.add_argument(
            "--dtest-print-tests-output",
            action="store",
            default=False,
            help=
            "Path to file where the output of --dtest-print-tests-only should be written to"
        )
        parser.add_argument(
            "--pytest-options",
            action="store",
            default=None,
            help=
            "Additional command line arguments to proxy directly thru when invoking pytest."
        )
        parser.add_argument(
            "--dtest-tests",
            action="store",
            default=None,
            help=
            "Comma separated list of test files, test classes, or test methods to execute."
        )

        args = parser.parse_args()

        if not args.dtest_print_tests_only:
            if args.cassandra_dir is None and args.cassandra_version is None:
                raise Exception(
                    "Required dtest arguments were missing! You must provide either --cassandra-dir "
                    "or --cassandra-version. Refer to the documentation or invoke the help with --help."
                )

            # Either cassandra_version or cassandra_dir is defined, so figure out the version
            CASSANDRA_VERSION = args.cassandra_version or get_version_from_build(
                args.cassandra_dir)

            if args.use_off_heap_memtables and ("3.0" <= CASSANDRA_VERSION <
                                                "3.4"):
                raise Exception(
                    "The selected Cassandra version %s doesn't support the provided option "
                    "--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 "
                    "for details" % CASSANDRA_VERSION)

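        # Aside (not part of the original script): if CASSANDRA_VERSION is a plain string,
        # the "3.0" <= CASSANDRA_VERSION < "3.4" check above compares lexicographically,
        # so e.g. "3.10" sorts below "3.4" and would wrongly fall inside [3.0, 3.4);
        # LooseVersion comparisons avoid that:
        #     LooseVersion("3.0") <= LooseVersion("3.10") < LooseVersion("3.4")  # False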
        if args.dtest_enable_debug_logging:
            logging.root.setLevel(logging.DEBUG)
            logger.setLevel(logging.DEBUG)

        # Get dictionaries corresponding to each point in the configuration matrix
        # we want to run, then generate a config object for each of them.
        logger.debug(
            'Generating configurations from the following matrix:\n\t{}'.format(args))

        args_to_invoke_pytest = []

        for arg in argv:
            if arg.startswith("--pytest-options") or arg.startswith(
                    "--dtest-"):
                continue
            args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))

        if args.dtest_print_tests_only:
            args_to_invoke_pytest.append("'--collect-only'")

        if args.dtest_tests:
            for test in args.dtest_tests.split(","):
                args_to_invoke_pytest.append(
                    "'{test_name}'".format(test_name=test))

        original_raw_cmd_args = ", ".join(args_to_invoke_pytest)

        logger.debug("args to call with: [%s]" % original_raw_cmd_args)

        # the original run_dtests.py script did it like this to hack around nosetest
        # limitations -- i'm not sure whether those still apply in a pytest world,
        # but for now it's left as is because it does the job (although it's certainly
        # still pretty complicated code and has a hacky feel to it)
        to_execute = ("import pytest\n" +
                      ("pytest.main([{options}])\n").format(
                          options=original_raw_cmd_args))
        temp = NamedTemporaryFile(dir=getcwd())
        logger.debug('Writing the following to {}:'.format(temp.name))

        logger.debug('```\n{to_execute}```\n'.format(to_execute=to_execute))
        temp.write(to_execute.encode("utf-8"))
        temp.flush()
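        # Aside (not part of the original script): pytest.main() can also be called
        # in-process with a list of arguments and returns an exit code, e.g.:
        #     import pytest
        #     exit_code = pytest.main(["--collect-only", "some_test.py"])
        # The temp-file + subprocess indirection above is kept from the old nose-based flow.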

        # We pass nose_argv as options to the python call to maintain
        # compatibility with the nosetests command. Arguments passed in via the
        # command line are treated one way, args passed in as
        # nose.main(argv=...) are treated another. Compare with the options
        # -xsv for an example.
        cmd_list = [sys.executable, temp.name]
        logger.debug(
            'subprocess.call-ing {cmd_list}'.format(cmd_list=cmd_list))

        sp = subprocess.Popen(cmd_list,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              env=os.environ.copy())

        if args.dtest_print_tests_only:
            stdout, stderr = sp.communicate()

            if stderr:
                print(stderr.decode("utf-8"))
                result = sp.returncode
                exit(result)

            all_collected_test_modules = collect_test_modules(stdout)
            joined_test_modules = "\n".join(all_collected_test_modules)
            #print("Collected %d Test Modules" % len(all_collected_test_modules))
            # --dtest-print-tests-output defaults to False, so check truthiness rather
            # than "is not None" before treating the value as a file path.
            if args.dtest_print_tests_output:
                with open(args.dtest_print_tests_output, "w") as collected_tests_output_file:
                    collected_tests_output_file.write(joined_test_modules)

            print(joined_test_modules)
        else:
            while True:
                stdout_output = sp.stdout.readline()
                stdout_output_str = stdout_output.decode("utf-8")
                if stdout_output_str == '' and sp.poll() is not None:
                    break
                if stdout_output_str:
                    print(stdout_output_str.strip())

                stderr_output = sp.stderr.readline()
                stderr_output_str = stderr_output.decode("utf-8")
                if stderr_output_str == '' and sp.poll() is not None:
                    break
                if stderr_output_str:
                    print(stderr_output_str.strip())

        exit(sp.returncode)
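Based only on the options handled above, a typical command-line invocation of this wrapper might look like the following (the Cassandra path and test names are placeholders):

python run_dtests.py --cassandra-dir=/path/to/cassandra --dtest-print-tests-only
python run_dtests.py --cassandra-dir=/path/to/cassandra --dtest-tests=some_test.py,another_test.py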