Example #1
def compile_version_elassandra(version, target_dir, verbose=False, elassandra_version=None):
    assert_jdk_valid_for_cassandra_version(get_version_from_build(target_dir))

    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Elassandra {} ...".format(version))
    logger.info("--- Elassandra Build -------------------\n")
    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        if elassandra_version is None:
            logger.info("elassandra version is not set, trying with 2.4.2")
            elassandra_version = "2.4.2"
        targz_file = "distribution/tar/target/releases/elassandra-%s.tar.gz" % elassandra_version
        target_install_dir = os.path.join(target_dir, "elassandra-%s" % elassandra_version)
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`mvn package -DskipTests` failed. Retry #%s...\n\n" % attempt)
            process = subprocess.Popen([platform_binary('mvn'), 'package', '-DskipTests'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val == 0:
                process = subprocess.Popen([platform_binary('tar'), '-xzf', targz_file], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError('Error compiling Elassandra. See {logfile} or run '
                           '"ccm showlastlog" for details'.format(logfile=logfile))
    except OSError as e:
        raise CCMError("Error compiling Elassandra. Is maven installed? See %s for details" % logfile)
Example #2
def compile_version(version, target_dir, verbose=False):
    assert_jdk_valid_for_cassandra_version(get_version_from_build(target_dir))

    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Cassandra {} ...".format(version))
    logger.info("--- Cassandra Build -------------------\n")

    default_build_properties = os.path.join(common.get_default_path(), 'build.properties.default')
    if os.path.exists(default_build_properties):
        target_build_properties = os.path.join(target_dir, 'build.properties')
        logger.info("Copying %s to %s\n" % (default_build_properties, target_build_properties))
        shutil.copyfile(default_build_properties, target_build_properties)

    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`ant jar` failed. Retry #%s...\n\n" % attempt)
            process = subprocess.Popen([platform_binary('ant'), 'jar'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError('Error compiling Cassandra. See {logfile} or run '
                           '"ccm showlastlog" for details'.format(logfile=logfile))
    except OSError as e:
        raise CCMError("Error compiling Cassandra. Is ant installed? See %s for details" % logfile)

    logger.info("\n\n--- cassandra/stress build ------------\n")
    stress_dir = os.path.join(target_dir, "tools", "stress") if (
        version >= "0.8.0") else \
        os.path.join(target_dir, "contrib", "stress")

    build_xml = os.path.join(stress_dir, 'build.xml')
    if os.path.exists(build_xml):  # building stress separately is only necessary pre-1.1
        try:
            # set permissions correctly, seems to not always be the case
            stress_bin_dir = os.path.join(stress_dir, 'bin')
            for f in os.listdir(stress_bin_dir):
                full_path = os.path.join(stress_bin_dir, f)
                os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

            process = subprocess.Popen([platform_binary('ant'), 'build'], cwd=stress_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val != 0:
                process = subprocess.Popen([platform_binary('ant'), 'stress-build'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
                if ret_val != 0:
                    raise CCMError("Error compiling Cassandra stress tool.  "
                                   "See %s for details (you will still be able to use ccm "
                                   "but not the stress related commands)" % logfile)
        except IOError as e:
            raise CCMError("Error compiling Cassandra stress tool: %s (you will "
                           "still be able to use ccm but not the stress related commands)" % str(e))
Example #3
def compile_version(version, target_dir, verbose=False):
    assert_jdk_valid_for_cassandra_version(get_version_from_build(target_dir))

    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Cassandra {} ...".format(version))
    logger.info("--- Cassandra Build -------------------\n")
    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`ant jar` failed. Retry #%s...\n\n" % attempt)
            process = subprocess.Popen([platform_binary('ant'), 'jar'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError('Error compiling Cassandra. See {logfile} or run '
                           '"ccm showlastlog" for details'.format(logfile=logfile))
    except OSError as e:
        raise CCMError("Error compiling Cassandra. Is ant installed? See %s for details" % logfile)

    logger.info("\n\n--- cassandra/stress build ------------\n")
    stress_dir = os.path.join(target_dir, "tools", "stress") if (
        version >= "0.8.0") else \
        os.path.join(target_dir, "contrib", "stress")

    build_xml = os.path.join(stress_dir, 'build.xml')
    if os.path.exists(build_xml):  # building stress separately is only necessary pre-1.1
        try:
            # set permissions correctly, seems to not always be the case
            stress_bin_dir = os.path.join(stress_dir, 'bin')
            for f in os.listdir(stress_bin_dir):
                full_path = os.path.join(stress_bin_dir, f)
                os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

            process = subprocess.Popen([platform_binary('ant'), 'build'], cwd=stress_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val != 0:
                process = subprocess.Popen([platform_binary('ant'), 'stress-build'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
                if ret_val != 0:
                    raise CCMError("Error compiling Cassandra stress tool.  "
                                   "See %s for details (you will still be able to use ccm "
                                   "but not the stress related commands)" % logfile)
        except IOError as e:
            raise CCMError("Error compiling Cassandra stress tool: %s (you will "
                           "still be able to use ccm but not the stress related commands)" % str(e))
Example #4
 def _start_agent(self):
     agent_dir = os.path.join(self.get_path(), 'datastax-agent')
     if os.path.exists(agent_dir):
         self._write_agent_address_yaml(agent_dir)
         self._write_agent_log4j_properties(agent_dir)
         args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
         subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Example #5
 def run_cli(self, cmds=None, show_output=False, cli_options=[]):
     cdir = self.get_cassandra_dir()
     cli = common.join_bin(cdir, 'bin', 'cassandra-cli')
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces['thrift'][0]
     port = self.network_interfaces['thrift'][1]
     args = [ '-h', host, '-p', str(port), '--jmxport', str(self.jmx_port) ] + cli_options
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, [ common.platform_binary('cassandra-cli') ] + args, env)
     else:
         p = subprocess.Popen([ cli ] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
         for cmd in cmds.split(';'):
             p.stdin.write(cmd + ';\n')
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print_("(EE) ", err, end='')
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print_(log, end='')
                 i = i + 1
Example #6
 def run_cqlsh(self, cmds=None, show_output=False, cqlsh_options=[]):
     cdir = self.get_cassandra_dir()
     cli = common.join_bin(cdir, "bin", "cqlsh")
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces["thrift"][0]
     if self.cluster.version() >= "2.1":
         port = self.network_interfaces["binary"][1]
     else:
         port = self.network_interfaces["thrift"][1]
     args = cqlsh_options + [host, str(port)]
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, [common.platform_binary("cqlsh")] + args, env)
     else:
         p = subprocess.Popen(
             [cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE
         )
         for cmd in cmds.split(";"):
             p.stdin.write(cmd + ";\n")
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print_("(EE) ", err, end="")
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print_(log, end="")
                 i = i + 1
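A hypothetical call site for a method like this (the node object, keyspace and table names are assumptions for illustration):

node.run_cqlsh(cmds="USE ks; SELECT * FROM standard1 LIMIT 5", show_output=True)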
Example #7
 def _start_agent(self):
     agent_dir = os.path.join(self.get_path(), 'datastax-agent')
     if os.path.exists(agent_dir):
         self._write_agent_address_yaml(agent_dir)
         self._write_agent_log4j_properties(agent_dir)
         args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
         subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Example #8
 def load_sstables_from_another_node(self, cluster, node_from, node_to, ks):
     cdir = node_to.get_install_dir()
     sstableloader = os.path.join(
         cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
     env = ccmcommon.make_cassandra_env(cdir, node_to.get_path())
     host = node_to.address()
     ret = []
     for x in range(cluster.data_dir_count):
         sstable_dir = os.path.join(node_from.get_path(), 'data' + str(x),
                                    ks.strip('"'))
         for cf_dir in os.listdir(sstable_dir):
             full_cf_dir = os.path.join(sstable_dir, cf_dir)
             if os.path.isdir(full_cf_dir):
                 cmd_args = [
                     sstableloader, '--verbose', '--nodes', host,
                     full_cf_dir
                 ]
                 p = subprocess.Popen(cmd_args,
                                      stderr=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      env=env)
                 stdout, stderr = p.communicate()
                 exit_status = p.returncode
                 ret.append((exit_status, stdout.decode("utf-8"),
                             stderr.decode("utf-8")))
     return ret
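Because this helper returns one (exit_status, stdout, stderr) tuple per column family directory it streamed, a caller could check the results along these lines (the cluster and node fixtures are assumptions):

results = self.load_sstables_from_another_node(cluster, node1, node2, 'ks')
for exit_status, stdout, stderr in results:
    assert exit_status == 0, "sstableloader failed: {}".format(stderr)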
Example #9
 def run_cqlsh(self, cmds=None, show_output=False, cqlsh_options=[]):
     cdir = self.get_cassandra_dir()
     cli = common.join_bin(cdir, 'bin', 'cqlsh')
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces['thrift'][0]
     if self.cluster.version() >= "2.1":
         port = self.network_interfaces['binary'][1]
     else:
         port = self.network_interfaces['thrift'][1]
     args = cqlsh_options + [ host, str(port) ]
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, [ common.platform_binary('cqlsh') ] + args, env)
     else:
         p = subprocess.Popen([ cli ] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
         for cmd in cmds.split(';'):
             p.stdin.write(cmd + ';\n')
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print_("(EE) ", err, end='')
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print_(log, end='')
                 i = i + 1
Example #10
 def load_sstables(self, cluster, node, ks):
     cdir = node.get_install_dir()
     sstableloader = os.path.join(
         cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
     env = ccmcommon.make_cassandra_env(cdir, node.get_path())
     host = node.address()
     for x in xrange(0, cluster.data_dir_count):
         sstablecopy_dir = os.path.join(node.get_path(),
                                        'data{0}_copy'.format(x),
                                        ks.strip('"'))
         for cf_dir in os.listdir(sstablecopy_dir):
             full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
             if os.path.isdir(full_cf_dir):
                 cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                 p = subprocess.Popen(cmd_args,
                                      stderr=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      env=env)
                 # read the output before checking the exit code so it can be logged
                 stdout, stderr = p.communicate()
                 exit_status = p.returncode
                 debug('stdout: {out}'.format(out=stdout.decode("utf-8")))
                 debug('stderr: {err}'.format(err=stderr.decode("utf-8")))
                 self.assertEqual(
                     0, exit_status,
                     "sstableloader exited with a non-zero status: {}".
                     format(exit_status))
Example #11
def compile_version(version, target_dir, verbose=False):
    # compiling cassandra and the stress tool
    logfile = os.path.join(__get_dir(), "last.log")
    if verbose:
        print_("Compiling Cassandra %s ..." % version)
    with open(logfile, 'w') as lf:
        lf.write("--- Cassandra Build -------------------\n")
        try:
            # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
            # Similar patch seen with buildbot
            attempt = 0
            ret_val = 1
            while attempt < 3 and ret_val != 0:
                if attempt > 0:
                    lf.write("\n\n`ant jar` failed. Retry #%s...\n\n" % attempt)
                ret_val = subprocess.call([platform_binary('ant'),'jar'], cwd=target_dir, stdout=lf, stderr=lf)
                attempt += 1
            if ret_val != 0:
                raise CCMError("Error compiling Cassandra. See %s for details" % logfile)
        except OSError as e:
            raise CCMError("Error compiling Cassandra. Is ant installed? See %s for details" % logfile)

        lf.write("\n\n--- cassandra/stress build ------------\n")
        stress_dir = os.path.join(target_dir, "tools", "stress") if (
                version >= "0.8.0") else \
                os.path.join(target_dir, "contrib", "stress")

        build_xml = os.path.join(stress_dir, 'build.xml')
        if os.path.exists(build_xml): # building stress separately is only necessary pre-1.1
            try:
                # set permissions correctly, seems to not always be the case
                stress_bin_dir = os.path.join(stress_dir, 'bin')
                for f in os.listdir(stress_bin_dir):
                    full_path = os.path.join(stress_bin_dir, f)
                    os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

                if subprocess.call([platform_binary('ant'), 'build'], cwd=stress_dir, stdout=lf, stderr=lf) != 0:
                    if subprocess.call([platform_binary('ant'), 'stress-build'], cwd=target_dir, stdout=lf, stderr=lf) != 0:
                        raise CCMError("Error compiling Cassandra stress tool.  "
                                "See %s for details (you will still be able to use ccm "
                                "but not the stress related commands)" % logfile)
            except IOError as e:
                raise CCMError("Error compiling Cassandra stress tool: %s (you will "
                "still be able to use ccm but not the stress related commands)" % str(e))
Example #12
 def start_opscenter(self):
     if self.hasOpscenter():
         self.write_opscenter_cluster_config()
         args = [
             os.path.join(self.get_path(), 'opscenter', 'bin',
                          common.platform_binary('opscenter'))
         ]
         subprocess.Popen(args,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
Example #13
    def load(self, options):
        for itf in self.network_interfaces.values():
            if itf:
                common.check_socket_available(itf)

        cdir = self.get_cassandra_dir()
        loader_bin = common.join_bin(cdir, 'bin', 'sstableloader')
        env = common.make_cassandra_env(cdir, self.get_path())
        if not "-d" in options:
            l = [ node.network_interfaces['storage'][0] for node in self.cluster.nodes.values() if node.is_live() ]
            options = [ "-d",  ",".join(l) ] + options
        #print "Executing with", options
        os.execve(loader_bin, [ common.platform_binary('sstableloader') ] + options, env)
Example #14
    def load(self, options):
        for itf in self.network_interfaces.values():
            if itf:
                common.check_socket_available(itf)

        cdir = self.get_cassandra_dir()
        loader_bin = common.join_bin(cdir, "bin", "sstableloader")
        env = common.make_cassandra_env(cdir, self.get_path())
        if not "-d" in options:
            l = [node.network_interfaces["storage"][0] for node in self.cluster.nodes.values() if node.is_live()]
            options = ["-d", ",".join(l)] + options
        # print "Executing with", options
        os.execve(loader_bin, [common.platform_binary("sstableloader")] + options, env)
Example #15
 def load_sstables(self, cluster, node, ks):
     cdir = node.get_install_dir()
     sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
     env = ccmcommon.make_cassandra_env(cdir, node.get_path())
     host = node.address()
     for x in xrange(0, cluster.data_dir_count):
         sstablecopy_dir = os.path.join(node.get_path(), 'data{0}_copy'.format(x), ks.strip('"'))
         for cf_dir in os.listdir(sstablecopy_dir):
             full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
             if os.path.isdir(full_cf_dir):
                 cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                 p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
                 # read the output before checking the exit code so it can be logged
                 stdout, stderr = p.communicate()
                 exit_status = p.returncode
                 debug('stdout: {out}'.format(out=stdout.decode("utf-8")))
                 debug('stderr: {err}'.format(err=stderr.decode("utf-8")))
                 self.assertEqual(0, exit_status,
                                  "sstableloader exited with a non-zero status: {}".format(exit_status))
Example #16
 def run_cqlsh(self, node, cmds, cqlsh_options=[]):
     cdir = node.get_install_dir()
     cli = os.path.join(cdir, 'bin', common.platform_binary('cqlsh'))
     env = common.make_cassandra_env(cdir, node.get_path())
     env['LANG'] = 'en_US.UTF-8'
     if LooseVersion(self.cluster.version()) >= LooseVersion('2.1'):
         host = node.network_interfaces['binary'][0]
         port = node.network_interfaces['binary'][1]
     else:
         host = node.network_interfaces['thrift'][0]
         port = node.network_interfaces['thrift'][1]
     args = cqlsh_options + [ host, str(port) ]
     sys.stdout.flush()
     p = subprocess.Popen([ cli ] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
     for cmd in cmds.split(';'):
         p.stdin.write(cmd + ';\n')
     p.stdin.write("quit;\n")
     return p.communicate()
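This variant returns the raw (stdout, stderr) pair from communicate(), so a test might consume it roughly like this (the node fixture and the query are illustrative assumptions):

stdout, stderr = self.run_cqlsh(node1, "SELECT release_version FROM system.local")
if stderr:
    print_("cqlsh reported errors: {}".format(stderr))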
Example #17
 def run_cqlsh(self, node, cmds, cqlsh_options=[]):
     cdir = node.get_install_dir()
     cli = os.path.join(cdir, 'bin', common.platform_binary('cqlsh'))
     env = common.make_cassandra_env(cdir, node.get_path())
     env['LANG'] = 'en_US.UTF-8'
     if LooseVersion(self.cluster.version()) >= LooseVersion('2.1'):
         host = node.network_interfaces['binary'][0]
         port = node.network_interfaces['binary'][1]
     else:
         host = node.network_interfaces['thrift'][0]
         port = node.network_interfaces['thrift'][1]
     args = cqlsh_options + [host, str(port)]
     sys.stdout.flush()
     p = subprocess.Popen([cli] + args,
                          env=env,
                          stdin=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          stdout=subprocess.PIPE)
     for cmd in cmds.split(';'):
         p.stdin.write(cmd + ';\n')
     p.stdin.write("quit;\n")
     return p.communicate()
Example #18
def clone_development(version, verbose=False):
    local_git_cache = os.path.join(__get_dir(), '_git_cache')
    target_dir = os.path.join(__get_dir(), version.replace(':', '_')) # handle git branches like 'git:trunk'.
    git_branch = version[4:] # the part of the version after the 'git:'
    logfile = os.path.join(__get_dir(), "last.log")
    with open(logfile, 'w') as lf:
        try:
            #Checkout/fetch a local repository cache to reduce the number of
            #remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                if verbose:
                    print_("Cloning Cassandra...")
                out = subprocess.call(
                    ['git', 'clone', '--mirror', GIT_REPO, local_git_cache],
                    cwd=__get_dir(), stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git clone"
            else:
                if verbose:
                    print_("Fetching Cassandra updates...")
                out = subprocess.call(
                    ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                    cwd=local_git_cache, stdout=lf, stderr=lf)

            #Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                if verbose:
                    print_("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(['git', 'clone', local_split[-1], target_split[-1]], cwd=__get_dir(), stdout=lf, stderr=lf)
                else:
                    subprocess.call(['git', 'clone', local_git_cache, target_dir], cwd=__get_dir(), stdout=lf, stderr=lf)

                # now check out the right version
                if verbose:
                    print_("Checking out requested branch (%s)" % git_branch)
                out = subprocess.call(['git', 'checkout', git_branch], cwd=target_dir, stdout=lf, stderr=lf)
                if int(out) != 0:
                    raise CCMError("Could not check out git branch %s. Is this a valid branch name? (see last.log for details)" % git_branch)
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else: # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(['git', 'fetch', 'origin'], cwd=target_dir, stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(['git', 'status', '-sb'], cwd=target_dir, stdout=subprocess.PIPE, stderr=lf).communicate()[0]
                if status.find('[behind') > -1:
                    if verbose:
                        print_("Branch is behind, recompiling")
                    out = subprocess.call(['git', 'pull'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call([platform_binary('ant'), 'realclean'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                shutil.rmtree(target_dir)
            except: pass
            raise
Example #19
 def start_opscenter(self):
     if self.hasOpscenter():
         self.write_opscenter_cluster_config()
         args = [os.path.join(self.get_path(), 'opscenter', 'bin', common.platform_binary('opscenter'))]
         subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
Example #20
 def scrub(self, options):
     cdir = self.get_cassandra_dir()
     scrub_bin = common.join_bin(cdir, "bin", "sstablescrub")
     env = common.make_cassandra_env(cdir, self.get_path())
     os.execve(scrub_bin, [common.platform_binary("sstablescrub")] + options, env)
Example #21
def clone_development(git_repo, version, verbose=False):
    local_git_cache = os.path.join(__get_dir(), '_git_cache')
    target_dir = os.path.join(__get_dir(), version.replace(
        ':', '_'))  # handle git branches like 'git:trunk'.
    git_branch = version[4:]  # the part of the version after the 'git:'
    logfile = os.path.join(__get_dir(), "last.log")
    with open(logfile, 'w') as lf:
        try:
            #Checkout/fetch a local repository cache to reduce the number of
            #remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                if verbose:
                    print_("Cloning Cassandra...")
                out = subprocess.call(
                    ['git', 'clone', '--mirror', git_repo, local_git_cache],
                    cwd=__get_dir(),
                    stdout=lf,
                    stderr=lf)
                assert out == 0, "Could not do a git clone"
            else:
                if verbose:
                    print_("Fetching Cassandra updates...")
                out = subprocess.call(
                    ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                    cwd=local_git_cache,
                    stdout=lf,
                    stderr=lf)

            #Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                if verbose:
                    print_("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(
                        ['git', 'clone', local_split[-1], target_split[-1]],
                        cwd=__get_dir(),
                        stdout=lf,
                        stderr=lf)
                else:
                    subprocess.call(
                        ['git', 'clone', local_git_cache, target_dir],
                        cwd=__get_dir(),
                        stdout=lf,
                        stderr=lf)

                # now check out the right version
                if verbose:
                    print_("Checking out requested branch (%s)" % git_branch)
                out = subprocess.call(['git', 'checkout', git_branch],
                                      cwd=target_dir,
                                      stdout=lf,
                                      stderr=lf)
                if int(out) != 0:
                    raise CCMError(
                        "Could not check out git branch %s. Is this a valid branch name? (see last.log for details)"
                        % git_branch)
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else:  # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(['git', 'fetch', 'origin'],
                                      cwd=target_dir,
                                      stdout=lf,
                                      stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(['git', 'status', '-sb'],
                                          cwd=target_dir,
                                          stdout=subprocess.PIPE,
                                          stderr=lf).communicate()[0]
                if str(status).find('[behind') > -1:
                    if verbose:
                        print_("Branch is behind, recompiling")
                    out = subprocess.call(['git', 'pull'],
                                          cwd=target_dir,
                                          stdout=lf,
                                          stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call(
                        [platform_binary('ant'), 'realclean'],
                        cwd=target_dir,
                        stdout=lf,
                        stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                shutil.rmtree(target_dir)
                print_("Deleted %s due to error" % target_dir)
            except:
                raise CCMError(
                    "Building C* version %s failed. Attempted to delete %s but failed. This will need to be manually deleted"
                    % (version, target_dir))
            raise
Example #22
def clone_development(git_repo, version, verbose=False):
    print_(git_repo, version)
    target_dir = directory_name(version)
    assert target_dir
    if "github" in version:
        git_repo_name, git_branch = github_username_and_branch_name(version)
    else:
        git_repo_name = "apache"
        git_branch = version.split(":", 1)[1]
    local_git_cache = os.path.join(__get_dir(), "_git_cache_" + git_repo_name)
    logfile = lastlogfilename()
    with open(logfile, "w") as lf:
        try:
            # Checkout/fetch a local repository cache to reduce the number of
            # remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                if verbose:
                    print_("Cloning Cassandra...")
                out = subprocess.call(
                    ["git", "clone", "--mirror", git_repo, local_git_cache], cwd=__get_dir(), stdout=lf, stderr=lf
                )
                assert out == 0, "Could not do a git clone"
            else:
                if verbose:
                    print_("Fetching Cassandra updates...")
                out = subprocess.call(
                    ["git", "fetch", "-fup", "origin", "+refs/*:refs/*"], cwd=local_git_cache, stdout=lf, stderr=lf
                )

            # Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                if verbose:
                    print_("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(
                        ["git", "clone", local_split[-1], target_split[-1]], cwd=__get_dir(), stdout=lf, stderr=lf
                    )
                else:
                    subprocess.call(
                        ["git", "clone", local_git_cache, target_dir], cwd=__get_dir(), stdout=lf, stderr=lf
                    )

                # determine if the request is for a branch
                is_branch = False
                try:
                    branch_listing = subprocess.check_output(["git", "branch", "--all"], cwd=target_dir).decode("utf-8")
                    branches = [b.strip() for b in branch_listing.replace("remotes/origin/", "").split()]
                    is_branch = git_branch in branches
                except subprocess.CalledProcessError as cpe:
                    print_("Error Running Branch Filter: {}\nAssumming request is not for a branch".format(cpe.output))

                # now check out the right version
                if verbose:
                    branch_or_sha_tag = "branch" if is_branch else "SHA/tag"
                    print_("Checking out requested {} ({})".format(branch_or_sha_tag, git_branch))
                if is_branch:
                    # we use checkout -B with --track so we can specify that we want to track a specific branch
                    # otherwise, you get errors on branch names that are also valid SHAs or SHA shortcuts, like 10360
                    # we use -B instead of -b so we reset branches that already exist and create a new one otherwise
                    out = subprocess.call(
                        [
                            "git",
                            "checkout",
                            "-B",
                            git_branch,
                            "--track",
                            "origin/{git_branch}".format(git_branch=git_branch),
                        ],
                        cwd=target_dir,
                        stdout=lf,
                        stderr=lf,
                    )
                else:
                    out = subprocess.call(["git", "checkout", git_branch], cwd=target_dir, stdout=lf, stderr=lf)
                if int(out) != 0:
                    raise CCMError(
                        "Could not check out git branch {branch}. "
                        "Is this a valid branch name? (see {lastlog} or run "
                        '"ccm showlastlog" for details)'.format(branch=git_branch, lastlog=logfile)
                    )
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else:  # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(["git", "fetch", "origin"], cwd=target_dir, stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(
                    ["git", "status", "-sb"], cwd=target_dir, stdout=subprocess.PIPE, stderr=lf
                ).communicate()[0]
                if str(status).find("[behind") > -1:
                    if verbose:
                        print_("Branch is behind, recompiling")
                    out = subprocess.call(["git", "pull"], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call([platform_binary("ant"), "realclean"], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                rmdirs(target_dir)
                print_("Deleted %s due to error" % target_dir)
            except:
                raise CCMError(
                    "Building C* version %s failed. Attempted to delete %s but failed. This will need to be manually deleted"
                    % (version, target_dir)
                )
            raise
Example #23
def clone_development(git_repo, version, verbose=False):
    print_(git_repo, version)
    target_dir = directory_name(version)
    assert target_dir
    if 'github' in version:
        git_repo_name, git_branch = github_username_and_branch_name(version)
    else:
        git_repo_name = 'apache'
        git_branch = version.split(':', 1)[1]
    local_git_cache = os.path.join(__get_dir(), '_git_cache_' + git_repo_name)
    logfile = os.path.join(__get_dir(), "last.log")
    with open(logfile, 'w') as lf:
        try:
            #Checkout/fetch a local repository cache to reduce the number of
            #remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                if verbose:
                    print_("Cloning Cassandra...")
                out = subprocess.call(
                    ['git', 'clone', '--mirror', git_repo, local_git_cache],
                    cwd=__get_dir(), stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git clone"
            else:
                if verbose:
                    print_("Fetching Cassandra updates...")
                out = subprocess.call(
                    ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                    cwd=local_git_cache, stdout=lf, stderr=lf)

            #Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                if verbose:
                    print_("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(['git', 'clone', local_split[-1], target_split[-1]], cwd=__get_dir(), stdout=lf, stderr=lf)
                else:
                    subprocess.call(['git', 'clone', local_git_cache, target_dir], cwd=__get_dir(), stdout=lf, stderr=lf)

                # now check out the right version
                if verbose:
                    print_("Checking out requested branch (%s)" % git_branch)
                out = subprocess.call(['git', 'checkout', git_branch], cwd=target_dir, stdout=lf, stderr=lf)
                if int(out) != 0:
                    raise CCMError("Could not check out git branch %s. Is this a valid branch name? (see last.log for details)" % git_branch)
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else: # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(['git', 'fetch', 'origin'], cwd=target_dir, stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(['git', 'status', '-sb'], cwd=target_dir, stdout=subprocess.PIPE, stderr=lf).communicate()[0]
                if str(status).find('[behind') > -1:
                    if verbose:
                        print_("Branch is behind, recompiling")
                    out = subprocess.call(['git', 'pull'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call([platform_binary('ant'), 'realclean'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                rmdirs(target_dir)
                print_("Deleted %s due to error" % target_dir)
            except:
                raise CCMError("Building C* version %s failed. Attempted to delete %s but failed. This will need to be manually deleted" % (version, target_dir))
            raise
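For context, the version argument these clone_development variants expect is a slug such as 'git:trunk' (the text after 'git:' is taken as the branch) or a github-style slug handled by github_username_and_branch_name. A hypothetical invocation, with an illustrative repository URL:

clone_development('https://github.com/apache/cassandra.git', 'git:trunk', verbose=True)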
Example #24
def clone_development(git_repo, version, verbose=False, alias=False, elassandra_version=None):
    print_(git_repo, version)
    target_dir = directory_name(version)
    assert target_dir
    if 'github' in version:
        git_repo_name, git_branch = github_username_and_branch_name(version)
    elif 'local:' in version:
        git_repo_name = 'local_{}'.format(git_repo)  # add git repo location to distinguish cache location for differing repos
        git_branch = version.split(':')[-1]  # last token on 'local:...' slugs should always be branch name
    elif alias:
        git_repo_name = 'alias_{}'.format(version.split('/')[0].split(':')[-1])
        git_branch = version.split('/')[-1]
    else:
        git_repo_name = 'strapdata'
        git_branch = version.split(':', 1)[1]
    local_git_cache = os.path.join(__get_dir(), '_git_cache_' + git_repo_name)

    logfile = lastlogfilename()
    logger = get_logger(logfile)

    try:
        # Checkout/fetch a local repository cache to reduce the number of
        # remote fetches we need to perform:
        if not os.path.exists(local_git_cache):
            common.info("Cloning Elassandra...")
            process = subprocess.Popen(
                ['git', 'clone', '--mirror', git_repo, local_git_cache],
                cwd=__get_dir(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, _, _ = log_info(process, logger)
            assert out == 0, "Could not do a git clone"
        else:
            common.info("Fetching Elassandra updates...")
            process = subprocess.Popen(
                ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                cwd=local_git_cache, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, _, _ = log_info(process, logger)
            assert out == 0, "Could not update git"

        # Checkout the version we want from the local cache:
        if not os.path.exists(target_dir):
            # development branch doesn't exist. Check it out.
            common.info("Cloning Elassandra (from local cache)")

            # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
            if sys.platform == "cygwin":
                local_split = local_git_cache.split(os.sep)
                target_split = target_dir.split(os.sep)
                process = subprocess.Popen(
                    ['git', 'clone', local_split[-1], target_split[-1]],
                    cwd=__get_dir(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, _, _ = log_info(process, logger)
                assert out == 0, "Could not do a git clone"
            else:
                process = subprocess.Popen(
                    ['git', 'clone', local_git_cache, target_dir],
                    cwd=__get_dir(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, _, _ = log_info(process, logger)
                assert out == 0, "Could not do a git clone"

            # determine if the request is for a branch
            is_branch = False
            try:
                branch_listing = subprocess.check_output(['git', 'branch', '--all'], cwd=target_dir).decode('utf-8')
                branches = [b.strip() for b in branch_listing.replace('remotes/origin/', '').split()]
                is_branch = git_branch in branches
            except subprocess.CalledProcessError as cpe:
                common.error("Error Running Branch Filter: {}\nAssumming request is not for a branch".format(cpe.output))

            # now check out the right version
            branch_or_sha_tag = 'branch' if is_branch else 'SHA/tag'
            common.info("Checking out requested {} ({})".format(branch_or_sha_tag, git_branch))
            if is_branch:
                # we use checkout -B with --track so we can specify that we want to track a specific branch
                # otherwise, you get errors on branch names that are also valid SHAs or SHA shortcuts, like 10360
                # we use -B instead of -b so we reset branches that already exist and create a new one otherwise
                process = subprocess.Popen(['git', 'checkout', '-B', git_branch,
                                            '--track', 'origin/{git_branch}'.format(git_branch=git_branch)],
                                           cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, _, _ = log_info(process, logger)
            else:
                process = subprocess.Popen(
                    ['git', 'checkout', git_branch],
                    cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, _, _ = log_info(process, logger)
            if int(out) != 0:
                raise CCMError('Could not check out git branch {branch}. '
                               'Is this a valid branch name? (see {lastlog} or run '
                               '"ccm showlastlog" for details)'.format(
                                   branch=git_branch, lastlog=logfile
                               ))
            # now compile
            compile_version(git_branch, target_dir, verbose, elassandra_version=elassandra_version)
        else:  # branch is already checked out. See if it is behind and recompile if needed.
            process = subprocess.Popen(
                ['git', 'fetch', 'origin'],
                cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, _, _ = log_info(process, logger)
            assert out == 0, "Could not do a git fetch"
            process = subprocess.Popen(['git', 'status', '-sb'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, status, _ = log_info(process, logger)
            if str(status).find('[behind') > -1:  # If `status` looks like '## cassandra-2.2...origin/cassandra-2.2 [behind 9]\n'
                common.info("Branch is behind, recompiling")
                process = subprocess.Popen(['git', 'pull'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, _, _ = log_info(process, logger)
                assert out == 0, "Could not do a git pull"
                process = subprocess.Popen([platform_binary('ant'), 'realclean'], cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, _, _ = log_info(process, logger)
                assert out == 0, "Could not run 'ant realclean'"

                # now compile
                compile_version(git_branch, target_dir, verbose, elassandra_version=elassandra_version)
            elif re.search(r'\[.*?(ahead|behind).*?\]', status.decode("utf-8")) is not None:  # status looks like '## trunk...origin/trunk [ahead 1, behind 29]\n'
                # If we have diverged in a way that fast-forward merging cannot solve, raise an exception so the cache is wiped
                common.error("Could not ascertain branch status, please resolve manually.")
                raise Exception
            else:  # status looks like '## cassandra-2.2...origin/cassandra-2.2\n'
                common.debug("Branch up to date, not pulling.")
    except Exception as e:
        # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
        try:
            rmdirs(target_dir)
            common.error("Deleted {} due to error".format(target_dir))
        except:
            print_('Building C* version {version} failed. Attempted to delete {target_dir} '
                   'but failed. This will need to be manually deleted'.format(
                       version=version,
                       target_dir=target_dir
                   ))
        finally:
            raise e
Example #25
def compile_version(version, target_dir, verbose=False):
    # compiling cassandra and the stress tool
    logfile = lastlogfilename()
    logger = get_logger(logfile)

    common.info("Compiling Cassandra {} ...".format(version))
    logger.info("--- Cassandra Build -------------------\n")

    env = update_java_version(
        install_dir=target_dir,
        for_build=True,
        info_message='Cassandra {} build'.format(version))

    default_build_properties = os.path.join(common.get_default_path(),
                                            'build.properties.default')
    if os.path.exists(default_build_properties):
        target_build_properties = os.path.join(target_dir, 'build.properties')
        logger.info("Copying %s to %s\n" %
                    (default_build_properties, target_build_properties))
        shutil.copyfile(default_build_properties, target_build_properties)

    try:
        # Patch for pending Cassandra issue: https://issues.apache.org/jira/browse/CASSANDRA-5543
        # Similar patch seen with buildbot
        attempt = 0
        ret_val = 1
        gradlew = os.path.join(target_dir, platform_binary('gradlew'))
        if os.path.exists(gradlew):
            cmd = [gradlew, 'jar']
        else:
            # No gradle, use ant
            cmd = [platform_binary('ant'), 'jar']
            if get_jdk_version_int() >= 11:
                cmd.append('-Duse.jdk11=true')
        while attempt < 3 and ret_val != 0:
            if attempt > 0:
                logger.info("\n\n`{}` failed. Retry #{}...\n\n".format(
                    ' '.join(cmd), attempt))
            process = subprocess.Popen(cmd,
                                       cwd=target_dir,
                                       env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            ret_val, stdout, stderr = log_info(process, logger)
            attempt += 1
        if ret_val != 0:
            raise CCMError(
                'Error compiling Cassandra. See {logfile} or run '
                '"ccm showlastlog" for details, stdout=\'{stdout}\' stderr=\'{stderr}\''
                .format(logfile=logfile,
                        stdout=stdout.decode(),
                        stderr=stderr.decode()))
    except OSError as e:
        raise CCMError(
            "Error compiling Cassandra. Is ant installed? See %s for details" %
            logfile)

    stress_dir = os.path.join(target_dir, "tools", "stress") if (
        version >= "0.8.0") else \
        os.path.join(target_dir, "contrib", "stress")

    build_xml = os.path.join(stress_dir, 'build.xml')
    if os.path.exists(
            build_xml):  # building stress separately is only necessary pre-1.1
        logger.info("\n\n--- cassandra/stress build ------------\n")
        try:
            # set permissions correctly, seems to not always be the case
            stress_bin_dir = os.path.join(stress_dir, 'bin')
            for f in os.listdir(stress_bin_dir):
                full_path = os.path.join(stress_bin_dir, f)
                os.chmod(
                    full_path,
                    stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
                    | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

            process = subprocess.Popen([platform_binary('ant'), 'build'],
                                       cwd=stress_dir,
                                       env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            ret_val, _, _ = log_info(process, logger)
            if ret_val != 0:
                process = subprocess.Popen(
                    [platform_binary('ant'), 'stress-build'],
                    cwd=target_dir,
                    env=env,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                ret_val, _, _ = log_info(process, logger)
                if ret_val != 0:
                    raise CCMError(
                        "Error compiling Cassandra stress tool.  "
                        "See %s for details (you will still be able to use ccm "
                        "but not the stress related commands)" % logfile)
        except IOError as e:
            raise CCMError(
                "Error compiling Cassandra stress tool: %s (you will "
                "still be able to use ccm but not the stress related commands)"
                % str(e))
Example #26
    def load_sstable_with_configuration(self, pre_compression=None, post_compression=None):
        """
        tests that the sstableloader works by using it to load data.
        Compression of the columnfamilies being loaded, and loaded into
        can be specified.

        pre_compression and post_compression can be these values:
        None, 'Snappy', or 'Deflate'.
        """
        NUM_KEYS = 1000

        for compression_option in (pre_compression, post_compression):
            assert compression_option in (None, 'Snappy', 'Deflate')

        debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))

        cluster = self.cluster
        cluster.populate(2).start()
        node1, node2 = cluster.nodelist()
        time.sleep(.5)

        def create_schema(session, compression):
            self.create_ks(session, "ks", rf=2)
            self.create_cf(session, "standard1", compression=compression)
            self.create_cf(session, "counter1", compression=compression, columns={'v': 'counter'})

        debug("creating keyspace and inserting")
        session = self.cql_connection(node1)
        create_schema(session, pre_compression)

        for i in range(NUM_KEYS):
            session.execute("UPDATE standard1 SET v='%d' WHERE KEY='%d' AND c='col'" % (i, i))
            session.execute("UPDATE counter1 SET v=v+1 WHERE KEY='%d'" % i)

        node1.nodetool('drain')
        node1.stop()
        node2.nodetool('drain')
        node2.stop()

        debug("Making a copy of the sstables")
        # make a copy of the sstables
        data_dir = os.path.join(node1.get_path(), 'data')
        copy_root = os.path.join(node1.get_path(), 'data_copy')
        for ddir in os.listdir(data_dir):
            keyspace_dir = os.path.join(data_dir, ddir)
            if os.path.isdir(keyspace_dir) and ddir != 'system':
                copy_dir = os.path.join(copy_root, ddir)
                dir_util.copy_tree(keyspace_dir, copy_dir)

        debug("Wiping out the data and restarting cluster")
        # wipe out the node data.
        cluster.clear()
        cluster.start()
        time.sleep(5)  # let gossip figure out what is going on

        debug("re-creating the keyspace and column families.")
        session = self.cql_connection(node1)
        create_schema(session, post_compression)
        time.sleep(2)

        debug("Calling sstableloader")
        # call sstableloader to re-load each cf.
        cdir = node1.get_install_dir()
        sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
        env = ccmcommon.make_cassandra_env(cdir, node1.get_path())
        host = node1.address()
        sstablecopy_dir = copy_root + '/ks'
        for cf_dir in os.listdir(sstablecopy_dir):
            full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
            if os.path.isdir(full_cf_dir):
                cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                p = subprocess.Popen(cmd_args, env=env)
                exit_status = p.wait()
                self.assertEqual(0, exit_status,
                                 "sstableloader exited with a non-zero status: %d" % exit_status)

        def read_and_validate_data(session):
            for i in range(NUM_KEYS):
                rows = list(session.execute("SELECT * FROM standard1 WHERE KEY='%d'" % i))
                self.assertEqual([str(i), 'col', str(i)], list(rows[0]))
                rows = list(session.execute("SELECT * FROM counter1 WHERE KEY='%d'" % i))
                self.assertEqual([str(i), 1], list(rows[0]))

        debug("Reading data back")
        # Now we should have sstables with the loaded data, and the existing
        # data. Let's read it all to make sure it is all there.
        read_and_validate_data(session)

        debug("scrubbing, compacting, and repairing")
        # do some operations and try reading the data again.
        node1.nodetool('scrub')
        node1.nodetool('compact')
        node1.nodetool('repair')

        debug("Reading data back one more time")
        read_and_validate_data(session)
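Note: the cmd_args built above correspond to a plain command-line invocation of sstableloader; a minimal sketch follows, where the host and the copied column-family path are hypothetical placeholders.
# Sketch: direct sstableloader invocation equivalent to cmd_args above.
# The path must point at a <data_copy>/<keyspace>/<table> directory; both the
# host and the path below are illustrative, not taken from the example.
import subprocess

cmd = ['sstableloader', '--nodes', '127.0.0.1', '/path/to/data_copy/ks/standard1']
exit_status = subprocess.call(cmd)
assert exit_status == 0, "sstableloader exited with a non-zero status: %d" % exit_status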
Example #27
0
    def load_sstable_with_configuration(self, pre_compression=None, post_compression=None, ks="ks", create_schema=create_schema):
        """
        tests that the sstableloader works by using it to load data.
        Compression of the columnfamilies being loaded, and loaded into
        can be specified.

        pre_compression and post_compression can be these values:
        None, 'Snappy', or 'Deflate'.
        """
        NUM_KEYS = 1000

        for compression_option in (pre_compression, post_compression):
            self.assertIn(compression_option, (None, 'Snappy', 'Deflate'))

        debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))
        if self.upgrade_from:
            debug("Testing sstableloader with upgrade_from=%s and compact=%s" % (self.upgrade_from, self.compact))

        cluster = self.cluster
        if self.upgrade_from:
            debug("Generating sstables with version %s" % (self.upgrade_from))
            default_install_dir = self.cluster.get_install_dir()
            # Forcing cluster version on purpose
            cluster.set_install_dir(version=self.upgrade_from)
        debug("Using jvm_args=%s" % self.jvm_args)
        cluster.populate(2).start(jvm_args=self.jvm_args)
        node1, node2 = cluster.nodelist()
        time.sleep(.5)

        debug("creating keyspace and inserting")
        session = self.cql_connection(node1)
        self.create_schema(session, ks, pre_compression)

        for i in range(NUM_KEYS):
            session.execute("UPDATE standard1 SET v='{}' WHERE KEY='{}' AND c='col'".format(i, i))
            session.execute("UPDATE counter1 SET v=v+1 WHERE KEY='{}'".format(i))

        node1.nodetool('drain')
        node1.stop()
        node2.nodetool('drain')
        node2.stop()

        debug("Making a copy of the sstables")
        # make a copy of the sstables
        for x in xrange(0, cluster.data_dir_count):
            data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
            copy_root = os.path.join(node1.get_path(), 'data{0}_copy'.format(x))
            for ddir in os.listdir(data_dir):
                keyspace_dir = os.path.join(data_dir, ddir)
                if os.path.isdir(keyspace_dir) and ddir != 'system':
                    copy_dir = os.path.join(copy_root, ddir)
                    dir_util.copy_tree(keyspace_dir, copy_dir)

        debug("Wiping out the data and restarting cluster")
        # wipe out the node data.
        cluster.clear()

        if self.upgrade_from:
            debug("Running sstableloader with version from %s" % (default_install_dir))
            # Return to previous version
            cluster.set_install_dir(install_dir=default_install_dir)

        cluster.start(jvm_args=self.jvm_args)
        time.sleep(5)  # let gossip figure out what is going on

        debug("re-creating the keyspace and column families.")
        session = self.cql_connection(node1)
        self.create_schema(session, ks, post_compression)
        time.sleep(2)

        debug("Calling sstableloader")
        # call sstableloader to re-load each cf.
        cdir = node1.get_install_dir()
        sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
        env = ccmcommon.make_cassandra_env(cdir, node1.get_path())
        host = node1.address()
        for x in xrange(0, cluster.data_dir_count):
            sstablecopy_dir = os.path.join(node1.get_path(), 'data{0}_copy'.format(x), ks.strip('"'))
            for cf_dir in os.listdir(sstablecopy_dir):
                full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
                if os.path.isdir(full_cf_dir):
                    cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                    p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
                    # read the pipes via communicate() so we log the actual output
                    # (p.stdout/p.stderr are file objects) and avoid blocking on full pipes
                    stdout, stderr = p.communicate()
                    exit_status = p.returncode
                    debug('stdout: {out}'.format(out=stdout))
                    debug('stderr: {err}'.format(err=stderr))
                    self.assertEqual(0, exit_status,
                                     "sstableloader exited with a non-zero status: {}".format(exit_status))

        def read_and_validate_data(session):
            for i in range(NUM_KEYS):
                query = "SELECT * FROM standard1 WHERE KEY='{}'".format(i)
                assert_one(session, query, [str(i), 'col', str(i)])
                query = "SELECT * FROM counter1 WHERE KEY='{}'".format(i)
                assert_one(session, query, [str(i), 1])

        debug("Reading data back")
        # Now we should have sstables with the loaded data, and the existing
        # data. Let's read it all to make sure it is all there.
        read_and_validate_data(session)

        debug("scrubbing, compacting, and repairing")
        # do some operations and try reading the data again.
        node1.nodetool('scrub')
        node1.nodetool('compact')
        node1.nodetool('repair')

        debug("Reading data back one more time")
        read_and_validate_data(session)

        # check that RewindableDataInputStreamPlus spill files are properly cleaned up
        if self.upgrade_from:
            for x in xrange(0, cluster.data_dir_count):
                data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
                for ddir in os.listdir(data_dir):
                    keyspace_dir = os.path.join(data_dir, ddir)
                    temp_files = self.glob_data_dirs(os.path.join(keyspace_dir, '*', "tmp", "*.dat"))
                    debug("temp files: " + str(temp_files))
                    self.assertEqual(0, len(temp_files), "Temporary files were not cleaned up.")
Example #28
0
    def load_sstable_with_configuration(self, pre_compression=None, post_compression=None, ks="ks", create_schema=create_schema):
        """
        tests that the sstableloader works by using it to load data.
        Compression of the columnfamilies being loaded, and loaded into
        can be specified.

        pre_compression and post_compression can be these values:
        None, 'Snappy', or 'Deflate'.
        """
        NUM_KEYS = 1000

        for compression_option in (pre_compression, post_compression):
            assert compression_option in (None, 'Snappy', 'Deflate')

        debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))
        if self.upgrade_from:
            debug("Testing sstableloader with upgrade_from=%s and compact=%s" % (self.upgrade_from, self.compact))

        cluster = self.cluster
        if self.upgrade_from:
            debug("Generating sstables with version %s" % (self.upgrade_from))
            default_install_dir = self.cluster.get_install_dir()
            # Forcing cluster version on purpose
            cluster.set_install_dir(version=self.upgrade_from)
        debug("Using jvm_args=%s" % self.jvm_args)
        cluster.populate(2).start(jvm_args=self.jvm_args)
        node1, node2 = cluster.nodelist()
        time.sleep(.5)

        debug("creating keyspace and inserting")
        session = self.cql_connection(node1)
        self.create_schema(session, ks, pre_compression)

        for i in range(NUM_KEYS):
            session.execute("UPDATE standard1 SET v='%d' WHERE KEY='%d' AND c='col'" % (i, i))
            session.execute("UPDATE counter1 SET v=v+1 WHERE KEY='%d'" % i)

        node1.nodetool('drain')
        node1.stop()
        node2.nodetool('drain')
        node2.stop()

        debug("Making a copy of the sstables")
        # make a copy of the sstables
        for x in xrange(0, cluster.data_dir_count):
            data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
            copy_root = os.path.join(node1.get_path(), 'data{0}_copy'.format(x))
            for ddir in os.listdir(data_dir):
                keyspace_dir = os.path.join(data_dir, ddir)
                if os.path.isdir(keyspace_dir) and ddir != 'system':
                    copy_dir = os.path.join(copy_root, ddir)
                    dir_util.copy_tree(keyspace_dir, copy_dir)

        debug("Wiping out the data and restarting cluster")
        # wipe out the node data.
        cluster.clear()

        if self.upgrade_from:
            debug("Running sstableloader with version from %s" % (default_install_dir))
            # Return to previous version
            cluster.set_install_dir(install_dir=default_install_dir)

        cluster.start(jvm_args=self.jvm_args)
        time.sleep(5)  # let gossip figure out what is going on

        debug("re-creating the keyspace and column families.")
        session = self.cql_connection(node1)
        self.create_schema(session, ks, post_compression)
        time.sleep(2)

        debug("Calling sstableloader")
        # call sstableloader to re-load each cf.
        cdir = node1.get_install_dir()
        sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
        env = ccmcommon.make_cassandra_env(cdir, node1.get_path())
        host = node1.address()
        for x in xrange(0, cluster.data_dir_count):
            sstablecopy_dir = os.path.join(node1.get_path(), 'data{0}_copy'.format(x), ks.strip('"'))
            for cf_dir in os.listdir(sstablecopy_dir):
                full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
                if os.path.isdir(full_cf_dir):
                    cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                    p = subprocess.Popen(cmd_args, env=env)
                    exit_status = p.wait()
                    self.assertEqual(0, exit_status,
                                     "sstableloader exited with a non-zero status: %d" % exit_status)

        def read_and_validate_data(session):
            for i in range(NUM_KEYS):
                rows = list(session.execute("SELECT * FROM standard1 WHERE KEY='%d'" % i))
                self.assertEqual([str(i), 'col', str(i)], list(rows[0]))
                rows = list(session.execute("SELECT * FROM counter1 WHERE KEY='%d'" % i))
                self.assertEqual([str(i), 1], list(rows[0]))

        debug("Reading data back")
        # Now we should have sstables with the loaded data, and the existing
        # data. Let's read it all to make sure it is all there.
        read_and_validate_data(session)

        debug("scrubbing, compacting, and repairing")
        # do some operations and try reading the data again.
        node1.nodetool('scrub')
        node1.nodetool('compact')
        node1.nodetool('repair')

        debug("Reading data back one more time")
        read_and_validate_data(session)

        # check that RewindableDataInputStreamPlus spill files are properly cleaned up
        if self.upgrade_from:
            for x in xrange(0, cluster.data_dir_count):
                data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
                for ddir in os.listdir(data_dir):
                    keyspace_dir = os.path.join(data_dir, ddir)
                    temp_files = self.glob_data_dirs(os.path.join(keyspace_dir, '*', "tmp", "*.dat"))
                    debug("temp files: " + str(temp_files))
                    self.assertEqual(0, len(temp_files), "Temporary files were not cleaned up.")
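Note: Examples #27 and #28 rely on a create_schema helper (passed as a default argument and called as self.create_schema) that is not shown here. Below is a minimal sketch of what such a helper might look like, modeled on the inline create_schema from Example #26; the keyspace argument and the create_ks/create_cf signatures are assumptions.
    # Hypothetical helper modeled on Example #26's inline create_schema, with a
    # keyspace name added to match the self.create_schema(session, ks, compression)
    # calls above. create_ks/create_cf are the dtest-style helpers used in #26.
    def create_schema(self, session, ks, compression):
        self.create_ks(session, ks, rf=2)
        self.create_cf(session, "standard1", compression=compression)
        self.create_cf(session, "counter1", compression=compression, columns={'v': 'counter'})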
Example #29
0
def clone_development(git_repo, version, verbose=False):
    print_(git_repo, version)
    target_dir = directory_name(version)
    assert target_dir
    if 'github' in version:
        git_repo_name, git_branch = github_username_and_branch_name(version)
    elif 'local:' in version:
        git_repo_name = 'local_{}'.format(git_repo)  # add git repo location to distinguish cache location for differing repos
        git_branch = version.split(':')[-1]  # last token on 'local:...' slugs should always be branch name
    else:
        git_repo_name = 'apache'
        git_branch = version.split(':', 1)[1]
    local_git_cache = os.path.join(__get_dir(), '_git_cache_' + git_repo_name)
    logfile = lastlogfilename()
    with open(logfile, 'w') as lf:
        try:
            # Checkout/fetch a local repository cache to reduce the number of
            # remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                common.info("Cloning Cassandra...")
                out = subprocess.call(
                    ['git', 'clone', '--mirror', git_repo, local_git_cache],
                    cwd=__get_dir(), stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git clone"
            else:
                common.info("Fetching Cassandra updates...")
                out = subprocess.call(
                    ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                    cwd=local_git_cache, stdout=lf, stderr=lf)

            # Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                common.info("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(['git', 'clone', local_split[-1], target_split[-1]], cwd=__get_dir(), stdout=lf, stderr=lf)
                else:
                    subprocess.call(['git', 'clone', local_git_cache, target_dir], cwd=__get_dir(), stdout=lf, stderr=lf)

                # determine if the request is for a branch
                is_branch = False
                try:
                    branch_listing = subprocess.check_output(['git', 'branch', '--all'], cwd=target_dir).decode('utf-8')
                    branches = [b.strip() for b in branch_listing.replace('remotes/origin/', '').split()]
                    is_branch = git_branch in branches
                except subprocess.CalledProcessError as cpe:
                    common.error("Error Running Branch Filter: {}\nAssumming request is not for a branch".format(cpe.output))

                # now check out the right version
                branch_or_sha_tag = 'branch' if is_branch else 'SHA/tag'
                common.info("Checking out requested {} ({})".format(branch_or_sha_tag, git_branch))
                if is_branch:
                    # we use checkout -B with --track so we can specify that we want to track a specific branch
                    # otherwise, you get errors on branch names that are also valid SHAs or SHA shortcuts, like 10360
                    # we use -B instead of -b so we reset branches that already exist and create a new one otherwise
                    out = subprocess.call(['git', 'checkout', '-B', git_branch,
                                           '--track', 'origin/{git_branch}'.format(git_branch=git_branch)],
                                          cwd=target_dir, stdout=lf, stderr=lf)
                else:
                    out = subprocess.call(['git', 'checkout', git_branch], cwd=target_dir, stdout=lf, stderr=lf)
                if int(out) != 0:
                    raise CCMError('Could not check out git branch {branch}. '
                                   'Is this a valid branch name? (see {lastlog} or run '
                                   '"ccm showlastlog" for details)'.format(
                                       branch=git_branch, lastlog=logfile
                                   ))
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else:  # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(['git', 'fetch', 'origin'], cwd=target_dir, stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(['git', 'status', '-sb'], cwd=target_dir, stdout=subprocess.PIPE, stderr=lf).communicate()[0]
                if str(status).find('[behind') > -1:
                    common.info("Branch is behind, recompiling")
                    out = subprocess.call(['git', 'pull'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call([platform_binary('ant'), 'realclean'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except Exception as e:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                rmdirs(target_dir)
                common.error("Deleted {} due to error".format(target_dir))
            except:
                print_('Building C* version {version} failed. Attempted to delete {target_dir} '
                       'but failed. This will need to be manually deleted'.format(
                           version=version,
                           target_dir=target_dir
                       ))
            finally:
                raise e
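Note: the branching at the top of clone_development accepts three kinds of version strings; a few illustrative calls follow, where every URL, path, and branch name is a hypothetical placeholder rather than something taken from the example.
# Illustrative calls exercising each parsing branch of clone_development above;
# the repository URL, the local path, and the branch names are hypothetical.
clone_development('https://github.com/apache/cassandra.git', 'github:someuser/some-branch')
clone_development('/path/to/local/cassandra', 'local:/path/to/local/cassandra:my-branch')
clone_development('https://github.com/apache/cassandra.git', 'git:trunk')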
Example #30
0
def clone_development(git_repo, version, verbose=False):
    print_(git_repo, version)
    target_dir = directory_name(version)
    assert target_dir
    if 'github' in version:
        git_repo_name, git_branch = github_username_and_branch_name(version)
    else:
        git_repo_name = 'apache'
        git_branch = version.split(':', 1)[1]
    local_git_cache = os.path.join(__get_dir(), '_git_cache_' + git_repo_name)
    logfile = lastlogfilename()
    with open(logfile, 'w') as lf:
        try:
            # Checkout/fetch a local repository cache to reduce the number of
            # remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                if verbose:
                    print_("Cloning Cassandra...")
                out = subprocess.call(
                    ['git', 'clone', '--mirror', git_repo, local_git_cache],
                    cwd=__get_dir(),
                    stdout=lf,
                    stderr=lf)
                assert out == 0, "Could not do a git clone"
            else:
                if verbose:
                    print_("Fetching Cassandra updates...")
                out = subprocess.call(
                    ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                    cwd=local_git_cache,
                    stdout=lf,
                    stderr=lf)

            # Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                if verbose:
                    print_("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(
                        ['git', 'clone', local_split[-1], target_split[-1]],
                        cwd=__get_dir(),
                        stdout=lf,
                        stderr=lf)
                else:
                    subprocess.call(
                        ['git', 'clone', local_git_cache, target_dir],
                        cwd=__get_dir(),
                        stdout=lf,
                        stderr=lf)

                # determine if the request is for a branch
                is_branch = False
                try:
                    branch_listing = subprocess.check_output(
                        ['git', 'branch', '--all'],
                        cwd=target_dir).decode('utf-8')
                    branches = [
                        b.strip() for b in branch_listing.replace(
                            'remotes/origin/', '').split()
                    ]
                    is_branch = git_branch in branches
                except subprocess.CalledProcessError as cpe:
                    print_(
                        "Error Running Branch Filter: {}\nAssumming request is not for a branch"
                        .format(cpe.output))

                # now check out the right version
                if verbose:
                    branch_or_sha_tag = 'branch' if is_branch else 'SHA/tag'
                    print_("Checking out requested {} ({})".format(
                        branch_or_sha_tag, git_branch))
                if is_branch:
                    # we use checkout -B with --track so we can specify that we want to track a specific branch
                    # otherwise, you get errors on branch names that are also valid SHAs or SHA shortcuts, like 10360
                    # we use -B instead of -b so we reset branches that already exist and create a new one otherwise
                    out = subprocess.call([
                        'git', 'checkout', '-B', git_branch, '--track',
                        'origin/{git_branch}'.format(git_branch=git_branch)
                    ],
                                          cwd=target_dir,
                                          stdout=lf,
                                          stderr=lf)
                else:
                    out = subprocess.call(['git', 'checkout', git_branch],
                                          cwd=target_dir,
                                          stdout=lf,
                                          stderr=lf)
                if int(out) != 0:
                    raise CCMError(
                        'Could not check out git branch {branch}. '
                        'Is this a valid branch name? (see {lastlog} or run '
                        '"ccm showlastlog" for details)'.format(
                            branch=git_branch, lastlog=logfile))
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else:  # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(['git', 'fetch', 'origin'],
                                      cwd=target_dir,
                                      stdout=lf,
                                      stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(['git', 'status', '-sb'],
                                          cwd=target_dir,
                                          stdout=subprocess.PIPE,
                                          stderr=lf).communicate()[0]
                if str(status).find('[behind') > -1:
                    if verbose:
                        print_("Branch is behind, recompiling")
                    out = subprocess.call(['git', 'pull'],
                                          cwd=target_dir,
                                          stdout=lf,
                                          stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call(
                        [platform_binary('ant'), 'realclean'],
                        cwd=target_dir,
                        stdout=lf,
                        stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                rmdirs(target_dir)
                print_("Deleted %s due to error" % target_dir)
            except:
                raise CCMError(
                    "Building C* version %s failed. Attempted to delete %s but failed. This will need to be manually deleted"
                    % (version, target_dir))
            raise
Example #31
0
    def scrub(self, options):
        cdir = self.get_cassandra_dir()
        scrub_bin = common.join_bin(cdir, 'bin', 'sstablescrub')
        env = common.make_cassandra_env(cdir, self.get_path())
        os.execve(scrub_bin, [common.platform_binary('sstablescrub')] + options, env)
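Note: os.execve replaces the current process image, so scrub above never returns to its caller. If the caller needs to keep running, a subprocess-based variant is the usual alternative; below is a minimal sketch reusing the same helpers, offered as an assumption rather than CCM's actual implementation.
    # Sketch: a non-exec variant of scrub that returns control to the caller.
    # Assumes the same common.* helpers and self.get_* accessors as the example above.
    def scrub_subprocess(self, options):
        cdir = self.get_cassandra_dir()
        scrub_bin = common.join_bin(cdir, 'bin', 'sstablescrub')
        env = common.make_cassandra_env(cdir, self.get_path())
        return subprocess.call([scrub_bin] + options, env=env)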