Example #1
def download_benchmark_candidate(ctx):
    version = ctx.config.opts("source", "distribution.version")
    repo_name = ctx.config.opts("source", "distribution.repository")
    if version.strip() == "":
        raise exceptions.SystemSetupError("Could not determine version. Please specify the Elasticsearch distribution "
                                          "to download with the command line parameter --distribution-version. "
                                          "E.g. --distribution-version=5.0.0")
    distributions_root = "%s/%s" % (ctx.config.opts("system", "root.dir"), ctx.config.opts("source", "distribution.dir"))
    io.ensure_dir(distributions_root)
    distribution_path = "%s/elasticsearch-%s.tar.gz" % (distributions_root, version)

    try:
        repo = distribution_repos[repo_name]
    except KeyError:
        raise exceptions.SystemSetupError("Unknown distribution repository [%s]. Valid values are: [%s]"
                                          % (repo_name, ",".join(distribution_repos.keys())))

    download_url = repo.download_url(version)
    logger.info("Resolved download URL [%s] for version [%s]" % (download_url, version))
    if not os.path.isfile(distribution_path) or repo.must_download:
        logger.info("Downloading distribution for version [%s]." % version)
        try:
            print("Downloading Elasticsearch %s ..." % version)
            net.download(download_url, distribution_path)
        except urllib.error.HTTPError:
            logging.exception("Cannot download Elasticsearch distribution for version [%s] from [%s]." % (version, download_url))
            raise exceptions.SystemSetupError("Cannot download Elasticsearch distribution from [%s]. Please check that the specified "
                                              "version [%s] is correct." % (download_url, version))
    else:
        logger.info("Skipping download for version [%s]. Found an existing binary locally at [%s]." % (version, distribution_path))

    ctx.config.add(config.Scope.invocation, "builder", "candidate.bin.path", distribution_path)
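
Every example on this page funnels directory creation through io.ensure_dir from esrally.utils. The helper itself never appears in these snippets; a minimal sketch of what such a wrapper typically looks like (this implementation is an assumption, not Rally's actual code):

import os


def ensure_dir(directory):
    # Create the directory and any missing parents. exist_ok avoids a race
    # between checking for existence and creating the directory.
    if directory:
        os.makedirs(directory, exist_ok=True)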
Example #2
    def run(self, task):
        from esrally.utils import jvm

        logger.info("Building from sources in [%s]." % self.src_dir)
        logger.info("Executing %s %s..." % (self.gradle, task))
        io.ensure_dir(self.log_dir)
        log_file = "%s/build.log" % self.log_dir

        # we capture all output to a dedicated build log file
        jvm_major_version = jvm.major_version(self.java_home)
        if jvm_major_version > 8:
            logger.info("Detected JVM with major version [%d]. Adjusting JDK module access options for the build." % jvm_major_version)
            gradle_opts = "export GRADLE_OPTS=\"%s\"; " % Builder.JAVA_9_GRADLE_OPTS
        else:
            gradle_opts = ""

        if process.run_subprocess("%sexport JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" %
                                  (gradle_opts, self.java_home, self.src_dir, self.gradle, task, log_file)):
            msg = "Executing '%s %s' failed. The last 20 lines in the build log file are:\n" % (self.gradle, task)
            msg += "=========================================================================================================\n"
            with open(log_file, "r") as f:
                msg += "\t"
                msg += "\t".join(f.readlines()[-20:])
            msg += "=========================================================================================================\n"
            msg += "The full build log is available at [%s]." % log_file

            raise BuildError(msg)
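
The failure-handling pattern above (redirect all build output to one log file, then surface only the tail on error) can be reproduced with the standard library alone. A minimal sketch, assuming the build command is a plain shell string; all names here are hypothetical:

import subprocess


def run_build(build_cmd, log_file, tail=20):
    # capture stdout and stderr of the whole shell pipeline in a single log file
    with open(log_file, "ab") as log:
        returncode = subprocess.call(build_cmd, shell=True, stdout=log, stderr=subprocess.STDOUT)
    if returncode != 0:
        with open(log_file, "r", encoding="utf-8") as f:
            tail_lines = f.readlines()[-tail:]
        raise RuntimeError("Build failed. Last %d log lines:\n%s" % (tail, "".join(tail_lines)))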
Example #3
def download_benchmark_candidate(ctx, track):
    version = ctx.config.opts("source", "distribution.version")
    if version.strip() == "":
        raise exceptions.SystemSetupError("Could not determine version. Please specify the command line the Elasticsearch "
                                          "distribution to download with the command line parameter --distribution-version. "
                                          "E.g. --distribution-version=5.0.0")

    distributions_root = "%s/%s" % (ctx.config.opts("system", "root.dir"), ctx.config.opts("source", "distribution.dir"))
    io.ensure_dir(distributions_root)
    distribution_path = "%s/elasticsearch-%s.zip" % (distributions_root, version)

    download_url = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/zip/elasticsearch/%s/" \
                   "elasticsearch-%s.zip" % (version, version)
    if not os.path.isfile(distribution_path):
        try:
            print("Downloading Elasticsearch %s ..." % version)
            net.download(download_url, distribution_path)
        except urllib.error.HTTPError:
            logging.exception("Cannot download Elasticsearch distribution for version [%s] from [%s]." % (version, download_url))
            raise exceptions.SystemSetupError("Cannot download Elasticsearch distribution from [%s]. Please check that the specified "
                                              "version [%s] is correct." % (download_url, version))
    else:
        logger.info("Skipping download for version [%s]. Found an existing binary locally at [%s]." % (version, distribution_path))

    ctx.config.add(config.Scope.invocation, "builder", "candidate.bin.path", distribution_path)
Example #4
    def run(self, task):
        try:
            src_dir = self._config.opts("source", "local.src.dir")
        except config.ConfigError:
            logging.exception("Rally is not configured to build from sources")
            raise SystemSetupError("Rally is not setup to build from sources. You can either benchmark a binary distribution or "
                                   "install the required software and reconfigure Rally with %s --configure." % PROGRAM_NAME)

        logger.info("Building Elasticsearch from sources in [%s]." % src_dir)
        gradle = self._config.opts("build", "gradle.bin")
        java_home = self._config.opts("runtime", "java8.home")
        log_dir = self._config.opts("system", "log.dir")

        logger.info("Executing %s %s..." % (gradle, task))
        io.ensure_dir(log_dir)
        log_file = "%s/build.log" % log_dir

        # we capture all output to a dedicated build log file

        if process.run_subprocess("export JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" % (java_home, src_dir, gradle, task, log_file)):
            msg = "Executing '%s %s' failed. The last 20 lines in the build log file are:\n" % (gradle, task)
            msg += "=========================================================================================================\n"
            with open(log_file, "r") as f:
                msg += "\t"
                msg += "\t".join(f.readlines()[-20:])
            msg += "=========================================================================================================\n"
            msg += "The full build log is available at [%s]." % log_file
            raise BuildError(msg)
Example #5
    def download(cfg, url, local_path, size_in_bytes):
        offline = cfg.opts("system", "offline.mode")
        file_exists = os.path.isfile(local_path)

        if file_exists:
            logger.info("[%s] already exists locally. Skipping download." % local_path)
            return

        if not offline:
            logger.info("Downloading from [%s] to [%s]." % (url, local_path))
            try:
                io.ensure_dir(os.path.dirname(local_path))
                size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                # ensure output appears immediately
                print("Downloading data from %s (%s MB) ... " % (url, size_in_mb), end='', flush=True)
                net.download(url, local_path, size_in_bytes)
                print("Done")
            except urllib.error.URLError:
                logger.exception("Could not download [%s] to [%s]." % (url, local_path))

        # file must exist at this point -> verify
        if not os.path.isfile(local_path):
            if offline:
                raise exceptions.SystemSetupError(
                        "Cannot find %s. Please disable offline mode and retry again." % local_path)
            else:
                raise exceptions.SystemSetupError(
                        "Could not download from %s to %s. Please verify that data are available at %s and "
                        "check your internet connection." % (url, local_path, url))
Example #6
    def write_report(self, metrics_table):
        headers = ["Metric", "Value"]
        report_format = self._config.opts("report", "reportformat")
        report_file = self._config.opts("report", "reportfile")

        if report_format == "markdown":
            report = tabulate.tabulate(metrics_table, headers=headers, tablefmt="pipe", numalign="right", stralign="right")
        elif report_format == "csv":
            with io.StringIO() as out:
                writer = csv.writer(out)
                writer.writerow(headers)
                for metric_record in metrics_table:
                    writer.writerow(metric_record)
                report = out.getvalue()
        else:
            raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

        print_internal(report)
        if len(report_file) > 0:
            normalized_report_file = rio.normalize_path(report_file)
            logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                        (normalized_report_file, report_file, report_format))
            print("\nWriting report also to '%s'" % normalized_report_file)
            # ensure that the parent folder already exists when we try to write the file...
            rio.ensure_dir(rio.dirname(normalized_report_file))
            with open(normalized_report_file, mode="w", encoding="UTF-8") as f:
                f.writelines(report)
Example #7
def configure_logging(cfg):
    # Even if we don't log to a file, other parts of the application rely on this path to exist -> enforce
    log_file = log_file_path(cfg)
    log_dir = os.path.dirname(log_file)
    io.ensure_dir(log_dir)
    cfg.add(config.Scope.application, "system", "log.dir", log_dir)

    logging_output = cfg.opts("system", "logging.output")

    if logging_output == "file":
        console.info("Writing logs to %s" % log_file)
        # there is an old log file lying around -> backup
        if os.path.exists(log_file):
            os.rename(log_file, "%s-bak-%d.log" % (log_file, int(os.path.getctime(log_file))))
        ch = logging.FileHandler(filename=log_file, mode="a")
    else:
        ch = logging.StreamHandler(stream=sys.stdout)

    log_level = logging.INFO
    ch.setLevel(log_level)
    formatter = logging.Formatter("%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    formatter.converter = time.gmtime
    ch.setFormatter(formatter)

    # Remove all handlers associated with the root logger object so we can start over with an entirely fresh log configuration
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    logging.root.addHandler(ch)
    logging.getLogger("elasticsearch").setLevel(logging.WARN)
Example #8
    def clone(self):
        src = self.src_dir
        remote = self.remote_url
        io.ensure_dir(src)
        print("Downloading sources from %s to %s." % (remote, src))
        # Don't swallow subprocess output, user might need to enter credentials...
        if process.run_subprocess("git clone %s %s" % (remote, src)):
            raise SupplyError("Could not clone from %s to %s" % (remote, src))
Example #9
    def instrument_env(self, car, candidate_id):
        log_root = "%s/%s" % (self.cfg.opts("system", "challenge.root.dir"), self.cfg.opts("benchmarks", "metrics.log.dir"))
        io.ensure_dir(log_root)
        log_file = "%s/%s-%s.jit.log" % (log_root, car.name, candidate_id)

        console.info("%s: Writing JIT compiler log to [%s]" % (self.human_name, log_file), logger=logger)
        return {"ES_JAVA_OPTS": "-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation "
                                "-XX:LogFile=%s -XX:+PrintAssembly" % log_file}
Example #10
    def _install_binary(self):
        binary = self._config.opts("builder", "candidate.bin.path")
        install_dir = self._install_dir()
        logger.info("Preparing candidate locally in %s." % install_dir)
        io.ensure_dir(install_dir)
        logger.info("Unzipping %s to %s" % (binary, install_dir))
        io.decompress(binary, install_dir)
        binary_path = glob.glob("%s/elasticsearch*" % install_dir)[0]
        self._config.add(config.Scope.benchmark, "provisioning", "local.binary.path", binary_path)
Example #11
    def instrument_env(self, setup, candidate_id):
        log_root = "%s/%s" % (self._config.opts("system", "track.setup.root.dir"), self._config.opts("benchmarks", "metrics.log.dir"))
        io.ensure_dir(log_root)
        log_file = "%s/%s-%s.jit.log" % (log_root, setup.name, candidate_id)

        logger.info("%s: Writing JIT compiler logs to [%s]." % (self.human_name, log_file))
        print("%s: Writing JIT compiler log to %s" % (self.human_name, log_file))
        return {"ES_JAVA_OPTS": "-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation "
                                "-XX:LogFile=%s -XX:+PrintAssembly" % log_file}
Example #12
    def install(self, binary):
        logger.info("Preparing candidate locally in [%s]." % self.install_dir)
        io.ensure_dir(self.install_dir)
        io.ensure_dir(self.node_log_dir)

        logger.info("Unzipping %s to %s" % (binary, self.install_dir))
        io.decompress(binary, self.install_dir)
        self.es_home_path = glob.glob("%s/elasticsearch*" % self.install_dir)[0]
        self.data_paths = self._data_paths()
Example #13
    def instrument_env(self, car, candidate_id):
        log_root = "%s/%s" % (self.cfg.opts("system", "challenge.root.dir"), self.cfg.opts("benchmarks", "metrics.log.dir"))
        io.ensure_dir(log_root)
        log_file = "%s/%s-%s.gc.log" % (log_root, car.name, candidate_id)

        console.info("%s: Writing GC log to [%s]" % (self.human_name, log_file), logger=logger)
        # TODO dm: These options change in JDK 9!
        return {"ES_JAVA_OPTS": "-Xloggc:%s -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps  "
                                "-XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime  -XX:+PrintTenuringDistribution"
                                % log_file}
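
The TODO above refers to JDK 9's unified logging, which removed -Xloggc and the -XX:+PrintGC* flags. A hedged sketch of what a JDK 9+ equivalent of this return value could look like; the exact tag selection is an assumption, not necessarily what Rally ended up using:

def instrument_env_jdk9(log_file):
    # JDK 9+ unified GC logging; roughly equivalent to the pre-9 flags above
    return {"ES_JAVA_OPTS": "-Xlog:gc*=info,safepoint=info,age*=trace:file=%s:utctime,level,tags" % log_file}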
Example #14
    def _install_binary(self):
        binary = self._config.opts("builder", "candidate.bin.path")
        install_dir = self._install_dir()
        logger.info("Preparing candidate locally in %s." % install_dir)
        io.ensure_dir(install_dir)
        logger.info("Unzipping %s to %s" % (binary, install_dir))
        io.unzip(binary, install_dir)
        binary_path = glob.glob("%s/elasticsearch*" % install_dir)[0]
        # config may be different for each track setup so we have to reinitialize every time, hence track setup scope
        self._config.add(config.Scope.trackSetup, "provisioning", "local.binary.path", binary_path)
Example #15
    def start(self, car):
        # hardcoded for the moment, should actually be identical to internal launcher
        # Only needed on Mac:
        # hosts = [{"host": process.run_subprocess_with_output("docker-machine ip default")[0].strip(), "port": 9200}]
        hosts = [{"host": "localhost", "port": 9200}]
        client_options = self.cfg.opts("launcher", "client.options")
        # unified client config
        self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
        self.cfg.add(config.Scope.benchmark, "client", "options", client_options)

        es = self.client_factory(hosts, client_options).create()

        t = telemetry.Telemetry(self.cfg, devices=[
            # Be aware that some of the metadata is taken from the host system, not the container (e.g. the number of CPU
            # cores), so if the Docker container constrains these, the metrics are actually wrong.
            telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
            telemetry.NodeStats(self.cfg, es, self.metrics_store),
            telemetry.IndexStats(self.cfg, es, self.metrics_store),
            telemetry.DiskIo(self.cfg, self.metrics_store),
            telemetry.CpuUsage(self.cfg, self.metrics_store)
        ])

        distribution_version = self.cfg.opts("source", "distribution.version", mandatory=False)

        install_dir = self._install_dir()
        io.ensure_dir(install_dir)

        java_opts = ""
        if car.heap:
            java_opts += "-Xms%s -Xmx%s " % (car.heap, car.heap)
        if car.java_opts:
            java_opts += car.java_opts

        vars = {
            "es_java_opts": java_opts,
            "container_memory_gb": "%dg" % (convert.bytes_to_gb(psutil.virtual_memory().total) // 2),
            "es_data_dir": "%s/data" % install_dir,
            "es_version": distribution_version
        }

        docker_cfg = self._render_template_from_file(vars)
        logger.info("Starting Docker container with configuration:\n%s" % docker_cfg)
        docker_cfg_path = self._docker_cfg_path()
        with open(docker_cfg_path, "wt") as f:
            f.write(docker_cfg)

        c = cluster.Cluster([], t)

        self._start_process(cmd="docker-compose -f %s up" % docker_cfg_path, node_name="rally0")
        # Wait for a little while: Plugins may still be initializing although the node has already started.
        time.sleep(10)

        t.attach_to_cluster(c)
        logger.info("Successfully started Docker container")
        return c
Example #16
    def attach_to_node(self, node):
        log_root = "%s/%s" % (self.cfg.opts("system", "challenge.root.dir"), self.cfg.opts("benchmarks", "metrics.log.dir"))
        io.ensure_dir(log_root)
        log_file = "%s/%s.perf.log" % (log_root, node.node_name)

        console.info("%s: Writing perf logs to [%s]" % (self.human_name, log_file), logger=logger)

        self.log = open(log_file, "wb")

        self.process = subprocess.Popen(["perf", "stat", "-p %s" % node.process.pid],
                                        stdout=self.log, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL)
        self.node = node
Example #17
    def instrument_env(self, car, candidate_id):
        log_root = "%s/%s" % (self.cfg.opts("system", "challenge.root.dir"), self.cfg.opts("benchmarks", "metrics.log.dir"))
        io.ensure_dir(log_root)
        log_file = "%s/%s-%s.jfr" % (log_root, car.name, candidate_id)

        console.info("%s: Writing flight recording to [%s]" % (self.human_name, log_file), logger=logger)
        # this is more robust in case we want to use custom settings
        # see http://stackoverflow.com/questions/34882035/how-to-record-allocations-with-jfr-on-command-line
        #
        # in that case change to: -XX:StartFlightRecording=defaultrecording=true,settings=es-memory-profiling
        return {"ES_JAVA_OPTS": "-XX:+UnlockDiagnosticVMOptions -XX:+UnlockCommercialFeatures -XX:+DebugNonSafepoints -XX:+FlightRecorder "
                                "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true,dumponexitpath=%s "
                                "-XX:StartFlightRecording=defaultrecording=true" % log_file}
Example #18
    def _exec(self, task_key):
        src_dir = self._config.opts("source", "local.src.dir")
        gradle = self._config.opts("build", "gradle.bin")
        task = self._config.opts("build", task_key)

        log_root = self._config.opts("system", "log.dir")
        build_log_dir = self._config.opts("build", "log.dir")
        log_dir = "%s/%s" % (log_root, build_log_dir)

        logger.info("Executing %s %s..." % (gradle, task))
        io.ensure_dir(log_dir)
        log_file = "%s/build.%s.log" % (log_dir, task_key)

        # we capture all output to a dedicated build log file
        if process.run_subprocess("cd %s; %s %s > %s.tmp 2>&1" % (src_dir, gradle, task, log_file)):
            logger.warning("Executing '%s %s' failed" % (gradle, task))
        os.rename(("%s.tmp" % log_file), log_file)
Example #19
def write_single_report(report_file, report_format, cwd, headers, data_plain, data_rich, write_header=True, show_also_in_console=True):
    if report_format == "markdown":
        formatter = format_as_markdown
    elif report_format == "csv":
        formatter = format_as_csv
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    if show_also_in_console:
        print_internal(formatter(headers, data_rich))
    if len(report_file) > 0:
        normalized_report_file = rio.normalize_path(report_file, cwd)
        logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                    (normalized_report_file, report_file, report_format))
        # ensure that the parent folder already exists when we try to write the file...
        rio.ensure_dir(rio.dirname(normalized_report_file))
        with open(normalized_report_file, mode="a+", encoding="UTF-8") as f:
            f.writelines(formatter(headers, data_plain, write_header))
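
Examples #19 and #31 call format_as_markdown and format_as_csv without showing them, while Example #6 inlines the equivalent logic. A sketch of the CSV variant inferred from Example #6 (the exact signature is an assumption):

import csv
import io


def format_as_csv(headers, data, write_header=True):
    with io.StringIO() as out:
        writer = csv.writer(out)
        if write_header:
            writer.writerow(headers)
        for record in data:
            writer.writerow(record)
        # read the buffer before the context manager closes it
        return out.getvalue()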
Example #20
    def download(cfg, url, local_path, size_in_bytes):
        offline = cfg.opts("system", "offline.mode")
        file_exists = os.path.isfile(local_path)

        # ensure we only skip the download if the file size also matches our expectation
        if file_exists and (size_in_bytes is None or os.path.getsize(local_path) == size_in_bytes):
            logger.info("[%s] already exists locally. Skipping download." % local_path)
            return False

        if not offline:
            try:
                io.ensure_dir(os.path.dirname(local_path))
                if size_in_bytes:
                    size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                    # ensure output appears immediately
                    logger.info("Downloading data from [%s] (%s MB) to [%s]." % (url, size_in_mb, local_path))
                else:
                    logger.info("Downloading data from [%s] to [%s]." % (url, local_path))

                # we want to have a bit more accurate download progress as these files are typically very large
                progress = net.Progress("[INFO] Downloading data for track %s" % track.name, accuracy=1)
                net.download(url, local_path, size_in_bytes, progress_indicator=progress)
                progress.finish()
                logger.info("Downloaded data from [%s] to [%s]." % (url, local_path))
            except urllib.error.URLError:
                logger.exception("Could not download [%s] to [%s]." % (url, local_path))

        # file must exist at this point -> verify
        if not os.path.isfile(local_path):
            if offline:
                raise exceptions.SystemSetupError(
                    "Cannot find %s. Please disable offline mode and retry again." % local_path)
            else:
                raise exceptions.SystemSetupError(
                    "Cannot download from %s to %s. Please verify that data are available at %s and "
                    "check your internet connection." % (url, local_path, url))

        actual_size = os.path.getsize(local_path)
        if size_in_bytes is not None and actual_size != size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected." %
                                       (local_path, actual_size, size_in_bytes))

        return True
Example #21
def configure_logging(cfg):
    log_dir = paths.Paths(cfg).log_root()
    io.ensure_dir(log_dir)
    cfg.add(config.Scope.application, "system", "log.dir", log_dir)
    log_file = "%s/rally_out.log" % log_dir

    print("\nWriting additional logs to %s\n" % log_file)

    # Remove all handlers associated with the root logger object so we can start over with an entirely fresh log configuration
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    log_level = logging.INFO
    ch = logging.FileHandler(filename=log_file, mode="a")
    ch.setLevel(log_level)
    formatter = logging.Formatter("%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    formatter.converter = time.gmtime
    ch.setFormatter(formatter)
    logging.root.addHandler(ch)
Example #22
def _apply_config(source_root_path, target_root_path, config_vars):
    for root, dirs, files in os.walk(source_root_path):
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(root))

        relative_root = root[len(source_root_path) + 1:]
        absolute_target_root = os.path.join(target_root_path, relative_root)
        io.ensure_dir(absolute_target_root)

        for name in files:
            source_file = os.path.join(root, name)
            target_file = os.path.join(absolute_target_root, name)
            if plain_text(source_file):
                logger.info("Reading config template file [%s] and writing to [%s]." % (source_file, target_file))
                # automatically merge config snippets from plugins (e.g. if they want to add config to elasticsearch.yml)
                with open(target_file, "a") as f:
                    f.write(_render_template(env, config_vars, source_file))
            else:
                logger.info("Treating [%s] as binary and copying as is to [%s]." % (source_file, target_file))
                shutil.copy(source_file, target_file)
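
The _render_template helper used above is not shown on this page. Since the Jinja2 environment is rooted at the directory currently being walked, it plausibly looks the template up by base name and renders it with the config variables; a sketch under that assumption:

import os

import jinja2


def _render_template(env, variables, file_name):
    # the loader is rooted at the file's directory, so resolve by base name
    template = env.get_template(os.path.basename(file_name))
    return template.render(variables)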
Example #23
    def download(cfg, url, local_path, size_in_bytes):
        offline = cfg.opts("system", "offline.mode")
        file_exists = os.path.isfile(local_path)

        # ensure we only skip the download if the file size also matches our expectation
        if file_exists and (size_in_bytes is None or os.path.getsize(local_path) == size_in_bytes):
            logger.info("[%s] already exists locally. Skipping download." % local_path)
            return False

        if not offline:
            try:
                io.ensure_dir(os.path.dirname(local_path))
                if size_in_bytes:
                    size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                    # ensure output appears immediately
                    console.info("Downloading data from [%s] (%s MB) to [%s] ... " % (url, size_in_mb, local_path),
                                 end='', flush=True, logger=logger)
                else:
                    console.info("Downloading data from [%s] to [%s] ... " % (url, local_path), end='', flush=True, logger=logger)

                net.download(url, local_path, size_in_bytes)
                console.println("[OK]")
            except urllib.error.URLError:
                logger.exception("Could not download [%s] to [%s]." % (url, local_path))

        # file must exist at this point -> verify
        if not os.path.isfile(local_path):
            if offline:
                raise exceptions.SystemSetupError(
                    "Cannot find %s. Please disable offline mode and retry again." % local_path)
            else:
                raise exceptions.SystemSetupError(
                    "Cannot download from %s to %s. Please verify that data are available at %s and "
                    "check your internet connection." % (url, local_path, url))

        actual_size = os.path.getsize(local_path)
        if size_in_bytes is not None and actual_size != size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected." %
                                       (local_path, actual_size, size_in_bytes))

        return True
Example #24
def configure_logging(cfg):
    start_time = rtime.to_iso8601(cfg.opts("system", "time.start"))
    logging_output = cfg.opts("system", "logging.output")
    profiling_enabled = cfg.opts("driver", "profiling")

    if logging_output == "file":
        log_file = application_log_file_path(start_time)
        log_dir = os.path.dirname(log_file)
        io.ensure_dir(log_dir)
        console.info("Writing logs to %s" % log_file)
        # there is an old log file lying around -> backup
        if os.path.exists(log_file):
            os.rename(log_file, "%s-bak-%d.log" % (log_file, int(os.path.getctime(log_file))))
        ch = logging.FileHandler(filename=log_file, mode="a")
    else:
        ch = logging.StreamHandler(stream=sys.stdout)

    log_level = logging.INFO
    ch.setLevel(log_level)
    formatter = logging.Formatter("%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    formatter.converter = time.gmtime
    ch.setFormatter(formatter)

    # Remove all handlers associated with the root logger object so we can start over with an entirely fresh log configuration
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    logging.root.addHandler(ch)
    logging.getLogger("elasticsearch").setLevel(logging.WARNING)

    if profiling_enabled:
        profile_file = "%s/profile.log" % application_log_dir_path()
        log_dir = os.path.dirname(profile_file)
        io.ensure_dir(log_dir)
        console.info("Writing driver profiling data to %s" % profile_file)
        handler = logging.FileHandler(filename=profile_file, encoding="UTF-8")
        handler.setFormatter(formatter)

        profile_logger = logging.getLogger("rally.profile")
        profile_logger.setLevel(logging.INFO)
        profile_logger.addHandler(handler)
Example #25
    def run(self, command, override_src_dir=None):
        src_dir = self.src_dir if override_src_dir is None else override_src_dir

        io.ensure_dir(self.log_dir)
        log_file = os.path.join(self.log_dir, "build.log")

        # we capture all output to a dedicated build log file

        build_cmd = "export JAVA_HOME={}; cd {}; {} >> {} 2>&1".format(self.java_home, src_dir, command, log_file)
        self.logger.info("Running build command [%s]", build_cmd)

        if process.run_subprocess(build_cmd):
            msg = "Executing '{}' failed. The last 20 lines in the build log file are:\n".format(command)
            msg += "=========================================================================================================\n"
            with open(log_file, "r", encoding="utf-8") as f:
                msg += "\t"
                msg += "\t".join(f.readlines()[-20:])
            msg += "=========================================================================================================\n"
            msg += "The full build log is available at [{}].".format(log_file)

            raise BuildError(msg)
Example #26
    def instrument_env(self, setup, candidate_id):
        log_root = "%s/%s" % (self._config.opts("system", "track.setup.root.dir"), self._config.opts("benchmarks", "metrics.log.dir"))
        io.ensure_dir(log_root)
        log_file = "%s/%s-%s.jfr" % (log_root, setup.name, candidate_id)

        logger.info("%s profiler: Writing telemetry data to [%s]." % (self.human_name, log_file))
        print("%s: Writing flight recording to %s" % (self.human_name, log_file))
        # this is more robust in case we want to use custom settings
        # see http://stackoverflow.com/questions/34882035/how-to-record-allocations-with-jfr-on-command-line
        #
        # in that case change to: -XX:StartFlightRecording=defaultrecording=true,settings=es-memory-profiling
        return {"ES_JAVA_OPTS": "-XX:+UnlockDiagnosticVMOptions -XX:+UnlockCommercialFeatures -XX:+DebugNonSafepoints -XX:+FlightRecorder "
                                "-XX:FlightRecorderOptions=disk=true,dumponexit=true,dumponexitpath=%s "
                                "-XX:StartFlightRecording=defaultrecording=true" % log_file}
Example #27
    def run(self, task):
        logger.info("Building Elasticsearch from sources in [%s]." %
                    self.src_dir)
        logger.info("Executing %s %s..." % (self.gradle, task))
        io.ensure_dir(self.log_dir)
        log_file = "%s/build.log" % self.log_dir

        # we capture all output to a dedicated build log file

        if process.run_subprocess(
                "export JAVA_HOME=%s; cd %s; %s %s >> %s 2>&1" %
            (self.java_home, self.src_dir, self.gradle, task, log_file)):
            msg = "Executing '%s %s' failed. The last 20 lines in the build log file are:\n" % (
                self.gradle, task)
            msg += "=========================================================================================================\n"
            with open(log_file, "r") as f:
                msg += "\t"
                msg += "\t".join(f.readlines()[-20:])
            msg += "=========================================================================================================\n"
            msg += "The full build log is available at [%s]." % log_file
            raise BuildError(msg)
Example #28
    def fetch(self):
        io.ensure_dir(self.distributions_root)
        distribution_path = "%s/elasticsearch-%s.tar.gz" % (self.distributions_root, self.version)
        download_url = self.repo.download_url
        logger.info("Resolved download URL [%s] for version [%s]" % (download_url, self.version))
        if not os.path.isfile(distribution_path) or not self.repo.cache:
            try:
                logger.info("Starting download of Elasticsearch [%s]" % self.version)
                progress = net.Progress("[INFO] Downloading Elasticsearch %s" % self.version)
                net.download(download_url, distribution_path, progress_indicator=progress)
                progress.finish()
                logger.info("Successfully downloaded Elasticsearch [%s]." % self.version)
            except urllib.error.HTTPError:
                console.println("[FAILED]")
                logging.exception("Cannot download Elasticsearch distribution for version [%s] from [%s]." % (self.version, download_url))
                raise exceptions.SystemSetupError("Cannot download Elasticsearch distribution from [%s]. Please check that the specified "
                                                  "version [%s] is correct." % (download_url, self.version))
        else:
            logger.info("Skipping download for version [%s]. Found an existing binary locally at [%s]." % (self.version, distribution_path))

        self.distribution_path = distribution_path
Example #29
    def instrument_env(self, car, candidate_id):
        io.ensure_dir(self.log_root)
        log_file = "%s/%s-%s.jfr" % (self.log_root, car.name, candidate_id)

        console.println("\n***************************************************************************\n")
        console.println("[WARNING] Java flight recorder is a commercial feature of the Oracle JDK.\n")
        console.println("You are using Java flight recorder which requires that you comply with\nthe licensing terms stated in:\n")
        console.println(console.format.link("http://www.oracle.com/technetwork/java/javase/terms/license/index.html"))
        console.println("\nBy using this feature you confirm that you comply with these license terms.\n")
        console.println("Otherwise, please abort and rerun Rally without the \"jfr\" telemetry device.")
        console.println("\n***************************************************************************\n")

        time.sleep(3)

        console.info("%s: Writing flight recording to [%s]" % (self.human_name, log_file), logger=logger)
        # this is more robust in case we want to use custom settings
        # see http://stackoverflow.com/questions/34882035/how-to-record-allocations-with-jfr-on-command-line
        #
        # in that case change to: -XX:StartFlightRecording=defaultrecording=true,settings=es-memory-profiling
        return {"ES_JAVA_OPTS": "-XX:+UnlockDiagnosticVMOptions -XX:+UnlockCommercialFeatures -XX:+DebugNonSafepoints -XX:+FlightRecorder "
                                "-XX:FlightRecorderOptions=disk=true,maxage=0s,maxsize=0,dumponexit=true,dumponexitpath=%s "
                                "-XX:StartFlightRecording=defaultrecording=true" % log_file}
Example #30
    def fetch(self):
        io.ensure_dir(self.distributions_root)
        download_url = net.add_url_param_elastic_no_kpi(self.repo.download_url)
        distribution_path = os.path.join(self.distributions_root, self.repo.file_name)
        self.logger.info("Resolved download URL [%s] for version [%s]", download_url, self.version)
        if not os.path.isfile(distribution_path) or not self.repo.cache:
            try:
                self.logger.info("Starting download of Elasticsearch [%s]", self.version)
                progress = net.Progress("[INFO] Downloading Elasticsearch %s" % self.version)
                net.download(download_url, distribution_path, progress_indicator=progress)
                progress.finish()
                self.logger.info("Successfully downloaded Elasticsearch [%s].", self.version)
            except urllib.error.HTTPError:
                self.logger.exception("Cannot download Elasticsearch distribution for version [%s] from [%s].", self.version, download_url)
                raise exceptions.SystemSetupError(
                    "Cannot download Elasticsearch distribution from [%s]. Please check that the specified "
                    "version [%s] is correct." % (download_url, self.version)
                )
        else:
            self.logger.info("Skipping download for version [%s]. Found an existing binary at [%s].", self.version, distribution_path)

        self.distribution_path = distribution_path
Example #31
def write_single_report(report_file, report_format, cwd, headers, data_plain, data_rich, write_header=True):
    if report_format == "markdown":
        formatter = format_as_markdown
    elif report_format == "csv":
        formatter = format_as_csv
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    print_internal(formatter(headers, data_rich))
    if len(report_file) > 0:
        normalized_report_file = rio.normalize_path(report_file, cwd)
        # ensure that the parent folder already exists when we try to write the file...
        rio.ensure_dir(rio.dirname(normalized_report_file))
        with open(normalized_report_file, mode="a+", encoding="utf-8") as f:
            f.writelines(formatter(headers, data_plain, write_header))
Example #32
    def instrument_env(self, car, candidate_id):
        io.ensure_dir(self.log_root)
        log_file = "%s/%s-%s.jfr" % (self.log_root, car.safe_name, candidate_id)

        console.println("\n***************************************************************************\n")
        console.println("[WARNING] Java flight recorder is a commercial feature of the Oracle JDK.\n")
        console.println("You are using Java flight recorder which requires that you comply with\nthe licensing terms stated in:\n")
        console.println(console.format.link("http://www.oracle.com/technetwork/java/javase/terms/license/index.html"))
        console.println("\nBy using this feature you confirm that you comply with these license terms.\n")
        console.println("Otherwise, please abort and rerun Rally without the \"jfr\" telemetry device.")
        console.println("\n***************************************************************************\n")

        time.sleep(3)

        console.info("%s: Writing flight recording to [%s]" % (self.human_name, log_file), logger=logger)

        java_opts = self.java_opts(log_file)

        logger.info("jfr: Adding JVM arguments: [%s].", java_opts)
        return {"ES_JAVA_OPTS": java_opts}
Example #33
def install_default_log_config():
    """
    Ensures a log configuration file is present on this machine. The default
    log configuration is based on the template in resources/logging.json.

    It also ensures that the default log path has been created so log files
    can be successfully opened in that directory.
    """
    log_config = log_config_path()
    if not io.exists(log_config):
        io.ensure_dir(io.dirname(log_config))
        source_path = io.normalize_path(os.path.join(os.path.dirname(__file__), "resources", "logging.json"))
        with open(log_config, "w", encoding="UTF-8") as target:
            with open(source_path, "r", encoding="UTF-8") as src:
                # Ensure we have a trailing path separator as after LOG_PATH there will only be the file name
                log_path = os.path.join(paths.logs(), "")
                # the logging path might contain backslashes that we need to escape
                log_path = io.escape_path(log_path)
                contents = src.read().replace("${LOG_PATH}", log_path)
                target.write(contents)
    io.ensure_dir(paths.logs())
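
io.escape_path exists because the substituted log path ends up inside a JSON file, where a literal Windows backslash would be read as the start of an escape sequence. A minimal sketch of such a helper (an assumption about its actual implementation):

def escape_path(path):
    # double every backslash so the path survives JSON parsing on Windows
    return path.replace("\\", "\\\\")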
Example #34
def _apply_config(source_root_path, target_root_path, config_vars):
    for root, dirs, files in os.walk(source_root_path):
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(root))

        relative_root = root[len(source_root_path) + 1:]
        absolute_target_root = os.path.join(target_root_path, relative_root)
        io.ensure_dir(absolute_target_root)

        for name in files:
            source_file = os.path.join(root, name)
            target_file = os.path.join(absolute_target_root, name)
            if plain_text(source_file):
                logger.info("Reading config template file [%s] and writing to [%s]." % (source_file, target_file))
                # automatically merge config snippets from plugins (e.g. if they want to add config to elasticsearch.yml)
                with open(target_file, mode="a", encoding="utf-8") as f:
                    f.write(_render_template(env, config_vars, source_file))
            else:
                logger.info("Treating [%s] as binary and copying as is to [%s]." % (source_file, target_file))
                shutil.copy(source_file, target_file)
Example #35
def configure_logging(cfg):
    logging_output = cfg.opts("system", "logging.output")
    profiling_enabled = cfg.opts("driver", "profiling")

    if logging_output == "file":
        log_file = application_log_file_path()
        log_dir = os.path.dirname(log_file)
        io.ensure_dir(log_dir)
        console.info("Writing logs to %s" % log_file)
        ch = logging.handlers.TimedRotatingFileHandler(filename=log_file, when="midnight", backupCount=14, encoding="UTF-8")
    else:
        ch = logging.StreamHandler(stream=sys.stdout)

    log_level = logging.INFO
    ch.setLevel(log_level)
    formatter = logging.Formatter("%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    formatter.converter = time.gmtime
    ch.setFormatter(formatter)

    # Remove all handlers associated with the root logger object so we can start over with an entirely fresh log configuration
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    logging.root.addHandler(ch)
    logging.getLogger("elasticsearch").setLevel(logging.WARNING)

    if profiling_enabled:
        profile_file = "%s/profile.log" % application_log_dir_path()
        log_dir = os.path.dirname(profile_file)
        io.ensure_dir(log_dir)
        console.info("Writing driver profiling data to %s" % profile_file)
        handler = logging.FileHandler(filename=profile_file, encoding="UTF-8")
        handler.setFormatter(formatter)

        profile_logger = logging.getLogger("rally.profile")
        profile_logger.setLevel(logging.INFO)
        profile_logger.addHandler(handler)
Example #36
    def _download(self, url, local_path, size_in_bytes=None, force_download=False, raise_url_error=False):
        offline = self._config.opts("system", "offline.mode")
        file_exists = os.path.isfile(local_path)

        if file_exists and not force_download:
            logger.info("[%s] already exists locally. Skipping download." % local_path)
            return

        if not offline:
            logger.info("Downloading from [%s] to [%s]." % (url, local_path))
            try:
                io.ensure_dir(os.path.dirname(local_path))
                if size_in_bytes:
                    size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                    # ensure output appears immediately
                    print("Downloading data from %s (%s MB) ... " % (url, size_in_mb), end='', flush=True)
                if url.startswith("http"):
                    net.download(url, local_path)
                elif url.startswith("s3"):
                    self._do_download_via_s3(url, local_path, size_in_bytes)
                else:
                    raise exceptions.SystemSetupError("Cannot download benchmark data from [%s]. Only http(s) and s3 are supported." % url)
                if size_in_bytes:
                    print("Done")
            except urllib.error.URLError:
                logger.exception("Could not download [%s] to [%s]." % (url, local_path))
                if raise_url_error:
                    raise

        # file must exist at this point -> verify
        if not os.path.isfile(local_path):
            if offline:
                raise exceptions.SystemSetupError("Cannot find %s. Please disable offline mode and retry again." % local_path)
            else:
                raise exceptions.SystemSetupError("Could not download from %s to %s. Please verify that data are available at %s and "
                                                  "check your internet connection." % (url, local_path, url))
Example #37
def from_distribution(version, repo_name, distribution_config, distributions_root, plugins):
    if version.strip() == "":
        raise exceptions.SystemSetupError("Could not determine version. Please specify the Elasticsearch distribution "
                                          "to download with the command line parameter --distribution-version. "
                                          "E.g. --distribution-version=5.0.0")
    io.ensure_dir(distributions_root)
    distribution_path = "%s/elasticsearch-%s.tar.gz" % (distributions_root, version)

    repo = DistributionRepository(repo_name, distribution_config, version)

    download_url = repo.download_url
    logger.info("Resolved download URL [%s] for version [%s]" % (download_url, version))
    if not os.path.isfile(distribution_path) or not repo.cache:
        try:
            logger.info("Starting download of Elasticsearch [%s]" % version)
            progress = net.Progress("[INFO] Downloading Elasticsearch %s" % version)
            net.download(download_url, distribution_path, progress_indicator=progress)
            progress.finish()
            logger.info("Successfully downloaded Elasticsearch [%s]." % version)
        except urllib.error.HTTPError:
            console.println("[FAILED]")
            logging.exception("Cannot download Elasticsearch distribution for version [%s] from [%s]." % (version, download_url))
            raise exceptions.SystemSetupError("Cannot download Elasticsearch distribution from [%s]. Please check that the specified "
                                              "version [%s] is correct." % (download_url, version))
    else:
        logger.info("Skipping download for version [%s]. Found an existing binary locally at [%s]." % (version, distribution_path))

    binaries = {"elasticsearch": distribution_path}
    for plugin in plugins:
        # if we have multiple configurations for a plugin we will override entries here, but as this is always the same
        # key-value pair, this is ok.
        plugin_url = repo.plugin_download_url(plugin.name)
        if plugin_url:
            binaries[plugin.name] = plugin_url

    return binaries
Example #38
    def _exec(self, task_key):
        src_dir = self._config.opts("source", "local.src.dir")
        logger.info("Building Elasticsearch from sources in [%s]." % src_dir)
        gradle = self._config.opts("build", "gradle.bin")
        task = self._config.opts("build", task_key)

        log_root = self._config.opts("system", "log.dir")
        build_log_dir = self._config.opts("build", "log.dir")
        log_dir = "%s/%s" % (log_root, build_log_dir)

        logger.info("Executing %s %s..." % (gradle, task))
        io.ensure_dir(log_dir)
        log_file = "%s/build.%s.log" % (log_dir, task_key)

        # we capture all output to a dedicated build log file
        if process.run_subprocess("cd %s; %s %s > %s 2>&1" % (src_dir, gradle, task, log_file)):
            msg = "Executing '%s %s' failed. Here are the last 20 lines in the build log file:\n" % (gradle, task)
            msg += "=========================================================================================================\n"
            with open(log_file, "r") as f:
                msg += "\t"
                msg += "\t".join(f.readlines()[-20:])
            msg += "=========================================================================================================\n"
            msg += "The full build log is available at [%s]." % log_file
            raise BuildError(msg)
Example #39
    def _exec(self, task_key):
        src_dir = self._config.opts("source", "local.src.dir")
        gradle = self._config.opts("build", "gradle.bin")
        task = self._config.opts("build", task_key)

        log_root = self._config.opts("system", "log.dir")
        build_log_dir = self._config.opts("build", "log.dir")
        log_dir = "%s/%s" % (log_root, build_log_dir)

        logger.info("Executing %s %s..." % (gradle, task))
        io.ensure_dir(log_dir)
        log_file = "%s/build.%s.log" % (log_dir, task_key)

        # we capture all output to a dedicated build log file
        if process.run_subprocess("cd %s; %s %s > %s 2>&1" % (src_dir, gradle, task, log_file)):
            msg = "Executing '%s %s' failed. Here are the last 20 lines in the build log file:\n" % (gradle, task)
            msg += "=========================================================================================================\n"
            with open(log_file, "r") as f:
                msg += "\t"
                msg += "\t".join(f.readlines()[-20:])
            msg += "=========================================================================================================\n"
            msg += "The full build log is available at [%s]." % log_file
            raise BuildError(msg)
Example #40
    def write_single_report(self, report_format, report_file, headers, data, force_cmd_line_output=True):
        if report_format == "markdown":
            report = tabulate.tabulate(data, headers=headers, tablefmt="pipe", numalign="right", stralign="right")
        elif report_format == "csv":
            with io.StringIO() as out:
                writer = csv.writer(out)
                writer.writerow(headers)
                for metric_record in data:
                    writer.writerow(metric_record)
                report = out.getvalue()
        else:
            raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

        if force_cmd_line_output:
            print_internal(report)
        if len(report_file) > 0:
            normalized_report_file = rio.normalize_path(report_file)
            logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                        (normalized_report_file, report_file, report_format))
            if force_cmd_line_output:
                print("\nWriting report also to '%s'" % normalized_report_file)
            # ensure that the parent folder already exists when we try to write the file...
            rio.ensure_dir(rio.dirname(normalized_report_file))
            with open(normalized_report_file, mode="w", encoding="UTF-8") as f:
                f.writelines(report)
Exemplo n.º 42
0
    def _download(self, url, local_path, size_in_bytes=None, force_download=False, raise_url_error=False):
        offline = self._config.opts("system", "offline.mode")
        file_exists = os.path.isfile(local_path)

        if file_exists and not force_download:
            logger.info("[%s] already exists locally. Skipping download." % local_path)
            return

        if not offline:
            logger.info("Downloading from [%s] to [%s]." % (url, local_path))
            try:
                io.ensure_dir(os.path.dirname(local_path))
                if size_in_bytes:
                    size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                    # ensure output appears immediately
                    print("Downloading data from %s (%s MB) ... " % (url, size_in_mb), end='', flush=True)
                if url.startswith("http"):
                    net.download(url, local_path)
                elif url.startswith("s3"):
                    self._do_download_via_s3(url, local_path, size_in_bytes)
                else:
                    raise exceptions.SystemSetupError("Cannot download benchmark data from [%s]. Only http(s) and s3 are supported." % url)
                if size_in_bytes:
                    print("Done")
            except urllib.error.URLError:
                logger.exception("Could not download [%s] to [%s]." % (url, local_path))
                if raise_url_error:
                    raise

        # file must exist at this point -> verify
        if not os.path.isfile(local_path):
            if offline:
                raise exceptions.SystemSetupError("Cannot find %s. Please disable offline mode and retry again." % local_path)
            else:
                raise exceptions.SystemSetupError("Could not download from %s to %s. Please verify that data are available at %s and "
                                                  "check your internet connection." % (url, local_path, url))
Example No. 43
    def install(self, binary):
        logger.info("Preparing candidate locally in [%s]." % self.install_dir)
        io.ensure_dir(self.install_dir)
        io.ensure_dir(self.node_log_dir)
        io.ensure_dir(self.heap_dump_dir)

        logger.info("Unzipping %s to %s" % (binary, self.install_dir))
        io.decompress(binary, self.install_dir)
        self.es_home_path = glob.glob("%s/elasticsearch*" % self.install_dir)[0]
        self.data_paths = self._data_paths()
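
A standard-library approximation of the unpack-and-locate step above (paths are hypothetical; Rally's io.decompress also handles other archive formats):

import glob
import os
import tarfile

install_dir = "/tmp/rally-install"          # hypothetical install directory
binary = "/tmp/elasticsearch-5.0.0.tar.gz"  # hypothetical distribution archive

os.makedirs(install_dir, exist_ok=True)
with tarfile.open(binary) as archive:
    archive.extractall(install_dir)
# the tarball contains a single top-level "elasticsearch-<version>" directory
es_home_path = glob.glob("%s/elasticsearch*" % install_dir)[0]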
Example No. 44
    def prepare(self, binaries):
        # we need to allow other users to write to these directories due to Docker.
        #
        # Although os.mkdir passes 0o777 by default, mkdir(2) uses `mode & ~umask & 0777` to determine the final flags and
        # hence we need to modify the process' umask here. For details see https://linux.die.net/man/2/mkdir.
        previous_umask = os.umask(0)
        try:
            io.ensure_dir(self.install_dir)
            io.ensure_dir(self.node_log_dir)
            io.ensure_dir(self.data_paths[0])
        finally:
            os.umask(previous_umask)

        mounts = {}

        car_config_path = self.car.config_path
        for root, dirs, files in os.walk(car_config_path):
            env = jinja2.Environment(loader=jinja2.FileSystemLoader(root))

            relative_root = root[len(car_config_path) + 1:]
            absolute_target_root = os.path.join(self.install_dir, relative_root)
            io.ensure_dir(absolute_target_root)

            for name in files:
                source_file = os.path.join(root, name)
                target_file = os.path.join(absolute_target_root, name)
                mounts[target_file] = os.path.join("/usr/share/elasticsearch", relative_root, name)
                if plain_text(source_file):
                    logger.info("Reading config template file [%s] and writing to [%s]." % (source_file, target_file))
                    with open(target_file, "w") as f:
                        f.write(_render_template(env, self.config_vars, source_file))
                else:
                    logger.info("Treating [%s] as binary and copying as is to [%s]." % (source_file, target_file))
                    shutil.copy(source_file, target_file)

        docker_cfg = self._render_template_from_file(self.docker_vars(mounts))
        logger.info("Starting Docker container with configuration:\n%s" % docker_cfg)

        with open(self.binary_path, "wt") as f:
            f.write(docker_cfg)

        return NodeConfiguration(self.car, self.node_ip, self.node_name, self.node_root_dir, self.binary_path,
                                 self.node_log_dir, self.data_paths)
Example No. 45
    def prepare(self, binaries):
        # we need to allow other users to write to these directories due to Docker.
        #
        # Although os.mkdir passes 0o777 by default, mkdir(2) uses `mode & ~umask & 0777` to determine the final flags and
        # hence we need to modify the process' umask here. For details see https://linux.die.net/man/2/mkdir.
        previous_umask = os.umask(0)
        try:
            io.ensure_dir(self.install_dir)
            io.ensure_dir(self.node_log_dir)
            io.ensure_dir(self.data_paths[0])
        finally:
            os.umask(previous_umask)

        mounts = {}

        for car_config_path in self.car.config_paths:
            for root, dirs, files in os.walk(car_config_path):
                env = jinja2.Environment(loader=jinja2.FileSystemLoader(root))

                relative_root = root[len(car_config_path) + 1:]
                absolute_target_root = os.path.join(self.install_dir, relative_root)
                io.ensure_dir(absolute_target_root)

                for name in files:
                    source_file = os.path.join(root, name)
                    target_file = os.path.join(absolute_target_root, name)
                    mounts[target_file] = os.path.join("/usr/share/elasticsearch", relative_root, name)
                    if plain_text(source_file):
                        logger.info("Reading config template file [%s] and writing to [%s]." % (source_file, target_file))
                        with open(target_file, "a") as f:
                            f.write(_render_template(env, self.config_vars, source_file))
                    else:
                        logger.info("Treating [%s] as binary and copying as is to [%s]." % (source_file, target_file))
                        shutil.copy(source_file, target_file)

        docker_cfg = self._render_template_from_file(self.docker_vars(mounts))
        logger.info("Starting Docker container with configuration:\n%s" % docker_cfg)

        with open(self.binary_path, "wt") as f:
            f.write(docker_cfg)

        return NodeConfiguration(self.car, self.node_ip, self.node_name, self.node_root_dir, self.binary_path,
                                 self.node_log_dir, self.data_paths)
Example No. 46
 def store(self, config):
     io.ensure_dir(self.config_dir)
     with open(self.location, "w") as configfile:
         config.write(configfile)
Example No. 47
 def instrument_env(self, car, candidate_id):
     io.ensure_dir(self.log_root)
     log_file = "%s/%s-%s.jit.log" % (self.log_root, car.safe_name, candidate_id)
     console.info("%s: Writing JIT compiler log to [%s]" % (self.human_name, log_file), logger=logger)
     return {"ES_JAVA_OPTS": "-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation "
                             "-XX:LogFile=%s -XX:+PrintAssembly" % log_file}
Example No. 48
def clone(src, remote):
    io.ensure_dir(src)
    # Don't swallow subprocess output, user might need to enter credentials...
    if process.run_subprocess_with_logging("git clone %s %s" % (remote, io.escape_path(src))):
        raise exceptions.SupplyError("Could not clone from [%s] to [%s]" % (remote, src))
Example No. 49
 def store(self, config):
     io.ensure_dir(self._config_dir())
     with open(self.location, "w") as configfile:
         config.write(configfile)
Example No. 50
def configure_actor_logging():
    log_dir = "%s/.rally/logs" % os.path.expanduser("~")
    io.ensure_dir(log_dir)

    # actor_log_handler = {"class": "logging.handlers.SysLogHandler", "address": "/var/run/syslog"}
    # actor_messages_handler = {"class": "logging.handlers.SysLogHandler", "address": "/var/run/syslog"}

    return {
        "version": 1,
        "formatters": {
            "normal": {
                "fmt": "%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "()": configure_utc_formatter
            },
            "actor": {
                "fmt": "%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(actorAddress)s => %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "()": configure_utc_formatter
            }
        },
        "filters": {
            "isActorLog": {
                "()": ActorLogFilter
            },
            "notActorLog": {
                "()": NotActorLogFilter
            }
        },
        "handlers": {
            "rally_log_handler": {
                "class": "logging.handlers.TimedRotatingFileHandler",
                "filename": "%s/rally-actors.log" % log_dir,
                "when": "midnight",
                "backupCount": 14,
                "encoding": "UTF-8",
                "formatter": "normal",
                "filters": ["notActorLog"],
                "level": root_log_level
            },
            "actor_log_handler": {
                "class": "logging.handlers.TimedRotatingFileHandler",
                "filename": "%s/rally-actor-messages.log" % log_dir,
                "when": "midnight",
                "backupCount": 14,
                "encoding": "UTF-8",
                "formatter": "actor",
                "filters": ["isActorLog"],
                "level": root_log_level
            }
        },
        "root": {
            "handlers": ["rally_log_handler", "actor_log_handler"],
            "level": root_log_level
        },
        "loggers": {
            "elasticsearch": {
                "handlers": ["rally_log_handler"],
                "level": es_log_level,
                # don't let the root logger handle it again
                "propagate": 0
            }
        }
    }
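
The returned dictionary is meant to be fed to logging.config.dictConfig. The "()" keys designate user-defined factories, which is also why the formatter options can use the non-standard key "fmt": dictConfig passes all remaining keys as keyword arguments to the factory. A self-contained sketch of that mechanism with made-up stand-ins for the filter and formatter helpers:

import logging
import logging.config
import time


class WarningsOnlyFilter(logging.Filter):
    # stand-in for ActorLogFilter / NotActorLogFilter above
    def filter(self, record):
        return record.levelno >= logging.WARNING


def utc_formatter(fmt=None, datefmt=None):
    # stand-in for configure_utc_formatter: formats timestamps in UTC
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    formatter.converter = time.gmtime
    return formatter


logging.config.dictConfig({
    "version": 1,
    "formatters": {
        "normal": {
            "fmt": "%(asctime)s %(levelname)s %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
            "()": utc_formatter
        }
    },
    "filters": {"warningsOnly": {"()": WarningsOnlyFilter}},
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "normal",
            "filters": ["warningsOnly"],
            "level": "INFO"
        }
    },
    "root": {"handlers": ["console"], "level": "INFO"}
})

logging.getLogger(__name__).warning("visible")    # passes the filter
logging.getLogger(__name__).info("filtered out")  # dropped by the filter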
Example No. 51
File: git.py Project: levylll/rally
def clone(src, remote):
    io.ensure_dir(src)
    # Don't swallow subprocess output, user might need to enter credentials...
    if process.run_subprocess("git clone %s %s" % (remote, src)):
        raise exceptions.SupplyError("Could not clone from '%s' to '%s'" %
                                     (remote, src))
Example No. 52
def configure_actor_logging():
    log_dir = "%s/.rally/logs" % os.path.expanduser("~")
    io.ensure_dir(log_dir)

    # actor_log_handler = {"class": "logging.handlers.SysLogHandler", "address": "/var/run/syslog"}
    # actor_messages_handler = {"class": "logging.handlers.SysLogHandler", "address": "/var/run/syslog"}

    return {
        "version": 1,
        "formatters": {
            "normal": {
                "fmt":
                "%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "()": configure_utc_formatter
            },
            "actor": {
                "fmt":
                "%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(actorAddress)s => %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "()": configure_utc_formatter
            }
        },
        "filters": {
            "isActorLog": {
                "()": ActorLogFilter
            },
            "notActorLog": {
                "()": NotActorLogFilter
            }
        },
        "handlers": {
            "rally_log_handler": {
                "class": "logging.handlers.TimedRotatingFileHandler",
                "filename": "%s/rally-actors.log" % log_dir,
                "when": "midnight",
                "backupCount": 14,
                "encoding": "UTF-8",
                "formatter": "normal",
                "filters": ["notActorLog"],
                "level": root_log_level
            },
            "actor_log_handler": {
                "class": "logging.handlers.TimedRotatingFileHandler",
                "filename": "%s/rally-actor-messages.log" % log_dir,
                "when": "midnight",
                "backupCount": 14,
                "encoding": "UTF-8",
                "formatter": "actor",
                "filters": ["isActorLog"],
                "level": root_log_level
            }
        },
        "root": {
            "handlers": ["rally_log_handler", "actor_log_handler"],
            "level": root_log_level
        },
        "loggers": {
            "elasticsearch": {
                "handlers": ["rally_log_handler"],
                "level": es_log_level,
                # don't let the root logger handle it again
                "propagate": 0
            }
        }
    }
Example No. 53
def migrate(config_file, current_version, target_version, out=print, i=input):
    prompter = Prompter(i=i, o=out, assume_defaults=False)
    logger.info("Upgrading configuration from version [%s] to [%s]." %
                (current_version, target_version))
    # Something is really fishy. We don't want to downgrade the configuration.
    if current_version >= target_version:
        raise ConfigError(
            "The existing config file is available in a later version already. Expected version <= [%s] but found [%s]"
            % (target_version, current_version))
    # but first a backup...
    config_file.backup()
    config = config_file.load(interpolation=None)

    if current_version == 0 and target_version > current_version:
        logger.info("Migrating config from version [0] to [1]")
        current_version = 1
        config["meta"] = {}
        config["meta"]["config.version"] = str(current_version)
        # in version 1 we changed some directories from being absolute to being relative
        config["system"]["log.root.dir"] = "logs"
        config["provisioning"]["local.install.dir"] = "install"
        config["reporting"]["report.base.dir"] = "reports"
    if current_version == 1 and target_version > current_version:
        logger.info("Migrating config from version [1] to [2]")
        current_version = 2
        config["meta"]["config.version"] = str(current_version)
        # no need to ask the user now if we are about to upgrade to version 4
        config["reporting"]["datastore.type"] = "in-memory"
        config["reporting"]["datastore.host"] = ""
        config["reporting"]["datastore.port"] = ""
        config["reporting"]["datastore.secure"] = ""
        config["reporting"]["datastore.user"] = ""
        config["reporting"]["datastore.password"] = ""
        config["system"]["env.name"] = "local"
    if current_version == 2 and target_version > current_version:
        logger.info("Migrating config from version [2] to [3]")
        current_version = 3
        config["meta"]["config.version"] = str(current_version)
        # Remove obsolete settings
        config["reporting"].pop("report.base.dir")
        config["reporting"].pop("output.html.report.filename")
    if current_version == 3 and target_version > current_version:
        root_dir = config["system"]["root.dir"]
        out("""
            *****************************************************************************************

            You have an old configuration of Rally. Rally now has a much simpler setup
            routine which will autodetect lots of settings for you, and it no longer
            requires you to set up a metrics store.

            Rally will now migrate your configuration, but if you don't need advanced features
            like a metrics store, you should delete the configuration directory:

              rm -rf {0}

            and then rerun Rally's configuration routine:

              {1} configure

            Please also note that you have {2:.1f} GB of data in your current benchmark directory at

              {3}

            You might want to clean up this directory as well.

            For more details please see {4}

            *****************************************************************************************

            Pausing for 10 seconds to let you consider this message.
            """.format(config_file.config_dir, PROGRAM_NAME, convert.bytes_to_gb(io.get_size(root_dir)),
                       root_dir, console.format.link("https://github.com/elastic/rally/blob/master/CHANGELOG.md#030")))
        time.sleep(10)
        logger.info("Migrating config from version [3] to [4]")
        current_version = 4
        config["meta"]["config.version"] = str(current_version)
        if len(config["reporting"]["datastore.host"]) > 0:
            config["reporting"]["datastore.type"] = "elasticsearch"
        else:
            config["reporting"]["datastore.type"] = "in-memory"
        # Remove obsolete settings
        config["build"].pop("maven.bin")
        config["benchmarks"].pop("metrics.stats.disk.device")

    if current_version == 4 and target_version > current_version:
        config["tracks"] = {}
        config["tracks"][
            "default.url"] = "https://github.com/elastic/rally-tracks"
        current_version = 5
        config["meta"]["config.version"] = str(current_version)

    if current_version == 5 and target_version > current_version:
        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(False)
        current_version = 6
        config["meta"]["config.version"] = str(current_version)

    if current_version == 6 and target_version > current_version:
        # Remove obsolete settings
        config.pop("provisioning")
        config["system"].pop("log.root.dir")
        current_version = 7
        config["meta"]["config.version"] = str(current_version)

    if current_version == 7 and target_version > current_version:
        # move [system][root.dir] to [node][root.dir]
        if "node" not in config:
            config["node"] = {}
        config["node"]["root.dir"] = config["system"].pop("root.dir")
        # also move all references!
        for section in config:
            for k, v in config[section].items():
                config[section][k] = v.replace("${system:root.dir}", "${node:root.dir}")
        current_version = 8
        config["meta"]["config.version"] = str(current_version)
    if current_version == 8 and target_version > current_version:
        config["teams"] = {}
        config["teams"][
            "default.url"] = "https://github.com/elastic/rally-teams"
        current_version = 9
        config["meta"]["config.version"] = str(current_version)
    if current_version == 9 and target_version > current_version:
        config["distributions"] = {}
        config["distributions"]["release.1.url"] = "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-" \
                                                   "{{VERSION}}.tar.gz"
        config["distributions"]["release.2.url"] = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/" \
                                                   "distribution/tar/elasticsearch/{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"][
            "release.url"] = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.cache"] = "true"
        current_version = 10
        config["meta"]["config.version"] = str(current_version)
    if current_version == 10 and target_version > current_version:
        config["runtime"]["java.home"] = config["runtime"].pop("java8.home")
        current_version = 11
        config["meta"]["config.version"] = str(current_version)
    if current_version == 11 and target_version > current_version:
        # As this is a rather complex migration, we log more than usual to understand potential migration problems better.
        if "source" in config:
            if "local.src.dir" in config["source"]:
                previous_root = config["source"].pop("local.src.dir")
                logger.info("Set [source][local.src.dir] to [%s]." %
                            previous_root)
                # if this directory was Rally's default location, then move it on the file system because to allow for checkouts of plugins
                # in the sibling directory.
                if previous_root == os.path.join(config["node"]["root.dir"],
                                                 "src"):
                    new_root_dir_all_sources = previous_root
                    new_es_sub_dir = "elasticsearch"
                    new_root = os.path.join(new_root_dir_all_sources, new_es_sub_dir)
                    # Only attempt to move if the directory exists. Users may have configured source benchmarks but
                    # never run one; in that case the source directory will not exist yet.
                    if io.exists(previous_root):
                        logger.info("Previous source directory was at Rally's default location [%s]. Moving to [%s]."
                                    % (previous_root, new_root))
                        try:
                            # we need to do this in two steps as we need to move the sources to a subdirectory
                            tmp_path = io.normalize_path(os.path.join(new_root_dir_all_sources, os.pardir, "tmp_src_mig"))
                            os.rename(previous_root, tmp_path)
                            io.ensure_dir(new_root)
                            os.rename(tmp_path, new_root)
                        except OSError:
                            logger.exception("Could not move source directory from [%s] to [%s]." % (previous_root, new_root))
                            # A warning is sufficient as Rally should just do a fresh checkout if moving did not work.
                            console.warn("Elasticsearch source directory could not be moved from [%s] to [%s]. "
                                         "Please check the logs." % (previous_root, new_root))
                    else:
                        logger.info("Source directory is configured at Rally's default location [%s] but does not exist yet."
                                    % previous_root)
                else:
                    logger.info("Previous source directory was the custom directory [%s]." % previous_root)
                    new_root_dir_all_sources = io.normalize_path(os.path.join(previous_root, os.path.pardir))
                    # name of the elasticsearch project directory.
                    new_es_sub_dir = io.basename(previous_root)

                logger.info("Setting [node][src.root.dir] to [%s]." %
                            new_root_dir_all_sources)
                config["node"]["src.root.dir"] = new_root_dir_all_sources
                logger.info(
                    "Setting [source][elasticsearch.src.subdir] to [%s]" %
                    new_es_sub_dir)
                config["source"]["elasticsearch.src.subdir"] = new_es_sub_dir
            else:
                logger.info(
                    "Key [local.src.dir] not found. Advancing without changes."
                )
        else:
            logger.info(
                "No section named [source] found in config. Advancing without changes."
            )
        current_version = 12
        config["meta"]["config.version"] = str(current_version)

    if current_version == 12 and target_version > current_version:
        # the current configuration allows benchmarking from sources
        if "build" in config and "gradle.bin" in config["build"]:
            java_9_home = io.guess_java_home(major_version=9)
            from esrally.utils import jvm
            if java_9_home and not jvm.is_early_access_release(java_9_home):
                logger.debug("Autodetected a JDK 9 installation at [%s]" % java_9_home)
                if "runtime" not in config:
                    config["runtime"] = {}
                config["runtime"]["java9.home"] = java_9_home
            else:
                logger.debug("Could not autodetect a JDK 9 installation. Checking whether [java.home] already points to a JDK 9.")
                detected = False
                if "runtime" in config:
                    java_home = config["runtime"]["java.home"]
                    if jvm.major_version(java_home) == 9 and not jvm.is_early_access_release(java_home):
                        config["runtime"]["java9.home"] = java_home
                        detected = True

                if not detected:
                    logger.debug("Could not autodetect a JDK 9 installation. Asking user.")
                    raw_java_9_home = prompter.ask_property("Enter the JDK 9 root directory",
                                                            check_path_exists=True, mandatory=False)
                    if raw_java_9_home and jvm.major_version(raw_java_9_home) == 9 \
                            and not jvm.is_early_access_release(raw_java_9_home):
                        config["runtime"]["java9.home"] = io.normalize_path(raw_java_9_home)
                    else:
                        out("********************************************************************************")
                        out("You don't have a valid JDK 9 installation and cannot benchmark source builds.")
                        out("")
                        out("You can still benchmark binary distributions, e.g. with:")
                        out("")
                        out("  %s --distribution-version=6.0.0" % PROGRAM_NAME)
                        out("********************************************************************************")
                        out("")

        current_version = 13
        config["meta"]["config.version"] = str(current_version)

    # all migrations done
    config_file.store(config)
    logger.info("Successfully self-upgraded configuration to version [%s]" %
                target_version)
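
Each block above upgrades exactly one version and then falls through to the next check, so a config at version N is migrated to the target in a single pass; a distilled sketch of that pattern (the sections and keys here are hypothetical):

def migrate_sketch(config, current_version, target_version):
    # every step bumps the version by one and records it, so later steps see the new version
    if current_version == 0 and target_version > current_version:
        config.setdefault("meta", {})
        current_version = 1
        config["meta"]["config.version"] = str(current_version)
    if current_version == 1 and target_version > current_version:
        config["defaults"] = {}  # hypothetical section introduced in version 2
        current_version = 2
        config["meta"]["config.version"] = str(current_version)
    return config


print(migrate_sketch({}, 0, 2))  # {'meta': {'config.version': '2'}, 'defaults': {}}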
Example No. 54
 def store(self, config):
     io.ensure_dir(self.config_dir)
     with open(self.location, "wt", encoding="utf-8") as configfile:
         config.write(configfile)
Example No. 55
    def download_corpus(root_url, target_path, size_in_bytes, track_name,
                        offline, test_mode):
        file_name = os.path.basename(target_path)

        if not root_url:
            raise exceptions.DataError("%s is missing and it cannot be downloaded because no source URL is provided in the track."
                                       % target_path)
        if offline:
            raise exceptions.SystemSetupError("Cannot find %s. Please disable offline mode and retry." % target_path)

        data_url = "%s/%s" % (source_root_url, file_name)
        try:
            io.ensure_dir(os.path.dirname(target_path))
            if size_in_bytes:
                size_in_mb = round(convert.bytes_to_mb(size_in_bytes))
                logger.info("Downloading data from [%s] (%s MB) to [%s]." % (data_url, size_in_mb, target_path))
            else:
                logger.info("Downloading data from [%s] to [%s]." % (data_url, target_path))

            # we want a more accurate download progress indicator as these files are typically very large
            progress = net.Progress("[INFO] Downloading data for track %s" % track_name, accuracy=1)
            net.download(data_url, target_path, size_in_bytes, progress_indicator=progress)
            progress.finish()
            logger.info("Downloaded data from [%s] to [%s]." % (data_url, target_path))
        except urllib.error.HTTPError as e:
            if e.code == 404 and test_mode:
                raise exceptions.DataError("Track [%s] does not support test mode. Please ask the track author to add it or "
                                           "disable test mode and retry." % track_name)
            else:
                msg = "Could not download [%s] to [%s]" % (data_url, target_path)
                if e.reason:
                    msg += " (HTTP status: %s, reason: %s)" % (str(e.code), e.reason)
                else:
                    msg += " (HTTP status: %s)" % str(e.code)
                raise exceptions.DataError(msg)
        except urllib.error.URLError:
            logger.exception("Could not download [%s] to [%s]." % (data_url, target_path))
            raise exceptions.DataError("Could not download [%s] to [%s]." % (data_url, target_path))

        if not os.path.isfile(target_path):
            raise exceptions.SystemSetupError("Cannot download from %s to %s. Please verify that data are available at %s and "
                                              "check your internet connection." % (data_url, target_path, data_url))

        actual_size = os.path.getsize(target_path)
        if size_in_bytes is not None and actual_size != size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Downloaded [%d] bytes but [%d] bytes are expected."
                                       % (target_path, actual_size, size_in_bytes))
Example No. 56
def configure_logging(cfg):
    start_time = rtime.to_iso8601(cfg.opts("system", "time.start"))
    logging_output = cfg.opts("system", "logging.output")
    profiling_enabled = cfg.opts("driver", "profiling")

    if logging_output == "file":
        log_file = application_log_file_path(start_time)
        log_dir = os.path.dirname(log_file)
        io.ensure_dir(log_dir)
        console.info("Writing logs to %s" % log_file)
        # there is an old log file lying around -> backup
        if os.path.exists(log_file):
            os.rename(log_file, "%s-bak-%d.log" % (log_file, int(os.path.getctime(log_file))))
        ch = logging.FileHandler(filename=log_file, mode="a")
    else:
        ch = logging.StreamHandler(stream=sys.stdout)

    log_level = logging.INFO
    ch.setLevel(log_level)
    formatter = logging.Formatter("%(asctime)s,%(msecs)d PID:%(process)d %(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    formatter.converter = time.gmtime
    ch.setFormatter(formatter)

    # Remove all handlers associated with the root logger object so we can start over with an entirely fresh log configuration
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    logging.root.addHandler(ch)
    logging.getLogger("elasticsearch").setLevel(logging.WARNING)

    # Avoid failures such as the following (shortened a bit):
    #
    # ---------------------------------------------------------------------------------------------
    # "esrally/driver/driver.py", line 220, in create_client
    # "thespian-3.8.0-py3.5.egg/thespian/actors.py", line 187, in createActor
    # [...]
    # "thespian-3.8.0-py3.5.egg/thespian/system/multiprocCommon.py", line 348, in _startChildActor
    # "python3.5/multiprocessing/process.py", line 105, in start
    # "python3.5/multiprocessing/context.py", line 267, in _Popen
    # "python3.5/multiprocessing/popen_fork.py", line 18, in __init__
    # sys.stderr.flush()
    #
    # OSError: [Errno 5] Input/output error
    # ---------------------------------------------------------------------------------------------
    #
    # This is caused by urllib3 wanting to send warnings about insecure SSL connections to stderr when we disable them (in client.py) with:
    #
    #   urllib3.disable_warnings()
    #
    # The filtering functionality of the warnings module causes the error above on some systems. If we instead redirect the warning output
    # to our logs instead of stderr (which is the warnings module's default), we can disable warnings safely.
    logging.captureWarnings(True)

    if profiling_enabled:
        profile_file = "%s/profile.log" % application_log_dir_path()
        log_dir = os.path.dirname(profile_file)
        io.ensure_dir(log_dir)
        console.info("Writing driver profiling data to %s" % profile_file)
        handler = logging.FileHandler(filename=profile_file, encoding="UTF-8")
        handler.setFormatter(formatter)

        profile_logger = logging.getLogger("rally.profile")
        profile_logger.setLevel(logging.INFO)
        profile_logger.addHandler(handler)
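
The logging.captureWarnings(True) call reroutes the warnings module into logging (via the "py.warnings" logger) instead of stderr; a minimal demonstration:

import logging
import warnings

logging.basicConfig(level=logging.INFO)
logging.captureWarnings(True)
# emitted through the "py.warnings" logger at WARNING level, not written to stderr directly
warnings.warn("insecure SSL connection")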
Example No. 57
def create(cfg,
           metrics_store,
           single_machine=True,
           cluster_settings=None,
           sources=False,
           build=False,
           distribution=False,
           external=False,
           docker=False):
    challenge_root_path = paths.race_root(cfg)
    install_dir = "%s/install" % challenge_root_path
    log_dir = "%s/logs" % challenge_root_path
    io.ensure_dir(log_dir)

    if sources:
        try:
            src_dir = cfg.opts("source", "local.src.dir")
        except config.ConfigError:
            logger.exception("Cannot determine source directory")
            raise exceptions.SystemSetupError("You cannot benchmark Elasticsearch from sources. Are you missing Gradle? "
                                              "Please install all prerequisites and reconfigure Rally with %s configure"
                                              % PROGRAM_NAME)

        remote_url = cfg.opts("source", "remote.repo.url")
        revision = cfg.opts("mechanic", "source.revision")
        gradle = cfg.opts("build", "gradle.bin")
        java_home = cfg.opts("runtime", "java8.home")

        s = lambda: supplier.from_sources(remote_url, src_dir, revision, gradle, java_home, log_dir, build)
        p = provisioner.local_provisioner(cfg, cluster_settings, install_dir, single_machine)
        l = launcher.InProcessLauncher(cfg, metrics_store, challenge_root_path, log_dir)
    elif distribution:
        version = cfg.opts("mechanic", "distribution.version")
        repo_name = cfg.opts("mechanic", "distribution.repository")
        distributions_root = "%s/%s" % (cfg.opts(
            "node", "root.dir"), cfg.opts("source", "distribution.dir"))

        s = lambda: supplier.from_distribution(version=version,
                                               repo_name=repo_name,
                                               distributions_root=
                                               distributions_root)
        p = provisioner.local_provisioner(cfg, cluster_settings, install_dir,
                                          single_machine)
        l = launcher.InProcessLauncher(cfg, metrics_store, challenge_root_path,
                                       log_dir)
    elif external:
        if cluster_settings:
            logger.warning("Cannot apply challenge-specific cluster settings [%s] for an externally provisioned cluster. "
                           "Please ensure that the cluster settings are present or the benchmark may fail or behave unexpectedly."
                           % cluster_settings)
        s = lambda: None
        p = provisioner.no_op_provisioner(cfg)
        l = launcher.ExternalLauncher(cfg, metrics_store)
    elif docker:
        s = lambda: None
        p = provisioner.docker_provisioner(cfg, cluster_settings, install_dir)
        l = launcher.DockerLauncher(cfg, metrics_store)
    else:
        # It is a programmer error (and not a user error) if this function is called with wrong parameters
        raise RuntimeError("One of sources, distribution, docker or external must be True")

    return Mechanic(s, p, l)
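
Note that the supplier `s` is always a zero-argument lambda, so no download or build work happens until Mechanic actually invokes it; a distilled sketch of that lazy-supplier pattern (the Mechanic stand-in here is simplified and hypothetical):

class MechanicSketch:
    # simplified stand-in for the Mechanic used above
    def __init__(self, supply, provisioner, launcher):
        self.supply = supply
        self.provisioner = provisioner
        self.launcher = launcher

    def prepare(self):
        # the expensive supply step (download/build) runs only here
        return self.supply()


def expensive_supply():
    print("downloading distribution ...")
    return "/tmp/elasticsearch-6.0.0.tar.gz"  # hypothetical path


m = MechanicSketch(expensive_supply, provisioner=None, launcher=None)
# constructing the mechanic did not download anything; prepare() does:
binary = m.prepare()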
Example No. 58
    def start(self, car):
        # hardcoded for the moment, should actually be identical to internal launcher
        # Only needed on Mac:
        # hosts = [{"host": process.run_subprocess_with_output("docker-machine ip default")[0].strip(), "port": 9200}]
        hosts = [{"host": "localhost", "port": 9200}]
        client_options = self.cfg.opts("launcher", "client.options")
        # unified client config
        self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
        self.cfg.add(config.Scope.benchmark, "client", "options",
                     client_options)

        es = self.client_factory(hosts, client_options).create()

        t = telemetry.Telemetry(
            self.cfg,
            devices=[
                # Be aware that some of the meta-data are taken from the host system, not the container (e.g. number of CPU
                # cores), so if the Docker container constrains these, the metrics are actually wrong.
                telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
                telemetry.NodeStats(self.cfg, es, self.metrics_store),
                telemetry.IndexStats(self.cfg, es, self.metrics_store),
                telemetry.DiskIo(self.cfg, self.metrics_store),
                telemetry.CpuUsage(self.cfg, self.metrics_store)
            ])

        distribution_version = self.cfg.opts("source",
                                             "distribution.version",
                                             mandatory=False)

        install_dir = self._install_dir()
        io.ensure_dir(install_dir)

        java_opts = ""
        if car.heap:
            java_opts += "-Xms%s -Xmx%s " % (car.heap, car.heap)
        if car.java_opts:
            java_opts += car.java_opts

        vars = {
            "es_java_opts": java_opts,
            "container_memory_gb": "%dg" % (convert.bytes_to_gb(psutil.virtual_memory().total) // 2),
            "es_data_dir": "%s/data" % install_dir,
            "es_version": distribution_version
        }

        docker_cfg = self._render_template_from_file(vars)
        logger.info("Starting Docker container with configuration:\n%s" %
                    docker_cfg)
        docker_cfg_path = self._docker_cfg_path()
        with open(docker_cfg_path, "wt") as f:
            f.write(docker_cfg)

        c = cluster.Cluster([], t)

        self._start_process(cmd="docker-compose -f %s up" % docker_cfg_path,
                            node_name="rally0")
        # Wait for a little while: Plugins may still be initializing although the node has already started.
        time.sleep(10)

        t.attach_to_cluster(c)
        logger.info("Successfully started Docker container")
        return c
Example No. 59
 def instrument_env(self, car, candidate_id):
     io.ensure_dir(self.log_root)
     log_file = "%s/%s-%s.gc.log" % (self.log_root, car.safe_name, candidate_id)
     console.info("%s: Writing GC log to [%s]" % (self.human_name, log_file), logger=logger)
     return self.java_opts(log_file)