def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompresses the provided archive if the uncompressed file is missing or has an
    unexpected size. We assume that track data are always compressed and try to
    decompress them before running the benchmark.

    :param data_set_path: Path to the compressed data set archive.
    :param expected_size_in_bytes: Expected size of the decompressed file in bytes
        (may be ``None`` to skip size verification).
    :return: A tuple of (path to the decompressed file, flag whether decompression ran).
    :raises exceptions.DataError: If the extracted size does not match the expected size.
    """
    basename, extension = io.splitext(data_set_path)
    decompressed = False
    if not os.path.isfile(basename) or os.path.getsize(basename) != expected_size_in_bytes:
        decompressed = True
        # BUG FIX: the original accessed `type.uncompressed_size_in_bytes`, i.e. an attribute
        # on the *builtin* `type`, which raises AttributeError at runtime. Use the function
        # parameter `expected_size_in_bytes` instead.
        if expected_size_in_bytes:
            console.info(
                "Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... "
                % (data_set_path, basename, convert.bytes_to_gb(expected_size_in_bytes)),
                end='', flush=True, logger=logger)
        else:
            console.info(
                "Decompressing track data from [%s] to [%s] ... " % (data_set_path, basename),
                end='', flush=True, logger=logger)
        io.decompress(data_set_path, io.dirname(data_set_path))
        console.println("[OK]")
        extracted_bytes = os.path.getsize(basename)
        if expected_size_in_bytes is not None and extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError(
                "[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected."
                % (basename, extracted_bytes, expected_size_in_bytes))
    return basename, decompressed
def _render_template_from_file(self, variables):
    """Render the bundled docker-compose Jinja2 template with the given variables."""
    template_path = os.path.join(self.rally_root, "resources", "docker-compose.yml.j2")
    template_dir = io.dirname(template_path)
    template_name = io.basename(template_path)
    return self._render_template(loader=jinja2.FileSystemLoader(template_dir),
                                 template_name=template_name,
                                 variables=variables)
def _render_template_from_file(self, variables):
    """
    Render the docker-compose template shipped in Rally's resources directory.

    :param variables: Dict of template variables passed through to the renderer.
    :return: The rendered template contents.
    """
    rally_root = self.cfg.opts("system", "rally.root")
    # use os.path.join instead of a hard-coded "/" so the path is built portably
    # (consistent with the other template-rendering helpers in this file)
    compose_file = os.path.join(rally_root, "resources", "docker-compose.yml")
    return self._render_template(loader=jinja2.FileSystemLoader(io.dirname(compose_file)),
                                 template_name=io.basename(compose_file),
                                 variables=variables)
def _unzip(self, data_set_path):
    """
    Ensure the track data archive is unzipped next to the archive file.

    We assume that track data are always compressed and try to unzip them before
    running the benchmark. Unzipping is skipped when the target file already exists.

    :param data_set_path: Path to the compressed data set archive.
    :return: Path of the uncompressed file.
    """
    uncompressed_path = io.splitext(data_set_path)[0]
    if not os.path.isfile(uncompressed_path):
        logger.info("Unzipping track data from [%s] to [%s]." % (data_set_path, uncompressed_path))
        io.unzip(data_set_path, io.dirname(data_set_path))
    return uncompressed_path
def write_report(self, metrics_table):
    """
    Print the metrics table in the configured report format and, if a report file
    is configured, also write it to that file.

    :param metrics_table: Iterable of (metric, value) records.
    :raises exceptions.SystemSetupError: If the configured report format is unknown.
    """
    headers = ["Metric", "Value"]
    report_format = self._config.opts("report", "reportformat")
    report_file = self._config.opts("report", "reportfile")

    if report_format == "markdown":
        report = tabulate.tabulate(metrics_table, headers=headers, tablefmt="pipe",
                                   numalign="right", stralign="right")
    elif report_format == "csv":
        with io.StringIO() as out:
            csv_writer = csv.writer(out)
            csv_writer.writerow(headers)
            csv_writer.writerows(metrics_table)
            report = out.getvalue()
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    print_internal(report)
    if len(report_file) > 0:
        normalized_report_file = rio.normalize_path(report_file)
        logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                    (normalized_report_file, report_file, report_format))
        print("\nWriting report also to '%s'" % normalized_report_file)
        # ensure that the parent folder already exists when we try to write the file...
        rio.ensure_dir(rio.dirname(normalized_report_file))
        with open(normalized_report_file, mode="w", encoding="UTF-8") as f:
            f.writelines(report)
def write_single_report(report_file, report_format, cwd, headers, data_plain, data_rich, write_header=True):
    """
    Print a report to the console (rich variant) and append it to a report file
    (plain variant) if one has been specified.

    :param report_file: Target file path; an empty string disables file output.
    :param report_format: Either "markdown" or "csv".
    :param cwd: Directory used to resolve a relative report file path.
    :param headers: Column headers.
    :param data_plain: Rows written to the file.
    :param data_rich: Rows shown on the console.
    :param write_header: Whether to include the header row in the file output.
    :raises exceptions.SystemSetupError: If the report format is unknown.
    """
    formatters = {
        "markdown": format_as_markdown,
        "csv": format_as_csv,
    }
    try:
        formatter = formatters[report_format]
    except KeyError:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    print_internal(formatter(headers, data_rich))
    if len(report_file) > 0:
        normalized_report_file = rio.normalize_path(report_file, cwd)
        logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                    (normalized_report_file, report_file, report_format))
        # ensure that the parent folder already exists when we try to write the file...
        rio.ensure_dir(rio.dirname(normalized_report_file))
        with open(normalized_report_file, mode="a+", encoding="utf-8") as f:
            f.writelines(formatter(headers, data_plain, write_header))
def write_single_report(self, report_format, report_file, headers, data, force_cmd_line_output=True):
    """
    Render a single report table and emit it to the console and/or a file.

    :param report_format: Either "markdown" or "csv".
    :param report_file: Target file path; an empty string disables file output.
    :param headers: Column headers.
    :param data: Rows of the report table.
    :param force_cmd_line_output: Whether to also print the report on the console.
    :raises exceptions.SystemSetupError: If the report format is unknown.
    """
    if report_format == "markdown":
        report = tabulate.tabulate(data, headers=headers, tablefmt="pipe",
                                   numalign="right", stralign="right")
    elif report_format == "csv":
        with io.StringIO() as out:
            csv_writer = csv.writer(out)
            csv_writer.writerow(headers)
            csv_writer.writerows(data)
            report = out.getvalue()
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    if force_cmd_line_output:
        print_internal(report)

    if len(report_file) > 0:
        normalized_report_file = rio.normalize_path(report_file)
        logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                    (normalized_report_file, report_file, report_format))
        if force_cmd_line_output:
            print("\nWriting report also to '%s'" % normalized_report_file)
        # ensure that the parent folder already exists when we try to write the file...
        rio.ensure_dir(rio.dirname(normalized_report_file))
        with open(normalized_report_file, mode="w", encoding="UTF-8") as f:
            f.writelines(report)
def _unzip(self, data_set_path):
    """
    Unzip the track data archive unless the uncompressed file already exists.

    We assume that track data are always compressed and try to unzip them before
    running the benchmark.

    :param data_set_path: Path to the compressed data set archive.
    :return: Path of the uncompressed file.
    """
    target, _ = io.splitext(data_set_path)
    if os.path.isfile(target):
        # already unzipped on a previous run - nothing to do
        return target
    logger.info("Unzipping track data from [%s] to [%s]." % (data_set_path, target))
    io.unzip(data_set_path, io.dirname(data_set_path))
    return target
def add(self, binaries):
    """
    Resolve the artifact and register its path in ``binaries``, preferring a
    previously cached copy. Freshly built artifacts are copied into the cache
    (best-effort) when revision information is available.

    :param binaries: Dict mapping artifact keys to artifact paths; mutated in place.
    """
    if self.cached:
        self.logger.info("Using cached artifact in [%s]", self.cached_path)
        binaries[self.file_resolver.artifact_key] = self.file_resolver.to_artifact_path(self.cached_path)
        return

    self.source_supplier.add(binaries)
    original_path = self.file_resolver.to_file_system_path(binaries[self.file_resolver.artifact_key])
    # this can be None if the Elasticsearch does not reside in a git repo and the user has only
    # copied all source files. In that case, we cannot resolve a revision hash and thus we cannot cache.
    if not self.cached_path:
        self.logger.info("Not caching [%s] (no revision info).", original_path)
        return

    try:
        io.ensure_dir(io.dirname(self.cached_path))
        shutil.copy(original_path, self.cached_path)
        self.logger.info("Caching artifact in [%s]", self.cached_path)
        binaries[self.file_resolver.artifact_key] = self.file_resolver.to_artifact_path(self.cached_path)
    except OSError:
        # caching is best-effort: keep using the original artifact path
        self.logger.exception("Not caching [%s].", original_path)
def decompress_corpus(archive_path, documents_path, uncompressed_size):
    """
    Decompress a corpus archive and verify the result.

    :param archive_path: Path to the compressed corpus archive.
    :param documents_path: Expected path of the decompressed documents file.
    :param uncompressed_size: Expected size in bytes of the decompressed file
        (may be ``None`` to skip size verification).
    :raises exceptions.DataError: If decompression did not produce the documents
        file or the extracted size does not match the expected size.
    """
    if uncompressed_size:
        msg = ("Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... "
               % (archive_path, documents_path, convert.bytes_to_gb(uncompressed_size)))
    else:
        msg = "Decompressing track data from [%s] to [%s] ... " % (archive_path, documents_path)
    console.info(msg, end='', flush=True, logger=logger)

    io.decompress(archive_path, io.dirname(archive_path))
    console.println("[OK]")

    if not os.path.isfile(documents_path):
        raise exceptions.DataError(
            "Decompressing [%s] did not create [%s]. Please check with the track author if the compressed "
            "archive has been created correctly." % (archive_path, documents_path))

    extracted_bytes = os.path.getsize(documents_path)
    if uncompressed_size is not None and extracted_bytes != uncompressed_size:
        raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                   (documents_path, extracted_bytes, uncompressed_size))
def write_single_report(self, report_file, headers, data, write_header=True, show_also_in_console=True):
    """
    Render a single report table in the configured format and emit it to the
    console and/or append it to a report file.

    :param report_file: Target file path; an empty string disables file output.
    :param headers: Column headers.
    :param data: Rows of the report table.
    :param write_header: Whether to include the header row in the file output.
    :param show_also_in_console: Whether to also print the report on the console.
    :raises exceptions.SystemSetupError: If the configured report format is unknown.
    """
    report_format = self._config.opts("report", "reportformat")
    if report_format == "markdown":
        formatter = self.format_as_markdown
    elif report_format == "csv":
        formatter = self.format_as_csv
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    if show_also_in_console:
        print_internal(formatter(headers, data))
    if len(report_file) > 0:
        normalized_report_file = rio.normalize_path(report_file)
        logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                    (normalized_report_file, report_file, report_format))
        # ensure that the parent folder already exists when we try to write the file...
        rio.ensure_dir(rio.dirname(normalized_report_file))
        with open(normalized_report_file, mode="a+", encoding="UTF-8") as f:
            f.writelines(formatter(headers, data, write_header))
def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompress the track data archive if the uncompressed file is missing or has an
    unexpected size. We assume that track data are always compressed and try to
    decompress them before running the benchmark.

    :param data_set_path: Path to the compressed data set archive.
    :param expected_size_in_bytes: Expected size in bytes of the decompressed file.
    :raises exceptions.DataError: If the extracted size does not match the expected size.
    """
    target_path = io.splitext(data_set_path)[0]
    up_to_date = os.path.isfile(target_path) and os.path.getsize(target_path) == expected_size_in_bytes
    if not up_to_date:
        logger.info("Unzipping track data from [%s] to [%s]." % (data_set_path, target_path))
        io.decompress(data_set_path, io.dirname(data_set_path))
        extracted_bytes = os.path.getsize(target_path)
        if extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                       (target_path, extracted_bytes, expected_size_in_bytes))
def render_template_from_file(template_file_name):
    """
    Render the Jinja2 template stored in ``template_file_name``, exposing a glob
    helper that resolves patterns relative to the template's directory.
    """
    base_path = io.dirname(template_file_name)

    def relative_glob(start, f):
        # return matches relative to `start`, or an empty list when nothing matches
        matches = glob.glob(os.path.join(start, f))
        return [os.path.relpath(p, start) for p in matches] if matches else []

    return render_template(loader=jinja2.FileSystemLoader(base_path),
                           template_name=io.basename(template_file_name),
                           glob_helper=lambda f: relative_glob(base_path, f))
def render_template_from_file(template_file_name):
    """
    Render the Jinja2 template at ``template_file_name``. A ``glob_helper`` is
    provided to the template that expands glob patterns relative to the
    template's own directory.
    """
    base_path = io.dirname(template_file_name)

    def glob_helper(pattern):
        hits = glob.glob(os.path.join(base_path, pattern))
        if not hits:
            return []
        return [os.path.relpath(match, base_path) for match in hits]

    return render_template(loader=jinja2.FileSystemLoader(base_path),
                           template_name=io.basename(template_file_name),
                           glob_helper=glob_helper)
def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompresses the track data archive if the uncompressed file is missing or has an
    unexpected size. We assume that track data are always compressed and try to
    decompress them before running the benchmark.

    :param data_set_path: Path to the compressed data set archive.
    :param expected_size_in_bytes: Expected size in bytes of the decompressed file.
    :raises exceptions.DataError: If the extracted size does not match the expected size.
    """
    basename, extension = io.splitext(data_set_path)
    if not os.path.isfile(basename) or os.path.getsize(basename) != expected_size_in_bytes:
        logger.info("Unzipping track data from [%s] to [%s]." % (data_set_path, basename))
        # BUG FIX: the original printed `type.document_archive` and
        # `type.uncompressed_size_in_bytes` - attribute accesses on the *builtin* `type`,
        # which raise AttributeError at runtime. Use the function parameters instead.
        print("Decompressing %s (resulting size: %.2f GB) ... " %
              (data_set_path, convert.bytes_to_gb(expected_size_in_bytes)), end='', flush=True)
        io.decompress(data_set_path, io.dirname(data_set_path))
        print("Done")
        extracted_bytes = os.path.getsize(basename)
        if extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                       (basename, extracted_bytes, expected_size_in_bytes))
def write_single_report(report_file, report_format, cwd, numbers_align, headers, data_plain, data_rich):
    """
    Print a report to the console (rich variant) and append it to a report file
    (plain variant) when one has been specified.

    :param report_file: Target file path; an empty string disables file output.
    :param report_format: Either "markdown" or "csv".
    :param cwd: Directory used to resolve a relative report file path.
    :param numbers_align: Numeric column alignment, forwarded to the markdown formatter.
    :param headers: Column headers.
    :param data_plain: Rows written to the file.
    :param data_rich: Rows shown on the console.
    :raises exceptions.SystemSetupError: If the report format is unknown.
    """
    if report_format == "markdown":
        formatter = partial(format_as_markdown, numbers_align=numbers_align)
    elif report_format == "csv":
        formatter = format_as_csv
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    print_internal(formatter(headers, data_rich))
    if len(report_file) == 0:
        return
    normalized_report_file = rio.normalize_path(report_file, cwd)
    # ensure that the parent folder already exists when we try to write the file...
    rio.ensure_dir(rio.dirname(normalized_report_file))
    with open(normalized_report_file, mode="a+", encoding="utf-8") as f:
        f.writelines(formatter(headers, data_plain))
def install_default_log_config():
    """
    Ensures a log configuration file is present on this machine. The default log
    configuration is based on the template in resources/logging.json.

    It also ensures that the default log path has been created so log files can be
    successfully opened in that directory.
    """
    log_config = log_config_path()
    if not io.exists(log_config):
        io.ensure_dir(io.dirname(log_config))
        source_path = io.normalize_path(os.path.join(os.path.dirname(__file__), "resources", "logging.json"))
        # copy the template, substituting the log path placeholder on the way
        with open(log_config, "w", encoding="UTF-8") as target, \
                open(source_path, "r", encoding="UTF-8") as src:
            target.write(src.read().replace("${LOG_PATH}", default_log_path()))
    io.ensure_dir(default_log_path())
def write_single_report(report_file, report_format, cwd, headers, data_plain, data_rich,
                        write_header=True, show_also_in_console=True):
    """
    Emit a report to the console (rich variant) and/or append it to a report file
    (plain variant).

    :param report_file: Target file path; an empty string disables file output.
    :param report_format: Either "markdown" or "csv".
    :param cwd: Directory used to resolve a relative report file path.
    :param headers: Column headers.
    :param data_plain: Rows written to the file.
    :param data_rich: Rows shown on the console.
    :param write_header: Whether to include the header row in the file output.
    :param show_also_in_console: Whether to print the report on the console.
    :raises exceptions.SystemSetupError: If the report format is unknown.
    """
    if report_format == "markdown":
        formatter = format_as_markdown
    elif report_format == "csv":
        formatter = format_as_csv
    else:
        raise exceptions.SystemSetupError("Unknown report format '%s'" % report_format)

    if show_also_in_console:
        print_internal(formatter(headers, data_rich))
    if len(report_file) == 0:
        return
    normalized_report_file = rio.normalize_path(report_file, cwd)
    logger.info("Writing report to [%s] (user specified: [%s]) in format [%s]" %
                (normalized_report_file, report_file, report_format))
    # ensure that the parent folder already exists when we try to write the file...
    rio.ensure_dir(rio.dirname(normalized_report_file))
    with open(normalized_report_file, mode="a+", encoding="UTF-8") as f:
        f.writelines(formatter(headers, data_plain, write_header))
def __init__(self, track_path):
    """
    Resolve track name, track directory and track file from ``track_path``, which
    may be either a directory containing ``track.json`` or a JSON file.

    :param track_path: Path to a track directory or a track JSON file.
    :raises exceptions.SystemSetupError: If the path does not exist, is not a JSON
        file, or a directory lacks ``track.json``.
    """
    if not os.path.exists(track_path):
        raise exceptions.SystemSetupError("Track path %s does not exist" % track_path)

    if os.path.isdir(track_path):
        self.track_name = io.basename(track_path)
        self._track_dir = track_path
        self._track_file = os.path.join(track_path, "track.json")
        if not os.path.exists(self._track_file):
            raise exceptions.SystemSetupError("Could not find track.json in %s" % track_path)
    elif os.path.isfile(track_path):
        # a single file must be the track's JSON definition
        if not io.has_extension(track_path, ".json"):
            raise exceptions.SystemSetupError("%s has to be a JSON file" % track_path)
        self._track_dir = io.dirname(track_path)
        self._track_file = track_path
        self.track_name = io.splitext(io.basename(track_path))[0]
    else:
        raise exceptions.SystemSetupError("%s is neither a file nor a directory" % track_path)
def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompresses the provided archive when needed. We assume that track data are
    always compressed and try to decompress them before running the benchmark.
    Decompression is skipped when the target file already exists with the expected size.

    :param data_set_path: Path to the compressed data set archive.
    :param expected_size_in_bytes: Expected size in bytes of the decompressed file
        (may be ``None`` to skip size verification).
    :return: A tuple of (path of the decompressed file, flag whether decompression ran).
    :raises exceptions.DataError: If the extracted size does not match the expected size.
    """
    basename, extension = io.splitext(data_set_path)
    decompressed = False
    if not os.path.isfile(basename) or os.path.getsize(basename) != expected_size_in_bytes:
        decompressed = True
        # BUG FIX: originally this read `type.uncompressed_size_in_bytes` - an attribute
        # lookup on the *builtin* `type` that raises AttributeError. The intended value is
        # the `expected_size_in_bytes` parameter.
        if expected_size_in_bytes:
            console.info("Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... " %
                         (data_set_path, basename, convert.bytes_to_gb(expected_size_in_bytes)),
                         end='', flush=True, logger=logger)
        else:
            console.info("Decompressing track data from [%s] to [%s] ... " % (data_set_path, basename),
                         end='', flush=True, logger=logger)
        io.decompress(data_set_path, io.dirname(data_set_path))
        console.println("[OK]")
        extracted_bytes = os.path.getsize(basename)
        if expected_size_in_bytes is not None and extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                       (basename, extracted_bytes, expected_size_in_bytes))
    return basename, decompressed
def install_default_log_config():
    """
    Ensures a log configuration file is present on this machine. The default log
    configuration is based on the template in resources/logging.json.

    It also ensures that the default log path has been created so log files can be
    successfully opened in that directory.
    """
    log_config = log_config_path()
    if not io.exists(log_config):
        io.ensure_dir(io.dirname(log_config))
        source_path = io.normalize_path(os.path.join(os.path.dirname(__file__), "resources", "logging.json"))
        with open(log_config, "w", encoding="UTF-8") as target:
            with open(source_path, "r", encoding="UTF-8") as src:
                # Ensure we have a trailing path separator as after LOG_PATH there will only be the file name
                # the logging path might also contain backslashes that we need to escape
                log_path = io.escape_path(os.path.join(paths.logs(), ""))
                target.write(src.read().replace("${LOG_PATH}", log_path))
    io.ensure_dir(paths.logs())
def _render_template_from_file(self, variables):
    """Render the docker-compose Jinja2 template below Rally's root directory."""
    compose_file = "%s/resources/docker-compose.yml.j2" % self.rally_root
    loader = jinja2.FileSystemLoader(io.dirname(compose_file))
    return self._render_template(loader=loader,
                                 template_name=io.basename(compose_file),
                                 variables=variables)
def _render_template_from_file(self, variables):
    """
    Render the docker-compose template shipped in Rally's resources directory.

    :param variables: Dict of template variables passed through to the renderer.
    :return: The rendered template contents.
    """
    # use os.path.join instead of a hard-coded "/" so the path is built portably
    compose_file = os.path.join(self.cfg.opts("system", "rally.root"), "resources", "docker-compose.yml")
    return self._render_template(loader=jinja2.FileSystemLoader(io.dirname(compose_file)),
                                 template_name=io.basename(compose_file),
                                 variables=variables)
def render_template_from_file(template_file_name):
    """Render the Jinja2 template stored in ``template_file_name``."""
    template_dir = io.dirname(template_file_name)
    template_name = io.basename(template_file_name)
    return render_template(loader=jinja2.FileSystemLoader(template_dir),
                           template_name=template_name)
def render_template_from_file(template_file_name):
    """Load and render the Jinja2 template located at ``template_file_name``."""
    loader = jinja2.FileSystemLoader(io.dirname(template_file_name))
    return render_template(loader=loader, template_name=io.basename(template_file_name))