def report_disk_usage(self, stats):
    """Build the disk-usage report rows, or an empty list when no disk usage stats were captured."""
    if not stats.has_disk_usage_stats():
        return []
    rows = []
    # one row per metric: [lap, name, details, value in GB, unit]
    for label, raw_bytes in (("Index size", stats.index_size), ("Totally written", stats.bytes_written)):
        rows.append([self.lap, label, "", convert.bytes_to_gb(raw_bytes), "GB"])
    return rows
def report_disk_usage(self, stats):
    """Return [metric-name, value-in-GB] pairs, or an empty list if disk usage stats are unavailable."""
    if not stats.has_disk_usage_stats():
        return []
    metrics = (
        ("Index size [GB]", stats.index_size),
        ("Totally written [GB]", stats.bytes_written),
    )
    return [[name, convert.bytes_to_gb(raw_bytes)] for name, raw_bytes in metrics]
def report_disk_usage(self, store):
    """Print final index size and total bytes written from the metrics store, if both are present."""
    index_size = store.get_one("final_index_size_bytes")
    bytes_written = store.get_one("disk_io_write_bytes_%s" % track.BenchmarkPhase.index.name)
    # guard clause: both metrics must be available to render the report
    if index_size is None or bytes_written is None:
        print(" Could not determine disk usage metrics")
        return
    print(" Final index size: %.1fGB (%.1fMB)" % (convert.bytes_to_gb(index_size), convert.bytes_to_mb(index_size)))
    print(" Totally written: %.1fGB (%.1fMB)" % (convert.bytes_to_gb(bytes_written), convert.bytes_to_mb(bytes_written)))
def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompresses the given track data file unless an uncompressed copy of the expected size already exists.

    :param data_set_path: Path to the compressed data set file.
    :param expected_size_in_bytes: Expected size of the uncompressed file in bytes, or None if unknown.
    :return: A tuple of (path to the uncompressed file, flag whether decompression actually happened).
    :raises exceptions.DataError: If the extracted file does not have the expected size.
    """
    # we assume that track data are always compressed and try to decompress them before running the benchmark
    basename, extension = io.splitext(data_set_path)
    decompressed = False
    if not os.path.isfile(basename) or os.path.getsize(basename) != expected_size_in_bytes:
        decompressed = True
        # BUG FIX: the original referenced "type.uncompressed_size_in_bytes" where "type" is the
        # Python builtin (no such attribute -> AttributeError at runtime). The expected uncompressed
        # size is this function's own parameter.
        if expected_size_in_bytes:
            console.info("Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... " %
                         (data_set_path, basename, convert.bytes_to_gb(expected_size_in_bytes)),
                         end='', flush=True, logger=logger)
        else:
            console.info("Decompressing track data from [%s] to [%s] ... " % (data_set_path, basename),
                         end='', flush=True, logger=logger)
        io.decompress(data_set_path, io.dirname(data_set_path))
        console.println("[OK]")
        extracted_bytes = os.path.getsize(basename)
        if expected_size_in_bytes is not None and extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                       (basename, extracted_bytes, expected_size_in_bytes))
    return basename, decompressed
def decompress_corpus(archive_path, documents_path, uncompressed_size):
    """
    Decompress the corpus archive and verify that the resulting documents file exists
    and, when a size is known, that it has the expected size.

    :param archive_path: Path to the compressed corpus archive.
    :param documents_path: Path where the uncompressed documents file is expected.
    :param uncompressed_size: Expected uncompressed size in bytes, or None/falsy if unknown.
    :raises exceptions.DataError: If decompression produced no file or a file of the wrong size.
    """
    if uncompressed_size:
        message = "Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... " % (
            archive_path, documents_path, convert.bytes_to_gb(uncompressed_size))
    else:
        message = "Decompressing track data from [%s] to [%s] ... " % (archive_path, documents_path)
    console.info(message, end='', flush=True, logger=logger)
    io.decompress(archive_path, io.dirname(archive_path))
    console.println("[OK]")
    # fail fast if the archive did not contain the expected documents file at all
    if not os.path.isfile(documents_path):
        raise exceptions.DataError("Decompressing [%s] did not create [%s]. Please check with the track author if the compressed "
                                   "archive has been created correctly." % (archive_path, documents_path))
    actual_size = os.path.getsize(documents_path)
    if uncompressed_size is not None and actual_size != uncompressed_size:
        raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                   (documents_path, actual_size, uncompressed_size))
def _facts():
    """Gather host facts (environment, hardware, software). Fields Rally cannot autodetect are marked TODO."""
    disk_facts = [
        {
            "device": device,
            "type": "TODO: Provide one of 'ssd', 'spinning'",
            "file-system": fs_type,
        }
        # sysstats.disks() yields 4-tuples; only device name and file system type are reported
        for device, _, fs_type, _ in sysstats.disks()
    ]
    return {
        "environment": {
            "type": "TODO: Provide one of 'cloud', 'bare-metal' + details about the environment (EC2, instance type)",
        },
        "hardware": {
            "cpu_model": sysstats.cpu_model(),
            "disk": disk_facts,
            "memory": "%dgb" % convert.bytes_to_gb(sysstats.total_memory())
        },
        "software": {
            "jvm_vendor": _jvm_property("java.vm.vendor"),
            "jvm_version": _jvm_property("java.runtime.version"),
            "os_name": sysstats.os_name(),
            "os_version": sysstats.os_version(),
            "rally_version": version.version(),
            "distribution_version": "TODO: Provide Elasticsearch distribution version"
        }
    }
def start(self, car):
    """
    Start an Elasticsearch node inside a Docker container via docker-compose and return the started cluster.

    :param car: The car describing heap size and extra JVM options for the container.
    :return: A ``cluster.Cluster`` instance with telemetry attached.
    """
    # hardcoded for the moment, should actually be identical to internal launcher
    # Only needed on Mac:
    # hosts = [{"host": process.run_subprocess_with_output("docker-machine ip default")[0].strip(), "port": 9200}]
    hosts = [{"host": "localhost", "port": 9200}]
    client_options = self.cfg.opts("launcher", "client.options")
    # unified client config
    self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
    self.cfg.add(config.Scope.benchmark, "client", "options", client_options)
    es = self.client_factory(hosts, client_options).create()
    t = telemetry.Telemetry(self.cfg, devices=[
        # Be aware that some the meta-data are taken from the host system, not the container (e.g. number of CPU cores) so if the
        # Docker container constrains these, the metrics are actually wrong.
        telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
        telemetry.NodeStats(self.cfg, es, self.metrics_store),
        telemetry.IndexStats(self.cfg, es, self.metrics_store),
        telemetry.DiskIo(self.cfg, self.metrics_store),
        telemetry.CpuUsage(self.cfg, self.metrics_store)
    ])
    distribution_version = self.cfg.opts("source", "distribution.version", mandatory=False)
    install_dir = self._install_dir()
    io.ensure_dir(install_dir)
    # build the JVM options string from the car's heap and extra options
    java_opts = ""
    if car.heap:
        java_opts += "-Xms%s -Xmx%s " % (car.heap, car.heap)
    if car.java_opts:
        java_opts += car.java_opts
    vars = {
        "es_java_opts": java_opts,
        # give the container half of the host's total memory
        "container_memory_gb": "%dg" % (convert.bytes_to_gb(psutil.virtual_memory().total) // 2),
        "es_data_dir": "%s/data" % install_dir,
        "es_version": distribution_version
    }
    docker_cfg = self._render_template_from_file(vars)
    logger.info("Starting Docker container with configuration:\n%s" % docker_cfg)
    docker_cfg_path = self._docker_cfg_path()
    with open(docker_cfg_path, "wt") as f:
        f.write(docker_cfg)
    c = cluster.Cluster([], t)
    self._start_process(cmd="docker-compose -f %s up" % docker_cfg_path, node_name="rally0")
    # Wait for a little while: Plugins may still be initializing although the node has already started.
    time.sleep(10)
    t.attach_to_cluster(c)
    logger.info("Successfully started Docker container")
    return c
def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompresses the given track data file unless an uncompressed copy of the expected size already exists.

    :param data_set_path: Path to the compressed data set file.
    :param expected_size_in_bytes: Expected size of the uncompressed file in bytes.
    :raises exceptions.DataError: If the extracted file does not have the expected size.
    """
    # we assume that track data are always compressed and try to decompress them before running the benchmark
    basename, extension = io.splitext(data_set_path)
    if not os.path.isfile(basename) or os.path.getsize(basename) != expected_size_in_bytes:
        logger.info("Unzipping track data from [%s] to [%s]." % (data_set_path, basename))
        # BUG FIX: the original referenced "type.document_archive" and "type.uncompressed_size_in_bytes",
        # but "type" is the Python builtin here (no such attributes -> AttributeError at runtime).
        # Use this function's own parameters instead.
        print("Decompressing %s (resulting size: %.2f GB) ... " %
              (data_set_path, convert.bytes_to_gb(expected_size_in_bytes)), end='', flush=True)
        io.decompress(data_set_path, io.dirname(data_set_path))
        print("Done")
        extracted_bytes = os.path.getsize(basename)
        if extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                       (basename, extracted_bytes, expected_size_in_bytes))
def docker_vars(self):
    """Assemble the template variables used to render the docker-compose configuration for this node."""
    opt_parts = []
    if self.car.heap:
        opt_parts.append("-Xms%s -Xmx%s " % (self.car.heap, self.car.heap))
    if self.car.java_opts:
        opt_parts.append(self.car.java_opts)
    return {
        "es_java_opts": "".join(opt_parts),
        # hand half of the host's total memory to the container
        "container_memory_gb": "%dg" % (convert.bytes_to_gb(sysstats.total_memory()) // 2),
        "es_data_dir": "%s/data" % self.install_dir,
        "es_version": self.distribution_version,
        "http_port": self.http_port
    }
def decompress(data_set_path, expected_size_in_bytes):
    """
    Decompresses the given track data file unless an uncompressed copy of the expected size already exists.

    :param data_set_path: Path to the compressed data set file.
    :param expected_size_in_bytes: Expected size of the uncompressed file in bytes, or None if unknown.
    :return: A tuple of (path to the uncompressed file, flag whether decompression actually happened).
    :raises exceptions.DataError: If the extracted file does not have the expected size.
    """
    # we assume that track data are always compressed and try to decompress them before running the benchmark
    basename, extension = io.splitext(data_set_path)
    decompressed = False
    if not os.path.isfile(basename) or os.path.getsize(basename) != expected_size_in_bytes:
        decompressed = True
        # BUG FIX: the original referenced "type.uncompressed_size_in_bytes" where "type" is the
        # Python builtin (no such attribute -> AttributeError at runtime). The expected uncompressed
        # size is this function's own parameter.
        if expected_size_in_bytes:
            console.info("Decompressing track data from [%s] to [%s] (resulting size: %.2f GB) ... " %
                         (data_set_path, basename, convert.bytes_to_gb(expected_size_in_bytes)),
                         end='', flush=True, logger=logger)
        else:
            console.info("Decompressing track data from [%s] to [%s] ... " % (data_set_path, basename),
                         end='', flush=True, logger=logger)
        io.decompress(data_set_path, io.dirname(data_set_path))
        console.println("[OK]")
        extracted_bytes = os.path.getsize(basename)
        if expected_size_in_bytes is not None and extracted_bytes != expected_size_in_bytes:
            raise exceptions.DataError("[%s] is corrupt. Extracted [%d] bytes but [%d] bytes are expected." %
                                       (basename, extracted_bytes, expected_size_in_bytes))
    return basename, decompressed
def migrate(config_file, current_version, target_version, out=print, i=input):
    """
    Upgrade the configuration in ``config_file`` step by step from ``current_version`` to ``target_version``.

    Each migration step is guarded by an exact version check so the steps chain: after a step has run it
    advances ``current_version`` so the next guard can fire. A backup is taken before anything is changed.

    :param config_file: Configuration file abstraction providing ``backup()``, ``load()`` and ``store()``.
    :param current_version: The config version found on disk.
    :param target_version: The config version this Rally release expects.
    :param out: Output function for user-facing messages (default: ``print``).
    :param i: Input function for interactive prompts (default: ``input``).
    :raises ConfigError: If the configuration on disk is already newer than ``target_version``.
    """
    prompter = Prompter(i=i, o=out, assume_defaults=False)
    logger.info("Upgrading configuration from version [%s] to [%s]." % (current_version, target_version))
    # Something is really fishy. We don't want to downgrade the configuration.
    if current_version >= target_version:
        raise ConfigError("The existing config file is available in a later version already. Expected version <= [%s] but found [%s]"
                          % (target_version, current_version))
    # but first a backup...
    config_file.backup()
    config = config_file.load(interpolation=None)
    if current_version == 0 and target_version > current_version:
        logger.info("Migrating config from version [0] to [1]")
        current_version = 1
        config["meta"] = {}
        config["meta"]["config.version"] = str(current_version)
        # in version 1 we changed some directories from being absolute to being relative
        config["system"]["log.root.dir"] = "logs"
        config["provisioning"]["local.install.dir"] = "install"
        config["reporting"]["report.base.dir"] = "reports"
    if current_version == 1 and target_version > current_version:
        logger.info("Migrating config from version [1] to [2]")
        current_version = 2
        config["meta"]["config.version"] = str(current_version)
        # no need to ask the user now if we are about to upgrade to version 4
        config["reporting"]["datastore.type"] = "in-memory"
        config["reporting"]["datastore.host"] = ""
        config["reporting"]["datastore.port"] = ""
        config["reporting"]["datastore.secure"] = ""
        config["reporting"]["datastore.user"] = ""
        config["reporting"]["datastore.password"] = ""
        config["system"]["env.name"] = "local"
    if current_version == 2 and target_version > current_version:
        logger.info("Migrating config from version [2] to [3]")
        current_version = 3
        config["meta"]["config.version"] = str(current_version)
        # Remove obsolete settings
        config["reporting"].pop("report.base.dir")
        config["reporting"].pop("output.html.report.filename")
    if current_version == 3 and target_version > current_version:
        root_dir = config["system"]["root.dir"]
        # user-facing migration notice; pauses afterwards so the user can read it
        out("""
*****************************************************************************************
You have an old configuration of Rally. Rally has now a much simpler setup routine which
will autodetect lots of settings for you and it also does not require you to setup a
metrics store anymore.

Rally will now migrate your configuration but if you don't need advanced features like a
metrics store, then you should delete the configuration directory:

  rm -rf {0}

and then rerun Rally's configuration routine:

  {1} configure

Please also note you have {2:.1f} GB of data in your current benchmark directory at {3}

You might want to clean up this directory also.

For more details please see {4}
*****************************************************************************************

Pausing for 10 seconds to let you consider this message.
""".format(config_file.config_dir, PROGRAM_NAME, convert.bytes_to_gb(io.get_size(root_dir)), root_dir,
           console.format.link("https://github.com/elastic/rally/blob/master/CHANGELOG.md#030")))
        time.sleep(10)
        logger.info("Migrating config from version [3] to [4]")
        current_version = 4
        config["meta"]["config.version"] = str(current_version)
        if len(config["reporting"]["datastore.host"]) > 0:
            config["reporting"]["datastore.type"] = "elasticsearch"
        else:
            config["reporting"]["datastore.type"] = "in-memory"
        # Remove obsolete settings
        config["build"].pop("maven.bin")
        config["benchmarks"].pop("metrics.stats.disk.device")
    if current_version == 4 and target_version > current_version:
        config["tracks"] = {}
        config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"
        current_version = 5
        config["meta"]["config.version"] = str(current_version)
    if current_version == 5 and target_version > current_version:
        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(False)
        current_version = 6
        config["meta"]["config.version"] = str(current_version)
    if current_version == 6 and target_version > current_version:
        # Remove obsolete settings
        config.pop("provisioning")
        config["system"].pop("log.root.dir")
        current_version = 7
        config["meta"]["config.version"] = str(current_version)
    if current_version == 7 and target_version > current_version:
        # move [system][root.dir] to [node][root.dir]
        if "node" not in config:
            config["node"] = {}
        config["node"]["root.dir"] = config["system"].pop("root.dir")
        # also move all references!
        for section in config:
            for k, v in config[section].items():
                config[section][k] = v.replace("${system:root.dir}", "${node:root.dir}")
        current_version = 8
        config["meta"]["config.version"] = str(current_version)
    if current_version == 8 and target_version > current_version:
        config["teams"] = {}
        config["teams"]["default.url"] = "https://github.com/elastic/rally-teams"
        current_version = 9
        config["meta"]["config.version"] = str(current_version)
    if current_version == 9 and target_version > current_version:
        config["distributions"] = {}
        config["distributions"]["release.1.url"] = "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-" \
                                                   "{{VERSION}}.tar.gz"
        config["distributions"]["release.2.url"] = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/" \
                                                   "distribution/tar/elasticsearch/{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.url"] = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.cache"] = "true"
        current_version = 10
        config["meta"]["config.version"] = str(current_version)
    if current_version == 10 and target_version > current_version:
        config["runtime"]["java.home"] = config["runtime"].pop("java8.home")
        current_version = 11
        config["meta"]["config.version"] = str(current_version)
    if current_version == 11 and target_version > current_version:
        # As this is a rather complex migration, we log more than usual to understand potential migration problems better.
        if "source" in config:
            if "local.src.dir" in config["source"]:
                previous_root = config["source"].pop("local.src.dir")
                logger.info("Set [source][local.src.dir] to [%s]." % previous_root)
                # if this directory was Rally's default location, then move it on the file system because to allow for checkouts of plugins
                # in the sibling directory.
                if previous_root == os.path.join(config["node"]["root.dir"], "src"):
                    new_root_dir_all_sources = previous_root
                    new_es_sub_dir = "elasticsearch"
                    new_root = os.path.join(new_root_dir_all_sources, new_es_sub_dir)
                    # only attempt to move if the directory exists. It may be possible that users never ran a source benchmark although they
                    # have configured it. In that case the source directory will not yet exist.
                    if io.exists(previous_root):
                        logger.info("Previous source directory was at Rally's default location [%s]. Moving to [%s]."
                                    % (previous_root, new_root))
                        try:
                            # we need to do this in two steps as we need to move the sources to a subdirectory
                            tmp_path = io.normalize_path(os.path.join(new_root_dir_all_sources, os.pardir, "tmp_src_mig"))
                            os.rename(previous_root, tmp_path)
                            io.ensure_dir(new_root)
                            os.rename(tmp_path, new_root)
                        except OSError:
                            logger.exception("Could not move source directory from [%s] to [%s]." % (previous_root, new_root))
                            # A warning is sufficient as Rally should just do a fresh checkout if moving did not work.
                            console.warn("Elasticsearch source directory could not be moved from [%s] to [%s]. Please check the logs."
                                         % (previous_root, new_root))
                    else:
                        logger.info("Source directory is configured at Rally's default location [%s] but does not exist yet."
                                    % previous_root)
                else:
                    logger.info("Previous source directory was the custom directory [%s]." % previous_root)
                    new_root_dir_all_sources = io.normalize_path(os.path.join(previous_root, os.path.pardir))
                    # name of the elasticsearch project directory.
                    new_es_sub_dir = io.basename(previous_root)
                logger.info("Setting [node][src.root.dir] to [%s]." % new_root_dir_all_sources)
                config["node"]["src.root.dir"] = new_root_dir_all_sources
                logger.info("Setting [source][elasticsearch.src.subdir] to [%s]" % new_es_sub_dir)
                config["source"]["elasticsearch.src.subdir"] = new_es_sub_dir
            else:
                logger.info("Key [local.src.dir] not found. Advancing without changes.")
        else:
            logger.info("No section named [source] found in config. Advancing without changes.")
        current_version = 12
        config["meta"]["config.version"] = str(current_version)
    if current_version == 12 and target_version > current_version:
        # the current configuration allows to benchmark from sources
        if "build" in config and "gradle.bin" in config["build"]:
            java_9_home = io.guess_java_home(major_version=9)
            from esrally.utils import jvm
            if java_9_home and not jvm.is_early_access_release(java_9_home):
                logger.debug("Autodetected a JDK 9 installation at [%s]" % java_9_home)
                if "runtime" not in config:
                    config["runtime"] = {}
                config["runtime"]["java9.home"] = java_9_home
            else:
                logger.debug("Could not autodetect a JDK 9 installation. Checking [java.home] already points to a JDK 9.")
                detected = False
                if "runtime" in config:
                    java_home = config["runtime"]["java.home"]
                    if jvm.major_version(java_home) == 9 and not jvm.is_early_access_release(java_home):
                        config["runtime"]["java9.home"] = java_home
                        detected = True
                if not detected:
                    logger.debug("Could not autodetect a JDK 9 installation. Asking user.")
                    raw_java_9_home = prompter.ask_property("Enter the JDK 9 root directory", check_path_exists=True, mandatory=False)
                    if raw_java_9_home and jvm.major_version(raw_java_9_home) == 9 and not jvm.is_early_access_release(raw_java_9_home):
                        java_9_home = io.normalize_path(raw_java_9_home) if raw_java_9_home else None
                        config["runtime"]["java9.home"] = java_9_home
                    else:
                        out("********************************************************************************")
                        out("You don't have a valid JDK 9 installation and cannot benchmark source builds.")
                        out("")
                        out("You can still benchmark binary distributions with e.g.:")
                        out("")
                        out(" %s --distribution-version=6.0.0" % PROGRAM_NAME)
                        out("********************************************************************************")
                        out("")
        current_version = 13
        config["meta"]["config.version"] = str(current_version)
    # all migrations done
    config_file.store(config)
    logger.info("Successfully self-upgraded configuration to version [%s]" % target_version)
def migrate(config_file, current_version, target_version, out=print):
    """
    Upgrade the configuration in ``config_file`` step by step from ``current_version`` to ``target_version``.

    Each migration step is guarded by an exact version check so the steps chain: after a step has run it
    advances ``current_version`` so the next guard can fire. A backup is taken before anything is changed.

    :param config_file: Configuration file abstraction providing ``backup()``, ``load()`` and ``store()``.
    :param current_version: The config version found on disk.
    :param target_version: The config version this Rally release expects.
    :param out: Output function for user-facing messages (default: ``print``).
    :raises ConfigError: If the configuration on disk is already newer than ``target_version``.
    """
    logger.info("Upgrading configuration from version [%s] to [%s]." % (current_version, target_version))
    # Something is really fishy. We don't want to downgrade the configuration.
    if current_version >= target_version:
        raise ConfigError("The existing config file is available in a later version already. Expected version <= [%s] but found [%s]"
                          % (target_version, current_version))
    # but first a backup...
    config_file.backup()
    config = config_file.load(interpolation=None)
    if current_version == 0 and target_version > current_version:
        logger.info("Migrating config from version [0] to [1]")
        current_version = 1
        config["meta"] = {}
        config["meta"]["config.version"] = str(current_version)
        # in version 1 we changed some directories from being absolute to being relative
        config["system"]["log.root.dir"] = "logs"
        config["provisioning"]["local.install.dir"] = "install"
        config["reporting"]["report.base.dir"] = "reports"
    if current_version == 1 and target_version > current_version:
        logger.info("Migrating config from version [1] to [2]")
        current_version = 2
        config["meta"]["config.version"] = str(current_version)
        # no need to ask the user now if we are about to upgrade to version 4
        config["reporting"]["datastore.type"] = "in-memory"
        config["reporting"]["datastore.host"] = ""
        config["reporting"]["datastore.port"] = ""
        config["reporting"]["datastore.secure"] = ""
        config["reporting"]["datastore.user"] = ""
        config["reporting"]["datastore.password"] = ""
        config["system"]["env.name"] = "local"
    if current_version == 2 and target_version > current_version:
        logger.info("Migrating config from version [2] to [3]")
        current_version = 3
        config["meta"]["config.version"] = str(current_version)
        # Remove obsolete settings
        config["reporting"].pop("report.base.dir")
        config["reporting"].pop("output.html.report.filename")
    if current_version == 3 and target_version > current_version:
        root_dir = config["system"]["root.dir"]
        # user-facing migration notice; pauses afterwards so the user can read it
        out("*****************************************************************************************")
        out("")
        out("You have an old configuration of Rally. Rally has now a much simpler setup")
        out("routine which will autodetect lots of settings for you and it also does not")
        out("require you to setup a metrics store anymore.")
        out("")
        out("Rally will now migrate your configuration but if you don't need advanced features")
        out("like a metrics store, then you should delete the configuration directory:")
        out("")
        out(" rm -rf %s" % config_file.config_dir)
        out("")
        out("and then rerun Rally's configuration routine:")
        out("")
        out(" %s configure" % PROGRAM_NAME)
        out("")
        out("Please also note you have %.1f GB of data in your current benchmark directory at"
            % convert.bytes_to_gb(io.get_size(root_dir)))
        out()
        out(" %s" % root_dir)
        out("")
        out("You might want to clean up this directory also.")
        out()
        out("For more details please see %s" % console.format.link("https://github.com/elastic/rally/blob/master/CHANGELOG.md#030"))
        out("")
        out("*****************************************************************************************")
        out("")
        out("Pausing for 10 seconds to let you consider this message.")
        time.sleep(10)
        logger.info("Migrating config from version [3] to [4]")
        current_version = 4
        config["meta"]["config.version"] = str(current_version)
        if len(config["reporting"]["datastore.host"]) > 0:
            config["reporting"]["datastore.type"] = "elasticsearch"
        else:
            config["reporting"]["datastore.type"] = "in-memory"
        # Remove obsolete settings
        config["build"].pop("maven.bin")
        config["benchmarks"].pop("metrics.stats.disk.device")
    if current_version == 4 and target_version > current_version:
        config["tracks"] = {}
        config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"
        current_version = 5
        config["meta"]["config.version"] = str(current_version)
    if current_version == 5 and target_version > current_version:
        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(False)
        current_version = 6
        config["meta"]["config.version"] = str(current_version)
    if current_version == 6 and target_version > current_version:
        # Remove obsolete settings
        config.pop("provisioning")
        config["system"].pop("log.root.dir")
        current_version = 7
        config["meta"]["config.version"] = str(current_version)
    if current_version == 7 and target_version > current_version:
        # move [system][root.dir] to [node][root.dir]
        if "node" not in config:
            config["node"] = {}
        config["node"]["root.dir"] = config["system"].pop("root.dir")
        # also move all references!
        for section in config:
            for k, v in config[section].items():
                config[section][k] = v.replace("${system:root.dir}", "${node:root.dir}")
        current_version = 8
        config["meta"]["config.version"] = str(current_version)
    if current_version == 8 and target_version > current_version:
        config["teams"] = {}
        config["teams"]["default.url"] = "https://github.com/elastic/rally-teams"
        current_version = 9
        config["meta"]["config.version"] = str(current_version)
    if current_version == 9 and target_version > current_version:
        config["distributions"] = {}
        config["distributions"]["release.1.url"] = "https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-" \
                                                   "{{VERSION}}.tar.gz"
        config["distributions"]["release.2.url"] = "https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/" \
                                                   "distribution/tar/elasticsearch/{{VERSION}}/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.url"] = "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{{VERSION}}.tar.gz"
        config["distributions"]["release.cache"] = "true"
        current_version = 10
        config["meta"]["config.version"] = str(current_version)
    if current_version == 10 and target_version > current_version:
        config["runtime"]["java.home"] = config["runtime"].pop("java8.home")
        current_version = 11
        config["meta"]["config.version"] = str(current_version)
    # all migrations done
    config_file.store(config)
    logger.info("Successfully self-upgraded configuration to version [%s]" % target_version)
def start(self, car):
    """
    Start an Elasticsearch node inside a Docker container via docker-compose and return the started cluster.

    :param car: The car describing heap size and extra JVM options for the container.
    :return: A ``cluster.Cluster`` instance with telemetry attached.
    """
    # hardcoded for the moment, should actually be identical to internal launcher
    # Only needed on Mac:
    # hosts = [{"host": process.run_subprocess_with_output("docker-machine ip default")[0].strip(), "port": 9200}]
    hosts = [{"host": "localhost", "port": 9200}]
    client_options = self.cfg.opts("launcher", "client.options")
    # unified client config
    self.cfg.add(config.Scope.benchmark, "client", "hosts", hosts)
    self.cfg.add(config.Scope.benchmark, "client", "options", client_options)
    es = self.client_factory(hosts, client_options).create()
    t = telemetry.Telemetry(
        self.cfg,
        devices=[
            # Be aware that some the meta-data are taken from the host system, not the container (e.g. number of CPU cores) so if the
            # Docker container constrains these, the metrics are actually wrong.
            telemetry.EnvironmentInfo(self.cfg, es, self.metrics_store),
            telemetry.NodeStats(self.cfg, es, self.metrics_store),
            telemetry.IndexStats(self.cfg, es, self.metrics_store),
            telemetry.DiskIo(self.cfg, self.metrics_store),
            telemetry.CpuUsage(self.cfg, self.metrics_store)
        ])
    distribution_version = self.cfg.opts("source", "distribution.version", mandatory=False)
    install_dir = self._install_dir()
    io.ensure_dir(install_dir)
    # build the JVM options string from the car's heap and extra options
    java_opts = ""
    if car.heap:
        java_opts += "-Xms%s -Xmx%s " % (car.heap, car.heap)
    if car.java_opts:
        java_opts += car.java_opts
    vars = {
        "es_java_opts": java_opts,
        # give the container half of the host's total memory
        "container_memory_gb": "%dg" % (convert.bytes_to_gb(psutil.virtual_memory().total) // 2),
        "es_data_dir": "%s/data" % install_dir,
        "es_version": distribution_version
    }
    docker_cfg = self._render_template_from_file(vars)
    logger.info("Starting Docker container with configuration:\n%s" % docker_cfg)
    docker_cfg_path = self._docker_cfg_path()
    with open(docker_cfg_path, "wt") as f:
        f.write(docker_cfg)
    c = cluster.Cluster([], t)
    self._start_process(cmd="docker-compose -f %s up" % docker_cfg_path, node_name="rally0")
    # Wait for a little while: Plugins may still be initializing although the node has already started.
    time.sleep(10)
    t.attach_to_cluster(c)
    logger.info("Successfully started Docker container")
    return c
def migrate(config_file, current_version, target_version, out=print):
    """
    Upgrade the configuration in ``config_file`` step by step from ``current_version`` to ``target_version``.

    Each migration step is guarded by an exact version check so the steps chain: after a step has run it
    advances ``current_version`` so the next guard can fire. A backup is taken before anything is changed.

    :param config_file: Configuration file abstraction providing ``backup()``, ``load()`` and ``store()``.
    :param current_version: The config version found on disk.
    :param target_version: The config version this Rally release expects.
    :param out: Output function for user-facing messages (default: ``print``).
    :raises ConfigError: If the configuration on disk is already newer than ``target_version``.
    """
    logger.info("Upgrading configuration from version [%s] to [%s]." % (current_version, target_version))
    # Something is really fishy. We don't want to downgrade the configuration.
    if current_version >= target_version:
        raise ConfigError("The existing config file is available in a later version already. Expected version <= [%s] but found [%s]"
                          % (target_version, current_version))
    # but first a backup...
    config_file.backup()
    config = config_file.load(interpolation=None)
    if current_version == 0 and target_version > current_version:
        logger.info("Migrating config from version [0] to [1]")
        current_version = 1
        config["meta"] = {}
        config["meta"]["config.version"] = str(current_version)
        # in version 1 we changed some directories from being absolute to being relative
        config["system"]["log.root.dir"] = "logs"
        config["provisioning"]["local.install.dir"] = "install"
        config["reporting"]["report.base.dir"] = "reports"
    if current_version == 1 and target_version > current_version:
        logger.info("Migrating config from version [1] to [2]")
        current_version = 2
        config["meta"]["config.version"] = str(current_version)
        # no need to ask the user now if we are about to upgrade to version 4
        config["reporting"]["datastore.type"] = "in-memory"
        config["reporting"]["datastore.host"] = ""
        config["reporting"]["datastore.port"] = ""
        config["reporting"]["datastore.secure"] = ""
        config["reporting"]["datastore.user"] = ""
        config["reporting"]["datastore.password"] = ""
        config["system"]["env.name"] = "local"
    if current_version == 2 and target_version > current_version:
        logger.info("Migrating config from version [2] to [3]")
        current_version = 3
        config["meta"]["config.version"] = str(current_version)
        # Remove obsolete settings
        config["reporting"].pop("report.base.dir")
        config["reporting"].pop("output.html.report.filename")
    if current_version == 3 and target_version > current_version:
        root_dir = config["system"]["root.dir"]
        # user-facing migration notice; pauses afterwards so the user can read it
        out("*****************************************************************************************")
        out("")
        out("You have an old configuration of Rally. Rally has now a much simpler setup")
        out("routine which will autodetect lots of settings for you and it also does not")
        out("require you to setup a metrics store anymore.")
        out("")
        out("Rally will now migrate your configuration but if you don't need advanced features")
        out("like a metrics store, then you should delete the configuration directory:")
        out("")
        out(" rm -rf %s" % config_file.config_dir)
        out("")
        out("and then rerun Rally's configuration routine:")
        out("")
        out(" %s configure" % PROGRAM_NAME)
        out("")
        out("Please also note you have %.1f GB of data in your current benchmark directory at"
            % convert.bytes_to_gb(io.get_size(root_dir)))
        out()
        out(" %s" % root_dir)
        out("")
        out("You might want to clean up this directory also.")
        out()
        out("For more details please see %s" % console.format.link("https://github.com/elastic/rally/blob/master/CHANGELOG.md#030"))
        out("")
        out("*****************************************************************************************")
        out("")
        out("Pausing for 10 seconds to let you consider this message.")
        time.sleep(10)
        logger.info("Migrating config from version [3] to [4]")
        current_version = 4
        config["meta"]["config.version"] = str(current_version)
        if len(config["reporting"]["datastore.host"]) > 0:
            config["reporting"]["datastore.type"] = "elasticsearch"
        else:
            config["reporting"]["datastore.type"] = "in-memory"
        # Remove obsolete settings
        config["build"].pop("maven.bin")
        config["benchmarks"].pop("metrics.stats.disk.device")
    if current_version == 4 and target_version > current_version:
        config["tracks"] = {}
        config["tracks"]["default.url"] = "https://github.com/elastic/rally-tracks"
        current_version = 5
        config["meta"]["config.version"] = str(current_version)
    if current_version == 5 and target_version > current_version:
        config["defaults"] = {}
        config["defaults"]["preserve_benchmark_candidate"] = str(False)
        current_version = 6
        config["meta"]["config.version"] = str(current_version)
    # all migrations done
    config_file.store(config)
    logger.info("Successfully self-upgraded configuration to version [%s]" % target_version)