Пример #1
0
def prepare_track(ctx):
    """
    Loads the configured track, prepares it and resolves the selected challenge,
    storing both on the context and registering their directories in the config.

    :param ctx: A racing context carrying ``config``; receives ``track`` and ``challenge``.
    :raises exceptions.ImproperlyConfigured: if the track cannot be loaded or the challenge is unknown.
    """
    track_name = ctx.config.opts("system", "track")
    try:
        ctx.track = track.load_track(ctx.config, track_name)
    except FileNotFoundError:
        logger.error("Cannot load track [%s]" % track_name)
        raise exceptions.ImproperlyConfigured(
            "Cannot load track %s. You can list the available tracks with %s list tracks."
            % (track_name, PROGRAM_NAME))

    track.prepare_track(ctx.track, ctx.config)
    # Paths only depends on the config, so build it once and reuse it for all
    # derived directories below (the original built it twice).
    race_paths = paths.Paths(ctx.config)
    ctx.config.add(config.Scope.benchmark, "system", "track.root.dir",
                   race_paths.track_root(track_name))

    selected_challenge = ctx.config.opts("benchmarks", "challenge")
    # Assign explicitly (None when not found) so the check below cannot hit an
    # undefined attribute; also stop at the first matching challenge.
    ctx.challenge = next(
        (c for c in ctx.track.challenges if c.name == selected_challenge), None)

    if not ctx.challenge:
        raise exceptions.ImproperlyConfigured(
            "Unknown challenge [%s] for track [%s]. You can list the available tracks and their "
            "challenges with %s list tracks." %
            (selected_challenge, ctx.track.name, PROGRAM_NAME))

    ctx.config.add(
        config.Scope.challenge, "system", "challenge.root.dir",
        race_paths.challenge_root(ctx.track.name, ctx.challenge.name))
    ctx.config.add(
        config.Scope.challenge, "system", "challenge.log.dir",
        race_paths.challenge_logs(ctx.track.name, ctx.challenge.name))
Пример #2
0
def select_challenge(config, t):
    """
    Returns the challenge of track *t* whose name is selected in *config*.

    :param config: The config object carrying the "benchmarks"/"challenge" option.
    :param t: The track whose challenges are searched.
    :return: The first challenge with the configured name.
    :raises exceptions.ImproperlyConfigured: if no such challenge exists.
    """
    wanted = config.opts("benchmarks", "challenge")
    chosen = next((c for c in t.challenges if c.name == wanted), None)
    if chosen is not None:
        return chosen
    raise exceptions.ImproperlyConfigured("Unknown challenge [%s] for track [%s]. You can list the available tracks and their "
                                          "challenges with %s list tracks." % (wanted, t.name, PROGRAM_NAME))
Пример #3
0
def dispatch_sub_command(cfg, sub_command):
    """
    Dispatches *sub_command* ("compare", "list" or "race") to its handler.

    :param cfg: The config object handed to the chosen handler.
    :param sub_command: The subcommand name.
    :return: True on success, False if the subcommand failed (the error is
             logged and reported on stdout).
    """
    try:
        if sub_command == "compare":
            reporter.compare(cfg)
        elif sub_command == "list":
            # the module-level list() subcommand handler (shadows the builtin)
            list(cfg)
        elif sub_command == "race":
            racecontrol.run(cfg)
        else:
            raise exceptions.ImproperlyConfigured("Unknown subcommand [%s]" %
                                                  sub_command)
        return True
    except exceptions.RallyError as e:
        # lazy %-style args: the message is only formatted when actually emitted
        logging.exception("Cannot run subcommand [%s].", sub_command)
        print("\nERROR: Cannot %s\n\nReason: %s" % (sub_command, e))
        print("")
        print_help_on_errors(cfg)
        return False
    except BaseException as e:
        # NOTE(review): BaseException also covers KeyboardInterrupt/SystemExit;
        # presumably intentional so any abort is reported -- confirm.
        logging.exception(
            "A fatal error occurred while running subcommand [%s].",
            sub_command)
        print("\nFATAL: Cannot %s\n\nReason: %s" % (sub_command, e))
        print("")
        print_help_on_errors(cfg)
        return False
Пример #4
0
    def run(self, track):
        """
        Runs all pipeline steps for every selected track setup of *track*.

        :param track: The track to race on.
        :raises exceptions.ImproperlyConfigured: if none of the selected track
                setups exists for this track.
        """
        selected_setups = self.ctx.config.opts("benchmarks",
                                               "tracksetups.selected")
        # Paths only depends on the config, so build it once instead of once
        # per selected track setup.
        race_paths = paths.Paths(self.ctx.config)
        any_selected = False
        for track_setup in track.track_setups:
            if track_setup.name not in selected_setups:
                logger.debug("Skipping track setup [%s] (not selected)." %
                             track_setup.name)
                continue
            any_selected = True
            self.ctx.config.add(
                config.Scope.trackSetup, "system", "track.setup.root.dir",
                race_paths.track_setup_root(track.name, track_setup.name))
            self.ctx.config.add(
                config.Scope.trackSetup, "system", "track.setup.log.dir",
                race_paths.track_setup_logs(track.name, track_setup.name))
            print("Racing on track '%s' with setup '%s'" %
                  (track.name, track_setup.name))
            for step in self.steps:
                step.run(track, track_setup)

        if not any_selected:
            raise exceptions.ImproperlyConfigured(
                "Unknown track setup(s) %s for track [%s]. You can list the available tracks and their "
                "track setups with esrally list tracks." %
                (selected_setups, track.name))
Пример #5
0
    def start(self, command):
        """
        Starts the provided command.

        :param command: A command name ("list" or "race").
        :return: True on success, False otherwise.
        :raises exceptions.ImproperlyConfigured: for an unknown command.
        """
        ctx = RacingContext(self._config)
        if command == "list":
            self._list(ctx)
        elif command == "race":
            try:
                pipeline_factory = self._choose(
                    pipelines, "pipeline",
                    "You can list the available pipelines with esrally list pipelines.")
                pipeline = pipeline_factory(ctx)
                selected_track = self._choose(
                    track.tracks, "track",
                    "You can list the available tracks with esrally list tracks.")
                metrics.RaceStore(self._config).store_race()
                pipeline.run(selected_track)
                return True
            except exceptions.SystemSetupError as e:
                logging.exception("Cannot run benchmark")
                print("\nERROR: Cannot run benchmark\n\nReason: %s" % e)
                return False
            except exceptions.ImproperlyConfigured as e:
                logging.exception("Cannot run benchmark due to configuration error.")
                print("\nERROR: Cannot run benchmark\n\nReason: %s" % e)
                return False
        else:
            raise exceptions.ImproperlyConfigured("Unknown command [%s]" % command)
Пример #6
0
 def _choose(self, source, what, help):
     """
     Looks up the configured *what* (e.g. "pipeline", "track") in *source*.

     :param source: A dict-like mapping of names to choices.
     :param what: The config key under the "system" section naming the choice.
     :param help: Help text appended to the error message for an unknown name.
     :return: The entry of *source* for the configured name.
     :raises exceptions.ImproperlyConfigured: if *source* has no such entry.
     """
     # Resolve the name outside the try block: if opts() itself raised
     # KeyError, the original handler crashed on the then-unbound "name".
     name = self._config.opts("system", what)
     try:
         return source[name]
     except KeyError:
         raise exceptions.ImproperlyConfigured("Unknown %s [%s]. %s" %
                                               (what, name, help))
Пример #7
0
def run(cfg):
    """
    Instantiates and runs the configured pipeline.

    :param cfg: The config object.
    :raises exceptions.ImproperlyConfigured: if the configured pipeline name is unknown.
    """
    name = cfg.opts("system", "pipeline")
    try:
        pipeline = pipelines[name](RacingContext(cfg))
    except KeyError:
        raise exceptions.ImproperlyConfigured(
            "Unknown pipeline [%s]. You can list the available pipelines with %s list pipelines."
            % (name, PROGRAM_NAME))
    # Run outside the try block so a KeyError raised inside the pipeline is not
    # misreported as an unknown pipeline name (matches the later variant of
    # this function that also chains crashes into RallyError).
    pipeline()
Пример #8
0
def prepare_car(ctx):
    """
    Resolves the car selected in the config and stores it on the context.

    :param ctx: A racing context carrying ``config``; receives ``car``.
    :raises exceptions.ImproperlyConfigured: if no car with the configured name exists.
    """
    selected_car = ctx.config.opts("benchmarks", "car")
    # Assign explicitly (None when not found) so the check below cannot hit an
    # undefined attribute; also stop at the first matching car.
    ctx.car = next((c for c in car.cars if c.name == selected_car), None)

    if not ctx.car:
        raise exceptions.ImproperlyConfigured(
            "Unknown car [%s]. You can list the available cars with %s list cars."
            % (selected_car, PROGRAM_NAME))
Пример #9
0
def compare(cfg):
    """
    Compares two recorded races (baseline vs. contender) and reports the result.

    :param cfg: The config object; must carry both comparison timestamps.
    :raises exceptions.ImproperlyConfigured: if either timestamp is missing.
    """
    baseline_ts = cfg.opts("report", "comparison.baseline.timestamp")
    contender_ts = cfg.opts("report", "comparison.contender.timestamp")

    if not (baseline_ts and contender_ts):
        raise exceptions.ImproperlyConfigured(
            "compare needs baseline and a contender")
    race_store = metrics.race_store(cfg)
    baseline = race_store.find_by_timestamp(baseline_ts)
    contender = race_store.find_by_timestamp(contender_ts)
    ComparisonReporter(cfg).report(baseline, contender)
Пример #10
0
def prepare_track(ctx):
    """
    Loads and prepares the configured track, storing it on the context.

    :param ctx: A racing context carrying ``config``; receives ``track``.
    :raises exceptions.ImproperlyConfigured: if the track cannot be loaded.
    """
    track_name = ctx.config.opts("system", "track")
    try:
        loaded_track = track.load_track(ctx.config, track_name)
    except FileNotFoundError:
        logger.error("Cannot load track [%s]" % track_name)
        raise exceptions.ImproperlyConfigured(
            "Cannot load track %s. You can list the available tracks with %s list tracks."
            % (track_name, PROGRAM_NAME))
    ctx.track = loaded_track
    # TODO #71: Reconsider this in case we distribute drivers. *For now* the driver will only be on a single machine, so we're safe.
    track.prepare_track(ctx.track, ctx.config)
Пример #11
0
def list(cfg):
    """
    Prints the configuration option chosen by the user (telemetry devices,
    tracks, pipelines, races or cars).

    NOTE: intentionally shadows the builtin ``list``; it is invoked as a
    subcommand handler (see dispatch_sub_command).

    :param cfg: The config object.
    :raises exceptions.ImproperlyConfigured: for an unknown list option.
    """
    what = cfg.opts("system", "list.config.option")
    # dispatch table; lambdas keep the module lookups lazy
    listers = {
        "telemetry": lambda: telemetry.list_telemetry(cfg),
        "tracks": lambda: track.list_tracks(cfg),
        "pipelines": lambda: racecontrol.list_pipelines(),
        "races": lambda: metrics.list_races(cfg),
        "cars": lambda: car.list_cars(),
    }
    lister = listers.get(what)
    if lister is None:
        raise exceptions.ImproperlyConfigured("Cannot list unknown configuration option [%s]" % what)
    lister()
Пример #12
0
def benchmark_internal(ctx):
    """
    Runs a single race end-to-end against a locally launched node: resolves the
    selected car, starts the metrics store and the engine via the mechanic,
    drives the benchmark through an actor-based driver and finally tears
    everything down again.

    :param ctx: A racing context carrying ``config``, ``track`` and ``mechanic``.
    :raises exceptions.ImproperlyConfigured: if the configured car is unknown.
    :raises exceptions.RallyError: if the driver returns no metrics.
    """
    track_name = ctx.config.opts("system", "track")
    challenge_name = ctx.config.opts("benchmarks", "challenge")
    selected_car_name = ctx.config.opts("benchmarks", "car")

    print("Racing on track [%s] and challenge [%s] with car [%s]" %
          (track_name, challenge_name, selected_car_name))
    # TODO dm module refactoring: mechanic
    # Linear scan over all known cars; the last match wins (names are
    # presumably unique -- TODO confirm).
    selected_car = None
    for c in car.cars:
        if c.name == selected_car_name:
            selected_car = c

    if not selected_car:
        raise exceptions.ImproperlyConfigured(
            "Unknown car [%s]. You can list the available cars with %s list cars."
            % (selected_car_name, PROGRAM_NAME))

    port = ctx.config.opts("provisioning", "node.http.port")
    hosts = [{"host": "localhost", "port": port}]
    client_options = ctx.config.opts("launcher", "client.options")
    # unified client config: published so other components read the same
    # connection settings used here
    ctx.config.add(config.Scope.benchmark, "client", "hosts", hosts)
    ctx.config.add(config.Scope.benchmark, "client", "options", client_options)

    es_client = client.EsClientFactory(hosts, client_options).create()

    # TODO dm module refactoring: separate module? don't let the mechanic handle the metrics store but rather just provide it
    ctx.mechanic.start_metrics(track_name, challenge_name, selected_car_name)
    cluster = ctx.mechanic.start_engine(selected_car, es_client, port)
    actors = thespian.actors.ActorSystem()
    main_driver = actors.createActor(driver.Driver)

    #TODO dm: Retrieving the metrics store here is *dirty*...
    metrics_store = ctx.mechanic._metrics_store

    cluster.on_benchmark_start()
    # ask() waits for the driver actor's response, i.e. the finished benchmark
    completed = actors.ask(
        main_driver,
        driver.StartBenchmark(ctx.config, ctx.track, metrics_store.meta_info))
    cluster.on_benchmark_stop()
    if not hasattr(completed, "metrics"):
        raise exceptions.RallyError(
            "Driver has returned no metrics but instead [%s]. Terminating race without result."
            % str(completed))
    metrics_store.bulk_add(completed.metrics)

    # teardown mirrors the startup sequence above
    ctx.mechanic.stop_engine(cluster)
    ctx.mechanic.revise_candidate()
    ctx.mechanic.stop_metrics()
Пример #13
0
def run(cfg):
    """
    Runs the configured pipeline, converting unexpected crashes into RallyError.

    :param cfg: The config object.
    :raises exceptions.ImproperlyConfigured: if the configured pipeline name is unknown.
    :raises exceptions.RallyError: if the race ends with a fatal crash.
    """
    name = cfg.opts("system", "pipeline")
    try:
        pipeline = pipelines[name](RacingContext(cfg))
    except KeyError:
        raise exceptions.ImproperlyConfigured(
            "Unknown pipeline [%s]. You can list the available pipelines with %s list pipelines."
            % (name, PROGRAM_NAME))
    try:
        pipeline()
    except exceptions.RallyError as e:
        # our own errors are already meaningful; treated differently on top-level
        raise e
    except BaseException:
        # wrap anything else, preserving the original traceback for the logs
        tb = sys.exc_info()[2]
        raise exceptions.RallyError(
            "This race ended early with a fatal crash. For details please see the logs."
        ).with_traceback(tb)
Пример #14
0
    def _put(self,
             level,
             level_key,
             name,
             value,
             unit,
             operation,
             operation_type,
             sample_type,
             absolute_time=None,
             relative_time=None):
        """
        Adds one metrics record to the store.

        :param level: The meta info level (cluster or node) of this metric.
        :param level_key: The node name for node-level metrics; unused for cluster level.
        :param name: The metric name.
        :param value: The metric value.
        :param unit: The unit of the metric value.
        :param operation: The operation this metric belongs to (optional).
        :param operation_type: The type of that operation (optional).
        :param sample_type: The sample type; its lower-cased name is recorded.
        :param absolute_time: Wall-clock time; defaults to the store's clock.
        :param relative_time: Elapsed time; defaults to the stop watch's split time.
        :raises exceptions.ImproperlyConfigured: for an unknown meta info level.
        """
        if level == MetaInfoScope.cluster:
            meta = self._meta_info[MetaInfoScope.cluster]
        elif level == MetaInfoScope.node:
            # node records inherit all cluster meta info; copy first so the
            # cluster-level dict is not mutated
            meta = dict(self._meta_info[MetaInfoScope.cluster])
            meta.update(self._meta_info[MetaInfoScope.node][level_key])
        else:
            raise exceptions.ImproperlyConfigured(
                "Unknown meta info level [%s] for metric [%s]" % (level, name))

        if absolute_time is None:
            absolute_time = self._clock.now()
        if relative_time is None:
            relative_time = self._stop_watch.split_time()

        record = {
            "@timestamp": time.to_epoch_millis(absolute_time),
            "relative-time": int(relative_time * 1000 * 1000),
            "trial-timestamp": self._invocation,
            "environment": self._environment_name,
            "track": self._track,
            "challenge": self._challenge,
            "car": self._car,
            "name": name,
            "value": value,
            "unit": unit,
            "sample-type": sample_type.name.lower(),
            "meta": meta,
        }
        # optional fields are only recorded when present
        if operation:
            record["operation"] = operation
        if operation_type:
            record["operation-type"] = operation_type.name

        self._add(record)
Пример #15
0
    def add_meta_info(self, scope, scope_key, key, value):
        """
        Adds new meta information to the metrics store. Metrics entries created
        afterwards are guaranteed to contain it, provided they are on the same
        level or below (a cluster-level metric will not contain node-level meta
        information, but node-level records contain all cluster-level meta info).

        :param scope: The scope of the meta information. See MetaInfoScope.
        :param scope_key: The key within the scope: None for cluster-level metrics, the node name for node-level ones.
        :param key: The key of the meta information.
        :param value: The value of the meta information.
        :raises exceptions.ImproperlyConfigured: for an unknown scope.
        """
        if scope == MetaInfoScope.cluster:
            self._meta_info[MetaInfoScope.cluster][key] = value
        elif scope == MetaInfoScope.node:
            # create the per-node dict on first use
            node_meta = self._meta_info[MetaInfoScope.node].setdefault(scope_key, {})
            node_meta[key] = value
        else:
            raise exceptions.ImproperlyConfigured("Unknown meta info scope [%s]" % scope)
Пример #16
0
 def _list(self, ctx):
     """
     Prints the configuration option selected by the user: available telemetry
     devices, tracks (with their track setups) or pipelines.

     :param ctx: The racing context carrying the config.
     :raises exceptions.ImproperlyConfigured: for an unknown list option.
     """
     what = ctx.config.opts("system", "list.config.option")
     if what == "telemetry":
         telemetry.Telemetry(ctx.config).list()
         return
     if what == "tracks":
         print("Available tracks:\n")
         for t in track.tracks.values():
             print("* %s: %s" % (t.name, t.description))
             print("\tTrack setups for this track:")
             for setup in t.track_setups:
                 print("\t* %s" % setup.name)
             print("")
         return
     if what == "pipelines":
         print("Available pipelines:\n")
         for factory in pipelines.values():
             instance = factory(ctx)
             print("* %s: %s" % (instance.name, instance.description))
         return
     raise exceptions.ImproperlyConfigured(
         "Cannot list unknown configuration option [%s]" % what)
Пример #17
0
    def _put(self, level, level_key, name, value, unit, sample_type):
        """
        Adds one metrics record to the in-memory document buffer.

        :param level: The meta info level (cluster or node) of this metric.
        :param level_key: The node name for node-level metrics; unused for cluster level.
        :param name: The metric name.
        :param value: The metric value.
        :param unit: The unit of the metric value.
        :param sample_type: The sample type recorded verbatim in the document.
        :raises exceptions.ImproperlyConfigured: for an unknown meta info level.
        """
        if level == MetaInfoScope.cluster:
            meta = self._meta_info[MetaInfoScope.cluster]
        elif level == MetaInfoScope.node:
            # node records inherit all cluster meta info; copy first so the
            # cluster-level dict is not mutated
            meta = dict(self._meta_info[MetaInfoScope.cluster])
            meta.update(self._meta_info[MetaInfoScope.node][level_key])
        else:
            raise exceptions.ImproperlyConfigured("Unknown meta info level [%s] for metric [%s]" % (level, name))

        self._docs.append({
            "@timestamp": time.to_epoch_millis(self._clock.now()),
            "relative-time": int(self._stop_watch.split_time() * 1000 * 1000),
            "trial-timestamp": self._invocation,
            "environment": self._environment_name,
            "track": self._track,
            "track-setup": self._track_setup,
            "name": name,
            "value": value,
            "unit": unit,
            "sample-type": sample_type,
            "meta": meta,
        })
Пример #18
0
    def _list(self, ctx):
        """
        Prints the configuration option selected by the user in tabular form:
        telemetry devices, tracks, pipelines or recent races.

        :param ctx: The racing context carrying the config.
        :raises exceptions.ImproperlyConfigured: for an unknown list option.
        """
        what = ctx.config.opts("system", "list.config.option")
        if what == "telemetry":
            print("Available telemetry devices:\n")
            print(tabulate.tabulate(telemetry.Telemetry(ctx.config).list(), ["Command", "Name", "Description"]))
            print("\nKeep in mind that each telemetry device may incur a runtime overhead which can skew results.")
        elif what == "tracks":
            print("Available tracks:\n")
            rows = [[t.name, t.short_description, ",".join(map(str, t.track_setups))]
                    for t in track.tracks.values()]
            print(tabulate.tabulate(rows, headers=["Name", "Description", "Track setups"]))

        elif what == "pipelines":
            print("Available pipelines:\n")
            rows = [[pipeline(ctx).name, pipeline(ctx).description]
                    for pipeline in pipelines.values()]
            print(tabulate.tabulate(rows, headers=["Name", "Description"]))
        elif what == "races":
            print("Recent races:\n")
            rows = []
            for race in metrics.RaceStore(ctx.config).list():
                rows.append([race["trial-timestamp"], race["track"],
                             ",".join(race["track-setups"]), race["user-tag"]])

            print(tabulate.tabulate(rows, headers=["Trial Timestamp", "Track", "Track setups", "User Tag"]))
        else:
            raise exceptions.ImproperlyConfigured("Cannot list unknown configuration option [%s]" % what)