def run(self):
    """
    Kicks off one lap: advances the lap counter, notifies the mechanic, then
    spawns the driver coordinator actor and tells it to start the benchmark.
    """
    self.lap_counter.before_lap()
    current_lap = self.lap_counter.current_lap
    self.metrics_store.lap = current_lap

    self.logger.info("Telling mechanic of benchmark start.")
    self.send(self.mechanic, mechanic.OnBenchmarkStart(current_lap))

    # The driver coordinator must be placed on the actor-system node flagged as coordinator.
    self.main_driver = self.createActor(
        driver.DriverActor,
        targetActorRequirements={"coordinator": True})
    self.logger.info("Telling driver to start benchmark.")
    self.send(
        self.main_driver,
        driver.StartBenchmark(self.cfg, self.race.track, self.metrics_store.meta_info, current_lap))
def run(self, lap):
    """
    Runs the provided lap of a benchmark.

    :param lap: The current lap number.
    :return: True iff the benchmark may go on. False iff the user has cancelled the benchmark.
    """
    self.metrics_store.lap = lap
    logger.info("Notifying mechanic of benchmark start.")
    # we could use #tell() here but then the ask call to driver below will fail because it returns the response that mechanic
    # sends (see http://godaddy.github.io/Thespian/doc/using.html#sec-6-6-1).
    self.actor_system.ask(self.mechanic, mechanic.OnBenchmarkStart(lap))
    logger.info("Asking driver to start benchmark.")
    main_driver = self.actor_system.createActor(
        driver.Driver,
        targetActorRequirements={"coordinator": True},
        globalName="/rally/driver/coordinator")
    try:
        # Blocks until the driver reports completion, cancellation or failure.
        result = self.actor_system.ask(
            main_driver, driver.StartBenchmark(self.cfg, self.track, self.metrics_store.meta_info, lap))
    except KeyboardInterrupt:
        # Ctrl-C while waiting: ask the driver to cancel and treat its reply as the result,
        # so the normal dispatch below handles the cancellation path.
        result = self.actor_system.ask(main_driver, driver.BenchmarkCancelled())
        logger.info("User has cancelled the benchmark.")
    if isinstance(result, driver.BenchmarkComplete):
        logger.info("Benchmark is complete.")
        logger.info("Bulk adding request metrics to metrics store.")
        self.metrics_store.bulk_add(result.metrics)
        # Stop the mechanic only after a successful run; it replies with system-level metrics.
        stop_result = self.actor_system.ask(self.mechanic, mechanic.OnBenchmarkStop())
        if isinstance(stop_result, mechanic.BenchmarkStopped):
            logger.info("Bulk adding system metrics to metrics store.")
            self.metrics_store.bulk_add(stop_result.system_metrics)
        else:
            raise exceptions.RallyError(
                "Mechanic has returned no metrics but instead [%s]. Terminating race without result."
                % str(stop_result))
        logger.info("Flushing metrics data...")
        self.metrics_store.flush()
        logger.info("Flushing done")
    elif isinstance(result, driver.BenchmarkCancelled):
        logger.info("User has cancelled the benchmark.")
        return False
    elif isinstance(result, driver.BenchmarkFailure):
        logger.info("Driver has reported a benchmark failure.")
        raise exceptions.RallyError(result.message, result.cause)
    else:
        # Any other reply type means the driver misbehaved; abort the race.
        raise exceptions.RallyError(
            "Driver has returned no metrics but instead [%s]. Terminating race without result."
            % str(result))
    return True
def run(self, lap):
    """
    Runs a single benchmark lap: starts the cluster hooks, asks the driver
    actor to run the benchmark and stores the resulting metrics.

    :param lap: The current lap number.
    :raises exceptions.RallyError: if the driver reports a failure or returns
        an unexpected response.
    """
    self.metrics_store.lap = lap
    benchmark_driver = self.actor_system.createActor(driver.Driver)
    self.cluster.on_benchmark_start()
    outcome = self.actor_system.ask(
        benchmark_driver,
        driver.StartBenchmark(self.cfg, self.track, self.metrics_store.meta_info, self.metrics_store.lap))
    if isinstance(outcome, driver.BenchmarkComplete):
        self.cluster.on_benchmark_stop()
        self.metrics_store.bulk_add(outcome.metrics)
        self.metrics_store.flush()
    elif isinstance(outcome, driver.BenchmarkFailure):
        raise exceptions.RallyError(outcome.message, outcome.cause)
    else:
        raise exceptions.RallyError("Driver has returned no metrics but instead [%s]. Terminating race without result." % str(outcome))
def benchmark_internal(ctx):
    """
    Benchmarks a Rally-provisioned cluster: resolves the selected car, starts
    the metrics store and the engine via the mechanic, runs the benchmark via
    the driver actor, stores the resulting metrics and tears everything down.

    :param ctx: The racing context carrying config, track and mechanic.
    :raises exceptions.ImproperlyConfigured: if the selected car is unknown.
    :raises exceptions.RallyError: if the driver returns no metrics.
    """
    track_name = ctx.config.opts("system", "track")
    challenge_name = ctx.config.opts("benchmarks", "challenge")
    selected_car_name = ctx.config.opts("benchmarks", "car")
    print("Racing on track [%s] and challenge [%s] with car [%s]" % (track_name, challenge_name, selected_car_name))
    # TODO dm module refactoring: mechanic
    # Pick the first car whose name matches; stops scanning as soon as it is found
    # (the original loop scanned the whole list without breaking).
    selected_car = next((c for c in car.cars if c.name == selected_car_name), None)
    if not selected_car:
        raise exceptions.ImproperlyConfigured(
            "Unknown car [%s]. You can list the available cars with %s list cars." % (selected_car_name, PROGRAM_NAME))
    port = ctx.config.opts("provisioning", "node.http.port")
    hosts = [{"host": "localhost", "port": port}]
    client_options = ctx.config.opts("launcher", "client.options")
    # unified client config
    ctx.config.add(config.Scope.benchmark, "client", "hosts", hosts)
    ctx.config.add(config.Scope.benchmark, "client", "options", client_options)
    es_client = client.EsClientFactory(hosts, client_options).create()
    # TODO dm module refactoring: separate module? don't let the mechanic handle the metrics store but rather just provide it
    ctx.mechanic.start_metrics(track_name, challenge_name, selected_car_name)
    cluster = ctx.mechanic.start_engine(selected_car, es_client, port)
    actors = thespian.actors.ActorSystem()
    main_driver = actors.createActor(driver.Driver)
    # TODO dm: Retrieving the metrics store here is *dirty*...
    metrics_store = ctx.mechanic._metrics_store
    cluster.on_benchmark_start()
    # Blocks until the driver has finished the benchmark.
    completed = actors.ask(
        main_driver, driver.StartBenchmark(ctx.config, ctx.track, metrics_store.meta_info))
    cluster.on_benchmark_stop()
    if not hasattr(completed, "metrics"):
        raise exceptions.RallyError(
            "Driver has returned no metrics but instead [%s]. Terminating race without result." % str(completed))
    metrics_store.bulk_add(completed.metrics)
    ctx.mechanic.stop_engine(cluster)
    ctx.mechanic.revise_candidate()
    ctx.mechanic.stop_metrics()
def benchmark_external(ctx):
    """
    Benchmarks an externally provisioned cluster: runs the benchmark via the
    driver actor and stores the resulting metrics.

    :param ctx: The racing context carrying config, track, cluster and mechanic.
    :raises exceptions.RallyError: if the driver returns no metrics.
    """
    # TODO dm module refactoring: we can just inline prepare_benchmark_external and simplify this code a bit
    track_name = ctx.config.opts("system", "track")
    challenge_name = ctx.config.opts("benchmarks", "challenge")
    print("Racing on track [%s] and challenge [%s]" % (track_name, challenge_name))
    actor_system = thespian.actors.ActorSystem()
    benchmark_driver = actor_system.createActor(driver.Driver)
    # TODO dm: Retrieving the metrics store here is *dirty*...
    metrics_store = ctx.mechanic._metrics_store
    ctx.cluster.on_benchmark_start()
    # Blocks until the driver has finished the benchmark.
    completed = actor_system.ask(
        benchmark_driver,
        driver.StartBenchmark(ctx.config, ctx.track, metrics_store.meta_info))
    ctx.cluster.on_benchmark_stop()
    if not hasattr(completed, "metrics"):
        raise exceptions.RallyError(
            "Driver has returned no metrics but instead [%s]. Terminating race without result." % str(completed))
    metrics_store.bulk_add(completed.metrics)
    ctx.mechanic.stop_metrics()
def run(self):
    """Tells the previously created driver actor to start the benchmark."""
    self.logger.info("Telling driver to start benchmark.")
    self.send(self.main_driver, driver.StartBenchmark())
def receiveMsg_PreparationComplete(self, msg, sender):
    """
    Handles the PreparationComplete message: passes the reported distribution
    details on to the coordinator and then tells the driver to start the benchmark.

    :param msg: The PreparationComplete message.
    :param sender: The sending actor (unused).
    """
    distribution_flavor = msg.distribution_flavor
    distribution_version = msg.distribution_version
    revision = msg.revision
    self.coordinator.on_preparation_complete(distribution_flavor, distribution_version, revision)
    self.logger.info("Telling driver to start benchmark.")
    self.send(self.main_driver, driver.StartBenchmark())