Example no. 1
0
    def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
        """Run a single Rally task and snapshot the Grafana time window.

        :param task_file: path to the Rally task definition file
        :param scenario_args: task arguments, serialized for ``--task-args``
        :param result_dir: results directory (logged for debugging only)
        :param test_name: base name for the per-test ``tee`` log file
        :param benchmark: benchmark descriptor; unused here but kept so the
            signature stays compatible with callers
        :returns: ``(from_time, to_time)`` wall-clock timestamps bracketing
            the Rally run (excluding the configured sleeps)
        """
        self.logger.debug("--------------------------------")
        self.logger.debug("task_file: {}".format(task_file))
        self.logger.debug("scenario_args: {}".format(scenario_args))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        # Grafana window opens before any configured pre-run sleep.
        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_before'])
        # str() of a dict uses single quotes; Rally's --task-args wants
        # JSON-style double quotes.
        task_args = str(scenario_args).replace("'", "\"")
        plugins = []
        if "plugins" in self.config['rally']:
            # Each entry is a {name: path} mapping; collect every path.
            # (Iterating an empty list is a no-op, so no length check needed.)
            for plugin in self.config['rally']['plugins']:
                for name in plugin:
                    plugins.append(plugin[name])
        plugin_string = ""
        if plugins:
            plugin_string = "--plugin-paths {}".format(",".join(plugins))
        cmd = "source {}; ".format(get_workload_venv('rally', True))
        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
            plugin_string, task_file, task_args, test_name)
        from_time = time.time()
        # Output is captured by tee; the command's return value is unused,
        # so the original's dangling ['stdout'] subscript is dropped.
        self.tools.run_cmd(cmd)
        to_time = time.time()
        if 'sleep_after' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_after'])
        to_ts = int(time.time() * 1000)
        self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
        self.grafana.print_dashboard_url(test_name)
        return (from_time, to_time)
Example no. 2
0
 def shaker_checks(self):
     """Abort the run unless the shaker guest image exists in Glance."""
     check_cmd = "source {}; source {}; glance image-list | grep -w shaker-image".format(
         get_workload_venv('shaker', True), self.overcloudrc)
     image_listing = self.tools.run_cmd(check_cmd)['stdout']
     if image_listing != "":
         self.logger.info("Shaker image is built, continuing")
     else:
         # No grep hit means the image was never built; bail out hard.
         self.logger.error("Shaker Image is not built, try again")
         exit(1)
Example no. 3
0
 def run_scenario(self, scenario, result_dir, test_name, filename,
                  shaker_uuid, es_ts, es_list, run):
     """Run one shaker scenario, snapshot Grafana, and index the results.

     Builds the shaker command line (adding a ``--matrix`` host override
     for external-network scenarios), runs it under the shaker venv plus
     the overcloud rc, then checks the result and — when Elasticsearch is
     enabled — ships the JSON output to it.

     :param scenario: scenario config dict (``name`` used for indexing)
     :param result_dir: directory receiving the html/json/log artifacts
     :param test_name: base file name for the artifacts
     :param filename: path of the scenario file handed to shaker
     :param shaker_uuid: uuid identifying this scenario run
     :param es_ts: base timestamp for Elasticsearch documents
     :param es_list: per-interval timestamps for Elasticsearch documents
     :param run: rerun index of this invocation
     """
     server_endpoint = self.config['shaker']['server']
     port_no = self.config['shaker']['port']
     flavor = self.config['shaker']['flavor']
     venv = get_workload_venv('shaker', True)
     shaker_region = self.config['shaker']['shaker_region']
     timeout = self.config['shaker']['join_timeout']
     self.logger.info(
         "The uuid for this shaker scenario is {}".format(shaker_uuid))
     cmd_env = ("source {0}; source {1}").format(venv, self.overcloudrc)
     # External-network scenarios point shaker at the configured external
     # host via --matrix; otherwise the option is omitted entirely. A
     # single command template replaces the two near-duplicate branches
     # the original carried (which could silently drift apart).
     matrix_opt = ''
     if 'external' in filename and 'external_host' in self.config['shaker']:
         matrix_opt = ' --matrix "{{host: {}}}"'.format(
             self.config['shaker']['external_host'])
     cmd_shaker = (
         'shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}'
         ' --os-region-name {7} --agent-join-timeout {6}'
         ' --report {4}/{5}.html --output {4}/{5}.json'
         ' --book {4}/{5}{8} --debug'
         ' > {4}/{5}.log 2>&1').format(server_endpoint, port_no, flavor,
                                       filename, result_dir, test_name,
                                       timeout, shaker_region, matrix_opt)
     cmd = ("{}; {}").format(cmd_env, cmd_shaker)
     from_ts = int(time.time() * 1000)
     if 'sleep_before' in self.config['shaker']:
         time.sleep(self.config['shaker']['sleep_before'])
     from_time = time.time()
     self.tools.run_cmd(cmd)
     to_time = time.time()
     self.update_tests()
     self.update_total_tests()
     outputfile = os.path.join(result_dir, test_name + "." + "json")
     if 'sleep_after' in self.config['shaker']:
         time.sleep(self.config['shaker']['sleep_after'])
     to_ts = int(time.time() * 1000)
     # Snapshotting
     self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
     self.grafana.print_dashboard_url(test_name)
     # Send Data to elastic
     if self.config['elasticsearch']['enabled']:
         index_status = self.send_to_elastic(outputfile, scenario['name'],
                                             shaker_uuid, es_ts, es_list,
                                             run, test_name, result_dir)
         self.result_check(result_dir, test_name, scenario, to_time,
                           from_time, index_status)
     else:
         self.result_check(result_dir, test_name, scenario, to_time,
                           from_time)
Example no. 4
0
    def run_workloads(self):
        """Execute every enabled shaker scenario from the configuration."""
        self.logger.info("Starting Shaker workloads")
        time_stamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        scenarios = self.config.get('shaker')['scenarios']
        venv = get_workload_venv('shaker', False)
        default_time = 60
        self.shaker_checks()
        # Guard clause: nothing to do without at least one scenario.
        if scenarios is None or len(scenarios) == 0:
            self.logger.error(
                "Configuration file contains no shaker scenarios")
            return
        for scenario in scenarios:
            if not scenario['enabled']:
                self.logger.info(
                    "Skipping {} as scenario enabled: false".format(
                        scenario['name']))
                continue
            self.update_scenarios()
            self.update_total_scenarios()
            shaker_uuid = uuid.uuid4()
            es_ts = datetime.datetime.utcnow()
            test_time = scenario.get('time', default_time)
            # One timestamp per expected second of the run (plus slack);
            # utcnow() is sampled per element, matching the original loop.
            es_list = [datetime.datetime.utcnow() +
                       datetime.timedelta(0, interval)
                       for interval in range(0, test_time + 9)]

            for run in range(self.config['browbeat']['rerun']):
                self.logger.info("Scenario: {}".format(
                    scenario['name']))
                self.logger.info("Run: {}".format(run))
                scenario_file = os.path.join(venv, scenario['file'])
                self.set_scenario(scenario, scenario_file, default_time)
                self.logger.debug(
                    "Set Scenario File: {}".format(scenario_file))
                result_dir = self.tools.create_results_dir(
                    self.config['browbeat']['results'], time_stamp,
                    "shaker", scenario['name'] + "-" + str(run))
                workload = self.__class__.__name__
                self.workload_logger(result_dir, workload)
                run_ts = datetime.datetime.now().strftime(
                    "%Y%m%d-%H%M%S")
                test_name = "{}-browbeat-{}-{}-{}".format(
                    run_ts, "shaker", scenario['name'], run)
                self.run_scenario(scenario, result_dir, test_name,
                                  scenario_file, shaker_uuid, es_ts,
                                  es_list, run)
                self.get_stats()
        self.final_stats(self.scenario_count)
Example no. 5
0
    def run_workload(self, workload, run_iteration):
        """Run a single shaker workload, honoring browbeat's rerun policy."""
        self.logger.info("Starting Shaker workloads")
        time_stamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))

        venv = get_workload_venv('shaker', False)
        self.shaker_checks()

        self.update_total_scenarios()
        shaker_uuid = uuid.uuid4()
        es_ts = datetime.datetime.utcnow()
        # Default test time to 60 seconds when the workload omits "time".
        test_time = workload.get("time", 60)
        # utcnow() is sampled once per element, as in the original loop.
        es_list = [datetime.datetime.utcnow() +
                   datetime.timedelta(0, offset)
                   for offset in range(0, test_time + 9)]

        # A "complete" rerun type forces exactly the requested iteration;
        # any other type runs every configured rerun index.
        if self.config["browbeat"]["rerun_type"] == "complete":
            rerun_range = range(run_iteration, run_iteration + 1)
        else:
            rerun_range = range(self.config["browbeat"]["rerun"])

        for run in rerun_range:
            self.logger.info("Scenario: {}".format(workload['name']))
            self.logger.info("Run: {}".format(run))
            scenario_file = os.path.join(venv, workload['file'])
            self.set_scenario(workload, scenario_file, 60)
            self.logger.debug("Set Scenario File: {}".format(scenario_file))
            result_dir = self.tools.create_results_dir(
                results_path, self.result_dir_ts, "shaker",
                workload['name'] + "-" + str(run))
            self.workload_logger(self.__class__.__name__)
            run_ts = datetime.datetime.now().strftime(
                "%Y%m%d-%H%M%S")
            test_name = "{}-browbeat-{}-{}-{}".format(
                run_ts, "shaker", workload['name'], run)
            self.run_scenario(
                workload, result_dir, test_name, scenario_file, shaker_uuid,
                es_ts, es_list, run)
Example no. 6
0
 def gen_scenario_json_file(self, task_id, test_name):
     """Dump the Rally results for *task_id* into ``<test_name>.json``."""
     venv_cmd = "source {}".format(get_workload_venv('rally', True))
     dump_cmd = "rally task results {} > {}.json".format(task_id, test_name)
     return self.tools.run_cmd("{}; {}".format(venv_cmd, dump_cmd))['stdout']
Example no. 7
0
 def gen_scenario_json(self, task_id):
     """Return the raw Rally task results for *task_id* as text."""
     full_cmd = "source {}; rally task results {}".format(
         get_workload_venv('rally', True), task_id)
     return self.tools.run_cmd(full_cmd)['stdout']
Example no. 8
0
 def gen_scenario_html(self, task_ids, test_name):
     """Write a combined HTML report for *task_ids* to ``<test_name>.html``."""
     report_cmd = "source {}; rally task report --task {} --out {}.html".format(
         get_workload_venv('rally', True), ' '.join(task_ids), test_name)
     return self.tools.run_cmd(report_cmd)['stdout']
Example no. 9
0
    def run_benchmark(self,
                      benchmark_config,
                      result_dir,
                      test_name,
                      cloud_type="OpenStack"):
        """Run a single PerfKitBenchmarker benchmark and collect its results.

        :param benchmark_config: dict of pkb flags. The 'enabled' and 'type'
            keys are stripped (NOTE: the caller's dict is mutated, preserved
            from the original behavior) and 'name' is skipped when building
            the command line.
        :param result_dir: directory receiving pkb stdout/stderr logs and
            the run artifacts moved out of /tmp
        :param test_name: name used for the Grafana dashboard snapshot
        :param cloud_type: value for pkb's ``--cloud`` flag
        :returns: ``(success, to_ts, from_ts)`` where success is True iff
            pkb's stderr log contains 'SUCCEEDED'
        """
        self.logger.debug("--------------------------------")
        self.logger.debug("Benchmark_config: {}".format(benchmark_config))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        # Build command to run; these control-flow keys are not pkb flags.
        if 'enabled' in benchmark_config:
            del benchmark_config['enabled']
        if 'type' in benchmark_config:
            del benchmark_config['type']
        cmd = ("source {0}; source {1}; "
               "{2}/PerfKitBenchmarker/pkb.py "
               "--cloud={3} --run_uri=browbeat".format(
                   get_workload_venv('perfkit', True), self.overcloudrc,
                   get_workload_venv('perfkit', False), cloud_type))
        # .items() replaces the Python-2-only .iteritems() so the method
        # runs under either interpreter.
        for parameter, value in benchmark_config.items():
            if not parameter == 'name':
                self.logger.debug("Parameter: {}, Value: {}".format(
                    parameter, value))
                cmd += " --{}={}".format(parameter, value)

        # Remove any old results
        if os.path.exists("/tmp/perfkitbenchmarker/runs/browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/runs/browbeat")

        self.logger.info("Running Perfkit Command: {}".format(cmd))
        from_ts = time.time()
        if 'sleep_before' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_before'])
        # Context managers close both log files once pkb exits; the
        # original leaked both handles.
        with open("{}/pkb.stdout.log".format(result_dir), 'w') as stdout_file, \
                open("{}/pkb.stderr.log".format(result_dir), 'w') as stderr_file:
            process = subprocess.Popen(cmd,
                                       shell=True,
                                       stdout=stdout_file,
                                       stderr=stderr_file)
            process.communicate()
        if 'sleep_after' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_after'])
        to_ts = time.time()

        # Determine success by scanning pkb's stderr log for its marker.
        success = False
        try:
            with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
                if any('SUCCEEDED' in line for line in stderr):
                    self.logger.info("Benchmark completed.")
                    success = True
                else:
                    self.logger.error("Benchmark failed.")
        except IOError:
            self.logger.error(
                "File missing: {}/pkb.stderr.log".format(result_dir))

        # Copy all results out of pkb's fixed /tmp run directory.
        for perfkit_file in glob.glob(
                "/tmp/perfkitbenchmarker/runs/browbeat/*"):
            shutil.move(perfkit_file, result_dir)
        if os.path.exists("/tmp/perfkitbenchmarker/runs/browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/runs/browbeat")

        # Grafana integration
        self.grafana.create_grafana_urls({
            'from_ts': int(from_ts * 1000),
            'to_ts': int(to_ts * 1000)
        })
        self.grafana.print_dashboard_url(test_name)

        return success, to_ts, from_ts