The examples below show how get_workload_venv is used by Browbeat's workload runners (Rally, Shaker, and PerfKit Benchmarker) to build shell commands that source the matching virtualenv before invoking each tool.

Example #1
 def gen_scenario_json(self, task_id):
     if self.openstack:
         cmd = "source {}; ".format(get_workload_venv('rally', True))
     else:
         cmd = "source {}; ".format(get_workload_venv('rally-ovs', True))
     cmd += "rally task results {}".format(task_id)
     return self.tools.run_cmd(cmd)['stdout']
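
All of these snippets rely on two helpers the page does not show: get_workload_venv, which resolves the per-workload virtualenv, and self.tools.run_cmd, which runs a shell command and returns its output as a dict. A minimal sketch of what such helpers might look like, offered purely as an assumption for context (the real Browbeat implementations may differ):

    # Hypothetical sketch only; names and paths are assumptions.
    import os
    import subprocess

    WORKLOAD_VENVS = "/home/stack/browbeat-venvs"  # assumed base path

    def get_workload_venv(workload, activate):
        """Return the workload's venv path, or the path of its 'activate'
        script when activate is True (so callers can 'source' it)."""
        venv = os.path.join(WORKLOAD_VENVS, workload)
        if activate:
            return os.path.join(venv, "bin", "activate")
        return venv

    def run_cmd(cmd):
        """Run a shell command and return its output, matching how the
        examples index the result with ['stdout']."""
        # bash explicitly, because the commands use the 'source' builtin
        proc = subprocess.Popen(cmd, shell=True, executable="/bin/bash",
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        return {"stdout": stdout.decode().strip(),
                "stderr": stderr.decode().strip(),
                "rc": proc.returncode}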
Example #2
 def gen_scenario_html(self, task_ids, test_name):
     all_task_ids = ' '.join(task_ids)
     if self.openstack:
         cmd = "source {}; ".format(get_workload_venv('rally', True))
     else:
         cmd = "source {}; ".format(get_workload_venv('rally-ovs', True))
     cmd += "rally task report {} --out {}.html".format(
         all_task_ids, test_name)
     return self.tools.run_cmd(cmd)['stdout']
Example #3
    def run_scenario(self, task_file, scenario_args, result_dir, test_name,
                     benchmark):
        self.logger.debug("--------------------------------")
        self.logger.debug("task_file: {}".format(task_file))
        self.logger.info("Running with scenario_args: {}".format(
            json.dumps(scenario_args)))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_before'])
        task_args = json.dumps(scenario_args)
        plugins = []
        if "plugins" in self.config['rally']:
            if len(self.config['rally']['plugins']) > 0:
                for plugin in self.config['rally']['plugins']:
                    for name in plugin:
                        plugins.append(plugin[name])
        plugin_string = ""
        if len(plugins) > 0:
            plugin_string = "--plugin-paths {}".format(",".join(plugins))
        cmd = "source {}; ".format(get_workload_venv('rally', True))
        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
            plugin_string, task_file, task_args, test_name)
        from_time = time.time()
        self.tools.run_cmd(cmd)['stdout']
        to_time = time.time()
        if 'sleep_after' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_after'])
        to_ts = int(time.time() * 1000)
        self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
        self.grafana.print_dashboard_url(test_name)
        return (from_time, to_time)
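
The nested loop over self.config['rally']['plugins'] implies the config stores a list of single-key mappings (plugin name to plugin path). A hypothetical fragment with illustrative values that would satisfy it:

    # Illustrative 'rally' section; the key names mirror the lookups above,
    # the values are made up.
    config = {
        "rally": {
            "sleep_before": 5,   # seconds to pause before the task
            "sleep_after": 5,    # seconds to pause after the task
            "plugins": [
                {"netcreate-boot": "rally/rally-plugins/netcreate-boot"},
                {"workloads": "rally/rally-plugins/workloads"},
            ],
        }
    }

    plugins = []
    for plugin in config["rally"]["plugins"]:
        for name in plugin:                  # each entry is a one-key dict
            plugins.append(plugin[name])     # collect just the paths
    plugin_string = "--plugin-paths {}".format(",".join(plugins))
    # --plugin-paths rally/rally-plugins/netcreate-boot,rally/rally-plugins/workloads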
Example #4
 def shaker_checks(self):
     cmd = "source {}; source {}; glance image-list | grep -w shaker-image".format(
         get_workload_venv('shaker', True), self.overcloudrc)
     if self.tools.run_cmd(cmd)['stdout'] == "":
         self.logger.error("Shaker Image is not built, try again")
         exit(1)
     else:
         self.logger.info("Shaker image is built, continuing")
Example #5
 def run_scenario(self, scenario, result_dir, test_name, filename,
                  shaker_uuid, es_ts, es_list, run):
     server_endpoint = self.config['shaker']['server']
     port_no = self.config['shaker']['port']
     flavor = self.config['shaker']['flavor']
     venv = get_workload_venv('shaker', True)
     shaker_region = self.config['shaker']['shaker_region']
     timeout = self.config['shaker']['join_timeout']
     self.logger.info(
         "The uuid for this shaker scenario is {}".format(shaker_uuid))
     cmd_env = ("source {0}; source {1}").format(venv, self.overcloudrc)
     if 'external' in filename and 'external_host' in self.config['shaker']:
         external_host = self.config['shaker']['external_host']
         cmd_shaker = (
             'shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}'
             ' --os-region-name {7} --agent-join-timeout {6}'
             ' --report {4}/{5}.html --output {4}/{5}.json'
             ' --book {4}/{5} --matrix "{{host: {8}}}" --debug'
             ' > {4}/{5}.log 2>&1').format(server_endpoint, port_no, flavor,
                                           filename, result_dir, test_name,
                                           timeout, shaker_region,
                                           external_host)
     else:
         cmd_shaker = (
             'shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}'
             ' --os-region-name {7} --agent-join-timeout {6}'
             ' --report {4}/{5}.html --output {4}/{5}.json'
             ' --book {4}/{5} --debug'
             ' > {4}/{5}.log 2>&1').format(server_endpoint, port_no, flavor,
                                           filename, result_dir, test_name,
                                           timeout, shaker_region)
     cmd = ("{}; {}").format(cmd_env, cmd_shaker)
     from_ts = int(time.time() * 1000)
     if 'sleep_before' in self.config['shaker']:
         time.sleep(self.config['shaker']['sleep_before'])
     from_time = time.time()
     self.tools.run_cmd(cmd)
     to_time = time.time()
     self.update_total_tests()
     outputfile = os.path.join(result_dir, test_name + "." + "json")
     if 'sleep_after' in self.config['shaker']:
         time.sleep(self.config['shaker']['sleep_after'])
     to_ts = int(time.time() * 1000)
     # Snapshotting
     self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
     self.grafana.print_dashboard_url(test_name)
     # Send Data to elastic
     if self.config['elasticsearch']['enabled']:
         index_status = self.send_to_elastic(outputfile, scenario['name'],
                                             shaker_uuid, es_ts, es_list,
                                             run, test_name, result_dir)
         self.result_check(result_dir, test_name, scenario, to_time,
                           from_time, index_status)
     else:
         self.result_check(result_dir, test_name, scenario, to_time,
                           from_time)
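
For reference, the shaker config keys this method reads, with purely illustrative values, and (as a comment) the command line the non-external branch would assemble from them:

    # Illustrative values only; the key names match the lookups above.
    shaker_config = {
        "server": "192.0.2.10",     # server_endpoint host
        "port": 5555,               # port_no
        "flavor": "m1.small",
        "shaker_region": "regionOne",
        "join_timeout": 600,
    }
    # With filename="dense_l2.yaml", result_dir="results" and
    # test_name="run-0", the non-external branch renders roughly:
    #   shaker --server-endpoint 192.0.2.10:5555 --flavor-name m1.small
    #     --scenario dense_l2.yaml --os-region-name regionOne
    #     --agent-join-timeout 600 --report results/run-0.html
    #     --output results/run-0.json --book results/run-0 --debug
    #     > results/run-0.log 2>&1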
Example #6
    def run_workload(self, workload, run_iteration):
        self.logger.info("Starting Shaker workloads")
        time_stamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))

        venv = get_workload_venv('shaker', False)
        self.shaker_checks()

        self.update_total_scenarios()
        shaker_uuid = uuid.uuid4()
        es_ts = datetime.datetime.utcnow()
        es_list = []
        # Default test time to 60
        test_time = workload.get("time", 60)
        for interval in range(0, test_time + 9):
            es_list.append(
                datetime.datetime.utcnow() + datetime.timedelta(0, interval))

        rerun_range = range(self.config["browbeat"]["rerun"])
        if self.config["browbeat"]["rerun_type"] == "complete":
            # 'complete' rerun type runs only the current iteration
            rerun_range = range(run_iteration, run_iteration + 1)

        for run in rerun_range:
            self.logger.info("Scenario: {}".format(workload['name']))
            self.logger.info("Run: {}".format(run))
            fname = os.path.join(venv, workload['file'])
            self.set_scenario(workload, fname, 60)
            self.logger.debug("Set Scenario File: {}".format(fname))
            result_dir = self.tools.create_results_dir(
                results_path, self.result_dir_ts, "shaker",
                workload['name'] + "-" + str(run))
            self.workload_logger(self.__class__.__name__)
            time_stamp1 = datetime.datetime.now().strftime(
                "%Y%m%d-%H%M%S")
            test_name = "{}-browbeat-{}-{}-{}".format(
                time_stamp1, "shaker", workload['name'], run)
            self.run_scenario(
                workload, result_dir, test_name, fname, shaker_uuid,
                es_ts, es_list, run)
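
The rerun handling above (the Rally runner later on this page uses the same pattern) defaults to executing every configured rerun back to back; the 'complete' rerun type instead narrows the range to just the iteration that was passed in. A quick illustration with assumed config values:

    # Assumed config values, for illustration only.
    rerun = 3
    rerun_type = "iteration"    # assumed default; "complete" is the other case
    run_iteration = 1

    rerun_range = range(rerun)  # -> runs 0, 1, 2 in one invocation
    if rerun_type == "complete":
        rerun_range = range(run_iteration, run_iteration + 1)  # -> just run 1

    print(list(rerun_range))    # [0, 1, 2] here; [1] with rerun_type="complete"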
Example #7
 def gen_scenario_json_file(self, task_id, test_name):
     cmd = "source {}; ".format(get_workload_venv('rally', True))
     cmd += "rally task results {} > {}.json".format(task_id, test_name)
     return self.tools.run_cmd(cmd)['stdout']
Example #8
    def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
        self.logger.debug("--------------------------------")
        self.logger.debug("Benchmark_config: {}".format(benchmark_config))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        # Build command to run
        if 'enabled' in benchmark_config:
            del benchmark_config['enabled']
        if 'type' in benchmark_config:
            del benchmark_config['type']
        cmd = ("source {0}; source {1}; "
               "{2}/PerfKitBenchmarker/pkb.py "
               "--cloud={3} --run_uri=browbeat".format(
                   get_workload_venv('perfkit', True),
                   self.overcloudrc,
                   get_workload_venv('perfkit', False), cloud_type))
        for parameter, value in benchmark_config.items():
            if not parameter == 'name':
                self.logger.debug(
                    "Parameter: {}, Value: {}".format(parameter, value))
                cmd += " --{}={}".format(parameter, value)

        # Remove any old results
        if os.path.exists("/tmp/perfkitbenchmarker/runs/browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/runs/browbeat")

        self.logger.info("Running Perfkit Command: {}".format(cmd))
        stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
        stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
        from_ts = time.time()
        if 'sleep_before' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_before'])
        process = subprocess.Popen(
            cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
        process.communicate()
        if 'sleep_after' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_after'])
        to_ts = time.time()

        # Determine success
        success = False
        try:
            with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
                if any('SUCCEEDED' in line for line in stderr):
                    self.logger.info("Benchmark completed.")
                    success = True
                else:
                    self.logger.error("Benchmark failed.")
        except IOError:
            self.logger.error(
                "File missing: {}/pkb.stderr.log".format(result_dir))

        # Copy all results
        for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/runs/browbeat/*"):
            shutil.move(perfkit_file, result_dir)
        if os.path.exists("/tmp/perfkitbenchmarker/runs/browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/runs/browbeat")

        # Grafana integration
        self.grafana.create_grafana_urls(
            {'from_ts': int(from_ts * 1000),
             'to_ts': int(to_ts * 1000)})
        self.grafana.print_dashboard_url(test_name)

        return success, to_ts, from_ts
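
After enabled and type are stripped, every remaining key in benchmark_config except name becomes a --key=value flag appended to the pkb.py invocation. A small illustration with made-up values:

    # Made-up benchmark_config; only the key handling mirrors the code above.
    benchmark_config = {
        "name": "fio-test",      # skipped: never turned into a flag
        "enabled": True,         # removed before flag generation
        "benchmarks": "fio",
        "machine_type": "m1.small",
    }
    benchmark_config.pop("enabled", None)
    benchmark_config.pop("type", None)

    cmd = "pkb.py --cloud=OpenStack --run_uri=browbeat"
    for parameter, value in benchmark_config.items():
        if parameter != "name":
            cmd += " --{}={}".format(parameter, value)
    # cmd == "pkb.py --cloud=OpenStack --run_uri=browbeat"
    #        " --benchmarks=fio --machine_type=m1.small"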
Example #9
 def gen_scenario_json(self, task_id):
     cmd = "source {}; ".format(get_workload_venv('rally', True))
     cmd += "rally task results --uuid {}".format(task_id)
     return self.tools.run_cmd(cmd)['stdout']
Example #10
    def run_workload(self, workload, run_iteration):
        """Runs a Browbeat Rally workload"""
        results = []
        self.logger.info("Running Rally workload: {}".format(workload["name"]))
        es_ts = datetime.datetime.utcnow()
        def_concurrencies = workload["concurrency"]
        def_times = workload["times"]
        self.logger.debug(
            "Default Concurrencies: {}".format(def_concurrencies))
        self.logger.debug("Default Times: {}".format(def_times))
        for scenario in workload["scenarios"]:
            if not scenario["enabled"]:
                self.logger.info("{} scenario is disabled".format(
                    scenario['name']))
                continue

            self.logger.info("Running Scenario: {}".format(scenario["name"]))
            self.logger.debug("Scenario File: {}".format(scenario["file"]))
            self.update_total_scenarios()
            scenario_name = scenario["name"]
            scenario_file = scenario["file"]

            del scenario["enabled"]
            del scenario["file"]
            del scenario["name"]
            if len(scenario) > 0:
                self.logger.debug(
                    "Overriding Scenario Args: {}".format(scenario))

            result_dir = self.tools.create_results_dir(
                results_path, self.result_dir_ts,
                self.__class__.__name__.lower(), workload["name"],
                scenario_name)

            self.logger.debug(
                "Created result directory: {}".format(result_dir))
            self.workload_logger(self.__class__.__name__)

            # Override concurrency/times
            if "concurrency" in scenario:
                concurrencies = scenario["concurrency"]
                del scenario["concurrency"]
            else:
                concurrencies = def_concurrencies

            if "times" not in scenario:
                scenario["times"] = def_times

            if "rally_deployment" in scenario:
                _rally_deployment = scenario["rally_deployment"]
            elif "rally_deployment" in workload:
                scenario["rally_deployment"] = workload["rally_deployment"]
                _rally_deployment = scenario["rally_deployment"]
            else:
                _rally_deployment = 'overcloud'
                self.logger.info("Default rally deployment {} in use.".format(
                    _rally_deployment))

            rally_deployments = ['undercloud', 'overcloud']
            if _rally_deployment in rally_deployments:
                cmd = "source {}; ".format(get_workload_venv('rally', True))
                cmd += "rally deployment use {}".format(_rally_deployment)
                cmd_stdout = self.tools.run_cmd(cmd)['stdout']
                if cmd_stdout == "Deployment {} is not found.".format(
                        _rally_deployment):
                    self.logger.error(
                        "Rally deployment {} is not found.".format(
                            _rally_deployment))
                    exit(1)
            else:
                self.logger.error("Wrong rally benchmark name specified.")
                continue

            concurrency_count_dict = collections.Counter()
            for concurrency in concurrencies:
                scenario["concurrency"] = concurrency

                # Correct iteration/rerun
                rerun_range = range(self.config["browbeat"]["rerun"])
                if self.config["browbeat"]["rerun_type"] == "complete":
                    rerun_range = range(run_iteration, run_iteration + 1)

                for run in rerun_range:
                    self.update_total_tests()
                    concurrency_count_dict[concurrency] += 1
                    test_name = "{}-browbeat-{}-{}-{}-iteration-{}".format(
                        es_ts.strftime("%Y%m%d-%H%M%S"), scenario_name,
                        concurrency, concurrency_count_dict[concurrency], run)

                    if not result_dir:
                        self.logger.error("Failed to create result directory")
                        exit(1)

                    from_time, to_time = self.run_scenario(
                        scenario_file, scenario, result_dir, test_name,
                        workload["name"])

                    new_test_name = test_name.split("-")
                    new_test_name = new_test_name[3:]
                    new_test_name = "-".join(new_test_name)

                    # Find task id (if task succeeded in running)
                    task_id = self.get_task_id(test_name)
                    if task_id:
                        self.logger.info(
                            "Generating Rally HTML for task_id : {}".format(
                                task_id))
                        self.gen_scenario_html([task_id], test_name)
                        self.gen_scenario_json_file(task_id, test_name)
                        results.append(task_id)
                        self.update_total_pass_tests()
                        if self.config["elasticsearch"]["enabled"]:
                            # Start indexing
                            index_status = self.json_result(
                                task_id, scenario_name, run, test_name,
                                result_dir)
                            if not index_status:
                                self.update_index_failures()
                            self.get_time_dict(to_time, from_time,
                                               workload["name"], new_test_name,
                                               self.__class__.__name__, "pass",
                                               index_status)
                        else:
                            self.get_time_dict(
                                to_time,
                                from_time,
                                workload["name"],
                                new_test_name,
                                self.__class__.__name__,
                                "pass",
                            )

                    else:
                        self.logger.error("Cannot find task_id")
                        self.update_total_fail_tests()
                        self.get_time_dict(to_time, from_time,
                                           workload["name"], new_test_name,
                                           self.__class__.__name__, "fail")

                    for data in glob.glob("./{}*".format(test_name)):
                        shutil.move(data, result_dir)

        self.logger.debug("Creating Combined Rally Reports")
        combined_html_name = "all-rally-run-{}".format(run_iteration)
        self.gen_scenario_html(results, combined_html_name)
        if os.path.isfile("{}.html".format(combined_html_name)):
            shutil.move(
                "{}.html".format(combined_html_name),
                "{}/{}/{}/{}".format(results_path, self.result_dir_ts,
                                     self.__class__.__name__.lower(),
                                     workload["name"]))
Example #11
 def gen_scenario_json(self, task_id):
     cmd = "source {}; ".format(get_workload_venv('rally', True))
     cmd += "rally task results {}".format(task_id)
     return self.tools.run_cmd(cmd)['stdout']
Example #12
 def gen_scenario_html(self, task_ids, test_name):
     all_task_ids = ' '.join(task_ids)
     cmd = "source {}; ".format(get_workload_venv('rally', True))
     cmd += "rally task report --task {} --out {}.html".format(
         all_task_ids, test_name)
     return self.tools.run_cmd(cmd)['stdout']