Example #1
    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Shaker')
        self.config = config
        self.tools = Tools(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0
Example #2
    def __init__(self, config, hosts=None):
        self.logger = logging.getLogger('browbeat.Rally')
        self.config = config
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.elastic = Elastic(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0
Example #3
    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Shaker')
        self.config = config
        self.tools = Tools(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0
Example #4
    def __init__(self, config, hosts=None):
        self.logger = logging.getLogger('browbeat.Rally')
        self.config = config
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0
Example #5
    def __init__(self, config):
        self.logger = logging.getLogger("browbeat.PerfKit")
        self.config = config
        self.error_count = 0
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.test_count = 0
        self.scenario_count = 0
        self.pass_count = 0
Example #6
class Rally(WorkloadBase):

    def __init__(self, config, hosts=None):
        self.logger = logging.getLogger('browbeat.Rally')
        self.config = config
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.elastic = Elastic(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
        self.logger.debug("--------------------------------")
        self.logger.debug("task_file: {}".format(task_file))
        self.logger.debug("scenario_args: {}".format(scenario_args))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_before'])
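        # Convert the scenario args dict into a JSON-style string for rally's --task-args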
        task_args = str(scenario_args).replace("'", "\"")
        plugins = []
        if "plugins" in self.config['rally']:
            if len(self.config['rally']['plugins']) > 0:
                for plugin in self.config['rally']['plugins']:
                    for name in plugin:
                        plugins.append(plugin[name])
        plugin_string = ""
        if len(plugins) > 0:
            plugin_string = "--plugin-paths {}".format(",".join(plugins))
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
            plugin_string, task_file, task_args, test_name)
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        if 'sleep_after' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_after'])
        to_ts = int(time.time() * 1000)
        self.grafana.create_grafana_urls({'from_ts': from_ts, 'to_ts': to_ts})
        self.grafana.print_dashboard_url(test_name)
        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
        return (from_time, to_time)

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def get_task_id(self, test_name):
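        # Extract the task id from the tee'd log (4th field of the "rally task results" line)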
        cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
            test_name)
        return self.tools.run_cmd(cmd)

    def _get_details(self):
        self.logger.info(
            "Current number of Rally scenarios executed: {}".format(
                self.scenario_count))
        self.logger.info("Current number of Rally tests executed: {}".format(self.test_count))
        self.logger.info("Current number of Rally tests passed: {}".format(self.pass_count))
        self.logger.info("Current number of Rally test failures: {}".format(self.error_count))

    def gen_scenario_html(self, task_ids, test_name):
        all_task_ids = ' '.join(task_ids)
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task report --task {} --out {}.html".format(
            all_task_ids, test_name)
        return self.tools.run_cmd(cmd)

    def gen_scenario_json(self, task_id):
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task results {}".format(task_id)
        return self.tools.run_cmd(cmd)

    def gen_scenario_json_file(self, task_id, test_name):
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task results {} > {}.json".format(task_id, test_name)
        return self.tools.run_cmd(cmd)

    def rally_metadata(self, result, meta):
        result['rally_metadata'] = meta
        return result

    def json_result(self, task_id):
        rally_data = {}
        rally_errors = []
        rally_sla = []
        self.logger.info("Loading Task_ID {} JSON".format(task_id))
        rally_json = self.elastic.load_json(self.gen_scenario_json(task_id))
        if len(rally_json) < 1:
            self.logger.error("Issue with Rally Results")
            return False
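        # Collect raw atomic-action timings across every scenario iteration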
        for metrics in rally_json[0]['result']:
            for workload in metrics:
                if type(metrics[workload]) is dict:
                    for value in metrics[workload]:
                        if not type(metrics[workload][value]) is list:
                            if value not in rally_data:
                                rally_data[value] = []
                            rally_data[value].append(metrics[workload][value])
            if len(metrics['error']) > 0:
                rally_errors.append({'action_name': value,
                                     'error': metrics['error']})
        rally_doc = []
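        # Summarize each action with percentile/min/max/average/median statistics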
        for workload in rally_data:
            if not type(rally_data[workload]) is dict:
                rally_stats = {'action': workload,
                               '90th': numpy.percentile(rally_data[workload], 90),
                               '95th': numpy.percentile(rally_data[workload], 95),
                               'Max': numpy.max(rally_data[workload]),
                               'Min': numpy.min(rally_data[workload]),
                               'Average': numpy.average(rally_data[workload]),
                               'Median': numpy.median(rally_data[workload]),
                               'Raw': rally_data[workload]}
                rally_doc.append(rally_stats)

        return {'rally_stats': rally_doc,
                'rally_errors': rally_errors,
                'rally_setup': rally_json[0]['key']}

    def start_workloads(self):
        """Iterates through all rally scenarios in browbeat yaml config file"""
        results = OrderedDict()
        self.logger.info("Starting Rally workloads")
        es_ts = datetime.datetime.now()
        dir_ts = es_ts.strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(dir_ts))
        benchmarks = self.config.get('rally')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    scenarios = benchmark['scenarios']
                    def_concurrencies = benchmark['concurrency']
                    def_times = benchmark['times']
                    self.logger.debug(
                        "Default Concurrencies: {}".format(def_concurrencies))
                    self.logger.debug("Default Times: {}".format(def_times))
                    for scenario in scenarios:
                        if scenario['enabled']:
                            self.update_scenarios()
                            self.update_total_scenarios()
                            scenario_name = scenario['name']
                            scenario_file = scenario['file']
                            self.logger.info(
                                "Running Scenario: {}".format(scenario_name))
                            self.logger.debug(
                                "Scenario File: {}".format(scenario_file))

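                            # Remaining keys are passed to rally as scenario arg overrides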
                            del scenario['enabled']
                            del scenario['file']
                            del scenario['name']
                            if len(scenario) > 0:
                                self.logger.debug(
                                    "Overriding Scenario Args: {}".format(scenario))

                            result_dir = self.tools.create_results_dir(
                                self.config['browbeat'][
                                    'results'], dir_ts, benchmark['name'],
                                scenario_name)
                            self.logger.debug("Created result directory: {}".format(result_dir))
                            workload = self.__class__.__name__
                            self.workload_logger(result_dir, workload)

                            # Override concurrency/times
                            if 'concurrency' in scenario:
                                concurrencies = scenario['concurrency']
                                del scenario['concurrency']
                            else:
                                concurrencies = def_concurrencies
                            if 'times' not in scenario:
                                scenario['times'] = def_times

                            for concurrency in concurrencies:
                                scenario['concurrency'] = concurrency
                                for run in range(self.config['browbeat']['rerun']):
                                    if run not in results:
                                        results[run] = []
                                    self.update_tests()
                                    self.update_total_tests()
                                    test_name = "{}-browbeat-{}-{}-iteration-{}".format(
                                        dir_ts, scenario_name, concurrency, run)

                                    if not result_dir:
                                        self.logger.error(
                                            "Failed to create result directory")
                                        exit(1)

                                    # Start connmon before rally
                                    if self.config['connmon']['enabled']:
                                        self.connmon.start_connmon()

                                    from_time, to_time = self.run_scenario(
                                        scenario_file, scenario, result_dir, test_name,
                                        benchmark['name'])

                                    # Stop connmon at end of rally task
                                    if self.config['connmon']['enabled']:
                                        self.connmon.stop_connmon()
                                        try:
                                            self.connmon.move_connmon_results(
                                                result_dir, test_name)
                                        except Exception:
                                            self.logger.error(
                                                "Connmon Result data missing, "
                                                "Connmon never started")
                                            return False
                                        self.connmon.connmon_graphs(result_dir, test_name)
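                                    # Strip the timestamp/browbeat prefix from the test name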
                                    new_test_name = test_name.split('-')
                                    new_test_name = new_test_name[3:]
                                    new_test_name = "-".join(new_test_name)

                                    # Find task id (if task succeeded in
                                    # running)
                                    task_id = self.get_task_id(test_name)
                                    if task_id:
                                        self.logger.info(
                                            "Generating Rally HTML for task_id : {}".
                                            format(task_id))
                                        self.gen_scenario_html([task_id], test_name)
                                        self.gen_scenario_json_file(task_id, test_name)
                                        results[run].append(task_id)
                                        self.update_pass_tests()
                                        self.update_total_pass_tests()
                                        self.get_time_dict(
                                            to_time, from_time, benchmark['name'], new_test_name,
                                            workload, "pass")
                                        if self.config['elasticsearch']['enabled']:
                                            # Start indexing
                                            result_json = self.json_result(task_id)
                                            _meta = {'taskid': task_id,
                                                     'timestamp': es_ts,
                                                     'workload': {
                                                         'name': benchmark['name'],
                                                         'scenario': scenario_name,
                                                         'times': scenario['times'],
                                                         'concurrency': scenario['concurrency']},
                                                     'grafana': self.grafana.grafana_urls()
                                                     }
                                            if result_json:
                                                result = self.elastic.combine_metadata(
                                                    self.rally_metadata(result_json, _meta))
                                                if result is False:
                                                    self.logger.error(
                                                        "Error with ElasticSearch connector")
                                                else:
                                                    if len(result) < 1:
                                                        self.logger.error(
                                                            "Issue with ElasticSearch Data, "
                                                            "for task_id {}".format(task_id))
                                                    else:
                                                        self.elastic.index_result(result,
                                                                                  _id=task_id)
                                    else:
                                        self.logger.error("Cannot find task_id")
                                        self.update_fail_tests()
                                        self.update_total_fail_tests()
                                        self.get_time_dict(
                                            to_time, from_time, benchmark['name'], new_test_name,
                                            workload, "fail")

                                    for data in glob.glob("./{}*".format(test_name)):
                                        shutil.move(data, result_dir)

                                    self._get_details()

                        else:
                            self.logger.info(
                                "Skipping {} scenario enabled: false".format(scenario['name']))
                else:
                    self.logger.info(
                        "Skipping {} benchmarks enabled: false".format(benchmark['name']))
            self.logger.debug("Creating Combined Rally Reports")
            for run in results:
                combined_html_name = 'all-rally-run-{}'.format(run)
                self.gen_scenario_html(results[run], combined_html_name)
                if os.path.isfile('{}.html'.format(combined_html_name)):
                    shutil.move('{}.html'.format(combined_html_name),
                                '{}/{}'.format(self.config['browbeat']['results'], dir_ts))
        else:
            self.logger.error("Config file contains no rally benchmarks.")
Example #7
class Rally(WorkloadBase):

    def __init__(self, config, hosts=None):
        self.logger = logging.getLogger('browbeat.Rally')
        self.config = config
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
        self.logger.debug("--------------------------------")
        self.logger.debug("task_file: {}".format(task_file))
        self.logger.debug("scenario_args: {}".format(scenario_args))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_before'])
        task_args = str(scenario_args).replace("'", "\"")
        plugins = []
        if "plugins" in self.config['rally']:
            if len(self.config['rally']['plugins']) > 0:
                for plugin in self.config['rally']['plugins']:
                    for name in plugin:
                        plugins.append(plugin[name])
        plugin_string = ""
        if len(plugins) > 0:
            plugin_string = "--plugin-paths {}".format(",".join(plugins))
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
            plugin_string, task_file, task_args, test_name)
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        if 'sleep_after' in self.config['rally']:
            time.sleep(self.config['rally']['sleep_after'])
        to_ts = int(time.time() * 1000)
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(
            from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
        return (from_time, to_time)

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def get_task_id(self, test_name):
        cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
            test_name)
        return self.tools.run_cmd(cmd)

    def _get_details(self):
        self.logger.info(
            "Current number of Rally scenarios executed: {}".format(
                self.scenario_count))
        self.logger.info("Current number of Rally tests executed: {}".format(self.test_count))
        self.logger.info("Current number of Rally tests passed: {}".format(self.pass_count))
        self.logger.info("Current number of Rally test failures: {}".format(self.error_count))

    def gen_scenario_html(self, task_ids, test_name):
        all_task_ids = ' '.join(task_ids)
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task report --task {} --out {}.html".format(
            all_task_ids, test_name)
        return self.tools.run_cmd(cmd)

    def gen_scenario_json(self, task_id, test_name):
        cmd = "source {}; ".format(self.config['rally']['venv'])
        cmd += "rally task results {} > {}.json".format(task_id, test_name)
        return self.tools.run_cmd(cmd)

    def start_workloads(self):
        """Iterates through all rally scenarios in browbeat yaml config file"""
        results = OrderedDict()
        self.logger.info("Starting Rally workloads")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        benchmarks = self.config.get('rally')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    scenarios = benchmark['scenarios']
                    def_concurrencies = benchmark['concurrency']
                    def_times = benchmark['times']
                    self.logger.debug(
                        "Default Concurrencies: {}".format(def_concurrencies))
                    self.logger.debug("Default Times: {}".format(def_times))
                    for scenario in scenarios:
                        if scenario['enabled']:
                            self.update_scenarios()
                            self.update_total_scenarios()
                            scenario_name = scenario['name']
                            scenario_file = scenario['file']
                            self.logger.info(
                                "Running Scenario: {}".format(scenario_name))
                            self.logger.debug(
                                "Scenario File: {}".format(scenario_file))

                            del scenario['enabled']
                            del scenario['file']
                            del scenario['name']
                            if len(scenario) > 0:
                                self.logger.debug(
                                    "Overriding Scenario Args: {}".format(scenario))

                            result_dir = self.tools.create_results_dir(
                                self.config['browbeat'][
                                    'results'], time_stamp, benchmark['name'],
                                scenario_name)
                            self.logger.debug("Created result directory: {}".format(result_dir))
                            workload = self.__class__.__name__
                            self.workload_logger(result_dir, workload)

                            # Override concurrency/times
                            if 'concurrency' in scenario:
                                concurrencies = scenario['concurrency']
                                del scenario['concurrency']
                            else:
                                concurrencies = def_concurrencies
                            if 'times' not in scenario:
                                scenario['times'] = def_times

                            for concurrency in concurrencies:
                                scenario['concurrency'] = concurrency
                                for run in range(self.config['browbeat']['rerun']):
                                    if run not in results:
                                        results[run] = []
                                    self.update_tests()
                                    self.update_total_tests()
                                    test_name = "{}-browbeat-{}-{}-iteration-{}".format(
                                        time_stamp, scenario_name, concurrency, run)

                                    if not result_dir:
                                        self.logger.error(
                                            "Failed to create result directory")
                                        exit(1)

                                    # Start connmon before rally
                                    if self.config['connmon']['enabled']:
                                        self.connmon.start_connmon()

                                    from_time, to_time = self.run_scenario(
                                        scenario_file, scenario, result_dir, test_name,
                                        benchmark['name'])

                                    # Stop connmon at end of rally task
                                    if self.config['connmon']['enabled']:
                                        self.connmon.stop_connmon()
                                        try:
                                            self.connmon.move_connmon_results(
                                                result_dir, test_name)
                                        except Exception:
                                            self.logger.error(
                                                "Connmon Result data missing, "
                                                "Connmon never started")
                                            return False
                                        self.connmon.connmon_graphs(result_dir, test_name)
                                    new_test_name = test_name.split('-')
                                    new_test_name = new_test_name[3:]
                                    new_test_name = "-".join(new_test_name)

                                    # Find task id (if task succeeded in
                                    # running)
                                    task_id = self.get_task_id(test_name)
                                    if task_id:
                                        self.logger.info(
                                            "Generating Rally HTML for task_id : {}".
                                            format(task_id))
                                        self.gen_scenario_html(
                                            [task_id], test_name)
                                        self.gen_scenario_json(
                                            task_id, test_name)
                                        results[run].append(task_id)
                                        self.update_pass_tests()
                                        self.update_total_pass_tests()
                                        self.get_time_dict(
                                            to_time, from_time, benchmark['name'], new_test_name,
                                            workload, "pass")

                                    else:
                                        self.logger.error("Cannot find task_id")
                                        self.update_fail_tests()
                                        self.update_total_fail_tests()
                                        self.get_time_dict(
                                            to_time, from_time, benchmark['name'], new_test_name,
                                            workload, "fail")

                                    for data in glob.glob("./{}*".format(test_name)):
                                        shutil.move(data, result_dir)

                                    self._get_details()

                        else:
                            self.logger.info(
                                "Skipping {} scenario enabled: false".format(scenario['name']))
                else:
                    self.logger.info(
                        "Skipping {} benchmarks enabled: false".format(benchmark['name']))
            self.logger.debug("Creating Combined Rally Reports")
            for run in results:
                combined_html_name = 'all-rally-run-{}'.format(run)
                self.gen_scenario_html(results[run], combined_html_name)
                if os.path.isfile('{}.html'.format(combined_html_name)):
                    shutil.move('{}.html'.format(combined_html_name),
                                '{}/{}'.format(self.config['browbeat']['results'], time_stamp))
        else:
            self.logger.error("Config file contains no rally benchmarks.")
Example #8
class Shaker(WorkloadBase):

    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Shaker')
        self.config = config
        self.tools = Tools(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def shaker_checks(self):
        cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
        if self.tools.run_cmd(cmd) == "":
            self.logger.error("Shaker Image is not built, try again")
            exit(1)
        else:
            self.logger.info("Shaker image is built, continuing")

    def get_stats(self):
        self.logger.info("Current number of Shaker tests executed: {}".format(self.test_count))
        self.logger.info("Current number of Shaker tests passed: {}".format(self.pass_count))
        self.logger.info("Current number of Shaker tests failed: {}".format(self.error_count))

    def final_stats(self, total):
        self.logger.info("Total Shaker scenarios enabled by user: {}".format(total))
        self.logger.info("Total number of Shaker tests executed: {}".format(self.test_count))
        self.logger.info("Total number of Shaker tests passed: {}".format(self.pass_count))
        self.logger.info("Total number of Shaker tests failed: {}".format(self.error_count))

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def set_scenario(self, scenario):
        fname = scenario['file']
        with open(fname, 'r') as stream:
            data = yaml.safe_load(stream)
        default_placement = "double_room"
        default_density = 1
        default_compute = 1
        default_progression = "linear"
        default_time = 60
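        # Apply scenario overrides for placement/density/compute/progression, else use defaults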
        if "placement" in scenario:
            data['deployment']['accommodation'][1] = scenario['placement']
        else:
            data['deployment']['accommodation'][1] = default_placement
        if "density" in scenario:
            data['deployment']['accommodation'][
                2]['density'] = scenario['density']
        else:
            data['deployment']['accommodation'][2]['density'] = default_density
        if "compute" in scenario:
            data['deployment']['accommodation'][3][
                'compute_nodes'] = scenario['compute']
        else:
            data['deployment']['accommodation'][3][
                'compute_nodes'] = default_compute
        if "progression" in scenario:
            data['execution']['progression'] = scenario['progression']
        else:
            data['execution']['progression'] = default_progression
        data['execution']['tests'] = [
            d for d in data['execution']['tests']
            if d.get('class') == "iperf_graph"]
        if "time" in scenario:
            data['execution']['tests'][0]['time'] = scenario['time']
        else:
            data['execution']['tests'][0]['time'] = default_time
        with open(fname, 'w') as yaml_file:
            yaml_file.write(yaml.dump(data, default_flow_style=False))

    def get_uuidlist(self, data):
        uuidlist = []
        for key in data['records'].iterkeys():
            uuidlist.append(key)
        return uuidlist

    def result_check(self, result_dir, test_name, scenario, to_time, from_time):
        outputfile = os.path.join(result_dir, test_name + ".json")
        error = False
        with open(outputfile) as data_file:
            data = json.load(data_file)
        uuidlist = self.get_uuidlist(data)
        workload = self.__class__.__name__
        new_test_name = test_name.split('-')
        new_test_name = new_test_name[3:]
        new_test_name = '-'.join(new_test_name)
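        # Any record with a non-ok status marks the whole test as failed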
        for uuid in uuidlist:
            if data['records'][uuid]['status'] != "ok":
                error = True
        if error:
            self.logger.error("Failed Test: {}".format(scenario['name']))
            self.logger.error("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
            self.update_fail_tests()
            self.update_total_fail_tests()
            self.get_time_dict(
                to_time,
                from_time,
                scenario['name'],
                new_test_name,
                workload,
                "fail")
        else:
            self.logger.info("Completed Test: {}".format(scenario['name']))
            self.logger.info("Saved report to: {}".format(
                os.path.join(result_dir, test_name + ".html")))
            self.logger.info("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
            self.update_pass_tests()
            self.update_total_pass_tests()
            self.get_time_dict(
                to_time,
                from_time,
                scenario['name'],
                new_test_name,
                workload,
                "pass")

    def run_scenario(self, scenario, result_dir, test_name):
        filename = scenario['file']
        server_endpoint = self.config['shaker']['server']
        port_no = self.config['shaker']['port']
        flavor = self.config['shaker']['flavor']
        venv = self.config['shaker']['venv']
        shaker_region = self.config['shaker']['shaker_region']
        timeout = self.config['shaker']['join_timeout']
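        # Activate the shaker venv and overcloud credentials, then run shaker writing the
        # HTML report, JSON output and log into the result directory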
        cmd_1 = (
            "source {}/bin/activate; source /home/stack/overcloudrc").format(venv)
        cmd_2 = (
            "shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
            " --os-region-name {7} --agent-join-timeout {6}"
            " --report {4}/{5}.html --output {4}/{5}.json"
            " --debug > {4}/{5}.log 2>&1").format(
            server_endpoint,
            port_no,
            flavor,
            filename,
            result_dir,
            test_name,
            timeout,
            shaker_region)
        cmd = ("{}; {}").format(cmd_1, cmd_2)
        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['shaker']:
            time.sleep(self.config['shaker']['sleep_before'])
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        self.update_tests()
        self.update_total_tests()
        self.result_check(result_dir, test_name, scenario, to_time, from_time)
        if 'sleep_after' in self.config['shaker']:
            time.sleep(self.config['shaker']['sleep_after'])
        to_ts = int(time.time() * 1000)
        # Snapshotting
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(
            from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

    def run_shaker(self):
        self.logger.info("Starting Shaker workloads")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        scenarios = self.config.get('shaker')['scenarios']
        self.shaker_checks()
        scen_length = len(scenarios)
        if scen_length > 0:
            for scenario in scenarios:
                if scenario['enabled']:
                    self.update_scenarios()
                    self.update_total_scenarios()
                    self.logger.info("Scenario: {}".format(scenario['name']))
                    self.set_scenario(scenario)
                    self.logger.debug("Set Scenario File: {}".format(
                        scenario['file']))
                    result_dir = self.tools.create_results_dir(
                        self.config['browbeat']['results'], time_stamp, "shaker",
                        scenario['name'])
                    workload = self.__class__.__name__
                    self.workload_logger(result_dir, workload)
                    time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
                    test_name = "{}-browbeat-{}-{}".format(time_stamp1,
                                                           "shaker", scenario['name'])
                    self.run_scenario(scenario, result_dir, test_name)
                    self.get_stats()
                else:
                    self.logger.info(
                        "Skipping {} as scenario enabled: false".format(
                            scenario['name']))
            self.final_stats(self.scenario_count)
        else:
            self.logger.error(
                "Configuration file contains no shaker scenarios")
Example #9
class PerfKit(WorkloadBase):

    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.PerfKit')
        self.config = config
        self.error_count = 0
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.test_count = 0
        self.scenario_count = 0
        self.pass_count = 0

    def _log_details(self):
        self.logger.info(
            "Current number of PerfKit scenarios executed: {}".format(
                self.scenario_count))
        self.logger.info("Current number of PerfKit test(s) executed: {}".format(self.test_count))
        self.logger.info("Current number of PerfKit test(s) succeeded: {}".format(self.pass_count))
        self.logger.info("Current number of PerfKit test failures: {}".format(self.error_count))

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
        self.logger.debug("--------------------------------")
        self.logger.debug("Benchmark_config: {}".format(benchmark_config))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        # Build command to run
        if 'enabled' in benchmark_config:
            del benchmark_config['enabled']
        cmd = ("source /home/stack/overcloudrc; source {0}; "
               "/home/stack/perfkit-venv/PerfKitBenchmarker/pkb.py "
               "--cloud={1} --run_uri=browbeat".format(self.config['perfkit']['venv'], cloud_type))
        # Add default parameters as necessary
        for default_item, value in self.config['perfkit']['default'].iteritems():
            if default_item not in benchmark_config:
                benchmark_config[default_item] = value
        for parameter, value in benchmark_config.iteritems():
            if not parameter == 'name':
                self.logger.debug(
                    "Parameter: {}, Value: {}".format(parameter, value))
                cmd += " --{}={}".format(parameter, value)

        # Remove any old results
        if os.path.exists("/tmp/perfkitbenchmarker/run_browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

        if self.config['connmon']['enabled']:
            self.connmon.start_connmon()

        # Run PerfKit
        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_before'])
        self.logger.info("Running Perfkit Command: {}".format(cmd))
        stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
        stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
        from_time = time.time()
        process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
        process.communicate()
        to_time = time.time()
        if 'sleep_after' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_after'])
        to_ts = int(time.time() * 1000)

        # Stop connmon at end of perfkit task
        if self.config['connmon']['enabled']:
            self.connmon.stop_connmon()
            try:
                self.connmon.move_connmon_results(result_dir, test_name)
                self.connmon.connmon_graphs(result_dir, test_name)
            except Exception:
                self.logger.error("Connmon Result data missing, Connmon never started")
        workload = self.__class__.__name__
        new_test_name = test_name.split('-')
        new_test_name = new_test_name[2:]
        new_test_name = '-'.join(new_test_name)
        # Determine success
        try:
            with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
                if any('SUCCEEDED' in line for line in stderr):
                    self.logger.info("Benchmark completed.")
                    self.update_pass_tests()
                    self.update_total_pass_tests()
                    self.get_time_dict(
                        to_time,
                        from_time,
                        benchmark_config['benchmarks'],
                        new_test_name,
                        workload,
                        "pass")

                else:
                    self.logger.error("Benchmark failed.")
                    self.update_fail_tests()
                    self.update_total_fail_tests()
                    self.get_time_dict(
                        to_time,
                        from_time,
                        benchmark_config['benchmarks'],
                        new_test_name,
                        workload,
                        "fail")
        except IOError:
            self.logger.error(
                "File missing: {}/pkb.stderr.log".format(result_dir))

        # Move all results into the result directory
        for perfkit_file in glob.glob("/tmp/perfkitbenchmarker/run_browbeat/*"):
            shutil.move(perfkit_file, result_dir)
        if os.path.exists("/tmp/perfkitbenchmarker/run_browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

        # Grafana integration
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(
            from_ts, to_ts, result_dir, test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

    def start_workloads(self):
        self.logger.info("Starting PerfKitBenchmarker Workloads.")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        benchmarks = self.config.get('perfkit')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    self.update_scenarios()
                    self.update_total_scenarios()
                    for run in range(self.config['browbeat']['rerun']):
                        self.update_tests()
                        self.update_total_tests()
                        result_dir = self.tools.create_results_dir(
                            self.config['browbeat']['results'], time_stamp, benchmark['name'], run)
                        test_name = "{}-{}-{}".format(time_stamp, benchmark['name'], run)
                        workload = self.__class__.__name__
                        self.workload_logger(result_dir, workload)
                        self.run_benchmark(benchmark, result_dir, test_name)
                        self._log_details()
                else:
                    self.logger.info(
                        "Skipping {} benchmark, enabled: false".format(benchmark['name']))
        else:
            self.logger.error("Config file contains no perfkit benchmarks.")
Example #10
class Shaker(WorkloadBase):
    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.Shaker')
        self.config = config
        self.tools = Tools(self.config)
        self.grafana = Grafana(self.config)
        self.error_count = 0
        self.pass_count = 0
        self.test_count = 0
        self.scenario_count = 0

    def shaker_checks(self):
        cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
        if self.tools.run_cmd(cmd) == "":
            self.logger.error("Shaker Image is not built, try again")
            exit(1)
        else:
            self.logger.info("Shaker image is built, continuing")

    def get_stats(self):
        self.logger.info("Current number of Shaker tests executed: {}".format(
            self.test_count))
        self.logger.info("Current number of Shaker tests passed: {}".format(
            self.pass_count))
        self.logger.info("Current number of Shaker tests failed: {}".format(
            self.error_count))

    def final_stats(self, total):
        self.logger.info(
            "Total Shaker scenarios enabled by user: {}".format(total))
        self.logger.info("Total number of Shaker tests executed: {}".format(
            self.test_count))
        self.logger.info("Total number of Shaker tests passed: {}".format(
            self.pass_count))
        self.logger.info("Total number of Shaker tests failed: {}".format(
            self.error_count))

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def set_scenario(self, scenario):
        fname = scenario['file']
        with open(fname, 'r') as stream:
            data = yaml.safe_load(stream)
        default_placement = "double_room"
        default_density = 1
        default_compute = 1
        default_progression = "linear"
        default_time = 60
        if "placement" in scenario:
            data['deployment']['accommodation'][1] = scenario['placement']
        else:
            data['deployment']['accommodation'][1] = default_placement
        if "density" in scenario:
            data['deployment']['accommodation'][2]['density'] = scenario[
                'density']
        else:
            data['deployment']['accommodation'][2]['density'] = default_density
        if "compute" in scenario:
            data['deployment']['accommodation'][3]['compute_nodes'] = scenario[
                'compute']
        else:
            data['deployment']['accommodation'][3][
                'compute_nodes'] = default_compute
        if "progression" in scenario:
            data['execution']['progression'] = scenario['progression']
        else:
            data['execution']['progression'] = default_progression
        data['execution']['tests'] = [
            d for d in data['execution']['tests']
            if d.get('class') == "iperf_graph"
        ]
        if "time" in scenario:
            data['execution']['tests'][0]['time'] = scenario['time']
        else:
            data['execution']['tests'][0]['time'] = default_time
        with open(fname, 'w') as yaml_file:
            yaml_file.write(yaml.dump(data, default_flow_style=False))

    def get_uuidlist(self, data):
        uuidlist = []
        for key in data['records'].iterkeys():
            uuidlist.append(key)
        return uuidlist

    def result_check(self, result_dir, test_name, scenario, to_time,
                     from_time):
        outputfile = os.path.join(result_dir, test_name + ".json")
        error = False
        with open(outputfile) as data_file:
            data = json.load(data_file)
        uuidlist = self.get_uuidlist(data)
        workload = self.__class__.__name__
        new_test_name = test_name.split('-')
        new_test_name = new_test_name[3:]
        new_test_name = '-'.join(new_test_name)
        for uuid in uuidlist:
            if data['records'][uuid]['status'] != "ok":
                error = True
        if error:
            self.logger.error("Failed Test: {}".format(scenario['name']))
            self.logger.error("saved log to: {}.log".format(
                os.path.join(result_dir, test_name)))
            self.update_fail_tests()
            self.update_total_fail_tests()
            self.get_time_dict(to_time, from_time, scenario['name'],
                               new_test_name, workload, "fail")
        else:
            self.logger.info("Completed Test: {}".format(scenario['name']))
            self.logger.info("Saved report to: {}".format(
                os.path.join(result_dir, test_name + ".html")))
            self.logger.info("saved log to: {}.log".format(
                os.path.join(result_dir, test_name)))
            self.update_pass_tests()
            self.update_total_pass_tests()
            self.get_time_dict(to_time, from_time, scenario['name'],
                               new_test_name, workload, "pass")

    def run_scenario(self, scenario, result_dir, test_name):
        filename = scenario['file']
        server_endpoint = self.config['shaker']['server']
        port_no = self.config['shaker']['port']
        flavor = self.config['shaker']['flavor']
        venv = self.config['shaker']['venv']
        shaker_region = self.config['shaker']['shaker_region']
        timeout = self.config['shaker']['join_timeout']
        cmd_1 = ("source {}/bin/activate; source /home/stack/overcloudrc"
                 ).format(venv)
        cmd_2 = (
            "shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
            " --os-region-name {7} --agent-join-timeout {6}"
            " --report {4}/{5}.html --output {4}/{5}.json"
            " --debug > {4}/{5}.log 2>&1").format(server_endpoint, port_no,
                                                  flavor, filename, result_dir,
                                                  test_name, timeout,
                                                  shaker_region)
        cmd = ("{}; {}").format(cmd_1, cmd_2)
        from_ts = int(time.time() * 1000)
        if 'sleep_before' in self.config['shaker']:
            time.sleep(self.config['shaker']['sleep_before'])
        from_time = time.time()
        self.tools.run_cmd(cmd)
        to_time = time.time()
        self.update_tests()
        self.update_total_tests()
        self.result_check(result_dir, test_name, scenario, to_time, from_time)
        if 'sleep_after' in self.config['shaker']:
            time.sleep(self.config['shaker']['sleep_after'])
        to_ts = int(time.time() * 1000)
        # Snapshotting
        self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir,
                                               test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

    def run_shaker(self):
        self.logger.info("Starting Shaker workloads")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        scenarios = self.config.get('shaker')['scenarios']
        self.shaker_checks()
        scen_length = len(scenarios)
        if scen_length > 0:
            for scenario in scenarios:
                if scenario['enabled']:
                    self.update_scenarios()
                    self.update_total_scenarios()
                    self.logger.info("Scenario: {}".format(scenario['name']))
                    self.set_scenario(scenario)
                    self.logger.debug("Set Scenario File: {}".format(
                        scenario['file']))
                    result_dir = self.tools.create_results_dir(
                        self.config['browbeat']['results'], time_stamp,
                        "shaker", scenario['name'])
                    workload = self.__class__.__name__
                    self.workload_logger(result_dir, workload)
                    time_stamp1 = datetime.datetime.now().strftime(
                        "%Y%m%d-%H%M%S")
                    test_name = "{}-browbeat-{}-{}".format(
                        time_stamp1, "shaker", scenario['name'])
                    self.run_scenario(scenario, result_dir, test_name)
                    self.get_stats()
                else:
                    self.logger.info(
                        "Skipping {} as scenario enabled: false".format(
                            scenario['name']))
            self.final_stats(self.scenario_count)
        else:
            self.logger.error(
                "Configuration file contains no shaker scenarios")
Example #11
class PerfKit(WorkloadBase):
    def __init__(self, config):
        self.logger = logging.getLogger('browbeat.PerfKit')
        self.config = config
        self.error_count = 0
        self.tools = Tools(self.config)
        self.connmon = Connmon(self.config)
        self.grafana = Grafana(self.config)
        self.test_count = 0
        self.scenario_count = 0
        self.pass_count = 0

    def _log_details(self):
        self.logger.info(
            "Current number of PerfKit scenarios executed: {}".format(
                self.scenario_count))
        self.logger.info(
            "Current number of PerfKit test(s) executed: {}".format(
                self.test_count))
        self.logger.info(
            "Current number of PerfKit test(s) succeeded: {}".format(
                self.pass_count))
        self.logger.info("Current number of PerfKit test failures: {}".format(
            self.error_count))

    def update_tests(self):
        self.test_count += 1

    def update_pass_tests(self):
        self.pass_count += 1

    def update_fail_tests(self):
        self.error_count += 1

    def update_scenarios(self):
        self.scenario_count += 1

    def run_benchmark(self,
                      benchmark_config,
                      result_dir,
                      test_name,
                      cloud_type="OpenStack"):
        self.logger.debug("--------------------------------")
        self.logger.debug("Benchmark_config: {}".format(benchmark_config))
        self.logger.debug("result_dir: {}".format(result_dir))
        self.logger.debug("test_name: {}".format(test_name))
        self.logger.debug("--------------------------------")

        # Build command to run
        if 'enabled' in benchmark_config:
            del benchmark_config['enabled']
        cmd = ("source /home/stack/overcloudrc; source {0}; "
               "/home/stack/perfkit-venv/PerfKitBenchmarker/pkb.py "
               "--cloud={1} --run_uri=browbeat".format(
                   self.config['perfkit']['venv'], cloud_type))
        # Add default parameters as necessary
        for default_item, value in self.config['perfkit']['default'].iteritems():
            if default_item not in benchmark_config:
                benchmark_config[default_item] = value
        for parameter, value in benchmark_config.iteritems():
            if not parameter == 'name':
                self.logger.debug("Parameter: {}, Value: {}".format(
                    parameter, value))
                cmd += " --{}={}".format(parameter, value)

        # Remove any old results
        if os.path.exists("/tmp/perfkitbenchmarker/run_browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

        if self.config['connmon']['enabled']:
            self.connmon.start_connmon()

        self.logger.info("Running Perfkit Command: {}".format(cmd))
        stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
        stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
        from_ts = time.time()
        if 'sleep_before' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_before'])
        process = subprocess.Popen(cmd,
                                   shell=True,
                                   stdout=stdout_file,
                                   stderr=stderr_file)
        process.communicate()
        if 'sleep_after' in self.config['perfkit']:
            time.sleep(self.config['perfkit']['sleep_after'])
        to_ts = time.time()

        # Stop connmon at end of perfkit task
        if self.config['connmon']['enabled']:
            self.connmon.stop_connmon()
            try:
                self.connmon.move_connmon_results(result_dir, test_name)
                self.connmon.connmon_graphs(result_dir, test_name)
            except Exception:
                self.logger.error(
                    "Connmon Result data missing, Connmon never started")

        workload = self.__class__.__name__
        new_test_name = test_name.split('-')
        new_test_name = new_test_name[2:]
        new_test_name = '-'.join(new_test_name)
        # Determine success
        try:
            with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
                if any('SUCCEEDED' in line for line in stderr):
                    self.logger.info("Benchmark completed.")
                    self.update_pass_tests()
                    self.update_total_pass_tests()
                    self.get_time_dict(to_ts, from_ts,
                                       benchmark_config['benchmarks'],
                                       new_test_name, workload, "pass")
                else:
                    self.logger.error("Benchmark failed.")
                    self.update_fail_tests()
                    self.update_total_fail_tests()
                    self.get_time_dict(to_ts, from_ts,
                                       benchmark_config['benchmarks'],
                                       new_test_name, workload, "fail")
        except IOError:
            self.logger.error(
                "File missing: {}/pkb.stderr.log".format(result_dir))

        # Move all results into the result directory
        for perfkit_file in glob.glob(
                "/tmp/perfkitbenchmarker/run_browbeat/*"):
            shutil.move(perfkit_file, result_dir)
        if os.path.exists("/tmp/perfkitbenchmarker/run_browbeat"):
            shutil.rmtree("/tmp/perfkitbenchmarker/run_browbeat")

        # Grafana integration
        self.grafana.create_grafana_urls({
            'from_ts': int(from_ts * 1000),
            'to_ts': int(to_ts * 1000)
        })
        self.grafana.print_dashboard_url(test_name)
        self.grafana.log_snapshot_playbook_cmd(from_ts, to_ts, result_dir,
                                               test_name)
        self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)

    def start_workloads(self):
        self.logger.info("Starting PerfKitBenchmarker Workloads.")
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.logger.debug("Time Stamp (Prefix): {}".format(time_stamp))
        benchmarks = self.config.get('perfkit')['benchmarks']
        if len(benchmarks) > 0:
            for benchmark in benchmarks:
                if benchmark['enabled']:
                    self.logger.info("Benchmark: {}".format(benchmark['name']))
                    self.update_scenarios()
                    self.update_total_scenarios()
                    for run in range(self.config['browbeat']['rerun']):
                        self.update_tests()
                        self.update_total_tests()
                        result_dir = self.tools.create_results_dir(
                            self.config['browbeat']['results'], time_stamp,
                            benchmark['name'], run)
                        test_name = "{}-{}-{}".format(time_stamp,
                                                      benchmark['name'], run)
                        workload = self.__class__.__name__
                        self.workload_logger(result_dir, workload)
                        self.run_benchmark(benchmark, result_dir, test_name)
                        self._log_details()
                else:
                    self.logger.info(
                        "Skipping {} benchmark, enabled: false".format(
                            benchmark['name']))
        else:
            self.logger.error("Config file contains no perfkit benchmarks.")