Example #1
    def test_parse_ini_file_missing_section_header(self, mock_config_parser_type):
        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.side_effect = \
            configparser.MissingSectionHeaderError(mock.Mock(), 321, mock.Mock())

        with self.assertRaises(configparser.MissingSectionHeaderError):
            utils.parse_ini_file('my_path')
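
The tests in this listing each receive a mock_config_parser_type argument, which implies a mock.patch decorator that the listing strips out. A plausible sketch of that decorator, assuming the module under test is yardstick.common.utils and that it imports configparser at module level (both assumptions, not confirmed by the listing):

    import mock  # on Python 3: from unittest import mock

    # Assumed decorator: replaces the ConfigParser class inside the module
    # under test, so mock_config_parser_type() returns a controllable instance.
    @mock.patch('yardstick.common.utils.configparser.ConfigParser')
    def test_parse_ini_file_missing_section_header(self, mock_config_parser_type):
        ...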
Example #2
    def test_parse_ini_file_no_default_section_header(self, mock_config_parser_type):
        s1 = {
            'key1': 'value11',
            'key2': 'value22',
        }
        s2 = {
            'key1': 'value123',
            'key2': 'value234',
        }

        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.return_value = True
        mock_config_parser.sections.return_value = ['s1', 's2']
        mock_config_parser.items.side_effect = iter([
            configparser.NoSectionError(mock.Mock()),
            s1.items(),
            s2.items(),
        ])

        expected = {
            'DEFAULT': {},
            's1': s1,
            's2': s2,
        }
        result = utils.parse_ini_file('my_path')
        self.assertDictEqual(result, expected)
Example #3
    def _create_data_source(self, ip):
        url = 'http://*****:*****@{}:{}/api/datasources'.format(ip, 3000)

        influx_conf = utils.parse_ini_file(consts.CONF_FILE)
        try:
            influx_url = influx_conf['dispatcher_influxdb']['target']
        except KeyError:
            LOG.exception('influxdb url not set in yardstick.conf')
            raise

        data = {
            "name": "yardstick",
            "type": "influxdb",
            "access": "proxy",
            "url": influx_url,
            "password": "******",
            "user": "******",
            "database": "yardstick",
            "basicAuth": True,
            "basicAuthUser": "******",
            "basicAuthPassword": "******",
            "isDefault": False,
        }
        try:
            HttpClient().post(url, data)
        except Exception:
            LOG.exception('Create datasources failed')
            raise
Example #4
    def test_parse_ini_file(self, mock_config_parser_type):
        defaults = {
            'default1': 'value1',
            'default2': 'value2',
        }
        s1 = {
            'key1': 'value11',
            'key2': 'value22',
        }
        s2 = {
            'key1': 'value123',
            'key2': 'value234',
        }

        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.return_value = True
        mock_config_parser.sections.return_value = ['s1', 's2']
        mock_config_parser.items.side_effect = iter([
            defaults.items(),
            s1.items(),
            s2.items(),
        ])

        expected = {
            'DEFAULT': defaults,
            's1': s1,
            's2': s2,
        }
        result = utils.parse_ini_file('my_path')
        self.assertDictEqual(result, expected)
Example #5
    def pre_start(self, args, **kwargs):
        atexit.register(self.atexit_handler)
        task_id = getattr(args, 'task_id')
        self.task_id = task_id if task_id else str(uuid.uuid4())
        self._set_log()
        try:
            output_config = utils.parse_ini_file(config_file)
        except Exception:
            # all errors are ignored; the default value is {}
            output_config = {}

        self._init_output_config(output_config)
        self._set_output_config(output_config, args.output_file)
        LOG.debug('Output configuration is: %s', output_config)

        self._set_dispatchers(output_config)

        # update dispatcher list
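        # (assumption: _init_output_config() above guarantees a 'DEFAULT'
        # section, so this lookup is safe even when parse_ini_file failed)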
        if 'file' in output_config['DEFAULT']['dispatcher']:
            result = {'status': 0, 'result': {}}
            utils.write_json_to_file(args.output_file, result)

        total_start_time = time.time()
        parser = TaskParser(args.inputfile[0])
        if args.suite:
            # 1. parse suite, return suite_params info
            task_files, task_args, task_args_fnames = \
                parser.parse_suite()
        else:
            task_files = [parser.path]
            task_args = [args.task_args]
            task_args_fnames = [args.task_args_file]

            LOG.debug("task_files:%s, task_args:%s, task_args_fnames:%s",
                      task_files, task_args, task_args_fnames)

        if args.parse_only:
            sys.exit(0)

        return task_files, task_args, task_args_fnames, parser
Example #6
File: env.py Project: mythwm/yardstick-ha
    def _create_data_source(self, ip):
        url = 'http://*****:*****@{}:{}/api/datasources'.format(ip, consts.GRAFANA_PORT)
        influx_conf = utils.parse_ini_file(consts.CONF_FILE).get('dispatcher_influxdb', {})

        data = {
            "name": "yardstick",
            "type": "influxdb",
            "access": "proxy",
            "url": influx_conf.get('target', ''),
            "password": influx_conf.get('password', ''),
            "user": influx_conf.get('username', ''),
            "database": "yardstick",
            "basicAuth": True,
            "basicAuthUser": "******",
            "basicAuthPassword": "******",
            "isDefault": True,
        }
        try:
            HttpClient().post(url, data, timeout=60)
        except Exception:
            LOG.exception('Create datasources failed')
            raise
Example #7
    def _create_data_source(self, ip):
        url = 'http://*****:*****@{}:{}/api/datasources'.format(ip, 3000)
        influx_conf = utils.parse_ini_file(consts.CONF_FILE).get('dispatcher_influxdb', {})

        data = {
            "name": "yardstick",
            "type": "influxdb",
            "access": "proxy",
            "url": influx_conf.get('target', ''),
            "password": influx_conf.get('password', ''),
            "user": influx_conf.get('username', ''),
            "database": "yardstick",
            "basicAuth": True,
            "basicAuthUser": "******",
            "basicAuthPassword": "******",
            "isDefault": False,
        }
        try:
            HttpClient().post(url, data)
        except Exception:
            LOG.exception('Create datasources failed')
            raise
Example #8
    def test_parse_ini_file_no_file(self, mock_config_parser_type):
        mock_config_parser = mock_config_parser_type()
        mock_config_parser.read.return_value = False
        with self.assertRaises(RuntimeError):
            utils.parse_ini_file('my_path')
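
Taken together, Examples #1, #2, #4, and #8 pin down the observable contract of utils.parse_ini_file: a falsy read() raises RuntimeError, MissingSectionHeaderError propagates unchanged, a missing DEFAULT section maps to an empty dict, and every named section becomes a plain dict. A minimal sketch consistent with those tests (the real yardstick implementation may differ in its details):

    import configparser

    def parse_ini_file(path):
        # Sketch inferred from the tests above, not the project's exact code.
        parser = configparser.ConfigParser()
        if not parser.read(path):  # read() returns the list of files parsed
            raise RuntimeError('file not exist')  # Example #8

        try:
            default = dict(parser.items('DEFAULT'))
        except configparser.NoSectionError:  # Example #2
            default = {}

        config = {'DEFAULT': default}
        config.update((s, dict(parser.items(s))) for s in parser.sections())
        return config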
Example #9
File: task.py Project: warmwm/yardstick-wm
    def start(self, args, **kwargs):
        """Start a benchmark scenario."""

        atexit.register(self.atexit_handler)

        task_id = getattr(args, 'task_id')
        self.task_id = task_id if task_id else str(uuid.uuid4())

        self._set_log()

        try:
            output_config = utils.parse_ini_file(config_file)
        except Exception:
            # all errors are ignored; the default value is {}
            output_config = {}

        self._init_output_config(output_config)
        self._set_output_config(output_config, args.output_file)
        LOG.debug('Output configuration is: %s', output_config)

        self._set_dispatchers(output_config)

        # update dispatcher list
        if 'file' in output_config['DEFAULT']['dispatcher']:
            result = {'status': 0, 'result': {}}
            utils.write_json_to_file(args.output_file, result)

        total_start_time = time.time()
        parser = TaskParser(args.inputfile[0])

        if args.suite:
            # 1. parse suite, return suite_params info
            task_files, task_args, task_args_fnames = \
                parser.parse_suite()
        else:
            task_files = [parser.path]
            task_args = [args.task_args]
            task_args_fnames = [args.task_args_file]

        LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
                 task_files, task_args, task_args_fnames)

        if args.parse_only:
            sys.exit(0)

        testcases = {}
        # parse task_files
        for i in range(0, len(task_files)):
            one_task_start_time = time.time()
            parser.path = task_files[i]
            scenarios, run_in_parallel, meet_precondition, contexts = \
                parser.parse_task(self.task_id, task_args[i],
                                  task_args_fnames[i])

            self.contexts.extend(contexts)

            if not meet_precondition:
                LOG.info("meet_precondition is %s, please check envrionment",
                         meet_precondition)
                continue

            case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
            try:
                data = self._run(scenarios, run_in_parallel, args.output_file)
            except KeyboardInterrupt:
                raise
            except Exception:
                LOG.exception("Running test case %s failed!", case_name)
                testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
            else:
                testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}

            if args.keep_deploy:
                # keep deployment, forget about stack
                # (hide it for exit handler)
                self.contexts = []
            else:
                for context in self.contexts[::-1]:
                    context.undeploy()
                self.contexts = []
            one_task_end_time = time.time()
            LOG.info("task %s finished in %d secs", task_files[i],
                     one_task_end_time - one_task_start_time)

        result = self._get_format_result(testcases)

        self._do_output(output_config, result)
        self._generate_reporting(result)

        total_end_time = time.time()
        LOG.info("total finished in %d secs",
                 total_end_time - total_start_time)

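        # NOTE: "scenarios" is whatever the final loop iteration assigned;
        # with an empty task_files list this line would raise NameError.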
        scenario = scenarios[0]
        print("To generate report execute => yardstick report generate ",
              scenario['task_id'], scenario['tc'])

        print("Done, exiting")
        return result
Example #10
    def do_benchmark(self, args, task_files_):
        testcases = {}
        parser = TaskParser(args.inputfile[0])
        if args.suite:
            # 1. parse suite, return suite_params info
            task_files, task_args, task_args_fnames = \
                parser.parse_suite()
        else:
            task_files = [parser.path]

        try:
            output_config = utils.parse_ini_file(config_file)
        except Exception:
            # all errors are ignored; the default value is {}
            output_config = {}

        total_start_time = time.time()

        for i in range(0, len(task_files)):
            one_task_start_time = time.time()
            case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
            try:
                data = self._run(self.arr_scenarios[i],
                                 self.run_in_parallels[i], args.output_file)
            except KeyboardInterrupt:
                raise
            except Exception:
                LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
                testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
            else:
                LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
                testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}

            if args.keep_deploy:
                # keep deployment, forget about stack
                # (hide it for exit handler)
                self.contexts = []
            else:
                for context in self.contexts[::-1]:
                    context.undeploy()
                self.contexts = []

            one_task_end_time = time.time()
            LOG.info("Task %s finished in %d secs", task_files[i],
                     one_task_end_time - one_task_start_time)

        result = self._get_format_result(testcases)
        LOG.info('-----------------------------------------------------')
        LOG.info(result)
        LOG.info('------------------------------')
        self._do_output(output_config, result)
        self._generate_reporting(result)

        total_end_time = time.time()
        LOG.info("Total finished in %d secs",
                 total_end_time - total_start_time)

        scenario = self.arr_scenarios[-1][0]
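        # logging's %-style formatting accepts a single dict argument, which
        # supplies the %(task_id)s and %(tc)s fields below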
        LOG.info(
            "To generate report, execute command "
            "'yardstick report generate %(task_id)s %(tc)s'", scenario)
        LOG.info("Task ALL DONE, exiting")
        return result
Example #11
    def start(self, args, **kwargs):  # pylint: disable=unused-argument
        """Start a benchmark scenario."""

        atexit.register(self.atexit_handler)

        task_id = getattr(args, 'task_id')
        self.task_id = task_id if task_id else str(uuid.uuid4())

        self._set_log()

        try:
            output_config = utils.parse_ini_file(CONF_FILE)
        except Exception:  # pylint: disable=broad-except
            # all errors are ignored; the default value is {}
            output_config = {}

        self._init_output_config(output_config)
        self._set_output_config(output_config, args.output_file)
        LOG.debug('Output configuration is: %s', output_config)

        self._set_dispatchers(output_config)

        # update dispatcher list
        if 'file' in output_config['DEFAULT']['dispatcher']:
            result = {'status': 0, 'result': {}}
            utils.write_json_to_file(args.output_file, result)

        total_start_time = time.time()
        parser = TaskParser(args.inputfile[0])

        if args.suite:
            # 1. parse suite, return suite_params info
            task_files, task_args, task_args_fnames = parser.parse_suite()
        else:
            task_files = [parser.path]
            task_args = [args.task_args]
            task_args_fnames = [args.task_args_file]

        LOG.debug("task_files:%s, task_args:%s, task_args_fnames:%s",
                  task_files, task_args, task_args_fnames)

        if args.parse_only:
            sys.exit(0)

        testcases = {}
        tasks = self._parse_tasks(parser, task_files, args, task_args,
                                  task_args_fnames)

        # Execute task files.
        for i, _ in enumerate(task_files):
            one_task_start_time = time.time()
            self.contexts.extend(tasks[i]['contexts'])
            if not tasks[i]['meet_precondition']:
                LOG.info('"meet_precondition" is %s, please check environment',
                         tasks[i]['meet_precondition'])
                continue

            try:
                success, data = self._run(tasks[i]['scenarios'],
                                          tasks[i]['run_in_parallel'],
                                          output_config)
            except KeyboardInterrupt:
                raise
            except Exception:  # pylint: disable=broad-except
                LOG.error('Testcase: "%s" FAILED!!!',
                          tasks[i]['case_name'],
                          exc_info=True)
                testcases[tasks[i]['case_name']] = {
                    'criteria': 'FAIL',
                    'tc_data': []
                }
            else:
                if success:
                    LOG.info('Testcase: "%s" SUCCESS!!!',
                             tasks[i]['case_name'])
                    testcases[tasks[i]['case_name']] = {
                        'criteria': 'PASS',
                        'tc_data': data
                    }
                else:
                    LOG.error('Testcase: "%s" FAILED!!!',
                              tasks[i]['case_name'],
                              exc_info=True)
                    testcases[tasks[i]['case_name']] = {
                        'criteria': 'FAIL',
                        'tc_data': data
                    }

            if args.keep_deploy:
                # keep deployment, forget about stack
                # (hide it for exit handler)
                self.contexts = []
            else:
                for context in self.contexts[::-1]:
                    context.undeploy()
                self.contexts = []
            one_task_end_time = time.time()
            LOG.info("Task %s finished in %d secs", task_files[i],
                     one_task_end_time - one_task_start_time)

        result = self._get_format_result(testcases)

        self._do_output(output_config, result)
        self._generate_reporting(result)

        total_end_time = time.time()
        LOG.info("Total finished in %d secs",
                 total_end_time - total_start_time)

        LOG.info(
            'To generate report, execute command "yardstick report '
            'generate %s <YAML_NAME>"', self.task_id)
        LOG.info("Task ALL DONE, exiting")
        return result
Example #12
    def start(self, args, **kwargs):
        """Start a benchmark scenario."""

        atexit.register(self.atexit_handler)

        self.task_id = kwargs.get('task_id', str(uuid.uuid4()))

        check_environment()

        try:
            self.config['yardstick'] = utils.parse_ini_file(config_file)
        except Exception:
            # all errors are ignored; the default value is {}
            self.config['yardstick'] = {}

        total_start_time = time.time()
        parser = TaskParser(args.inputfile[0])

        if args.suite:
            # 1. parse suite, return suite_params info
            task_files, task_args, task_args_fnames = \
                parser.parse_suite()
        else:
            task_files = [parser.path]
            task_args = [args.task_args]
            task_args_fnames = [args.task_args_file]

        LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
                 task_files, task_args, task_args_fnames)

        if args.parse_only:
            sys.exit(0)

        # parse task_files
        for i in range(0, len(task_files)):
            one_task_start_time = time.time()
            parser.path = task_files[i]
            scenarios, run_in_parallel, meet_precondition, contexts = \
                parser.parse_task(self.task_id, task_args[i],
                                  task_args_fnames[i])

            self.contexts.extend(contexts)

            if not meet_precondition:
                LOG.info("meet_precondition is %s, please check envrionment",
                         meet_precondition)
                continue

            self._run(scenarios, run_in_parallel, args.output_file)

            if args.keep_deploy:
                # keep deployment, forget about stack
                # (hide it for exit handler)
                self.contexts = []
            else:
                for context in self.contexts[::-1]:
                    context.undeploy()
                self.contexts = []
            one_task_end_time = time.time()
            LOG.info("task %s finished in %d secs", task_files[i],
                     one_task_end_time - one_task_start_time)

        total_end_time = time.time()
        LOG.info("total finished in %d secs",
                 total_end_time - total_start_time)

        scenario = scenarios[0]
        print("To generate report execute => yardstick report generate ",
              scenario['task_id'], scenario['tc'])

        print("Done, exiting")