Example #1
import logging
import sys
import traceback

from bzt.cli import CLI


def main(configs):
    """
    Entry point: build default options, then run the CLI against the given configs.
    """

    # `Options` stands for whatever options container the surrounding code defines
    options = Options()  # command line arguments which are usually passed to `bzt`

    # hard defaults below
    options.log = None
    options.option = None
    options.quiet = None
    options.verbose = None
    options.no_system_configs = None
    options.aliases = []

    # configs = [] # list which contains bzt config files

    executor = CLI(options)

    try:
        code = executor.perform(configs)
    except BaseException as exc_top:
        logging.error("%s: %s", type(exc_top).__name__, exc_top)
        logging.debug("Exception: %s", traceback.format_exc())
        code = 1

    sys.exit(code)
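A minimal launcher sketch for this entry point, assuming `main` is importable from the module that defines it (the file name in the comment is illustrative):

import sys

if __name__ == "__main__":
    # e.g. `python run_bzt.py my-test.yml overrides.json`
    main(sys.argv[1:])  # each positional argument is treated as a bzt config file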
Example #2
def setup_test_logging():
    """ set up test logging for convenience in IDE """
    if not ROOT_LOGGER.handlers:
        CLI.log = ''  # means no log file will be created
        CLI.verbose = True
        CLI.setup_logging(CLI)
    else:
        ROOT_LOGGER.debug("Already set up logging")
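A sketch of the module-level context this helper presupposes; the definition of ROOT_LOGGER is an assumption here, though Example #4 below performs the same logging.getLogger('') lookup inline:

import logging

ROOT_LOGGER = logging.getLogger('')  # assumed: module-level handle on the root logger

setup_test_logging()  # call once at import time so IDE-run tests produce log output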
Example #3
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.dirname(__file__) + "/../build/bzt.log"
        self.verbose = True
        self.option = []
        self.datadir = os.path.dirname(__file__) + "/../build/acli"
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()
Example #4
def setup_test_logging():
    """ set up test logging for convenience in IDE """
    root = logging.getLogger('')
    if not root.handlers:
        CLI.log = None
        CLI.verbose = True
        CLI.setup_logging(CLI)
    else:
        root.debug("Already set up logging")
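This variant is functionally the same as Example #2, but it fetches the root logger inline instead of relying on a module-level ROOT_LOGGER, and it passes None rather than '' for CLI.log; given the comment in Example #2, either value presumably suppresses creation of a log file.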
Example #5
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.join(os.path.dirname(__file__), "..", "build", "bzt.log")
        self.verbose = True
        self.no_system_configs = True
        self.option = []
        self.datadir = os.path.join(os.path.dirname(__file__), "..", "build", "acli")
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()
Example #6
    def setUp(self):
        super(TestCLI, self).setUp()
        self.logger = self.log
        self.log = os.path.join(BUILD_DIR, "bzt.log")
        self.verbose = False
        self.quiet = False
        self.no_system_configs = True
        self.option = []
        self.obj = CLI(self)
        self.assertTrue(os.path.exists(self.log))

        self.aliases = []
        self.obj.engine = EngineEmul()
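Note the pattern shared by these setUp() methods: the test case passes itself to CLI, so plain attributes on the test object stand in for parsed command-line flags. Judging by these examples, any duck-typed object exposing the same attributes would work; a hypothetical stand-alone namespace might look like this (the class name and field comments are assumptions, not bzt API):

class FakeOptions(object):
    """Illustrative options namespace mirroring what setUp() configures."""
    def __init__(self, log_path):
        self.log = log_path            # bzt log file path ('' or None presumably disables it)
        self.verbose = False           # more detailed console logging when True
        self.quiet = False             # per Example #9, silences WARNING and below
        self.no_system_configs = True  # presumably skips machine/user-level configs
        self.option = []               # '-o key=value' style config overrides
        self.aliases = []              # named config aliases to apply

# cli = CLI(FakeOptions("build/bzt.log"))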
Example #7
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.logger = self.log
        self.log = os.path.join(os.path.dirname(__file__), "..", "build", "bzt.log")
        self.verbose = False
        self.quiet = False
        self.no_system_configs = True
        self.option = []
        self.datadir = os.path.join(os.path.dirname(__file__), "..", "build", "acli")
        self.obj = CLI(self)
        self.assertTrue(os.path.exists(self.log))

        self.aliases = []
        self.obj.engine = EngineEmul()

    def get_ret_code(self, configs):
        self.obj.engine.config.get("settings", force_set=True).get("default-executor", "mock", force_set=True)
        return self.obj.perform(configs)

    def tearDown(self):
        self.obj.close_log()
        self.log = self.logger
        super(BZTestCase, self).tearDown()

    def test_perform_normal(self):
        ret = self.get_ret_code([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_call_proc_error(self):
        ret = self.get_ret_code([RESOURCES_DIR + "yaml/wrong_cmd.yml"])
        self.assertEquals(1, ret)

        # from shellexec
        good_err = "DEBUG EngineEmul] Command 'wrong_cmd' returned non-zero exit status 1"

        # from CalledProcessError constructor in reraise()
        bad_err = "__init__() missing 1 required positional argument: 'cmd'"

        log_file = os.path.join(self.obj.engine.artifacts_dir, "bzt.log")
        log_content = codecs.open(log_file, encoding="utf-8").read()
        self.assertIn(good_err, log_content)
        self.assertNotIn(bad_err, log_content)

    def test_unicode_logging(self):
        """ check whether unicode symbols are logged correctly into file """
        self.verbose = False
        u_symbol = b'\xe3\x81\xbc'.decode(encoding='utf-8')  # U+307C, uniform for py2/3
        self.obj.options.option = ['bo=%s' % u_symbol]

        ret = self.get_ret_code([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEqual(0, ret)
        log_file = os.path.join(self.obj.engine.artifacts_dir, "bzt.log")
        log_content = codecs.open(log_file, encoding="utf-8").read()
        self.assertIn(u_symbol, log_content)

    def test_perform_aliases(self):
        self.aliases = ['test4']
        ret = self.get_ret_code([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(0, ret)
        self.assertEqual("mock4", self.obj.engine.config.get("services")[0]["module"])

    def test_perform_prepare_exc(self):
        self.obj.engine.prepare_exc = TaurusException()
        ret = self.get_ret_code([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_overrides(self):
        self.option.append("test.subkey5.-1=value")
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=build/test/%Y-%m-%d_%H-%M-%S.%f")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        self.option.append("execution.-1.option=value")
        ret = self.get_ret_code([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.get_ret_code([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.get_ret_code([RESOURCES_DIR + "json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = RESOURCES_DIR + "json/mock_start_err.json"
        self.assertEquals(1, self.get_ret_code([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = RESOURCES_DIR + "json/mock_wait_err.json"
        self.assertEquals(1, self.get_ret_code([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = RESOURCES_DIR + "json/mock_end_err.json"
        self.assertEquals(1, self.get_ret_code([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = RESOURCES_DIR + "json/mock_postproc_err.json"
        self.assertEquals(3, self.get_ret_code([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        json_config = RESOURCES_DIR + "json/mock_normal.json"
        jmx1 = RESOURCES_DIR + "jmeter/jmx/dummy.jmx"
        jmx2 = RESOURCES_DIR + "jmeter/jmx/http.jmx"

        ret = self.get_ret_code([json_config, jmx1, jmx2])

        executions = self.obj.engine.config.get('execution', [])
        scenarios = [execution.get('scenario', {}) for execution in executions]
        scripts = set([scenario.get('script', None) for scenario in scenarios])

        self.assertEquals(0, ret)
        self.assertIn(jmx1, scripts)
        self.assertIn(jmx2, scripts)

    def test_override_artifacts_dir(self):
        # because EngineEmul sets up its own artifacts_dir
        self.obj.engine.artifacts_dir = None
        artifacts_dir = "build/tmp-test-artifacts"

        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=%s" % artifacts_dir)
        try:
            ret = self.get_ret_code([])
            self.assertEquals(0, ret)
            self.assertTrue(os.path.exists(artifacts_dir))
        finally:
            # cleanup artifacts dir
            if os.path.exists(artifacts_dir):
                shutil.rmtree(artifacts_dir)

    def test_logging_verbosity_adjustment(self):
        self.verbose = False
        ret = self.get_ret_code([
            RESOURCES_DIR + "json/mock_normal.json",
            ])
        self.assertEquals(0, ret)
        log_lines = open(os.path.join(self.obj.engine.artifacts_dir, "bzt.log")).readlines()
        checking = False
        found_line = False
        for line in log_lines:
            if "Leveling down" in line:
                found_line = True
                checking = True
            elif "Leveled up" in line:
                checking = False
            else:
                if checking:
                    self.assertNotIn("DEBUG", line)
        self.assertTrue(found_line)

    def test_cover_option_parser(self):
        parser = get_option_parser()
        parser.print_usage()

    def test_http_shorthand(self):
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.default-executor=mock")
        code = self.get_ret_code(["http://blazedemo.com/"])
        self.assertEqual(code, 0)
        log_content = open(os.path.join(self.obj.engine.artifacts_dir, "bzt.log")).read()
        configs = re.findall(r'[^\s\']*http_.*\.yml', log_content)
        self.assertGreater(len(configs), 0)

    def test_normal(self):
        self.option.append("cli.linter.lint-and-exit=true")
        self.obj.engine.config.merge({"execution": [{"concurrency": 10, "scenario": {"script": "foo.jmx"}}]})
        ret = self.get_ret_code([])
        self.assertEquals(0, ret)

    def test_normal_error(self):
        self.option.append("cli.linter.lint-and-exit=true")
        self.obj.engine.config.merge({"execution": {"concurrency": 10, "scenarion": {"script": "foo.jmx"}}})
        ret = self.get_ret_code([])
        self.assertEquals(1, ret)

    def test_ignore(self):
        self.option.append("cli.linter.lint-and-exit=true")
        self.option.append("cli.linter.ignored-warnings.0=single-execution")
        self.obj.engine.config.merge({"execution": {"concurrency": 10, "scenario": {"script": "foo.jmx"}}})
        ret = self.get_ret_code([])
        self.assertEquals(0, ret)
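Each of these classes runs under a stock unittest runner; assuming the module is named test_cli.py and the imports it relies on resolve, a standard footer makes it directly executable:

import unittest

if __name__ == "__main__":
    unittest.main()  # e.g. `python test_cli.py TestCLI.test_perform_normal -v`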
Example #8
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.join(os.path.dirname(__file__), "..", "build", "bzt.log")
        self.verbose = True
        self.no_system_configs = True
        self.option = []
        self.datadir = os.path.join(os.path.dirname(__file__), "..", "build", "acli")
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()

    def test_perform_normal(self):
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_perform_overrides(self):
        self.option.append("test.subkey5.-1=value")
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=build/test/%Y-%m-%d_%H-%M-%S.%f")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        self.option.append("execution.-1.option=value")
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.obj.perform([__dir__() + "/json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = __dir__() + "/json/mock_start_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = __dir__() + "/json/mock_wait_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = __dir__() + "/json/mock_end_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = __dir__() + "/json/mock_postproc_err.json"
        self.assertEquals(3, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        ret = self.obj.perform([
            __dir__() + "/json/mock_normal.json",
            __dir__() + "/jmx/dummy.jmx",
            __dir__() + "/jmx/dummy.jmx",
        ])
        self.assertEquals(0, ret)

    def test_override_artifacts_dir(self):
        # because EngineEmul sets up its own artifacts_dir
        self.obj.engine.artifacts_dir = None
        artifacts_dir = "/tmp/taurus-test-artifacts"

        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=%s" % artifacts_dir)
        try:
            ret = self.obj.perform([])
            self.assertEquals(0, ret)
            self.assertTrue(os.path.exists(artifacts_dir))
        finally:
            # cleanup artifacts dir
            if os.path.exists(artifacts_dir):
                shutil.rmtree(artifacts_dir)
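The strings fed to self.option in test_perform_overrides use a dotted-path syntax: each segment names a dict key, a numeric segment indexes into a list, and -1 appends a new item. bzt applies these through its own config machinery; purely as an illustration of the addressing scheme (not bzt's actual code, which also coerces types and merges values), a toy interpreter could look like this:

def apply_override(config, expr):
    """Toy interpreter for 'a.b.0=value'-style overrides (illustrative only)."""
    path, value = expr.split("=", 1)
    keys = path.split(".")

    def is_index(seg):
        return seg == "-1" or seg.isdigit()

    node = config
    for seg, nxt in zip(keys, keys[1:] + [None]):
        container = [] if (nxt is not None and is_index(nxt)) else {}
        if is_index(seg):  # list position
            idx = int(seg)
            if idx == -1 or idx >= len(node):  # -1 (or a gap) appends a new item
                node.append(container if nxt is not None else value)
                idx = len(node) - 1
            if nxt is None:
                node[idx] = value
            else:
                node = node[idx]
        else:  # dict key
            if nxt is None:
                node[seg] = value
            else:
                node = node.setdefault(seg, container)

conf = {}
apply_override(conf, "modules.mock=tests.mocks.ModuleMock")
apply_override(conf, "execution.-1.option=value")
# conf == {'modules': {'mock': 'tests.mocks.ModuleMock'},
#          'execution': [{'option': 'value'}]}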
Example #9
    def setup_logging(self):
        CLI.setup_logging(self.options)
        if self.options.quiet:
            logging.disable(logging.WARNING)
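Here logging.disable(logging.WARNING) raises a process-wide floor: records at WARNING severity and below are discarded, while ERROR and CRITICAL still pass. A quick self-contained check:

import logging

logging.basicConfig(level=logging.DEBUG)
logging.disable(logging.WARNING)  # mute WARNING and everything below it

logging.warning("suppressed")     # not emitted
logging.error("still visible")    # ERROR is above the disabled level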
Example #10
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.dirname(__file__) + "/../build/bzt.log"
        self.verbose = True
        self.option = []
        self.datadir = os.path.dirname(__file__) + "/../build/acli"
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()

    def test_perform_normal(self):
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_perform_overrides(self):
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." +
                           ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.obj.perform([__dir__() + "/json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = __dir__() + "/json/mock_start_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = __dir__() + "/json/mock_wait_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = __dir__() + "/json/mock_end_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = __dir__() + "/json/mock_postproc_err.json"
        self.assertEquals(3, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        ret = self.obj.perform([
            __dir__() + "/json/mock_normal.json",
            __dir__() + "/jmx/dummy.jmx"
        ])
        self.assertEquals(0, ret)
Example #11
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.join(os.path.dirname(__file__), "..", "build", "bzt.log")
        self.verbose = False
        self.quiet = False
        self.no_system_configs = True
        self.option = []
        self.datadir = os.path.join(os.path.dirname(__file__), "..", "build", "acli")
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()

    def test_perform_normal(self):
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_perform_aliases(self):
        self.aliases = ['test']
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(0, ret)
        self.assertTrue(self.obj.engine.config['marker'])

    def test_perform_prepare_exc(self):
        self.obj.engine.prepare_exc = TaurusException()
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_overrides(self):
        self.option.append("test.subkey5.-1=value")
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=build/test/%Y-%m-%d_%H-%M-%S.%f")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        self.option.append("execution.-1.option=value")
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = RESOURCES_DIR + "json/mock_start_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = RESOURCES_DIR + "json/mock_wait_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = RESOURCES_DIR + "json/mock_end_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = RESOURCES_DIR + "json/mock_postproc_err.json"
        self.assertEquals(3, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        json_config = RESOURCES_DIR + "json/mock_normal.json"
        jmx1 = RESOURCES_DIR + "jmeter/jmx/dummy.jmx"
        jmx2 = RESOURCES_DIR + "jmeter/jmx/http.jmx"

        ret = self.obj.perform([json_config, jmx1, jmx2])

        executions = self.obj.engine.config.get('execution', [])
        scenarios = [execution.get('scenario', {}) for execution in executions]
        scripts = set([scenario.get('script', None) for scenario in scenarios])

        self.assertEquals(0, ret)
        self.assertIn(jmx1, scripts)
        self.assertIn(jmx2, scripts)

    def test_override_artifacts_dir(self):
        # because EngineEmul sets up its own artifacts_dir
        self.obj.engine.artifacts_dir = None
        artifacts_dir = "build/tmp-test-artifacts"

        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=%s" % artifacts_dir)
        try:
            ret = self.obj.perform([])
            self.assertEquals(0, ret)
            self.assertTrue(os.path.exists(artifacts_dir))
        finally:
            # cleanup artifacts dir
            if os.path.exists(artifacts_dir):
                shutil.rmtree(artifacts_dir)

    def test_logging_verbosity_adjustment(self):
        self.verbose = False
        ret = self.obj.perform([
            RESOURCES_DIR + "json/mock_normal.json",
        ])
        self.assertEquals(0, ret)
        log_lines = open(os.path.join(self.obj.engine.artifacts_dir, "bzt.log")).readlines()
        checking = False
        found_line = False
        for line in log_lines:
            if "Leveling down" in line:
                found_line = True
                checking = True
            elif "Leveled up" in line:
                checking = False
            else:
                if checking:
                    self.assertNotIn("DEBUG", line)
        self.assertTrue(found_line)

    def test_cover_option_parser(self):
        parser = get_option_parser()
        parser.print_usage()
Example #12
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.dirname(__file__) + "/../build/bzt.log"
        self.verbose = True
        self.option = []
        self.datadir = os.path.dirname(__file__) + "/../build/acli"
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()

    def test_perform_normal(self):
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_perform_overrides(self):
        self.option.append("test.subkey5.-1=value")
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=build/test/%Y-%m-%d_%H-%M-%S.%f")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        self.option.append("execution.-1.option=value")
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.obj.perform([__dir__() + "/json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = __dir__() + "/json/mock_start_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = __dir__() + "/json/mock_wait_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = __dir__() + "/json/mock_end_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = __dir__() + "/json/mock_postproc_err.json"
        self.assertEquals(3, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        ret = self.obj.perform([
            __dir__() + "/json/mock_normal.json",
            __dir__() + "/jmx/dummy.jmx",
            __dir__() + "/jmx/dummy.jmx",
        ])
        self.assertEquals(0, ret)
Example #13
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.join(os.path.dirname(__file__), "..", "build", "bzt.log")
        self.verbose = True
        self.no_system_configs = True
        self.option = []
        self.datadir = os.path.join(os.path.dirname(__file__), "..", "build", "acli")
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()

    def test_perform_normal(self):
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_perform_overrides(self):
        self.option.append("test.subkey5.-1=value")
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=build/test/%Y-%m-%d_%H-%M-%S.%f")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        self.option.append("execution.-1.option=value")
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([__dir__() + "/json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.obj.perform([__dir__() + "/json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = __dir__() + "/json/mock_start_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = __dir__() + "/json/mock_wait_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = __dir__() + "/json/mock_end_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = __dir__() + "/json/mock_postproc_err.json"
        self.assertEquals(3, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        ret = self.obj.perform([
            __dir__() + "/json/mock_normal.json",
            __dir__() + "/jmx/dummy.jmx",
            __dir__() + "/jmx/dummy.jmx",
        ])
        self.assertEquals(0, ret)

    def test_override_artifacts_dir(self):
        # because EngineEmul sets up its own artifacts_dir
        self.obj.engine.artifacts_dir = None
        artifacts_dir = "build/tmp-test-artifacts"

        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=%s" % artifacts_dir)
        try:
            ret = self.obj.perform([])
            self.assertEquals(0, ret)
            self.assertTrue(os.path.exists(artifacts_dir))
        finally:
            # cleanup artifacts dir
            if os.path.exists(artifacts_dir):
                shutil.rmtree(artifacts_dir)

    def test_logging_verbosity_adjustment(self):
        was_verbose = self.verbose
        try:
            self.verbose = False
            ret = self.obj.perform([
                __dir__() + "/json/mock_normal.json",
            ])
            self.assertEquals(0, ret)
            log_lines = open(os.path.join(self.obj.engine.artifacts_dir, "bzt.log")).readlines()
            checking = False
            found_line = False
            for line in log_lines:
                if "Leveling down" in line:
                    found_line = True
                    checking = True
                elif "Leveled up" in line:
                    checking = False
                else:
                    if checking:
                        self.assertNotIn("DEBUG", line)
            self.assertTrue(found_line)
        finally:
            self.verbose = was_verbose
Example #14
class TestCLI(BZTestCase):
    def setUp(self):
        super(TestCLI, self).setUp()
        self.log = os.path.join(os.path.dirname(__file__), "..", "build", "bzt.log")
        self.verbose = False
        self.quiet = False
        self.no_system_configs = True
        self.option = []
        self.datadir = os.path.join(os.path.dirname(__file__), "..", "build", "acli")
        self.obj = CLI(self)
        self.aliases = []
        self.obj.engine = EngineEmul()

    def test_perform_normal(self):
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(0, ret)

    def test_perform_aliases(self):
        self.aliases = ['test']
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(0, ret)
        self.assertTrue(self.obj.engine.config['marker'])

    def test_perform_prepare_exc(self):
        self.obj.engine.prepare_exc = TaurusException()
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_overrides(self):
        self.option.append("test.subkey5.-1=value")
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=build/test/%Y-%m-%d_%H-%M-%S.%f")
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        self.option.append("execution.-1.option=value")
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_perform_overrides_fail(self):
        self.option.append("test.subkey2.0.sskey=value")
        self.option.append("test.subkey.0=value")
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_normal.json"])
        self.assertEquals(1, ret)

    def test_perform_prepare_err(self):
        ret = self.obj.perform([RESOURCES_DIR + "json/mock_prepare_err.json"])
        self.assertEquals(1, ret)

        prov = self.obj.engine.provisioning

        self.assertTrue(prov.was_prepare)
        self.assertFalse(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertFalse(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_start_err(self):
        conf = RESOURCES_DIR + "json/mock_start_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertFalse(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_wait_err(self):
        conf = RESOURCES_DIR + "json/mock_wait_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_end_err(self):
        conf = RESOURCES_DIR + "json/mock_end_err.json"
        self.assertEquals(1, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_perform_postproc_err(self):
        conf = RESOURCES_DIR + "json/mock_postproc_err.json"
        self.assertEquals(3, self.obj.perform([conf]))

        prov = self.obj.engine.provisioning
        self.assertTrue(prov.was_prepare)
        self.assertTrue(prov.was_startup)
        self.assertTrue(prov.was_check)
        self.assertTrue(prov.was_shutdown)
        self.assertTrue(prov.was_postproc)

    def test_jmx_shorthand(self):
        json_config = RESOURCES_DIR + "json/mock_normal.json"
        jmx1 = RESOURCES_DIR + "jmeter/jmx/dummy.jmx"
        jmx2 = RESOURCES_DIR + "jmeter/jmx/http.jmx"

        ret = self.obj.perform([json_config, jmx1, jmx2])

        executions = self.obj.engine.config.get('execution', [])
        scenarios = [execution.get('scenario', {}) for execution in executions]
        scripts = set([scenario.get('script', None) for scenario in scenarios])

        self.assertEquals(0, ret)
        self.assertIn(jmx1, scripts)
        self.assertIn(jmx2, scripts)

    def test_override_artifacts_dir(self):
        # because EngineEmul sets up its own artifacts_dir
        self.obj.engine.artifacts_dir = None
        artifacts_dir = "build/tmp-test-artifacts"

        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.artifacts-dir=%s" % artifacts_dir)
        try:
            ret = self.obj.perform([])
            self.assertEquals(0, ret)
            self.assertTrue(os.path.exists(artifacts_dir))
        finally:
            # cleanup artifacts dir
            if os.path.exists(artifacts_dir):
                shutil.rmtree(artifacts_dir)

    def test_logging_verbosity_adjustment(self):
        self.verbose = False
        ret = self.obj.perform([
            RESOURCES_DIR + "json/mock_normal.json",
        ])
        self.assertEquals(0, ret)
        log_lines = open(os.path.join(self.obj.engine.artifacts_dir, "bzt.log")).readlines()
        checking = False
        found_line = False
        for line in log_lines:
            if "Leveling down" in line:
                found_line = True
                checking = True
            elif "Leveled up" in line:
                checking = False
            else:
                if checking:
                    self.assertNotIn("DEBUG", line)
        self.assertTrue(found_line)

    def test_cover_option_parser(self):
        parser = get_option_parser()
        parser.print_usage()

    def test_http_shorthand(self):
        self.option.append("modules.mock=" + ModuleMock.__module__ + "." + ModuleMock.__name__)
        self.option.append("provisioning=mock")
        self.option.append("settings.default-executor=mock")
        code = self.obj.perform(["http://blazedemo.com/"])
        self.assertEqual(code, 0)
        log_content = open(os.path.join(self.obj.engine.artifacts_dir, "bzt.log")).read()
        configs = re.findall(r'[^\s\']*http_.*\.yml', log_content)
        self.assertGreater(len(configs), 0)

    def test_normal(self):
        self.option.append("cli.linter.lint-and-exit=true")
        self.obj.engine.config.merge({"execution": [{"concurrency": 10, "scenario": {"script": "foo.jmx"}}]})
        ret = self.obj.perform([])
        self.assertEquals(0, ret)

    def test_normal_error(self):
        self.option.append("cli.linter.lint-and-exit=true")
        self.obj.engine.config.merge({"execution": {"concurrency": 10, "scenario": {"script": "foo.jmx"}}})
        ret = self.obj.perform([])
        self.assertEquals(1, ret)

    def test_ignore(self):
        self.option.append("cli.linter.lint-and-exit=true")
        self.option.append("cli.linter.ignored-warnings.0=single-execution")
        self.obj.engine.config.merge({"execution": {"concurrency": 10, "scenario": {"script": "foo.jmx"}}})
        ret = self.obj.perform([])
        self.assertEquals(0, ret)
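For reference, the snippets above presuppose roughly this module header. The bzt names are real exports (CLI, get_option_parser and TaurusException ship with Taurus), while the test-helper import paths are assumptions about the surrounding test tree:

import codecs
import os
import re
import shutil

from bzt import TaurusException
from bzt.cli import CLI, get_option_parser

# assumed locations of the test helpers used throughout:
# from tests import BZTestCase, RESOURCES_DIR, __dir__
# from tests.mocks import EngineEmul, ModuleMock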