Example #1
    def generate_scenario(description_file):
        """
        Generates the test scenario list for a given description.

        :param description_file: A file or dictionary with the following
            entries:
            name (required) name for the api
            http-method (required) one of HEAD, GET, PUT, POST, PATCH, DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        description = NegativeAutoTest.load_schema(description_file)
        LOG.debug(description)

        # NOTE(mkoderer): since this will be executed on import level the
        # config doesn't have to be in place (e.g. for the pep8 job).
        # In this case simply return.
        try:
            generator = importutils.import_class(
                CONF.negative.test_generator)()
        except cfg.ConfigFilesNotFoundError:
            LOG.critical(
                "Tempest config not found. Test scenarios aren't created")
            return
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        if schema is not None:
            for name, schema, expected_result in generator.generate(schema):
                if (expected_result is None and
                    "default_result_code" in description):
                    expected_result = description["default_result_code"]
                scenario_list.append((name,
                                      {"schema": schema,
                                       "expected_result": expected_result}))
        LOG.debug(scenario_list)
        return scenario_list
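NegativeAutoTest.load_schema reads the description from a JSON file. A minimal, hypothetical description covering the keys documented above (the name, url, and status codes are illustrative only):

    {
        "name": "get-flavor-details",
        "http-method": "GET",
        "url": "flavors/%s",
        "resources": [
            {"name": "flavor", "expected_result": 404}
        ],
        "json-schema": {
            "type": "object",
            "properties": {
                "minRam": {"type": "integer"}
            }
        },
        "default_result_code": 400
    }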
Example #2
    def setUp(self, **kwargs):
        method = kwargs['test_method'].split('.')
        self.test_method = method.pop()
        self.klass = importutils.import_class('.'.join(method))
        self.logger = logging.getLogger('.'.join(method))
        # valid options are 'process', 'application', 'action'
        self.class_setup_per = kwargs.get('class_setup_per',
                                          SetUpClassRunTime.process)
        SetUpClassRunTime.validate(self.class_setup_per)

        if self.class_setup_per == SetUpClassRunTime.application:
            self.klass.setUpClass()
        self.setupclass_called = False
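A hedged usage sketch; UnitTest stands in for a stress action class that exposes this setUp, and the dotted test path is an illustrative assumption:

    # The last element of 'test_method' is popped off as the method name;
    # everything before it is imported as the test class.
    action = UnitTest(manager, max_runs=10, stop_on_error=False)  # hypothetical
    action.setUp(test_method="tempest.api.compute.flavors.test_flavors"
                             ".FlavorsTest.test_list_flavors",
                 class_setup_per="process")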
Example #3
    def generate_scenario(description_file):
        """
        Generates the test scenario list for a given description.

        :param description_file: A file or dictionary with the following
            entries:
            name (required) name for the api
            http-method (required) one of HEAD, GET, PUT, POST, PATCH, DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        description = NegativeAutoTest.load_schema(description_file)
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        if schema is not None:
            for name, schema, expected_result in generator.generate(schema):
                if (expected_result is None and
                    "default_result_code" in description):
                    expected_result = description["default_result_code"]
                scenario_list.append((name,
                                      {"schema": schema,
                                       "expected_result": expected_result}))
        LOG.debug(scenario_list)
        return scenario_list
Example #4
    def generate_scenario(description_file):
        """
        Generates the test scenario list for a given description.

        :param description_file: A file or dictionary with the following
            entries:
            name (required) name for the api
            http-method (required) one of HEAD, GET, PUT, POST, PATCH, DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        description = NegativeAutoTest.load_schema(description_file)
        LOG.debug(description)
        generator = importutils.import_class(CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        if schema is not None:
            for invalid in generator.generate(schema):
                scenario_list.append((invalid[0],
                                      {"schema": invalid[1],
                                       "expected_result": invalid[2]}))
        LOG.debug(scenario_list)
        return scenario_list
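Each entry pairs a scenario name with the keyword arguments for one generated test, so the returned list has roughly this shape (all values illustrative):

    [
        ("inv_res_flavor",
         {"resource": ("flavor", "d3f8a8d4-5b11-4c9e-9f22-6a3c1b2e7a10"),
          "expected_result": None}),
        ("gen_none", {"schema": {"type": "string"}, "expected_result": 400}),
    ]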
Example #5
    def execute(self, description):
        """
        Executes an HTTP call against an API that is expected to result in a
        client error. First it uses invalid resources that are part of the
        url, and then invalid data for queries and http request bodies.

        :param description: A json file or dictionary with the following
        entries:
            name (required) name for the api
            http-method (required) one of HEAD, GET, PUT, POST, PATCH, DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.

        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]

        resources = [
            self.get_resource(r) for r in description.get("resources", [])
        ]

        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")

        if "admin_client" in description and description["admin_client"]:
            client = self.admin_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method,
                                              new_url,
                                              resources,
                                              body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)
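The split between query-string and body data described in the docstring happens in _http_arguments. A minimal sketch of what such a helper may look like, assuming the behavior documented above rather than the actual Tempest implementation:

    import json

    from six.moves.urllib import parse as urlparse

    def _http_arguments(json_dict, url, method):
        if not json_dict:
            return url, None
        if method in ("GET", "HEAD", "PUT", "DELETE"):
            # attach the generated data as a query string
            return "%s?%s" % (url, urlparse.urlencode(json_dict)), None
        # otherwise send it as the request body
        return url, json.dumps(json_dict)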
Example #6
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    logfiles = admin_manager.config.stress.target_logfiles
    log_check_interval = int(admin_manager.config.stress.log_check_interval)
    if logfiles:
        controller = admin_manager.config.stress.target_controller
        computes = _get_compute_nodes(controller)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in range(test.get('threads', 1)):
            if test.get('use_isolated_tenants', False):
                username = rand_name("stress_user")
                tenant_name = rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                manager = clients.Manager(username=username,
                                          password="******",
                                          tenant_name=tenant_name)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, logger, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**kwargs)

            logger.debug("calling Target Object %s" %
                         test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test['action'],
                       'statistic': shared_statistic}

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    while True:
        if max_runs is None:
            remaining = end_time - time.time()
            if remaining <= 0:
                break
        else:
            remaining = log_check_interval
            all_proc_term = True
            for process in processes:
                if process['process'].is_alive():
                    all_proc_term = False
                    break
            if all_proc_term:
                break

        time.sleep(min(remaining, log_check_interval))
        if stop_on_error:
            # break out of the while loop on the first recorded failure
            if any(process['statistic']['fails'] > 0
                   for process in processes):
                break

        if not logfiles:
            continue
        errors = _error_in_logs(logfiles, computes)
        if errors:
            had_errors = True
            break

    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    logger.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        logger.info(" Process %d (%s): Run %d actions (%d failed)" %
                    (process['p_number'],
                     process['action'],
                     process['statistic']['runs'],
                     process['statistic']['fails']))
    logger.info("Summary:")
    logger.info("Run %d actions (%d failed)" %
                (sum_runs, sum_fails))

    if not had_errors:
        logger.info("cleaning up")
        cleanup.cleanup(logger)
    if had_errors:
        return 1
    else:
        return 0
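The tests argument is a list of dictionaries; given the keys read above, a single entry could look like this (the action path and thread count are illustrative):

    tests = [{
        'action': 'tempest.stress.actions.server_create_destroy'
                  '.ServerCreateDestroyTest',
        'threads': 4,
        'use_admin': False,
        'use_isolated_tenants': True,
        'kwargs': {},
    }]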
Example #7
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    admin_manager = clients.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username, password, tenant['id'],
                                            "email")
                creds = cred_provider.get_credentials(username=username,
                                                      password=password,
                                                      tenant_name=tenant_name)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**kwargs)

            LOG.debug("calling Target Object %s" % test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic, ))

            process = {
                'process': p,
                'p_number': p_number,
                'action': test_run.action,
                'statistic': shared_statistic
            }

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any(proc['statistic']['fails'] > 0
                       for proc in processes):
                    break

            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
                 (process['p_number'], process['action'],
                  process['statistic']['runs'], process['statistic']['fails']))
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
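A minimal invocation sketch, reusing the tests list sketched earlier and treating the return value as a process exit code:

    import sys

    # run the workload for three minutes, aborting on the first failure
    sys.exit(stress_openstack(tests, duration=180, max_runs=None,
                              stop_on_error=True))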
Example #8
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    admin_manager = clients.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get("use_admin", False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in moves.xrange(test.get("threads", default_thread_num)):
            if test.get("use_isolated_tenants", False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username, password, tenant["id"], "email")
                creds = auth.get_credentials(username=username, password=password, tenant_name=tenant_name)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test["action"])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get("kwargs", {})
            test_run.setUp(**kwargs)

            LOG.debug("calling Target Object %s" % test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic["runs"] = 0
            shared_statistic["fails"] = 0

            p = multiprocessing.Process(target=test_run.execute, args=(shared_statistic,))

            process = {"process": p, "p_number": p_number, "action": test_run.action, "statistic": shared_statistic}

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process["process"].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any(proc["statistic"]["fails"] > 0 for proc in processes):
                    break

            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key, stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process["statistic"]["fails"] > 0:
            had_errors = True
        sum_runs += process["statistic"]["runs"]
        sum_fails += process["statistic"]["fails"]
        LOG.info(
            " Process %d (%s): Run %d actions (%d failed)"
            % (process["p_number"], process["action"], process["statistic"]["runs"], process["statistic"]["fails"])
        )
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
Example #9
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    logfiles = admin_manager.config.stress.target_logfiles
    log_check_interval = int(admin_manager.config.stress.log_check_interval)
    if logfiles:
        controller = admin_manager.config.stress.target_controller
        computes = _get_compute_nodes(controller)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in range(test.get('threads', 1)):
            if test.get('use_isolated_tenants', False):
                username = rand_name("stress_user")
                tenant_name = rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username, password, tenant['id'],
                                            "email")
                manager = clients.Manager(username=username,
                                          password="******",
                                          tenant_name=tenant_name)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, logger, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**kwargs)

            logger.debug("calling Target Object %s" %
                         test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic, ))

            process = {
                'process': p,
                'p_number': p_number,
                'action': test['action'],
                'statistic': shared_statistic
            }

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    while True:
        if max_runs is None:
            remaining = end_time - time.time()
            if remaining <= 0:
                break
        else:
            remaining = log_check_interval
            all_proc_term = True
            for process in processes:
                if process['process'].is_alive():
                    all_proc_term = False
                    break
            if all_proc_term:
                break

        time.sleep(min(remaining, log_check_interval))
        if stop_on_error:
            # break out of the while loop on the first recorded failure
            if any(process['statistic']['fails'] > 0
                   for process in processes):
                break

        if not logfiles:
            continue
        errors = _error_in_logs(logfiles, computes)
        if errors:
            had_errors = True
            break

    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    logger.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        logger.info(
            " Process %d (%s): Run %d actions (%d failed)" %
            (process['p_number'], process['action'],
             process['statistic']['runs'], process['statistic']['fails']))
    logger.info("Summary:")
    logger.info("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors:
        logger.info("cleaning up")
        cleanup.cleanup(logger)
    if had_errors:
        return 1
    else:
        return 0
Example #10
    def execute(self, description):
        """
        Executes an HTTP call against an API that is expected to result in a
        client error. First it uses invalid resources that are part of the
        url, and then invalid data for queries and http request bodies.

        :param description: A json file or dictionary with the following
        entries:
            name (required) name for the api
            http-method (required) one of HEAD, GET, PUT, POST, PATCH, DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.

        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]

        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]

        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")

        if "admin_client" in description and description["admin_client"]:
            client = self.admin_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)
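The final assertion compares the actual status code against the expectation. A sketch of what _check_negative_response may do, written here as a free function under the assumption that a missing expectation accepts any 4xx client error:

    def _check_negative_response(expected_result, result, body):
        if expected_result is None:
            # no explicit expectation: any client error counts as success
            assert 400 <= result < 500, body
        else:
            assert result == expected_result, body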