def getinfo(session, request=None):
    """Build the Vitrage HTML report from all Rally tasks tagged 'Vitrage'.

    Rally is an optional dependency: when either of its modules is missing
    the function only logs a warning and returns.
    """
    try:
        try:
            from rally import api as rally_api
        except ImportError:
            rally_api = None
        try:
            from rally.cli.commands import task as rally_task
        except ImportError:
            rally_task = None
        if rally_api is None or rally_task is None:
            # Funnel both missing-module cases into the single handler below.
            raise ImportError
        query = {"tags": ['Vitrage']}
        uuids = [entry['uuid']
                 for entry in rally_api.API().task.list(**query)]
        report_path = ('./static/dashboard/project/'
                       'components/actions/vitrage.html')
        if uuids:
            rally_task.TaskCommands().report(rally_api.API(), uuids,
                                             out=report_path)
    except ImportError:
        LOG.warning('Failed to import Rally')
def execute(session, request):
    """Start or syntax-check a Rally task described by a JSON request.

    The request is a JSON object with a boolean ``syntaxcheck`` flag plus
    exactly one other key: the task name, mapped to the task config.  The
    config is written to a temporary ``./<name>.json`` file, handed to
    Rally (``start`` when ``syntaxcheck`` is falsy, ``validate`` otherwise)
    and then removed.

    :param session: unused here; kept for the plugin interface
    :param request: JSON string with the task payload
    """
    try:
        from rally import api as rally_api
    except ImportError:
        rally_api = None
    try:
        from rally.cli.commands import task as rally_task
    except ImportError:
        rally_task = None
    if rally_api is None or rally_task is None:
        LOG.warning('Failed to import Rally')
        return
    temp_dict = json.loads(request)
    # BUG FIX: in Python 3, dict.keys() returns a view which supports
    # neither .remove() nor indexing — build a real list of the task keys.
    keys = [k for k in temp_dict if k != 'syntaxcheck']
    task_name = keys[0]
    file_path = './' + task_name + '.json'
    with open(file_path, 'w') as make_file:
        json.dump(temp_dict[task_name], make_file)
    try:
        if not temp_dict['syntaxcheck']:
            rally_task.TaskCommands().start(rally_api.API(), file_path,
                                            tags=['Vitrage', task_name])
        else:
            rally_task.TaskCommands().validate(rally_api.API(), file_path)
    finally:
        # Always clean up the temporary task file, even if Rally raises.
        os.remove(file_path)
def run(args):
    """Execute the verification scenario as an ordered list of steps.

    Each step is constructed with the shared Rally API handle, executed
    only if its precondition against the results gathered so far holds,
    and always recorded so later steps can inspect it.

    :param args: parsed CLI arguments; ``args.compare`` adds a second
        verification run for comparison
    :returns: the ordered collection of executed step instances
    """
    steps = [
        SetUpStep, ListPlugins, CreateVerifier, ShowVerifier,
        ListVerifiers, UpdateVerifier, ConfigureVerifier, ExtendVerifier,
        ListVerifierExtensions, ListVerifierTests, RunVerification,
        ShowVerification, ShowDetailedVerification, HtmlVerificationReport,
        JsonVerificationReport, JunitVerificationReport, ListVerifications,
        DeleteVerifierExtension, DestroyDeployment, DeleteVerifier
    ]
    if args.compare:
        # A comparison needs one more verification; splice the extra steps
        # right after the detailed view of the first run.
        anchor = steps.index(ShowDetailedVerification) + 1
        steps[anchor:anchor] = [ReRunVerification,
                                ShowSecondVerification,
                                ShowDetailedSecondVerification]
    results = collections.OrderedDict()
    rapi = api.API()
    for step_cls in steps:
        current = step_cls(args, rapi=rapi)
        if current.check(results):
            current.run()
        results[step_cls] = current
    return results.values()
def setUp(self):
    """Skip coverage/base-class runs; build a DB-check-free Rally API."""
    super(SamplesTestCase, self).setUp()
    tox_env = os.environ.get("TOX_ENV_NAME")
    if tox_env == "cover":
        self.skipTest("There is no need to check samples in coverage job.")
    # Subclasses define samples_path; the base class itself has nothing
    # to validate.
    if not hasattr(self, "samples_path"):
        self.skipTest("It is a base class.")
    self.rapi = api.API(skip_db_check=True)
def setUp(self):
    """Create the command object plus one fake and one real Rally API."""
    super(TaskCommandsTestCase, self).setUp()
    self.task = task.TaskCommands()
    self.fake_api = fakes.FakeAPI()
    # Building a real API normally triggers the DB revision check; patch
    # it away so the test needs no database.
    patcher = mock.patch("rally.api.API.check_db_revision")
    with patcher:
        self.real_api = api.API()
def run(self, **kwargs):
    """Run the Rally all-in-one Kubernetes task and export its reports.

    Routes Rally logging into ``self.res_dir``, (re)creates the
    'my-kubernetes' environment from the process environment, renders the
    bundled ``all-in-one.yaml`` Jinja template, starts the task and, when
    it finishes, writes HTML and JUnit-XML reports next to the logs.

    :param kwargs: optional overrides for ``concurrency``, ``times`` and
        ``namespaces_count`` (defaults come from instance attributes)
    """
    self.start_time = time.time()
    if not os.path.exists(self.res_dir):
        os.makedirs(self.res_dir)
    rapi = api.API()
    # Send Rally's own logging to a file in the result directory instead
    # of stderr.
    api.CONF.set_default("use_stderr", False)
    api.CONF.set_default('log_dir', self.res_dir)
    api.CONF.set_default('log_file', 'rally.log')
    rally.common.logging.setup("rally")
    spec = env_mgr.EnvManager.create_spec_from_sys_environ()["spec"]
    try:
        # Drop any stale environment left over from a previous run.
        env_mgr.EnvManager.get('my-kubernetes').delete(force=True)
    except exceptions.DBRecordNotFound:
        pass
    env = env_mgr.EnvManager.create('my-kubernetes', spec)
    result = env.check_health()
    self.__logger.debug("check health %s: %s", 'my-kubernetes', result)
    if not result['existing@kubernetes']['available']:
        self.__logger.error(
            "Cannot check env heath: %s",
            result['existing@kubernetes']['message'])
        return
    with open(pkg_resources.resource_filename(
            'functest_kubernetes', 'rally/all-in-one.yaml')) as file:
        template = Template(file.read())
    # Render the Jinja template into a concrete Rally task config.
    task = yaml.safe_load(template.render(
        concurrency=kwargs.get("concurrency", self.concurrency),
        times=kwargs.get("times", self.times),
        namespaces_count=kwargs.get(
            "namespaces_count", self.namespaces_count)))
    rapi.task.validate(deployment='my-kubernetes', config=task)
    task_instance = rapi.task.create(deployment='my-kubernetes')
    rapi.task.start(
        deployment='my-kubernetes', config=task,
        task=task_instance["uuid"])
    self.details = rapi.task.get(task_instance["uuid"], detailed=False)
    self.__logger.debug("details: %s", self.details)
    if self.details['pass_sla']:
        self.result = 100
    # Export the finished task as an HTML report; the API returns the
    # rendered content keyed by destination path under "files".
    result = rapi.task.export(
        [task_instance["uuid"]], "html",
        output_dest=os.path.join(
            self.res_dir, "{}.html".format(self.case_name)))
    if "files" in result:
        for path in result["files"]:
            with open(path, "w+") as output:
                output.write(result["files"][path])
    # Same export again, this time as JUnit XML.
    result = rapi.task.export(
        [task_instance["uuid"]], "junit-xml",
        output_dest=os.path.join(
            self.res_dir, "{}.xml".format(self.case_name)))
    if "files" in result:
        for path in result["files"]:
            with open(path, "w+") as output:
                output.write(result["files"][path])
    self.stop_time = time.time()
def test_list(self):
    """Check that ``rally task list`` renders the expected table.

    Starts a tagged task via the CLI, fetches the same task straight from
    the DB through the Python API, and compares the CLI table against a
    hand-built expectation character by character.
    """
    rally = utils.Rally()
    cfg = self._get_sample_task_config()
    config = utils.TaskConfig(cfg)
    output = rally("task start --task %s --tag foo" % config.filename)
    task_uuid = self._get_task_uuid(output)
    # obtain the task object from the database, to check that CLI prints
    # everything right
    rapi = api.API(config_file=rally.config_filename)
    task = rapi.task.get(task_id=task_uuid)
    actual = rally("task list --deployment MAIN")
    # The duration column is 13 characters wide, right-padded with spaces.
    duration = "%s" % round(task["task_duration"], 3)
    duration += " " * (13 - len(duration))
    expected = (
        "+--------------------------------------+-----------------+"
        "---------------------+---------------+----------+--------+\n"
        "| UUID | Deployment name "
        "| Created at | Load duration | Status | Tag(s) |\n"
        "+--------------------------------------+-----------------+"
        "---------------------+---------------+----------+--------+\n"
        "| %(uuid)s | MAIN | %(created_at)s "
        "| %(duration)s | finished | 'foo' |\n"
        "+--------------------------------------+-----------------+"
        "---------------------+---------------+----------+--------+\n"
        % {"uuid": task_uuid,
           "created_at": task["created_at"].replace("T", " "),
           "duration": duration})
    # self.assertEqual is not used here, since it doesn't show a big diff
    # and error message become useless
    if expected != actual:
        self.fail("AssertionError: Expected output is not equal to actual."
                  "\nExpected:\"\"\"\n%s\n\"\"\""
                  "\nActual:\"\"\"\n%s\n\"\"\"" % (expected, actual))
    self.assertIn("There are no tasks",
                  rally("task list --status crashed"))
    self.assertIn("finished", rally("task list --status finished"))
    self.assertIn("Deployment name", rally("task list --all-deployments"))
    self.assertRaises(utils.RallyCliError, rally,
                      "task list --status not_existing_status")
def test_schema_is_valid(self):
    """Render and syntax-validate every task file under rally-jobs.

    Each ``*.yaml`` job file (excluding ``*_args.yaml`` companions) is
    rendered with its optional args file and run through the task engine
    in syntax-only mode; any failure reports the offending file.
    """
    discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))
    files = {
        f for f in os.listdir(self.rally_jobs_path)
        if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
            and f.endswith(".yaml") and not f.endswith("_args.yaml"))
    }
    # TODO(andreykurilin): figure out why it fails
    files -= {"rally-mos.yaml", "sahara-clusters.yaml"}
    for filename in files:
        full_path = os.path.join(self.rally_jobs_path, filename)
        with open(full_path) as task_file:
            try:
                args_file = os.path.join(
                    self.rally_jobs_path,
                    filename.rsplit(".", 1)[0] + "_args.yaml")
                args = {}
                if os.path.exists(args_file):
                    # FIX: close the args file deterministically instead
                    # of leaking the handle from a bare open().read().
                    with open(args_file) as f:
                        args = yaml.safe_load(f.read())
                    if not isinstance(args, dict):
                        raise TypeError(
                            "args file %s must be dict in yaml or json "
                            "presentation" % args_file)
                task_inst = api._Task(api.API(skip_db_check=True))
                task = task_inst.render_template(
                    task_template=task_file.read(), **args)
                task = task_cfg.TaskConfig(yaml.safe_load(task))
                task_obj = fakes.FakeTask({"uuid": full_path})
                eng = engine.TaskEngine(task, task_obj, mock.Mock())
                eng.validate(only_syntax=True)
            except Exception:
                print(traceback.format_exc())
                self.fail("Wrong task input file: %s" % full_path)
def start(requestdict):
    """Starts Rally testing.

    Args:
        requestdict: A JSON string describing the Rally task to execute;
            its first key is used as the task name and tag.

    Returns:
        None.
    """
    LOG.debug("plugin_rally.py start()")
    try:
        from rally import api as rally_api
    except ImportError:
        rally_api = None
    try:
        from rally.cli.commands import task as rally_task
    except ImportError:
        rally_task = None
    if rally_api is None or rally_task is None:
        LOG.warning('Failed to import Rally')
        return
    LOG.warning(requestdict)
    temp_dict = json.loads(requestdict)
    # BUG FIX: in Python 3 dict.keys() is a view and cannot be indexed;
    # materialize it so key[0] works.
    key = list(temp_dict.keys())
    file_path = './' + key[0] + '.json'
    LOG.warning(file_path)
    with open(file_path, 'w') as make_file:
        json.dump(temp_dict, make_file)
    rally_task.TaskCommands().start(rally_api.API(), file_path,
                                    tags=['Vitrage', key[0]])
    os.remove(file_path)
def ansible_main():
    """Ansible module entry: derive a new env spec with freshly created users.

    Reads the parent environment, creates the requested projects/users with
    its admin credentials, and stores a new spec file for them.
    """
    arg_spec = {
        "projects_count": {"type": "int", "default": 1, "required": False},
        "users_per_project": {"type": "int", "default": 1,
                              "required": False},
        "parent_env_name": {"type": "str", "required": True},
        "path_for_new_spec": {"type": "str", "required": True},
    }
    module = AnsibleModule(argument_spec=arg_spec)

    # init Rally API as it makes all work for logging and config
    # initialization
    api.API()

    params = module.params
    original_spec, admin_creds = fetch_parent_env_and_admin_creds(
        params["parent_env_name"])
    users = create_projects_and_users(
        admin_creds,
        projects_count=params["projects_count"],
        users_per_project=params["users_per_project"])
    store_a_new_spec(original_spec, users, params["path_for_new_spec"])
    module.exit_json(changed=True)
def export(self, uuid):
    """Export results of the task to the file.

    Fetches the detailed task through the Rally API, flattens each result
    entry, and (for the "json" exporter type) serializes them into
    ``self.path``.

    :param uuid: uuid of the task object
    :raises exceptions.RallyException: if the task has produced no results
        yet (only checked for the "json" type)
    :raises IOError: if the directory of ``self.path`` does not exist
    """
    rapi = api.API(config_args=sys.argv[1:], skip_db_check=True)
    task = rapi.task.get_detailed(uuid)
    LOG.debug("Got the task object by it's uuid %s. " % uuid)
    # Flatten the per-scenario payload into a stable export shape.
    task_results = [{"key": x["key"],
                     "result": x["data"]["raw"],
                     "sla": x["data"]["sla"],
                     "hooks": x["data"].get("hooks"),
                     "load_duration": x["data"]["load_duration"],
                     "full_duration": x["data"]["full_duration"]}
                    for x in task["results"]]
    if self.type == "json":
        if task_results:
            res = json.dumps(task_results, sort_keys=False, indent=4,
                             separators=(",", ": "))
            LOG.debug("Got the task %s results." % uuid)
        else:
            msg = ("Task %s results would be available when it will "
                   "finish." % uuid)
            raise exceptions.RallyException(msg)
    # NOTE(review): if self.type is not "json", `res` is never bound and
    # f.write(res) below raises NameError — presumably "json" is the only
    # type this exporter handles; confirm before extending.
    if os.path.dirname(self.path) and (not os.path.exists(os.path.dirname(
            self.path))):
        raise IOError("There is no such directory: %s"
                      % os.path.dirname(self.path))
    with open(self.path, "w") as f:
        LOG.debug("Writing task %s results to the %s." % (
            uuid, self.connection_string))
        f.write(res)
        LOG.debug("Task %s results was written to the %s." % (
            uuid, self.connection_string))
def run(argv, categories):
    """Parse the command line and dispatch to the selected CLI action.

    :param argv: full process argv (argv[0] is the program name)
    :param categories: mapping of CLI command categories used to build
        the sub-command parsers
    :returns: a process exit code (0 success, 1 generic error, 2 bad
        arguments/config) or whatever the invoked action returns
    """
    if len(argv) > 1 and argv[1] in ["version", "--version"]:
        _print_version()
        return 0
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)
    CONF.register_cli_opt(category_opt)
    help_msg = ("Additional custom plugin locations. Multiple files or "
                "directories may be specified. All plugins in the specified"
                " directories and subdirectories will be imported. Plugins in"
                " /opt/rally/plugins and ~/.rally/plugins will always be "
                "imported.")
    CONF.register_cli_opt(cfg.ListOpt("plugin-paths",
                                      default=os.environ.get(
                                          "RALLY_PLUGIN_PATHS"),
                                      help=help_msg))
    # NOTE(andreykurilin): this dirty hack is done to unblock the gates.
    # Currently, we are using oslo.config for CLI purpose (don't do this!)
    # and it makes the things too complicated.
    # To discover which CLI method can be affected by warnings and which not
    # (based on suppress_warnings decorator) we need to obtain a desired
    # CLI method. It can be done only after initialization of oslo_config
    # which is located in rally.api.API init method.
    # Initialization of rally.api.API can produce a warning (for example,
    # from pymysql), so suppressing of warnings later will not work in such
    # case (it is what actually had happened now in our CI with the latest
    # release of PyMySQL).
    #
    # https://bitbucket.org/zzzeek/sqlalchemy/issues/4120/mysql-5720-warns-on-tx_isolation
    try:
        import pymysql
        warnings.filterwarnings("ignore", category=pymysql.Warning)
    except ImportError:
        pass
    try:
        rapi = api.API(config_args=argv[1:], skip_db_check=True)
    except exceptions.RallyException as e:
        print(e)
        return(2)
    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return(0)
    fn = CONF.category.action_fn
    fn_args = [encodeutils.safe_decode(arg)
               for arg in CONF.category.action_args]
    # api instance always is the first argument
    fn_args.insert(0, rapi)
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, str):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v
    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return(1)
    try:
        validate_deprecated_args(argv, fn)
        # skip db check for db and plugin commands
        if CONF.category.name not in ("db", "plugin"):
            rapi.check_db_revision()
        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return ret
    except (IOError, TypeError, ValueError,
            exceptions.RallyException, jsonschema.ValidationError) as e:
        # Expected/user-facing errors are printed plainly; anything else
        # gets a full traceback when debug logging is on.
        known_errors = (exceptions.InvalidTaskConfig, )
        if logging.is_debug() and not isinstance(e, known_errors):
            LOG.exception("Unexpected exception in CLI")
        else:
            print(e)
        return getattr(e, "error_code", 1)
    except sqlalchemy.exc.OperationalError as e:
        if logging.is_debug():
            LOG.exception("Something went wrong with database")
        print(e)
        print("Looks like Rally can't connect to its DB.")
        print("Make sure that connection string in rally.conf is proper:")
        print(CONF.database.connection)
        return 1
    except Exception:
        print("Command failed, please check log for more info")
        raise
def setUp(self):
    """Skip under coverage and build a Rally API without the DB check."""
    super(TaskSampleTestCase, self).setUp()
    env_name = os.environ.get("TOX_ENV_NAME")
    if env_name == "cover":
        self.skipTest("There is no need to check samples in coverage job.")
    # The revision check needs a database; stub it out while constructing
    # the API object.
    with mock.patch("rally.api.API.check_db_revision"):
        self.rapi = api.API()
def test_task_samples_are_valid(self):
    """Render every task sample under samples/tasks and validate it.

    Rebuilds the MAIN deployment so it also carries a pre-created
    non-admin user, then walks the samples tree and validates each JSON
    task config against that deployment.
    """
    rally = utils.Rally(force_new_db=True)
    # In TestTaskSamples, Rally API will be called directly (not via
    # subprocess), so we need to change database options to temp database.
    db.db_options.set_defaults(db.CONF, connection="sqlite:///%s/db" %
                               rally.tmp_dir)

    # let's use pre-created users to make TestTaskSamples quicker
    rapi = api.API(config_file=rally.config_filename)
    deployment = rapi.deployment._get("MAIN")
    admin_cred = deployment.get_credentials_for("openstack")["admin"]

    ctx = {
        "admin": {
            "credential": admin_cred
        },
        "task": {
            "uuid": self.__class__.__name__
        }
    }
    user_ctx = users.UserGenerator(ctx)
    user_ctx.setup()
    self.addCleanup(user_ctx.cleanup)

    config = deployment["config"]
    os_creds = config["creds"]["openstack"]
    user = copy.copy(os_creds["admin"])
    user["username"] = ctx["users"][0]["credential"].username
    user["password"] = ctx["users"][0]["credential"].password
    if "project_name" in os_creds["admin"]:
        # it is Keystone
        user["project_name"] = ctx["users"][0]["credential"].tenant_name
    else:
        user["tenant_name"] = ctx["users"][0]["credential"].tenant_name
    config["creds"]["openstack"]["users"] = [user]

    rally("deployment destroy MAIN", write_report=False)
    deployment_cfg = os.path.join(rally.tmp_dir, "new_deployment.json")
    with open(deployment_cfg, "w") as f:
        f.write(json.dumps(config))
    rally("deployment create --name MAIN --filename %s" % deployment_cfg,
          write_report=False)

    samples_path = os.path.join(os.path.dirname(rally_module.__file__),
                                os.pardir, "samples", "tasks")
    for dirname, dirnames, filenames in os.walk(samples_path):
        # NOTE(rvasilets): Skip by suggest of boris-42 because in
        # future we don't what to maintain this dir
        if dirname.find("tempest-do-not-run-against-production") != -1:
            continue
        for filename in filenames:
            full_path = os.path.join(dirname, filename)
            # NOTE(hughsaunders): Skip non config files
            # (bug https://bugs.launchpad.net/rally/+bug/1314369)
            # BUG FIX: the original test used `not in (".json")`, which is
            # a substring test against the *string* ".json" (parentheses
            # alone do not make a tuple), so e.g. a ".js" extension was
            # wrongly treated as a config file.  Compare for equality, as
            # the sibling variants of this test already do.
            if os.path.splitext(filename)[1] != ".json":
                continue
            with open(full_path) as task_file:
                try:
                    input_task = task_file.read()
                    rendered_task = rapi.task.render_template(input_task)
                    task_config = yaml.safe_load(rendered_task)
                    rapi.task.validate("MAIN", task_config)
                except Exception as e:
                    if not self._skip(six.text_type(e)):
                        print(traceback.format_exc())
                        print("Failed on task config %s with error." %
                              full_path)
                        raise
def run(argv, categories):
    """Parse the command line and dispatch to the selected CLI action.

    :param argv: full process argv (argv[0] is the program name)
    :param categories: mapping of CLI command categories used to build
        the sub-command parsers
    :returns: a process exit code (0 success, 1 generic error, 2 bad
        arguments/config) or whatever the invoked action returns
    """
    parser = lambda subparsers: _add_command_parsers(categories, subparsers)
    category_opt = cfg.SubCommandOpt("category",
                                     title="Command categories",
                                     help="Available categories",
                                     handler=parser)
    CONF.register_cli_opt(category_opt)
    help_msg = ("Additional custom plugin locations. Multiple files or "
                "directories may be specified. All plugins in the specified"
                " directories and subdirectories will be imported. Plugins in"
                " /opt/rally/plugins and ~/.rally/plugins will always be "
                "imported.")
    CONF.register_cli_opt(
        cfg.ListOpt("plugin-paths",
                    default=os.environ.get("RALLY_PLUGIN_PATHS"),
                    help=help_msg))
    try:
        rapi = api.API(config_args=argv[1:], skip_db_check=True)
    except exceptions.RallyException as e:
        print(e)
        return (2)
    if CONF.category.name == "version":
        print(CONF.version)
        return (0)
    if CONF.category.name == "bash-completion":
        print(_generate_bash_completion_script())
        return (0)
    fn = CONF.category.action_fn
    fn_args = [
        encodeutils.safe_decode(arg) for arg in CONF.category.action_args
    ]
    # api instance always is the first argument
    fn_args.insert(0, rapi)
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, "action_kwarg_" + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = encodeutils.safe_decode(v)
        fn_kwargs[k] = v
    # call the action with the remaining arguments
    # check arguments
    try:
        validate_args(fn, *fn_args, **fn_kwargs)
    except MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print("Missing arguments:")
        for missing in e.missing:
            for arg in fn.args:
                if arg[1].get("dest", "").endswith(missing):
                    print(" " + arg[0][0])
                    break
        return (1)
    try:
        validate_deprecated_args(argv, fn)
        # skip db check for db and plugin commands
        if CONF.category.name not in ("db", "plugin"):
            rapi.check_db_revision()
        if getattr(fn, "_suppress_warnings", False):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                ret = fn(*fn_args, **fn_kwargs)
        else:
            ret = fn(*fn_args, **fn_kwargs)
        return (ret)
    except (IOError, TypeError, ValueError,
            exceptions.RallyException, jsonschema.ValidationError) as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        return 1
    except sqlalchemy.exc.OperationalError as e:
        if logging.is_debug():
            LOG.exception(e)
        print(e)
        print("Looks like Rally can't connect to its DB.")
        print("Make sure that connection string in rally.conf is proper:")
        print(CONF.database.connection)
        return 1
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
def test_task_samples_are_valid(self):
    """Validate every task sample concurrently against MAIN.

    Rebuilds the MAIN deployment with a pre-created non-admin user,
    shares one credential cache across validations, and runs a
    publisher/consumer broker over all JSON samples; failures are
    collected and reported at once.
    """
    rally = utils.Rally(force_new_db=True)
    # In TestTaskSamples, Rally API will be called directly (not via
    # subprocess), so we need to change database options to temp database.
    db.db_options.set_defaults(db.CONF, connection="sqlite:///%s/db" %
                               rally.tmp_dir)
    # let's use pre-created users to make TestTaskSamples quicker
    rapi = api.API(config_file=rally.config_filename)
    deployment = rapi.deployment._get("MAIN")
    admin_cred = deployment.get_credentials_for("openstack")["admin"]
    ctx = {
        "admin": {
            "credential": admin_cred
        },
        "task": {
            "uuid": self.__class__.__name__
        }
    }
    user_ctx = users.UserGenerator(ctx)
    user_ctx.setup()
    self.addCleanup(user_ctx.cleanup)
    config = deployment["config"]
    os_creds = config["creds"]["openstack"]
    user = copy.copy(os_creds["admin"])
    user["username"] = ctx["users"][0]["credential"].username
    user["password"] = ctx["users"][0]["credential"].password
    if "project_name" in os_creds["admin"]:
        # it is Keystone
        user["project_name"] = ctx["users"][0]["credential"].tenant_name
    else:
        user["tenant_name"] = ctx["users"][0]["credential"].tenant_name
    config["creds"]["openstack"]["users"] = [user]
    rally("deployment destroy MAIN", write_report=False)
    deployment_cfg = os.path.join(rally.tmp_dir, "new_deployment.json")
    with open(deployment_cfg, "w") as f:
        f.write(json.dumps(config))
    rally("deployment create --name MAIN --filename %s" % deployment_cfg,
          write_report=False)
    # NOTE(andreykurilin): mock building credential to share one cache of
    # clients(it will allow to avoid hundreds of redundant
    # authentications) between validations of different samples
    deployment = rapi.deployment._get("MAIN")
    original_get_credentials_for = deployment.get_credentials_for
    creds_cache = {}

    def get_credentials_for(namespace):
        # Memoized wrapper around the original credential factory.
        if namespace not in creds_cache:
            creds_cache[namespace] = original_get_credentials_for(
                namespace)
        return creds_cache[namespace]

    deployment.get_credentials_for = get_credentials_for
    deployment_patcher = mock.patch("rally.api.objects.Deployment.get")
    m_deployment = deployment_patcher.start()
    m_deployment.return_value = deployment
    self.addCleanup(deployment_patcher.stop)
    # store all failures and print them at once
    failed_samples = {}

    def publisher(queue):
        """List all samples and render task configs"""
        samples_path = os.path.join(os.path.dirname(rally_module.__file__),
                                    os.pardir, "samples", "tasks")
        for dirname, dirnames, filenames in os.walk(samples_path):
            # NOTE(rvasilets): Skip by suggest of boris-42 because in
            # future we don't what to maintain this dir
            if dirname.find("tempest-do-not-run-against-production") != -1:
                continue
            for filename in filenames:
                full_path = os.path.join(dirname, filename)
                # NOTE(hughsaunders): Skip non config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                if os.path.splitext(filename)[1] != ".json":
                    continue
                with open(full_path) as task_file:
                    input_task = task_file.read()
                    rendered_task = rapi.task.render_template(
                        task_template=input_task)
                    queue.append((full_path, rendered_task))

    def consumer(_cache, sample):
        """Validate one sample"""
        full_path, rendered_task = sample
        task_config = yaml.safe_load(rendered_task)
        try:
            rapi.task.validate(deployment="MAIN", config=task_config)
        except Exception as e:
            if not self._skip(six.text_type(e)):
                failed_samples[full_path] = traceback.format_exc()

    broker.run(publisher, consumer, self.NUMBER_OF_THREADS)
    if failed_samples:
        self.fail("Validation failed on the one or several samples. "
                  "See details below:\n%s" % "".join([
                      "\n======\n%s\n\n%s\n" % (k, v)
                      for k, v in failed_samples.items()
                  ]))
def main():
    """Print the number of stored Rally tasks and return exit code 0."""
    client = rapi.API(config_args=sys.argv[1:])
    tasks = client.task.list()
    print(len(tasks))
    return 0
def test_task_samples_are_valid(self):
    """Validate every rally-openstack task sample in worker threads.

    Rebuilds the MAIN deployment with a pre-created non-admin user, then
    runs a publisher/consumer broker over all JSON samples; failures are
    collected and reported at once.
    """
    from rally_openstack.contexts.keystone import users
    rally = utils.Rally(force_new_db=True)
    # let's use pre-created users to make TestTaskSamples quicker
    rapi = api.API(config_file=rally.config_filename)
    deployment = rapi.deployment._get("MAIN")
    admin_cred = deployment.get_credentials_for("openstack")["admin"]
    ctx = {
        "env": {
            "platforms": {
                "openstack": {
                    "admin": admin_cred.to_dict(),
                    "users": []}}},
        "task": {"uuid": self.__class__.__name__,
                 "deployment_uuid": deployment["uuid"]}}
    user_ctx = users.UserGenerator(ctx)
    user_ctx.setup()
    self.addCleanup(user_ctx.cleanup)
    os_creds = deployment["config"]["openstack"]
    user = copy.copy(os_creds["admin"])
    user["username"] = ctx["users"][0]["credential"].username
    user["password"] = ctx["users"][0]["credential"].password
    if "project_name" in os_creds["admin"]:
        # it is Keystone
        user["project_name"] = ctx["users"][0]["credential"].tenant_name
    else:
        user["tenant_name"] = ctx["users"][0]["credential"].tenant_name
    os_creds["users"] = [user]
    rally("deployment destroy MAIN", write_report=False)
    deployment_cfg = os.path.join(rally.tmp_dir, "new_deployment.json")
    with open(deployment_cfg, "w") as f:
        f.write(json.dumps({"openstack": os_creds}))
    rally("deployment create --name MAIN --filename %s" % deployment_cfg,
          write_report=False)
    # store all failures and print them at once
    failed_samples = {}

    def publisher(queue):
        """List all samples and render task configs"""
        samples_path = os.path.join(
            os.path.dirname(rally_openstack_module.__file__),
            os.pardir, "samples", "tasks")
        for dirname, dirnames, filenames in os.walk(samples_path):
            # NOTE(rvasilets): Skip by suggest of boris-42 because in
            # future we don't what to maintain this dir
            if dirname.find("tempest-do-not-run-against-production") != -1:
                continue
            for filename in filenames:
                full_path = os.path.join(dirname, filename)
                # NOTE(hughsaunders): Skip non config files
                # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                if os.path.splitext(filename)[1] != ".json":
                    continue
                with open(full_path) as task_file:
                    input_task = task_file.read()
                    rendered_task = rapi.task.render_template(
                        task_template=input_task)
                    queue.append((full_path, rendered_task))

    def consumer(_cache, sample):
        """Validate one sample"""
        full_path, rendered_task = sample
        task_config = yaml.safe_load(rendered_task)
        try:
            rapi.task.validate(deployment="MAIN", config=task_config)
        except Exception as e:
            if not self._skip(six.text_type(e)):
                failed_samples[full_path] = traceback.format_exc()

    broker.run(publisher, consumer, self.NUMBER_OF_THREADS)
    if failed_samples:
        self.fail("Validation failed on the one or several samples. "
                  "See details below:\n%s"
                  % "".join(["\n======\n%s\n\n%s\n" % (k, v)
                             for k, v in failed_samples.items()]))
def setUp(self):
    """Prepare a Rally API handle that bypasses the DB revision check."""
    super(TaskSampleTestCase, self).setUp()
    # skip_db_check=True: sample validation needs no database.
    self.rapi = api.API(skip_db_check=True)