def resolve_ref(
    self,
    ref: ResolveType,
    base_url: Optional[str] = None,
    checklinks: bool = True,
    strict_foreign_properties: bool = False,
    content_types: Optional[List[str]] = None,  # Expected content-types
) -> ResolvedRefType:
    lref = ref
    obj = None  # type: Optional[CommentedMap]
    resolved_obj = None  # type: ResolveType
    inc = False
    mixin = None  # type: Optional[MutableMapping[str, str]]

    if not base_url:
        base_url = file_uri(os.getcwd()) + "/"

    sl = SourceLine(None, None)
    # If `ref` is a dict, look for special directives.
    if isinstance(lref, CommentedMap):
        obj = lref
        if "$import" in obj:
            sl = SourceLine(obj, "$import")
            if len(obj) == 1:
                lref = obj["$import"]
                obj = None
            else:
                raise ValidationException(
                    f"'$import' must be the only field in {obj}", sl
                )
        elif "$include" in obj:
            sl = SourceLine(obj, "$include")
            if len(obj) == 1:
                lref = obj["$include"]
                inc = True
                obj = None
            else:
                raise ValidationException(
                    f"'$include' must be the only field in {obj}", sl
                )
        elif "$mixin" in obj:
            sl = SourceLine(obj, "$mixin")
            lref = obj["$mixin"]
            mixin = obj
            obj = None
        else:
            lref = None
            for identifier in self.identifiers:
                if identifier in obj:
                    lref = obj[identifier]
                    break
            if not lref:
                raise ValidationException(
                    "Object `{}` does not have identifier field in {}".format(
                        obj, self.identifiers
                    ),
                    sl,
                )

    if not isinstance(lref, str):
        raise ValidationException(
            f"Expected CommentedMap or string, got {type(lref)}: `{lref}`"
        )

    if isinstance(lref, str) and os.sep == "\\":
        # Convert Windows path separator in ref
        lref = lref.replace("\\", "/")

    url = self.expand_url(lref, base_url, scoped_id=(obj is not None))
    # Has this reference been loaded already?
    if url in self.idx and (not mixin):
        resolved_obj = self.idx[url]
        if isinstance(resolved_obj, MutableMapping):
            metadata = self.idx.get(
                urllib.parse.urldefrag(url)[0], CommentedMap()
            )  # type: Union[CommentedMap, CommentedSeq, str, None]
            if isinstance(metadata, MutableMapping):
                if "$graph" in resolved_obj:
                    metadata = _copy_dict_without_key(resolved_obj, "$graph")
                    return resolved_obj["$graph"], metadata
                else:
                    return resolved_obj, metadata
            else:
                raise ValidationException(
                    "Expected CommentedMap, got {}: `{}`".format(
                        type(metadata), metadata
                    )
                )
        elif isinstance(resolved_obj, MutableSequence):
            metadata = self.idx.get(urllib.parse.urldefrag(url)[0], CommentedMap())
            if isinstance(metadata, MutableMapping):
                return resolved_obj, metadata
            else:
                return resolved_obj, CommentedMap()
        elif isinstance(resolved_obj, str):
            return resolved_obj, CommentedMap()
        else:
            raise ValidationException(
                "Expected MutableMapping or MutableSequence, got {}: `{}`".format(
                    type(resolved_obj), resolved_obj
                )
            )

    # "$include" directive means load raw text
    if inc:
        return self.fetch_text(url), CommentedMap()

    doc = None
    if isinstance(obj, MutableMapping):
        for identifier in self.identifiers:
            obj[identifier] = url
        doc_url = url
    else:
        # Load structured document
        doc_url, frg = urllib.parse.urldefrag(url)
        if doc_url in self.idx and (not mixin):
            # If the base document is in the index, it was already loaded,
            # so if we didn't find the reference earlier then it must not
            # exist.
            raise ValidationException(
                f"Reference `#{frg}` not found in file `{doc_url}`.", sl
            )
        doc = self.fetch(
            doc_url, inject_ids=(not mixin), content_types=content_types
        )

    # Recursively expand urls and resolve directives
    if bool(mixin):
        doc = copy.deepcopy(doc)
        if isinstance(doc, CommentedMap) and mixin is not None:
            doc.update(mixin)
            del doc["$mixin"]
        resolved_obj, metadata = self.resolve_all(
            doc,
            base_url,
            file_base=doc_url,
            checklinks=checklinks,
            strict_foreign_properties=strict_foreign_properties,
        )
    else:
        resolved_obj, metadata = self.resolve_all(
            doc or obj,
            doc_url,
            checklinks=checklinks,
            strict_foreign_properties=strict_foreign_properties,
        )

    # Requested reference should be in the index now, otherwise it's a bad
    # reference
    if not bool(mixin):
        if url in self.idx:
            resolved_obj = self.idx[url]
        else:
            raise ValidationException(
                "Reference `{}` is not in the index. Index contains: {}".format(
                    url, ", ".join(self.idx)
                )
            )

    if isinstance(resolved_obj, CommentedMap):
        if "$graph" in resolved_obj:
            metadata = _copy_dict_without_key(resolved_obj, "$graph")
            return resolved_obj["$graph"], metadata
        else:
            return resolved_obj, metadata
    else:
        return resolved_obj, metadata
def documentation(self):
    """
    Generate the DOCUMENTATION string for use in an Ansible module.

    :return: string containing formatted YAML
    """
    doc_string = CommentedMap()  # Gives us an OrderedDict that ruamel.yaml supports
    doc_string['module'] = self.module_name
    doc_string['short_description'] = "{} {}".format(
        self.project_name, self.helper.base_model_name)

    if not self.helper.base_model_name_snake.endswith('_list'):
        # module allows CRUD operations on the object
        doc_string['description'] = [
            "Manage the lifecycle of a {} object. Supports check mode, and "
            "attempts to be idempotent.".format(self.helper.base_model_name_snake)
        ]
    else:
        # module allows read-only operations
        base_name = self.helper.base_model_name_snake.replace('_list', '')
        if not base_name.endswith('s'):
            base_name += 's'
        doc_string['description'] = [
            "Retrieve a list of {}. List operations provide a snapshot read of the "
            "underlying objects, returning a resource_version representing a consistent "
            "version of the listed objects.".format(base_name)
        ]

    doc_string['version_added'] = ANSIBLE_VERSION_ADDED
    doc_string['author'] = "OpenShift (@openshift)"
    doc_string['options'] = CommentedMap()
    doc_string['requirements'] = [
        "{} == {}".format(self.required_library['name'],
                          self.required_library['version'])
    ]

    def add_option(pname, pdict, descr=None):
        """
        Add a new parameter option to doc_string['options'].

        :param pname: name of the option
        :param pdict: dict of option attributes
        :param descr: optional list of description strings
        :return: None
        """
        doc_string['options'][pname] = CommentedMap()
        if descr:
            doc_string['options'][pname]['description'] = descr
        elif pdict.get('description'):
            doc_string['options'][pname]['description'] = pdict['description']
        if pdict.get('required'):
            doc_string['options'][pname]['required'] = True
        if pdict.get('default', None) is not None:
            doc_string['options'][pname]['default'] = pdict['default']
        if pdict.get('choices'):
            if isinstance(pdict.get('choices'), dict):
                doc_string['options'][pname]['choices'] = [
                    value for key, value in pdict['choices'].items()
                ]
            else:
                doc_string['options'][pname]['choices'] = pdict['choices']
        if pdict.get('aliases'):
            doc_string['options'][pname]['aliases'] = pdict['aliases']
        if pdict.get('type') and pdict.get('type') != 'str':
            doc_string['options'][pname]['type'] = pdict['type']

    for raw_param_name in sorted([x for x, _ in self.helper.argspec.items()]):
        param_name = PYTHON_KEYWORD_MAPPING.get(raw_param_name, raw_param_name)
        param_dict = self.helper.argspec[raw_param_name]
        if param_name.endswith('params'):
            descr = [self.__params_descr(param_name)]
            add_option(param_name, param_dict, descr=descr)
        elif param_dict.get('property_path'):
            # parameter comes from the model
            model_class = self.helper.model
            for path in param_dict['property_path']:
                path = PYTHON_KEYWORD_MAPPING.get(path, path)
                kind = model_class.swagger_types[path]
                if kind in ('str', 'bool', 'int', 'datetime', 'object', 'float') or \
                        kind.startswith('dict(') or \
                        kind.startswith('list['):
                    docs = inspect.getdoc(getattr(model_class, path))
                    string_list = self.__doc_clean_up(docs.split('\n'))
                    add_option(param_name, param_dict, descr=string_list)
                else:
                    model_class = self.get_model_class(kind)
        elif param_dict.get('description'):
            # parameter is hard-coded in openshift.helper
            add_option(param_name, param_dict)

    return ruamel.yaml.dump(doc_string,
                            Dumper=ruamel.yaml.RoundTripDumper,
                            width=80)
def _ensure_services_key_exists(self):
    if 'services' not in self.config:
        self.config['services'] = CommentedMap([])
# Reading the schema is super slow - cache for the session
@pytest.fixture(scope="session")
def schema_ext11() -> Generator[Names, None, None]:
    with pkg_resources.resource_stream("cwltool", "extensions-v1.1.yml") as res:
        ext11 = res.read().decode("utf-8")
    cwltool.process.use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
    schema = cwltool.process.get_schema("v1.1")[1]
    assert isinstance(schema, Names)
    yield schema


mpiReq = CommentedMap({"class": MPIRequirementName, "processes": 1})
containerReq = CommentedMap({"class": "DockerRequirement"})
basetool = CommentedMap(
    {"cwlVersion": "v1.1", "inputs": CommentedSeq(), "outputs": CommentedSeq()}
)


def mk_tool(
    schema: Names,
    opts: List[str],
    reqs: Optional[List[CommentedMap]] = None,
    hints: Optional[List[CommentedMap]] = None,
) -> Tuple[LoadingContext, RuntimeContext, CommentedMap]:
    tool = basetool.copy()
def get_root_node(self):
    """Return the root node for serialization."""
    return CommentedMap()
def _init_job(self, joborder, **kwargs):
    # type: (Dict[Text, Text], **Any) -> Builder
    """
    kwargs:

    eval_timeout: javascript evaluation timeout
    use_container: do/don't use Docker when DockerRequirement hint provided
    make_fs_access: make an FsAccess() object with given basedir
    basedir: basedir for FsAccess
    docker_outdir: output directory inside docker for this job
    docker_tmpdir: tmpdir inside docker for this job
    docker_stagedir: stagedir inside docker for this job
    outdir: outdir on host for this job
    tmpdir: tmpdir on host for this job
    stagedir: stagedir on host for this job
    select_resources: callback to select compute resources
    """
    builder = Builder()
    builder.job = cast(Dict[Text, Union[Dict[Text, Any], List, Text]],
                       copy.deepcopy(joborder))

    # Validate job order
    try:
        fillInDefaults(self.tool[u"inputs"], builder.job)
        normalizeFilesDirs(builder.job)
        validate.validate_ex(
            self.names.get_name("input_record_schema", ""), builder.job)
    except (validate.ValidationException, WorkflowException) as e:
        raise WorkflowException("Invalid job input record:\n" + Text(e))

    builder.files = []
    builder.bindings = CommentedSeq()
    builder.schemaDefs = self.schemaDefs
    builder.names = self.names
    builder.requirements = self.requirements
    builder.hints = self.hints
    builder.resources = {}
    builder.timeout = kwargs.get("eval_timeout")
    builder.debug = kwargs.get("debug")

    dockerReq, is_req = self.get_requirement("DockerRequirement")

    if dockerReq and is_req and not kwargs.get("use_container"):
        raise WorkflowException(
            "Document has DockerRequirement under 'requirements' but "
            "use_container is false. DockerRequirement must be under "
            "'hints' or use_container must be true.")

    builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
    builder.fs_access = builder.make_fs_access(kwargs["basedir"])

    loadListingReq, _ = self.get_requirement("LoadListingRequirement")
    if loadListingReq:
        builder.loadListing = loadListingReq.get("loadListing")

    if dockerReq and kwargs.get("use_container"):
        builder.outdir = builder.fs_access.realpath(
            dockerReq.get("dockerOutputDirectory") or
            kwargs.get("docker_outdir") or "/var/spool/cwl")
        builder.tmpdir = builder.fs_access.realpath(
            kwargs.get("docker_tmpdir") or "/tmp")
        builder.stagedir = builder.fs_access.realpath(
            kwargs.get("docker_stagedir") or "/var/lib/cwl")
    else:
        builder.outdir = builder.fs_access.realpath(
            kwargs.get("outdir") or tempfile.mkdtemp())
        builder.tmpdir = builder.fs_access.realpath(
            kwargs.get("tmpdir") or tempfile.mkdtemp())
        builder.stagedir = builder.fs_access.realpath(
            kwargs.get("stagedir") or tempfile.mkdtemp())

    if self.formatgraph:
        for i in self.tool["inputs"]:
            d = shortname(i["id"])
            if d in builder.job and i.get("format"):
                checkFormat(builder.job[d], builder.do_eval(i["format"]),
                            self.formatgraph)

    builder.bindings.extend(
        builder.bind_input(self.inputs_record_schema, builder.job))

    if self.tool.get("baseCommand"):
        for n, b in enumerate(aslist(self.tool["baseCommand"])):
            builder.bindings.append({
                "position": [-1000000, n],
                "datum": b
            })

    if self.tool.get("arguments"):
        for i, a in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            fn = self.tool["arguments"].lc.filename
            builder.bindings.lc.add_kv_line_col(len(builder.bindings), lc)
            if isinstance(a, dict):
                a = copy.copy(a)
                if a.get("position"):
                    a["position"] = [a["position"], i]
                else:
                    a["position"] = [0, i]
                builder.bindings.append(a)
            elif ("$(" in a) or ("${" in a):
                cm = CommentedMap((("position", [0, i]), ("valueFrom", a)))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = fn
                builder.bindings.append(cm)
            else:
                cm = CommentedMap((("position", [0, i]), ("datum", a)))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = fn
                builder.bindings.append(cm)

    builder.bindings.sort(key=lambda a: a["position"])

    builder.resources = self.evalResources(builder, kwargs)

    return builder
def __init__(
    self,
    toolpath_object: CommentedMap,
    pos: int,
    loadingContext: LoadingContext,
    parentworkflowProv: Optional[ProvenanceProfile] = None,
) -> None:
    """Initialize this WorkflowStep."""
    debug = loadingContext.debug
    if "id" in toolpath_object:
        self.id = toolpath_object["id"]
    else:
        self.id = "#step" + str(pos)

    loadingContext = loadingContext.copy()

    parent_requirements = copy.deepcopy(
        getdefault(loadingContext.requirements, []))
    loadingContext.requirements = copy.deepcopy(
        toolpath_object.get("requirements", []))
    assert loadingContext.requirements is not None  # nosec
    for parent_req in parent_requirements:
        found_in_step = False
        for step_req in loadingContext.requirements:
            if parent_req["class"] == step_req["class"]:
                found_in_step = True
                break
        if not found_in_step:
            loadingContext.requirements.append(parent_req)
    loadingContext.requirements.extend(
        cast(
            List[CWLObjectType],
            get_overrides(getdefault(loadingContext.overrides_list, []),
                          self.id).get("requirements", []),
        ))

    hints = copy.deepcopy(getdefault(loadingContext.hints, []))
    hints.extend(toolpath_object.get("hints", []))
    loadingContext.hints = hints

    try:
        if isinstance(toolpath_object["run"], CommentedMap):
            self.embedded_tool = loadingContext.construct_tool_object(
                toolpath_object["run"], loadingContext)  # type: Process
        else:
            loadingContext.metadata = {}
            self.embedded_tool = load_tool(toolpath_object["run"],
                                           loadingContext)
    except ValidationException as vexc:
        if loadingContext.debug:
            _logger.exception("Validation exception")
        raise WorkflowException(
            "Tool definition %s failed validation:\n%s" %
            (toolpath_object["run"], indent(str(vexc)))) from vexc

    validation_errors = []
    self.tool = toolpath_object = copy.deepcopy(toolpath_object)
    bound = set()

    if self.embedded_tool.get_requirement("SchemaDefRequirement")[0]:
        if "requirements" not in toolpath_object:
            toolpath_object["requirements"] = []
        toolpath_object["requirements"].append(
            self.embedded_tool.get_requirement("SchemaDefRequirement")[0])

    for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
        toolpath_object[toolfield] = []
        for index, step_entry in enumerate(toolpath_object[stepfield]):
            if isinstance(step_entry, str):
                param = CommentedMap()  # type: CommentedMap
                inputid = step_entry
            else:
                param = CommentedMap(step_entry.items())
                inputid = step_entry["id"]

            shortinputid = shortname(inputid)
            found = False
            for tool_entry in self.embedded_tool.tool[toolfield]:
                frag = shortname(tool_entry["id"])
                if frag == shortinputid:
                    # in the case that the step has a default for a parameter,
                    # we do not want the default of the tool to override it
                    step_default = None
                    if "default" in param and "default" in tool_entry:
                        step_default = param["default"]
                    param.update(tool_entry)
                    param["_tool_entry"] = tool_entry
                    if step_default is not None:
                        param["default"] = step_default
                    found = True
                    bound.add(frag)
                    break
            if not found:
                if stepfield == "in":
                    param["type"] = "Any"
                    param["used_by_step"] = used_by_step(self.tool, shortinputid)
                    param["not_connected"] = True
                else:
                    if isinstance(step_entry, Mapping):
                        step_entry_name = step_entry["id"]
                    else:
                        step_entry_name = step_entry
                    validation_errors.append(
                        SourceLine(self.tool["out"], index,
                                   include_traceback=debug).makeError(
                            "Workflow step output '%s' does not correspond to"
                            % shortname(step_entry_name))
                        + "\n" + SourceLine(
                            self.embedded_tool.tool,
                            "outputs",
                            include_traceback=debug,
                        ).makeError("  tool output (expected '%s')" % (
                            "', '".join([
                                shortname(tool_entry["id"]) for tool_entry in
                                self.embedded_tool.tool["outputs"]
                            ]))))
            param["id"] = inputid
            param.lc.line = toolpath_object[stepfield].lc.data[index][0]
            param.lc.col = toolpath_object[stepfield].lc.data[index][1]
            param.lc.filename = toolpath_object[stepfield].lc.filename
            toolpath_object[toolfield].append(param)

    missing_values = []
    for _, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
        if shortname(tool_entry["id"]) not in bound:
            if "null" not in tool_entry["type"] and "default" not in tool_entry:
                missing_values.append(shortname(tool_entry["id"]))

    if missing_values:
        validation_errors.append(
            SourceLine(self.tool, "in", include_traceback=debug).makeError(
                "Step is missing required parameter%s '%s'" % (
                    "s" if len(missing_values) > 1 else "",
                    "', '".join(missing_values),
                )))

    if validation_errors:
        raise ValidationException("\n".join(validation_errors))

    super().__init__(toolpath_object, loadingContext)

    if self.embedded_tool.tool["class"] == "Workflow":
        (feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
        if not feature:
            raise WorkflowException(
                "Workflow contains embedded workflow but "
                "SubworkflowFeatureRequirement not in requirements")

    if "scatter" in self.tool:
        (feature, _) = self.get_requirement("ScatterFeatureRequirement")
        if not feature:
            raise WorkflowException(
                "Workflow contains scatter but ScatterFeatureRequirement "
                "not in requirements")

        inputparms = copy.deepcopy(self.tool["inputs"])
        outputparms = copy.deepcopy(self.tool["outputs"])
        scatter = aslist(self.tool["scatter"])

        method = self.tool.get("scatterMethod")
        if method is None and len(scatter) != 1:
            raise ValidationException(
                "Must specify scatterMethod when scattering over multiple inputs"
            )

        inp_map = {i["id"]: i for i in inputparms}
        for inp in scatter:
            if inp not in inp_map:
                raise SourceLine(
                    self.tool, "scatter", ValidationException, debug).makeError(
                    "Scatter parameter '%s' does not correspond to "
                    "an input parameter of this step, expecting '%s'" % (
                        shortname(inp),
                        "', '".join(shortname(k) for k in inp_map.keys()),
                    ))

            inp_map[inp]["type"] = {
                "type": "array",
                "items": inp_map[inp]["type"]
            }

        if self.tool.get("scatterMethod") == "nested_crossproduct":
            nesting = len(scatter)
        else:
            nesting = 1

        for _ in range(0, nesting):
            for oparam in outputparms:
                oparam["type"] = {"type": "array", "items": oparam["type"]}

        self.tool["inputs"] = inputparms
        self.tool["outputs"] = outputparms

    self.prov_obj = None  # type: Optional[ProvenanceProfile]
    if loadingContext.research_obj is not None:
        self.prov_obj = parentworkflowProv
        if self.embedded_tool.tool["class"] == "Workflow":
            self.parent_wf = self.embedded_tool.parent_wf
        else:
            self.parent_wf = self.prov_obj
def v1_0to1_1(doc: Any, loader: Loader, baseuri: str) -> Tuple[Any, str]:
    # pylint: disable=unused-argument
    """Public updater for v1.0 to v1.1."""
    doc = copy.deepcopy(doc)

    rewrite = {
        "http://commonwl.org/cwltool#WorkReuse": "WorkReuse",
        "http://arvados.org/cwl#ReuseRequirement": "WorkReuse",
        "http://commonwl.org/cwltool#TimeLimit": "ToolTimeLimit",
        "http://commonwl.org/cwltool#NetworkAccess": "NetworkAccess",
        "http://commonwl.org/cwltool#InplaceUpdateRequirement": "InplaceUpdateRequirement",
        "http://commonwl.org/cwltool#LoadListingRequirement": "LoadListingRequirement",
    }

    def rewrite_requirements(
            t: MutableMapping[str, Union[str, Dict[str, Any]]]) -> None:
        if "requirements" in t:
            for r in t["requirements"]:
                if isinstance(r, MutableMapping):
                    if r["class"] in rewrite:
                        r["class"] = rewrite[r["class"]]
                else:
                    raise validate.ValidationException(
                        "requirements entries must be dictionaries: {} {}.".format(
                            type(r), r))
        if "hints" in t:
            for r in t["hints"]:
                if isinstance(r, MutableMapping):
                    if r["class"] in rewrite:
                        r["class"] = rewrite[r["class"]]
                else:
                    raise validate.ValidationException(
                        "hints entries must be dictionaries: {} {}.".format(
                            type(r), r))
        if "steps" in t:
            for s in t["steps"]:
                if isinstance(s, MutableMapping):
                    rewrite_requirements(s)
                else:
                    raise validate.ValidationException(
                        "steps entries must be dictionaries: {} {}.".format(
                            type(s), s))

    def update_secondaryFiles(t, top=False):
        # type: (Any, bool) -> Union[MutableSequence[MutableMapping[str, str]], MutableMapping[str, str]]
        if isinstance(t, CommentedSeq):
            new_seq = copy.deepcopy(t)
            for index, entry in enumerate(t):
                new_seq[index] = update_secondaryFiles(entry)
            return new_seq
        elif isinstance(t, MutableSequence):
            return CommentedSeq([update_secondaryFiles(p) for p in t])
        elif isinstance(t, MutableMapping):
            return t
        elif top:
            return CommentedSeq([CommentedMap([("pattern", t)])])
        else:
            return CommentedMap([("pattern", t)])

    def fix_inputBinding(t):
        # type: (Dict[str, Any]) -> None
        for i in t["inputs"]:
            if "inputBinding" in i:
                ib = i["inputBinding"]
                for k in list(ib.keys()):
                    if k != "loadContents":
                        _logger.warning(
                            SourceLine(ib, k).makeError(
                                "Will ignore field '{}' which is not valid in {} "
                                "inputBinding".format(k, t["class"])))
                        del ib[k]

    visit_class(doc, ("CommandLineTool", "Workflow"), rewrite_requirements)
    visit_class(doc, ("ExpressionTool", "Workflow"), fix_inputBinding)
    visit_field(doc, "secondaryFiles", partial(update_secondaryFiles, top=True))

    upd = doc
    if isinstance(upd, MutableMapping) and "$graph" in upd:
        upd = upd["$graph"]
    for proc in aslist(upd):
        proc.setdefault("hints", CommentedSeq())
        proc["hints"].insert(
            0, CommentedMap([("class", "NetworkAccess"),
                             ("networkAccess", True)]))
        proc["hints"].insert(
            0,
            CommentedMap([("class", "LoadListingRequirement"),
                          ("loadListing", "deep_listing")]),
        )
        if "cwlVersion" in proc:
            del proc["cwlVersion"]

    return (doc, "v1.1")
def _init_job(self, joborder, **kwargs):
    # type: (Dict[Text, Text], **Any) -> Builder
    """
    kwargs:

    eval_timeout: javascript evaluation timeout
    use_container: do/don't use Docker when DockerRequirement hint provided
    make_fs_access: make an FsAccess() object with given basedir
    basedir: basedir for FsAccess
    docker_outdir: output directory inside docker for this job
    docker_tmpdir: tmpdir inside docker for this job
    docker_stagedir: stagedir inside docker for this job
    outdir: outdir on host for this job
    tmpdir: tmpdir on host for this job
    stagedir: stagedir on host for this job
    select_resources: callback to select compute resources
    debug: enable debugging output
    js_console: enable javascript console output
    """
    builder = Builder()
    builder.job = cast(Dict[Text, Union[Dict[Text, Any], List, Text]],
                       copy.deepcopy(joborder))

    # Validate job order
    try:
        fillInDefaults(self.tool[u"inputs"], builder.job)
        normalizeFilesDirs(builder.job)
        validate.validate_ex(self.names.get_name("input_record_schema", ""),
                             builder.job,
                             strict=False,
                             logger=_logger_validation_warnings)
    except (validate.ValidationException, WorkflowException) as e:
        raise WorkflowException("Invalid job input record:\n" + Text(e))

    builder.files = []
    builder.bindings = CommentedSeq()
    builder.schemaDefs = self.schemaDefs
    builder.names = self.names
    builder.requirements = self.requirements
    builder.hints = self.hints
    builder.resources = {}
    builder.timeout = kwargs.get("eval_timeout")
    builder.debug = kwargs.get("debug")
    builder.js_console = kwargs.get("js_console")
    builder.mutation_manager = kwargs.get("mutation_manager")
    builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
    builder.fs_access = builder.make_fs_access(kwargs["basedir"])
    builder.force_docker_pull = kwargs.get("force_docker_pull")

    loadListingReq, _ = self.get_requirement(
        "http://commonwl.org/cwltool#LoadListingRequirement")
    if loadListingReq:
        builder.loadListing = loadListingReq.get("loadListing")

    dockerReq, is_req = self.get_requirement("DockerRequirement")
    defaultDocker = None

    if dockerReq is None and "default_container" in kwargs:
        defaultDocker = kwargs["default_container"]

    if (dockerReq or defaultDocker) and kwargs.get("use_container"):
        if dockerReq:
            # Check if docker output directory is absolute
            if dockerReq.get("dockerOutputDirectory") and dockerReq.get(
                    "dockerOutputDirectory").startswith('/'):
                builder.outdir = dockerReq.get("dockerOutputDirectory")
            else:
                builder.outdir = builder.fs_access.docker_compatible_realpath(
                    dockerReq.get("dockerOutputDirectory") or
                    kwargs.get("docker_outdir") or "/var/spool/cwl")
        elif defaultDocker:
            builder.outdir = builder.fs_access.docker_compatible_realpath(
                kwargs.get("docker_outdir") or "/var/spool/cwl")
        builder.tmpdir = builder.fs_access.docker_compatible_realpath(
            kwargs.get("docker_tmpdir") or "/tmp")
        builder.stagedir = builder.fs_access.docker_compatible_realpath(
            kwargs.get("docker_stagedir") or "/var/lib/cwl")
    else:
        builder.outdir = builder.fs_access.realpath(
            kwargs.get("outdir") or tempfile.mkdtemp())
        builder.tmpdir = builder.fs_access.realpath(
            kwargs.get("tmpdir") or tempfile.mkdtemp())
        builder.stagedir = builder.fs_access.realpath(
            kwargs.get("stagedir") or tempfile.mkdtemp())

    if self.formatgraph:
        for i in self.tool["inputs"]:
            d = shortname(i["id"])
            if d in builder.job and i.get("format"):
                checkFormat(builder.job[d], builder.do_eval(i["format"]),
                            self.formatgraph)

    builder.bindings.extend(
        builder.bind_input(self.inputs_record_schema, builder.job))

    if self.tool.get("baseCommand"):
        for n, b in enumerate(aslist(self.tool["baseCommand"])):
            builder.bindings.append({
                "position": [-1000000, n],
                "datum": b
            })

    if self.tool.get("arguments"):
        for i, a in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            fn = self.tool["arguments"].lc.filename
            builder.bindings.lc.add_kv_line_col(len(builder.bindings), lc)
            if isinstance(a, dict):
                a = copy.copy(a)
                if a.get("position"):
                    a["position"] = [a["position"], i]
                else:
                    a["position"] = [0, i]
                builder.bindings.append(a)
            elif ("$(" in a) or ("${" in a):
                cm = CommentedMap((("position", [0, i]), ("valueFrom", a)))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = fn
                builder.bindings.append(cm)
            else:
                cm = CommentedMap((("position", [0, i]), ("datum", a)))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = fn
                builder.bindings.append(cm)

    # use python2 like sorting of heterogeneous lists
    # (containing str and int types),
    # TODO: unify for both runtime
    if six.PY3:
        key = cmp_to_key(cmp_like_py2)
    else:  # PY2
        key = lambda dict: dict["position"]
    builder.bindings.sort(key=key)

    builder.resources = self.evalResources(builder, kwargs)
    builder.job_script_provider = kwargs.get("job_script_provider", None)

    return builder
def test_str():
    commented_map = CommentedMap()
    sec = Section("section1", commented_map)
    assert str(sec) == "section1"
def test_hash():
    commented_map = CommentedMap()
    sec = Section("section1", commented_map)
    assert hash(sec) == hash("section1-[]")
    sec.add_item("item")
    assert hash(sec) == hash("section1-['item']")
def decode_json(cls, registry: Registry, data: str, args):
    raw_data = json.loads(data)
    result = CommentedMap()
    result['title'] = raw_data['title']
    result['resource'] = 'quiz'
    result['url'] = raw_data['html_url']
    result['published'] = raw_data['published']
    result['settings'] = CommentedMap()
    result['settings']['quiz_type'] = raw_data['quiz_type']
    if raw_data.get('points_possible') is not None:
        result['settings']['points_possible'] = raw_data['points_possible']
    result['settings']['allowed_attempts'] = raw_data['allowed_attempts']
    result['settings']['scoring_policy'] = raw_data['scoring_policy']
    result['settings']['timing'] = CommentedMap()
    result['settings']['timing']['due_at'] = to_friendly_date(raw_data['due_at'])
    result['settings']['timing']['unlock_at'] = to_friendly_date(raw_data['unlock_at'])
    result['settings']['timing']['lock_at'] = to_friendly_date(raw_data['lock_at'])
    result['settings']['secrecy'] = CommentedMap()
    result['settings']['secrecy']['shuffle_answers'] = raw_data['shuffle_answers']
    result['settings']['secrecy']['time_limit'] = raw_data['time_limit']
    result['settings']['secrecy']['one_question_at_a_time'] = raw_data['one_question_at_a_time']
    result['settings']['secrecy']['cant_go_back'] = raw_data['cant_go_back']
    result['settings']['secrecy']['show_correct_answers'] = raw_data['show_correct_answers']
    result['settings']['secrecy']['show_correct_answers_last_attempt'] = raw_data['show_correct_answers_last_attempt']
    result['settings']['secrecy']['show_correct_answers_at'] = raw_data['show_correct_answers_at']
    result['settings']['secrecy']['hide_correct_answers_at'] = raw_data['hide_correct_answers_at']
    result['settings']['secrecy']['hide_results'] = raw_data['hide_results']
    result['settings']['secrecy']['one_time_results'] = raw_data['one_time_results']
    if raw_data['access_code']:
        result['settings']['secrecy']['access_code'] = raw_data['access_code']
    if raw_data['ip_filter']:
        result['settings']['secrecy']['ip_filter'] = raw_data['ip_filter']

    # Handle questions and groups
    result['questions'] = []
    available_groups = raw_data['groups']
    used_groups = {}
    extra_files = []
    for question in raw_data['questions']:
        quiz_question, destination_path, full_body = QuizQuestion.decode_question(
            registry, question, raw_data, args)
        if destination_path is not None:
            extra_files.append((destination_path, full_body))
        quiz_group_id = question.get('quiz_group_id')
        if quiz_group_id is not None:
            quiz_group_id = str(quiz_group_id)  # acbart: JSON only allows string keys
            if quiz_group_id not in used_groups:
                used_groups[quiz_group_id] = QuizGroup.decode_group(
                    available_groups[quiz_group_id])
                result['questions'].append(used_groups[quiz_group_id])
            used_groups[quiz_group_id]['questions'].append(quiz_question)
        else:
            result['questions'].append(quiz_question)

    return h2m(raw_data['description'], result), extra_files
def resolve_all(
        self,
        document,  # type: Union[CommentedMap, CommentedSeq]
        base_url,  # type: Text
        file_base=None,  # type: Text
        checklinks=True  # type: bool
):
    # type: (...) -> Tuple[Union[CommentedMap, CommentedSeq, Text, None], Dict[Text, Any]]
    loader = self
    metadata = CommentedMap()  # type: CommentedMap
    if file_base is None:
        file_base = base_url

    if isinstance(document, CommentedMap):
        # Handle $import and $include
        if (u'$import' in document or u'$include' in document):
            return self.resolve_ref(
                document, base_url=file_base, checklinks=checklinks)
        elif u'$mixin' in document:
            return self.resolve_ref(
                document, base_url=base_url, checklinks=checklinks)
    elif isinstance(document, CommentedSeq):
        pass
    elif isinstance(document, (list, dict)):
        raise Exception(
            "Expected CommentedMap or CommentedSeq, got %s: `%s`"
            % (type(document), document))
    else:
        return (document, metadata)

    newctx = None  # type: Optional[Loader]
    if isinstance(document, CommentedMap):
        # Handle $base, $profile, $namespaces, $schemas and $graph
        if u"$base" in document:
            base_url = document[u"$base"]

        if u"$profile" in document:
            if newctx is None:
                newctx = SubLoader(self)
            prof = self.fetch(document[u"$profile"])
            newctx.add_namespaces(document.get(u"$namespaces", {}))
            newctx.add_schemas(document.get(u"$schemas", []),
                               document[u"$profile"])

        if u"$namespaces" in document:
            if newctx is None:
                newctx = SubLoader(self)
            newctx.add_namespaces(document[u"$namespaces"])

        if u"$schemas" in document:
            if newctx is None:
                newctx = SubLoader(self)
            newctx.add_schemas(document[u"$schemas"], file_base)

        if newctx is not None:
            loader = newctx

        if u"$graph" in document:
            metadata = _copy_dict_without_key(document, u"$graph")
            document = document[u"$graph"]
            resolved_metadata = loader.resolve_all(
                metadata, base_url, file_base=file_base, checklinks=False)[0]
            if isinstance(resolved_metadata, dict):
                metadata = resolved_metadata
            else:
                raise validate.ValidationException(
                    "Validation error, metadata must be dict: %s"
                    % (resolved_metadata))

    if isinstance(document, CommentedMap):
        self._normalize_fields(document, loader)
        self._resolve_idmap(document, loader)
        self._resolve_type_dsl(document, loader)
        base_url = self._resolve_identifier(document, loader, base_url)
        self._resolve_identity(document, loader, base_url)
        self._resolve_uris(document, loader, base_url)

        try:
            for key, val in document.items():
                document[key], _ = loader.resolve_all(
                    val, base_url, file_base=file_base, checklinks=False)
        except validate.ValidationException as v:
            _logger.warn("loader is %s", id(loader), exc_info=True)
            raise validate.ValidationException(
                "(%s) (%s) Validation error in field %s:\n%s"
                % (id(loader), file_base, key,
                   validate.indent(six.text_type(v))))

    elif isinstance(document, CommentedSeq):
        i = 0
        try:
            while i < len(document):
                val = document[i]
                if isinstance(val, CommentedMap) and (u"$import" in val
                                                      or u"$mixin" in val):
                    l, _ = loader.resolve_ref(
                        val, base_url=file_base, checklinks=False)
                    if isinstance(l, CommentedSeq):
                        lc = document.lc.data[i]
                        del document[i]
                        llen = len(l)
                        for j in range(len(document) + llen, i + llen, -1):
                            document.lc.data[j - 1] = document.lc.data[j - llen]
                        for item in l:
                            document.insert(i, item)
                            document.lc.data[i] = lc
                            i += 1
                    else:
                        document[i] = l
                        i += 1
                else:
                    document[i], _ = loader.resolve_all(
                        val, base_url, file_base=file_base, checklinks=False)
                    i += 1
        except validate.ValidationException as v:
            _logger.warn("failed", exc_info=True)
            raise validate.ValidationException(
                "(%s) (%s) Validation error in position %i:\n%s"
                % (id(loader), file_base, i,
                   validate.indent(six.text_type(v))))

    for identifier in loader.identity_links:
        if identifier in metadata:
            if isinstance(metadata[identifier], (str, six.text_type)):
                metadata[identifier] = loader.expand_url(
                    metadata[identifier], base_url, scoped_id=True)
                loader.idx[metadata[identifier]] = document

    if checklinks:
        all_doc_ids = {}  # type: Dict[Text, Text]
        self.validate_links(document, u"", all_doc_ids)

    return document, metadata
def resolve_all(
    self,
    document: ResolveType,
    base_url: str,
    file_base: Optional[str] = None,
    checklinks: bool = True,
    strict_foreign_properties: bool = False,
) -> ResolvedRefType:
    loader = self
    metadata = CommentedMap()
    if file_base is None:
        file_base = base_url

    if isinstance(document, CommentedMap):
        # Handle $import and $include
        if "$import" in document or "$include" in document:
            return self.resolve_ref(
                document,
                base_url=file_base,
                checklinks=checklinks,
                strict_foreign_properties=strict_foreign_properties,
            )
        elif "$mixin" in document:
            return self.resolve_ref(
                document,
                base_url=base_url,
                checklinks=checklinks,
                strict_foreign_properties=strict_foreign_properties,
            )
    elif isinstance(document, CommentedSeq):
        pass
    elif isinstance(document, (list, dict)):
        raise ValidationException(
            "Expected CommentedMap or CommentedSeq, got {}: `{}`".format(
                type(document), document
            )
        )
    else:
        return (document, metadata)

    newctx = None  # type: Optional["Loader"]
    if isinstance(document, CommentedMap):
        # Handle $base, $profile, $namespaces, $schemas and $graph
        if "$base" in document:
            base_url = document["$base"]

        if "$profile" in document:
            if newctx is None:
                newctx = SubLoader(self)
            newctx.add_namespaces(document.get("$namespaces", CommentedMap()))
            newctx.add_schemas(document.get("$schemas", []), document["$profile"])

        if "$namespaces" in document:
            if newctx is None:
                newctx = SubLoader(self)
            newctx.add_namespaces(document["$namespaces"])

        if "$schemas" in document:
            if newctx is None:
                newctx = SubLoader(self)
            newctx.add_schemas(document["$schemas"], file_base)

        if newctx is not None:
            loader = newctx

        for identifier in loader.identity_links:
            if identifier in document:
                if isinstance(document[identifier], str):
                    document[identifier] = loader.expand_url(
                        document[identifier], base_url, scoped_id=True
                    )
                    loader.idx[document[identifier]] = document

        metadata = document
        if "$graph" in document:
            document = document["$graph"]

    if isinstance(document, CommentedMap):
        self._normalize_fields(document, loader)
        self._resolve_idmap(document, loader)
        self._resolve_dsl(document, loader)
        base_url = self._resolve_identifier(document, loader, base_url)
        self._resolve_identity(document, loader, base_url)
        self._resolve_uris(document, loader, base_url)

        try:
            for key, val in document.items():
                subscope = ""  # type: str
                if key in loader.subscopes:
                    subscope = "/" + loader.subscopes[key]
                document[key], _ = loader.resolve_all(
                    val, base_url + subscope, file_base=file_base, checklinks=False
                )
        except ValidationException as v:
            _logger.warning("loader is %s", id(loader), exc_info=True)
            raise ValidationException(
                "({}) ({}) Validation error in field {}:".format(
                    id(loader), file_base, key
                ),
                None,
                [v],
            ) from v

    elif isinstance(document, CommentedSeq):
        i = 0
        try:
            while i < len(document):
                val = document[i]
                if isinstance(val, CommentedMap) and (
                    "$import" in val or "$mixin" in val
                ):
                    l, import_metadata = loader.resolve_ref(
                        val, base_url=file_base, checklinks=False
                    )
                    metadata.setdefault("$import_metadata", {})
                    for identifier in loader.identifiers:
                        if identifier in import_metadata:
                            metadata["$import_metadata"][
                                import_metadata[identifier]
                            ] = import_metadata
                    if isinstance(l, CommentedSeq):
                        lc = document.lc.data[i]
                        del document[i]
                        llen = len(l)
                        for j in range(len(document) + llen, i + llen, -1):
                            document.lc.data[j - 1] = document.lc.data[j - llen]
                        for item in l:
                            document.insert(i, item)
                            document.lc.data[i] = lc
                            i += 1
                    else:
                        document[i] = l
                        i += 1
                else:
                    document[i], _ = loader.resolve_all(
                        val, base_url, file_base=file_base, checklinks=False
                    )
                    i += 1
        except ValidationException as v:
            _logger.warning("failed", exc_info=True)
            raise ValidationException(
                "({}) ({}) Validation error in position {}:".format(
                    id(loader), file_base, i
                ),
                None,
                [v],
            ) from v

    if checklinks:
        all_doc_ids = {}  # type: Dict[str, str]
        loader.validate_links(
            document,
            "",
            all_doc_ids,
            strict_foreign_properties=strict_foreign_properties,
        )

    return document, metadata
def to_disk(self, force=False):
    result = CommentedMap()
    result['name'] = self.name
    result['pick'] = self.pick_count
    result['points'] = self.question_points
    return result
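
# Illustrative sketch (not part of the original module): to_disk() returns a
# CommentedMap rather than a plain dict so that ruamel.yaml's round-trip
# dumper emits the keys in insertion order. The values below are made up.
import sys
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap

result = CommentedMap()
result['name'] = 'Chapter 1 pool'  # hypothetical group name
result['pick'] = 2
result['points'] = 5
ruamel.yaml.YAML().dump(result, sys.stdout)
# name: Chapter 1 pool
# pick: 2
# points: 5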
def test_issue_242(self):
    from ruamel.yaml.comments import CommentedMap

    d0 = CommentedMap([('a', 'b')])
    assert d0['a'] == 'b'
def copy(src):
    cls = getattr(src, "mapCtor", src.__class__)
    if six.PY2 and cls is CommentedMap:
        return CommentedMap(src.items())
    return cls(src)
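
# Usage sketch for the copy() helper above (an assumption about how it is
# called, not from the original source): it acts as a generic shallow copy
# that preserves the source's mapping type, with `mapCtor` as an optional
# override attribute. The Python 2 branch presumably works around the old
# ruamel.yaml "source has undefined order" TypeError that the
# test_CommentedMap snippet later in this section checks for.
from ruamel.yaml.comments import CommentedMap

m = CommentedMap([('a', 1), ('b', 2)])
m2 = copy(m)
assert type(m2) is CommentedMap and m2 == m and m2 is not m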
def to_yaml(self, data):
    self._should_be_mapping(data)
    # TODO : Maximum minimum keys
    return CommentedMap([
        (self._key_validator.to_yaml(key), self._value_validator.to_yaml(value))
        for key, value in data.items()
    ])
def __init__(self, toolpath_object, pos, **kwargs):
    # type: (Dict[Text, Any], int, **Any) -> None
    if "id" in toolpath_object:
        self.id = toolpath_object["id"]
    else:
        self.id = "#step" + Text(pos)

    kwargs["requirements"] = kwargs.get(
        "requirements", []) + toolpath_object.get("requirements", [])
    kwargs["hints"] = kwargs.get("hints", []) + toolpath_object.get("hints", [])

    try:
        if isinstance(toolpath_object["run"], dict):
            self.embedded_tool = kwargs.get("makeTool")(
                toolpath_object["run"], **kwargs)
        else:
            self.embedded_tool = load_tool(
                toolpath_object["run"],
                kwargs.get("makeTool"),
                kwargs,
                enable_dev=kwargs.get("enable_dev"),
                strict=kwargs.get("strict"),
                fetcher_constructor=kwargs.get("fetcher_constructor"))
    except validate.ValidationException as v:
        raise WorkflowException(
            u"Tool definition %s failed validation:\n%s"
            % (toolpath_object["run"], validate.indent(str(v))))

    validation_errors = []
    self.tool = toolpath_object = copy.deepcopy(toolpath_object)
    bound = set()
    for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
        toolpath_object[toolfield] = []
        for n, step_entry in enumerate(toolpath_object[stepfield]):
            if isinstance(step_entry, six.string_types):
                param = CommentedMap()  # type: CommentedMap
                inputid = step_entry
            else:
                param = CommentedMap(six.iteritems(step_entry))
                inputid = step_entry["id"]

            shortinputid = shortname(inputid)
            found = False
            for tool_entry in self.embedded_tool.tool[toolfield]:
                frag = shortname(tool_entry["id"])
                if frag == shortinputid:
                    param.update(tool_entry)
                    found = True
                    bound.add(frag)
                    break
            if not found:
                if stepfield == "in":
                    param["type"] = "Any"
                else:
                    validation_errors.append(
                        SourceLine(self.tool["out"], n).makeError(
                            "Workflow step output '%s' does not correspond to"
                            % shortname(step_entry))
                        + "\n" + SourceLine(
                            self.embedded_tool.tool, "outputs").makeError(
                            " tool output (expected '%s')" % (
                                "', '".join([
                                    shortname(tool_entry["id"]) for tool_entry
                                    in self.embedded_tool.tool[toolfield]
                                ]))))
            param["id"] = inputid
            param.lc.line = toolpath_object[stepfield].lc.data[n][0]
            param.lc.col = toolpath_object[stepfield].lc.data[n][1]
            param.lc.filename = toolpath_object[stepfield].lc.filename
            toolpath_object[toolfield].append(param)

    missing = []
    for i, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
        if shortname(tool_entry["id"]) not in bound:
            if "null" not in tool_entry["type"] and "default" not in tool_entry:
                missing.append(shortname(tool_entry["id"]))

    if missing:
        validation_errors.append(
            SourceLine(self.tool, "in").makeError(
                "Step is missing required parameter%s '%s'"
                % ("s" if len(missing) > 1 else "", "', '".join(missing))))

    if validation_errors:
        raise validate.ValidationException("\n".join(validation_errors))

    super(WorkflowStep, self).__init__(toolpath_object, **kwargs)

    if self.embedded_tool.tool["class"] == "Workflow":
        (feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
        if not feature:
            raise WorkflowException(
                "Workflow contains embedded workflow but "
                "SubworkflowFeatureRequirement not in requirements")

    if "scatter" in self.tool:
        (feature, _) = self.get_requirement("ScatterFeatureRequirement")
        if not feature:
            raise WorkflowException(
                "Workflow contains scatter but ScatterFeatureRequirement "
                "not in requirements")

        inputparms = copy.deepcopy(self.tool["inputs"])
        outputparms = copy.deepcopy(self.tool["outputs"])
        scatter = aslist(self.tool["scatter"])

        method = self.tool.get("scatterMethod")
        if method is None and len(scatter) != 1:
            raise validate.ValidationException(
                "Must specify scatterMethod when scattering over multiple inputs")

        inp_map = {i["id"]: i for i in inputparms}
        for s in scatter:
            if s not in inp_map:
                raise validate.ValidationException(
                    SourceLine(self.tool, "scatter").makeError(
                        u"Scatter parameter '%s' does not correspond to an input "
                        u"parameter of this step, expecting '%s'"
                        % (shortname(s),
                           "', '".join(shortname(k) for k in inp_map.keys()))))

            inp_map[s]["type"] = {
                "type": "array",
                "items": inp_map[s]["type"]
            }

        if self.tool.get("scatterMethod") == "nested_crossproduct":
            nesting = len(scatter)
        else:
            nesting = 1

        for r in range(0, nesting):
            for op in outputparms:
                op["type"] = {"type": "array", "items": op["type"]}

        self.tool["inputs"] = inputparms
        self.tool["outputs"] = outputparms
def test_CommentedMap(self):
    cm = CommentedMap()
    # check bug in ruamel.yaml is fixed: raises TypeError: source has undefined order
    self.assertEqual(cm, cm.copy())
def __call__(self, loader: 'Loader',
             node: yaml.Node) -> Generator[Any, None, None]:
    """Construct an object from a yaml node.

    This constructs an object of the user-defined type that this \
    is the constructor for. It is registered with the yaml library, \
    and called by it. Recursion is handled by calling the yaml \
    library, so we only need to construct an object using the keys \
    and values of the given MappingNode, and those values have been \
    converted recursively for us.

    Since Python does not do type checks, we do a type check \
    manually, to ensure that the class's constructor gets the types \
    it expects. This avoids confusing errors, but moreover is a \
    security feature that ensures that regardless of the content \
    of the YAML file, we produce the objects that the programmer \
    defined and expects.

    Note that this yields rather than returns, in a somewhat odd \
    way. That's directly from the PyYAML/ruamel.yaml documentation.

    Args:
        loader: The yatiml.loader that is creating this object.
        node: The node to construct from.

    Yields:
        The incomplete constructed object.
    """
    logger.debug('Constructing an object of type {}'.format(
        self.class_.__name__))
    if not isinstance(node, yaml.MappingNode):
        raise RecognitionError(
            ('{}{}Expected a MappingNode. There'
             ' is probably something wrong with your yatiml_savorize()'
             ' function.').format(node.start_mark, os.linesep))

    # figure out which keys are extra and strip them of tags
    # to prevent constructing objects we haven't type checked
    argspec = inspect.getfullargspec(self.class_.__init__)
    self.__strip_extra_attributes(node, argspec.args)

    # create object and let yaml lib construct subobjects
    new_obj = self.class_.__new__(self.class_)  # type: ignore
    yield new_obj
    mapping = CommentedMap()
    loader.construct_mapping(node, mapping, deep=True)

    # Convert ruamel.yaml's round-trip types to list and OrderedDict,
    # recursively for each attribute value in our mapping. Note that
    # mapping itself is still a CommentedMap.
    for key, value in mapping.copy().items():
        if (isinstance(value, CommentedMap)
                or isinstance(value, CommentedSeq)):
            mapping[key] = self.__to_plain_containers(value)

    # do type check
    self.__check_no_missing_attributes(node, mapping)
    self.__type_check_attributes(node, mapping, argspec)

    # construct object, this should work now
    try:
        logger.debug('Calling __init__')
        if 'yatiml_extra' in argspec.args:
            attrs = self.__split_off_extra_attributes(mapping, argspec.args)
            new_obj.__init__(**attrs)
        else:
            new_obj.__init__(**mapping)
    except TypeError:  # pragma: no cover
        raise RecognitionError(
            ('{}{}Could not construct object of class {}'
             ' from {}. This is a bug in YAtiML, please report.'.format(
                 node.start_mark, os.linesep, self.class_.__name__, node)))
    logger.debug('Done constructing {}'.format(self.class_.__name__))
def test_CommentedMapEquality(self):
    cm = CommentedMap((("b", 2),))
    cm.insert(1, "a", 1, comment="a comment")
    self.assertEqual(cm, {"a": 1, "b": 2})
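
# Quick round-trip sketch (not part of the test suite): the comment passed
# to CommentedMap.insert() is attached as an end-of-line comment, does not
# affect equality with a plain dict, and survives dumping with ruamel.yaml's
# round-trip dumper.
import sys
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap

cm = CommentedMap((("b", 2),))
cm.insert(1, "a", 1, comment="a comment")
ruamel.yaml.YAML().dump(cm, sys.stdout)
# b: 2
# a: 1 # a comment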
def v1_0to1_1(doc, loader, baseuri):  # pylint: disable=unused-argument
    # type: (Any, Loader, Text) -> Tuple[Any, Text]
    """Public updater for v1.0 to v1.1."""
    doc = copy.deepcopy(doc)

    rewrite = {
        "http://commonwl.org/cwltool#WorkReuse": "WorkReuse",
        "http://arvados.org/cwl#ReuseRequirement": "WorkReuse",
        "http://commonwl.org/cwltool#TimeLimit": "ToolTimeLimit",
        "http://commonwl.org/cwltool#NetworkAccess": "NetworkAccess",
        "http://commonwl.org/cwltool#InplaceUpdateRequirement": "InplaceUpdateRequirement",
        "http://commonwl.org/cwltool#LoadListingRequirement": "LoadListingRequirement"
    }

    def rewrite_requirements(t):
        if "requirements" in t:
            for r in t["requirements"]:
                if r["class"] in rewrite:
                    r["class"] = rewrite[r["class"]]
        if "hints" in t:
            for r in t["hints"]:
                if r["class"] in rewrite:
                    r["class"] = rewrite[r["class"]]
        if "steps" in t:
            for s in t["steps"]:
                rewrite_requirements(s)

    def update_secondaryFiles(t):
        if isinstance(t, CommentedSeq):
            new_seq = copy.deepcopy(t)
            for index, entry in enumerate(t):
                new_seq[index] = update_secondaryFiles(entry)
            return new_seq
        elif isinstance(t, MutableSequence):
            return CommentedSeq([update_secondaryFiles(p) for p in t])
        elif isinstance(t, MutableMapping):
            return t
        else:
            return CommentedMap([("pattern", t)])

    def fix_inputBinding(t):
        for i in t["inputs"]:
            if "inputBinding" in i:
                ib = i["inputBinding"]
                for k in list(ib.keys()):
                    if k != "loadContents":
                        _logger.warning(
                            SourceLine(ib, k).makeError(
                                "Will ignore field '%s' which is not valid in "
                                "%s inputBinding" % (k, t["class"])))
                        del ib[k]

    visit_class(doc, ("CommandLineTool", "Workflow"), rewrite_requirements)
    visit_class(doc, ("ExpressionTool", "Workflow"), fix_inputBinding)
    visit_field(doc, "secondaryFiles", update_secondaryFiles)

    upd = doc
    if isinstance(upd, MutableMapping) and "$graph" in upd:
        upd = upd["$graph"]
    for proc in aslist(upd):
        proc.setdefault("hints", CommentedSeq())
        proc["hints"].insert(
            0, CommentedMap([("class", "NetworkAccess"),
                             ("networkAccess", True)]))
        proc["hints"].insert(
            0, CommentedMap([("class", "LoadListingRequirement"),
                             ("loadListing", "deep_listing")]))
        if "cwlVersion" in proc:
            del proc["cwlVersion"]

    return (doc, "v1.1")
def do_update(self, helper: ConfigUpdateHelper) -> None:
    super().do_update(helper)
    copy, copy_dict, base = helper

    copy("homeserver.asmux")

    if "appservice.protocol" in self and "appservice.address" not in self:
        protocol, hostname, port = (self["appservice.protocol"],
                                    self["appservice.hostname"],
                                    self["appservice.port"])
        base["appservice.address"] = f"{protocol}://{hostname}:{port}"
    if "appservice.debug" in self and "logging" not in self:
        level = "DEBUG" if self["appservice.debug"] else "INFO"
        base["logging.root.level"] = level
        base["logging.loggers.mau.level"] = level
        base["logging.loggers.telethon.level"] = level

    copy("appservice.public.enabled")
    copy("appservice.public.prefix")
    copy("appservice.public.external")

    copy("appservice.provisioning.enabled")
    copy("appservice.provisioning.prefix")
    copy("appservice.provisioning.shared_secret")
    if base["appservice.provisioning.shared_secret"] == "generate":
        base["appservice.provisioning.shared_secret"] = self._new_token()

    copy("appservice.community_id")

    copy("metrics.enabled")
    copy("metrics.listen_port")

    copy("manhole.enabled")
    copy("manhole.path")
    copy("manhole.whitelist")

    copy("bridge.username_template")
    copy("bridge.alias_template")
    copy("bridge.displayname_template")
    copy("bridge.displayname_preference")
    copy("bridge.displayname_max_length")
    copy("bridge.allow_avatar_remove")

    copy("bridge.max_initial_member_sync")
    copy("bridge.sync_channel_members")
    copy("bridge.skip_deleted_members")
    copy("bridge.startup_sync")
    if "bridge.sync_dialog_limit" in self:
        base["bridge.sync_create_limit"] = self["bridge.sync_dialog_limit"]
        base["bridge.sync_update_limit"] = self["bridge.sync_dialog_limit"]
    else:
        copy("bridge.sync_update_limit")
        copy("bridge.sync_create_limit")
    copy("bridge.sync_direct_chats")
    copy("bridge.max_telegram_delete")
    copy("bridge.sync_matrix_state")
    copy("bridge.allow_matrix_login")
    copy("bridge.plaintext_highlights")
    copy("bridge.public_portals")
    copy("bridge.sync_with_custom_puppets")
    copy("bridge.sync_direct_chat_list")
    copy("bridge.double_puppet_server_map")
    copy("bridge.double_puppet_allow_discovery")
    if "bridge.login_shared_secret" in self:
        base["bridge.login_shared_secret_map"] = {
            base["homeserver.domain"]: self["bridge.login_shared_secret"]
        }
    else:
        copy("bridge.login_shared_secret_map")
    copy("bridge.telegram_link_preview")
    copy("bridge.invite_link_resolve")
    copy("bridge.inline_images")
    copy("bridge.image_as_file_size")
    copy("bridge.max_document_size")
    copy("bridge.parallel_file_transfer")
    copy("bridge.federate_rooms")
    copy("bridge.animated_sticker.target")
    copy("bridge.animated_sticker.args")
    copy("bridge.encryption.allow")
    copy("bridge.encryption.default")
    copy("bridge.encryption.database")
    copy("bridge.encryption.key_sharing.allow")
    copy("bridge.encryption.key_sharing.require_cross_signing")
    copy("bridge.encryption.key_sharing.require_verification")
    copy("bridge.private_chat_portal_meta")
    copy("bridge.delivery_receipts")
    copy("bridge.delivery_error_reports")
    copy("bridge.resend_bridge_info")
    copy("bridge.mute_bridging")
    copy("bridge.pinned_tag")
    copy("bridge.archive_tag")
    copy("bridge.backfill.invite_own_puppet")
    copy("bridge.backfill.takeout_limit")
    copy("bridge.backfill.initial_limit")
    copy("bridge.backfill.missed_limit")
    copy("bridge.backfill.disable_notifications")
    copy("bridge.backfill.normal_groups")
    copy("bridge.initial_power_level_overrides.group")
    copy("bridge.initial_power_level_overrides.user")

    copy("bridge.bot_messages_as_notices")
    if isinstance(self["bridge.bridge_notices"], bool):
        base["bridge.bridge_notices"] = {
            "default": self["bridge.bridge_notices"],
            "exceptions": ["@importantbot:example.com"],
        }
    else:
        copy("bridge.bridge_notices")

    copy("bridge.deduplication.pre_db_check")
    copy("bridge.deduplication.cache_queue_length")
    if "bridge.message_formats.m_text" in self:
        del self["bridge.message_formats"]
    copy_dict("bridge.message_formats", override_existing_map=False)
    copy("bridge.emote_format")
    copy("bridge.state_event_formats.join")
    copy("bridge.state_event_formats.leave")
    copy("bridge.state_event_formats.name_change")
    copy("bridge.filter.mode")
    copy("bridge.filter.list")
    copy("bridge.command_prefix")

    migrate_permissions = ("bridge.permissions" not in self
                           or "bridge.whitelist" in self
                           or "bridge.admins" in self)
    if migrate_permissions:
        permissions = self["bridge.permissions"] or CommentedMap()
        for entry in self["bridge.whitelist"] or []:
            permissions[entry] = "full"
        for entry in self["bridge.admins"] or []:
            permissions[entry] = "admin"
        base["bridge.permissions"] = permissions
    else:
        copy_dict("bridge.permissions")

    if "bridge.relaybot" not in self:
        copy("bridge.authless_relaybot_portals",
             "bridge.relaybot.authless_portals")
    else:
        copy("bridge.relaybot.private_chat.invite")
        copy("bridge.relaybot.private_chat.state_changes")
        copy("bridge.relaybot.private_chat.message")
        copy("bridge.relaybot.group_chat_invite")
        copy("bridge.relaybot.ignore_unbridged_group_chat")
        copy("bridge.relaybot.authless_portals")
        copy("bridge.relaybot.whitelist_group_admins")
        copy("bridge.relaybot.whitelist")
        copy("bridge.relaybot.ignore_own_incoming_events")

    copy("telegram.api_id")
    copy("telegram.api_hash")
    copy("telegram.bot_token")

    copy("telegram.connection.timeout")
    copy("telegram.connection.retries")
    copy("telegram.connection.retry_delay")
    copy("telegram.connection.flood_sleep_threshold")
    copy("telegram.connection.request_retries")

    copy("telegram.device_info.device_model")
    copy("telegram.device_info.system_version")
    copy("telegram.device_info.app_version")
    copy("telegram.device_info.lang_code")
    copy("telegram.device_info.system_lang_code")

    copy("telegram.server.enabled")
    copy("telegram.server.dc")
    copy("telegram.server.ip")
    copy("telegram.server.port")

    copy("telegram.proxy.type")
    copy("telegram.proxy.address")
    copy("telegram.proxy.port")
    copy("telegram.proxy.rdns")
    copy("telegram.proxy.username")
    copy("telegram.proxy.password")
def __get_attributes(self, model_class, doc_key=None):
    """
    Recursively inspect the attributes of a given class.

    :param model_class: model class
    :param doc_key: pointer to the current position in doc_string dict
    :return: None
    """
    model_name = self.helper.get_base_model_name_snake(model_class.__name__)
    for raw_attribute in dir(model_class):
        attribute = PYTHON_KEYWORD_MAPPING.get(raw_attribute, raw_attribute)
        if isinstance(getattr(model_class, raw_attribute), property):
            kind = model_class.swagger_types[raw_attribute]
            docs = inspect.getdoc(getattr(model_class, raw_attribute))
            string_list = self.__doc_clean_up(docs.split('\n'))
            if kind in ('str', 'int', 'bool', 'object', 'float'):
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = string_list
                doc_key[attribute]['type'] = kind
            elif attribute.endswith('params'):
                # Parameters are associated with a 'type' property. If the object has a
                # 'type', then it will also contain one or more 'param' objects, where
                # each describes its associated type. Rather than list every property of
                # each param object, the following attempts to summarize.
                snake_name = string_utils.snake_case_to_camel(
                    attribute.replace('_params', ''))
                cap_snake_name = snake_name[:1].capitalize() + snake_name[1:]
                model_name_text = ' '.join(model_name.split('_')).capitalize()
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = (
                    model_name_text +
                    ' parameters when I(type) is {}.'.format(cap_snake_name))
                doc_key[attribute]['type'] = 'complex'
                doc_key[attribute]['returned'] = 'when I(type) is {}'.format(
                    cap_snake_name)
            elif kind.startswith('list['):
                class_name = kind.replace('list[', '').replace(']', '')
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = string_list
                doc_key[attribute]['type'] = 'list'
                sub_model_class = None
                try:
                    sub_model_class = self.get_model_class(class_name)
                except (AttributeError, KubernetesException):
                    pass
                if sub_model_class:
                    doc_key[attribute]['contains'] = CommentedMap()
                    self.__get_attributes(
                        sub_model_class,
                        doc_key=doc_key[attribute]['contains'])
                else:
                    doc_key[attribute]['contains'] = class_name
            elif kind.startswith('dict('):
                class_name = kind.replace('dict(', '').replace(')', '')
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = string_list
                doc_key[attribute]['type'] = 'complex'
                sub_model_class = None
                try:
                    sub_model_class = self.get_model_class(class_name)
                except (AttributeError, KubernetesException):
                    pass
                if sub_model_class:
                    doc_key[attribute]['contains'] = CommentedMap()
                    self.__get_attributes(
                        sub_model_class,
                        doc_key=doc_key[attribute]['contains'])
                else:
                    doc_key[attribute]['contains'] = class_name
            elif kind == 'datetime':
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = string_list
                doc_key[attribute]['type'] = 'complex'
                doc_key[attribute]['contains'] = CommentedMap()
            elif kind == 'object':
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = string_list
                doc_key[attribute]['type'] = 'complex'
                doc_key[attribute]['contains'] = CommentedMap()
            else:
                doc_key[attribute] = CommentedMap()
                doc_key[attribute]['description'] = string_list
                doc_key[attribute]['type'] = 'complex'
def build_dockerswarm(all_layers: List['YamlComposer.Layer'],
                      docker_img: str = 'gnes/gnes:alpine-latest',
                      volumes: Dict = None,
                      networks: Dict = None) -> str:
    with resource_stream(
            'gnes',
            '/'.join(('resources', 'compose', 'gnes-swarm.yml'))) as r:
        swarm_lines = _yaml.load(r)
    config_dict = {}
    for l_idx, layer in enumerate(all_layers):
        for c_idx, c in enumerate(layer.components):
            c_name = '%s%d%d' % (c['name'], l_idx, c_idx)
            args = [
                '--%s %s' % (a, str(v) if ' ' not in str(v)
                             else ('"%s"' % str(v)))
                for a, v in c.items()
                if a in YamlComposer.comp2args[c['name']]
                and a != 'yaml_path' and v
            ]
            if 'yaml_path' in c and c['yaml_path'] is not None:
                if c['yaml_path'].endswith('.yml') or c['yaml_path'].endswith('.yaml'):
                    args.append('--yaml_path /%s_yaml' % c_name)
                    config_dict['%s_yaml' % c_name] = {'file': c['yaml_path']}
                else:
                    args.append('--yaml_path %s' % c['yaml_path'])

            if l_idx + 1 < len(all_layers):
                next_layer = all_layers[l_idx + 1]
                _l_idx = l_idx + 1
            else:
                next_layer = all_layers[0]
                _l_idx = 0
            host_out_name = ''
            for _c_idx, _c in enumerate(next_layer.components):
                if _c['port_in'] == c['port_out']:
                    host_out_name = '%s%d%d' % (_c['name'], _l_idx, _c_idx)
                    break

            if l_idx - 1 >= 0:
                last_layer = all_layers[l_idx - 1]
                _l_idx = l_idx - 1
            else:
                last_layer = all_layers[-1]
                _l_idx = len(all_layers) - 1
            host_in_name = ''
            for _c_idx, _c in enumerate(last_layer.components):
                if _c['port_out'] == c['port_in']:
                    host_in_name = '%s%d%d' % (_c['name'], _l_idx, _c_idx)
                    break

            if 'BIND' not in c['socket_out']:
                args.append('--host_out %s' % host_out_name)
            if 'BIND' not in c['socket_in']:
                args.append('--host_in %s' % host_in_name)

            cmd = '%s %s' % (YamlComposer.comp2file[c['name']], ' '.join(args))
            swarm_lines['services'][c_name] = CommentedMap({
                'image': docker_img,
                'command': cmd,
            })

            rep_c = YamlComposer.Layer.get_value(c, 'replicas')
            if rep_c > 1:
                swarm_lines['services'][c_name]['deploy'] = CommentedMap({
                    'replicas': YamlComposer.Layer.get_value(c, 'replicas'),
                    'restart_policy': {
                        'condition': 'on-failure',
                        'max_attempts': 3,
                    }
                })

            if 'yaml_path' in c and c['yaml_path'] is not None \
                    and (c['yaml_path'].endswith('.yml')
                         or c['yaml_path'].endswith('.yaml')):
                swarm_lines['services'][c_name]['configs'] = ['%s_yaml' % c_name]

            if c['name'] == 'Frontend':
                swarm_lines['services'][c_name]['ports'] = [
                    '%d:%d' % (c['grpc_port'], c['grpc_port'])
                ]

    if volumes:
        swarm_lines['volumes'] = volumes
    if networks:
        swarm_lines['networks'] = networks
    swarm_lines['configs'] = config_dict
    stream = StringIO()
    _yaml.dump(swarm_lines, stream)
    return stream.getvalue().strip()
def fill_config(data_obj):
    """ Make sample config """
    data_obj.insert(len(data_obj), "qualys_api_server",
                    "https://qualysapi.qualys.eu",
                    comment="Qualys API server URL")
    data_obj.insert(len(data_obj), "qualys_login", "some-user",
                    comment="Qualys user login")
    data_obj.insert(len(data_obj), "qualys_password", "S0m3P@ssw0rd",
                    comment="Qualys user password")
    data_obj.insert(len(data_obj), "qualys_option_profile_id", 12345,
                    comment="Qualys option profile ID")
    data_obj.insert(len(data_obj), "qualys_report_template_id", 12345,
                    comment="Qualys report template ID")
    data_obj.insert(len(data_obj), "qualys_scanner_type", "EXTERNAL",
                    comment="Qualys scanner type: EXTERNAL or INTERNAL")
    data_obj.insert(
        len(data_obj), "qualys_scanner_pool", CommentedSeq(),
        comment="(INTERNAL only) Qualys scanner pool: "
                "list of scanner appliances to choose from")
    pool_obj = data_obj["qualys_scanner_pool"]
    pool_obj.append("MY_SCANNER_Name1")
    pool_obj.append("MY_SCANNER_Name2")
    pool_obj.append("MY_OTHERSCANNER_Name")
    data_obj.insert(len(data_obj), "random_name", False,
                    comment="Use random project name")
    data_obj.insert(len(data_obj), "target", "http://*****:*****@ssw0rd",
                    comment="(optional) User password for authenticated scan")
    data_obj.insert(
        len(data_obj), "auth_script", CommentedSeq(),
        comment="(optional) Selenium-like script for authenticated scan")
    script_obj = data_obj["auth_script"]
    for command in [
            {"command": "open", "target": "%Target%/login", "value": ""},
            {"command": "waitForElementPresent", "target": "id=login_login",
             "value": ""},
            {"command": "waitForElementPresent", "target": "id=login_password",
             "value": ""},
            {"command": "waitForElementPresent", "target": "id=login_0",
             "value": ""},
            {"command": "type", "target": "id=login_login",
             "value": "%Username%"},
            {"command": "type", "target": "id=login_password",
             "value": "%Password%"},
            {"command": "clickAndWait", "target": "id=login_0", "value": ""}]:
        command_obj = CommentedMap()
        command_obj.fa.set_flow_style()
        for key in ["command", "target", "value"]:
            command_obj.insert(len(command_obj), key, command[key])
        script_obj.append(command_obj)
    data_obj.insert(
        len(data_obj), "logged_in_indicator", "Logout",
        comment="(optional) Response regex that is always present "
                "for authenticated user")
    data_obj.insert(
        len(data_obj), "sleep_interval", 10,
        comment="(optional) Seconds to sleep after creating new resource")
    data_obj.insert(
        len(data_obj), "status_check_interval", 60,
        comment="(optional) Seconds to wait between scan/report status checks")
    data_obj.insert(len(data_obj), "retries", 10,
                    comment="(optional) API request retry count")
    data_obj.insert(len(data_obj), "retry_delay", 30,
                    comment="(optional) API request retry delay")
    data_obj.insert(len(data_obj), "timeout", 120,
                    comment="(optional) API request timeout")
    data_obj.insert(
        len(data_obj), "save_intermediates_to", "/data/intermediates/dast",
        comment="(optional) Save scan intermediates (raw results, logs, ...)")
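# The insert(..., comment=...) calls above rely on ruamel's round-trip types:
# CommentedMap.insert attaches an end-of-line comment to the key, and
# fa.set_flow_style() renders a mapping inline. A tiny standalone demo with
# invented keys and values:
import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq

data = CommentedMap()
data.insert(len(data), 'retries', 10,
            comment='(optional) API request retry count')
step = CommentedMap()
step.fa.set_flow_style()  # dump as {command: open, target: /login}
step.insert(len(step), 'command', 'open')
step.insert(len(step), 'target', '/login')
steps = CommentedSeq()
steps.append(step)
data.insert(len(data), 'auth_script', steps, comment='(optional) auth steps')
YAML().dump(data, sys.stdout)
# dumps roughly:
#   retries: 10  # (optional) API request retry count
#   auth_script:  # (optional) auth steps
#   - {command: open, target: /login}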
def __init__(
    self,
    config=None,
    path=None,
    validate=True,
    schema=None,
    loadHook=None,
    vault=None,
):
    try:
        self._yaml = None
        self.vault = vault
        self.path = None
        self.schema = schema
        self.lastModified = None
        if path:
            self.path = os.path.abspath(path)
            if os.path.isfile(self.path):
                statinfo = os.stat(self.path)
                self.lastModified = statinfo.st_mtime
                with open(self.path, "r") as f:
                    config = f.read()
            # otherwise fall back to the `config` argument
        else:
            self.path = None

        if isinstance(config, six.string_types):
            if self.path:
                # set name on a StringIO so parsing error messages include the path
                config = six.StringIO(config)
                config.name = self.path
            self.config = self.yaml.load(config)
        elif isinstance(config, dict):
            self.config = CommentedMap(config.items())
        else:
            self.config = config
        if not isinstance(self.config, CommentedMap):
            raise UnfurlValidationError(
                'invalid YAML document with contents: "%s"' % self.config
            )

        findAnchor(self.config, None)  # create _anchorCache
        self._cachedDocIncludes = {}
        # The schema should include defaults but can't fully validate the
        # document because it doesn't understand includes; it should still
        # work most of the time.
        self.config.loadTemplate = self.loadInclude
        self.loadHook = loadHook

        self.baseDirs = [self.getBaseDir()]
        self.includes, expandedConfig = expandDoc(
            self.config, cls=makeMapWithBase(self.config, self.baseDirs[0])
        )
        self.expanded = expandedConfig
        errors = schema and self.validate(expandedConfig)
        if errors and validate:
            (message, schemaErrors) = errors
            raise UnfurlValidationError(
                "JSON Schema validation failed: " + message, errors
            )
        else:
            # valid only when validation produced no errors
            self.valid = not errors
    except Exception:
        if self.path:
            msg = "Unable to load yaml config at %s" % self.path
        else:
            msg = "Unable to parse yaml config"
        raise UnfurlError(msg, True)
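# Sketch of the load path above in isolation (io.StringIO standing in for
# six.StringIO, path invented): ruamel's round-trip loader yields a
# CommentedMap for a mapping document, which is what the constructor checks
# before expanding includes; setting .name on the stream makes parse errors
# mention the path.
import io
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

stream = io.StringIO('a: 1\nb: 2\n')
stream.name = '/tmp/example.yaml'  # hypothetical path
doc = YAML().load(stream)
assert isinstance(doc, CommentedMap)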
def get(self, service_name):
    self._ensure_services_key_exists()
    return self.config['services'].get(service_name, CommentedMap())
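# Usage sketch (invented config contents): a missing service falls back to an
# empty CommentedMap rather than None, so callers can keep chaining lookups.
from ruamel.yaml.comments import CommentedMap

config = {'services': CommentedMap({'web': CommentedMap({'image': 'nginx'})})}
web = config['services'].get('web', CommentedMap())  # -> {'image': 'nginx'}
db = config['services'].get('db', CommentedMap())    # -> empty CommentedMap, not None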
def _init_job(self, joborder, runtimeContext):
    # type: (MutableMapping[Text, Text], RuntimeContext) -> Builder

    job = cast(Dict[Text, Union[Dict[Text, Any], List[Any], Text, None]],
               copy.deepcopy(joborder))

    make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
    fs_access = make_fs_access(runtimeContext.basedir)

    # Validate job order
    try:
        fill_in_defaults(self.tool[u"inputs"], job, fs_access)
        normalizeFilesDirs(job)
        validate.validate_ex(self.names.get_name("input_record_schema", ""),
                             job, strict=False,
                             logger=_logger_validation_warnings)
    except (validate.ValidationException, WorkflowException) as e:
        raise WorkflowException("Invalid job input record:\n" + Text(e))

    files = []  # type: List[Dict[Text, Text]]
    bindings = CommentedSeq()
    tmpdir = u""
    stagedir = u""

    loadListingReq, _ = self.get_requirement(
        "http://commonwl.org/cwltool#LoadListingRequirement")
    if loadListingReq:
        loadListing = loadListingReq.get("loadListing")
    else:
        loadListing = "deep_listing"  # will default to "no_listing" in CWL v1.1

    dockerReq, _ = self.get_requirement("DockerRequirement")
    defaultDocker = None

    if dockerReq is None and runtimeContext.default_container:
        defaultDocker = runtimeContext.default_container

    if (dockerReq or defaultDocker) and runtimeContext.use_container:
        if dockerReq:
            # Check if docker output directory is absolute
            if dockerReq.get("dockerOutputDirectory") and \
                    dockerReq.get("dockerOutputDirectory").startswith('/'):
                outdir = dockerReq.get("dockerOutputDirectory")
            else:
                outdir = dockerReq.get("dockerOutputDirectory") or \
                    runtimeContext.docker_outdir or random_outdir()
        elif defaultDocker:
            outdir = runtimeContext.docker_outdir or random_outdir()
        tmpdir = runtimeContext.docker_tmpdir or "/tmp"
        stagedir = runtimeContext.docker_stagedir or "/var/lib/cwl"
    else:
        outdir = fs_access.realpath(
            runtimeContext.outdir or tempfile.mkdtemp(
                prefix=getdefault(runtimeContext.tmp_outdir_prefix,
                                  DEFAULT_TMP_PREFIX)))
        if self.tool[u"class"] != 'Workflow':
            tmpdir = fs_access.realpath(
                runtimeContext.tmpdir or tempfile.mkdtemp())
            stagedir = fs_access.realpath(
                runtimeContext.stagedir or tempfile.mkdtemp())

    builder = Builder(job,
                      files,
                      bindings,
                      self.schemaDefs,
                      self.names,
                      self.requirements,
                      self.hints,
                      runtimeContext.eval_timeout,
                      runtimeContext.debug,
                      {},
                      runtimeContext.js_console,
                      runtimeContext.mutation_manager,
                      self.formatgraph,
                      make_fs_access,
                      fs_access,
                      runtimeContext.force_docker_pull,
                      loadListing,
                      outdir,
                      tmpdir,
                      stagedir,
                      runtimeContext.job_script_provider)

    bindings.extend(builder.bind_input(
        self.inputs_record_schema, job,
        discover_secondaryFiles=getdefault(runtimeContext.toplevel, False)))

    if self.tool.get("baseCommand"):
        for n, b in enumerate(aslist(self.tool["baseCommand"])):
            bindings.append({"position": [-1000000, n], "datum": b})

    if self.tool.get("arguments"):
        for i, a in enumerate(self.tool["arguments"]):
            lc = self.tool["arguments"].lc.data[i]
            fn = self.tool["arguments"].lc.filename
            bindings.lc.add_kv_line_col(len(bindings), lc)
            if isinstance(a, MutableMapping):
                a = copy.deepcopy(a)
                if a.get("position"):
                    a["position"] = [a["position"], i]
                else:
                    a["position"] = [0, i]
                bindings.append(a)
            elif ("$(" in a) or ("${" in a):
                cm = CommentedMap((("position", [0, i]), ("valueFrom", a)))
                cm.lc.add_kv_line_col("valueFrom", lc)
                cm.lc.filename = fn
                bindings.append(cm)
            else:
                cm = CommentedMap((("position", [0, i]), ("datum", a)))
                cm.lc.add_kv_line_col("datum", lc)
                cm.lc.filename = fn
                bindings.append(cm)

    # Use python2-like sorting of heterogeneous lists
    # (containing str and int types).
    # TODO: unify for both runtimes
    if PY3:
        key = functools.cmp_to_key(cmp_like_py2)
    else:  # PY2
        key = lambda d: d["position"]

    # This awkward construction replaces the contents of "bindings" in
    # place (because Builder expects it to be mutated in place, sigh, I'm
    # sorry) with its contents sorted, supporting different versions of
    # Python and ruamel.yaml with different behaviors/bugs in CommentedSeq.
    bd = copy.deepcopy(bindings)
    del bindings[:]
    bindings.extend(sorted(bd, key=key))

    if self.tool[u"class"] != 'Workflow':
        builder.resources = self.evalResources(builder, runtimeContext)
    return builder
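# Standalone sketch of the sort-in-place trick above: the CommentedSeq object
# must keep its identity (Builder holds a reference to it), so it is cleared
# with a slice-delete and refilled with the sorted copy. cmp_like_py2 is
# cwltool's own comparator; a plain key function stands in for it here.
import copy
from ruamel.yaml.comments import CommentedSeq

bindings = CommentedSeq([{'position': [0, 2]}, {'position': [-1000000, 0]}])
bd = copy.deepcopy(bindings)
del bindings[:]  # clear in place, keeping the same object
bindings.extend(sorted(bd, key=lambda d: d['position']))
assert bindings[0]['position'] == [-1000000, 0]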