Example #1
 def sort_dict(self, initial_dict):
     result = []
     for k, v in sorted(initial_dict.items()):
         v = self.sort_anything(v)
         result.append((k, v))
     cm = CommentedMap(result)
     if hasattr(initial_dict, '_yaml_comment'):
         cm._yaml_comment = initial_dict._yaml_comment
     return cm
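
Rebuilding a CommentedMap from sorted items would normally drop ruamel.yaml's comment bookkeeping, which is why the method copies _yaml_comment across. A minimal standalone sketch of the same idea (illustrative data, round-trip loader assumed):

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

yaml = YAML()
data = yaml.load("b: 2  # beta\na: 1  # alpha\n")

sorted_map = CommentedMap(sorted(data.items()))
# Carry the comment attribute over so per-key comments survive the rebuild.
if hasattr(data, '_yaml_comment'):
    sorted_map._yaml_comment = data._yaml_comment
yaml.dump(sorted_map, sys.stdout)  # keys sorted, comments intact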
Example #2
    def fill_in_default_array_items(validator, items, instance, schema):
        if include_yaml_comments and items["type"] == "object":
            new_items = []
            for item in instance:
                new_item = CommentedMap(item)
                new_item.key_indent = instance.key_indent + yaml_indent
                new_items.append(new_item)
            instance.clear()
            instance.extend(new_items)

        # Descend into array list
        for _error in validate_items(validator, items, instance, schema):
            # Ignore validation errors
            pass
Example #3
    def generate_orchestration_playbook(self, url=None, namespace=None, local_images=True, **kwargs):
        """
        Generate an Ansible playbook to orchestrate services.
        :param url: registry URL where images will be pulled from
        :param namespace: registry namespace
        :param local_images: bypass pulling images, and use local copies
        :return: playbook dict
        """
        for service_name in self.services:
            image = self.get_latest_image_for_service(service_name)
            if local_images:
                self.services[service_name]['image'] = image.tags[0]
            else:
                if namespace is not None:
                    image_url = urljoin('{}/'.format(urljoin(url, namespace)), image.tags[0])
                else:
                    image_url = urljoin(url, image.tags[0])
                self.services[service_name]['image'] = image_url

        if kwargs.get('k8s_auth'):
            self.k8s_client.set_authorization(kwargs['k8s_auth'])

        play = CommentedMap()
        play['name'] = u'Manage the lifecycle of {} on {}'.format(self.project_name, self.display_name)
        play['hosts'] = 'localhost'
        play['gather_facts'] = 'no'
        play['connection'] = 'local'
        play['roles'] = CommentedSeq()
        play['tasks'] = CommentedSeq()
        role = CommentedMap([
            ('role', 'kubernetes-modules')
        ])
        play['roles'].append(role)
        play.yaml_set_comment_before_after_key(
            'roles', before='Include Ansible Kubernetes and OpenShift modules', indent=4)
        play.yaml_set_comment_before_after_key('tasks', before='Tasks for setting the application state. '
                                               'Valid tags include: start, stop, restart, destroy', indent=4)
        play['tasks'].append(self.deploy.get_namespace_task(state='present', tags=['start']))
        play['tasks'].append(self.deploy.get_namespace_task(state='absent', tags=['destroy']))
        play['tasks'].extend(self.deploy.get_service_tasks(tags=['start']))
        play['tasks'].extend(self.deploy.get_deployment_tasks(engine_state='stop', tags=['stop', 'restart']))
        play['tasks'].extend(self.deploy.get_deployment_tasks(tags=['start', 'restart']))
        play['tasks'].extend(self.deploy.get_pvc_tasks(tags=['start']))

        playbook = CommentedSeq()
        playbook.append(play)

        logger.debug(u'Created playbook to run project', playbook=playbook)
        return playbook
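
Building the play from CommentedMap/CommentedSeq rather than plain dict/list is what lets the yaml_set_comment_before_after_key annotations survive serialization. A stripped-down, standalone sketch (illustrative values, not the project's code):

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq

play = CommentedMap()
play['name'] = 'Manage the lifecycle of demo on Kubernetes'
play['hosts'] = 'localhost'
play['roles'] = CommentedSeq([CommentedMap([('role', 'kubernetes-modules')])])
play.yaml_set_comment_before_after_key(
    'roles', before='Include Ansible Kubernetes and OpenShift modules', indent=2)

playbook = CommentedSeq([play])
YAML().dump(playbook, sys.stdout)  # the comment renders above the roles key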
Example #4
    def __getitem__(self, key):
        if self._top is self:
            if key == "_top":
                return self._top

        val = CommentedMap.__getitem__(self, key)
        return self._evaluate_item(val)
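
CommentedMap subclasses like this one can intercept lookups while keeping full round-trip behavior. A hypothetical minimal version of the pattern, with _evaluate_item replaced by a trivial stand-in:

from ruamel.yaml.comments import CommentedMap

class EvaluatingMap(CommentedMap):
    def __getitem__(self, key):
        val = CommentedMap.__getitem__(self, key)
        # Stand-in for the example's self._evaluate_item(val).
        return val.upper() if isinstance(val, str) else val

m = EvaluatingMap([('greeting', 'hello')])
assert m['greeting'] == 'HELLO'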
Example #5
def inject_defaults(instance, schema, include_yaml_comments=False, yaml_indent=2, cls=None, *args, **kwargs):
    """
    Like the above validate_and_inject_defaults, but:
    
    1. Ignore schema validation errors and 'required' property errors
    
    2. If no default is given for a property, inject '{{NO_DEFAULT}}',
       even if the property isn't supposed to be a string.
       
    3. If include_yaml_comments is True, insert CommentedMap objects instead of ordinary dicts,
       and insert a comment above each key, with the contents of the property "description" in the schema.
    
    Args:
        instance:
            The Python object to inject defaults into.  May be an empty dict ({}).

        schema:
            The schema data to pull defaults from

        include_yaml_comments:
            Whether or not to return ruamel.yaml-compatible dicts so that
            comments will be written when the data is dumped to YAML.
    
        yaml_indent:
            To ensure correctly indented comments, you must specify the indent
            step you plan to use when this data is eventually dumped as yaml.
    
    Returns:
        A copy of instance, with default values injected, and comments if specified.
    """
    if cls is None:
        cls = validators.validator_for(schema)
    cls.check_schema(schema)

    # Add default-injection behavior to the validator
    extended_cls = extend_with_default_without_validation(cls, include_yaml_comments, yaml_indent)
    
    if include_yaml_comments:
        instance = CommentedMap(instance)
        instance.key_indent = 0 # monkey-patch!
    else:
        instance = dict(instance)
    
    # Inject defaults.
    extended_cls(schema, *args, **kwargs).validate(instance)
    return instance
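
A hedged usage sketch for inject_defaults (hypothetical schema; assumes the defining module is importable):

schema = {
    'type': 'object',
    'properties': {
        'threads': {
            'type': 'integer',
            'default': 4,
            'description': 'Number of worker threads',
        },
        'log-path': {'type': 'string'},
    },
}
config = inject_defaults({}, schema, include_yaml_comments=True)
# config is a CommentedMap: 'threads' defaults to 4 with a comment taken
# from 'description'; 'log-path', lacking a default, becomes '{{NO_DEFAULT}}'.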
Example #6
    def create(self, args, tccConfigObject):
        result = self.check(args)
        if not result:
            sys.exit()
        for attribute in self.mandatoryAttributes:
            setattr(self, attribute, getattr(args, attribute))
            if self.mandatoryAttributes[attribute] == "ref":
                refElementCategory = element_lookup[attribute]
                refElement = tcc.get(refElementCategory, getattr(args, attribute))
                back_refs = refElement.back_refs
                back_refs.append(args.name)
                for element in tccConfigObject[refElementCategory]:
                    if element["name"] == refElement.name:
                        element["back_refs"] = back_refs
        if not tccConfigObject:
            tccConfigObject = CommentedMap([(self.elementCategory, None)])
        elif self.elementCategory not in tccConfigObject:
            tccConfigObject.update(CommentedMap([(self.elementCategory, None)]))
        if not tccConfigObject[self.elementCategory]:
            tccConfigObject[self.elementCategory] = []
        attributes = copy.deepcopy(self.mandatoryAttributes)
        for element in self.mandatoryAttributes:
            attributes[element] = getattr(self, element)
        if hasattr(self, "back_refs"):
            attributes["back_refs"] = self.back_refs
        del self.mandatoryAttributes

        self.mgmtNetmask = tccConfigObject["Network"]["mgmt"]["netmask"]
        self.mgmtGateway = tccConfigObject["Network"]["mgmt"]["gateway"]
        self.mgmtDns = tccConfigObject["Network"]["mgmt"]["dns"]
        self.vxlanNetmask = tccConfigObject["Network"]["vxlan"]["netmask"]
        createFunction = getattr(self, self.createMethod)
        result = createFunction()
        if "Error" in result:
            print(result["Error"])
            return False
        if args.type == "service":
            terminalDict = {attributes["terminal"]: self.Id}
            attributes["terminal"] = []
            attributes["terminal"].append(terminalDict)
        else:
            if hasattr(self, "Id"):
                attributes["Id"] = self.Id
        tccConfigObject[self.elementCategory].append(attributes)
        self.updateYaml(tccConfigObject)
        return self
Example #7
    def create(self, args, tccConfigObject):
        result = self.check(args)
        if not result:
            sys.exit()
        for attribute in self.mandatoryAttributes:
            setattr(self, attribute, getattr(args, attribute))
            if self.mandatoryAttributes[attribute] == 'ref':
                refElementCategory = element_lookup[attribute]
                refElement = tcc.get(refElementCategory, getattr(args, attribute))
                back_refs = refElement.back_refs
                back_refs.append(args.name)
                for element in tccConfigObject[refElementCategory]:
                    if element['name'] == refElement.name:
                        element['back_refs'] = back_refs
        if not tccConfigObject:
            tccConfigObject = CommentedMap([(self.elementCategory, None)])
        elif self.elementCategory not in tccConfigObject:
            tccConfigObject.update(CommentedMap([(self.elementCategory, None)]))
        if not tccConfigObject[self.elementCategory]:
            tccConfigObject[self.elementCategory] = []
        attributes = copy.deepcopy(self.mandatoryAttributes)
        for element in self.mandatoryAttributes:
            attributes[element] = getattr(self, element)
        if hasattr(self, 'back_refs'):
            attributes['back_refs'] = self.back_refs
        del self.mandatoryAttributes

        self.mgmtNetmask = tccConfigObject['Network']['mgmt']['netmask']
        self.mgmtGateway = tccConfigObject['Network']['mgmt']['gateway']
        self.mgmtDns = tccConfigObject['Network']['mgmt']['dns']
        self.vxlanNetmask = tccConfigObject['Network']['vxlan']['netmask']
        createFunction = getattr(self, self.createMethod)
        result = createFunction()

        if 'Error' in result:
            print(result['Error'])
            return False
        if hasattr(self, 'Id'):
            attributes['Id'] = self.Id
        tccConfigObject[self.elementCategory].append(attributes)
        self.updateYaml(tccConfigObject)
        return self
Example #8
    def set_default_object_properties(validator, properties, instance, schema):
        for property, subschema in properties.items():
            if instance == "{{NO_DEFAULT}}":
                continue
            if "default" in subschema:
                default = copy.deepcopy(subschema["default"])
                
                if isinstance(default, list):
                    try:
                        # Lists of numbers should use 'flow style'
                        # and so should lists-of-lists of numbers
                        # (e.g. bounding boxes like [[0,0,0],[1,2,3]])
                        if ( subschema["items"]["type"] in ("integer", "number") or
                             ( subschema["items"]["type"] == "array" and 
                               subschema["items"]["items"]["type"] in ("integer", "number") ) ):
                            default = flow_style(default)
                    except KeyError:
                        pass
                
                if include_yaml_comments and isinstance(default, dict):
                    default = CommentedMap(default)
                    # To keep track of the current indentation level,
                    # we just monkey-patch this member onto the dict.
                    default.key_indent = instance.key_indent + yaml_indent
                    default.from_default = True
                if include_yaml_comments and isinstance(default, list):
                    if not isinstance(default, CommentedSeq):
                        default = CommentedSeq(copy.copy(default))
                    
                    # To keep track of the current indentation level,
                    # we just monkey-patch this member onto the list.
                    default.key_indent = instance.key_indent + yaml_indent
                    default.from_default = True
                if property not in instance:
                    instance[property] = default
            else:
                if property not in instance:
                    instance[property] = "{{NO_DEFAULT}}"

            if include_yaml_comments and "description" in subschema:
                comment = '\n' + subschema["description"]
                if comment[-1] == '\n':
                    comment = comment[:-1]
                instance.yaml_set_comment_before_after_key(property, comment, instance.key_indent)

        for _error in validate_properties(validator, properties, instance, schema):
            # Ignore validation errors
            pass
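
flow_style above is a helper from the surrounding module; the underlying ruamel.yaml mechanism is the flow attribute on a CommentedSeq. A standalone sketch of that mechanism:

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedSeq

box = CommentedSeq([[0, 0, 0], [1, 2, 3]])
box.fa.set_flow_style()
YAML().dump({'bounding-box': box}, sys.stdout)
# bounding-box: [[0, 0, 0], [1, 2, 3]]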
Example #9
    def generate_orchestration_playbook(self, url=None, namespace=None, settings=None, repository_prefix=None,
                                        pull_from_url=None, tag=None, vault_files=None, **kwargs):
        """
        Generate an Ansible playbook to orchestrate services.
        :param url: registry URL where images were pushed
        :param namespace: registry namespace
        :param settings: settings dict from container.yml
        :param repository_prefix: prefix to use for the image name
        :param pull_from_url: URL to pull from, if different from the push URL
        :param tag: image tag to use instead of the generated build stamp
        :param vault_files: vault files to add to the play's vars_files
        :return: playbook dict
        """

        def _update_service(service_name, service_config):
            if url and namespace:
                # Reference previously pushed image
                image_id = self.get_latest_image_id_for_service(service_name)
                if not image_id:
                    raise exceptions.AnsibleContainerConductorException(
                        u"Unable to get image ID for service {}. Did you forget to run "
                        u"`ansible-container build`?".format(service_name)
                    )
                image_tag = tag or self.get_build_stamp_for_image(image_id)
                if repository_prefix:
                    image_name = "{}-{}".format(repository_prefix, service_name)
                elif repository_prefix is None:
                    image_name = "{}-{}".format(self.project_name, service_name)
                else:
                    image_name = service_name
                repository = "{}/{}".format(namespace, image_name)
                image_name = "{}:{}".format(repository, image_tag)
                pull_url = pull_from_url if pull_from_url else url
                service_config['image'] = "{}/{}".format(pull_url.rstrip('/'), image_name)
            else:
                # We're using a local image, so check that the image was built
                image = self.get_latest_image_for_service(service_name)
                if image is None:
                    raise exceptions.AnsibleContainerConductorException(
                        u"No image found for service {}, make sure you've run `ansible-container "
                        u"build`".format(service_name)
                    )
                service_config['image'] = image.tags[0]

        for service_name, service in iteritems(self.services):
            # set the image property of each container
            if service.get('containers'):
                for container in service['containers']:
                    if container.get('roles'):
                        container_service_name = "{}-{}".format(service_name, container['container_name'])
                        _update_service(container_service_name, container)
                    else:
                        container['image'] = container['from']
            elif service.get('roles'):
                _update_service(service_name, service)
            else:
                service['image'] = service['from']

        play = CommentedMap()
        play['name'] = u'Manage the lifecycle of {} on {}'.format(self.project_name, self.display_name)
        play['hosts'] = 'localhost'
        play['gather_facts'] = 'no'
        play['connection'] = 'local'
        play['roles'] = CommentedSeq()
        play['vars_files'] = CommentedSeq()
        play['tasks'] = CommentedSeq()
        role = CommentedMap([
            ('role', 'ansible.kubernetes-modules')
        ])
        if vault_files:
            play['vars_files'].extend(vault_files)
        play['roles'].append(role)
        play.yaml_set_comment_before_after_key(
            'roles', before='Include Ansible Kubernetes and OpenShift modules', indent=4)
        play.yaml_set_comment_before_after_key('tasks', before='Tasks for setting the application state. '
                                               'Valid tags include: start, stop, restart, destroy', indent=4)
        play['tasks'].append(self.deploy.get_namespace_task(state='present', tags=['start']))
        play['tasks'].append(self.deploy.get_namespace_task(state='absent', tags=['destroy']))
        play['tasks'].extend(self.deploy.get_secret_tasks(tags=['start']))
        play['tasks'].extend(self.deploy.get_service_tasks(tags=['start']))
        play['tasks'].extend(self.deploy.get_deployment_tasks(engine_state='stop', tags=['stop', 'restart']))
        play['tasks'].extend(self.deploy.get_deployment_tasks(tags=['start', 'restart']))
        play['tasks'].extend(self.deploy.get_pvc_tasks(tags=['start']))

        playbook = CommentedSeq()
        playbook.append(play)

        logger.debug(u'Created playbook to run project', playbook=playbook)
        return playbook
Example #10
    def __init__(
        self,
        toolpath_object: CommentedMap,
        pos: int,
        loadingContext: LoadingContext,
        parentworkflowProv: Optional[ProvenanceProfile] = None,
    ) -> None:
        """Initialize this WorkflowStep."""
        debug = loadingContext.debug
        if "id" in toolpath_object:
            self.id = toolpath_object["id"]
        else:
            self.id = "#step" + str(pos)

        loadingContext = loadingContext.copy()

        parent_requirements = copy.deepcopy(
            getdefault(loadingContext.requirements, []))
        loadingContext.requirements = copy.deepcopy(
            toolpath_object.get("requirements", []))
        assert loadingContext.requirements is not None  # nosec
        for parent_req in parent_requirements:
            found_in_step = False
            for step_req in loadingContext.requirements:
                if parent_req["class"] == step_req["class"]:
                    found_in_step = True
                    break
            if not found_in_step:
                loadingContext.requirements.append(parent_req)
        loadingContext.requirements.extend(
            cast(
                List[CWLObjectType],
                get_overrides(getdefault(loadingContext.overrides_list, []),
                              self.id).get("requirements", []),
            ))

        hints = copy.deepcopy(getdefault(loadingContext.hints, []))
        hints.extend(toolpath_object.get("hints", []))
        loadingContext.hints = hints

        try:
            if isinstance(toolpath_object["run"], CommentedMap):
                self.embedded_tool = loadingContext.construct_tool_object(
                    toolpath_object["run"], loadingContext)  # type: Process
            else:
                loadingContext.metadata = {}
                self.embedded_tool = load_tool(toolpath_object["run"],
                                               loadingContext)
        except ValidationException as vexc:
            if loadingContext.debug:
                _logger.exception("Validation exception")
            raise WorkflowException(
                "Tool definition %s failed validation:\n%s" %
                (toolpath_object["run"], indent(str(vexc)))) from vexc

        validation_errors = []
        self.tool = toolpath_object = copy.deepcopy(toolpath_object)
        bound = set()

        if self.embedded_tool.get_requirement("SchemaDefRequirement")[0]:
            if "requirements" not in toolpath_object:
                toolpath_object["requirements"] = []
            toolpath_object["requirements"].append(
                self.embedded_tool.get_requirement("SchemaDefRequirement")[0])

        for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
            toolpath_object[toolfield] = []
            for index, step_entry in enumerate(toolpath_object[stepfield]):
                if isinstance(step_entry, str):
                    param = CommentedMap()  # type: CommentedMap
                    inputid = step_entry
                else:
                    param = CommentedMap(step_entry.items())
                    inputid = step_entry["id"]

                shortinputid = shortname(inputid)
                found = False
                for tool_entry in self.embedded_tool.tool[toolfield]:
                    frag = shortname(tool_entry["id"])
                    if frag == shortinputid:
                        # In the case that the step has a default for a parameter,
                        # we do not want the default of the tool to override it
                        step_default = None
                        if "default" in param and "default" in tool_entry:
                            step_default = param["default"]
                        param.update(tool_entry)
                        param["_tool_entry"] = tool_entry
                        if step_default is not None:
                            param["default"] = step_default
                        found = True
                        bound.add(frag)
                        break
                if not found:
                    if stepfield == "in":
                        param["type"] = "Any"
                        param["used_by_step"] = used_by_step(
                            self.tool, shortinputid)
                        param["not_connected"] = True
                    else:
                        if isinstance(step_entry, Mapping):
                            step_entry_name = step_entry["id"]
                        else:
                            step_entry_name = step_entry
                        validation_errors.append(
                            SourceLine(self.tool["out"],
                                       index,
                                       include_traceback=debug).
                            makeError(
                                "Workflow step output '%s' does not correspond to"
                                % shortname(step_entry_name)) + "\n" +
                            SourceLine(
                                self.embedded_tool.tool,
                                "outputs",
                                include_traceback=debug,
                            ).makeError("  tool output (expected '%s')" %
                                        ("', '".join([
                                            shortname(tool_entry["id"])
                                            for tool_entry in
                                            self.embedded_tool.tool["outputs"]
                                        ]))))
                param["id"] = inputid
                param.lc.line = toolpath_object[stepfield].lc.data[index][0]
                param.lc.col = toolpath_object[stepfield].lc.data[index][1]
                param.lc.filename = toolpath_object[stepfield].lc.filename
                toolpath_object[toolfield].append(param)

        missing_values = []
        for _, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
            if shortname(tool_entry["id"]) not in bound:
                if "null" not in tool_entry[
                        "type"] and "default" not in tool_entry:
                    missing_values.append(shortname(tool_entry["id"]))

        if missing_values:
            validation_errors.append(
                SourceLine(self.tool, "in", include_traceback=debug).makeError(
                    "Step is missing required parameter%s '%s'" % (
                        "s" if len(missing_values) > 1 else "",
                        "', '".join(missing_values),
                    )))

        if validation_errors:
            raise ValidationException("\n".join(validation_errors))

        super().__init__(toolpath_object, loadingContext)

        if self.embedded_tool.tool["class"] == "Workflow":
            (feature,
             _) = self.get_requirement("SubworkflowFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains embedded workflow but "
                    "SubworkflowFeatureRequirement not in requirements")

        if "scatter" in self.tool:
            (feature, _) = self.get_requirement("ScatterFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains scatter but ScatterFeatureRequirement "
                    "not in requirements")

            inputparms = copy.deepcopy(self.tool["inputs"])
            outputparms = copy.deepcopy(self.tool["outputs"])
            scatter = aslist(self.tool["scatter"])

            method = self.tool.get("scatterMethod")
            if method is None and len(scatter) != 1:
                raise ValidationException(
                    "Must specify scatterMethod when scattering over multiple inputs"
                )

            inp_map = {i["id"]: i for i in inputparms}
            for inp in scatter:
                if inp not in inp_map:
                    SourceLine(
                        self.tool, "scatter", ValidationException,
                        debug).makeError(
                            "Scatter parameter '%s' does not correspond to "
                            "an input parameter of this step, expecting '%s'" %
                            (
                                shortname(inp),
                                "', '".join(
                                    shortname(k) for k in inp_map.keys()),
                            ))

                inp_map[inp]["type"] = {
                    "type": "array",
                    "items": inp_map[inp]["type"]
                }

            if self.tool.get("scatterMethod") == "nested_crossproduct":
                nesting = len(scatter)
            else:
                nesting = 1

            for _ in range(0, nesting):
                for oparam in outputparms:
                    oparam["type"] = {"type": "array", "items": oparam["type"]}
            self.tool["inputs"] = inputparms
            self.tool["outputs"] = outputparms
        self.prov_obj = None  # type: Optional[ProvenanceProfile]
        if loadingContext.research_obj is not None:
            self.prov_obj = parentworkflowProv
            if self.embedded_tool.tool["class"] == "Workflow":
                self.parent_wf = self.embedded_tool.parent_wf
            else:
                self.parent_wf = self.prov_obj
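
The param.lc assignments near the end of the in/out loop copy source positions onto each synthesized CommentedMap so validation errors can cite the user's original YAML line. The lc bookkeeping in isolation (illustrative document, outside cwltool):

from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

doc = YAML().load("steps:\n  - id: step1\n  - id: step2\n")
steps = doc['steps']
param = CommentedMap(steps[1].items())
# Copy line/column info from the sequence entry onto the synthesized map.
param.lc.line = steps.lc.data[1][0]
param.lc.col = steps.lc.data[1][1]
print(param.lc.line, param.lc.col)  # position of the second entry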
Example #11
    def _init_job(self, joborder, **kwargs):
        # type: (Dict[Text, Text], **Any) -> Builder
        """
        kwargs:

        eval_timeout: javascript evaluation timeout
        use_container: do/don't use Docker when DockerRequirement hint provided
        make_fs_access: make an FsAccess() object with given basedir
        basedir: basedir for FsAccess
        docker_outdir: output directory inside docker for this job
        docker_tmpdir: tmpdir inside docker for this job
        docker_stagedir: stagedir inside docker for this job
        outdir: outdir on host for this job
        tmpdir: tmpdir on host for this job
        stagedir: stagedir on host for this job
        select_resources: callback to select compute resources
        """

        builder = Builder()
        builder.job = cast(Dict[Text, Union[Dict[Text, Any], List, Text]],
                           copy.deepcopy(joborder))

        # Validate job order
        try:
            fillInDefaults(self.tool[u"inputs"], builder.job)
            normalizeFilesDirs(builder.job)
            validate.validate_ex(
                self.names.get_name("input_record_schema", ""), builder.job)
        except (validate.ValidationException, WorkflowException) as e:
            raise WorkflowException("Invalid job input record:\n" + Text(e))

        builder.files = []
        builder.bindings = CommentedSeq()
        builder.schemaDefs = self.schemaDefs
        builder.names = self.names
        builder.requirements = self.requirements
        builder.hints = self.hints
        builder.resources = {}
        builder.timeout = kwargs.get("eval_timeout")
        builder.debug = kwargs.get("debug")

        dockerReq, is_req = self.get_requirement("DockerRequirement")

        if dockerReq and is_req and not kwargs.get("use_container"):
            raise WorkflowException(
                "Document has DockerRequirement under 'requirements' but use_container is false.  DockerRequirement must be under 'hints' or use_container must be true."
            )

        builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
        builder.fs_access = builder.make_fs_access(kwargs["basedir"])

        loadListingReq, _ = self.get_requirement("LoadListingRequirement")
        if loadListingReq:
            builder.loadListing = loadListingReq.get("loadListing")

        if dockerReq and kwargs.get("use_container"):
            builder.outdir = builder.fs_access.realpath(
                dockerReq.get("dockerOutputDirectory")
                or kwargs.get("docker_outdir") or "/var/spool/cwl")
            builder.tmpdir = builder.fs_access.realpath(
                kwargs.get("docker_tmpdir") or "/tmp")
            builder.stagedir = builder.fs_access.realpath(
                kwargs.get("docker_stagedir") or "/var/lib/cwl")
        else:
            builder.outdir = builder.fs_access.realpath(
                kwargs.get("outdir") or tempfile.mkdtemp())
            builder.tmpdir = builder.fs_access.realpath(
                kwargs.get("tmpdir") or tempfile.mkdtemp())
            builder.stagedir = builder.fs_access.realpath(
                kwargs.get("stagedir") or tempfile.mkdtemp())

        if self.formatgraph:
            for i in self.tool["inputs"]:
                d = shortname(i["id"])
                if d in builder.job and i.get("format"):
                    checkFormat(builder.job[d], builder.do_eval(i["format"]),
                                self.formatgraph)

        builder.bindings.extend(
            builder.bind_input(self.inputs_record_schema, builder.job))

        if self.tool.get("baseCommand"):
            for n, b in enumerate(aslist(self.tool["baseCommand"])):
                builder.bindings.append({
                    "position": [-1000000, n],
                    "datum": b
                })

        if self.tool.get("arguments"):
            for i, a in enumerate(self.tool["arguments"]):
                lc = self.tool["arguments"].lc.data[i]
                fn = self.tool["arguments"].lc.filename
                builder.bindings.lc.add_kv_line_col(len(builder.bindings), lc)
                if isinstance(a, dict):
                    a = copy.copy(a)
                    if a.get("position"):
                        a["position"] = [a["position"], i]
                    else:
                        a["position"] = [0, i]
                    builder.bindings.append(a)
                elif ("$(" in a) or ("${" in a):
                    cm = CommentedMap((("position", [0, i]), ("valueFrom", a)))
                    cm.lc.add_kv_line_col("valueFrom", lc)
                    cm.lc.filename = fn
                    builder.bindings.append(cm)
                else:
                    cm = CommentedMap((("position", [0, i]), ("datum", a)))
                    cm.lc.add_kv_line_col("datum", lc)
                    cm.lc.filename = fn
                    builder.bindings.append(cm)

        builder.bindings.sort(key=lambda a: a["position"])

        builder.resources = self.evalResources(builder, kwargs)

        return builder
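
add_kv_line_col attaches a source position to a key of a synthesized CommentedMap, so later error messages can cite the line of the original arguments entry. The call in isolation (hypothetical positions and filename):

from ruamel.yaml.comments import CommentedMap

lc = [12, 8]  # hypothetical [line, col] taken from a parsed document
cm = CommentedMap((('position', [0, 0]), ('valueFrom', '$(inputs.x)')))
cm.lc.add_kv_line_col('valueFrom', lc)
cm.lc.filename = 'tool.cwl'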
Example #12
 def to_disk(self, force=False):
     result = CommentedMap()
     result['name'] = self.name
     result['pick'] = self.pick_count
     result['points'] = self.question_points
     return result
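
CommentedMap preserves insertion order when dumped, so the serialized YAML always lists name, pick, points in that order. A standalone illustration:

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

result = CommentedMap()
result['name'] = 'quiz-1'
result['pick'] = 3
result['points'] = 10
YAML().dump(result, sys.stdout)  # name, pick, points, in insertion order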
Example #13
    def build_dockerswarm(all_layers: List['YamlComposer.Layer'],
                          docker_img: str = 'gnes/gnes:alpine-latest',
                          volumes: Dict = None,
                          networks: Dict = None) -> str:
        with resource_stream(
                'gnes', '/'.join(
                    ('resources', 'compose', 'gnes-swarm.yml'))) as r:
            swarm_lines = _yaml.load(r)
        config_dict = {}
        for l_idx, layer in enumerate(all_layers):
            for c_idx, c in enumerate(layer.components):
                c_name = '%s%d%d' % (c['name'], l_idx, c_idx)
                args = [
                    '--%s %s' % (a, str(v) if ' ' not in str(v) else
                                 ('"%s"' % str(v))) for a, v in c.items()
                    if a in YamlComposer.comp2args[c['name']]
                    and a != 'yaml_path' and v
                ]
                if 'yaml_path' in c and c['yaml_path'] is not None:
                    if c['yaml_path'].endswith(
                            '.yml') or c['yaml_path'].endswith('.yaml'):
                        args.append('--yaml_path /%s_yaml' % c_name)
                        config_dict['%s_yaml' % c_name] = {
                            'file': c['yaml_path']
                        }
                    else:
                        args.append('--yaml_path %s' % c['yaml_path'])

                if l_idx + 1 < len(all_layers):
                    next_layer = all_layers[l_idx + 1]
                    _l_idx = l_idx + 1
                else:
                    next_layer = all_layers[0]
                    _l_idx = 0
                host_out_name = ''
                for _c_idx, _c in enumerate(next_layer.components):
                    if _c['port_in'] == c['port_out']:
                        host_out_name = '%s%d%d' % (_c['name'], _l_idx, _c_idx)
                        break

                if l_idx - 1 >= 0:
                    last_layer = all_layers[l_idx - 1]
                    _l_idx = l_idx - 1
                else:
                    last_layer = all_layers[-1]
                    _l_idx = len(all_layers) - 1

                host_in_name = ''
                for _c_idx, _c in enumerate(last_layer.components):
                    if _c['port_out'] == c['port_in']:
                        host_in_name = '%s%d%d' % (_c['name'], _l_idx, _c_idx)
                        break

                if 'BIND' not in c['socket_out']:
                    args.append('--host_out %s' % host_out_name)
                if 'BIND' not in c['socket_in']:
                    args.append('--host_in %s' % host_in_name)

                cmd = '%s %s' % (YamlComposer.comp2file[c['name']],
                                 ' '.join(args))
                swarm_lines['services'][c_name] = CommentedMap({
                    'image': docker_img,
                    'command': cmd,
                })

                rep_c = YamlComposer.Layer.get_value(c, 'replicas')
                if rep_c > 1:
                    swarm_lines['services'][c_name]['deploy'] = CommentedMap({
                        'replicas':
                        YamlComposer.Layer.get_value(c, 'replicas'),
                        'restart_policy': {
                            'condition': 'on-failure',
                            'max_attempts': 3,
                        }
                    })

                if 'yaml_path' in c and c['yaml_path'] is not None \
                        and (c['yaml_path'].endswith('.yml') or c['yaml_path'].endswith('.yaml')):
                    swarm_lines['services'][c_name]['configs'] = [
                        '%s_yaml' % c_name
                    ]

                if c['name'] == 'Frontend':
                    swarm_lines['services'][c_name]['ports'] = [
                        '%d:%d' % (c['grpc_port'], c['grpc_port'])
                    ]

        if volumes:
            swarm_lines['volumes'] = volumes
        if networks:
            swarm_lines['networks'] = networks
        swarm_lines['configs'] = config_dict
        stream = StringIO()
        _yaml.dump(swarm_lines, stream)
        return stream.getvalue().strip()
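
The closing StringIO dance is the usual way to get ruamel.yaml output as a string rather than a file. Isolated, with illustrative content:

from io import StringIO
from ruamel.yaml import YAML

_yaml = YAML()
stream = StringIO()
_yaml.dump({'services': {'frontend': {'image': 'gnes/gnes:alpine-latest'}}}, stream)
compose_text = stream.getvalue().strip()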
Example #14
    def update(self) -> None:
        base = self.load_base()
        if not base:
            return

        def copy(from_path, to_path=None) -> None:
            if from_path in self:
                base[to_path or from_path] = self[from_path]

        def copy_dict(from_path, to_path=None, override_existing_map=True) -> None:
            if from_path in self:
                to_path = to_path or from_path
                if override_existing_map or to_path not in base:
                    base[to_path] = CommentedMap()
                for key, value in self[from_path].items():
                    base[to_path][key] = value

        copy("homeserver.address")
        copy("homeserver.verify_ssl")
        copy("homeserver.domain")

        if "appservice.protocol" in self and "appservice.address" not in self:
            protocol, hostname, port = (self["appservice.protocol"], self["appservice.hostname"],
                                        self["appservice.port"])
            base["appservice.address"] = f"{protocol}://{hostname}:{port}"
        else:
            copy("appservice.address")
        copy("appservice.hostname")
        copy("appservice.port")
        copy("appservice.max_body_size")

        copy("appservice.database")
        copy("appservice.sqlalchemy_core_mode")

        copy("appservice.public.enabled")
        copy("appservice.public.prefix")
        copy("appservice.public.external")

        copy("appservice.provisioning.enabled")
        copy("appservice.provisioning.prefix")
        copy("appservice.provisioning.shared_secret")
        if base["appservice.provisioning.shared_secret"] == "generate":
            base["appservice.provisioning.shared_secret"] = self._new_token()

        copy("appservice.id")
        copy("appservice.bot_username")
        copy("appservice.bot_displayname")
        copy("appservice.bot_avatar")

        copy("appservice.as_token")
        copy("appservice.hs_token")

        copy("bridge.username_template")
        copy("bridge.alias_template")
        copy("bridge.displayname_template")

        copy("bridge.displayname_preference")

        copy("bridge.edits_as_replies")
        copy("bridge.highlight_edits")
        if isinstance(self["bridge.bridge_notices"], bool):
            base["bridge.bridge_notices"] = {
                "default": self["bridge.bridge_notices"],
                "exceptions": ["@importantbot:example.com"],
            }
        else:
            copy("bridge.bridge_notices")
        copy("bridge.bot_messages_as_notices")
        copy("bridge.max_initial_member_sync")
        copy("bridge.sync_channel_members")
        copy("bridge.sync_matrix_state")
        copy("bridge.max_telegram_delete")
        copy("bridge.allow_matrix_login")
        copy("bridge.inline_images")
        copy("bridge.plaintext_highlights")
        copy("bridge.public_portals")
        copy("bridge.native_stickers")
        copy("bridge.catch_up")
        copy("bridge.sync_with_custom_puppets")
        copy("bridge.telegram_link_preview")

        copy("bridge.deduplication.pre_db_check")
        copy("bridge.deduplication.cache_queue_length")

        if "bridge.message_formats.m_text" in self:
            del self["bridge.message_formats"]
        copy_dict("bridge.message_formats", override_existing_map=False)
        copy("bridge.state_event_formats.join")
        copy("bridge.state_event_formats.leave")
        copy("bridge.state_event_formats.name_change")

        copy("bridge.filter.mode")
        copy("bridge.filter.list")

        copy("bridge.command_prefix")

        migrate_permissions = ("bridge.permissions" not in self
                               or "bridge.whitelist" in self
                               or "bridge.admins" in self)
        if migrate_permissions:
            permissions = self["bridge.permissions"] or CommentedMap()
            for entry in self["bridge.whitelist"] or []:
                permissions[entry] = "full"
            for entry in self["bridge.admins"] or []:
                permissions[entry] = "admin"
            base["bridge.permissions"] = permissions
        else:
            copy_dict("bridge.permissions")

        if "bridge.relaybot" not in self:
            copy("bridge.authless_relaybot_portals", "bridge.relaybot.authless_portals")
        else:
            copy("bridge.relaybot.authless_portals")
            copy("bridge.relaybot.whitelist_group_admins")
            copy("bridge.relaybot.whitelist")
            copy("bridge.relaybot.ignore_own_incoming_events")

        copy("telegram.api_id")
        copy("telegram.api_hash")
        copy("telegram.bot_token")
        copy("telegram.server.enabled")
        copy("telegram.server.dc")
        copy("telegram.server.ip")
        copy("telegram.server.port")
        copy("telegram.proxy.type")
        copy("telegram.proxy.address")
        copy("telegram.proxy.port")
        copy("telegram.proxy.rdns")
        copy("telegram.proxy.username")
        copy("telegram.proxy.password")

        if "appservice.debug" in self and "logging" not in self:
            level = "DEBUG" if self["appservice.debug"] else "INFO"
            base["logging.root.level"] = level
            base["logging.loggers.mau.level"] = level
            base["logging.loggers.telethon.level"] = level
        else:
            copy("logging")

        self._data = base._data
        self.save()
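
The bare self["homeserver.address"] lookups assume a config base class with dotted-path access into nested CommentedMaps (mautrix-style RecursiveDict). A hedged, minimal reimplementation of that access pattern:

from ruamel.yaml.comments import CommentedMap

def recursive_get(data, path):
    # Walk 'a.b.c' one segment at a time through nested mappings.
    node = data
    for part in path.split('.'):
        if not isinstance(node, dict) or part not in node:
            return None
        node = node[part]
    return node

cfg = CommentedMap({'appservice': CommentedMap({'port': 29317})})
assert recursive_get(cfg, 'appservice.port') == 29317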
Example #15
    def resolve_ref(
        self,
        ref: ResolveType,
        base_url: Optional[str] = None,
        checklinks: bool = True,
        strict_foreign_properties: bool = False,
        content_types: Optional[List[str]] = None,  # Expected content-types
    ) -> ResolvedRefType:

        lref = ref
        obj = None  # type: Optional[CommentedMap]
        resolved_obj = None  # type: ResolveType
        inc = False
        mixin = None  # type: Optional[MutableMapping[str, str]]

        if not base_url:
            base_url = file_uri(os.getcwd()) + "/"

        sl = SourceLine(None, None)
        # If `ref` is a dict, look for special directives.
        if isinstance(lref, CommentedMap):
            obj = lref
            if "$import" in obj:
                sl = SourceLine(obj, "$import")
                if len(obj) == 1:
                    lref = obj["$import"]
                    obj = None
                else:
                    raise ValidationException(
                        f"'$import' must be the only field in {obj}", sl
                    )
            elif "$include" in obj:
                sl = SourceLine(obj, "$include")
                if len(obj) == 1:
                    lref = obj["$include"]
                    inc = True
                    obj = None
                else:
                    raise ValidationException(
                        f"'$include' must be the only field in {obj}", sl
                    )
            elif "$mixin" in obj:
                sl = SourceLine(obj, "$mixin")
                lref = obj["$mixin"]
                mixin = obj
                obj = None
            else:
                lref = None
                for identifier in self.identifiers:
                    if identifier in obj:
                        lref = obj[identifier]
                        break
                if not lref:
                    raise ValidationException(
                        "Object `{}` does not have identifier field in {}".format(
                            obj, self.identifiers
                        ),
                        sl,
                    )

        if not isinstance(lref, str):
            raise ValidationException(
                f"Expected CommentedMap or string, got {type(lref)}: `{lref}`"
            )

        if isinstance(lref, str) and os.sep == "\\":
            # Convert Windows path separator in ref
            lref = lref.replace("\\", "/")

        url = self.expand_url(lref, base_url, scoped_id=(obj is not None))
        # Has this reference been loaded already?
        if url in self.idx and (not mixin):
            resolved_obj = self.idx[url]
            if isinstance(resolved_obj, MutableMapping):
                metadata = self.idx.get(
                    urllib.parse.urldefrag(url)[0], CommentedMap()
                )  # type: Union[CommentedMap, CommentedSeq, str, None]
                if isinstance(metadata, MutableMapping):
                    if "$graph" in resolved_obj:
                        metadata = _copy_dict_without_key(resolved_obj, "$graph")
                        return resolved_obj["$graph"], metadata
                    else:
                        return resolved_obj, metadata
                else:
                    raise ValidationException(
                        "Expected CommentedMap, got {}: `{}`".format(
                            type(metadata), metadata
                        )
                    )
            elif isinstance(resolved_obj, MutableSequence):
                metadata = self.idx.get(urllib.parse.urldefrag(url)[0], CommentedMap())
                if isinstance(metadata, MutableMapping):
                    return resolved_obj, metadata
                else:
                    return resolved_obj, CommentedMap()
            elif isinstance(resolved_obj, str):
                return resolved_obj, CommentedMap()
            else:
                raise ValidationException(
                    "Expected MutableMapping or MutableSequence, got {}: `{}`".format(
                        type(resolved_obj), resolved_obj
                    )
                )

        # "$include" directive means load raw text
        if inc:
            return self.fetch_text(url), CommentedMap()

        doc = None
        if isinstance(obj, MutableMapping):
            for identifier in self.identifiers:
                obj[identifier] = url
            doc_url = url
        else:
            # Load structured document
            doc_url, frg = urllib.parse.urldefrag(url)
            if doc_url in self.idx and (not mixin):
                # If the base document is in the index, it was already loaded,
                # so if we didn't find the reference earlier then it must not
                # exist.
                raise ValidationException(
                    f"Reference `#{frg}` not found in file `{doc_url}`.", sl
                )
            doc = self.fetch(
                doc_url, inject_ids=(not mixin), content_types=content_types
            )

        # Recursively expand urls and resolve directives
        if bool(mixin):
            doc = copy.deepcopy(doc)
            if isinstance(doc, CommentedMap) and mixin is not None:
                doc.update(mixin)
                del doc["$mixin"]
            resolved_obj, metadata = self.resolve_all(
                doc,
                base_url,
                file_base=doc_url,
                checklinks=checklinks,
                strict_foreign_properties=strict_foreign_properties,
            )
        else:
            resolved_obj, metadata = self.resolve_all(
                doc or obj,
                doc_url,
                checklinks=checklinks,
                strict_foreign_properties=strict_foreign_properties,
            )

        # Requested reference should be in the index now, otherwise it's a bad
        # reference
        if not bool(mixin):
            if url in self.idx:
                resolved_obj = self.idx[url]
            else:
                raise ValidationException(
                    "Reference `{}` is not in the index. Index contains: {}".format(
                        url, ", ".join(self.idx)
                    )
                )

        if isinstance(resolved_obj, CommentedMap):
            if "$graph" in resolved_obj:
                metadata = _copy_dict_without_key(resolved_obj, "$graph")
                return resolved_obj["$graph"], metadata
            else:
                return resolved_obj, metadata
        else:
            return resolved_obj, metadata
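
Hypothetical usage of the resolver above (assumes schema-salad is installed and the referenced file exists). Note that directive maps must be CommentedMap instances, as the isinstance check above requires:

from ruamel.yaml.comments import CommentedMap
from schema_salad.ref_resolver import Loader

loader = Loader({})  # empty context
ref = CommentedMap([('$include', 'notes.txt')])
# '$include' returns the referenced file's raw text plus empty metadata.
text, metadata = loader.resolve_ref(ref, base_url='file:///tmp/project/')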
Example #16
    def _init_job(self, joborder, runtimeContext):
        # type: (MutableMapping[Text, Text], RuntimeContext) -> Builder

        job = cast(Dict[Text, Union[Dict[Text, Any], List[Any], Text, None]],
                   copy.deepcopy(joborder))

        make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
        fs_access = make_fs_access(runtimeContext.basedir)

        # Validate job order
        try:
            fill_in_defaults(self.tool[u"inputs"], job, fs_access)
            normalizeFilesDirs(job)
            validate.validate_ex(self.names.get_name("input_record_schema",
                                                     ""),
                                 job,
                                 strict=False,
                                 logger=_logger_validation_warnings)
        except (validate.ValidationException, WorkflowException) as e:
            raise WorkflowException("Invalid job input record:\n" + Text(e))

        files = []  # type: List[Dict[Text, Text]]
        bindings = CommentedSeq()
        tmpdir = u""
        stagedir = u""

        loadListingReq, _ = self.get_requirement(
            "http://commonwl.org/cwltool#LoadListingRequirement")
        if loadListingReq:
            loadListing = loadListingReq.get("loadListing")
        else:
            loadListing = "deep_listing"  # will default to "no_listing" in CWL v1.1

        dockerReq, _ = self.get_requirement("DockerRequirement")
        defaultDocker = None

        if dockerReq is None and runtimeContext.default_container:
            defaultDocker = runtimeContext.default_container

        if (dockerReq or defaultDocker) and runtimeContext.use_container:
            if dockerReq:
                # Check if docker output directory is absolute
                if dockerReq.get("dockerOutputDirectory") and \
                        dockerReq.get("dockerOutputDirectory").startswith('/'):
                    outdir = dockerReq.get("dockerOutputDirectory")
                else:
                    outdir = dockerReq.get("dockerOutputDirectory") or \
                        runtimeContext.docker_outdir or random_outdir()
            elif defaultDocker:
                outdir = runtimeContext.docker_outdir or random_outdir()
            tmpdir = runtimeContext.docker_tmpdir or "/tmp"
            stagedir = runtimeContext.docker_stagedir or "/var/lib/cwl"
        else:
            outdir = fs_access.realpath(
                runtimeContext.outdir or tempfile.mkdtemp(prefix=getdefault(
                    runtimeContext.tmp_outdir_prefix, DEFAULT_TMP_PREFIX)))
            if self.tool[u"class"] != 'Workflow':
                tmpdir = fs_access.realpath(runtimeContext.tmpdir
                                            or tempfile.mkdtemp())
                stagedir = fs_access.realpath(runtimeContext.stagedir
                                              or tempfile.mkdtemp())

        builder = Builder(
            job, files, bindings, self.schemaDefs, self.names,
            self.requirements, self.hints, runtimeContext.eval_timeout,
            runtimeContext.debug, {}, runtimeContext.js_console,
            runtimeContext.mutation_manager, self.formatgraph, make_fs_access,
            fs_access, runtimeContext.force_docker_pull, loadListing, outdir,
            tmpdir, stagedir, runtimeContext.job_script_provider)

        bindings.extend(
            builder.bind_input(self.inputs_record_schema,
                               job,
                               discover_secondaryFiles=getdefault(
                                   runtimeContext.toplevel, False)))

        if self.tool.get("baseCommand"):
            for n, b in enumerate(aslist(self.tool["baseCommand"])):
                bindings.append({"position": [-1000000, n], "datum": b})

        if self.tool.get("arguments"):
            for i, a in enumerate(self.tool["arguments"]):
                lc = self.tool["arguments"].lc.data[i]
                fn = self.tool["arguments"].lc.filename
                bindings.lc.add_kv_line_col(len(bindings), lc)
                if isinstance(a, MutableMapping):
                    a = copy.deepcopy(a)
                    if a.get("position"):
                        a["position"] = [a["position"], i]
                    else:
                        a["position"] = [0, i]
                    bindings.append(a)
                elif ("$(" in a) or ("${" in a):
                    cm = CommentedMap((("position", [0, i]), ("valueFrom", a)))
                    cm.lc.add_kv_line_col("valueFrom", lc)
                    cm.lc.filename = fn
                    bindings.append(cm)
                else:
                    cm = CommentedMap((("position", [0, i]), ("datum", a)))
                    cm.lc.add_kv_line_col("datum", lc)
                    cm.lc.filename = fn
                    bindings.append(cm)

        # use python2 like sorting of heterogeneous lists
        # (containing str and int types),
        # TODO: unify for both runtime
        if PY3:
            key = functools.cmp_to_key(cmp_like_py2)
        else:  # PY2
            key = lambda d: d["position"]

        # This awkward construction replaces the contents of
        # "bindings" in place (because Builder expects it to be
        # mutated in place, sigh, I'm sorry) with its contents sorted,
        # supporting different versions of Python and ruamel.yaml with
        # different behaviors/bugs in CommentedSeq.
        bd = copy.deepcopy(bindings)
        del bindings[:]
        bindings.extend(sorted(bd, key=key))

        if self.tool[u"class"] != 'Workflow':
            builder.resources = self.evalResources(builder, runtimeContext)
        return builder
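
The deepcopy/del/extend dance above, in isolation: it sorts a CommentedSeq without rebinding the name, so every holder of the bindings reference sees the sorted contents:

import copy
from ruamel.yaml.comments import CommentedSeq

bindings = CommentedSeq([{'position': [0, 2]}, {'position': [0, 1]}])
snapshot = copy.deepcopy(bindings)
del bindings[:]  # clear in place, keeping the same object
bindings.extend(sorted(snapshot, key=lambda d: d['position']))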
Example #17
def pack(
    document_loader,  # type: Loader
    processobj,  # type: Union[Dict[Text, Any], List[Dict[Text, Any]]]
    uri,  # type: Text
    metadata,  # type: Dict[Text, Text]
    rewrite_out=None  # type: Dict[Text, Text]
):  # type: (...) -> Dict[Text, Any]

    document_loader = SubLoader(document_loader)
    document_loader.idx = {}
    if isinstance(processobj, MutableMapping):
        document_loader.idx[processobj["id"]] = CommentedMap(
            iteritems(processobj))
    elif isinstance(processobj, MutableSequence):
        _, frag = urllib.parse.urldefrag(uri)
        for po in processobj:
            if not frag:
                if po["id"].endswith("#main"):
                    uri = po["id"]
            document_loader.idx[po["id"]] = CommentedMap(iteritems(po))
        document_loader.idx[metadata["id"]] = CommentedMap(iteritems(metadata))

    def loadref(base, uri):
        # type: (Optional[Text], Text) -> Union[Dict, List, Text, None]
        return document_loader.resolve_ref(uri, base_url=base)[0]

    ids = set()  # type: Set[Text]
    find_ids(processobj, ids)

    runs = {uri}
    find_run(processobj, loadref, runs)

    for f in runs:
        find_ids(document_loader.resolve_ref(f)[0], ids)

    names = set()  # type: Set[Text]
    if rewrite_out is None:
        rewrite = {}  # type: Dict[Text, Text]
    else:
        rewrite = rewrite_out

    mainpath, _ = urllib.parse.urldefrag(uri)

    def rewrite_id(r, mainuri):
        # type: (Text, Text) -> None
        if r == mainuri:
            rewrite[r] = "#main"
        elif r.startswith(mainuri) and r[len(mainuri)] in ("#", "/"):
            if r[len(mainuri):].startswith("#main/"):
                rewrite[r] = "#" + uniquename(r[len(mainuri) + 1:], names)
            else:
                rewrite[r] = "#" + uniquename("main/" + r[len(mainuri) + 1:],
                                              names)
        else:
            path, frag = urllib.parse.urldefrag(r)
            if path == mainpath:
                rewrite[r] = "#" + uniquename(frag, names)
            else:
                if path not in rewrite:
                    rewrite[path] = "#" + uniquename(shortname(path), names)

    sortedids = sorted(ids)

    for r in sortedids:
        rewrite_id(r, uri)

    packed = {
        "$graph": [],
        "cwlVersion": metadata["cwlVersion"]
    }  # type: Dict[Text, Any]
    namespaces = metadata.get('$namespaces', None)

    schemas = set()  # type: Set[Text]
    if '$schemas' in metadata:
        for each_schema in metadata["$schemas"]:
            schemas.add(each_schema)
    for r in sorted(runs):
        dcr, metadata = document_loader.resolve_ref(r)
        if isinstance(dcr, CommentedSeq):
            dcr = dcr[0]
            dcr = cast(CommentedMap, dcr)
        if not isinstance(dcr, MutableMapping):
            continue
        metadata = cast(Dict[Text, Any], metadata)
        if "$schemas" in metadata:
            for s in metadata["$schemas"]:
                schemas.add(s)
        if dcr.get("class") not in ("Workflow", "CommandLineTool",
                                    "ExpressionTool"):
            continue
        dc = cast(Dict[Text, Any], copy.deepcopy(dcr))
        v = rewrite[r]
        dc["id"] = v
        for n in ("name", "cwlVersion", "$namespaces", "$schemas"):
            if n in dc:
                del dc[n]
        packed["$graph"].append(dc)

    if schemas:
        packed["$schemas"] = list(schemas)

    for r in list(rewrite.keys()):
        v = rewrite[r]
        replace_refs(packed, rewrite, r + "/" if "#" in r else r + "#",
                     v + "/")

    import_embed(packed, set())

    if len(packed["$graph"]) == 1:
        # duplicate 'cwlVersion' and '$schemas' inside $graph when there is
        # only a single item, because we will print the contents of '$graph'
        # rather than the whole dict
        packed["$graph"][0]["cwlVersion"] = packed["cwlVersion"]
        if schemas:
            packed["$graph"][0]["$schemas"] = list(schemas)
    # always include $namespaces in the #main
    if namespaces:
        packed["$graph"][0]["$namespaces"] = dict(cast(Dict, namespaces))

    return packed
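A data-level sketch of the id-rewriting scheme implemented by rewrite_id above; the URI and fragment are hypothetical, but the mapping follows directly from the code:

from urllib.parse import urldefrag

mainuri = "file:///work/wf.cwl"            # hypothetical main document URI
r = "file:///work/wf.cwl#step1/out"        # an id inside that document
path, frag = urldefrag(r)
assert (path, frag) == ("file:///work/wf.cwl", "step1/out")
# rewrite_id(r, mainuri) records rewrite[r] == "#main/step1/out"
# (via uniquename, which also guards against name collisions)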
Example No. 18
def main(args: List[str]) -> None:
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    all_evidence = []

    log_records_all_files: Iterable[LogRecord] = \
        itertools.chain.from_iterable(parse_log_file(path) for path in args[1:])
    # noinspection PyTypeHints
    incoming_ips: DefaultDict[bytes, DefaultDict[InstanceUserAgent, TimeWindowAcc]] = \
        DefaultDict(lambda: DefaultDict(TimeWindowAcc))

    for log_record in log_records_all_files:
        if log_record.user_agent is None:
            continue
        instance_user_agent = classify_user_agent(log_record.user_agent)
        if instance_user_agent is None:
            continue
        incoming_ips[log_record.ip][instance_user_agent].add(
            log_record.timestamp)

    possible_instance_ips: Set[bytes] = set(incoming_ips.keys())
    possible_instance_hostnames: Set[str] = set()
    possible_instance_hostnames_and_ports: Set[Tuple[str, int]] = set()

    for ip in incoming_ips.keys():
        for instance_user_agent in incoming_ips[ip].keys():
            time_window = incoming_ips[ip][instance_user_agent]

            if instance_user_agent.url is not None:
                hostname_and_port = extract_hostname_and_port(
                    instance_user_agent.url)
                if hostname_and_port is not None:
                    hostname, port = hostname_and_port

                    possible_instance_hostnames.add(hostname)
                    possible_instance_hostnames_and_ports.add(
                        hostname_and_port)

                    all_evidence.append(
                        UserAgentEvidence(
                            ip=ip,
                            hostname=hostname,
                            domain=get_domain(hostname),
                            port=port,
                            instance_user_agent=instance_user_agent,
                            time_window=time_window,
                        ))

    for ip in possible_instance_ips:
        ip_str = fmt_ip(ip)
        try:
            time = datetime.now(timezone.utc)
            hostname, aliases, addresses = socket.gethostbyaddr(ip_str)
            aliases = [
                alias for alias in aliases
                if not alias.endswith('.in-addr.arpa')
                and not alias.endswith('.ip6.arpa')
            ]
            if addresses != [ip_str]:
                # TODO: when would this happen?
                logger.warning(
                    '%(ip_str)s resolved to multiple IPs: %(addresses)r', {
                        'ip_str': ip_str,
                        'addresses': addresses
                    })

            for alias in [hostname] + aliases:
                all_evidence.append(
                    ReverseDNSEvidence(
                        ip=ip,
                        hostname=alias,
                        domain=get_domain(alias),
                        time=time,
                    ))
        except OSError:
            logger.warning("Exception on reverse DNS lookup for %(ip_str)s!",
                           {'ip_str': ip_str},
                           exc_info=True)

    for hostname in possible_instance_hostnames:
        try:
            time = datetime.now(timezone.utc)
            # noinspection PyArgumentList
            for af, _, _, _, sockaddr in socket.getaddrinfo(
                    hostname,
                    None,
                    family=socket.AF_INET,
                    type=socket.SOCK_STREAM,
                    proto=socket.IPPROTO_IP):
                ip_str = sockaddr[0]
                ip = socket.inet_pton(af, ip_str)
                all_evidence.append(
                    ForwardDNSEvidence(
                        ip=ip,
                        hostname=hostname,
                        domain=get_domain(hostname),
                        time=time,
                    ))
        except OSError:
            logger.warning("Exception on forward DNS lookup for %(hostname)s!",
                           {'hostname': hostname},
                           exc_info=True)

    for hostname, port in possible_instance_hostnames_and_ports:
        logger.info("%s:%d", hostname, port)  # DEBUG
        time = datetime.now(timezone.utc)
        instance_user_agent = get_instance_info(hostname, port)

        if instance_user_agent is not None:
            all_evidence.append(
                TLSCertCheckEvidence(
                    hostname=hostname,
                    domain=get_domain(hostname),
                    port=port,
                    time=time,
                ))

            if instance_user_agent.server != UNKNOWN_SERVER_TYPE \
                    and instance_user_agent.url is not None:
                reported_hostname_and_port = extract_hostname_and_port(
                    instance_user_agent.url)
                if reported_hostname_and_port is not None:
                    reported_hostname, reported_port = reported_hostname_and_port
                    if hostname == reported_hostname and port == reported_port:
                        all_evidence.append(
                            InstanceAPIEvidence(
                                hostname=hostname,
                                domain=get_domain(hostname),
                                port=port,
                                instance_user_agent=instance_user_agent,
                                time=time,
                            ))

    # TODO: Ignores ports: I've not seen a non-443 instance yet.

    # Map of hostname to instance info accumulator.
    # noinspection PyTypeHints
    instances: DefaultDict[str, InstanceInfoAcc] = DefaultDict(InstanceInfoAcc)
    for evidence in all_evidence:
        instances[evidence.domain].add(evidence)

    frozen: OrderedDict[str, InstanceInfoFrozen] = OrderedDict()
    for instance in sorted(instances.keys()):
        frozen[instance] = instances[instance].freeze()

    # Dump output as YAML.
    yaml = YAML()
    yaml.indent(mapping=2, sequence=2, offset=1)
    yaml.dump(CommentedMap(frozen),
              sys.stdout)  # Hack: prevents !!omap annotation in YAML output
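The hack in the last two lines can be reproduced in isolation. A minimal sketch, assuming typical ruamel.yaml behavior where the round-trip dumper tags an OrderedDict as !!omap but dumps a CommentedMap as a plain mapping:

import sys
from collections import OrderedDict
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

yaml = YAML()
data = OrderedDict([("b.example", 1), ("a.example", 2)])
yaml.dump(CommentedMap(data), sys.stdout)  # plain mapping, insertion order kept
# yaml.dump(data, sys.stdout) would emit an "!!omap"-tagged sequence instead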
Example No. 19
 def __init__(self, path: str, base_path: str) -> None:
     super().__init__()
     self._data = CommentedMap()
     self.path: str = path
     self.base_path: str = base_path
Example No. 20
 def _recursive_get(self, data: CommentedMap, key: str, default_value: Any) -> Any:
     key, next_key = self._parse_key(key)
     if next_key is not None:
         next_data = data.get(key, CommentedMap())
         return self._recursive_get(next_data, next_key, default_value)
     return data.get(key, default_value)
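A self-contained sketch of the dotted-key lookup above, with a hypothetical _parse_key that splits on the first "." (the real implementation is not shown in this excerpt):

from typing import Any, Optional, Tuple
from ruamel.yaml.comments import CommentedMap

def parse_key(key: str) -> Tuple[str, Optional[str]]:
    head, _, rest = key.partition(".")  # "a.b.c" -> ("a", "b.c")
    return head, rest or None

def recursive_get(data: CommentedMap, key: str, default_value: Any) -> Any:
    key, next_key = parse_key(key)
    if next_key is not None:
        return recursive_get(data.get(key, CommentedMap()), next_key, default_value)
    return data.get(key, default_value)

data = CommentedMap({"a": CommentedMap({"b": CommentedMap({"c": 1})})})
assert recursive_get(data, "a.b.c", 0) == 1
assert recursive_get(data, "a.x.c", 0) == 0  # missing path falls back to the default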
Example No. 21
 def __init__(self, data: Optional[CommentedMap] = None) -> None:
     self._data = data or CommentedMap()  # type: CommentedMap
Example No. 22
class YAMLRoundtripConfig(MutableConfigFile, MutableAbstractItemAccessMixin, MutableAbstractDictFunctionsMixin):
    """
    Class for YAML-based (roundtrip) configurations
    """

    def __init__(self, owner: Any, manager: "m.StorageManager", path: str, *args: List[Any], **kwargs: Dict[Any, Any]):
        self.data = CommentedMap()

        super().__init__(owner, manager, path, *args, **kwargs)

    def load(self):
        with open(self.path, "r") as fh:
            self.data = yaml.round_trip_load(fh, version=(1, 2))

    def reload(self):
        self.unload()
        self.load()

    def unload(self):
        self.data.clear()

    def save(self):
        if not self.mutable:
            raise RuntimeError("You may not modify a defaults file at runtime - check the mutable attribute!")

        with open(self.path, "w") as fh:
            yaml.round_trip_dump(self.data, fh)

    # region: CommentedMap functions

    def insert(self, pos, key, value, *, comment=None):
        """
        Insert a `key: value` pair at the given position, attaching a comment if provided

        Wrapper for `CommentedMap.insert()`
        """

        return self.data.insert(pos, key, value, comment)

    def add_eol_comment(self, comment, *, key=NoComment, column=30):
        """
        Add an end-of-line comment for a key at a particular column (30 by default)

        Wrapper for `CommentedMap.yaml_add_eol_comment()`
        """

        # The API's actual default for `column` is None, which raises an
        # exception, so unfortunately we have to specify a value here

        return self.data.yaml_add_eol_comment(comment, key=key, column=column)

    def set_comment_before_key(self, key, comment, *, indent=0):
        """
        Set a comment before a given key

        Wrapper for `CommentedMap.yaml_set_comment_before_after_key()`
        """

        return self.data.yaml_set_comment_before_after_key(
            key, before=comment, indent=indent, after=None, after_indent=None
        )

    def set_start_comment(self, comment, indent=0):
        """
        Set the starting comment

        Wrapper for `CommentedMap.yaml_set_start_comment()`
        """

        return self.data.yaml_set_start_comment(comment, indent=indent)

    # endregion

    # region: Dict functions

    def clear(self):
        return self.data.clear()

    def copy(self):
        return self.data.copy()

    def get(self, key, default=None):
        return self.data.get(key, default)

    def items(self):
        return self.data.items()

    def keys(self):
        return self.data.keys()

    def pop(self, key, default=None):
        return self.data.pop(key, default)

    def popitem(self):
        return self.data.popitem()

    def setdefault(self, key, default=None):
        if key not in self.data:
            self.data[key] = default
            return default

        return self.data[key]

    def update(self, other):
        return self.data.update(other)

    def values(self):
        return self.data.values()

    # endregion

    # Item access functions

    def __contains__(self, key):
        """
        Wrapper for `dict.__contains__()`
        """

        return self.data.__contains__(key)

    def __delitem__(self, key):
        """
        Wrapper for `dict.__delitem__()`
        """

        del self.data[key]

    def __getitem__(self, key):
        """
        Wrapper for `dict.__getitem__()`
        """

        return self.data.__getitem__(key)

    def __iter__(self):
        """
        Wrapper for `dict.__iter__()`
        """

        return self.data.__iter__()

    def __len__(self):
        """
        Wrapper for `dict.__len__()`
        """

        return self.data.__len__()

    def __setitem__(self, key, value):
        """
        Wrapper for `dict.__setitem__()`
        """

        return self.data.__setitem__(key, value)
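A short demo of the underlying ruamel.yaml comment APIs wrapped by this class; a sketch only, since exact column handling varies between ruamel.yaml versions:

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

data = CommentedMap([("host", "localhost"), ("port", 8080)])
data.yaml_set_start_comment("Service configuration")
data.yaml_set_comment_before_after_key("port", before="network settings")
data.yaml_add_eol_comment("plain HTTP", key="port", column=30)
YAML().dump(data, sys.stdout)
# Expected output, roughly:
#   # Service configuration
#   host: localhost
#   # network settings
#   port: 8080                  # plain HTTP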
Example No. 23
def workflow_clean(document):  # type: (MutableMapping[Text, Any]) -> None
    """Transform draft-3 style Workflows to more idiomatic v1.0"""
    input_output_clean(document)
    hints_and_requirements_clean(document)
    outputs = document['outputs']
    for output_id in outputs:
        outputs[output_id]["outputSource"] = \
            outputs[output_id].pop("source").lstrip('#').replace(".", "/")
    new_steps = CommentedMap()
    for step in document["steps"]:
        new_step = CommentedMap()
        new_step.update(step)
        step = new_step
        step_id = step.pop("id")
        step_id_len = len(step_id)+1
        step["out"] = []
        for outp in step["outputs"]:
            clean_outp_id = outp["id"]
            if clean_outp_id.startswith(step_id):
                clean_outp_id = clean_outp_id[step_id_len:]
            step["out"].append(clean_outp_id)
        del step["outputs"]
        ins = CommentedMap()
        for inp in step["inputs"]:
            ident = inp["id"]
            if ident.startswith(step_id):
                ident = ident[step_id_len:]
            if 'source' in inp:
                inp["source"] = inp["source"].lstrip('#').replace(".", "/")
            del inp["id"]
            if len(inp) > 1:
                ins[ident] = inp
            elif len(inp) == 1:
                if "source" in inp:
                    ins[ident] = inp.popitem()[1]
                else:
                    ins[ident] = inp
            else:
                ins[ident] = {}
        step["in"] = ins
        del step["inputs"]
        if "scatter" in step:
            if isinstance(step["scatter"], (str, Text)):
                source = step["scatter"]
                if source.startswith(step_id):
                    source = source[step_id_len:]
                step["scatter"] = source
            elif isinstance(step["scatter"], list) and len(step["scatter"]) > 1:
                # collect into a new list; assigning [] first would empty the
                # very list we are about to iterate over
                new_scatter = []
                for source in step["scatter"]:
                    if source.startswith(step_id):
                        source = source[step_id_len:]
                    new_scatter.append(source)
                step["scatter"] = new_scatter
            else:
                source = step["scatter"][0]
                if source.startswith(step_id):
                    source = source[step_id_len:]
                step["scatter"] = source
        if "description" in step:
            step["doc"] = step.pop("description")
        new_steps[step_id.lstrip('#')] = step
    document["steps"] = new_steps
Example No. 24
    def resolve_all(
        self,
        document: ResolveType,
        base_url: str,
        file_base: Optional[str] = None,
        checklinks: bool = True,
        strict_foreign_properties: bool = False,
    ) -> ResolvedRefType:
        loader = self
        metadata = CommentedMap()
        if file_base is None:
            file_base = base_url

        if isinstance(document, CommentedMap):
            # Handle $import and $include
            if "$import" in document or "$include" in document:
                return self.resolve_ref(
                    document,
                    base_url=file_base,
                    checklinks=checklinks,
                    strict_foreign_properties=strict_foreign_properties,
                )
            elif "$mixin" in document:
                return self.resolve_ref(
                    document,
                    base_url=base_url,
                    checklinks=checklinks,
                    strict_foreign_properties=strict_foreign_properties,
                )
        elif isinstance(document, CommentedSeq):
            pass
        elif isinstance(document, (list, dict)):
            raise ValidationException(
                "Expected CommentedMap or CommentedSeq, got {}: `{}`".format(
                    type(document), document
                )
            )
        else:
            return (document, metadata)

        newctx = None  # type: Optional["Loader"]
        if isinstance(document, CommentedMap):
            # Handle $base, $profile, $namespaces, $schemas and $graph
            if "$base" in document:
                base_url = document["$base"]

            if "$profile" in document:
                if newctx is None:
                    newctx = SubLoader(self)
                newctx.add_namespaces(document.get("$namespaces", CommentedMap()))
                newctx.add_schemas(document.get("$schemas", []), document["$profile"])

            if "$namespaces" in document:
                if newctx is None:
                    newctx = SubLoader(self)
                newctx.add_namespaces(document["$namespaces"])

            if "$schemas" in document:
                if newctx is None:
                    newctx = SubLoader(self)
                newctx.add_schemas(document["$schemas"], file_base)

            if newctx is not None:
                loader = newctx

            for identifier in loader.identity_links:
                if identifier in document:
                    if isinstance(document[identifier], str):
                        document[identifier] = loader.expand_url(
                            document[identifier], base_url, scoped_id=True
                        )
                        loader.idx[document[identifier]] = document

            metadata = document
            if "$graph" in document:
                document = document["$graph"]

        if isinstance(document, CommentedMap):
            self._normalize_fields(document, loader)
            self._resolve_idmap(document, loader)
            self._resolve_dsl(document, loader)
            base_url = self._resolve_identifier(document, loader, base_url)
            self._resolve_identity(document, loader, base_url)
            self._resolve_uris(document, loader, base_url)

            try:
                for key, val in document.items():
                    subscope = ""  # type: str
                    if key in loader.subscopes:
                        subscope = "/" + loader.subscopes[key]
                    document[key], _ = loader.resolve_all(
                        val, base_url + subscope, file_base=file_base, checklinks=False
                    )
            except ValidationException as v:
                _logger.warning("loader is %s", id(loader), exc_info=True)
                raise ValidationException(
                    "({}) ({}) Validation error in field {}:".format(
                        id(loader), file_base, key
                    ),
                    None,
                    [v],
                ) from v

        elif isinstance(document, CommentedSeq):
            i = 0
            try:
                while i < len(document):
                    val = document[i]
                    if isinstance(val, CommentedMap) and (
                        "$import" in val or "$mixin" in val
                    ):
                        l, import_metadata = loader.resolve_ref(
                            val, base_url=file_base, checklinks=False
                        )
                        metadata.setdefault("$import_metadata", {})
                        for identifier in loader.identifiers:
                            if identifier in import_metadata:
                                metadata["$import_metadata"][
                                    import_metadata[identifier]
                                ] = import_metadata
                        if isinstance(l, CommentedSeq):
                            # remember the line/column of the removed entry
                            lc = document.lc.data[i]
                            del document[i]
                            llen = len(l)
                            # shift saved line/column data to make room for
                            # the items spliced in from the $import/$mixin
                            for j in range(len(document) + llen, i + llen, -1):
                                document.lc.data[j - 1] = document.lc.data[j - llen]
                            for item in l:
                                document.insert(i, item)
                                document.lc.data[i] = lc
                                i += 1
                        else:
                            document[i] = l
                            i += 1
                    else:
                        document[i], _ = loader.resolve_all(
                            val, base_url, file_base=file_base, checklinks=False
                        )
                        i += 1
            except ValidationException as v:
                _logger.warning("failed", exc_info=True)
                raise ValidationException(
                    "({}) ({}) Validation error in position {}:".format(
                        id(loader), file_base, i
                    ),
                    None,
                    [v],
                ) from v

        if checklinks:
            all_doc_ids = {}  # type: Dict[str, str]
            loader.validate_links(
                document,
                "",
                all_doc_ids,
                strict_foreign_properties=strict_foreign_properties,
            )

        return document, metadata
Example No. 25
 def __iter__(self):
     for item in CommentedMap.__iter__(self):
         yield item
Example No. 26
    def resolve_all(
            self,
            document,  # type: Union[CommentedMap, CommentedSeq]
            base_url,  # type: Text
            file_base=None,  # type: Text
            checklinks=True  # type: bool
    ):
        # type: (...) -> Tuple[Union[CommentedMap, CommentedSeq, Text, None], Dict[Text, Any]]
        loader = self
        metadata = CommentedMap()  # type: CommentedMap
        if file_base is None:
            file_base = base_url

        if isinstance(document, CommentedMap):
            # Handle $import and $include
            if (u'$import' in document or u'$include' in document):
                return self.resolve_ref(document,
                                        base_url=file_base,
                                        checklinks=checklinks)
            elif u'$mixin' in document:
                return self.resolve_ref(document,
                                        base_url=base_url,
                                        checklinks=checklinks)
        elif isinstance(document, CommentedSeq):
            pass
        elif isinstance(document, (list, dict)):
            raise Exception(
                "Expected CommentedMap or CommentedSeq, got %s: `%s`" %
                (type(document), document))
        else:
            return (document, metadata)

        newctx = None  # type: Optional[Loader]
        if isinstance(document, CommentedMap):
            # Handle $base, $profile, $namespaces, $schemas and $graph
            if u"$base" in document:
                base_url = document[u"$base"]

            if u"$profile" in document:
                if newctx is None:
                    newctx = SubLoader(self)
                prof = self.fetch(document[u"$profile"])
                newctx.add_namespaces(document.get(u"$namespaces", {}))
                newctx.add_schemas(document.get(u"$schemas", []),
                                   document[u"$profile"])

            if u"$namespaces" in document:
                if newctx is None:
                    newctx = SubLoader(self)
                newctx.add_namespaces(document[u"$namespaces"])

            if u"$schemas" in document:
                if newctx is None:
                    newctx = SubLoader(self)
                newctx.add_schemas(document[u"$schemas"], file_base)

            if newctx is not None:
                loader = newctx

            if u"$graph" in document:
                metadata = _copy_dict_without_key(document, u"$graph")
                document = document[u"$graph"]
                resolved_metadata = loader.resolve_all(metadata,
                                                       base_url,
                                                       file_base=file_base,
                                                       checklinks=False)[0]
                if isinstance(resolved_metadata, dict):
                    metadata = resolved_metadata
                else:
                    raise validate.ValidationException(
                        "Validation error, metadata must be dict: %s" %
                        (resolved_metadata))

        if isinstance(document, CommentedMap):
            self._normalize_fields(document, loader)
            self._resolve_idmap(document, loader)
            self._resolve_type_dsl(document, loader)
            base_url = self._resolve_identifier(document, loader, base_url)
            self._resolve_identity(document, loader, base_url)
            self._resolve_uris(document, loader, base_url)

            try:
                for key, val in document.items():
                    document[key], _ = loader.resolve_all(val,
                                                          base_url,
                                                          file_base=file_base,
                                                          checklinks=False)
            except validate.ValidationException as v:
                _logger.warn("loader is %s", id(loader), exc_info=True)
                raise validate.ValidationException(
                    "(%s) (%s) Validation error in field %s:\n%s" %
                    (id(loader), file_base, key,
                     validate.indent(six.text_type(v))))

        elif isinstance(document, CommentedSeq):
            i = 0
            try:
                while i < len(document):
                    val = document[i]
                    if isinstance(val, CommentedMap) and (u"$import" in val
                                                          or u"$mixin" in val):
                        l, _ = loader.resolve_ref(val,
                                                  base_url=file_base,
                                                  checklinks=False)
                        if isinstance(l, CommentedSeq):
                            # remember the line/column of the removed entry
                            lc = document.lc.data[i]
                            del document[i]
                            llen = len(l)
                            # shift saved line/column data to make room for
                            # the items spliced in from the $import/$mixin
                            for j in range(len(document) + llen, i + llen, -1):
                                document.lc.data[j - 1] = document.lc.data[j - llen]
                            for item in l:
                                document.insert(i, item)
                                document.lc.data[i] = lc
                                i += 1
                        else:
                            document[i] = l
                            i += 1
                    else:
                        document[i], _ = loader.resolve_all(
                            val,
                            base_url,
                            file_base=file_base,
                            checklinks=False)
                        i += 1
            except validate.ValidationException as v:
                _logger.warn("failed", exc_info=True)
                raise validate.ValidationException(
                    "(%s) (%s) Validation error in position %i:\n%s" %
                    (id(loader), file_base, i, validate.indent(
                        six.text_type(v))))

            for identifier in loader.identity_links:
                if identifier in metadata:
                    if isinstance(metadata[identifier], (str, six.text_type)):
                        metadata[identifier] = loader.expand_url(
                            metadata[identifier], base_url, scoped_id=True)
                        loader.idx[metadata[identifier]] = document

        if checklinks:
            all_doc_ids = {}  # type: Dict[Text, Text]
            self.validate_links(document, u"", all_doc_ids)

        return document, metadata
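A data-level illustration of the "$graph" handling above, using a hypothetical document: the mapping around the graph becomes the metadata, and the graph list becomes the returned document.

doc = {
    "$base": "http://example.com/",
    "$graph": [{"id": "#main"}, {"id": "#tool"}],
}
# resolve_all(doc, ...) would return roughly:
#   document -> [{"id": "#main"}, {"id": "#tool"}]
#   metadata -> {"$base": "http://example.com/"}  (after its own resolution pass)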
Example No. 27
    def do_update(self, helper: ConfigUpdateHelper) -> None:
        super().do_update(helper)
        copy, copy_dict, base = helper

        copy("homeserver.asmux")

        if "appservice.protocol" in self and "appservice.address" not in self:
            protocol, hostname, port = (self["appservice.protocol"],
                                        self["appservice.hostname"],
                                        self["appservice.port"])
            base["appservice.address"] = f"{protocol}://{hostname}:{port}"
        if "appservice.debug" in self and "logging" not in self:
            level = "DEBUG" if self["appservice.debug"] else "INFO"
            base["logging.root.level"] = level
            base["logging.loggers.mau.level"] = level
            base["logging.loggers.telethon.level"] = level

        copy("appservice.public.enabled")
        copy("appservice.public.prefix")
        copy("appservice.public.external")

        copy("appservice.provisioning.enabled")
        copy("appservice.provisioning.prefix")
        copy("appservice.provisioning.shared_secret")
        if base["appservice.provisioning.shared_secret"] == "generate":
            base["appservice.provisioning.shared_secret"] = self._new_token()

        copy("appservice.community_id")

        copy("metrics.enabled")
        copy("metrics.listen_port")

        copy("manhole.enabled")
        copy("manhole.path")
        copy("manhole.whitelist")

        copy("bridge.username_template")
        copy("bridge.alias_template")
        copy("bridge.displayname_template")

        copy("bridge.displayname_preference")
        copy("bridge.displayname_max_length")
        copy("bridge.allow_avatar_remove")

        copy("bridge.max_initial_member_sync")
        copy("bridge.sync_channel_members")
        copy("bridge.skip_deleted_members")
        copy("bridge.startup_sync")
        if "bridge.sync_dialog_limit" in self:
            base["bridge.sync_create_limit"] = self["bridge.sync_dialog_limit"]
            base["bridge.sync_update_limit"] = self["bridge.sync_dialog_limit"]
        else:
            copy("bridge.sync_update_limit")
            copy("bridge.sync_create_limit")
        copy("bridge.sync_direct_chats")
        copy("bridge.max_telegram_delete")
        copy("bridge.sync_matrix_state")
        copy("bridge.allow_matrix_login")
        copy("bridge.plaintext_highlights")
        copy("bridge.public_portals")
        copy("bridge.sync_with_custom_puppets")
        copy("bridge.sync_direct_chat_list")
        copy("bridge.double_puppet_server_map")
        copy("bridge.double_puppet_allow_discovery")
        if "bridge.login_shared_secret" in self:
            base["bridge.login_shared_secret_map"] = {
                base["homeserver.domain"]: self["bridge.login_shared_secret"]
            }
        else:
            copy("bridge.login_shared_secret_map")
        copy("bridge.telegram_link_preview")
        copy("bridge.invite_link_resolve")
        copy("bridge.inline_images")
        copy("bridge.image_as_file_size")
        copy("bridge.max_document_size")
        copy("bridge.parallel_file_transfer")
        copy("bridge.federate_rooms")
        copy("bridge.animated_sticker.target")
        copy("bridge.animated_sticker.args")
        copy("bridge.encryption.allow")
        copy("bridge.encryption.default")
        copy("bridge.encryption.database")
        copy("bridge.encryption.key_sharing.allow")
        copy("bridge.encryption.key_sharing.require_cross_signing")
        copy("bridge.encryption.key_sharing.require_verification")
        copy("bridge.private_chat_portal_meta")
        copy("bridge.delivery_receipts")
        copy("bridge.delivery_error_reports")
        copy("bridge.resend_bridge_info")
        copy("bridge.mute_bridging")
        copy("bridge.pinned_tag")
        copy("bridge.archive_tag")
        copy("bridge.backfill.invite_own_puppet")
        copy("bridge.backfill.takeout_limit")
        copy("bridge.backfill.initial_limit")
        copy("bridge.backfill.missed_limit")
        copy("bridge.backfill.disable_notifications")
        copy("bridge.backfill.normal_groups")

        copy("bridge.initial_power_level_overrides.group")
        copy("bridge.initial_power_level_overrides.user")

        copy("bridge.bot_messages_as_notices")
        if isinstance(self["bridge.bridge_notices"], bool):
            base["bridge.bridge_notices"] = {
                "default": self["bridge.bridge_notices"],
                "exceptions": ["@importantbot:example.com"],
            }
        else:
            copy("bridge.bridge_notices")

        copy("bridge.deduplication.pre_db_check")
        copy("bridge.deduplication.cache_queue_length")

        if "bridge.message_formats.m_text" in self:
            del self["bridge.message_formats"]
        copy_dict("bridge.message_formats", override_existing_map=False)
        copy("bridge.emote_format")

        copy("bridge.state_event_formats.join")
        copy("bridge.state_event_formats.leave")
        copy("bridge.state_event_formats.name_change")

        copy("bridge.filter.mode")
        copy("bridge.filter.list")

        copy("bridge.command_prefix")

        migrate_permissions = ("bridge.permissions" not in self
                               or "bridge.whitelist" in self
                               or "bridge.admins" in self)
        if migrate_permissions:
            permissions = self["bridge.permissions"] or CommentedMap()
            for entry in self["bridge.whitelist"] or []:
                permissions[entry] = "full"
            for entry in self["bridge.admins"] or []:
                permissions[entry] = "admin"
            base["bridge.permissions"] = permissions
        else:
            copy_dict("bridge.permissions")

        if "bridge.relaybot" not in self:
            copy("bridge.authless_relaybot_portals",
                 "bridge.relaybot.authless_portals")
        else:
            copy("bridge.relaybot.private_chat.invite")
            copy("bridge.relaybot.private_chat.state_changes")
            copy("bridge.relaybot.private_chat.message")
            copy("bridge.relaybot.group_chat_invite")
            copy("bridge.relaybot.ignore_unbridged_group_chat")
            copy("bridge.relaybot.authless_portals")
            copy("bridge.relaybot.whitelist_group_admins")
            copy("bridge.relaybot.whitelist")
            copy("bridge.relaybot.ignore_own_incoming_events")

        copy("telegram.api_id")
        copy("telegram.api_hash")
        copy("telegram.bot_token")

        copy("telegram.connection.timeout")
        copy("telegram.connection.retries")
        copy("telegram.connection.retry_delay")
        copy("telegram.connection.flood_sleep_threshold")
        copy("telegram.connection.request_retries")

        copy("telegram.device_info.device_model")
        copy("telegram.device_info.system_version")
        copy("telegram.device_info.app_version")
        copy("telegram.device_info.lang_code")
        copy("telegram.device_info.system_lang_code")

        copy("telegram.server.enabled")
        copy("telegram.server.dc")
        copy("telegram.server.ip")
        copy("telegram.server.port")

        copy("telegram.proxy.type")
        copy("telegram.proxy.address")
        copy("telegram.proxy.port")
        copy("telegram.proxy.rdns")
        copy("telegram.proxy.username")
        copy("telegram.proxy.password")
Example No. 28
 def decode_json(cls, registry: Registry, data: str, args):
     raw_data = json.loads(data)
     result = CommentedMap()
     result['title'] = raw_data['title']
     result['resource'] = 'quiz'
     result['url'] = raw_data['html_url']
     result['published'] = raw_data['published']
     result['settings'] = CommentedMap()
     result['settings']['quiz_type'] = raw_data['quiz_type']
     if raw_data.get('points_possible') is not None:
         result['settings']['points_possible'] = raw_data['points_possible']
     result['settings']['allowed_attempts'] = raw_data['allowed_attempts']
     result['settings']['scoring_policy'] = raw_data['scoring_policy']
     result['settings']['timing'] = CommentedMap()
     result['settings']['timing']['due_at'] = to_friendly_date(
         raw_data['due_at'])
     result['settings']['timing']['unlock_at'] = to_friendly_date(
         raw_data['unlock_at'])
     result['settings']['timing']['lock_at'] = to_friendly_date(
         raw_data['lock_at'])
     result['settings']['secrecy'] = CommentedMap()
     result['settings']['secrecy']['shuffle_answers'] = raw_data[
         'shuffle_answers']
     result['settings']['secrecy']['time_limit'] = raw_data['time_limit']
     result['settings']['secrecy']['one_question_at_a_time'] = raw_data[
         'one_question_at_a_time']
     result['settings']['secrecy']['cant_go_back'] = raw_data[
         'cant_go_back']
     result['settings']['secrecy']['show_correct_answers'] = raw_data[
         'show_correct_answers']
     result['settings']['secrecy'][
         'show_correct_answers_last_attempt'] = raw_data[
             'show_correct_answers_last_attempt']
     result['settings']['secrecy']['show_correct_answers_at'] = raw_data[
         'show_correct_answers_at']
     result['settings']['secrecy']['hide_correct_answers_at'] = raw_data[
         'hide_correct_answers_at']
     result['settings']['secrecy']['hide_results'] = raw_data[
         'hide_results']
     result['settings']['secrecy']['one_time_results'] = raw_data[
         'one_time_results']
     if raw_data['access_code']:
         result['settings']['secrecy']['access_code'] = raw_data[
             'access_code']
     if raw_data['ip_filter']:
         result['settings']['secrecy']['ip_filter'] = raw_data['ip_filter']
     # Handle questions and groups
     result['questions'] = []
     available_groups = raw_data['groups']
     used_groups = {}
     extra_files = []
     for question in raw_data['questions']:
         quiz_question, destination_path, full_body = QuizQuestion.decode_question(
             registry, question, raw_data, args)
         if destination_path is not None:
             extra_files.append((destination_path, full_body))
         quiz_group_id = question.get('quiz_group_id')
         if quiz_group_id is not None:
             quiz_group_id = str(
                 quiz_group_id)  # acbart: JSON only allows string keys
             if quiz_group_id not in used_groups:
                 used_groups[quiz_group_id] = QuizGroup.decode_group(
                     available_groups[quiz_group_id])
                 result['questions'].append(used_groups[quiz_group_id])
             used_groups[quiz_group_id]['questions'].append(quiz_question)
         else:
             result['questions'].append(quiz_question)
     return h2m(raw_data['description'], result), extra_files
Example No. 29
    def __init__(
        self,
        config=None,
        path=None,
        validate=True,
        schema=None,
        loadHook=None,
        vault=None,
    ):
        try:
            self._yaml = None
            self.vault = vault
            self.path = None
            self.schema = schema
            self.lastModified = None
            if path:
                self.path = os.path.abspath(path)
                if os.path.isfile(self.path):
                    statinfo = os.stat(self.path)
                    self.lastModified = statinfo.st_mtime
                    with open(self.path, "r") as f:
                        config = f.read()
                # otherwise use default config
            else:
                self.path = None

            if isinstance(config, six.string_types):
                if self.path:
                    # set name on a StringIO so parsing error messages include the path
                    config = six.StringIO(config)
                    config.name = self.path
                self.config = self.yaml.load(config)
            elif isinstance(config, dict):
                self.config = CommentedMap(config.items())
            else:
                self.config = config
            if not isinstance(self.config, CommentedMap):
                raise UnfurlValidationError(
                    'invalid YAML document with contents: "%s"' % self.config)

            findAnchor(self.config, None)  # create _anchorCache
            self._cachedDocIncludes = {}
            # schema should include defaults but can't validate because it doesn't understand includes
            # but should work most of time
            self.config.loadTemplate = self.loadInclude
            self.loadHook = loadHook

            self.baseDirs = [self.getBaseDir()]
            self.includes, expandedConfig = expandDoc(self.config,
                                                      cls=makeMapWithBase(
                                                          self.config,
                                                          self.baseDirs[0]))
            self.expanded = expandedConfig
            errors = schema and self.validate(expandedConfig)
            if errors and validate:
                (message, schemaErrors) = errors
                raise UnfurlValidationError(
                    "JSON Schema validation failed: " + message, errors)
            else:
                # valid only when schema validation produced no errors
                self.valid = not errors
        except Exception:
            if self.path:
                msg = "Unable to load yaml config at %s" % self.path
            else:
                msg = "Unable to parse yaml config"
            raise UnfurlError(msg, True)
Example No. 30
def add_space_between_main_sections(cwl: CommentedMap):
    for k in cwl.keys():
        if k in ["inputs", "outputs", "steps", "requirements", "hints", "baseCommand"]:
            cwl.yaml_set_comment_before_after_key(key=k, before="\n")
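Usage sketch for the helper above, assuming it is in scope: passing a bare newline as the "before" comment is a known ruamel.yaml trick that renders as a blank line.

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

cwl = CommentedMap([("cwlVersion", "v1.0"), ("inputs", {}), ("outputs", {})])
add_space_between_main_sections(cwl)
YAML().dump(cwl, sys.stdout)
# cwlVersion: v1.0
#
# inputs: {}
#
# outputs: {}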
Example No. 31
def copy(src):
    cls = getattr(src, "mapCtor", src.__class__)
    if six.PY2 and cls is CommentedMap:
        return CommentedMap(src.items())
    return cls(src)
Example No. 32
def test_str():
    commented_map = CommentedMap()
    sec = Section("section1", commented_map)
    assert str(sec) == "section1"
Example No. 33
    def __init__(self, toolpath_object, pos, **kwargs):
        # type: (Dict[Text, Any], int, **Any) -> None
        if "id" in toolpath_object:
            self.id = toolpath_object["id"]
        else:
            self.id = "#step" + Text(pos)

        kwargs["requirements"] = kwargs.get(
            "requirements", []) + toolpath_object.get("requirements", [])
        kwargs["hints"] = kwargs.get("hints", []) + toolpath_object.get(
            "hints", [])

        try:
            if isinstance(toolpath_object["run"], dict):
                self.embedded_tool = kwargs.get("makeTool")(
                    toolpath_object["run"], **kwargs)
            else:
                self.embedded_tool = load_tool(
                    toolpath_object["run"],
                    kwargs.get("makeTool"),
                    kwargs,
                    enable_dev=kwargs.get("enable_dev"),
                    strict=kwargs.get("strict"),
                    fetcher_constructor=kwargs.get("fetcher_constructor"))
        except validate.ValidationException as v:
            raise WorkflowException(
                u"Tool definition %s failed validation:\n%s" %
                (toolpath_object["run"], validate.indent(str(v))))

        validation_errors = []
        self.tool = toolpath_object = copy.deepcopy(toolpath_object)
        bound = set()
        for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
            toolpath_object[toolfield] = []
            for n, step_entry in enumerate(toolpath_object[stepfield]):
                if isinstance(step_entry, six.string_types):
                    param = CommentedMap()  # type: CommentedMap
                    inputid = step_entry
                else:
                    param = CommentedMap(six.iteritems(step_entry))
                    inputid = step_entry["id"]

                shortinputid = shortname(inputid)
                found = False
                for tool_entry in self.embedded_tool.tool[toolfield]:
                    frag = shortname(tool_entry["id"])
                    if frag == shortinputid:
                        param.update(tool_entry)
                        found = True
                        bound.add(frag)
                        break
                if not found:
                    if stepfield == "in":
                        param["type"] = "Any"
                    else:
                        validation_errors.append(
                            SourceLine(self.tool["out"], n).makeError(
                                "Workflow step output '%s' does not correspond to"
                                % shortname(step_entry)) + "\n" +
                            SourceLine(self.embedded_tool.tool, "outputs").
                            makeError("  tool output (expected '%s')" %
                                      ("', '".join([
                                          shortname(tool_entry["id"])
                                          for tool_entry in
                                          self.embedded_tool.tool[toolfield]
                                      ]))))
                param["id"] = inputid
                param.lc.line = toolpath_object[stepfield].lc.data[n][0]
                param.lc.col = toolpath_object[stepfield].lc.data[n][1]
                param.lc.filename = toolpath_object[stepfield].lc.filename
                toolpath_object[toolfield].append(param)

        missing = []
        for i, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
            if shortname(tool_entry["id"]) not in bound:
                if "null" not in tool_entry[
                        "type"] and "default" not in tool_entry:
                    missing.append(shortname(tool_entry["id"]))

        if missing:
            validation_errors.append(
                SourceLine(self.tool, "in").makeError(
                    "Step is missing required parameter%s '%s'" %
                    ("s" if len(missing) > 1 else "", "', '".join(missing))))

        if validation_errors:
            raise validate.ValidationException("\n".join(validation_errors))

        super(WorkflowStep, self).__init__(toolpath_object, **kwargs)

        if self.embedded_tool.tool["class"] == "Workflow":
            (feature,
             _) = self.get_requirement("SubworkflowFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains embedded workflow but SubworkflowFeatureRequirement not in requirements"
                )

        if "scatter" in self.tool:
            (feature, _) = self.get_requirement("ScatterFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains scatter but ScatterFeatureRequirement not in requirements"
                )

            inputparms = copy.deepcopy(self.tool["inputs"])
            outputparms = copy.deepcopy(self.tool["outputs"])
            scatter = aslist(self.tool["scatter"])

            method = self.tool.get("scatterMethod")
            if method is None and len(scatter) != 1:
                raise validate.ValidationException(
                    "Must specify scatterMethod when scattering over multiple inputs"
                )

            inp_map = {i["id"]: i for i in inputparms}
            for s in scatter:
                if s not in inp_map:
                    raise validate.ValidationException(
                        SourceLine(self.tool, "scatter").makeError(
                            u"Scatter parameter '%s' does not correspond to an input parameter of this "
                            u"step, expecting '%s'" %
                            (shortname(s), "', '".join(
                                shortname(k) for k in inp_map.keys()))))

                inp_map[s]["type"] = {
                    "type": "array",
                    "items": inp_map[s]["type"]
                }

            if self.tool.get("scatterMethod") == "nested_crossproduct":
                nesting = len(scatter)
            else:
                nesting = 1

            for r in range(0, nesting):
                for op in outputparms:
                    op["type"] = {"type": "array", "items": op["type"]}
            self.tool["inputs"] = inputparms
            self.tool["outputs"] = outputparms
Example No. 34
def test_hash():
    commented_map = CommentedMap()
    sec = Section("section1", commented_map)
    assert hash(sec) == hash("section1-[]")
    sec.add_item("item")
    assert hash(sec) == hash("section1-['item']")
Example No. 35
    def __call__(self, loader: 'Loader',
                 node: yaml.Node) -> Generator[Any, None, None]:
        """Construct an object from a yaml node.

        This constructs an object of the user-defined type that this \
        is the constructor for. It is registered with the yaml library, \
        and called by it. Recursion is handled by calling the yaml \
        library, so we only need to construct an object using the keys \
        and values of the given MappingNode, and those values have been \
        converted recursively for us.

        Since Python does not do type checks, we do a type check \
        manually, to ensure that the class's constructor gets the types \
        it expects. This avoids confusing errors, but moreover is a \
        security features that ensures that regardless of the content \
        of the YAML file, we produce the objects that the programmer \
        defined and expects.

        Note that this yields rather than returns, in a somewhat odd \
        way. That's directly from the PyYAML/ruamel.yaml documentation.

        Args:
            loader: The yatiml.loader that is creating this object.
            node: The node to construct from.

        Yields:
            The incomplete constructed object.
        """
        logger.debug('Constructing an object of type {}'.format(
            self.class_.__name__))
        if not isinstance(node, yaml.MappingNode):
            raise RecognitionError(
                ('{}{}Expected a MappingNode. There'
                 ' is probably something wrong with your yatiml_savorize()'
                 ' function.').format(node.start_mark, os.linesep))

        # figure out which keys are extra and strip them of tags
        # to prevent constructing objects we haven't type checked
        argspec = inspect.getfullargspec(self.class_.__init__)
        self.__strip_extra_attributes(node, argspec.args)

        # create object and let yaml lib construct subobjects
        new_obj = self.class_.__new__(self.class_)  # type: ignore
        yield new_obj
        mapping = CommentedMap()
        loader.construct_mapping(node, mapping, deep=True)

        # Convert ruamel.yaml's round-trip types to list and OrderedDict,
        # recursively for each attribute value in our mapping. Note that
        # mapping itself is still a CommentedMap.
        for key, value in mapping.copy().items():
            if (isinstance(value, CommentedMap)
                    or isinstance(value, CommentedSeq)):
                mapping[key] = self.__to_plain_containers(value)

        # do type check
        self.__check_no_missing_attributes(node, mapping)
        self.__type_check_attributes(node, mapping, argspec)

        # construct object, this should work now
        try:
            logger.debug('Calling __init__')
            if 'yatiml_extra' in argspec.args:
                attrs = self.__split_off_extra_attributes(
                    mapping, argspec.args)
                new_obj.__init__(**attrs)

            else:
                new_obj.__init__(**mapping)

        except TypeError:  # pragma: no cover
            raise RecognitionError(
                ('{}{}Could not construct object of class {}'
                 ' from {}. This is a bug in YAtiML, please report.'.format(
                     node.start_mark, os.linesep, self.class_.__name__, node)))
        logger.debug('Done constructing {}'.format(self.class_.__name__))
Example No. 36
def v1_0to1_1(doc: Any, loader: Loader, baseuri: str) -> Tuple[Any, str]:  # pylint: disable=unused-argument
    """Public updater for v1.0 to v1.1."""
    doc = copy.deepcopy(doc)

    rewrite = {
        "http://commonwl.org/cwltool#WorkReuse":
        "WorkReuse",
        "http://arvados.org/cwl#ReuseRequirement":
        "WorkReuse",
        "http://commonwl.org/cwltool#TimeLimit":
        "ToolTimeLimit",
        "http://commonwl.org/cwltool#NetworkAccess":
        "NetworkAccess",
        "http://commonwl.org/cwltool#InplaceUpdateRequirement":
        "InplaceUpdateRequirement",
        "http://commonwl.org/cwltool#LoadListingRequirement":
        "LoadListingRequirement",
    }

    def rewrite_requirements(
            t: MutableMapping[str, Union[str, Dict[str, Any]]]) -> None:
        if "requirements" in t:
            for r in t["requirements"]:
                if isinstance(r, MutableMapping):
                    if r["class"] in rewrite:
                        r["class"] = rewrite[r["class"]]
                else:
                    raise validate.ValidationException(
                        "requirements entries must be dictionaries: {} {}.".
                        format(type(r), r))
        if "hints" in t:
            for r in t["hints"]:
                if isinstance(r, MutableMapping):
                    if r["class"] in rewrite:
                        r["class"] = rewrite[r["class"]]
                else:
                    raise validate.ValidationException(
                        "hints entries must be dictionaries: {} {}.".format(
                            type(r), r))
        if "steps" in t:
            for s in t["steps"]:
                if isinstance(s, MutableMapping):
                    rewrite_requirements(s)
                else:
                    raise validate.ValidationException(
                        "steps entries must be dictionaries: {} {}.".format(
                            type(s), s))

    def update_secondaryFiles(t, top=False):
        # type: (Any, bool) -> Union[MutableSequence[MutableMapping[str, str]], MutableMapping[str, str]]
        if isinstance(t, CommentedSeq):
            new_seq = copy.deepcopy(t)
            for index, entry in enumerate(t):
                new_seq[index] = update_secondaryFiles(entry)
            return new_seq
        elif isinstance(t, MutableSequence):
            return CommentedSeq([update_secondaryFiles(p) for p in t])
        elif isinstance(t, MutableMapping):
            return t
        elif top:
            return CommentedSeq([CommentedMap([("pattern", t)])])
        else:
            return CommentedMap([("pattern", t)])

    def fix_inputBinding(t):  # type: (Dict[str, Any]) -> None
        for i in t["inputs"]:
            if "inputBinding" in i:
                ib = i["inputBinding"]
                for k in list(ib.keys()):
                    if k != "loadContents":
                        _logger.warning(
                            SourceLine(ib, k).makeError(
                                "Will ignore field '{}' which is not valid in {} "
                                "inputBinding".format(k, t["class"])))
                        del ib[k]

    visit_class(doc, ("CommandLineTool", "Workflow"), rewrite_requirements)
    visit_class(doc, ("ExpressionTool", "Workflow"), fix_inputBinding)
    visit_field(doc, "secondaryFiles", partial(update_secondaryFiles,
                                               top=True))

    upd = doc
    if isinstance(upd, MutableMapping) and "$graph" in upd:
        upd = upd["$graph"]
    for proc in aslist(upd):
        proc.setdefault("hints", CommentedSeq())
        proc["hints"].insert(
            0,
            CommentedMap([("class", "NetworkAccess"),
                          ("networkAccess", True)]))
        proc["hints"].insert(
            0,
            CommentedMap([("class", "LoadListingRequirement"),
                          ("loadListing", "deep_listing")]),
        )
        if "cwlVersion" in proc:
            del proc["cwlVersion"]

    return (doc, "v1.1")
Example No. 37
        def _service_to_container(name, service):
            container = CommentedMap()
            container['name'] = name
            container['securityContext'] = CommentedMap()
            container['state'] = 'present'

            volumes = []
            pod = {}
            for key, value in service.items():
                if key in self.IGNORE_DIRECTIVES:
                    pass
                elif key == 'cap_add':
                    if not container['securityContext'].get('Capabilities'):
                        container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                    for cap in value:
                        if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                            container['securityContext']['Capabilities']['add'].append(
                                self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
                elif key == 'cap_drop':
                    if not container['securityContext'].get('Capabilities'):
                        container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                    for cap in value:
                        if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                            container['securityContext']['Capabilities']['drop'].append(
                                self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
                elif key == 'command':
                    if isinstance(value, string_types):
                        container['args'] = shlex.split(value)
                    else:
                        container['args'] = value
                elif key == 'container_name':
                    container['name'] = value
                elif key == 'entrypoint':
                    if isinstance(value, string_types):
                        container['command'] = shlex.split(value)
                    else:
                        container['command'] = copy.copy(value)
                elif key == 'environment':
                    expanded_vars = self.expand_env_vars(value)
                    if expanded_vars:
                        container['env'] = expanded_vars
                elif key in ('ports', 'expose'):
                    if not container.get('ports'):
                        container['ports'] = []
                    self.add_container_ports(value, container['ports'])
                elif key == 'privileged':
                    container['securityContext']['privileged'] = value
                elif key == 'read_only':
                    container['securityContext']['readOnlyRootFileSystem'] = value
                elif key == 'stdin_open':
                    container['stdin'] = value
                elif key == 'volumes':
                    vols, vol_mounts = self.get_k8s_volumes(value)
                    if vol_mounts:
                        container['volumeMounts'] = vol_mounts
                    if vols:
                        volumes += vols
                elif key == 'working_dir':
                    container['workingDir'] = value
                else:
                    container[key] = value

            # Translate options:
            if service.get(self.CONFIG_KEY):
                for key, value in service[self.CONFIG_KEY].items():
                    if key == 'deployment':
                        for deployment_key, deployment_value in value.items():
                            if deployment_key != 'force':
                                self.copy_attribute(pod, deployment_key, deployment_value)

            return container, volumes, pod
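
The helper expand_env_vars is not shown in this snippet; a hypothetical sketch of what it might do, mapping compose-style environment settings to the Kubernetes env list format, could look like this:

def expand_env_vars_sketch(env):
    # Hypothetical sketch only: the real expand_env_vars is defined elsewhere.
    # Accept either a mapping or a list of "KEY=VALUE" strings, as in a
    # docker-compose file, and emit Kubernetes-style env entries.
    if isinstance(env, dict):
        pairs = env.items()
    else:
        pairs = ((item.split('=', 1) + [''])[:2] for item in env)
    return [{'name': k, 'value': str(v)} for k, v in pairs]

assert expand_env_vars_sketch(['DEBUG=1']) == [{'name': 'DEBUG', 'value': '1'}]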
Example #38
    def _init_job(self, joborder, **kwargs):
        # type: (Dict[Text, Text], **Any) -> Builder
        """
        kwargs:

        eval_timeout: javascript evaluation timeout
        use_container: do/don't use Docker when DockerRequirement hint provided
        make_fs_access: make an FsAccess() object with given basedir
        basedir: basedir for FsAccess
        docker_outdir: output directory inside docker for this job
        docker_tmpdir: tmpdir inside docker for this job
        docker_stagedir: stagedir inside docker for this job
        outdir: outdir on host for this job
        tmpdir: tmpdir on host for this job
        stagedir: stagedir on host for this job
        select_resources: callback to select compute resources
        debug: enable debugging output
        js_console: enable javascript console output
        """

        builder = Builder()
        builder.job = cast(Dict[Text, Union[Dict[Text, Any], List, Text]],
                           copy.deepcopy(joborder))

        # Validate job order
        try:
            fillInDefaults(self.tool[u"inputs"], builder.job)
            normalizeFilesDirs(builder.job)
            validate.validate_ex(self.names.get_name("input_record_schema",
                                                     ""),
                                 builder.job,
                                 strict=False,
                                 logger=_logger_validation_warnings)
        except (validate.ValidationException, WorkflowException) as e:
            raise WorkflowException("Invalid job input record:\n" + Text(e))

        builder.files = []
        builder.bindings = CommentedSeq()
        builder.schemaDefs = self.schemaDefs
        builder.names = self.names
        builder.requirements = self.requirements
        builder.hints = self.hints
        builder.resources = {}
        builder.timeout = kwargs.get("eval_timeout")
        builder.debug = kwargs.get("debug")
        builder.js_console = kwargs.get("js_console")
        builder.mutation_manager = kwargs.get("mutation_manager")

        builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
        builder.fs_access = builder.make_fs_access(kwargs["basedir"])
        builder.force_docker_pull = kwargs.get("force_docker_pull")

        loadListingReq, _ = self.get_requirement(
            "http://commonwl.org/cwltool#LoadListingRequirement")
        if loadListingReq:
            builder.loadListing = loadListingReq.get("loadListing")

        dockerReq, is_req = self.get_requirement("DockerRequirement")
        defaultDocker = None

        if dockerReq is None and "default_container" in kwargs:
            defaultDocker = kwargs["default_container"]

        if (dockerReq or defaultDocker) and kwargs.get("use_container"):
            if dockerReq:
                # Check if docker output directory is absolute
                if dockerReq.get("dockerOutputDirectory") and dockerReq.get(
                        "dockerOutputDirectory").startswith('/'):
                    builder.outdir = dockerReq.get("dockerOutputDirectory")
                else:
                    builder.outdir = builder.fs_access.docker_compatible_realpath(
                        dockerReq.get("dockerOutputDirectory")
                        or kwargs.get("docker_outdir") or "/var/spool/cwl")
            elif defaultDocker:
                builder.outdir = builder.fs_access.docker_compatible_realpath(
                    kwargs.get("docker_outdir") or "/var/spool/cwl")
            builder.tmpdir = builder.fs_access.docker_compatible_realpath(
                kwargs.get("docker_tmpdir") or "/tmp")
            builder.stagedir = builder.fs_access.docker_compatible_realpath(
                kwargs.get("docker_stagedir") or "/var/lib/cwl")
        else:
            builder.outdir = builder.fs_access.realpath(
                kwargs.get("outdir") or tempfile.mkdtemp())
            builder.tmpdir = builder.fs_access.realpath(
                kwargs.get("tmpdir") or tempfile.mkdtemp())
            builder.stagedir = builder.fs_access.realpath(
                kwargs.get("stagedir") or tempfile.mkdtemp())

        if self.formatgraph:
            for i in self.tool["inputs"]:
                d = shortname(i["id"])
                if d in builder.job and i.get("format"):
                    checkFormat(builder.job[d], builder.do_eval(i["format"]),
                                self.formatgraph)

        builder.bindings.extend(
            builder.bind_input(self.inputs_record_schema, builder.job))

        if self.tool.get("baseCommand"):
            for n, b in enumerate(aslist(self.tool["baseCommand"])):
                builder.bindings.append({
                    "position": [-1000000, n],
                    "datum": b
                })

        if self.tool.get("arguments"):
            for i, a in enumerate(self.tool["arguments"]):
                lc = self.tool["arguments"].lc.data[i]
                fn = self.tool["arguments"].lc.filename
                builder.bindings.lc.add_kv_line_col(len(builder.bindings), lc)
                if isinstance(a, dict):
                    a = copy.copy(a)
                    if a.get("position"):
                        a["position"] = [a["position"], i]
                    else:
                        a["position"] = [0, i]
                    builder.bindings.append(a)
                elif ("$(" in a) or ("${" in a):
                    cm = CommentedMap((("position", [0, i]), ("valueFrom", a)))
                    cm.lc.add_kv_line_col("valueFrom", lc)
                    cm.lc.filename = fn
                    builder.bindings.append(cm)
                else:
                    cm = CommentedMap((("position", [0, i]), ("datum", a)))
                    cm.lc.add_kv_line_col("datum", lc)
                    cm.lc.filename = fn
                    builder.bindings.append(cm)

        # Use Python 2-style sorting of heterogeneous lists
        # (containing both str and int types).
        # TODO: unify the behaviour for both runtimes
        if six.PY3:
            key = cmp_to_key(cmp_like_py2)
        else:  # PY2
            key = lambda binding: binding["position"]
        builder.bindings.sort(key=key)
        builder.resources = self.evalResources(builder, kwargs)
        builder.job_script_provider = kwargs.get("job_script_provider", None)
        return builder
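
The cmp_like_py2 comparator is defined elsewhere in cwltool; a simplified stand-in illustrating the problem it solves (Python 3 refuses to compare int and str directly) might look like this:

from functools import cmp_to_key

def cmp_like_py2_sketch(a, b):
    # Simplified stand-in, not the real cwltool comparator: compare position
    # lists element-wise, ordering ints before strings as Python 2 did for
    # mixed-type comparisons.
    for x, y in zip(a["position"], b["position"]):
        if type(x) is type(y):
            if x != y:
                return -1 if x < y else 1
        else:
            return -1 if isinstance(x, int) else 1
    return len(a["position"]) - len(b["position"])

bindings = [{"position": [0, "x"]}, {"position": [-1000000, 0]}, {"position": [0, 1]}]
bindings.sort(key=cmp_to_key(cmp_like_py2_sketch))
assert bindings[0]["position"] == [-1000000, 0]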
Example #39
    def __init__(self, owner: Any, manager: "m.StorageManager", path: str, *args: Any, **kwargs: Any):
        self.data = CommentedMap()

        super().__init__(owner, manager, path, *args, **kwargs)
Example #40
    def __init__(self, toolpath_object, pos, **kwargs):
        # type: (Dict[Text, Any], int, **Any) -> None
        if "id" in toolpath_object:
            self.id = toolpath_object["id"]
        else:
            self.id = "#step" + Text(pos)

        kwargs["requirements"] = kwargs.get("requirements", []) + toolpath_object.get("requirements", [])
        kwargs["hints"] = kwargs.get("hints", []) + toolpath_object.get("hints", [])

        try:
            if isinstance(toolpath_object["run"], dict):
                self.embedded_tool = kwargs.get("makeTool")(toolpath_object["run"], **kwargs)
            else:
                self.embedded_tool = load_tool(
                    toolpath_object["run"], kwargs.get("makeTool"), kwargs,
                    enable_dev=kwargs.get("enable_dev"),
                    strict=kwargs.get("strict"),
                    fetcher_constructor=kwargs.get("fetcher_constructor"))
        except validate.ValidationException as v:
            raise WorkflowException(
                u"Tool definition %s failed validation:\n%s" %
                (toolpath_object["run"], validate.indent(str(v))))

        validation_errors = []
        self.tool = toolpath_object = copy.deepcopy(toolpath_object)
        bound = set()
        for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
            toolpath_object[toolfield] = []
            for n, step_entry in enumerate(toolpath_object[stepfield]):
                if isinstance(step_entry, (str, unicode)):
                    param = CommentedMap()  # type: CommentedMap
                    inputid = step_entry
                else:
                    param = CommentedMap(step_entry.iteritems())
                    inputid = step_entry["id"]

                shortinputid = shortname(inputid)
                found = False
                for tool_entry in self.embedded_tool.tool[toolfield]:
                    frag = shortname(tool_entry["id"])
                    if frag == shortinputid:
                        param.update(tool_entry)  # type: ignore
                        found = True
                        bound.add(frag)
                        break
                if not found:
                    if stepfield == "in":
                        param["type"] = "Any"
                    else:
                        validation_errors.append(
                            SourceLine(self.tool["out"], n).makeError(
                                "Workflow step output '%s' does not correspond to" % shortname(step_entry))
                            + "\n" + SourceLine(self.embedded_tool.tool, "outputs").makeError(
                                "  tool output (expected '%s')" % (
                                    "', '".join(
                                        [shortname(tool_entry["id"]) for tool_entry in
                                         self.embedded_tool.tool[toolfield]]))))
                param["id"] = inputid
                param.lc.line = toolpath_object[stepfield].lc.data[n][0]
                param.lc.col = toolpath_object[stepfield].lc.data[n][1]
                param.lc.filename = toolpath_object[stepfield].lc.filename
                toolpath_object[toolfield].append(param)

        missing = []
        for i, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
            if shortname(tool_entry["id"]) not in bound:
                if "null" not in tool_entry["type"] and "default" not in tool_entry:
                    missing.append(shortname(tool_entry["id"]))

        if missing:
            validation_errors.append(SourceLine(self.tool, "in").makeError(
                "Step is missing required parameter%s '%s'" % ("s" if len(missing) > 1 else "", "', '".join(missing))))

        if validation_errors:
            raise validate.ValidationException("\n".join(validation_errors))

        super(WorkflowStep, self).__init__(toolpath_object, **kwargs)

        if self.embedded_tool.tool["class"] == "Workflow":
            (feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains embedded workflow but SubworkflowFeatureRequirement not in requirements")

        if "scatter" in self.tool:
            (feature, _) = self.get_requirement("ScatterFeatureRequirement")
            if not feature:
                raise WorkflowException("Workflow contains scatter but ScatterFeatureRequirement not in requirements")

            inputparms = copy.deepcopy(self.tool["inputs"])
            outputparms = copy.deepcopy(self.tool["outputs"])
            scatter = aslist(self.tool["scatter"])

            method = self.tool.get("scatterMethod")
            if method is None and len(scatter) != 1:
                raise validate.ValidationException("Must specify scatterMethod when scattering over multiple inputs")

            inp_map = {i["id"]: i for i in inputparms}
            for s in scatter:
                if s not in inp_map:
                    raise validate.ValidationException(
                        SourceLine(self.tool, "scatter").makeError(u"Scatter parameter '%s' does not correspond to an input parameter of this "
                                                                   u"step, expecting '%s'" % (shortname(s), "', '".join(shortname(k) for k in inp_map.keys()))))

                inp_map[s]["type"] = {"type": "array", "items": inp_map[s]["type"]}

            if self.tool.get("scatterMethod") == "nested_crossproduct":
                nesting = len(scatter)
            else:
                nesting = 1

            for r in xrange(0, nesting):
                for op in outputparms:
                    op["type"] = {"type": "array", "items": op["type"]}
            self.tool["inputs"] = inputparms
            self.tool["outputs"] = outputparms
Example #41
    def __init__(self,
                 toolpath_object,      # type: Dict[Text, Any]
                 pos,                  # type: int
                 loadingContext,       # type: LoadingContext
                 parentworkflowProv=None  # type: Optional[CreateProvProfile]
                ):  # type: (...) -> None
        if "id" in toolpath_object:
            self.id = toolpath_object["id"]
        else:
            self.id = "#step" + Text(pos)

        loadingContext = loadingContext.copy()

        loadingContext.requirements = (getdefault(loadingContext.requirements, []) +
                                  toolpath_object.get("requirements", []) +
                                  get_overrides(getdefault(loadingContext.overrides_list, []),
                                                self.id).get("requirements", []))
        loadingContext.hints = getdefault(loadingContext.hints, []) + toolpath_object.get("hints", [])

        try:
            if isinstance(toolpath_object["run"], dict):
                self.embedded_tool = loadingContext.construct_tool_object(toolpath_object["run"], loadingContext)
            else:
                self.embedded_tool = load_tool(
                    toolpath_object["run"], loadingContext)
        except validate.ValidationException as vexc:
            if loadingContext.debug:
                _logger.exception("Validation exception")
            raise WorkflowException(
                u"Tool definition %s failed validation:\n%s" %
                (toolpath_object["run"], validate.indent(str(vexc))))

        validation_errors = []
        self.tool = toolpath_object = copy.deepcopy(toolpath_object)
        bound = set()
        for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
            toolpath_object[toolfield] = []
            for index, step_entry in enumerate(toolpath_object[stepfield]):
                if isinstance(step_entry, string_types):
                    param = CommentedMap()  # type: CommentedMap
                    inputid = step_entry
                else:
                    param = CommentedMap(six.iteritems(step_entry))
                    inputid = step_entry["id"]

                shortinputid = shortname(inputid)
                found = False
                for tool_entry in self.embedded_tool.tool[toolfield]:
                    frag = shortname(tool_entry["id"])
                    if frag == shortinputid:
                        # If the step provides a default for a parameter,
                        # we do not want the tool's default to override it.
                        step_default = None
                        if "default" in param and "default" in tool_entry:
                            step_default = param["default"]
                        param.update(tool_entry)
                        param["_tool_entry"] = tool_entry
                        if step_default is not None:
                            param["default"] = step_default
                        found = True
                        bound.add(frag)
                        break
                if not found:
                    if stepfield == "in":
                        param["type"] = "Any"
                        param["not_connected"] = True
                    else:
                        validation_errors.append(
                            SourceLine(self.tool["out"], index).makeError(
                                "Workflow step output '%s' does not correspond to"
                                % shortname(step_entry))
                            + "\n" + SourceLine(self.embedded_tool.tool, "outputs").makeError(
                                "  tool output (expected '%s')" % (
                                    "', '".join(
                                        [shortname(tool_entry["id"]) for tool_entry in
                                         self.embedded_tool.tool[toolfield]]))))
                param["id"] = inputid
                param.lc.line = toolpath_object[stepfield].lc.data[index][0]
                param.lc.col = toolpath_object[stepfield].lc.data[index][1]
                param.lc.filename = toolpath_object[stepfield].lc.filename
                toolpath_object[toolfield].append(param)

        missing = []
        for i, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
            if shortname(tool_entry["id"]) not in bound:
                if "null" not in tool_entry["type"] and "default" not in tool_entry:
                    missing.append(shortname(tool_entry["id"]))

        if missing:
            validation_errors.append(SourceLine(self.tool, "in").makeError(
                "Step is missing required parameter%s '%s'" %
                ("s" if len(missing) > 1 else "", "', '".join(missing))))

        if validation_errors:
            raise validate.ValidationException("\n".join(validation_errors))

        super(WorkflowStep, self).__init__(toolpath_object, loadingContext)

        if self.embedded_tool.tool["class"] == "Workflow":
            (feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains embedded workflow but "
                    "SubworkflowFeatureRequirement not in requirements")

        if "scatter" in self.tool:
            (feature, _) = self.get_requirement("ScatterFeatureRequirement")
            if not feature:
                raise WorkflowException(
                    "Workflow contains scatter but ScatterFeatureRequirement "
                    "not in requirements")

            inputparms = copy.deepcopy(self.tool["inputs"])
            outputparms = copy.deepcopy(self.tool["outputs"])
            scatter = aslist(self.tool["scatter"])

            method = self.tool.get("scatterMethod")
            if method is None and len(scatter) != 1:
                raise validate.ValidationException(
                    "Must specify scatterMethod when scattering over multiple inputs")

            inp_map = {i["id"]: i for i in inputparms}
            for inp in scatter:
                if inp not in inp_map:
                    raise validate.ValidationException(
                        SourceLine(self.tool, "scatter").makeError(
                            "Scatter parameter '%s' does not correspond to "
                            "an input parameter of this step, expecting '%s'"
                            % (shortname(inp), "', '".join(
                                shortname(k) for k in inp_map.keys()))))

                inp_map[inp]["type"] = {"type": "array", "items": inp_map[inp]["type"]}

            if self.tool.get("scatterMethod") == "nested_crossproduct":
                nesting = len(scatter)
            else:
                nesting = 1

            for index in range(0, nesting):
                for oparam in outputparms:
                    oparam["type"] = {"type": "array", "items": oparam["type"]}
            self.tool["inputs"] = inputparms
            self.tool["outputs"] = outputparms
        self.prov_obj = None  # type: Optional[CreateProvProfile]
        if loadingContext.research_obj:
            self.prov_obj = parentworkflowProv
            if self.embedded_tool.tool["class"] == "Workflow":
                self.parent_wf = self.embedded_tool.parent_wf
            else:
                self.parent_wf = self.prov_obj
Example #42
        req_dict[key] = _create_sorted_commented_map(req_dict[key])


if __name__ == '__main__':
    args = get_args()

    bear_dirs = [PROJECT_BEAR_DIR]

    if args.bear_dirs is not None:
        bear_dirs.extend(args.bear_dirs)

    all_bears = get_all_bears(bear_dirs)

    instance_dict = get_all_requirements(all_bears)

    requirements = CommentedMap()
    requirements.yaml_set_start_comment(
        'This is an automatically generated file.\n'
        'And should not be edited by hand.')

    requirements['overrides'] = 'coala-build.yaml'
    requirements['gem_requirements'] = get_gem_requirements(
        instance_dict['GemRequirement'])
    requirements['r_script_requirements'] = get_r_requirements(
        instance_dict['RscriptRequirement'])
    requirements['npm_requirements'] = get_npm_requirements(
        instance_dict['NpmRequirement'])
    requirements['pip_requirements'] = get_pip_requirements(
        instance_dict['PipRequirement'])
    requirements['composer_requirements'] = get_composer_requirements(
        instance_dict['ComposerRequirement'])
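
For reference, yaml_set_start_comment renders each line of the given text as a '#' comment at the top of the dumped document; a minimal round trip looks roughly like this:

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

requirements = CommentedMap()
requirements.yaml_set_start_comment(
    'This is an automatically generated file.\n'
    'And should not be edited by hand.')
requirements['overrides'] = 'coala-build.yaml'
YAML().dump(requirements, sys.stdout)
# Expected output (roughly):
#   # This is an automatically generated file.
#   # And should not be edited by hand.
#   overrides: coala-build.yaml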
Example #43
    def __init__(self):
        CommentedMap.__init__(self)
Example #44
    def test_CommentedMapEquality(self):
        cm = CommentedMap((("b", 2),))
        cm.insert(1, "a", 1, comment="a comment")
        self.assertEqual(cm, {"a": 1, "b": 2})
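
CommentedMap.insert(pos, key, value, comment=...) both places the key at the given position and attaches an end-of-line comment; dumping the map from the test above should produce something like the following:

import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap

cm = CommentedMap((("b", 2),))
cm.insert(1, "a", 1, comment="a comment")
YAML().dump(cm, sys.stdout)
# Expected output (roughly):
#   b: 2
#   a: 1  # a comment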
Example #45
    def test_issue_242(self):
        from ruamel.yaml.comments import CommentedMap

        d0 = CommentedMap([('a', 'b')])
        assert d0['a'] == 'b'
Example #46
def convert_xlsx_to_yaml_calendar(
    data_xlsx_fp: FileIO,
    start_date: date,
    *,
    item_delimiter: str = '|',
    relative_week_number_column: str = None,
    worksheet: str = None,
) -> str:
    """Converts an XLSX file to a YAML string representing a weekly calendar

    This function takes an XLSX file, a start date, an item delimiter for
    decomposing Excel-file cell values into lists (defaulting to a vertical
    pipe), a key column for week numbers (defaulting to the left-most column),
    and a worksheet name (defaulting to the first worksheet). It outputs a
    string containing a YAML representation of the XLSX file as a dictionary
    keyed by the specified key column, whose values are dictionaries encoding
    the corresponding rows of the specified worksheet.

    Args:
        data_xlsx_fp: pointer to an XLSX file or file-like object with columns
            headers in its first row and ready to be read from; any column
            names in data_xlsx_fp corresponding to day names in the current
            locale, as identified by the calendar module, are treated as
            providing activities for the corresponding calendar date and will
            be ordered according to ISO 8601 in output; all other columns are
            treated as providing information about the week itself
        start_date: specifies the start date for the calendar, which is
            adjusted to the Monday of the week that the start_date appears in,
            per ISO 8601's specification that weeks run from Monday to Sunday
        item_delimiter: a string whose values will be used to split item values
            into lists
        relative_week_number_column: a column header from data_xlsx_fp, whose
            values should be used as key values in the YAML string generated;
            the values of relative_week_number_column should be integers, with
            the value one (1) representing the week that start_date appears in
        worksheet: a worksheet name from data_xlsx_fp, whose values should be
            used in the dictionary generated

    Returns:
         A string containing a YAML representation of the XLSX file as a
         dictionary keyed by the specified key column, whose values are
         dictionaries encoding the corresponding rows of the specified
         worksheet.

    """

    data_dict = convert_xlsx_to_dict(data_xlsx_fp,
                                     key=relative_week_number_column,
                                     worksheet=worksheet)

    start_date_adjusted = start_date - timedelta(days=start_date.weekday())

    weekdays_lookup_dict = {day.lower(): n for n, day in enumerate(day_name)}

    calendar_dict = CommentedMap()
    for week_number, week_data in data_dict.items():
        week_number = int(week_number)
        calendar_dict[week_number] = CommentedMap()
        for weekday in week_data:
            if (weekday == relative_week_number_column
                    or week_data[weekday] is None):
                continue
            if weekday.lower() in weekdays_lookup_dict:
                weekday_date = (
                    start_date_adjusted +
                    timedelta(days=7 * (int(week_number) - 1) +
                              int(weekdays_lookup_dict[weekday.lower()]))
                ).strftime('%d%b%Y').upper()
                calendar_dict[week_number][weekday] = CommentedMap()
                calendar_dict[week_number][weekday]['Date'] = weekday_date
                calendar_dict[week_number][weekday]['Activities'] = (
                    week_data[weekday].split(item_delimiter))

            else:
                calendar_dict[week_number][weekday] = (
                    week_data[weekday].split(item_delimiter))

    yaml = YAML()
    calendar_yaml = StringIO()
    yaml.dump(data=calendar_dict, stream=calendar_yaml)
    calendar_yaml.seek(0)

    return calendar_yaml.read()
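
A usage sketch for the function above; the file name "schedule.xlsx" and the "Week" column are assumptions for illustration only:

from datetime import date

with open('schedule.xlsx', 'rb') as fp:
    calendar_yaml = convert_xlsx_to_yaml_calendar(
        fp,
        date(2024, 1, 3),  # adjusted back to Monday, 2024-01-01
        relative_week_number_column='Week',
    )
print(calendar_yaml)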
Example #47
    def bind_input(self,
                   schema,                   # type: MutableMapping[Text, Any]
                   datum,                    # type: Any
                   discover_secondaryFiles,  # type: bool
                   lead_pos=None,            # type: Optional[Union[int, List[int]]]
                   tail_pos=None,            # type: Optional[List[int]]
                  ):  # type: (...) -> List[MutableMapping[Text, Any]]

        if tail_pos is None:
            tail_pos = []
        if lead_pos is None:
            lead_pos = []

        bindings = []  # type: List[MutableMapping[Text, Text]]
        binding = None  # type: Optional[MutableMapping[Text,Any]]
        value_from_expression = False
        if "inputBinding" in schema and isinstance(schema["inputBinding"], MutableMapping):
            binding = CommentedMap(schema["inputBinding"].items())
            assert binding is not None

            bp = list(aslist(lead_pos))
            if "position" in binding:
                bp.extend(aslist(binding["position"]))
            else:
                bp.append(0)
            bp.extend(aslist(tail_pos))
            binding["position"] = bp

            binding["datum"] = datum
            if "valueFrom" in binding:
                value_from_expression = True

        # Handle union types
        if isinstance(schema["type"], MutableSequence):
            bound_input = False
            for t in schema["type"]:
                avsc = None  # type: Optional[Schema]
                if isinstance(t, string_types) and self.names.has_name(t, ""):
                    avsc = self.names.get_name(t, "")
                elif isinstance(t, MutableMapping) and "name" in t and self.names.has_name(t["name"], ""):
                    avsc = self.names.get_name(t["name"], "")
                if not avsc:
                    avsc = make_avsc_object(convert_to_dict(t), self.names)
                assert avsc is not None
                if validate.validate(avsc, datum):
                    schema = copy.deepcopy(schema)
                    schema["type"] = t
                    if not value_from_expression:
                        return self.bind_input(schema, datum, lead_pos=lead_pos, tail_pos=tail_pos, discover_secondaryFiles=discover_secondaryFiles)
                    else:
                        self.bind_input(schema, datum, lead_pos=lead_pos, tail_pos=tail_pos, discover_secondaryFiles=discover_secondaryFiles)
                        bound_input = True
            if not bound_input:
                raise validate.ValidationException(u"'%s' is not a valid union %s" % (datum, schema["type"]))
        elif isinstance(schema["type"], MutableMapping):
            st = copy.deepcopy(schema["type"])
            if binding is not None\
                    and "inputBinding" not in st\
                    and "type" in st\
                    and st["type"] == "array"\
                    and "itemSeparator" not in binding:
                st["inputBinding"] = {}
            for k in ("secondaryFiles", "format", "streamable"):
                if k in schema:
                    st[k] = schema[k]
            if value_from_expression:
                self.bind_input(st, datum, lead_pos=lead_pos, tail_pos=tail_pos, discover_secondaryFiles=discover_secondaryFiles)
            else:
                bindings.extend(self.bind_input(st, datum, lead_pos=lead_pos, tail_pos=tail_pos, discover_secondaryFiles=discover_secondaryFiles))
        else:
            if schema["type"] in self.schemaDefs:
                schema = self.schemaDefs[schema["type"]]

            if schema["type"] == "record":
                for f in schema["fields"]:
                    if f["name"] in datum and datum[f["name"]] is not None:
                        bindings.extend(self.bind_input(f, datum[f["name"]], lead_pos=lead_pos, tail_pos=f["name"], discover_secondaryFiles=discover_secondaryFiles))
                    else:
                        datum[f["name"]] = f.get("default")

            if schema["type"] == "array":
                for n, item in enumerate(datum):
                    b2 = None
                    if binding is not None:
                        b2 = copy.deepcopy(binding)
                        b2["datum"] = item
                    itemschema = {
                        u"type": schema["items"],
                        u"inputBinding": b2
                    }
                    for k in ("secondaryFiles", "format", "streamable"):
                        if k in schema:
                            itemschema[k] = schema[k]
                    bindings.extend(
                        self.bind_input(itemschema, item, lead_pos=n, tail_pos=tail_pos, discover_secondaryFiles=discover_secondaryFiles))
                binding = None

            def _capture_files(f):
                self.files.append(f)
                return f

            if schema["type"] == "File":
                self.files.append(datum)
                if (binding and binding.get("loadContents")) or schema.get("loadContents"):
                    with self.fs_access.open(datum["location"], "rb") as f:
                        datum["contents"] = f.read(CONTENT_LIMIT).decode("utf-8")

                if "secondaryFiles" in schema:
                    if "secondaryFiles" not in datum:
                        datum["secondaryFiles"] = []
                    for sf in aslist(schema["secondaryFiles"]):
                        if 'required' in sf:
                            sf_required = self.do_eval(sf['required'], context=datum)
                        else:
                            sf_required = True

                        if "$(" in sf["pattern"] or "${" in sf["pattern"]:
                            sfpath = self.do_eval(sf["pattern"], context=datum)
                        else:
                            sfpath = substitute(datum["basename"], sf["pattern"])

                        for sfname in aslist(sfpath):
                            if not sfname:
                                continue
                            found = False
                            for d in datum["secondaryFiles"]:
                                if not d.get("basename"):
                                    d["basename"] = d["location"][d["location"].rindex("/")+1:]
                                if d["basename"] == sfname:
                                    found = True
                            if not found:
                                sf_location = datum["location"][0:datum["location"].rindex("/")+1]+sfname
                                if isinstance(sfname, MutableMapping):
                                    datum["secondaryFiles"].append(sfname)
                                elif discover_secondaryFiles and self.fs_access.exists(sf_location):
                                    datum["secondaryFiles"].append({
                                        "location": sf_location,
                                        "basename": sfname,
                                        "class": "File"})
                                elif sf_required:
                                    raise WorkflowException("Missing required secondary file '%s' from file object: %s" % (
                                        sfname, json_dumps(datum, indent=4)))

                    normalizeFilesDirs(datum["secondaryFiles"])

                if "format" in schema:
                    try:
                        check_format(datum, self.do_eval(schema["format"]),
                                     self.formatgraph)
                    except validate.ValidationException as ve:
                        raise WorkflowException(
                            "Expected value of '%s' to have format %s but\n "
                            " %s" % (schema["name"], schema["format"], ve))

                visit_class(datum.get("secondaryFiles", []), ("File", "Directory"), _capture_files)

            if schema["type"] == "Directory":
                ll = schema.get("loadListing") or self.loadListing
                if ll and ll != "no_listing":
                    get_listing(self.fs_access, datum, (ll == "deep_listing"))
                self.files.append(datum)

            if schema["type"] == "Any":
                visit_class(datum, ("File", "Directory"), _capture_files)

        # Position to front of the sort key
        if binding is not None:
            for bi in bindings:
                bi["position"] = binding["position"] + bi["position"]
            bindings.append(binding)

        return bindings
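
The substitute helper used for secondaryFiles patterns is defined elsewhere in cwltool; a minimal sketch of the standard CWL semantics it implements (each leading '^' strips one extension from the basename before appending) could look like this:

def substitute_sketch(value, replace):
    # Minimal sketch, assuming the standard CWL pattern semantics; not the
    # real cwltool implementation.
    if replace.startswith('^'):
        if '.' in value:
            return substitute_sketch(value[:value.rindex('.')], replace[1:])
        # No extension left to strip; drop the remaining carets.
        return value + replace.lstrip('^')
    return value + replace

assert substitute_sketch('reads.bam', '.bai') == 'reads.bam.bai'
assert substitute_sketch('reads.bam', '^.bai') == 'reads.bai'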
Example #48
    def to_yaml(self, data):
        self._should_be_mapping(data)
        # TODO: enforce maximum/minimum key-count constraints
        return CommentedMap([(self._key_validator.to_yaml(key),
                              self._value_validator.to_yaml(value))
                             for key, value in data.items()])
Example #49
    def test_CommentedMap(self):
        cm = CommentedMap()
        # Check that the ruamel.yaml bug raising "TypeError: source has
        # undefined order" on copy is fixed.
        self.assertEqual(cm, cm.copy())
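
On releases where that ordering bug is fixed, a shallow copy of a populated map should also compare equal and preserve key order:

from ruamel.yaml.comments import CommentedMap

cm = CommentedMap((("a", 1), ("b", 2)))
cm2 = cm.copy()
assert cm2 == cm and list(cm2) == ["a", "b"]  # order is preserved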
Example #50
        def _service_to_k8s_container(name, config, container_name=None):
            container = CommentedMap()

            if container_name:
                container['name'] = container_name
            else:
                # Prefer the compose container_name directive when present.
                container['name'] = config['container_name'] if config.get('container_name') else name

            container['securityContext'] = CommentedMap()
            container['state'] = 'present'
            volumes = []

            for key, value in iteritems(config):
                if key in self.IGNORE_DIRECTIVES:
                    pass
                elif key == 'cap_add':
                    if not container['securityContext'].get('Capabilities'):
                        container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                    for cap in value:
                        if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                            container['securityContext']['Capabilities']['add'].append(
                                self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
                elif key == 'cap_drop':
                    if not container['securityContext'].get('Capabilities'):
                        container['securityContext']['Capabilities'] = dict(add=[], drop=[])
                    for cap in value:
                        if self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap]:
                            container['securityContext']['Capabilities']['drop'].append(
                                self.DOCKER_TO_KUBE_CAPABILITY_MAPPING[cap])
                elif key == 'command':
                    if isinstance(value, string_types):
                        container['args'] = shlex.split(value)
                    else:
                        container['args'] = copy.copy(value)
                elif key == 'container_name':
                    pass
                elif key == 'entrypoint':
                    if isinstance(value, string_types):
                        container['command'] = shlex.split(value)
                    else:
                        container['command'] = copy.copy(value)
                elif key == 'environment':
                    expanded_vars = self.expand_env_vars(value)
                    if expanded_vars:
                        if 'env' not in container:
                            container['env'] = []

                        container['env'].extend(expanded_vars)
                elif key in ('ports', 'expose'):
                    if not container.get('ports'):
                        container['ports'] = []
                    self.add_container_ports(value, container['ports'])
                elif key == 'privileged':
                    container['securityContext']['privileged'] = value
                elif key == 'read_only':
                    container['securityContext']['readOnlyRootFileSystem'] = value
                elif key == 'stdin_open':
                    container['stdin'] = value
                elif key == 'volumes':
                    vols, vol_mounts = self.get_k8s_volumes(value)
                    if vol_mounts:
                        if 'volumeMounts' not in container:
                            container['volumeMounts'] = []

                        container['volumeMounts'].extend(vol_mounts)
                    if vols:
                        volumes += vols
                elif key == 'secrets':
                    for secret, secret_config in iteritems(value):
                        if self.CONFIG_KEY in secret_config:
                            vols, vol_mounts, env_variables = self.get_k8s_secrets(secret, secret_config[self.CONFIG_KEY])

                            if vol_mounts:
                                if 'volumeMounts' not in container:
                                    container['volumeMounts'] = []

                                container['volumeMounts'].extend(vol_mounts)

                            if vols:
                                volumes += vols

                            if env_variables:
                                if 'env' not in container:
                                    container['env'] = []

                                container['env'].extend(env_variables)
                elif key == 'working_dir':
                    container['workingDir'] = value
                else:
                    container[key] = value
            return container, volumes
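
The helpers add_container_ports and get_k8s_volumes are defined elsewhere in the class; a hypothetical sketch of the port handling, turning compose-style "HOST:CONTAINER" strings into Kubernetes containerPort entries, might be:

def add_container_ports_sketch(ports, existing):
    # Hypothetical sketch only: the real helper lives elsewhere.
    for port in ports:
        # "8080:80", "80", or 80 all reduce to the container-side port.
        container_port = int(str(port).rsplit(':', 1)[-1].split('/')[0])
        if all(p.get('containerPort') != container_port for p in existing):
            existing.append({'containerPort': container_port})

ports = []
add_container_ports_sketch(['8080:80', 443], ports)
assert ports == [{'containerPort': 80}, {'containerPort': 443}]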