Example #1
def decrypt(config, output=False):
    stage = config['stage']
    check_encryption_required_fields(config['stages'][stage])
    region = config['stages'][stage]['keyRegion']
    kms = boto3.client('kms', region_name=region)

    enc_config = get_secret_config(config, stage)
    if isinstance(enc_config, string_types):
        # This is the old-style secretConfig, when everything was encrypted
        # into a single string.
        stage_cfg = base64.b64decode(enc_config)
        resp = kms.decrypt(CiphertextBlob=stage_cfg)
        plain = json.loads(resp['Plaintext'])
        if output:
            print('Decrypted config for stage {}:\n\n{}'.format(
                stage,
                yaml.round_trip_dump(plain)))
        return plain
    elif isinstance(enc_config, dict):
        # This is the new way, where all config items are encrypted separately.
        plain = {}
        for key, value in enc_config.items():
            if is_value_already_encrypted(value):
                ciphertext_blob = base64.b64decode(
                    value[len(ENCRYPTED_PREFIX):])
                resp = kms.decrypt(CiphertextBlob=ciphertext_blob)
                plain[key] = resp['Plaintext']
            else:
                raise Exception('Found unencrypted item in secretConfig: '
                                '{}'.format(key))
        if output:
            print('Decrypted config for stage {}:\n\n{}'.format(
                stage,
                yaml.round_trip_dump(plain)))
        return plain
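A minimal sketch of the dump-to-string behaviour both branches above rely on: when round_trip_dump is given no stream, it returns the serialized document as a str, which is what the print calls depend on (the KMS client and config layout are project-specific, so a plain mapping stands in here):

import ruamel.yaml as yaml

plain = {'dbUrl': 'postgres://db:5432/app', 'apiKey': 'abc123'}
text = yaml.round_trip_dump(plain)  # no stream given, so the YAML comes back as a str
print(text)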
Example #2
def sorted_boskos_config():
    """Get the sorted boskos configuration."""
    with open(test_infra('boskos/resources.yaml'), 'r') as fp:
        configs = yaml.round_trip_load(fp, preserve_quotes=True)
    for rtype in configs['resources']:
        rtype["names"] = sorted(rtype["names"])
    output = cStringIO.StringIO()
    yaml.round_trip_dump(
        configs, output, default_flow_style=False, width=float("inf"))
    return output
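For reference, a minimal sketch of the comment-preserving round trip this relies on (the resource data is made up; cStringIO marks the original as Python 2, so the sketch dumps straight to a string instead):

import ruamel.yaml as yaml

src = (
    "resources:\n"
    "- type: gce-project  # managed pool\n"
    "  names:\n"
    "  - project-c\n"
    "  - project-a\n"
)
configs = yaml.round_trip_load(src, preserve_quotes=True)
for rtype in configs['resources']:
    rtype['names'] = sorted(rtype['names'])
print(yaml.round_trip_dump(configs, default_flow_style=False, width=float("inf")))
# the '# managed pool' comment survives; only the list order changes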
Example #3
def sorted_prow_config(prow_config_path=None):
    """Get the sorted Prow configuration."""
    with open(prow_config_path, 'r') as fp:
        configs = yaml.round_trip_load(fp, preserve_quotes=True)
    configs['periodics'] = sorted_seq(configs['periodics'])
    configs['presubmits'] = sorted_map(configs['presubmits'])
    configs['postsubmits'] = sorted_map(configs['postsubmits'])
    output = cStringIO.StringIO()
    yaml.round_trip_dump(
        configs, output, default_flow_style=False, width=float("inf"))
    return output
Example #4
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
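The dump-then-reload call above is a deep-copy idiom: round-tripping the CommentedMap through text yields an independent copy that keeps comments and formatting, which copy.deepcopy did not reliably preserve on older ruamel.yaml releases. A minimal sketch, assuming a ruamel.yaml version that still ships the legacy RoundTripLoader:

import ruamel.yaml as yaml

orig = yaml.round_trip_load("a: 1  # keep me\nb: 2\n")
tmp_copy = yaml.load(yaml.round_trip_dump(orig, default_flow_style=False),
                     yaml.RoundTripLoader)
tmp_copy['a'] = 99
print(orig['a'])                       # 1 -- the copy is independent
print(yaml.round_trip_dump(tmp_copy))  # the '# keep me' comment survives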
Example #5
    def generate(self, legacy_config):
        action_id = 1
        actions = {'actions': {}}
        for project in legacy_config:
            if project == '.defaults' or project == '.regex':
                continue
            unit = legacy_config[project]['delete']['unit']
            count = legacy_config[project]['delete']['count']
            raw_regex = legacy_config[project][self.RAW_REGEX]
            if raw_regex:
                regex = project
            else:
                regex = self.format_regex(project)
            action = self.generate_action(unit, count, regex, exclude=False)
            actions['actions'][action_id] = action
            action_id += 1
        actions['actions'][action_id] = self.generate_defaults(legacy_config)
        with open(self.actions_file, 'w') as f:
            yaml.round_trip_dump(actions, f, default_flow_style=False, explicit_start=True)
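explicit_start=True makes round_trip_dump emit the '---' document-start marker; a minimal sketch of the effect:

import ruamel.yaml as yaml

actions = {'actions': {1: {'action': 'delete_indices'}}}
print(yaml.round_trip_dump(actions, default_flow_style=False, explicit_start=True))
# ---
# actions:
#   1:
#     action: delete_indices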
Example #6
def normalize_docker_compose_command():
    """
    Sub-command, see main()
    """
    parser = argparse.ArgumentParser(
        description='Normalize the input docker-compose file, then write it in the output')
    parser.add_argument('-i', '--input', type=str,
                        help='<Required> Path to the input yaml file', required=True)
    parser.add_argument('-o', '--output', type=str,
                        help='Path to the output file, or stdout by default')

    args = parser.parse_args(sys.argv[2:])
    input_file = open(args.input, 'r')
    content = input_file.read()
    input_file.close()

    output_data = normalize_docker_compose(content)

    output_file = open(args.output, 'w') if args.output else sys.stdout
    round_trip_dump(output_data, output_file)
    output_file.close()
Example #7
    def write_template(self, output, filename=None):
        if not filename:
            filename = 'swagger.yml'
        swagger_file = os.path.join(self.project_dir, filename)
        _, ext = os.path.splitext(filename)
        with open(swagger_file, 'w') as fh:
            # Could be `.yaml` or `.yml` :/
            if '.y' in ext:
                fh.write(yaml.round_trip_dump(output))
            elif '.json' in ext:
                fh.write(json.dumps(output))
        return swagger_file
Example #8
    def _process_section(self, section_value, callback=None, templar=None):
        if not templar:
            templar = self._templar
        processed = ordereddict()
        for key, value in section_value.items():
            if isinstance(value, string_types):
                # strings can be templated
                processed[key] = templar.template(value)
                if isinstance(processed[key], AnsibleUnsafeText):
                    processed[key] = str(processed[key])
            elif isinstance(value, (list, dict)):
                # if it's a dimensional structure, it's cheaper just to serialize
                # it, treat it like a template, and then deserialize it again
                buffer = BytesIO()  # use bytes explicitly, not unicode
                yaml.round_trip_dump(value, buffer)
                processed[key] = yaml.round_trip_load(
                    templar.template(buffer.getvalue()))
            else:
                # ints, booleans, etc.
                processed[key] = value
            if callback:
                callback(processed)
        return processed
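A minimal sketch of the serialize-template-deserialize round trip used for nested values above. templar is Ansible-specific, so a plain string replacement stands in for it, and since the original targets Python 2 (where round_trip_dump writes encoded bytes into the BytesIO), a StringIO is the Python 3 equivalent:

from io import StringIO

import ruamel.yaml as yaml

value = {'image': '{{ registry }}/app', 'ports': [80, 443]}
buffer = StringIO()
yaml.round_trip_dump(value, buffer)
templated = buffer.getvalue().replace('{{ registry }}', 'docker.io')
restored = yaml.round_trip_load(templated)
print(restored['image'])  # docker.io/app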
Example #9
def install_extras(mkdocs_yml, theme=None):
    with open(mkdocs_yml) as f:
        config, indent, bsi = load_yaml_guess_indent(f)
        if theme is None and 'theme' not in config:
            raise ValueError('no theme specified in mkdocs.yml; pass ' +
                             '--theme instead')
        theme_dir = get_theme_dir(config.get('theme', theme))
        docs_dir = config.get('docs_dir', 'docs')

        for path, prop in [('css', 'extra_css'), ('js', 'extra_javascript')]:
            files = os.listdir(os.path.join(theme_dir, path))
            if not files:
                continue

            extras = config.setdefault(prop, [])
            for f in files:
                relpath = os.path.join(path, f)
                shutil.copyfile(os.path.join(theme_dir, relpath),
                                os.path.join(docs_dir, relpath))
                if relpath not in extras:
                    extras.append(relpath)
    yaml.round_trip_dump(config, open(mkdocs_yml, 'w'), indent=indent,
                         block_seq_indent=bsi)
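load_yaml_guess_indent comes from ruamel.yaml.util and returns the parsed data together with the detected indent and block-sequence indent, which is what lets the file be rewritten in its original style above. A minimal sketch:

import ruamel.yaml as yaml
from ruamel.yaml.util import load_yaml_guess_indent

config, indent, bsi = load_yaml_guess_indent("extra_css:\n  - css/extra.css\n")
config['extra_css'].append('css/theme.css')
print(yaml.round_trip_dump(config, indent=indent, block_seq_indent=bsi))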
Example #10
def delete_command():
    """
    Sub-command, see main()
    """
    parser = argparse.ArgumentParser(
        description='Delete one item from the input yaml file')
    parser.add_argument('path_to_key', type=str, nargs='+',
                        help='<Required> Yaml item to be deleted, e.g. "foo 0 bar"')
    parser.add_argument('-i', '--input', type=str,
                        help='<Required> Path to the input yaml files', required=True)
    parser.add_argument('-o', '--output', type=str,
                        help='Path to the output file, or stdout by default')

    args = parser.parse_args(sys.argv[2:])
    input_file = open(args.input, 'r')
    data = round_trip_load(input_file.read(), preserve_quotes=True)
    input_file.close()

    output_data, _ = delete_yaml_item(data, args.path_to_key, True)

    output_file = open(args.output, 'w') if args.output else sys.stdout
    round_trip_dump(output_data, output_file)
    output_file.close()
Example #11
def update_generated_config(path, latest_version):
    print("Updating test_config.yaml...")
    with open(path, 'r') as f:
        config = yaml.round_trip_load(f)

    v = latest_version
    suffixes = ['beta', 'stable1', 'stable2', 'stable3']
    for i, s in enumerate(suffixes):
        vs = "%d.%d" % (v[0], v[1] + 1 - i)
        markers = config['k8sVersions'][s]
        markers['version'] = vs
        for j, arg in enumerate(markers['args']):
            markers['args'][j] = re.sub(
                r'latest(-\d+\.\d+)?', 'latest-%s' % vs, arg)

        node = config['nodeK8sVersions'][s]
        for k, arg in enumerate(node['args']):
            node['args'][k] = re.sub(
                r'master|release-\d+\.\d+', 'release-%s' % vs, arg)
        node['prowImage'] = node['prowImage'].rpartition('-')[0] + '-' + vs

    with open(path, 'w') as f:
        yaml.round_trip_dump(config, f)
Example #12
    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
Example #13
def delete_command():
    """
    Sub-command, see main()
    """
    parser = argparse.ArgumentParser(
        description='Delete one item from the input yaml file')
    parser.add_argument('path_to_key', type=str, nargs='+',
                        help='<Required> Yaml item to be deleted, e.g. "foo 0 bar"')
    parser.add_argument('-i', '--input', type=str,
                        help='<Required> Path to the input yaml files', required=True)
    parser.add_argument('-o', '--output', type=str,
                        help='Path to the output file, or stdout by default')

    args = parser.parse_args(sys.argv[2:])
    input_file = open(args.input, 'r')
    data = round_trip_load(input_file.read(), preserve_quotes=True)
    input_file.close()

    output_data, _ = delete_yaml_item(data, args.path_to_key, True)

    output_file = open(args.output, 'w') if args.output else sys.stdout
    round_trip_dump(output_data, output_file)
    output_file.close()
Example #14
    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False),  # noqa: E501
                                 yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
Example #15
    def _process_section(self, section_value, callback=None, templar=None):
        if not templar:
            templar = self._templar
        processed = yaml.compat.ordereddict()
        for key, value in section_value.items():
            if isinstance(value, basestring):
                # strings can be templated
                processed[key] = templar.template(value)
                if isinstance(processed[key], AnsibleUnsafeText):
                    processed[key] = str(processed[key])
            elif isinstance(value, (list, dict)):
                # if it's a dimensional structure, it's cheaper just to serialize
                # it, treat it like a template, and then deserialize it again
                buffer = BytesIO()  # use bytes explicitly, not unicode
                yaml.round_trip_dump(value, buffer)
                processed[key] = yaml.round_trip_load(
                    templar.template(buffer.getvalue())
                )
            else:
                # ints, booleans, etc.
                processed[key] = value
            if callback:
                callback(processed)
        return processed
Example #16
def edit_soa_configs(filename, instance, mem, cpu):
    with open(filename, 'r') as fi:
        yams = fi.read()
        yams = yams.replace('cpus: .', 'cpus: 0.')
        data = yaml.round_trip_load(yams, preserve_quotes=True)

    instdict = data[instance]
    if mem:
        instdict['mem'] = mem
    else:
        instdict['cpus'] = cpu
    out = yaml.round_trip_dump(data, width=10000)

    with open(filename, 'w') as fi:
        fi.write(out)
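width=10000 keeps long lines from being wrap-folded on dump; without it round_trip_dump breaks long plain scalars at roughly 80 columns, which creates noisy diffs in config files. A minimal sketch of the difference:

import ruamel.yaml as yaml

long_value = ("run something long " * 10).strip()
data = yaml.round_trip_load("cmd: " + long_value + "\n")
print(yaml.round_trip_dump(data))               # folded across several lines
print(yaml.round_trip_dump(data, width=10000))  # stays on a single line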
Example #17
def export_config(args, output_file=None):
    if not output_file:
        output_file = sys.stdout
    if os.path.exists(args.config):
        with open(args.config, 'rt') as f:
            config = yaml.round_trip_load(f.read())
        STATE['stages'] = config['stages']
        config['config'] = _decrypt_dict(config['config'])
    else:
        config = {
            'stages': {
                env['name']: {
                    'environment': env['name'],
                    'key': 'enter-key-name-here'
                }
                for env in STATE['awscreds'].environments
            },
            'config': {}
        }

    if args.json:
        output_file.write(json.dumps(config, indent=4))
    elif config:
        yaml.round_trip_dump(config, output_file)
Example #18
def export_config(args, output_file=None):
    if not output_file:
        output_file = sys.stdout
    if os.path.exists(args.config):
        with open(args.config, "rt") as f:
            config = yaml.round_trip_load(f.read())
        STATE["stages"] = config["stages"]
        config["config"] = _decrypt_dict(config["config"])
    else:
        config = {
            "stages": {
                env["name"]: {
                    "environment": env["name"],
                    "key": "enter-key-name-here"
                }
                for env in STATE["awscreds"].environments
            },
            "config": {},
        }

    if args.json:
        output_file.write(json.dumps(config, indent=4))
    elif config:
        yaml.round_trip_dump(config, output_file)
Example #19
def merge_command():
    """
    Sub-command, see main()
    """
    parser = argparse.ArgumentParser(
        description='Merge two or more yaml files and preserve the comments')
    parser.add_argument('-i', '--inputs', nargs='+', type=str,
                        help='<Required> List of input yaml files, merged from the last to the first',
                        required=True)
    parser.add_argument('-o', '--output', type=str,
                        help='Path to the output file, or stdout by default')

    args = parser.parse_args(sys.argv[2:])

    file_contents = []
    for f in args.inputs:
        file = open(f, 'r')
        file_contents.append(file.read())
        file.close()

    out_content = successive_merge(file_contents)
    output_file = open(args.output, 'w') if args.output else sys.stdout
    round_trip_dump(out_content, output_file)
    output_file.close()
Example #20
def update_yaml_from_filter(filename,
                            data,
                            dir_path=DATA_PATH,
                            ind=None,
                            bsi=None):
    '''
    Update a yaml file
    :param filename: name of the file
    :param data: data to write
    :param dir_path: directory containing the file
    :param ind: indent passed through to round_trip_dump
    :param bsi: block sequence indent passed through to round_trip_dump
    :return:
    '''
    file_url = os.path.join(dir_path, filename)
    try:
        with open(file_url, 'w', encoding='utf-8') as fp:
            round_trip_dump(data=data,
                            stream=fp,
                            block_seq_indent=bsi,
                            indent=ind)
    except Exception as error:
        print('exception')
        raise error
Example #21
def merge_command():
    """
    Sub-command, see main()
    """
    parser = argparse.ArgumentParser(
        description='Merge two or more yaml files and preserve the comments')
    parser.add_argument('-i', '--inputs', nargs='+', type=str,
                        help='<Required> List of input yaml files, merged from the last to the first',
                        required=True)
    parser.add_argument('-o', '--output', type=str,
                        help='Path to the output file, or stdout by default')

    args = parser.parse_args(sys.argv[2:])

    file_contents = []
    for f in args.inputs:
        file = open(f, 'r')
        file_contents.append(file.read())
        file.close()

    out_content = successive_merge(file_contents)
    output_file = open(args.output, 'w') if args.output else sys.stdout
    round_trip_dump(out_content, output_file)
    output_file.close()
Example #22
def write_configuration(config_filename, config, mode=0o600):
    '''
    Given a target config filename and a config data structure of nested OrderedDicts, write out the
    config to file as YAML. Create any containing directories as needed.
    '''
    if os.path.exists(config_filename):
        raise FileExistsError('{} already exists. Aborting.'.format(config_filename))

    try:
        os.makedirs(os.path.dirname(config_filename), mode=0o700)
    except (FileExistsError, FileNotFoundError):
        pass

    with open(config_filename, 'w') as config_file:
        config_file.write(yaml.round_trip_dump(config, indent=INDENT, block_seq_indent=INDENT))

    os.chmod(config_filename, mode)
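indent sets how far nested keys and sequence entries are indented, and block_seq_indent pushes the '-' markers in under their parent key (INDENT is borgmatic's own constant, assumed here to be a small integer such as 4). A minimal sketch of the two knobs:

import ruamel.yaml as yaml

config = {'location': {'repositories': ['user@backup:repo.borg']}}
print(yaml.round_trip_dump(config, indent=4, block_seq_indent=4))
# the '-' entries come out indented beneath 'repositories' instead of flush left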
Example #23
def decrypt(config, output=False):
    stage = config['stage']

    enc_config = get_secret_config(config, stage)
    if not isinstance(enc_config, basestring):
        raise Exception('Secret config for stage {} is not a '
                        'string! Did you forget to encrypt '
                        'first?'.format(stage))
    stage_cfg = base64.b64decode(enc_config)
    region = config['stages'][stage]['keyRegion']
    kms = boto3.client('kms', region_name=region)
    resp = kms.decrypt(CiphertextBlob=bytes(stage_cfg))
    plain = json.loads(resp['Plaintext'])
    if output:
        print('Decrypted config for stage {}:\n\n{}'.format(
            stage, yaml.round_trip_dump(plain)))
    return plain
Example #24
    def dump(self):
        """Dump this Project Schema to YAML."""
        my_dict = yaml.comments.CommentedMap()
        for key in REQUIRED_KEYS:
            if key in ['files', 'sources']:
                continue
            my_dict[key] = getattr(self, key)
        for key in OPTIONAL_KEYS:
            try:
                val = getattr(self, key)
                if val:
                    my_dict[key] = val
            except KeyError:
                pass
        my_dict['sources'] = self.sources
        my_dict['files'] = self.files
        return yaml.round_trip_dump(my_dict)
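CommentedMap preserves insertion order on dump, which is what lets dump() emit required keys, optional keys, sources, and files in a fixed order. A minimal sketch:

import ruamel.yaml as yaml
from ruamel.yaml.comments import CommentedMap

my_dict = CommentedMap()
my_dict['name'] = 'demo-project'
my_dict['version'] = '1.0'
my_dict['sources'] = ['src/']
print(yaml.round_trip_dump(my_dict))
# name: demo-project
# version: '1.0'
# sources:
# - src/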
Example #25
def upload_workflow(arvRunner,
                    tool,
                    job_order,
                    project_uuid,
                    uuid=None,
                    submit_runner_ram=0,
                    name=None):

    packed = packed_workflow(arvRunner, tool)

    adjustDirObjs(job_order, trim_listing)
    adjustFileObjs(job_order, trim_anonymous_location)
    adjustDirObjs(job_order, trim_anonymous_location)

    main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
    for inp in main["inputs"]:
        sn = shortname(inp["id"])
        if sn in job_order:
            inp["default"] = job_order[sn]

    if not name:
        name = tool.tool.get("label", os.path.basename(tool.tool["id"]))

    upload_dependencies(arvRunner, name, tool.doc_loader, packed,
                        tool.tool["id"], False)

    # TODO nowhere for submit_runner_ram to go.

    body = {
        "workflow": {
            "name": name,
            "description": tool.tool.get("doc", ""),
            "definition": yaml.round_trip_dump(packed)
        }
    }
    if project_uuid:
        body["workflow"]["owner_uuid"] = project_uuid

    if uuid:
        call = arvRunner.api.workflows().update(uuid=uuid, body=body)
    else:
        call = arvRunner.api.workflows().create(body=body)
    return call.execute(num_retries=arvRunner.num_retries)["uuid"]
Example #26
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
Example #27
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
Example #28
def best_practice_comment_round_trip(yaml_path):
    """Round-trips the YAML document. If comments (#) are
    incorrectly escaped or not using literal style they will
    affect tools parsing the content. Also catches indentation changes,
    extra spaces in unneeded areas, other minor stylistic changes."""
    with open(yaml_path, mode="r", encoding="utf-8") as f:
        yaml_str1 = f.read()

    yaml_object = yaml.round_trip_load(yaml_str1, preserve_quotes=True)
    yaml_str2 = yaml.round_trip_dump(yaml_object, width=300)

    results = list(difflib.Differ().compare(
        yaml_str1.splitlines(keepends=True),
        yaml_str2.splitlines(keepends=True)))

    for item in results:
        if item.startswith(("+ ", "- ", "? ")):
            pprint.pprint(item)

    assert yaml_str1 == yaml_str2
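A minimal sketch of the idempotence property being asserted: a document already in ruamel's round-trip normal form should survive load/dump byte-for-byte, so any diff flags stylistic drift in the file under test.

import ruamel.yaml as yaml

src = "key: value  # a comment\nitems:\n- a\n- b\n"
round_tripped = yaml.round_trip_dump(
    yaml.round_trip_load(src, preserve_quotes=True), width=300)
assert round_tripped == src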
Example #29
    def serialize(cls, data):
        from ruamel import yaml

        yaml.representer.RoundTripRepresenter.add_representer(
            types.List, yaml.representer.RoundTripRepresenter.represent_list)
        yaml.representer.RoundTripRepresenter.add_representer(
            types.Dict, yaml.representer.RoundTripRepresenter.represent_dict)

        if settings.INDENT_YAML_BLOCKS:
            f = StringIO()
            y = yaml.YAML()
            y.indent(mapping=2, sequence=4, offset=2)
            y.dump(data, f)
            text = f.getvalue().strip() + '\n'
        else:
            text = yaml.round_trip_dump(data) or ""

        if text == "{}\n":
            return ""

        return text.replace('- \n', '-\n')
Example #30
def print_parsed_obj(cwlyml, target_path=None, ofd=sys.stdout):
    if cwlyml.target['class'] == 'Workflow':
        cwlyml.target = update_workflow(cwlyml.target)
    else:
        cwlyml.load_target_as_ruamel_obj(target_path)
        cwlyml.target = update_clt(cwlyml.target)

    root_keys = [
        'class', 'cwlVersion', 'doc', 'requirements', 'hints', 'inputs',
        'steps', 'expression', 'outputs', 'baseCommand', 'arguments', 'stdin',
        'stdout'
    ]

    # print(ryaml.round_trip_dump(cwlyml.target, indent=2, explicit_end=False))
    st = ryaml.round_trip_dump(OrderedDict(
        sorted(cwlyml.target.items(), key=lambda t: root_keys.index(t[0]))),
                               indent=2,
                               explicit_end=False,
                               width=10000)
    final = re.sub('^-', '', re.sub('\n\n', '\n', st)).replace('!!omap\n-', '')
    ofd.write(re.sub('\n-', '\n', final))
Example #31
def to_yaml(message, stream=None):
    """Writes the given protobuf message to a stream as YAML.

    Args:
        message (obj): The protobuf message to be converted
        stream (obj): A writable stream; defaults to sys.stdout

    """
    import sys

    from google.protobuf.json_format import MessageToDict

    data = MessageToDict(message)

    from ruamel.yaml import round_trip_dump

    if stream is None:
        stream = sys.stdout
    stream.write("---\n")
    return round_trip_dump(data, stream, default_flow_style=False)
Example #32
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
Example #33
    def run(self) -> Sequence[nodes.Node]:  # type: ignore
        """
		Process the content of the directive.
		"""

        plugin_name = self.options.get("plugin-name",
                                       self.env.config.github_repository)
        flake8_version = self.options.get("flake8-version", "3.8.4")

        config = {
            "repo": "https://gitlab.com/pycqa/flake8",
            "rev": flake8_version,
            "hooks": [{
                "id": "flake8",
                "additional_dependencies": [f"{plugin_name}=={self.arguments[0]}"]
            }]
        }

        targetid = f'pre-commit-{self.env.new_serialno("pre-commit"):d}'
        targetnode = nodes.section(ids=[targetid])

        yaml_output = yaml.round_trip_dump([config], default_flow_style=False)
        if not yaml_output:
            return []

        content = f".. code-block:: yaml\n\n{indent(yaml_output, '    ')}\n\n"
        view = StringList(content.split('\n'))
        pre_commit_node = nodes.paragraph(rawsource=content)
        self.state.nested_parse(view, self.content_offset, pre_commit_node)

        pre_commit_f8_node_purger.add_node(self.env, pre_commit_node,
                                           targetnode, self.lineno)

        return [pre_commit_node]
Example #34
def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
                    submit_runner_ram=0, name=None):

    packed = packed_workflow(arvRunner, tool)

    adjustDirObjs(job_order, trim_listing)
    adjustFileObjs(job_order, trim_anonymous_location)
    adjustDirObjs(job_order, trim_anonymous_location)

    main = [p for p in packed["$graph"] if p["id"] == "#main"][0]
    for inp in main["inputs"]:
        sn = shortname(inp["id"])
        if sn in job_order:
            inp["default"] = job_order[sn]

    if not name:
        name = tool.tool.get("label", os.path.basename(tool.tool["id"]))

    upload_dependencies(arvRunner, name, tool.doc_loader,
                        packed, tool.tool["id"], False)

    # TODO nowhere for submit_runner_ram to go.

    body = {
        "workflow": {
            "name": name,
            "description": tool.tool.get("doc", ""),
            "definition":yaml.round_trip_dump(packed)
        }}
    if project_uuid:
        body["workflow"]["owner_uuid"] = project_uuid

    if uuid:
        call = arvRunner.api.workflows().update(uuid=uuid, body=body)
    else:
        call = arvRunner.api.workflows().create(body=body)
    return call.execute(num_retries=arvRunner.num_retries)["uuid"]
Example #35
    def dump(self, configs, path):
        cfg = configparser.ConfigParser(allow_no_value=True)
        leftovers = []

        root_comment = getattr(configs, '__doc__', '')
        if root_comment:
            cfg.setdefault(self.root_name, {})
            for comment in root_comment.split(os.linesep):
                cfg.set(self.root_name, f'# {comment}', None)

        for p, value, prop in configs.get_prop_paths():
            if '.' in p:
                top = '.'.join(p.split('.')[:-1])
            else:
                top = self.root_name

            cfg.setdefault(top, {})

            if isinstance(value, Nestable):
                # Put it before the first value
                if prop.comment:
                    leftovers = prop.comment.split(os.linesep)
                continue

            if prop.comment:
                for c in itertools.chain(
                        *[leftovers, prop.comment.split(os.linesep)]):
                    cfg.set(top, f'# {c}', None)
                leftovers = []

            if isinstance(value, list):
                cfg[top][prop.name] = yaml.round_trip_dump(value)
            else:
                cfg[top][prop.name] = str(value)

        with open(path, 'w') as f:
            cfg.write(f)
Example #36
    def create_proj(self):
        yml = CommentedMap()
        yml['project'] = CommentedMap()

        #
        def _add(name):
            items = getattr(self, name)
            #if BuildItem.trivial_item(items):
            #    yml['project'][name] = "_default_"
            #elif BuildItem.no_flags_in_collection(items):
            if BuildItem.no_flags_in_collection(items):
                out = []
                for s in items:
                    out.append(s.name)
                yml['project'][name] = out
            else:
                out = []
                for s in items:
                    cm = CommentedMap()
                    cm[s.name] = CommentedMap()
                    s.save_config(cm[s.name])
                    out.append(cm)
                yml['project'][name] = out

        #
        _add('systems')
        _add('architectures')
        _add('compilers')
        _add('build_types')
        _add('variants')
        txt = yaml.round_trip_dump(yml)
        fn = self.kwargs['output_file']
        if not os.path.isabs(fn):
            fn = os.path.join(self.root_dir, fn)
        with open(fn, "w") as f:
            f.write(txt)
Example #37
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                  default_flow_style=False),
                             yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
Example #38
def get_pattern_review():
    global pattern, pattern_list, pattern_iri_prefix, repo_name, gh_paths
    yaml_content = pattern_list[pattern]
    res = ""
    for line in yaml.round_trip_dump(yaml_content, indent=5, block_seq_indent=3).splitlines(True):
        res += line[3:]
    iri = str(yaml_content['pattern_iri'])
    path_gh = "https://github.com/{}/blob/master/{}".format(repo_name,gh_paths[pattern])
    gh_issue = get_issue(pattern)
    return html.Div([
        html.H2('Pattern review for {}'.format(pattern)),
        html.Div([html.H4("Name: "), html.Div(str(yaml_content['pattern_name']),id="pname")]),
        html.Div([html.H4("IRI: "), html.Div(html.A(href=path_gh, children=iri, target="_blank"))]),
        html.Div([html.H4("GitHub discussion: "), html.Div(html.A(href=gh_issue, children="{}".format(gh_issue), target="_blank"))]),
        html.Div([html.H4("Description: "),html.Div(get_val(yaml_content,'description'))]),
        html.Div([html.H4("Classes: "),html.Div(get_val(yaml_content,'classes',True))]),
        html.Div([html.H4("Relations: "),html.Div(get_val(yaml_content,'relations',True))]),
        html.Div([html.H4("Annotation Properties: "),html.Span(get_val(yaml_content,'annotationProperties',True))]),
        html.Div([html.H4("Variables: "),html.Div(get_val(yaml_content,'vars'))]),
        html.Div([html.H4("Phenotype name: "),html.Div(get_val(yaml_content,'name'))]),
        html.Div([html.H4("Phenotype annotations: "),html.Div(get_val(yaml_content,'annotations'))]),
        html.Div([html.H4("Phenotype definition: "),html.Div(get_val(yaml_content,'def'))]),
        html.Div([html.H4("EQ: "),html.Div(get_val(yaml_content,'equivalentTo'))]),
        dcc.RadioItems(
            id='radio-sign',
            options = [
                {'label': 'Sign', 'value': 'sign'},
                {'label': 'Unsign', 'value': 'unsign'},
                ],
            value = "",
            labelStyle={'display': 'inline-block'}
            ),
        html.Button('Submit', id='sign-pattern'),
        html.Div("",id='signed'),
        ])
Example #39
    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
Example #40
    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
Example #41
def main(argsl=None,                   # type: List[str]
         args=None,                    # type: argparse.Namespace
         job_order_object=None,        # type: MutableMapping[Text, Any]
         stdin=sys.stdin,              # type: IO[Any]
         stdout=None,                  # type: Union[TextIO, StreamWriter]
         stderr=sys.stderr,            # type: IO[Any]
         versionfunc=versionstring,    # type: Callable[[], Text]
         logger_handler=None,          #
         custom_schema_callback=None,  # type: Callable[[], None]
         executor=None,                # type: Callable[..., Tuple[Dict[Text, Any], Text]]
         loadingContext=None,          # type: LoadingContext
         runtimeContext=None           # type: RuntimeContext
        ):  # type: (...) -> int
    if not stdout:  # force UTF-8 even if the console is configured differently
        if (hasattr(sys.stdout, "encoding")  # type: ignore
                and sys.stdout.encoding != 'UTF-8'):  # type: ignore
            if PY3 and hasattr(sys.stdout, "detach"):
                stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
            else:
                stdout = getwriter('utf-8')(sys.stdout)  # type: ignore
        else:
            stdout = cast(TextIO, sys.stdout)  # type: ignore

    _logger.removeHandler(defaultStreamHandler)
    if logger_handler is not None:
        stderr_handler = logger_handler
    else:
        stderr_handler = logging.StreamHandler(stderr)
    _logger.addHandler(stderr_handler)
    # pre-declared for finally block
    workflowobj = None
    prov_log_handler = None  # type: Optional[logging.StreamHandler]
    try:
        if args is None:
            if argsl is None:
                argsl = sys.argv[1:]
            args = arg_parser().parse_args(argsl)
            if args.record_container_id:
                if not args.cidfile_dir:
                    args.cidfile_dir = os.getcwd()
                del args.record_container_id

        if runtimeContext is None:
            runtimeContext = RuntimeContext(vars(args))
        else:
            runtimeContext = runtimeContext.copy()

        # If on Windows platform, a default Docker Container is used if not
        # explicitly provided by user
        if onWindows() and not runtimeContext.default_container:
            # This docker image is a minimal alpine image with bash installed
            # (size 6 mb). source: https://github.com/frol/docker-alpine-bash
            runtimeContext.default_container = windows_default_container_id

        # If caller parsed its own arguments, it may not include every
        # cwltool option, so fill in defaults to avoid crashing when
        # dereferencing them in args.
        for key, val in iteritems(get_default_args()):
            if not hasattr(args, key):
                setattr(args, key, val)

        # Configure logging
        rdflib_logger = logging.getLogger("rdflib.term")
        rdflib_logger.addHandler(stderr_handler)
        rdflib_logger.setLevel(logging.ERROR)
        if args.quiet:
            # Silence STDERR, not an eventual provenance log file
            stderr_handler.setLevel(logging.WARN)
        if runtimeContext.debug:
            # Increase to debug for both stderr and provenance log file
            _logger.setLevel(logging.DEBUG)
            rdflib_logger.setLevel(logging.DEBUG)
        formatter = None  # type: Optional[logging.Formatter]
        if args.timestamps:
            formatter = logging.Formatter("[%(asctime)s] %(message)s",
                                          "%Y-%m-%d %H:%M:%S")
            stderr_handler.setFormatter(formatter)
        ##

        if args.version:
            print(versionfunc())
            return 0
        _logger.info(versionfunc())

        if args.print_supported_versions:
            print("\n".join(supported_cwl_versions(args.enable_dev)))
            return 0

        if not args.workflow:
            if os.path.isfile("CWLFile"):
                setattr(args, "workflow", "CWLFile")
            else:
                _logger.error("")
                _logger.error("CWL document required, no input file was provided")
                arg_parser().print_help()
                return 1
        if args.relax_path_checks:
            command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE

        if args.ga4gh_tool_registries:
            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
        if not args.enable_ga4gh_tool_registry:
            del ga4gh_tool_registries[:]

        if custom_schema_callback is not None:
            custom_schema_callback()
        elif args.enable_ext:
            res = pkg_resources.resource_stream(__name__, 'extensions.yml')
            use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read())
            res.close()
        else:
            use_standard_schema("v1.0")
        if args.provenance:
            if not args.compute_checksum:
                _logger.error("--provenance incompatible with --no-compute-checksum")
                return 1
            ro = ResearchObject(
                temp_prefix_ro=args.tmpdir_prefix, orcid=args.orcid,
                full_name=args.cwl_full_name)
            runtimeContext.research_obj = ro
            log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
            prov_log_handler = logging.StreamHandler(log_file_io)

            class ProvLogFormatter(logging.Formatter):
                """Enforce ISO8601 with both T and Z."""
                def __init__(self):  # type: () -> None
                    super(ProvLogFormatter, self).__init__(
                        "[%(asctime)sZ] %(message)s")

                def formatTime(self, record, datefmt=None):
                    # type: (logging.LogRecord, str) -> str
                    record_time = time.gmtime(record.created)
                    formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", record_time)
                    with_msecs = "%s,%03d" % (formatted_time, record.msecs)
                    return with_msecs
            prov_log_handler.setFormatter(ProvLogFormatter())
            _logger.addHandler(prov_log_handler)
            _logger.debug(u"[provenance] Logging to %s", log_file_io)
            if argsl is not None:
                # Log cwltool command line options to provenance file
                _logger.info("[cwltool] %s %s", sys.argv[0], u" ".join(argsl))
            _logger.debug(u"[cwltool] Arguments: %s", args)

        if loadingContext is None:
            loadingContext = LoadingContext(vars(args))
        else:
            loadingContext = loadingContext.copy()
        loadingContext.loader = default_loader(loadingContext.fetcher_constructor)
        loadingContext.research_obj = runtimeContext.research_obj
        loadingContext.disable_js_validation = \
            args.disable_js_validation or (not args.do_validate)
        loadingContext.construct_tool_object = getdefault(
            loadingContext.construct_tool_object, workflow.default_make_tool)
        loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
        loadingContext.do_update = not (args.pack or args.print_subgraph)

        uri, tool_file_uri = resolve_tool_uri(
            args.workflow, resolver=loadingContext.resolver,
            fetcher_constructor=loadingContext.fetcher_constructor)

        try_again_msg = "" if args.debug else ", try again with --debug for more information"

        try:
            job_order_object, input_basedir, jobloader = load_job_order(
                args, stdin, loadingContext.fetcher_constructor,
                loadingContext.overrides_list, tool_file_uri)

            if args.overrides:
                loadingContext.overrides_list.extend(load_overrides(
                    file_uri(os.path.abspath(args.overrides)), tool_file_uri))

            loadingContext, workflowobj, uri = fetch_document(
                uri, loadingContext)

            if args.print_deps and loadingContext.loader:
                printdeps(workflowobj, loadingContext.loader, stdout,
                          args.relative_deps, uri)
                return 0

            loadingContext, uri \
                = resolve_and_validate_document(loadingContext, workflowobj, uri,
                                    preprocess_only=(args.print_pre or args.pack),
                                    skip_schemas=args.skip_schemas)
            
            if loadingContext.loader is None:
                raise Exception("Impossible code path.")
            processobj, metadata = loadingContext.loader.resolve_ref(uri)
            processobj = cast(CommentedMap, processobj)
            if args.pack:
                stdout.write(print_pack(loadingContext.loader, processobj, uri, metadata))
                return 0

            if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at same time
                runtimeContext.research_obj.packed_workflow(
                    print_pack(loadingContext.loader, processobj, uri, metadata))

            if args.print_pre:
                stdout.write(json_dumps(processobj, indent=4, sort_keys=True, separators=(',', ': ')))
                return 0

            tool = make_tool(uri, loadingContext)
            if args.make_template:
                def my_represent_none(self, data):  # pylint: disable=unused-argument
                    """Force clean representation of 'null'."""
                    return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
                yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none)
                yaml.round_trip_dump(
                    generate_input_template(tool), sys.stdout,
                    default_flow_style=False, indent=4, block_seq_indent=2)
                return 0

            if args.validate:
                print("{} is valid CWL.".format(args.workflow))
                return 0

            if args.print_rdf:
                stdout.write(printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer))
                return 0

            if args.print_dot:
                printdot(tool, loadingContext.loader.ctx, stdout)
                return 0

            if args.print_targets:
                for f in ("outputs", "steps", "inputs"):
                    if tool.tool[f]:
                        _logger.info("%s%s targets:", f[0].upper(), f[1:-1])
                        stdout.write("  "+"\n  ".join([shortname(t["id"]) for t in tool.tool[f]])+"\n")
                return 0

            if args.target:
                if isinstance(tool, Workflow):
                    url = urllib.parse.urlparse(tool.tool["id"])
                    if url.fragment:
                        extracted = get_subgraph([tool.tool["id"] + "/" + r for r in args.target], tool)
                    else:
                        extracted = get_subgraph([loadingContext.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
                                                 for r in args.target],
                                                 tool)
                else:
                    _logger.error("Can only use --target on Workflows")
                    return 1
                loadingContext.loader.idx[extracted["id"]] = extracted
                tool = make_tool(extracted["id"],
                                 loadingContext)

            if args.print_subgraph:
                if "name" in tool.tool:
                    del tool.tool["name"]
                stdout.write(json_dumps(tool.tool, indent=4, sort_keys=True, separators=(',', ': ')))
                return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Tool definition failed validation:\n%s", exc,
                          exc_info=args.debug)
            return 1
        except (RuntimeError, WorkflowException) as exc:
            _logger.error(u"Tool definition failed initialization:\n%s", exc,
                          exc_info=args.debug)
            return 1
        except Exception as exc:
            _logger.error(
                u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
                try_again_msg,
                exc if not args.debug else "",
                exc_info=args.debug)
            return 1

        if isinstance(tool, int):
            return tool
        # If on MacOS platform, TMPDIR must be set to be under one of the
        # shared volumes in Docker for Mac
        # More info: https://dockstore.org/docs/faq
        if sys.platform == "darwin":
            default_mac_path = "/private/tmp/docker_tmp"
            if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmp_outdir_prefix = default_mac_path
            if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmpdir_prefix = default_mac_path

        for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
            if getattr(runtimeContext, dirprefix) and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX:
                sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \
                    else ""
                setattr(runtimeContext, dirprefix,
                        os.path.abspath(getattr(runtimeContext, dirprefix)) + sl)
                if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
                    try:
                        os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
                    except Exception as e:
                        _logger.error("Failed to create directory: %s", e)
                        return 1

        if args.cachedir:
            if args.move_outputs == "move":
                runtimeContext.move_outputs = "copy"
            runtimeContext.tmp_outdir_prefix = args.cachedir

        runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore())
        runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
        try:
            initialized_job_order_object = init_job_order(
                job_order_object, args, tool, jobloader, stdout,
                print_input_deps=args.print_input_deps,
                relative_deps=args.relative_deps,
                make_fs_access=runtimeContext.make_fs_access,
                input_basedir=input_basedir,
                secret_store=runtimeContext.secret_store)
        except SystemExit as err:
            return err.code

        if not executor:
            if args.parallel:
                executor = MultithreadedJobExecutor()
                runtimeContext.select_resources = executor.select_resources
            else:
                executor = SingleJobExecutor()

        try:
            runtimeContext.basedir = input_basedir
            del args.workflow
            del args.job_order

            conf_file = getattr(args, "beta_dependency_resolvers_configuration", None)  # Text
            use_conda_dependencies = getattr(args, "beta_conda_dependencies", None)  # Text

            if conf_file or use_conda_dependencies:
                runtimeContext.job_script_provider = DependenciesConfiguration(args)
            else:
                runtimeContext.find_default_container = functools.partial(
                    find_default_container,
                    default_container=runtimeContext.default_container,
                    use_biocontainers=args.beta_use_biocontainers)

            (out, status) = executor(tool,
                                     initialized_job_order_object,
                                     runtimeContext,
                                     logger=_logger)

            if out is not None:
                if runtimeContext.research_obj is not None:
                    runtimeContext.research_obj.create_job(
                        out, None, True)

                def loc_to_path(obj):
                    for field in ("path", "nameext", "nameroot", "dirname"):
                        if field in obj:
                            del obj[field]
                    if obj["location"].startswith("file://"):
                        obj["path"] = uri_file_path(obj["location"])

                visit_class(out, ("File", "Directory"), loc_to_path)

                # Unsetting the Generation from final output object
                visit_class(out, ("File", ), MutationManager().unset_generation)

                if isinstance(out, string_types):
                    stdout.write(out)
                else:
                    stdout.write(json_dumps(out, indent=4,  # type: ignore
                                            ensure_ascii=False))
                stdout.write("\n")
                if hasattr(stdout, "flush"):
                    stdout.flush()  # type: ignore

            if status != "success":
                _logger.warning(u"Final process status is %s", status)
                return 1
            _logger.info(u"Final process status is %s", status)
            return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Input object failed validation:\n%s", exc,
                          exc_info=args.debug)
            return 1
        except UnsupportedRequirement as exc:
            _logger.error(
                u"Workflow or tool uses unsupported feature:\n%s", exc,
                exc_info=args.debug)
            return 33
        except WorkflowException as exc:
            _logger.error(
                u"Workflow error%s:\n%s", try_again_msg, strip_dup_lineno(Text(exc)),
                exc_info=args.debug)
            return 1
        except Exception as exc:  # pylint: disable=broad-except
            _logger.error(
                u"Unhandled error%s:\n  %s", try_again_msg, exc, exc_info=args.debug)
            return 1

    finally:
        if args and runtimeContext and runtimeContext.research_obj \
                and workflowobj and loadingContext:
            research_obj = runtimeContext.research_obj
            if loadingContext.loader is not None:
                research_obj.generate_snapshot(prov_deps(
                    workflowobj, loadingContext.loader, uri))
            else:
                _logger.warning("Unable to generate provenance snapshot "
                    " due to missing loadingContext.loader.")
            if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourself to RO
                _logger.debug(u"[provenance] Closing provenance log file %s",
                    prov_log_handler)
                _logger.removeHandler(prov_log_handler)
                # Ensure last log lines are written out
                prov_log_handler.flush()
                # Underlying WritableBagFile will add the tagfile to the manifest
                prov_log_handler.stream.close()
                prov_log_handler.close()
            research_obj.close(args.provenance)

        _logger.removeHandler(stderr_handler)
        _logger.addHandler(defaultStreamHandler)
Example #42
0
    def show_pipeline(self, dependencies, format_):
        report = ""
        p = Profile()

        for element in dependencies:
            line = format_

            key = element._get_display_key()
            dim_keys = not key.strict

            line = p.fmt_subst(line,
                               "name",
                               element._get_full_name(),
                               fg="blue",
                               bold=True)
            line = p.fmt_subst(line,
                               "key",
                               key.brief,
                               fg="yellow",
                               dim=dim_keys)
            line = p.fmt_subst(line,
                               "full-key",
                               key.full,
                               fg="yellow",
                               dim=dim_keys)

            try:
                if not element._has_all_sources_resolved():
                    line = p.fmt_subst(line, "state", "no reference", fg="red")
                else:
                    if element.get_kind() == "junction":
                        line = p.fmt_subst(line,
                                           "state",
                                           "junction",
                                           fg="magenta")
                    elif not element._can_query_cache():
                        line = p.fmt_subst(line, "state", "waiting", fg="blue")
                    elif element._cached_failure():
                        line = p.fmt_subst(line, "state", "failed", fg="red")
                    elif element._cached_success():
                        line = p.fmt_subst(line,
                                           "state",
                                           "cached",
                                           fg="magenta")
                    elif not element._can_query_source_cache():
                        line = p.fmt_subst(line, "state", "waiting", fg="blue")
                    elif element._fetch_needed():
                        line = p.fmt_subst(line,
                                           "state",
                                           "fetch needed",
                                           fg="red")
                    elif element._buildable():
                        line = p.fmt_subst(line,
                                           "state",
                                           "buildable",
                                           fg="green")
                    else:
                        line = p.fmt_subst(line, "state", "waiting", fg="blue")
            except BstError as e:
                # Provide context to plugin error
                e.args = ("Failed to determine state for {}: {}".format(
                    element._get_full_name(), str(e)), )
                raise e

            # Element configuration
            if "%{config" in format_:
                line = p.fmt_subst(
                    line,
                    "config",
                    yaml.round_trip_dump(element._Element__config,
                                         default_flow_style=False,
                                         allow_unicode=True),
                )

            # Variables
            if "%{vars" in format_:
                variables = dict(element._Element__variables)
                line = p.fmt_subst(
                    line, "vars",
                    yaml.round_trip_dump(variables,
                                         default_flow_style=False,
                                         allow_unicode=True))

            # Environment
            if "%{env" in format_:
                environment = element._Element__environment
                line = p.fmt_subst(
                    line, "env",
                    yaml.round_trip_dump(environment,
                                         default_flow_style=False,
                                         allow_unicode=True))

            # Public
            if "%{public" in format_:
                environment = element._Element__public
                line = p.fmt_subst(
                    line, "public",
                    yaml.round_trip_dump(environment,
                                         default_flow_style=False,
                                         allow_unicode=True))

            # Workspaced
            if "%{workspaced" in format_:
                line = p.fmt_subst(
                    line,
                    "workspaced",
                    "(workspaced)" if element._get_workspace() else "",
                    fg="yellow")

            # Workspace-dirs
            if "%{workspace-dirs" in format_:
                workspace = element._get_workspace()
                if workspace is not None:
                    path = workspace.get_absolute_path()
                    if path.startswith("~/"):
                        path = os.path.join(os.getenv("HOME", "/root"),
                                            path[2:])
                    line = p.fmt_subst(line, "workspace-dirs",
                                       "Workspace: {}".format(path))
                else:
                    line = p.fmt_subst(line, "workspace-dirs", "")

            # Dependencies
            if "%{deps" in format_:
                deps = [
                    e._get_full_name()
                    for e in element._dependencies(_Scope.ALL, recurse=False)
                ]
                line = p.fmt_subst(
                    line, "deps",
                    yaml.safe_dump(deps, default_style=None).rstrip("\n"))

            # Build Dependencies
            if "%{build-deps" in format_:
                build_deps = [
                    e._get_full_name()
                    for e in element._dependencies(_Scope.BUILD, recurse=False)
                ]
                line = p.fmt_subst(
                    line, "build-deps",
                    yaml.safe_dump(build_deps,
                                   default_style=None).rstrip("\n"))

            # Runtime Dependencies
            if "%{runtime-deps" in format_:
                runtime_deps = [
                    e._get_full_name()
                    for e in element._dependencies(_Scope.RUN, recurse=False)
                ]
                line = p.fmt_subst(
                    line, "runtime-deps",
                    yaml.safe_dump(runtime_deps,
                                   default_style=None).rstrip("\n"))

            report += line + "\n"

        return report.rstrip("\n")
Example #43
0
def dump_yaml_rt(data):
    """Dump round-tripped YAML."""
    return yaml.round_trip_dump(data, indent=4, width=999)
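
A minimal usage sketch for the helper above (a sketch only, assuming yaml is ruamel.yaml's compatibility API as in the other examples): the round trip preserves comments that a plain safe_dump would discard.

# Sketch: comments survive a load/dump round trip.
source = "key: value  # keep me\nitems:\n    - a\n"
data = yaml.round_trip_load(source, preserve_quotes=True)
assert "# keep me" in dump_yaml_rt(data)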
Example #44
0
def _render_configuration(config):
    '''
    Given a config data structure of nested OrderedDicts, render the config as YAML and return it.
    '''
    return yaml.round_trip_dump(config, indent=INDENT, block_seq_indent=INDENT)
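
A sketch of what the two indent arguments do (INDENT is defined elsewhere in the source module; 4 is assumed here): block_seq_indent is what pushes the sequence dashes in under their parent key.

# Hypothetical config; with indent=4 and block_seq_indent=4 the dashes
# are indented under the key instead of sitting flush with it.
config = {'location': {'repositories': ['~/src']}}
print(yaml.round_trip_dump(config, indent=4, block_seq_indent=4))
# Expected output, approximately:
# location:
#     repositories:
#         - ~/src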
Example #45
0
def format_repo_url(repo):
    return f'[{repo}](https://github.com/{repo})'


if __name__ == '__main__':

    remote_config_file = 'https://raw.githubusercontent.com/NCAR/xdev/xdevbot/xdevbot.yaml'
    config_file = 'xdevbot.yaml'
    resp = urllib.request.urlopen(remote_config_file)
    original_config = yaml.safe_load(resp)
    new_config, old_config, repos, error_messages_to_report = configure(
        original_config)
    if new_config != old_config:
        with open(config_file, 'w') as file_obj:
            yaml.round_trip_dump(new_config,
                                 file_obj,
                                 indent=2,
                                 block_seq_indent=2)

    added_hooks = {}
    removed_hooks = {}
    loop = asyncio.get_event_loop()
    add_tasks = [
        loop.create_task(install_repo_webhook(repo, added_hooks))
        for repo in repos['add']
    ]
    remove_tasks = [
        loop.create_task(delete_repo_webhook(repo, removed_hooks))
        for repo in repos['remove']
    ]
    loop.run_until_complete(asyncio.gather(*add_tasks))
    loop.run_until_complete(asyncio.gather(*remove_tasks))
Example #46
0
def fake_mpi_conf(tmp_path_factory: Any) -> Generator[str, None, None]:
    """
    Make a super simple mpirun-alike for applications that don't actually use MPI.

    It just runs the command multiple times (in serial).

    Then create a platform MPI config YAML file that should make it work
    for the testing examples.
    """
    mpirun_text = """#!{interpreter}
import argparse
import sys
import subprocess
from io import StringIO
from typing import List

def make_parser():
    p = argparse.ArgumentParser()
    p.add_argument("--num", type=int, help="number of times to run the application")
    p.add_argument(
        "--no-fail", help="add this flag to actually work", action="store_true"
    )
    p.add_argument(
        "progargs", nargs=argparse.REMAINDER, help="The program and its arguments"
    )
    return p

class Runner:
    def __init__(self):
        if sys.stdin.isatty():
            self.indata = None
        else:
            self.indata = sys.stdin.read().encode(sys.stdin.encoding)

    def run_once(self, args: List[str]):
        subprocess.run(
            args, input=self.indata, stdout=sys.stdout, stderr=sys.stderr
        ).check_returncode()

    def run_many(self, n: int, args: List[str]):
        for i in range(n):
            self.run_once(args)

if __name__ == "__main__":
    args = make_parser().parse_args()
    assert args.no_fail == True, "Didn't set the --no-fail flag"
    r = Runner()
    r.run_many(args.num, args.progargs)
""".format(
        interpreter=sys.executable
    )
    mpitmp = tmp_path_factory.mktemp("fake_mpi")
    mpirun_file = mpitmp / "fake_mpirun"
    mpirun_file.write_text(mpirun_text)
    mpirun_file.chmod(0o755)

    plat_conf = {
        "runner": str(mpirun_file),
        "nproc_flag": "--num",
        "extra_flags": ["--no-fail"],
        "env_set": {"TEST_MPI_FOO": "bar"},
        "env_pass": ["USER"],
    }
    plat_conf_file = mpitmp / "plat_mpi.yml"
    plat_conf_file.write_text(yaml.round_trip_dump(plat_conf))

    yield str(plat_conf_file)

    plat_conf_file.unlink()
    mpirun_file.unlink()
    mpitmp.rmdir()
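
For reference, a rough sketch of what round_trip_dump writes to plat_mpi.yml for the dict above (ruamel's defaults: two-space mapping indent, flush sequence dashes; the runner path will differ per test run):

    runner: /tmp/.../fake_mpirun
    nproc_flag: --num
    extra_flags:
    - --no-fail
    env_set:
      TEST_MPI_FOO: bar
    env_pass:
    - USER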
Example #47
0
def main(argsl=None,                   # type: List[str]
         args=None,                    # type: argparse.Namespace
         job_order_object=None,        # type: MutableMapping[Text, Any]
         stdin=sys.stdin,              # type: IO[Any]
         stdout=None,                  # type: Union[TextIO, StreamWriter]
         stderr=sys.stderr,            # type: IO[Any]
         versionfunc=versionstring,    # type: Callable[[], Text]
         logger_handler=None,          #
         custom_schema_callback=None,  # type: Callable[[], None]
         executor=None,                # type: Callable[..., Tuple[Dict[Text, Any], Text]]
         loadingContext=None,          # type: LoadingContext
         runtimeContext=None           # type: RuntimeContext
        ):  # type: (...) -> int
    if not stdout:  # force UTF-8 even if the console is configured differently
        if (hasattr(sys.stdout, "encoding")  # type: ignore
                and sys.stdout.encoding != 'UTF-8'):  # type: ignore
            if PY3 and hasattr(sys.stdout, "detach"):
                stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
            else:
                stdout = getwriter('utf-8')(sys.stdout)  # type: ignore
        else:
            stdout = cast(TextIO, sys.stdout)  # type: ignore

    _logger.removeHandler(defaultStreamHandler)
    if logger_handler is not None:
        stderr_handler = logger_handler
    else:
        stderr_handler = logging.StreamHandler(stderr)
    _logger.addHandler(stderr_handler)
    # pre-declared for finally block
    workflowobj = None
    prov_log_handler = None  # type: Optional[logging.StreamHandler]
    try:
        if args is None:
            if argsl is None:
                argsl = sys.argv[1:]
            args = arg_parser().parse_args(argsl)
            if args.record_container_id:
                if not args.cidfile_dir:
                    args.cidfile_dir = os.getcwd()
                del args.record_container_id

        if runtimeContext is None:
            runtimeContext = RuntimeContext(vars(args))
        else:
            runtimeContext = runtimeContext.copy()

        # If on the Windows platform, a default Docker container is used if
        # not explicitly provided by the user
        if onWindows() and not runtimeContext.default_container:
            # This docker image is a minimal alpine image with bash installed
            # (size 6 mb). source: https://github.com/frol/docker-alpine-bash
            runtimeContext.default_container = windows_default_container_id

        # If caller parsed its own arguments, it may not include every
        # cwltool option, so fill in defaults to avoid crashing when
        # dereferencing them in args.
        for key, val in iteritems(get_default_args()):
            if not hasattr(args, key):
                setattr(args, key, val)

        # Configure logging
        rdflib_logger = logging.getLogger("rdflib.term")
        rdflib_logger.addHandler(stderr_handler)
        rdflib_logger.setLevel(logging.ERROR)
        if args.quiet:
            # Silence STDERR, not an eventual provenance log file
            stderr_handler.setLevel(logging.WARN)
        if runtimeContext.debug:
            # Increase to debug for both stderr and provenance log file
            _logger.setLevel(logging.DEBUG)
            rdflib_logger.setLevel(logging.DEBUG)
        formatter = None  # type: Optional[logging.Formatter]
        if args.timestamps:
            formatter = logging.Formatter("[%(asctime)s] %(message)s",
                                          "%Y-%m-%d %H:%M:%S")
            stderr_handler.setFormatter(formatter)
        ##

        if args.version:
            print(versionfunc())
            return 0
        _logger.info(versionfunc())

        if args.print_supported_versions:
            print("\n".join(supported_cwl_versions(args.enable_dev)))
            return 0

        if not args.workflow:
            if os.path.isfile("CWLFile"):
                setattr(args, "workflow", "CWLFile")
            else:
                _logger.error("")
                _logger.error("CWL document required, no input file was provided")
                arg_parser().print_help()
                return 1
        if args.relax_path_checks:
            command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE

        if args.ga4gh_tool_registries:
            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
        if not args.enable_ga4gh_tool_registry:
            del ga4gh_tool_registries[:]

        if custom_schema_callback is not None:
            custom_schema_callback()
        elif args.enable_ext:
            res = pkg_resources.resource_stream(__name__, 'extensions.yml')
            use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read())
            res.close()
        else:
            use_standard_schema("v1.0")
        if args.provenance:
            if not args.compute_checksum:
                _logger.error("--provenance incompatible with --no-compute-checksum")
                return 1
            ro = ResearchObject(
                temp_prefix_ro=args.tmpdir_prefix, orcid=args.orcid,
                full_name=args.cwl_full_name)
            runtimeContext.research_obj = ro
            log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
            prov_log_handler = logging.StreamHandler(log_file_io)

            class ProvLogFormatter(logging.Formatter):
                """Enforce ISO8601 with both T and Z."""
                def __init__(self):  # type: () -> None
                    super(ProvLogFormatter, self).__init__(
                        "[%(asctime)sZ] %(message)s")

                def formatTime(self, record, datefmt=None):
                    # type: (logging.LogRecord, str) -> str
                    record_time = time.gmtime(record.created)
                    formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", record_time)
                    with_msecs = "%s,%03d" % (formatted_time, record.msecs)
                    return with_msecs
            prov_log_handler.setFormatter(ProvLogFormatter())
            _logger.addHandler(prov_log_handler)
            _logger.debug(u"[provenance] Logging to %s", log_file_io)
            if argsl is not None:
                # Log cwltool command line options to provenance file
                _logger.info("[cwltool] %s %s", sys.argv[0], u" ".join(argsl))
            _logger.debug(u"[cwltool] Arguments: %s", args)

        if loadingContext is None:
            loadingContext = LoadingContext(vars(args))
        else:
            loadingContext = loadingContext.copy()
        loadingContext.loader = default_loader(loadingContext.fetcher_constructor)
        loadingContext.research_obj = runtimeContext.research_obj
        loadingContext.disable_js_validation = \
            args.disable_js_validation or (not args.do_validate)
        loadingContext.construct_tool_object = getdefault(
            loadingContext.construct_tool_object, workflow.default_make_tool)
        loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
        loadingContext.do_update = not (args.pack or args.print_subgraph)

        uri, tool_file_uri = resolve_tool_uri(
            args.workflow, resolver=loadingContext.resolver,
            fetcher_constructor=loadingContext.fetcher_constructor)

        try_again_msg = "" if args.debug else ", try again with --debug for more information"

        try:
            job_order_object, input_basedir, jobloader = load_job_order(
                args, stdin, loadingContext.fetcher_constructor,
                loadingContext.overrides_list, tool_file_uri)

            if args.overrides:
                loadingContext.overrides_list.extend(load_overrides(
                    file_uri(os.path.abspath(args.overrides)), tool_file_uri))

            loadingContext, workflowobj, uri = fetch_document(
                uri, loadingContext)

            assert loadingContext.loader is not None

            if args.print_deps:
                printdeps(workflowobj, loadingContext.loader, stdout,
                          args.relative_deps, uri)
                return 0

            loadingContext, uri \
                = resolve_and_validate_document(loadingContext, workflowobj, uri,
                                    preprocess_only=(args.print_pre or args.pack),
                                    skip_schemas=args.skip_schemas)
            assert loadingContext.loader is not None
            processobj, metadata = loadingContext.loader.resolve_ref(uri)
            processobj = cast(CommentedMap, processobj)
            if args.pack:
                stdout.write(print_pack(loadingContext.loader, processobj, uri, metadata))
                return 0

            if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at same time
                runtimeContext.research_obj.packed_workflow(
                    print_pack(loadingContext.loader, processobj, uri, metadata))

            if args.print_pre:
                stdout.write(json_dumps(processobj, indent=4, sort_keys=True, separators=(',', ': ')))
                return 0

            tool = make_tool(uri, loadingContext)
            if args.make_template:
                def my_represent_none(self, data):  # pylint: disable=unused-argument
                    """Force clean representation of 'null'."""
                    return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
                yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none)
                yaml.round_trip_dump(
                    generate_input_template(tool), sys.stdout,
                    default_flow_style=False, indent=4, block_seq_indent=2)
                return 0

            if args.validate:
                print("{} is valid CWL.".format(args.workflow))
                return 0

            if args.print_rdf:
                stdout.write(printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer))
                return 0

            if args.print_dot:
                printdot(tool, loadingContext.loader.ctx, stdout)
                return 0

            if args.print_targets:
                for f in ("outputs", "steps", "inputs"):
                    if tool.tool[f]:
                        _logger.info("%s%s targets:", f[0].upper(), f[1:-1])
                        stdout.write("  "+"\n  ".join([shortname(t["id"]) for t in tool.tool[f]])+"\n")
                return 0

            if args.target:
                if isinstance(tool, Workflow):
                    url = urllib.parse.urlparse(tool.tool["id"])
                    if url.fragment:
                        extracted = get_subgraph([tool.tool["id"] + "/" + r for r in args.target], tool)
                    else:
                        extracted = get_subgraph([loadingContext.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
                                                 for r in args.target],
                                                 tool)
                else:
                    _logger.error("Can only use --target on Workflows")
                    return 1
                loadingContext.loader.idx[extracted["id"]] = extracted
                tool = make_tool(extracted["id"],
                                 loadingContext)

            if args.print_subgraph:
                if "name" in tool.tool:
                    del tool.tool["name"]
                stdout.write(json_dumps(tool.tool, indent=4, sort_keys=True, separators=(',', ': ')))
                return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Tool definition failed validation:\n%s", exc,
                          exc_info=args.debug)
            return 1
        except (RuntimeError, WorkflowException) as exc:
            _logger.error(u"Tool definition failed initialization:\n%s", exc,
                          exc_info=args.debug)
            return 1
        except Exception as exc:
            _logger.error(
                u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
                try_again_msg,
                exc if not args.debug else "",
                exc_info=args.debug)
            return 1

        if isinstance(tool, int):
            return tool
        # If on MacOS platform, TMPDIR must be set to be under one of the
        # shared volumes in Docker for Mac
        # More info: https://dockstore.org/docs/faq
        if sys.platform == "darwin":
            default_mac_path = "/private/tmp/docker_tmp"
            if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmp_outdir_prefix = default_mac_path
            if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
                runtimeContext.tmpdir_prefix = default_mac_path

        for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
            if getattr(runtimeContext, dirprefix) and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX:
                sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \
                    else ""
                setattr(runtimeContext, dirprefix,
                        os.path.abspath(getattr(runtimeContext, dirprefix)) + sl)
                if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
                    try:
                        os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
                    except Exception as e:
                        _logger.error("Failed to create directory: %s", e)
                        return 1

        if args.cachedir:
            if args.move_outputs == "move":
                runtimeContext.move_outputs = "copy"
            runtimeContext.tmp_outdir_prefix = args.cachedir

        runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore())
        runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
        try:
            initialized_job_order_object = init_job_order(
                job_order_object, args, tool, jobloader, stdout,
                print_input_deps=args.print_input_deps,
                relative_deps=args.relative_deps,
                make_fs_access=runtimeContext.make_fs_access,
                input_basedir=input_basedir,
                secret_store=runtimeContext.secret_store)
        except SystemExit as err:
            return err.code

        if not executor:
            if args.parallel:
                executor = MultithreadedJobExecutor()
                runtimeContext.select_resources = executor.select_resources
            else:
                executor = SingleJobExecutor()
        assert executor is not None

        try:
            runtimeContext.basedir = input_basedir
            del args.workflow
            del args.job_order

            conf_file = getattr(args, "beta_dependency_resolvers_configuration", None)  # Text
            use_conda_dependencies = getattr(args, "beta_conda_dependencies", None)  # Text

            if conf_file or use_conda_dependencies:
                runtimeContext.job_script_provider = DependenciesConfiguration(args)
            else:
                runtimeContext.find_default_container = functools.partial(
                    find_default_container,
                    default_container=runtimeContext.default_container,
                    use_biocontainers=args.beta_use_biocontainers)

            (out, status) = executor(tool,
                                     initialized_job_order_object,
                                     runtimeContext,
                                     logger=_logger)

            if out is not None:
                if runtimeContext.research_obj is not None:
                    runtimeContext.research_obj.create_job(
                        out, None, True)

                def loc_to_path(obj):
                    for field in ("path", "nameext", "nameroot", "dirname"):
                        if field in obj:
                            del obj[field]
                    if obj["location"].startswith("file://"):
                        obj["path"] = uri_file_path(obj["location"])

                visit_class(out, ("File", "Directory"), loc_to_path)

                # Unsetting the Generation from final output object
                visit_class(out, ("File", ), MutationManager().unset_generation)

                if isinstance(out, string_types):
                    stdout.write(out)
                else:
                    stdout.write(json_dumps(out, indent=4,  # type: ignore
                                            ensure_ascii=False))
                stdout.write("\n")
                if hasattr(stdout, "flush"):
                    stdout.flush()  # type: ignore

            if status != "success":
                _logger.warning(u"Final process status is %s", status)
                return 1
            _logger.info(u"Final process status is %s", status)
            return 0

        except (validate.ValidationException) as exc:
            _logger.error(u"Input object failed validation:\n%s", exc,
                          exc_info=args.debug)
            return 1
        except UnsupportedRequirement as exc:
            _logger.error(
                u"Workflow or tool uses unsupported feature:\n%s", exc,
                exc_info=args.debug)
            return 33
        except WorkflowException as exc:
            _logger.error(
                u"Workflow error%s:\n%s", try_again_msg, strip_dup_lineno(Text(exc)),
                exc_info=args.debug)
            return 1
        except Exception as exc:  # pylint: disable=broad-except
            _logger.error(
                u"Unhandled error%s:\n  %s", try_again_msg, exc, exc_info=args.debug)
            return 1

    finally:
        if args and runtimeContext and runtimeContext.research_obj \
                and workflowobj:
            research_obj = runtimeContext.research_obj
            assert loadingContext is not None
            assert loadingContext.loader is not None
            prov_dependencies = prov_deps(workflowobj, loadingContext.loader, uri)
            research_obj.generate_snapshot(prov_dependencies)
            if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourselves to the RO
                _logger.debug(u"[provenance] Closing provenance log file %s",
                    prov_log_handler)
                _logger.removeHandler(prov_log_handler)
                # Ensure last log lines are written out
                prov_log_handler.flush()
                # Underlying WritableBagFile will add the tagfile to the manifest
                prov_log_handler.stream.close()
                prov_log_handler.close()
            research_obj.close(args.provenance)

        _logger.removeHandler(stderr_handler)
        _logger.addHandler(defaultStreamHandler)
Example #48
0
    def save(self):
        if not self.mutable:
            raise RuntimeError("You may not modify a defaults file at runtime - check the mutable attribute!")

        with open(self.path, "w") as fh:
            yaml.round_trip_dump(self.data, fh)
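
A minimal companion sketch (the class and attribute names are hypothetical, not from the original source): loading with round_trip_load is what lets the save() above write the file back with its comments intact.

class DefaultsFile:
    """Hypothetical owner of the save() method above."""
    def __init__(self, path, mutable=True):
        self.path = path
        self.mutable = mutable
        with open(path) as fh:
            # round_trip_load keeps comments, so save() re-emits them
            self.data = yaml.round_trip_load(fh)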
Example #49
0
def comment_yaml_item(data, path_to_key, data_contains_list=True):
    """
    (EXPERIMENTAL) Comment out a YAML item given its path_to_key (e.g. [foo 0 bar]), preserving comments.
    Inspired by https://stackoverflow.com/a/43927974 @cherrot
    """
    if data_contains_list:
        path_to_key = list(map(str_or_int_map, path_to_key))

    parent = data.mlget(path_to_key[:-1], list_ok=data_contains_list) if len(path_to_key) > 1 else data
    item_key = path_to_key[-1]
    deleted_item = item_key

    next_key = None

    if isinstance(parent, CommentedMap):
        if item_key not in parent:
            raise KeyError("the key \'{}\' does not exist".format(item_key))
        # don't just pop the value for item_key that way you lose comments
        # in the original YAML, instead deepcopy and delete what is not needed
        block_copy = deepcopy(parent)
        found = False
        keys = [k for k in parent.keys()]
        for key in reversed(keys):
            if key == item_key:
                found = True
            else:
                if not found:
                    next_key = key
                del block_copy[key]

        # now delete the key and its value, but preserve its preceding comments
        preceding_comments = parent.ca.items.get(item_key, [None, None, None, None])[1]

        if next_key is None:
            if parent.ca.comment is None:
                parent.ca.comment = [None, []]
            if parent.ca.comment[1] is None:
                parent.ca.comment[1] = []
            comment_list = parent.ca.comment[1]
        else:
            comment_list = parent.ca.items.get(next_key, [None, None, None, None])[1]
            if comment_list is None:
                parent.ca.items[next_key] = [None, [], None, None]
                comment_list = parent.ca.items.get(next_key)[1]
        if preceding_comments is not None:
            for c in reversed(preceding_comments):
                comment_list.insert(0, c)
        del parent[item_key]
    elif isinstance(parent, CommentedSeq):
        if not is_int(item_key) or item_key >= len(parent):
            raise RuntimeError("the key \'{}\' is not an integer or exceeds its parent's length".format(item_key))
        else:
            block_copy = deepcopy(parent)
            for i in reversed(range(len(parent))):
                if i != item_key:
                    del block_copy[i]

            next_key = item_key
            preceding_comments = deepcopy(parent.ca.items.get(item_key, [None, None, None, None])[1])
            parent.pop(item_key)  # CommentedSeq.pop(idx) automatically shifts all ca.items' indexes!

            if len(parent) == 1 or next_key == len(parent):
                comment_list = parent.ca.end  # TODO: fix this, the appended comments don't show up in some case
            else:
                comment_list = parent.ca.items.get(next_key, [None, None, None, None])[1]
                if comment_list is None:
                    parent.ca.items[next_key] = [None, [], None, None]
                    comment_list = parent.ca.items.get(next_key)[1]

            if preceding_comments is not None:
                for c in reversed(preceding_comments):
                    comment_list.insert(0, c)
    else:
        raise RuntimeError("Couldn't reach the last item following the path_to_key " + str(path_to_key))

    key_depth = len(path_to_key) - 1
    if is_int(path_to_key[-1]) and key_depth > 0:
        key_depth = key_depth - 1
    comment_list_copy = deepcopy(comment_list)
    del comment_list[:]

    start_mark = StreamMark(None, None, None, 2 * key_depth)
    skip = True
    for line in round_trip_dump(block_copy).splitlines(True):
        if skip:
            if line.strip(' ').startswith('#'):  # and deleted_item not in line:
                continue
            skip = False
        comment_list.append(CommentToken('#' + line, start_mark, None))
    comment_list.extend(comment_list_copy)

    return data
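
A hedged usage sketch (str_or_int_map and is_int are helpers from the same module, not shown here); the exact comment placement depends on the document, per the TODO above.

# Sketch: comment out the first element of a block sequence.
data = yaml.round_trip_load("foo:\n- bar: 1\n- baz: 2\n- qux: 3\n")
comment_yaml_item(data, ["foo", 0])
print(yaml.round_trip_dump(data))
# Roughly:
# foo:
# #- bar: 1
# - baz: 2
# - qux: 3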
Example #50
0
for x in dirs_set:
    if not x.endswith("/"):
        # Dirs must end with "/" to be created
        x += "/"
    num = slash_sort(x)

    if num not in dir_ordering:
        dir_ordering[num] = []
    dir_ordering[num].append(x)

# Finally, pretty things up

final_dirs = []
final_files = sorted(files_set)

for key in sorted(dir_ordering.keys()):
    final_dirs += sorted(dir_ordering[key])

final_list = final_dirs + final_files  # final_files is already sorted

# Store our new list of files and write it to the yaml file

data["files"] = final_list

try:
    with open(filename, "w") as fh:
        yaml.round_trip_dump(data, fh, default_flow_style=False)
except Exception as e:
    print("Failed to dump YAML data: {}".format(e))
    exit(1)
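
For illustration (hypothetical paths), the dumped files section then looks roughly like this, with directories grouped by depth ahead of the sorted files:

    files:
    - etc/
    - usr/
    - etc/app/
    - etc/app/settings.conf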