Example #1
def main():
    # First, build a dict of the ruleset definition
    ruleset = {}
    with open(args.aclTemplate, 'r') as infile:
        for entry in yaml.safe_load_all(infile):
            ruleset.update(entry)

    aclType = ruleset['acl_type']

    # Build a crazy dict of dicts of lists to create the appropriate YAML hierarchy
    yamlRuleset = {}
    yamlRuleset['ACL'] = {}
    yamlRuleset['ACL']['acl_id'] = ruleset['acl_id']
    yamlRuleset['ACL']['acl_type'] = aclType
    yamlRuleset['ACL']['description'] = ruleset['description']
    yamlRuleset['ACL']['name'] = ruleset['name']
    yamlRuleset['ACL']['rules'] = []

    # Build a dict of all the rules we know about
    rules = {}
    for filename in os.listdir(args.rules):
        with open(args.rules + filename) as infile:
            for entry in yaml.safe_load_all(infile):
                rules.update(entry)

    # Build a dict of all service definitions
    services = {}
    for filename in os.listdir(args.service):
        with open(args.service + filename, 'r') as infile:
            allServices = list(yaml.safe_load_all(infile))  # Convert generator to list
            for entry in allServices:
                #print("Adding: %s" % entry)
                services[entry['name']] = entry

    # Build a dict of all the objects we know about
    objects = {}
    for filename in os.listdir(args.objects):
        with open(args.objects + filename) as infile:
            for entry in yaml.safe_load_all(infile):
                objects.update(entry)


    for service in ruleset['SACP']:
        service = service.strip("{} ")
        if services[service]['acl_type'] != aclType:
            print("Error: SACP definition %s is not of type %s" % (services[service]['name'], aclType))
            exit(1)
        yamlRuleset = parseService(service, services, rules, yamlRuleset)

    # Sort ruleset by IP/prefix size
    yamlRuleset = addLengths(yamlRuleset, objects)

    # Sort the compiled rules list first by dest prefix, then source, finally weight.
    yamlRuleset['ACL']['rules'].sort(key=lambda x: (x['_dlen'], x['_slen'], x['_weight']), reverse=True)

    print(yaml.dump(yamlRuleset, explicit_start=True, default_flow_style=False))
    print('...')
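A small sketch of the dump at the end of main(): explicit_start=True prefixes the output with '---', and the final print supplies the '...' end-of-document marker (the ruleset content here is invented):

import yaml

yamlRuleset = {'ACL': {'acl_id': 100, 'rules': []}}
print(yaml.dump(yamlRuleset, explicit_start=True, default_flow_style=False))
print('...')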
Example #2
  def ReadFileObject(self, file_object):
    """Reads artifact definitions from a file-like object.

    Args:
      file_object: the file-like object to read from.

    Yields:
      Artifact definitions (instances of ArtifactDefinition).

    Raises:
      FormatError: if the format of the YAML artifact definition is not set
                   or incorrect.
    """
    # TODO: add try, except?
    yaml_generator = yaml.safe_load_all(file_object)

    last_artifact_definition = None
    for yaml_definition in yaml_generator:
      try:
        artifact_definition = self.ReadArtifactDefinitionValues(yaml_definition)
      except errors.FormatError as exception:
        error_location = u'At start'
        if last_artifact_definition:
          error_location = u'After: {0}'.format(last_artifact_definition.name)

        raise errors.FormatError(u'{0} {1}'.format(error_location, exception))

      yield artifact_definition
      last_artifact_definition = artifact_definition
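The docstring above describes a document-at-a-time contract: yield each parsed definition and report failures relative to the last good one. A minimal, self-contained sketch of that error-location pattern (the surrounding reader class and ReadArtifactDefinitionValues are assumed; a simple key lookup stands in for them here):

import io
import yaml

stream = io.StringIO("name: FirstArtifact\n---\nname: SecondArtifact\n")

last_name = None
for document in yaml.safe_load_all(stream):
    try:
        name = document["name"]  # stand-in for ReadArtifactDefinitionValues()
    except (TypeError, KeyError) as exception:
        location = "At start" if last_name is None else "After: {0}".format(last_name)
        raise ValueError("{0} {1}".format(location, exception))
    print(name)
    last_name = name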
Example #3
def update_conditions_for_sr(cond_dict, queues, subreddit):
    cond_dict[subreddit.name] = {}
    conditions = [Condition(d)
                  for d in yaml.safe_load_all(subreddit.conditions_yaml)
                  if isinstance(d, dict)]
    for queue in queues:
        cond_dict[subreddit.name][queue] = filter_conditions(conditions, queue)
Example #4
    def find_metadata(self, gallery, lang):
        """Search for a gallery metadata file.

        If there is a metadata file for the gallery, use that to determine
        captions and the order in which images shall be displayed in the
        gallery. You only need to list the images if a specific ordering or
        caption is required. The metadata file is YAML-formatted, with field
        names of
        #
        name:
        caption:
        order:
        #
        If a numeric order value is specified, we use that directly, otherwise
        we depend on how PyYAML returns the information - which may or may not
        be in the same order as in the file itself. Non-numeric ordering is not
        supported. If no caption is specified, then we return an empty string.
        Returns a string (i18n'd filename), list (ordering), dict (captions),
        dict (image metadata).
        """
        base_meta_path = os.path.join(gallery, "metadata.yml")
        localized_meta_path = utils.get_translation_candidate(self.site.config,
                                                              base_meta_path, lang)
        order = []
        captions = {}
        custom_metadata = {}
        used_path = ""

        if os.path.isfile(localized_meta_path):
            used_path = localized_meta_path
        elif os.path.isfile(base_meta_path):
            used_path = base_meta_path
        else:
            return "", [], {}, {}

        self.logger.debug("Using {0} for gallery {1}".format(
            used_path, gallery))
        with open(used_path, "r", encoding='utf-8-sig') as meta_file:
            if yaml is None:
                utils.req_missing(['PyYAML'], 'use metadata.yml files for galleries')
            meta = yaml.safe_load_all(meta_file)
            for img in meta:
                # load_all and safe_load_all both return None as their
                # final element, so skip it
                if not img:
                    continue
                if 'name' in img:
                    img_name = img.pop('name')
                    if 'caption' in img and img['caption']:
                        captions[img_name] = img.pop('caption')

                    if 'order' in img and img['order'] is not None:
                        order.insert(img.pop('order'), img_name)
                    else:
                        order.append(img_name)
                    custom_metadata[img_name] = img
                else:
                    self.logger.error("no 'name:' for ({0}) in {1}".format(
                        img, used_path))
        return used_path, order, captions, custom_metadata
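For reference, a hypothetical metadata.yml of the shape the docstring describes, parsed the same way (file names and captions invented for illustration):

import yaml

sample_metadata = """\
---
name: beach.jpg
caption: Sunset at the beach
order: 0
---
name: dunes.jpg
"""

for img in yaml.safe_load_all(sample_metadata):
    if not img:
        continue  # an empty trailing document loads as None, as find_metadata notes
    print(img.get("name"), img.get("caption", ""))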
Example #5
    def close(self):
        def get_report_path(report_id):
            return os.path.join(config.main.report_dir, report_id)

        report_filename = get_report_path(self.report_id)
        try:
            with open(report_filename) as fd:
                g = yaml.safe_load_all(fd)
                report_details = g.next()
        except IOError:
            raise ReportNotFound

        timestamp = otime.timestamp(datetime.fromtimestamp(report_details['start_time']))
        dst_filename = '{test_name}-{timestamp}-{probe_asn}-probe.yamloo'.format(
                timestamp=timestamp,
                **report_details)

        dst_path = os.path.join(config.main.archive_dir,
                                report_details['probe_cc'])

        if not os.path.isdir(dst_path):
            os.mkdir(dst_path)

        dst_path = os.path.join(dst_path, dst_filename)
        os.rename(report_filename, dst_path)

        del config.reports[self.report_id]
Example #6
File: misc.py  Project: tv42/teuthology
def read_config(ctx):
    filename = os.path.join(os.environ['HOME'], '.teuthology.yaml')
    ctx.teuthology_config = {}
    with file(filename) as f:
        g = yaml.safe_load_all(f)
        for new in g:
            ctx.teuthology_config.update(new)
Example #7
    def Build(self, renderer):
        repository = self.args.repository
        profile_metadata = repository.Metadata(self.args.profile_name)

        sources = []
        for pattern in self.args.patterns:
            sources.extend(fnmatch.filter(repository.ListFiles(), pattern))

        # Find the latest modified source
        last_modified = 0
        for source in sources:
            source_metadata = repository.Metadata(source)
            last_modified = max(
                last_modified, source_metadata["LastModified"])

        if not profile_metadata or (
                last_modified > profile_metadata["LastModified"]):
            definitions = []
            for source in sources:
                definitions.extend(yaml.safe_load_all(
                    repository.GetData(source, raw=True)))

            # Transform the data as required.
            data = {
                "$ARTIFACTS": definitions,
                "$METADATA": dict(
                    ProfileClass="ArtifactProfile",
                )
            }

            repository.StoreData(self.args.profile_name, utils.PPrint(data),
                                 raw=True)
            renderer.format("Building artifact profile {0}\n",
                            self.args.profile_name)
Example #8
def ArtifactsFromYaml(yaml_content):
  """Get a list of Artifacts from json."""
  try:
    raw_list = list(yaml.safe_load_all(yaml_content))
  except ValueError as e:
    raise ArtifactDefinitionError("Invalid json for artifact: %s" % e)

  # Try to do the right thing with json/yaml formatted as a list.
  if (isinstance(raw_list, list) and len(raw_list) == 1 and
      isinstance(raw_list[0], list)):
    raw_list = raw_list[0]

  # Convert json into artifact and validate.
  valid_artifacts = []
  for artifact_dict in raw_list:
    # In this case we are feeding parameters directly from potentially
    # untrusted yaml/json to our RDFValue class. However, safe_load ensures
    # these are all primitive types as long as there is no other deserialization
    # involved, and we are passing these into protobuf primitive types.
    try:
      artifact_value = rdfvalue.Artifact(**artifact_dict)
      valid_artifacts.append(artifact_value)
    except (TypeError, AttributeError) as e:
      raise ArtifactDefinitionError("Invalid artifact definition for %s: %s" %
                                    (artifact_dict.get("name"), e))

  return valid_artifacts
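A short sketch of the single-nested-list case the comment above unwraps: YAML written as one top-level sequence comes back from safe_load_all as a one-element list (artifact names invented):

import yaml

raw_list = list(yaml.safe_load_all("- name: Artifact1\n- name: Artifact2\n"))
print(raw_list)    # [[{'name': 'Artifact1'}, {'name': 'Artifact2'}]]

if isinstance(raw_list, list) and len(raw_list) == 1 and isinstance(raw_list[0], list):
    raw_list = raw_list[0]
print(raw_list)    # [{'name': 'Artifact1'}, {'name': 'Artifact2'}]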
Example #9
    def test_create_and_update_report(self):
        report_header = {
            'software_name': 'ooni-test',
            'software_version': '0.1',
            'test_name': 'some-test',
            'test_version': '0.1',
            'probe_asn': 'AS0'
        }
        response = yield self.request('/report', "POST", report_header)
        response_body = json.loads(response.body)
        self.assertIn('backend_version', response_body)
        self.assertIn('report_id', response_body)

        report_id = response_body['report_id']
        response = yield self.update_report(report_id)
        response_body = json.loads(response.body)

        with open(report_id) as f:
            self.filenames.add(report_id)
            written_report = yaml.safe_load_all(f)

            written_report_header = written_report.next()
            for key in report_header.keys():
                self.assertEqual(written_report_header[key], report_header[key])
            self.assertEqual(yaml.safe_load(sample_report_entry),
                             written_report.next())
Example #10
 def read_file(self, filename):
     logger.info("Reading file %s", filename)
     with file(filename) as fd:
         properties = yaml.safe_load_all(fd).next()
         for key, loader in self._data_loaders.items():
             properties[key] = loader(filename)
         return properties
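Example #10 takes only the first document via the Python 2 generator method .next(); a Python 3 sketch of the same first-document-only pattern (the data-loader plumbing is omitted) would use the built-in next():

import io
import yaml

stream = io.StringIO("title: properties\n---\nbody: not needed here\n")
properties = next(yaml.safe_load_all(stream))
print(properties)  # {'title': 'properties'}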
Example #11
File: seminar.py  Project: jayfo/web-dub
def seminar_update_template():
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath='_seminars'),
        trim_blocks=True,
        lstrip_blocks=True,
        undefined=jinja2.StrictUndefined
    )
    template = env.get_template('_template.md')

    seminar_paths = [
        seminar_file_entry.path
        for seminar_file_entry
        in os.scandir('_seminars')
        if (
            seminar_file_entry.is_file()
            and os.path.splitext(seminar_file_entry.name)[1] == '.md'
            and seminar_file_entry.name != '_template.md'
        )
    ]

    for seminar_path_current in seminar_paths:
        # Load the existing seminar file
        with open(seminar_path_current, encoding='utf-8') as f:
            # Parse the YAML of the seminar
            seminar = list(yaml.safe_load_all(f))[0]

            # If we ever have more than one version, we'll need to check things here
            assert seminar['version'] == 1

        # Write it back using the template
        seminar_rendered = template.render(seminar)
        with open(seminar_path_current, encoding='utf-8', mode='w') as f:
            f.write(seminar_rendered)
Example #12
def find_playbooks():
    ''' find Ansible playbooks'''
    all_playbooks = set()
    included_playbooks = set()

    exclude_dirs = ('adhoc', 'tasks')
    for yaml_file in find_files(
            os.path.join(os.getcwd(), 'playbooks'),
            exclude_dirs, None, r'\.ya?ml$'):
        with open(yaml_file, 'r') as contents:
            for task in yaml.safe_load_all(contents) or {}:
                if not isinstance(task, dict):
                    # Skip yaml files which are not a dictionary of tasks
                    continue
                if 'include' in task or 'import_playbook' in task:
                    # Add the playbook and capture included playbooks
                    all_playbooks.add(yaml_file)
                    if 'include' in task:
                        directive = task['include']
                    else:
                        directive = task['import_playbook']
                    included_file_name = directive.split()[0]
                    included_file = os.path.normpath(
                        os.path.join(os.path.dirname(yaml_file),
                                     included_file_name))
                    included_playbooks.add(included_file)
                elif 'hosts' in task:
                    all_playbooks.add(yaml_file)
    return all_playbooks, included_playbooks
Example #13
def check_file(printer, path, config):
  """Check YAML file 'path' for differences.

  :param printer: Where we report differences to.
  :param str path: The YAML file to test.
  :param dict config: Contains Kubernetes parsing and access configuration.
  :return: Number of differences found.
  """
  with open(path, 'r') as stream:
    expected = yaml.safe_load_all(stream)

    differences = 0
    for data in expected:
      # data can be None, e.g. in cases where the doc ends with a '---'
      if not data:
        continue
      try:
        for kube_obj in KubeObject.from_dict(data, config["namespace"]):
          printer.add(path, kube_obj)

          try:
            running = kube_obj.get_from_cluster(config["kubeconfig"])
          except subprocess.CalledProcessError as e:
            printer.diff(path, Difference(e.output, None))
            differences += 1
            continue

          for difference in diff("", kube_obj.data, running):
            differences += 1
            printer.diff(path, difference)
      except Exception:
        print("Failed parsing %s." % (path))
        raise

    return differences
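A quick demonstration of the None case the loop above guards against: a document that is empty, for instance when the file ends with '---', is yielded as None:

import yaml

docs = list(yaml.safe_load_all("kind: Service\n---\n"))
print(docs)  # [{'kind': 'Service'}, None]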
Example #14
File: db.py  Project: iffy/studytext
 def info(self, path):
     """
     Get info about a particular path.
     """
     p = self.safePath(path)
     info = {
         'path': os.path.relpath(p, self.root),
         'dir': os.path.isdir(p),
         'file': os.path.isfile(p),
     }
     info['parent'] = os.path.join(info['path'], '..')
     info['name'] = os.path.basename(p)
     if info['dir']:
         # directory
         meta_filename = os.path.join(p, '_meta.yml')
         if os.path.exists(meta_filename):
             metadata = yaml.safe_load(open(meta_filename, 'rb'))
             if metadata:
                 info.update(metadata)
     else:
         # file
         if p.endswith('.md'):
             metadata = yaml.safe_load_all(open(p, 'rb')).next()
             if metadata and isinstance(metadata, dict):
                 info.update(metadata)
     return info
Example #15
    def test_yaml_load_sanity(self):
        data = list(yaml.safe_load_all("""
---
title: Test One
tags:
- inf1000
- oblig1
- arrays
text: |-
    Hello

    Cruel

    World
---
title: Test Two
text: Testtext
"""))
        self.assertEquals(len(data), 2)
        self.assertEquals(data[0], {
            'title': 'Test One',
            'tags': ['inf1000', 'oblig1', 'arrays'],
            'text': 'Hello\n\nCruel\n\nWorld'
        })
        self.assertEquals(data[1], {
            'title': 'Test Two',
            'text': 'Testtext'
        })
Example #16
def importFromYaml():
    p = GangaEventProcessor()
    count = 0
    
    filename = raw_input('Enter YAML file path: ')
    print 'Importing data from "%s"' % filename
    try:
        f = open(filename, 'rb')
    except IOError:
        print 'Error! File %s doesn\'t exist!' % filename
        return
        
    try:
        data=yaml.safe_load_all(f)
    except:
        print 'Error! Wrong data format.'
        return
    
    for message in data:
        try:
            headers = message['headers']
            body = eval(message['body'])
            timestamp = headers['_publisher_timestamp']
            timestamp = timestamp[:timestamp.find('.')]
            
            data = [timestamp,body['event'],body]
            
            p.process_event(data)
            count = count + 1
        except:
            pass
        
    print '\nProcessed %d events.' % count
Example #17
def _load_yaml(yaml_string):
    # TODO: Rip the backwards-compatibility out of this at some later date.

    # JSON is supposed to be a subset of YAML: en.wikipedia.org/wiki/YAML#JSON
    # So we switched the client-side encoding of the diagnostic data from YAML
    # to JSON (and got huge performance improvements), and we were still able
    # to decode it the same way. But... it turns out that, at least in Python's
    # YAML implementation, there is some JSON that's not valid YAML. I.e.:
    # >>> x = json.loads('{"key": "hi\\/there"}')
    # ... print x
    # {u'key': u'hi/there'}
    # >>> x = yaml.load('{"key": "hi\\/there"}')
    # ... print x
    # <big stack trace>
    # found unknown escape character '/'
    # in "<string>", line 1, column 13:
    # {"key": "hi\/there"}
    #
    # So we're going to try loading `yaml_string` as JSON first, and then fall
    # back to YAML if that fails.

    logger.debug_log('maildecryptor._load_yaml start')

    try:
        obj = json.loads(yaml_string)
    except:
        yaml_docs = []
        for yaml_doc in yaml.safe_load_all(yaml_string):
            yaml_docs.append(yaml_doc)

        obj = _upgrade_old_object(yaml_docs)

    logger.debug_log('maildecryptor._load_yaml end')

    return obj
Example #18
File: lock.py  Project: alsall/teuthology
def updatekeys(ctx):
    loglevel = logging.INFO
    if ctx.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
    )

    misc.read_config(ctx)

    machines = [canonicalize_hostname(m) for m in ctx.machines]

    if ctx.targets:
        try:
            with file(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))

    return scan_for_locks(ctx, machines)
Example #19
def initialize(queues, reload_mod_subs=True):
    global r

    subreddits = (session.query(Subreddit)
                         .filter(Subreddit.enabled == True)
                         .all())
    for sr in subreddits:
        sr.name = sr.name.lower()

    if reload_mod_subs:
        r.user._mod_subs = None
        logging.info('Getting list of moderated subreddits')
        modded_subs = r.user.get_cached_moderated_reddits().keys()
    else:
        modded_subs = r.user._mod_subs.keys()

    # get rid of any subreddits the bot doesn't moderate
    subreddits = [s for s in subreddits if s.name in modded_subs]

    sr_dict = {}
    cond_dict = {}
    for sr in subreddits:
        sr_dict[sr.name] = sr
        cond_dict[sr.name] = {}

        conditions = [Condition(d)
                      for d in yaml.safe_load_all(sr.conditions_yaml)
                      if isinstance(d, dict)]
        for queue in queues:
            cond_dict[sr.name][queue] = filter_conditions(conditions, queue)

    return (sr_dict, cond_dict)
Example #20
    def __init__(self, *args, **kwargs):
        super(ArtifactsCollector, self).__init__(*args, **kwargs)
        self.artifact_profile = self.session.LoadProfile("artifacts")

        extra_definitions = [
            open(x).read() for x in self.plugin_args.artifact_files]
        extra_definitions.extend(self.plugin_args.definitions or [])

        # Make a copy of the artifact registry.
        if extra_definitions:
            self.artifact_profile = self.artifact_profile.copy()

            for definition in extra_definitions:
                for definition_data in yaml.safe_load_all(definition):
                    self.artifact_profile.AddDefinition(definition_data)

        self.seen = set()
        self.supported_os = self.get_supported_os(self.session)
        if self.supported_os is None:
            raise plugin.PluginError(
                "Unable to determine running environment.")

        # Make sure the args make sense.
        if self.plugin_args.output_path is None:
            if self.plugin_args.copy_files:
                raise plugin.PluginError(
                    "Can only copy files when an output file is specified.")
            if self.plugin_args.create_timeline:
                raise plugin.PluginError(
                    "Can only create timelines when an output file "
                    "is specified.")
Example #21
def _ReadArgGroupsFromFile(arg_file):
  """Collects all the arg groups defined in the yaml file into a dictionary.

  Each dictionary key is an arg-group name whose corresponding value is a nested
  dictionary containing arg-name: arg-value pairs defined in that group.

  Args:
    arg_file: str, the name of the YAML argument file to open and parse.

  Returns:
    A dict containing all arg-groups found in the arg_file.

  Raises:
    BadFileException: the yaml package encountered a ScannerError.
  """
  with open(arg_file, 'r') as data:
    yaml_generator = yaml.safe_load_all(data)
    all_groups = {}
    try:
      for d in yaml_generator:
        if d is None:
          log.warning('Ignoring empty yaml document.')
        elif isinstance(d, dict):
          all_groups.update(d)
        else:
          raise yaml.scanner.ScannerError(
              '[{0}] is not a valid argument group.'.format(str(d)))
    except yaml.scanner.ScannerError as error:
      raise calliope_exceptions.BadFileException(
          'Error parsing YAML file [{0}]: {1}'.format(arg_file, str(error)))
  return all_groups
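A minimal sketch of the merge behaviour the docstring describes, using an invented two-group argument file: each YAML document maps arg-group names to arg-name/arg-value pairs, and the documents are folded into one dictionary:

import yaml

arg_file_contents = """\
smoke-group:
  orientations: portrait
---
full-group:
  orientations: landscape
"""

all_groups = {}
for d in yaml.safe_load_all(arg_file_contents):
    if isinstance(d, dict):
        all_groups.update(d)
print(all_groups)  # {'smoke-group': {...}, 'full-group': {...}}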
Example #22
    def __init__(self, filename, conf):
        self.conf = conf

        self.table = None
        self.contents = None
        self.dfn = None
        self.final = False

        self.spec = []

        dfn_dir = os.path.abspath(os.path.dirname(filename))

        with open(filename, 'r') as f:
            definition = yaml.safe_load(f)

            filter_specs = []

            for dfn in definition['files']:
                if isinstance(dfn, dict):
                    if 'file' in dfn:
                        filter_specs.append( (dfn['file'],  dfn['level'], True) )
                    elif 'text' in dfn:
                        if 'title' in dfn:
                            filter_specs.append( ( (dfn['title'], dfn['text']), dfn['level'], False ) )
                        else:
                            filter_specs.append( ( dfn['text'], dfn['level'], False ) )
                    else:
                        raise Exception('[ERROR] [toc]: problem with {0} in {1}'.format(dfn, filename))
                else:
                    filter_specs.append( (dfn,  1, True) )

        all_objs = {}

        self._first_source = definition['sources'][0]

        for source in definition['sources']:
            with open(os.path.join(dfn_dir, source), 'r') as f:
                objs = yaml.safe_load_all(f)

                for obj in objs:
                    all_objs[obj['file']] = obj

        for fn, level, is_file in filter_specs:
            if is_file is True:
                try:
                    obj = all_objs[fn]
                    obj['level'] = level
                    self.spec.append(obj)
                except KeyError:
                    raise Exception('[ERROR] [toc]: KeyError "{0}" in file: {1}'.format(fn, filename))
            else:
                # translation
                if isinstance(fn, tuple):
                    self.spec.append( { 'name': fn[0],
                                        'level': level,
                                        'text': fn[1] } )
                else:
                    self.spec.append( { 'name': None,
                                        'level': level,
                                        'text': fn } )
Example #23
 def __init__(self, report_file):
     self._f = open(report_file)
     self._skipped_line = 0
     self.report_file = report_file
     self.report_document = yaml.safe_load_all(self._f)
     self.parse_header()
     self.detect_nettest_format()
Example #24
	def add_eyes(self, views):
		try:
			with open(views, 'r') as f:
				for view in yaml.safe_load_all(f):
					self.eyes.extend(view)
		except IOError as inst:
			sys.stderr.write("Perspective file not found\n")
Example #25
    def close(self):
        def get_report_path(report_id):
            return os.path.join(self.report_dir, report_id)

        report_filename = get_report_path(self.report_id)
        try:
            with open(report_filename) as fd:
                g = yaml.safe_load_all(fd)
                report_details = g.next()
        except IOError:
            raise e.ReportNotFound

        dst_filename = report_file_name(report_details)
        dst_path = os.path.join(self.archive_dir,
                                report_details['probe_cc'])

        if not os.path.isdir(dst_path):
            os.mkdir(dst_path)

        dst_path = os.path.join(dst_path, dst_filename)
        os.rename(report_filename, dst_path)

        if not self.delayed_call.called:
            self.delayed_call.cancel()
        del self.reports[self.report_id]
Example #26
	def __init__(self, filename):
		f = open(filename,'r')
		yamloo = yaml.safe_load_all(f)
		self.report_header = yamloo.next()
		self.report_entries = []
		for entry in yamloo:
			self.report_entries.append(entry)
Example #27
File: ls.py  Project: charpty/teuthology
def ls(archive_dir, verbose):
    for j in get_jobs(archive_dir):
        job_dir = os.path.join(archive_dir, j)
        summary = {}
        try:
            with file(os.path.join(job_dir, "summary.yaml")) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    summary.update(new)
        except IOError as e:
            if e.errno == errno.ENOENT:
                print_debug_info(j, job_dir, archive_dir)
                continue
            else:
                raise

        print "{job} {status} {owner} {desc} {duration}s".format(
            job=j,
            owner=summary.get("owner", "-"),
            desc=summary.get("description", "-"),
            status=get_status(summary),
            duration=int(summary.get("duration", 0)),
        )
        if verbose and "failure_reason" in summary:
            print "    {reason}".format(reason=summary["failure_reason"])
Example #28
def load_from_stream(stream):
    """
    Load configuration from a stream.

    A stream could be a string or file descriptor
    """
    return _merge_flow(yaml.safe_load_all(stream))
Example #29
def get_bridge_reachability_reports(s3):
    """
    Fetches Tor bridge reachability reports from AWS S3
    :param s3:
    :param debug:
    :return:
    """
    reports = []
    keys = get_bridge_reachability_report_keys(s3=s3)

    for i, key in enumerate(keys):
        print("[%d/%d] %s" % (i+1, len(keys), key))

        gzipped_yml = s3.get_key(key).get_contents_as_string()
        yml = list(yaml.safe_load_all(zlib.decompress(bytes(gzipped_yml), 15+32)))

        # Parse YAML file into header/payload, and associated payload with header
        header = yml[0]
        report = header
        report['results'] = []

        for subreport in yml[1:]:
            if subreport['record_type'] == 'entry':
                print("subreport")
                report['results'].append(subreport)
            if subreport['record_type'] == 'footer':
                report['footer'] = subreport
        reports.append(report)
    return reports
Example #30
  def ArtifactsFromYaml(self, yaml_content):
    """Get a list of Artifacts from yaml."""
    raw_list = list(yaml.safe_load_all(yaml_content))

    # TODO(hanuszczak): I am very sceptical about that "doing the right thing"
    # below. What are the real use cases?

    # Try to do the right thing with json/yaml formatted as a list.
    if (isinstance(raw_list, list) and len(raw_list) == 1 and
        isinstance(raw_list[0], list)):
      raw_list = raw_list[0]

    # Convert json into artifact and validate.
    valid_artifacts = []
    for artifact_dict in raw_list:
      # In this case we are feeding parameters directly from potentially
      # untrusted yaml/json to our RDFValue class. However, safe_load ensures
      # these are all primitive types as long as there is no other
      # deserialization involved, and we are passing these into protobuf
      # primitive types.
      try:
        artifact_value = rdf_artifacts.Artifact(**artifact_dict)
        valid_artifacts.append(artifact_value)
      except (TypeError, AttributeError, type_info.TypeValueError) as e:
        name = artifact_dict.get("name")
        raise rdf_artifacts.ArtifactDefinitionError(
            name, "invalid definition", cause=e)

    return valid_artifacts
Example #31
#!/usr/bin/env python3

import sys
import traceback

import yaml

errors_found = False

for zuul_yaml_file in sys.argv[1:]:
    print(f"Processing {zuul_yaml_file!r}...", flush=True)
    with open(zuul_yaml_file, "r") as yaml_stream:
        for doc_no, yaml_doc in enumerate(yaml.safe_load_all(yaml_stream), 1):
            print(f"Verifying YAML doc #{doc_no}...", flush=True)
            yaml_doc_errors_found = False
            try:
                for item in yaml_doc:
                    if "job" in item:
                        job = item["job"]
                        if not job["name"].startswith("fi-ansible--"):
                            yaml_doc_errors_found = errors_found = True
                            print(
                                f"Locally defined Zuul job {job['name']!r} must be named"
                                " 'fi-ansible-...'")
            except Exception:
                yaml_doc_errors_found = errors_found = True
                traceback.print_exc()

            if yaml_doc_errors_found:
                print(f"Verification failed.", flush=True)
            else:
Example #32
        'deployment-validation-operator',
    })

csv['spec']['install']['spec']['permissions'] = []
with open(manifest_dir / 'role.yaml', 'r') as stream:
    operator_role = yaml.safe_load(stream)
    csv['spec']['install']['spec']['permissions'].append({
        'rules':
        operator_role['rules'],
        'serviceAccountName':
        'deployment-validation-operator',
    })

with open(manifest_dir / 'operator.yaml', 'r') as stream:
    operator_components = []
    operator = yaml.safe_load_all(stream)
    for doc in operator:
        operator_components.append(doc)
    # There is only one yaml document in the operator deployment
    operator_deployment = operator_components[0]
    csv['spec']['install']['spec']['deployments'][0]['spec'] = \
        operator_deployment['spec']

csv['spec']['install']['spec']['deployments'][0]['spec']['template']['spec']['containers'][0]['image'] = \
    '${IMAGE}:${IMAGE_TAG}'

now = datetime.datetime.now()
csv['metadata']['annotations']['createdAt'] = \
    now.strftime('%Y-%m-%dT%H:%M:%SZ')

yaml.dump(template, sys.stdout, default_flow_style=False)
Example #33
 def data(self):
     if not self._data:
         with open(self.yamlf, "rb") as f:
             self._data = list(yaml.safe_load_all(f))
     return self._data
Example #34
    def set_pod_spec(self, event):
        self.model.unit.status = MaintenanceStatus("Setting pod spec")

        try:
            self._check_leader()

            image_details = self._check_image_details()
        except CheckFailed as check_failed:
            self.model.unit.status = check_failed.status
            return

        validating, mutating = yaml.safe_load_all(
            Path("src/webhooks.yaml").read_text())

        self.model.pod.set_spec(
            {
                "version":
                3,
                "serviceAccount": {
                    "roles": [{
                        "global":
                        True,
                        "rules": [
                            {
                                "apiGroups": [""],
                                "resources": [
                                    "configmaps",
                                    "serviceaccounts",
                                    "services",
                                    "events",
                                    "namespaces",
                                    "persistentvolumes",
                                    "persistentvolumeclaims",
                                    "pods",
                                    "pods/log",
                                    "pods/status",
                                ],
                                "verbs": ["*"],
                            },
                            {
                                "apiGroups": ["apps"],
                                "resources": ["deployments"],
                                "verbs": ["*"],
                            },
                            {
                                "apiGroups": ["rbac.authorization.k8s.io"],
                                "resources": [
                                    "roles",
                                    "rolebindings",
                                ],
                                "verbs": ["*"],
                            },
                            {
                                "apiGroups": ["batch"],
                                "resources": ["jobs", "cronjobs"],
                                "verbs": ["*"],
                            },
                            {
                                "apiGroups": ["kubeflow.org"],
                                "resources": [
                                    "experiments",
                                    "experiments/status",
                                    "experiments/finalizers",
                                    "trials",
                                    "trials/status",
                                    "trials/finalizers",
                                    "suggestions",
                                    "suggestions/status",
                                    "suggestions/finalizers",
                                    "tfjobs",
                                    "pytorchjobs",
                                    "mpijobs",
                                    "xgboostjobs",
                                    "mxjobs",
                                ],
                                "verbs": ["*"],
                            },
                        ],
                    }],
                },
                "containers": [{
                    "name":
                    "katib-controller",
                    "imageDetails":
                    image_details,
                    "command": ["./katib-controller"],
                    "args": [
                        f"--webhook-port={self.model.config['webhook-port']}",
                        "--trial-resources=Job.v1.batch",
                        "--trial-resources=TFJob.v1.kubeflow.org",
                        "--trial-resources=PyTorchJob.v1.kubeflow.org",
                        "--trial-resources=MPIJob.v1.kubeflow.org",
                        "--trial-resources=PipelineRun.v1beta1.tekton.dev",
                    ],
                    "ports": [
                        {
                            "name": "webhook",
                            "containerPort": self.model.config["webhook-port"],
                        },
                        {
                            "name": "metrics",
                            "containerPort": self.model.config["metrics-port"],
                        },
                    ],
                    "envConfig": {
                        "KATIB_CORE_NAMESPACE": self.model.name,
                    },
                    "volumeConfig": [{
                        "name":
                        "certs",
                        "mountPath":
                        "/tmp/cert",
                        "files": [
                            {
                                "path": "tls.crt",
                                "content": self._stored.cert,
                            },
                            {
                                "path": "tls.key",
                                "content": self._stored.key,
                            },
                        ],
                    }],
                }],
            },
            k8s_resources={
                "kubernetesResources": {
                    "customResourceDefinitions": [{
                        "name":
                        crd["metadata"]["name"],
                        "spec":
                        crd["spec"]
                    } for crd in yaml.safe_load_all(
                        Path("src/crds.yaml").read_text())],
                    "mutatingWebhookConfigurations": [{
                        "name":
                        mutating["metadata"]["name"],
                        "webhooks":
                        mutating["webhooks"],
                    }],
                    "validatingWebhookConfigurations": [{
                        "name":
                        validating["metadata"]["name"],
                        "webhooks":
                        validating["webhooks"],
                    }],
                },
                "configMaps": {
                    "katib-config": {
                        f: Path(f"src/{f}.json").read_text()
                        for f in (
                            "metrics-collector-sidecar",
                            "suggestion",
                            "early-stopping",
                        )
                    },
                    "trial-template": {
                        f + suffix: Path(f"src/{f}.yaml").read_text()
                        for f, suffix in (
                            ("defaultTrialTemplate", ".yaml"),
                            ("enasCPUTemplate", ""),
                            ("pytorchJobTemplate", ""),
                        )
                    },
                },
            },
        )

        self.model.unit.status = ActiveStatus()
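The tuple unpacking at the top of set_pod_spec assumes src/webhooks.yaml holds exactly two documents, validating first and mutating second. A small sketch of that assumption, with invented webhook names:

import yaml

webhooks_yaml = """\
metadata: {name: example-validating-webhook}
webhooks: []
---
metadata: {name: example-mutating-webhook}
webhooks: []
"""

validating, mutating = yaml.safe_load_all(webhooks_yaml)
print(validating["metadata"]["name"], mutating["metadata"]["name"])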
Example #35
def validate_object(REPO_PATH,
                    schema_path,
                    manifest_path,
                    return_objects,
                    verbose,
                    lookups=None,
                    macros=None):
    ''' Validate scheme '''
    error = False

    # uuids
    baselines_uuids = []
    story_uuids = []
    detection_uuids = []
    investigation_uuids = []

    schema_file = path.join(path.expanduser(REPO_PATH), schema_path)

    try:
        schema = json.loads(open(schema_file, 'rb').read())
    except IOError:
        print "ERROR: reading baseline schema file {0}".format(schema_file)

    objects = {}
    manifest_files = path.join(path.expanduser(REPO_PATH), manifest_path)

    for manifest_file in glob.glob(manifest_files):
        if verbose:
            print "processing manifest {0}".format(manifest_file)

        with open(manifest_file, 'r') as stream:
            try:
                object = list(yaml.safe_load_all(stream))[0]
            except yaml.YAMLError as exc:
                print(exc)
                print "Error reading {0}".format(manifest_file)
                error = True
                continue

        try:
            jsonschema.validate(instance=object, schema=schema)
        except jsonschema.exceptions.ValidationError as json_ve:
            print "ERROR: {0} at:\n\t{1}".format(json.dumps(json_ve.message),
                                                 manifest_file)
            print "\tAffected Object: {}".format(json.dumps(json_ve.instance))
            error = True

        objects[object['name']] = object

        # validate content
        if schema_path == 'spec/v2/lookups.spec.json':
            error = error or validate_lookups_content(REPO_PATH, "lookups/%s",
                                                      object, manifest_file)
        elif schema_path == 'spec/v2/baselines.spec.json':
            error = error or validate_baselines_content(
                object, macros, lookups, manifest_file, baselines_uuids)
        elif schema_path == 'spec/v2/story.spec.json':
            error = error or validate_story_content(object, manifest_file,
                                                    story_uuids)
        elif schema_path == 'spec/v2/detections.spec.json':
            error = error or validate_detection_content(
                object, macros, lookups, manifest_file, detection_uuids)
        elif schema_path == 'spec/v2/investigations.spec.json':
            error = error or validate_investigation_content(
                object, macros, lookups, manifest_file, investigation_uuids)

    if return_objects:
        return error, objects
    else:
        return error
Example #36
def launch_kubernetes_kernel(kernel_id, port_range, response_addr,
                             spark_context_init_mode):
    # Launches a containerized kernel as a kubernetes pod.

    config.load_incluster_config()

    # Capture keywords and their values.
    keywords = dict()

    # Factory values...
    # Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
    # value since this is used to locate the kernel launch script within the image.
    keywords['eg_port_range'] = port_range
    keywords['eg_response_address'] = response_addr
    keywords['kernel_id'] = kernel_id
    keywords['kernel_name'] = os.path.basename(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    keywords['kernel_spark_context_init_mode'] = spark_context_init_mode

    # Walk env variables looking for names prefixed with KERNEL_.  When found, set corresponding keyword value
    # with name in lower case.
    for name, value in os.environ.items():
        if name.startswith('KERNEL_'):
            keywords[name.lower()] = yaml.safe_load(value)

    # Substitute all template variable (wrapped with {{ }}) and generate `yaml` string.
    k8s_yaml = generate_kernel_pod_yaml(keywords)

    # For each k8s object (kind), call the appropriate API method.  Too bad there isn't a method
    # that can take a set of objects.
    #
    # Creation for additional kinds of k8s objects can be added below.  Refer to
    # https://github.com/kubernetes-client/python for API signatures.  Other examples can be found in
    # https://github.com/jupyter-incubator/enterprise_gateway/blob/master/enterprise_gateway/services/processproxies/k8s.py
    #
    kernel_namespace = keywords['kernel_namespace']
    k8s_objs = yaml.safe_load_all(k8s_yaml)
    for k8s_obj in k8s_objs:
        if k8s_obj.get('kind'):
            if k8s_obj['kind'] == 'Pod':
                #print("{}".format(k8s_obj))  # useful for debug
                client.CoreV1Api(client.ApiClient()).create_namespaced_pod(
                    body=k8s_obj, namespace=kernel_namespace)
            elif k8s_obj['kind'] == 'Secret':
                client.CoreV1Api(client.ApiClient()).create_namespaced_secret(
                    body=k8s_obj, namespace=kernel_namespace)
            elif k8s_obj['kind'] == 'PersistentVolumeClaim':
                client.CoreV1Api(client.ApiClient()
                                 ).create_namespaced_persistent_volume_claim(
                                     body=k8s_obj, namespace=kernel_namespace)
            elif k8s_obj['kind'] == 'PersistentVolume':
                client.CoreV1Api(
                    client.ApiClient()).create_persistent_volume(body=k8s_obj)
            else:
                sys.exit(
                    "ERROR - Unhandled Kubernetes object kind '{}' found in yaml file - kernel launch terminating!"
                    .format(k8s_obj['kind']))
        else:
            sys.exit(
                "ERROR - Unknown Kubernetes object '{}' found in yaml file - kernel launch terminating!"
                .format(k8s_obj))
Example #37
def realYaml():
    path = os.path.dirname(os.path.abspath(__file__))
    with open('config.yaml', 'r', encoding='utf-8') as f:
        return list(yaml.safe_load_all(f))
Example #38
File: manifest.py  Project: andysodt/id3c
def parse_using_config(config_file):
    """
    Parse multiple manifest sheets specified by a config file.

    <config.yaml> must be a file with at least one YAML document in it.  Each
    document corresponds closely to the command-line options taken by the
    "parse" command (a sibling to this command).  For example, the following
    configuration contains two documents:

    \b
        ---
        workbook: OneDrive/SFS Prospective Samples 2018-2019.xlsx
        sheet: HMC
        sample_column: "Barcode ID*"
        extra_columns:
          collection:
            name: "Collection ID*"
            barcode: true
          aliquots:
            name: "Aliquot [ABC]"
            multiple: true
          date: "Collection date*"
          aliquot_date: "Date aliquoted"
          racks:
            name: "Rack [ABC]*"
            multiple: true
          notes: "Notes"
    \b
        ---
        workbook: OneDrive/SFS Retrospective Samples 2018-2019.xlsx
        sheet: HMC
        sample_column: "Barcode ID*"
        extra_columns:
          aliquots:
            name: "Aliquot [ABC]"
            multiple: true
          date: "Collection date*"
          aliquot_date: "Date aliquoted"
          racks:
            name: "Rack [ABC]*"
            multiple: true
          test_results: "Test ResulTS"
        ...

    The key: value pairs in "extra_columns" name destination record fields (as
    the key) and source columns (as the value).  For most source columns, a
    simple string name (or shell-glob pattern) is enough.  Other behaviour is
    available by using a dictionary value.

    To collect values from multiple source columns into one record field,
    specify a dictionary like:

    \b
        field:
          name: column_[abc]
          multiple: true

    To mark a field as containing unique barcodes, similar to the built-in
    "sample_column" option, specify a dictionary like:

    \b
        field:
          name: column
          barcode: true

    Barcode fields are checked for duplicates and any records containing a
    duplicated value are dropped with a warning.

    Relative paths in <config.yaml> are treated relative to the containing
    directory of the configuration file itself.

    All manifest records parsed are output to stdout as newline-delimited JSON
    records.  You will likely want to redirect stdout to a file.
    """
    configs = list(yaml.safe_load_all(config_file))

    if config_file.name != "<stdin>":
        config_dir = dirname(config_file.name)

        # dirname is the empty string if we're in the same directory as the
        # config file.
        if config_dir:
            chdir(config_dir)

    for config in configs:
        try:
            kwargs = {
                "workbook": config["workbook"],
                "sheet": config["sheet"],
                "sample_column": config["sample_column"],
                "extra_columns": list(config.get("extra_columns", {}).items()),
                "sample_type": config.get("sample_type")
            }
        except KeyError as key:
            LOG.error(f"Required key «{key}» missing from config {config}")
            raise key from None

        dump_ndjson(_parse(**kwargs))
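A sketch of how the multi-document <config.yaml> from the docstring splits into per-sheet configurations (workbook names shortened for illustration); each document becomes one dict handed on to _parse():

import io
import yaml

config_file = io.StringIO("""\
---
workbook: prospective-samples.xlsx
sheet: HMC
sample_column: "Barcode ID*"
---
workbook: retrospective-samples.xlsx
sheet: HMC
sample_column: "Barcode ID*"
""")

for config in yaml.safe_load_all(config_file):
    print(config["workbook"], config["sheet"], config["sample_column"])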
Example #39
    def __init__(self, k8s_kind=None, *args, **kwargs):
        self.client = None
        self.warnings = []

        mutually_exclusive = [
            ('resource_definition', 'src'),
            ('merge_type', 'apply'),
        ]

        KubernetesAnsibleModule.__init__(self, *args,
                                         mutually_exclusive=mutually_exclusive,
                                         supports_check_mode=True,
                                         **kwargs)
        self.kind = k8s_kind or self.params.get('kind')
        self.api_version = self.params.get('api_version')
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')
        resource_definition = self.params.get('resource_definition')
        validate = self.params.get('validate')
        if validate:
            if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
                self.fail_json(msg="openshift >= 0.8.0 is required for validate")
        self.append_hash = self.params.get('append_hash')
        if self.append_hash:
            if not HAS_K8S_CONFIG_HASH:
                self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
                               exception=K8S_CONFIG_HASH_IMP_ERR)
        if self.params['merge_type']:
            if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
                self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
        if self.params.get('apply') is not None:
            if LooseVersion(self.openshift_version) < LooseVersion("0.9.0"):
                self.fail_json(msg=missing_required_lib("openshift >= 0.9.0", reason="for apply"))
            self.apply = self.params['apply']
        else:
            self.apply = LooseVersion(self.openshift_version) >= LooseVersion("0.9.0")

        if resource_definition:
            if isinstance(resource_definition, string_types):
                try:
                    self.resource_definitions = yaml.safe_load_all(resource_definition)
                except (IOError, yaml.YAMLError) as exc:
                    self.fail(msg="Error loading resource_definition: {0}".format(exc))
            elif isinstance(resource_definition, list):
                self.resource_definitions = resource_definition
            else:
                self.resource_definitions = [resource_definition]
        src = self.params.get('src')
        if src:
            self.resource_definitions = self.load_resource_definitions(src)
        try:
            self.resource_definitions = [item for item in self.resource_definitions if item]
        except AttributeError:
            pass

        if not resource_definition and not src:
            implicit_definition = dict(
                kind=self.kind,
                apiVersion=self.api_version,
                metadata=dict(name=self.name)
            )
            if self.namespace:
                implicit_definition['metadata']['namespace'] = self.namespace
            self.resource_definitions = [implicit_definition]
Example #40
def main():
    module = AnsibleModule(
        argument_spec=dict(
            http_agent=dict(type='str', default=USER_AGENT),
            url_username=dict(type='str',
                              default='admin',
                              aliases=['username']),
            url_password=dict(type='str',
                              default='',
                              no_log=True,
                              aliases=['password']),
            force_basic_auth=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=False),
            certificate_authority_data=dict(type='str'),
            insecure=dict(type='bool', default=False),
            api_endpoint=dict(type='str', required=True),
            patch_operation=dict(
                type='str',
                default='Strategic Merge Patch',
                aliases=['patch_strategy'],
                choices=['JSON Patch', 'Merge Patch',
                         'Strategic Merge Patch']),
            file_reference=dict(type='str'),
            inline_data=dict(type='str'),
            state=dict(type='str',
                       default='present',
                       choices=['absent', 'present', 'replace', 'update'])),
        mutually_exclusive=(('file_reference', 'inline_data'),
                            ('url_username', 'insecure'), ('url_password',
                                                           'insecure')),
        required_one_of=(('file_reference', 'inline_data')),
    )

    if not HAS_LIB_YAML:
        module.fail_json(msg="missing python library: yaml")

    decode_cert_data(module)

    api_endpoint = module.params.get('api_endpoint')
    state = module.params.get('state')
    insecure = module.params.get('insecure')
    inline_data = module.params.get('inline_data')
    file_reference = module.params.get('file_reference')
    patch_operation = module.params.get('patch_operation')

    if inline_data:
        if not isinstance(inline_data, dict) and not isinstance(
                inline_data, list):
            data = yaml.safe_load(inline_data)
        else:
            data = inline_data
    else:
        try:
            f = open(file_reference, "r")
            data = [x for x in yaml.safe_load_all(f)]
            f.close()
            if not data:
                module.fail_json(msg="No valid data could be found.")
        except:
            module.fail_json(
                msg=
                "The file '%s' was not found or contained invalid YAML/JSON data"
                % file_reference)

    # set the transport type and build the target endpoint url
    transport = 'https'
    if insecure:
        transport = 'http'

    target_endpoint = "%s://%s" % (transport, api_endpoint)

    body = []
    changed = False

    # make sure the data is a list
    if not isinstance(data, list):
        data = [data]

    for item in data:
        namespace = "default"
        if item and 'metadata' in item:
            namespace = item.get('metadata', {}).get('namespace', "default")
            kind = item.get('kind', '').lower()
            try:
                url = target_endpoint + KIND_URL[kind]
            except KeyError:
                module.fail_json(
                    msg="invalid resource kind specified in the data: '%s'" %
                    kind)
            url = url.replace("{namespace}", namespace)
        else:
            url = target_endpoint

        if state == 'present':
            item_changed, item_body = k8s_create_resource(module, url, item)
        elif state == 'absent':
            item_changed, item_body = k8s_delete_resource(module, url, item)
        elif state == 'replace':
            item_changed, item_body = k8s_replace_resource(module, url, item)
        elif state == 'update':
            item_changed, item_body = k8s_update_resource(
                module, url, item, patch_operation)

        changed |= item_changed
        body.append(item_body)

    module.exit_json(changed=changed, api_response=body)
Example #41
    def generate_deployment_yaml(
        token: str = None,
        api: str = None,
        namespace: str = None,
        image_pull_secrets: str = None,
        resource_manager_enabled: bool = False,
        rbac: bool = False,
        latest: bool = False,
        mem_request: str = None,
        mem_limit: str = None,
        cpu_request: str = None,
        cpu_limit: str = None,
        image_pull_policy: str = None,
        service_account_name: str = None,
        labels: Iterable[str] = None,
        env_vars: dict = None,
        backend: str = None,
    ) -> str:
        """
        Generate and output an installable YAML spec for the agent.

        Args:
            - token (str, optional): A `RUNNER` token to give the agent
            - api (str, optional): A URL pointing to the Prefect API. Defaults to
                `https://api.prefect.io`
            - namespace (str, optional): The namespace to create Prefect jobs in. Defaults
                to `default`
            - image_pull_secrets (str, optional): The name of an image pull secret to use
                for Prefect jobs
            - resource_manager_enabled (bool, optional): Whether to include the resource
                manager as part of the YAML. Defaults to `False`
            - rbac (bool, optional): Whether to include default RBAC configuration as
                part of the YAML. Defaults to `False`
            - latest (bool, optional): Whether to use the `latest` Prefect image.
                Defaults to `False`
            - mem_request (str, optional): Requested memory for Prefect init job.
            - mem_limit (str, optional): Limit memory for Prefect init job.
            - cpu_request (str, optional): Requested CPU for Prefect init job.
            - cpu_limit (str, optional): Limit CPU for Prefect init job.
            - image_pull_policy (str, optional): imagePullPolicy to use for Prefect init job.
                Job defaults to `IfNotPresent`.
            - service_account_name (str, optional): Name of a service account to use for
                Prefect init job. Job defaults to using `default` service account.
            - labels (List[str], optional): a list of labels, which are arbitrary string
                identifiers used by Prefect Agents when polling for work
            - env_vars (dict, optional): additional environment variables to attach to all
                jobs created by this agent and to set in the agent's own environment
            - backend (str, optional): toggle which backend to use for this agent.
                Defaults to backend currently set in config.

        Returns:
            - str: A string representation of the generated YAML
        """

        # Use defaults if not provided
        token = token or ""
        api = api or "https://api.prefect.io"
        namespace = namespace or "default"
        labels = labels or []
        mem_request = mem_request or ""
        mem_limit = mem_limit or ""
        cpu_request = cpu_request or ""
        cpu_limit = cpu_limit or ""
        image_pull_policy = image_pull_policy or ""
        service_account_name = service_account_name or ""
        backend = backend or config.backend

        version = prefect.__version__.split("+")
        image_version = ("latest" if len(version) > 1 or latest else
                         (version[0] + "-python3.6"))

        with open(path.join(path.dirname(__file__), "deployment.yaml"),
                  "r") as deployment_file:
            deployment = yaml.safe_load(deployment_file)

        agent_env = deployment["spec"]["template"]["spec"]["containers"][0][
            "env"]

        # Populate env vars
        agent_env[0]["value"] = token
        agent_env[1]["value"] = api
        agent_env[2]["value"] = namespace
        agent_env[3]["value"] = image_pull_secrets or ""
        agent_env[4]["value"] = str(labels)
        agent_env[11]["value"] = backend

        # Populate job resource env vars
        agent_env[5]["value"] = mem_request
        agent_env[6]["value"] = mem_limit
        agent_env[7]["value"] = cpu_request
        agent_env[8]["value"] = cpu_limit
        agent_env[9]["value"] = image_pull_policy
        agent_env[10]["value"] = service_account_name

        if env_vars:
            for k, v in env_vars.items():
                agent_env.append({
                    "name": f"PREFECT__CLOUD__AGENT__ENV_VARS__{k}",
                    "value": v
                })

        # Use local prefect version for image
        deployment["spec"]["template"]["spec"]["containers"][0][
            "image"] = "prefecthq/prefect:{}".format(image_version)

        # Populate resource manager if requested
        if resource_manager_enabled:
            resource_manager_env = deployment["spec"]["template"]["spec"][
                "containers"][1]["env"]

            resource_manager_env[0]["value"] = token
            resource_manager_env[1]["value"] = api
            resource_manager_env[3]["value"] = namespace

            # Use local prefect version for image
            deployment["spec"]["template"]["spec"]["containers"][1][
                "image"] = "prefecthq/prefect:{}".format(image_version)
        else:
            del deployment["spec"]["template"]["spec"]["containers"][1]

        # Populate image pull secrets if provided
        if image_pull_secrets:
            deployment["spec"]["template"]["spec"][
                "imagePullSecrets"][0]["name"] = image_pull_secrets
        else:
            del deployment["spec"]["template"]["spec"]["imagePullSecrets"]

        # Load RBAC if specified
        rbac_yaml = []
        if rbac:
            with open(path.join(path.dirname(__file__), "rbac.yaml"),
                      "r") as rbac_file:
                rbac_generator = yaml.safe_load_all(rbac_file)

                for document in rbac_generator:
                    document["metadata"]["namespace"] = namespace
                    rbac_yaml.append(document)

        output_yaml = [deployment]
        output_yaml.extend(rbac_yaml)
        return yaml.safe_dump_all(output_yaml, explicit_start=True)
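Since the function returns `safe_dump_all(..., explicit_start=True)`, the result is a `---`-separated multi-document stream that `yaml.safe_load_all` can consume straight back. A quick round-trip sanity check (object names here are placeholders):

import yaml

objects = [{'kind': 'Deployment', 'metadata': {'name': 'prefect-agent'}},
           {'kind': 'ClusterRole', 'metadata': {'name': 'prefect-agent-rbac'}}]
text = yaml.safe_dump_all(objects, explicit_start=True)
assert list(yaml.safe_load_all(text)) == objects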
Example #42
    def data(self):
        # On the first call to data, read the YAML document; otherwise return the previously cached data
        if not self._data:
            with open(self.yaml, 'rb') as f:
                self._data = list(yaml.safe_load_all(f))  # collect the documents into a list
        return self._data
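This method clearly lives inside a small helper class that lazily caches the parsed documents. A minimal, self-contained sketch of that class under assumed names (`YamlReader` and its constructor argument are not part of the original snippet):

import yaml

class YamlReader:
    def __init__(self, yaml_path):
        self.yaml = yaml_path  # path to a (possibly multi-document) YAML file
        self._data = []        # filled lazily on the first call to data()

    def data(self):
        # On the first call, parse the YAML file; afterwards return the cached list
        if not self._data:
            with open(self.yaml, 'rb') as f:
                self._data = list(yaml.safe_load_all(f))
        return self._data

# e.g. cases = YamlReader('testdata/cases.yml').data()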
Example #43
def main(ctx):
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)

    misc.read_config(ctx)

    ret = 0
    user = ctx.owner
    machines = [
        misc.canonicalize_hostname(m, user=False) for m in ctx.machines
    ]
    machines_to_update = []

    if ctx.targets:
        try:
            with file(ctx.targets) as f:
                g = yaml.safe_load_all(f)
                for new in g:
                    if 'targets' in new:
                        for t in new['targets'].iterkeys():
                            machines.append(t)
        except IOError as e:
            raise argparse.ArgumentTypeError(str(e))

    if ctx.f:
        assert ctx.lock or ctx.unlock, \
            '-f is only supported by --lock and --unlock'
    if machines:
        assert ctx.lock or ctx.unlock or ctx.list or ctx.list_targets \
            or ctx.update or ctx.brief, \
            'machines cannot be specified with that operation'
    else:
        if ctx.lock:
            log.error("--lock requires specific machines passed as arguments")
        else:
            # This condition might never be hit, but it's not clear.
            assert ctx.num_to_lock or ctx.list or ctx.list_targets or \
                ctx.summary or ctx.brief, \
                'machines must be specified for that operation'
    if ctx.all:
        assert ctx.list or ctx.list_targets or ctx.brief, \
            '--all can only be used with --list, --list-targets, and --brief'
        assert ctx.owner is None, \
            '--all and --owner are mutually exclusive'
        assert not machines, \
            '--all and listing specific machines are incompatible'
    if ctx.num_to_lock:
        assert ctx.machine_type, \
            'must specify machine type to lock'

    if ctx.brief or ctx.list or ctx.list_targets:
        assert ctx.desc is None, '--desc does nothing with --list/--brief'

        # we may need to update host keys for vms.  Don't do it for
        # every vm; however, update any vms included in the list given
        # to the CLI (machines), or any owned by the specified owner or
        # invoking user if no machines are specified.
        vmachines = []
        statuses = get_statuses(machines)
        owner = ctx.owner or misc.get_user()
        for machine in statuses:
            if machine['is_vm'] and machine['locked'] and \
               (machines or machine['locked_by'] == owner):
                vmachines.append(machine['name'])
        if vmachines:
            log.info("updating host keys for %s", ' '.join(sorted(vmachines)))
            do_update_keys(vmachines)
            # get statuses again to refresh any updated keys
            statuses = get_statuses(machines)
        if statuses:
            statuses = winnow(statuses, ctx.machine_type, 'machine_type')
            if not machines and ctx.owner is None and not ctx.all:
                ctx.owner = misc.get_user()
            statuses = winnow(statuses, ctx.owner, 'locked_by')
            statuses = winnow(statuses, ctx.status, 'up', lambda s: s['up'] ==
                              (ctx.status == 'up'))
            statuses = winnow(statuses, ctx.locked, 'locked',
                              lambda s: s['locked'] == (ctx.locked == 'true'))
            statuses = winnow(statuses, ctx.desc, 'description')
            statuses = winnow(statuses, ctx.desc_pattern, 'description',
                              lambda s: s['description'] and \
                                        ctx.desc_pattern in s['description'])
            if ctx.json_query:
                statuses = json_matching_statuses(ctx.json_query, statuses)
            statuses = winnow(statuses, ctx.os_type, 'os_type')
            statuses = winnow(statuses, ctx.os_version, 'os_version')

            # When listing, only show the vm_host's name, not every detail
            for s in statuses:
                if not s.get('is_vm', False):
                    continue
                # with an OpenStack API, there is no host for a VM
                if s['vm_host'] is None:
                    continue
                vm_host_name = s.get('vm_host', dict())['name']
                if vm_host_name:
                    s['vm_host'] = vm_host_name
            if ctx.list:
                print json.dumps(statuses, indent=4)

            elif ctx.brief:
                for s in sorted(statuses, key=lambda s: s.get('name')):
                    locked = "un" if s['locked'] == 0 else "  "
                    mo = re.match('\w+@(\w+?)\..*', s['name'])
                    host = mo.group(1) if mo else s['name']
                    print '{host} {locked}locked {owner} "{desc}"'.format(
                        locked=locked,
                        host=host,
                        owner=s['locked_by'],
                        desc=s['description'])

            else:
                frag = {'targets': {}}
                for f in statuses:
                    frag['targets'][f['name']] = f['ssh_pub_key']
                print yaml.safe_dump(frag, default_flow_style=False)
        else:
            log.error('error retrieving lock statuses')
            ret = 1

    elif ctx.summary:
        do_summary(ctx)
        return 0

    elif ctx.lock:
        if not vps_version_or_type_valid(ctx.machine_type, ctx.os_type,
                                         ctx.os_version):
            log.error('Invalid os-type or version detected -- lock failed')
            return 1
        for machine in machines:
            if not lock_one(machine, user, ctx.desc):
                ret = 1
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
                provision.create_if_vm(ctx, machine)
    elif ctx.unlock:
        if ctx.owner is None and user is None:
            user = misc.get_user()
        # If none of them are vpm, do them all in one shot
        if not filter(is_vm, machines):
            res = unlock_many(machines, user)
            return 0 if res else 1
        for machine in machines:
            if not unlock_one(ctx, machine, user):
                ret = 1
                if not ctx.f:
                    return ret
            else:
                machines_to_update.append(machine)
    elif ctx.num_to_lock:
        result = lock_many(ctx, ctx.num_to_lock, ctx.machine_type, user,
                           ctx.desc, ctx.os_type, ctx.os_version)
        if not result:
            ret = 1
        else:
            machines_to_update = result.keys()
            if ctx.machine_type == 'vps':
                shortnames = ' '.join([
                    misc.decanonicalize_hostname(name)
                    for name in result.keys()
                ])
                if len(result) < ctx.num_to_lock:
                    log.error("Locking failed.")
                    for machine in result:
                        unlock_one(ctx, machine, user)
                    ret = 1
                else:
                    log.info("Successfully Locked:\n%s\n" % shortnames)
                    log.info("Unable to display keys at this time (virtual " +
                             "machines are booting).")
                    log.info(
                        "Please run teuthology-lock --list-targets %s once " +
                        "these machines come up.", shortnames)
            else:
                print yaml.safe_dump(dict(targets=result),
                                     default_flow_style=False)
    elif ctx.update:
        assert ctx.desc is not None or ctx.status is not None, \
            'you must specify description or status to update'
        assert ctx.owner is None, 'only description and status may be updated'
        machines_to_update = machines

        if ctx.desc is not None or ctx.status is not None:
            for machine in machines_to_update:
                update_lock(machine, ctx.desc, ctx.status)

    return ret
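For context, the `--targets` file consumed near the top of main() has the same shape as the `targets:` mapping the command prints at the end: hostnames mapped to SSH public keys. A hedged sketch of reading such a file with `safe_load_all` (the host names and key values are made up):

import yaml

targets_yaml = """\
targets:
  ubuntu@host1.example.com: ssh-rsa AAAAB3Nza...
  ubuntu@host2.example.com: ssh-rsa AAAAB3Nza...
"""
machines = []
for doc in yaml.safe_load_all(targets_yaml):
    if 'targets' in doc:
        machines.extend(doc['targets'].keys())
print(machines)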
Example #44
def test_files_contain_allowlisted_keys():
    for filepath in get_markdown_files():
        with open(filepath, 'r') as stream:
            docs = yaml.safe_load_all(stream)
            for doc in filter(None, docs):
                validate_document_has_allowlisted_keys(doc, filepath)
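One detail worth noting: `safe_load_all` yields `None` for empty documents (for example, nothing between two `---` markers), which is exactly what the `filter(None, docs)` guard above skips. A small demonstration:

import yaml

docs = list(yaml.safe_load_all("---\n---\ntitle: Example\n"))
print(docs)                      # [None, {'title': 'Example'}]
print(list(filter(None, docs)))  # [{'title': 'Example'}]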
Example #45
    def generate_deployment_yaml(
        token: str = None,
        api: str = None,
        namespace: str = None,
        image_pull_secrets: str = None,
        resource_manager_enabled: bool = False,
        rbac: bool = False,
        latest: bool = False,
        mem_request: str = None,
        mem_limit: str = None,
        cpu_request: str = None,
        cpu_limit: str = None,
        labels: Iterable[str] = None,
    ) -> str:
        """
        Generate and output an installable YAML spec for the agent.

        Args:
            - token (str, optional): A `RUNNER` token to give the agent
            - api (str, optional): A URL pointing to the Prefect API. Defaults to
                `https://api.prefect.io`
            - namespace (str, optional): The namespace to create Prefect jobs in. Defaults
                to `default`
            - image_pull_secrets (str, optional): The name of an image pull secret to use
                for Prefect jobs
            - resource_manager_enabled (bool, optional): Whether to include the resource
                manager as part of the YAML. Defaults to `False`
            - rbac (bool, optional): Whether to include default RBAC configuration as
                part of the YAML. Defaults to `False`
            - latest (bool, optional): Whether to use the `latest` Prefect image.
                Defaults to `False`
            - mem_request (str, optional): Requested memory for Prefect init job.
            - mem_limit (str, optional): Limit memory for Prefect init job.
            - cpu_request (str, optional): Requested CPU for Prefect init job.
            - cpu_limit (str, optional): Limit CPU for Prefect init job.
            - labels (List[str], optional): a list of labels, which are arbitrary string
                identifiers used by Prefect Agents when polling for work

        Returns:
            - str: A string representation of the generated YAML
        """

        # Use defaults if not provided
        token = token or ""
        api = api or "https://api.prefect.io"
        namespace = namespace or "default"
        labels = labels or []
        mem_request = mem_request or ""
        mem_limit = mem_limit or ""
        cpu_request = cpu_request or ""
        cpu_limit = cpu_limit or ""

        version = prefect.__version__.split("+")
        image_version = ("latest" if len(version) > 1 or latest else
                         (version[0] + "-python3.6"))

        with open(path.join(path.dirname(__file__), "deployment.yaml"),
                  "r") as deployment_file:
            deployment = yaml.safe_load(deployment_file)

        agent_env = deployment["spec"]["template"]["spec"]["containers"][0][
            "env"]

        # Populate env vars
        agent_env[0]["value"] = token
        agent_env[1]["value"] = api
        agent_env[2]["value"] = namespace
        agent_env[3]["value"] = image_pull_secrets or ""
        agent_env[4]["value"] = str(labels)

        # Populate job resource env vars
        agent_env[5]["value"] = mem_request
        agent_env[6]["value"] = mem_limit
        agent_env[7]["value"] = cpu_request
        agent_env[8]["value"] = cpu_limit

        # Use local prefect version for image
        deployment["spec"]["template"]["spec"]["containers"][0][
            "image"] = "prefecthq/prefect:{}".format(image_version)

        # Populate resource manager if requested
        if resource_manager_enabled:
            resource_manager_env = deployment["spec"]["template"]["spec"][
                "containers"][1]["env"]

            resource_manager_env[0]["value"] = token
            resource_manager_env[1]["value"] = api
            resource_manager_env[3]["value"] = namespace

            # Use local prefect version for image
            deployment["spec"]["template"]["spec"]["containers"][1][
                "image"] = "prefecthq/prefect:{}".format(image_version)
        else:
            del deployment["spec"]["template"]["spec"]["containers"][1]

        # Populate image pull secrets if provided
        if image_pull_secrets:
            deployment["spec"]["template"]["spec"][
                "imagePullSecrets"][0]["name"] = image_pull_secrets
        else:
            del deployment["spec"]["template"]["spec"]["imagePullSecrets"]

        # Load RBAC if specified
        rbac_yaml = []
        if rbac:
            with open(path.join(path.dirname(__file__), "rbac.yaml"),
                      "r") as rbac_file:
                rbac_generator = yaml.safe_load_all(rbac_file)

                for document in rbac_generator:
                    document["metadata"]["namespace"] = namespace
                    rbac_yaml.append(document)

        output_yaml = [deployment]
        output_yaml.extend(rbac_yaml)
        return yaml.safe_dump_all(output_yaml, explicit_start=True)
Example #46
def compile_calendar():
    # Obtain our stored sequences
    with open('_compile-calendar-sequences.yml', encoding='utf-8') as f:
        seminar_calendar_sequences = yaml.safe_load(f)['sequences']
    # Iterate over all our seminar files
    seminar_paths = [
        os.path.normpath(seminar_file_entry.path)
        for seminar_file_entry
        in os.scandir('_seminars')
        if seminar_file_entry.is_file() and
           os.path.normpath(seminar_file_entry.path) != os.path.normpath('_seminars/_template.md')
    ]

    # Maintain the sequence field for each seminar
    for seminar_path_current in seminar_paths:
        # Get the hash and sequence from the file, to compare against our stored data
        with open(seminar_path_current, encoding='utf-8') as f:
            hash = hashlib.md5()
            for line in f:
                hash.update(line.strip().encode(encoding='utf-8'))
            seminar_hash_current = hash.hexdigest()
        with open(seminar_path_current, encoding='utf-8') as f:
            seminar_sequence_current = list(yaml.safe_load_all(f))[0]['sequence']

        # Regardless of platform we're on, standardize the path we store (e.g., slashes)
        seminar_path_stored = posixpath.join(*os.path.normpath(seminar_path_current).split(os.sep))
        if seminar_path_stored not in seminar_calendar_sequences:
            # This is a seminar that is new to our sequence tracking,
            # decrement the initial sequence so it will be hashed and incremented
            seminar_calendar_sequences[seminar_path_stored] = {
                'hash': 'invalid_hash_to_force_update',
                'sequence': seminar_sequence_current - 1
            }

        seminar_hash_stored = seminar_calendar_sequences[seminar_path_stored]['hash']
        seminar_sequence_stored = seminar_calendar_sequences[seminar_path_stored]['sequence']
        if seminar_hash_current != seminar_hash_stored:
            # # Change detected, we need to bump the sequence
            # seminar_sequence_current = max(seminar_sequence_current, seminar_sequence_stored) + 1
            #
            # # pyyaml does not preserve comments, so we brute force modification of the seminar yml file
            # with open(seminar_path_current, encoding='utf-8') as f:
            #     seminar_contents = f.read()
            # seminar_contents = re.sub(
            #     'sequence: {}'.format('(\d*)'),
            #     'sequence: {}'.format(seminar_sequence_current),
            #     seminar_contents
            # )
            # with open(seminar_path_current, 'w', encoding='utf-8') as f:
            #     f.write(seminar_contents)
            #
            # # That changed the file, so update our hash, then store the updated sequence
            # with open(seminar_path_current, 'rb') as f:
            #     seminar_hash_current = hashlib.md5(f.read()).hexdigest()
            #
            # seminar_calendar_sequences[seminar_path_stored] = {
            #     'hash': seminar_hash_current,
            #     'sequence': seminar_sequence_current
            # }

            if seminar_sequence_current > seminar_sequence_stored:
                seminar_calendar_sequences[seminar_path_stored] = {
                    'hash': seminar_hash_current,
                    'sequence': seminar_sequence_current
                }

    # Store our updated sequences
    data = {'sequences': seminar_calendar_sequences}
    with open('_compile-calendar-sequences.yml', 'w', encoding='utf-8') as f:
        yaml.dump(
            data,
            stream=f,
            default_flow_style=False
        )

    # Now generate the ics file from our seminars and their sequences
    ics = icalendar.Calendar()
    ics.add('PRODID', '-//DUB//DUB Calendar')
    ics.add('VERSION', '2.0')
    ics.add('CALSCALE','GREGORIAN')
    ics.add('METHOD', 'PUBLISH')
    ics.add('X-WR-CALNAME', 'DUB Calendar')
    ics.add('X-WR-TIMEZONE', 'America/Los_Angeles')
    ics.add('X-WR-CALDESC', 'Calendar of DUB seminars.')

    for seminar_path_current in seminar_paths:
        # Parse the seminar
        with open(seminar_path_current, encoding='utf-8') as f:
            seminar_contents = list(yaml.safe_load_all(f))[0]

        # Add seminar as calendar event
        ics_event = icalendar.Event()

        # Give it a UID
        ics_event.add(
            'UID',
            seminar_contents['date'].strftime('%Y-%m-%d') + '@dub.washington.edu'
        )

        # Give it DTSTART and DTEND
        timezone = pytz.timezone('America/Los_Angeles')
        seminar_start_time = timezone.localize(datetime.combine(
            seminar_contents['date'],
            datetime.strptime(seminar_contents['time'], '%I:%M %p').time()
        ))
        seminar_end_time = timezone.localize(datetime.combine(
            seminar_contents['date'],
            datetime.strptime(seminar_contents['time_end'], '%I:%M %p').time()
        ))
        ics_event.add('DTSTART', seminar_start_time)
        ics_event.add('DTEND', seminar_end_time)

        # Generate SUMMARY from applicable components
        if seminar_contents.get('no_seminar', False):
            # Flagged as not having a seminar

            # Title should indicate why there's no seminar
            seminar_summary = seminar_contents['title']
        else:
            # We have a seminar, but its data may not yet be complete
            seminar_summary = 'DUB Seminar'

            if not seminar_contents.get('tbd_speakers', False):
                # We have speakers

                # If they all have the same affiliation, we'll collapse it, so let's check
                speaker_affiliations = [
                    speaker_current['affiliation'] if not speaker_current.get('affiliation_none', False) else 'affiliation_none'
                    for speaker_current
                    in seminar_contents['speakers']
                ]
                if len(set(speaker_affiliations)) == 1:
                    # Everybody has the same affiliation
                    # But it's still possible that affiliation is affiliation_none
                    if not seminar_contents['speakers'][0].get('affiliation_none', False):
                        # We have a legit affiliation
                        seminar_summary = '{} - {} ({})'.format(
                            seminar_summary,
                            ', '.join(
                                [
                                    ' '.join(speaker_current['name'][1:] + [speaker_current['name'][0]])
                                    for speaker_current
                                    in seminar_contents['speakers']
                                ]
                            ),
                            seminar_contents['speakers'][0]['affiliation']
                        )
                    else:
                        # Everybody has no affiliation
                        seminar_summary = '{} - {}'.format(
                            seminar_summary,
                            ', '.join(
                                [
                                    ' '.join(speaker_current['name'][1:] + [speaker_current['name'][0]])
                                    for speaker_current
                                    in seminar_contents['speakers']
                                ]
                            )
                        )
                else:
                    # Distinct affiliations
                    seminar_summary = '{} - {}'.format(
                        seminar_summary,
                        ', '.join(
                            [
                                '{}{}'.format(
                                    ' '.join(speaker_current['name'][1:] + [speaker_current['name'][0]]),
                                    '({})'.format(speaker_current['affiliation']) if not speaker_current.get('affiliation_none', False) else ''
                                )
                                for speaker_current
                                in seminar_contents['speakers']
                            ]
                        )
                    )

            if not seminar_contents.get('tbd_title', False):
                # We have a title
                seminar_summary = '{} - "{}"'.format(
                    seminar_summary,
                    seminar_contents['title']
                )

        ics_event.add('SUMMARY', seminar_summary)

        # Add the location unless it has an override
        if not seminar_contents.get('no_seminar', False):
            if seminar_contents.get('location_override_calendar', False):
                ics_event.add('LOCATION', seminar_contents['location_override_calendar'])
            else:
                ics_event.add('LOCATION', seminar_contents['location'])

        # This description generation is still a bit sketchy, should decide what we want
        #
        # Generate description string from applicable components
        if not seminar_contents.get('no_seminar', False):
            description_string = ''
            if 'text_override_seminar_page' in seminar_contents:
                description_string = seminar_contents['text_override_seminar_page']
            else:
                if 'tbd_bio' not in seminar_contents:
                    if 'tbd_abstract' not in seminar_contents:
                        description_string = seminar_contents['abstract'] + '\r\n' + seminar_contents['bio']
                    else:
                        description_string = seminar_contents['bio']
                elif 'tbd_abstract' not in seminar_contents:
                    description_string = seminar_contents['abstract']

            # Parse description as markdown
            class SensibleParagraphs(markdown.extensions.Extension):
                def extendMarkdown(self, md, md_globals):
                    br_tag = markdown.inlinepatterns.SubstituteTagPattern(r'\n', None)
                    md.inlinePatterns.add('nl', br_tag, '_end')

            ics_event.add('DESCRIPTION', markdown.markdown(description_string, extensions=[SensibleParagraphs()]))

        # That's our complete event
        ics.add_component(ics_event)

    # Store the ics file output
    with open('calendar.ics', 'wb') as f:
        f.write(ics.to_ical())
Example #47
def compile_calendar_increment_all_sequences():
    # Obtain our stored sequences
    with open('_compile-calendar-sequences.yml', encoding='utf-8') as f:
        seminar_calendar_sequences = yaml.safe_load(f)['sequences']

    # Iterate over all our seminar files
    seminar_paths = [
        os.path.normpath(seminar_file_entry.path)
        for seminar_file_entry
        in os.scandir('_seminars')
        if seminar_file_entry.is_file() and
           os.path.normpath(seminar_file_entry.path) != os.path.normpath('_seminars/_template.md')
    ]

    for seminar_path_current in seminar_paths:
        # Get the hash and sequence from the file
        with open(seminar_path_current, encoding='utf-8') as f:
            hash = hashlib.md5()
            for line in f:
                hash.update(line.strip().encode(encoding='utf-8'))
            seminar_hash_current = hash.hexdigest()
        with open(seminar_path_current, encoding='utf-8') as f:
            seminar_sequence_current = list(yaml.safe_load_all(f))[0]['sequence']

        # Regardless of platform we're on, standardize the path we store (e.g., slashes)
        seminar_path_stored = posixpath.join(*os.path.normpath(seminar_path_current).split(os.sep))

        if seminar_path_stored not in seminar_calendar_sequences:
            seminar_calendar_sequences[seminar_path_stored] = {
                'hash': seminar_hash_current,
                'sequence': seminar_sequence_current
            }

        seminar_hash_stored = seminar_calendar_sequences[seminar_path_stored]['hash']
        seminar_sequence_stored = seminar_calendar_sequences[seminar_path_stored]['sequence']

        # Bump the sequence
        seminar_sequence_current = max(seminar_sequence_current, seminar_sequence_stored) + 1

        # pyyaml does not preserve comments, so we brute force modification of the seminar yml file
        with open(seminar_path_current, encoding='utf-8') as f:
            seminar_contents = f.read()
        seminar_contents = re.sub(
            'sequence: {}'.format('(\d*)'),
            'sequence: {}'.format(seminar_sequence_current),
            seminar_contents
        )
        with open(seminar_path_current, 'w', encoding='utf-8') as f:
            f.write(seminar_contents)

        # That changed the file, so update our hash, then store the updated sequence
        with open(seminar_path_current, encoding='utf-8') as f:
            hash = hashlib.md5()
            for line in f:
                hash.update(line.strip().encode(encoding='utf-8'))
            seminar_hash_current = hash.hexdigest()

        seminar_calendar_sequences[seminar_path_stored] = {
            'hash': seminar_hash_current,
            'sequence': seminar_sequence_current
        }

    # Store our updated sequences
    data = {'sequences': seminar_calendar_sequences}
    with open('_compile-calendar-sequences.yml', 'w', encoding='utf-8') as f:
        yaml.dump(
            data,
            stream=f,
            default_flow_style=False
        )
Example #48
File: toc-depth.py  Project: LeeHowes/CPP
#!/usr/bin/env python3

import sys
import yaml

frontmatter = next(yaml.safe_load_all(sys.stdin))
toc_depth = frontmatter.get('toc-depth')
if toc_depth is not None:
    print(toc_depth)
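Presumably this runs as a small filter with the Markdown document piped to stdin. Because `safe_load_all` is a lazy generator, calling `next()` parses only the leading front-matter block. A minimal, self-contained illustration of that behaviour:

import io
import yaml

doc = io.StringIO("---\ntoc-depth: 2\n---\nBody text that the YAML parser never materializes.\n")
frontmatter = next(yaml.safe_load_all(doc))  # consumes only the first document
print(frontmatter.get('toc-depth'))          # -> 2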
Example #49
# Check whether the deployment was given an argument other than 'all'
# If it is 'all', every customer will be deployed
if sys.argv[1] == 'all':
    limit = None
else:
    limit = sys.argv[1]

for customers in customer_files:
    # Set the customer name to the filename without its extension
    customer_name = os.path.basename(customers)[:-4]
    if limit is not None:
        # Check whether a limit was passed to the deployment
        if limit == customer_name:
            with open(customers, 'r') as customer_file:
                # Read the contents of the customer file
                customer_content = yaml.safe_load_all(customer_file)
                for content in customer_content:
                    # Add the customer data to a combined dictionary
                    all_customers[customer_name] = content
    else:
        with open(customers, 'r') as customer_file:
            # Read the contents of the customer file
            customer_content = yaml.safe_load_all(customer_file)
            for content in customer_content:
                # Add the customer data to a combined dictionary
                all_customers[customer_name] = content

# Output the contents of the combined dictionary
print(json.dumps(all_customers))
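The snippet assumes some setup that is not shown: `customer_files` and `all_customers` must already exist, along with the usual imports. A hedged sketch of that preamble (the directory name is hypothetical):

import glob
import json
import os
import sys

import yaml

# Hypothetical location of the per-customer YAML files
customer_files = glob.glob('customers/*.yml')
all_customers = {}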
Example #50
    csv['spec']['displayName'] = operator_name
    csv['spec']['description'] = "SRE operator - " + operator_name
    csv['spec']['version'] = operator_version

    csv['spec']['install']['spec']['clusterPermissions'] = []

    SA_NAME = operator_name
    clusterrole_names_csv = []

    for subdir, dirs, files in os.walk(operator_assets_dir):
        for file in files:
            file_path = subdir + os.sep + file

            # Parse each file and look for ClusterRoleBindings to the SA
            with open(file_path) as stream:
                yaml_file = yaml.safe_load_all(stream)
                for obj in yaml_file:
                    if obj['kind'] == 'ClusterRoleBinding':
                        for subject in obj['subjects']:
                            if subject['kind'] == 'ServiceAccount' and subject['name'] == SA_NAME:
                                clusterrole_names_csv.append(obj['roleRef']['name'])

    csv['spec']['install']['spec']['deployments'] = []
    csv['spec']['install']['spec']['deployments'].append({'spec':{}})

    for subdir, dirs, files in os.walk(operator_assets_dir):
        for file in files:
            file_path = subdir + os.sep + file
            # Parse files to manage clusterPermissions and deployments in csv
            with open(file_path) as stream:
                yaml_file = yaml.safe_load_all(stream)
Example #51
def create_from_yaml(k8s_client,
                     yaml_file=None,
                     yaml_objects=None,
                     verbose=False,
                     namespace="default",
                     **kwargs):
    """
    Perform an action from a yaml file. Pass True for verbose to
    print confirmation information.
    Input:
    yaml_file: string. Contains the path to yaml file.
    k8s_client: an ApiClient object, initialized with the client args.
    yaml_objects: List[dict]. Optional list of YAML objects; used instead
        of reading the `yaml_file`. Default is None.
    verbose: If True, print confirmation from the create action.
        Default is False.
    namespace: string. Contains the namespace to create all
        resources inside. The namespace must preexist otherwise
        the resource creation will fail. If the API object in
        the yaml file already contains a namespace definition
        this parameter has no effect.

    Available parameters for creating <kind>:
    :param async_req bool
    :param bool include_uninitialized: If true, partially initialized
        resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications
        should not be persisted. An invalid or unrecognized dryRun
        directive will result in an error response and no further
        processing of the request.
        Valid values are: - All: all dry run stages will be processed

    Returns:
        The created kubernetes API objects.

    Raises:
        FailToCreateError which holds list of `client.rest.ApiException`
        instances for each object that failed to create.
    """
    def create_with(objects):
        failures = []
        k8s_objects = []
        for yml_document in objects:
            if yml_document is None:
                continue
            try:
                created = create_from_dict(k8s_client,
                                           yml_document,
                                           verbose,
                                           namespace=namespace,
                                           **kwargs)
                k8s_objects.append(created)
            except FailToCreateError as failure:
                failures.extend(failure.api_exceptions)
        if failures:
            raise FailToCreateError(failures)
        return k8s_objects

    if yaml_objects:
        yml_document_all = yaml_objects
        return create_with(yml_document_all)
    elif yaml_file:
        with open(path.abspath(yaml_file)) as f:
            yml_document_all = yaml.safe_load_all(f)
            return create_with(yml_document_all)
    else:
        raise ValueError(
            'One of `yaml_file` or `yaml_objects` arguments must be provided')
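Assuming this is the `create_from_yaml` helper from the official Kubernetes Python client, a typical call looks roughly like this (the manifest path and namespace are placeholders):

from kubernetes import client, config, utils

config.load_kube_config()          # or config.load_incluster_config() inside a pod
k8s_client = client.ApiClient()

# Create every resource defined in the manifest inside the 'demo' namespace
utils.create_from_yaml(k8s_client, "manifests/app.yaml", namespace="demo", verbose=True)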