Example no. 1
def create_ag_services_yaml(env,
                            namespace,
                            ag,
                            ag_service_template=AG_SERVICE_TEMPLATE,
                            filepath=AG_SERVICES_YAML_FILENAME):
    with open(ag_service_template) as f:
        ag_service_yaml = AgServiceYaml(yaml.load(f.read()))
        if env is Environment.ON_PREM:
            ag_service_yaml_list = create_ag_services(
                "NodePort",
                ag_service_yaml.data,
                namespace=namespace,
                ag=ag,
                annotations=env.service_annotations)
        elif env is Environment.AKS:
            ag_service_yaml_list = create_ag_services(
                "LoadBalancer",
                ag_service_yaml.data,
                namespace=namespace,
                ag=ag,
                annotations=env.service_annotations)
        else:
            raise ValueError("Invalid env")
        ag_services_yaml_raw = [
            ags_yaml.data for ags_yaml in ag_service_yaml_list
        ]
        with open(filepath, "w") as ag_services_yaml_file:
            yaml.dump_all(ag_services_yaml_raw, ag_services_yaml_file)
        log(LogLevel.INFO, "ag-services YAML file:", filepath)
        return ag_service_yaml_list
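For reference, a minimal sketch of the same idea with invented data: yaml.dump_all writes one YAML document per list element, separating the documents with "---" markers, which is what produces the multi-service file above.

import yaml

# Minimal sketch (invented manifests): dump_all emits one document per item,
# inserting "---" between documents.
services = [
    {"apiVersion": "v1", "kind": "Service", "metadata": {"name": "ag1"}},
    {"apiVersion": "v1", "kind": "Service", "metadata": {"name": "ag2"}},
]
with open("ag-services.yaml", "w") as f:
    yaml.dump_all(services, f, default_flow_style=False)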
Example no. 2
def dump_example( yml_name ):
  """
  Example demonstrating how to dump several pieces of data into a YAML file.
  """

  # Create some slush data to put in the YAML file.

  # A single basic type.
  foo = "bar"

  # A built-in aggregated type.
  mammals = {}
  mammals["cat"]     = "Frisky"
  mammals["camel"]   = "Humpy"
  mammals["dolphin"] = "Flipper"

  # A single user-defined type.
  dumb = dummy.Dummy("First dummy!")

  # An aggregation of user-defined types. (uses list comprehension)
  dum_dums = [dummy.Dummy("Dum Dum %s" % x) for x in range(0,5)]

  # Open the YAML file for writing.
  yml = open( yml_name, 'w' )

  # Use the dump_all() method to write to the YAML file. The dump_all()
  # method takes a list or generator and will write all data to the
  # YAML file.
  data = [foo, mammals, dumb, dum_dums]
  yaml.dump_all(data, yml)
  yml.close()
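A companion sketch for reading such a file back (assumes PyYAML >= 5.1 and that dump_example was called with "example.yml"): load_all yields one Python object per document, and the user-defined Dummy objects carry !!python/object tags, so an unsafe loader is needed to rebuild them.

import yaml

# Companion sketch: iterate over every document in the multi-document file.
# The Dummy instances require the unsafe loader because of their Python tags.
with open("example.yml") as yml:
    for doc in yaml.load_all(yml, Loader=yaml.UnsafeLoader):
        print(doc)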
Example no. 3
def create_sqlservers_yaml(env,
                           namespace,
                           sqlservers,
                           ags,
                           sa_password,
                           k8s_agent_image,
                           sqlserver_template=SQLSERVER_TEMPLATE,
                           filepath=SQLSERVER_YAML_FILENAME):
    with open(sqlserver_template) as f:
        sqlserver_yaml_template = SqlServerYaml(yaml.load_all(f.read()))
        sqlserver_yaml_list = []
        for sqlserver_name in sqlservers:
            sqlserver_yaml = sqlserver_yaml_template.copy()
            sqlserver_yaml.set_name(sqlserver_name)
            sqlserver_yaml.set_namespace(namespace)
            sqlserver_yaml.set_availability_groups(ags)
            sqlserver_yaml.set_agent_image(k8s_agent_image)
            if env is Environment.ON_PREM:
                sqlserver_yaml.set_volume_claim_template_with_selector(
                    namespace)
                sqlserver_yaml.set_service_type("NodePort")
            elif env is Environment.AKS:
                sqlserver_yaml.set_volume_claim_template_with_storage_class()
                sqlserver_yaml.set_service_type("LoadBalancer")
            else:
                raise ValueError("Invalid Environment type")
            for data in sqlserver_yaml.data:
                sqlserver_yaml_list.append(data)

        with open(filepath, "w") as sqlserver_yaml_file:
            yaml.dump_all(sqlserver_yaml_list, sqlserver_yaml_file)
        log(LogLevel.INFO, "SQL Server YAML file:", filepath)
Example no. 4
    def test_unsafe(self):
        dummy = Dummy()

        with self.assertRaises(yaml.representer.RepresenterError):
            yaml.dump_all([dummy])

        with self.assertRaises(yaml.representer.RepresenterError):
            yaml.dump(dummy, Dumper=yDumper)

        # reverse monkey patch and try again
        monkey_patch_pyyaml_reverse()

        with tempfile.TemporaryFile(suffix='.yaml') as f:
            yaml.dump_all([dummy], stream=f)
            f.seek(0)  # rewind

            doc_unsafe = yaml.load(f)
            self.assertTrue(type(doc_unsafe) is Dummy)

            monkey_patch_pyyaml()
            with self.assertRaises(yaml.constructor.ConstructorError):
                f.seek(0)  # rewind
                safe_yaml_load(f)

            with self.assertRaises(yaml.constructor.ConstructorError):
                f.seek(0)  # rewind
                yaml.load(f)
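The behaviour exercised by this test can be reproduced with stock PyYAML, no monkey patching required; a rough sketch:

import yaml

class Dummy:
    pass

# SafeDumper refuses arbitrary Python objects outright...
try:
    yaml.dump_all([Dummy()], Dumper=yaml.SafeDumper)
except yaml.representer.RepresenterError as err:
    print("safe dump rejected:", err)

# ...while the default Dumper tags them as !!python/object, which the safe
# loader then refuses to construct.
text = yaml.dump(Dummy())
try:
    yaml.safe_load(text)
except yaml.constructor.ConstructorError as err:
    print("safe load rejected:", err)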
Example no. 5
    def merge(self, merged_output):
        """
        Concatenate the two input files together along with a generated
        audit header. Dump the result in YAML and XML formats.
        """

        # Create an audit header
        a = audit_header.AuditHeader()
        a.set_fields('tabulator_aggregation',
                     'Pito Salas', 'TTV Tabulator TAB02', 
                     'TTV Tabulator 0.1 JUL-1-2008', self.new_prov)

        # Dump merge into a file in yaml format
        with open(''.join([merged_output,'.yml']), 'w') as stream:
            stream.write(a.serialize_yaml())
            yaml.dump_all(self.b1, stream)
            stream.write('---\n')
            yaml.dump_all(self.b2, stream)

        # Dump merge into a file in xml format        
        with open(''.join([merged_output,'.xml']), 'w') as stream:
            stream.write(a.serialize_xml())
            for file in (self.b1, self.b2):
                for record in file:
                    stream.writelines(xml_serialize(record, 0))
Example no. 6
        def rows(self):
            opt = self.options
            with _open_archive(opt, mode = 'w') as _archive:
                _archive.delete(opt['dataset'] + '\\datadict.txt')
                _archive.delete(opt['dataset'] + '\\data.txt')
                _archive.delete(opt['dataset'] + '\\info.txt')

                with _write_utf8(_archive, opt['dataset'], 'datadict.txt') as out:
                    yaml.dump_all(opt['_variables'].filter(), out, default_flow_style = False, encoding = None)

                datadict_hash = out.digest.hexdigest()
                try:
                    with _write_utf8(_archive, opt['dataset'], 'data.txt') as out:
                        out.write('\t'.join(v.name for v in opt['_variables'].filter()) + '\n')
                        
                        while True:
                            row = (yield)
                            out.write('\t'.join(save_var(v, value) for v, value in opt['_variables'].pair_filter(row)) + '\n')                   
                except (GeneratorExit, StopIteration):
                    pass

                data_hash = out.digest.hexdigest()
                with _write_utf8(_archive, opt['dataset'], 'info.txt') as out:
                    yaml.dump({
                        'cases' : self.saved_rows,
                        'api_version' : API_VERSION,
                        'file_version' : opt['version'],
                        'data_hash' : data_hash,
                        'dict_hash' : datadict_hash,
                    }, out, encoding = None, default_flow_style = False)
Example no. 7
def export(options):
    log_entries = []

    cn = MySQLdb.connect(host=options.host, user=options.username, 
            passwd=options.password, db=options.db, use_unicode=True)

    cur = cn.cursor()
    cur.execute('SELECT id, post_title, post_date, guid FROM wp_posts '
            'INNER JOIN wp_term_relationships ON '
            'wp_term_relationships.object_id = wp_posts.id '
            'WHERE post_status = %s AND term_taxonomy_id = %s '
            'ORDER BY post_date ASC', ('publish', 14))
    for row in cur.fetchall():
        id, title, date, guid = row
        entry = {'Title': title, 'Date': date, 'GUID': guid}
        subcur = cn.cursor()
        subcur.execute('SELECT meta_key, meta_value FROM wp_postmeta '
                'WHERE post_id = %s', (id,))
        for key, value in subcur.fetchall():
            if key == '_readinglog_url': entry['URL'] = value
            elif key == '_readinglog_author': entry['Author'] = value
            elif key == '_readinglog_rating': entry['Rating'] = float(value)
        log_entries.append(entry)

    if options.output is not None:
        f = open(options.output, 'w')
    else:
        f = sys.stdout
    
    yaml.add_representer(unicode, lambda dumper, value: 
            dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
    yaml.dump_all(log_entries, f, default_flow_style=False, allow_unicode=True)
Example no. 8
    def dump(self, path):
        # create path as directory
        import os
        os.makedirs(path, exist_ok=True)
        for subdir in [self.move_dir, self.predicate_dir]:
            os.makedirs('{!s}/{!s}'.format(path, subdir), exist_ok=True)
        # gather predicates and moves and set in directories
        predicates = []
        moves      = []
        algorithms = []
        for entity in self.entities:
            name = entity.__class__.__name__
            if   name ==      Move.__name__:      moves.append(entity)
            elif name == Predicate.__name__: predicates.append(entity)
            elif name == Algorithm.__name__: algorithms.append(entity)
            else: raise Exception('Encountered an invalid object: {!r}'.format(name))

        for p in predicates:
            with open('/'.join([path, self.predicate_dir, p.filename]), 'w') as f:
                f.writelines(p._definition)

        for p in moves:
            with open('/'.join([path, self.move_dir, p.filename]), 'w') as f:
                f.writelines(p._definition)

        yaml.dump_all(self.sorted(),
                      open('{}/{}'.format(path, self.description_document), 'w'),
                      explicit_start=True)
Example no. 9
def format_info(value, format, cols_width=None, dumper=None):
    if format in(INFO_FORMAT.DICT, INFO_FORMAT.JSON, INFO_FORMAT.YAML):
        value['component_details'] = json_loads(value['component_details'])

    if format == INFO_FORMAT.JSON:
        return json_dumps(value)

    elif format == INFO_FORMAT.YAML:
        buff = StringIO()
        yaml.dump_all([value], default_flow_style=False, indent=4, Dumper=dumper, stream=buff)
        value = buff.getvalue()
        buff.close()

        return value

    elif format == INFO_FORMAT.TEXT:
        cols_width = (elem.strip() for elem in cols_width.split(','))
        cols_width = [int(elem) for elem in cols_width]

        table = Texttable()
        table.set_cols_width(cols_width)

        # Use text ('t') instead of auto so that boolean values don't get converted into ints
        table.set_cols_dtype(['t', 't'])

        rows = [['Key', 'Value']]
        rows.extend(sorted(value.items()))

        table.add_rows(rows)

        return table.draw()

    else:
        return value
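A smaller variant of the YAML branch, for reference: when no stream is given, dump_all returns the serialized text directly, so the StringIO buffer can be dropped (sketch with invented data).

import yaml

# Sketch: with stream=None, dump_all returns a string instead of writing to
# a buffer.
value = {"name": "demo", "component_details": {"enabled": True}}
print(yaml.dump_all([value], default_flow_style=False, indent=4))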
Example no. 10
 def save(self, filename, silent=True):
     """Save this configuration as a YAML file. YAML files generally have 
     the ``.yaml`` or ``.yml`` extension. If the filename ends in 
     ``.dat``, the configuration will be saved as a raw dictionary literal.
     
     :param string filename: The filename on which to save the configuration.
     :param bool silent: Unused.
     
     """
     if hasattr(filename,'read') and hasattr(filename,'readlines'):
         filename.write("# %s: <stream>" % self.name)
         yaml.dump_all(self._save_yaml_callback() + [self.store],
              filename, default_flow_style=False, encoding='utf-8', Dumper=PyshellDumper)
     else:
         with open(filename, "w") as stream:
             stream.write("# %s: %s\n" % (self.name, filename))
             if re.search(r"(\.yaml|\.yml)$", filename):
                 yaml.dump_all(
                     self._save_yaml_callback() + [self.store], stream, 
                     default_flow_style=False, encoding='utf-8', Dumper=PyshellDumper)
             elif re.search(r"\.dat$", filename):
                 for document in self._save_yaml_callback():
                     stream.write(str(document))
                     stream.write("\n---\n")
                 stream.write(str(self.store))
             elif not silent:
                 raise ValueError("Filename Error, not "
                     "(.dat,.yaml,.yml): %s" % filename)
         self._filename = filename
Example no. 11
    def write(self, correlation_iterable, stream):
        """
        :type correlation_iterable: Iterable
"""
        yaml.add_representer(Correlation, CorrelationSerializerYaml._correlation_yaml_representer)

        yaml.dump_all(correlation_iterable, stream = stream, default_flow_style = False)
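For context, a self-contained sketch of the representer pattern used here, with a stand-in Correlation class (the real Correlation and CorrelationSerializerYaml._correlation_yaml_representer live in that project):

import yaml

class Correlation:
    def __init__(self, left, right, value):
        self.left, self.right, self.value = left, right, value

def _correlation_representer(dumper, corr):
    # Serialize the object as a plain mapping.
    return dumper.represent_mapping(
        'tag:yaml.org,2002:map',
        {'left': corr.left, 'right': corr.right, 'value': corr.value})

yaml.add_representer(Correlation, _correlation_representer)
print(yaml.dump_all([Correlation("a", "b", 0.9)], default_flow_style=False))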
Example no. 12
def main(datasets):
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)

    for dataset in datasets:
        path = Path(dataset)

        if path.is_dir():
            paths = list(path.glob('*.xml'))
        elif path.suffix != '.xml':
            raise RuntimeError('want xml')
        else:
            paths = [path]

        documents = []
        for path in paths:
            logging.info("Processing %s...", path)
            try:
                documents += prepare_dataset(path)
            except Exception as e:
                logging.info("Failed: %s", e)

        if documents:
            yaml_path = str(path.parent.joinpath('agdc-metadata.yaml'))
            logging.info("Writing %s dataset(s) into %s", len(documents), yaml_path)
            with open(yaml_path, 'w') as stream:
                yaml.dump_all(documents, stream)
        else:
            logging.info("No datasets discovered. Bye!")
Example no. 13
 def save_to_file(self, file_path):
     """
         Saves the replay data to a YAML file.
     """
     
     with open(file_path, "w") as f:
         yaml.dump_all(self.entries, f, explicit_start=True)
Example no. 14
def create_season_config(config, db, output_file):
	info("Checking for new shows")
	shows = _get_primary_source_shows(config)
	
	debug("Outputting new shows")
	with open(output_file, "w", encoding="utf-8") as f:
		yaml.dump_all(shows, f, explicit_start=True, default_flow_style=False)
Example no. 15
def dump_tree(tree, fd, ctx):
    """
    Dump a tree of objects, possibly containing custom types, to YAML.

    Parameters
    ----------
    tree : object
        Tree of objects, possibly containing custom data types.

    fd : pyasdf.generic_io.GenericFile
        A file object to dump the serialized YAML to.

    ctx : Context
        The writing context.
    """
    if not isinstance(ctx, Context):
        ctx = Context(ctx)

    class AsdfDumperTmp(AsdfDumper):
        pass
    AsdfDumperTmp.ctx = ctx

    tag = tree.yaml_tag
    tag = tag[:tag.index('/core/asdf') + 1]
    tree = custom_tree_to_tagged_tree(tree, ctx)
    validate_tagged_tree(tree, ctx)

    yaml.dump_all(
        [tree], stream=fd, Dumper=AsdfDumperTmp,
        explicit_start=True, explicit_end=True,
        version=ctx.versionspec.yaml_version,
        allow_unicode=True,
        encoding='utf-8',
        tags={'!': tag})
Example no. 16
def save(config_list, file_name):
    res = [cfg._cfg for cfg in config_list]

    with file(file_name, 'w') as f:
        if len(res) > 1:
            yaml.dump_all(res, f)
        else:
            yaml.dump(res, f)
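The dump versus dump_all branch above comes down to the following difference (small sketch): dump writes a single document, here a YAML sequence, while dump_all writes one document per list element.

import yaml

res = [{"name": "cfg1"}, {"name": "cfg2"}]
print(yaml.dump(res))       # one document containing a two-item sequence
print(yaml.dump_all(res))   # two documents, the second introduced by "---"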
Example no. 17
def main(num_chars, num_equipment, num_traits, arcanum):
    chars = []

    arcanum = {"yes":True, "no":False, "default":None}[arcanum]

    for _ in range(num_chars):
        chars.append( Character(num_traits=num_traits, num_equipment=num_equipment, force_arcanum=arcanum) )

    print yaml.dump_all(chars, default_flow_style=False)
Example no. 18
def main(num_arcana, kind, distribution):
    arcana = []
    for _ in range(num_arcana):
        _kind = kind
        if _kind == "random":
            _kind = distribution.pick()
        arcana.append( Arcanum(_kind) )

    print yaml.dump_all(arcana, default_flow_style=False)
Example no. 19
 def dump(self, outfile):
   ret = []
   for index in sorted(self.results.keys()):
     data = {}
     data['index'] = index + 1
     data['result'] = self.results[index]
     ret.append(data)
   with open(outfile, 'w') as out:
     yaml.dump_all(ret, stream=out, allow_unicode = True, default_flow_style=False)
Example no. 20
def dumps(obj):
    """Dump a python object -> blob and apply our pretty styling."""
    buff = six.BytesIO()
    yaml.dump_all([obj], buff,
                  explicit_start=True, indent=2,
                  default_flow_style=False,
                  line_break="\n", Dumper=PrettySafeDumper,
                  allow_unicode=True)
    return buff.getvalue()
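A Python 3 oriented sketch of the same helper (the project-specific PrettySafeDumper is omitted, so this is only an approximation): without an explicit encoding PyYAML emits text, so the buffer can be skipped and the string returned directly.

import yaml

def dumps(obj):
    """Sketch: dump a Python object to a YAML string."""
    return yaml.dump_all([obj],
                         explicit_start=True, indent=2,
                         default_flow_style=False,
                         line_break="\n", allow_unicode=True)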
Example no. 21
def brozzler_list_sites(argv=None):
    argv = argv or sys.argv
    arg_parser = argparse.ArgumentParser(
            prog=os.path.basename(argv[0]),
            formatter_class=BetterArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
            '--yaml', dest='yaml', action='store_true', help=(
                'yaml output (default is json)'))
    group = arg_parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
            '--active', dest='active', action='store_true', help=(
                'list all active sites'))
    group.add_argument(
            '--job', dest='job', metavar='JOB_ID', help=(
                'list sites for a particular job'))
    group.add_argument(
            '--jobless', dest='jobless', action='store_true', help=(
                'list all jobless sites'))
    group.add_argument(
            '--site', dest='site', metavar='SITE_ID', help=(
                'list only the specified site'))
    group.add_argument(
            '--all', dest='all', action='store_true', help=(
                'list all sites'))
    add_rethinkdb_options(arg_parser)
    add_common_options(arg_parser, argv)

    args = arg_parser.parse_args(args=argv[1:])
    configure_logging(args)

    rr = rethinker(args)

    reql = rr.table('sites')
    if args.job:
        try:
            job_id = int(args.job)
        except ValueError:
            job_id = args.job
        reql = reql.get_all(job_id, index='job_id')
    elif args.jobless:
        reql = reql.filter(~r.row.has_fields('job_id'))
    elif args.active:
        reql = reql.between(
                ['ACTIVE', r.minval], ['ACTIVE', r.maxval],
                index='sites_last_disclaimed')
    elif args.site:
        reql = reql.get_all(args.site)
    logging.debug('querying rethinkdb: %s', reql)
    results = reql.run()
    if args.yaml:
        yaml.dump_all(
                results, stream=sys.stdout, explicit_start=True,
                default_flow_style=False)
    else:
        for result in results:
            print(json.dumps(result, cls=Jsonner, indent=2))
Example no. 22
File: jot.py Project: ttaylordev/z
def dump(terms, dump_terms):
    """Create a YAML dump of bookmarks matching the given search terms."""
    mset = search_terms(terms)
    ds = []
    for m in mset:
        d = standardize(doc2dict(m.document))
        if dump_terms:
            d['terms'] = [t.term for t in m.document]
        ds.append(d)
    yaml.dump_all(ds, stream=sys.stdout, Dumper=OrderedDumper)
Example no. 23
File: lint.py Project: sg-/mbed-os
def orphans_cmd():
    """Find and print all orphan targets"""
    orphans = Target.get_json_target_data().keys()
    for tgt in TARGET_MAP.values():
        for name in tgt.resolution_order_names:
            if name in orphans:
                orphans.remove(name)
    if orphans:
        print dump_all([orphans], default_flow_style=False)
    return len(orphans)
Example no. 24
 def dump(self, ps, fmode='a'):
     """
     Serialize particle objects into a YAML stream.
     """
     data = Stream.to_dumper(ps)
     with open(self.fname, fmode) as fobj:
         yaml.dump_all(data,
                       stream=fobj,
                       default_flow_style=False,
                       explicit_start=True)
Example no. 25
def safe_file(dictionary, stream_file):
    '''Saving the file using yaml.dump_all'''
    yaml.dump_all([dictionary],
                  stream=stream_file,
                  default_flow_style=False,
                  width=2048,
                  line_break=False,
                  allow_unicode=True,
                  explicit_start=True)
    stream_file.close()
Example no. 26
def save_releases():
    global RELEASES_FILENAME, RELEASES
    f = open(RELEASES_FILENAME, "w")
    yaml.dump_all(RELEASES,
                  f,
                  default_flow_style=False,
                  width=80,
                  default_style=False)
    f.close()
    print "Releases saved."
Example no. 27
 def inspect(self, domain, name='*', path='', metadata=False, format='json', **kv):
     files = self.find(domain, name, path, metadata or (format == 'list'))
     if format == 'list':
         for md in files:
             print md['relpath']
     elif format == 'json':
         json.dump(list(files), sys.stdout, cls=SOLJsonEncoder)
     elif format == 'yaml':
         yaml.dump_all(list(files), sys.stdout, default_flow_style=False, Dumper=SOLYamlEncoder)
     else:
         raise ValueError('Unknown output format: "%s"' % format)
Example no. 28
def save_users():
    global USERS_FILENAME, USERS
    f = open(USERS_FILENAME, "w")
    yaml.dump_all(USERS,
                  f,
                  explicit_start=True,
                  default_flow_style=False,
                  width=80,
                  default_style=False)
    f.close()
    print "Users saved."
Example no. 29
def save_issues():
    global ISSUES_FILENAME, ISSUES
    f = open(ISSUES_FILENAME, "w")
    yaml.dump_all(ISSUES,
                  f,
                  explicit_start=True,
                  default_flow_style=False,
                  width=80,
                  default_style=False)
    f.close()
    print "Issues saved."
Example no. 30
def create_operator_yaml(namespace,
                         k8s_agent_image,
                         operator_template=OPERATOR_TEMPLATE,
                         filepath=OPERATOR_YAML_FILENAME):
    with open(operator_template) as f:
        operator_yaml = OperatorYaml(yaml.load_all(f.read()))
        operator_yaml.set_namespace(namespace)
        operator_yaml.set_agent_image(k8s_agent_image)
        with open(filepath, "w") as operator_yaml_file:
            yaml.dump_all(operator_yaml.data, operator_yaml_file)
        log(LogLevel.INFO, "operator YAML file:", filepath)
Example no. 31
    print()
    print('$' * 40)
    print('$$ Values')
    print('$' * 40)
    for vfline in difflib.unified_diff(
            helmreq.allowedValuesRaw().splitlines(keepends=True),
            helmreqnew.allowedValuesRaw().splitlines(keepends=True),
            fromfile='before',
            tofile='after'):
        print(vfline, end='')

    print()
    print('$' * 40)
    print('$$ Templates')
    print('$' * 40)
    helmres = helmreq.generate()
    dump = yaml.dump_all(helmres.data, Dumper=yaml.Dumper, sort_keys=False)
    helmresnew = helmreqnew.generate()
    dumpnew = yaml.dump_all(helmresnew.data,
                            Dumper=yaml.Dumper,
                            sort_keys=False)
    for vfline in difflib.unified_diff(dump.splitlines(keepends=True),
                                       dumpnew.splitlines(keepends=True),
                                       fromfile='before',
                                       tofile='after'):
        print(vfline, end='')

else:
    print('Same version')
Example no. 32
def main(args=None, input_format="yaml", program_name="yq"):
    parser = get_parser(program_name)
    args, jq_args = parser.parse_known_args(args=args)
    for arg in jq_arg_spec:
        values = getattr(args, arg, None)
        if values is not None:
            for value_group in values:
                jq_args.append(arg)
                jq_args.extend(value_group)
    if getattr(args, "--from-file") or getattr(args, "-f"):
        args.files.insert(0, argparse.FileType()(args.jq_filter))
    else:
        jq_args.append(args.jq_filter)

    if sys.stdin.isatty() and not args.files:
        return parser.print_help()

    converting_output = args.yaml_output or args.xml_output or args.toml_output

    try:
        # Note: universal_newlines is just a way to induce subprocess to make stdin a text buffer and encode it for us
        jq = subprocess.Popen(
            ["jq"] + jq_args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE if converting_output else None,
            universal_newlines=True)
    except OSError as e:
        msg = "{}: Error starting jq: {}: {}. Is jq installed and available on PATH?"
        parser.exit(msg.format(program_name, type(e).__name__, e))

    try:
        input_streams = args.files if args.files else [sys.stdin]

        if converting_output:
            # TODO: enable true streaming in this branch (with asyncio, asyncproc, a multi-shot variant of
            # subprocess.Popen._communicate, etc.)
            # See https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
            input_docs = []
            for input_stream in input_streams:
                if input_format == "yaml":
                    input_docs.extend(
                        yaml.load_all(input_stream, Loader=OrderedLoader))
                elif input_format == "xml":
                    import xmltodict
                    input_docs.append(
                        xmltodict.parse(input_stream.read(),
                                        disable_entities=True))
                elif input_format == "toml":
                    import toml
                    input_docs.append(toml.load(input_stream))
                else:
                    raise Exception("Unknown input format")
            input_payload = "\n".join(
                json.dumps(doc, cls=JSONDateTimeEncoder) for doc in input_docs)
            jq_out, jq_err = jq.communicate(input_payload)
            json_decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
            if args.yaml_output:
                yaml.dump_all(decode_docs(jq_out, json_decoder),
                              stream=sys.stdout,
                              Dumper=OrderedDumper,
                              width=args.width,
                              allow_unicode=True,
                              default_flow_style=False)
            elif args.xml_output:
                import xmltodict
                for doc in decode_docs(jq_out, json_decoder):
                    if args.xml_root:
                        doc = {args.xml_root: doc}
                    elif not isinstance(doc, OrderedDict):
                        msg = (
                            "{}: Error converting JSON to XML: cannot represent non-object types at top level. "
                            "Use --xml-root=name to envelope your output with a root element."
                        )
                        parser.exit(msg.format(program_name))
                    full_document = True if args.xml_dtd else False
                    try:
                        xmltodict.unparse(doc,
                                          output=sys.stdout,
                                          full_document=full_document,
                                          pretty=True,
                                          indent="  ")
                    except ValueError as e:
                        if "Document must have exactly one root" in str(e):
                            raise Exception(
                                str(e) +
                                " Use --xml-root=name to envelope your output with a root element"
                            )
                        else:
                            raise
                    sys.stdout.write(b"\n" if sys.version_info < (3,
                                                                  0) else "\n")
            elif args.toml_output:
                import toml
                for doc in decode_docs(jq_out, json_decoder):
                    if not isinstance(doc, OrderedDict):
                        msg = "{}: Error converting JSON to TOML: cannot represent non-object types at top level."
                        parser.exit(msg.format(program_name))

                    if USING_PYTHON2:
                        # For Python 2, dump the string and encode it into bytes.
                        output = toml.dumps(doc)
                        sys.stdout.write(output.encode("utf-8"))
                    else:
                        # For Python 3, write the unicode to the buffer directly.
                        toml.dump(doc, sys.stdout)
        else:
            if input_format == "yaml":
                for input_stream in input_streams:
                    for doc in yaml.load_all(input_stream,
                                             Loader=OrderedLoader):
                        json.dump(doc, jq.stdin, cls=JSONDateTimeEncoder)
                        jq.stdin.write("\n")
            elif input_format == "xml":
                import xmltodict
                for input_stream in input_streams:
                    json.dump(
                        xmltodict.parse(input_stream.read(),
                                        disable_entities=True), jq.stdin)
                    jq.stdin.write("\n")
            elif input_format == "toml":
                import toml
                for input_stream in input_streams:
                    json.dump(toml.load(input_stream), jq.stdin)
                    jq.stdin.write("\n")
            else:
                raise Exception("Unknown input format")

            jq.stdin.close()
            jq.wait()
        for input_stream in input_streams:
            input_stream.close()
        exit(jq.returncode)
    except Exception as e:
        parser.exit("{}: Error running jq: {}: {}.".format(
            program_name,
            type(e).__name__, e))
Example no. 33
                          flags=(cv2.CALIB_FIX_ASPECT_RATIO +
                                 cv2.CALIB_FIX_INTRINSIC),
                          criteria=(cv2.TERM_CRITERIA_COUNT +
                                    cv2.TERM_CRITERIA_EPS, 300, 1e-6))

print("calibrated with RMS error =", rms)
print("\nCamera matrices (L and R respectively):\n", cameraMatrixL,
      "\n \n", cameraMatrixR)
print("\nDistortion coefficients:\n", distCoeffsL.ravel(), "\n \n",
      distCoeffsR.ravel(), "\n")
print("Rotational matrix between camera 1 and camera 2 \n", R, "\n")
print("Translational matrix between camera 1 and camera 2 \n", T, "\n")

print("Writing intrinsics to /data/intrinsics.yml... ", end='')
with open("../data/intrinsics.yml", 'w') as intrStream:
    yaml.dump_all(["CM1", cameraMatrixL, "D1", distCoeffsL, "CM2",
                   cameraMatrixR, "D2", distCoeffsR], intrStream)
print("intrinsics written")


print("Writing extrinsics to /data/extrinsics.yml... ", end='')
RL, RR, PL, PR, Q, \
    roiL, roiR = cv2.stereoRectify(cameraMatrix1=cameraMatrixL,
                                   distCoeffs1=distCoeffsL,
                                   cameraMatrix2=cameraMatrixR,
                                   distCoeffs2=distCoeffsR,
                                   imageSize=stereo_imgsize,
                                   R=R, T=T, R1=None, R2=None,
                                   P1=None, P2=None, Q=None,
                                   alpha=0,
                                   newImageSize=stereo_imgsize)
with open("../data/extrinsics.yml", 'w') as extrStream:
Example no. 34
    def save(self, *args):
        jobs = args[0]
        print('Saving updated list to %r' % self.filename)

        with open(self.filename, 'w') as fp:
            yaml.dump_all([job.serialize() for job in jobs], fp, default_flow_style=False)
Example no. 35
def _get_data_as_collection(data):
    collection = []
    for repo, documents in data.items():
        for document in documents:
            collection.append(document)
    return yaml.dump_all(collection, Dumper=yaml.SafeDumper)
Example no. 36
                        "minus": float(minus)
                    }
                }]
            })

    # dump YAML documents for data tables
    print("Path={}".format(object.path))
    data_document = {
        "independent_variables": independent_variables,
        "dependent_variables": dependent_variables
    }
    with open("{}.yaml".format(object.name), "w") as data_stream:
        yaml.dump(data_document, data_stream)
    print("Written {} object to {}.yaml\n".format(object.type, object.name))

    # dump YAML document for single YAML file
    from copy import copy
    single_yaml_document = copy(submission[-1])
    del single_yaml_document["data_file"]
    single_yaml_document.update(data_document)
    single_yaml.append(single_yaml_document)

# dump YAML documents for metadata
with open("submission.yaml", "w") as submission_stream:
    yaml.dump_all(submission, submission_stream)
print("Written metadata to submission.yaml")

# dump YAML documents for single YAML file
with open("1512299.yaml", "w") as single_yaml_stream:
    yaml.dump_all(single_yaml, single_yaml_stream)
print("Written single-YAML-file format to 1512299.yaml")
Example no. 37
def main():
    argparser = ArgumentParser(
        description="Assign and verify UUIDs of Sigma rules")
    argparser.add_argument(
        "--verify",
        "-V",
        action="store_true",
        help=
        "Verify existence and uniqueness of UUID assignments. Exits with error code if verification fails."
    )
    argparser.add_argument("--verbose",
                           "-v",
                           action="store_true",
                           help="Be verbose.")
    argparser.add_argument("--recursive",
                           "-r",
                           action="store_true",
                           help="Recurse into directories.")
    argparser.add_argument(
        "--error",
        "-e",
        action="store_true",
        help="Exit with error code 10 on verification failures.")
    argparser.add_argument("inputs",
                           nargs="+",
                           help="Sigma rule files or repository directories")
    args = argparser.parse_args()

    if args.verbose:
        print_verbose()

    if args.recursive:
        paths = [
            p for pathname in args.inputs for p in Path(pathname).glob("**/*")
            if p.is_file()
        ]
    else:
        paths = [Path(pathname) for pathname in args.inputs]

    yaml.add_representer(dict, yaml_preserve_order)

    uuids = set()
    passed = True
    for path in paths:
        print_verbose("Rule {}".format(str(path)))
        with path.open("r") as f:
            rules = list(yaml.safe_load_all(f))

        if args.verify:
            i = 1
            for rule in rules:
                if "title" in rule:  # Rule with a title should also have a UUID
                    try:
                        UUID(rule["id"])
                    except ValueError:  # id is not a valid UUID
                        print("Rule {} in file {} has a malformed UUID '{}'.".
                              format(i, str(path), rule["id"]))
                        passed = False
                    except KeyError:  # rule has no id
                        print("Rule {} in file {} has no UUID.".format(
                            i, str(path)))
                        passed = False
                i += 1
        else:
            newrules = list()
            changed = False
            i = 1
            for rule in rules:
                if "title" in rule and "id" not in rule:  # only assign id to rules that have a title and no id
                    newrule = dict()
                    changed = True
                    for k, v in rule.items():
                        newrule[k] = v
                        if k == "title":  # insert id after title
                            uuid = uuid4()
                            newrule["id"] = str(uuid)
                            print("Assigned UUID '{}' to rule {} in file {}.".
                                  format(uuid, i, str(path)))
                    newrules.append(newrule)
                else:
                    newrules.append(rule)
                i += 1

            if changed:
                with path.open("w") as f:
                    yaml.dump_all(newrules,
                                  f,
                                  Dumper=SigmaYAMLDumper,
                                  indent=4,
                                  width=160,
                                  default_flow_style=False)

    if not passed:
        print("The Sigma rules listed above don't have an ID. The ID must be:")
        print("* Contained in the 'id' attribute")
        print("* a valid UUIDv4 (randomly generated)")
        print("* Unique in this repository")
        print(
            "Please generate one with the sigma_uuid tool or here: https://www.uuidgenerator.net/version4"
        )
        if args.error:
            exit(10)
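The dict representer registered at the top (yaml_preserve_order) is defined elsewhere in the tool; a plausible minimal version, given here purely as an assumption, keeps insertion order so that the generated id really does land right after title:

import yaml

# Assumed sketch: representing the dict via items() bypasses PyYAML's key
# sorting, so insertion order is preserved in the output.
def yaml_preserve_order(dumper, data):
    return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())

yaml.add_representer(dict, yaml_preserve_order)
print(yaml.dump_all([{"title": "Demo rule", "id": "generated-uuid-here"}],
                    default_flow_style=False))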
Example no. 38
def yaml_dump_all(*args, **kwargs):
    kwargs.setdefault('Dumper', NoAliasDumper)
    return yaml.dump_all(*args, **kwargs)
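NoAliasDumper is defined elsewhere in that codebase; the usual shape of such a dumper, shown here as an assumption, disables anchor/alias generation so repeated objects are written out in full:

import yaml

# Assumed sketch of a no-alias dumper: never emit anchors/aliases such as
# "&id001"/"*id001" for objects that appear more than once.
class NoAliasDumper(yaml.SafeDumper):
    def ignore_aliases(self, data):
        return True

shared = {"a": 1}
print(yaml.dump_all([{"x": shared, "y": shared}], Dumper=NoAliasDumper))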
Example no. 39
def write_cache(cache):
    with open(CACHE_DB, "w") as buff:
        yaml.dump_all(cache, stream=buff, explicit_start=True)
Example no. 40
 def write_yaml_all(path, yaml_data: list):
     with open(path, mode='w', encoding='utf-8') as f:
         yaml.dump_all(yaml_data, f)
Example no. 41
def dump_all(docs, stream):
    doc2 = docs
    conv_docs = []
    for doc in doc2:
        conv_docs.append(objdict_to_dict(doc))
    yaml.dump_all(conv_docs, stream, default_flow_style=False)
Example no. 42
def main() -> None:
    language_choices = sorted(scenario_config.LANGUAGES.keys())
    argp = argparse.ArgumentParser(description='Generates load test configs.')
    argp.add_argument('-l',
                      '--language',
                      choices=language_choices,
                      required=True,
                      help='Language to benchmark.')
    argp.add_argument('-t',
                      '--template',
                      type=str,
                      required=True,
                      help='LoadTest configuration yaml file template.')
    argp.add_argument('-s',
                      '--substitutions',
                      action='extend',
                      nargs='+',
                      default=[],
                      type=str,
                      help='Template substitutions, in the form key=value.')
    argp.add_argument('-p',
                      '--prefix',
                      default='',
                      type=str,
                      help='Test name prefix.')
    argp.add_argument('-u',
                      '--uniquifiers',
                      action='extend',
                      nargs='+',
                      default=[],
                      type=str,
                      help='One or more strings to make the test name unique.')
    argp.add_argument(
        '-d',
        nargs='?',
        const=True,
        default=False,
        type=bool,
        help='Use creation date and time as an additional uniquifier.')
    argp.add_argument('-a',
                      '--annotations',
                      action='extend',
                      nargs='+',
                      default=[],
                      type=str,
                      help='Test annotations, in the form key=value.')
    argp.add_argument('-r',
                      '--regex',
                      default='.*',
                      type=str,
                      help='Regex to select scenarios to run.')
    argp.add_argument(
        '--category',
        choices=['all', 'inproc', 'scalable', 'smoketest', 'sweep'],
        default='all',
        help='Select a category of tests to run.')
    argp.add_argument(
        '--client_language',
        choices=language_choices,
        help='Select only scenarios with a specified client language.')
    argp.add_argument(
        '--server_language',
        choices=language_choices,
        help='Select only scenarios with a specified server language.')
    argp.add_argument('--runs_per_test',
                      default=1,
                      type=int,
                      help='Number of copies to generate for each test.')
    argp.add_argument('-o',
                      '--output',
                      type=str,
                      help='Output file name. Output to stdout if not set.')
    args = argp.parse_args()

    substitutions = parse_key_value_args(args.substitutions)

    with open(args.template) as f:
        base_config = yaml.safe_load(
            string.Template(f.read()).substitute(substitutions))

    scenario_filter = scenario_config_exporter.scenario_filter(
        scenario_name_regex=args.regex,
        category=args.category,
        client_language=args.client_language,
        server_language=args.server_language)

    scenarios = scenario_config_exporter.gen_scenarios(args.language,
                                                       scenario_filter)

    uniquifiers = args.uniquifiers
    if args.d:
        uniquifiers.append(now_string())

    annotations = parse_key_value_args(args.annotations)

    configs = gen_loadtest_configs(base_config,
                                   scenarios,
                                   loadtest_name_prefix=args.prefix,
                                   uniquifiers=uniquifiers,
                                   annotations=annotations,
                                   runs_per_test=args.runs_per_test)

    configure_yaml()

    with open(args.output, 'w') if args.output else sys.stdout as f:
        yaml.dump_all(configs, stream=f)
Example no. 43
configs = list(yaml.load_all(urllib2.urlopen(args.config_yaml)))

#update size in place
for config in configs:
    if config['kind'] == 'StatefulSet':
        if args.size_in_gb:
            config['spec']['volumeClaimTemplates'][0]['spec']['resources'][
                'requests']['storage'] = str(args.size_in_gb) + 'Gi'

        if args.replicas:
            config['spec']['replicas'] = args.replicas

        for container in config['spec']['template']['spec']['containers']:
            if container['name'] == 'cockroachdb':
                container['command'] = [
                    "/bin/bash", "-ecx",
                    "exec /cockroach/cockroach start --logtostderr --insecure --host $(hostname -f) --locality="
                    + args.locality + " --http-host 0.0.0.0 --attrs=" +
                    args.attrs +
                    " --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb --cache 25% --max-sql-memory 25%"
                ]
                break

        if args.version_tag:
            for container in config['spec']['template']['spec']['containers']:
                if container['name'] == 'cockroachdb':
                    container['image'] = args.version_tag
                    break

sys.stdout.write(yaml.dump_all(configs))
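The load_all, edit in place, dump_all pattern used here is worth isolating; a small round-trip sketch with a toy manifest:

import yaml

# Round-trip sketch: load_all/dump_all keep a multi-document manifest as
# separate documents, so one document can be edited and the whole stream
# re-emitted.
manifest = """\
apiVersion: v1
kind: Service
metadata:
  name: cockroachdb
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cockroachdb
"""
docs = list(yaml.safe_load_all(manifest))
docs[1]["spec"] = {"replicas": 3}
print(yaml.dump_all(docs, default_flow_style=False))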
Example no. 44
def safe_dump(data, **kwargs):
    kwargs["default_flow_style"] = False
    return yaml.dump_all([data], None, Dumper=PrettyPrinterDumper, **kwargs)
Example no. 45
 def save_features(self, training_data, filename):
      
     stream = open(filename,'w')
     yaml.dump_all(training_data, stream)
     print "Saving features to %s" % os.path.abspath(filename)        
     stream.close()
Example no. 46
    detectionsGeo = []
    lookupGeo = []

    rospy.init_node('listener', anonymous=True)
    #transform = tf2_ros.Buffer.lookup_transform('turtle1', 'world', rospy.Time.now()) #tf2_ros.Buffer.lookupTransform('odom', 'base_footprint')
    topicSubscriber()

    rospy.spin()
    filename = "latest_output_file.yaml"
    filenameGeo = "latest_output_fileGeo.yaml"
    filepath = rospkg.RosPack().get_path(
        'hk_ros_2021') + '/exported_detection_logs/'

    with open(filepath + filename, 'w') as outfile:
        yaml.dump_all(detections, outfile, explicit_start=True)
        outfile.close()

    with open(filepath + filenameGeo, 'w') as outfile:
        yaml.dump_all(detectionsGeo, outfile, explicit_start=True)

#transforms:
#  -
#    header:
#      seq: 0
#      stamp:
#        secs: 1607481189
#        nsecs: 209141609
#      frame_id: "camera"
#    child_frame_id: "tag_9"
#    transform:
Example no. 47
    def test_scaling(self):
        with tempfile.NamedTemporaryFile("w+") as example_cluster_file, \
                tempfile.NamedTemporaryFile("w+") as operator_file:

            example_cluster_config_path = get_operator_config_path(
                "example_cluster.yaml")
            operator_config_path = get_operator_config_path("operator.yaml")

            operator_config = list(
                yaml.safe_load_all(open(operator_config_path).read()))
            example_cluster_config = yaml.safe_load(
                open(example_cluster_config_path).read())

            # Set image and pull policy
            podTypes = example_cluster_config["spec"]["podTypes"]
            pod_specs = [operator_config[-1]["spec"]] + [
                podType["podConfig"]["spec"] for podType in podTypes
            ]
            for pod_spec in pod_specs:
                pod_spec["containers"][0]["image"] = IMAGE
                pod_spec["containers"][0]["imagePullPolicy"] = PULL_POLICY

            # Config set-up for this test.
            example_cluster_config["spec"]["maxWorkers"] = 100
            example_cluster_config["spec"]["idleTimeoutMinutes"] = 1
            worker_type = podTypes[1]
            # Make sure we have the right type
            assert "worker" in worker_type["name"]
            worker_type["maxWorkers"] = 100
            # Key for the first part of this test:
            worker_type["minWorkers"] = 30

            yaml.dump(example_cluster_config, example_cluster_file)
            yaml.dump_all(operator_config, operator_file)

            files = [example_cluster_file, operator_file]
            for file in files:
                file.flush()

            # Start operator and a 30-pod-cluster.
            print(">>>Starting operator and a cluster.")
            for file in files:
                cmd = f"kubectl -n {NAMESPACE} apply -f {file.name}"
                subprocess.check_call(cmd, shell=True)

            # Check that autoscaling respects minWorkers by waiting for
            # 32 pods in the namespace.
            print(">>>Waiting for pods to join cluster.")
            wait_for_pods(32)

            # Check scale-down.
            print(">>>Decreasing min workers to 0.")
            example_cluster_edit = copy.deepcopy(example_cluster_config)
            # Set minWorkers to 0:
            example_cluster_edit["spec"]["podTypes"][1]["minWorkers"] = 0
            yaml.dump(example_cluster_edit, example_cluster_file)
            example_cluster_file.flush()
            cm = f"kubectl -n {NAMESPACE} apply -f {example_cluster_file.name}"
            subprocess.check_call(cm, shell=True)
            print(">>>Sleeping for a minute while workers time-out.")
            time.sleep(60)
            print(">>>Verifying scale-down.")
            wait_for_pods(2)

            # Test scale up and scale down after task submission.
            command = f"kubectl -n {NAMESPACE}"\
                " port-forward service/example-cluster-ray-head 10001:10001"
            command = command.split()
            print(">>>Port-forwarding head service.")
            self.proc = subprocess.Popen(command)
            try:
                # Wait a bit for the port-forwarding connection to be
                # established.
                time.sleep(10)
                # Check that job submission works
                submit_scaling_job(client_port="10001", num_tasks=15)
                # Clean up
                self.proc.kill()
            except Exception as e:
                # Clean up on failure
                self.proc.kill()
                raise (e)

            print(">>>Sleeping for a minute while workers time-out.")
            time.sleep(60)
            print(">>>Verifying scale-down.")
            wait_for_pods(2)
Example no. 48
 def save(self):
     with open('servers.yml', 'a') as file:
         yaml.dump_all(self.servers.values(), file)
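Because the file is opened in append mode, each save writes new documents after the existing ones; a variant sketch that adds explicit document-start markers so every appended batch stays clearly delimited:

import yaml

# Variant sketch: explicit_start=True prefixes each appended document with
# "---", keeping it separate from whatever is already in the file.
servers = [{"host": "10.0.0.1"}, {"host": "10.0.0.2"}]
with open('servers.yml', 'a') as f:
    yaml.dump_all(servers, f, explicit_start=True)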
Example no. 49
                    css_class=css_class, change=change.strip())
            author_htm += '\t\t\t</ul>\n'
            if len(changes_added) > 0:
                entry_htm += author_htm
        if write_entry and days_written <= MAX_DATE_ENTRIES:
            changelog.write(entry_htm)
            days_written += 1
        else:
            remove_dates.append(_date)

    with open(os.path.join(targetDir, 'templates', 'footer.html'), 'r') as h:
        for line in h:
            changelog.write(line)

for _date in remove_dates:
    del all_changelog_entries[_date]
    print('Removing {} (old/invalid)'.format(_date))

with open(changelog_cache, 'w') as f:
    cache_head = 'DO NOT EDIT THIS FILE BY HAND!  AUTOMATICALLY GENERATED BY ss13_genchangelog.py.'
    yaml.dump_all([cache_head, all_changelog_entries],
                  f,
                  default_flow_style=False)

if len(del_after):
    print('Cleaning up...')
    for fileName in del_after:
        if os.path.isfile(fileName):
            print(' Deleting {0} (delete-after set)...'.format(fileName))
            os.remove(fileName)
Example no. 50
def start_charm():
    layer.status.maintenance('configuring container')

    config = hookenv.config()
    image_info = layer.docker_resource.get_info('jupyterhub-image')
    service_name = hookenv.service_name()

    hub_port = 8000
    api_port = 8081

    if is_flag_set('endpoint.ambassador.joined'):
        annotations = {
            'getambassador.io/config': yaml.dump_all([
                {
                    'apiVersion': 'ambassador/v0',
                    'kind':  'Mapping',
                    'name':  'tf_hub',
                    'prefix': '/hub/',
                    'rewrite': '/hub/',
                    'service': f'{service_name}:{hub_port}',
                    'use_websocket': True,
                    'timeout_ms': 30000,
                },
                {
                    'apiVersion': 'ambassador/v0',
                    'kind':  'Mapping',
                    'name':  'tf_hub_user',
                    'prefix': '/user/',
                    'rewrite': '/user/',
                    'service': f'{service_name}:{hub_port}',
                    'use_websocket': True,
                    'timeout_ms': 30000,
                },
            ]),
        }
    else:
        annotations = {}

    pip_installs = [
        'jhub-remote-user-authenticator',
        'jupyterhub-dummyauthenticator',
        'jupyterhub-kubespawner',
        'oauthenticator',
    ]

    layer.caas_base.pod_spec_set({
        'service': {'annotations': annotations},
        'containers': [
            {
                'name': 'jupyterhub',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                # TODO: Move to init containers to pip install when juju supports it
                'command': [
                    'bash',
                    '-c',
                    f'pip install {" ".join(pip_installs)} && jupyterhub -f /etc/config/jupyterhub_config.py',
                ],
                'ports': [
                    {
                        'name': 'hub',
                        'containerPort': hub_port,
                    },
                    {
                        'name': 'api',
                        'containerPort': api_port,
                    },
                ],
                'config': {
                    'K8S_SERVICE_NAME': service_name,
                    'AUTHENTICATOR': config['authenticator'],
                    'NOTEBOOK_STORAGE_SIZE': config['notebook-storage-size'],
                    'NOTEBOOK_STORAGE_CLASS': config['notebook-storage-class'],
                    'NOTEBOOK_IMAGE': config['notebook-image'],
                },
                'files': [
                    {
                        'name': 'configs',
                        'mountPath': '/etc/config',
                        'files': {
                            Path(filename).name: Path(filename).read_text()
                            for filename in glob('files/*')
                        },
                    },
                ],
            },
        ],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.kubeflow-jupyterhub.started')
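The getambassador.io/config value is simply the string form of two YAML documents; a trimmed sketch of what dump_all returns for that annotation (service name and port invented):

import yaml

# Trimmed sketch: with no stream argument, dump_all returns one string
# containing both Mapping documents.
print(yaml.dump_all([
    {'apiVersion': 'ambassador/v0', 'kind': 'Mapping', 'name': 'tf_hub',
     'prefix': '/hub/', 'service': 'jupyterhub:8000'},
    {'apiVersion': 'ambassador/v0', 'kind': 'Mapping', 'name': 'tf_hub_user',
     'prefix': '/user/', 'service': 'jupyterhub:8000'},
]))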
Example no. 51
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')
    service_name = hookenv.service_name()

    api = endpoint_from_name('pipelines-api').services()[0]
    minio = endpoint_from_name('minio').services()[0]['hosts'][0]

    port = hookenv.config('port')

    layer.caas_base.pod_spec_set({
        'version':
        2,
        'serviceAccount': {
            'rules': [
                {
                    'apiGroups': [''],
                    'resources': ['pods', 'pods/log'],
                    'verbs': ['create', 'get', 'list'],
                },
                {
                    'apiGroups': ['kubeflow.org'],
                    'resources': ['viewers'],
                    'verbs': ['create', 'get', 'list', 'watch', 'delete'],
                },
            ]
        },
        'service': {
            'annotations': {
                'getambassador.io/config':
                yaml.dump_all([{
                    'apiVersion': 'ambassador/v0',
                    'kind': 'Mapping',
                    'name': 'pipeline-ui',
                    'prefix': '/pipeline',
                    'rewrite': '/pipeline',
                    'service': f'{service_name}:{port}',
                    'use_websocket': True,
                    'timeout_ms': 30000,
                }])
            }
        },
        'containers': [{
            'name': 'pipelines-ui',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'config': {
                'ML_PIPELINE_SERVICE_HOST': api['service_name'],
                'ML_PIPELINE_SERVICE_PORT': api['hosts'][0]['port'],
                'MINIO_HOST': minio['hostname'],
                'MINIO_PORT': minio['port'],
                'MINIO_NAMESPACE': os.environ['JUJU_MODEL_NAME'],
                'ALLOW_CUSTOM_VISUALIZATIONS': True,
            },
            'ports': [{
                'name': 'ui',
                'containerPort': port
            }],
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Example no. 52
        adjusted_spec_path = _adjust_path(cfg_path, spec_file)
        segment_dicts = load_spec(adjusted_spec_path)
        yield [start_line, end_line, segment_dicts]


def load_cfg(cfg_path):
    return list(preload_cfg(cfg_path))


def _get_yaml(filename):
    with r_open(filename) as file:
        return list(yaml.load_all(file))


def get_yaml(filename):
    try:
        return _get_yaml(filename)
    except FileNotFoundError:
        raise FileNotFoundError("YAML file not found: " + filename)
    except:
        raise Exception("Error parsing YAML file: " + filename)


if __name__ == "__main__":
    fn = "kep/test/temp/test_cfg.txt"
    import os
    print(os.path.exists(fn))
    obj = [["1", "2", "3"], ["5", "6", "7"]]
    print(yaml.dump_all(obj))
    z = _get_yaml(fn)
    print(z)
Example no. 53
def write_yaml(data):
    """ A function to write YAML file"""
    with open('toyaml.yml', 'a') as f:
        yaml.dump_all(data, f, default_flow_style=False)
Example no. 54
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    service_name = hookenv.service_name()

    admin_port = hookenv.config('admin-port')
    port = hookenv.config('port')

    grpc = endpoint_from_name('metadata-grpc').services()[0]
    envoy_yaml = Template(Path('files/envoy.yaml.tmpl').read_text()).render(
        port=port,
        grpc_host=grpc['service_name'],
        grpc_port=grpc['hosts'][0]['port'],
        admin_port=admin_port,
    )

    layer.caas_base.pod_spec_set(
        spec={
            'version': 2,
            'service': {
                'annotations': {
                    'getambassador.io/config':
                    yaml.dump_all([{
                        'apiVersion': 'ambassador/v0',
                        'kind': 'Mapping',
                        'name': 'metadata-proxy',
                        'prefix': '/ml_metadata.MetadataStoreService/',
                        'rewrite': '/ml_metadata.MetadataStoreService/',
                        'service': f'{service_name}:{port}',
                        'use_websocket': True,
                        'grpc': True,
                        'timeout_ms': 30000,
                    }])
                }
            },
            'containers': [{
                'name': 'metadata-envoy',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': ['/usr/local/bin/envoy'],
                'args': ['-c', '/config/envoy.yaml'],
                'ports': [
                    {
                        'name': 'grpc',
                        'containerPort': port
                    },
                    {
                        'name': 'admin',
                        'containerPort': admin_port
                    },
                ],
                'files': [{
                    'name': 'config',
                    'mountPath': '/config',
                    'files': {
                        'envoy.yaml': envoy_yaml
                    },
                }],
            }],
        })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
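
The templating step above can be sketched in isolation; assuming Template is jinja2.Template and using an inline stand-in for files/envoy.yaml.tmpl:

import yaml
from jinja2 import Template

# Stand-in template; the real envoy.yaml.tmpl carries a full Envoy config.
tmpl = Template(
    "static_resources:\n"
    "  listeners:\n"
    "  - name: listener_0\n"
    "    port: {{ port }}\n"
    "admin:\n"
    "  port: {{ admin_port }}\n"
)

envoy_yaml = tmpl.render(port=9090, admin_port=9901)
print(yaml.safe_load(envoy_yaml))   # the rendered text is itself valid YAML
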
Example No. 55
def pod_network(ctx, config):
    cluster_name = config['cluster']
    pnet = config.get('pod_network', 'calico')
    if pnet == 'flannel':
        r = ctx.kubeadm[cluster_name].bootstrap_remote.run(
            args=[
                'curl',
                'https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml',
            ],
            stdout=BytesIO(),
        )
        assert r.exitstatus == 0
        flannel = list(
            yaml.load_all(r.stdout.getvalue(), Loader=yaml.FullLoader))
        for o in flannel:
            if o.get('data', {}).get('net-conf.json'):
                log.info(f'Updating {o}')
                o['data']['net-conf.json'] = o['data'][
                    'net-conf.json'].replace(
                        '10.244.0.0/16',
                        str(ctx.kubeadm[cluster_name].pod_subnet))
                log.info(f'Now {o}')
        flannel_yaml = yaml.dump_all(flannel)
        log.debug(f'Flannel:\n{flannel_yaml}')
        _kubectl(ctx, config, ['apply', '-f', '-'], stdin=flannel_yaml)

    elif pnet == 'calico':
        _kubectl(ctx, config, [
            'apply', '-f',
            'https://docs.projectcalico.org/manifests/tigera-operator.yaml'
        ])
        cr = {
            'apiVersion': 'operator.tigera.io/v1',
            'kind': 'Installation',
            'metadata': {
                'name': 'default'
            },
            'spec': {
                'calicoNetwork': {
                    'ipPools': [{
                        'blockSize': 26,
                        'cidr': str(ctx.kubeadm[cluster_name].pod_subnet),
                        'encapsulation': 'IPIPCrossSubnet',
                        'natOutgoing': 'Enabled',
                        'nodeSelector': 'all()',
                    }]
                }
            }
        }
        _kubectl(ctx, config, ['create', '-f', '-'], stdin=yaml.dump(cr))

    else:
        raise RuntimeError(f'unrecognized pod_network {pnet}')

    try:
        yield

    finally:
        if pnet == 'flannel':
            _kubectl(ctx, config, [
                'delete',
                '-f',
                'https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml',
            ])

        elif pnet == 'calico':
            _kubectl(ctx, config, ['delete', 'installation', 'default'])
            _kubectl(ctx, config, [
                'delete', '-f',
                'https://docs.projectcalico.org/manifests/tigera-operator.yaml'
            ])
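
The flannel branch above follows a load_all / patch / dump_all round trip; a standalone sketch with a stand-in manifest instead of the real kube-flannel.yml:

import yaml

manifest = """\
apiVersion: v1
kind: ConfigMap
data:
  net-conf.json: '{"Network": "10.244.0.0/16"}'
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
"""

docs = list(yaml.load_all(manifest, Loader=yaml.SafeLoader))
for o in docs:
    if o.get('data', {}).get('net-conf.json'):
        o['data']['net-conf.json'] = o['data']['net-conf.json'].replace(
            '10.244.0.0/16', '192.168.0.0/16')   # example pod subnet
print(yaml.dump_all(docs))
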
Example No. 56
def _write(file, manifest):
    with open(file, "w") as f:
        f.write(yaml.dump_all(manifest))
Example No. 57
def main(args=None, input_format="yaml"):
    args, jq_args = parser.parse_known_args(args=args)
    for arg in jq_arg_spec:
        values = getattr(args, arg, None)
        if values is not None:
            for value_group in values:
                jq_args.append(arg)
                jq_args.extend(value_group)
    if getattr(args, "--from-file") or getattr(args, "-f"):
        args.files.insert(0, argparse.FileType()(args.jq_filter))
    else:
        jq_args.append(args.jq_filter)

    if sys.stdin.isatty() and not args.files:
        return parser.print_help()

    try:
        # Note: universal_newlines is just a way to induce subprocess to make stdin a text buffer and encode it for us
        jq = subprocess.Popen(["jq"] + jq_args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE
                              if args.yaml_output or args.xml_output else None,
                              universal_newlines=True)
    except OSError as e:
        msg = "yq: Error starting jq: {}: {}. Is jq installed and available on PATH?"
        parser.exit(msg.format(type(e).__name__, e))

    try:
        input_streams = args.files if args.files else [sys.stdin]
        if args.yaml_output or args.xml_output:
            # TODO: enable true streaming in this branch (with asyncio, asyncproc, a multi-shot variant of
            # subprocess.Popen._communicate, etc.)
            # See https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
            input_docs = []
            for input_stream in input_streams:
                if input_format == "yaml":
                    input_docs.extend(
                        yaml.load_all(input_stream, Loader=OrderedLoader))
                elif input_format == "xml":
                    import xmltodict
                    input_docs.append(
                        xmltodict.parse(input_stream.read(),
                                        disable_entities=True))
            input_payload = "\n".join(
                json.dumps(doc, cls=JSONDateTimeEncoder) for doc in input_docs)
            jq_out, jq_err = jq.communicate(input_payload)
            json_decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
            if args.yaml_output:
                yaml.dump_all(decode_docs(jq_out, json_decoder),
                              stream=sys.stdout,
                              Dumper=OrderedDumper,
                              width=args.width,
                              allow_unicode=True,
                              default_flow_style=False)
            elif args.xml_output:
                import xmltodict
                for doc in decode_docs(jq_out, json_decoder):
                    if not isinstance(doc, OrderedDict):
                        parser.exit(
                            "yq: Error converting JSON to XML: cannot represent non-object types at top level"
                        )
                    xmltodict.unparse(doc,
                                      output=sys.stdout,
                                      full_document=False,
                                      pretty=True,
                                      indent="  ")
                    sys.stdout.write(b"\n" if sys.version_info < (3,
                                                                  0) else "\n")
        else:
            if input_format == "yaml":
                for input_stream in input_streams:
                    for doc in yaml.load_all(input_stream,
                                             Loader=OrderedLoader):
                        json.dump(doc, jq.stdin, cls=JSONDateTimeEncoder)
                        jq.stdin.write("\n")
            elif input_format == "xml":
                import xmltodict
                for input_stream in input_streams:
                    json.dump(
                        xmltodict.parse(input_stream.read(),
                                        disable_entities=True), jq.stdin)
                    jq.stdin.write("\n")
            jq.stdin.close()
            jq.wait()
        for input_stream in input_streams:
            input_stream.close()
        exit(jq.returncode)
    except Exception as e:
        parser.exit("yq: Error running jq: {}: {}.".format(
            type(e).__name__, e))
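
The non-streaming branch above boils down to converting each YAML document into one line of JSON for jq; a minimal sketch of that conversion without the subprocess plumbing:

import json
import sys
import yaml

stream = """\
name: alpha
count: 1
---
name: beta
count: 2
"""

for doc in yaml.load_all(stream, Loader=yaml.SafeLoader):
    json.dump(doc, sys.stdout)       # one JSON document per YAML document
    sys.stdout.write("\n")
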
Example No. 58
def setup_pvs(ctx, config):
    """
    Create PVs for all scratch LVs and set up a trivial provisioner
    """
    log.info('Scanning for scratch devices')
    crs = []
    for remote in ctx.cluster.remotes.keys():
        ls = remote.read_file('/scratch_devs').decode(
            'utf-8').strip().splitlines()
        log.info(f'Scratch devices on {remote.shortname}: {ls}')
        for dev in ls:
            devname = dev.split('/')[-1].replace("_", "-")
            crs.append({
                'apiVersion': 'v1',
                'kind': 'PersistentVolume',
                'metadata': {
                    'name': f'{remote.shortname}-{devname}'
                },
                'spec': {
                    'volumeMode': 'Block',
                    'accessModes': ['ReadWriteOnce'],
                    'capacity': {
                        'storage': '100Gi'
                    },  # doesn't matter?
                    'persistentVolumeReclaimPolicy': 'Retain',
                    'storageClassName': 'scratch',
                    'local': {
                        'path': dev
                    },
                    'nodeAffinity': {
                        'required': {
                            'nodeSelectorTerms': [{
                                'matchExpressions': [{
                                    'key': 'kubernetes.io/hostname',
                                    'operator': 'In',
                                    'values': [remote.shortname]
                                }]
                            }]
                        }
                    }
                }
            })
            # overwriting first few MB is enough to make k8s happy
            remote.run(args=[
                'sudo', 'dd', 'if=/dev/zero', f'of={dev}', 'bs=1M', 'count=10'
            ])
    crs.append({
        'kind': 'StorageClass',
        'apiVersion': 'storage.k8s.io/v1',
        'metadata': {
            'name': 'scratch'
        },
        'provisioner': 'kubernetes.io/no-provisioner',
        'volumeBindingMode': 'WaitForFirstConsumer',
    })
    y = yaml.dump_all(crs)
    log.info('Creating PVs + StorageClass')
    log.debug(y)
    _kubectl(ctx, config, ['create', '-f', '-'], stdin=y)

    yield
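
As in the flannel case, the manifests are accumulated as plain dicts and serialized in one go; a compact sketch of that final step with made-up PV and StorageClass entries:

import yaml

crs = [
    {'apiVersion': 'v1', 'kind': 'PersistentVolume',
     'metadata': {'name': 'node1-sdb'}},          # made-up PV name
    {'apiVersion': 'storage.k8s.io/v1', 'kind': 'StorageClass',
     'metadata': {'name': 'scratch'},
     'provisioner': 'kubernetes.io/no-provisioner'},
]

# dump_all emits one stream with '---' between documents, which is what
# `kubectl create -f -` expects on stdin.
print(yaml.dump_all(crs))
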
Example No. 59
def ppyaml(*records, **kw):
    if kw: records += (kw,)
    print(yaml.dump_all(records, Dumper=PPDumper, **DEFAULT_OPTIONS))
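
PPDumper and DEFAULT_OPTIONS are not defined in the snippet; a hypothetical, runnable stand-in using yaml.SafeDumper and common pretty-printing options:

import yaml

DEFAULT_OPTIONS = {'default_flow_style': False, 'indent': 2}   # assumed values

def ppyaml(*records, **kw):
    if kw:
        records += (kw,)
    print(yaml.dump_all(records, Dumper=yaml.SafeDumper, **DEFAULT_OPTIONS))

ppyaml({'a': 1}, {'b': [2, 3]}, note='keyword args become a trailing document')
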
Example No. 60
def write_all(self, *context):
    with open(self.file, 'w', encoding='utf8') as f:
        return yaml.dump_all(list(context), f)
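
The method above belongs to a wrapper class that is not shown; a hypothetical minimal host class to make it runnable end to end, plus a read-back with load_all:

import yaml

class YamlWriter:                       # hypothetical host class
    def __init__(self, file):
        self.file = file

    def write_all(self, *context):
        with open(self.file, 'w', encoding='utf8') as f:
            return yaml.dump_all(list(context), f)

YamlWriter('context.yml').write_all({'step': 'build'}, {'step': 'deploy'})

with open('context.yml', encoding='utf8') as f:
    print(list(yaml.load_all(f, Loader=yaml.SafeLoader)))
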