예제 #1
0
    def do_list(self):
        """
        Usage: list

        Print one line per cached archive: 1-based index, size, title,
        upload status and (when known) certificate id or upload link.
        """
        # Mapping of filename -> archive document bundle from the ADF cache.
        archives = self.cache._for_adf('archives')

        if len(archives):
            # NOTE: Python 2 idioms (iteritems, print statement) throughout.
            for n, archive in enumerate(archives.iteritems()):
                archive = archive[1]  # drop the filename key, keep the docs
                status = "LOCAL"
                cert = ""
                if 'signature' in archive:
                    # A signature document implies the archive completed.
                    status = "COMPLETE"
                    cert = archive['signature'].aid
                elif 'links' in archive and archive['links'].upload:
                    status = "UPLOADED"
                    if self.verbose:
                        cert = archive['links'].upload
                title = archive['archive'].title
                size = archive_size(archive['archive'])
                print "{:03d} {:>6} {:>20} {:>10} {:>10}".format(
                    n+1, size, title, status, cert)
                if self.debug > 2:
                    # Dump the raw documents for deep debugging.
                    for doc in archive.itervalues():
                        pyaml.dump(doc, sys.stdout)
                    print
        else:
            print "No available archives."
예제 #2
0
 def writeConfig(self):
   """Persist the bot configuration (identity, modules, triggers, admins)
   to discord-config.conf as YAML."""
   log.info("Writing config...")
   # Flatten trigger objects into a plain {trigger: text} mapping.
   trigs = {t.trigger: t.text for t in self.triggers}
   log.info("Saving : {}".format(trigs))
   settings = {
     "bot": {
       "name": self.botname,
       "version": self.bot_version,
       "msg_prefix": self.bot_prefix,
       "cmd_prefix": self.cmd_prefix,
       "debug_logging": False
     },
     "modules": {
       "path": self.path,
       "load": self.init_modules
     },
     "triggers": trigs,
     "admins": self.admins,
     "ai": {"model_directory": "models"}
   }
   log.info(pyaml.dump(settings))
   with open("discord-config.conf", "w") as f:
     f.write(pyaml.dump(settings))
   log.info("Written")
예제 #3
0
	def test_str_style_pick(self):
		# Multi-line strings should default to literal-block ('|') style.
		default_dump = pyaml.dump(data_str_multiline)
		literal_dump = pyaml.dump(data_str_multiline, string_val_style='|')
		self.assertEqual(default_dump, literal_dump)
		plain_dump = pyaml.dump(data_str_multiline, string_val_style='plain')
		self.assertNotEqual(default_dump, plain_dump)
		# Forcing '|' applies to bare string values...
		self.assertTrue(pyaml.dump('waka waka', string_val_style='|').startswith('|-\n'))
		# ...but must not affect non-string scalars.
		self.assertEqual(pyaml.dump(dict(a=1), string_val_style='|'), 'a: 1\n')
예제 #4
0
 def write_settings_file(self, eps_values):
     """Record the clustering parameters next to the blotch file as YAML."""
     # Fold the scalar settings into the eps mapping before dumping.
     eps_values['min_samples'] = self.min_samples
     eps_values['only_core_samples'] = self.only_core_samples
     settings_path = self.pm.blotchfile.parent / 'clustering_settings.yaml'
     settings_path.parent.mkdir(exist_ok=True, parents=True)
     logger.info("Writing settings file at %s", str(settings_path))
     with settings_path.open('w') as out:
         pyaml.dump(eps_values, out)
예제 #5
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
 def test_str_style_pick(self):
     # Default rendering of a multi-line string equals the explicit '|' style.
     base = pyaml.dump(data_str_multiline)
     self.assertEqual(base, pyaml.dump(data_str_multiline, string_val_style="|"))
     # 'plain' style yields a different rendering.
     self.assertNotEqual(base, pyaml.dump(data_str_multiline, string_val_style="plain"))
     # Forced block style works on a plain string but not on non-strings.
     self.assertTrue(pyaml.dump("waka waka", string_val_style="|").startswith("|-\n"))
     self.assertEqual(pyaml.dump(dict(a=1), string_val_style="|"), "a: 1\n")
예제 #6
0
def type_diff(a, b):
    # Render both values as safe YAML and produce a line-by-line diff.
    # (Python 2: relies on the StringIO module.)
    def _yaml_lines(value):
        buf = StringIO.StringIO()
        pyaml.dump(value, buf, safe = True)
        return buf.getvalue().splitlines()
    return '\n'.join(difflib.Differ().compare(_yaml_lines(a), _yaml_lines(b)))
예제 #7
0
	def test_colons_in_strings(self):
		# Values and keys containing ':' must survive a dump/load round-trip.
		original = {'foo': ['bar:', 'baz', 'bar:bazzo', 'a: b'], 'foo:': 'yak:'}
		first_dump = pyaml.dump(original)
		reloaded = yaml.safe_load(first_dump)
		second_dump = pyaml.dump(reloaded)
		reloaded_again = yaml.safe_load(second_dump)
		self.assertEqual(original, reloaded)
		self.assertEqual(first_dump, second_dump)
		self.assertEqual(reloaded, reloaded_again)
예제 #8
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
 def test_colons_in_strings(self):
     # Colon-laden strings must round-trip dump -> load -> dump unchanged.
     src = {"foo": ["bar:", "baz", "bar:bazzo", "a: b"], "foo:": "yak:"}
     dump1 = pyaml.dump(src)
     loaded1 = yaml.safe_load(dump1)
     dump2 = pyaml.dump(loaded1)
     loaded2 = yaml.safe_load(dump2)
     self.assertEqual(src, loaded1)
     self.assertEqual(dump1, dump2)
     self.assertEqual(loaded1, loaded2)
예제 #9
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
 def test_empty_strings(self):
     # Empty strings as list items, keys and values must round-trip.
     src = {"key": ["", "stuff", "", "more"], "": "value", "k3": ""}
     dump1 = pyaml.dump(src)
     loaded1 = yaml.safe_load(dump1)
     dump2 = pyaml.dump(loaded1)
     loaded2 = yaml.safe_load(dump2)
     self.assertEqual(src, loaded1)
     self.assertEqual(dump1, dump2)
     self.assertEqual(loaded1, loaded2)
예제 #10
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
	def test_empty_strings(self):
		# Empty-string items, keys and values should survive round-trips.
		original = {'key': ['', 'stuff', '', 'more'], '': 'value', 'k3': ''}
		first_dump = pyaml.dump(original)
		reloaded = yaml.safe_load(first_dump)
		second_dump = pyaml.dump(reloaded)
		reloaded_again = yaml.safe_load(second_dump)
		self.assertEqual(original, reloaded)
		self.assertEqual(first_dump, second_dump)
		self.assertEqual(reloaded, reloaded_again)
예제 #11
0
    def dump(self, content, filepath, indent=4):
        """
        Dump settings content to filepath.

        Args:
            content (str): Settings content.
            filepath (str): Settings file location.

        Keyword Arguments:
            indent (int): Indentation width forwarded to the YAML dumper.
                Defaults to ``4``.
        """
        with open(filepath, 'w') as fp:
            pyaml.dump(content, dst=fp, indent=indent)
예제 #12
0
파일: base.py 프로젝트: robotice/robotice
    def dump(self, data, path):
        """dump data to file

        Serializes *data* as YAML to *path*, converting MergeableDict
        instances to a plain structure first.
        """

        with open(path, "w") as f:

            if isinstance(data, MergeableDict):
                # NOTE(review): convert_to is called with `data` both as the
                # bound instance and as the argument - presumably it returns
                # a plain-dict copy; confirm against MergeableDict's API.
                pyaml.dump(data.convert_to(data), f)
            else:
                pyaml.dump(data, f)
예제 #13
0
def main(argv=None):
	"""Read YAML from a path argument (or stdin) and pretty-print it to stdout.

	argv: optional argument list (for testing); defaults to sys.argv[1:].
	"""
	import argparse
	parser = argparse.ArgumentParser(
		description='Process and dump prettified YAML to stdout.')
	parser.add_argument('path', nargs='?', metavar='path',
		help='Path to YAML to read (default: use stdin).')
	# Bug fix: "argv or sys.argv[1:]" silently ignored an explicitly passed
	# empty list; only fall back to sys.argv when argv was not given at all.
	opts = parser.parse_args(argv if argv is not None else sys.argv[1:])

	src = open(opts.path) if opts.path else sys.stdin
	# HACK: yaml.load without an explicit Loader is unsafe on untrusted
	# input (and deprecated since PyYAML 5.1) - consider yaml.safe_load.
	try: data = yaml.load(src)
	finally: src.close()

	pyaml.dump(data, sys.stdout)
예제 #14
0
    def do_list(self):
        """
        Usage: list

        Print all cached archives sorted by creation time, one line each
        with index, size, title, status and certificate/upload link.
        """
        # (filename, archive-docs) pairs from the ADF cache (Python 2 API).
        archives = list(self.cache._for_adf('archives').iteritems())

        if len(archives):
            ui.print_archives_header()
            # Order by the creation timestamp of each archive document.
            bydate = sorted(archives,
                            key=compose(creation, operator.itemgetter(1)))
            for n, docs in enumerate(bydate):
                fname = docs[0]
                archive = docs[1]
                # Map the ArchiveStatus enum onto a display string; the
                # upload link doubles as the "cert" column when verbose.
                status = self.cache.archive_status(fname, archive)
                cert = ""
                if status == ArchiveStatus.Completed:
                    status = "COMPLETE"
                    cert = archive['signature'].aid
                elif status == ArchiveStatus.InProgress:
                    status = "IN PROGRESS"
                    if self.verbose:
                        cert = archive['links'].upload
                elif status == ArchiveStatus.Failed:
                    status = "FAILED"
                    if self.verbose:
                        cert = archive['links'].upload
                elif status == ArchiveStatus.Local:
                    status = "LOCAL"
                elif status == ArchiveStatus.Paused:
                    status = "PAUSED"
                else:
                    status = "UNKNOWN"
                title = archive['archive'].title
                size = archive_size(archive['archive'])

                ui.print_archives_line(archive={
                    'num': n+1,
                    'size': size,
                    'title': title,
                    'status': status,
                    'cert': cert,
                    'created': archive['archive'].meta.created,
                    'capsule': archive_capsule(archive) or '-'
                })
                if self.debug > 2:
                    # Dump the raw documents when debugging verbosely.
                    for doc in archive.itervalues():
                        pyaml.dump(doc, sys.stdout)
                    print
            print
        else:
            print "No available archives."
예제 #15
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
	def test_multiple_docs(self):
		# dump_all and dump(multiple_docs=True) must agree, and
		# explicit_start controls the leading '---' document marker.
		docs = [yaml.safe_load(large_yaml), dict(a=1, b=2, c=3)]
		dumped = pyaml.dump_all(docs, vspacing=[3, 2])
		self.assertTrue(dumped.startswith('---'))
		self.assertIn('---\n\n\n\na: 1\n\n\n\nb: 2\n\n\n\nc: 3\n', dumped)
		via_dump = pyaml.dump(docs, vspacing=[3, 2], multiple_docs=True)
		self.assertEqual(dumped, via_dump)
		via_dump = pyaml.dump(docs, vspacing=[3, 2])
		self.assertNotEqual(dumped, via_dump)
		no_start = pyaml.dump_all(docs, explicit_start=False)
		self.assertFalse(no_start.startswith('---'))
		self.assertNotEqual(dumped, no_start)
		dumped = pyaml.dump(docs, multiple_docs=True, explicit_start=False)
		self.assertEqual(dumped, no_start)
  def writeConfig(self):
    """Save the config, all currently loaded modules will be saved to init_modules"""
    self.log.info("Writing config...")

    # Collapse trigger objects into a serializable {trigger: text} mapping.
    trigs = {t.trigger: t.send_text for t in self.triggers}

    settings = {
        "bot": {
          "name": self.bot_name,
          "version": self.bot_version,
          "msg_prefix": self.msg_prefix,
          "cmd_prefix": self.command_prefix,
          "debug_logging": False
        },
        "modules": {
          "path": self.module_path,
          "load": self.loadedModules
        },
        "triggers": trigs,
        "ai": {
          "model_directory": "models"
        },
        "admins": self.admins
      }

    self.log.info(settings)

    # Persist the configuration in YAML form.
    with open("command-proc.conf", "w") as f:
      f.write(pyaml.dump(settings))
    self.log.info("Written")
예제 #17
0
	def test_pyyaml_params(self):
		# width/indent must be forwarded to the underlying pyyaml emitter.
		sample = {'foo': 'lorem ipsum ' * 30} # 300+ chars
		for width in (40, 80, 200):
			lines = pyaml.dump(sample, width=width, indent=10).splitlines()
			for lineno, line in enumerate(lines, 1):
				# Every line stays near the requested width, and all but
				# the last line are reasonably full.
				self.assertLess(len(line), width*1.2)
				if lineno != len(lines): self.assertGreater(len(line), width*0.8)
예제 #18
0
파일: node.py 프로젝트: mediatum/mediatum
 def to_yaml(self):
     """overwrite default DeclarativeBase.to_yaml method because we need to convert MutableDicts first
     """
     # Copy the MutableDict attribute containers into plain dicts so the
     # YAML dumper can serialize them.
     node_data = self.to_dict()
     for key in ("attrs", "system_attrs"):
         node_data[key] = dict(node_data[key])
     return pyaml.dump(node_data)
예제 #19
0
def write():  # pragma: no cover
    # Persist the module-level cache to its YAML file, creating it first.
    global _cache
    target = yaml_file_name()
    touch(target)
    with open(target, "w") as outfile:
        outfile.write(dump(_cache, vspacing=[1, 0]))
예제 #20
0
	def test_print(self):
		# pyaml.print / pyaml.p are aliases of pprint and must emit the
		# same bytes that dump(dst=bytes) produces.
		self.assertIs(pyaml.print, pyaml.pprint)
		self.assertIs(pyaml.print, pyaml.p)
		sink = io.BytesIO()
		expected = pyaml.dump(data_str_multiline, dst=bytes)
		pyaml.print(data_str_multiline, file=sink)
		self.assertEqual(expected, sink.getvalue())
예제 #21
0
 def write(self):
     """Serialize state to YAML and write it to self.filename, creating
     the parent directory first when needed."""
     d = self._serialize()
     s = pyaml.dump(d)
     p = os.path.dirname(self.filename)
     # Create the directory unconditionally and tolerate it already
     # existing: the previous exists()-then-makedirs() check raced with
     # concurrent writers (TOCTOU). Re-raise for any other OSError.
     try:
         os.makedirs(p)
     except OSError:
         if not os.path.isdir(p):
             raise
     with open(self.filename, 'w') as f:
         f.write(s)
     self._reset_changes()
예제 #22
0
def output_ja_text(data, wordtypes):
    # Tokenize Japanese text and dump word counts (for the requested
    # part-of-speech types) as YAML to stdout.
    # NOTE(review): `filter` here must be a project helper shadowing the
    # builtin (it is called with a single argument) - confirm its contract.
    textdata = filter(data)
    t = Tokenizer()
    tokens = t.tokenize(textdata)
    # Keep only surfaces whose primary POS tag is in wordtypes, sorted.
    words = sorted([token.surface
                    for token in tokens
                    if token.part_of_speech.split(',')[0] in wordtypes])
    dictionary = count_words(words)
    return pyaml.dump(dictionary, sys.stdout, vspacing=[0, 1])
예제 #23
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
 def test_single_dash_strings(self):
     # Strings made of / starting with dashes must not be confused with
     # YAML sequence markers on a round-trip.
     strip_seq_dash = lambda line: line.lstrip().lstrip("-").lstrip()
     src = {"key": ["-", "-stuff", "- -", "- more-", "more-", "--"]}
     dump1 = pyaml.dump(src)
     loaded1 = yaml.safe_load(dump1)
     dump2 = pyaml.dump(loaded1)
     loaded2 = yaml.safe_load(dump2)
     self.assertEqual(src, loaded1)
     self.assertEqual(dump1, dump2)
     self.assertEqual(loaded1, loaded2)
     dump_lines = dump1.splitlines()
     self.assertEqual(strip_seq_dash(dump_lines[2]), "-stuff")
     self.assertEqual(strip_seq_dash(dump_lines[5]), "more-")
     self.assertEqual(strip_seq_dash(dump_lines[6]), "--")
     # A bare "-" as a mapping value must round-trip as well.
     src = {"key": "-"}
     dump1 = pyaml.dump(src)
     loaded1 = yaml.safe_load(dump1)
     dump2 = pyaml.dump(loaded1)
     loaded2 = yaml.safe_load(dump2)
예제 #24
0
파일: dump.py 프로젝트: mk-fg/pretty-yaml
	def test_single_dash_strings(self):
		# Dash-only / dash-prefixed strings must survive dump/load cycles
		# without being parsed as sequence markers.
		strip_seq_dash = lambda line: line.lstrip().lstrip('-').lstrip()
		original = {'key': ['-', '-stuff', '- -', '- more-', 'more-', '--']}
		first_dump = pyaml.dump(original)
		reloaded = yaml.safe_load(first_dump)
		second_dump = pyaml.dump(reloaded)
		reloaded_again = yaml.safe_load(second_dump)
		self.assertEqual(original, reloaded)
		self.assertEqual(first_dump, second_dump)
		self.assertEqual(reloaded, reloaded_again)
		dump_lines = first_dump.splitlines()
		self.assertEqual(strip_seq_dash(dump_lines[2]), '-stuff')
		self.assertEqual(strip_seq_dash(dump_lines[5]), 'more-')
		self.assertEqual(strip_seq_dash(dump_lines[6]), '--')
		original = {'key': '-'}
		first_dump = pyaml.dump(original)
		reloaded = yaml.safe_load(first_dump)
		second_dump = pyaml.dump(reloaded)
		reloaded_again = yaml.safe_load(second_dump)
예제 #25
0
	def test_vspacing(self):
		"""Check that vspacing inserts blank lines at exact offsets.

		NOTE: the expected newline-offset list is brittle - it encodes the
		exact rendering of large_yaml (Python 2: dumps to `unicode`).
		"""
		data = yaml.safe_load(large_yaml)
		a = self.flatten(data)
		b = pyaml.dump(data, unicode, vspacing=[2, 1])
		# Vertical spacing must not change the parsed content.
		self.assertEqual(a, self.flatten(yaml.safe_load(b)))
		# Collect the offset of every newline in the dump.
		pos, pos_list = 0, list()
		while True:
			pos = b.find(u'\n', pos+1)
			if pos < 0: break
			pos_list.append(pos)
		self.assertEqual( pos_list,
			[ 12, 13, 25, 33, 53, 74, 89, 108, 158, 185, 265, 300, 345, 346, 356, 376, 400, 426, 427,
				460, 461, 462, 470, 508, 564, 603, 604, 605, 611, 612, 665, 666, 690, 691, 715, 748,
				777, 806, 807, 808, 817, 818, 832, 843, 878, 948, 949, 961, 974, 1009, 1032, 1052,
				1083, 1102, 1123, 1173, 1195, 1234, 1257, 1276, 1300, 1301, 1312, 1325, 1341, 1359,
				1374, 1375, 1383, 1397, 1413, 1431, 1432, 1453, 1454, 1467, 1468, 1485, 1486, 1487,
				1498, 1499, 1530, 1531, 1551, 1552, 1566, 1577, 1590, 1591, 1612, 1613, 1614, 1622,
				1623, 1638, 1648, 1649, 1657, 1658, 1688, 1689, 1698, 1720, 1730 ] )
		# Without vspacing there must be no blank lines at all.
		b = pyaml.dump(data, unicode)
		self.assertNotIn('\n\n', b)
예제 #26
0
    def do_list(self):
        """
        Usage: list

        Print one line per cached certificate: aid, size and title.
        """
        # Mapping of filename -> cert document bundle (Python 2 API).
        certs = self.cache._for_adf('certs')

        if len(certs):
            for n, cert in enumerate(certs.iteritems()):
                cert = cert[1]  # drop the filename key, keep the documents
                aid = cert['signature'].aid
                title = cert['archive'].title
                size = archive_size(cert['archive'])
                print "{:>10} {:>6} {:<}".format(
                    aid, size, title)
                if self.debug > 2:
                    # Raw document dump for deep debugging.
                    for doc in cert.itervalues():
                        pyaml.dump(doc, sys.stdout)
                    print
        else:
            print "No available certificates."
예제 #27
0
def write_file(path, data, token):
    # NOTE(review): this function looks unfinished - `branch`, `author`
    # and `committer` are computed but never used, and the payload is only
    # printed rather than committed; confirm intended behavior.
    try:
        branch = local_repo.create_branch(token, get_latest_commit())
    except Exception as e:
        # TODO more specific exception
        branch = repo.lookup_branch('master')

    author = committer = Signature('Joe S', '*****@*****.**')
    print(path)
    # Render the payload as YAML using folded ('>') string style.
    print(pretty_yaml.dump(data, string_val_style='>'))

    return token
예제 #28
0
    def test_monkey_patch(self):
        """Dump with the default, safe and force_embed options and verify
        that pyaml's monkey-patch bookkeeping and the `orig_*` attributes
        on the yaml serializer/emitter classes track each call.

        NOTE: the original body had broken, inconsistent indentation
        (statements dedented out of the method); reindented here.
        """
        d = {'a': {'b': 'c'}}

        # test default
        pyaml.dump(d)
        self.assertFalse(pyaml.patched_values['safe'])
        self.assertFalse(pyaml.patched_values['force_embed'])
        self.assertTrue(pyaml.patched_values['patched'])

        self.assertTrue(hasattr(yaml.serializer.Serializer, 'orig_anchor_node'))
        self.assertTrue(hasattr(yaml.serializer.Serializer, 'orig_serialize_node'))
        self.assertTrue(hasattr(yaml.emitter.Emitter, 'orig_expect_block_sequence'))
        self.assertTrue(hasattr(yaml.emitter.Emitter, 'orig_expect_block_sequence_item'))

        # test resetting
        pyaml.dump(d, safe=True)
        self.assertTrue(pyaml.patched_values['safe'])
        self.assertFalse(pyaml.patched_values['force_embed'])
        self.assertTrue(pyaml.patched_values['patched'])

        self.assertFalse(hasattr(yaml.serializer.Serializer, 'orig_anchor_node'))
        self.assertFalse(hasattr(yaml.serializer.Serializer, 'orig_serialize_node'))
        self.assertFalse(hasattr(yaml.emitter.Emitter, 'orig_expect_block_sequence'))
        self.assertFalse(hasattr(yaml.emitter.Emitter, 'orig_expect_block_sequence_item'))

        # test patching again
        pyaml.dump(d, force_embed=True)
        self.assertFalse(pyaml.patched_values['safe'])
        self.assertTrue(pyaml.patched_values['force_embed'])
        self.assertTrue(pyaml.patched_values['patched'])

        self.assertTrue(hasattr(yaml.serializer.Serializer, 'orig_anchor_node'))
        self.assertTrue(hasattr(yaml.serializer.Serializer, 'orig_serialize_node'))
        self.assertTrue(hasattr(yaml.emitter.Emitter, 'orig_expect_block_sequence'))
        self.assertTrue(hasattr(yaml.emitter.Emitter, 'orig_expect_block_sequence_item'))
예제 #29
0
 def _print_dict_ip(d, header=None, format='table'):
     # Render the mapping in the requested output format; unknown formats
     # fall through and return the input unchanged.
     # (`format` shadows the builtin, but the name is part of the API.)
     if format == "json":
         return json.dumps(d, indent=4)
     if format == "yaml":
         return pyaml.dump(d)
     if format == "table":
         return dict_printer(d,
                             order=["network", "version", "addr"],
                             output="table",
                             sort_keys=True)
     return d
예제 #30
0
    def yaml(self):
        # Serialize this service as YAML, omitting keys whose value is None.
        logs_list = [lm.yaml() for lm in self.logs] if self.logs is not None else None

        data = OrderedDict([
            ('service_name', self.name),
            ('team_name', self.team_name),
            ('port', self.port),
            ('healthcheck_url', self.healthcheck_url),
            ('logs', logs_list),
            ('environments', [e.yaml() for e in self.environments])
        ])
        populated = OrderedDict((k, v) for k, v in data.items() if v is not None)
        return pyaml.dump(populated)
예제 #31
0
    def convertGraph(self, graph):
        """Convert graph.

        Translates an FBP graph payload into CiS YAML, validates it
        against the CiS schema, and returns the YAML as a raw response.

        Raises:
            RestException: 400 when the normalized YAML fails validation.
        """
        cisgraph = fbpToCis(graph['content'])

        # Normalize everything to str (recursively) before serializing.
        cisgraph = as_str(cisgraph, recurse=True, allow_pass=True)
        
        # Write to temp file and validate
        tmpfile = tempfile.NamedTemporaryFile(suffix="yml", prefix="cis",
                                              delete=False)
        yaml.safe_dump(cisgraph, tmpfile, default_flow_style=False)
        yml_prep = prep_yaml(tmpfile.name)
        os.remove(tmpfile.name)

        s = get_schema()
        yml_norm = s.normalize(yml_prep)
        try:
            s.validate(yml_norm)
        except BaseException as e:
            # Surface validation problems to the API caller as a 400.
            print(e)
            raise RestException('Invalid graph %s', 400, e)

        self.setRawResponse()
        return pyaml.dump(cisgraph)
예제 #32
0
 def auth(self):
     """Build a temporary kubeconfig from self.metadata and initialize
     the pykube HTTP client.

     Returns:
         bool: True on success, False when the API endpoint is unreachable.
     """
     status = True
     try:
         # Write the kubeconfig to a mkstemp file, load it with pykube,
         # then remove the file again.
         config_file, filename = tempfile.mkstemp()
         config_content = {
             'apiVersion':
             'v1',
             'clusters': [{
                 'cluster': self.metadata['cluster'],
                 'name': self.name,
             }],
             'contexts': [{
                 'context': {
                     'cluster': self.name,
                     'user': self.name,
                 },
                 'name': self.name,
             }],
             'current-context':
             self.name,
             'kind':
             'Config',
             'preferences': {},
             'users': [{
                 'name': self.name,
                 'user': self.metadata['user']
             }]
         }
         os.write(config_file, pyaml.dump(config_content).encode())
         os.close(config_file)
         self.config_wrapper = pykube.KubeConfig.from_file(filename)
         os.remove(filename)
         self.api = pykube.HTTPClient(self.config_wrapper)
     except URLError as exception:
         # Unreachable endpoint: report failure instead of raising.
         logger.error(exception)
         status = False
     return status
예제 #33
0
def to_yaml(value):
    """
    Output value as YAML string
    """
    rendered = pyaml.dump(value)
    return rendered
예제 #34
0
def map(function, input_list):
    """
    map operation of Couler

    Runs `function` once (with the first element) to register its Argo
    template, then adds a step that fans that template out over all of
    input_list via Argo's `withItems`.

    NOTE(review): shadows the builtin `map`; renaming would break callers.
    """
    # Enforce the function to run and lock to add into step
    if callable(function):
        states._update_steps_lock = False
        # TODO (terrytangyuan): Support functions with multiple arguments.
        para = input_list[0]
        inner = function(para)
        if inner is None:
            raise SyntaxError("require function return value")
        states._update_steps_lock = True
    else:
        raise TypeError("require loop over a function to run")

    inner_dict = output.extract_step_return(inner)
    template_name = inner_dict["name"]
    inner_step = Step(name=inner_dict["id"], template=template_name)

    parameters = []
    items_param_name = "%s-para-name" % template_name
    items_param_dict = {"name": items_param_name}
    function_template = states.workflow.get_template(template_name)
    function_template_dict = function_template.to_dict()

    if "resource" in function_template_dict:
        # Update the template with the new dynamic `metadata.name`.
        manifest_dict = yaml.safe_load(
            function_template_dict["resource"]["manifest"]
        )
        manifest_dict["metadata"]["name"] = (
            "'{{inputs.parameters.%s}}'" % items_param_name
        )
        function_template = states.workflow.get_template(template_name)
        function_template.manifest = pyaml.dump(manifest_dict)
        # Append this items parameter to input parameters in the template
        function_template.args.append(items_param_dict)
        states.workflow.add_template(function_template)
        input_parameters = [items_param_dict]
    else:
        input_parameters = function_template_dict["inputs"]["parameters"]

    # One `{{item.<name>}}` placeholder per template input parameter.
    for para_name in input_parameters:
        parameters.append(
            {
                "name": para_name["name"],
                "value": '"{{item.%s}}"' % para_name["name"],
            }
        )

    inner_step.arguments = {"parameters": parameters}

    # Build the withItems list: one dict of parameter values per element.
    with_items = []
    for para_values in input_list:
        item = {}
        if not isinstance(para_values, list):
            para_values = [para_values]

        for j in range(len(input_parameters)):
            para_name = input_parameters[j]["name"]
            item[para_name] = para_values[j]

        with_items.append(item)

    inner_step.with_items = with_items
    states.workflow.add_step(inner_dict["id"], inner_step)

    return inner_step
예제 #35
0
def run_job(
    manifest,
    success_condition,
    failure_condition,
    timeout=None,
    retry=None,
    step_name=None,
    pool=None,
    env=None,
    set_owner_reference=True,
):
    """
    Create a k8s job. For example, the pi-tmpl template in
    https://github.com/argoproj/argo/blob/master/examples/k8s-jobs.yaml
    :param manifest: YAML specification of the job to be created.
    :param success_condition: expression for verifying job success.
    :param failure_condition: expression for verifying job failure.
    :param timeout: To limit the elapsed time for a workflow in seconds.
    :param retry: retry policy forwarded to the Job template, if any.
    :param step_name: is only used while developing functions of step zoo.
    :param pool: resource pool forwarded to the Job template, if any.
    :param env: environmental parameter with a dict types, e.g., {"OS_ENV_1": "OS_ENV_value"}  # noqa: E501
    :param set_owner_reference: Whether to set the workflow as the job's owner reference.
        If `True`, the job will be deleted once the workflow is deleted.
    :return: output
    """
    if manifest is None:
        raise ValueError("Input manifest can not be null")

    # Derive the template name from the call site unless step_name overrides.
    func_name, caller_line = utils.invocation_location()
    func_name = (utils.argo_safe_name(step_name)
                 if step_name is not None else func_name)

    args = []
    # Register the Job template only once per function name.
    if states.workflow.get_template(func_name) is None:
        if states._outputs_tmp is not None and env is not None:
            env["inferred_outputs"] = states._outputs_tmp

        # Generate the inputs for the manifest template
        envs, parameters, args = utils.generate_parameters_run_job(env)

        # update the env
        if env is not None:
            manifest_dict = yaml.safe_load(manifest)
            manifest_dict["spec"]["env"] = envs

            # TODO this is used to pass the test cases,
            # should be fixed in a better way
            if ("labels" in manifest_dict["metadata"] and "argo.step.owner"
                    in manifest_dict["metadata"]["labels"]):
                manifest_dict["metadata"]["labels"][
                    "argo.step.owner"] = "'{{pod.name}}'"

            manifest = pyaml.dump(manifest_dict)

        template = Job(
            name=func_name,
            args=args,
            action="create",
            manifest=manifest,
            set_owner_reference=set_owner_reference,
            success_condition=success_condition,
            failure_condition=failure_condition,
            timeout=timeout,
            retry=retry,
            pool=pool,
        )
        states.workflow.add_template(template)

    step_name = step_update_utils.update_step(func_name, args, step_name,
                                              caller_line)

    # return job name and job uid for reference
    rets = _job_output(step_name, func_name)
    states._steps_outputs[step_name] = rets

    # Mirror the step into the protobuf representation for later export.
    pb_step = proto_repr.step_repr(  # noqa: F841
        step_name=step_name,
        tmpl_name=func_name,
        image=None,
        source=None,
        script_output=None,
        input=None,
        output=rets,
        manifest=manifest,
        success_cond=success_condition,
        failure_cond=failure_condition,
    )

    return rets
예제 #36
0
def train(
    image=None,
    command="",
    secret=None,
    no_chief=True,
    chief_image=None,
    chief_resources=None,
    chief_restart_policy="Never",
    chief_command=None,
    num_ps=0,
    ps_image=None,
    ps_resources=None,
    ps_restart_policy="Never",
    ps_command=None,
    num_workers=0,
    worker_image=None,
    worker_resources=None,
    worker_restart_policy="Never",
    worker_command=None,
    clean_pod_policy="Running",
    timeout=None,
):
    """Build a TFJob manifest (optional Chief, PS and Worker replica specs)
    and submit it via couler.run_job.

    Per-replica image/command fall back to the top-level `image`/`command`
    when not given. Success/failure are judged from the Worker replica
    statuses.
    """
    name = "tf-train-%s" % str(uuid.uuid4())
    success_condition = ("status.replicaStatuses.Worker.succeeded == %s" %
                         num_workers)
    failure_condition = "status.replicaStatuses.Worker.failed > 0"

    # Start from the shared manifest template and fill in job identity.
    manifest = copy.deepcopy(manifest_template)
    manifest["metadata"].update({"name": name})
    manifest["spec"].update({"cleanPodPolicy": clean_pod_policy})

    if not no_chief:
        chief_image = chief_image if chief_image else image
        chief_command = chief_command if chief_command else command

        chief_pod = _generate_pod_spec(
            pod_template,
            container_template,
            allowed_pod_types=pod_types,
            pod_type="Chief",
            image=chief_image,
            replicas=1,
            secret=secret,
            command=chief_command,
            resources=chief_resources,
            restart_policy=chief_restart_policy,
        )

        manifest["spec"]["tfReplicaSpecs"].update({"Chief": chief_pod})

    if num_ps > 0:
        ps_image = ps_image if ps_image else image
        ps_command = ps_command if ps_command else command

        ps_pod = _generate_pod_spec(
            pod_template,
            container_template,
            allowed_pod_types=pod_types,
            pod_type="PS",
            image=ps_image,
            replicas=num_ps,
            secret=secret,
            command=ps_command,
            resources=ps_resources,
            restart_policy=ps_restart_policy,
        )

        manifest["spec"]["tfReplicaSpecs"].update({"PS": ps_pod})

    if num_workers > 0:
        worker_image = worker_image if worker_image else image
        worker_command = worker_command if worker_command else command

        worker_pod = _generate_pod_spec(
            pod_template,
            container_template,
            allowed_pod_types=pod_types,
            pod_type="Worker",
            image=worker_image,
            replicas=num_workers,
            secret=secret,
            command=worker_command,
            resources=worker_resources,
            restart_policy=worker_restart_policy,
        )

        manifest["spec"]["tfReplicaSpecs"].update({"Worker": worker_pod})

    step_name, _ = utils.invocation_location()

    couler.run_job(
        manifest=pyaml.dump(manifest),
        success_condition=success_condition,
        failure_condition=failure_condition,
        step_name=step_name,
        timeout=timeout,
    )
예제 #37
0
 def test_print_args(self):
     # pyaml.print(*args) must emit the same bytes as dumping the tuple.
     sink = io.BytesIO()
     args = 1, 2, 3
     expected = pyaml.dump(args, dst=bytes)
     pyaml.print(*args, file=sink)
     self.assertEqual(expected, sink.getvalue())
예제 #38
0
 def test_force_embed(self):
     # With force_embed no anchors (&) or aliases (*) may appear in the
     # output, in both default and safe modes. (Python 2: `unicode` dst.)
     embedded = pyaml.dump(data, unicode, force_embed=True)
     embedded_safe = pyaml.dump(data, unicode, safe=True, force_embed=True)
     for char, dump in it.product('*&', [embedded, embedded_safe]):
         self.assertNotIn(char, dump)
예제 #39
0
 def test_simple(self):
     # A dump / safe_load round-trip must preserve the flattened data.
     expected = self.flatten(data)
     dumped = pyaml.dump(data, unicode)
     self.assertEqual(expected, self.flatten(yaml.safe_load(dumped)))
예제 #40
0
    model.load_state_dict(torch.load(experiment.model.state_dict))
# NOTE(review): the indented line above continues an `if` that begins
# before this chunk - the fragment is not runnable in isolation.
model.to(experiment.session.device)

# Optimizer
# Instantiate the optimizer class named in the config with model params.
optimizer: torch.optim.Optimizer = import_(
    experiment.optimizer.fn)(model.parameters(), *experiment.optimizer.args,
                             **experiment.optimizer.kwargs)
if 'state_dict' in experiment.optimizer:
    optimizer.load_state_dict(torch.load(experiment.optimizer.state_dict))

# Logger
# Create a TensorBoard writer only if logging events are configured, and
# record the full experiment config as indented YAML text.
if len(experiment.session.log.when) > 0:
    logger = SummaryWriter(experiment.session.log.folder)
    logger.add_text(
        'Experiment',
        textwrap.indent(pyaml.dump(experiment, safe=True, sort_dicts=False),
                        '    '), experiment.samples)
else:
    logger = None

# Saver
# Checkpointing is likewise optional; save the initial state on epoch 0.
if len(experiment.session.checkpoint.when) > 0:
    saver = Saver(experiment.session.checkpoint.folder)
    if experiment.epoch == 0:
        saver.save_experiment(experiment, suffix=f'e{experiment.epoch:04d}')
else:
    saver = None
# endregion

# Datasets and dataloaders
dataset = SolubilityDataset(experiment.session.data.path)
예제 #41
0
    def save(self) -> 'ConfigurationLoader':
        with self._config_file.open('w') as fp:
            pyaml.dump(self._config, fp)

        return self
예제 #42
0

dataloader_train, dataloader_val = get_dataloaders(ex, session)
# Record dataset sizes in the session for later reporting.
session['misc']['samples'] = {
    'train': len(dataloader_train.dataset),
    'val': len(dataloader_val.dataset),
}
session['misc']['targets'] = {
    'train': len(dataloader_train.dataset.datasets),
    'val': len(dataloader_val.dataset.datasets),
}

# On a fresh run, checkpoint the initial state and log the full config.
if ex['completed_epochs'] == 0:
    saver.save_experiment(ex, epoch=ex['completed_epochs'], samples=ex['samples'])
    logger.add_text('Experiment',
                    textwrap.indent(pyaml.dump(ex, safe=True, sort_dicts=False, force_embed=True), '    '),
                    global_step=ex['samples'])
# endregion


# region Training
def training_function(trainer, batch: DecoyBatch):
    # Single training step: forward pass plus per-target loss terms.
    # NOTE(review): truncated in this chunk - the body continues past the
    # visible lines.
    batch = batch.to(session['device'])
    results = model(batch)

    loss_local_lddt = torch.tensor(0., device=session['device'])
    loss_global_gdtts = torch.tensor(0., device=session['device'])
    loss_rank_gdtts = torch.tensor(0., device=session['device'])

    if ex['loss']['local_lddt']['weight'] > 0:
        node_mask = torch.isfinite(batch.lddt)
def train(args):
    """Transfer-learning entry point.

    Loads a pretrained RL model, builds a fresh model from the CLI arguments,
    then runs a custom loop in which the *pretrained* model collects rollouts
    while the *new* model is trained on them.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments. Must provide `pretrained_output`, `alg`,
        `task_name`, `policy`, `tensorboard_log`, `total_timesteps`,
        `model_name`, and the policy/architecture options used below.

    Fixes over the original:
    - episode statistics were read from `model.ep_info_buffer`, which is never
      filled (rollouts are collected by `pretrained_model`), so the
      ep_rew/ep_len metrics were silently never logged;
    - the long chain of `alg_kwargs.pop(...)` calls is replaced by a loop.
    """
    cuda_availability = torch.cuda.is_available()
    print('\n*************************')
    print('`CUDA` available: {}'.format(cuda_availability))
    print('Device specified: {}'.format(args.device))
    print('*************************\n')

    # load the config of the trained model:
    with open(args.pretrained_output / "train_arguments.yaml") as yaml_data:
        pretrain_arguments = yaml.load(yaml_data, Loader=yaml.FullLoader)

    # Strip the file extension from the stored model name before loading.
    pretrained_model = algorithms[pretrain_arguments["alg"]].load(
        args.pretrained_output /
        "".join(pretrain_arguments["model_name"].split(".")[:-1]),
        device='cpu')

    # Prepare tensorboard logging
    log_name = '{}_{}_{}'.format(args.experiment_name, args.task_name,
                                 datetime.now().strftime('%d-%m_%H-%M-%S'))
    run_dir = args.tensorboard_log + "/" + log_name
    Path(run_dir).mkdir(parents=True, exist_ok=True)
    callbacks = []
    callbacks.append(LoggingCallback(logpath=run_dir))

    # Persist a copy of the arguments for reproducibility; the open file
    # object in `config` is replaced by its name so it can be serialized.
    train_args = copy.copy(args)
    train_args.config = train_args.config.name
    with open(os.path.join(run_dir, 'train_arguments.yaml'), 'w') as fp:
        pyaml.dump(train_args.__dict__, fp)

    assert args.task_name == pretrain_arguments[
        "task_name"], "Envs must match for transfer learning"

    # Create the vectorized environment
    n_envs = train_args.n_envs  # Number of processes to use
    env = make_vec_env(args.task_name, n_envs=n_envs)

    # Resolve layer-class names in the net architecture to actual classes,
    # trying torch.nn first, then nerve_net_conv, then module globals.
    if "GnnPolicy" in args.policy and args.net_arch is not None:
        for net_arch_part in args.net_arch.keys():
            for i, (layer_class_name,
                    layer_size) in enumerate(args.net_arch[net_arch_part]):
                if hasattr(nn, layer_class_name):
                    args.net_arch[net_arch_part][i] = (getattr(
                        nn, layer_class_name), layer_size)
                elif hasattr(nerve_net_conv, layer_class_name):
                    args.net_arch[net_arch_part][i] = (getattr(
                        nerve_net_conv, layer_class_name), layer_size)
                else:

                    def get_class(x):
                        return globals()[x]

                    c = get_class(layer_size)
                    assert c is not None, f"Unkown layer class '{layer_class_name}'"
                    args.net_arch[net_arch_part][i] = (c, layer_size)

    with open(os.path.join(run_dir, 'net_arch.txt'), 'w') as fp:
        fp.write(str(args.net_arch))

    # Create the model
    alg_class = algorithms[args.alg]
    policy_kwargs = dict()
    if args.net_arch is not None:
        policy_kwargs['net_arch'] = args.net_arch
    if args.activation_fn is not None:
        policy_kwargs["activation_fn"] = activation_functions[
            args.activation_fn]
    if "GnnPolicy" in args.policy:
        policy_kwargs["mlp_extractor_kwargs"] = {
            "task_name": args.task_name,
            'device': args.device,
            'gnn_for_values': args.gnn_for_values,
            'controller_option': controller_option[args.controller_option],
            'embedding_option': embedding_option[args.embedding_option],
            'root_option': root_option[args.root_option],
            'drop_body_nodes': args.drop_body_nodes,
            'use_sibling_relations': args.use_sibling_relations,
            'xml_assets_path': args.xml_assets_path,
            'policy_readout_mode': args.policy_readout_mode
        }

    # Forward the remaining CLI arguments to the algorithm constructor,
    # dropping everything already consumed above or not an algorithm kwarg.
    alg_kwargs = args.__dict__.copy()
    for consumed_key in (
            "config", "task_name", "policy", "activation_fn",
            "gnn_for_values", "embedding_option", "controller_option",
            "root_option", "xml_assets_path", "alg", "net_arch",
            "experiment_name", "job_dir", "total_timesteps", "model_name",
            "n_envs", "drop_body_nodes", "use_sibling_relations",
            "experiment_name_suffix", "policy_readout_mode",
            "pretrained_output"):
        alg_kwargs.pop(consumed_key, None)

    model = alg_class(
        args.policy,
        env,
        verbose=1,
        policy_kwargs=policy_kwargs,
        **alg_kwargs)

    # PPO Learn parameters:
    total_timesteps = args.total_timesteps
    callback = callbacks
    log_interval = 1
    eval_env = make_vec_env(args.task_name, n_envs=1)
    eval_freq = 1e4
    n_eval_episodes = 3
    tb_log_name = log_name
    eval_log_path = None
    reset_num_timesteps = True

    #################################
    ##### Custom Transfer Learn #####
    #################################

    iteration = 0
    total_timesteps, callback = model._setup_learn(
        total_timesteps, eval_env, callback, eval_freq, n_eval_episodes,
        eval_log_path, reset_num_timesteps, tb_log_name)

    ### setup pretrained model ###
    # The pretrained model drives rollout collection, so it needs fresh
    # counters/buffers and the live observation state of the new model's env.
    pretrained_model.num_timesteps = 0
    pretrained_model._episode_num = 0
    pretrained_model._total_timesteps = total_timesteps
    pretrained_model.ep_info_buffer = deque(maxlen=100)
    pretrained_model.ep_success_buffer = deque(maxlen=100)
    pretrained_model._last_obs = model.env.reset()
    pretrained_model._last_dones = np.zeros((model.env.num_envs, ), dtype=bool)

    callback.on_training_start(locals(), globals())

    while pretrained_model.num_timesteps < total_timesteps:

        # The pretrained policy acts in the environment; transitions land in
        # the new model's rollout buffer.
        continue_training = pretrained_model.collect_rollouts(
            model.env,
            callback,
            model.rollout_buffer,
            n_rollout_steps=model.n_steps)

        if continue_training is False:
            break

        iteration += 1
        model._update_current_progress_remaining(
            pretrained_model.num_timesteps, total_timesteps)

        # Display training infos
        if log_interval is not None and iteration % log_interval == 0:
            fps = int(pretrained_model.num_timesteps /
                      (time.time() - model.start_time))
            logger.record("time/iterations", iteration, exclude="tensorboard")
            # Fix: episode stats accumulate on the *pretrained* model (it runs
            # collect_rollouts); model.ep_info_buffer stays empty and
            # previously suppressed these metrics entirely.
            ep_info_buffer = pretrained_model.ep_info_buffer
            if len(ep_info_buffer) > 0 and len(ep_info_buffer[0]) > 0:
                logger.record(
                    "rollout/ep_rew_mean",
                    safe_mean([ep_info["r"] for ep_info in ep_info_buffer]))
                logger.record(
                    "rollout/ep_len_mean",
                    safe_mean([ep_info["l"] for ep_info in ep_info_buffer]))
            logger.record("time/fps", fps)
            logger.record("time/time_elapsed",
                          int(time.time() - model.start_time),
                          exclude="tensorboard")
            logger.record("time/total_timesteps",
                          pretrained_model.num_timesteps,
                          exclude="tensorboard")
            logger.dump(step=pretrained_model.num_timesteps)

        # Gradient update on the new model using the rollouts collected above.
        model.train()

    callback.on_training_end()

    model.save(
        os.path.join(args.tensorboard_log + "/" + log_name, args.model_name))
예제 #44
0
 def test_ordereddict(self):
     """OrderedDict keys must be dumped in insertion order (here 9..0)."""
     ordered = OrderedDict((key, '') for key in reversed(range(10)))
     dumped_lines = pyaml.dump(ordered).splitlines()
     self.assertEqual(dumped_lines, list(reversed(sorted(dumped_lines))))
예제 #45
0
def create_docker_compose_cli(jsonData):
    """Generate ./network/docker-compose-cli.yaml from the bundled template.

    Reads the template compose file, fills in networks, named volumes, one
    service per orderer/peer endpoint, and rewrites the CLI service's
    templated org1 placeholders with the first peer org's real name/MSP ID,
    then writes the result with pyaml.

    Parameters
    ----------
    jsonData : dict
        Network description; must contain
        jsonData["organizations"]["ordererOrg"]["url"] (iterable of URLs) and
        jsonData["organizations"]["peerOrgs"] (list of dicts with "url",
        "count", "name", "mspID").

    Fixes over the original: bare `yaml.load(f)` (deprecated/unsafe in
    PyYAML >= 5) replaced with `yaml.safe_load`; redundant nested-dict
    scaffolding (initialize to {} then immediately overwrite) removed.
    """
    peer_name = "peer"
    # TODO: Update to take value from json
    network_name = ["byfn"]

    with open("./network/template/docker-compose-cli-template.yaml") as f:
        # The template is plain YAML (no custom tags), so safe_load suffices.
        list_doc = yaml.safe_load(f)

    #### Networks ####
    list_doc["version"] = "'" + str(2) + "'"
    list_doc["networks"] = dict.fromkeys(network_name)

    #### Volumes ####
    # One named volume per orderer URL and per peer endpoint peer<N>.<org-url>.
    org_final_name = []
    for orderer in jsonData["organizations"]["ordererOrg"]["url"]:
        org_final_name.append(orderer)

    for peer_org in jsonData["organizations"]["peerOrgs"]:
        for peer in range(int(peer_org["count"])):
            org_final_name.append(peer_name + str(peer) + "." + peer_org["url"])

    list_doc["volumes"] = dict.fromkeys(org_final_name)

    #### Services ####
    # Each node service simply extends the shared base compose file.
    services = {
        org: {
            "extends": {
                "service": org,
                "file": "base/docker-compose-base.yaml",
            },
            "container_name": org,
            "networks": network_name,
        }
        for org in org_final_name
    }
    list_doc["services"].update(services)

    #### Services CLI ####
    # Replace templated org1 placeholders in the CLI environment with the
    # first peer org's actual name and MSP ID (str.replace is a no-op when
    # the placeholder is absent, so no membership test is needed).
    first_org = jsonData["organizations"]["peerOrgs"][0]
    environment = list_doc["services"]["cli"]["environment"]
    for i, env in enumerate(environment):
        env = env.replace("org1", first_org["name"])
        env = env.replace("Org1MSP", first_org["mspID"])
        environment[i] = env

    list_doc["services"]["cli"]["depends_on"] = org_final_name
    list_doc["services"]["cli"]["networks"] = network_name

    with open("./network/docker-compose-cli.yaml", "w+") as f:
        pyaml.dump(list_doc, f, vspacing=[2, 1])
예제 #46
0
def train(
    image=None,
    command="",
    secret=None,
    master_image=None,
    master_resources=None,
    master_restart_policy="Never",
    master_command=None,
    num_workers=0,
    worker_image=None,
    worker_resources=None,
    worker_restart_policy="Never",
    worker_command=None,
    clean_pod_policy="Running",
    timeout=None,
):
    """Submit a Kubernetes PyTorchJob via couler.

    Builds the job manifest from the module-level templates: always a single
    Master replica, plus `num_workers` Worker replicas when requested.
    Master/worker image and command fall back to the generic `image`/`command`
    arguments when not given explicitly.
    """
    job_name = "pytorch-train-%s" % str(uuid.uuid4())
    success_condition = "status.pytorchReplicaStatuses.Worker.succeeded > 0"
    failure_condition = "status.pytorchReplicaStatuses.Worker.failed > 0"

    manifest = copy.deepcopy(manifest_template)
    manifest["metadata"].update({"name": job_name})
    manifest["spec"].update({"cleanPodPolicy": clean_pod_policy})

    # Master pod: exactly one replica, falling back to the shared defaults.
    chief_pod = _generate_pod_spec(
        pod_template,
        container_template,
        allowed_pod_types=pod_types,
        pod_type="Master",
        image=master_image or image,
        replicas=1,
        secret=secret,
        command=master_command or command,
        resources=master_resources,
        restart_policy=master_restart_policy,
    )
    manifest["spec"]["pytorchReplicaSpecs"].update({"Master": chief_pod})

    # Worker pods are optional.
    if num_workers > 0:
        worker_pod = _generate_pod_spec(
            pod_template,
            container_template,
            allowed_pod_types=pod_types,
            pod_type="Worker",
            image=worker_image or image,
            replicas=num_workers,
            secret=secret,
            command=worker_command or command,
            resources=worker_resources,
            restart_policy=worker_restart_policy,
        )
        manifest["spec"]["pytorchReplicaSpecs"].update({"Worker": worker_pod})

    step_name, _ = utils.invocation_location()

    couler.run_job(
        manifest=pyaml.dump(manifest),
        success_condition=success_condition,
        failure_condition=failure_condition,
        step_name=step_name,
        timeout=timeout,
    )
예제 #47
0
 def __init__(self, problems):
     """Build an exception whose message embeds *problems* rendered as YAML."""
     details = pyaml.dump(problems)
     message = 'Organization structure is invalid. Problems:\n%s' % details
     Exception.__init__(self, message)
예제 #48
0
        },
        "validation": {
            "accuracy": val_accuracy,
            "loss": val_loss
        }
    }


if __name__ == '__main__':
    # CLI: all hyper-parameters are required; only --device has a default.
    cli = argparse.ArgumentParser()
    cli.add_argument("--runpath", type=Path, required=True)
    cli.add_argument("--datapath", type=Path, required=True)
    cli.add_argument("--batch_size", type=int, required=True)
    cli.add_argument("--learning_rate", type=float, required=True)
    cli.add_argument("--weight_decay", type=float, required=True)
    cli.add_argument("--number_epochs", type=int, required=True)
    cli.add_argument("--number_workers", type=int, required=True)
    cli.add_argument("--device", type=str, default="cpu")
    args = cli.parse_args()

    # Unique run directory name: hyper-parameters plus a random suffix.
    random_hash = ''.join(
        random.choices(string.ascii_uppercase + string.digits, k=10))
    args.logdir = args.runpath / f"bs{args.batch_size}_lr{args.learning_rate}_wd{args.weight_decay}_{random_hash}"

    metrics = train(args)

    # Persist configuration and results together for later inspection.
    with open(args.logdir / "final_results.yaml", "w") as outfile:
        pyaml.dump({"config": vars(args), "results": metrics}, outfile)
예제 #49
0
def main():
    """Entry point: train/validate/test a predicate-classification model.

    Parses the configuration, builds datasets/model/optimizer, wires up three
    ignite engines (trainer, validator, tester) with logging, checkpointing
    and early-stopping handlers, then runs training and (optionally) testing.
    Handler registration order below is significant — do not reorder.
    """
    # region Setup
    conf = parse_args()
    setup_seeds(conf.session.seed)
    tb_logger, tb_img_logger, json_logger = setup_all_loggers(conf)
    # Log the resolved config as YAML so the exact run settings are recorded.
    logger.info("Parsed configuration:\n" +
                pyaml.dump(OmegaConf.to_container(conf),
                           safe=True,
                           sort_dicts=False,
                           force_embed=True))

    # region Predicate classification engines
    datasets, dataset_metadata = build_datasets(conf.dataset)
    dataloaders = build_dataloaders(conf, datasets)

    model = build_model(conf.model,
                        dataset_metadata["train"]).to(conf.session.device)
    criterion = PredicateClassificationCriterion(conf.losses)

    # Trainer, validator and tester share the same model and criterion.
    pred_class_trainer = Trainer(pred_class_training_step, conf)
    pred_class_trainer.model = model
    pred_class_trainer.criterion = criterion
    pred_class_trainer.optimizer, scheduler = build_optimizer_and_scheduler(
        conf.optimizer, pred_class_trainer.model)

    pred_class_validator = Validator(pred_class_validation_step, conf)
    pred_class_validator.model = model
    pred_class_validator.criterion = criterion

    pred_class_tester = Validator(pred_class_validation_step, conf)
    pred_class_tester.model = model
    pred_class_tester.criterion = criterion
    # endregion

    # Optionally resume model/optimizer/scheduler/trainer state from disk.
    if "resume" in conf:
        checkpoint = Path(conf.resume.checkpoint).expanduser().resolve()
        logger.debug(f"Resuming checkpoint from {checkpoint}")
        Checkpoint.load_objects(
            {
                "model": pred_class_trainer.model,
                "optimizer": pred_class_trainer.optimizer,
                "scheduler": scheduler,
                "trainer": pred_class_trainer,
            },
            checkpoint=torch.load(checkpoint,
                                  map_location=conf.session.device),
        )
        logger.info(f"Resumed from {checkpoint}, "
                    f"epoch {pred_class_trainer.state.epoch}, "
                    f"samples {pred_class_trainer.global_step()}")
    # endregion

    # region Predicate classification training callbacks
    def increment_samples(trainer: Trainer):
        # Track total samples seen; used as the global step for logging.
        images = trainer.state.batch[0]
        trainer.state.samples += len(images)

    pred_class_trainer.add_event_handler(Events.ITERATION_COMPLETED,
                                         increment_samples)

    ProgressBar(persist=True, desc="Pred class train").attach(
        pred_class_trainer, output_transform=itemgetter("losses"))

    # Log the learning rate at the start of every epoch.
    tb_logger.attach(
        pred_class_trainer,
        OptimizerParamsHandler(
            pred_class_trainer.optimizer,
            param_name="lr",
            tag="z",
            global_step_transform=pred_class_trainer.global_step,
        ),
        Events.EPOCH_STARTED,
    )

    # Per-batch mAP and recall@k metrics during training.
    pred_class_trainer.add_event_handler(
        Events.ITERATION_COMPLETED,
        PredicateClassificationMeanAveragePrecisionBatch())
    pred_class_trainer.add_event_handler(Events.ITERATION_COMPLETED,
                                         RecallAtBatch(sizes=(5, 10)))

    tb_logger.attach(
        pred_class_trainer,
        OutputHandler(
            "train",
            output_transform=lambda o: {
                **o["losses"],
                "pc/mAP": o["pc/mAP"].mean().item(),
                **{k: r.mean().item()
                   for k, r in o["recalls"].items()},
            },
            global_step_transform=pred_class_trainer.global_step,
        ),
        Events.ITERATION_COMPLETED,
    )

    pred_class_trainer.add_event_handler(
        Events.EPOCH_COMPLETED,
        log_metrics,
        "Predicate classification training",
        "train",
        json_logger=None,
        tb_logger=tb_logger,
        global_step_fn=pred_class_trainer.global_step,
    )
    # Log a grid of example predictions at the end of each training epoch.
    pred_class_trainer.add_event_handler(
        Events.EPOCH_COMPLETED,
        PredicateClassificationLogger(
            grid=(2, 3),
            tag="train",
            logger=tb_img_logger.writer,
            metadata=dataset_metadata["train"],
            global_step_fn=pred_class_trainer.global_step,
        ),
    )
    tb_logger.attach(
        pred_class_trainer,
        EpochHandler(
            pred_class_trainer,
            tag="z",
            global_step_transform=pred_class_trainer.global_step,
        ),
        Events.EPOCH_COMPLETED,
    )

    # Validation runs after every training epoch.
    pred_class_trainer.add_event_handler(
        Events.EPOCH_COMPLETED,
        lambda _: pred_class_validator.run(dataloaders["val"]))
    # endregion

    # region Predicate classification validation callbacks
    ProgressBar(persist=True,
                desc="Pred class val").attach(pred_class_validator)

    # Only attach loss averages for loss terms with a positive weight.
    if conf.losses["bce"]["weight"] > 0:
        Average(output_transform=lambda o: o["losses"]["loss/bce"]).attach(
            pred_class_validator, "loss/bce")
    if conf.losses["rank"]["weight"] > 0:
        Average(output_transform=lambda o: o["losses"]["loss/rank"]).attach(
            pred_class_validator, "loss/rank")
    Average(output_transform=lambda o: o["losses"]["loss/total"]).attach(
        pred_class_validator, "loss/total")

    PredicateClassificationMeanAveragePrecisionEpoch(
        itemgetter("target", "output")).attach(pred_class_validator, "pc/mAP")
    RecallAtEpoch((5, 10),
                  itemgetter("target",
                             "output")).attach(pred_class_validator,
                                               "pc/recall_at")

    # The LR scheduler steps on the validation total loss.
    pred_class_validator.add_event_handler(
        Events.EPOCH_COMPLETED,
        lambda val_engine: scheduler.step(val_engine.state.metrics["loss/total"
                                                                   ]),
    )
    pred_class_validator.add_event_handler(
        Events.EPOCH_COMPLETED,
        log_metrics,
        "Predicate classification validation",
        "val",
        json_logger,
        tb_logger,
        pred_class_trainer.global_step,
    )
    pred_class_validator.add_event_handler(
        Events.EPOCH_COMPLETED,
        PredicateClassificationLogger(
            grid=(2, 3),
            tag="val",
            logger=tb_img_logger.writer,
            metadata=dataset_metadata["val"],
            global_step_fn=pred_class_trainer.global_step,
        ),
    )
    # Early stopping monitors (negated) validation total loss.
    pred_class_validator.add_event_handler(
        Events.COMPLETED,
        EarlyStopping(
            patience=conf.session.early_stopping.patience,
            score_function=lambda val_engine: -val_engine.state.metrics[
                "loss/total"],
            trainer=pred_class_trainer,
        ),
    )
    # Checkpoints are ranked by validation recall@5.
    pred_class_validator.add_event_handler(
        Events.COMPLETED,
        Checkpoint(
            {
                "model": pred_class_trainer.model,
                "optimizer": pred_class_trainer.optimizer,
                "scheduler": scheduler,
                "trainer": pred_class_trainer,
            },
            DiskSaver(
                Path(conf.checkpoint.folder).expanduser().resolve() /
                conf.fullname),
            score_function=lambda val_engine: val_engine.state.metrics[
                "pc/recall_at_5"],
            score_name="pc_recall_at_5",
            n_saved=conf.checkpoint.keep,
            global_step_transform=pred_class_trainer.global_step,
        ),
    )
    # endregion

    if "test" in conf.dataset:
        # region Predicate classification testing callbacks
        if conf.losses["bce"]["weight"] > 0:
            Average(
                output_transform=lambda o: o["losses"]["loss/bce"],
                device=conf.session.device,
            ).attach(pred_class_tester, "loss/bce")
        if conf.losses["rank"]["weight"] > 0:
            Average(
                output_transform=lambda o: o["losses"]["loss/rank"],
                device=conf.session.device,
            ).attach(pred_class_tester, "loss/rank")
        Average(
            output_transform=lambda o: o["losses"]["loss/total"],
            device=conf.session.device,
        ).attach(pred_class_tester, "loss/total")

        PredicateClassificationMeanAveragePrecisionEpoch(
            itemgetter("target", "output")).attach(pred_class_tester, "pc/mAP")
        RecallAtEpoch((5, 10),
                      itemgetter("target",
                                 "output")).attach(pred_class_tester,
                                                   "pc/recall_at")

        ProgressBar(persist=True,
                    desc="Pred class test").attach(pred_class_tester)

        pred_class_tester.add_event_handler(
            Events.EPOCH_COMPLETED,
            log_metrics,
            "Predicate classification test",
            "test",
            json_logger,
            tb_logger,
            pred_class_trainer.global_step,
        )
        pred_class_tester.add_event_handler(
            Events.EPOCH_COMPLETED,
            PredicateClassificationLogger(
                grid=(2, 3),
                tag="test",
                logger=tb_img_logger.writer,
                metadata=dataset_metadata["test"],
                global_step_fn=pred_class_trainer.global_step,
            ),
        )
        # endregion

    # region Run
    log_effective_config(conf, pred_class_trainer, tb_logger)
    # Skip training entirely when resuming with test_only set.
    if not ("resume" in conf and conf.resume.test_only):
        max_epochs = conf.session.max_epochs
        if "resume" in conf:
            # Budget max_epochs *additional* epochs on top of the resumed ones.
            max_epochs += pred_class_trainer.state.epoch
        pred_class_trainer.run(
            dataloaders["train"],
            max_epochs=max_epochs,
            seed=conf.session.seed,
            epoch_length=len(dataloaders["train"]),
        )

    if "test" in conf.dataset:
        pred_class_tester.run(dataloaders["test"])

    add_session_end(tb_logger.writer, "SUCCESS")
    tb_logger.close()
    tb_img_logger.close()
예제 #50
0
import json
import pyaml
import glob
import collections
import os

# Convert every JSON file in ../data to a YAML file with the same stem,
# preserving the original key order of each document.
os.chdir(os.path.join(os.pardir, 'data'))

for json_name in glob.glob('*.json'):
    stem = os.path.splitext(json_name)[0]

    with open(stem + ".json", "r") as src:
        document = json.load(src, object_pairs_hook=collections.OrderedDict)

    with open(stem + '.yaml', 'w') as dst:
        pyaml.dump(document, dst, safe=True)
        print(document)
예제 #51
0
 def test_dst(self):
     """dump() returns None for stream targets and an instance of the
     requested text type for str/unicode targets (Python 2 code)."""
     stream = io.BytesIO()
     self.assertIs(pyaml.dump(data, stream), None)
     for text_type in (str, unicode):
         self.assertIsInstance(pyaml.dump(data, text_type), text_type)
예제 #52
0
 def render_to_stream(self, stream):
     """Write every rendered object to *stream* as a YAML document, each
     preceded by a blue '--- # File: <name>' marker line."""
     for fname, obj in self.render():
         header = '---\n# File: {}'.format(fname)
         click.secho(header, file=stream, fg='blue')
         pyaml.dump(obj, stream, safe=True)
예제 #53
0
 def test_ids(self):
     """Anchors must be derived from mapping keys, not bare '&id00'-style
     numeric names (Python 2 code: dumps to `unicode`)."""
     b = pyaml.dump(data, unicode)
     self.assertNotIn('&id00', b)
     self.assertIn('query_dump_clone: *query_dump_clone', b)
     self.assertIn("'id в уникоде': &ids_-_id2_v_unikode",
                   b)  # kinda bug - should be just "id"
 def test_config_from_file(self, key, attr, value, tmpdir):
     """A value written to a YAML config file is exposed by Configuration
     under the corresponding attribute."""
     config_path = tmpdir.join("config.yaml")
     with config_path.open("w") as stream:
         pyaml.dump({key: value}, stream, safe=True, default_style='"')
     loaded = Configuration(["--config-file", config_path.strpath])
     assert getattr(loaded, attr) == value
예제 #55
0
 def test_str_long(self):
     b = pyaml.dump(data_str_long, unicode)
     self.assertNotIn('"', b)
     self.assertNotIn("'", b)
     self.assertEqual(len(b.splitlines()), 1)
예제 #56
0
def form_submissions(hashid, format=None):
    """Flask view (Python 2): list or export a form's submissions.

    Without *format*: render HTML (or JSON for API clients). With
    format='json' or 'csv': return a downloadable export. Requires an
    upgraded account and that the current user controls the form.
    """
    if not current_user.upgraded:
        return jsonerror(402, {'error': "Please upgrade your account."})

    form = Form.get_with_hashid(hashid)

    # for/else: the else branch runs only if no controller matched the user.
    for cont in form.controllers:
        if cont.id == current_user.id: break
    else:
        if request_wants_json():
            return jsonerror(403, {'error': "You do not control this form."})
        else:
            return redirect(url_for('dashboard'))

    if not format:
        # normal request.
        if request_wants_json():
            return jsonify({
                'host': form.host,
                'email': form.email,
                'submissions': [dict(s.data, date=s.submitted_at.isoformat()) for s in form.submissions]
            })
        else:
            # Collect the union of field names across submissions, minus
            # internal keys, so the HTML table has a stable column set.
            fields = set()
            for s in form.submissions:
                fields.update(s.data.keys())
            fields -= EXCLUDE_KEYS

            submissions = []
            for sub in form.submissions:
                for f in fields:
                    value = sub.data.get(f, '')
                    typ = type(value)
                    # Non-string values are rendered as YAML for display.
                    sub.data[f] = value if typ is unicode or typ is str \
                                  else pyaml.dump(value, safe=True)
                submissions.append(sub)

            return render_template('forms/submissions.html',
                form=form,
                fields=sorted(fields),
                submissions=submissions
            )
    elif format:
        # an export request, format can be json or csv
        if format == 'json':
            return Response(
                json.dumps({
                    'host': form.host,
                    'email': form.email,
                    'submissions': [dict(s.data, date=s.submitted_at.isoformat()) for s in form.submissions]
                }, sort_keys=True, indent=2),
                mimetype='application/json',
                headers={
                    'Content-Disposition': 'attachment; filename=form-%s-submissions-%s.json' \
                                % (hashid, datetime.datetime.now().isoformat().split('.')[0])
                }
            )
        elif format == 'csv':
            out = io.BytesIO()
            # CSV columns: 'date' first, then all field names sorted.
            fieldnames = set(field for sub in form.submissions for field in sub.data.keys())
            fieldnames = ['date'] + sorted(fieldnames)

            w = csv.DictWriter(out, fieldnames=fieldnames, encoding='utf-8')
            w.writeheader()
            for sub in form.submissions:
                w.writerow(dict(sub.data, date=sub.submitted_at.isoformat()))

            return Response(
                out.getvalue(),
                mimetype='text/csv',
                headers={
                    'Content-Disposition': 'attachment; filename=form-%s-submissions-%s.csv' \
                                % (hashid, datetime.datetime.now().isoformat().split('.')[0])
                }
            )
예제 #57
0
 def test_namedtuple(self):
     """Dumped namedtuples keep their declared field order (y, x, z)."""
     SampleTuple = namedtuple('TestTuple', 'y x z')
     dumped = pyaml.dump(SampleTuple(1, 2, 3))
     # namedtuple order was preserved
     self.assertEqual(dumped, u'y: 1\nx: 2\nz: 3\n')
예제 #58
0
def main(PULL=0, VARS="test_vars.yml"):
    """Run the pull/protein build step, persist its config to *VARS* as
    YAML, then build the edition from that vars file.

    Fix: the original wrote via a bare `open(...).write(...)`, leaking the
    file handle and relying on the garbage collector to flush it before
    build_edition reads the file; a context manager guarantees close/flush.
    """
    build_config = pull_and_protein.main(PULL=PULL, )
    with open(VARS, 'w') as vars_file:
        vars_file.write(pyaml.dump(build_config))
    build_edition.main(varsfile=VARS)
예제 #59
0
def to_yaml(self):
    """Serialize this object's dict representation to a YAML string."""
    as_dict = self.to_dict()
    return pyaml.dump(as_dict)
예제 #60
0
        dn = os.path.basename(dirname)
        directory[dn] = []

        if dirnames:
            for d in dirnames:
                directory[dn].append(dir_to_dict(path=os.path.join(path, d)))

            for f in filenames:
                directory[dn].append(f)
        else:
            directory[dn] = filenames

        return directory

# Resolve the directory to scan: cwd by default, or a single path argument.
if len(sys.argv) == 1:
    p = os.getcwd()
elif len(sys.argv) == 2:
    p = os.path.abspath(sys.argv[1])
else:
    # Fix: exit with an error status instead of falling through with `p`
    # unbound (previously surfaced below as a NameError swallowed by the
    # broad except and printed as a confusing message).
    sys.stderr.write("Unexpected argument {}\n".format(sys.argv[2:]))
    sys.exit(1)

# Best-effort CLI: any failure is printed rather than raised.
try:
    with open("{}.yaml".format(os.path.basename(p)), "w") as f:
        try:
            yaml.dump(dir_to_dict(path=p), f, default_flow_style=False)
            print("Dictionary written to {}.yaml".format(os.path.basename(p)))
        except Exception as e:
            print(e)
except Exception as e:
    print(e)