Example No. 1
    def test_20_dump_and_multi_load(self):
        a = dict(name="a", a=1, b=dict(b=[0, 1], c="C"))
        b = dict(a=2, b=dict(b=[1, 2, 3, 4, 5], d="D"))

        a_path = os.path.join(self.workdir, "a.json")
        b_path = os.path.join(self.workdir, "b.json")

        A.dump(a, a_path)
        self.assertTrue(os.path.exists(a_path))

        A.dump(b, b_path)
        self.assertTrue(os.path.exists(b_path))

        a1 = A.multi_load([a_path, b_path], merge=A.MS_DICTS)

        self.assertEquals(a1["name"],   a["name"])
        self.assertEquals(a1["a"],      b["a"])
        self.assertEquals(a1["b"]["b"], b["b"]["b"])
        self.assertEquals(a1["b"]["c"], a["b"]["c"])
        self.assertEquals(a1["b"]["d"], b["b"]["d"])

        a2 = A.multi_load([a_path, b_path], merge=A.MS_DICTS_AND_LISTS)

        self.assertEquals(a2["name"],   a["name"])
        self.assertEquals(a2["a"],      b["a"])
        self.assertEquals(a2["b"]["b"], [0, 1, 2, 3, 4, 5])
        self.assertEquals(a2["b"]["c"], a["b"]["c"])
        self.assertEquals(a2["b"]["d"], b["b"]["d"])
Example No. 2
def main():
    p = option_parser()
    (options, args) = p.parse_args()

    RU.init_log(DEBUG if options.verbose else INFO)

    if not args:
        p.print_usage()
        sys.exit(1)

    # host_prof_specs = args[0]

    root = os.path.abspath(options.root)
    all_rpms = [x["name"] for x in RR.list_installed_rpms(root)]

    (excludes, removes) = make_excl_packages_list(options.ppaths,
                                                  options.gpaths)
    remove_candidates = RU.select_from_list(removes, all_rpms)

    if options.use_dnf:
        (excludes, xs) = RED.compute_removed(remove_candidates, root, excludes)
    else:
        xs = RR.compute_removed(remove_candidates, root, excludes=excludes)

    data = dict(removed=xs, excludes=excludes)

    if options.output:
        anyconfig.dump(dict(data=data, ), options.output, forced_type="yaml")
    else:
        res = anyconfig.dumps(dict(data=data, ), forced_type="yaml")
        print(res)
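
This example writes the result as YAML either to a file or to a string. forced_type appears to be the older spelling of the format hint; current anyconfig releases use ac_parser, as later examples here do. A small sketch of the same dump/dumps pair, assuming a YAML backend such as PyYAML is installed:

import anyconfig

data = dict(data=dict(removed=["foo", "bar"], excludes=["baz"]))

# Serialize to a string for printing.
print(anyconfig.dumps(data, ac_parser="yaml"))

# Or write straight to a file; with a ".yml" suffix the hint is optional.
anyconfig.dump(data, "removed.yml", ac_parser="yaml")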
Example No. 3
    def test_20_dump_and_multi_load(self):
        obja = dict(name="a", a=1, b=dict(b=[0, 1], c="C"))
        objb = dict(a=2, b=dict(b=[1, 2, 3, 4, 5], d="D"))

        a_path = os.path.join(self.workdir, "a.json")
        b_path = os.path.join(self.workdir, "b.json")

        TT.dump(obja, a_path)
        self.assertTrue(os.path.exists(a_path))

        TT.dump(objb, b_path)
        self.assertTrue(os.path.exists(b_path))

        obja1 = TT.multi_load([a_path, b_path], ac_merge=TT.MS_DICTS)

        self.assertEqual(obja1["name"], obja["name"])
        self.assertEqual(obja1["a"], objb["a"])
        self.assertEqual(obja1["b"]["b"], objb["b"]["b"])
        self.assertEqual(obja1["b"]["c"], obja["b"]["c"])
        self.assertEqual(obja1["b"]["d"], objb["b"]["d"])

        obja2 = TT.multi_load([a_path, b_path], ac_merge=TT.MS_DICTS_AND_LISTS)

        self.assertEqual(obja2["name"], obja["name"])
        self.assertEqual(obja2["a"], objb["a"])
        self.assertEqual(obja2["b"]["b"], [0, 1, 2, 3, 4, 5])
        self.assertEqual(obja2["b"]["c"], obja["b"]["c"])
        self.assertEqual(obja2["b"]["d"], objb["b"]["d"])
Example No. 4
def main():
    requests.packages.urllib3.disable_warnings()
    defaultConfig = {
        'url': 'https://stream.watsonplatform.net/text-to-speech/api',
        'user': '******',
        'password': '******',
        'voice': 'en-US_AllisonVoice',
        'chunk': 2048
    }
    home = os.path.expanduser("~")
    defaultConfigFile = home + '/.config-tts-watson.yml'
    parser = argparse.ArgumentParser(
        description='Text to speech using watson')

    parser.add_argument('-f', action='store', dest='configFile', default=defaultConfigFile,
                        help='config file',
                        required=False)
    parser.add_argument('text_to_transform', action='store', nargs='+')
    args = parser.parse_args()
    conf = anyconfig.container(defaultConfig)
    if not os.path.isfile(args.configFile):
        print "Config file '" + args.configFile + "' doesn't exist."
        print "Creating it ..."
        user = raw_input("Watson user: "******"Watson password: "******" ".join(args.text_to_transform))
Example No. 5
def main():
    defaultConfig = {
        'url': 'https://gateway.watsonplatform.net/dialog/api',
        'user': '******',
        'password': '******',
    }
    home = os.path.expanduser("~")
    defaultConfigFile = home + '/.config-dialog-watson.yml'
    parser = argparse.ArgumentParser(
        description='Text to speech using watson')

    parser.add_argument('-f', action='store', dest='configFile', default=defaultConfigFile,
                        help='config file',
                        required=False)
    parser.add_argument('dialog_file', action='store', nargs=1)
    parser.add_argument('-n', '--name', dest='dialog_name', action='store', help='Dialog name', required=True)
    parser.add_argument('--clean', dest='clean', action='store_true')
    args = parser.parse_args()
    dialog_file = "".join(args.dialog_file)
    conf = anyconfig.container(defaultConfig)
    if not os.path.isfile(args.configFile):
        print "Config file '" + args.configFile + "' doesn't exist."
        print "Creating it ..."
        user = raw_input("Watson user: "******"Watson password: "******"".join(args.dialog_name), bconf.url,
                          os.path.dirname(dialog_file) + "/dialog_id_file.txt")
    if args.clean:
        watsonClient.clean_dialogs()

    resp = watsonClient.start_dialog()
    print ''
    print bcolors.WARNING + "Watson: " + bcolors.OKBLUE + "\n".join(resp.response) + bcolors.ENDC
    while True:
        userResponse = raw_input(bcolors.WARNING + "You: " + bcolors.OKGREEN)
        resp = watsonClient.converse(userResponse)
        print bcolors.WARNING + "Watson: " + bcolors.OKBLUE + "\n".join(resp.response) + bcolors.ENDC
        if userResponse == "bye":
            break
    print ""
    print "Your profile:"
    for name, value in watsonClient.get_profile().get_data().iteritems():
        print "\t" + name + ": " + value
if __name__ == "__main__":
    main()
Example No. 6
def print_outputs(ps, format=None, output=None):
    if format:
        out = open(output, 'w') if output else sys.stdout
        for p in ps:
            print(format.format(**p), file=out)
    else:
        if output:
            anyconfig.dump(sorted(process_datetime_g(ps),
                                  key=operator.itemgetter("name")),
                           output)
        else:
            for p in ps:
                print(pprint.pformat(p), file=sys.stdout)
Example No. 7
 def fake_run_config(request, fake_root_dir):
     config_path = str(fake_root_dir / request.param)
     anyconfig.dump(
         {
             "run": {
                 "pipeline": "pipeline1",
                 "tag": ["tag1", "tag2"],
                 "node_names": ["node1", "node2"],
             }
         },
         config_path,
     )
     return config_path
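
The fixture above depends on indirect parametrization to supply request.param. A hypothetical companion test (the parameter value and assertions below are illustrative) could read the dumped mapping back with anyconfig.load:

import anyconfig
import pytest

@pytest.mark.parametrize("fake_run_config", ["config.yml"], indirect=True)
def test_run_config_contents(fake_run_config):
    cfg = anyconfig.load(fake_run_config)
    assert cfg["run"]["pipeline"] == "pipeline1"
    assert cfg["run"]["node_names"] == ["node1", "node2"]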
Example No. 8
    def test_10_dump_and_load(self):
        obj = dict(name="a", a=1, b=dict(b=[1, 2], c="C"))
        obj_path = os.path.join(self.workdir, "a.json")

        TT.dump(obj, obj_path)
        self.assertTrue(os.path.exists(obj_path))

        obj1 = TT.load(obj_path)

        self.assertEqual(obj1["name"], obj["name"])
        self.assertEqual(obj1["a"], obj["a"])
        self.assertEqual(obj1["b"]["b"], obj["b"]["b"])
        self.assertEqual(obj1["b"]["c"], obj["b"]["c"])
Example No. 9
def gen_nni_config(common_nni_config_file: Union[str, Path], new_config_path: Union[str, Path], kedro_pipeline: str, project_ctx: KedroContext, optimize_mode: str = 'minimize', hp_tunner: str = 'TPE', early_stopping: Optional[str] = 'Medianstop', command_opts: Union[str, Sequence[str]] = '') -> bool:
    """ Generates NNI configuration file in order to run an NNI Hyperparameters Search on given pipeline (by default, new `kedro_pipeline`-specific NNI config file will be saved in the same directory as `common_nni_config_file` and will be named after its respective pipeline name, See `handlle_nni_nas_trial`).
    Fills missing NNI configuration fields from defaults/commons NNI YAML config (won't change any existing values from given NNI YAML configuration but appends missing parameters with some defaults).
    It means given NNI YAML config file should only contain parameters which are common to any NNI HP/NAS API usage in DeepCV and this function will populate other parameters according to a specific training pipeline.
    NOTE: `gen_nni_config` wont overwrite any existing NNI configuration named after the same pipeline (i.e., if '{kedro_pipeline}_nni_config.yml' already exists, this function wont do anything).
    .. See [NNI HP API documentation for more details on NNI YAML configuration file](https://nni.readthedocs.io/en/latest/hyperparameter_tune.html)
    """
    common_nni_config_file = Path(common_nni_config_file)
    new_config_path = Path(new_config_path)  # both may be given as str or Path

    if not common_nni_config_file.exists():
        msg = f'Error: Couldn\'t find provided NNI config defaults/template/commons at: "{common_nni_config_file}"'
        logging.error(msg)
        raise FileNotFoundError(msg)
    if new_config_path.exists():
        logging.warn(f'Warning: `deepcv.meta.nni_tools.gen_nni_config` called but YAML NNI config file already exists for this pipeline ("{kedro_pipeline}"), '
                     f'"{new_config_path.name}" YAML config wont be modified, you may want to delete it before if you need to update it.{NL}'
                     f'Also note that you can customize "{common_nni_config_file}" config if you need to change NNI common behavior for any NNI HP/NAS API usage in DeepCV (Hyperparameter searches and Neural Architecture Searches based on NNI); All NNI configuration are generated from this template/common/default YAML config. See also "deepcv.meta.nni_tools.gen_nni_config" function for more details about NNI config handling in DeepCV.')
        return False

    experiment, trial = get_nni_or_mlflow_experiment_and_trial()
    nni_config = anyconfig.load(common_nni_config_file, ac_parser='yaml')

    nni_config['authorName'] = getattr(nni_config, 'authorName', __author__)
    nni_config['experimentName'] = getattr(nni_config, 'experimentName', (experiment if experiment not in (None, '') else f'{project_ctx.project_name}_{kedro_pipeline}'.lower()))
    nni_config['searchSpacePath'] = getattr(nni_config, 'searchSpacePath', common_nni_config_file.parent / f'hp_search_spaces/{kedro_pipeline}_search_space.json')
    nni_config['trialConcurrency'] = getattr(nni_config, 'trialConcurrency', 1)
    nni_config['maxTrialNum'] = getattr(nni_config, 'maxTrialNum', -1)
    nni_config['trainingServicePlatform'] = getattr(nni_config, 'trainingServicePlatform', 'local')

    trial_conf = nni_config['trial'] if 'trial' in nni_config else dict()
    trial_conf['command'] = getattr(trial_conf, 'command', f'kedro run --pipeline={kedro_pipeline} {command_opts if isinstance(command_opts, str) else " ".join(command_opts)}')
    trial_conf['codeDir'] = getattr(trial_conf, 'codeDir', common_nni_config_file / r'../../src/deepcv')
    trial_conf['gpuNum'] = getattr(trial_conf, 'gpuNum', 0)
    nni_config['trial'] = trial_conf

    tuner_conf = nni_config['tuner'] if 'tuner' in nni_config else dict()
    tuner_conf['builtinTunerName'] = getattr(tuner_conf, 'builtinTunerName', hp_tunner)
    tuner_conf['classArgs'] = getattr(tuner_conf, 'classArgs', {'optimize_mode': optimize_mode})
    nni_config['tuner'] = tuner_conf

    if early_stopping is not None:
        assesor_conf = nni_config['assessor'] if 'assessor' in nni_config else dict()
        assesor_conf['builtinAssessorName'] = getattr(assesor_conf, 'builtinAssessorName', early_stopping)
        assesor_conf['classArgs'] = getattr(assesor_conf, 'classArgs', {'optimize_mode': optimize_mode, 'start_step': 8})
        nni_config['assessor'] = assesor_conf

    # Save final NNI configuration as a new YAML file named after its respective training pipeline
    anyconfig.dump(nni_config, new_config_path, ac_parser='yaml')
    return True
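
The core of the function is a populate-missing-keys-then-dump flow: load the common YAML, keep whatever it already defines, fill in pipeline-specific defaults, and write the result as a new YAML file. A condensed sketch of that flow with hypothetical paths and values (it uses dict.setdefault for the fill-in step and assumes a YAML backend is installed):

import anyconfig

common_cfg = "conf/base/nni_config.yml"                  # hypothetical
new_cfg = "conf/base/train_classifier_nni_config.yml"    # hypothetical

nni_config = anyconfig.load(common_cfg, ac_parser="yaml")

# Keep existing values, add defaults only where keys are missing.
nni_config.setdefault("trialConcurrency", 1)
nni_config.setdefault("maxTrialNum", -1)
nni_config.setdefault("trainingServicePlatform", "local")
trial = nni_config.setdefault("trial", {})
trial.setdefault("command", "kedro run --pipeline=train_classifier")
trial.setdefault("gpuNum", 0)

anyconfig.dump(nni_config, new_cfg, ac_parser="yaml")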
Example No. 10
    def test_10_dump_and_load(self):
        a = dict(name="a", a=1, b=dict(b=[1, 2], c="C"))

        a_path = os.path.join(self.workdir, "a.json")

        A.dump(a, a_path)
        self.assertTrue(os.path.exists(a_path))

        a1 = A.load(a_path)

        # FIXME: Too verbose
        self.assertEquals(a1["name"],   a["name"])
        self.assertEquals(a1["a"],      a["a"])
        self.assertEquals(a1["b"]["b"], a["b"]["b"])
        self.assertEquals(a1["b"]["c"], a["b"]["c"])
Example No. 11
def write_config(config, write_path):
    if is_under_path(ASSETS_DIR, write_path):
        raise Exception(
            "Invariant: Should not be writing to populus assets path")

    with open(write_path, 'w') as config_file:
        anyconfig.dump(
            dict(config),
            config_file,
            sort_keys=True,
            indent=2,
            separators=(',', ': '),
        )

    return write_path
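
Keyword arguments that anyconfig.dump does not recognize are handed to the backend serializer, which is how sort_keys, indent and separators reach json.dump here. A minimal sketch with an illustrative config and file name:

import anyconfig

config = {"chains": {"mainnet": {"chain_id": "1"}}}

with open("project.json", "w") as config_file:
    # The extra keywords are forwarded to the JSON backend.
    anyconfig.dump(config, config_file, sort_keys=True, indent=2,
                   separators=(",", ": "))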
Example No. 12
    def test_10_dump_and_load(self):
        a = dict(name="a", a=1, b=dict(b=[1, 2], c="C"))

        a_path = os.path.join(self.workdir, "a.json")

        A.dump(a, a_path)
        self.assertTrue(os.path.exists(a_path))

        a1 = A.load(a_path)

        # FIXME: Too verbose
        self.assertEquals(a1["name"], a["name"])
        self.assertEquals(a1["a"], a["a"])
        self.assertEquals(a1["b"]["b"], a["b"]["b"])
        self.assertEquals(a1["b"]["c"], a["b"]["c"])
Example No. 13
    def save(self, obj, filename, savedir=None, **kwargs):
        """
        :param obj: Object to save
        :param filename: File base name to save
        :param savedir: Directory to save results
        :param kwargs: Extra keyword arguments passed to anyconfig.dump
        """
        if anyconfig.utils.is_iterable(obj):
            obj = dict(data=obj, )  # Top level data should be a dict.

        if savedir is None:
            savedir = self.workdir

        filepath = os.path.join(savedir, "%s.json" % filename)
        anyconfig.dump(obj, filepath, **kwargs)
Example No. 14
def _save_data_as_json(data, filepath, top_key="data"):
    """
    :param filepath: JSON file path
    :param data: Data to save, maybe a list or mapping object
    :param top_key:
        Top level mapping key to be used to save list data (valid JSON data is
        a mapping object)
    """
    if not hasattr(data, "keys"):
        data = {top_key: data}  # Necessary to make `data` as valid JSON data.

    if not os.path.exists(os.path.dirname(filepath)):
        os.makedirs(os.path.dirname(filepath))

    anyconfig.dump(data, filepath)
    LOG.debug("saved: %s", filepath)
Example No. 15
    def test_20_dump_and_multi_load(self):
        obj_diff = dict(a=2, b=dict(b=[1, 2, 3, 4, 5], d='D'))

        with tempfile.TemporaryDirectory() as tmpdir:
            a_path = pathlib.Path(tmpdir) / 'a.json'
            b_path = pathlib.Path(tmpdir) / 'b.json'

            TT.dump(self.obj, a_path)
            self.assertTrue(a_path.exists())

            TT.dump(obj_diff, b_path)
            self.assertTrue(b_path.exists())

            ref = copy.copy(self.obj)
            obj_1 = TT.multi_load([a_path, b_path], ac_merge=TT.MS_DICTS)
            TT.merge(ref, obj_diff, ac_merge=TT.MS_DICTS)
            self.assertEqual(obj_1, ref)
Example No. 16
async def main(args):
    config = _Schema.config(json.load(args.config))
    memory = _Schema.memory(json.load(args.memory))
    modules = list({
        model.__module__
        for model in (SubTrigger, IdentityGroup, IdentityLink, Note)
    })
    await Tortoise.init(db_url=args.database, modules={"db": modules})
    try:
        await Tortoise.generate_schemas(safe=True)
        data = Data(config, memory, args.database,
                    os.path.dirname(args.config.name))
        await data.migrate_all()
        config = await data.make_config()
        anyconfig.dump(config, args.output)
    finally:
        await Tortoise.close_connections()
Example No. 17
def dump_networks_from_config_files(config_files,
                                    output=None,
                                    max_prefix=NET_MAX_PREFIX):
    """
    Load network related data from parsed fortigate config files.

    :param config_files: A list of fortios' config files parsed
    :param output: Output file path
    :param max_prefix: Max prefix for networks
    """
    nodes_links = make_networks_from_config_files(config_files,
                                                  max_prefix=max_prefix)

    if output is None:
        output = os.path.join(os.path.dirname(config_files[0]), "output.yml")

    anyconfig.dump(nodes_links, output)
Example No. 18
def main():
    psr = option_parser()
    (options, args) = psr.parse_args()

    if options.verbose:
        LOG.setLevel(logging.DEBUG)

    if not args:
        psr.print_help()
        sys.exit(1)

    LOG.info("Loading changelogs from: %s", args[0])
    res = get_changelogs_from_otherxml(args[0])

    LOG.info("changelogs of %d RPMs were found" % len(res))
    data = dict(date=datetime.datetime.now().strftime("%Y-%m-%d"), data=res)
    out = sys.stdout if options.output is None else options.output
    anyconfig.dump(data, out, ac_parser="json")
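
When the target is a plain stream such as sys.stdout there is no file extension to inspect, so the parser has to be named with ac_parser; with a real file path the suffix is usually enough. A short sketch of both cases with illustrative data:

import sys
import anyconfig

data = dict(date="2024-01-01", data=[])

# The ".json" suffix selects the backend automatically.
anyconfig.dump(data, "changelogs.json")

# A bare stream needs an explicit parser name.
anyconfig.dump(data, sys.stdout, ac_parser="json")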
Example No. 19
def main():
    psr = option_parser()
    (options, args) = psr.parse_args()

    if options.verbose:
        LOG.setLevel(logging.DEBUG)

    if not args:
        psr.print_help()
        sys.exit(1)

    LOG.info("Loading changelogs from: %s", args[0])
    res = get_changelogs_from_otherxml(args[0])

    LOG.info("changelogs of %d RPMs were found" % len(res))
    data = dict(date=datetime.datetime.now().strftime("%Y-%m-%d"), data=res)
    out = sys.stdout if options.output is None else options.output
    anyconfig.dump(data, out, ac_parser="json")
Example No. 20
def main():

    py3_input_conversion = lambda x: input(
        x) if sys.version_info.major >= 3 else raw_input(x)

    requests.packages.urllib3.disable_warnings()
    defaultConfig = {
        'url': 'https://stream.watsonplatform.net/text-to-speech/api',
        'user': '******',
        'password': '******',
        'voice': 'en-US_AllisonVoice',
        'chunk': 2048
    }
    home = os.path.expanduser("~")
    defaultConfigFile = home + '/.config-tts-watson.yml'
    parser = argparse.ArgumentParser(description='Text to speech using watson')

    parser.add_argument('-f',
                        action='store',
                        dest='configFile',
                        default=defaultConfigFile,
                        help='config file',
                        required=False)
    parser.add_argument('text_to_transform', action='store', nargs='+')
    args = parser.parse_args()
    conf = anyconfig.container(defaultConfig)
    if not os.path.isfile(args.configFile):
        print("Config file '" + args.configFile + "' doesn't exist.")
        print("Creating it ...")
        user = py3_input_conversion("Watson user: "******"Watson password: "******" ".join(args.text_to_transform))
Example No. 21
def parse_show_config_and_dump(inpath, outpath, cnames=CNF_NAMES):
    """
    Similar to the above :func:`parse_show_config` but saves results as a JSON
    file (path: `outpath`).

    :param inpath:
        a str or :class:`pathlib.Path` object representing a file path that
        contains 'show full-configuration' or any other 'show ...' outputs
    :param outpath: (JSON) file path to save parsed results

    :return: A mapping object contains parsed results
    :raises: IOError, OSError
    """
    data = parse_show_config(inpath)  # {"configs": [...]}

    utils.ensure_dir_exists(outpath)
    anyconfig.dump(data, outpath)

    cnfs = list_configs_from_config_data_0(data, filepath=inpath)
    vdoms = list_vdom_names(cnfs)

    fwcnfs = list_configs_from_configs_data(cnfs)
    try:
        hostname = hostname_from_configs(fwcnfs)
    except ValueError as exc:
        LOG.warning("%r: %s\nCould not resovle hostname", exc, inpath)
        hostname = "unknown-{!s}".format(checksum(inpath))

    if hostname:  # It should have this in most cases.
        outdir = os.path.join(os.path.dirname(outpath), hostname)

        anyconfig.dump(
            dict(timestamp=timestamp(),
                 hostname=hostname,
                 vdoms=vdoms,
                 origina_data=inpath), os.path.join(outdir, METADATA_FILENAME))
        anyconfig.dump(data, os.path.join(outdir, ALL_FILENAME))

        for name in cnames:
            xcnfs = configs_by_name(fwcnfs, name)

            for xcnf in xcnfs:
                fname = config_filename(xcnf["config"])
                opath = os.path.join(outdir, fname)
                odata = xcnf.get("edits", xcnf)  # only dump edits if avail.

                anyconfig.dump(odata, opath)

    return data
Example No. 22
def gen_vnet_files(cf, tmpldirs, workdir, force):
    """
    Generate libvirt network def. XML files.

    :param cf: An instance of miniascape.config.ConfFiles
    :param tmpldirs: Template search paths
    :param workdir: Working dir to save generated XML files
    :param force: Existing ones may be overwritten if True
    """
    nets = cf.load_nets_confs()
    outdir = _netoutdir(workdir)
    tmpl = _find_template(tmpldirs, _netxml_path())
    tpaths = [os.path.dirname(tmpl)]

    logging.debug("Network XML: tpaths={}, tmpl={}".format(tpaths, tmpl))

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    for name in nets:
        netconf = os.path.join(outdir, "{}.yml".format(name))
        if os.path.exists(netconf) and not force:
            logging.warn("Net conf already exists: " + netconf)
            return

        logging.debug("Dump conf for the net: " + name)
        anyconfig.dump(nets[name], netconf)

        netxml = os.path.join(outdir, "{}.xml".format(name))
        if os.path.exists(netxml) and not force:
            logging.warn("Net xml already exists: " + netxml)
            return

        nc = anyconfig.load(netconf, ac_template=True)
        nc["hosts"] = hosts_w_unique_ips(nc)
        nc["hosts_w_unique_macs"] = hosts_w_unique_macs(nc)

        logging.debug("Generating network xml: " + netxml)
        miniascape.template.render_to(tmpl, nc, netxml, tpaths)
Example No. 23
def gen_vnet_files(cf, tmpldirs, workdir, force):
    """
    Generate libvirt network def. XML files.

    :param cf: An instance of miniascape.config.ConfFiles
    :param tmpldirs: Template search paths
    :param workdir: Working dir to save generated XML files
    :param force: Existing ones may be overwritten if True
    """
    nets = cf.load_nets_confs()
    outdir = _netoutdir(workdir)
    tmpl = _find_template(tmpldirs, _netxml_path())
    tpaths = [os.path.dirname(tmpl)]

    logging.debug("Network XML: tpaths={}, tmpl={}".format(tpaths, tmpl))

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    for name in nets:
        netconf = os.path.join(outdir, "{}.yml".format(name))
        if os.path.exists(netconf) and not force:
            logging.warn("Net conf already exists: " + netconf)
            return

        logging.debug("Dump conf for the net: " + name)
        anyconfig.dump(nets[name], netconf)

        netxml = os.path.join(outdir, "{}.xml".format(name))
        if os.path.exists(netxml) and not force:
            logging.warn("Net xml already exists: " + netxml)
            return

        nc = anyconfig.load(netconf, ac_template=True)
        nc["hosts"] = hosts_w_unique_ips(nc)
        nc["hosts_w_unique_macs"] = hosts_w_unique_macs(nc)

        logging.debug("Generating network xml: " + netxml)
        miniascape.template.render_to(tmpl, nc, netxml, tpaths)
Example No. 24
def test_load(tmp_path):
    assert _CNF_FILES

    for in_path in _CNF_FILES:
        exp_path = in_path.replace('.txt', '.json')
        out_path = tmp_path / 'out.json'

        if not os.path.exists(exp_path):
            continue  # The reference test data is not ready.

        try:
            psr = TT.Parser()
            cnf = psr.load(anyconfig.ioinfo.make(in_path), ac_ordered=True)
            assert cnf
            assert isinstance(cnf, F.DEF_DICT)

            anyconfig.dump(cnf, out_path)
            ocnf = anyconfig.load(out_path, ac_ordered=False)
            exp = anyconfig.load(exp_path, ac_ordered=False)
            assert ocnf == exp, f'{ocnf!r} vs. {exp!r}'

        except AssertionError as exc:
            raise AssertionError(f'file: {in_path}, exc={exc!s}')
Example No. 25
def output_results(errata, packages, updates, distro, workdir):
    """
    """
    metadata = dict(generator="rpmkit.extras.listerrata_for_releases",
                    version="0.1",
                    last_updated=_TODAY,
                    channels=', '.join(distro["channels"]),
                    nerrata=len(errata),
                    npackages=len(packages),
                    nupdates=len(updates))
    metadata.update(distro)

    anyconfig.dump(dict(metadata=metadata, data=errata),
                   os.path.join(workdir, "errata.json"))
    anyconfig.dump(dict(metadata=metadata, data=packages),
                   os.path.join(workdir, "packages.json"))
    anyconfig.dump(dict(metadata=metadata, data=updates),
                   os.path.join(workdir, "updates.json"))

    with open(os.path.join(workdir, "updates.txt"), 'w') as f:
        for u in updates:
            f.write(u.get("path", u.get("filename", "N/A")) + '\n')

    with open(os.path.join(workdir, "errata.csv"), 'w') as f:
        f.write("advisory,synopsis,issue_date,url\n")
        for e in errata:
            adv = e.get("advisory", e.get("id"))
            adv_s = adv.replace(':', '-')
            url = "https://rhn.redhat.com/errata/{}.html".format(adv_s)

            f.write("{},{},{},{}\n".format(
                adv, e.get("synopsis", e.get("title")),
                e.get("issue_date", e.get("issued")), url))

    fn = os.path.join(workdir, "geniso.sh")
    with open(fn, 'w') as f:
        f.write(gen_mkiso_script(distro, metadata))
    os.chmod(fn, 0o755)
Example No. 26
def output_results(errata, packages, updates, distro, workdir):
    """
    """
    metadata = dict(generator="rpmkit.extras.listerrata_for_releases",
                    version="0.1", last_updated=_TODAY,
                    channels=', '.join(distro["channels"]),
                    nerrata=len(errata), npackages=len(packages),
                    nupdates=len(updates))
    metadata.update(distro)

    anyconfig.dump(dict(metadata=metadata, data=errata),
                   os.path.join(workdir, "errata.json"))
    anyconfig.dump(dict(metadata=metadata, data=packages),
                   os.path.join(workdir, "packages.json"))
    anyconfig.dump(dict(metadata=metadata, data=updates),
                   os.path.join(workdir, "updates.json"))

    with open(os.path.join(workdir, "updates.txt"), 'w') as f:
        for u in updates:
            f.write(u.get("path", u.get("filename", "N/A")) + '\n')

    with open(os.path.join(workdir, "errata.csv"), 'w') as f:
        f.write("advisory,synopsis,issue_date,url\n")
        for e in errata:
            adv = e.get("advisory", e.get("id"))
            adv_s = adv.replace(':', '-')
            url = "https://rhn.redhat.com/errata/{}.html".format(adv_s)

            f.write("{},{},{},{}\n".format(adv,
                                           e.get("synopsis", e.get("title")),
                                           e.get("issue_date",
                                                 e.get("issued")), url))

    fn = os.path.join(workdir, "geniso.sh")
    with open(fn, 'w') as f:
        f.write(gen_mkiso_script(distro, metadata))
    os.chmod(fn, 0o755)
Example No. 27
    def __init__(self, config, model, criterion, train_loader, validate_loader, metric_cls, post_process=None):
        config['trainer']['output_dir'] = os.path.join(str(pathlib.Path(os.path.abspath(__name__)).parent.parent),
                                                       config['trainer']['output_dir'])
        self.save_dir = config['trainer']['output_dir']
        self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')

        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

        self.global_step = 0
        self.start_epoch = 0
        self.config = config
        self.model = model
        self.criterion = criterion

        # logger
        self.epochs = self.config['trainer']['epochs']
        self.log_iter = self.config['trainer']['log_iter']

        anyconfig.dump(config, os.path.join(self.save_dir, 'config.yaml'))
        self.logger = setup_logger(os.path.join(self.save_dir, 'train.log'))
        self.logger_info(pformat(self.config))

        # device
        if self.config['trainer']['CUDA_VISIBLE_DEVICES'] is not None:
            os.environ.setdefault("CUDA_VISIBLE_DEVICES", str(self.config['trainer']['CUDA_VISIBLE_DEVICES']))
            if torch.cuda.is_available():
                self.with_cuda = True
                torch.backends.cudnn.benchmark = True
                self.device = torch.device('cuda')
                if torch.cuda.device_count() > 1:
                    self.is_distributed = True
                    torch.cuda.manual_seed_all(self.config['trainer']['seed'])
                    self.model = torch.nn.DataParallel(self.model)
                else:
                    self.is_distributed = False
                    torch.cuda.manual_seed(self.config['trainer']['seed'])
            else:
                self.is_distributed = False
                self.with_cuda = False
                self.device = torch.device("cpu")
                torch.manual_seed(self.config['trainer']['seed'])
        else:
            self.is_distributed = False
            self.with_cuda = False
            self.device = torch.device("cpu")
            torch.manual_seed(self.config['trainer']['seed'])
        self.logger_info('train with device {} {} and pytorch {}'.format(self.device,
                                                                         'distributed' if self.is_distributed is not None and self.is_distributed else 'single',
                                                                         torch.__version__))
        self.model.to(self.device)

        # metrics and optimizer
        self.metrics = {'recall': 0, 'precision': 0, 'hmean': 0, 'train_loss': float('inf'), 'best_model_epoch': 0}
        self.optimizer = self._initialize('optimizer', torch.optim, model.parameters())

        # checkpoint
        if self.config['trainer']['resume_checkpoint'] != '':
            self._load_checkpoint(self.config['trainer']['resume_checkpoint'], False)
            self.net_save_path_best = ''
        else:
            net_save_path_latest = os.path.join(self.checkpoint_dir, "model_latest.pth")
            if os.path.isfile(net_save_path_latest):
                self._load_checkpoint(net_save_path_latest, False)

            self.net_save_path_best = os.path.join(self.checkpoint_dir, "model_best*.pth")
            if glob.glob(self.net_save_path_best):
                self.net_save_path_best = glob.glob(self.net_save_path_best)[0]
                self._load_checkpoint(self.net_save_path_best, True)
            else:
                self.net_save_path_best = ''

        # normalize
        self.UN_Normalize = False
        for t in self.config['dataset']['train']['dataset']['args']['transforms']:
            if t['type'] == 'Normalize':
                self.normalize_mean = t['args']['mean']
                self.normalize_std = t['args']['std']
                self.UN_Normalize = True

        self.show_images_iter = self.config['trainer']['show_images_iter']
        self.train_loader = train_loader
        if validate_loader is not None:
            assert post_process is not None and metric_cls is not None
        self.validate_loader = validate_loader
        self.post_process = post_process
        self.metric_cls = metric_cls
        self.train_loader_len = len(train_loader)

        # lr_scheduler
        warmup_iters = config['lr_scheduler']['args']['warmup_epoch'] * self.train_loader_len
        if self.start_epoch > 1:
            self.config['lr_scheduler']['args']['last_epoch'] = (self.start_epoch - 1) * self.train_loader_len
        self.scheduler = WarmupPolyLR(self.optimizer, max_iters=self.epochs * self.train_loader_len,
                                      warmup_iters=warmup_iters, **config['lr_scheduler']['args'])

        self.logger_info(
            'train dataset has {} samples,{} in dataloader, validate dataset has {} samples,{} in dataloader'.format(
                len(self.train_loader.dataset), self.train_loader_len, len(self.validate_loader.dataset),
                len(self.validate_loader)))

        self.epoch_result = {'train_loss': 0, 'lr': 0, 'time': 0, 'epoch': 0}
Example No. 28
def main(config_path: str, dry: bool = False):
    config = load_config(config_path)
    base_config = ({} if config['base_config'] is None else anyconfig.load(
        config['base_config']))

    # if needed prepare environment
    if not dry:
        if os.path.exists(config['working_dir']):
            print(f'Error: "{config["working_dir"]}" does already exist.')
            sys.exit(-1)

        os.makedirs(config['working_dir'])

        exec_dir = os.getcwd()
        os.chdir(config['working_dir'])

    # setup and run schedule
    special_extra_keys = ['repetitions']  # these get handled individually

    config_schedule = []
    for entry in config['config_parameters']:
        tmp = []

        # handle possible pairing
        if 'paired' in entry:
            # sanity checks
            for e in entry['paired']:
                if len(e['values']) != len(entry['values']):
                    raise RuntimeError(
                        f'Invalid pairing for "{entry["key"]}" & "{e["key"]}"')

            # generate associations
            paired_data = []
            for i in range(len(entry['values'])):
                case_values = [(c['key'], c['values'][i])
                               for c in entry['paired']]

                paired_data.append([{
                    'key': key,
                    'value': val
                } for key, val in case_values])
        else:
            paired_data = [None] * len(entry['values'])

        # add to schedule
        for val, pair in zip(entry['values'], paired_data):
            tmp.append(('config', entry['key'], val, pair))
        config_schedule.append(tmp)

    extra_schedule = [[('extra', k, v, None) for v in vs]
                      for k, vs in config['extra_parameters'].items()
                      if k not in special_extra_keys]
    schedule = config_schedule + extra_schedule

    for spec in tqdm(itertools.product(*schedule),
                     total=functools.reduce(operator.mul,
                                            [len(s) for s in schedule], 1),
                     desc='Setting up environments'):
        # create custom config
        cur_conf = copy.deepcopy(base_config)

        for t, k, v, p in spec:
            if t != 'config':
                continue

            # set main parameter
            assign_to_dict(cur_conf, k, v)

            # set potential paired parameters
            if p is not None:
                for entry in p:
                    k, v = entry['key'], entry['value']
                    assign_to_dict(cur_conf, k, v)

        # assert subset relation (so only valid keys are used)
        if config['base_config'] is not None:
            cur_keys = dict_to_keyset(cur_conf)
            base_keys = dict_to_keyset(base_config)
            if cur_keys != base_keys:
                msg = 'Generated config is invalid.\n'

                only_cur = cur_keys - base_keys
                msg += 'Only in generated config:\n'
                for k in only_cur:
                    msg += f' > {k}\n'

                raise RuntimeError(msg)

        # make spec sortable
        c = lambda x: (NESTED_PARAMETER_SEPARATOR.join(x)
                       if isinstance(x, list) else x)  # noqa: E731
        c2 = lambda x: str(x).replace('/', '_')  # noqa: E731

        spec = [(t, c(k), v, p) for t, k, v, p in spec]

        # extract extra info
        extra_info = {k: v for t, k, v, p in sorted(spec) if t == 'extra'}
        repetition_count = config['extra_parameters']['repetitions']

        for rep in range(repetition_count):
            # assemble index
            idx = PARAMETER_SEPARATOR.join([
                f'{k}{PARAMETER_ASSIGNMENT}{c2(v)}'
                for t, k, v, p in sorted(spec)
            ])
            rep_app = (
                f'{PARAMETER_SEPARATOR}repetition{PARAMETER_ASSIGNMENT}{rep+1}'
                if repetition_count > 1 else '')
            target_dir = f'run{SECTION_SEPARATOR}{idx}{rep_app}'

            # abort if in dry run
            if dry:
                print(target_dir)
                pprint(cur_conf)
                pprint(extra_info)
                print()
                continue

            # setup environment
            if os.path.isdir(os.path.join(exec_dir, config['project_source'])):
                shutil.copytree(
                    os.path.join(exec_dir, config['project_source']),
                    target_dir)
            else:
                sh.git.clone(config['project_source'], target_dir)

            if 'git_branch' in extra_info:
                sh.git.checkout(extra_info['git_branch'], _cwd=target_dir)

            if config['base_config'] is not None:
                conf_name = os.path.basename(config['base_config'])
            else:
                conf_name = TEMP_CONFIG_NAME
            anyconfig.dump(cur_conf, f'{target_dir}/{conf_name}')

            for sym in config['symlinks']:
                sym_path = os.path.relpath(os.path.join(
                    exec_dir, os.path.dirname(config_path), sym),
                                           start=target_dir)
                if not os.path.exists(os.path.join(target_dir, sym_path)):
                    print(f'Cannot find "{sym_path}"')

                sym_base = os.path.basename(os.path.normpath(sym))

                os.symlink(sym_path,
                           os.path.join(target_dir, sym_base),
                           target_is_directory=os.path.isdir(sym))
Example No. 29
def gen_site_conf_files(conf, tmpldirs, workdir):
    """
    Generate site specific config files for host, networks and guests from a
    config dict.

        conf :: dict -> .../{common,host,networks.d,guests}/**/*.yml

    :param conf: Object holding config parameters
    :param tmpldirs: Template path list
    :param workdir: Working top dir, e.g. miniascape-workdir-201303121

    :return: Configuration topdir where generated config files under
    """
    outdir = os.path.join(workdir, conf.get("site", G.M_SITE_DEFAULT))
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    logging.info("Generating site config: %s", outdir)
    baseyml = "00_base.yml"  # Config file loaded first.
    common_conf = conf.get("common", {})
    common_conf["site"] = conf.get("site", G.M_SITE_DEFAULT)

    for (cnf, subdir) in ((common_conf, "common"),
                          (conf.get("host", {}), "host.d")):
        anyconfig.dump(cnf, os.path.join(outdir, subdir, baseyml))

    # ex. /usr/share/miniascape/templates/config/
    tpaths = [os.path.join(d, "config") for d in tmpldirs]
    logging.debug("Template paths for site confs: %r", tpaths)
    for net in conf.get("networks", []):
        noutdir = os.path.join(outdir, "networks.d", net["name"])
        miniascape.template.render_to("network.j2", context=net,
                                      output=os.path.join(noutdir, baseyml),
                                      tpaths=tpaths)

    guests_key = "guests"
    for ggroup in conf.get("guests", []):
        ggoutdir = os.path.join(outdir, "guests.d", ggroup["name"])
        if not os.path.exists(ggoutdir):
            os.makedirs(ggoutdir)

        ggroup_conf = dict()
        for k, v in ggroup.iteritems():
            if k != guests_key:
                ggroup_conf[k] = v

        anyconfig.dump(ggroup_conf, os.path.join(ggoutdir, baseyml))

        for guest in ggroup["guests"]:
            for k in ("name", "hostname", "fqdn"):
                name = guest.get(k, None)
                if name is not None:
                    break
            else:
                raise NoNameGuestError("Guest must have a name or hostname or "
                                       "fqdn: guest=" + str(guest))

            goutdir = os.path.join(ggoutdir, name)
            if not os.path.exists(goutdir):
                os.makedirs(goutdir)

            anyconfig.dump(guest, os.path.join(goutdir, baseyml))

    return outdir
Example No. 30
def mountConfig(config: dict, path: str = None):
    tmp_file = tempfile.NamedTemporaryFile()
    anyconfig.dump(config, tmp_file.name, ac_parser="yaml")

    return tmp_file.name
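
One caveat with the helper above: NamedTemporaryFile deletes its file once the file object is closed or garbage-collected, and only the name is returned, so the path may vanish right after the function returns. A sketch of a variant that keeps the file around by passing delete=False (the ".yaml" suffix is an assumption, added so the format could also be inferred from the name):

import tempfile
import anyconfig

def mount_config(config: dict) -> str:
    tmp = tempfile.NamedTemporaryFile(suffix=".yaml", delete=False)
    tmp.close()  # close the handle; the file stays because delete=False
    anyconfig.dump(config, tmp.name, ac_parser="yaml")
    return tmp.name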
Example No. 31
def _save_file(data, path):
    if anyconfig:
        anyconfig.dump(data, path)
    else:
        with open(path, "w") as writer:
            json.dump(data, writer)
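
The `if anyconfig:` guard implies the module was imported defensively so the helper still works when anyconfig is not installed. A self-contained sketch of that optional-dependency pattern:

import json

try:
    import anyconfig
except ImportError:          # fall back to the standard library
    anyconfig = None

def save_file(data, path):
    if anyconfig:
        anyconfig.dump(data, path)
    else:
        with open(path, "w") as writer:
            json.dump(data, writer)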
Example No. 32
def gen_site_conf_files(conf, tmpldirs, workdir):
    """
    Generate site specific config files for host, networks and guests from a
    config dict.

        conf :: dict -> .../{common,host,networks.d,guests}/**/*.yml

    :param conf: Object holding config parameters
    :param tmpldirs: Template path list
    :param workdir: Working top dir, e.g. miniascape-workdir-201303121

    :return: Configuration topdir where generated config files under
    """
    outdir = os.path.join(workdir, conf.get("site", G.M_SITE_DEFAULT))
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    logging.info("Generating site config: %s", outdir)
    baseyml = "00_base.yml"  # Config file loaded first.
    common_conf = conf.get("common", {})
    common_conf["site"] = conf.get("site", G.M_SITE_DEFAULT)

    for (cnf, subdir) in ((common_conf, "common"), (conf.get("host",
                                                             {}), "host.d")):
        anyconfig.dump(cnf, os.path.join(outdir, subdir, baseyml))

    # ex. /usr/share/miniascape/templates/config/
    tpaths = [os.path.join(d, "config") for d in tmpldirs]
    logging.debug("Template paths for site confs: %r", tpaths)
    for net in conf.get("networks", []):
        noutdir = os.path.join(outdir, "networks.d", net["name"])
        miniascape.template.render_to("network.j2",
                                      context=net,
                                      output=os.path.join(noutdir, baseyml),
                                      tpaths=tpaths)

    guests_key = "guests"
    for ggroup in conf.get("guests", []):
        ggoutdir = os.path.join(outdir, "guests.d", ggroup["name"])
        if not os.path.exists(ggoutdir):
            os.makedirs(ggoutdir)

        ggroup_conf = dict()
        for k, v in ggroup.iteritems():
            if k != guests_key:
                ggroup_conf[k] = v

        anyconfig.dump(ggroup_conf, os.path.join(ggoutdir, baseyml))

        for guest in ggroup["guests"]:
            for k in ("name", "hostname", "fqdn"):
                name = guest.get(k, None)
                if name is not None:
                    break
            else:
                raise NoNameGuestError("Guest must have a name or hostname or "
                                       "fqdn: guest=" + str(guest))

            goutdir = os.path.join(ggoutdir, name)
            if not os.path.exists(goutdir):
                os.makedirs(goutdir)

            anyconfig.dump(guest, os.path.join(goutdir, baseyml))

    return outdir
Example No. 33
    def __init__(self,
                 config,
                 model,
                 criterion,
                 metric_cls,
                 train_loader,
                 validate_loader,
                 post_process=None):
        config['trainer']['output_dir'] = os.path.join(
            str(pathlib.Path(os.path.abspath(__name__)).parent),
            config['trainer']['output_dir'])
        config['name'] = config['name'] + '_' + model.name
        self.save_dir = os.path.join(config['trainer']['output_dir'],
                                     config['name'])
        self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')

        if config['trainer']['resume_checkpoint'] == '' and config['trainer'][
                'finetune_checkpoint'] == '':
            shutil.rmtree(self.save_dir, ignore_errors=True)
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

        self.global_step = 0
        self.start_epoch = 0
        self.config = config
        self.model = model
        self.criterion = criterion
        self.metric_cls = metric_cls
        # logger and tensorboard
        self.epochs = self.config['trainer']['epochs']
        self.log_iter = self.config['trainer']['log_iter']
        self.tensorboard_enable = self.config['trainer']['tensorboard']
        if config['local_rank'] == 0:
            anyconfig.dump(config, os.path.join(self.save_dir, 'config.yaml'))
            self.logger = setup_logger(os.path.join(self.save_dir,
                                                    'train.log'))
            self.logger_info(pformat(self.config))

        # device
        torch.manual_seed(self.config['trainer']['seed'])  # set the random seed for the CPU
        if torch.cuda.device_count() > 0 and torch.cuda.is_available():
            self.with_cuda = True
            torch.backends.cudnn.benchmark = True
            self.device = torch.device("cuda")
            torch.cuda.manual_seed(
                self.config['trainer']['seed'])  # set the random seed for the current GPU
            torch.cuda.manual_seed_all(
                self.config['trainer']['seed'])  # set the random seed for all GPUs
        else:
            self.with_cuda = False
            self.device = torch.device("cpu")
        self.logger_info('train with device {} and pytorch {}'.format(
            self.device, torch.__version__))

        self.optimizer = self._initialize('optimizer', torch.optim,
                                          model.parameters())

        # resume or finetune
        if self.config['trainer']['resume_checkpoint'] != '':
            self._load_checkpoint(self.config['trainer']['resume_checkpoint'],
                                  resume=True)
        elif self.config['trainer']['finetune_checkpoint'] != '':
            self._load_checkpoint(
                self.config['trainer']['finetune_checkpoint'], resume=False)

        if self.config['lr_scheduler']['type'] != 'WarmupPolyLR':
            self.scheduler = self._initialize('lr_scheduler',
                                              torch.optim.lr_scheduler,
                                              self.optimizer)
        self.metrics = {
            'recall': 0,
            'precision': 0,
            'hmean': 0,
            'train_loss': float('inf'),
            'best_model_epoch': 0
        }
        self.model.to(self.device)

        # distributed training
        if torch.cuda.device_count() > 1:
            local_rank = config['local_rank']
            self.model = torch.nn.parallel.DistributedDataParallel(
                self.model,
                device_ids=[local_rank],
                output_device=local_rank,
                broadcast_buffers=False,
                find_unused_parameters=True)

        self.show_images_iter = self.config['trainer']['show_images_iter']
        self.train_loader = train_loader
        if validate_loader is not None:
            assert post_process is not None
        self.validate_loader = validate_loader
        self.post_process = post_process
        self.train_loader_len = len(train_loader)
        if self.config['lr_scheduler']['type'] == 'WarmupPolyLR':
            warmup_iters = config['lr_scheduler']['args'][
                'warmup_epoch'] * self.train_loader_len
            if self.start_epoch > 1:
                self.config['lr_scheduler']['args']['last_epoch'] = (
                    self.start_epoch - 1) * self.train_loader_len
            self.scheduler = WarmupPolyLR(self.optimizer,
                                          max_iters=self.epochs *
                                          self.train_loader_len,
                                          warmup_iters=warmup_iters,
                                          **config['lr_scheduler']['args'])
        if self.validate_loader is not None:
            self.logger_info(
                'train dataset has {} samples,{} in dataloader, validate dataset has {} samples,{} in dataloader'
                .format(len(self.train_loader.dataset), self.train_loader_len,
                        len(self.validate_loader.dataset),
                        len(self.validate_loader)))
        else:
            self.logger_info(
                'train dataset has {} samples,{} in dataloader'.format(
                    len(self.train_loader.dataset), self.train_loader_len))

        if self.tensorboard_enable and config['local_rank'] == 0:
            from torch.utils.tensorboard import SummaryWriter
            self.writer = SummaryWriter(self.save_dir)
            try:
                dummy_input = torch.zeros(1, 3, 640, 640).to(self.device)
                self.writer.add_graph(self.model, dummy_input)
                torch.cuda.empty_cache()
            except:
                import traceback
                self.logger.error(traceback.format_exc())
                self.logger.warn('add graph to tensorboard failed')
Example No. 34
 def persist(self):
     anyconfig.dump({'pyutrack': self.__config}, self.__path, 'ini')
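
The INI backend serializes a mapping of sections to option mappings, which is why the settings are nested under a 'pyutrack' section before dumping. A standalone sketch with hypothetical option values (string values keep the configparser-based backend happy):

import anyconfig

settings = {"pyutrack": {"base_url": "https://youtrack.example.com",
                         "username": "admin"}}
anyconfig.dump(settings, "/tmp/pyutrack.ini", ac_parser="ini")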
Example No. 35
    def __init__(self, config, model, criterion):
        config['trainer']['output_dir'] = os.path.join(
            str(pathlib.Path(os.path.abspath(__name__)).parent),
            config['trainer']['output_dir'])
        config['name'] = config['name'] + '_' + model.name
        start_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        self.save_dir = os.path.join(config['trainer']['output_dir'],
                                     config['name']) + '_' + start_time
        self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')
        if config['trainer']['resume_checkpoint'] == '' and config['trainer'][
                'finetune_checkpoint'] == '':
            shutil.rmtree(self.save_dir,
                          ignore_errors=True)  # recursively remove the folder and all of its contents
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        self.distributed = config['distributed']
        self.global_step = 0
        self.start_epoch = 0
        self.config = config
        self.model = model
        self.criterion = criterion
        # logger and tensorboard
        self.tensorboard_enable = self.config['trainer'][
            'tensorboard']  # whether tensorboard logging is enabled
        self.epochs = self.config['trainer']['epochs']
        self.log_iter = self.config['trainer']['log_iter']

        anyconfig.dump(config, os.path.join(self.save_dir,
                                            'config.yaml'))  # ?? purpose unknown
        self.logger = setup_logger(os.path.join(self.save_dir,
                                                'train.log'))  # create a new logger object
        self.logger_info(pformat(self.config))  # pretty-print the config

        # device
        torch.manual_seed(self.config['trainer']['seed'])  # set the random seed for the CPU
        if torch.cuda.device_count() > 0 and torch.cuda.is_available():
            self.with_cuda = True
            torch.backends.cudnn.benchmark = True
            torch.backends.cudnn.deterministic = True  # the convolution algorithm picked each run is deterministic, i.e. the default one
            self.device = torch.device("cuda")
            torch.cuda.manual_seed(
                self.config['trainer']['seed'])  # set the random seed for the current GPU
            torch.cuda.manual_seed_all(
                self.config['trainer']['seed'])  # set the random seed for all GPUs
        else:
            self.with_cuda = False
            self.device = torch.device("cpu")

        self.logger_info('train with device {} and pytorch {}'.format(
            self.device, torch.__version__))
        # metrics: not implemented yet
        self.metrics = {
            'Mean Acc': 0,
            'MeanIoU': 0,
            'train_loss': float('inf'),
            'best_model_epoch': 0
        }

        self.optimizer = self._initialize('optimizer', torch.optim,
                                          model.parameters())
        # SGD   torch.optim.

        # resume or finetune
        if self.config['trainer']['resume_checkpoint'] != '':
            self._load_checkpoint(self.config['trainer']['resume_checkpoint'],
                                  resume=True)
        elif self.config['trainer']['finetune_checkpoint'] != '':
            self._load_checkpoint(
                self.config['trainer']['finetune_checkpoint'], resume=False)

        if self.config['lr_scheduler']['type'] != 'WarmupPolyLR':
            self.scheduler = self._initialize('lr_scheduler',
                                              torch.optim.lr_scheduler,
                                              self.optimizer)

        self.model.to(self.device)
        # use tensorboard to plot training curves
        if self.tensorboard_enable and config['local_rank'] == 0:
            from torch.utils.tensorboard import SummaryWriter

            self.writer = SummaryWriter(self.save_dir)
            try:
                # add graph
                dummy_input = torch.zeros(1, 3, 640, 640).to(self.device)
                self.writer.add_graph(self.model, dummy_input)
                torch.cuda.empty_cache()
            except:
                import traceback
                self.logger.error(traceback.format_exc())
                self.logger.warn('add graph to tensorboard failed')

        # distributed training
        if self.distributed:
            local_rank = config['local_rank']
            self.world_size = int(os.environ['WORLD_SIZE'])
            self.model = torch.nn.parallel.DistributedDataParallel(
                self.model,
                device_ids=[local_rank],
                output_device=local_rank,
                broadcast_buffers=False,
                find_unused_parameters=True)
        else:
            self.model = torch.nn.DataParallel(model)
        # make inverse Normalize (undo normalization)  ??? unclear what this means
        self.UN_Normalize = False
        for t in self.config['dataset']['train']['dataset']['args'][
                'transforms']:
            if t['type'] == 'Normalize':
                self.normalize_mean = t['args']['mean']
                self.normalize_std = t['args']['std']
                self.UN_Normalize = True
Example No. 36
def main():
    defaultConfig = {
        'url': 'https://gateway.watsonplatform.net/dialog/api',
        'user': '******',
        'password': '******',
    }
    home = os.path.expanduser("~")
    defaultConfigFile = home + '/.config-dialog-watson.yml'
    parser = argparse.ArgumentParser(description='Text to speech using watson')

    parser.add_argument('-f',
                        action='store',
                        dest='configFile',
                        default=defaultConfigFile,
                        help='config file',
                        required=False)
    parser.add_argument('dialog_file', action='store', nargs=1)
    parser.add_argument('-n',
                        '--name',
                        dest='dialog_name',
                        action='store',
                        help='Dialog name',
                        required=True)
    parser.add_argument('--clean', dest='clean', action='store_true')
    args = parser.parse_args()
    dialog_file = "".join(args.dialog_file)
    conf = anyconfig.container(defaultConfig)
    if not os.path.isfile(args.configFile):
        print "Config file '" + args.configFile + "' doesn't exist."
        print "Creating it ..."
        user = raw_input("Watson user: "******"Watson password: "******"".join(args.dialog_name), bconf.url,
                          os.path.dirname(dialog_file) + "/dialog_id_file.txt")
    if args.clean:
        watsonClient.clean_dialogs()

    resp = watsonClient.start_dialog()
    print ''
    print bcolors.WARNING + "Watson: " + bcolors.OKBLUE + "\n".join(
        resp.response) + bcolors.ENDC
    while True:
        userResponse = raw_input(bcolors.WARNING + "You: " + bcolors.OKGREEN)
        resp = watsonClient.converse(userResponse)
        print bcolors.WARNING + "Watson: " + bcolors.OKBLUE + "\n".join(
            resp.response) + bcolors.ENDC
        if userResponse == "bye":
            break
    print ""
    print "Your profile:"
    for name, value in watsonClient.get_profile().get_data().iteritems():
        print "\t" + name + ": " + value
    if __name__ == "__main__":
        main()
Exemplo n.º 37
0
def _create_kedro_config(project_path, payload, yml=True):
    kedro_conf = project_path / ".kedro.yml" if yml else project_path / "pyproject.toml"
    with kedro_conf.open("w") as fd:
        anyconfig.dump(payload, fd)
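# A hedged usage sketch for the helper above; the project path and the
# "context_path" payload key are illustrative, not taken from the original.
from pathlib import Path

project_path = Path("/tmp/demo-kedro-project")
project_path.mkdir(parents=True, exist_ok=True)
# writes the payload as YAML into <project>/.kedro.yml (yml=False would
# target pyproject.toml instead)
_create_kedro_config(project_path, {"context_path": "demo.run.ProjectContext"})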
Exemplo n.º 38
0
def parse_show_config_and_dump(inpath, outdir=None, cnames=CNF_NAMES):
    """
    :param inpath:
        a str or :class:`pathlib.Path` object representing the path of a file
        that contains 'show full-configuration' or other 'show ...' output
    :param outdir: Dir to save parsed results as JSON files

    :return:
        ((all) output filepath, a mapping object containing the parsed result)

    :raises: IOError, OSError
    """
    cnf = parse_show_config(inpath)  # {"configs": [...]}

    vdoms = list_vdom_names(cnf)
    _has_vdoms = vdoms and len(vdoms) > 1

    try:
        # It should have this in most cases.
        hostname = hostname_from_configs(cnf, has_vdoms_=_has_vdoms)
    except ValueError as exc:
        LOG.warning("%r: %s\nCould not resovle hostname", exc, inpath)
        hostname = unknown_name()

    if not outdir:
        outdir = "out"

    houtdir = os.path.join(outdir, hostname)

    outpath = os.path.join(houtdir, ALL_FILENAME)
    utils.save_file(cnf, outpath)

    gmark = '*'
    opts = dict(has_vdoms_=_has_vdoms)
    for cname in cnames:
        if gmark in cname:
            cregexp = re.compile(cname)
            if cregexp:
                ccnames = list_cnames_for_regexp(cnf,
                                                 regexp=cregexp,
                                                 has_vdoms_=_has_vdoms)

                for ccn in ccnames:
                    pexp = "configs[?config=='{}'].edits[]".format(ccn)
                    ccnf = jmespath_search(pexp, cnf, **opts)
                    ccname = cname_to_filename(ccn)
                    anyconfig.dump(ccnf, os.path.join(houtdir, ccname))
        else:
            # TODO: Save configs per global and VDoms?
            pexp = "configs[?config=='{}'].edits[]".format(cname)
            ccnf = jmespath_search(pexp, cnf, **opts)
            ccname = cname_to_filename(cname)
            anyconfig.dump(ccnf, os.path.join(houtdir, ccname))

    anyconfig.dump(
        dict(timestamp=utils.timestamp(),
             hostname=hostname,
             vdoms=vdoms,
             original_data=inpath), os.path.join(houtdir, METADATA_FILENAME))

    return (outpath, cnf)
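# A minimal usage sketch for the parser above; the input filename is
# illustrative. Per-section JSON files are written under out/<hostname>/.
outpath, cnf = parse_show_config_and_dump("show_full_configuration.log",
                                          outdir="out")
print(outpath)  # path of the file holding the full parsed configuration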
Exemplo n.º 39
0
def fake_run_config_with_params(fake_run_config, request):
    config = anyconfig.load(fake_run_config)
    config["run"].update(request.param)
    anyconfig.dump(config, fake_run_config)
    return fake_run_config
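# A hedged sketch of how such a fixture is typically driven through pytest's
# indirect parametrization; the test name and the "run" key values are
# illustrative, not from the original.
import pytest
import anyconfig

@pytest.mark.parametrize(
    "fake_run_config_with_params",
    [{"pipeline": "__default__"}],  # merged into config["run"] by the fixture
    indirect=True,
)
def test_run_section_is_patched(fake_run_config_with_params):
    patched = anyconfig.load(fake_run_config_with_params)
    assert patched["run"]["pipeline"] == "__default__"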
Exemplo n.º 40
0
    def __init__(self, config, model, criterion):
        config['trainer']['output_dir'] = os.path.join(
            str(pathlib.Path(os.path.abspath(__name__)).parent),
            config['trainer']['output_dir'])
        config['name'] = config['name'] + '_' + model.name
        self.save_dir = os.path.join(config['trainer']['output_dir'],
                                     config['name'])
        self.checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')

        if config['trainer']['resume_checkpoint'] == '' and config['trainer'][
                'finetune_checkpoint'] == '':
            shutil.rmtree(self.save_dir, ignore_errors=True)
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

        self.global_step = 0
        self.start_epoch = 0
        self.config = config
        self.model = model
        self.criterion = criterion
        # logger and tensorboard
        self.tensorboard_enable = self.config['trainer']['tensorboard']
        self.epochs = self.config['trainer']['epochs']
        self.log_iter = self.config['trainer']['log_iter']

        anyconfig.dump(config, os.path.join(self.save_dir, 'config.yaml'))
        self.logger = setup_logger(os.path.join(self.save_dir, 'train.log'))
        self.logger_info(pformat(self.config))

        # device
        torch.manual_seed(self.config['trainer']['seed'])  # set random seed for CPU
        if torch.cuda.device_count() > 0 and torch.cuda.is_available():
            self.with_cuda = True
            torch.backends.cudnn.benchmark = True
            self.device = torch.device("cuda")
            torch.cuda.manual_seed(
                self.config['trainer']['seed'])  # set random seed for the current GPU
            torch.cuda.manual_seed_all(
                self.config['trainer']['seed'])  # set random seed for all GPUs
        else:
            self.with_cuda = False
            self.device = torch.device("cpu")
        self.logger_info('train with device {} and pytorch {}'.format(
            self.device, torch.__version__))
        # metrics
        self.metrics = {
            'recall': 0,
            'precision': 0,
            'hmean': 0,
            'train_loss': float('inf')
        }

        self.optimizer = self._initialize('optimizer', torch.optim,
                                          model.parameters())

        # resume or finetune
        if self.config['trainer']['resume_checkpoint'] != '':
            self._laod_checkpoint(self.config['trainer']['resume_checkpoint'],
                                  resume=True)
        elif self.config['trainer']['finetune_checkpoint'] != '':
            self._laod_checkpoint(
                self.config['trainer']['finetune_checkpoint'], resume=False)

        if self.config['lr_scheduler']['type'] != 'WarmupPolyLR':
            self.scheduler = self._initialize('lr_scheduler',
                                              torch.optim.lr_scheduler,
                                              self.optimizer)

        self.model.to(self.device)

        if self.tensorboard_enable and config['local_rank'] == 0:
            from torch.utils.tensorboard import SummaryWriter
            self.writer = SummaryWriter(self.save_dir)
            try:
                # add graph
                dummy_input = torch.zeros(1, 3, 640, 640).to(self.device)
                self.writer.add_graph(self.model, dummy_input)
                torch.cuda.empty_cache()
            except Exception:
                import traceback
                self.logger.error(traceback.format_exc())
                self.logger.warning('add graph to tensorboard failed')
        # distributed training
        if torch.cuda.device_count() > 1:
            local_rank = config['local_rank']
            self.model = torch.nn.parallel.DistributedDataParallel(
                self.model,
                device_ids=[local_rank],
                output_device=local_rank,
                broadcast_buffers=False,
                find_unused_parameters=True)
        # make inverse Normalize
        self.UN_Normalize = False
        for t in self.config['dataset']['train']['dataset']['args'][
                'transforms']:
            if t['type'] == 'Normalize':
                self.normalize_mean = t['args']['mean']
                self.normalize_std = t['args']['std']
                self.UN_Normalize = True
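    # `_initialize` is not shown in this snippet; a plausible sketch, assuming
    # each config entry looks like {'type': 'Adam', 'args': {'lr': 0.001}} and
    # names a class inside the given module (e.g. torch.optim):
    def _initialize(self, name, module, *args, **kwargs):
        component_name = self.config[name]['type']
        component_args = dict(self.config[name].get('args', {}))
        component_args.update(kwargs)
        return getattr(module, component_name)(*args, **component_args)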
Exemplo n.º 41
0
    def save_bare_config_dict(self, bare_cfg: Dict):
        return anyconfig.dump(bare_cfg, self.get_config_file())
Exemplo n.º 42
0
    def save(self, filename=None):
        with open(filename or self.filename, "w") as f:
            anyconfig.dump(self.dict, f)
Exemplo n.º 43
0
def main(cmd_map=_ARGS_CMD_MAP):
    p = option_parser()
    (options, args) = p.parse_args()

    RU.init_log(DEBUG if options.verbose else INFO)

    if not args:
        p.print_usage()
        sys.exit(1)

    (cmd, rpms) = (args[0], args[1:])

    cmd = cmd_map.get(cmd[0], cmd_map.get(cmd[:3], False))
    if not cmd:
        print("Error: Invalid command: " + cmd)
        p.print_usage()
        sys.exit(1)

    root = os.path.abspath(options.root)
    all_rpms = [x["name"] for x in RR.list_installed_rpms(root)]

    if options.excludes:
        if is_file(options.excludes):
            excludes = load_list_from_file(options.excludes)
        else:
            excludes = options.excludes.split(',')

        excludes = RU.select_from_list(excludes, all_rpms)
        logging.info("%d RPMs found in given excludes list" % len(excludes))
    else:
        excludes = []

    if cmd == CMD_REMOVE:
        if not rpms:
            print("remove (erase) command requires RPMs: list of RPM names or "
                  "glob/regex patterns, or a file contains RPM names or "
                  "glob/regex patterns line by line")
            sys.exit(1)

        if len(rpms) == 1 and is_file(rpms[0]):
            rpms = load_list_from_file(rpms[0])

        rpms = RU.select_from_list(rpms, all_rpms)

        if options.use_dnf:
            (excludes, xs) = RED.compute_removed(rpms, root, excludes)
        else:
            xs = RR.compute_removed(rpms, root, excludes=excludes)

        data = dict(removed=xs, )

    elif cmd == CMD_STANDALONES:
        xs = sorted(RR.list_standalones(root, options.st_nrpms, excludes))
        data = dict(standalones=xs, )

    elif cmd == CMD_UPDATES:
        xs = [dict(name=x.name, version=x.version, release=x.release,
                   arch=x.arch, epoch=int(x.epoch)) for x
              in RED.compute_updates(root, options.repos, True)]

        if options.latest:
            xs = RR.find_latests(xs)

        if options.format == 'simple':
            xfmt = "%(name)s-%(epoch)s:%(version)s-%(release)s.%(arch)s"
            xs = sorted(xfmt % x for x in xs)

        data = dict(updates=xs, )
    else:
        xs = sorted(RR.get_leaves(root))
        data = dict(leaves=xs, )


    if options.format != "simple":
        if options.output:
            anyconfig.dump(dict(data=data, ), options.output,
                           forced_type=options.format)
        else:
            res = anyconfig.dumps(dict(data=data, ),
                                  forced_type=options.format)
            print(res)
    else:
        if options.output:
            ext = os.path.splitext(options.output)[1][1:]

            if ext in _FMT_CHOICES:
                anyconfig.dump(dict(data=data, ), options.output,
                               forced_type=ext)
            else:
                with open(options.output, 'w') as out:
                    for x in xs:
                        out.write(x + '\n')
        else:
            for x in xs:
                print(x)

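# `_ARGS_CMD_MAP` is not shown here; given the cmd[0] / cmd[:3] lookups above,
# a plausible (purely illustrative) shape is a dict keyed by one-letter and
# three-letter command abbreviations:
_ARGS_CMD_MAP = {
    "r": CMD_REMOVE, "rem": CMD_REMOVE,
    "s": CMD_STANDALONES, "sta": CMD_STANDALONES,
    "u": CMD_UPDATES, "upd": CMD_UPDATES,
    # any other mapped command would fall through to the final `else`
    # branch in main(), which lists leaf RPMs
}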
Exemplo n.º 44
0
def main(cmd_map=_ARGS_CMD_MAP):
    p = option_parser()
    (options, args) = p.parse_args()

    RU.init_log(DEBUG if options.verbose else INFO)

    if not args:
        p.print_usage()
        sys.exit(1)

    (cmd, rpms) = (args[0], args[1:])

    cmd = cmd_map.get(cmd[0], cmd_map.get(cmd[:3], False))
    if not cmd:
        print("Error: Invalid command: " + cmd)
        p.print_usage()
        sys.exit(1)

    root = os.path.abspath(options.root)
    all_rpms = [x["name"] for x in RR.list_installed_rpms(root)]

    if options.excludes:
        if is_file(options.excludes):
            excludes = load_list_from_file(options.excludes)
        else:
            excludes = options.excludes.split(',')

        excludes = RU.select_from_list(excludes, all_rpms)
        logging.info("%d RPMs found in given excludes list" % len(excludes))
    else:
        excludes = []

    if cmd == CMD_REMOVE:
        if not rpms:
            print("remove (erase) command requires RPMs: list of RPM names or "
                  "glob/regex patterns, or a file contains RPM names or "
                  "glob/regex patterns line by line")
            sys.exit(1)

        if len(rpms) == 1 and is_file(rpms[0]):
            rpms = load_list_from_file(rpms[0])

        rpms = RU.select_from_list(rpms, all_rpms)

        if options.use_dnf:
            (excludes, xs) = RED.compute_removed(rpms, root, excludes)
        else:
            xs = RR.compute_removed(rpms, root, excludes=excludes)

        data = dict(removed=xs, )

    elif cmd == CMD_STANDALONES:
        xs = sorted(RR.list_standalones(root, options.st_nrpms, excludes))
        data = dict(standalones=xs, )

    elif cmd == CMD_UPDATES:
        xs = [
            dict(name=x.name,
                 version=x.version,
                 release=x.release,
                 arch=x.arch,
                 epoch=int(x.epoch))
            for x in RED.compute_updates(root, options.repos, True)
        ]

        if options.latest:
            xs = RR.find_latests(xs)

        if options.format == 'simple':
            xfmt = "%(name)s-%(epoch)s:%(version)s-%(release)s.%(arch)s"
            xs = sorted(xfmt % x for x in xs)

        data = dict(updates=xs, )
    else:
        xs = sorted(RR.get_leaves(root))
        data = dict(leaves=xs, )


    if options.format != "simple":
        if options.output:
            anyconfig.dump(dict(data=data, ),
                           options.output,
                           forced_type=options.format)
        else:
            res = anyconfig.dumps(dict(data=data, ),
                                  forced_type=options.format)
            print(res)
    else:
        if options.output:
            ext = os.path.splitext(options.output)[1][1:]

            if ext in _FMT_CHOICES:
                anyconfig.dump(dict(data=data, ),
                               options.output,
                               forced_type=ext)
            else:
                with open(options.output, 'w') as out:
                    for x in xs:
                        out.write(x + '\n')
        else:
            for x in xs:
                print(x)
