Example #1
def test_setattr_default():
    b = DefaultMunch(foo='bar', this_is='useful when subclassing')
    assert hasattr(b.values, '__call__')

    b.values = 'uh oh'
    assert b.values == 'uh oh'
    assert b['values'] is None

    assert b.__default__ is None
    assert '__default__' not in b
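
This test relies on the core DefaultMunch contract: looking up a missing key returns the instance's default (here None, since no explicit default was given) instead of raising, for both attribute and bracket access, and the lookup does not insert the key. A minimal sketch of that behaviour, assuming only the munch package:

from munch import DefaultMunch

b = DefaultMunch(foo='bar')   # no explicit default, so it falls back to None
assert b.foo == 'bar'
assert b.missing is None      # attribute access on an absent key
assert b['missing'] is None   # bracket access behaves the same way
assert 'missing' not in b     # the read did not insert the key
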
Example #2
def test_copy_default():
    undefined = object()
    m = DefaultMunch.fromDict({'urmom': {'sez': {'what': 'what'}}}, undefined)
    c = m.copy()
    assert c is not m
    assert c.urmom is not m.urmom
    assert c.urmom.sez is not m.urmom.sez
    assert c.urmom.sez.what == 'what'
    assert c == m
    assert c.urmom.sez.foo is undefined
    assert c.urmom.sez.__undefined__ is undefined
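
Besides asserting that copy() is recursive (every nested mapping is duplicated, not shared), the test shows that the default sentinel travels with the copy: absent keys at any depth of the copy still resolve to undefined. A small sketch under the same assumptions:

from munch import DefaultMunch

undefined = object()
m = DefaultMunch.fromDict({'a': {'b': 1}}, undefined)
c = m.copy()
c.a.b = 2
assert m.a.b == 1                # the copy is independent of the original
assert c.a.missing is undefined  # the default survives the copy
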
Example #3
                        seed=seed,
                        no_graphics=True)
 brain = env.brains[env.brain_names[0]]
 env_info = env.reset(train_mode=True)[env.brain_names[0]]
 n_agents = len(env_info.agents)
 print('Number of agents:', n_agents)
 action_size = brain.vector_action_space_size
 state_size = brain.vector_observation_space_size
 state_multiplier = brain.num_stacked_vector_observations
 action_type = brain.vector_action_space_type
 comment = f"MADDPG Unity Tennis"
 log_dir = os.path.join('./runs', current_time + '_' + comment)
 os.mkdir(log_dir)
 print(f"logging to {log_dir}")
 writer = SummaryWriter(log_dir=log_dir)
 config = DefaultMunch()
 config.seed = seed
 config.n_episodes = 40000
 config.max_t = 1000
 config.buffer_size = 100000
 config.batch_size = 200
 config.gamma = 0.99
 config.tau = 0.001
 config.lr_actor = 0.0001
 config.lr_critic = 0.0001
 config.n_agents = n_agents
 config.state_size = state_size * state_multiplier
 config.action_size = action_size
 config.learn_start = 10000
 config.max_action = 1  # maximum value allowed for each action
 config.memory = ExperienceReplayMemory(config.buffer_size, seed)
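
The snippet uses DefaultMunch as a plain configuration container: fields are assigned attribute-style, and anything never assigned reads back as None rather than raising AttributeError, so downstream code can probe optional settings with a simple truthiness test. A minimal sketch (config.debug is a hypothetical, never-assigned setting):

from munch import DefaultMunch

config = DefaultMunch()
config.batch_size = 200

if config.debug:             # never set, reads as None, so the branch is skipped
    print('debug mode')
print(config.batch_size)     # 200
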
Example #4
 def __init__(self, values):
     self.values = DefaultMunch.fromDict(values)
Example #5
class Experiment(object):
    def __init__(self, conf_path=None, experiments_dir=None, hp=None):

        self.conf_path = conf_path
        self.conf = None
        self.log = None
        self.trainer = None
        self.tee = None
        self.current_run = 0

        self.fast_text_model = None

        if self.conf_path:
            self.set_conf(self.conf_path)
        else:
            self.conf = DefaultMunch(None).fromDict(default_conf)

        if experiments_dir:
            self.conf.experiments_dir = experiments_dir
        assert self.conf.experiments_dir is not None
        self.conf.experiments_dir = Path(self.conf.experiments_dir).resolve()

        if not self.conf.experiments_dir.exists():
            print("Creating %s" % str(self.conf.experiments_dir))
            self.conf.experiments_dir.mkdir(parents=True)

        if not self.conf.exp_id:
            self.conf.exp_id = str(datetime.datetime.now())[:10]

        self.dir = get_new_dir(self.conf.experiments_dir, self.conf.exp_id)

        self.summary = {
            "params": {p: [] for p in self.conf.randomizable_params},
            "other": {},
            "metrics": {m: [] for m in metrics},
        }

    def set_conf(self, path):
        with open(path, "r") as f:
            conf = yaml.safe_load(f)
        self.conf = DefaultMunch(None).fromDict(conf)

    def randomize(self, conf=None, verbose=0):
        conf = conf or self.conf
        params = conf.randomizable_params

        for p_name, p in params.items():
            if self.conf.trainer_type == "FT_DST" and p_name == "embedding_dim":
                self.summary["params"][p_name].append(300)
                continue
            if p.type == "range":
                values = np.arange(p.min, p.max, p.step)
            elif p.type == "list":
                values = np.array(p.vals)
            elif p.type == "fixed":
                value = np.array(p.value)
            else:
                raise ValueError("Unkonw type {} for {}".format(
                    p.type, p_name))
            if p.type != "fixed":
                if p.distribution == "normal":
                    value = normal_choice(values)
                elif p.distribution == "uniform":
                    value = uniform_choice(values)
                else:
                    # p.distribution == "deterministic"
                    value = values[self.current_run % len(values)]

            setattr(self.trainer.hp, p_name, value.tolist())
            self.summary["params"][p_name].append(value)
            if verbose > 0:
                print("{:20}: {:10}".format(p_name, value))
        if verbose > 0:
            print("{:20}: {:10}".format("trainer.hp.dir", self.trainer.hp.dir))

    def dump_conf(self, path):
        stringified = []
        for attr, val in self.conf.items():
            if isinstance(val, Path):
                self.conf[attr] = str(val)
                stringified.append(attr)
        with open(path, "w") as f:
            yaml.safe_dump(self.conf, f, default_flow_style=False)
        for attr in stringified:
            self.conf[attr] = Path(self.conf[attr])

    def summarize(self):
        metrics = pd.DataFrame(self.summary["metrics"])
        params = pd.DataFrame(self.summary["params"])
        other = "\n".join("{:20}: {}".format(k, v)
                          for k, v in self.summary["other"].items())
        summary = "{}\n\n{}".format(
            other,
            pd.concat([metrics, params], axis=1).to_string())
        with open(self.dir / "summary.txt", "a") as f:
            f.write(summary)
        metrics.to_csv(self.dir / "metrics.csv")
        params.to_csv(self.dir / "params.csv")
        self.dump_conf(self.dir / "conf.yaml")

    def setup(self, log=True):
        hp = HP(base_dir=self.dir)
        if self.conf.trainer_type == "DST":
            for attr, val in self.conf.hyperparameter.items():
                if val is not None:
                    setattr(hp, attr, val)
            self.trainer = DST(hp=hp)
        elif self.conf.trainer_type == "CDST":
            for attr, val in self.conf.hyperparameter.items():
                if val is not None:
                    setattr(hp, attr, val)
            self.trainer = CDST(hp=hp)
        elif self.conf.trainer_type == "FT_DST":
            for attr, val in self.conf.hyperparameter.items():
                if attr != "embedding_dim":
                    if val is not None:
                        setattr(hp, attr, val)
            if not self.fast_text_model:
                print("Setting fast_text_model...", end="")
                self.fast_text_model = FastText.load_fasttext_format(
                    hp.fast_text_model_file)
                print("Ok.")
            self.trainer = FT_DST(fast_text_model=self.fast_text_model, hp=hp)
        else:
            raise ValueError("Unknown Trainer")

        self.log = log
        if log:
            self.tee = Tee(str(self.trainer.hp.dir / "log.txt"))
            sys.stdout = self.tee

    def reset(self, verbose=0):
        self.setup()
        self.randomize(verbose=verbose)

    def delete(self):
        shutil.rmtree(self.dir, ignore_errors=True)

    def update_metrics(self, metrics):
        if metrics is None:
            metrics = None, None, None, None
        acc, mic, mac, wei = metrics
        self.summary["metrics"]["micro_f1"].append(mic)
        self.summary["metrics"]["macro_f1"].append(mac)
        self.summary["metrics"]["weighted_f1"].append(wei)
        self.summary["metrics"]["accuracy"].append(acc)

    def get_samples(self, samples, sample_size, is_val=False):
        preds, ys = None, None
        for _ in range(samples):
            x, y = self.trainer.get_input_pair(is_val, sample_size)
            pred = self.trainer.infer(x)
            if preds is None:
                preds, ys = pred, y
            else:
                preds = np.concatenate((preds, pred), axis=0)
                ys = np.concatenate((ys, y), axis=0)
        return expit(preds), ys

    def eval(self, thresholds, samples, sample_size, is_val=False):
        preds, ys = self.get_samples(samples, sample_size, is_val)
        averages = [None, "micro", "macro", "weighted"]

        metrics = {str(av): [] for av in averages}

        for av in averages:
            for threshold in thresholds:
                metrics[str(av)].append(
                    f1_score(ys, preds > threshold, average=av))
        return metrics

    def run(self, n_runs=None, randomize=True, log=True, verbose=0):
        n_runs = n_runs or self.conf.n_runs
        if n_runs is None:
            n_runs = np.iinfo(int).max
        print("\n= = > Run", self.current_run)

        self.setup(log)

        if randomize:
            self.randomize(verbose=verbose)

        while self.current_run < n_runs:
            if self.current_run > 0:
                print("\n= = > Run", self.current_run)
            try:
                metrics = self.trainer.train()
                self.trainer.dump_logits()
                self.update_metrics(metrics)
                self.summarize()
            except EndOfExperiment:
                print("\nStopping experiment. Delete?")
                answer = input("y/n")
                if "y" in answer:
                    self.delete()
                    return
                else:
                    self.summary["other"][
                        "interrupting"] = "Keyboard interrupted"
                    self.summarize()
                break

            self.current_run += 1
            if self.current_run < n_runs:
                self.reset(verbose)
            # End of run
        # End of all runs
        self.summary["other"]["interrupting"] = "Done: all runs performed."
        self.summarize()
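
A side note on the idiom self.conf = DefaultMunch(None).fromDict(conf) used above: in munch, fromDict is a classmethod, so calling it on a freshly constructed instance works, but the None passed to the constructor is discarded and the default actually comes from fromDict's own (optional) second argument. A clearer, equivalent spelling, assuming current munch releases:

from munch import DefaultMunch

conf = {'trainer_type': 'DST'}

a = DefaultMunch(None).fromDict(conf)   # the throwaway instance plays no role
b = DefaultMunch.fromDict(conf, None)   # same result, default made explicit
assert a == b and a.missing is None
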
Example #6
def test_delattr_default():
    b = DefaultMunch(lol=42)
    del b.lol

    assert b.lol is None
    assert b['lol'] is None
Example #7
def test_fromDict_default():
    undefined = object()
    b = DefaultMunch.fromDict({'urmom': {'sez': {'what': 'what'}}}, undefined)
    assert b.urmom.sez.what == 'what'
    assert b.urmom.sez.foo is undefined
Example #8
    def __call__(self, value, path=""):
        log.debug('Visiting %s', path)
        if value in self.visited:
            raise UtilException("Serialisation failed: cycle detected: %s" %
                                path)
        self.visited.append(value)
        log.debug('Type is %s', value.__class__)

        if isinstance(value, model.Base):
            names = dir(value)

            son = DefaultMunch(undefined)
            for name in names:
                if not self.can_emit(name, path):
                    continue
                v = getattr(value, name)
                if self.omit and v is None:
                    continue
                if hasattr(v, '__call__'):
                    continue
                son[to_camel_case(name)] = self(v, "%s/%s" % (path, name))

        elif isinstance(value, str):
            son = value
        elif isinstance(value, datetime.datetime):
            son = value.timestamp()
        elif isinstance(value, datetime.date):
            son = datetime.datetime.combine(
                value, datetime.datetime.min.time()).timestamp()
        elif isinstance(value, uuid.UUID):
            son = str(value)
        elif (hasattr(value, '__getitem__') and hasattr(value, 'keys')
              and hasattr(value, 'values')
              and not isinstance(value, RowProxy)):
            # Dictionaries
            son = DefaultMunch(undefined)
            for name in value.keys():
                if not self.can_emit(name, path):
                    continue
                v = value[name]
                if self.omit and v is None:
                    continue
                son[to_camel_case(name)] = self(v, "%s/%s" % (path, name))

        elif hasattr(value, '__iter__'):
            # Lists
            son = []
            for i, v in enumerate(value):
                if not self.can_emit(i, path):
                    continue
                if self.omit and v is None:
                    continue
                son.append(self(v, "%s/%d" % (path, i)))
        else:
            son = value

        if (isinstance(son, str)
                and any(s.search(path) for s in self._sanitise)):
            son = bleach.clean(son, strip=True)

        self.visited.pop()
        return son
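
The serializer seeds every DefaultMunch with a module-level `undefined` sentinel rather than None, which lets consumers distinguish a key that is absent from a key that was explicitly serialised as None, something a plain None default cannot express. The pattern in isolation, assuming a module-level sentinel:

from munch import DefaultMunch

undefined = object()             # module-level sentinel
son = DefaultMunch(undefined)    # first positional argument is the default
son.title = None                 # present, explicitly None

assert son.title is None         # present with value None
assert son.missing is undefined  # absent: distinguishable from None
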
Example #9
 def open(self):
     with open(self.__path, "r") as ymlfile:
         self.__config = safe_load(ymlfile)
         self.__config = DefaultMunch.fromDict(self.__config)
Example #10
 print(f'Worker_id={worker_id}')
 env = UnityEnvironment("./environment/Tennis_Linux/Tennis.x86_64",
                        worker_id=worker_id,
                        seed=seed,
                        no_graphics=False)
 brain = env.brains[env.brain_names[0]]
 env_info = env.reset(train_mode=False)[env.brain_names[0]]
 n_agents = len(env_info.agents)
 print('Number of agents:', n_agents)
 action_size = brain.vector_action_space_size
 state_size = brain.vector_observation_space_size
 state_multiplier = brain.num_stacked_vector_observations
 action_type = brain.vector_action_space_type
 comment = f"MADDPG Unity Tennis"
 rand_seed = 0
 config = DefaultMunch()
 config.seed = seed
 config.n_episodes = 10
 config.max_t = 1000
 config.buffer_size = 100000
 config.batch_size = 200
 config.gamma = 0.99
 config.tau = 0.001
 config.lr_actor = 0.0001
 config.lr_critic = 0.001
 config.n_agents = n_agents
 config.state_size = state_size * state_multiplier
 config.action_size = action_size
 config.learn_start = 3000
 config.max_action = 1
 config.memory = ExperienceReplayMemory(config.buffer_size, rand_seed)
Example #11
  # missing variable mapping
  schema.csl.fields.text['volume-title'] = [ 'volumeTitle' ]

  # status is publication status, not legal status
  schema.csl.fields.text.status = [ 'status' ]

  with open(os.path.join(root, 'setup/csl-vars.json')) as f:
    csl_vars = set(json.load(f).keys())
    fix_csl_vars(schema.csl.fields.text, 'jurism', csl_vars)
    fix_csl_vars(schema.csl.fields.date, 'jurism', csl_vars)
    fix_csl_vars(schema.csl.names, 'jurism', csl_vars)

  return Munch.toDict(schema)

data = DefaultMunch.fromDict({
  'zotero': fix_zotero_schema(load('https://api.zotero.org/schema', 'zotero.json')),
  'jurism': fix_jurism_schema(load('https://raw.githubusercontent.com/Juris-M/zotero-schema/master/schema-jurism.json', 'juris-m.json')),
}, None)

class ExtraFields:
  @staticmethod
  def to_json(obj):
    if isinstance(obj, Munch):
      return {
        k: v
        for k, v in Munch.toDict(obj).items()
        if v is not None and not (type(v) == list and len(v) == 0)
      }
    else:
      return obj

  def __init__(self):
Example #12
def _main(cli_args, chosen_deployment_name):
    """Deployment entry point.

    :param cli_args: The command-line arguments
    :type cli_args: ``list``
    :param chosen_deployment_name: The deployment file
    :type chosen_deployment_name: ``str``
    :returns: True on success
    :rtype: ``bool``
    """

    config_file = os.path.join(
        OKD_DEPLOYMENTS_DIRECTORY, chosen_deployment_name,
        io.get_deployment_config_filename(chosen_deployment_name))
    if not os.path.isfile(config_file):
        print('Config file does not exist ({})'.format(chosen_deployment_name))
        return False
    with codecs.open(config_file, 'r', 'utf8') as stream:
        deployment = DefaultMunch.fromDict(yaml.safe_load(stream))

    # First check:
    # is the version present
    # and do we support it?
    if 'version' not in deployment:
        print('The deployment configuration has no version.')
        return False
    if deployment.version not in SUPPORTED_DEPLOYMENT_VERSIONS:
        supported_versions = str(SUPPORTED_DEPLOYMENT_VERSIONS[0])
        for version in SUPPORTED_DEPLOYMENT_VERSIONS[1:]:
            supported_versions += ', {}'.format(version)
        print('The deployment configuration file version ({})'
              ' is not supported.'.format(deployment.version))
        print('Supported versions are: {}'.format(supported_versions))
        return False

    # There must be an okd/inventories directory
    inventory_dir = deployment.okd.inventory_dir
    if not os.path.isdir('okd/inventories/{}'.format(inventory_dir)):
        print('Missing "okd/inventories" directory')
        print('Expected to find the inventory directory "{}"'
              ' but it was not there.'.format(inventory_dir))
        print('Every deployment must have an "inventories" directory')
        return False

    # If the cluster SSH user is not defined,
    # insert it.
    if 'ssh_user' not in deployment.cluster:
        print('Setting default SSH user "{}"'.format(
            OKD_DEFAULT_CLUSTER_SSH_USER))
        deployment.cluster.ssh_user = OKD_DEFAULT_CLUSTER_SSH_USER

    # -----
    # Hello
    # -----
    io.banner(deployment.name, full_heading=True, quiet=False)
    if not cli_args.auto_acknowledge and not cli_args.just_plan:

        # Display the orchestration description
        # (if there is one)
        if deployment.description:
            io.description(deployment.description)

        confirmation_word = io.get_confirmation_word()
        target = 'CREATE the Cluster' \
            if cli_args.cluster else 'INSTALL OpenShift/OKD'
        confirmation = input('Enter "{}" to {}: '.format(
            confirmation_word, target))
        if confirmation != confirmation_word:
            print('Phew! That was close!')
            return True

    # Some key information...
    okd_admin_password = os.environ.get(OKD_ADMIN_PASSWORD_ENV)
    if not okd_admin_password:
        io.error('You must define {}'.format(OKD_ADMIN_PASSWORD_ENV))

    okd_api_hostname = deployment.cluster.public_hostname
    okd_api_port = deployment.cluster.api_port

    # -------
    # Ansible (A specific version)
    # -------
    # Install the ansible version name in the deployment file

    cmd = 'pip install --upgrade pip setuptools --user'
    rv, _ = io.run(cmd, '.', cli_args.quiet)
    if not rv:
        return False

    cmd = 'pip install ansible=={} --user'. \
        format(deployment.okd.ansible_version)
    rv, _ = io.run(cmd, '.', cli_args.quiet)
    if not rv:
        return False

    t_dir = deployment.cluster.terraform_dir
    if cli_args.cluster:

        # ------
        # Render (jinja2 files)
        # ------
        # Translate content of Jinja2 template files
        # using the deployment configuration's YAML file content.

        if not cli_args.skip_rendering:

            cmd = './render.py {} --ssh-user {}'.\
                format(chosen_deployment_name,
                       deployment.cluster.ssh_user)
            cwd = '.'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # If the deployment file has a 'my_machines' section
        # then we assume the user's provided their own cluster
        # and the Terraform step is not needed.
        if 'my_machines' in deployment:

            # -----------------
            # Manual Templating
            # -----------------
            # The user has provided their own cluster
            # and defined it in the my_machines section
            # of their deployment configuration.
            #
            # Here we process the rendered inventory files
            # just as Terraform would do.
            io.banner('Templating ...')
            print('inventory')
            if not templater.render(deployment):
                return False

            print('bastion/inventory')
            file_name = 'ansible/bastion/inventory.yaml.tpl'
            if not templater.\
                    render(deployment,
                           template_file_name=file_name):
                return False

            print('post-okd/inventory')
            file_name = 'ansible/post-okd/inventory.yaml.tpl'
            if not templater. \
                    render(deployment,
                           template_file_name=file_name,
                           admin_password=okd_admin_password):
                return False

        else:

            # ---------
            # Terraform
            # ---------
            # Create compute instances for the cluster.

            cmd = 'terraform init'
            cwd = 'terraform/{}'.format(t_dir)
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

            # Plan or Apply?
            action = 'plan' if cli_args.just_plan else 'apply -auto-approve'
            cmd = 'terraform {}' \
                  ' -state=.terraform.{}'.format(action,
                                                 chosen_deployment_name)
            cwd = 'terraform/{}'.format(t_dir)
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        if cli_args.just_plan:
            # Just plan means just that...
            return True

        # -------
        # Ansible
        # -------
        # Run the bastion site file.

        if not cli_args.skip_pre_okd:

            extra_env = ''
            if deployment.okd.certificates:
                if deployment.okd.certificates.generate_api_cert:

                    certbot_email = os.environ.get(OKD_CERTBOT_EMAIL_ENV)
                    if not certbot_email:
                        io.error(
                            'You must define {}'.format(OKD_CERTBOT_EMAIL_ENV))
                        return False

                    extra_env += ' -e master_cert_email="{}"'.\
                        format(certbot_email)
                    extra_env += ' -e public_hostname="{}"'. \
                        format(deployment.cluster.public_hostname)

                elif (deployment.okd.certificates.wildcard_cert
                      or deployment.okd.certificates.master_api_cert):

                    # User-supplied certificates -
                    # expect a vault password file
                    # in the deployment directory
                    extra_env += ' --vault-password-file' \
                                 ' {}/{}/vault-pass.txt'.\
                        format(OKD_DEPLOYMENTS_DIRECTORY,
                               chosen_deployment_name)

            if OKD_DEPLOYMENTS_DIRECTORY != 'deployments':
                extra_env += ' -e deployments_directory="{}"'.\
                    format(OKD_DEPLOYMENTS_DIRECTORY)
            else:
                extra_env += ' -e deployments_directory="../../deployments"'

            keypair_name = os.environ.get(OKD_KEYPAIR_NAME_ENV)
            if not keypair_name:
                io.error('You must define {}'.format(OKD_KEYPAIR_NAME_ENV))
                return False

            cmd = 'ansible-playbook site.yaml' \
                  ' {}' \
                  ' -e keypair_name={}' \
                  ' -e inventory_dir={}' \
                  ' -e cluster_ssh_user={}' \
                  ' -e deployment_name={}'.format(extra_env,
                                                  keypair_name,
                                                  deployment.okd.inventory_dir,
                                                  deployment.cluster.ssh_user,
                                                  chosen_deployment_name)
            cwd = 'ansible/bastion'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # Now expose the Bastion's IP...

        if 'my_machines' in deployment:

            # Simulate the final step in Terraform,
            # i.e. exposing the bastion.
            # Doing this simplifies things for the user
            # i.e. "it looks and feels the same"

            io.banner('terraform output ...')
            print('bastion_ip = {}'.format(deployment.my_machines.bastion))

        else:

            cmd = 'terraform output' \
                  ' -state=.terraform.{}'.format(chosen_deployment_name)
            cwd = 'terraform/{}'.format(t_dir)
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # Leave.
        return True

    # If we get here we're installing OpenShift/OKD
    # (on a cluster that is assumed to exist)...

    # -----
    # Clone (OpenShift Ansible Repo)
    # -----
    # ...and checkout the revision defined by the deployment tag.

    if not cli_args.skip_okd:

        # If the expected clone directory does not exist
        # then clone OpenShift Ansible.
        if not os.path.exists('openshift-ansible'):

            cmd = 'git clone' \
                  ' https://github.com/openshift/openshift-ansible.git' \
                  ' --no-checkout'
            cwd = '.'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

        # Checkout the required OpenShift Ansible TAG
        cmd = 'git checkout tags/{}'. \
            format(deployment.okd.ansible_tag)
        cwd = 'openshift-ansible'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # -------
    # Ansible (Pre-OKD)
    # -------

    if not cli_args.skip_pre_okd:

        extra_env = ''
        if deployment.okd.certificates and\
                deployment.okd.certificates.generate_api_cert:
            extra_env += ' -e public_hostname={}'. \
                format(deployment.cluster.public_hostname)
        cmd = 'ansible-playbook site.yaml' \
              ' {}' \
              ' -i ../../okd/inventories/{}/inventory.yaml'.\
            format(extra_env, inventory_dir)
        cwd = 'ansible/pre-okd'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

    # -------
    # Ansible (OKD)
    # -------
    # Deploy using the playbooks named in the deployment
    # (from the checked-out version).

    if not cli_args.skip_okd:

        for play in deployment.okd.play:
            cmd = 'ansible-playbook ../openshift-ansible/playbooks/{}.yml' \
                  ' -i inventories/{}/inventory.yaml'.\
                format(play, inventory_dir)
            cwd = 'okd'
            rv, _ = io.run(cmd, cwd, cli_args.quiet)
            if not rv:
                return False

    # -------
    # Ansible (Post-OKD)
    # -------

    if not cli_args.skip_post_okd:

        # Always run the 'site' playbook.
        # This adds the OKD admin and (optional) developer user accounts
        # and other common things like template deployment.
        #
        # The following variables are made available to all the playbooks: -
        #
        # - okd_api_hostname
        # - okd_admin
        # - okd_admin_password

        extra_env = ''
        dev_password = os.environ.get(OKD_DEVELOPER_PASSWORD_ENV)
        if dev_password:
            extra_env += ' -e okd_developer_password={}'.format(dev_password)
        # The template namespace
        # (optionally defined in the configuration)
        if deployment.okd.template and deployment.okd.template.namespace:
            template_namespace = deployment.okd.template.namespace
            extra_env += ' -e template_namespace={}'.format(template_namespace)
        cmd = 'ansible-playbook site.yaml' \
              '{}' \
              ' -e okd_api_hostname=https://{}:{}' \
              ' -e okd_admin=admin' \
              ' -e okd_admin_password={}' \
              ' -e okd_deployment={}'. \
            format(extra_env,
                   okd_api_hostname, okd_api_port,
                   okd_admin_password, chosen_deployment_name)
        cwd = 'ansible/post-okd'
        rv, _ = io.run(cmd, cwd, cli_args.quiet)
        if not rv:
            return False

        # Now iterate through the plays listed in the cluster's
        # 'post_okd' list...

        if deployment.okd.post_okd:
            for play in deployment.okd.post_okd:
                # Any user-defined 'extra' variables?
                play_vars = ''
                if play.vars:
                    for var in play.vars:
                        play_vars += '-e {} '.format(var)
                    play_vars = play_vars[:-1]
                # Run the user playbook...
                cmd = 'ansible-playbook playbooks/{}/deploy.yaml' \
                    ' -e okd_api_hostname=https://{}:{}' \
                    ' -e okd_admin=admin' \
                    ' -e okd_admin_password={}' \
                    ' -e okd_deployment={}' \
                    ' {}'.\
                    format(play.play,
                           okd_api_hostname, okd_api_port,
                           okd_admin_password, chosen_deployment_name,
                           play_vars)
                cwd = 'ansible/post-okd'
                rv, _ = io.run(cmd, cwd, cli_args.quiet)
                if not rv:
                    return False

    # -------
    # Success
    # -------

    # OK if we get here.
    # Cluster created and OKD installed.
    return True
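
Throughout this script the payoff of DefaultMunch.fromDict is that optional configuration blocks (deployment.description, deployment.okd.certificates, and so on) can be probed directly, without try/except or chained .get() calls, because a missing key at any munchified level reads as None. A minimal sketch of the pattern:

from munch import DefaultMunch

deployment = DefaultMunch.fromDict({'name': 'demo', 'okd': {'play': ['a']}})

if deployment.description:       # key absent -> None -> block skipped
    print(deployment.description)
if deployment.okd.certificates:  # nested absent key, same pattern
    print('configuring certificates')

Note the chain is only safe down to the first missing level: deployment.okd is itself a DefaultMunch here, but if okd were absent, deployment.okd would be None and the attribute access on it would raise.
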
Example #13
                True)))


@cli.command()
@click.pass_context
def link(ctx):
    final_projects = ctx.obj.manager.get_projects()

    for proj in final_projects:
        result = proj.link_global()
        click.echo(repr(result))


@cli.command()
@click.option("-t",
              "--version_type",
              type=click.Choice(["patch", "minor", "major"]),
              default="patch")
@click.option("-m", "--msg", type=str)
@click.pass_context
def deploy(ctx, version_type, msg):
    final_projects = ctx.obj.manager.get_projects()

    for proj in final_projects:
        result = proj.deploy(msg=msg, version_type=version_type)
        click.echo(repr(result))


if __name__ == "__main__":
    cli(obj=DefaultMunch(None, {}))
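
The entry point hands click an attribute-style context object: in DefaultMunch(None, {}) the first positional argument is the default and the second is the initial mapping, so any ctx.obj entry reads as None until a command assigns it. In isolation:

from munch import DefaultMunch

obj = DefaultMunch(None, {})  # default None, empty initial mapping
assert obj.manager is None    # unset context entries read as None
obj.manager = object()        # a command can attach real state later
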
Example #14
 def set_conf(self, path):
     with open(path, "r") as f:
         conf = yaml.safe_load(f)
     self.conf = DefaultMunch(None).fromDict(conf)
Example #15
from pprint import pformat
from re import sub

from flask_babel import lazy_gettext, lazy_ngettext

from munch import DefaultMunch as Munch

from toml import dumps, loads

from . import dt

Munch.__name__ = 'X'
Munch.__repr__ = lambda self: f'X{pformat(self.__dict__)}'

Munch.toTOML = lambda self: dumps(self)
Munch.fromTOML = lambda data: Munch.fromDict(loads(data))


def X(_dict={}, **params):
    '''
    :class:`X` is a `Munch <https://pypi.org/project/munch/>`_.
    :meth:`X` is also a function that returns a new :class:`X`
    from a ``dict`` or a set of parameters.

    :class:`Munch` is "`a dictionary that supports attribute-style access`".
    :meth:`X` offers us a few options::

        user = X(id=1, name='sheila', email='*****@*****.**')
        user                            # we can create a new X from parameters
            X{'email': '*****@*****.**', 'id': 1, 'name': 'sheila'}
        user['id']           # we can index by the usual brace attribute syntax
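
The monkeypatched toTOML/fromTOML pair round-trips a Munch through TOML text: since Munch subclasses dict, toml.dumps accepts it directly, and fromDict re-wraps the parsed mapping. A self-contained sketch of the round trip, assuming the toml package is installed:

from munch import DefaultMunch as Munch
from toml import dumps, loads

Munch.toTOML = lambda self: dumps(self)
Munch.fromTOML = lambda data: Munch.fromDict(loads(data))

user = Munch.fromDict({'id': 1, 'name': 'sheila'})
doc = user.toTOML()                     # TOML text, e.g. 'id = 1\nname = "sheila"\n'
clone = Munch.fromTOML(doc)
assert clone.id == 1 and clone.name == 'sheila'
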
Example #16
    def updateMethod(self, method):
        """Update the method for detecting clap

        :param dict method: A dict type parameter which defines a clap detection method
        """
        self.method = Objectify.fromDict(method, False)
Example #17
 def copy(self):
     return Policy(
         DefaultMunch(undefined, self.context),
         self.rules.copy(),
         self.error_factory, self.aspect)
Example #18
def cfg_strategy_fixture():
    b = DefaultMunch()
    b.url = "test_url"
    b.api_key = "test_key"
    b.api_secret = "test_secret"

    b.name = "market_maker"
    b.instrument_name = "TEST-PERP"
    b.mid_price_based_calculation = False

    b.tick_size = 1
    b.price_rounding = 2
    b.cancel_orders_on_start = False
    b.stop_strategy_on_error = True
    b.cancel_orders_on_reconnection = True

    b.orders = DefaultMunch()
    b.orders.asks = [[0, 1]]
    b.orders.bids = [[0, 1]]
    return b
Example #19
def test_pickle_default():
    b = DefaultMunch.fromDict({"a": "b"})
    assert pickle.loads(pickle.dumps(b)) == b
Example #20
            "frame_selection_method":
            "random",  # frame selection method [choose from: 'random']
            "img_resize": (256, 256),  # image resize

            # Caching Options (Text Embeddings and Image Feature Extraction)
            "use_cache":
            True,  # If True, preprocess text and store as cache, else, process text during runtime
            "text_embedding_name":
            "bert_pretrained",  # text embedder [choose from: 'bert_pretrained']
            "image_feature_extraction":
            "None"  # image feature extraction method [choose from: 'None']
        }
    },
    "model": {
        "storygan": {
            "img_size": 256,
            "text_dim": 768,
            "text_hidden_dim": 75,
            "noise_dim": 75,
            "gen_channel": 192 * 8,
            "dis_channel": 96,
            "latent_img_dim": 15,
            "label_num": 2
        }
    }
}

config = DefaultMunch.fromDict(
    config, None
)  # for attribute style access of dictionary type  ex) config.dataset ..etc
Example #21
def test_repr_default():
    b = DefaultMunch(foo=DefaultMunch(lol=True), ponies='are pretty!')
    assert repr(b).startswith("DefaultMunch(None, {'")
    assert "'ponies': 'are pretty!'" in repr(b)
Example #22
def munchify_transform(config):
    '''
    Convert a nested dictionary into a JavaScript-style object (Munch).
    '''
    from munch import DefaultMunch
    return DefaultMunch.fromDict(config)
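
fromDict walks the whole structure, so nested dicts, and dicts inside lists, come back as DefaultMunch too; that is what makes the dotted, JavaScript-style access work at any depth. A short sketch:

from munch import DefaultMunch

config = DefaultMunch.fromDict(
    {'db': {'host': 'localhost', 'pools': [{'size': 5}]}})
assert config.db.host == 'localhost'
assert config.db.pools[0].size == 5   # munchified inside lists as well
assert config.db.password is None     # absent keys fall back to the default
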
Example #23
def test_fromDict_default():
    undefined = object()
    b = DefaultMunch.fromDict({'urmom': {'sez': {'what': 'what'}}}, undefined)
    assert b.urmom.sez.what == 'what'
    assert b.urmom.sez.foo is undefined
Example #24
import torch

from collections import deque
from FireflyEnv.env_utils import range_angle
from munch import DefaultMunch
from Config import Config

arg = Config()
rewards = deque(maxlen=100)

# read configuration parameters
#from Config import Config
#arg = Config()

filename = '20200301-121927'  # agent information
argument = torch.load(arg.data_path + 'data/' + filename + '_arg.pkl')

arg = DefaultMunch.fromDict(argument)

# fix random seed
import random
random.seed(arg.SEED_NUMBER)
import torch
torch.manual_seed(arg.SEED_NUMBER)
if torch.cuda.is_available():
    torch.cuda.manual_seed(arg.SEED_NUMBER)
import numpy as np
np.random.seed(arg.SEED_NUMBER)
import time

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
Example #25
def test_pickle():
    b = DefaultMunch.fromDict({"a": "b"})
    assert pickle.loads(pickle.dumps(b)) == b
Example #26
def extract_data_as_separate_csv(
    file_path: Path = None,
    result_folder: Path = Path(os.getcwd()),
    filter_columns: List[str] = None,
    sort_by_columns: List[str] = None,
    transform_function: Callable[[Union[int, str]],
                                 str] = None) -> Dict[str, Any]:
    """Read a CSV file from specified path, and extract the unique values from specified `filter_columns` in separate
    CSV files.

    Example:
    If we want to extract data for unique MAC addresses appeared in source, destination MAC address
    fields to individual files, we can call the function as split_csv(input_file, results_folder, data_columns=[
    'src_mac', 'dst_mac']). This will look for all unique MAC addresses appeared in 'src_mac' or 'dst_mac' columns.
    For each of these MAC addresses, it will filter all rows where that mac address appeared in 'src_mac' or
    'dst_mac' columns, and output the result to separate CSV file (in specified results_folder). At the end,
    the function will provide summary object where keys are unique MAC addresses, and values contain path to output
    file, and number of rows in the output file for specific MAC address.


    Parameters
    -----------
    file_path: str
        Path to input data file
        :type: string
    result_folder: str
        Path to folder where output CSV files should be written. default: current working directory
    filter_columns: List[str]
        columns from where unique values are extracted
    transform_function: function
        This function is used to transform unique values to human-understandable format. For example, data file
        contains MAC address as integer. If filtering is done over mac address, you can pass a transform function which\
         converts integer representation to MAC to be used in file-names, and summary data

    Returns
    -------
    summary: dictionary
        JSON (dictionary object) containing

    Raises
    ------
    GenericError: exception
        Error raised if
        - Input file does not exist, or specified path is directory
        - Output folder path does not exist or specified path is not a directory
        - Data can not be loaded to DataFrame from a CSV file
        - Data can not be written from DataFrame to a CSV file
    """
    output_folder = Path(result_folder)
    if output_folder.exists() and not output_folder.is_dir():
        raise GenericError(
            'Specified output folder `{}` is not a directory'.format(
                result_folder))
    if not output_folder.exists():
        os.makedirs(output_folder, exist_ok=True)

    data = load_csv_to_dataframe(file_path,
                                 fill_empty_values=True,
                                 verify_columns=filter_columns)
    base_filename_id = file_path.name.strip().split('.')[0]
    summary_data = DefaultMunch()

    unique_values = []
    for col in filter_columns:
        unique_values.extend(list(data[col].unique()))

    unique_values = set(unique_values)
    logging.debug('`%s` unique values', len(unique_values))

    for val in unique_values:
        st = time.time()
        if transform_function is not None:
            str_val = transform_function(val)
        else:
            str_val = val

        output_file_path = '{folder}/{prefix}-{value}.csv'.format(
            folder=output_folder, prefix=base_filename_id, value=str_val)
        filtered_data = get_filtered_data_from_data_frame(
            data, filter_columns, val, sort_by_columns)
        write_dataframe_to_csv_file(filtered_data,
                                    output_file_path,
                                    True,
                                    header=True,
                                    index=False)

        summary_data[str_val] = DefaultMunch(file=output_file_path,
                                             row_count=filtered_data.shape[0],
                                             time_taken=time.time() - st)
        logging.debug(
            'Successfully filtered data for value = `%s` to `%s` in `%s` seconds',
            str_val, summary_data[str_val]['file'],
            summary_data[str_val]['time_taken'])

    return summary_data
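
Because the summary is itself a DefaultMunch whose per-value records were built with keyword arguments, callers can use dotted access on the result. A hypothetical usage sketch (the file name and MAC key are invented for illustration):

from pathlib import Path

summary = extract_data_as_separate_csv(
    file_path=Path('flows.csv'),
    filter_columns=['src_mac', 'dst_mac'])

record = summary['aa:bb:cc:dd:ee:ff']   # hypothetical key
print(record.file, record.row_count, record.time_taken)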