Example #1
    def __init__(self, slinky, stepfile):
        self.slinky = slinky
        self.path = os.path.abspath(stepfile)
        self.module = os.path.split(stepfile)[1]
        self.steps = {}

        with open(self.path) as inf:
            firstline = inf.readline()

        if firstline.startswith('#!') and "python" in firstline:
            out = self._evil_fast_python_help_reader(self.path)
        else:
            out = self._general_reasonable_help_reader(self.path)

        try:
            yaml_for_out = yaml.safe_load(out)
            if yaml_for_out:
                for name, info in yaml_for_out.items():
                    if name in self.steps:
                        raise Exception("stepfile %s has duplicate definitions for '%s'" % (self.path, name))
                    self.steps[name] = Step(self, name, info)
            else:
                raise ValueError("There was no yaml.safe_load(out) for self.path `{}`:\n{}".format(self.path, out))
        except yaml.scanner.ScannerError as e:
            raise ValueError("{e} thrown while parsing steps in {sn}; output was:\n{out}".format(
                e=str(e), sn=self.path, out=out))
Example #2
 def __read_over(self, overstate):
     '''
     Read in the overstate file
     '''
     if overstate:
         with salt.utils.fopen(overstate) as fp_:
             try:
                 # TODO Use render system
                 return self.__sort_stages(yaml.safe_load(fp_))
             except Exception:
                 return {}
     if self.saltenv not in self.opts['file_roots']:
         return {}
     for root in self.opts['file_roots'][self.saltenv]:
         fn_ = os.path.join(
                 root,
                 self.opts.get('overstate', 'overstate.sls')
                 )
         if not os.path.isfile(fn_):
             continue
         with salt.utils.fopen(fn_) as fp_:
             try:
                 # TODO Use render system
                 return self.__sort_stages(yaml.safe_load(fp_))
             except Exception:
                 return {}
     return {}
Example #3
def load_config_file(path):
    """Load configuration settings from file.

    Return tuple of configuration dictionary and base path.
    """
    if os.path.isdir(path):
        for filename in ('.s3_website.yaml', '.s3_website.yml'):
            filepath = os.path.join(path, filename)
            try:
                with open(filepath, 'r') as f:
                    config = yaml.safe_load(f)
            except Exception:
                logger.debug('Unable to load config from {}'.format(filepath),
                             exc_info=True)
            else:
                base_path = path
                break
        else:
            raise ValueError(
                'Unable to find .s3_website.yaml in {}'.format(path))
    else:
        with open(path, 'r') as f:
            config = yaml.safe_load(f)
        base_path = os.path.dirname(path)

    return config, base_path
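
A quick usage sketch (paths are hypothetical): pass a directory to search for the dotfile, or a file path to load it directly.

config, base_path = load_config_file('/srv/mysite')           # searches for .s3_website.yaml/.yml
config, base_path = load_config_file('/srv/mysite/conf.yml')  # loads the named file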
Example #4
def process_remote_sources(raw_config, environment=None):
    """Stage remote package sources and merge in remote configs.

    Args:
        raw_config (str): the raw stacker configuration string.
        environment (dict, optional): any environment values that should be
            passed to the config

    Returns:
        str: the raw stacker configuration string

    """

    config = yaml.safe_load(raw_config)
    if config and config.get('package_sources'):
        processor = SourceProcessor(
            sources=config['package_sources'],
            stacker_cache_dir=config.get('stacker_cache_dir')
        )
        processor.get_package_sources()
        if processor.configs_to_merge:
            for i in processor.configs_to_merge:
                logger.debug("Merging in remote config \"%s\"", i)
                with open(i) as remote_fh:
                    remote_config = yaml.safe_load(remote_fh)
                config = merge_map(remote_config, config)
            # Call the render again as the package_sources may have merged in
            # additional environment lookups
            if not environment:
                environment = {}
            return render(str(config), environment)

    return raw_config
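
A minimal call sketch, assuming an illustrative stacker config string (the git source URI below is hypothetical):

raw = """
package_sources:
  git:
    - uri: git@github.com:example/stacker_blueprints.git
"""
rendered = process_remote_sources(raw, environment={'namespace': 'demo'})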
Example #5
 def _validate_output(serial_str):
     try:
         yaml.safe_load(StringIO(serial_str))
     except Exception:
         return False
     else:
         return True
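
The contract is simply "parses as YAML or not"; for instance (illustrative calls):

assert _validate_output('key: value') is True
assert _validate_output('key: [unclosed') is False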
Example #6
 def get_playbook(self):
     """
     If necessary, fetch and read the playbook file
     """
     playbook = self.config['playbook']
     if isinstance(playbook, list):
         # Multiple plays in a list
         self.playbook = playbook
     elif isinstance(playbook, str) and playbook.startswith(('http://',
                                                            'https://')):
         response = requests.get(playbook)
         response.raise_for_status()
         self.playbook = yaml.safe_load(response.text)
     elif isinstance(playbook, str):
         try:
             playbook_path = os.path.expanduser(playbook)
             if not playbook_path.startswith('/'):
                 # If the path is not absolute at this point, look for the
                 # playbook in the repo dir. If it's not there, we assume
                 # the path is relative to the working directory
                 pb_in_repo = os.path.join(self.repo_path, playbook_path)
                 if os.path.exists(pb_in_repo):
                     playbook_path = pb_in_repo
             self.playbook_file = open(playbook_path)
             playbook_yaml = yaml.safe_load(self.playbook_file)
             self.playbook = playbook_yaml
         except Exception:
             log.error("Unable to read playbook file %s", playbook)
             raise
     else:
         raise TypeError(
             "playbook value must either be a list, URL or a filename")
     log.info("Playbook: %s", self.playbook)
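
The three accepted shapes for config['playbook'] handled above (values are illustrative):

# self.config['playbook'] = [{'hosts': 'all', 'tasks': []}]   # inline list of plays
# self.config['playbook'] = 'https://example.com/site.yml'    # fetched over HTTP(S)
# self.config['playbook'] = 'playbooks/site.yml'              # repo-relative or local path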
Example #7
 def configure(self):
     if os.path.isfile(installation_path + "/conf/user_data.yaml.tmpl"):
         userdatayaml = installation_path + "/user-data/" + self.username
         if os.path.isfile(userdatayaml):
             userdatayaml_data_stream = open(userdatayaml, "r")
             yaml_parsed_userdata = yaml.safe_load(userdatayaml_data_stream)
             userdatayaml_data_stream.close()
             myversion = yaml_parsed_userdata.get("PHP")
             backend_config_file = installation_path + "/conf/backends.yaml"
             backend_data_yaml = open(backend_config_file, "r")
             backend_data_yaml_parsed = yaml.safe_load(backend_data_yaml)
             backend_data_yaml.close()
             if "PHP" in backend_data_yaml_parsed:
                 php_backends_dict = backend_data_yaml_parsed["PHP"]
                 php_path = php_backends_dict.get(myversion)
                 php_profile_set(self.username, php_path)
                 path_to_socket = php_path + "/var/run/" + self.username + ".sock"
                 if os.path.islink("/opt/fpmsockets/" + self.username + ".sock"):
                     os.remove("/opt/fpmsockets/" + self.username + ".sock")
                     os.symlink(path_to_socket, "/opt/fpmsockets/" + self.username + ".sock")
                 else:
                     os.symlink(path_to_socket, "/opt/fpmsockets/" + self.username + ".sock")
             else:
                 print("ERROR:: PHP Backends missing")
         else:
             subprocess.call("cp " + installation_path + "/conf/user_data.yaml.tmpl " + userdatayaml, shell=True)
             cpuser_uid = pwd.getpwnam(self.username).pw_uid
             cpuser_gid = grp.getgrnam(self.username).gr_gid
             os.chown(userdatayaml, cpuser_uid, cpuser_gid)
             os.chmod(userdatayaml, 0o660)
             self.configure()
     else:
         sys.exit(0)
Example #8
 def _subp(self, *args, **kwargs):
     # supports subp calling with cmd as args or kwargs
     if 'args' not in kwargs:
         kwargs['args'] = args[0]
     self.subp_called.append(kwargs)
     snap_cmds = []
     args = kwargs['args']
     # here we basically parse the snappy command invoked
     # and append to snapcmds a list of (mode, pkg, config)
     if args[0:2] == ['snappy', 'config']:
         if args[3] == "-":
             config = kwargs.get('data', '')
         else:
             with open(args[3], "rb") as fp:
                 config = yaml.safe_load(fp.read())
         self.snapcmds.append(['config', args[2], config])
     elif args[0:2] == ['snappy', 'install']:
         config = None
         pkg = None
         for arg in args[2:]:
             if arg.startswith("-"):
                 continue
             if not pkg:
                 pkg = arg
             elif not config:
                 cfgfile = arg
                 if cfgfile == "-":
                     config = kwargs.get('data', '')
                 elif cfgfile:
                     with open(cfgfile, "rb") as fp:
                         config = yaml.safe_load(fp.read())
         self.snapcmds.append(['install', pkg, config])
Example #9
  def __init__(self, filename=None, data=None, fd=None):
    super(YamlParser, self).__init__()

    if fd:
      self.parsed = yaml.safe_load(fd)
      self.fd = fd
      try:
        self.filename = fd.name
      except AttributeError:
        self.filename = None

    elif filename:
      try:
        with open(filename, "rb") as conf_fd:
          self.parsed = yaml.safe_load(conf_fd) or OrderedYamlDict()

      except IOError as e:
        if e.errno == errno.EACCES:
          # Specifically catch access denied errors, this usually indicates the
          # user wanted to read the file, and it existed, but they lacked the
          # permissions.
          raise IOError(e)
        else:
          self.parsed = OrderedYamlDict()
      except OSError:
        self.parsed = OrderedYamlDict()

      self.filename = filename

    elif data is not None:
      fd = StringIO.StringIO(data)
      self.parsed = yaml.safe_load(fd)
      self.filename = filename
    else:
      raise Error("Filename not specified.")
Example #10
def get_disks(vm_):
    '''
    Return the disks of a named vm

    CLI Example::

        salt '*' virt.get_disks <vm name>
    '''
    disks = {}
    doc = minidom.parse(StringIO(get_xml(vm_)))
    for elem in doc.getElementsByTagName('disk'):
        sources = elem.getElementsByTagName('source')
        targets = elem.getElementsByTagName('target')
        if len(sources) > 0:
            source = sources[0]
        else:
            continue
        if len(targets) > 0:
            target = targets[0]
        else:
            continue
        if 'dev' in list(target.attributes) and 'file' in list(source.attributes):
            disks[target.getAttribute('dev')] = {
                'file': source.getAttribute('file')}
    for dev in disks:
        try:
            disks[dev].update(yaml.safe_load(subprocess.Popen('qemu-img info '
                + disks[dev]['file'],
                shell=True,
                stdout=subprocess.PIPE).communicate()[0]))
        except TypeError:
            disks[dev].update(yaml.safe_load('image: Does not exist'))
    return disks
Example #11
def test_configuration_with_binary_strings():
    """
    Regression test: serialization was failing on binary strings
    """

    import yaml

    obj = '\xaa\xbb\x00\xff\xff\x00ABC'
    assert yaml.load(yaml.dump(obj)) == obj
    assert yaml.safe_load(yaml.safe_dump(obj)) == obj

    obj = {'blob': '\xaa\xbb\x00\xff\xff\x00ABC'}
    assert yaml.load(yaml.dump(obj)) == obj
    assert yaml.safe_load(yaml.safe_dump(obj)) == obj

    obj = {
        'function': 'jobcontrol.utils.testing:job_simple_echo',
        'title': None,
        'notes': None,
        # 'args': ('\xaa\xbb\x00\xff\xff\x00ABC',),
        'args': '\xaa\xbb\x00\xff\xff\x00ABC',
        'dependencies': [],
        'kwargs': {},
        'id': 'f974e89f-4ae3-40cc-8316-b78e42bd5cc8',
    }
    dump(obj)
Example #12
    def wait_until_started(self, wait_load=True):
        """ Wait until server is started.

        Server consists of two parts:
        1) wait until server is listening on sockets
        2) wait until server tells us his status

        """
        if wait_load:
            msg = 'entering the event loop|will retry binding|hot standby mode'
            p = self.process if not self.gdb and not self.lldb else None
            self.logfile_pos.seek_wait(msg, p, self.name)
        while True:
            try:
                temp = AdminConnection('localhost', self.admin.port)
                if not wait_load:
                    ans = yaml.safe_load(temp.execute("2 + 2"))
                    return True
                ans = yaml.safe_load(temp.execute('box.info.status'))[0]
                if ans in ('running', 'hot_standby', 'orphan'):
                    return True
                elif ans == 'loading':
                    continue
                else:
                    raise Exception(
                        "Strange output for `box.info.status`: %s" % (ans)
                    )
            except socket.error as e:
                if e.errno == errno.ECONNREFUSED:
                    time.sleep(0.1)
                    continue
                raise
Example #13
def render_template(path, week=None, **kwargs):
    with open('out/report.yml') as r:
        report = yaml.safe_load(r)

    with open('bloggers.yml') as f:
        users = yaml.safe_load(f)
    if week:
        week = parse(week, default=START)
    else:
        week = START

    week = (week - START).days // 7
    week_start = START + (week * datetime.timedelta(7))
    week_end   = START + ((week + 1) * datetime.timedelta(7))

    good = []
    lame = []
    skip = []
    userlist = []

    class User(object):
        pass

    for (un, rec) in users.items():
        u = User()
        u.username = un
        u.links = rec['links']
        u.start = rec['start']
        u.end   = rec.get('end')
        u.skip  = parse_skip(rec)
        u.weeks = report.get(un, [])

        userlist.append(u)

    def user_key(u):
        return (u.start, u.username)

    userlist.sort(key=user_key)

    for u in userlist:
        user_start = parse(u.start, default=START)
        if u.end and parse(u.end, default=START) <= week_start:
            continue

        if should_skip(u.skip, week):
            pass
        elif user_start > week_start:
            skip.append(u)
        elif len(u.weeks) <= week or not u.weeks[week]:
            lame.append(u)
        else:
            good.append(u)

    debts = get_debts()

    return Template(filename=path, output_encoding='utf-8').render(
        week=week, week_start=week_start, week_end=week_end,
        good=good, lame=lame, skip=skip, userlist=userlist,
        pool=get_balance('Pool'), paid=get_balance('Pool:Paid'),
        debts=debts, **kwargs)
Example #14
def get_disks(vm_):
    """
    Return the disks of a named vm

    CLI Example::

        salt '*' virt.get_disks <vm name>
    """
    disks = {}
    doc = minidom.parse(StringIO.StringIO(get_xml(vm_)))
    for elem in doc.getElementsByTagName("disk"):
        sources = elem.getElementsByTagName("source")
        targets = elem.getElementsByTagName("target")
        if len(sources) > 0:
            source = sources[0]
        else:
            continue
        if len(targets) > 0:
            target = targets[0]
        else:
            continue
        if "dev" in target.attributes.keys() and "file" in source.attributes.keys():
            disks[target.getAttribute("dev")] = {"file": source.getAttribute("file")}
    for dev in disks:
        try:
            disks[dev].update(
                yaml.safe_load(
                    subprocess.Popen(
                        "qemu-img info " + disks[dev]["file"], shell=True, stdout=subprocess.PIPE
                    ).communicate()[0]
                )
            )
        except TypeError:
            disks[dev].update(yaml.safe_load("image: Does not exist"))
    return disks
Example #15
def test_deprecation(spec_fixture, workspace_manager_fixture,  # noqa
                               test_workspace, tmpdir):
    """Verify execution runs with deprecated option """

    my_temp_dir = tmpdir.mkdir("tmp")
    deprecated_output = my_temp_dir.join("deprecated_output.yml")

    deprecated_input_string = \
        ['example', '--deprecated-way', 'TestingValue', '--dry-run',
         '-o', str(deprecated_output)]

    output = my_temp_dir.join("output.yml")

    input_string = \
        ['example', '--new-way', 'TestingValue', '--dry-run',
         '-o', str(output)]

    spec_manager = api.SpecManager()
    spec_manager.register_spec(spec_fixture)

    workspace_manager_fixture.activate(test_workspace.name)
    spec_manager.run_specs(args=deprecated_input_string)
    spec_manager.run_specs(args=input_string)

    with open(deprecated_output.strpath) as fp:
        deprecated_yml = yaml.safe_load(fp)["provision"]

    with open(output.strpath) as fp:
        new_yml = yaml.safe_load(fp)["provision"]

    assert deprecated_yml.get('new', None).get('way', None) == 'TestingValue'
    assert new_yml.get('new', None).get('way', None) == 'TestingValue'
Example #16
def _read_config(config_file_path):
    if not config_file_path:
        config_file_path = CONFIG_FILE_NAME
    defaults_config_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        DEFAULTS_CONFIG_FILE_NAME)

    if not os.path.exists(config_file_path) or not os.path.exists(
            defaults_config_file_path):
        if not os.path.exists(defaults_config_file_path):
            raise ValueError('Missing the defaults configuration file; '
                             'expected to find it at {0}'.format(
                defaults_config_file_path))
        raise ValueError('Missing the configuration file; expected to find '
                         'it at {0}'.format(config_file_path))

    lgr.debug('reading provider config files')
    with open(config_file_path, 'r') as config_file, \
            open(defaults_config_file_path, 'r') as defaults_config_file:

        lgr.debug('safe loading user config')
        user_config = yaml.safe_load(config_file.read())

        lgr.debug('safe loading default config')
        defaults_config = yaml.safe_load(defaults_config_file.read())

    lgr.debug('merging configs')
    merged_config = _deep_merge_dictionaries(user_config, defaults_config) \
        if user_config else defaults_config
    return merged_config
Example #17
 def validate_config(self, template_data, yaml_file):
     try:
         yaml.safe_load(template_data)
     except (yaml.scanner.ScannerError, yaml.YAMLError) as e:
         self.log.error("Config for file {} contains invalid yaml, got "
                        "error {}".format(yaml_file, e))
         raise e
Example #18
def get_bundle(bundle, to_file=False):
    """ Attempts to grab the bundle.yaml

    Arguments:
    bundle: URL of bundle or absolute path to local bundle
    to_file: store to a temporary file

    Returns:
    Dictionary of bundle's yaml unless to_file is True,
    then returns the path to the downloaded bundle
    """
    if path.isfile(bundle):
        if to_file:
            with NamedTemporaryFile(mode="w", encoding="utf-8",
                                    delete=False) as tempf:
                shutil.copyfile(bundle, tempf.name)
            return tempf.name
        else:
            with open(bundle) as f:
                return yaml.safe_load(f.read())

    req = requests.get(bundle)
    if not req.ok:
        raise Exception("Problem getting bundle: {}".format(req))
    if to_file:
        with NamedTemporaryFile(mode="w", encoding="utf-8",
                                delete=False) as tempf:
            tempf.write(req.text)
            return tempf.name
    else:
        return yaml.safe_load(req.text)
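
A usage sketch (URL and path are hypothetical):

bundle_dict = get_bundle('https://example.com/bundle.yaml')  # parsed YAML dict
bundle_path = get_bundle('/tmp/bundle.yaml', to_file=True)   # path to a temp copy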
Example #19
def init(config, use, debug, verbose, accounts, tags, policies, resource=None, policy_tags=()):
    level = verbose and logging.DEBUG or logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")

    logging.getLogger('botocore').setLevel(logging.ERROR)
    logging.getLogger('custodian').setLevel(logging.WARNING)
    logging.getLogger('custodian.s3').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    with open(config) as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)

    if use:
        with open(use) as fh:
            custodian_config = yaml.safe_load(fh.read())
    else:
        custodian_config = {}

    accounts_config['accounts'] = list(accounts_iterator(accounts_config))
    filter_policies(custodian_config, policy_tags, policies, resource)
    filter_accounts(accounts_config, tags, accounts)

    load_resources()
    MainThreadExecutor.c7n_async = False
    executor = debug and MainThreadExecutor or ProcessPoolExecutor
    return accounts_config, custodian_config, executor
Example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--old', required=True,
                        help='Current policy file')
    parser.add_argument('--new', required=True,
                        help='New policy file')
    parser.add_argument('--mode',
                        choices=['add', 'remove'],
                        default='remove',
                        help='Diffs to be shown')
    parsed_args = parser.parse_args()

    with open(parsed_args.old) as f:
        old_data = yaml.safe_load(f)

    with open(parsed_args.new) as f:
        new_data = yaml.safe_load(f)

    added = set(new_data.keys()) - set(old_data.keys())
    removed = set(old_data.keys()) - set(new_data.keys())

    if parsed_args.mode == 'remove':
        for key in sorted(removed):
            print(key)

    if parsed_args.mode == 'add':
        for key in sorted(added):
            print(key)
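
Assuming the script is saved as policy_diff.py (name hypothetical), printing the keys added by the new policy file looks like:

python policy_diff.py --old policy.yaml.old --new policy.yaml --mode add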
Example #21
 def __init__(self):
     try:
         self.name = m3tt.get_m3_config()['sys_test']
     except KeyError:
         print('Key sys_test not defined in m3_config.yml. Exiting...')
         exit()
     path = m3tt.get_m3_config_path()
     filename = path + 'sys/m3_sys_test_' + self.name + '.yml'
     try:
         with open(filename, 'r') as f:
             self.config = yaml.safe_load(f.read())
     except IOError:
         print('File ', filename, 'not found. Exiting...')
         exit()
     self.log = []

     # load included
     self.config_include = {'not_val': [], 'in_range': []}
     if 'include' in self.config:
         for fi in self.config['include']:
             filename = path + 'sys/m3_sys_test_' + fi + '.yml'
             try:
                 with open(filename, 'r') as f:
                     print('Including: ', filename)
                     c = yaml.safe_load(f.read())
                 if 'not_val' in c:
                     self.config_include['not_val'].append(c['not_val'])
                 if 'in_range' in c:
                     self.config_include['in_range'].append(c['in_range'])
             except IOError:
                 print('File ', filename, 'not found. Exiting...')
                 exit()
Example #22
    def test_save_shortcut_updated(self):
        OLD_YML = """\
        projects:
            default: 12345
            prod: 33333
        """
        runner = CliRunner()
        with runner.isolated_filesystem():
            with open('scrapinghub.yml', 'w') as f:
                f.write(textwrap.dedent(OLD_YML))
            conf = ShubConfig()
            conf.load_file('scrapinghub.yml')
            del conf.projects['prod']
            print(conf.projects)
            conf.save('scrapinghub.yml')
            with open('scrapinghub.yml', 'r') as f:
                new_yml = yaml.safe_load(f)
            # Should not contain 'projects'
            self.assertEqual(new_yml, {'project': 12345})

            conf = ShubConfig()
            conf.load_file('scrapinghub.yml')
            # Should also work in reverse
            conf.projects['prod'] = 33333
            conf.save('scrapinghub.yml')
            with open('scrapinghub.yml', 'r') as f:
                new_yml = yaml.safe_load(f)
            # Should not contain 'project' singleton
            self.assertEqual(
                new_yml,
                {'projects': {'default': 12345, 'prod': 33333}},
            )

            # Make sure it is readable again
            ShubConfig().load_file('scrapinghub.yml')
Example #23
 def settings(self):
     core_path = os.path.join(get_root_path('indico'), 'modules', 'events', 'themes.yaml')
     with open(core_path) as f:
         core_data = f.read()
     core_settings = yaml.safe_load(core_data)
     # YAML doesn't give us access to anchors so we need to include the base yaml.
     # Since duplicate keys are invalid (and may start failing in the future) we
     # rename them - this also makes it easy to throw them away after parsing the
     # file provided by a plugin.
     core_data = re.sub(r'^(\S+:)$', r'__core_\1', core_data, flags=re.MULTILINE)
     for plugin, path in values_from_signal(signals.plugin.get_event_themes_files.send(), return_plugins=True):
         with open(path) as f:
             data = f.read()
         settings = {k: v
                      for k, v in yaml.safe_load(core_data + '\n' + data).items()
                     if not k.startswith('__core_')}
         # We assume there's no more than one theme plugin that provides defaults.
         # If that's not the case the last one "wins". We could reject this but it
         # is quite unlikely that people have multiple theme plugins in the first
         # place, even more so theme plugins that specify defaults.
         core_settings['defaults'].update(settings.get('defaults', {}))
         # Same for definitions - we assume plugin authors are responsible enough
         # to avoid using definition names that are likely to cause collisions.
         # Either way, if someone does this on purpose chances are good they want
         # to override a default style so let them do so...
          for name, definition in settings.get('definitions', {}).items():
             definition['plugin'] = plugin
             definition.setdefault('user_visible', False)
             core_settings['definitions'][name] = definition
     return core_settings
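
The __core_ rename regex above only touches top-level keys (lines consisting solely of `token:`); a minimal illustration:

import re
core = "defaults:\n  speaker: none\ndefinitions:\n  demo:\n    title: Demo\n"
print(re.sub(r'^(\S+:)$', r'__core_\1', core, flags=re.MULTILINE))
# defaults:/definitions: become __core_defaults:/__core_definitions:;
# the indented keys beneath them are left untouched.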
Example #24
def generate_confs(tileset, ignore_warnings=True, renderd=False):
    """
    Takes a Tileset object and returns mapproxy and seed config files
    """
    # Start with a sane configuration using MapProxy's defaults
    mapproxy_config = load_default_config()

    tileset_conf_json = get_mapproxy_conf(tileset)
    tileset_conf = yaml.safe_load(tileset_conf_json)

    # merge our config
    load_config(mapproxy_config, config_dict=tileset_conf)

    seed_conf_json = get_seed_conf(tileset)
    seed_conf = yaml.safe_load(seed_conf_json)

    errors, informal_only = validate_options(mapproxy_config)
    if not informal_only or (errors and not ignore_warnings):
        raise ConfigurationError('invalid configuration - {}'.format(', '.join(errors)))

    mapproxy_cf = ProxyConfiguration(mapproxy_config, seed=True, renderd=renderd)

    errors, informal_only = validate_seed_conf(seed_conf)
    if not informal_only:
        raise SeedConfigurationError('invalid seed configuration - {}'.format(', '.join(errors)))
    seed_cf = SeedingConfiguration(seed_conf, mapproxy_conf=mapproxy_cf)

    return mapproxy_cf, seed_cf
Example #25
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration

    Example config:
        install_sources:
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys:
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load(config(sources_var))
    keys = config(keys_var)
    if keys is not None:
        keys = safe_load(keys)
    if isinstance(sources, str) and (
            keys is None or isinstance(keys, str)):
        add_source(sources, keys)
    else:
        if len(sources) != len(keys):
            msg = 'Install sources and keys lists are different lengths'
            raise SourceConfigError(msg)
        for src_num in range(len(sources)):
            add_source(sources[src_num], keys[src_num])
    if update:
        apt_update(fatal=True)
Example #26
 def get_playbook(self):
     """
     If necessary, fetch and read the playbook file
     """
     playbook = self.config['playbook']
     if isinstance(playbook, list):
         # Multiple plays in a list
         self.playbook = playbook
     elif isinstance(playbook, str) and playbook.startswith(('http://',
                                                            'https://')):
         response = requests.get(playbook)
         response.raise_for_status()
         self.playbook = yaml.safe_load(response.text)
     elif isinstance(playbook, str):
         try:
             playbook_path = os.path.expanduser(playbook)
             self.playbook_file = open(playbook_path)
             playbook_yaml = yaml.safe_load(self.playbook_file)
             self.playbook = playbook_yaml
         except Exception:
             log.error("Unable to read playbook file %s", playbook)
             raise
     else:
         raise TypeError(
             "playbook value must either be a list, URL or a filename")
     log.info("Playbook: %s", self.playbook)
Example #27
    def test_config_yaml(self):
        import yaml

        cli.config(Args(), ["--format=yaml"])
        data1 = yaml.safe_load(self.stdout.getvalue().strip().replace(helper.ROOT_PATH, "<ROOT>"))
        data2 = yaml.safe_load(config_file("data.json"))
        self.assertEqual(data1, data2)
Example #28
def test_for_consistency(tests_path):
    """
    Ensure that there is consistency between environment.yml dependencies
    and conda.recipe/meta.yaml requirements.
    """
    dev_pkgs = set([
        'pytest',
        'pytest-pep8',
        'pytest-xdist',
        'pycodestyle',
        'pylint',
        'coverage'
    ])
    # read conda.recipe/meta.yaml requirements
    meta_file = os.path.join(tests_path, '..', '..',
                             'conda.recipe', 'meta.yaml')
    with open(meta_file, 'r') as stream:
        meta = yaml.safe_load(stream)
    bld = set(meta['requirements']['build'])
    run = set(meta['requirements']['run'])
    # confirm conda.recipe/meta.yaml build and run requirements are the same
    assert bld == run
    # read environment.yml dependencies
    envr_file = os.path.join(tests_path, '..', '..',
                             'environment.yml')
    with open(envr_file, 'r') as stream:
        envr = yaml.safe_load(stream)
    env = set(envr['dependencies'])
    # confirm that extras in env (relative to run) equal the dev_pkgs set
    extras = env - run
    assert extras == dev_pkgs
Example #29
    def __init__(self, connections_amount=2, connection_propability=0.2):
        self.__fire = 1000
        with open("items.yaml") as data_file:
            items_data = safe_load(data_file)
            self.items = [ Item(elem['name'], elem['description'], elem['capacity'])
                          for elem in items_data ]

        with open("rooms.yaml") as data_file:
            rooms_data = safe_load(data_file)
            self.rooms = [ Room(
                            elem['name'],
                            elem['description'],
                            random.sample(self.items, random.randint(0,2)),
                            bool(elem.get('water_source', 0))
                                ) for elem in rooms_data ]

        self.graph = graph_gen(len(self.rooms), connections_amount,\
                               connection_propability)
        random.shuffle(self.rooms)
        for no, room in enumerate(self.rooms):
            room.neighbors = [self.rooms[index].name for index in \
                                                self.graph.neighbors(no)]

        #we start at 0
        self.fired_room = self.rooms[random.randint(1, len(self.rooms) - 1)].name

        self.spread_fire()
Example #30
def generate_ui(path, modules=[]):
    """Takes a path to a YAML UI definition, and generates a UI tree for it.

    :param path: Path to a YAML UI definition representing the UI to be created.
    :param modules: (Optional) A list of module names which need to be
        imported in order to generate the UI tree. This should include all
        module names which define custom widgets or callbacks using
        decorators that are used in the definition.

    """
    for module in modules:
        importlib.import_module(module)

    with open(path, 'r') as ui_file:
        ui = yaml.safe_load(ui_file)

    full_style = {}
    for style_path in ui.get('include', []):
        with open(style_path, 'r') as style_file:
            style = yaml.safe_load(style_file)
        for definition in style:
            full_style[definition['name']] = definition['properties']

    definition = ui['definition']
    root_class = yamlui.class_mapping.get(definition['object'])
    if root_class is None:
        raise Exception('ERROR: Root class is an unrecognised widget type.')

    root = root_class(definition, style=full_style)
    ui_name = os.path.basename(path)
    yamlui.trees[ui_name] = build_dictionary(root, ui_name)

    return root
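
A hypothetical invocation, assuming main.yaml defines a root widget and myapp.widgets registers the custom classes it references:

root = generate_ui('main.yaml', modules=['myapp.widgets'])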
Example #31
    def create_yaml_files(self, datafile):
        # Load existing Yaml file (If any)
        # Load main verifications datafiles
        #main_file = OrderedDict()
        main_file = {}
        nxos_file = {
            'extends':
            '%ENV{VIRTUAL_ENV}/genie_yamls/verification_datafile.yaml'
        }
        iosxe_file = {
            'extends':
            '%ENV{VIRTUAL_ENV}/genie_yamls/verification_datafile.yaml'
        }
        iosxr_file = {
            'extends':
            '%ENV{VIRTUAL_ENV}/genie_yamls/verification_datafile.yaml'
        }
        nxos = []
        iosxe = []
        iosxr = []
        # Load the file
        with open(datafile, 'r') as f:
            parser_yaml = yaml.safe_load(f)

        main_yaml = os.path.join(YAMLS, 'verification_datafile.yaml')
        with open(main_yaml, 'r') as f:
            content = yaml.safe_load(f)

        nxos_yaml = os.path.join(YAMLS, 'nxos',
                                 'verification_datafile_nxos.yaml')
        with open(nxos_yaml, 'r') as f:
            nxos_content = yaml.safe_load(f)

        iosxe_yaml = os.path.join(YAMLS, 'iosxe',
                                  'verification_datafile_iosxe.yaml')
        with open(iosxe_yaml, 'r') as f:
            iosxe_content = yaml.safe_load(f)

        iosxr_yaml = os.path.join(YAMLS, 'iosxr',
                                  'verification_datafile_xr.yaml')
        with open(iosxr_yaml, 'r') as f:
            iosxr_content = yaml.safe_load(f)

        # All parser should be in this verification datafile
        for osx in self.parsers:
            if osx == 'nxos':
                os_yaml = nxos_content
                os_file = nxos_file
                triggers = nxos
            elif osx == 'iosxe':
                os_yaml = iosxe_content
                os_file = iosxe_file
                triggers = iosxe
            elif osx == 'iosxr':
                os_yaml = iosxr_content
                os_file = iosxr_file
                triggers = iosxr

            for file in self.parsers[osx]:
                for parser in self.parsers[osx][file]:
                    # Check which context exists
                    for context in CONTEXTS:
                        if not hasattr(parser, context):
                            continue

                        parser_name = parser.__name__
                        # Verification name
                        verification_name = 'Verify_{p}'.format(
                            p=parser_name.replace('Show', ''))
                        if context != 'cli':
                            verification_name = '{v}_{c}'.format(
                                v=verification_name, c=context)

                        values = []
                        if parser_name in parser_yaml:

                            # initial index number
                            index_num = None
                            # For all of the combination, add it
                            # Make the lists ready to go
                            for key, items in sorted(
                                    parser_yaml[parser_name].items(),
                                    reverse=True):

                                if isinstance(items, dict):
                                    if key not in parser.__module__:
                                        continue
                                    for ky, val in sorted(items.items(),
                                                          reverse=True):
                                        count = 0
                                        if ky == 'zos':
                                            try:
                                                index_num = val.index(osx)
                                            except ValueError:
                                                values.append(None)
                                                break
                                            continue

                                        if index_num is not None:
                                            val = val[index_num]

                                        for item in val:
                                            if item == '' or item is None:
                                                count += 1
                                                continue
                                            try:
                                                values[count].extend(
                                                    [ky, val[count]])
                                            except IndexError:
                                                values.append([ky, val[count]])
                                            count += 1

                                else:
                                    count = 0
                                    if key == 'zos':
                                        try:
                                            index_num = items.index(osx)
                                        except ValueError:
                                            values.append(None)
                                            break
                                        continue

                                    if index_num is not None:
                                        items = items[index_num]

                                    for item in items:
                                        if item == '' or item is None:
                                            count += 1
                                            continue
                                        try:
                                            values[count].extend(
                                                [key, items[count]])
                                        except IndexError:
                                            values.append([key, items[count]])
                                        count += 1
                        else:
                            values.append(None)

                        for value in values:

                            if value is not None:
                                veri_name = '{v}_{e}'.format(
                                    v=verification_name,
                                    e='_'.join(value).replace(' ', '_'))
                            else:
                                veri_name = verification_name

                            main_file[veri_name] = {}
                            main_file[veri_name]['source'] = {
                                'class': 'genie.harness.base.Template'
                            }
                            main_file[veri_name]['context'] = context
                            main_file[veri_name]['cmd'] = {}
                            main_file[veri_name]['cmd'][
                                'pkg'] = 'genie.libs.parser'
                            main_file[veri_name]['cmd'][
                                'class'] = '{f}.{p}'.format(f=file,
                                                            p=parser.__name__)

                            os_file[veri_name] = {}
                            if veri_name not in EXCLUDE_DEVICES:
                                os_file[veri_name]['devices'] = {'uut': 'None'}

                            if value is not None:
                                for i in range(0, len(value), 2):
                                    if value[i + 1] != 'default':
                                        if 'parameters' not in os_file[
                                                veri_name]:
                                            os_file[veri_name][
                                                'parameters'] = {}
                                        os_file[veri_name]['parameters'][
                                            value[i]] = value[i + 1]

                            if veri_name in content:
                                # Good already exists
                                # Do not copy source and cmd
                                # But keep the rest
                                try:
                                    del content[veri_name]['source']
                                except KeyError:
                                    pass
                                try:
                                    del content[veri_name]['cmd']
                                except KeyError:
                                    pass
                                merge_dict(main_file[veri_name],
                                           content[veri_name])

                            if veri_name in os_yaml:
                                merge_dict(os_file[veri_name],
                                           os_yaml[veri_name])
                            triggers.append(veri_name)

        # Create the files
        with open('verification_datafile.yaml', 'w') as f:
            yaml.dump(main_file, f, default_flow_style=False)

        self.clean_up('nxos')
        with open('nxos/verification_datafile_nxos.yaml', 'w') as f:
            yaml.dump(nxos_file, f, default_flow_style=False)

        self.clean_up('iosxe')
        with open('iosxe/verification_datafile_iosxe.yaml', 'w') as f:
            yaml.dump(iosxe_file, f, default_flow_style=False)

        self.clean_up('iosxr')
        with open('iosxr/verification_datafile_xr.yaml', 'w') as f:
            yaml.dump(iosxr_file, f, default_flow_style=False)

        log.info(banner('nxos'))
        log.info('\n'.join(nxos))

        log.info(banner('iosxe'))
        log.info('\n'.join(iosxe))

        log.info(banner('iosxr'))
        log.info('\n'.join(iosxr))
        return main_file
Example #32
# This script is to contain all the parameters within the simulation 

# from parameters.config import read_config
import yaml

with open("params.yml", 'r') as stream:
    params_dict = yaml.safe_load(stream)
    print(params_dict)

# Cahn Hilliard Model type: 
MOBILITY_MODEL = params_dict["MOBILITY_MODEL"]

# Composition parameters
A_RAW = params_dict["A_RAW"]
NOISE_MAGNITUDE = params_dict["NOISE_MAGNITUDE"]

# Material parameters (either PS_PMMA / PS_PB)
MATERIAL_CHOICE = params_dict["MATERIAL_CHOICE"]
SIZE_DISPARITY = params_dict["SIZE_DISPARITY"]


# Homogeneous free energy function (PC_SAFT, FH, etc.)
GIBBS = params_dict["GIBBS"]

# Numerics
DT = params_dict["DT"]
TIME_MAX = params_dict["TIME_MAX"]
N_CELLS = params_dict["N_CELLS"]
DOMAIN_LENGTH = params_dict["DOMAIN_LENGTH"]
theta_ch = params_dict["theta_ch"]
MESH_TYPE = params_dict["MESH_TYPE"]
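
A params.yml supplying every key the script reads might look like this (values are illustrative, not defaults from the project):

MOBILITY_MODEL: constant
A_RAW: 0.5
NOISE_MAGNITUDE: 0.01
MATERIAL_CHOICE: PS_PMMA
SIZE_DISPARITY: 1.0
GIBBS: FH
DT: 0.01
TIME_MAX: 100
N_CELLS: 128
DOMAIN_LENGTH: 1.0
theta_ch: 0.5
MESH_TYPE: structured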
Example #33
def update_nrpe_checks():
    if os.path.isdir(NAGIOS_PLUGINS):
        rsync(os.path.join(charm_dir(), 'scripts',
                           'check_rabbitmq.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
        rsync(os.path.join(charm_dir(), 'scripts',
                           'check_rabbitmq_queues.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
    if config('stats_cron_schedule'):
        script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
        cronjob = CRONJOB_CMD.format(schedule=config('stats_cron_schedule'),
                                     timeout=config('cron-timeout'),
                                     command=script)
        rsync(os.path.join(charm_dir(), 'scripts',
                           'collect_rabbitmq_stats.sh'), script)
        write_file(STATS_CRONFILE, cronjob)
    if config('management_plugin'):
        rsync(os.path.join(charm_dir(), 'scripts',
                           'check_rabbitmq_cluster.py'),
              os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_cluster.py'))
    elif os.path.isfile(STATS_CRONFILE):
        os.remove(STATS_CRONFILE)

    # Find out if nrpe set nagios_hostname
    hostname = nrpe.get_nagios_hostname()
    myunit = nrpe.get_nagios_unit_name()

    # create unique user and vhost for each unit
    current_unit = local_unit().replace('/', '-')
    user = 'nagios-%s' % current_unit
    vhost = 'nagios-%s' % current_unit
    password = rabbit.get_rabbit_password(user, local=True)

    rabbit.create_vhost(vhost)
    rabbit.create_user(user, password, ['monitoring'])
    rabbit.grant_permissions(user, vhost)

    nrpe_compat = nrpe.NRPE(hostname=hostname)
    if config('ssl') in ['off', 'on']:
        cmd = ('{plugins_dir}/check_rabbitmq.py --user {user} '
               '--password {password} --vhost {vhost}')
        cmd = cmd.format(plugins_dir=NAGIOS_PLUGINS, user=user,
                         password=password, vhost=vhost)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER,
            description='Check RabbitMQ {%s}' % myunit,
            check_cmd=cmd
        )
    if config('ssl') in ['only', 'on']:
        log('Adding rabbitmq SSL check', level=DEBUG)
        cmd = ('{plugins_dir}/check_rabbitmq.py --user {user} '
               '--password {password} --vhost {vhost} '
               '--ssl --ssl-ca {ssl_ca} --port {port}')
        cmd = cmd.format(plugins_dir=NAGIOS_PLUGINS,
                         user=user,
                         password=password,
                         port=int(config('ssl_port')),
                         vhost=vhost,
                         ssl_ca=SSL_CA_FILE)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + "_ssl",
            description='Check RabbitMQ (SSL) {%s}' % myunit,
            check_cmd=cmd
        )

    if config('queue_thresholds'):
        cmd = ""
        # If value of queue_thresholds is incorrect we want the hook to fail
        for item in yaml.safe_load(config('queue_thresholds')):
            cmd += ' -c "{}" "{}" {} {}'.format(*item)
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_queue',
            description='Check RabbitMQ Queues',
            check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
                        NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
        )
    if config('management_plugin'):
        # add NRPE check
        nrpe_compat.add_check(
            shortname=rabbit.RABBIT_USER + '_cluster',
            description='Check RabbitMQ Cluster',
            check_cmd='{}/check_rabbitmq_cluster.py --port {} --user {} --password {}'.format(
                        NAGIOS_PLUGINS,
                        rabbit.get_managment_port(),
                        user,
                        password
            )
        )

    nrpe_compat.write()
Example #34
def from_yaml(data):
    if isinstance(data, string_types):
        return yaml.safe_load(data)
    return data
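
It normalizes either input form to parsed data (string_types here presumably comes from six); e.g.:

from_yaml("a: 1")    # -> {'a': 1}
from_yaml({'a': 1})  # returned unchanged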
Example #35
    comp = zlib.compress(weights_np, level=9)
    print(len(comp))
    # sim.export(path=os.path.join(
    #                os.path.join(exp_folder,
    #                             image_name, 'checkpoints')), filename_prefix='model_aimet_quantized_retrained', dummy_input=dummy_in, set_onnx_layer_names=False)

    print(res)
    return quantized_model, res, len(comp), state_dict


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    exp_folder = 'siren/exp/KODAK21_epochs10000_lr0.0001_hdims32_hlayer4_nerf_sine_l1_reg5e-05_enc_scale10.0/'
    #exp_folder = 'siren/exp/KODAK21_epochs10000_lr0.0001_hdims64_hlayer4_nerf_sine_enc_scale10.0/'
    exp_folder = '/home/yannick/PycharmProjects/INR/exp/KODAK21_epochs10000_lr0.0001_mlp_[8]_hdims64_hlayer2_nerf_sine_enc_scale4.0'
    TRAINING_FLAGS = yaml.safe_load(open(os.path.join(exp_folder, 'FLAGS.yml'), 'r'))
    image_name = 'kodim21'
    imglob = glob.glob('/home/yannick/KODAK/kodim21.png')
    for im in imglob:
        image_name = im.split('/')[-1].split('.')[0]

        img_dataset = dataio.ImageFile(im)
        img = PIL.Image.open(im)
        scale = TRAINING_FLAGS['downscaling_factor']
        image_resolution = (img.size[1] // scale, img.size[0] // scale)

        coord_dataset = dataio.Implicit2DWrapper(img_dataset, sidelength=image_resolution)

        dataloader = DataLoader(coord_dataset, shuffle=True, batch_size=1, pin_memory=True, num_workers=0)
        input_shape = (1, coord_dataset.mgrid.shape[0], coord_dataset.mgrid.shape[1])
        print(input_shape)
Example #36
    def __init__(self, config_file):
        with open(config_file, 'r') as file:
            try:
                self.config = yaml.safe_load(file)
            except yaml.YAMLError as err:
                print(err)

        self.floats = []
        if 'floats' in self.config:
            for floatOpt in self.config['floats']:
                for opt, val in floatOpt.items():
                    if len(val) > 0 and val != 'default':
                        self.floats += [LatexLengthOption(opt, val)]

        self.compact_items = False
        self.compact_enums = False
        if 'lists' in self.config:
            list_config = self.config['lists']
            if 'compact-items' in list_config:
                self.compact_items = list_config['compact-items']
            if 'compact-enums' in list_config:
                self.compact_enums = list_config['compact-enums']

        self.headings = []
        if 'headings' in self.config:
            heads_config = self.config['headings']
            for head in heads_config:
                head_config = heads_config[head]
                left = head_config['left'] or "0pt"
                above = head_config['above'] or "0pt"
                below = head_config['below'] or "0pt"
                self.headings += [
                    LatexCommand('titlespacing', None,
                                 [f'\\{head}', left, above, below])
                ]

        self.paragraphs = []
        if 'paragraphs' in self.config:
            for opt, val in self.config['paragraphs'].items():
                if len(val) > 0 and val != 'default':
                    self.paragraphs += [LatexLengthOption(opt, val)]

        self.equations = []
        if 'equations' in self.config:
            for opt, val in self.config['equations'].items():
                if len(val) > 0 and val != 'default':
                    self.equations += [LatexLengthOption(opt, val)]

        spread_factor = 1.0
        if 'line-spread' in self.config:
            spread_factor = self.config['line-spread']
        self.line_spread = [LatexCommand('linespread', [], str(spread_factor))]

        self.caption_opts = []
        if 'captions' in self.config:
            if 'above-skip' in self.config['captions']:
                aboveskip = self.config['captions']['above-skip']
                if aboveskip != 'default':
                    self.caption_opts += [f'above-skip={aboveskip}']
            if 'below-skip' in self.config['captions']:
                belowskip = self.config['captions']['below-skip']
                if belowskip != 'default':
                    self.caption_opts += [f'below-skip={belowskip}']
            if 'skip' in self.config['captions']:
                skip = self.config['captions']['skip']
                if skip != 'default':
                    self.caption_opts += [f'skip={skip}']
            if 'font' in self.config['captions']:
                font = self.config['captions']['font']
                if font != 'default':
                    self.caption_opts += [f'font={font}']
Example #37
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
if os.getenv('GAE_APPLICATION', None):
    ALLOWED_HOSTS = [
        'really-site-312814.an.r.appspot.com',
    ]
else:
    DEBUG = True
    ALLOWED_HOSTS = ['*']
    import yaml
    with open(os.path.join(BASE_DIR, 'secrets', 'secret_dev.yaml')) as file:
        objs = yaml.safe_load(file)
        for obj in objs:
            os.environ[obj] = objs[obj]
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'livereload',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'mysite',
Example #38
 def get_install_config(self):
     return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
Example #39
        if rank:
            article['popularity'] = int(rank[0]['Ranking'])
        else:
            article['popularity'] = 0

        logging.debug("Add to list")
        parsed_articles.append(article)

# Sort by title
parsed_articles = sorted(parsed_articles, key=lambda x: x['Title'])

all_tags = []
all_langs = []

with open(path.join(doc_directory, "toc.yml"), 'r') as stream:
    main_toc = yaml.safe_load(stream)

all_topics = defaultdict(list)

for article in parsed_articles:
    # toc_link=list(find_all(main_toc,article['file_url'],'item'))
    # if len(toc_link) == 0:
    #     main_toc= update_toc(article, main_toc)

    # Sort the tags
    article['tags'] = sorted(article['tags'])

    # Define the primary topic for the article
    article['topic'] = categories[article['category'][0]]

    # Build topic array
Example #40
    },
    {
        'name': 'TestNetwork_5',
        'vlanId': 5
    },
    {
        'name': 'TestNetwork_9',
        'vlanId': 9
    },
    {
        'name': 'TestNetwork_10',
        'vlanId': 10
    },
]

DICT_PARAMS_WITH_CHANGES = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)["data"]


class EthernetNetworkModuleSpec(unittest.TestCase, OneViewBaseTestCase):
    """
    OneViewBaseTestCase provides the mocks used in this test case
    """
    def setUp(self):
        self.configure_mocks(self, EthernetNetworkModule)
        self.resource = self.mock_ov_client.ethernet_networks

    def test_should_create_new_ethernet_network(self):
        self.resource.get_by.return_value = []
        self.resource.create.return_value = DEFAULT_ENET_TEMPLATE

        self.mock_ansible_module.params = PARAMS_FOR_PRESENT
Example #41
    def test_should_include(self,
                            builder_context: PipelineBuilderContext) -> None:
        """Test calculating configuration for the given prescription unit."""
        prescription_str = f"""\
name: GroupStep
type: step.Group
should_include:
  adviser_pipeline: true
match:
  group:
  - package_version:
      name: numpy
      version: '!=1.21.4'
      index_url:
        not: 'https://pypi.org/simple'
  - package_version:
      name: tensorflow
      version: '~=2.7.0'
  - package_version:
      name: pandas
      index_url: 'https://thoth-station.ninja'
      develop: true
  - package_version:
      name: werkzeug
      version: '==1.0.0'
      index_url: 'https://pypi.org/simple'
      develop: false
run:
  stack_info:
  - type: INFO
    message: Running the unit.
    link: 'https://thoth-station.ninja'
"""
        prescription = yaml.safe_load(prescription_str)
        PRESCRIPTION_GROUP_STEP_SCHEMA(prescription)
        GroupStepPrescription.set_prescription(prescription)

        configurations = list(
            GroupStepPrescription.should_include(builder_context))

        assert configurations == [
            {
                "match": {
                    "package_version": {
                        "index_url": {
                            "not": "https://pypi.org/simple"
                        },
                        "name": "numpy",
                        "version": "!=1.21.4",
                    },
                    "state": {
                        "resolved_dependencies": [
                            {
                                "name": "tensorflow",
                                "version": "~=2.7.0"
                            },
                            {
                                "develop": True,
                                "index_url": "https://thoth-station.ninja",
                                "name": "pandas"
                            },
                            {
                                "develop": False,
                                "index_url": "https://pypi.org/simple",
                                "name": "werkzeug",
                                "version": "==1.0.0",
                            },
                        ]
                    },
                },
                "multi_package_resolution": True,
                "package_name": "numpy",
                "prescription": {
                    "run": False
                },
                "run": {
                    "stack_info": [{
                        "link": "https://thoth-station.ninja",
                        "message": "Running the unit.",
                        "type": "INFO"
                    }]
                },
            },
            {
                "match": {
                    "package_version": {
                        "name": "tensorflow",
                        "version": "~=2.7.0"
                    },
                    "state": {
                        "resolved_dependencies": [
                            {
                                "index_url": {
                                    "not": "https://pypi.org/simple"
                                },
                                "name": "numpy",
                                "version": "!=1.21.4"
                            },
                            {
                                "develop": True,
                                "index_url": "https://thoth-station.ninja",
                                "name": "pandas"
                            },
                            {
                                "develop": False,
                                "index_url": "https://pypi.org/simple",
                                "name": "werkzeug",
                                "version": "==1.0.0",
                            },
                        ]
                    },
                },
                "multi_package_resolution": True,
                "package_name": "tensorflow",
                "prescription": {
                    "run": False
                },
                "run": {
                    "stack_info": [{
                        "link": "https://thoth-station.ninja",
                        "message": "Running the unit.",
                        "type": "INFO"
                    }]
                },
            },
            {
                "match": {
                    "package_version": {
                        "develop": True,
                        "index_url": "https://thoth-station.ninja",
                        "name": "pandas"
                    },
                    "state": {
                        "resolved_dependencies": [
                            {
                                "index_url": {
                                    "not": "https://pypi.org/simple"
                                },
                                "name": "numpy",
                                "version": "!=1.21.4"
                            },
                            {
                                "name": "tensorflow",
                                "version": "~=2.7.0"
                            },
                            {
                                "develop": False,
                                "index_url": "https://pypi.org/simple",
                                "name": "werkzeug",
                                "version": "==1.0.0",
                            },
                        ]
                    },
                },
                "multi_package_resolution": True,
                "package_name": "pandas",
                "prescription": {
                    "run": False
                },
                "run": {
                    "stack_info": [{
                        "link": "https://thoth-station.ninja",
                        "message": "Running the unit.",
                        "type": "INFO"
                    }]
                },
            },
            {
                "match": {
                    "package_version": {
                        "develop": False,
                        "index_url": "https://pypi.org/simple",
                        "name": "werkzeug",
                        "version": "==1.0.0",
                    },
                    "state": {
                        "resolved_dependencies": [
                            {
                                "index_url": {
                                    "not": "https://pypi.org/simple"
                                },
                                "name": "numpy",
                                "version": "!=1.21.4"
                            },
                            {
                                "name": "tensorflow",
                                "version": "~=2.7.0"
                            },
                            {
                                "develop": True,
                                "index_url": "https://thoth-station.ninja",
                                "name": "pandas"
                            },
                        ]
                    },
                },
                "multi_package_resolution": True,
                "package_name": "werkzeug",
                "prescription": {
                    "run": False
                },
                "run": {
                    "stack_info": [{
                        "link": "https://thoth-station.ninja",
                        "message": "Running the unit.",
                        "type": "INFO"
                    }]
                },
            },
        ]

        for conf in configurations:
            GroupStepPrescription.CONFIGURATION_SCHEMA(conf)
Example #42
0
 def load_yaml(self, fp):
     if self._reader_format == "yaml":
         return pd.DataFrame(safe_load(fp))
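
# A hypothetical usage sketch for the reader above (file name and data shape
# are assumptions): pd.DataFrame(safe_load(fp)) expects the YAML document to
# be a list of mappings, one per row.
#
#     import pandas as pd
#     from yaml import safe_load
#
#     with open("records.yaml") as fp:
#         frame = pd.DataFrame(safe_load(fp))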
Example #43
0
#!/usr/bin/python

import yaml
import glob
import os

# Load old configurations
try:
    sfconfig = yaml.safe_load(open("/etc/software-factory/sfconfig.yaml"))
except IOError:
    sfconfig = {'nodepool': {}}
images = yaml.safe_load(open("nodepool/images.yaml"))
labels = yaml.safe_load(open("nodepool/labels.yaml"))

# Prepare the new nodepool.yaml
nodepool = {
    'labels': labels['labels'],
    'diskimages': [],
    'providers': [],
}

# Import provider setting from sfconfig.yaml
for provider in sfconfig["nodepool"].get("providers", []):
    new_provider = {
        "name": provider["name"],
        "cloud": provider["name"],
        "clean-floating-ips": True,
        "images": [],
    }
    if "network" in provider and provider.get("network") is not None:
        new_provider["networks"] = [{"name": provider["network"]}]
        else:
            last_read_message_id = await process_messages(
                client,
                messages_list,
                config["media_types"],
                config["file_formats"],
            )
            pagination_count = 0
            messages_list = []
            messages_list.append(message)
    if messages_list:
        last_read_message_id = await process_messages(
            client,
            messages_list,
            config["media_types"],
            config["file_formats"],
        )

    await client.stop()
    config["last_read_message_id"] = last_read_message_id
    return config


if __name__ == "__main__":
    f = open(os.path.join(THIS_DIR, "config.yaml"))
    config = yaml.safe_load(f)
    f.close()
    updated_config = asyncio.get_event_loop().run_until_complete(
        begin_import(config, pagination_limit=100))
    update_config(updated_config)
import yaml
from task_18_1 import send_show_command
from task_18_2 import send_config_commands

commands = [
    "logging 10.255.255.1", "logging buffered 20010", "no logging console"
]
command = "sh ip int br"


def send_commands(device, *, config=None, show=None):
    if show and config:
        raise ValueError(
            "Можно передавать только один из аргументов show/config")
    elif show:
        return send_show_command(device, show)
    elif config:
        return send_config_commands(device, config)


if __name__ == "__main__":
    commands = [
        "logging 10.255.255.1", "logging buffered 20010", "no logging console"
    ]
    command = "sh ip int br"
    with open("devices.yaml") as f:
        devices = yaml.safe_load(f)
    r1 = devices[0]
    print(send_commands(r1, config=commands))
    print(send_commands(r1, show=command))
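
# devices.yaml is assumed to be a YAML list of Netmiko-style connection
# dictionaries (values below are illustrative), so devices[0] above selects
# the first router:
#
#     - device_type: cisco_ios
#       host: 192.168.100.1
#       username: cisco
#       password: cisco
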
def get_template_without_crds(path):
    template = yaml.safe_load(open(path))
    template['objects'] = [
        obj for obj in template['objects'] if obj['kind'] != 'CustomResourceDefinition'
    ]
    return template
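
# A short usage sketch for the helper above (paths are hypothetical); the
# filtered template can be re-serialized with yaml.safe_dump:
#
#     template = get_template_without_crds("manifests/template.yaml")
#     with open("manifests/template.no-crds.yaml", "w") as out:
#         yaml.safe_dump(template, out, default_flow_style=False)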
Example #47
0
 def load(file):
     with open(file) as f:
         # The parsed YAML was previously discarded; assuming the Dictionary
         # is meant to be built from it (its constructor signature is an
         # assumption), pass the data through.
         data = yaml.safe_load(f)
         new_dict = Dictionary(data)
         return new_dict
Example #48
0
def run(numWorlds,
        numElems,
        paramsPath,
        outputDir,
        plotVerbosity,
        consoleVerbosity,
        params=None):
    if params is None:
        with open(paramsPath) as paramsFile:
            params = yaml.safe_load(paramsFile)

    # Setup params
    n = params["n"]
    w = params["w"]
    tmParams = params["tmParams"]
    tpParams = params["tpParams"]
    isOnline = params["isOnline"]
    onlineTrainingReps = params["onlineTrainingReps"] if isOnline else "N/A"
    completeSequenceLength = numElems**2
    print(
        "Experiment parameters: "
        "(# worlds = {0}, # elements = {1}, n = {2}, w = {3}, "
        "online = {4}, onlineReps = {5})".format(numWorlds, numElems, n, w,
                                                 isOnline, onlineTrainingReps))
    print "Temporal memory parameters: {0}".format(tmParams)
    print "Temporal pooler parameters: {0}".format(tpParams)
    print

    # Setup experiment
    start = time.time()
    runner, exhaustiveAgents, randomAgents = setupExperiment(
        n, w, numElems, numWorlds, tmParams, tpParams)

    # Training phase
    # print "Training: (worlds: {0}, elements: {1})...".format(numWorlds, numElems)
    # print
    # if isOnline:
    #   trainOnline(runner, exhaustiveAgents, completeSequenceLength,
    #               onlineTrainingReps, consoleVerbosity)
    # else:
    #   trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, consoleVerbosity)
    # print "Done training."
    # print

    # Custom Cosyne training
    print "Training temporal memory and temporal pooler..."
    numSequences = 3
    sequenceLength = 50 * (completeSequenceLength / numSequences)
    sequences = runner.generateSequences(sequenceLength,
                                         randomAgents,
                                         verbosity=consoleVerbosity,
                                         numSequences=numSequences)
    runner.feedLayers(sequences,
                      tmLearn=True,
                      tpLearn=True,
                      verbosity=consoleVerbosity,
                      showProgressInterval=SHOW_PROGRESS_INTERVAL)
    plotExperimentState(runner, plotVerbosity, numWorlds, numElems, isOnline,
                        "Training")

    # Test TM and TP on randomly moving agents
    # runTestPhase(runner, randomAgents, numWorlds, numElems,
    #              completeSequenceLength, consoleVerbosity)
    # plotExperimentState(runner, plotVerbosity, numWorlds, numElems, isOnline, "Testing")
    elapsed = int(time.time() - start)
    print "Total time: {0:2} seconds.".format(elapsed)

    # Write results to output file
    writeOutput(outputDir, runner, numElems, numWorlds, elapsed)
    if plotVerbosity >= 1:
        raw_input("Press any key to exit...")
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

graph_lag=graph_hp=graph_cpam='n'
router_mode='3'
topos_names=['rn001_001']

########################################################################
# Globals
########################################################################

settings = yaml.safe_load(open('plugins/settings.yml'))

########################################################################
# Program
########################################################################

db = MySQLdb.connect(host=settings['mysql']['host'],
                     port=settings['mysql']['port'],
                     user=settings['mysql']['username'],
                     passwd=settings['mysql']['password'],
                     db=settings['mysql']['database'])
posProg = 0
posTopo = 1
posMode = 2
posFormat = 3
Example #50
0
def main(args=None):
    args = parser.parse_args(args)

    # read config file and default config
    with open('config/default.yaml') as f:
        default_config = utils.AttrDict(yaml.safe_load(f))

    with open(args.config) as f:
        config = utils.AttrDict(yaml.safe_load(f))
        
        if args.learning_rate is not None:
            args.reset_learning_rate = True
        
        # command-line parameters have higher precedence than config file
        for k, v in vars(args).items():
            if v is not None:
                config[k] = v

        # set default values for parameters that are not defined
        for k, v in default_config.items():
            config.setdefault(k, v)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # disable TensorFlow's debugging logs
    decoding_mode = any(arg is not None for arg in (args.decode, args.eval, args.align))

    # enforce parameter constraints
    assert config.steps_per_eval % config.steps_per_checkpoint == 0, (
        'steps-per-eval should be a multiple of steps-per-checkpoint')
    assert decoding_mode or args.train, (
        'you need to specify at least one action (decode, eval, align, or train)')
    assert not (args.average and args.ensemble)

    if args.train and args.purge:
        utils.log('deleting previous model')
        shutil.rmtree(config.model_dir, ignore_errors=True)

    os.makedirs(config.model_dir, exist_ok=True)

    # copy config file to model directory
    config_path = os.path.join(config.model_dir, 'config.yaml')
    if args.train and not os.path.exists(config_path):
        with open(args.config) as config_file, open(config_path, 'w') as dest_file:
            content = config_file.read()
            content = re.sub(r'model_dir:.*?\n', 'model_dir: {}\n'.format(config.model_dir), content,
                             flags=re.MULTILINE)
            dest_file.write(content)

    # also copy default config
    config_path = os.path.join(config.model_dir, 'default.yaml')
    if args.train and not os.path.exists(config_path):
        shutil.copy('config/default.yaml', config_path)

    # copy source code to model directory
    tar_path = os.path.join(config.model_dir, 'code.tar.gz')
    if args.train and not os.path.exists(tar_path):
        with tarfile.open(tar_path, "w:gz") as tar:
            for filename in os.listdir('translate'):
                if filename.endswith('.py'):
                    tar.add(os.path.join('translate', filename), arcname=filename)

    logging_level = logging.DEBUG if args.verbose else logging.INFO
    # always log to stdout in decoding and eval modes (to avoid overwriting precious train logs)
    log_path = os.path.join(config.model_dir, config.log_file)
    logger = utils.create_logger(log_path if args.train else None)
    logger.setLevel(logging_level)

    utils.log('label: {}'.format(config.label))
    utils.log('description:\n  {}'.format('\n  '.join(config.description.strip().split('\n'))))

    utils.log(' '.join(sys.argv))  # print command line
    try:  # print git hash
        commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
        utils.log('commit hash {}'.format(commit_hash))
    except:
        pass

    utils.log('tensorflow version: {}'.format(tf.__version__))

    # log parameters
    utils.debug('program arguments')
    for k, v in sorted(config.items(), key=itemgetter(0)):
        utils.debug('  {:<20} {}'.format(k, pformat(v)))

    if isinstance(config.dev_prefix, str):
        config.dev_prefix = [config.dev_prefix]

    if config.tasks is not None:
        config.tasks = [utils.AttrDict(task) for task in config.tasks]
        tasks = config.tasks
    else:
        tasks = [config]

    for task in tasks:
        for parameter, value in config.items():
            task.setdefault(parameter, value)

        task.encoders = [utils.AttrDict(encoder) for encoder in task.encoders]
        task.decoders = [utils.AttrDict(decoder) for decoder in task.decoders]

        for encoder_or_decoder in task.encoders + task.decoders:
            for parameter, value in task.items():
                encoder_or_decoder.setdefault(parameter, value)

    device = None
    if config.no_gpu:
        device = '/cpu:0'
        device_id = None
    elif config.gpu_id is not None:
        device = '/gpu:{}'.format(config.gpu_id)
        device_id = config.gpu_id
    else:
        device_id = 0

    # hide other GPUs so that TensorFlow won't use memory on them
    os.environ['CUDA_VISIBLE_DEVICES'] = '' if device_id is None else str(device_id)

    utils.log('creating model')
    utils.log('using device: {}'.format(device))

    with tf.device(device):
        config.checkpoint_dir = os.path.join(config.model_dir, 'checkpoints')

        if config.weight_scale:
            if config.initializer == 'uniform':
                initializer = tf.random_uniform_initializer(minval=-config.weight_scale, maxval=config.weight_scale)
            else:
                initializer = tf.random_normal_initializer(stddev=config.weight_scale)
        else:
            initializer = None

        tf.get_variable_scope().set_initializer(initializer)

        # exempt from creating gradient ops
        config.decode_only = decoding_mode

        if config.tasks is not None:
            model = MultiTaskModel(**config)
        else:
            model = TranslationModel(**config)

    # count parameters
    utils.log('model parameters ({})'.format(len(tf.global_variables())))
    parameter_count = 0
    for var in tf.global_variables():
        utils.log('  {} {}'.format(var.name, var.get_shape()))

        if not var.name.startswith('gradients'):  # not counting parameters created by training algorithm (e.g. Adam)
            v = 1
            for d in var.get_shape():
                v *= d.value
            parameter_count += v
    utils.log('number of parameters: {:.2f}M'.format(parameter_count / 1e6))

    tf_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = config.allow_growth
    tf_config.gpu_options.per_process_gpu_memory_fraction = config.mem_fraction

    def average_checkpoints(main_sess, sessions):
        for var in tf.global_variables():
            avg_value = sum(sess.run(var) for sess in sessions) / len(sessions)
            main_sess.run(var.assign(avg_value))

    with tf.Session(config=tf_config) as sess:
        best_checkpoint = os.path.join(config.checkpoint_dir, 'best')

        if config.ensemble and len(config.checkpoints) > 1:
            model.initialize(config.checkpoints)
        elif config.average and len(config.checkpoints) > 1:
            model.initialize(reset=True)
            sessions = [tf.Session(config=tf_config) for _ in config.checkpoints]
            for sess_, checkpoint in zip(sessions, config.checkpoints):
                model.initialize(sess=sess_, checkpoints=[checkpoint])
            average_checkpoints(sess, sessions)
        elif (not config.checkpoints and decoding_mode and
              os.path.isfile(best_checkpoint + '.index')):
            # in decoding and evaluation mode, unless specified otherwise (by `checkpoints`),
            # try to load the best checkpoint
            model.initialize([best_checkpoint])
        else:
            # loads last checkpoint, unless `reset` is true
            model.initialize(**config)

        if args.decode is not None:
            model.decode(**config)
        elif args.eval is not None:
            model.evaluate(on_dev=False, **config)
        elif args.align is not None:
            model.align(**config)
        elif args.train:
            try:
                model.train(**config)
            except KeyboardInterrupt:
                sys.exit()
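
# The precedence logic near the top of main() (command line over config file
# over defaults) can be exercised in isolation; a minimal, runnable sketch
# with plain dicts (all names are illustrative):
defaults = {"batch_size": 32, "learning_rate": 0.1}
file_config = {"learning_rate": 0.01}
cli_args = {"learning_rate": None, "batch_size": 64}

merged = dict(file_config)
for k, v in cli_args.items():      # command-line values win when actually given
    if v is not None:
        merged[k] = v
for k, v in defaults.items():      # defaults fill whatever is still missing
    merged.setdefault(k, v)
assert merged == {"learning_rate": 0.01, "batch_size": 64}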
Example #51
0
if __name__ == "__main__":
    settings = Settings()
    if settings.input_debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logging.debug(f"Using config: {settings.json()}")
    g = Github(settings.input_token.get_secret_value())
    repo = g.get_repo(settings.github_repository)
    if not settings.github_event_path.is_file():
        raise RuntimeError(
            f"No github event file available at: {settings.github_event_path}"
        )
    contents = settings.github_event_path.read_text()
    github_event = PartialGitHubEvent.parse_raw(contents)
    translations_map: Dict[str, int] = yaml.safe_load(translations_path.read_text())
    logging.debug(f"Using translations map: {translations_map}")
    sleep_time = random.random() * 10  # random number between 0 and 10 seconds
    pr = repo.get_pull(github_event.pull_request.number)
    logging.debug(
        f"Processing PR: {pr.number}, with anti-race condition sleep time: {sleep_time}"
    )
    if pr.state == "open":
        logging.debug(f"PR is open: {pr.number}")
        label_strs = {label.name for label in pr.get_labels()}
        if lang_all_label in label_strs and awaiting_label in label_strs:
            logging.info(
                f"This PR seems to be a language translation and awaiting reviews: {pr.number}"
            )
            if approved_label in label_strs:
                message = (
Example #52
0
 def load_config(self):
     """
     Just load our config from the text passed in on init
     """
     config = yaml.safe_load(self.config_text) or {}
     self.update_config(config)
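
# Note the `or {}` guard above: yaml.safe_load returns None for an empty
# document, so the fallback keeps update_config from receiving None.
import yaml
assert yaml.safe_load("") is None
assert (yaml.safe_load("") or {}) == {}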
Example #53
0
def workflow_generator_simple(
    project, engine, n_traj, n_steps, round_n,
    longest = 5000,
    n_rounds = 1,
    modeller = None,
    sfkwargs = dict(),
    minlength = None,
    batchsize = 999999,
    batchwait = False,
    batchsleep = 5,
    progression = 'any',
    cpu_threads = 8,
    fixedlength = True,
    startontraj = 0, # TODO None and extra condition if set
    analysis_cfg = None,
    admd_profile = None,
    min_model_trajlength = 0, # TODO None and extra condition if set
    sampling_function_name = 'explore_macrostates',

    # these aren't currently utilized
    randomly = False,
    mpi_ranks = 0,
    continuing = True,
    **kwargs,):
    '''TODO: rename this function. It builds a simple block-style
    workflow DAG, but the function itself is complex.
    '''

    logger.info("Starting workflow_generator_simple function")
    sampling_function = get_sampling_function(
        sampling_function_name, **sfkwargs,
    )

    resource_requirements = dict(resource_name=project.configuration.name) # TODO calculate request
    qkwargs = dict(sleeptime=batchsleep, batchsize=batchsize, wait=batchwait)

    if progression == 'all':
        progress = lambda tasks: all([ta.is_done() for ta in tasks])

    else:
        progress = lambda tasks: any([ta.is_done() for ta in tasks])

    if n_rounds:

        assert isinstance(n_rounds, int)
        assert n_rounds > 0

        logger.info("Going to do n_rounds:  {}".format(n_rounds))

    c = counter(n_rounds)
    tasks = list()

    # PREPARATION - Preprocess task setups
    logger.info("Using MD Engine: {0} {1}".format(engine, engine.name))#, project.generators[engine.name].__dict__)
    logger.info("Using fixed length? {}".format(fixedlength))

    if minlength is None:
        minlength = n_steps

    logger.info("\nProject models\n - Number: {n_model}"
          .format(n_model=len(project.models)))

    logger.info("\nProject trajectories\n - Number: {n_traj}"
          .format(n_traj=len(project.trajectories)))

    logger.debug("\nProject trajectories\n - Lengths:\n{lengths}"
          .format(lengths=[t.length for t in project.trajectories]))

    # ROUND 1 - No pre-existing data
    if len(project.trajectories) == 0:
        notfirsttime = False

        logger.info("Defining first simulation tasks for new project")

        for traj in project.new_trajectory(engine['pdb_file'], n_steps, engine, n_traj):
            tasks.append(traj.run(**resource_requirements))

            if admd_profile: # This should be path to an RC file
                tasks[-1].pre.insert(0, "source %s" % admd_profile)

        if not c.done:
            logger.info("Project first tasks queue")
            queue_tasks(project, tasks, **qkwargs)
            c.increment()

        logger.info("Project first tasks queued")
        logger.info("Queued First Tasks in new project")

        yield lambda: progress(tasks)

        logger.info("First Tasks are done")
        logger.info("Project first tasks done")

    else:

        notfirsttime = True

    mtask = None

    if modeller:
        if analysis_cfg:
            with open(analysis_cfg, 'r') as f:
                _margs = yaml.safe_load(f)

            #logger.info(pformat(_margs))
            #update_margs = lambda rn: _margs[rn]
            update_margs = lambda rn: _margs[
                max(list(filter(
                lambda mi: mi <= rn, _margs)))
            ]

        else:
            raise RuntimeError("Must specify an analysis cfg file to use modeller")

    # Start of CONTROL LOOP
    while not c.done:

        logger.info("Checking Extension Lengths")

        # TODO fix, this isn't a consistent name "trajectories"
        trajectories = set()
        # This loop will escape if all the trajectories
        # are / become full length
        priorext = -1
        while priorext and not c.done:

            xtasks = list()

            #active_trajs =  ~~~  after_n_trajs_trajs
            after_n_trajs_trajs = list(project.trajectories.sorted(
                lambda tj: tj.__time__))[startontraj:]

            logger.info(
                "Checking last {} trajectories for proper length".format(
                len(after_n_trajs_trajs))
            )

            xtasks = check_trajectory_minlength(
                project, minlength, after_n_trajs_trajs, n_steps, n_traj,
                resource_requirements=resource_requirements
            )

            # NOTE we are tracking pre-existing extension tasks
            tnames = set()
            if len(trajectories) > 0:
                # zip() objects aren't subscriptable in Python 3; collect the
                # trajectory basenames directly.
                tnames.update(name for name, _ in trajectories)

            # NOTE so we only extend those who aren't already running
            queuethese = list()
            for xta in xtasks:
                tname = xta.trajectory.basename

                if tname not in tnames:
                    tnames.add(tname)
                    trajectories.add( (tname, xta) )
                    queuethese.append(xta)

            if queuethese:

                queue_tasks(project, queuethese, **qkwargs)
                yield lambda: progress(queuethese)

            # NOTE and remove any that have already completed
            removals = list()
            for tname, xta in trajectories:
                if xta.state in {"fail","halted","success","cancelled"}:
                    removals.append( (tname, xta) )

            for removal in removals:
                trajectories.remove(removal)

            if len(trajectories) == n_traj and priorext < n_traj:
                logger.info("Have full width of extensions")
                c.increment()

            # setting this to look at next round
            priorext = len(trajectories)

        logger.info("----------- On workload #{0}".format(c.n))
        logger.info("Runtime main loop enter")
        tasks = list()

        if not modeller:
            logger.info("Extending project without modeller")

            trajectories = sampling_function(project, engine, n_steps, n_traj)
            logger.info("Runtime new trajectories defined")

            logger.info("Converting trajectories to tasks")
            [tasks.append(t.run(**resource_requirements)) for t in trajectories]

            logger.info("Runtime new tasks queueing")
            if tasks:
                queue_tasks(project, tasks, **qkwargs)
                logger.info("Runtime new tasks queued")

            c.increment()

            yield lambda: progress(tasks)

            logger.info("Runtime main no modeller done")

        else:

            if mtask is None:

                margs = update_margs(round_n)
                margs.update(resource_requirements)

                logger.info("Extending project with modeller")
                logger.info("Analysis args for this round will be: {}".format(pformat(margs)))

                trajectories = list(filter(lambda tj: tj.length >= min_model_trajlength, project.trajectories))
                mtask = modeller.execute(trajectories, **margs)
                project.queue(mtask)

                yield lambda: progress(tasks)

            elif mtask.is_done():
                # In current 1-workload per runtime model, shouldn't ever see this condition

                logger.info("Current modelling task is done")
                mtask = None

            else:
                # In current 1-workload per runtime model, shouldn't ever see this condition

                logger.info("Waiting on modelling task")
                yield lambda: progress(tasks)
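
# The update_margs lookup above selects the analysis args keyed by the largest
# round number not exceeding the current round; a standalone illustration
# (the per-round settings are hypothetical):
_margs = {0: "coarse", 5: "medium", 10: "fine"}
pick = lambda rn: _margs[max(mi for mi in _margs if mi <= rn)]
assert pick(3) == "coarse" and pick(7) == "medium" and pick(12) == "fine"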
Example #54
0
from flask_cors import CORS

# import strptime to avoid weird issues described @ http://bugs.python.org/msg221094
datetime.strptime('', '')

# Create WSGI app instance - this __name__.split('.') handles
# the case where the file is part of a package.
app = Flask(__name__.split('.')[0])

# to allow cross domain resource sharing over all domains,
# for more specific usage see @ https://github.com/corydolphin/flask-cors
CORS(app)

# load the configuration file
try:
    app_config = yaml.safe_load(open('etc/config.yml'))
except yaml.YAMLError as e:  # exceptions can be specific to the possible errors
    app.logger.error('exception encountered while parsing the configuration file, check the logs')
    app.logger.exception(e)
    sys.exit(1)

# when the file is loaded properly, load & set database configs for sqlalchemy
database_host = app_config['database']['host']
database_port = app_config['database']['port']
database_user = app_config['database']['user_name']
database_password = app_config['database']['password']
database_name = app_config['database']['database_name']

# setup database uri
# Postgres is used as the example database here, but any database supported by SQLAlchemy can be used
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://%s:%s@%s:%s/%s' % \
Example #55
0
 def _load_config_file(cls, agent_name) -> Dict:
     """Load a config file."""
     agent_config_file = Path(agent_name, DEFAULT_AEA_CONFIG_FILE)  # type: ignore
     with open(agent_config_file, mode="r", encoding="utf-8") as file_pointer:
         agent_config_instance = yaml.safe_load(file_pointer)
     return agent_config_instance
Example #56
0
 def read_diffuse_component_yaml(yamlfile):
     """ Read the yaml file for the diffuse components
     """
     diffuse_components = yaml.safe_load(open(yamlfile))
     return diffuse_components
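
# The one-liner above leaves the file handle to be closed by garbage
# collection; a with-block variant, otherwise behaviorally identical:
#
#     def read_diffuse_component_yaml(yamlfile):
#         """Read the yaml file for the diffuse components."""
#         with open(yamlfile) as fp:
#             return yaml.safe_load(fp)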
    def _test_internal_route_reflector(self, backend='bird', bgpconfig_as_num=64514, peer_as_num=64514):
        """
        Run a multi-host test using an internal route reflector.
        """
        with DockerHost('host1',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host1, \
             DockerHost('host2',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host2, \
             DockerHost('host3',
                        additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                        start_calico=False) as host3:

            # Start all hosts using specific backends.
            host1.start_calico_node("--backend=%s" % backend)
            host2.start_calico_node("--backend=%s" % backend)
            host3.start_calico_node("--backend=%s" % backend)

            # Set the default AS number - as this is used by the RR mesh, and
            # turn off the node-to-node mesh (do this from any host).
            update_bgp_config(host1, nodeMesh=False, asNum=bgpconfig_as_num)

            # Create a workload on each host in the same network.
            network1 = host1.create_network("subnet1")
            workload_host1 = host1.create_workload("workload1",
                                                   network=network1)
            workload_host2 = host2.create_workload("workload2",
                                                   network=network1)
            workload_host3 = host3.create_workload("workload3",
                                                   network=network1)

            # Allow network to converge (which it won't)
            self.assert_false(workload_host1.check_can_ping(workload_host2.ip, retries=5))

            # Make host2 act as a route reflector.
            node2 = host2.calicoctl("get Node %s -o yaml" % host2.get_hostname())
            node2cfg = yaml.safe_load(node2)
            logger.info("host2 Node: %s", node2cfg)
            node2cfg['spec']['bgp']['routeReflectorClusterID'] = '224.0.0.2'
            node2cfg['metadata']['labels'] = {
                'routeReflectorClusterID': node2cfg['spec']['bgp']['routeReflectorClusterID'],
            }
            host2.add_resource(node2cfg)

            # Configure peerings - note, NOT a full mesh - from the
            # other nodes to the route reflector.
            host2.add_resource({
                'apiVersion': 'projectcalico.org/v3',
                'kind': 'BGPPeer',
                'metadata': {
                    'name': 'rr-peerings',
                },
                'spec': {
                    'nodeSelector': '!has(routeReflectorClusterID)',
                    'peerSelector': 'has(routeReflectorClusterID)',
                },
            })

            # Allow network to converge (which it now will).
            self.assert_true(workload_host1.check_can_ping(workload_host2.ip, retries=20))

            # And check connectivity in both directions.
            self.assert_ip_connectivity(workload_list=[workload_host1,
                                                       workload_host2,
                                                       workload_host3],
                                        ip_pass_list=[workload_host1.ip,
                                                      workload_host2.ip,
                                                      workload_host3.ip],
                                        retries=5)
Example #58
0
import logging
from nuimo_menue.config import NuimoMenueConfiguration
import yaml


class NuimoOpenHabConfiguration(NuimoMenueConfiguration):

    def __init__(self, config):
        super(NuimoOpenHabConfiguration, self).__init__(config["key_mapping"])
        self.config = config

    def __getitem__(self, item):
        return self.config[item]

project_root_dir = os.path.dirname(sys.argv[0])
NUIMO_OPENHAB_CONFIG_PATH = os.getenv('NUIMO_OPENHAB_CONFIG_PATH', os.path.join(project_root_dir, 'config.yml'))

with open(NUIMO_OPENHAB_CONFIG_PATH, 'r') as ymlfile:
    rawConfig = yaml.safe_load(ymlfile)

    loggers = []
    if rawConfig["log_file"]: loggers.append(logging.FileHandler(filename=rawConfig["log_file"]))
    if rawConfig["log_stdout"]: loggers.append(logging.StreamHandler(stream=sys.stdout))
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
                                  "%Y-%m-%d %H:%M:%S")
    for logger in loggers: logger.setFormatter(formatter)
    if loggers: logging.basicConfig(handlers=loggers, level=rawConfig["log_level"])

    logging.info("Loaded config file from %s", NUIMO_OPENHAB_CONFIG_PATH)
    sys.modules[__name__] = NuimoOpenHabConfiguration(rawConfig)
Example #59
0
    def __init__(self, *args, **kwargs):
        """Set up all the resources."""
        super().__init__(*args, **kwargs)

        # Load the resources from the yaml files in /resources/
        self.resources = {
            path.stem: yaml.safe_load(path.read_text())
            for path in RESOURCES_PATH.rglob("*.yaml")
        }

        # Sort the resources alphabetically
        self.resources = dict(
            sorted(self.resources.items(), key=self._sort_key_disregard_the))

        # Parse out all current tags
        resource_tags = {
            "topics": set(),
            "payment_tiers": set(),
            "difficulty": set(),
            "type": set(),
        }
        for resource_name, resource in self.resources.items():
            css_classes = []
            for tag_type in resource_tags.keys():
                # Store the tags into `resource_tags`
                tags = resource.get("tags", {}).get(tag_type, [])
                for tag in tags:
                    tag = tag.title()
                    tag = tag.replace("And", "and")
                    resource_tags[tag_type].add(tag)

                # Make a CSS class friendly representation too, while we're already iterating.
                for tag in tags:
                    css_tag = to_kebabcase(f"{tag_type}-{tag}")
                    css_classes.append(css_tag)

            # Now add the css classes back to the resource, so we can use them in the template.
            self.resources[resource_name]["css_classes"] = " ".join(
                css_classes)

        # Set up all the filter checkbox metadata
        self.filters = {
            "Difficulty": {
                "filters": sorted(resource_tags.get("difficulty")),
                "icon": "fas fa-brain",
                "hidden": False,
            },
            "Type": {
                "filters": sorted(resource_tags.get("type")),
                "icon": "fas fa-photo-video",
                "hidden": False,
            },
            "Payment tiers": {
                "filters": sorted(resource_tags.get("payment_tiers")),
                "icon": "fas fa-dollar-sign",
                "hidden": True,
            },
            "Topics": {
                "filters": sorted(resource_tags.get("topics")),
                "icon": "fas fa-lightbulb",
                "hidden": True,
            }
        }

        # The bottom topic should always be "Other".
        self.filters["Topics"]["filters"].remove("Other")
        self.filters["Topics"]["filters"].append("Other")

        # A complete list of valid filter names
        self.valid_filters = {
            "topics": [
                to_kebabcase(topic)
                for topic in self.filters["Topics"]["filters"]
            ],
            "payment_tiers": [
                to_kebabcase(tier)
                for tier in self.filters["Payment tiers"]["filters"]
            ],
            "type":
            [to_kebabcase(type_) for type_ in self.filters["Type"]["filters"]],
            "difficulty": [
                to_kebabcase(tier)
                for tier in self.filters["Difficulty"]["filters"]
            ],
        }
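
# The tag normalization above title-cases tags while keeping "and" lowercase;
# a standalone check of that behavior:
tag = "tips and tricks".title().replace("And", "and")
assert tag == "Tips and Tricks"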
Example #60
0
from pprint import pprint

import argo_workflows
from argo_workflows.api import workflow_service_api
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow_create_request import \
    IoArgoprojWorkflowV1alpha1WorkflowCreateRequest
import requests
import yaml

configuration = argo_workflows.Configuration(host="https://127.0.0.1:2746")
configuration.verify_ssl = False

resp = requests.get('https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml')
manifest = yaml.safe_load(resp.text)

api_client = argo_workflows.ApiClient(configuration)
api_instance = workflow_service_api.WorkflowServiceApi(api_client)
api_response = api_instance.create_workflow(
    namespace='argo',
    body=IoArgoprojWorkflowV1alpha1WorkflowCreateRequest(workflow=manifest, _check_type=False),
    _check_return_type=False)
pprint(api_response)
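
# If the fetched file held multiple YAML documents, yaml.safe_load would raise
# an error; yaml.safe_load_all covers that case (sketch, same manifest
# assumed):
#
#     manifests = list(yaml.safe_load_all(resp.text))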