Code example #1
File: test_build.py Project: stub42/charm-tools
    def test_tester_layer(self):
        bu = build.Builder()
        bu.log_level = "WARNING"
        bu.output_dir = "out"
        bu.series = "trusty"
        bu.name = "foo"
        bu.charm = "trusty/tester"
        bu.hide_metrics = True
        bu()
        base = path('out/trusty/foo')
        self.assertTrue(base.exists())

        # Verify ignore rules applied
        self.assertFalse((base / ".bzr").exists())

        # Metadata should have combined provides fields
        metadata = base / "metadata.yaml"
        self.assertTrue(metadata.exists())
        metadata_data = yaml.load(metadata.open())
        self.assertIn("shared-db", metadata_data['provides'])
        self.assertIn("storage", metadata_data['provides'])

        # Config should have keys but not the ones in deletes
        config = base / "config.yaml"
        self.assertTrue(config.exists())
        config_data = yaml.load(config.open())['options']
        self.assertIn("bind-address", config_data)
        self.assertNotIn("vip", config_data)

        cyaml = base / "layer.yaml"
        self.assertTrue(cyaml.exists())
        cyaml_data = yaml.load(cyaml.open())
        self.assertEquals(cyaml_data['includes'], ['trusty/mysql'])
        self.assertEquals(cyaml_data['is'], 'foo')

        self.assertTrue((base / "hooks/config-changed").exists())

        # Files from the top layer as overrides
        start = base / "hooks/start"
        self.assertTrue(start.exists())
        self.assertIn("Overridden", start.text())

        self.assertTrue((base / "README.md").exists())
        self.assertEqual("dynamic tactics", (base / "README.md").text())

        sigs = base / ".build.manifest"
        self.assertTrue(sigs.exists())
        data = json.load(sigs.open())
        self.assertEquals(data['signatures']["README.md"], [
            u'foo',
            "static",
            u'cfac20374288c097975e9f25a0d7c81783acdbc81'
            '24302ff4a731a4aea10de99'])

        self.assertEquals(data["signatures"]['metadata.yaml'], [
            u'foo',
            "dynamic",
            u'8dd9059eae849c61a1bd3d8de7f96a418e'
            u'f8b4bf5d9c058c413b5169e2783815',
            ])
Code example #2
File: experiments.py Project: hefeix/skll
def _write_summary_file(result_json_paths, output_file, ablation=0):
    """
    Function to take a list of paths to individual result
    json files and returns a single file that summarizes
    all of them.

    :param result_json_paths: A list of paths to the
                              individual result json files.
    :type result_json_paths: list

    :returns: The output file to contain a summary of the individual result
              files.
    :rtype: file
    """
    learner_result_dicts = []
    # Map from feature set names to all features in them
    all_features = defaultdict(set)
    logger = logging.getLogger(__name__)
    for json_path in result_json_paths:
        if not exists(json_path):
            logger.error(('JSON results file %s not found. Skipping summary '
                          'creation. You can manually create the summary file'
                          ' after the fact by using the summarize_results '
                          'script.'), json_path)
            return
        else:
            with open(json_path, 'r') as json_file:
                obj = json.load(json_file)
                featureset_name = obj[0]['featureset_name']
                if ablation != 0 and '_minus_' in featureset_name:
                    parent_set = featureset_name.split('_minus_', 1)[0]
                    all_features[parent_set].update(
                        yaml.load(obj[0]['featureset']))
                learner_result_dicts.extend(obj)

    # Build and write header
    header = set(learner_result_dicts[0].keys()) - {'result_table',
                                                    'descriptive'}
    if ablation != 0:
        header.add('ablated_features')
    header = sorted(header)
    writer = csv.DictWriter(output_file, header, extrasaction='ignore',
                            dialect=csv.excel_tab)
    writer.writeheader()

    # Build "ablated_features" list and fix some backward compatible things
    for lrd in learner_result_dicts:
        featureset_name = lrd['featureset_name']
        if ablation != 0:
            parent_set = featureset_name.split('_minus_', 1)[0]
            ablated_features = all_features[parent_set].difference(
                yaml.load(lrd['featureset']))
            lrd['ablated_features'] = ''
            if ablated_features:
                lrd['ablated_features'] = json.dumps(sorted(ablated_features))

        # write out the new learner dict with the readable fields
        writer.writerow(lrd)

    output_file.flush()
Code example #3
File: test_config.py Project: carlosp420/shub
 def test_save(self):
     tmpdir = tempfile.mkdtemp()
     tmpfilepath = os.path.join(tmpdir, 'saved_conf.yml')
     self.conf.save(tmpfilepath)
     with open(tmpfilepath, 'r') as f:
         self.assertEqual(yaml.load(f), yaml.load(VALID_YAML_CFG))
     shutil.rmtree(tmpdir)
Code example #4
File: test_config.py Project: 3kwa/conda
def test_set_rc_string():
    # Test setting string keys in .condarc

    # We specifically test ssl_verify since it can be either a boolean or a string
    try:
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'yes')
        assert stdout == ''
        assert stderr == ''

        verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'yes'

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'test_string.crt')
        assert stdout == ''
        assert stderr == ''

        verify = yaml.load(open(test_condarc, 'r'), Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'test_string.crt'


        os.unlink(test_condarc)
    finally:
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
Code example #5
File: utils.py Project: jlesquembre/autopilot
def get_config():
    config_path = get_user_config_path()

    try:
        with config_path.open() as f:
            user_config = yaml.load(f, Loader=yaml.RoundTripLoader)
    except FileNotFoundError:
        print('WARNING: No user config file found at:\n'
              '{}'.format(config_path))
        user_config = {}

    with (get_data_dir() / 'default_config.yml').open() as f:
        default_config = yaml.load(f, Loader=yaml.RoundTripLoader)

    config = merge_config(default_config, user_config)

    config['new_project']['license'] = get_license_index(config['new_project']['license'])
    set_if_none(user_config, config, 'new_project.default_dir', os.getcwd())

    # TODO set cwd to dir where we want to create the project
    set_if_none(user_config, config, 'author.name', stdout('git config --get user.name'))
    set_if_none(user_config, config, 'author.email', stdout('git config --get user.email'))

    set_if_none(user_config, config, 'editor', os.environ.get('EDITOR','vim'))

    config['pypi_list'] = _get_pypi_list(config['pypi_servers'])
    config['release']['upload'] = _get_pypi_list_index(config['pypi_servers'], config['release']['upload'])
    return config
Code example #6
File: test_build.py Project: aznashwan/charm-tools
    def test_regenerate_inplace(self):
        # take a generated example where a base layer has changed
        # regenerate in place
        # make some assertions
        bu = build.Builder()
        bu.log_level = "WARNING"
        bu.output_dir = "out"
        bu.series = "trusty"
        bu.name = "foo"
        bu.charm = "trusty/b"
        bu.hide_metrics = True
        bu()
        base = path("out/trusty/foo")
        self.assertTrue(base.exists())

        # verify the 1st gen worked
        self.assertTrue((base / "a").exists())
        self.assertTrue((base / "README.md").exists())

        # now regenerate from the target
        with utils.cd("out/trusty/foo"):
            bu = build.Builder()
            bu.log_level = "WARNING"
            bu.output_dir = path(os.getcwd())
            bu.series = "trusty"
            # The generate target and source are now the same
            bu.name = "foo"
            bu.charm = "."
            bu.hide_metrics = True
            bu()
            base = bu.output_dir
            self.assertTrue(base.exists())

            # Check that the generated layer.yaml makes sense
            cy = base / "layer.yaml"
            config = yaml.load(cy.open())
            self.assertEquals(config["includes"], ["trusty/a", "interface:mysql"])
            self.assertEquals(config["is"], "foo")

            # We can even run it more than once
            bu()
            cy = base / "layer.yaml"
            config = yaml.load(cy.open())
            self.assertEquals(config["includes"], ["trusty/a", "interface:mysql"])
            self.assertEquals(config["is"], "foo")

            # We included an interface, we should be able to assert things about it
            # in its final form as well
            provides = base / "hooks/relations/mysql/provides.py"
            requires = base / "hooks/relations/mysql/requires.py"
            self.assertTrue(provides.exists())
            self.assertTrue(requires.exists())

            # and that we generated the hooks themselves
            for kind in ["joined", "changed", "broken", "departed"]:
                self.assertTrue((base / "hooks" / "mysql-relation-{}".format(kind)).exists())

            # and ensure we have an init file (the interface doesn't ship one, so it's added)
            init = base / "hooks/relations/mysql/__init__.py"
            self.assertTrue(init.exists())
Code example #7
def main(file_paths):
    with open(file_paths[0]) as yaml_file:
        result = yaml.load(yaml_file, Loader=yaml.RoundTripLoader)
    for file_path in file_paths[1:]:
        with open(file_path) as yaml_file:
            dict_update(result, yaml.load(yaml_file, Loader=yaml.RoundTripLoader))
    yaml.dump(result, sys.stdout, Dumper=yaml.RoundTripDumper)
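
The dict_update helper used above is not part of the snippet; a minimal recursive merge that would satisfy this call site might look like the following sketch (illustrative only, not the original project's implementation):

def dict_update(base, overlay):
    # Recursively fold `overlay` into `base` in place; later files win.
    for key, value in overlay.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            dict_update(base[key], value)
        else:
            base[key] = value
    return base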
Code example #8
    def test_tester_compose(self):
        composer = juju_compose.Composer()
        composer.log_level = "WARNING"
        composer.output_dir = "out"
        composer.series = "trusty"
        composer.name = "foo"
        composer.charm = "trusty/tester"
        composer()
        base = path('out/trusty/foo')
        self.assertTrue(base.exists())

        # Verify ignore rules applied
        self.assertFalse((base / ".bzr").exists())

        # Metadata should have combined provides fields
        metadata = base / "metadata.yaml"
        self.assertTrue(metadata.exists())
        metadata_data = yaml.load(metadata.open())
        self.assertIn("shared-db", metadata_data['provides'])
        self.assertIn("storage", metadata_data['provides'])

        # Config should have keys but not the ones in deletes
        config = base / "config.yaml"
        self.assertTrue(config.exists())
        config_data = yaml.load(config.open())['options']
        self.assertIn("bind-address", config_data)
        self.assertNotIn("vip", config_data)

        cyaml = base / "composer.yaml"
        self.assertTrue(cyaml.exists())
        cyaml_data = yaml.load(cyaml.open())
        self.assertEquals(cyaml_data['includes'], ['trusty/mysql'])
        self.assertEquals(cyaml_data['is'], 'trusty/tester')

        self.assertTrue((base / "hooks/config-changed").exists())

        # Files from the top layer as overrides
        start = base / "hooks/start"
        self.assertTrue(start.exists())
        self.assertIn("Overridden", start.text())

        self.assertTrue((base / "README.md").exists())
        self.assertEqual("dynamic tactics", (base / "README.md").text())

        sigs = base / ".composer.manifest"
        self.assertTrue(sigs.exists())
        data = json.load(sigs.open())
        self.assertEquals(data['signatures']["README.md"], [
            u'trusty/tester',
            "static",
            u'cfac20374288c097975e9f25a0d7c81783acdbc81'
            '24302ff4a731a4aea10de99'])

        self.assertEquals(data["signatures"]['metadata.yaml'], [
            u'trusty/tester',
            "dynamic",
            u'ecb80da834070599ac81190e78448440b442d4eda9'
            'cea2e4af3a1db58e60e400'])
Code example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("swagger")
    parser.add_argument("annotations")
    parser.add_argument("url")

    parser.add_argument("--print-rdf", action="store_true", default=False)
    parser.add_argument("--serve", action="store_true", default=False)
    parser.add_argument("--fuseki-path", type=str, default=".")

    args = parser.parse_args()
    warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)

    with open(args.annotations) as f2:
        annotations = yaml.load(f2)

    with open(args.swagger) as f:
        sld = swg2salad.swg2salad(yaml.load(f), annotations)

    sld["$base"] = "http://ga4gh.org/schemas/tool-registry-schemas"
    sld["name"] = "file://" + os.path.realpath(args.swagger)

    document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
        cmap(sld))

    txt = document_loader.fetch_text(
        urlparse.urljoin(
            "file://" +
            os.getcwd() +
            "/",
            args.url))
    r = yaml.load(txt)

    validate_doc(avsc_names, r, document_loader, True)

    sys.stderr.write("API returned valid response\n")

    toolreg = Namespace("http://ga4gh.org/schemas/tool-registry-schemas#")
    td = Namespace(
        "http://ga4gh.org/schemas/tool-registry-schemas#ToolDescriptor/")

    if args.print_rdf or args.serve:
        g = jsonld_context.makerdf(args.url, r, document_loader.ctx)
        for s, _, o in g.triples((None, td["type"], Literal("CWL"))):
            for _, _, d in g.triples((s, toolreg["descriptor"], None)):
                expand_cwl(d, unicode(s), g)

    if args.print_rdf:
        print(g.serialize(format="turtle"))

    if args.serve:
        t = tempfile.NamedTemporaryFile(suffix=".ttl")
        g.serialize(t, format="turtle")
        t.flush()
        subprocess.check_call(
            ["./fuseki-server", "--file=" + t.name, "/tools"], cwd=args.fuseki_path)
Code example #10
    def _create_config(self, api_name, api_version, api_full_name, output_dir,
                       package_dependencies_yaml, package_defaults_yaml, proto_deps,
                       proto_test_deps, language, local_paths, src_proto_path, package_type,
                       gapic_api_yaml, release_level=None, packaging='single-artifact',
                       generated_package_version=None):
        googleapis_dir = local_paths['googleapis']
        googleapis_path = os.path.commonprefix(
            [os.path.relpath(p, googleapis_dir) for p in src_proto_path])

        with open(package_dependencies_yaml) as dep_file:
            package_dependencies = yaml.load(dep_file, Loader=yaml.Loader)
        with open(package_defaults_yaml) as dep_file:
            package_defaults = yaml.load(dep_file, Loader=yaml.Loader)

        if release_level is not None:
            package_defaults['release_level'][language] = (
                release_level)
        # Apply package version and development status overrides if specified
        # in the artman config
        if generated_package_version is not None:
            release_version_type = package_defaults['release_level'][language]
            if release_version_type != 'ga':
                package_defaults['generated_package_version'][language] = (
                    generated_package_version)
            else:
                package_defaults['generated_ga_package_version'][language] = (
                    generated_package_version)

        gapic_config_name = ''
        if len(gapic_api_yaml) > 0:
            gapic_config_name = os.path.basename(gapic_api_yaml[0])

        dependency_type = 'local'
        if packaging == 'single-artifact':
            dependency_type = 'release'

        config = {
            'short_name': api_name,
            'major_version': api_version,
            'proto_path': googleapis_path,
            'package_name': {
                'default': api_full_name,
            },
            'proto_deps': proto_deps,
            'package_type': package_type,
            'dependency_type': dependency_type,
            'gapic_config_name': gapic_config_name,
        }

        if proto_test_deps:
            config['proto_test_deps'] = proto_test_deps

        config.update(package_dependencies)
        config.update(package_defaults)

        return config
Code example #11
 def test_examples(self):
     for a in ["field_name", "ident_res", "link_res", "vocab_res"]:
         ldr, _, _, _ = schema_salad.schema.load_schema(
             get_data("metaschema/%s_schema.yml" % a))
         with open(get_data("metaschema/%s_src.yml" % a)) as src_fp:
             src = ldr.resolve_all(
                 yaml.load(src_fp, Loader=SafeLoader), "", checklinks=False)[0]
         with open(get_data("metaschema/%s_proc.yml" % a)) as src_proc:
             proc = yaml.load(src_proc, Loader=SafeLoader)
         self.assertEqual(proc, src)
Code example #12
File: data.py Project: UltrosBot/Ultros
    def validate(self, data):
        try:
            yaml.load(data, version=(1, 1))
        except yaml.YAMLError as e:
            problem = e.problem
            problem = problem.replace("could not found", "could not find")

            mark = e.problem_mark
            if mark is not None:
                return [[mark.line, problem]]
            return [False, problem]
        return [True]
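
For reference, the three return shapes produced by validate(), as implied by the code above:

# validate("a: 1\n")      -> [True]                 (parses cleanly)
# validate("a: [1, 2\n")  -> [[<line>, <problem>]]  (parse error with a position mark)
# validate(<bad input>)   -> [False, <problem>]     (error without a position mark)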
Code example #13
File: dictdiff.py Project: Zaharid/yamldiff
def yaml_diff(p1, p2, *, set_keys=None):
    with open(p1) as f1:
        d1 = yaml.load(f1, Loader=yaml.RoundTripLoader)
    with open(p2) as f2:
        d2 = yaml.load(f2, Loader=yaml.RoundTripLoader)

    try:
        d1 = reprocess_dict(d1, set_keys)
        d2 = reprocess_dict(d2, set_keys)
    except KeyError as e:
        raise ValueError("Bad set key: {}".format(next(iter(e.args))))
    diff = dict_diff(d1, d2)
    print_diff(diff)
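
reprocess_dict, dict_diff and print_diff are helpers from the same project that are not shown here; as a rough stand-in for what dict_diff must produce, a key-by-key comparison like this would fit the call (purely illustrative):

def dict_diff(d1, d2):
    # Map each differing key to its (left, right) pair of values.
    diff = {}
    for key in set(d1) | set(d2):
        left, right = d1.get(key), d2.get(key)
        if left != right:
            diff[key] = (left, right)
    return diff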
Code example #14
File: views.py Project: liujitao/teamworkflow
def generate_yaml(hosts, proxy_host, filename):
    import ruamel.yaml as yaml
    tmp = '''
- hosts: 
  gather_facts: no
  vars:
   - proxy_host: 
  become: yes
  become_method: sudo
  become_user: root

  tasks:
    - name: update /etc/hosts
      template: src=/opt/ansible/templates/hosts.j2 dest=/etc/hosts owner=root group=root mode=0644
      notify: 
        - restart dnsmasq
        - flush json

  handlers:
    - name: restart dnsmasq
      service: name=dnsmasq state=restarted
     
    - name: flush json
      shell: python link_info.py chdir=/opt/rrd
'''

    data = yaml.load(tmp, Loader=yaml.RoundTripLoader)
    data[0]['hosts'] = hosts
    data[0]['vars'][0]['proxy_host'] = proxy_host

    with open(filename, 'w') as f:
        yaml.dump(data, f, Dumper=yaml.RoundTripDumper, default_flow_style=False, indent=2)
Code example #15
def main():
    subprocess.run(['git', 'checkout', '--', 'zuul.d/projects.yaml'])
    yaml = ruamel.yaml.YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)
    projects = yaml.load(open('zuul.d/projects.yaml', 'r'))

    for project in projects:
        if project['project']['name'].split('/')[1].startswith('networking-'):
            if 'templates' not in project['project']:
                continue
            templates = project['project']['templates']
            for template in ('openstack-python-jobs',
                             'openstack-python35-jobs'):
                if template in templates:
                    new_name = template + '-neutron'
                    templates[templates.index(template)] = new_name

    yaml.dump(projects, open('zuul.d/projects.yaml', 'w'))

    # Strip the extra 2 spaces that ruamel.yaml appends because we told it
    # to indent an extra 2 spaces. Because the top level entry is a list it
    # applies that indentation at the top. It doesn't indent the comment lines
    # extra though, so don't do them.
    with open('zuul.d/projects.yaml', 'r') as main_in:
        main_content = main_in.readlines()
    with open('zuul.d/projects.yaml', 'w') as main_out:
        for line in main_content:
            if '#' in line:
                main_out.write(line)
            else:
                if line.startswith('  - project'):
                    main_out.write('\n')
                main_out.write(line[2:])
Code example #16
File: yedit.py Project: georgegoh/openshift-ansible
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)
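
The dump-and-reload in put() is how these yedit snippets deep-copy a ruamel document without losing comments or key order; in isolation the technique is simply this (a sketch against the legacy ruamel.yaml API used throughout these examples):

import ruamel.yaml as yaml

def round_trip_copy(data):
    # Serialize with the round-trip dumper and re-parse to get an
    # independent copy that still carries comments and ordering.
    return yaml.load(yaml.round_trip_dump(data, default_flow_style=False),
                     yaml.RoundTripLoader)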
Code example #17
File: yedit.py Project: georgegoh/openshift-ansible
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
                                     % (inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # If vtype is not str then go ahead and attempt to yaml load it.
        if isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming ' +
                                     'value. value=[%s] vtype=[%s]'
                                     % (type(inc_value), vtype))

        return inc_value
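
In effect, parse_value lets yaml.load coerce plain strings into native types unless 'str' was explicitly requested. Expected behaviour, sketched as comments rather than verified output:

# parse_value('3')                   -> 3 (int, via yaml.load)
# parse_value('true')                -> True
# parse_value('3', vtype='str')      -> '3' (left untouched)
# parse_value(True, vtype='str')     -> 'True' (bool converted to str)
# parse_value('maybe', vtype='bool') -> raises YeditException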
Code example #18
File: directory_layout.py Project: LLNL/spack
    def _extension_map(self, spec):
        """Get a dict<name -> spec> for all extensions currently
           installed for this package."""
        _check_concrete(spec)

        if spec not in self._extension_maps:
            path = self.extension_file_path(spec)
            if not os.path.exists(path):
                self._extension_maps[spec] = {}

            else:
                by_hash = self.layout.specs_by_hash()
                exts = {}
                with open(path) as ext_file:
                    yaml_file = yaml.load(ext_file)
                    for entry in yaml_file['extensions']:
                        name = next(iter(entry))
                        dag_hash = entry[name]['hash']
                        prefix   = entry[name]['path']

                        if dag_hash not in by_hash:
                            raise InvalidExtensionSpecError(
                                "Spec %s not found in %s" % (dag_hash, prefix))

                        ext_spec = by_hash[dag_hash]
                        if prefix != ext_spec.prefix:
                            raise InvalidExtensionSpecError(
                                "Prefix %s does not match spec hash %s: %s"
                                % (prefix, dag_hash, ext_spec))

                        exts[ext_spec.name] = ext_spec
                self._extension_maps[spec] = exts

        return self._extension_maps[spec]
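
The extensions file parsed above evidently stores one single-key mapping per installed extension; its inferred layout is sketched below (names and paths are illustrative, not taken from Spack's documentation):

# extensions:
# - py-numpy:
#     hash: abcdef1234567890abcdef1234567890
#     path: /opt/spack/opt/linux-x86_64/gcc-7/py-numpy-1.16.4-abcdef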
Code example #19
File: utility.py Project: AndrewMeadows/bullet3
def load_config(logdir):
  """Load a configuration from the log directory.

  Args:
    logdir: The logging directory containing the configuration file.

  Raises:
    IOError: The logging directory does not contain a configuration file.

  Returns:
    Configuration object.
  """
  config_path = logdir and os.path.join(logdir, 'config.yaml')
  if not config_path:
    message = (
        'Cannot resume an existing run since the logging directory does not '
        'contain a configuration file.')
    raise IOError(message)
  print("config_path=",config_path)

  stream = open(config_path, 'r')
  config = yaml.load(stream)
  message = 'Resume run and write summaries and checkpoints to {}.'
  print(message.format(logdir))
  return config
Code example #20
File: test_git.py Project: Musiqua/jenkins-autojobs
def config(jenkins, repo):
    base = u'''
    jenkins: %s
    repo: %s

    template: master-job-git
    namesep: '-'
    namefmt: '{shortref}'
    overwrite: true
    enable: 'sticky'

    sanitize:
      '@!?#&|\^_$%%*': '_'

    substitute:
      '@@JOB_NAME@@': '{shortref}'

    ignore:
      - 'refs/heads/feature/.*-nobuild'

    refs:
      - 'refs/heads/feature/(.*)'
      - 'refs/heads/scratch/(.*)':
          'namefmt': '{shortref}'
    '''

    base = dedent(base) % (jenkins.url, repo.url)
    base = yaml.load(io.StringIO(base))
    return base
Code example #21
File: gen_config.py Project: mills-lab/apps
def _AlterRuntime(config_filename, runtime):
  try:
    # 0. Take backup
    with tempfile.NamedTemporaryFile(prefix='app.yaml.') as f:
      backup_fname = f.name
    log.status.Print(
        'Copying original config [{0}] to backup location [{1}].'.format(
            config_filename, backup_fname))
    shutil.copyfile(config_filename, backup_fname)
    # 1. Open and parse file using ruamel
    with open(config_filename, 'r') as yaml_file:
      encoding = yaml_file.encoding
      config = yaml.load(yaml_file, yaml.RoundTripLoader)
    # 2. Alter the ruamel in-memory object representing the yaml file
    config['runtime'] = runtime
    # 3. Create an in-memory file buffer and write yaml file to it
    raw_buf = io.BytesIO()
    tmp_yaml_buf = io.TextIOWrapper(raw_buf, encoding)
    yaml.dump(config, tmp_yaml_buf, Dumper=yaml.RoundTripDumper)
    # 4. Overwrite the original app.yaml
    with open(config_filename, 'wb') as yaml_file:
      tmp_yaml_buf.seek(0)
      yaml_file.write(raw_buf.getvalue())
  except Exception as e:
    raise fingerprinter.AlterConfigFileError(e)
Code example #22
File: config.py Project: far1974/ntfy
def load_config(config_path=DEFAULT_CONFIG):
    logger = logging.getLogger(__name__)

    try:
        config = yaml.load(open(expanduser(config_path)))
    except IOError as e:
        if e.errno == errno.ENOENT and config_path == DEFAULT_CONFIG:
            logger.info('{} not found'.format(config_path))
            config = {}
        else:
            logger.error('Failed to open {}'.format(config_path),
                         exc_info=True)
            exit(1)
    except ValueError as e:
        logger.error('Failed to load {}'.format(config_path), exc_info=True)
        exit(1)

    if 'backend' in config:
        logger.warning(
            "The 'backend' config option is deprecated, use 'backends'")
        if 'backends' in config:
            logger.warning("Both 'backend' and 'backends' in config, "
                           "ignoring 'backend'.")
        else:
            config['backends'] = [config['backend']]

    return config
Code example #23
File: test_basic_ref.py Project: drone115b/cog
 def setUp(self):
   os.chdir( os.path.dirname( __file__ ) )
   filename = 'test_basic_ref_002.yaml'
   yamldoc = open( filename, 'rt' ).read()
   self.doc = yaml.load( yamldoc )
   self.ccn = cog.ccn.Context()
   self.ccn.load_doc( self.doc )
Code example #24
File: main.py Project: shinfan/artman
def read_user_config(flags):
    """Read the user config from disk and return it.

    Args:
        flags (argparse.Namespace): The flags from sys.argv.

    Returns:
        dict: The user config.
    """
    # Load the user configuration if it exists and save a dictionary.
    user_config = {}
    user_config_file = os.path.realpath(os.path.expanduser(flags.user_config))
    if os.path.isfile(user_config_file):
        with io.open(user_config_file) as ucf:
            user_config = yaml.load(ucf.read(), Loader=yaml.Loader) or {}

    # Sanity check: Is there a configuration? If not, abort.
    if not user_config:
        setup_logging(INFO)
        logger.critical('No user configuration found.')
        logger.warn('This is probably your first time running Artman.')
        logger.warn('Run `configure-artman` to get yourself set up.')
        sys.exit(64)

    # Done; return the user config.
    return user_config
Code example #25
File: setup.py Project: cburschka/cadence
def generate_emoticons(cdn_url, packs):
    output = {'packages': {}, 'sidebars': {}}
    imagepath = 'assets/emoticons'
    try:
        for pack in packs:
            datafile = 'emoticon-packs/' + pack + '/emoticons.yml'
            baseURL = cdn_url + imagepath + '/' + pack + '/'
            data = yaml.load(open(datafile, 'r'), Loader=yaml.RoundTripLoader)
            if 'codes' in data:
                output['packages'][pack] = {
                    'baseURL': baseURL,
                    'codes': data['codes']
                }
                if 'title' in data and 'icon' in data:
                    output['sidebars'][pack] = {
                        'icon': data['icon'],
                        'title': data['title']
                    }

            if 'aliases' in data:
                output['packages'][pack + '_hidden'] = {
                    'baseURL': baseURL,
                    'codes': data['aliases']
                }
        return json.dumps(output)
    except ValueError as e:
        print("Error parsing emoticon pack {}".format(pack))
        raise(e)
Code example #26
File: test_basic_wrx.py Project: drone115b/cog
  def setUp(self):
    self.yamldoc = """

- session std1 :
    inputs :
        - port std_version :
            widget_hint : string
            widget_help : Select a version of python to use
            value : python27
    code : |
        ccs.set_argv( ['/home/mayur/cog_session_%s.py' % ccs.get_input('std_version'), ccs.get_execid()] )

- session std2 :
    inputs :
        - port std2_version :
            value : python27
    code : |
        ccs.set_argv( ['/home/mayur/cog_session_%s.py' % ccs.get_input('std2_version'), ccs.get_execid()] )


- op op1 :
    inputs :
        - port op1_filename :
            widget_hint : string
            value : '/tmp/cog/cog.db'
    outputs:
        - port op1_output:
            widget_hint : string
    session : std1
    code : |
        import os
        ccx.set_output( 'op1_output', os.path.dirname( ccx.get_input( 'op1_filename' )))
        
- op op2 :
    inputs :
        - port op2_filename :
            widget_hint : string
    outputs:
        - port op2_output:
            widget_hint : string
            widget_help : I do not know what this thing does
    session : std2
    code : |
        print( "op2 code: %s" % ccx.get_input('op2_filename') )
        ccx.set_output( 'op2_output', ccx.get_input('op2_filename') )

- node node1 : op1
- node node2 : op2

- link : ['node1.op1_output', 'node2.op2_filename']

- ui qt4 :
    node.node1 : [ 21, 45 ]
    node.node2 : [ 56, 78 ]

"""

    self.doc = yaml.load( self.yamldoc )
    self.ccn = cog.ccn.Context()
    self.ccn.load_doc( self.doc )
Code example #27
File: conf.py Project: WhatWorksWhenForWhom/nlpppln
def generate_cwl_documentation(_):
    cur_dir = os.path.abspath(os.path.dirname(__file__))

    # find all cwl files
    with WorkflowGenerator() as wf:
        cwl_files = [step.run for step in wf.steps_library.steps.values()]
    # sort alphabetically
    cwl_files.sort()

    tools_file = os.path.join(cur_dir, 'tools.rst')
    tool_template = '\n{}\n{}\n\n{}\n'
    with codecs.open(tools_file, 'wb', encoding='utf-8') as f:
        f.write('Tools\n=====\n')
        f.write('\n``nlppln`` contains the following tools:\n')
        for cwl in cwl_files:
            if is_url(cwl):
                pass
                # TODO: get documentation from urls
            else:
                tool_name = os.path.basename(cwl)
                plusses = '+'*len(tool_name)
                with codecs.open(cwl) as c:
                    try:
                        cwl_yaml = yaml.load(c, Loader=yaml.RoundTripLoader)
                        doc = cwl_yaml.get('doc', 'No documentation')
                        f.write(tool_template.format(tool_name, plusses, doc))
                    except yaml.YAMLError:
                        pass
Code example #28
File: config.py Project: nestortoledo/shub
def update_yaml_dict(conf_path=None):
    """
    Context manager for updating a YAML file while preserving key ordering and
    comments.
    """
    conf_path = conf_path or GLOBAL_SCRAPINGHUB_YML_PATH
    dumper = yaml.RoundTripDumper
    try:
        with open(conf_path, 'r') as f:
            conf = yaml.load(f, yaml.RoundTripLoader) or {}
    except IOError as e:
        if e.errno != 2:
            raise
        conf = {}
        # Use alphabetic order when creating files
        dumper = yaml.Dumper
    # Code inside context manager is executed after this yield
    yield conf
    # Avoid writing "key: {}"
    for key in conf.keys():
        if conf[key] == {}:
            del conf[key]
    with open(conf_path, 'w') as f:
        # Avoid writing "{}"
        if conf:
            yaml.dump(conf, f, default_flow_style=False, Dumper=dumper)
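
Because the body yields, update_yaml_dict is evidently wrapped with contextlib.contextmanager above the snippet; under that assumption, usage looks roughly like this (the file name and keys are hypothetical):

with update_yaml_dict('scrapinghub.yml') as conf:
    conf.setdefault('projects', {})['default'] = 12345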
Code example #29
File: yedit.py Project: andrewklau/openshift-ansible
    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)

        return self.yaml_dict
Code example #30
File: replay.py Project: jpoley/cloud-custodian
def main():
    parser = setup_parser()
    options = parser.parse_args()

    module_dir = os.path.dirname(os.path.abspath(__file__))
    default_templates = [
        os.path.abspath(os.path.join(module_dir, 'msg-templates')),
        os.path.abspath(os.path.join(module_dir, '..', 'msg-templates')),
        os.path.abspath('.')
    ]
    templates = options.templates
    if templates:
        default_templates.append(
            os.path.abspath(os.path.expanduser(os.path.expandvars(templates)))
        )

    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    logging.getLogger('botocore').setLevel(logging.WARNING)

    with open(options.config) as fh:
        config = yaml.load(fh.read(), Loader=yaml.SafeLoader)

    jsonschema.validate(config, CONFIG_SCHEMA)
    setup_defaults(config)
    config['templates_folders'] = default_templates

    tester = MailerTester(
        options.MESSAGE_FILE, config, msg_plain=options.plain,
        json_dump_file=options.json_dump_file
    )
    tester.run(options.dry_run, options.print_only)
Code example #31
File: lib.py Project: michalsimek/yaml-bindings
def load_schema(schema):
    schema = os.path.join(schema_basedir, schema)
    with open(schema, 'r', encoding='utf-8') as f:
        return yaml.load(f.read())
Code example #32
File: data.py Project: Yunski/capsnet-greebles
        raise ValueError("Unsupported file")


def get_data(save_dir, urls, update=False):
    for url in urls:
        download_and_extract(save_dir, url, update)


if __name__ == '__main__':
    parser = argparse.ArgumentParser('Prepare datasets')
    parser.add_argument("--data_dir", default="data")
    parser.add_argument("--update", default=False, type=bool)
    args = parser.parse_args()
    if not os.path.exists(args.data_dir):
        os.mkdir(args.data_dir)
    save_dir = os.path.join(args.data_dir, cfg.dataset)
    with open(cfg.dataset_file) as stream:
        try:
            datasets = yaml.load(stream, Loader=yaml.Loader)
        except yaml.YAMLError as exc:
            raise Exception(exc)
    dataset = cfg.dataset
    if dataset not in datasets:
        raise ValueError("{} not in dataset file".format(dataset))
    selected_dataset = datasets[dataset]
    urls = [
        os.path.join(selected_dataset['base'], filepath)
        for filepath in selected_dataset['files']
    ]
    get_data(save_dir, urls, update=args.update)
Code example #33
def main(args):
    local_rank = dg.parallel.Env().local_rank
    nranks = dg.parallel.Env().nranks
    parallel = nranks > 1

    with open(args.config) as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    global_step = 0
    place = fluid.CUDAPlace(local_rank) if args.use_gpu else fluid.CPUPlace()

    if not os.path.exists(args.output):
        os.mkdir(args.output)

    writer = SummaryWriter(os.path.join(args.output,
                                        'log')) if local_rank == 0 else None

    fluid.enable_dygraph(place)
    network_cfg = cfg['network']
    model = TransformerTTS(
        network_cfg['embedding_size'], network_cfg['hidden_size'],
        network_cfg['encoder_num_head'], network_cfg['encoder_n_layers'],
        cfg['audio']['num_mels'], network_cfg['outputs_per_step'],
        network_cfg['decoder_num_head'], network_cfg['decoder_n_layers'])

    model.train()
    optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=dg.NoamDecay(
            1 / (cfg['train']['warm_up_step'] *
                 (cfg['train']['learning_rate']**2)),
            cfg['train']['warm_up_step']),
        parameter_list=model.parameters(),
        grad_clip=fluid.clip.GradientClipByGlobalNorm(
            cfg['train']['grad_clip_thresh']))

    # Load parameters.
    global_step = io.load_parameters(model=model,
                                     optimizer=optimizer,
                                     checkpoint_dir=os.path.join(
                                         args.output, 'checkpoints'),
                                     iteration=args.iteration,
                                     checkpoint_path=args.checkpoint)
    print("Rank {}: checkpoint loaded.".format(local_rank))

    if parallel:
        strategy = dg.parallel.prepare_context()
        model = fluid.dygraph.parallel.DataParallel(model, strategy)

    reader = LJSpeechLoader(cfg['audio'],
                            place,
                            args.data,
                            cfg['train']['batch_size'],
                            nranks,
                            local_rank,
                            shuffle=True).reader()

    for epoch in range(cfg['train']['max_epochs']):
        pbar = tqdm(reader)
        for i, data in enumerate(pbar):
            pbar.set_description('Processing at epoch %d' % epoch)
            character, mel, mel_input, pos_text, pos_mel = data

            global_step += 1

            mel_pred, postnet_pred, attn_probs, stop_preds, attn_enc, attn_dec = model(
                character, mel_input, pos_text, pos_mel)

            mel_loss = layers.mean(
                layers.abs(layers.elementwise_sub(mel_pred, mel)))
            post_mel_loss = layers.mean(
                layers.abs(layers.elementwise_sub(postnet_pred, mel)))
            loss = mel_loss + post_mel_loss

            # Note: When used stop token loss the learning did not work.
            if cfg['network']['stop_token']:
                label = (pos_mel == 0).astype(np.float32)
                stop_loss = cross_entropy(stop_preds, label)
                loss = loss + stop_loss

            if local_rank == 0:
                writer.add_scalars(
                    'training_loss', {
                        'mel_loss': mel_loss.numpy(),
                        'post_mel_loss': post_mel_loss.numpy()
                    }, global_step)

                if cfg['network']['stop_token']:
                    writer.add_scalar('stop_loss', stop_loss.numpy(),
                                      global_step)

                if parallel:
                    writer.add_scalars(
                        'alphas', {
                            'encoder_alpha':
                            model._layers.encoder.alpha.numpy(),
                            'decoder_alpha':
                            model._layers.decoder.alpha.numpy(),
                        }, global_step)
                else:
                    writer.add_scalars(
                        'alphas', {
                            'encoder_alpha': model.encoder.alpha.numpy(),
                            'decoder_alpha': model.decoder.alpha.numpy(),
                        }, global_step)

                writer.add_scalar('learning_rate',
                                  optimizer._learning_rate.step().numpy(),
                                  global_step)

                if global_step % cfg['train']['image_interval'] == 1:
                    for i, prob in enumerate(attn_probs):
                        for j in range(cfg['network']['decoder_num_head']):
                            x = np.uint8(
                                cm.viridis(prob.numpy()[
                                    j * cfg['train']['batch_size'] // 2]) *
                                255)
                            writer.add_image('Attention_%d_0' % global_step,
                                             x,
                                             i * 4 + j,
                                             dataformats="HWC")

                    for i, prob in enumerate(attn_enc):
                        for j in range(cfg['network']['encoder_num_head']):
                            x = np.uint8(
                                cm.viridis(prob.numpy()[
                                    j * cfg['train']['batch_size'] // 2]) *
                                255)
                            writer.add_image('Attention_enc_%d_0' %
                                             global_step,
                                             x,
                                             i * 4 + j,
                                             dataformats="HWC")

                    for i, prob in enumerate(attn_dec):
                        for j in range(cfg['network']['decoder_num_head']):
                            x = np.uint8(
                                cm.viridis(prob.numpy()[
                                    j * cfg['train']['batch_size'] // 2]) *
                                255)
                            writer.add_image('Attention_dec_%d_0' %
                                             global_step,
                                             x,
                                             i * 4 + j,
                                             dataformats="HWC")

            if parallel:
                loss = model.scale_loss(loss)
                loss.backward()
                model.apply_collective_grads()
            else:
                loss.backward()
            optimizer.minimize(loss)
            model.clear_gradients()

            # save checkpoint
            if local_rank == 0 and global_step % cfg['train'][
                    'checkpoint_interval'] == 0:
                io.save_parameters(os.path.join(args.output, 'checkpoints'),
                                   global_step, model, optimizer)

    if local_rank == 0:
        writer.close()
Code example #34
def main():  # type: () -> int

    args = arg_parser().parse_args(sys.argv[1:])
    if '--' in args.args:
        args.args.remove('--')

    # Remove test arguments with wrong syntax
    if args.testargs is not None:
        args.testargs = [testarg for testarg in args.testargs if testarg.count('==') == 1]

    if not args.test:
        arg_parser().print_help()
        return 1

    with open(args.test) as f:
        tests = yaml.load(f, Loader=yaml.SafeLoader)

    failures = 0
    unsupported = 0
    passed = 0
    suite_name, _ = os.path.splitext(os.path.basename(args.test))
    report = junit_xml.TestSuite(suite_name, [])

    # the number of total tests, failured tests, unsupported tests and passed tests for each tag
    ntotal = defaultdict(int)        # type: Dict[str, int]
    nfailures = defaultdict(int)     # type: Dict[str, int]
    nunsupported = defaultdict(int)  # type: Dict[str, int]
    npassed = defaultdict(int)       # type: Dict[str, int]

    if args.only_tools:
        alltests = tests
        tests = []
        for t in alltests:
            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
            cwl = loader.resolve_ref(t["tool"])[0]
            if isinstance(cwl, dict):
                if cwl["class"] == "CommandLineTool":
                    tests.append(t)
            else:
                raise Exception("Unexpected code path.")

    if args.tags:
        alltests = tests
        tests = []
        tags = args.tags.split(",")
        for t in alltests:
            ts = t.get("tags", [])
            if any((tag in ts for tag in tags)):
                tests.append(t)

    if args.l:
        for i, t in enumerate(tests):
            if t.get("short_name"):
                print(u"[%i] %s: %s" % (i + 1, t["short_name"], t.get("doc", "").strip()))
            else:
                print(u"[%i] %s" % (i + 1, t.get("doc", "").strip()))

        return 0

    if args.n is not None or args.s is not None:
        ntest = []
        if args.n is not None:
            for s in args.n.split(","):
                sp = s.split("-")
                if len(sp) == 2:
                    ntest.extend(list(range(int(sp[0]) - 1, int(sp[1]))))
                else:
                    ntest.append(int(s) - 1)
        if args.s is not None:
            for s in args.s.split(","):
                test_number = get_test_number_by_key(tests, "short_name", s)
                if test_number:
                    ntest.append(test_number)
                else:
                    _logger.error('Test with short name "%s" not found ', s)
                    return 1
    else:
        ntest = list(range(0, len(tests)))

    total = 0
    with ThreadPoolExecutor(max_workers=args.j) as executor:
        jobs = [executor.submit(run_test, args, tests[i], i+1, len(tests), args.timeout, args.junit_verbose)
                for i in ntest]
        try:
            for i, job in zip(ntest, jobs):
                test_result = job.result()
                test_case = test_result.create_test_case(tests[i])
                test_case.url = "cwltest:{}#{}".format(suite_name, i + 1)
                total += 1
                tags = tests[i].get("tags", [])
                for t in tags:
                    ntotal[t] += 1

                return_code = test_result.return_code
                category = test_case.category
                if return_code == 0:
                    passed += 1
                    for t in tags:
                        npassed[t] += 1
                elif return_code != 0 and return_code != UNSUPPORTED_FEATURE:
                    failures += 1
                    for t in tags:
                        nfailures[t] += 1
                    test_case.add_failure_info(output=test_result.message)
                elif return_code == UNSUPPORTED_FEATURE and category == REQUIRED:
                    failures += 1
                    for t in tags:
                        nfailures[t] += 1
                    test_case.add_failure_info(output=test_result.message)
                elif category != REQUIRED and return_code == UNSUPPORTED_FEATURE:
                    unsupported += 1
                    for t in tags:
                        nunsupported[t] += 1
                    test_case.add_skipped_info("Unsupported")
                else:
                    raise Exception(
                        "This is impossible, return_code: {}, category: "
                        "{}".format(return_code, category))
                report.test_cases.append(test_case)
        except KeyboardInterrupt:
            for job in jobs:
                job.cancel()
            _logger.error("Tests interrupted")

    if args.junit_xml:
        with open(args.junit_xml, 'w') as xml:
            junit_xml.TestSuite.to_file(xml, [report])

    if args.badgedir:
        os.mkdir(args.badgedir)
        for t, v in ntotal.items():
            percent = int((npassed[t]/float(v))*100)
            if npassed[t] == v:
                color = "green"
            else:
                color = "red"
            with open("{}/{}.json".format(args.badgedir, t), 'w') as out:
                out.write(json.dumps({
                    "subject": "[CWL] {}".format(t),
                    "status": "{}%".format(percent),
                    "color": color,
                }))

    if failures == 0 and unsupported == 0:
        _logger.info("All tests passed")
        return 0
    if failures == 0 and unsupported > 0:
        _logger.warning("%i tests passed, %i unsupported features",
                        total - unsupported, unsupported)
        return 0
    _logger.warning("%i tests passed, %i failures, %i unsupported features",
                    total - (failures + unsupported), failures, unsupported)
    return 1
Code example #35
def load_config(loglevels=None):
    log.info('Using home dir for BTS tools: %s' % BTS_TOOLS_HOMEDIR)
    global config
    if not exists(BTS_TOOLS_CONFIG_FILE):
        log.info('Copying default config file to %s' % BTS_TOOLS_CONFIG_FILE)
        try:
            os.makedirs(BTS_TOOLS_HOMEDIR)
        except OSError:
            pass
        shutil.copyfile(join(dirname(__file__), 'config.yaml'),
                        BTS_TOOLS_CONFIG_FILE)

    # load config file
    try:
        log.info('Loading config file: %s' % BTS_TOOLS_CONFIG_FILE)
        config_contents = open(BTS_TOOLS_CONFIG_FILE).read()
    except:
        log.error('Could not read config file: %s' % BTS_TOOLS_CONFIG_FILE)
        raise

    env = Environment(loader=PackageLoader('bts_tools', 'templates/config'))

    # render config from template
    try:
        config_contents = env.from_string(config_contents).render()
    except:
        log.error('Could not render config file as a valid jinja2 template')
        raise

    # load yaml config
    try:
        config = yaml.load(config_contents, Loader=yaml.RoundTripLoader)
    except:
        log.error('-' * 100)
        log.error('Config file contents is not a valid YAML object:')
        log.error(config_contents)
        log.error('-' * 100)
        raise

    # load default config and merge
    try:
        default = env.get_template('default.yaml').render()
        default = yaml.load(default, Loader=yaml.RoundTripLoader)
    except:
        log.error('Could not load defaults for config.yaml file...')
        raise

    with open(join(BTS_TOOLS_HOMEDIR, 'default_config.yaml'), 'w') as cfg:
        cfg.write(yaml.dump(default, indent=4, Dumper=yaml.RoundTripDumper))

    def recursive_update(a, b):
        for k, v in b.items():
            if k in a:
                if isinstance(v, dict):
                    recursive_update(a[k], v)
                else:
                    a[k] = v
            else:
                a[k] = v

    recursive_update(default, config)
    config = default

    # write full_config.yaml in ~/.bts_tools
    with open(join(BTS_TOOLS_HOMEDIR, 'full_config.yaml'), 'w') as cfg:
        cfg.write(yaml.dump(config, indent=4, Dumper=yaml.RoundTripDumper))

    # setup given logging levels, otherwise from config file
    if config.get('detailed_log', False):
        # https://pymotw.com/3/cgitb/index.html
        import cgitb
        cgitb.enable(format='text')
    loglevels = loglevels or config.get('logging', {})
    for name, level in loglevels.items():
        logging.getLogger(name).setLevel(getattr(logging, level))

    # check whether config.yaml has a correct format
    m = config['monitoring']['feeds']
    if (m['bts'].get('publish_time_interval') is None
            and m['bts'].get('publish_time_slot') is None):
        log.warning('Will not be able to publish feeds. You need to specify '
                    'either publish_time_interval or publish_time_slot')

    check_time_interval = m['check_time_interval']
    publish_time_interval = m['bts'].get('publish_time_interval')
    if publish_time_interval:
        if publish_time_interval < check_time_interval:
            log.error(
                'Feed publish time interval ({}) is smaller than check time interval ({})'
                .format(publish_time_interval, check_time_interval))
            log.error('Cannot compute proper period for publishing feeds...')

    # expand wildcards for monitoring plugins
    for client_name, client in config['clients'].items():
        for n in client.get('roles', []):
            n.setdefault('monitoring', [])
            if not isinstance(n['monitoring'], list):
                n['monitoring'] = [n['monitoring']]

            def add_cmdline_args(args):
                append_unique(client.setdefault('run_args', []), args)

            def add_monitoring(l2):
                append_unique(n['monitoring'], l2)

            if n['role'] == 'witness':
                # TODO: add 'prefer_backbone_exclusively' when implemented; in this case we also need:
                # TODO: "--accept-incoming-connections 0" (or limit list of allowed peers from within the client)
                add_monitoring([
                    'missed', 'network_connections', 'voted_in',
                    'wallet_state', 'fork'
                ])

            elif n['role'] == 'feed_publisher':
                add_monitoring(['feeds'])

            # options for seed node types
            elif n['role'] == 'seed':
                add_monitoring(['seed', 'network_connections', 'fork'])

            # options for backbone node types
            elif n['role'] == 'backbone':
                add_cmdline_args(['--disable-peer-advertising'])
                add_monitoring(['backbone', 'network_connections', 'fork'])

            else:
                log.warning('Unknown role: {}'.format(n['role']))

            # always check for free disk space
            add_monitoring(['free_disk_space'])

    return config
Code example #36
def format_yaml(unformatted: str, _info_str: str) -> str:
    parsed = yaml.load(unformatted)
    dump_stream = io.StringIO()
    yaml.dump(parsed, stream=dump_stream)
    return dump_stream.getvalue()
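
The snippet leaves its yaml object and the io import implicit; since dump() is called with a stream keyword, it is presumably ruamel.yaml's newer object API, so the surrounding setup would plausibly be (an assumption, not taken from the original file):

import io
import ruamel.yaml

# Round-trip mode by default: comments and key order survive the reformat.
yaml = ruamel.yaml.YAML()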
Code example #37
 def entry_definitions_yaml(self):
     with open(os.path.join(self.destination, self.entry_definitions)) as f:
         return yaml.load(f)
def test_common_usage_stats_are_sent_no_mocking(
        caplog, in_memory_data_context_config_usage_stats_enabled,
        monkeypatch):
    """
    What does this test and why?
    Our usage stats events are tested elsewhere in several ways (sending example
    events, validating sample events, throughout other tests ensuring the right
    events are sent, anonymization, opt-out, etc). This specific test is to
    ensure that there are no errors with the machinery to send the events in the
    UsageStatisticsHandler by running code that emits events and checking for
    errors in the log. This test purposely does not mock any part of the usage
    stats system to ensure the full code path is run, and sends events to the QA
    endpoint. This test uses both methods decorated with
    usage_statistics_enabled_method and those that send events directly.
    """

    # caplog defaults to WARNING and above; we want to see DEBUG-level messages for this test
    caplog.set_level(
        level=logging.DEBUG,
        logger="great_expectations.core.usage_statistics.usage_statistics",
    )

    # Make sure usage stats are enabled
    monkeypatch.delenv("GE_USAGE_STATS",
                       raising=False)  # Undo the project-wide test default
    assert os.getenv("GE_USAGE_STATS") is None

    context: BaseDataContext = BaseDataContext(
        in_memory_data_context_config_usage_stats_enabled)

    # Note, we lose the `data_context.__init__` event because it was emitted before closing the worker
    context._usage_statistics_handler._close_worker()

    # Make sure usage stats are enabled
    assert not context._check_global_usage_statistics_opt_out()
    assert context.anonymous_usage_statistics.enabled
    assert context.anonymous_usage_statistics.data_context_id == DATA_CONTEXT_ID

    # Note module_name fields are omitted purposely to ensure we are still able to send events
    datasource_yaml = """
    name: example_datasource
    class_name: Datasource
    module_name: great_expectations.datasource
    execution_engine:
      # module_name: great_expectations.execution_engine
      class_name: PandasExecutionEngine
    data_connectors:
        default_runtime_data_connector_name:
            class_name: RuntimeDataConnector
            # module_name: great_expectations.datasource.data_connector
            batch_identifiers:
                - default_identifier_name
    """

    # context.test_yaml_config() uses send_usage_message()
    context.test_yaml_config(yaml_config=datasource_yaml)
    expected_events: List[str] = ["data_context.test_yaml_config"]

    context.add_datasource(**yaml.load(datasource_yaml))
    expected_events.append("data_context.add_datasource")

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

    batch_request = RuntimeBatchRequest(
        datasource_name="example_datasource",
        data_connector_name="default_runtime_data_connector_name",
        data_asset_name=
        "my_data_asset",  # This can be anything that identifies this data_asset for you
        runtime_parameters={"batch_data": df},  # df is your dataframe
        batch_identifiers={"default_identifier_name": "default_identifier"},
    )

    context.create_expectation_suite(expectation_suite_name="test_suite",
                                     overwrite_existing=True)
    validator = context.get_validator(batch_request=batch_request,
                                      expectation_suite_name="test_suite")
    expected_events.append("data_context.get_batch_list")
    validator.expect_table_row_count_to_equal(value=2)
    validator.save_expectation_suite()
    expected_events.append("data_context.save_expectation_suite")

    checkpoint_yaml = """
    name: my_checkpoint
    config_version: 1
    class_name: SimpleCheckpoint
    validations:
      - batch_request:
            datasource_name: example_datasource
            data_connector_name: default_runtime_data_connector_name
            data_asset_name: my_data_asset
        expectation_suite_name: test_suite

    """
    context.test_yaml_config(yaml_config=checkpoint_yaml)
    expected_events.append("data_context.test_yaml_config")

    # Note: add_checkpoint is not instrumented as of 20211215
    context.add_checkpoint(**yaml.safe_load(checkpoint_yaml))

    context.run_checkpoint(
        checkpoint_name="my_checkpoint",
        batch_request={
            "runtime_parameters": {
                "batch_data": df
            },
            "batch_identifiers": {
                "default_identifier_name": "my_simple_df"
            },
        },
    )

    expected_events.append("data_context.get_batch_list")
    expected_events.append("data_asset.validate")
    expected_events.append("data_context.build_data_docs")
    expected_events.append("checkpoint.run")
    expected_events.append("data_context.run_checkpoint")

    assert not usage_stats_exceptions_exist(messages=caplog.messages)

    message_queue = context._usage_statistics_handler._message_queue.queue
    events = [event["event"] for event in message_queue]

    # Note: expected events does not contain the `data_context.__init__` event
    assert events == expected_events

    assert not usage_stats_invalid_messages_exist(caplog.messages)
コード例 #39
0
ファイル: utils.py プロジェクト: jhosoume/automlbenchmark
def yaml_load(file, as_namespace=False):
    if as_namespace:
        return yaml.load(file, Loader=YAMLNamespaceLoader)
    else:
        return yaml.safe_load(file)
コード例 #40
0
#!/usr/bin/env python3

import sys
import ruamel.yaml, json

yaml = ruamel.yaml.YAML(typ="safe")
print(json.dumps(yaml.load(sys.stdin.read())))
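A possible invocation of the filter above (the script name is hypothetical):

# $ echo 'a: 1' | python yaml2json.py
# {"a": 1}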
コード例 #41
0
def loads(blob):
    """Load a yaml blob and retain key ordering."""
    yaml = ruamel.yaml.YAML()
    return yaml.load(blob)
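A small usage sketch: ruamel.yaml's default round-trip loader returns a mapping that preserves the key order of the input, which is what the docstring promises.

data = loads("second: 2\nfirst: 1\n")
print(list(data))  # ['second', 'first']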
コード例 #42
0
def readYmlFile(filePath=None):
    with open(filePath, 'r', encoding="utf-8") as f:
        return yaml.load(f.read(), Loader=yaml.Loader)
コード例 #43
0
def load_yml(filepath):
    with open(filepath) as f:
        return yaml.load(f)
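If `yaml` here is PyYAML, calling yaml.load() without an explicit Loader emits a deprecation warning (and older PyYAML releases defaulted to the unsafe full loader). A safer variant might look like this (the function name is made up):

import yaml

def load_yml_safe(filepath):
    # Same as load_yml above, but with an explicit safe loader
    with open(filepath) as f:
        return yaml.load(f, Loader=yaml.SafeLoader)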
コード例 #44
0
ファイル: spack_yaml.py プロジェクト: key4hep/spack
def load_config(*args, **kwargs):
    """Load but modify the loader instance so that it will add __line__
       attributes to the returned object."""
    kwargs['Loader'] = OrderedLineLoader
    return yaml.load(*args, **kwargs)
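A hypothetical usage sketch, assuming OrderedLineLoader attaches __line__ attributes as the docstring says (the file name and key are made up):

with open("packages.yaml") as f:
    data = load_config(f)
# Each loaded node remembers the line it came from, handy for error messages
print(data["packages"].__line__)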
コード例 #45
0
ファイル: spack_yaml.py プロジェクト: key4hep/spack
def load(*args, **kwargs):
    return yaml.load(*args, **kwargs)
コード例 #46
0
                ('Android多语言校对表',cell_overwrite_ok=True)

# The first row of the xls sheet holds the column titles
list = ['字段名', '英文', '中文', '日文', '法文', '韩文']  # field name, English, Chinese, Japanese, French, Korean


def initXls():
    """[生成列表项名]"""
    i = 1
    for tmp_str in list:
        bs_language_proofread.write(0, i, list[i - 1])
        i += 1


with open('../config.yml') as f:
    content = yaml.load(f, Loader=yaml.RoundTripLoader)
    print('*** Read config.yml successfully !')
    source_xml_path = 'source_xml_path'
    target_xls_path = 'target_xls_path'

    initXls()

    list_language = ['en', 'cn', 'ja', 'fr', 'ko']  # language types
    list = [None] * 5

    i = 0
    for tmp_name in list_language:
        list[i] = ET.parse(str(content[source_xml_path][tmp_name]))
        i += 1
        print('*** Read xml file successfully: '\
              + str(content[source_xml_path][tmp_name]))
コード例 #47
0
    help=
    'Run the command in the Mechanical Turk Sandbox (used for testing purposes)'
)
parser.add_argument(
    '-p',
    '--profile',
    help=
    'Run commands using specific aws credentials rather than the default. To set-up alternative credentials see http://boto3.readthedocs.org/en/latest/guide/configuration.html#shared-credentials-file'
)
args = parser.parse_args()

host = 'mechanicalturk.sandbox.amazonaws.com' if args.sandbox else 'mechanicalturk.amazonaws.com'
mturk_website = 'requestersandbox.mturk.com' if args.sandbox else 'requester.mturk.com'

with open(args.successfile, 'r') as successfile:
    hitdata = load(successfile, Loader=CLoader)

mtc = MTurkConnection(is_secure=True, host=host, profile_name=args.profile)

all_results = []
outkeys = [
    'hitid', 'hittypeid', 'title', 'description', 'keywords', 'reward',
    'creationtime', 'assignments', 'numavailable', 'numpending', 'numcomplete',
    'hitstatus', 'reviewstatus', 'annotation', 'assignmentduration',
    'autoapprovaldelay', 'hitlifetime', 'viewhit', 'assignmentid', 'workerid',
    'assignmentstatus', 'autoapprovaltime', 'assignmentaccepttime',
    'assignmentsubmittime', 'assignmentapprovaltime', 'assignmentrejecttime',
    'deadline', 'feedback', 'reject'
]
answer_keys = set()
コード例 #48
0
    assets:
      insert_your_data_asset_name_here:
        base_directory: ./
        group_names:
          - name
          - param_1_from_your_data_connector_eg_year
          - param_2_from_your_data_connector_eg_month
        module_name: great_expectations.datasource.data_connector.asset
        class_name: Asset
        pattern: (.+)_(\d.*)-(\d.*)\.csv
    module_name: great_expectations.datasource.data_connector
"""

context.test_yaml_config(datasource_yaml)

context.add_datasource(**yaml.load(datasource_yaml))

# Here is an example BatchRequest for all batches associated with the specified DataAsset
batch_request = BatchRequest(
    datasource_name="insert_your_datasource_name_here",
    data_connector_name="insert_your_data_connector_name_here",
    data_asset_name="insert_your_data_asset_name_here",
)
# NOTE: The following assertion is only for testing and can be ignored by users.
assert len(context.get_batch_list(batch_request=batch_request)) == 36

# This BatchRequest adds a query to retrieve only the twelve batches from 2020
data_connector_query_2020 = {
    "batch_filter_parameters": {
        "param_1_from_your_data_connector_eg_year": "2020"
    }
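The excerpt cuts off here; a sketch of how such a query is typically attached to a BatchRequest (the 12-batch count echoes the comment above and is an assumption about the example data):

batch_request_2020 = BatchRequest(
    datasource_name="insert_your_datasource_name_here",
    data_connector_name="insert_your_data_connector_name_here",
    data_asset_name="insert_your_data_asset_name_here",
    data_connector_query={
        "batch_filter_parameters": {
            "param_1_from_your_data_connector_eg_year": "2020"
        }
    },
)
# NOTE: As above, this assertion is only for testing and can be ignored by users.
assert len(context.get_batch_list(batch_request=batch_request_2020)) == 12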
コード例 #49
0
    def run(self):
        while True:
            try:
                args = self.__input_queue.get(True, 0.1)
                ejob, args = args
                if ejob == EJob.STOP:
                    # terminate broker
                    self.terminate()

                elif ejob == EJob.MONITOR:
                    # monitor request
                    self.__results_queue_master.put(self.__monitor(args))

                elif ejob == EJob.CANCEL:
                    # cancel job
                    status = self.__cancel(args)
                    self.__results_queue_master.put(True)
                    self.__update_status(args.command_id, status)

                elif ejob == EJob.UPDATE:
                    # update the command status (e.g., RUN, EXIT, DONE);
                    # a "command has started" message coming from a worker process
                    c_id, status = args
                    self.__update_status(c_id, status)

                elif ejob == EJob.PID:
                    # updating process ID of submitted command
                    job_id, p_id = args
                    entry = yaml.load(self.__db[job_id], yaml.RoundTripLoader)
                    entry["job_id"] = p_id
                    self.__db[job_id] = yaml.dump(entry, Dumper=yaml.RoundTripDumper)
                    self.__db.sync()

                else:
                    # submitting job to worker
                    job, dependent = args
                    self.__add_command(job)
                    if dependent is not None:
                        self.__pending_dict[job] = dependent
                    else:
                        self.__worker_queue.put(job)

            except queue.Empty:
                # go through pending jobs and update their status;
                # if one of the dependent jobs terminated with an error, the pending job is terminated with an error as well
                for job, dependent in list(self.__pending_dict.items()):
                    status = self.__condition_fulfilled(dependent)
                    if job is not None:
                        if status == EStatus.EXIT:
                            # job cannot be called due to termination of dependent jobs
                            self.__update_status(job.command_id, EStatus.EXIT)
                            del self.__pending_dict[job]
                        elif status == EStatus.RUN:
                            self.__worker_queue.put(job)
                            del self.__pending_dict[job]
                    else:
                        # makes sure that a submitted job is properly registered and join works as intended
                        if status == EStatus.RUN:
                            del self.__pending_dict[job]

            except Exception as e:
                tb = traceback.format_exc()
                self.__results_queue_master.put((e, tb))
コード例 #50
0
ファイル: optional_attribute.py プロジェクト: sdruskat/yatiml
from typing import Optional, Union
import yaml
import yatiml


# Create document class
class Submission:
    def __init__(self,
                 name: str,
                 age: Union[int, str],
                 tool: Optional[str] = None) -> None:
        self.name = name
        self.age = age
        self.tool = tool


# Create loader
class MyLoader(yatiml.Loader):
    pass


yatiml.add_to_loader(MyLoader, Submission)
yatiml.set_document_type(MyLoader, Submission)

# Load YAML
yaml_text = ('name: Janice\n' 'age: 6\n')
doc = yaml.load(yaml_text, Loader=MyLoader)

print(doc.name)
print(doc.age)
print(doc.tool)
コード例 #51
0
def __get_testgrid_config(self):
    tg_config = yaml.load(E2E_TESTGRID_CONFIG_TEMPLATE)
    tg_config['name'] = self.job_name
    tg_config['gcs_prefix'] = GCS_LOG_PREFIX + self.job_name
    return tg_config
コード例 #52
0
def main():  # type: () -> int
    parser = argparse.ArgumentParser(description='Compliance tests for cwltool')
    parser.add_argument("--test", type=str, help="YAML file describing test cases", required=True)
    parser.add_argument("--basedir", type=str, help="Basedir to use for tests", default=".")
    parser.add_argument("-l", action="store_true", help="List tests then exit")
    parser.add_argument("-n", type=str, default=None, help="Run a specific tests, format is 1,3-6,9")
    parser.add_argument("--tool", type=str, default="cwl-runner",
                        help="CWL runner executable to use (default 'cwl-runner'")
    parser.add_argument("--only-tools", action="store_true", help="Only test CommandLineTools")
    parser.add_argument("--junit-xml", type=str, default=None, help="Path to JUnit xml file")
    parser.add_argument("--test-arg", type=str, help="Additional argument given in test cases and "
                                                     "required prefix for tool runner.",
                        metavar="cache==--cache-dir", action="append", dest="testargs")
    parser.add_argument("args", help="arguments to pass first to tool runner", nargs=argparse.REMAINDER)
    parser.add_argument("-j", type=int, default=1, help="Specifies the number of tests to run simultaneously "
                                                        "(defaults to one).")
    parser.add_argument("--verbose", action="store_true", help="More verbose output during test run.")
    parser.add_argument("--classname", type=str, default="", help="Specify classname for the Test Suite.")

    args = parser.parse_args()
    if '--' in args.args:
        args.args.remove('--')

    # Remove test arguments with wrong syntax
    if args.testargs is not None:
        args.testargs = [testarg for testarg in args.testargs if testarg.count('==') == 1]

    if not args.test:
        parser.print_help()
        return 1

    with open(args.test) as f:
        tests = yaml.load(f, Loader=yaml.SafeLoader)

    failures = 0
    unsupported = 0
    passed = 0
    suite_name, _ = os.path.splitext(os.path.basename(args.test))
    report = junit_xml.TestSuite(suite_name, [])

    if args.only_tools:
        alltests = tests
        tests = []
        for t in alltests:
            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
            cwl = loader.resolve_ref(t["tool"])[0]
            if isinstance(cwl, dict):
                if cwl["class"] == "CommandLineTool":
                    tests.append(t)
            else:
                raise Exception("Unexpected code path.")

    if args.l:
        for i, t in enumerate(tests):
            print(u"[%i] %s" % (i + 1, t["doc"].strip()))
        return 0

    if args.n is not None:
        ntest = []
        for s in args.n.split(","):
            sp = s.split("-")
            if len(sp) == 2:
                ntest.extend(list(range(int(sp[0]) - 1, int(sp[1]))))
            else:
                ntest.append(int(s) - 1)
    else:
        ntest = list(range(0, len(tests)))

    total = 0
    with ThreadPoolExecutor(max_workers=args.j) as executor:
        jobs = [executor.submit(run_test, args, i, tests)
                for i in ntest]
        try:
            for i, job in zip(ntest, jobs):
                test_result = job.result()
                test_case = test_result.create_test_case(tests[i])
                total += 1
                if test_result.return_code == 1:
                    failures += 1
                    test_case.add_failure_info(output=test_result.message)
                elif test_result.return_code == UNSUPPORTED_FEATURE:
                    unsupported += 1
                    test_case.add_skipped_info("Unsupported")
                else:
                    passed += 1
                report.test_cases.append(test_case)
        except KeyboardInterrupt:
            for job in jobs:
                job.cancel()
            _logger.error("Tests interrupted")

    if args.junit_xml:
        with open(args.junit_xml, 'w') as fp:
            junit_xml.TestSuite.to_file(fp, [report])

    if failures == 0 and unsupported == 0:
        _logger.info("All tests passed")
        return 0
    elif failures == 0 and unsupported > 0:
        _logger.warn("%i tests passed, %i unsupported features", total - unsupported, unsupported)
        return 0
    else:
        _logger.warn("%i tests passed, %i failures, %i unsupported features", total - (failures + unsupported), failures, unsupported)
        return 1
コード例 #53
0
    filtered = []
    for resource in resources:
        if resource['kind'] == 'ReplicaSet':
            match = regex.match(resource['metadata']['name'])
            if match is None or match.group(1) not in deployments:
                filtered.append(resource)
        else:
            filtered.append(resource)

    return filtered


if __name__ == '__main__':
    CONFIG_PATH = 'config.yaml'
    with open(CONFIG_PATH, 'r') as stream:
        CONFIG = yaml.load(stream, Loader=yaml.Loader)

    if 'kube_config' in CONFIG:
        kube_config.load_kube_config(CONFIG['kube_config'])
    else:
        kube_config.load_kube_config(path.join(environ['HOME'],
                                               '.kube/config'))

    resources = []
    for rsc_type in CONFIG['api_resource_map'].keys():
        print("Retrieving {}s...".format(rsc_type.replace('_', ' ')))
        resources += get_resource_set(rsc_type, CONFIG)

    if CONFIG['skip_deployment_replica_sets']:
        resources = skip_deployment_replica_sets(resources)
コード例 #54
0
#
# This script updates the apiServer.certSANs of a file containing a
# kubernetes cluster configmap.

import argparse
import ruamel.yaml as yaml

parser = argparse.ArgumentParser()
parser.add_argument("--configmap_file", required=True)
parser.add_argument("--certsans", required=True)
args = parser.parse_args()

configmap_file = args.configmap_file

with open(configmap_file, 'r') as dest:
    configmap = yaml.load(dest, Loader=yaml.RoundTripLoader)
    # cluster config is a single string, so we need to parse the string
    # in order to modify it correctly
    cluster_config = yaml.load(configmap['data']['ClusterConfiguration'],
                               Loader=yaml.RoundTripLoader)

cluster_config['apiServer']['certSANs'] = \
    [item.strip() for item in args.certsans.split(',')]

cluster_config_string = yaml.dump(cluster_config,
                                  Dumper=yaml.RoundTripDumper,
                                  default_flow_style=False)

# use yaml.scalarstring.PreservedScalarString to make sure the yaml is
# constructed with proper formatting and tabbing
cluster_config_string = yaml.scalarstring.PreservedScalarString(
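The excerpt ends mid-call; a hypothetical completion, assuming the intent stated in the comments above is to wrap the re-dumped config and write it back into the configmap file:

cluster_config_string = yaml.scalarstring.PreservedScalarString(
    cluster_config_string)

configmap['data']['ClusterConfiguration'] = cluster_config_string

with open(configmap_file, 'w') as dest:
    yaml.dump(configmap, dest, Dumper=yaml.RoundTripDumper)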
コード例 #55
0
ファイル: pipeline.py プロジェクト: sbasak3/bio_embeddings
def read_config_file(config_path: Path) -> Dict[str, Any]:
    with config_path.open("r") as fp:
        return yaml.load(fp, Loader=yaml.RoundTripLoader)
コード例 #56
0
ファイル: lib.py プロジェクト: michalsimek/yaml-bindings
def load(filename, line_number=False):
    with open(filename, 'r', encoding='utf-8') as f:
        if line_number:
            return rtyaml.load(f.read())
        else:
            return yaml.load(f.read())
コード例 #57
0
def ge_cloud_data_context_config(
    ge_cloud_runtime_base_url,
    ge_cloud_runtime_organization_id,
    ge_cloud_runtime_access_token,
):
    """
    This fixture is used to replicate a response retrieved from a GE Cloud API request.
    The resulting data is packaged into a DataContextConfig.

    Please see DataContext._retrieve_data_context_config_from_ge_cloud for more details.
    """
    DEFAULT_GE_CLOUD_DATA_CONTEXT_CONFIG = f"""
    datasources:
      default_spark_datasource:
        execution_engine:
          module_name: great_expectations.execution_engine
          class_name: SparkDFExecutionEngine
        module_name: great_expectations.datasource
        class_name: Datasource
        data_connectors:
          default_runtime_data_connector:
            class_name: RuntimeDataConnector
            batch_identifiers:
                - timestamp
      default_pandas_datasource:
          execution_engine:
            module_name: great_expectations.execution_engine
            class_name: PandasExecutionEngine
          module_name: great_expectations.datasource
          class_name: Datasource
          data_connectors:
            default_runtime_data_connector:
              class_name: RuntimeDataConnector
              batch_identifiers:
                - timestamp

    stores:
      default_evaluation_parameter_store:
        class_name: EvaluationParameterStore

      default_expectations_store:
        class_name: ExpectationsStore
        store_backend:
          class_name: GeCloudStoreBackend
          ge_cloud_base_url: {ge_cloud_runtime_base_url}
          ge_cloud_resource_type: expectation_suite
          ge_cloud_credentials:
            access_token: {ge_cloud_runtime_access_token}
            organization_id: {ge_cloud_runtime_organization_id}
          suppress_store_backend_id: True

      default_validations_store:
        class_name: ValidationsStore
        store_backend:
          class_name: GeCloudStoreBackend
          ge_cloud_base_url: {ge_cloud_runtime_base_url}
          ge_cloud_resource_type: suite_validation_result
          ge_cloud_credentials:
            access_token: {ge_cloud_runtime_access_token}
            organization_id: {ge_cloud_runtime_organization_id}
          suppress_store_backend_id: True

      default_checkpoint_store:
        class_name: CheckpointStore
        store_backend:
          class_name: GeCloudStoreBackend
          ge_cloud_base_url: {ge_cloud_runtime_base_url}
          ge_cloud_resource_type: contract
          ge_cloud_credentials:
            access_token: {ge_cloud_runtime_access_token}
            organization_id: {ge_cloud_runtime_organization_id}
          suppress_store_backend_id: True

    evaluation_parameter_store_name: default_evaluation_parameter_store
    expectations_store_name: default_expectations_store
    validations_store_name: default_validations_store
    checkpoint_store_name: default_checkpoint_store

    data_docs_sites:
      default_site:
        class_name: SiteBuilder
        show_how_to_buttons: true
        store_backend:
          class_name: GeCloudStoreBackend
          ge_cloud_base_url: {ge_cloud_runtime_base_url}
          ge_cloud_resource_type: rendered_data_doc
          ge_cloud_credentials:
            access_token: {ge_cloud_runtime_access_token}
            organization_id: {ge_cloud_runtime_organization_id}
          suppress_store_backend_id: True
        site_index_builder:
          class_name: DefaultSiteIndexBuilder
        site_section_builders:
          profiling: None

    anonymous_usage_statistics:
      enabled: true
      usage_statistics_url: https://dev.stats.greatexpectations.io/great_expectations/v1/usage_statistics
      data_context_id: {ge_cloud_data_context_config}
    """
    config = yaml.load(DEFAULT_GE_CLOUD_DATA_CONTEXT_CONFIG)
    return DataContextConfig(**config)
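A minimal sketch of how such a fixture might be consumed in a test; the test name is made up and the attribute access assumes DataContextConfig exposes its keys as attributes:

def test_ge_cloud_config_round_trips(ge_cloud_data_context_config):
    config = ge_cloud_data_context_config
    assert config.expectations_store_name == "default_expectations_store"
    assert "default_pandas_datasource" in config.datasources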
コード例 #58
0
import ruamel.yaml

yaml = ruamel.yaml.YAML()
data = yaml.load(open('environment.yml'))

requirements = []
for dep in data['dependencies']:
    if isinstance(dep, str):
        package, package_version, python_version = dep.split('=')
        if python_version == '0':
            continue
        requirements.append(package + '==' + package_version)
    elif isinstance(dep, dict):
        for preq in dep.get('pip', []):
            requirements.append(preq)

with open('requirements.txt', 'w') as fp:
    for requirement in requirements:
        print(requirement, file=fp)
コード例 #59
0
ファイル: utils.py プロジェクト: whpskyeagle/Graphics
def load_yml(filepath):
    '''Return the yml content of the file, or an empty dict {} if the file is empty.'''
    with open(filepath) as f:
        yml_body = yaml.load(f)
        return yml_body if yml_body else {}
コード例 #60
0
ファイル: kerncraft.py プロジェクト: skg7on/kerncraft
def run(parser, args, output_file=sys.stdout):
    """Run command line interface."""
    # Try loading results file (if requested)
    result_storage = {}
    if args.store:
        args.store.seek(0)
        try:
            result_storage = pickle.load(args.store)
        except EOFError:
            pass
        args.store.close()

    # machine information
    # Read machine description
    machine = MachineModel(args.machine.name, args=args)

    # process kernel
    if not args.kernel_description:
        code = str(args.code_file.read())
        code = clean_code(code)
        kernel = KernelCode(code,
                            filename=args.code_file.name,
                            machine=machine,
                            keep_intermediates=not args.clean_intermediates)
    else:
        description = str(args.code_file.read())
        kernel = KernelDescription(yaml.load(description, Loader=yaml.Loader),
                                   machine=machine)

    # define constants
    required_consts = [
        v[1] for v in kernel.variables.values() if v[1] is not None
    ]
    required_consts += [[l['start'], l['stop']]
                        for l in kernel.get_loop_stack()]
    # split into individual consts
    required_consts = [i for l in required_consts for i in l]
    required_consts = set([i for l in required_consts for i in l.free_symbols])
    if len(required_consts) > 0:
        # build defines permutations
        define_dict = {}
        for name, values in args.define:
            if name not in define_dict:
                define_dict[name] = [[name, v] for v in values]
                continue
            for v in values:
                if v not in define_dict[name]:
                    define_dict[name].append([name, v])
        define_product = list(itertools.product(*list(define_dict.values())))

        # Check that all consts have been defined
        if set(required_consts).difference(
                set([symbol_pos_int(k) for k in define_dict.keys()])):
            raise ValueError(
                "Not all constants have been defined. Required are: {}".format(
                    required_consts))
    else:
        define_product = [{}]

    for define in define_product:
        # Reset state of kernel
        kernel.clear_state()

        # Add constants from define arguments
        for k, v in define:
            kernel.set_constant(k, v)

        for model_name in uniquify(args.pmodel):
            # print header
            print('{:^80}'.format(' kerncraft '), file=output_file)
            print('{:<40}{:>40}'.format(args.code_file.name,
                                        '-m ' + args.machine.name),
                  file=output_file)
            print(' '.join(['-D {} {}'.format(k, v) for k, v in define]),
                  file=output_file)
            print('{:-^80}'.format(' ' + model_name + ' '), file=output_file)

            if args.verbose > 1:
                if not args.kernel_description:
                    kernel.print_kernel_code(output_file=output_file)
                    print('', file=output_file)
                kernel.print_variables_info(output_file=output_file)
                kernel.print_kernel_info(output_file=output_file)
            if args.verbose > 0:
                kernel.print_constants_info(output_file=output_file)

            model = getattr(models, model_name)(kernel, machine, args, parser)

            model.analyze()
            model.report(output_file=output_file)

            # Add results to storage
            kernel_name = os.path.split(args.code_file.name)[1]
            if kernel_name not in result_storage:
                result_storage[kernel_name] = {}
            if tuple(kernel.constants.items()
                     ) not in result_storage[kernel_name]:
                result_storage[kernel_name][tuple(
                    kernel.constants.items())] = {}
            result_storage[kernel_name][tuple(kernel.constants.items())][model_name] = \
                model.results

            print('', file=output_file)

        # Save storage to file (if requested)
        if args.store:
            temp_name = args.store.name + '.tmp'
            with open(temp_name, 'wb+') as f:
                pickle.dump(result_storage, f)
            shutil.move(temp_name, args.store.name)