Code Example #1
File: conda_create.py (project: wangcj05/calliope)

import yaml

def conda_create_command(
        requirement_files, env_name='calliope',
        python_version='3', channels=[], ignore=[], run=False):
    """
    Parameters
    ----------

    requirement_files : list
    env_name : str, optional
    python_version : str, optional
    channels : list, optional
    ignore : list, optional
    run : bool, optional

    Returns
    -------
    cmd : str

    """

    chan_set = set()
    dep_set = set()

    for req in requirement_files:
        with open(req) as f:
            y = yaml.safe_load(f)

        # Filter the optional `pip` dict in dependencies
        str_deps = list(filter(
            lambda i: isinstance(i, str),
            y['dependencies']
        ))

        if ignore:
            # Filter ignored dependencies
            str_deps = list(filter(
                lambda i: not any([ign in i for ign in ignore]),
                str_deps
            ))

        dep_set.update(str_deps)
        chan_set.update(y['channels'])

    dep_string = ' '.join(['"{}"'.format(i) for i in sorted(dep_set)])

    if channels:
        chan_string = ' '.join(['-c ' + i for i in channels])
    else:
        chan_string = ' '.join(['-c ' + i for i in chan_set])

    cmd = 'conda create --name {name} --override-channels {chans} "python={py}" {deps}'.format(
        name=env_name,
        chans=chan_string,
        deps=dep_string,
        py=python_version
    )

    return cmd
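
A hypothetical usage sketch for conda_create_command (not from the calliope repository): it writes a small requirements file with yaml.safe_dump, then builds the command string. The file name, dependency names, and environment name are illustrative.

import yaml

reqs = {
    "channels": ["conda-forge", "defaults"],
    "dependencies": ["numpy >= 1.16", "pandas", {"pip": ["some-pip-only-dep"]}],
}
with open("requirements.yml", "w") as f:
    yaml.safe_dump(reqs, f)

cmd = conda_create_command(["requirements.yml"], env_name="demo",
                           channels=["conda-forge", "defaults"], ignore=["pandas"])
print(cmd)
# conda create --name demo --override-channels -c conda-forge -c defaults "python=3" "numpy >= 1.16"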
Code Example #2
    def load_yaml(self, yaml_file="example.yaml"):
        if not yaml_file:
            return None
        yaml_data = None
        with open(yaml_file, 'r') as fp:
            try:
                yaml_data = yaml.safe_load(fp)
            except yaml.YAMLError as exc:
                print(exc)
        return yaml_data
Code Example #3
File: test_object.py (project: CINPLA/exdir)
def test_object_attrs(setup_teardown_file):
    obj = setup_teardown_file[3].create_dataset("test_object", shape=(1,), dtype=float)

    assert isinstance(obj.attrs, Attribute)
    assert obj.attrs.mode.value == 1
    obj.attrs = "test value"

    with (setup_teardown_file[1] / "test_object" / ATTRIBUTES_FILENAME).open("r", encoding="utf-8") as meta_file:
        meta_data = yaml.safe_load(meta_file)
        assert meta_data == "test value"
Code Example #4
File: utilities.py (project: mazeau/cantera)
def load_yaml(yml_file):
    # Load YAML data from file using the "safe" loading option.
    try:
        yaml_ = yaml.YAML(typ="safe")
        with open(yml_file, "rt", encoding="utf-8") as stream:
            return yaml_.load(stream)
    except yaml.constructor.ConstructorError:
        with open(yml_file, "rt", encoding="utf-8") as stream:
            # Ensure that  the loader remains backward-compatible with legacy
            # ruamel.yaml versions (prior to 0.17.0).
            return yaml.safe_load(stream)
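
The fallback above exists because newer ruamel.yaml releases route safe loading through the YAML class instead of the PyYAML-style module functions. A minimal sketch of the post-0.17 API, assuming a hypothetical config.yaml:

from ruamel.yaml import YAML

yaml_loader = YAML(typ="safe")
with open("config.yaml", "rt", encoding="utf-8") as stream:
    data = yaml_loader.load(stream)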
Code Example #5
def upload_package(feedstock_root, recipe_root, config_file):
    specific_config = safe_load(open(config_file))
    if "channel_targets" in specific_config:
        channels = [c.strip().split(" ") for c in specific_config["channel_targets"]]
    else:
        update_global_config(feedstock_root)
        channels = _global_config["targets"]

    from .upload_or_check_non_existence import upload_or_check

    for owner, channel in channels:
        upload_or_check(recipe_root, owner, channel, [config_file])
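
For context, a sketch of the channel_targets shape the list comprehension above assumes: each entry is an "<owner> <label>" string that split(" ") turns into the (owner, channel) pairs consumed by upload_or_check.

targets = ["conda-forge main"]  # hypothetical channel_targets value
channels = [c.strip().split(" ") for c in targets]
assert channels == [["conda-forge", "main"]]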
Code Example #6
File: build_all.py (project: xywei/staged-recipes)
def build_all(recipes_dir, arch):
    folders = os.listdir(recipes_dir)
    old_comp_folders = []
    new_comp_folders = []
    if not folders:
        print("Found no recipes to build")
        return

    if get_host_platform() == "win":
        new_comp_folders.extend(folders)
    else:
        for folder in folders:
            built = False
            cbc = os.path.join(recipes_dir, folder, "conda_build_config.yaml")
            if os.path.exists(cbc):
                with open(cbc, "r") as f:
                    text = ''.join(f.readlines())
                if 'channel_sources' in text:
                    specific_config = safe_load(text)
                    if "channel_targets" not in specific_config:
                        raise RuntimeError(
                            "channel_targets not found in {}".format(folder))
                    if "channel_sources" in specific_config:
                        for row in specific_config["channel_sources"]:
                            channels = [c.strip() for c in row.split(",")]
                            if channels != ['conda-forge', 'defaults'] and \
                                    channels != ['conda-forge/label/cf201901', 'defaults']:
                                print(
                                    "Not a standard configuration of channel_sources. Building {} individually."
                                    .format(folder))
                                conda_build.api.build(
                                    [os.path.join(recipes_dir, folder)],
                                    config=get_config(arch, channels))
                                built = True
                                break
                    if not built:
                        old_comp_folders.append(folder)
                        continue
            new_comp_folders.append(folder)

    if old_comp_folders:
        print("Building {} with conda-forge/label/cf201901".format(
            ','.join(old_comp_folders)))
        channel_urls = ['local', 'conda-forge/label/cf201901', 'defaults']
        build_folders(recipes_dir, old_comp_folders, arch, channel_urls)
    if new_comp_folders:
        print("Building {} with conda-forge/label/main".format(
            ','.join(new_comp_folders)))
        channel_urls = ['local', 'conda-forge', 'defaults']
        build_folders(recipes_dir, new_comp_folders, arch, channel_urls)
Code Example #7
def make_dict_from_meta_file(path):
    try:
        document, compilers_in_build = dynamic_jinja_to_static_ruamel_yaml(
            path)
        recipe_dict = ruamel_yaml.safe_load(document)
        # add compilers to the dict
        for comp in compilers_in_build:
            recipe_dict["requirements"]["build"].append(comp)

        return recipe_dict
    except ruamel_yaml.YAMLError as exc:
        print(
            "Couldn't create a dictionary from the meta.ruamel_yaml. Error: {}"
            .format(exc))
Code Example #8
File: build_all.py (project: eggzilla/staged-recipes)
def build_all(recipes_dir, arch):
    folders = os.listdir(recipes_dir)
    old_comp_folders = []
    new_comp_folders = []
    if not folders:
        print("Found no recipes to build")
        return

    if get_host_platform() == "win":
        old_comp_folders.extend(folders)
    else:
        for folder in folders:
            built = False
            cbc = os.path.join(recipes_dir, folder, "conda_build_config.yaml")
            if os.path.exists(cbc):
                with open(cbc, "r") as f:
                    text = ''.join(f.readlines())
                if 'channel_sources' in text:
                    specific_config = safe_load(text)
                    if "channel_targets" not in specific_config:
                        raise RuntimeError("channel_targets not found in {}".format(folder))
                    if "channel_sources" in specific_config:
                        for row in specific_config["channel_sources"]:
                            channels = [c.strip() for c in row.split(",")]
                            if channels != ['conda-forge/label/gcc7', 'defaults'] and \
                                    channels != ['conda-forge', 'defaults']:
                                print("Not a standard configuration of channel_sources. Building {} individually.".format(folder))
                                conda_build.api.build([os.path.join(recipes_dir, folder)], config=get_config(arch, channels))
                                built = True
                                break
                    if not built:
                        new_comp_folders.append(folder)
                        continue
            old_comp_folders.append(folder)

    if old_comp_folders:
        print("Building {} with conda-forge/label/main".format(','.join(old_comp_folders)))
        channel_urls = ['local', 'conda-forge', 'defaults']
        build_folders(recipes_dir, old_comp_folders, arch, channel_urls)

    if new_comp_folders:
        print("Building {} with conda-forge/label/gcc7".format(','.join(new_comp_folders)))
        channel_urls = ['local', 'conda-forge/label/gcc7', 'defaults']
        build_folders(recipes_dir, new_comp_folders, arch, channel_urls)
Code Example #9
def setup_conda_rc(feedstock_root, recipe_root, config_file):
    specific_config = safe_load(open(config_file))
    if "channel_sources" in specific_config:
        # Due to rendering we may have more than one row for channel_sources
        # if nothing gets zipped with it
        first_row = specific_config["channel_sources"][0]  # type: str
        channels = [c.strip() for c in first_row.split(",")]
    else:
        update_global_config(feedstock_root)
        channels = _global_config["sources"]

    call(["conda", "config", "--remove", "channels", "defaults"])
    for c in reversed(channels):
        call(["conda", "config", "--add", "channels", c])

    call(["conda", "config", "--set", "show_channel_urls", "true"])
Code Example #10
File: metadata.py (project: bebaek/cryomem)

import collections.abc
from io import TextIOBase

import yaml

def load_md(src):
    """Load metadata in YAML from a source.

    Arguments:
        src: dict-like (data), string (filename), or file-like
    """
    if isinstance(src, collections.abc.Mapping):  # dict-like data is given
        rawconfig = src
    elif isinstance(src, str):  # filename is given
        with open(src, "r") as f:
            rawconfig = yaml.safe_load(f)  # plain yaml.load is deprecated/unsafe
    elif isinstance(src, TextIOBase):  # file-like is given
        rawconfig = yaml.safe_load(src)
    else:
        raise TypeError("src must be a mapping, a filename or a file-like object")

    return parse_md(rawconfig)
Code Example #11
def is_nonraw_object_directory(directory):
    meta_filename = directory / META_FILENAME
    if not meta_filename.exists():
        return False
    with meta_filename.open("r", encoding="utf-8") as meta_file:
        meta_data = yaml.safe_load(meta_file)

        if not isinstance(meta_data, dict):
            return False

        if EXDIR_METANAME not in meta_data:
            return False
        if TYPE_METANAME not in meta_data[EXDIR_METANAME]:
            return False
        valid_types = [DATASET_TYPENAME, FILE_TYPENAME, GROUP_TYPENAME]
        if meta_data[EXDIR_METANAME][TYPE_METANAME] not in valid_types:
            return False
    return True
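
For illustration, a metadata file that would pass the checks above, written with yaml.safe_dump; the literal key and type names ("exdir", "type", "dataset") are assumptions about the module constants.

import yaml

meta = {"exdir": {"type": "dataset", "version": 1}}
with open("exdir.yaml", "w", encoding="utf-8") as f:
    yaml.safe_dump(meta, f, default_flow_style=False)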
Code Example #12
def fail_if_travis_not_allowed_for_arch(config_file, feedstock_root):
    specific_config = safe_load(open(config_file))
    if "channel_targets" in specific_config:
        channels = [c.strip().split(" ") for c in specific_config["channel_targets"]]
    else:
        update_global_config(feedstock_root)
        channels = _global_config["channels"]["targets"]

    upload_to_conda_forge = any(owner == "conda-forge" for owner, _ in channels)
    
    if (
        upload_to_conda_forge
        and os.environ.get("CI", None) == "travis"
        and (
            platform.uname().machine.lower() in ["x86_64", "amd64"]
            or platform.system().lower() != "linux"
        )
    ):
        raise RuntimeError("Travis CI cannot be used on x86_64 in conda-forge!")
Code Example #13
def lock(flow, pf, py, lab):
    output = P.ENVENTURES[flow, pf, py, lab]
    if not output.parent.exists():
        output.parent.mkdir(parents=True)
    composite = {"name": output.name, CHN: [], DEP: []}
    for env in P.ENV_DEPS[flow, pf, py, lab]:
        composite = merge(composite, safe_load(env.read_text()))

    print(safe_dump(composite, default_flow_style=False), flush=True)

    with tempfile.TemporaryDirectory() as td:
        tdp = Path(td)

        env = tdp / "environment.yml"
        env.write_text(safe_dump(composite, default_flow_style=False))
        args = [P.CONDA_EXE, "lock", "--platform", pf]
        subprocess.check_call(args, cwd=td)
        if not output.parent.exists():
            output.parent.mkdir(parents=True)
        output.write_text((tdp / f"conda-{pf}.lock").read_text())
    return 0
Code Example #14
File: test_composite.py (project: paulblum/cantera)
    def test_yaml_surface(self):
        gas = ct.Solution('ptcombust.yaml', 'gas')
        surf = ct.Interface('ptcombust.yaml', 'Pt_surf', [gas])
        gas.TPY = 900, ct.one_atm, np.ones(gas.n_species)
        surf.coverages = np.ones(surf.n_species)
        surf.write_yaml('ptcombust-generated.yaml')

        with open('ptcombust-generated.yaml') as infile:
            generated = yaml.safe_load(infile)
        for key in ('phases', 'species', 'gas-reactions', 'Pt_surf-reactions'):
            self.assertIn(key, generated)
        self.assertEqual(len(generated['gas-reactions']), gas.n_reactions)
        self.assertEqual(len(generated['Pt_surf-reactions']), surf.n_reactions)
        self.assertEqual(len(generated['species']), surf.n_total_species)

        gas2 = ct.Solution('ptcombust-generated.yaml', 'gas')
        surf2 = ct.Interface('ptcombust-generated.yaml', 'Pt_surf', [gas2])
        self.assertArrayNear(surf.concentrations, surf2.concentrations)
        self.assertArrayNear(surf.partial_molar_enthalpies,
                             surf2.partial_molar_enthalpies)
        self.assertArrayNear(surf.forward_rate_constants,
                             surf2.forward_rate_constants)
Code Example #15
def fail_if_outdated_windows_ci(feedstock_root):
    if sys.platform != "win32":
        return

    if "APPVEYOR_ACCOUNT_NAME" in os.environ:
        provider = "appveyor"
        if os.environ["APPVEYOR_ACCOUNT_NAME"] != "conda-forge":
            return
        if "APPVEYOR_PULL_REQUEST_NUMBER" not in os.environ:
            return
    elif "BUILD_REPOSITORY_NAME" in os.environ:
        provider = "azure"
        if not os.environ["BUILD_REPOSITORY_NAME"].startswith("conda-forge/"):
            return
        if "SYSTEM_PULLREQUEST_PULLREQUESTID" not in os.environ:
            return
    else:
        return

    with open(os.path.join(feedstock_root, "conda-forge.yml")) as f:
        config = safe_load(f)
        if "provider" in config and "win" in config["provider"]:
            provider_cfg = config["provider"]["win"]
            if provider_cfg != "azure":
                return
            if provider == "appveyor":
                raise RuntimeError(
                    "This PR needs a rerender to switch from appveyor to azure")
            if (
                provider == "azure"
                and (
                    os.getenv("UPLOAD_PACKAGES", "False") == "False"
                    or os.path.exists(".appveyor.yml")
                )
            ):
                raise RuntimeError(
                    "This PR needs a rerender to switch from appveyor to azure")
Code Example #16
def test_create_object_directory(setup_teardown_folder):
    with pytest.raises(ValueError):
        exob._create_object_directory(pathlib.Path(setup_teardown_folder[2]), exob._default_metadata("wrong_typename"))

    exob._create_object_directory(pathlib.Path(setup_teardown_folder[2]), exob._default_metadata(exob.DATASET_TYPENAME))

    assert setup_teardown_folder[2].is_dir()

    file_path = setup_teardown_folder[2] / exob.META_FILENAME
    assert file_path.is_file()

    compare_metadata = {
        exob.EXDIR_METANAME: {
            exob.TYPE_METANAME: exob.DATASET_TYPENAME,
            exob.VERSION_METANAME: 1}
    }

    with file_path.open("r", encoding="utf-8") as meta_file:
        metadata = yaml.safe_load(meta_file)

        assert metadata == compare_metadata

    with pytest.raises(IOError):
        exob._create_object_directory(pathlib.Path(setup_teardown_folder[2]), exob.DATASET_TYPENAME)
Code Example #17
def upload_package(feedstock_root, recipe_root, config_file, validate, private, feedstock_name):
    if feedstock_name is None and validate:
        raise RuntimeError("You must supply the --feedstock-name option if validating!")

    specific_config = safe_load(open(config_file))
    if "channel_targets" in specific_config:
        channels = [c.strip().split(" ") for c in specific_config["channel_targets"]]
        source_channels = ",".join(
            [c.strip() for c in specific_config["channel_sources"]])
    else:
        update_global_config(feedstock_root)
        channels = _global_config["channels"]["targets"]
        source_channels = ",".join(_global_config["channels"]["sources"])

    if "UPLOAD_ON_BRANCH" in os.environ:
        if "GIT_BRANCH" not in os.environ:
            print(
                "WARNING: UPLOAD_ON_BRANCH env variable set, "
                "but GIT_BRANCH not set. Skipping check")
        else:
            if os.environ["UPLOAD_ON_BRANCH"] != os.environ["GIT_BRANCH"]:
                print(
                    "The branch {} is not configured to be "
                    "uploaded".format(os.environ["GIT_BRANCH"]))
                return

    upload_to_conda_forge = any(owner == "conda-forge" for owner, _ in channels)
    if upload_to_conda_forge and "channel_sources" in specific_config:
        allowed_channels = [
            "conda-forge", "conda-forge/label/", "defaults", "c4aarch64",
            "c4armv7l"]
        for source_channel in source_channels.split(","):
            if source_channel.startswith('https://conda-web.anaconda.org/'):
                source_channel = source_channel[len('https://conda-web.anaconda.org/'):]
            for c in allowed_channels:
                if source_channel.startswith(c):
                    break
            else:
                print(
                    "Uploading to %s with source channel '%s' "
                    "is not allowed" % ("conda-forge", source_channel))
                return

    # get the git sha of the current commit
    git_sha = subprocess.run(
        "git rev-parse HEAD",
        check=True,
        stdout=subprocess.PIPE,
        shell=True,
        cwd=feedstock_root,
    ).stdout.decode("utf-8").strip()
    if len(git_sha) == 0:
        git_sha = None
        print("Did not find git SHA for this build!")
    else:
        print("Found git SHA %s for this build!" % git_sha)

    for owner, channel in channels:
        if validate and owner == "conda-forge":
            retry_upload_or_check(
                feedstock_name, recipe_root, STAGING, channel,
                [config_file], validate=True, git_sha=git_sha)
        else:
            retry_upload_or_check(
                feedstock_name, recipe_root, owner, channel,
                [config_file], validate=False, private_upload=private)
Code Example #18
File: extractsids.py (project: udayardahal/faunus)
import sys
if sys.version_info < (3, 0):
    sys.stdout.write("Sorry, Python 3 or higher required\n")
    sys.exit(1)

import os, json, urllib.request
import ruamel_yaml as yaml

if len(sys.argv) != 2:
    print("usage: {} music.yml".format(sys.argv[0]))
    sys.exit(1)

filename = sys.argv[1]

with open(filename) as f:
    sidlist = yaml.safe_load(f)
    server = sidlist['server'] + '/'
    dstdir = 'sids/'
    if not os.path.exists(dstdir):
        os.makedirs(dstdir)
    for i in sidlist['songs']:
        url = server + i['file']
        basename = os.path.basename(url)
        dstfile = dstdir + basename
        i['file'] = dstfile
        if not os.path.isfile(dstfile):
            print('retrieving', dstfile)
            urllib.request.urlretrieve(url, dstfile)

    # save playlist to json
    out = json.dumps(sidlist, indent=2)
Code Example #19
    root_path = os.path.realpath(
        os.path.join(os.path.dirname(__file__), os.path.pardir))

    # import the osgameclones data
    osgc_path = os.path.realpath(
        os.path.join(root_path, os.path.pardir, '11_osgameclones.git',
                     'games'))
    files = os.listdir(osgc_path)

    # iterate over all yaml files in osgameclones/data folder
    osgc_entries = []
    for file in files:
        # read yaml
        with open(os.path.join(osgc_path, file), 'r') as stream:
            try:
                _ = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                raise exc

        # add to entries
        osgc_entries.extend(_)
    print('{} entries in osgameclones'.format(len(osgc_entries)))

    # eliminate the ignored entries
    osgc_entries = [
        x for x in osgc_entries if x['name'] not in osgc_ignored_entries
    ]

    # fix names and licenses (so they are no longer detected as deviations downstream)
    for index, entry in enumerate(osgc_entries):
        name = entry['name']
Code Example #20
    def test_cli_yaml(script_runner):
        """is the default output valid yaml?"""

        ret = script_runner.run("jupyter", "starters", "list")
        assert ret.success
        assert safe_load(ret.stdout)
Code Example #21
def load_configuration(filepath):
    with open(filepath, "r") as f:
        config = yaml.safe_load(f)

    return config
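
A round-trip sketch for the loader above, assuming load_configuration is in scope; the file name and keys are illustrative.

import yaml

config = {"name": "demo", "threads": 4}
with open("config.yaml", "w") as f:
    yaml.safe_dump(config, f)

assert load_configuration("config.yaml") == config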
Code Example #22
    def _open_or_create(self):
        attrs = {}
        if self.filename.exists():  # NOTE str for Python 3.5 support
            with self.filename.open("r", encoding="utf-8") as meta_file:
                attrs = yaml.safe_load(meta_file) or {}  # safe_load returns None for an empty file
        return attrs
Code Example #23
File: Cell.py (project: acouplet/EROI-OMES)
    def configure(self, config_file):
        stream = open(config_file, 'r')
        config = yaml.safe_load(stream)

        self.name = config['name']
        self.re_share = config['re_share']
        self.imp_share = config['imp_share']
        self.nuc_share = config['nuc_share']
        if self.log_level >= 1:
            print("Loading cell: {0}".format(self.name))

        # Sets an order of importance for typical days to approximate the different inputs
        self.weight = config['weight']

        # Set consumption
        if self.log_level == 2: print('\tSet consumption,', end='')
        self.cons = np.loadtxt(open(config['consumption_file'], 'r'),
                               skiprows=1)
        self.cons = self.cons.reshape(-1, self.dt).mean(
            axis=1)  # Reshape according to the time step
        self.cons = self.cons * 1e6

        # Producers
        self.wt_parks = []
        self.pv_parks = []

        ## PV
        if self.log_level == 2: print('Set PV parks,', end='')
        if 'regions' in config['pv']:
            for i in range(len(config['pv']['regions'])):
                pvpark = PVPark(config['pv']['regions'][i], config['pv'],
                                self.size, self.dt)
                self.pv_parks.append(pvpark)
        else:
            pvpark = PVPark(config['pv'], config['pv'], self.size, self.dt)
            self.pv_parks.append(pvpark)

        ## WT
        if self.log_level == 2: print('Set WT parks,', end='')
        for onoff in config['wind']:
            if 'regions' in config['wind'][onoff]:
                for i in range(len(config['wind'][onoff]['regions'])):
                    wtpark = WTPark(config['wind'][onoff]['regions'][i],
                                    config['wind'][onoff], self.size, self.dt,
                                    onoff == 'onshore')
                    self.wt_parks.append(wtpark)
            else:
                wtpark = WTPark(config['wind'][onoff], config['wind'][onoff],
                                self.size, self.dt, onoff == 'onshore')
                self.wt_parks.append(wtpark)

        ## CCGT
        if self.log_level == 2: print('Set CCGT,', end='')
        self.ccgt = CCGTPlant(config['ccgt'])

        ## Coal
        if self.log_level == 2: print('Set Coal,', end='')
        self.coal = CoalPlant(config['coal'])

        ## Nuclear
        if self.log_level == 2: print('Set Nuclear,', end='')
        self.nuclear = NuclearPlant(config['nuclear'], self.size)

        ## Hydro Dam
        if self.log_level == 2: print('Set Dam,', end='')
        self.dam = DamPlant(config['hydro_dam'], self.size, self.dt)

        ## Run-of-the-river
        if self.log_level == 2: print('Set Run-of-the-river,', end='')
        self.runriver = RunRiver(config['hydro_river'], self.size, self.dt)

        # Storage
        self.bat_parks = []

        ## Batteries
        if self.log_level == 2: print('Set Battery parks,', end='')
        batt = Battery(config['battery'], "Lithium", self.size, self.dt)
        self.bat_parks.append(batt)

        ## PHES
        if self.log_level == 2: print('Set PHES,', end='')
        self.phes = PHES(config['phes'], self.size)

        ## PtG
        if self.log_level == 2: print('Set Power-to-Gas,', end='')
        self.ptg = PtG(config['ptg'], self.size, self.dt)

        ## Gas
        if self.log_level == 2: print('Set Gas storage')
        self.gas = Gas(config['gas'], self.size)
Code Example #24
File: test_composite.py (project: strlamp/cantera)
    @classmethod
    def setUpClass(cls):
        utilities.CanteraTest.setUpClass()
        cls.yml_file = pjoin(cls.test_data_dir, "thermo-models.yaml")
        with open(cls.yml_file, 'rt', encoding="utf-8") as stream:
            cls.yml = yaml.safe_load(stream)
Code Example #25
    x, y, w, h = cv2.boundingRect(cnt)
    bb.append([x, y, w, h])
    #cv2.rectangle(img, (x,y), (x+w, y+h), (255),2)
    #cv2.imshow("contours", img)

binary = []
for img in Images:
    img = np.array(img)
    mask = img == 0
    img[mask] = 0
    img[np.logical_not(mask)] = 255
    binary.append(img)

for metafilename in sorted(glob.glob('./images/*_poses.yaml')):
    meta_file = open(metafilename)
    meta = yaml.safe_load(meta_file)
    t_ = [i * 1000 for i in meta['schaltgabel']['pose'][0]]
    t.append(t_)
    quaternion_list = meta['schaltgabel']['pose'][1]
    modified_quaternion = [
        quaternion_list[1], quaternion_list[2], quaternion_list[3],
        quaternion_list[0]
    ]
    r.append(R.from_quat(modified_quaternion).as_matrix().flatten().tolist())  # as_dcm() was renamed as_matrix() in SciPy 1.4

#different notation of quaternions between sicpy and labelfusion
#https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_quat.html
#https://github.com/RobotLocomotion/LabelFusion/issues/41

posedata_file = open('./posedata.yml')
posedata = yaml.safe_load(posedata_file)
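
A minimal check of the (w, x, y, z) -> (x, y, z, w) reordering described in the comments above: SciPy expects the scalar part last, while LabelFusion stores it first.

import numpy as np
from scipy.spatial.transform import Rotation as R

wxyz = [1.0, 0.0, 0.0, 0.0]   # identity quaternion, scalar first (LabelFusion order)
xyzw = wxyz[1:] + wxyz[:1]    # move the scalar to the end for SciPy
assert np.allclose(R.from_quat(xyzw).as_matrix(), np.eye(3))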
Code Example #26
File: Case.py (project: acouplet/EROI-OMES)
    def __init__(self, configuration_file, log_level):
        """Create Case object based on the given configuration file.

        Parameters
        ----------
        configuration_file: str
            Path to the case configuration file (a .yml file)
        log_level: int
            A value between 0 and 2: 0 for little information, 2 for lots of
            information

        Returns
        -------
        object
            Case object

        """

        self.log_level = log_level
        stream = open(configuration_file, 'r')
        config = yaml.safe_load(stream)

        # === Load parameters ===
        self.name = config['name']
        self.dt = config['dt']
        self.size = int(8760 / self.dt)
        self.use_CPLEX = config['use_CPLEX']
        self.optiTD = config['optiTD']

        if self.log_level >= 1:
            print(Style.BRIGHT + "=== Case configuration ===" + Style.NORMAL)
        if self.log_level == 2:
            print(
                "Case name: \t{0}\nTime step: \t{1}\nProblem size: \t{2}\nCPLEX solver: \t{3}\nTypical Days: \t{4}"
                .format(self.name, self.dt, self.size, self.use_CPLEX,
                        self.optiTD))

        if self.optiTD:  # If Typical Days are used
            self.replicate = config['replicate']
            self.loadTD = config['loadTD']
            self.nrTD = config['nrTD']
            if self.log_level == 2:
                print("Load TD: \t{0}\nNumber of TD: \t{1}".format(
                    self.loadTD, self.nrTD))
                if not (self.loadTD):
                    print("Replicate: \t{0}".format(self.replicate))

        # Policies
        re_share = config['re_share']
        imp_share = config['imp_share']
        self.policies = [re_share, imp_share]
        if self.log_level == 2:
            print("Max RE share: \t{0}\nMax imp share: \t{1}".format(
                re_share, imp_share))

        # Cells
        self.cells = []
        nbcells = len(config['cells'])
        if self.log_level >= 1:
            print('\n' + Style.BRIGHT + "=== Cells configuration ===" +
                  Style.NORMAL)
        for cell in config['cells']:
            self.cells.append(Cell(self.size, self.dt, self.log_level))
            self.cells[-1].configure(cell)

        # e_exch, gas_exch, e_imp and gas_imp
        e_exch = {}
        gas_exch = {}
        e_imp = {}
        gas_imp = {}
        for i, cell in enumerate(self.cells):
            e_exch[cell.name] = {}
            gas_exch[cell.name] = {}
            e_imp[cell.name] = config['e_imp'][i]
            gas_imp[cell.name] = config['gas_imp'][i]
            for j, neighbour in enumerate(self.cells):
                e_exch[cell.name][neighbour.name] = config['e_exch'][i][j]
                gas_exch[cell.name][neighbour.name] = config['gas_exch'][i][j]

        self.e_exch = e_exch
        self.gas_exch = gas_exch
        self.e_imp = e_imp
        self.gas_imp = gas_imp

        self.grid = Grid(self.e_imp, self.e_exch, self.gas_imp, self.gas_exch)

        # Computed values
        self.E = {}
        self.E_tot = {}
        self.EROI_pv = {}
        self.EROI_wt = {}
        self.share = {}
        self.share_tot = {}
        self.LF = {}
        self.E_invest = {}
        self.E_invest_tot = {}

        # Typical Days
        if self.optiTD:
            assert self.dt < 12, "Time step too big to compute typical days."

            if self.loadTD:  # Load Typical Days from least error file
                filename = os.path.join('typical_days', self.name + '_' +  \
                        'TDays'+ '_' + str(self.dt) + '_' + str(self.nrTD) \
                        + 'TD_*')
                possible_TD = glob.glob(filename)
                min_error = 1e9
                for f in possible_TD:
                    error = float(f.split('_')[-1][:-4])
                    if error < min_error:
                        TDays_f = f
                        min_error = error
                if self.log_level >= 1:
                    print("\n" + Style.BRIGHT + "=== Typical Days ===" +
                          Style.NORMAL)
                    print('Min error : \t{:.6f}'.format(min_error))
                self.TDays = np.loadtxt(TDays_f)

            else:  # Start multithreaded computation to find the Typical Days
                threads = 8
                if self.log_level >= 1:
                    print("\n" + Style.BRIGHT + "=== Typical Days ===" +
                          Style.NORMAL)
                    print("Compute typical days with " + str(threads) +
                          " threads")
                manager = multiprocessing.Manager()
                return_dict = manager.dict()
                jobs = [None] * threads

                self.replicate = int(self.replicate / threads)
                for i in range(threads):  # Start threads
                    jobs[i] = multiprocessing.Process(target=self.computeTD, \
                                                      args=(i, return_dict))
                    jobs[i].start()
                for i in range(threads):  # Wait for threads to finish
                    jobs[i].join()

                best_error = 1e6
                for (TDays, error) in return_dict.values():
                    if error < best_error:
                        self.TDays = TDays.astype('int')
                        best_error = error
                if self.log_level >= 1:
                    print('Min error : \t{:.6f}'.format(best_error))

                dtperday = int(24 / self.dt)
                dtpd = int(24 / self.dt)
                powerTD = np.zeros(self.size)

                # Plot the load duration curve with and without typical days
                # for the user to assess the precision
                if self.log_level == 2:
                    for cell in self.cells:
                        for i in range(0, self.size, dtperday):
                            TD = self.TDays[i]
                            powerTD[i:i +
                                    dtpd] = cell.cons[TD * dtpd:TD * dtpd +
                                                      dtpd]

                        sort = np.sort(cell.cons)[::-1]
                        exceedence = np.arange(1., len(sort) + 1) / len(sort)
                        l1, = plt.plot(exceedence * 100, sort)
                        sort = np.sort(powerTD)[::-1]
                        exceedence = np.arange(1., len(sort) + 1) / len(sort)
                        l2, = plt.plot(exceedence * 100, sort)
                        plt.legend((l1, l2), ('Without TD', 'With TD'))
                        plt.title("LDC with and without Typical Days : " +
                                  cell.name)
                        plt.show()


                filename = os.path.join('typical_days', self.name + '_' +    \
                        'TDays'+ '_' + str(self.dt) + '_' + str(self.nrTD) + \
                        'TD_' + str(best_error) + '.txt')
                np.savetxt(filename, self.TDays, fmt='%d')
Code Example #27
File: wedge.py (project: bebaek/cryomem)
    def _load_chip_design(self, filename):
        with open(filename, "r") as f:
            #self.chip_design = yaml.load(f)
            self.chip_design = yaml.safe_load(f)
Code Example #28
File: test_distrib.py (project: derekmerck/diana-star)
    s0 = chain(
        orthanc.get_s(dx.oid(), DicomLevel.INSTANCES) | clf.classify_s()
        | redis.put_s())()
    s1 = chain(
        orthanc.get_s(hx.oid(), DicomLevel.INSTANCES) | clf.classify_s()
        | redis.put_s())()

    s0.get()
    s1.get()


if __name__ == "__main__":

    logging.basicConfig(level=logging.DEBUG)

    logging.debug("Simple Distributed Diana Test Script")

    service_cfg = os.environ.get("DIANA_SERVICES_CFG", "./services.yml")
    with open(service_cfg, "r") as f:
        services = yaml.safe_load(f)

    dcm_dir = "/Users/derek/data/DICOM/airway phantom/DICOM/PA2/ST1/SE1"

    files = File(location=dcm_dir)
    orthanc = Orthanc(**services['orthanc'])
    clf = Classifier()
    redis = Redis(**services['redis'])

    test_celery()
Code Example #29
def make_build_number(feedstock_root, recipe_root, config_file):
    """
    General logic

        The purpose of this is to ensure that the new compilers have build
        numbers > 1000 and legacy compilers have a build number < 1000.

        This is done by reading the build_number_decrement which is rendered
        into all the recipes.

        For linux and osx we want to avoid building for the legacy compilers
        with build numbers > 1000

    Example matrix
        - {'compiler_c': 'toolchain_c', 'build_number_decrement': 1000}
        - {'compiler_c': 'gcc',         'build_number_decrement': 0}

    """
    specific_config = safe_load(open(config_file))
    build_number_dec = int(specific_config.get("build_number_decrement", [0])[0])
    if build_number_dec == 0:
        return

    use_legacy_compilers = False
    for key in {"c", "cxx", "fortran"}:
        if "toolchain_{}".format(key) in specific_config.get(
                '{}_compiler'.format(key), ""):
            use_legacy_compilers = True
            break

    import conda_build.api

    rendered_recipe = conda_build.api.render(
        recipe_path=recipe_root, variants=specific_config
    )
    build_numbers = set()
    for recipe, _, _ in rendered_recipe:
        build_numbers.add(int(recipe.get_value("build/number")))
    if len(build_numbers) > 1:
        raise ValueError("More than one build number found, giving up")
    if len(build_numbers) == 0:
        print("> conda-forge:: No build number found.  Presuming build string")
        return
    try:
        build_number_int = build_numbers.pop()

        if build_number_int < 1000:
            if not use_legacy_compilers:
                raise ValueError(
                    "Only legacy compilers only valid with build numbers < 1000"
                )
            new_build_number = build_number_int
        else:
            new_build_number = build_number_int - build_number_dec

        config_dir, filename = os.path.split(config_file)
        with open(os.path.join(config_dir, "clobber_" + filename), "w") as fo:
            data = {"build": {"number": new_build_number}}
            print("> conda-forge:: Build number clobber {} -> {}".format(
                build_number_int, new_build_number))
            safe_dump(data, fo)
    except ValueError:
        # This is a NON string build number
        # we have this for things like the blas mutex and a few other similar cases
        print("> conda-forge:: No build number clobber gererated!")
        import traceback
        traceback.print_exc()
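
A worked instance of the decrement rule described in the docstring, using PyYAML's safe_dump as a stand-in for the script's loader; the file name is hypothetical.

import yaml

build_number_int, build_number_dec = 1001, 1000
clobber = {"build": {"number": build_number_int - build_number_dec}}  # -> 1
with open("clobber_conda_build_config.yaml", "w") as fo:
    yaml.safe_dump(clobber, fo)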
Code Example #30
File: yason.py (project: vaspelin/faunus)
try:  # ... to read json
    i = args.infile.read()
    if jinja2:
        # additional files can be used with {% include "file" %}
        dirs = [os.getcwd(), os.path.dirname(os.path.realpath(__file__)) + "/../top"]
        loader = jinja2.FileSystemLoader(dirs)
        env = jinja2.Environment(loader=loader)
        i = env.from_string(i).render()  # render jinja2
        # i = jinja2.Template(i).render() # render jinja2

    d = json.loads(i)
    if args.alwaysjson:
        if pygments:
            i = highlight(i, JsonLexer(), formatter())
        print(i)
    else:
        out = yaml.safe_dump(d, indent=args.indent, allow_unicode=True)
        if pygments:
            out = highlight(out, YamlLexer(), formatter())
        print(out)
except json.decoder.JSONDecodeError:
    try:  # ... to read yaml
        d = yaml.safe_load(i)  # plain load was deprecated in PyYAML
        out = json.dumps(d, indent=args.indent)
        if pygments:
            out = highlight(out, JsonLexer(), formatter())
        print(out)
    except yaml.parser.ParserError as exception:
        print("input error: invalid json or yaml format", file=sys.stderr)
        print(exception, file=sys.stderr)
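
The conversion at the core of yason, minus the jinja2 and pygments layers; a sketch using the PyYAML-compatible safe_load/safe_dump API that the legacy ruamel_yaml module exposes.

import json
import ruamel_yaml as yaml

doc = yaml.safe_load("a: 1\nb: [2, 3]\n")      # YAML in
print(json.dumps(doc, indent=4))               # JSON out
print(yaml.safe_dump({"a": 1}, indent=4))      # and a dict back to YAML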
Code Example #31
File: yaml_utils.py (project: HaroldMills/Vesper)
def load(source, **kwargs):
    return yaml.safe_load(source, **kwargs)
Code Example #32
import json
import os
from argparse import ArgumentParser
from pathlib import Path

import pytest
import ruamel_yaml

PARSER = ArgumentParser()

PARSER.add_argument("--ignore", default="")
ARGS = PARSER.parse_args()

# paths unlikely to change per package
LICENSE_DIR = "library_licenses"
RECIPE_DIR = Path(os.environ["RECIPE_DIR"])
SRC_DIR = Path(os.environ["SRC_DIR"])

# semi-surprisingly, this is the post-rendered recipe
META = ruamel_yaml.safe_load((RECIPE_DIR / "meta.yaml").read_text("utf-8"))
META_LICENSE_NAMES = [
    lf.split(f"{LICENSE_DIR}/")[1]
    for lf in META["about"]["license_file"]
    if LICENSE_DIR in lf
]

# vendored crates are covered by packaged LICENSE-* file
IGNORE_DEPS = ARGS.ignore.strip().split(",")

LIBRARY_LICENSES = RECIPE_DIR / LICENSE_DIR
LICENSE_FILE_NAMES = sorted([f.name for f in LIBRARY_LICENSES.glob("*")])

# as generated by cargo-license
RAW_DEPENDENCIES = json.loads(
    (