Example #1
def list_profiles():
    """Return a list of available AWS profile names

    Search the aws credentials file and the aws config file for profile names

    Returns
    -------
    ProfileInfo : namedtuple
        A named tuple with fields: `profile_names`, a list of AWS profiles in
        the aws config file and the aws shared credentials file;
        `credentials_file`, a path to the aws shared credentials file;
        and `aws_config_file`, a path to the aws config file
    """
    aws = os.path.join(os.path.expanduser("~"), ".aws")

    try:
        # Get aws credentials file from environment variable
        env_file = os.environ["AWS_SHARED_CREDENTIALS_FILE"]
        credentials_file = os.path.abspath(env_file)
    except KeyError:
        # Fallback on default credentials file path
        credentials_file = os.path.join(aws, "credentials")

    try:
        # Get aws config file from environment variable
        env_file = os.environ["AWS_CONFIG_FILE"]
        aws_config_file = os.path.abspath(env_file)
    except KeyError:
        # Fallback on default aws config file path
        aws_config_file = os.path.join(aws, "config")

    credentials = configparser.ConfigParser()
    credentials.read(credentials_file)

    aws_config = configparser.ConfigParser()
    aws_config.read(aws_config_file)

    profile_names = [
        s.split()[1] for s in aws_config.sections()
        if s.split()[0] == "profile" and len(s.split()) == 2
    ]

    profile_names += credentials.sections()

    # define a namedtuple for return value type
    ProfileInfo = namedtuple(
        "ProfileInfo",
        ["profile_names", "credentials_file", "aws_config_file"])

    return ProfileInfo(
        profile_names=profile_names,
        credentials_file=credentials_file,
        aws_config_file=aws_config_file,
    )
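The ProfileInfo result behaves like any namedtuple: fields can be read by name, by index, or via tuple unpacking. A minimal sketch (the values shown are placeholders, not real AWS profiles):

from collections import namedtuple

ProfileInfo = namedtuple(
    "ProfileInfo", ["profile_names", "credentials_file", "aws_config_file"])

info = ProfileInfo(
    profile_names=["default", "dev"],
    credentials_file="/home/user/.aws/credentials",
    aws_config_file="/home/user/.aws/config",
)

print(info.profile_names)               # access by field name
print(info[0])                          # access by index
names, creds_path, config_path = info   # tuple unpacking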
Example #2
def read_pkcs1_prikey(opts: argparse.Namespace) -> namedtuple:
    '''
    Read the RSA private key file.
    '''
    infov(opts, f'reading key file {opts.key}')
    with open(opts.key, 'r') as ifp:
        first, *b64, last = ifp.readlines()
    assert first.strip() == '-----BEGIN RSA PRIVATE KEY-----'
    assert last.strip() == '-----END RSA PRIVATE KEY-----'
    b64_str = ''.join([o.strip() for o in b64])
    b64_bytes = base64.b64decode(b64_str)

    # This is decoded raw, with no structure, that is why
    # recursion is disabled.
    _, msg = der_decoder.decode(b64_bytes,
                                asn1Spec=univ.Sequence(),
                                recursiveFlag=False)
    version, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    modulus, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    pubexp, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    priexp, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    prime1, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    prime2, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    exponent1, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    exponent2, msg = der_decoder.decode(msg, asn1Spec=univ.Integer())
    crt_coeff, _ = der_decoder.decode(msg, asn1Spec=univ.Integer())
    rec = {
        'version': version,
        'modulus': int(modulus),
        'pubexp': int(pubexp),
        'priexp': int(priexp),
        'prime1': int(prime1),
        'prime2': int(prime2),
        'exponent1': int(exponent1),
        'exponent2': int(exponent2),
        'crt_coeff': int(crt_coeff),
    }
    ntdef = namedtuple('_', sorted(rec.keys()))
    return ntdef(**rec)
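The interesting pattern here is building the namedtuple type on the fly from a dict's keys and instantiating it with `**rec`. A minimal standalone sketch of the same idea, with illustrative field names and values:

from collections import namedtuple

rec = {'modulus': 3233, 'pubexp': 17, 'priexp': 2753}

# Sorting the keys gives a deterministic field order; passing the values
# by keyword with **rec means the dict's own ordering does not matter.
ntdef = namedtuple('_', sorted(rec.keys()))
key = ntdef(**rec)
print(key.modulus, key.pubexp, key.priexp)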
Example #3
File: endl.py  Project: opotowsky/pyne
import sys
import re
from collections import defaultdict, namedtuple
from warnings import warn

import numpy as np

from pyne.utils import QAWarning
from pyne import rxdata
import pyne.utils as utils
from pyne import nucname

warn(__name__ + ' is not yet QA compliant.', QAWarning)

if sys.version_info[0] > 2:
    basestring = str

END_OF_TABLE_RE = re.compile(' {71}1')

DataTuple = namedtuple('DataTuple', ['yo', 'limits', 'x1'])

NFIELDS_RPROP = {0: 2, 10: 2, 11: 2, 21: 3, 22: 3}


class Library(rxdata.RxLib):
    """A class for a file which contains multiple ENDL tables."""
    @staticmethod
    def _structure_dict_entry():
        """Static method to generate entries for the structure dict."""
        return {
            'pin': set(),
            'rdesc': set(),
            'rprop': set(),
            'pin_rdesc_rprop': defaultdict(lambda: {'data_tuples': []})
        }
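DataTuple is only defined here; instances would be created wherever a table row is parsed. A minimal sketch with placeholder values:

from collections import namedtuple

DataTuple = namedtuple('DataTuple', ['yo', 'limits', 'x1'])

# Placeholder values; the real parser fills these from an ENDL table.
row = DataTuple(yo=0, limits=(0.0, 20.0), x1=None)
print(row.yo, row.limits, row.x1)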
Example #4
                       elf_references_PyFPE_jbuf,
                       elf_find_ucs2_symbols, elf_is_python_extension)
from .policy import (lddtree_external_references, versioned_symbols_policy,
                     get_policy_name, POLICY_PRIORITY_LOWEST,
                     POLICY_PRIORITY_HIGHEST, load_policies)

# namedtuple and defaultdict live in collections, not collections.abc;
# only abstract base classes such as Mapping moved to collections.abc.
from collections import defaultdict, namedtuple
try:
    from collections.abc import Mapping
except ImportError:
    # Python 2 fallback
    from collections import Mapping


log = logging.getLogger(__name__)
WheelAbIInfo = namedtuple('WheelAbIInfo',
                          ['overall_tag', 'external_refs', 'ref_tag',
                           'versioned_symbols', 'sym_tag', 'ucs_tag',
                           'pyfpe_tag'])


@functools.lru_cache()
def get_wheel_elfdata(wheel_fn: str):
    full_elftree = {}
    nonpy_elftree = {}
    full_external_refs = {}
    versioned_symbols = defaultdict(lambda: set())  # type: Dict[str, Set[str]]
    uses_ucs2_symbols = False
    uses_PyFPE_jbuf = False

    with InGenericPkgCtx(wheel_fn) as ctx:
        shared_libraries_in_purelib = []
Example #5
def _exists_already(job_id):
    """
    Check if an AWS batch job exists already.

    If batch job exists, return namedtuple with batch job info.
    Otherwise, set the namedtuple's `exists` field to
    `False`. The remaining fields default to `None`.

    Returns
    -------
    namedtuple JobExists
        A namedtuple with fields
        ['exists', 'name', 'job_id', 'job_queue_arn', 'job_definition',
         'environment_variables', 'array_job']
    """
    # define a namedtuple for return value type
    JobExists = namedtuple(
        "JobExists",
        [
            "exists",
            "name",
            "job_id",
            "job_queue_arn",
            "job_definition",
            "environment_variables",
            "array_job",
        ],
    )
    # make all but the first value default to None
    JobExists.__new__.__defaults__ = (None, ) * (len(JobExists._fields) - 1)

    response = clients["batch"].describe_jobs(jobs=[job_id])

    if response.get("jobs"):
        job = response.get("jobs")[0]
        name = job["jobName"]
        job_queue_arn = job["jobQueue"]
        job_def_arn = job["jobDefinition"]
        environment_variables = job["container"]["environment"]

        array_job = "arrayProperties" in job

        response = clients["batch"].describe_job_definitions(
            jobDefinitions=[job_def_arn])
        job_def = response.get("jobDefinitions")[0]
        job_def_name = job_def["jobDefinitionName"]
        job_def_env = job_def["containerProperties"]["environment"]
        bucket_env = [
            e for e in job_def_env if e["name"] == "CLOUDKNOT_JOBS_S3_BUCKET"
        ]
        output_bucket = bucket_env[0]["value"] if bucket_env else None
        job_def_retries = job_def["retryStrategy"]["attempts"]

        JobDef = namedtuple("JobDef",
                            ["name", "arn", "output_bucket", "retries"])
        job_definition = JobDef(
            name=job_def_name,
            arn=job_def_arn,
            output_bucket=output_bucket,
            retries=job_def_retries,
        )

        mod_logger.info("Job {id:s} exists.".format(id=job_id))

        return JobExists(
            exists=True,
            name=name,
            job_id=job_id,
            job_queue_arn=job_queue_arn,
            job_definition=job_definition,
            environment_variables=environment_variables,
            array_job=array_job,
        )
    else:
        return JobExists(exists=False)
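Setting `JobExists.__new__.__defaults__` is the pre-3.7 way to give all but the first field a default of None. On Python 3.7+ the `defaults` keyword argument of namedtuple does the same thing; a minimal sketch:

from collections import namedtuple

# defaults apply to the rightmost fields, so only `exists` is required here.
JobExists = namedtuple(
    "JobExists",
    ["exists", "name", "job_id", "job_queue_arn", "job_definition",
     "environment_variables", "array_job"],
    defaults=(None,) * 6,
)

print(JobExists(exists=False))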
Example #6
File: helper.py  Project: sonya1st/ceph
class JLeaf(namedtuple('JLeaf', ['typ', 'none'])):
    def __new__(cls, typ, none=False):
        if typ == str:
            typ = six.string_types
        return super(JLeaf, cls).__new__(cls, typ, none)
Example #7
File: helper.py  Project: sonya1st/ceph
    @classmethod
    def mons(cls):
        out = cls.ceph_cluster.mon_manager.raw_cluster_cmd('mon_status')
        j = json.loads(out)
        return [mon['name'] for mon in j['monmap']['mons']]


class JLeaf(namedtuple('JLeaf', ['typ', 'none'])):
    def __new__(cls, typ, none=False):
        if typ == str:
            typ = six.string_types
        return super(JLeaf, cls).__new__(cls, typ, none)


JList = namedtuple('JList', ['elem_typ'])

JTuple = namedtuple('JTuple', ['elem_typs'])


class JObj(namedtuple('JObj', ['sub_elems', 'allow_unknown', 'none', 'unknown_schema'])):
    def __new__(cls, sub_elems, allow_unknown=False, none=False, unknown_schema=None):
        """
        :type sub_elems: dict[str, JAny | JLeaf | JList | JObj | type]
        :type allow_unknown: bool
        :type none: bool
        :type unknown_schema: int, str, JAny | JLeaf | JList | JObj
        :return:
        """
        return super(JObj, cls).__new__(cls, sub_elems, allow_unknown, none, unknown_schema)
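JLeaf and JObj show the common pattern of subclassing a namedtuple and overriding `__new__` to normalize or default arguments before the immutable tuple is created. A minimal sketch of the same technique, without the six/ceph specifics:

from collections import namedtuple


class JLeaf(namedtuple('JLeaf', ['typ', 'none'])):
    def __new__(cls, typ, none=False):
        # Normalize the argument before the immutable tuple is built.
        if typ == str:
            typ = (str,)
        return super(JLeaf, cls).__new__(cls, typ, none)


leaf = JLeaf(str)
print(leaf.typ, leaf.none)   # (<class 'str'>,) False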
Example #8
    def _create_repo(self):
        """
        Create or retrieve an AWS ECR repository.

        Returns
        -------
        RepoInfo : namedtuple
            a namedtuple with fields name, uri, and registry_id
        """
        # Flake8 will see that repo_arn is set in the try/except clauses
        # and claim that we are referencing it before assignment below
        # so we predefine it here. Also, it should be predefined as a
        # string to pass parameter validation by boto.
        repo_arn = "test"
        try:
            # If repo exists, retrieve its info
            response = clients["ecr"].describe_repositories(
                repositoryNames=[self.name])

            repo_arn = response["repositories"][0]["repositoryArn"]
            repo_name = response["repositories"][0]["repositoryName"]
            repo_uri = response["repositories"][0]["repositoryUri"]
            repo_registry_id = response["repositories"][0]["registryId"]
            repo_created = False
        except clients["ecr"].exceptions.RepositoryNotFoundException:
            # If it doesn't exist already, then create it
            response = clients["ecr"].create_repository(
                repositoryName=self.name)

            repo_arn = response["repository"]["repositoryArn"]
            repo_name = response["repository"]["repositoryName"]
            repo_uri = response["repository"]["repositoryUri"]
            repo_registry_id = response["repository"]["registryId"]
            repo_created = True
        except botocore.exceptions.ClientError as e:
            error_code = e.response["Error"]["Code"]
            message = e.response["Error"]["Message"]
            if (error_code == "RepositoryNotFoundException"
                    or "RepositoryNotFoundException" in message):
                # If it doesn't exist already, then create it
                response = clients["ecr"].create_repository(
                    repositoryName=self.name)

                repo_arn = response["repository"]["repositoryArn"]
                repo_name = response["repository"]["repositoryName"]
                repo_uri = response["repository"]["repositoryUri"]
                repo_registry_id = response["repository"]["registryId"]
                repo_created = True

        if repo_created:
            mod_logger.info("Created repository {name:s} at {uri:s}".format(
                name=self.name, uri=repo_uri))
        else:
            mod_logger.info("Repository {name:s} already exists at "
                            "{uri:s}".format(name=self.name, uri=repo_uri))

        try:
            clients["ecr"].tag_resource(resourceArn=repo_arn, tags=self.tags)
        except NotImplementedError as e:
            moto_msg = "The tag_resource action has not been implemented"
            if moto_msg in e.args:
                # This exception is here for compatibility with moto
                # testing since the tag_resource action has not been
                # implemented in moto. Simply move on.
                pass
            else:
                raise e

        # Define and return namedtuple with repo info
        RepoInfo = namedtuple("RepoInfo", ["name", "uri", "registry_id"])
        return RepoInfo(name=repo_name,
                        uri=repo_uri,
                        registry_id=repo_registry_id)
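RepoInfo is handed back to callers that may want the values as a mapping, e.g. for logging; `_asdict()` covers that. A minimal sketch with placeholder values standing in for the boto3 response fields:

from collections import namedtuple

RepoInfo = namedtuple("RepoInfo", ["name", "uri", "registry_id"])

repo = RepoInfo(name="my-repo",
                uri="123456789012.dkr.ecr.us-east-1.amazonaws.com/my-repo",
                registry_id="123456789012")

# _asdict() returns a dict (OrderedDict before Python 3.8), handy for logging.
print(repo._asdict())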
Example #9
import sys
import re
from collections import defaultdict, namedtuple

import numpy as np

from pyne.utils import QA_warn
from pyne import rxdata
import pyne.utils as utils
from pyne import nucname

QA_warn(__name__)

if sys.version_info[0] > 2:
    basestring = str

END_OF_TABLE_RE = re.compile(" {71}1")

DataTuple = namedtuple("DataTuple", ["yo", "limits", "x1"])

NFIELDS_RPROP = {0: 2, 10: 2, 11: 2, 21: 3, 22: 3}


class Library(rxdata.RxLib):
    """A class for a file which contains multiple ENDL tables."""
    @staticmethod
    def _structure_dict_entry():
        """Static method to generate entries for the structure dict."""
        return {
            "pin": set(),
            "rdesc": set(),
            "rprop": set(),
            "pin_rdesc_rprop": defaultdict(lambda: {"data_tuples": []}),
        }
Example #10
import logging
import os
from textwrap import dedent
import traceback
# namedtuple and defaultdict are provided by collections (not collections.abc)
# on every Python version, so no fallback import is needed.
from collections import namedtuple, defaultdict

from teuthology.orchestra.run import CommandFailedError
from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology

log = logging.getLogger(__name__)


ValidationError = namedtuple("ValidationError", ["exception", "backtrace"])


class Workload(object):
    def __init__(self, filesystem, mount):
        self._mount = mount
        self._filesystem = filesystem
        self._initial_state = None

        # Accumulate backtraces for every failed validation, and return them.  Backtraces
        # are rather verbose, but we only see them when something breaks, and they
        # let us see which check failed without having to decorate each check with
        # a string
        self._errors = []

    def assert_equal(self, a, b):
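ValidationError pairs an exception with its formatted backtrace so the test can keep checking and report everything at the end. A minimal sketch of how such records might be accumulated (the failing assertion is illustrative):

import traceback
from collections import namedtuple

ValidationError = namedtuple("ValidationError", ["exception", "backtrace"])

errors = []
try:
    assert 1 == 2
except AssertionError as e:
    # Keep both the exception object and a readable backtrace string.
    errors.append(ValidationError(e, traceback.format_exc()))

print(errors[0].exception)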
Example #11
class AceTable(
        namedtuple('_AceTable', [
            'alias', 'awr', 'location', 'metastable', 'name', 'path',
            'temperature', 'zaid'
        ])):
    """A simple data structure reprsenenting an <ace_table /> tag in a
    cross_sections.xml file.
    """
    def __new__(cls,
                alias=None,
                awr=None,
                location=None,
                metastable=None,
                name=None,
                path=None,
                temperature=None,
                zaid=None,
                cross_sections_path=None):
        return super(AceTable, cls).__new__(cls,
                                            alias=alias,
                                            awr=awr,
                                            location=location,
                                            metastable=metastable,
                                            name=name,
                                            path=path,
                                            temperature=temperature,
                                            zaid=zaid)

    def __init__(self,
                 alias=None,
                 awr=None,
                 location=None,
                 metastable=None,
                 name=None,
                 path=None,
                 temperature=None,
                 zaid=None,
                 cross_sections_path=None):
        """Parameters
        ----------
        alias : str, optional
            ace_table attribute.
        awr : str, optional
            ace_table attribute.
        location : str, optional
            ace_table attribute.
        metastable : str, optional
            ace_table attribute.
        name : str, optional
            ace_table attribute.
        path : str, optional
            ace_table attribute.
        temperature : str, optional
            ace_table attribute.
        zaid : str, optional
            ace_table attribute. If set or non-zero then the nucid attribute
            will be set.
        cross_sections_path : str, optional
            If this and path are both present then the abspath attribute will be
            set.
        """

        super(AceTable, self).__init__()
        nuc = None
        if zaid is not None and zaid != '0':
            meta = "0" if metastable is None else metastable
            nuc = nucname.zzaaam_to_id(zaid + meta)
            if nuc == 0:
                pass
            elif not nucname.isnuclide(
                    nuc):  # then it's in MCNP metastable form
                nuc = nucname.mcnp_to_id(zaid)
        self.nucid = nuc
        abspath = None
        if path is not None and cross_sections_path is not None:
            if os.path.isdir(cross_sections_path):
                d = cross_sections_path
            else:
                d = os.path.dirname(cross_sections_path)
            abspath = os.path.abspath(os.path.join(d, path))
        self.abspath = abspath

    def xml(self):
        """Creates an XML representation of the ACE Table."""
        s = '<ace_table '
        s += " ".join([
            '{0}="{1}"'.format(f, getattr(self, f)) for f in self._fields
            if getattr(self, f) is not None
        ])
        s += '/>'
        return s
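Because AceTable defines no `__slots__`, instances keep a regular `__dict__`, which is what lets `__init__` attach the derived `nucid` and `abspath` attributes on top of the immutable tuple fields set in `__new__`. A minimal sketch of that split, using a hypothetical Record type:

import os
from collections import namedtuple


class Record(namedtuple('_Record', ['name', 'path'])):
    def __new__(cls, name=None, path=None, base_dir=None):
        # Only the declared tuple fields are passed to __new__.
        return super(Record, cls).__new__(cls, name=name, path=path)

    def __init__(self, name=None, path=None, base_dir=None):
        # Derived, mutable attributes live in the instance __dict__,
        # which exists because no __slots__ are declared.
        super(Record, self).__init__()
        self.abspath = None
        if path is not None and base_dir is not None:
            self.abspath = os.path.abspath(os.path.join(base_dir, path))


r = Record(name="x", path="data.txt", base_dir="/tmp")
print(r.name, r.abspath)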
Example #12
File: __init__.py  Project: sonya1st/ceph
# namedtuple lives in collections (not collections.abc) on every Python version.
from collections import namedtuple

sys_info = namedtuple('sys_info', ['devices'])
sys_info.devices = dict()


class UnloadedConfig(object):
    """
    This class is used as the default value for conf.ceph so that if
    a configuration file is not successfully loaded then it will give
    a nice error message when values from the config are used.
    """
    def __getattr__(self, *a):
        raise RuntimeError("No valid ceph configuration file was loaded.")


conf = namedtuple('config',
                  ['ceph', 'cluster', 'verbosity', 'path', 'log_path'])
conf.ceph = UnloadedConfig()

__version__ = "1.0.0"

__release__ = "nautilus"
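Note that this module never instantiates either namedtuple: `sys_info.devices` and `conf.ceph` are assigned on the classes themselves, so the namedtuples serve as ad-hoc module-level namespaces rather than immutable records. A minimal sketch of that style:

from collections import namedtuple

conf = namedtuple('config', ['ceph', 'cluster', 'verbosity', 'path', 'log_path'])

# As in the example above: attributes are assigned on the class itself and
# no instance is ever created.
conf.ceph = {"fsid": "placeholder"}
conf.cluster = "ceph"
print(conf.ceph, conf.cluster)

Assigning on the class replaces the generated field properties, so this style trades the usual namedtuple immutability for a convenient shared namespace.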
Example #13
def get_s3_params():
    """Get the cloudknot S3 bucket and corresponding access policy

    For the bucket name, first check the cloudknot config file for the bucket
    option. If that fails, check for the CLOUDKNOT_S3_BUCKET environment
    variable. If that fails, use
    'cloudknot-' + get_user().lower() + '-' + uuid4()

    For the policy name, first check the cloudknot config file. If that fails,
    use 'cloudknot-bucket-access-' + str(uuid.uuid4())

    For the region, first check the cloudknot config file. If that fails,
    use the current cloudknot region

    Returns
    -------
    BucketInfo : namedtuple
        A namedtuple with fields ['bucket', 'policy', 'policy_arn', 'sse']
    """
    config_file = get_config_file()
    config = configparser.ConfigParser()

    BucketInfo = namedtuple("BucketInfo",
                            ["bucket", "policy", "policy_arn", "sse"])

    with rlock:
        config.read(config_file)

        option = "s3-bucket-policy"
        if config.has_section("aws") and config.has_option("aws", option):
            # Get policy name from the config file
            policy = config.get("aws", option)
        else:
            # or set policy to None to create it in the call to
            # set_s3_params()
            policy = None

        option = "s3-bucket"
        if config.has_section("aws") and config.has_option("aws", option):
            bucket = config.get("aws", option)
        else:
            try:
                # Get the bucket name from an environment variable
                bucket = os.environ["CLOUDKNOT_S3_BUCKET"]
            except KeyError:
                # Use the fallback bucket b/c the cloudknot
                # bucket environment variable is not set
                bucket = "cloudknot-" + get_user().lower() + "-" + str(
                    uuid.uuid4())

            if policy is not None:
                # In this case, the bucket name is new, but the policy is not.
                # Update the policy to reflect the new bucket name.
                update_s3_policy(policy=policy, bucket=bucket)

        option = "s3-sse"
        if config.has_section("aws") and config.has_option("aws", option):
            sse = config.get("aws", option)
            if sse not in ["AES256", "aws:kms", "None"]:
                raise CloudknotInputError(
                    'The server-side encryption option "sse" must be '
                    'one of ["AES256", "aws:kms", "None"]')
        else:
            sse = None

        if sse == "None":
            sse = None

        # Use set_s3_params to check for name availability
        # and write to config file
        set_s3_params(bucket=bucket, policy=policy, sse=sse)

        if policy is None:
            config.read(config_file)
            policy = config.get("aws", "s3-bucket-policy")

    # Get all local policies with cloudknot prefix
    paginator = clients["iam"].get_paginator("list_policies")
    response_iterator = paginator.paginate(Scope="Local",
                                           PathPrefix="/cloudknot/")

    # response_iterator is a list of dicts. First convert to list of lists
    # and then flatten to a single list
    response_policies = [
        response["Policies"] for response in response_iterator
    ]
    policies = [lst for sublist in response_policies for lst in sublist]

    aws_policies = {d["PolicyName"]: d["Arn"] for d in policies}

    policy_arn = aws_policies[policy]

    return BucketInfo(bucket=bucket,
                      policy=policy,
                      policy_arn=policy_arn,
                      sse=sse)
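Callers get a BucketInfo back; since namedtuples are immutable, a changed copy has to go through `_replace()`. A minimal sketch with placeholder values standing in for what get_s3_params() would return:

from collections import namedtuple

BucketInfo = namedtuple("BucketInfo", ["bucket", "policy", "policy_arn", "sse"])

params = BucketInfo(bucket="cloudknot-user-1234",
                    policy="cloudknot-bucket-access-1234",
                    policy_arn="arn:aws:iam::123456789012:policy/ck",
                    sse=None)

# _replace() returns a modified copy; the original is untouched.
params_sse = params._replace(sse="AES256")
print(params_sse.sse, params.sse)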
Example #14
import logging
from collections import namedtuple
from spinn_utilities.ordered_set import OrderedSet
from spinn_utilities.progress_bar import ProgressBar
from spinn_utilities.log import FormatAdapter
from pacman.exceptions import PacmanRoutingException
from pacman.model.constraints.key_allocator_constraints import (
    ContiguousKeyRangeContraint)
from pacman.model.graphs.common import EdgeTrafficType
from pacman.utilities import utility_calls

logger = FormatAdapter(logging.getLogger(__name__))
_32_BITS = 0xFFFFFFFF
range_masks = {_32_BITS - ((2 ** i) - 1) for i in range(33)}

# Define an internal class for placements
PlacementTuple = namedtuple('PlacementTuple', 'x y p')


def validate_routes(machine_graph, placements, routing_infos,
                    routing_tables, machine, graph_mapper=None):
    """ Go though the placements given and check that the routing entries\
        within the routing tables support reach the correction destinations\
        as well as not producing any cycles.

    :param machine_graph: the graph
    :param placements: the placements container
    :param routing_infos: the routing info container
    :param routing_tables: \
        the routing tables generated by the routing algorithm
    :param graph_mapper: \
        the mapping between graphs or none if only using a machine graph
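PlacementTuple above is declared with the field names as a single space-separated string, which namedtuple accepts as shorthand for a list of names. A minimal sketch:

from collections import namedtuple

# 'x y p' is equivalent to passing the list ['x', 'y', 'p'].
PlacementTuple = namedtuple('PlacementTuple', 'x y p')

placement = PlacementTuple(x=0, y=1, p=3)
print(placement.x, placement.y, placement.p)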
Example #15
#
# microtome - Tim Conkling, 2012

from __future__ import print_function

import logging
import re

# namedtuple and OrderedDict live in collections on both Python 2 and 3;
# collections.abc only holds the abstract base classes.
from collections import namedtuple, OrderedDict

import six

Section = namedtuple("Section", ["name", "contents", "disabled"])

LOG = logging.getLogger(__name__)


class Matcher(object):
    """Analog to java.util.regex.Matcher. Iterates over a string with the given pattern,
    and delegates all method calls to the most recent MatchObject iteration result."""
    def __init__(self, pattern, string):
        self._finditer = pattern.finditer(string)
        self._last_match = None

    def __iter__(self):
        return self

    def next(self):
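The snippet is cut off at `next()`, which is the Python 2 iterator protocol; on Python 3 the method must be called `__next__`. A minimal, illustrative sketch of a dual-compatible Matcher (the method body is an assumption, not the project's actual implementation):

import re


class Matcher(object):
    """Illustrative sketch: same idea as above, made Python-3 compatible."""
    def __init__(self, pattern, string):
        self._finditer = pattern.finditer(string)
        self._last_match = None

    def __iter__(self):
        return self

    def __next__(self):                      # Python 3 iterator protocol
        self._last_match = next(self._finditer)
        return self._last_match

    next = __next__                          # keeps the Python 2 spelling working


m = Matcher(re.compile(r'\d+'), 'a1 b22 c333')
print([match.group(0) for match in m])       # ['1', '22', '333']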
Example #16
#
# microtome - Tim Conkling, 2012

"""
"""

# namedtuple lives in collections on both Python 2 and 3.
from collections import namedtuple

import microtome.core.defs as Defs

LibrarySpec =       namedtuple("LibrarySpec", ["namespace", "header_text", "tomes"])
TomeSpec =          namedtuple("TomeSpec", ["name", "superclass", "namespace", "props", "pos"])
PropSpec =          namedtuple("PropSpec", ["type", "name", "annotations", "pos"])
AnnotationSpec =    namedtuple("AnnotationSpec", ["name", "value", "pos"])
TypeSpec =          namedtuple("TypeSpec", ["name", "subtype"])

BoolType =      "bool"
IntType =       "int"
FloatType =     "float"
StringType =    "string"
ListType =      "List"
TomeRefType =   "TomeRef"
TomeType =      "Tome"

PRIMITIVE_TYPES = {BoolType, IntType, FloatType}
PARAMETERIZED_TYPES = {ListType, TomeRefType}
ALL_TYPES = {BoolType, IntType, FloatType, StringType, ListType, TomeRefType, TomeType}
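These spec tuples are plain value objects that the parser emits; a minimal sketch of building one nested spec by hand (all values are illustrative):

from collections import namedtuple

TypeSpec = namedtuple("TypeSpec", ["name", "subtype"])
PropSpec = namedtuple("PropSpec", ["type", "name", "annotations", "pos"])

# A List<int> property named "scores", declared at source position 42.
int_type = TypeSpec(name="int", subtype=None)
list_type = TypeSpec(name="List", subtype=int_type)
prop = PropSpec(type=list_type, name="scores", annotations=[], pos=42)

print(prop.type.name, prop.type.subtype.name, prop.name)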