Example #1
    def filter_product(self, data, sap_code, base_version, version='latest'):
        """Find product information from a feed dump given a single sap_code, base version and optional version."""
        product = {'version': '0.0.1'}
        channels = self.env.get('channels').split(',')

        # tested with 12 inputs to the ccpinfo dict via BridgeCC
        for channel in data['channel']:
            if channel['name'] not in channels:
                continue

            for prod in channel['products']['product']:
                if prod['id'] != sap_code:
                    continue

                if base_version and prod['platforms']['platform'][0]['languageSet'][0].get('baseVersion') != base_version:
                    continue

                if 'version' not in prod:
                    self.output('product has no version: {}'.format(prod['displayName']))
                    continue

                if version == "latest":
                    if LV(prod['version']) > LV(product['version']):
                        product = prod
                else:
                    if prod['version'] == version:
                        product = prod

        if 'platforms' not in product:
            return None

        return product
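The loop above leans on LooseVersion's numeric ordering, seeding the search with a '0.0.1' sentinel that any real product version sorts above. A minimal, self-contained sketch of that selection (the version strings here are made up):

from distutils.version import LooseVersion as LV

versions = ['1.9.2', '1.10.0', '1.10.0.36']
latest = '0.0.1'  # same sentinel idea as the example above
for v in versions:
    if LV(v) > LV(latest):  # numeric ordering: 1.10 sorts above 1.9
        latest = v
print(latest)  # -> 1.10.0.36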
Example #2
    def compile(cls):
        cdict = super().compile()
        if cdict.get("provider_type", "") == "":
            cdict.pop("provider_type", "")
        if cdict.get("value_type", "") == "":
            cdict.pop("value_type", "")

        CALM_VERSION = Version.get_version("Calm")
        if LV(CALM_VERSION) < LV("3.2.0"):
            value_type = cdict.pop("value_type")
            cdict["attrs"]["value_type"] = value_type

        else:
            value_type = cdict.get("value_type", "IP")
            if value_type == "VM":
                account = cdict["attrs"]["account_reference"]
                account_name = account["name"]
                account_data = Cache.get_entity_data(
                    entity_type=CACHE.ENTITY.ACCOUNT, name=account_name)
                if not account_data:
                    LOG.error("Account {} not found".format(account_name))
                    sys.exit(-1)

                provider_type = account_data["provider_type"]
                if provider_type not in ["nutanix_pc", "vmware"]:
                    LOG.error("Provider {} not supported for endpoints".format(
                        provider_type))
                    sys.exit(-1)

                cdict["provider_type"] = provider_type.upper()

        return cdict
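A minimal sketch of the version-gated field placement used above, with a hypothetical place_value_type helper standing in for the compile branch: before Calm 3.2.0 the value_type field lives under attrs, afterwards it stays top-level.

from distutils.version import LooseVersion as LV

def place_value_type(cdict, server_version):
    # hypothetical helper mirroring the branch in the example above
    if LV(server_version) < LV("3.2.0"):
        cdict.setdefault("attrs", {})["value_type"] = cdict.pop("value_type")
    return cdict

print(place_value_type({"value_type": "IP"}, "3.1.1"))
# -> {'attrs': {'value_type': 'IP'}}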
Example #3
    def get_cache_tables(cls, sync_version=False):
        """returns tables used for cache purpose"""

        db = get_db_handle()
        db_tables = db.registered_tables

        # Get calm version from api only if necessary
        calm_version = CALM_VERSION
        if sync_version or (not calm_version):
            context = get_context()
            server_config = context.get_server_config()
            client = get_client_handle_obj(
                server_config["pc_ip"],
                server_config["pc_port"],
                auth=(server_config["pc_username"], server_config["pc_password"]),
            )
            res, err = client.version.get_calm_version()
            if err:
                LOG.error("Failed to get version")
                sys.exit(err["error"])
            calm_version = res.content.decode("utf-8")

        cache_tables = {}
        for table in db_tables:
            if hasattr(table, "__cache_type__") and (
                LV(calm_version) >= LV(table.feature_min_version)
            ):
                cache_tables[table.__cache_type__] = table
        return cache_tables
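A runnable sketch of the same registry filter, with a hypothetical FakeTable standing in for the registered db tables:

from distutils.version import LooseVersion as LV

class FakeTable:  # hypothetical stand-in for a registered db table
    def __init__(self, cache_type, min_version):
        self.__cache_type__ = cache_type
        self.feature_min_version = min_version

tables = [FakeTable("account", "2.9.0"), FakeTable("policy", "3.5.0")]
usable = {t.__cache_type__: t for t in tables
          if LV("3.2.0") >= LV(t.feature_min_version)}
print(sorted(usable))  # -> ['account']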
Example #4
File: utils.py Project: jkntnx/calm-dsl
    def invoke(self, ctx):

        if not ctx.protected_args:
            return super(FeatureFlagMixin, self).invoke(ctx)

        cmd_name = ctx.protected_args[0]

        feature_min_version = self.feature_version_map.get(cmd_name, "")
        if feature_min_version:
            calm_version = Version.get_version("Calm")
            if not calm_version:
                LOG.error("Calm version not found. Please update cache")
                sys.exit(-1)

            if LV(calm_version) >= LV(feature_min_version):
                return super().invoke(ctx)

            else:
                LOG.warning(
                    "Please update Calm (v{} -> >=v{}) to use this command.".
                    format(calm_version, feature_min_version))
                return None

        else:
            return super().invoke(ctx)
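The gating test reduces to a single comparison. A sketch with a made-up feature_version_map entry:

from distutils.version import LooseVersion as LV

feature_version_map = {"scheduler": "3.3.0"}  # hypothetical gate table

def is_supported(cmd_name, server_version):
    minimum = feature_version_map.get(cmd_name, "")
    return not minimum or LV(server_version) >= LV(minimum)

print(is_supported("scheduler", "3.2.2"))  # -> False
print(is_supported("scheduler", "3.3.0"))  # -> True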
Example #5
def get_validators_with_defaults(schema_props):

    validators = {}
    defaults = {}
    display_map = bidict()
    for name, props in schema_props.items():
        calm_version = Version.get_version("Calm")

        # dev machines do not follow standard version protocols. Avoid matching there
        attribute_min_version = str(props.get("x-calm-dsl-min-version", ""))
        if not calm_version:
            # fall back to 2.9.0 when the server version is unknown
            calm_version = "2.9.0"

        # If attribute version is less than calm version, ignore it
        if attribute_min_version and LV(attribute_min_version) > LV(calm_version):
            continue

        ValidatorType, is_array, default = get_validator_details(schema_props, name)
        attr_name = props.get("x-calm-dsl-display-name", name)
        validators[attr_name] = (ValidatorType, is_array)
        defaults[attr_name] = default
        display_map[attr_name] = name

    return validators, defaults, display_map
Example #6
def create_environment_payload(UserEnvironment, metadata=dict()):
    """
    Creates environment payload
    Args:
        UserEnvironment(object): Environment object
        metadata (dict) : Metadata for environment
    Returns:
        response(tuple): tuple consisting of environment payload object and error
    """

    err = {"error": "", "code": -1}

    if UserEnvironment is None:
        err["error"] = "Given environment is empty."
        return None, err

    if not isinstance(UserEnvironment, EnvironmentType):
        err["error"] = "Given environment is not of type Environment"
        return None, err

    spec = {
        "name": UserEnvironment.__name__,
        "description": UserEnvironment.__doc__ or "",
        "resources": UserEnvironment,
    }

    env_project = metadata.get("project_reference", {}).get("name", "")
    if not env_project:
        ContextObj = get_context()
        project_config = ContextObj.get_project_config()
        env_project = project_config["name"]

    project_cache_data = Cache.get_entity_data(
        entity_type=CACHE.ENTITY.PROJECT, name=env_project)
    if not project_cache_data:
        LOG.error("Project {} not found.".format(env_project))
        sys.exit("Project {} not found.".format(env_project))

    metadata_payload = {
        "spec_version": 1,
        "kind": "environment",
        "name": UserEnvironment.__name__,
        "uuid": str(uuid.uuid4()),
    }

    calm_version = Version.get_version("Calm")
    if LV(calm_version) >= LV("3.2.0"):
        metadata_payload["project_reference"] = {
            "kind": "project",
            "name": project_cache_data["name"],
            "uuid": project_cache_data["uuid"],
        }

    UserEnvironmentPayload = _environment_payload()
    UserEnvironmentPayload.metadata = metadata_payload
    UserEnvironmentPayload.spec = spec

    return UserEnvironmentPayload, None
Example #7
    def compile(cls):
        cdict = super().compile()

        cdict["account_reference_list"] = []
        cdict["subnet_reference_list"] = []
        cdict["external_network_list"] = []
        cdict["default_subnet_reference"] = {}

        CALM_VERSION = Version.get_version("Calm")
        default_subnet_reference = None

        # Populate accounts
        provider_list = cdict.pop("provider_list", [])
        for provider_obj in provider_list:
            provider_data = provider_obj.get_dict()

            if provider_obj.type == "nutanix_pc":
                if "subnet_reference_list" in provider_data:
                    cdict["subnet_reference_list"].extend(
                        provider_data["subnet_reference_list"])

                if "external_network_list" in provider_data:
                    for _network in provider_data["external_network_list"]:
                        _network.pop("kind", None)
                        cdict["external_network_list"].append(_network)

                if "default_subnet_reference" in provider_data:
                    # From 3.2, only subnets from local account can be marked as default
                    if provider_data.get("subnet_reference_list"
                                         ) or LV(CALM_VERSION) < LV("3.2.0"):
                        cdict["default_subnet_reference"] = provider_data[
                            "default_subnet_reference"]

            if "account_reference" in provider_data:
                cdict["account_reference_list"].append(
                    provider_data["account_reference"])

        quotas = cdict.pop("quotas", None)
        if quotas:
            project_resources = []
            for qk, qv in quotas.items():
                if qk != "VCPUS":
                    qv *= 1073741824

                project_resources.append({"limit": qv, "resource_type": qk})

            cdict["resource_domain"] = {"resources": project_resources}

        # pop out unnecessary attributes
        cdict.pop("environment_definition_list", None)
        # empty dict is not accepted for default_environment_reference
        default_env = cdict.get("default_environment_reference")
        if not default_env:
            cdict.pop("default_environment_reference", None)

        if not cdict.get("default_subnet_reference"):
            cdict.pop("default_subnet_reference", None)
        return cdict
Example #8
def validate_version():

    # When the DSL is initializing, the version might not be in the cache yet
    calm_version = Version.get_version("Calm")
    if calm_version:
        if LV(calm_version) < LV(LATEST_VERIFIED_VERSION):
            LOG.warning(
                "Calm server version ({}) is less than the verified version ({})."
                .format(calm_version, LATEST_VERIFIED_VERSION))
Example #9
    def test_endpoint_validation_and_type_update2(self, EndpointPayload):
        """
        test_endpoint_name_validations
        """

        client = get_api_client()
        endpoint = copy.deepcopy(change_uuids(EndpointPayload, {}))

        # pick the expected validation message for this Calm version
        CALM_VERSION = Version.get_version("Calm")
        if LV(CALM_VERSION) < LV("3.3.0"):
            message = (
                "Name can contain only alphanumeric, underscores, hyphens and spaces"
            )
        else:
            message = "Name can contain only unicode characters, underscores, hyphens and spaces"

        endpoint["spec"]["name"] = "ep-$.-name1" + str(uuid.uuid4())[-10:]
        # Endpoint Create
        res, err = client.endpoint.create(endpoint)
        if not err:
            pytest.fail(
                "Endpoint created successfully with unsupported name formats")
        assert err.get("code", 0) == 422
        assert message in res.text

        endpoint["spec"]["name"] = "endpoint_" + str(uuid.uuid4())[-10:]
        res, err = client.endpoint.create(endpoint)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_uuid = ep["metadata"]["uuid"]
        ep_name = ep["spec"]["name"]
        print(">> Endpoint created: {}".format(ep_name))

        del ep["status"]
        ep["spec"]["name"] = "-test_ep_name_" + str(uuid.uuid4())[-10:]

        if LV(CALM_VERSION) < LV("3.3.0"):
            message = (
                "Names can only start with alphanumeric characters or underscore (_)"
            )
        else:
            message = "Names can only start with unicode characters or underscore (_)"
        res, err = client.endpoint.update(ep_uuid, ep)
        if not err:
            pytest.fail(
                "Endpoint updated successfully with unsupported name formats")
        assert err.get("code", 0) == 422
        assert message in res.text

        # delete the endpoint
        _, err = client.endpoint.delete(ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("endpoint {} deleted".format(ep_name))
Example #10
    def new_version(self):
        """
        compare current version with version in db
        """

        self.remote_version = DB.get_config('version_remote')
        self.message = DB.get_config('version_msg')

        if not self.remote_version:
            return False
        elif LV(self.local_version) < LV(self.remote_version):
            return True
        else:
            return False
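The update check above condenses into a small predicate. A sketch assuming an empty remote version means "unknown":

from distutils.version import LooseVersion as LV

def update_available(local_version, remote_version):
    # an empty remote version means "unknown", treated as no update
    return bool(remote_version) and LV(local_version) < LV(remote_version)

print(update_available("1.4.0", "1.4.1"))  # -> True
print(update_available("1.4.0", ""))       # -> False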
Example #11
    def run(self, uuid, payload):
        from calm.dsl.store.version import Version

        calm_version = Version.get_version("Calm")
        runbook_run_api = self.RUN
        if LV(calm_version) >= LV("3.3.2"):
            runbook_run_api = self.EXECUTE

        return self.connection._call(
            runbook_run_api.format(uuid),
            verify=False,
            request_json=payload,
            method=REQUEST.METHOD.POST,
        )
Example #12
    def get_api_obj(cls):
        """returns object to call ahv provider specific apis"""

        client = get_api_client()
        calm_version = Version.get_version("Calm")
        api_handlers = AWSBase.api_handlers

        latest_version = "0"
        for version in api_handlers.keys():
            if LV(version) <= LV(calm_version) and LV(latest_version) < LV(
                    version):
                latest_version = version

        api_handler = api_handlers[latest_version]
        return api_handler(client.connection)
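The handler search is "newest registered version not exceeding the server version". A sketch with a hypothetical registry (it assumes at least one handler qualifies, otherwise max() raises ValueError):

from distutils.version import LooseVersion as LV

api_handlers = {"2.9.0": "HandlerV29", "3.0.0": "HandlerV30",
                "3.5.0": "HandlerV35"}  # hypothetical registry
server = "3.2.1"
best = max((v for v in api_handlers if LV(v) <= LV(server)), key=LV)
print(api_handlers[best])  # -> HandlerV30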
Example #13
    def translateAndCapitalizeNamespaces(self, text):
        """
        Makes sure that localized namespace names are used.
        """
        # arz uses English-style codes
        if self.site.sitename() == 'wikipedia:arz':
            return text
        family = self.site.family
        # wiki links aren't parsed here.
        exceptions = ['nowiki', 'comment', 'math', 'pre']

        for nsNumber in self.site.namespaces():
            if nsNumber in (0, 2, 3):
                # skip main (article) namespace
                # skip user namespace, maybe gender is used
                continue
            # a clone is needed. Won't change the namespace dict
            namespaces = list(self.site.namespace(nsNumber, all=True))
            thisNs = namespaces.pop(0)
            if nsNumber == 6 and family.name == 'wikipedia':
                if self.site.lang in ('en', 'fr') and \
                   LV(self.site.version()) >= LV('1.14'):
                    # do not change "Image" on en-wiki and fr-wiki
                    assert u'Image' in namespaces
                    namespaces.remove(u'Image')
                if self.site.lang == 'hu':
                    # do not change "Kép" on hu-wiki
                    assert u'Kép' in namespaces
                    namespaces.remove(u'Kép')
                elif self.site.lang == 'pt':
                    # bug #3346901 should be implemented
                    continue
            # lowercased and underscored namespaces
            for i in range(len(namespaces)):
                item = namespaces[i].replace(' ', '[ _]')
                item = u'[%s%s]' % (item[0], item[0].lower()) + item[1:]
                namespaces[i] = item
            namespaces.append(thisNs[0].lower() + thisNs[1:])
            if thisNs and namespaces:
                text = pywikibot.replaceExcept(
                    text,
                    r'\[\[\s*(%s) *:(?P<nameAndLabel>.*?)\]\]'
                    % '|'.join(namespaces),
                    r'[[%s:\g<nameAndLabel>]]' % thisNs,
                    exceptions)
        return text
Example #14
def create_environment_payload(UserEnvironment):

    err = {"error": "", "code": -1}

    if UserEnvironment is None:
        err["error"] = "Given environment is empty."
        return None, err

    if not isinstance(UserEnvironment, EnvironmentType):
        err["error"] = "Given environment is not of type Environment"
        return None, err

    spec = {
        "name": UserEnvironment.__name__,
        "description": UserEnvironment.__doc__ or "",
        "resources": UserEnvironment,
    }

    ContextObj = get_context()
    project_config = ContextObj.get_project_config()
    project_cache_data = Cache.get_entity_data(
        entity_type=CACHE.ENTITY.PROJECT, name=project_config["name"])
    if not project_cache_data:
        LOG.error("Project {} not found.".format(project_config["name"]))
        sys.exit(-1)

    metadata = {
        "spec_version": 1,
        "kind": "environment",
        "name": UserEnvironment.__name__,
    }

    calm_version = Version.get_version("Calm")
    if LV(calm_version) >= LV("3.2.0"):
        metadata["project_reference"] = {
            "kind": "project",
            "name": project_cache_data["name"],
            "uuid": project_cache_data["uuid"],
        }

    UserEnvironmentPayload = _environment_payload()
    UserEnvironmentPayload.metadata = metadata
    UserEnvironmentPayload.spec = spec

    return UserEnvironmentPayload, None
Example #15
    def convert(self, value, param, ctx):

        if self.feature_min_version:
            calm_version = Version.get_version("Calm")
            if not calm_version:
                LOG.error("Calm version not found. Please update cache")
                sys.exit(-1)

            # TODO add the pc version to warning also
            if LV(calm_version) < LV(self.feature_min_version):
                LOG.error(
                    "Calm {} does not support '{}' option. Please upgrade server to Calm {}"
                    .format(calm_version, param.name,
                            self.feature_min_version))
                sys.exit(-1)

        # Add validation for file types etc.
        return value
Example #16
def get_package_version():
    try:
        proc = subprocess.run(['radicale', '--version'],
                              stdout=subprocess.PIPE, check=True)
        output = proc.stdout.decode('utf-8')
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None

    return LV(output.strip())
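The same subprocess-to-LooseVersion pattern, pointed at the Python interpreter so it runs anywhere (assumes python3 is on PATH):

import subprocess
from distutils.version import LooseVersion as LV

proc = subprocess.run(['python3', '--version'],
                      stdout=subprocess.PIPE, check=True)
# "Python 3.x.y" -> "3.x.y"
print(LV(proc.stdout.decode('utf-8').strip().split()[-1]) >= LV('3.6'))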
Example #17
def get_vmware_vm_data_with_version_filtering(vm_data):
    """returns instance_data_according_to_version_filter"""

    CALM_VERSION = Version.get_version("Calm")

    instance_id = vm_data["instance_id"]
    instance_name = vm_data["instance_name"]

    if LV(CALM_VERSION) >= LV("3.3.0"):
        hostname = vm_data["guest_hostname"]
        address = ",".join(vm_data["guest_ipaddress"])
        vcpus = vm_data["cpu"]
        sockets = vm_data["num_vcpus_per_socket"]
        memory = int(vm_data["memory"]) // 1024
        guest_family = vm_data.get("guest_family", "")
        template = vm_data.get("is_template", False)

    else:
        hostname = vm_data["guest.hostName"]
        address = ",".join(vm_data["guest.ipAddress"])
        vcpus = vm_data["config.hardware.numCPU"]
        sockets = vm_data["config.hardware.numCoresPerSocket"]
        memory = int(vm_data["config.hardware.memoryMB"]) // 1024
        guest_family = vm_data.get("guest.guestFamily", "")
        template = vm_data.get("config.template", False)

    return (
        instance_id,
        instance_name,
        hostname,
        address,
        vcpus,
        sockets,
        memory,
        guest_family,
        template,
    )
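The two branches above differ only in field names, so the mapping can be data-driven. A sketch with hypothetical NEW_KEYS/OLD_KEYS maps covering two of the fields:

from distutils.version import LooseVersion as LV

# hypothetical field maps capturing the same >= 3.3.0 key rename
NEW_KEYS = {"hostname": "guest_hostname", "vcpus": "cpu"}
OLD_KEYS = {"hostname": "guest.hostName", "vcpus": "config.hardware.numCPU"}

def pick_fields(vm_data, calm_version):
    keys = NEW_KEYS if LV(calm_version) >= LV("3.3.0") else OLD_KEYS
    return {field: vm_data[key] for field, key in keys.items()}

print(pick_fields({"guest.hostName": "web01",
                   "config.hardware.numCPU": 2}, "3.2.0"))
# -> {'hostname': 'web01', 'vcpus': 2}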
Example #18
    def test_versions(self):
        import tensorflow as tf
        if mod_version:
            if not nvidia:
                self.assertEqual(LV(tf.__version__), LV(mod_version))
            else:
                print('NOTE: running NVIDIA container, skipping some tests...')

        if is_tf2:
            import tensorflow.keras as keras
        else:
            import keras
        self.assertGreaterEqual(LV(keras.__version__), LV("2.0"))

        if is_tf2:
            self.assertGreaterEqual(LV(keras.__version__), LV("2.2.4"))

        if expect_horovod:
            import horovod
            import horovod.tensorflow as hvd
            self.assertGreaterEqual(LV(horovod.__version__), LV("0.18.2"))
Example #19
    def __eq__(self, other):
        if not isinstance(other, InstalledRpm):
            return False

        if self.name != other.name:
            raise ValueError(
                'Cannot compare packages with differing names {0} != {1}'.
                format(self.name, other.name))
        if (not self._distribution) != (not other._distribution):
            raise ValueError(
                'Cannot compare packages that one has distribution while the other does not {0} != {1}'
                .format(self.package, other.package))

        eq_ret = (type(self) == type(other)
                  and LV(self.epoch) == LV(other.epoch)
                  and LV(self.version) == LV(other.version)
                  and LV(self.release) == LV(other.release))

        if self._distribution:
            return eq_ret and LV(self._distribution) == LV(other._distribution)
        else:
            return eq_ret
Example #20
    def __lt__(self, other):
        if not isinstance(other, InstalledRpm):
            return False

        if self == other:
            return False

        self_ep, other_ep = LV(self.epoch), LV(other.epoch)
        if self_ep != other_ep:
            return self_ep < other_ep

        self_v, other_v = LV(self.version), LV(other.version)
        if self_v != other_v:
            return self_v < other_v

        self_rl, other_rl = LV(self._release_sep), LV(other._release_sep)
        if self_rl != other_rl:
            return self_rl < other_rl

        # If we reach this point, the self == other test has determined that
        # we have a _distribution, so we rely on that.
        return LV(self._distribution) < LV(other._distribution)
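Both methods order RPMs by (epoch, version, release), each piece compared as a LooseVersion. A compact sketch of that piecewise ordering:

from distutils.version import LooseVersion as LV

def evr_lt(a, b):
    # compare (epoch, version, release) piecewise, like the methods above
    for x, y in zip(a, b):
        if LV(x) != LV(y):
            return LV(x) < LV(y)
    return False

print(evr_lt(("0", "1.2", "9.el7"), ("0", "1.2", "10.el7")))  # -> True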
Example #21
from keras.layers import LSTM, CuDNNLSTM
from keras.utils import to_categorical

from distutils.version import LooseVersion as LV
from keras import __version__
from keras import backend as K

from sklearn.model_selection import train_test_split

import os
import sys

import numpy as np

print('Using Keras version:', __version__, 'backend:', K.backend())
assert (LV(__version__) >= LV("2.0.0"))

# If we are using TensorFlow as the backend, we can use TensorBoard to
# visualize our progress during training.

if K.backend() == "tensorflow":
    import tensorflow as tf
    from keras.callbacks import TensorBoard
    import os, datetime
    logdir = os.path.join(
        os.getcwd(), "logs",
        "20ng-cnn-" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    print('TensorBoard log directory:', logdir)
    try:
        os.makedirs(logdir)
        callbacks = [TensorBoard(log_dir=logdir)]
    except OSError:
        # assumed fallback: continue without TensorBoard if logdir creation fails
        callbacks = []
Example #22
from distutils.version import LooseVersion as LV
import os
import horovod.torch as hvd

torch.manual_seed(42)
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

hvd.init()
torch.cuda.set_device(hvd.local_rank())

if hvd.rank() == 0:
    print('Using PyTorch version:', torch.__version__, ' Device:', device)
    assert (LV(torch.__version__) >= LV("1.0.0"))

subpath = 'dogs-vs-cats/train-2000'

if 'DATADIR' in os.environ:
    DATADIR = os.environ['DATADIR']
else:
    DATADIR = "/scratch/project_2003747/data/"

datapath = os.path.join(DATADIR, subpath)

if hvd.rank() == 0:
    print('Reading data from path:', datapath)

(nimages_train, nimages_validation, nimages_test) = (2000, 1000, 22000)
Example #23
def _data_path(path=None,
               force_update=False,
               update_path=True,
               download=True,
               name=None,
               check_version=False,
               return_version=False,
               archive_name=None):
    """Aux function."""
    key = {
        'fake': 'MNE_DATASETS_FAKE_PATH',
        'misc': 'MNE_DATASETS_MISC_PATH',
        'sample': 'MNE_DATASETS_SAMPLE_PATH',
        'spm': 'MNE_DATASETS_SPM_FACE_PATH',
        'somato': 'MNE_DATASETS_SOMATO_PATH',
        'brainstorm': 'MNE_DATASETS_BRAINSTORM_PATH',
        'testing': 'MNE_DATASETS_TESTING_PATH',
        'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH',
    }[name]

    path = _get_path(path, key, name)
    # To update the testing or misc dataset, push commits, then make a new
    # release on GitHub. Then update the "releases" variable:
    releases = dict(testing='0.25', misc='0.1')
    # And also update the "hashes['testing']" variable below.

    # To update any other dataset, update the data archive itself (upload
    # an updated version) and update the hash.
    archive_names = dict(
        misc='mne-misc-data-%s.tar.gz' % releases['misc'],
        sample='MNE-sample-data-processed.tar.gz',
        somato='MNE-somato-data.tar.gz',
        spm='MNE-spm-face.tar.gz',
        testing='mne-testing-data-%s.tar.gz' % releases['testing'],
        multimodal='MNE-multimodal-data.tar.gz',
        fake='foo.tgz',
    )
    if archive_name is not None:
        archive_names.update(archive_name)
    folder_names = dict(
        brainstorm='MNE-brainstorm-data',
        fake='foo',
        misc='MNE-misc-data',
        sample='MNE-sample-data',
        somato='MNE-somato-data',
        multimodal='MNE-multimodal-data',
        spm='MNE-spm-face',
        testing='MNE-testing-data',
    )
    urls = dict(
        brainstorm='https://mne-tools.s3.amazonaws.com/datasets/'
        'MNE-brainstorm-data/%s',
        fake='https://github.com/mne-tools/mne-testing-data/raw/master/'
        'datasets/%s',
        misc='https://codeload.github.com/mne-tools/mne-misc-data/'
        'tar.gz/%s' % releases['misc'],
        sample="https://mne-tools.s3.amazonaws.com/datasets/%s",
        somato='https://mne-tools.s3.amazonaws.com/datasets/%s',
        spm='https://mne-tools.s3.amazonaws.com/datasets/%s',
        testing='https://codeload.github.com/mne-tools/mne-testing-data/'
        'tar.gz/%s' % releases['testing'],
        multimodal='https://ndownloader.figshare.com/files/5999598',
    )
    hashes = dict(
        brainstorm=None,
        fake='3194e9f7b46039bb050a74f3e1ae9908',
        misc='f0708d8914cf2692fee7b6c9f105e71c',
        sample='1d5da3a809fded1ef5734444ab5bf857',
        somato='f3e3a8441477bb5bacae1d0c6e0964fb',
        spm='f61041e3f3f2ba0def8a2ca71592cc41',
        testing='217aed43e361c86b622dc0363ae3cef4',
        multimodal='26ec847ae9ab80f58f204d09e2c08367',
    )
    folder_origs = dict(  # not listed means None
        misc='mne-misc-data-%s' % releases['misc'],
        testing='mne-testing-data-%s' % releases['testing'],
    )
    folder_name = folder_names[name]
    archive_name = archive_names[name]
    hash_ = hashes[name]
    url = urls[name]
    folder_orig = folder_origs.get(name, None)
    if '%s' in url:
        url = url % archive_name

    folder_path = op.join(path, folder_name)
    if name == 'brainstorm':
        extract_path = folder_path
        folder_path = op.join(folder_path, archive_names[name].split('.')[0])

    rm_archive = False
    martinos_path = '/cluster/fusion/sample_data/' + archive_name
    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name

    if not op.exists(folder_path) and not download:
        return ''
    if not op.exists(folder_path) or force_update:
        if name == 'brainstorm':
            if '--accept-brainstorm-license' in sys.argv:
                answer = 'y'
            else:
                answer = input('%sAgree (y/[n])? ' % _bst_license_text)
            if answer.lower() != 'y':
                raise RuntimeError('You must agree to the license to use this '
                                   'dataset')
        logger.info('Downloading or reinstalling '
                    'data archive %s at location %s' % (archive_name, path))

        if op.exists(martinos_path):
            archive_name = martinos_path
        elif op.exists(neurospin_path):
            archive_name = neurospin_path
        else:
            archive_name = op.join(path, archive_name)
            rm_archive = True
            fetch_archive = True
            if op.exists(archive_name):
                msg = ('Archive already exists. Overwrite it (y/[n])? ')
                answer = input(msg)
                if answer.lower() == 'y':
                    os.remove(archive_name)
                else:
                    fetch_archive = False

            if fetch_archive:
                _fetch_file(url,
                            archive_name,
                            print_destination=False,
                            hash_=hash_)

        if op.exists(folder_path):

            def onerror(func, path, exc_info):
                """Deal with access errors (e.g. testing dataset read-only)."""
                # Is the error an access error ?
                do = False
                if not os.access(path, os.W_OK):
                    perm = os.stat(path).st_mode | stat.S_IWUSR
                    os.chmod(path, perm)
                    do = True
                if not os.access(op.dirname(path), os.W_OK):
                    dir_perm = (os.stat(op.dirname(path)).st_mode
                                | stat.S_IWUSR)
                    os.chmod(op.dirname(path), dir_perm)
                    do = True
                if do:
                    func(path)
                else:
                    raise

            shutil.rmtree(folder_path, onerror=onerror)

        logger.info('Decompressing the archive: %s' % archive_name)
        logger.info('(please be patient, this can take some time)')
        for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
            try:
                if name != 'brainstorm':
                    extract_path = path
                tf = tarfile.open(archive_name, 'r:%s' % ext)
                tf.extractall(path=extract_path)
                tf.close()
                break
            except tarfile.ReadError as err:
                logger.info('%s is %s trying "bz2"' % (archive_name, err))
        if folder_orig is not None:
            shutil.move(op.join(path, folder_orig), folder_path)

        if rm_archive:
            os.remove(archive_name)

    path = _do_path_update(path, update_path, key, name)
    path = op.join(path, folder_name)

    # compare the version of the dataset and mne
    data_version = _dataset_version(path, name)
    try:
        from distutils.version import LooseVersion as LV
    except ImportError:
        warn('Could not determine %s dataset version; dataset could '
             'be out of date. Please install the "distutils" package.' % name)
    else:  # 0.7 < 0.7.git should be False, therefore strip
        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
            warn('The {name} dataset (version {current}) is older than '
                 'mne-python (version {newest}). If the examples fail, '
                 'you may need to update the {name} dataset by using '
                 'mne.datasets.{name}.data_path(force_update=True)'.format(
                     name=name, current=data_version, newest=mne_version))
    return (path, data_version) if return_version else path
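Why the '.git' suffix is stripped before comparing: LooseVersion sorts a dev suffix above the plain release, which would make an up-to-date dataset look stale.

from distutils.version import LooseVersion as LV

print(LV('0.7') < LV('0.7.git'))                  # -> True (unwanted)
print(LV('0.7') < LV('0.7.git'.strip('.git')))    # -> False after stripping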
Example #24
def _data_path(path=None,
               force_update=False,
               update_path=True,
               download=True,
               name=None,
               check_version=True,
               verbose=None):
    """Aux function
    """
    key = {
        'sample': 'MNE_DATASETS_SAMPLE_PATH',
        'spm': 'MNE_DATASETS_SPM_FACE_PATH'
    }[name]

    if path is None:
        # use an intelligent guess if it's not defined
        def_path = op.realpath(
            op.join(op.dirname(__file__), '..', '..', 'examples'))

        path = get_config(key, def_path)
        # use the same for all datasets
        if not os.path.exists(path):
            path = def_path

    if not isinstance(path, string_types):
        raise ValueError('path must be a string or None')

    if name == 'sample':
        archive_name = "MNE-sample-data-processed.tar.gz"
        url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/" + archive_name
        folder_name = "MNE-sample-data"
        folder_path = op.join(path, folder_name)
        rm_archive = False
    elif name == 'spm':
        archive_name = 'MNE-spm-face.tar.bz2'
        url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
        folder_name = "MNE-spm-face"
        folder_path = op.join(path, folder_name)
        rm_archive = False
    else:
        raise ValueError('Sorry, the dataset "%s" is not known.' % name)

    martinos_path = '/cluster/fusion/sample_data/' + archive_name
    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name

    if not op.exists(folder_path) and not download:
        return ''

    if not op.exists(folder_path) or force_update:
        logger.info(
            'Downloading or reinstalling data archive %s at location %s' %
            (archive_name, path))

        if op.exists(martinos_path):
            archive_name = martinos_path
        elif op.exists(neurospin_path):
            archive_name = neurospin_path
        else:
            archive_name = op.join(path, archive_name)
            rm_archive = True
            fetch_archive = True
            if op.exists(archive_name):
                msg = ('Archive already exists. Overwrite it (y/[n])? ')
                answer = raw_input(msg)
                if answer.lower() == 'y':
                    os.remove(archive_name)
                else:
                    fetch_archive = False

            if fetch_archive:
                _fetch_file(url, archive_name, print_destination=False)

        if op.exists(folder_path):
            shutil.rmtree(folder_path)

        logger.info('Decompressing the archive: ' + archive_name)
        logger.info('... please be patient, this can take some time')
        for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
            try:
                tarfile.open(archive_name, 'r:%s' % ext).extractall(path=path)
                break
            except tarfile.ReadError as err:
                logger.info('%s is %s trying "bz2"' % (archive_name, err))

        if rm_archive:
            os.remove(archive_name)

    path = op.abspath(path)
    if update_path is None:
        if get_config(key, '') != path:
            update_path = True
            msg = ('Do you want to set the path:\n    %s\nas the default '
                   'sample dataset path in the mne-python config [y]/n? ' %
                   path)
            answer = raw_input(msg)
            if answer.lower() == 'n':
                update_path = False
        else:
            update_path = False

    if update_path is True:
        set_config(key, path)

    path = op.join(path, folder_name)

    # compare the version of the Sample dataset and mne
    data_version = _dataset_version(path, name)
    try:
        from distutils.version import LooseVersion as LV
    except ImportError:
        warn('Could not determine sample dataset version; dataset could\n'
             'be out of date. Please install the "distutils" package.')
    else:  # 0.7 < 0.7.git should be False, therefore strip
        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
            warn('The {name} dataset (version {current}) is older than '
                 'mne-python (version {newest}). If the examples fail, '
                 'you may need to update the {name} dataset by using '
                 'mne.datasets.{name}.data_path(force_update=True)'.format(
                     name=name, current=data_version, newest=mne_version))

    return path
Example #25
def get_accounts(name, filter_by, limit, offset, quiet, all_items,
                 account_type):
    """Get the accounts, optionally filtered by a string"""

    client = get_api_client()
    calm_version = Version.get_version("Calm")

    params = {"length": limit, "offset": offset}
    filter_query = ""
    if name:
        filter_query = get_name_query([name])
    if filter_by:
        filter_query = filter_query + ";(" + filter_by + ")"
    if account_type:
        filter_query += ";(type=={})".format(",type==".join(account_type))
    if all_items:
        filter_query += get_states_filter(ACCOUNT.STATES)

    # Remove PE accounts for versions >= 2.9.0 (TODO move to constants)
    if LV(calm_version) >= LV("2.9.0"):
        filter_query += ";type!=nutanix"

    if filter_query.startswith(";"):
        filter_query = filter_query[1:]

    if filter_query:
        params["filter"] = filter_query

    res, err = client.account.list(params)

    if err:
        ContextObj = get_context()
        server_config = ContextObj.get_server_config()
        pc_ip = server_config["pc_ip"]

        LOG.warning("Cannot fetch accounts from {}".format(pc_ip))
        return

    res = res.json()
    total_matches = res["metadata"]["total_matches"]
    if total_matches > limit:
        LOG.warning(
            "Displaying {} out of {} entities. Please use --limit and --offset option for more results."
            .format(limit, total_matches))

    json_rows = res["entities"]
    if not json_rows:
        click.echo(highlight_text("No account found !!!\n"))
        return

    if quiet:
        for _row in json_rows:
            row = _row["status"]
            click.echo(highlight_text(row["name"]))
        return

    table = PrettyTable()
    table.field_names = [
        "NAME",
        "ACCOUNT TYPE",
        "STATE",
        "OWNER",
        "CREATED ON",
        "LAST UPDATED",
        "UUID",
    ]

    for _row in json_rows:
        row = _row["status"]
        metadata = _row["metadata"]

        creation_time = int(metadata["creation_time"]) // 1000000
        last_update_time = int(metadata["last_update_time"]) // 1000000
        if "owner_reference" in metadata:
            owner_reference_name = metadata["owner_reference"]["name"]
        else:
            owner_reference_name = "-"

        table.add_row([
            highlight_text(row["name"]),
            highlight_text(row["resources"]["type"]),
            highlight_text(row["resources"]["state"]),
            highlight_text(owner_reference_name),
            highlight_text(time.ctime(creation_time)),
            "{}".format(arrow.get(last_update_time).humanize()),
            highlight_text(metadata["uuid"]),
        ])
    click.echo(table)
Example #26
class TestProjectCommands:
    def setup_method(self):
        """"Reset the context changes"""
        ContextObj = get_context()
        ContextObj.reset_configuration()

    def teardown_method(self):
        """"Reset the context changes"""
        ContextObj = get_context()
        ContextObj.reset_configuration()

    def test_projects_list(self):
        runner = CliRunner()
        result = runner.invoke(cli, ["get", "projects"])
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project Get failed")
        LOG.info("Success")

    def test_compile_project(self):
        runner = CliRunner()
        LOG.info("Compiling Project file at {}".format(DSL_PROJECT_PATH))
        result = runner.invoke(
            cli, ["compile", "project", "--file={}".format(DSL_PROJECT_PATH)])

        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project compile command failed")
        LOG.info("Success")

    def test_project_crud(self):
        """
        It will cover create/describe/update/delete/get commands on project
        This test assumes users/groups mentioned in project file are already created
        """

        # Create operation
        self._test_project_create_using_dsl()

        # Read operations
        click.echo("")
        self._test_project_describe_out_json()
        click.echo("")
        self._test_project_describe_out_text()
        click.echo("")
        self._test_project_list_name_filter()

        # Update operations
        click.echo("")
        self._test_update_project_using_cli_switches()
        click.echo("")
        self._test_update_project_using_dsl_file()

        # Delete operations
        click.echo("")
        self._test_project_delete()

    def _test_project_create_using_dsl(self):

        runner = CliRunner()
        self.dsl_project_name = "Test_DSL_Project_{}".format(str(uuid.uuid4()))
        LOG.info("Testing 'calm create project' command")
        result = runner.invoke(
            cli,
            [
                "create",
                "project",
                "--file={}".format(DSL_PROJECT_PATH),
                "--name={}".format(self.dsl_project_name),
                "--description='Test DSL Project to delete'",
            ],
        )
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project creation from python file failed")
        LOG.info("Success")

    def _test_project_describe_out_text(self):

        runner = CliRunner()
        LOG.info("Testing 'calm describe project --out text' command")
        result = runner.invoke(cli,
                               ["describe", "project", self.dsl_project_name])
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project Get call failed")

        project_name_str = "Name: {}".format(self.dsl_project_name)
        assert project_name_str in result.output
        LOG.info("Success")

    def _test_project_describe_out_json(self):

        runner = CliRunner()
        LOG.info("Testing 'calm describe project --out json' command")
        result = runner.invoke(
            cli,
            ["describe", "project", self.dsl_project_name, "--out", "json"])
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project Get call failed")

        project_name_str = '"name": "{}"'.format(self.dsl_project_name)
        assert project_name_str in result.output
        LOG.info("Success")

    def _test_project_list_name_filter(self):

        runner = CliRunner()
        LOG.info("Testing 'calm get projects --name <project_name>' command")
        result = runner.invoke(
            cli, ["get", "projects", "--name", self.dsl_project_name])
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project list call failed")

        assert self.dsl_project_name in result.output
        LOG.info("Success")

    def _test_update_project_using_cli_switches(self):
        """
        Adds user to given project.
        (User must be present in db)
        """

        runner = CliRunner()
        LOG.info("Testing 'calm update project' command using cli switches")
        result = runner.invoke(
            cli,
            [
                "update",
                "project",
                self.dsl_project_name,
                "--add_user",
                USER_NAME,
            ],
        )
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project update call failed")
        LOG.info("Success")

    def _test_update_project_using_dsl_file(self):
        """
        Removes user from given project.
        (User must be present in db)
        """

        runner = CliRunner()
        LOG.info("Testing 'calm update project' command using dsl file")
        result = runner.invoke(
            cli,
            [
                "update",
                "project",
                self.dsl_project_name,
                "--file={}".format(DSL_PROJECT_PATH),
            ],
        )
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project update call failed")
        LOG.info("Success")

    def _test_project_delete(self):

        runner = CliRunner()
        LOG.info("Testing 'calm delete project' command")
        result = runner.invoke(cli,
                               ["delete", "project", self.dsl_project_name])
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project delete call failed")
        LOG.info("Success")

    @pytest.mark.skipif(LV(CALM_VERSION) >= LV("3.2.0"),
                        reason="Env creation changed in 3.2.0")
    def test_project_with_env_create_and_delete(self):
        """
        Describe and update flow are already checked in `test_project_crud`
        It will test only create and delete flow on projects with environment
        """

        runner = CliRunner()
        self.dsl_project_name = "Test_DSL_Project_Env{}".format(
            str(uuid.uuid4()))
        LOG.info("Testing 'calm create project' command")
        result = runner.invoke(
            cli,
            [
                "create",
                "project",
                "--file={}".format(DSL_PROJECT_WITH_ENV_PATH),
                "--name={}".format(self.dsl_project_name),
                "--description='Test DSL Project with Env to delete'",
            ],
        )
        if result.exit_code:
            cli_res_dict = {
                "Output": result.output,
                "Exception": str(result.exception)
            }
            LOG.debug("Cli Response: {}".format(
                json.dumps(cli_res_dict, indent=4, separators=(",", ": "))))
            LOG.debug("Traceback: \n{}".format("".join(
                traceback.format_tb(result.exc_info[2]))))
            pytest.fail("Project creation from python file failed")
        LOG.info("Success")

        self._test_project_delete()

        # Restoring the metadata context
        get_metadata_payload(__file__)
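The skipif decorators used in this file gate whole tests on the server version. A minimal sketch (CALM_VERSION is hard-coded here; the code above reads it from the version cache):

import pytest
from distutils.version import LooseVersion as LV

CALM_VERSION = "3.1.0"  # assumed; normally Version.get_version("Calm")

@pytest.mark.skipif(LV(CALM_VERSION) >= LV("3.2.0"),
                    reason="Env creation changed in 3.2.0")
def test_old_env_flow():
    assert True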
Example #27

# project constants
DSL_CONFIG = json.loads(read_local_file(".tests/config.json"))
PROJECT = DSL_CONFIG["PROJECTS"]["PROJECT1"]
PROJECT_NAME = PROJECT["NAME"]
ENV_NAME = PROJECT["ENVIRONMENTS"][0]["NAME"]
BP_LAUNCH_PROFILE_NAME = "AhvVmProfile"

# calm_version
CALM_VERSION = Version.get_version("Calm")


@pytest.mark.skipif(
    LV(CALM_VERSION) < LV("3.2.0"),
    reason="Tests are for env changes introduced in 3.2.0",
)
class TestBpCommands:
    def setup_method(self):
        """Method to instantiate to created_bp_list and reset context"""

        # Resetting context
        ContextObj = get_context()
        ContextObj.reset_configuration()

        self.created_bp_list = []
        self.created_app_list = []

    def _wait_for_non_busy_state(self, app_name):
Example #28
def _data_path(path=None, force_update=False, update_path=True, download=True,
               name=None, check_version=True):
    """Aux function
    """
    key = {'sample': 'MNE_DATASETS_SAMPLE_PATH',
           'spm': 'MNE_DATASETS_SPM_FACE_PATH',
           'somato': 'MNE_DATASETS_SOMATO_PATH',
           'testing': 'MNE_DATASETS_TESTING_PATH',
           }[name]

    if path is None:
        # use an intelligent guess if it's not defined
        def_path = op.realpath(op.join(op.dirname(__file__),
                                       '..', '..', 'examples'))

        # backward compatibility
        if get_config(key) is None:
            key = 'MNE_DATA'

        path = get_config(key, def_path)

        # use the same for all datasets
        if not op.exists(path) or not os.access(path, os.W_OK):
            try:
                os.mkdir(path)
            except OSError:
                try:
                    logger.info("Checking for dataset in '~/mne_data'...")
                    path = op.join(op.expanduser("~"), "mne_data")
                    if not op.exists(path):
                        logger.info("Trying to create "
                                    "'~/mne_data' in home directory")
                        os.mkdir(path)
                except OSError:
                    raise OSError("User does not have write permissions "
                                  "at '%s', try giving the path as an argument"
                                  " to data_path() where user has write "
                                  "permissions, for ex:data_path"
                                  "('/home/xyz/me2/')" % (path))

    if not isinstance(path, string_types):
        raise ValueError('path must be a string or None')
    if name == 'sample':
        archive_name = "MNE-sample-data-processed.tar.gz"
        folder_name = "MNE-sample-data"
        url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/" + archive_name
        hash_ = '1bb9f993bfba2057e0039c306a717109'
    elif name == 'spm':
        archive_name = 'MNE-spm-face.tar.bz2'
        folder_name = "MNE-spm-face"
        url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
        hash_ = '3e9e83c642136e5b720e2ecc5dcc3244'
    elif name == 'somato':
        archive_name = 'MNE-somato-data.tar.gz'
        folder_name = "MNE-somato-data"
        url = 'ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/' + archive_name
        hash_ = 'f3e3a8441477bb5bacae1d0c6e0964fb'
    elif name == 'testing':
        archive_name = 'MNE-testing-data.tar.gz'
        folder_name = 'MNE-testing-data'
        url = 'http://lester.ilabs.uw.edu/files/' + archive_name
        hash_ = 'f66d60852e5f42a4940fb22bc1e92dc2'
    else:
        raise ValueError('Sorry, the dataset "%s" is not known.' % name)
    folder_path = op.join(path, folder_name)

    rm_archive = False
    martinos_path = '/cluster/fusion/sample_data/' + archive_name
    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name
    if not op.exists(folder_path) and not download:
        return ''
    if not op.exists(folder_path) or force_update:
        logger.info('Downloading or reinstalling '
                    'data archive %s at location %s' % (archive_name, path))

        if op.exists(martinos_path):
            archive_name = martinos_path
        elif op.exists(neurospin_path):
            archive_name = neurospin_path
        else:
            archive_name = op.join(path, archive_name)
            rm_archive = True
            fetch_archive = True
            if op.exists(archive_name):
                msg = ('Archive already exists. Overwrite it (y/[n])? ')
                answer = input(msg)
                if answer.lower() == 'y':
                    os.remove(archive_name)
                else:
                    fetch_archive = False

            if fetch_archive:
                _fetch_file(url, archive_name, print_destination=False,
                            hash_=hash_)

        if op.exists(folder_path):
            shutil.rmtree(folder_path)

        logger.info('Decompressing the archive: %s' % archive_name)
        logger.info('(please be patient, this can take some time)')
        for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
            try:
                tarfile.open(archive_name, 'r:%s' % ext).extractall(path=path)
                break
            except tarfile.ReadError as err:
                logger.info('%s is %s trying "bz2"' % (archive_name, err))

        if rm_archive:
            os.remove(archive_name)

    path = op.abspath(path)
    if update_path is None:
        if get_config(key, '') != path:
            update_path = True
            msg = ('Do you want to set the path:\n    %s\nas the default '
                   'sample dataset path in the mne-python config [y]/n? '
                   % path)
            answer = input(msg)
            if answer.lower() == 'n':
                update_path = False
        else:
            update_path = False

    if update_path is True:
        set_config(key, path)

    path = op.join(path, folder_name)

    # compare the version of the Sample dataset and mne
    data_version = _dataset_version(path, name)
    try:
        from distutils.version import LooseVersion as LV
    except ImportError:
        warn('Could not determine sample dataset version; dataset could\n'
             'be out of date. Please install the "distutils" package.')
    else:  # 0.7 < 0.7.git should be False, therefore strip
        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
            warn('The {name} dataset (version {current}) is older than '
                 'mne-python (version {newest}). If the examples fail, '
                 'you may need to update the {name} dataset by using '
                 'mne.datasets.{name}.data_path(force_update=True)'.format(
                     name=name, current=data_version, newest=mne_version))

    return path
Example #29
 def _prepare(self):
     """ Prepare for running the main loop.
     Here we do some initialization like obtaining the startup info,
     creating the GUI application wrapper, etc.
     """
     
     # Reset debug status
     self.debugger.writestatus()
     
      # Get startup info (get a copy, or setting the new version won't trigger!)
     while self.context._stat_startup.recv() is None:
         time.sleep(0.02)
     self.startup_info = startup_info = self.context._stat_startup.recv().copy()
     
     # Set startup info (with additional info)
     if sys.platform.startswith('java'):
         import __builtin__ as builtins  # Jython
     else:
         builtins = __builtins__
     if not isinstance(builtins, dict):
         builtins = builtins.__dict__
     startup_info['builtins'] = [builtin for builtin in builtins.keys()]
     startup_info['version'] = tuple(sys.version_info)
     startup_info['keywords'] = keyword.kwlist
     self.context._stat_startup.send(startup_info)
     
     # Prepare the Python environment
     self._prepare_environment(startup_info)
     
     # Run startup code (before loading GUI toolkit or IPython
     self._run_startup_code(startup_info)
     
     # Write Python banner (to stdout)
     thename = 'Python'
     if sys.version_info[0] == 2:
         thename = 'Legacy Python'
     if '__pypy__' in sys.builtin_module_names:
         thename = 'Pypy'
     if sys.platform.startswith('java'):
         thename = 'Jython'
         # Jython cannot do struct.calcsize("P")
         import java.lang
         real_plat = java.lang.System.getProperty("os.name").lower()
         plat = '%s/%s' % (sys.platform, real_plat)
     elif sys.platform.startswith('win'):
         NBITS = 8 * struct.calcsize("P")
         plat = 'Windows (%i bits)' % NBITS
     else:
         NBITS = 8 * struct.calcsize("P")
         plat = '%s (%i bits)' % (sys.platform, NBITS) 
     printDirect("%s %s on %s.\n" %
                 (thename, sys.version.split('[')[0].rstrip(), plat))
     
     # Integrate GUI
     guiName, guiError = self._integrate_gui(startup_info)
     
     # Write pyzo part of banner (including what GUI loop is integrated)
     pyzoBanner = 'This is the Pyzo interpreter'
     if guiError:
         pyzoBanner += '. ' + guiError + '\n'
     elif guiName:
         pyzoBanner += ' with integrated event loop for ' 
         pyzoBanner += guiName + '.\n'
     else:
         pyzoBanner += '.\n'
     printDirect(pyzoBanner)
     
     # Try loading IPython
     if startup_info.get('ipython', '').lower() in ('', 'no', 'false'):
         self._ipython = None
     else:
         try:
             self._load_ipyhon()
         except Exception:
             _type, value, tb = sys.exc_info()
             del tb
             printDirect('IPython could not be loaded: %s\n' % str(value))
             self._ipython = None
     
     # Set prompts
     sys.ps1 = PS1(self)
     sys.ps2 = PS2(self)
     
     # Notify about project path
     projectPath = startup_info['projectPath']
     if projectPath:
         printDirect('Prepending the project path %r to sys.path\n' % 
             projectPath)
     
     # Write tips message.
     if self._ipython:
         import IPython
         printDirect("\nUsing IPython %s -- An enhanced Interactive Python.\n"
                     %  IPython.__version__)
         printDirect(
             "?         -> Introduction and overview of IPython's features.\n"
             "%quickref -> Quick reference.\n"
             "help      -> Python's own help system.\n"
             "object?   -> Details about 'object', "
             "use 'object??' for extra details.\n")
     else:
         printDirect("Type 'help' for help, " + 
                     "type '?' for a list of *magic* commands.\n")
     
     # Notify the running of the script
     if self._scriptToRunOnStartup:
         printDirect('\x1b[0;33mRunning script: "'+self._scriptToRunOnStartup+'"\x1b[0m\n')
     
     # Prevent app nap on OS X 10.9 and up
     # The _nope module is taken from MINRK's appnope package
     if sys.platform == "darwin" and LV(platform.mac_ver()[0]) >= LV("10.9"):
         from pyzokernel import _nope
         _nope.nope()
     
     # Setup post-mortem debugging via appropriately logged exceptions
     class PMHandler(logging.Handler):
         def emit(self, record):
             if record.exc_info:
                 sys.last_type, sys.last_value, sys.last_traceback = record.exc_info
             return record
     #
     root_logger = logging.getLogger()
     if not root_logger.handlers:
         root_logger.addHandler(logging.StreamHandler())
     root_logger.addHandler(PMHandler())
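
The PMHandler at the end reproduces what the interactive interpreter does after an uncaught exception: it copies the exc_info of any logged exception into sys.last_type, sys.last_value and sys.last_traceback, which is exactly what pdb.pm() reads for a post-mortem session. A minimal standalone sketch of the same pattern:

import logging
import sys

class PMHandler(logging.Handler):
    def emit(self, record):
        # Stash the traceback of any exception logged with exc_info=True
        if record.exc_info:
            sys.last_type, sys.last_value, sys.last_traceback = record.exc_info

logging.getLogger().addHandler(PMHandler())

try:
    1 / 0
except ZeroDivisionError:
    logging.exception('division failed')  # logs with exc_info, so PMHandler fires

# At an interactive prompt, `import pdb; pdb.pm()` would now open the debugger
# on the stored traceback.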
Example #30
def _data_path(path=None, force_update=False, update_path=True, download=True,
               name=None, check_version=False, return_version=False,
               archive_name=None):
    """Aux function."""
    key = {
        'fake': 'MNE_DATASETS_FAKE_PATH',
        'misc': 'MNE_DATASETS_MISC_PATH',
        'sample': 'MNE_DATASETS_SAMPLE_PATH',
        'spm': 'MNE_DATASETS_SPM_FACE_PATH',
        'somato': 'MNE_DATASETS_SOMATO_PATH',
        'brainstorm': 'MNE_DATASETS_BRAINSTORM_PATH',
        'testing': 'MNE_DATASETS_TESTING_PATH',
        'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH',
        'visual_92_categories': 'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
        'mtrf': 'MNE_DATASETS_MTRF_PATH',
        'fieldtrip_cmc': 'MNE_DATASETS_FIELDTRIP_CMC_PATH'
    }[name]

    path = _get_path(path, key, name)
    # To update the testing or misc dataset, push commits, then make a new
    # release on GitHub. Then update the "releases" variable:
    releases = dict(testing='0.40', misc='0.3')
    # And also update the "hashes['testing']" variable below.

    # To update any other dataset, update the data archive itself (upload
    # an updated version) and update the hash.
    archive_names = dict(
        misc='mne-misc-data-%s.tar.gz' % releases['misc'],
        sample='MNE-sample-data-processed.tar.gz',
        somato='MNE-somato-data.tar.gz',
        spm='MNE-spm-face.tar.gz',
        testing='mne-testing-data-%s.tar.gz' % releases['testing'],
        multimodal='MNE-multimodal-data.tar.gz',
        fake='foo.tgz',
        visual_92_categories='MNE-visual_92_categories.tar.gz',
        mtrf='mTRF_1.5.zip',
        fieldtrip_cmc='SubjectCMC.zip'
    )
    if archive_name is not None:
        archive_names.update(archive_name)
    folder_names = dict(
        brainstorm='MNE-brainstorm-data',
        fake='foo',
        misc='MNE-misc-data',
        mtrf='mTRF_1.5',
        sample='MNE-sample-data',
        somato='MNE-somato-data',
        multimodal='MNE-multimodal-data',
        spm='MNE-spm-face',
        testing='MNE-testing-data',
        visual_92_categories='MNE-visual_92_categories-data',
        fieldtrip_cmc='MNE-fieldtrip_cmc-data'
    )
    urls = dict(
        brainstorm='https://mne-tools.s3.amazonaws.com/datasets/'
                   'MNE-brainstorm-data/%s',
        fake='https://github.com/mne-tools/mne-testing-data/raw/master/'
             'datasets/%s',
        misc='https://codeload.github.com/mne-tools/mne-misc-data/'
             'tar.gz/%s' % releases['misc'],
        sample="https://mne-tools.s3.amazonaws.com/datasets/%s",
        somato='https://mne-tools.s3.amazonaws.com/datasets/%s',
        spm='https://mne-tools.s3.amazonaws.com/datasets/%s',
        testing='https://codeload.github.com/mne-tools/mne-testing-data/'
                'tar.gz/%s' % releases['testing'],
        multimodal='https://ndownloader.figshare.com/files/5999598',
        visual_92_categories='https://mne-tools.s3.amazonaws.com/datasets/%s',
        mtrf="https://superb-dca2.dl.sourceforge.net/project/aespa/%s",
        fieldtrip_cmc='ftp://ftp.fieldtriptoolbox.org/pub/fieldtrip/'
                      'tutorial/%s'
    )
    hashes = dict(
        brainstorm=None,
        fake='3194e9f7b46039bb050a74f3e1ae9908',
        misc='d822a720ef94302467cb6ad1d320b669',
        sample='1d5da3a809fded1ef5734444ab5bf857',
        somato='f3e3a8441477bb5bacae1d0c6e0964fb',
        spm='ecce87351d88def59d3d4cdc561e2a60',
        testing='02796b3ab145ee9cad680a545563beb5',
        multimodal='26ec847ae9ab80f58f204d09e2c08367',
        visual_92_categories='46c7e590f4a48596441ce001595d5e58',
        mtrf='273a390ebbc48da2c3184b01a82e4636',
        fieldtrip_cmc='6f9fd6520f9a66e20994423808d2528c'
    )
    folder_origs = dict(  # not listed means None
        misc='mne-misc-data-%s' % releases['misc'],
        testing='mne-testing-data-%s' % releases['testing'],
    )
    folder_name = folder_names[name]
    archive_name = archive_names[name]
    hash_ = hashes[name]
    url = urls[name]
    folder_orig = folder_origs.get(name, None)
    if '%s' in url:
        url = url % archive_name

    folder_path = op.join(path, folder_name)
    if name == 'brainstorm':
        extract_path = folder_path
        folder_path = op.join(folder_path, archive_names[name].split('.')[0])

    rm_archive = False
    martinos_path = '/cluster/fusion/sample_data/' + archive_name
    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name

    if not op.exists(folder_path) and not download:
        return ''
    if not op.exists(folder_path) or force_update:
        if name == 'brainstorm':
            if '--accept-brainstorm-license' in sys.argv:
                answer = 'y'
            else:
                answer = input('%sAgree (y/[n])? ' % _bst_license_text)
            if answer.lower() != 'y':
                raise RuntimeError('You must agree to the license to use this '
                                   'dataset')
        logger.info('Downloading or reinstalling '
                    'data archive %s at location %s' % (archive_name, path))

        if op.exists(martinos_path):
            archive_name = martinos_path
        elif op.exists(neurospin_path):
            archive_name = neurospin_path
        else:
            archive_name = op.join(path, archive_name)
            rm_archive = True
            fetch_archive = True
            if op.exists(archive_name):
                msg = 'Archive already exists. Overwrite it (y/[n])? '
                answer = input(msg)
                if answer.lower() == 'y':
                    os.remove(archive_name)
                else:
                    fetch_archive = False

            if fetch_archive:
                _fetch_file(url, archive_name, print_destination=False,
                            hash_=hash_)

        if op.exists(folder_path):
            def onerror(func, path, exc_info):
                """Deal with access errors (e.g. testing dataset read-only)."""
                # Is the error an access error?
                do = False
                if not os.access(path, os.W_OK):
                    perm = os.stat(path).st_mode | stat.S_IWUSR
                    os.chmod(path, perm)
                    do = True
                if not os.access(op.dirname(path), os.W_OK):
                    dir_perm = (os.stat(op.dirname(path)).st_mode |
                                stat.S_IWUSR)
                    os.chmod(op.dirname(path), dir_perm)
                    do = True
                if do:
                    func(path)
                else:
                    raise
            shutil.rmtree(folder_path, onerror=onerror)

        logger.info('Decompressing the archive: %s' % archive_name)
        logger.info('(please be patient, this can take some time)')
        if name != 'brainstorm':
            extract_path = path
        if name == 'fieldtrip_cmc':
            extract_path = folder_path
        if archive_name.endswith('.zip'):
            with zipfile.ZipFile(archive_name, 'r') as ff:
                ff.extractall(extract_path)
        else:
            for ext in ['gz', 'bz2']:  # informed guess
                try:
                    tf = tarfile.open(archive_name, 'r:%s' % ext)
                    tf.extractall(path=extract_path)
                    tf.close()
                    break
                except tarfile.ReadError as err:
                    logger.info('%s is %s; trying "bz2"' % (archive_name, err))
        if folder_orig is not None:
            shutil.move(op.join(path, folder_orig), folder_path)

        if rm_archive:
            os.remove(archive_name)

    path = _do_path_update(path, update_path, key, name)
    path = op.join(path, folder_name)

    # compare the version of the dataset and mne
    data_version = _dataset_version(path, name)
    try:
        from distutils.version import LooseVersion as LV
    except ImportError:
        warn('Could not determine %s dataset version; dataset could '
             'be out of date. Please install the "distutils" package.'
             % name)
    else:  # 0.7 < 0.7.git should be False, therefore strip
        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
            warn('The {name} dataset (version {current}) is older than '
                 'mne-python (version {newest}). If the examples fail, '
                 'you may need to update the {name} dataset by using '
                 'mne.datasets.{name}.data_path(force_update=True)'.format(
                     name=name, current=data_version, newest=mne_version))
    return (path, data_version) if return_version else path
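
One technique in Example #30 that is easy to miss is the onerror callback given to shutil.rmtree: some datasets ship read-only files, which rmtree refuses to delete by default, so the callback adds the owner-write bit and retries the failed call. A minimal sketch of the same idea with a hypothetical directory name (note that Python 3.12 renames the parameter to onexc and passes the exception itself):

import os
import shutil
import stat

def _make_writable_and_retry(func, path, exc_info):
    """Add the owner-write bit to a read-only path, then retry the failed call."""
    os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)
    func(path)

shutil.rmtree('some_read_only_tree', onerror=_make_writable_and_retry)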