Example #1
def eeg_data_path(base_path, subject, accept):
    datapath = op.join(base_path, "EEG", "subject {:02d}".format(subject),
                       "with occular artifact")
    if not op.isfile(op.join(datapath, "cnt.mat")):
        if not op.isdir(op.join(base_path, "EEG")):
            os.makedirs(op.join(base_path, "EEG"))
        intervals = [[1, 5], [6, 10], [11, 15], [16, 20], [21, 25], [26, 29]]
        for low, high in intervals:
            if subject >= low and subject <= high:
                if not op.isfile(op.join(base_path, "EEG.zip")):
                    if not accept:
                        raise AttributeError(
                            "You must accept licence term to download this dataset,"
                            "set accept=True when instanciating the dataset.")
                    _fetch_file(
                        "{}/EEG/EEG_{:02d}-{:02d}.zip".format(
                            SHIN_URL, low, high),
                        op.join(base_path, "EEG.zip"),
                        print_destination=False,
                    )
                with z.ZipFile(op.join(base_path, "EEG.zip"), "r") as f:
                    f.extractall(op.join(base_path, "EEG"))
                os.remove(op.join(base_path, "EEG.zip"))
                break
    assert op.isfile(op.join(datapath,
                             "cnt.mat")), op.join(datapath, "cnt.mat")
    return [op.join(datapath, fn) for fn in ["cnt.mat", "mrk.mat"]]
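A brief usage sketch for the helper above, assuming the module-level imports it relies on (os, os.path as op, zipfile as z, _fetch_file) and SHIN_URL are defined; the base path and subject number are placeholders:

# Hypothetical call: fetch and extract EEG data for subject 3 after accepting the license.
cnt_file, mrk_file = eeg_data_path('/tmp/shin_dataset', subject=3, accept=True)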
Example #2
def test_fetch_file():
    """Test file downloading
    """
    tempdir = _TempDir()
    urls = [
        'http://martinos.org/mne/',
        'ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt'
    ]
    with ArgvSetter(disable_stderr=False):  # to capture stdout
        for url in urls:
            archive_name = op.join(tempdir, "download_test")
            _fetch_file(url, archive_name, verbose=False)
            assert_raises(Exception,
                          _fetch_file,
                          'NOT_AN_ADDRESS',
                          op.join(tempdir, 'test'),
                          verbose=False)
            resume_name = op.join(tempdir, "download_resume")
            # touch file
            with open(resume_name + '.part', 'w'):
                os.utime(resume_name + '.part', None)
            _fetch_file(url, resume_name, resume=True, verbose=False)
            assert_raises(ValueError,
                          _fetch_file,
                          url,
                          archive_name,
                          hash_='a',
                          verbose=False)
            assert_raises(RuntimeError,
                          _fetch_file,
                          url,
                          archive_name,
                          hash_='a' * 32,
                          verbose=False)
Example #3
def _test_fetch(url):
    """Test URL retrieval."""
    tempdir = _TempDir()
    with ArgvSetter(disable_stderr=False):  # to capture stdout
        archive_name = op.join(tempdir, "download_test")
        _fetch_file(url,
                    archive_name,
                    timeout=30.,
                    verbose=False,
                    resume=False)
        pytest.raises(Exception,
                      _fetch_file,
                      'NOT_AN_ADDRESS',
                      op.join(tempdir, 'test'),
                      verbose=False)
        resume_name = op.join(tempdir, "download_resume")
        # touch file
        with open(resume_name + '.part', 'w'):
            os.utime(resume_name + '.part', None)
        _fetch_file(url, resume_name, resume=True, timeout=30., verbose=False)
        pytest.raises(ValueError,
                      _fetch_file,
                      url,
                      archive_name,
                      hash_='a',
                      verbose=False)
        pytest.raises(RuntimeError,
                      _fetch_file,
                      url,
                      archive_name,
                      hash_='a' * 32,
                      verbose=False)
Example #4
def fetch_faces_data(data_path, repo, subject_ids):
    """Dataset fetcher for OpenfMRI dataset ds000117.

    Parameters
    ----------
    data_path : str
        Path to the folder where data is stored
    repo : str
        The folder name (typically 'ds000117')
    subject_ids : list of int
        The subjects to fetch
    """
    for subject_id in subject_ids:
        src_url = ('http://openfmri.s3.amazonaws.com/tarballs/'
                   'ds117_R0.1.1_sub%03d_raw.tgz' % subject_id)
        tar_fname = op.join(data_path, repo + '.tgz')
        target_dir = op.join(data_path, repo)
        if not op.exists(target_dir):
            if not op.exists(tar_fname):
                _fetch_file(url=src_url,
                            file_name=tar_fname,
                            print_destination=True,
                            resume=True,
                            timeout=10.)
            tf = tarfile.open(tar_fname)
            print('Extracting files. This may take a while ...')
            tf.extractall(path=data_path)
            shutil.move(op.join(data_path, 'ds117'), target_dir)
            os.remove(tar_fname)
Example #5
def download_file(url, name, root_destination='~/data/', zipfile=False,
                  replace=False):
    """Download a file from dropbox, google drive, or a URL.

    This will download a file and store it in a '~/data/' folder,
    creating directories if need be. It will also work for zip
    files, in which case it will unzip all of the files to the
    desired location.

    Parameters
    ----------
    url : string
        The url of the file to download. This may be a dropbox
        or google drive "share link", or a regular URL. If it
        is a share link, then it should point to a single file and
        not a folder. To download folders, zip them first.
    name : string
        The name / path of the downloaded file, or the folder into
        which to unzip the data if the file is a zip file.
    root_destination : string
        The root folder where data will be downloaded.
    zipfile : bool
        Whether the URL points to a zip file. If yes, it will be
        unzipped to root_destination + name.
    replace : bool
        If True and the URL points to a single file, overwrite the
        old file if possible.
    """
    # Make sure we have directories to dump files
    home = op.expanduser('~')
    tmpfile = home + '/tmp/tmp'
    if not op.isdir(home + '/data/'):
        print('Creating data folder...')
        os.makedirs(home + '/data/')

    if not op.isdir(home + '/tmp/'):
        print('Creating tmp folder...')
        os.makedirs(home + '/tmp/')

    download_path = _convert_url_to_downloadable(url)

    # Now save to the new destination
    out_path = root_destination.replace('~', home) + name
    if not op.isdir(op.dirname(out_path)):
        print('Creating path {} for output data'.format(out_path))
        os.makedirs(op.dirname(out_path))

    if zipfile is True:
        _fetch_file(download_path, tmpfile)
        myzip = ZipFile(tmpfile)
        myzip.extractall(out_path)
        os.remove(tmpfile)
    else:
        if len(name) == 0:
            raise ValueError('Cannot overwrite the root data directory')
        if replace is False and op.exists(out_path):
            raise ValueError('Path {} exists, use `replace=True` to '
                             'overwrite'.format(out_path))
        _fetch_file(download_path, out_path)
    print('Successfully moved file to {}'.format(out_path))
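A minimal usage sketch for download_file; the URLs below are placeholders, not real share links:

# Hypothetical call: fetch a single file into '~/data/my_project/'.
download_file('https://example.com/files/subjects.csv',
              name='my_project/subjects.csv', replace=True)
# Hypothetical call: fetch an archive and unzip it under '~/data/my_project/archive/'.
download_file('https://example.com/files/archive.zip',
              name='my_project/archive/', zipfile=True)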
Example #6
def data_path(url,
              sign,
              path=None,
              force_update=False,
              update_path=True,
              verbose=None):
    """Get path to local copy of given dataset URL.

    This is a low-level function useful for getting a local copy of a
    remote dataset

    Parameters
    ----------
    url : str
        Path to remote location of data
    sign : str
        Signifier of dataset
    path : None | str
        Location of where to look for the BNCI data storing location.
        If None, the environment variable or config parameter
        ``MNE_DATASETS_(signifier)_PATH`` is used. If it doesn't exist, the
        "~/mne_data" directory is used. If the dataset
        is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_(signifier)_PATH in mne-python
        config to the given path. If None, the user is prompted.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`).

    Returns
    -------
    path : str
        Local path to the given data file.

    """  # noqa: E501
    sign = sign.upper()
    key = 'MNE_DATASETS_{:s}_PATH'.format(sign)
    key_dest = 'MNE-{:s}-data'.format(sign.lower())
    if get_config(key) is None:
        set_config(key, osp.join(osp.expanduser("~"), "mne_data"))
    path = _get_path(path, key, sign)
    destination = _url_to_local_path(url, op.join(path, key_dest))
    # Fetch the file
    if not op.isfile(destination) or force_update:
        if op.isfile(destination):
            os.remove(destination)
        if not op.isdir(op.dirname(destination)):
            os.makedirs(op.dirname(destination))
        _fetch_file(url, destination, print_destination=False)

    # Offer to update the path
    _do_path_update(path, update_path, key, sign)
    return destination
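A usage sketch for data_path above; the URL and signifier are placeholders, and the surrounding module is assumed to import get_config, set_config, _get_path, _url_to_local_path and _fetch_file:

# Hypothetical call: cache a remote file under ~/mne_data/MNE-example-data/.
local_file = data_path('http://example.com/datasets/subject01.mat', sign='EXAMPLE')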
Example #7
def run_hnn_core(backend=None, n_jobs=1):
    """Test to check that hnn-core does not break."""
    # small snippet of data on data branch for now. To be deleted
    # later. Data branch should have only one commit so it does not
    # pollute the history.
    data_url = ('https://raw.githubusercontent.com/jonescompneurolab/'
                'hnn-core/test_data/dpl.txt')
    if not op.exists('dpl.txt'):
        _fetch_file(data_url, 'dpl.txt')
    dpl_master = loadtxt('dpl.txt')

    hnn_core_root = op.dirname(hnn_core.__file__)

    # default params
    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)

    # run the simulation
    net = Network(params)

    if backend == 'mpi':
        with MPIBackend(n_procs=2, mpi_cmd='mpiexec'):
            dpl = simulate_dipole(net)[0]
    elif backend == 'joblib':
        with JoblibBackend(n_jobs=n_jobs):
            dpl = simulate_dipole(net)[0]
    else:
        dpl = simulate_dipole(net)[0]

    # write the dipole to a file and compare
    fname = './dpl2.txt'
    dpl.write(fname)

    dpl_pr = loadtxt(fname)
    assert_array_equal(dpl_pr[:, 2], dpl_master[:, 2])  # L2
    assert_array_equal(dpl_pr[:, 3], dpl_master[:, 3])  # L5

    # Test spike type counts
    spiketype_counts = {}
    for spikegid in net.spikes.gids[0]:
        if net.gid_to_type(spikegid) not in spiketype_counts:
            spiketype_counts[net.gid_to_type(spikegid)] = 0
        else:
            spiketype_counts[net.gid_to_type(spikegid)] += 1
    assert 'common' not in spiketype_counts
    assert 'exgauss' not in spiketype_counts
    assert 'extpois' not in spiketype_counts
    assert spiketype_counts == {
        'evprox1': 269,
        'L2_basket': 54,
        'L2_pyramidal': 113,
        'L5_pyramidal': 395,
        'L5_basket': 85,
        'evdist1': 234,
        'evprox2': 269
    }
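A usage sketch of the check above; the backend values mirror the branches in the function:

# Hypothetical calls: run the simulation once with the joblib backend and once with the default backend.
run_hnn_core(backend='joblib', n_jobs=2)
run_hnn_core()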
Example #8
def test_hnn():
    """Test to check that HNN produces consistent results"""
    # small snippet of data on data branch for now. To be deleted
    # later. Data branch should have only one commit so it does not
    # pollute the history.
    from subprocess import Popen, PIPE
    import shlex
    import os
    import sys

    ntrials = 3
    paramf = op.join('param', 'default.param')

    nrniv_str = 'nrniv -python -nobanner'
    cmd = nrniv_str + ' ' + sys.executable + ' run.py ' + paramf + ' ntrial ' + str(ntrials)

    # Split the command into shell arguments for passing to Popen
    cmdargs = shlex.split(cmd, posix="win" not in sys.platform)

    # Start the simulation
    proc = Popen(cmdargs, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                 cwd=os.getcwd(), universal_newlines=True)
    out, err = proc.communicate()

    # print all messages (including error messages)
    print('STDOUT', out)
    print('STDERR', err)

    for trial in range(ntrials):
        print("Checking data for trial %d" % trial)
        if 'SYSTEM_USER_DIR' in os.environ:
            basedir = os.environ['SYSTEM_USER_DIR']
        else:
            basedir = os.path.expanduser('~')
        dirname = op.join(basedir, 'hnn_out', 'data', 'default')

        data_dir = ('https://raw.githubusercontent.com/jonescompneurolab/'
                    'hnn/test_data/')
        for data_type in ['dpl', 'rawdpl', 'i']:
            sys.stdout.write("%s..." % data_type)

            fname = "%s_%d.txt" % (data_type, trial)
            data_url = op.join(data_dir, fname)
            if not op.exists(fname):
                _fetch_file(data_url, fname)

            print("comparing %s" % fname)
            pr = loadtxt(op.join(dirname, fname))
            master = loadtxt(fname)

            assert_allclose(pr[:, 1], master[:, 1], rtol=1e-8, atol=0)
            if data_type in ['dpl', 'rawdpl', 'i']:
                assert_allclose(pr[:, 2], master[:, 2], rtol=1e-8, atol=0)
            if data_type in ['dpl', 'rawdpl']:
                assert_allclose(pr[:, 3], master[:, 3], rtol=1e-8, atol=0)
            print("done")
Example #9
def fetch_faces_data(data_path=None, repo='ds000117', subject_ids=[1]):
    """Dataset fetcher for OpenfMRI dataset ds000117.

    Parameters
    ----------
    data_path : str | None
        Path to the folder where data is stored. Defaults to
        '~/mne_data/mne_bids_examples'
    repo : str
        The folder name. Defaults to 'ds000117'.
    subject_ids : list of int
        The subjects to fetch. Defaults to [1], downloading subject 1.

    Returns
    -------
    data_path : str
        Path to the parent directory where the `repo` folder containing the
        data is stored.

    """
    if data_path is None:
        home = os.path.expanduser('~')
        data_path = os.path.join(home, 'mne_data', 'mne_bids_examples')
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    target_dir = op.join(data_path, repo)
    os.makedirs(target_dir, exist_ok=True)
    for subject_id in subject_ids:  # pragma: no cover

        # If we have data for that subject already, skip
        sub_str = 'sub{:03}'.format(subject_id)
        if op.exists(op.join(target_dir, sub_str)):
            continue

        # Else, download it
        src_url = ('http://openfmri.s3.amazonaws.com/tarballs/'
                   'ds117_R0.1.1_{}_raw.tgz'.format(sub_str))
        tar_fname = op.join(data_path, repo, sub_str + '.tgz')
        if not op.exists(tar_fname):
            _fetch_file(url=src_url, file_name=tar_fname,
                        print_destination=True)

        # Unpack the downloaded archive to the correct location
        tf = tarfile.open(tar_fname)
        print('Extracting files. This may take a while ...')
        tf.extractall(path=data_path)  # will extract the 'ds117' directory
        shutil.move(op.join(data_path, 'ds117', sub_str), target_dir)

        # Clean up to get ready to download next subject
        shutil.rmtree(op.join(data_path, 'ds117'))
        os.remove(tar_fname)

    return data_path
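A usage sketch for the fetcher above; the subject numbers are placeholders:

# Hypothetical call: download subjects 1 and 2 into the default '~/mne_data/mne_bids_examples' tree.
faces_path = fetch_faces_data(subject_ids=[1, 2])
sub01_dir = op.join(faces_path, 'ds000117', 'sub001')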
Example #10
def test_base_params():
    """Test default params object matches base params"""
    param_url = ('https://raw.githubusercontent.com/jonescompneurolab/'
                 'hnn-core/test_data/base.json')
    params_base_fname = op.join(hnn_core_root, 'param', 'base.json')
    if not op.exists(params_base_fname):
        _fetch_file(param_url, params_base_fname)

    params_base = read_params(params_base_fname)
    params = Params()
    assert params == params_base
Example #11
def load(name):
    if name in names:
        fname = os.path.join(dirname, f"{name}.pickled")
        if not os.path.isfile(fname):
            _fetch_file(url % name, fname)
        else:
            print(f"{name}.pickled already downloaded.")
    else:
        raise ValueError(f"{name}: not found")
    with open(fname, 'rb') as f:
        v = pickle.load(f)
    return v
Example #12
def _data_path(path=None, force_update=False, update_path=True,
               download=True, name=None, verbose=None):
    if path is None:
        path = op.join(os.environ["COMA_DIR"],"examples")

    if not isinstance(path, str):
        raise ValueError('path must be a string or None')

    if name == 'example':
        archive_name = "ComaSample.tar.gz"
        url = "https://www.dropbox.com/s/thpu5cph0hv94bz/" + archive_name + "?dl=1"
        folder_name = "ComaSample"
        folder_path = op.join(path, folder_name)
        rm_archive = False
    else:
        raise ValueError('Sorry, the dataset "%s" is not known.' % name)

    if not op.exists(folder_path) and not download:
        return ''

    if not op.exists(folder_path) or force_update:
        logger.info('Sample data archive %s not found at:\n%s\n'
                    'It will be downloaded and extracted at this location.'
                    % (archive_name, folder_path))

        archive_name = op.join(path, archive_name)
        rm_archive = True
        if op.exists(archive_name):
            msg = ('Archive already exists at %r. Overwrite it '
                   '(y/[n])? ' % archive_name)
            answer = input(msg)
            if answer.lower() == 'y':
                os.remove(archive_name)
            else:
                raise IOError('Archive file already exists at target '
                              'location %r.' % archive_name)

        _fetch_file(url, archive_name, print_destination=False)

        if op.exists(folder_path):
            shutil.rmtree(folder_path)

        logger.info('Decompressing the archive: ' + archive_name)
        logger.info('... please be patient, this can take some time')
        for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
            try:
                tarfile.open(archive_name, 'r:%s' % ext).extractall(path=path)
            except tarfile.ReadError as err:
                logger.info('%s is %s trying "bz2"' % (archive_name, err))

        if rm_archive:
            os.remove(archive_name)
Example #13
def test_legacy_params():
    """Test reading legacy .param file."""
    param_url = ('https://raw.githubusercontent.com/hnnsolver/'
                 'hnn-core/test_data/default.param')
    params_legacy_fname = op.join(hnn_core_root, 'param', 'default.param')
    if not op.exists(params_legacy_fname):
        _fetch_file(param_url, params_legacy_fname)

    params_new_fname = op.join(hnn_core_root, 'param', 'default.json')
    params_legacy = read_params(params_legacy_fname)
    params_new = read_params(params_new_fname)

    assert params_new == params_legacy
Example #14
def fnirs_data_path(path, subject):
    datapath = op.join(path, 'NIRS', 'subject {:02d}'.format(subject))
    if not op.isfile(op.join(datapath, 'mrk.mat')):
        # fNIRS
        if not op.isfile(op.join(path, 'fNIRS.zip')):
            _fetch_file('http://doc.ml.tu-berlin.de/hBCI/NIRS/NIRS_01-29.zip',
                        op.join(path, 'fNIRS.zip'),
                        print_destination=False)
        if not op.isdir(op.join(path, 'NIRS')):
            os.makedirs(op.join(path, 'NIRS'))
        with z.ZipFile(op.join(path, 'fNIRS.zip'), 'r') as f:
            f.extractall(op.join(path, 'NIRS'))
        os.remove(op.join(path, 'fNIRS.zip'))
    return [op.join(datapath, fn) for fn in ['cnt.mat', 'mrk.mat']]
Example #15
def download_test_data():
    '''Download additional test data from dropbox.'''
    import zipfile
    try:
        from mne.utils import _fetch_file
        use_pooch = False
    except ImportError:
        import pooch
        use_pooch = True

    # check if test data exist
    data_dir = _get_test_data_dir()
    check_files = [
        'alpha_range_clusters.hdf5', 'DiamSar-eeg-oct-6-fwd.fif',
        op.join('fsaverage', 'bem',
                'fsaverage-ico-5-src.fif'), 'chan_alpha_range.hdf5',
        'test_clustering.npy', 'DiamSar_023_rest_raw.fif', 'GabCon-48_epo.fif'
    ]
    if all([op.isfile(op.join(data_dir, f)) for f in check_files]):
        return

    # set up paths
    fname = 'temp_file.zip'
    destination = op.join(data_dir, fname)
    download_link = ('https://www.dropbox.com/sh/l4scs37524lb3pa/'
                     'AABCak4jORjgridWwHlwjhMHa?dl=1')

    # download the file
    if use_pooch:
        hash = ('98bab9750844ab969c5a74deea9d041701d73f43dfe05465c'
                '577a83d974064e8')
        pooch.retrieve(url=download_link,
                       known_hash=hash,
                       path=data_dir,
                       fname=fname)
    else:
        _fetch_file(download_link,
                    destination,
                    print_destination=True,
                    resume=True,
                    timeout=30.)

    # unzip and extract
    # TODO - optionally extract only the missing files
    zip_ref = zipfile.ZipFile(destination, 'r')
    zip_ref.extractall(data_dir)
    zip_ref.close()

    # remove the zipfile
    os.remove(destination)
Example #16
def fnirs_data_path(path, subject):
    datapath = op.join(path, "NIRS", "subject {:02d}".format(subject))
    if not op.isfile(op.join(datapath, "mrk.mat")):
        # fNIRS
        if not op.isfile(op.join(path, "fNIRS.zip")):
            _fetch_file(
                "http://doc.ml.tu-berlin.de/hBCI/NIRS/NIRS_01-29.zip",
                op.join(path, "fNIRS.zip"),
                print_destination=False,
            )
        if not op.isdir(op.join(path, "NIRS")):
            os.makedirs(op.join(path, "NIRS"))
        with z.ZipFile(op.join(path, "fNIRS.zip"), "r") as f:
            f.extractall(op.join(path, "NIRS"))
        os.remove(op.join(path, "fNIRS.zip"))
    return [op.join(datapath, fn) for fn in ["cnt.mat", "mrk.mat"]]
Example #17
def test_fetch_file():
    """Test file downloading
    """
    tempdir = _TempDir()
    urls = ["http://martinos.org/mne/", "ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt"]
    with ArgvSetter(disable_stderr=False):  # to capture stdout
        for url in urls:
            archive_name = op.join(tempdir, "download_test")
            _fetch_file(url, archive_name, verbose=False)
            assert_raises(Exception, _fetch_file, "NOT_AN_ADDRESS", op.join(tempdir, "test"), verbose=False)
            resume_name = op.join(tempdir, "download_resume")
            # touch file
            with open(resume_name + ".part", "w"):
                os.utime(resume_name + ".part", None)
            _fetch_file(url, resume_name, resume=True, verbose=False)
            assert_raises(ValueError, _fetch_file, url, archive_name, hash_="a", verbose=False)
            assert_raises(RuntimeError, _fetch_file, url, archive_name, hash_="a" * 32, verbose=False)
Example #18
def fetch_file(url, file_name, resume=True, timeout=10.):
    """Load requested file, downloading it if needed or requested.

    Parameters
    ----------
    url: string
        The url of file to be downloaded.
    file_name: string
        Name, along with the path, of where downloaded file will be saved.
    resume: bool, optional
        If true, try to resume partially downloaded files.
    timeout : float
        The URL open timeout.
    """
    from mne.utils import _fetch_file
    _fetch_file(url=url, file_name=file_name, print_destination=True,
                resume=resume, hash_=None, timeout=timeout)
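A brief usage sketch for the wrapper above; the URL and destination are placeholders:

# Hypothetical call: download a remote text file, resuming a partial download if one exists.
fetch_file('https://example.com/data/readme.txt', '/tmp/readme.txt',
           resume=True, timeout=30.)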
Example #19
def fetch_file(url, file_name, resume=True, timeout=10.):
    """Load requested file, downloading it if needed or requested

    Parameters
    ----------
    url: string
        The url of file to be downloaded.
    file_name: string
        Name, along with the path, of where downloaded file will be saved.
    resume: bool, optional
        If true, try to resume partially downloaded files.
    timeout : float
        The URL open timeout.
    """
    from mne.utils import _fetch_file
    _fetch_file(url=url, file_name=file_name, print_destination=True,
                resume=resume, hash_=None, timeout=timeout)
Example #20
def test_hnn_core():
    """Test to check if MNE neuron does not break."""
    # small snippet of data on data branch for now. To be deleted
    # later. Data branch should have only one commit so it does not
    # pollute the history.
    data_url = ('https://raw.githubusercontent.com/jonescompneurolab/'
                'hnn-core/test_data/dpl.txt')
    if not op.exists('dpl.txt'):
        _fetch_file(data_url, 'dpl.txt')
    dpl_master = loadtxt('dpl.txt')

    hnn_core_root = op.join(op.dirname(hnn_core.__file__), '..')

    params_fname = op.join(hnn_core_root, 'param', 'default.json')
    params = read_params(params_fname)

    net = Network(params, n_jobs=1)
    dpl = simulate_dipole(net)[0]

    fname = './dpl2.txt'
    dpl.write(fname)

    dpl_pr = loadtxt(fname)
    assert_array_equal(dpl_pr[:, 2], dpl_master[:, 2])  # L2
    assert_array_equal(dpl_pr[:, 3], dpl_master[:, 3])  # L5

    # Test spike type counts
    spiketype_counts = {}
    for spikegid in net.spikegids[0]:
        if net.gid_to_type(spikegid) not in spiketype_counts:
            spiketype_counts[net.gid_to_type(spikegid)] = 0
        else:
            spiketype_counts[net.gid_to_type(spikegid)] += 1
    assert 'extinput' not in spiketype_counts
    assert 'exgauss' not in spiketype_counts
    assert 'extpois' not in spiketype_counts
    assert spiketype_counts == {
        'evprox1': 269,
        'L2_basket': 54,
        'L2_pyramidal': 113,
        'L5_pyramidal': 395,
        'L5_basket': 85,
        'evdist1': 234,
        'evprox2': 269
    }
Example #21
def get_subjects(sub_inds, sub_names, ind):
    dataname = 'data{}'.format(ind)
    if not os.path.isfile(os.path.join(base_path, dataname + '.zip')):
        _fetch_file(FILES[ind],
                    os.path.join(base_path, dataname + '.zip'),
                    print_destination=False)
    with z.ZipFile(os.path.join(base_path, dataname + '.zip'), 'r') as f:
        os.makedirs(os.path.join(base_path, dataname), exist_ok=True)
        f.extractall(os.path.join(base_path, dataname))
        for fname in os.listdir(os.path.join(base_path, dataname)):
            for ind, prefix in zip(sub_inds, sub_names):
                if fname.startswith(prefix):
                    os.rename(
                        os.path.join(base_path, dataname, fname),
                        os.path.join(base_path,
                                     'subject_{}.mat'.format(ind)))
    os.remove(os.path.join(base_path, dataname + '.zip'))
    shutil.rmtree(os.path.join(base_path, dataname))
Example #22
def data_path(url, sign, path=None, force_update=False, update_path=True, verbose=None):
    """Get path to local copy of given dataset URL. **Deprecated**

    This is a low-level function useful for getting a local copy of a
    remote dataset. It is deprecated in favor of data_dl.

    Parameters
    ----------
    url : str
        Path to remote location of data
    sign : str
        Signifier of dataset
    path : None | str
        Location of where to look for the data storing location.
        If None, the environment variable or config parameter
        ``MNE_DATASETS_(signifier)_PATH`` is used. If it doesn't exist, the
        "~/mne_data" directory is used. If the dataset
        is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None, **Deprecated**
        Unused, kept for compatibility purpose.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see :func:`mne.verbose`).

    Returns
    -------
    path : str
        Local path to the given data file.

    """  # noqa: E501
    path = get_dataset_path(sign, path)
    key_dest = "MNE-{:s}-data".format(sign.lower())
    destination = _url_to_local_path(url, osp.join(path, key_dest))
    # Fetch the file
    if not osp.isfile(destination) or force_update:
        if osp.isfile(destination):
            os.remove(destination)
        if not osp.isdir(osp.dirname(destination)):
            os.makedirs(osp.dirname(destination))
        _fetch_file(url, destination, print_destination=False)
    return destination
Example #23
def fetch_faces_data(data_path=None, repo='ds000117', subject_ids=[1]):
    """Dataset fetcher for OpenfMRI dataset ds000117.

    Parameters
    ----------
    data_path : str | None
        Path to the folder where data is stored. Defaults to
        '~/mne_data/mne_bids_examples'
    repo : str
        The folder name. Defaults to 'ds000117'.
    subject_ids : list of int
        The subjects to fetch. Defaults to [1], downloading subject 1.

    Returns
    -------
    data_path : str
        Path to the folder where data is stored.

    """
    if not data_path:
        home = os.path.expanduser('~')
        data_path = os.path.join(home, 'mne_data', 'mne_bids_examples')
    # ensure the target folder exists, also when a custom data_path is given
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    for subject_id in subject_ids:
        src_url = ('http://openfmri.s3.amazonaws.com/tarballs/'
                   'ds117_R0.1.1_sub%03d_raw.tgz' % subject_id)
        tar_fname = op.join(data_path, repo + '.tgz')
        target_dir = op.join(data_path, repo)
        if not op.exists(target_dir):
            if not op.exists(tar_fname):
                _fetch_file(url=src_url,
                            file_name=tar_fname,
                            print_destination=True,
                            resume=True,
                            timeout=10.)
            tf = tarfile.open(tar_fname)
            print('Extracting files. This may take a while ...')
            tf.extractall(path=data_path)
            shutil.move(op.join(data_path, 'ds117'), target_dir)
            os.remove(tar_fname)

    return data_path
Example #24
def _test_fetch(url):
    """Helper to test URL retrieval."""
    tempdir = _TempDir()
    with ArgvSetter(disable_stderr=False):  # to capture stdout
        archive_name = op.join(tempdir, "download_test")
        _fetch_file(url, archive_name, timeout=30., verbose=False,
                    resume=False)
        assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
                      op.join(tempdir, 'test'), verbose=False)
        resume_name = op.join(tempdir, "download_resume")
        # touch file
        with open(resume_name + '.part', 'w'):
            os.utime(resume_name + '.part', None)
        _fetch_file(url, resume_name, resume=True, timeout=30.,
                    verbose=False)
        assert_raises(ValueError, _fetch_file, url, archive_name,
                      hash_='a', verbose=False)
        assert_raises(RuntimeError, _fetch_file, url, archive_name,
                      hash_='a' * 32, verbose=False)
Example #25
def fnirs_data_path(path, subject, accept):
    datapath = op.join(path, "NIRS", "subject {:02d}".format(subject))
    if not op.isfile(op.join(datapath, "mrk.mat")):
        # fNIRS
        if not op.isfile(op.join(path, "fNIRS.zip")):
            if not accept:
                raise AttributeError(
                    "You must accept licence term to download this dataset,"
                    "set accept=True when instanciating the dataset.")
            _fetch_file(
                "http://doc.ml.tu-berlin.de/hBCI/NIRS/NIRS_01-29.zip",
                op.join(path, "fNIRS.zip"),
                print_destination=False,
            )
        if not op.isdir(op.join(path, "NIRS")):
            os.makedirs(op.join(path, "NIRS"))
        with z.ZipFile(op.join(path, "fNIRS.zip"), "r") as f:
            f.extractall(op.join(path, "NIRS"))
        os.remove(op.join(path, "fNIRS.zip"))
    return [op.join(datapath, fn) for fn in ["cnt.mat", "mrk.mat"]]
Example #26
def eeg_data_path(base_path, subject):
    datapath = op.join(base_path, 'EEG', 'subject {:02d}'.format(
        subject), 'with occular artifact')
    if not op.isfile(op.join(datapath, 'cnt.mat')):
        if not op.isdir(op.join(base_path, 'EEG')):
            os.makedirs(op.join(base_path, 'EEG'))
        intervals = [[1, 5], [6, 10], [11, 15], [16, 20], [21, 25], [26, 29]]
        for low, high in intervals:
            if subject >= low and subject <= high:
                if not op.isfile(op.join(base_path, 'EEG.zip')):
                    _fetch_file('http://doc.ml.tu-berlin.de/hBCI/EEG/'
                                'EEG_{:02d}-{:02d}.zip'.format(low, high),
                                op.join(base_path, 'EEG.zip'),
                                print_destination=False)
                with z.ZipFile(op.join(base_path, 'EEG.zip'), 'r') as f:
                    f.extractall(op.join(base_path, 'EEG'))
                os.remove(op.join(base_path, 'EEG.zip'))
                break
    assert op.isfile(op.join(datapath, 'cnt.mat')
                     ), op.join(datapath, 'cnt.mat')
    return [op.join(datapath, fn) for fn in ['cnt.mat', 'mrk.mat']]
Example #27
    def test_compare_hnn_core(self, run_hnn_core_fixture, backend, n_jobs=1):
        """Test hnn-core does not break."""
        # small snippet of data on data branch for now. To be deleted
        # later. Data branch should have only one commit so it does not
        # pollute the history.
        data_url = ('https://raw.githubusercontent.com/jonescompneurolab/'
                    'hnn-core/test_data/dpl.txt')
        if not op.exists('dpl.txt'):
            _fetch_file(data_url, 'dpl.txt')
        dpl_master = loadtxt('dpl.txt')

        dpls, net = run_hnn_core_fixture(backend=backend)
        dpl = dpls[0]

        # write the dipole to a file and compare
        fname = './dpl2.txt'
        dpl.write(fname)

        dpl_pr = loadtxt(fname)
        assert_array_equal(dpl_pr[:, 2], dpl_master[:, 2])  # L2
        assert_array_equal(dpl_pr[:, 3], dpl_master[:, 3])  # L5

        # Test spike type counts
        spike_type_counts = {}
        for spike_gid in net.cell_response.spike_gids[0]:
            if net.gid_to_type(spike_gid) not in spike_type_counts:
                spike_type_counts[net.gid_to_type(spike_gid)] = 0
            else:
                spike_type_counts[net.gid_to_type(spike_gid)] += 1
        assert 'common' not in spike_type_counts
        assert 'exgauss' not in spike_type_counts
        assert 'extpois' not in spike_type_counts
        assert spike_type_counts == {
            'evprox1': 269,
            'L2_basket': 54,
            'L2_pyramidal': 113,
            'L5_pyramidal': 395,
            'L5_basket': 85,
            'evdist1': 234,
            'evprox2': 269
        }
Example #28
def test_fetch_file():
    """Test file downloading
    """
    # Skipping test if no internet connection available
    try:
        urllib.request.urlopen("http://github.com", timeout=2)
    except urllib.request.URLError:
        from nose.plugins.skip import SkipTest
        raise SkipTest('No internet connection, skipping download test.')

    urls = ['http://github.com/mne-tools/mne-python/blob/master/README.rst',
            'ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt']
    for url in urls:
        archive_name = op.join(tempdir, "download_test")
        _fetch_file(url, archive_name, print_destination=False)
        assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
                      op.join(tempdir, 'test'))
        resume_name = op.join(tempdir, "download_resume")
        # touch file
        with open(resume_name + '.part', 'w'):
            os.utime(resume_name + '.part', None)
        _fetch_file(url, resume_name, print_destination=False, resume=True)
Example #29
def download_sample_data(dataset="ssvep", subject=1, session=1):
    """Download BCI data for example purpose

    Parameters
    ----------
    dataset : str
        Type of the dataset; can be "ssvep", "p300" or "imagery".
        Default is "ssvep", as the others are not implemented.
    subject : int
        Subject id, dataset specific (default: 1)
    session : int, default 1
        Session number, dataset specific (default: 1)

    Returns
    -------
    destination : str
        Path to downloaded data
    """
    if dataset == "ssvep":
        DATASET_URL = 'https://zenodo.org/record/2392979/files/'
        url = '{:s}subject{:02d}_run{:d}_raw.fif'.format(DATASET_URL,
                                                         subject, session + 1)
        sign = 'SSVEPEXO'
        key, key_dest = 'MNE_DATASETS_SSVEPEXO_PATH', 'MNE-ssvepexo-data'
    elif dataset == "p300" or dataset == "imagery":
        raise NotImplementedError("Not yet implemented")

    # Use MNE _fetch_file to download EEG file
    if get_config(key) is None:
        set_config(key, os.path.join(os.path.expanduser("~"), "mne_data"))
    # Get the MNE data directory
    path = _get_path(None, key, sign)
    # Download the URL contents to the local folder built from the second argument
    destination = _url_to_local_path(url, os.path.join(path, key_dest))
    # Create the full directory path for the file
    os.makedirs(os.path.dirname(destination), exist_ok=True)
    if not os.path.exists(destination):
        _fetch_file(url, destination, print_destination=False)
    return destination
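A usage sketch for download_sample_data; only the "ssvep" dataset is implemented, and the subject/session values below are placeholders:

# Hypothetical call: fetch run 2 of subject 12 from the SSVEP example dataset.
fif_path = download_sample_data(dataset="ssvep", subject=12, session=1)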
Example #30
def test_base_params():
    """Test params object with base params"""
    param_url = ('https://raw.githubusercontent.com/jonescompneurolab/'
                 'hnn-core/test_data/base.json')
    params_base_fname = op.join(hnn_core_root, 'param', 'base.json')
    if not op.exists(params_base_fname):
        _fetch_file(param_url, params_base_fname)

    params_base = read_params(params_base_fname)
    params = Params()
    assert params == params_base

    # unsupported extension
    pytest.raises(ValueError, read_params, 'params.txt')
    # empty file
    empty_fname = op.join(hnn_core_root, 'param', 'empty.json')
    with open(empty_fname, 'w') as json_data:
        json.dump({}, json_data)
    pytest.raises(ValueError, read_params, empty_fname)
    # non dict type
    pytest.raises(ValueError, Params, [])
    pytest.raises(ValueError, Params, 'sdfdfdf')
Example #31
def test_fetch_file():
    """Test file downloading
    """
    tempdir = _TempDir()
    urls = ['http://google.com', 'ftp://ftp.openbsd.org/pub/OpenBSD/README']
    with ArgvSetter(disable_stderr=False):  # to capture stdout
        for url in urls:
            archive_name = op.join(tempdir, "download_test")
            _fetch_file(url,
                        archive_name,
                        timeout=30.,
                        verbose=False,
                        resume=False)
            assert_raises(Exception,
                          _fetch_file,
                          'NOT_AN_ADDRESS',
                          op.join(tempdir, 'test'),
                          verbose=False)
            resume_name = op.join(tempdir, "download_resume")
            # touch file
            with open(resume_name + '.part', 'w'):
                os.utime(resume_name + '.part', None)
            _fetch_file(url,
                        resume_name,
                        resume=True,
                        timeout=30.,
                        verbose=False)
            assert_raises(ValueError,
                          _fetch_file,
                          url,
                          archive_name,
                          hash_='a',
                          verbose=False)
            assert_raises(RuntimeError,
                          _fetch_file,
                          url,
                          archive_name,
                          hash_='a' * 32,
                          verbose=False)
Example #32
def test_fetch_file():
    """Test file downloading
    """
    tempdir = _TempDir()
    urls = ['http://google.com',
            'ftp://ftp.openbsd.org/pub/OpenBSD/README']
    with ArgvSetter(disable_stderr=False):  # to capture stdout
        for url in urls:
            archive_name = op.join(tempdir, "download_test")
            _fetch_file(url, archive_name, timeout=30., verbose=False)
            assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
                          op.join(tempdir, 'test'), verbose=False)
            resume_name = op.join(tempdir, "download_resume")
            # touch file
            with open(resume_name + '.part', 'w'):
                os.utime(resume_name + '.part', None)
            _fetch_file(url, resume_name, resume=True, timeout=30.,
                        verbose=False)
            assert_raises(ValueError, _fetch_file, url, archive_name,
                          hash_='a', verbose=False)
            assert_raises(RuntimeError, _fetch_file, url, archive_name,
                          hash_='a' * 32, verbose=False)
Example #33
def test_read_legacy_params():
    """Test reading of legacy .param file."""
    param_url = ('https://raw.githubusercontent.com/hnnsolver/'
                 'hnn-core/test_data/default.param')
    params_legacy_fname = op.join(hnn_core_root, 'param', 'default.param')
    if not op.exists(params_legacy_fname):
        _fetch_file(param_url, params_legacy_fname)

    params_new_fname = op.join(hnn_core_root, 'param', 'default.json')
    params_legacy = read_params(params_legacy_fname)
    params_new = read_params(params_new_fname)

    params_new_seedless = {
        key: val
        for key, val in params_new.items()
        if key not in params_new['prng_seedcore*'].keys()
    }
    params_legacy_seedless = {
        key: val
        for key, val in params_legacy.items()
        if key not in params_legacy['prng_seedcore*'].keys()
    }
    assert params_new_seedless == params_legacy_seedless
Example #34
def eeg_data_path(base_path, subject):
    datapath = op.join(base_path, "EEG", "subject {:02d}".format(subject),
                       "with occular artifact")
    if not op.isfile(op.join(datapath, "cnt.mat")):
        if not op.isdir(op.join(base_path, "EEG")):
            os.makedirs(op.join(base_path, "EEG"))
        intervals = [[1, 5], [6, 10], [11, 15], [16, 20], [21, 25], [26, 29]]
        for low, high in intervals:
            if subject >= low and subject <= high:
                if not op.isfile(op.join(base_path, "EEG.zip")):
                    _fetch_file(
                        "{}/EEG/EEG_{:02d}-{:02d}.zip".format(
                            SHIN_URL, low, high),
                        op.join(base_path, "EEG.zip"),
                        print_destination=False,
                    )
                with z.ZipFile(op.join(base_path, "EEG.zip"), "r") as f:
                    f.extractall(op.join(base_path, "EEG"))
                os.remove(op.join(base_path, "EEG.zip"))
                break
    assert op.isfile(op.join(datapath,
                             "cnt.mat")), op.join(datapath, "cnt.mat")
    return [op.join(datapath, fn) for fn in ["cnt.mat", "mrk.mat"]]
Example #35
def test_fetch_file():
    """Test file downloading
    """
    # Skipping test if no internet connection available
    try:
        urllib.request.urlopen("http://github.com", timeout=2)
    except:
        from nose.plugins.skip import SkipTest
        raise SkipTest('No internet connection, skipping download test.')

    urls = [
        'http://github.com/mne-tools/mne-python/blob/master/README.rst',
        'ftp://surfer.nmr.mgh.harvard.edu/pub/data/bert.recon.md5sum.txt'
    ]
    for url in urls:
        archive_name = op.join(tempdir, "download_test")
        _fetch_file(url, archive_name, print_destination=False)
        assert_raises(Exception, _fetch_file, 'NOT_AN_ADDRESS',
                      op.join(tempdir, 'test'))
        resume_name = op.join(tempdir, "download_resume")
        # touch file
        with open(resume_name + '.part', 'w'):
            os.utime(resume_name + '.part', None)
        _fetch_file(url, resume_name, print_destination=False, resume=True)
Example #36
def local_data_path(base_path, subject):
    if not os.path.isdir(os.path.join(base_path,
                                      'subject_{}'.format(subject))):
        if not os.path.isdir(os.path.join(base_path, 'data')):
            _fetch_file(DATA_PATH, os.path.join(base_path, 'data.zip'),
                        print_destination=False)
            with z.ZipFile(os.path.join(base_path, 'data.zip'), 'r') as f:
                f.extractall(base_path)
            os.remove(os.path.join(base_path, 'data.zip'))
        datapath = os.path.join(base_path, 'data')
        for i in range(1, 5):
            os.makedirs(os.path.join(base_path, 'subject_{}'.format(i)))
            for session in range(1, 4):
                for run in ['A', 'B']:
                    os.rename(os.path.join(datapath,
                                           'S{}_{}{}.cnt'.format(i, session,
                                                                 run)),
                              os.path.join(base_path,
                                           'subject_{}'.format(i),
                                           '{}{}.cnt'.format(session, run)))
        shutil.rmtree(os.path.join(base_path, 'data'))
    subjpath = os.path.join(base_path, 'subject_{}'.format(subject))
    return [[os.path.join(subjpath, '{}{}.cnt'.format(y, x))
             for x in ['A', 'B']] for y in ['1', '2', '3']]
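The helper above returns one list per session, each holding the A and B run files; a usage sketch (the base path is a placeholder and DATA_PATH is assumed to be defined at module level):

# Hypothetical call: sessions is a list of three [run_A, run_B] pairs for subject 2.
sessions = local_data_path('/tmp/local_dataset', subject=2)
session1_run_a, session1_run_b = sessions[0]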
Example #37
def test_constants(tmpdir):
    """Test compensation."""
    tmpdir = str(tmpdir)  # old pytest...
    dest = op.join(tmpdir, 'fiff.zip')
    _fetch_file('https://codeload.github.com/mne-tools/fiff-constants/zip/' +
                commit, dest)
    names = list()
    with zipfile.ZipFile(dest, 'r') as ff:
        for name in ff.namelist():
            if 'Dictionary' in name:
                ff.extract(name, tmpdir)
                names.append(op.basename(name))
                shutil.move(op.join(tmpdir, name), op.join(tmpdir, names[-1]))
    names = sorted(names)
    assert names == ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt',
                     'DictionaryStructures.txt',
                     'DictionaryTags.txt', 'DictionaryTags_MNE.txt',
                     'DictionaryTypes.txt', 'DictionaryTypes_MNE.txt']
    # IOD (MEGIN and MNE)
    fif = dict(iod=dict(), tags=dict(), types=dict(), defines=dict())
    con = dict(iod=dict(), tags=dict(), types=dict(), defines=dict())
    fiff_version = None
    for name in ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt']:
        with open(op.join(tmpdir, name), 'rb') as fid:
            for line in fid:
                line = line.decode('latin1').strip()
                if line.startswith('# Packing revision'):
                    assert fiff_version is None
                    fiff_version = line.split()[-1]
                if (line.startswith('#') or line.startswith('alias') or
                        len(line) == 0):
                    continue
                line = line.split('"')
                assert len(line) in (1, 2, 3)
                desc = '' if len(line) == 1 else line[1]
                line = line[0].split()
                assert len(line) in (2, 3)
                if len(line) == 2:
                    kind, id_ = line
                else:
                    kind, id_, tagged = line
                    assert tagged in ('tagged',)
                id_ = int(id_)
                if id_ not in iod_dups:
                    assert id_ not in fif['iod']
                fif['iod'][id_] = [kind, desc]
    # Tags (MEGIN)
    with open(op.join(tmpdir, 'DictionaryTags.txt'), 'rb') as fid:
        for line in fid:
            line = line.decode('ISO-8859-1').strip()
            if (line.startswith('#') or line.startswith('alias') or
                    line.startswith(':') or len(line) == 0):
                continue
            line = line.split('"')
            assert len(line) in (1, 2, 3), line
            desc = '' if len(line) == 1 else line[1]
            line = line[0].split()
            assert len(line) == 4, line
            kind, id_, dtype, unit = line
            id_ = int(id_)
            val = [kind, dtype, unit]
            assert id_ not in fif['tags'], (fif['tags'].get(id_), val)
            fif['tags'][id_] = val
    # Tags (MNE)
    with open(op.join(tmpdir, 'DictionaryTags_MNE.txt'), 'rb') as fid:
        for li, line in enumerate(fid):
            line = line.decode('ISO-8859-1').strip()
            # ignore continuation lines (*)
            if (line.startswith('#') or line.startswith('alias') or
                    line.startswith(':') or line.startswith('*') or
                    len(line) == 0):
                continue
            # weird syntax around line 80:
            if line in ('/*', '"'):
                continue
            line = line.split('"')
            assert len(line) in (1, 2, 3), line
            if len(line) == 3 and len(line[2]) > 0:
                l2 = line[2].strip()
                assert l2.startswith('/*') and l2.endswith('*/'), l2
            desc = '' if len(line) == 1 else line[1]
            line = line[0].split()
            assert len(line) == 3, (li + 1, line)
            kind, id_, dtype = line
            unit = '-'
            id_ = int(id_)
            val = [kind, dtype, unit]
            if id_ not in tag_dups:
                assert id_ not in fif['tags'], (fif['tags'].get(id_), val)
            fif['tags'][id_] = val

    # Types and enums
    in_ = None
    re_prim = re.compile(r'^primitive\((.*)\)\s*(\S*)\s*"(.*)"$')
    re_enum = re.compile(r'^enum\((\S*)\)\s*".*"$')
    re_enum_entry = re.compile(r'\s*(\S*)\s*(\S*)\s*"(.*)"$')
    re_defi = re.compile(r'#define\s*(\S*)\s*(\S*)\s*"(.*)"$')
    used_enums = list()
    for extra in ('', '_MNE'):
        with open(op.join(tmpdir, 'DictionaryTypes%s.txt'
                          % (extra,)), 'rb') as fid:
            for li, line in enumerate(fid):
                line = line.decode('ISO-8859-1').strip()
                if in_ is None:
                    p = re_prim.match(line)
                    e = re_enum.match(line)
                    d = re_defi.match(line)
                    if p is not None:
                        t, s, d = p.groups()
                        s = int(s)
                        assert s not in fif['types']
                        fif['types'][s] = [t, d]
                    elif e is not None:
                        # entering an enum
                        this_enum = e.group(1)
                        if this_enum not in fif:
                            used_enums.append(this_enum)
                            fif[this_enum] = dict()
                            con[this_enum] = dict()
                        in_ = fif[this_enum]
                    elif d is not None:
                        t, s, d = d.groups()
                        s = int(s)
                        fif['defines'][t] = [s, d]
                    else:
                        assert not line.startswith('enum(')
                else:  # in an enum
                    if line == '{':
                        continue
                    elif line == '}':
                        in_ = None
                        continue
                    t, s, d = re_enum_entry.match(line).groups()
                    s = int(s)
                    if t != 'ecg' and s != 3:  # ecg defined the same way
                        assert s not in in_
                    in_[s] = [t, d]

    #
    # Assertions
    #

    # Version
    mne_version = '%d.%d' % (FIFF.FIFFC_MAJOR_VERSION,
                             FIFF.FIFFC_MINOR_VERSION)
    assert fiff_version == mne_version
    unknowns = list()

    # Assert that all our constants are in the FIF def
    assert 'FIFFV_SSS_JOB_NOTHING' in dir(FIFF)
    for name in sorted(dir(FIFF)):
        if name.startswith('_') or name in _dir_ignore_names:
            continue
        check = None
        val = getattr(FIFF, name)
        if name in fif['defines']:
            assert fif['defines'][name][0] == val
        elif name.startswith('FIFFC_'):
            # Checked above
            assert name in ('FIFFC_MAJOR_VERSION', 'FIFFC_MINOR_VERSION',
                            'FIFFC_VERSION')
        elif name.startswith('FIFFB_'):
            check = 'iod'
        elif name.startswith('FIFFT_'):
            check = 'types'
        elif name.startswith('FIFFV_'):
            if name.startswith('FIFFV_MNE_') and name.endswith('_ORI'):
                check = 'mne_ori'
            elif name.startswith('FIFFV_MNE_') and name.endswith('_COV'):
                check = 'covariance_type'
            elif name.startswith('FIFFV_MNE_COORD'):
                check = 'coord'  # weird wrapper
            elif name.endswith('_CH') or '_QUAT_' in name or name in \
                    ('FIFFV_DIPOLE_WAVE', 'FIFFV_GOODNESS_FIT',
                     'FIFFV_HPI_ERR', 'FIFFV_HPI_G', 'FIFFV_HPI_MOV'):
                check = 'ch_type'
            elif name.startswith('FIFFV_SUBJ_'):
                check = name.split('_')[2].lower()
            elif name in ('FIFFV_POINT_LPA', 'FIFFV_POINT_NASION',
                          'FIFFV_POINT_RPA'):
                check = 'cardinal_point'
            else:
                for check in used_enums:
                    if name.startswith('FIFFV_' + check.upper()):
                        break
                else:
                    raise RuntimeError('Could not find %s' % (name,))
            assert check in used_enums, name
            if 'SSS' in check:
                raise RuntimeError
        elif name.startswith('FIFF_UNIT'):  # units and multipliers
            check = name.split('_')[1].lower()
        elif name.startswith('FIFF_'):
            check = 'tags'
        else:
            unknowns.append((name, val))
        if check is not None and name not in _tag_ignore_names:
            assert val in fif[check], '%s: %s, %s' % (check, val, name)
            if val in con[check]:
                msg = "%s='%s'  ?" % (name, con[check][val])
                assert _aliases.get(name) == con[check][val], msg
            else:
                con[check][val] = name
    unknowns = '\n\t'.join('%s (%s)' % u for u in unknowns)
    assert len(unknowns) == 0, 'Unknown types\n\t%s' % unknowns

    # Assert that all the FIF defs are in our constants
    assert set(fif.keys()) == set(con.keys())
    for key in sorted(set(fif.keys()) - {'defines'}):
        this_fif, this_con = fif[key], con[key]
        assert len(set(this_fif.keys())) == len(this_fif)
        assert len(set(this_con.keys())) == len(this_con)
        missing_from_con = sorted(set(this_con.keys()) - set(this_fif.keys()))
        assert missing_from_con == [], key
        if key not in _ignore_incomplete_enums:
            missing_from_fif = sorted(set(this_fif.keys()) -
                                      set(this_con.keys()))
            assert missing_from_fif == [], key

    # Assert that `coil_def.dat` has accurate descriptions of all enum(coil)
    coil_def = _read_coil_defs()
    coil_desc = np.array([c['desc'] for c in coil_def])
    coil_def = np.array([(c['coil_type'], c['accuracy'])
                         for c in coil_def], int)
    mask = (coil_def[:, 1] == FWD.COIL_ACCURACY_ACCURATE)
    coil_def = coil_def[mask, 0]
    coil_desc = coil_desc[mask]
    bad_list = []
    for key in fif['coil']:
        if key not in _missing_coil_def and key not in coil_def:
            bad_list.append(('    %s,' % key).ljust(10) +
                            '  # ' + fif['coil'][key][1])
    assert len(bad_list) == 0, '\n' + '\n'.join(bad_list)
    # Assert that enum(coil) has all `coil_def.dat` entries
    for key, desc in zip(coil_def, coil_desc):
        if key not in fif['coil']:
            bad_list.append(('    %s,' % key).ljust(10) + '  # ' + desc)
    assert len(bad_list) == 0, '\n' + '\n'.join(bad_list)
Example #38
def test_fetch_file(url, tmpdir):
    """Test URL retrieval."""
    tempdir = str(tmpdir)
    archive_name = op.join(tempdir, "download_test")
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at' not in log
    with open(archive_name, 'rb') as fid:
        data = fid.read()
    stop = len(data) // 2
    assert 0 < stop < len(data)
    with open(archive_name + '.part', 'wb') as fid:
        fid.write(data[:stop])
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at %s' % stop in log
    with pytest.raises(Exception, match='unknown url type'):
        _fetch_file('NOT_AN_ADDRESS', op.join(tempdir, 'test'), verbose=False)
    resume_name = op.join(tempdir, "download_resume")
    # touch file
    with open(resume_name + '.part', 'w'):
        os.utime(resume_name + '.part', None)
    _fetch_file(url, resume_name, resume=True, timeout=30.,
                verbose=False)
    with pytest.raises(ValueError, match='Bad hash value'):
        _fetch_file(url, archive_name, hash_='a', verbose=False)
    with pytest.raises(RuntimeError, match='Hash mismatch'):
        _fetch_file(url, archive_name, hash_='a' * 32, verbose=False)
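
Outside a test, a minimal stand-alone call to _fetch_file might look like the sketch below. The import path (from mne.utils import _fetch_file), the URL, and the MD5 value are assumptions for illustration only; the keyword arguments (resume, hash_, timeout, verbose) are the ones exercised by the examples themselves.

import os.path as op
# Assumed import path for the private helper shown in these examples.
from mne.utils import _fetch_file

url = 'https://example.com/data/archive.zip'   # hypothetical URL
dest = op.join('.', 'archive.zip')

if not op.isfile(dest):
    # Resume a partial '.part' download if one exists; verify the result
    # against a 32-character MD5 hash when one is known (hypothetical here).
    _fetch_file(url, dest, resume=True, timeout=30.,
                hash_='d41d8cd98f00b204e9800998ecf8427e', verbose=False)
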
def test_constants(tmpdir):
    """Test compensation."""
    tmpdir = str(tmpdir)  # old pytest...
    dest = op.join(tmpdir, 'fiff.zip')
    _fetch_file('https://codeload.github.com/mne-tools/fiff-constants/zip/'
                'master', dest)
    names = list()
    with zipfile.ZipFile(dest, 'r') as ff:
        for name in ff.namelist():
            if 'Dictionary' in name:
                ff.extract(name, tmpdir)
                names.append(op.basename(name))
                shutil.move(op.join(tmpdir, name), op.join(tmpdir, names[-1]))
    names = sorted(names)
    assert names == ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt',
                     'DictionaryStructures.txt',
                     'DictionaryTags.txt', 'DictionaryTags_MNE.txt',
                     'DictionaryTypes.txt', 'DictionaryTypes_MNE.txt']
    # IOD (MEGIN and MNE)
    iod = dict()
    fiff_version = None
    for name in ['DictionaryIOD.txt', 'DictionaryIOD_MNE.txt']:
        with open(op.join(tmpdir, name), 'rb') as fid:
            for line in fid:
                line = line.decode('latin1').strip()
                if line.startswith('# Packing revision'):
                    assert fiff_version is None
                    fiff_version = line.split()[-1]
                if (line.startswith('#') or line.startswith('alias') or
                        len(line) == 0):
                    continue
                line = line.split('"')
                assert len(line) in (1, 2, 3)
                desc = '' if len(line) == 1 else line[1]
                line = line[0].split()
                assert len(line) in (2, 3)
                if len(line) == 2:
                    kind, id_ = line
                else:
                    kind, id_, tagged = line
                    assert tagged in ('tagged',)
                id_ = int(id_)
                if id_ not in iod_dups:
                    assert id_ not in iod
                iod[id_] = [kind, desc]
    # Tags (MEGIN)
    tags = dict()
    with open(op.join(tmpdir, 'DictionaryTags.txt'), 'rb') as fid:
        for line in fid:
            line = line.decode('ISO-8859-1').strip()
            if (line.startswith('#') or line.startswith('alias') or
                    line.startswith(':') or len(line) == 0):
                continue
            line = line.split('"')
            assert len(line) in (1, 2, 3), line
            desc = '' if len(line) == 1 else line[1]
            line = line[0].split()
            assert len(line) == 4, line
            kind, id_, dtype, unit = line
            id_ = int(id_)
            val = [kind, dtype, unit]
            assert id_ not in tags, (tags.get(id_), val)
            tags[id_] = val
    # Tags (MNE)
    with open(op.join(tmpdir, 'DictionaryTags_MNE.txt'), 'rb') as fid:
        for li, line in enumerate(fid):
            line = line.decode('ISO-8859-1').strip()
            # ignore continuation lines (*)
            if (line.startswith('#') or line.startswith('alias') or
                    line.startswith(':') or line.startswith('*') or
                    len(line) == 0):
                continue
            # weird syntax around line 80:
            if line in ('/*', '"'):
                continue
            line = line.split('"')
            assert len(line) in (1, 2, 3), line
            if len(line) == 3 and len(line[2]) > 0:
                l2 = line[2].strip()
                assert l2.startswith('/*') and l2.endswith('*/'), l2
            desc = '' if len(line) == 1 else line[1]
            line = line[0].split()
            assert len(line) == 3, (li + 1, line)
            kind, id_, dtype = line
            unit = '-'
            id_ = int(id_)
            val = [kind, dtype, unit]
            if id_ not in tag_dups:
                assert id_ not in tags, (tags.get(id_), val)
            tags[id_] = val

    # Types and enums
    defines = dict()  # maps the other way (name->val)
    types = dict()
    used_enums = ('unit', 'unitm', 'coil', 'aspect', 'bem_surf_id',
                  'ch_type', 'coord', 'mri_pixel', 'point', 'role',
                  'hand', 'sex', 'proj_item', 'bem_approx',
                  'mne_cov_ch', 'mne_ori', 'mne_map', 'covariance_type',
                  'mne_priors', 'mne_space', 'mne_surf')
    enums = dict((k, dict()) for k in used_enums)
    in_ = None
    re_prim = re.compile(r'^primitive\((.*)\)\s*(\S*)\s*"(.*)"$')
    re_enum = re.compile(r'^enum\((\S*)\)\s*".*"$')
    re_enum_entry = re.compile(r'\s*(\S*)\s*(\S*)\s*"(.*)"$')
    re_defi = re.compile(r'#define\s*(\S*)\s*(\S*)\s*"(.*)"$')
    for extra in ('', '_MNE'):
        with open(op.join(tmpdir, 'DictionaryTypes%s.txt'
                          % (extra,)), 'rb') as fid:
            for li, line in enumerate(fid):
                line = line.decode('ISO-8859-1').strip()
                if in_ is None:
                    p = re_prim.match(line)
                    e = re_enum.match(line)
                    d = re_defi.match(line)
                    if p is not None:
                        t, s, d = p.groups()
                        s = int(s)
                        assert s not in types
                        types[s] = [t, d]
                    elif e is not None:
                        # entering an enum
                        this_enum = e.group(1)
                        if this_enum in enums:
                            in_ = enums[e.group(1)]
                    elif d is not None:
                        t, s, d = d.groups()
                        s = int(s)
                        defines[t] = [s, d]
                    else:
                        assert not line.startswith('enum(')
                else:  # in an enum
                    if line == '{':
                        continue
                    elif line == '}':
                        in_ = None
                        continue
                    t, s, d = re_enum_entry.match(line).groups()
                    s = int(s)
                    if t != 'ecg' and s != 3:  # ecg defined the same way
                        assert s not in in_
                    in_[s] = [t, d]

    #
    # Assertions
    #

    # Version
    mne_version = '%d.%d' % (FIFF.FIFFC_MAJOR_VERSION,
                             FIFF.FIFFC_MINOR_VERSION)
    assert fiff_version == mne_version
    unknowns = list()

    # Assert that all our constants are in the dict
    # (we are not necessarily complete the other way)
    for name in sorted(dir(FIFF)):
        if name.startswith('_') or name in _dir_ignore_names or \
                name in _missing_names:
            continue
        val = getattr(FIFF, name)
        if name in defines:
            assert defines[name][0] == val
        elif name.startswith('FIFFC_'):
            # Checked above
            assert name in ('FIFFC_MAJOR_VERSION', 'FIFFC_MINOR_VERSION',
                            'FIFFC_VERSION')
        elif name.startswith('FIFFB_'):
            assert val in iod, (val, name)
        elif name.startswith('FIFFT_'):
            assert val in types, (val, name)
        elif name.startswith('FIFFV_'):
            if name.startswith('FIFFV_MNE_') and name.endswith('_ORI'):
                this_enum = 'mne_ori'
            elif name.startswith('FIFFV_MNE_') and name.endswith('_COV'):
                this_enum = 'covariance_type'
            elif name.startswith('FIFFV_MNE_COORD'):
                this_enum = 'coord'  # weird wrapper
            elif name.endswith('_CH') or '_QUAT_' in name or name in \
                    ('FIFFV_DIPOLE_WAVE', 'FIFFV_GOODNESS_FIT',
                     'FIFFV_HPI_ERR', 'FIFFV_HPI_G', 'FIFFV_HPI_MOV'):
                this_enum = 'ch_type'
            elif name.startswith('FIFFV_SUBJ_'):
                this_enum = name.split('_')[2].lower()
            else:
                for this_enum in used_enums:
                    if name.startswith('FIFFV_' + this_enum.upper()):
                        break
                else:
                    raise RuntimeError('Could not find %s' % (name,))
            assert this_enum in used_enums, name
            assert val in enums[this_enum], (val, name)
        elif name.startswith('FIFF_UNIT'):  # units and multipliers
            this_enum = name.split('_')[1].lower()
            assert val in enums[this_enum], (name, val)
        elif name.startswith('FIFF_'):
            assert val in tags, (name, val)
        else:
            unknowns.append((name, val))
    unknowns = '\n\t'.join('%s (%s)' % u for u in unknowns)
    assert len(unknowns) == 0, 'Unknown types\n\t%s' % unknowns
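
The enum-parsing step in the example above is driven by the regular expressions re_enum and re_enum_entry. A small self-contained sketch of that parsing logic, run against a fabricated fragment written in the style of a DictionaryTypes file (not the real file), looks like this:

import re

re_enum = re.compile(r'^enum\((\S*)\)\s*".*"$')
re_enum_entry = re.compile(r'\s*(\S*)\s*(\S*)\s*"(.*)"$')

# Fabricated fragment, loosely in the style of DictionaryTypes.txt.
sample = '''\
enum(unit) "SI units"
{
    none 0 "no unit"
    v    107 "volt"
}
'''

enums = {'unit': dict()}
in_ = None
for line in sample.splitlines():
    line = line.strip()
    if in_ is None:
        e = re_enum.match(line)
        if e is not None and e.group(1) in enums:
            in_ = enums[e.group(1)]      # entering a known enum block
    else:
        if line == '{':
            continue
        if line == '}':
            in_ = None                   # leaving the enum block
            continue
        t, s, d = re_enum_entry.match(line).groups()
        in_[int(s)] = [t, d]             # value -> [name, description]

print(enums)  # {'unit': {0: ['none', 'no unit'], 107: ['v', 'volt']}}
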