Example #1
import os
import shutil

# _fetch_file, _uncompress_file and _glob_fsl_feeds_data are module-level
# helpers assumed to exist (see the sketch after this example).


def fetch_fsl_feeds_data(data_dir, redownload=False):
    '''Fetch the FSL FEEDS example dataset into `data_dir`.'''
    url = ("http://fsl.fmrib.ox.ac.uk/fsldownloads/oldversions/"
           "fsl-4.1.0-feeds.tar.gz")
    subject_dir = data_dir
    archive_path = os.path.join(subject_dir, os.path.basename(url))
    if redownload:
        # Wipe any previous download so everything is fetched from scratch.
        print("Zapping all old downloads ..")
        try:
            shutil.rmtree(subject_dir)
            os.remove(archive_path)
        except OSError:
            pass
        finally:
            print("Done.")
    if os.path.exists(subject_dir):
        subject_data = _glob_fsl_feeds_data(subject_dir)
        if subject_data is None:
            # Incomplete data on disk: remove it and start over; otherwise
            # the retry below would find the same directory and loop forever.
            shutil.rmtree(subject_dir)
            return fetch_fsl_feeds_data(data_dir)
        else:
            return subject_data
    elif os.path.exists(archive_path):
        try:
            _uncompress_file(archive_path)
        except Exception:
            print("Archive corrupted, trying to download it again.")
            os.remove(archive_path)
            return fetch_fsl_feeds_data(data_dir)
        return _glob_fsl_feeds_data(subject_dir)
    else:
        _fetch_file(url, data_dir)
        try:
            _uncompress_file(archive_path)
        except Exception:
            print("Archive corrupted, trying to download it again.")
            os.remove(archive_path)
            return fetch_fsl_feeds_data(data_dir)
        return _glob_fsl_feeds_data(subject_dir)
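
This fetcher (and the two examples below) calls _fetch_file and _uncompress_file without defining them. A minimal sketch of what such helpers could look like follows, using only the standard library and assuming no progress reporting, resuming, or checksum verification; the names come from the examples, but these bodies are illustrative, not the project's actual implementation.

import os
import shutil
import tarfile
import urllib.request
import zipfile


def _fetch_file(url, data_dir):
    # Download `url` into `data_dir`, keeping the remote file name.
    os.makedirs(data_dir, exist_ok=True)
    target = os.path.join(data_dir, os.path.basename(url))
    with urllib.request.urlopen(url) as response, open(target, "wb") as fh:
        shutil.copyfileobj(response, fh)
    return target


def _uncompress_file(archive_path):
    # Unpack a .tar.gz or .zip archive into the directory it sits in.
    dest = os.path.dirname(archive_path)
    if archive_path.endswith((".tar.gz", ".tgz")):
        with tarfile.open(archive_path, "r:gz") as tar:
            tar.extractall(dest)
    elif archive_path.endswith(".zip"):
        with zipfile.ZipFile(archive_path) as zf:
            zf.extractall(dest)
    else:
        raise ValueError("Unsupported archive type: %s" % archive_path)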
Example #2
# Same imports (os, shutil) and module-level helpers as in Example #1.


def fetch_spm_auditory_data(data_dir, redownload=False):
    '''Fetch the SPM auditory (MoAEpilot) dataset into `data_dir`.'''
    url = "ftp://ftp.fil.ion.ucl.ac.uk/spm/data/MoAEpilot/MoAEpilot.zip"
    subject_dir = data_dir
    archive_path = os.path.join(subject_dir, os.path.basename(url))
    if redownload:
        # Wipe any previous download so everything is fetched from scratch.
        print("Zapping all old downloads ..")
        try:
            shutil.rmtree(subject_dir)
            os.remove(archive_path)
        except OSError:
            pass
        finally:
            print("Done.")
    if os.path.exists(subject_dir):
        subject_data = _glob_spm_auditory_data(subject_dir)
        if subject_data is None:
            # Incomplete data on disk: remove it and start over.
            shutil.rmtree(subject_dir)
            return fetch_spm_auditory_data(data_dir)
        else:
            return subject_data
    elif os.path.exists(archive_path):
        try:
            _uncompress_file(archive_path)
        except Exception:
            print("Archive corrupted, trying to download it again.")
            # Remove the corrupt archive; otherwise the retry below would
            # hit the same file and loop forever.
            os.remove(archive_path)
            return fetch_spm_auditory_data(data_dir)
        return _glob_spm_auditory_data(subject_dir)
    else:
        _fetch_file(url, data_dir)
        try:
            _uncompress_file(archive_path)
        except Exception:
            print("Archive corrupted, trying to download it again.")
            os.remove(archive_path)
            return fetch_spm_auditory_data(data_dir)
        return _glob_spm_auditory_data(subject_dir)
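
As a usage sketch, a hypothetical call could look like the following; the target directory is an arbitrary choice, and the shape of the returned subject_data depends entirely on _glob_spm_auditory_data:

import os

# Arbitrary scratch location (an assumption, not mandated by the fetcher).
data_dir = os.path.expanduser("~/spm_auditory_data")
subject_data = fetch_spm_auditory_data(data_dir)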
Example #3
# Same imports (os, shutil) and module-level helpers as in Example #1.


def fetch_haxby_subject_data(data_dir, subject_id, url, redownload=False):
    '''Fetch one subject of the Haxby dataset from `url` into `data_dir`.'''
    archive_name = os.path.basename(url)
    archive_path = os.path.join(data_dir, archive_name)
    subject_dir = os.path.join(data_dir, subject_id)
    if redownload:
        # Wipe any previous download so everything is fetched from scratch.
        print("Zapping all old downloads ..")
        try:
            shutil.rmtree(subject_dir)
            os.remove(archive_path)
        except OSError:
            pass
        finally:
            print("Done.")
    if os.path.exists(subject_dir):
        subject_data = _glob_haxby_subject_data(subject_dir)
        if subject_data is None:
            # Incomplete data on disk: remove it and start over.
            shutil.rmtree(subject_dir)
            return fetch_haxby_subject_data(data_dir, subject_id, url)
        else:
            return subject_id, subject_data
    elif os.path.exists(archive_path):
        try:
            _uncompress_file(archive_path)
        except Exception:
            print("Archive corrupted, trying to download it again.")
            os.remove(archive_path)
            return fetch_haxby_subject_data(data_dir, subject_id, url)
        return subject_id, _glob_haxby_subject_data(subject_dir)
    else:
        _fetch_file(url, data_dir)
        try:
            _uncompress_file(archive_path)
        except Exception:
            print("Archive corrupted, trying to download it again.")
            os.remove(archive_path)
            return fetch_haxby_subject_data(data_dir, subject_id, url)
        return subject_id, _glob_haxby_subject_data(subject_dir)
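
A hypothetical call for a single subject might look like this; the URL is a placeholder (the real archive location is not given in the example), and "subj1" is only an illustrative subject id:

import os

data_dir = os.path.expanduser("~/haxby_data")      # arbitrary scratch location
url = "http://example.com/haxby/subj1.tar.gz"      # placeholder, not the real URL
subject_id, files = fetch_haxby_subject_data(data_dir, "subj1", url)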