Example #1
def _get_default_uri(cls):
    """Returns the URL for the latest GOES data."""
    now = datetime.datetime.utcnow()
    time_range = TimeRange(datetime.datetime(now.year, now.month, now.day), now)
    url_does_exist = net.url_exists(cls._get_url_for_date_range(time_range))
    while not url_does_exist:
        time_range = TimeRange(time_range.start - datetime.timedelta(days=1),
                               time_range.start)
        url_does_exist = net.url_exists(cls._get_url_for_date_range(time_range))
    return cls._get_url_for_date_range(time_range)
Example #2
def _get_default_uri(cls):
    """Returns the URL for the latest GOES data."""
    now = datetime.datetime.utcnow()
    time_range = TimeRange(datetime.datetime(now.year, now.month, now.day),
                           now)
    url_does_exist = net.url_exists(
        cls._get_url_for_date_range(time_range))
    while not url_does_exist:
        time_range = TimeRange(
            time_range.start - datetime.timedelta(days=1),
            time_range.start)
        url_does_exist = net.url_exists(
            cls._get_url_for_date_range(time_range))
    return cls._get_url_for_date_range(time_range)
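
Both variants above implement the same pattern: build a TimeRange for the current day and, while no URL exists for that range, step it back one day at a time. A minimal, self-contained sketch of that probing loop using only the standard library (the url_for_day helper and the example URL scheme are hypothetical placeholders, not the real GOES layout):

import datetime
import urllib.request


def url_exists(url, timeout=10):
    # Probe the URL with a HEAD request; treat any network error as "missing".
    request = urllib.request.Request(url, method="HEAD")
    try:
        with urllib.request.urlopen(request, timeout=timeout):
            return True
    except (OSError, ValueError):
        # URLError and socket timeouts are OSError subclasses in Python 3.
        return False


def latest_available_day(url_for_day, max_days_back=30):
    # Start from today (UTC) and walk backwards until a day's URL exists.
    day = datetime.datetime.utcnow().date()
    for _ in range(max_days_back):
        if url_exists(url_for_day(day)):
            return day
        day -= datetime.timedelta(days=1)
    return None


# Hypothetical URL scheme, for illustration only:
# latest = latest_available_day(
#     lambda d: "https://example.com/goes/{:%Y%m%d}.csv".format(d))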
Example #3
def download_sample_data(progress=True):
    """
    Download the sample data.

    Parameters
    ----------
    progress: bool
        Show a progress bar during download

    Returns
    -------
    None
    """
    number_of_files_fetched = 0
    print("Downloading sample files to " + sampledata_dir)
    for base_url in _base_urls:
        for file_name in _files.itervalues():
            full_file_name = file_name[0] + file_name[1]
            if url_exists(os.path.join(base_url, full_file_name)):
                f = download_file(os.path.join(base_url, full_file_name))
                real_name, ext = os.path.splitext(full_file_name)

                if file_name[1] == '.zip':
                    print("Unpacking: %s" % real_name)
                    with ZipFile(f, 'r') as zip_file:
                        zip_file.extract(real_name, sampledata_dir)
                    remove(f)
                else:
                    # move files to the data directory
                    move(f, os.path.join(sampledata_dir, file_name[0]))
                # increment the number of files obtained to check later
                number_of_files_fetched += 1

    if number_of_files_fetched < len(_files.keys()):
        raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
Example #4
def download_sample_data(progress=True, overwrite=True, timeout=None):
    """
    Download the sample data.

    Parameters
    ----------
    progress: `bool`
        Show a progress bar during download
    overwrite: `bool`
        If `True`, re-download and overwrite sample data files that already exist.
    timeout: `float`
        The timeout in seconds. If `None`, the default timeout from
        `astropy.utils.data.Conf.remote_timeout` is used.

    Returns
    -------
    None
    """
    # Creating the directory for sample files to be downloaded
    sampledata_dir = get_and_create_sample_dir()

    number_of_files_fetched = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for file_name in six.itervalues(_files):
        if not overwrite:
            if os.path.isfile(os.path.join(sampledata_dir, file_name[0])):
                number_of_files_fetched += 1
                continue

        for base_url in _base_urls:
            full_file_name = file_name[0] + file_name[1]
            try:
                exists = url_exists(os.path.join(base_url, full_file_name))
                if exists:
                    f = download_file(os.path.join(base_url, full_file_name))
                    real_name, ext = os.path.splitext(full_file_name)

                    if file_name[1] == '.zip':
                        print("Unpacking: {}".format(real_name))
                        with ZipFile(f, 'r') as zip_file:
                            zip_file.extract(real_name, sampledata_dir)
                        os.remove(f)
                    else:
                        # move files to the data directory
                        move(f, os.path.join(sampledata_dir, file_name[0]))
                    # increment the number of files obtained to check later
                    number_of_files_fetched += 1
                    break
            except (socket.error, socket.timeout) as e:
                warnings.warn("Download failed with error {}. \n"
                              "Retrying with different mirror.".format(e))

    if number_of_files_fetched < len(list(_files.keys())):
        raise URLError("Could not download all samples files."
                       "Problem with accessing sample data servers.")
Example #5
def download_sample_data(progress=True, overwrite=True, timeout=None):
    """
    Download the sample data.

    Parameters
    ----------
    progress: `bool`
        Show a progress bar during download
    overwrite: `bool`
        If `True`, re-download and overwrite sample data files that already exist.
    timeout: `float`
        The timeout in seconds. If `None`, the default timeout from
        `astropy.utils.data.Conf.remote_timeout` is used.

    Returns
    -------
    None
    """
    number_of_files_fetched = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for file_name in six.itervalues(_files):
        if not overwrite:
            if os.path.isfile(os.path.join(sampledata_dir,
                                           file_name[0])):
                number_of_files_fetched += 1
                continue

        for base_url in _base_urls:
            full_file_name = file_name[0] + file_name[1]
            try:
                exists = url_exists(os.path.join(base_url, full_file_name))
                if exists:
                    f = download_file(os.path.join(base_url, full_file_name))
                    real_name, ext = os.path.splitext(full_file_name)

                    if file_name[1] == '.zip':
                        print("Unpacking: {}".format(real_name))
                        with ZipFile(f, 'r') as zip_file:
                            zip_file.extract(real_name, sampledata_dir)
                        os.remove(f)
                    else:
                        # move files to the data directory
                        move(f, os.path.join(sampledata_dir, file_name[0]))
                    # increment the number of files obtained to check later
                    number_of_files_fetched += 1
                    break
            except (socket.error, socket.timeout) as e:
                warnings.warn("Download failed with error {}. \n Retrying with different mirror.".format(e))

    if number_of_files_fetched < len(list(_files.keys())):
        raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
Example #6
def download_sample_data():
    """
    Download the sample data.
    
    Parameters
    ----------
    None
    
    Returns
    -------
    None
    """
    print("Downloading sample fiss files to {}".format(sampledir))
    for f in files:
        if url_exists(url + f):
            df = download_file(url + f)
            move(df, os.path.join(sampledir, f))
Example #7
def download_sample_data(progress=True, overwrite=True):
    """
    Download the sample data.

    Parameters
    ----------
    progress: bool
        Show a progress bar during download
    overwrite: bool
        If True, re-download and overwrite sample data files that already exist.

    Returns
    -------
    None
    """
    number_of_files_fetched = 0
    print("Downloading sample files to " + sampledata_dir)
    for file_name in _files.itervalues():
        if not overwrite:
            if os.path.isfile(os.path.join(sampledata_dir, file_name[0])):
                number_of_files_fetched += 1
                continue

        for base_url in _base_urls:
            full_file_name = file_name[0] + file_name[1]
            print(full_file_name)
            if url_exists(os.path.join(base_url, full_file_name)):
                f = download_file(os.path.join(base_url, full_file_name))
                real_name, ext = os.path.splitext(full_file_name)

                if file_name[1] == '.zip':
                    print("Unpacking: %s" % real_name)
                    with ZipFile(f, 'r') as zip_file:
                        zip_file.extract(real_name, sampledata_dir)
                    remove(f)
                else:
                    # move files to the data directory
                    move(f, os.path.join(sampledata_dir, file_name[0]))
                # increment the number of files obtained to check later
                number_of_files_fetched += 1
                break

    if number_of_files_fetched < len(_files.keys()):
        raise URLError(
            "Could not download all samples files. Problem with accessing sample data servers."
        )
Example #8
def download_sample_data(overwrite=True, **kwargs):
    """
    Download the sample data.

    Parameters
    ----------
    progress: bool
        Show a progress bar during download
    overwrite: bool
        If True, re-download and overwrite sample data files that already exist.

    Returns
    -------
    None
    """
    number_of_files_fetched = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for file_name in six.itervalues(_files):
        if not overwrite:
            if os.path.isfile(os.path.join(sampledata_dir,
                                           file_name[0])):
                number_of_files_fetched += 1
                continue

        for base_url in _base_urls:
            full_file_name = file_name[0] + file_name[1]
            if url_exists(os.path.join(base_url, full_file_name)):
                f = download_file(os.path.join(base_url, full_file_name))
                real_name, ext = os.path.splitext(full_file_name)

                if file_name[1] == '.zip':
                    print("Unpacking: {}".format(real_name))
                    with ZipFile(f, 'r') as zip_file:
                        zip_file.extract(real_name, sampledata_dir)
                    os.remove(f)
                else:
                    # move files to the data directory
                    move(f, os.path.join(sampledata_dir, file_name[0]))
                # increment the number of files obtained to check later
                number_of_files_fetched += 1
                break

    if number_of_files_fetched < len(list(_files.keys())):
        raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
Example #9
def get_sample_file(filename,
                    url_list,
                    show_progress=True,
                    overwrite=False,
                    timeout=None):
    """
    Download a sample file. Downloads a sample data file, moves it to the
    sample data directory and uncompresses zip files if necessary.
    Returns the local path if the file exists.

    Parameters
    ----------
    filename: `str`
        Name of the file.
    url_list: `str` or `list`
        URLs to search for the file.
    show_progress: `bool`
        Show a progress bar during download.
    overwrite: `bool`
        If `True`, download and overwrite an existing file.
    timeout: `float`
        The timeout in seconds. If `None`, the default timeout from
        `astropy.utils.data.Conf.remote_timeout` is used.

    Returns
    -------
    result: `str`
        The local path of the file, or `None` if the download failed.
    """

    # Creating the directory for sample files to be downloaded
    sampledata_dir = get_and_create_sample_dir()

    if filename[-3:] == 'zip':
        uncompressed_filename = filename[:-4]
    else:
        uncompressed_filename = filename
    # check if the (uncompressed) file exists
    if not overwrite and os.path.isfile(
            os.path.join(sampledata_dir, uncompressed_filename)):
        return os.path.join(sampledata_dir, uncompressed_filename)
    else:
        # check each provided url to find the file
        for base_url in url_list:
            online_filename = filename
            if base_url.count('github'):
                online_filename += '?raw=true'
            try:
                url = urljoin(base_url, online_filename)
                exists = url_exists(url)
                if exists:
                    f = download_file(os.path.join(base_url, online_filename),
                                      show_progress=show_progress,
                                      timeout=timeout)
                    real_name, ext = os.path.splitext(f)

                    if ext == '.zip':
                        print("Unpacking: {}".format(real_name))
                        with ZipFile(f, 'r') as zip_file:
                            unzipped_f = zip_file.extract(
                                real_name, sampledata_dir)
                        os.remove(f)
                        move(
                            unzipped_f,
                            os.path.join(sampledata_dir,
                                         uncompressed_filename))
                        return os.path.join(sampledata_dir,
                                            uncompressed_filename)
                    else:
                        # move files to the data directory
                        move(
                            f,
                            os.path.join(sampledata_dir,
                                         uncompressed_filename))
                        return os.path.join(sampledata_dir,
                                            uncompressed_filename)
            except (socket.error, socket.timeout) as e:
                warnings.warn(
                    "Download failed with error {}. Retrying with different mirror."
                    .format(e), SunpyUserWarning)
        # if reach here then file has not been downloaded.
        warnings.warn("File {} not found.".format(filename), SunpyUserWarning)
        return None
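
A sketch of how get_sample_file might be called; the mirror URLs and the file name below are placeholders rather than the real sample-data servers:

# Placeholder mirrors and file name, for illustration only.
mirrors = [
    "https://example.com/sunpy/sample-data/",
    "https://mirror.example.org/sunpy/sample-data/",
]

path = get_sample_file("AIA_171_IMAGE.fits.zip", mirrors,
                       show_progress=False, overwrite=False)
if path is None:
    print("Sample file could not be retrieved from any mirror.")
else:
    print("Sample file available at {}".format(path))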
Example #10
def get_sample_file(filename, url_list, show_progress=True, overwrite=False,
                    timeout=None):
    """
    Download a sample file. Downloads a sample data file, moves it to the
    sample data directory and uncompresses zip files if necessary.
    Returns the local path if the file exists.

    Parameters
    ----------
    filename: `str`
        Name of the file.
    url_list: `str` or `list`
        URLs to search for the file.
    show_progress: `bool`
        Show a progress bar during download.
    overwrite: `bool`
        If `True`, download and overwrite an existing file.
    timeout: `float`
        The timeout in seconds. If `None`, the default timeout from
        `astropy.utils.data.Conf.remote_timeout` is used.

    Returns
    -------
    result: `str`
        The local path of the file, or `None` if the download failed.
    """

    # Creating the directory for sample files to be downloaded
    sampledata_dir = get_and_create_sample_dir()

    if filename[-3:] == 'zip':
        uncompressed_filename = filename[:-4]
    else:
        uncompressed_filename = filename
    # check if the (uncompressed) file exists
    if not overwrite and os.path.isfile(os.path.join(sampledata_dir,
                                                     uncompressed_filename)):
        return os.path.join(sampledata_dir, uncompressed_filename)
    else:
        # check each provided url to find the file
        for base_url in url_list:
            online_filename = filename
            if base_url.count('github'):
                online_filename += '?raw=true'
            try:
                url = urljoin(base_url, online_filename)
                exists = url_exists(url)
                if exists:
                    f = download_file(os.path.join(base_url, online_filename),
                                      show_progress=show_progress,
                                      timeout=timeout)
                    real_name, ext = os.path.splitext(f)

                    if ext == '.zip':
                        print("Unpacking: {}".format(real_name))
                        with ZipFile(f, 'r') as zip_file:
                            unzipped_f = zip_file.extract(real_name,
                                                          sampledata_dir)
                        os.remove(f)
                        move(unzipped_f, os.path.join(sampledata_dir,
                                                      uncompressed_filename))
                        return os.path.join(sampledata_dir,
                                            uncompressed_filename)
                    else:
                        # move files to the data directory
                        move(f, os.path.join(sampledata_dir,
                                             uncompressed_filename))
                        return os.path.join(sampledata_dir,
                                            uncompressed_filename)
            except (socket.error, socket.timeout) as e:
                warnings.warn("Download failed with error {}. \n"
                              "Retrying with different mirror.".format(e))
        # if reach here then file has not been downloaded.
        warnings.warn("File {} not found.".format(filename))
        return None