Example no. 1
0
    def from_range(cls, instrument, start, end, **kwargs):
        """Automatically download data from instrument between start and
        end and join it together.

        Parameters
        ----------
        instrument : str
            instrument to retrieve the data from
        start : `~sunpy.time.parse_time` compatible
            start of the measurement
        end : `~sunpy.time.parse_time` compatible
            end of the measurement
        **kwargs
            Forwarded to ``join_many`` and override the defaults below.

        Raises
        ------
        ValueError
            If no data could be combined for the requested range.
        """
        # Defaults for join_many; caller-supplied kwargs take precedence.
        kw = {
            'maxgap': None,
            'fill': cls.JOIN_REPEAT,
        }
        kw.update(kwargs)

        start = parse_time(start)
        end = parse_time(end)
        urls = query(start, end, [instrument])
        data = list(map(cls.from_url, urls))

        # Group the downloaded pieces by their (hashable) frequency axis so
        # that only spectrally compatible pieces are joined together.
        freq_buckets = defaultdict(list)
        for elem in data:
            freq_buckets[tuple(elem.freq_axis)].append(elem)
        try:
            # Plain dict .values() works on both Python 2 and 3; the six-style
            # itervalues() shim is unnecessary here.
            return cls.combine_frequencies([
                cls.join_many(bucket, **kw) for bucket in freq_buckets.values()
            ])
        except ValueError:
            # Surface a clearer message when nothing could be combined.
            raise ValueError("No data found.")
Example no. 2
0
    def from_range(cls, instrument, start, end, **kwargs):
        """Automatically download data from instrument between start and
        end and join it together.

        Parameters
        ----------
        instrument : str
            instrument to retrieve the data from
        start : `~sunpy.time.parse_time` compatible
            start of the measurement
        end : `~sunpy.time.parse_time` compatible
            end of the measurement
        """
        # Merge caller overrides on top of the join defaults.
        join_kwargs = {'maxgap': None, 'fill': cls.JOIN_REPEAT}
        join_kwargs.update(kwargs)

        urls = query(parse_time(start), parse_time(end), [instrument])
        spectrograms = [cls.from_url(url) for url in urls]

        # Bucket the downloaded pieces by frequency axis before joining, so
        # only pieces sharing an axis are stitched together.
        buckets = defaultdict(list)
        for spec in spectrograms:
            buckets[tuple(spec.freq_axis)].append(spec)

        try:
            joined = [
                cls.join_many(pieces, **join_kwargs)
                for pieces in itervalues(buckets)
            ]
            return cls.combine_frequencies(joined)
        except ValueError:
            raise ValueError("No data found.")
Example no. 3
0
def download_sample_data(progress=True, overwrite=True, timeout=None):
    """
    Download the sample data.

    Parameters
    ----------
    progress : `bool`
        Show a progress bar during download.
    overwrite : `bool`
        If `False`, files already present locally are not re-downloaded.
    timeout : `float`
        The timeout in seconds. If `None` the default timeout is used from
        `astropy.utils.data.Conf.remote_timeout`.

    Returns
    -------
    None
    """
    # NOTE(review): `progress` and `timeout` are accepted but never forwarded
    # to `download_file` below — confirm whether they should be passed through.
    # Creating the directory for sample files to be downloaded
    sampledata_dir = get_and_create_sample_dir()

    number_of_files_fetched = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for file_name in six.itervalues(_files):
        # With overwrite disabled, a file already on disk counts as fetched.
        if not overwrite:
            if os.path.isfile(os.path.join(sampledata_dir, file_name[0])):
                number_of_files_fetched += 1
                continue

        # Try each mirror in turn until one serves the file.
        for base_url in _base_urls:
            full_file_name = file_name[0] + file_name[1]
            try:
                exists = url_exists(os.path.join(base_url, full_file_name))
                if exists:
                    f = download_file(os.path.join(base_url, full_file_name))
                    real_name, ext = os.path.splitext(full_file_name)

                    if file_name[1] == '.zip':
                        print("Unpacking: {}".format(real_name))
                        with ZipFile(f, 'r') as zip_file:
                            zip_file.extract(real_name, sampledata_dir)
                        os.remove(f)
                    else:
                        # move files to the data directory
                        move(f, os.path.join(sampledata_dir, file_name[0]))
                    # increment the number of files obtained to check later
                    number_of_files_fetched += 1
                    break
            except (socket.error, socket.timeout) as e:
                warnings.warn("Download failed with error {}. \n"
                              "Retrying with different mirror.".format(e))

    if number_of_files_fetched < len(list(_files.keys())):
        # BUG FIX: the original two-part literal concatenated without a space
        # ("...samples files.Problem..."); a separating space is added.
        raise URLError("Could not download all samples files. "
                       "Problem with accessing sample data servers.")
Example no. 4
0
def download_sample_data(progress=True, overwrite=True, timeout=None):
    """
    Download the sample data.

    Parameters
    ----------
    progress: `bool`
        Show a progress bar during download
    overwrite: `bool`
        If exist overwrites the downloaded sample data.
    timeout: `float`
        The timeout in seconds. If `None` the default timeout is used from
        `astropy.utils.data.Conf.remote_timeout`.

    Returns
    -------
    None
    """
    fetched = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for entry in six.itervalues(_files):
        # With overwrite disabled, a file already on disk counts as fetched.
        if not overwrite and os.path.isfile(
                os.path.join(sampledata_dir, entry[0])):
            fetched += 1
            continue

        remote_name = entry[0] + entry[1]
        # Try the mirrors in order until one of them serves the file.
        for mirror in _base_urls:
            try:
                if not url_exists(os.path.join(mirror, remote_name)):
                    continue
                downloaded = download_file(os.path.join(mirror, remote_name))
                real_name, ext = os.path.splitext(remote_name)

                if entry[1] == '.zip':
                    print("Unpacking: {}".format(real_name))
                    with ZipFile(downloaded, 'r') as zip_file:
                        zip_file.extract(real_name, sampledata_dir)
                    os.remove(downloaded)
                else:
                    # Move the download into the sample-data directory.
                    move(downloaded, os.path.join(sampledata_dir, entry[0]))
                # Count successes so partial failures are reported below.
                fetched += 1
                break
            except (socket.error, socket.timeout) as e:
                warnings.warn("Download failed with error {}. \n Retrying with different mirror.".format(e))

    if fetched < len(list(_files.keys())):
        raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
Example no. 5
0
def download_sample_data(show_progress=True):
    """
    Download all sample data at once. This will overwrite any existing files.

    Parameters
    ----------
    show_progress: `bool`
        Show a progress bar during download

    Returns
    -------
    None
    """
    # Fetch every known sample file, always overwriting local copies.
    for sample_file in six.itervalues(_sample_files):
        get_sample_file(sample_file, url_list=_base_urls,
                        show_progress=show_progress, overwrite=True)
Example no. 6
0
def download_sample_data(progress=True, overwrite=True):
    """
    Download the sample data.

    Parameters
    ----------
    progress: bool
        Show a progress bar during download
    overwrite: bool
        If exist overwrites the downloaded sample data.

    Returns
    -------
    None
    """
    fetched_count = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for entry in six.itervalues(_files):
        # When not overwriting, treat files already on disk as fetched.
        if not overwrite and os.path.isfile(
                os.path.join(sampledata_dir, entry[0])):
            fetched_count += 1
            continue

        remote_name = entry[0] + entry[1]
        # Walk the mirrors in order; stop at the first one with the file.
        for mirror in _base_urls:
            if not url_exists(os.path.join(mirror, remote_name)):
                continue
            local_path = download_file(os.path.join(mirror, remote_name))
            real_name, ext = os.path.splitext(remote_name)

            if entry[1] == '.zip':
                print("Unpacking: {}".format(real_name))
                with ZipFile(local_path, 'r') as zip_file:
                    zip_file.extract(real_name, sampledata_dir)
                os.remove(local_path)
            else:
                # Move the downloaded file into the data directory.
                move(local_path, os.path.join(sampledata_dir, entry[0]))
            # Count successes so partial failures can be reported below.
            fetched_count += 1
            break

    if fetched_count < len(list(_files.keys())):
        raise URLError(
            "Could not download all samples files. Problem with accessing sample data servers."
        )
Example no. 7
0
def download_sample_data(show_progress=True):
    """
    Download all sample data at once. This will overwrite any existing files.

    Parameters
    ----------
    show_progress: `bool`
        Show a progress bar during download

    Returns
    -------
    None
    """
    # Unconditionally re-fetch each sample file from the mirror list.
    for name in six.itervalues(_sample_files):
        get_sample_file(name, overwrite=True,
                        url_list=_base_urls, show_progress=show_progress)
Example no. 8
0
def download_sample_data(overwrite=True, **kwargs):
    """
    Download the sample data.

    Parameters
    ----------
    overwrite : bool
        If `False`, files that already exist locally are not re-downloaded.
    **kwargs
        Accepted for backward compatibility; currently ignored.
        (The previous docstring documented a ``progress`` parameter that the
        signature never accepted — it would silently vanish into ``kwargs``.)

    Returns
    -------
    None
    """
    number_of_files_fetched = 0
    print("Downloading sample files to {}".format(sampledata_dir))
    for file_name in six.itervalues(_files):
        # With overwrite disabled, count pre-existing files as fetched.
        if not overwrite:
            if os.path.isfile(os.path.join(sampledata_dir,
                                           file_name[0])):
                number_of_files_fetched += 1
                continue

        # Try each mirror until one has the file.
        for base_url in _base_urls:
            full_file_name = file_name[0] + file_name[1]
            if url_exists(os.path.join(base_url, full_file_name)):
                f = download_file(os.path.join(base_url, full_file_name))
                real_name, ext = os.path.splitext(full_file_name)

                if file_name[1] == '.zip':
                    print("Unpacking: {}".format(real_name))
                    with ZipFile(f, 'r') as zip_file:
                        zip_file.extract(real_name, sampledata_dir)
                    os.remove(f)
                else:
                    # move files to the data directory
                    move(f, os.path.join(sampledata_dir, file_name[0]))
                # increment the number of files obtained to check later
                number_of_files_fetched += 1
                break

    if number_of_files_fetched < len(list(_files.keys())):
        raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
Example no. 9
0
def _make_transform_graph_docs():
    """
    Generates a string for use with the coordinate package's docstring
    to show the available transforms and coordinate systems
    """
    import copy
    import inspect
    from textwrap import dedent
    from sunpy.extern import six
    from astropy.coordinates.baseframe import (BaseCoordinateFrame,
                                               frame_transform_graph)

    # Prune a deep copy of the global transform graph so astropy's shared
    # state is untouched, keeping only edges whose endpoints mention 'sunpy'.
    graph = copy.deepcopy(frame_transform_graph)
    for src in frame_transform_graph._graph.keys():
        if 'sunpy' not in str(src):
            del graph._graph[src]
            continue
        for dst in frame_transform_graph._graph[src].keys():
            if 'sunpy' not in str(dst):
                del graph._graph[src][dst]

    # TODO: Make this just show the SunPy Frames
    coordinate_systems = [
        obj for obj in list(six.itervalues(globals()))
        if inspect.isclass(obj) and issubclass(obj, BaseCoordinateFrame)
    ]
    graphstr = graph.to_dot_graph(addnodes=coordinate_systems,
                                  priorities=False)

    docstr = """
    The diagram below shows all of the coordinate systems built into the
    `~astropy.coordinates` package, their aliases (useful for converting
    other coordinates to them using attribute-style access) and the
    pre-defined transformations between them.  The user is free to
    override any of these transformations by defining new transformations
    between these systems, but the pre-defined transformations should be
    sufficient for typical usage.

    .. graphviz::

    """

    # Indent the dot graph so it sits inside the `.. graphviz::` directive.
    return dedent(docstr) + '    ' + graphstr.replace('\n', '\n    ')
Example no. 10
0
def _make_transform_graph_docs():
    """
    Generates a string for use with the coordinate package's docstring
    to show the available transforms and coordinate systems.

    Returns a dedented prose preamble followed by a graphviz dot graph
    (indented to sit inside the ``.. graphviz::`` directive) describing
    the sunpy-related frame transformations.
    """
    import inspect
    from textwrap import dedent
    from sunpy.extern import six
    from astropy.coordinates.baseframe import BaseCoordinateFrame, frame_transform_graph

    import copy

    # Work on a deep copy so astropy's shared transform graph is not
    # mutated; only edges whose endpoints mention "sunpy" are kept.
    f = copy.deepcopy(frame_transform_graph)
    for f1 in frame_transform_graph._graph.keys():
        if "sunpy" not in str(f1):
            del f._graph[f1]
        else:
            for f2 in frame_transform_graph._graph[f1].keys():
                if "sunpy" not in str(f2):
                    del f._graph[f1][f2]

    # TODO: Make this just show the SunPy Frames
    isclass = inspect.isclass
    # Collect every BaseCoordinateFrame subclass visible in this module's
    # globals so they can be added as nodes to the diagram.
    coosys = [
        item for item in list(six.itervalues(globals())) if isclass(item) and issubclass(item, BaseCoordinateFrame)
    ]
    graphstr = f.to_dot_graph(addnodes=coosys, priorities=False)

    docstr = """
    The diagram below shows all of the coordinate systems built into the
    `~astropy.coordinates` package, their aliases (useful for converting
    other coordinates to them using attribute-style access) and the
    pre-defined transformations between them.  The user is free to
    override any of these transformations by defining new transformations
    between these systems, but the pre-defined transformations should be
    sufficient for typical usage.

    .. graphviz::

    """

    # Re-indent the graph body so every line lies inside the directive.
    return dedent(docstr) + "    " + graphstr.replace("\n", "\n    ")
Example no. 11
0
 def __hash__(self):
     """Hash this instance by the tuple of its attribute values."""
     # vars(self) is the instance __dict__; every attribute value must
     # itself be hashable for this to succeed.
     return hash(tuple(vars(self).values()))
Example no. 12
0
 def __hash__(self):
     # Hash the instance by the tuple of its attribute values (vars(self)
     # is the instance __dict__); every attribute must be hashable.
     return hash(tuple(six.itervalues(vars(self))))