Example #1
    def __init__(self, data, meta=None, units=None, **kwargs):
        self._data = data
        tr = self.time_range
        # Check metadata input
        if meta is None:
            # No meta given, so default
            self.meta = TimeSeriesMetaData(MetaDict(), tr, self.columns)
        elif isinstance(meta, (dict, OrderedDict, MetaDict)):
            # Given the metadata values as a dict; infer the timerange and colnames from the data
            self.meta = TimeSeriesMetaData(meta, tr, self.columns)
        elif isinstance(meta, tuple):
            # Given the values all in a tuple
            self.meta = TimeSeriesMetaData(meta, tr, self.columns)
        else:
            # Should have a list of 3-tuples giving a complex metadata list.
            self.meta = meta

        if units is None:
            self.units = {}
        else:
            self.units = units

        for col in self.columns:
            if col not in self.units:
                warn_user(f'Unknown units for {col}')
                self.units[col] = u.dimensionless_unscaled
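
A minimal usage sketch (the column name and values are illustrative): passing an explicit units mapping avoids the "Unknown units" warning emitted above.

import astropy.units as u
import pandas as pd
from sunpy.timeseries import GenericTimeSeries

# Hypothetical data: a single column with a known physical unit
df = pd.DataFrame({'flux': [1.0, 2.0, 3.0]},
                  index=pd.date_range('2020-01-01', periods=3, freq='min'))
ts = GenericTimeSeries(df, units={'flux': u.W / u.m**2})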
Example #2
    def _parse_args(self, *args, silence_errors=False, **kwargs):
        """
        Parses an args list into data-header pairs.

        args can contain any mixture of the following entries:
        * tuples of (data, header)
        * data, header not in a tuple
        * data, wcs object in a tuple
        * data, wcs object not in a tuple
        * filename, as a str or pathlib.Path, which will be read
        * directory, as a str or pathlib.Path, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Examples
        --------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')
        """
        # Account for nested lists of items
        args = expand_list(args)

        # Sanitise the input so that each 'type' of input corresponds to a different
        # class, so single dispatch can be used later
        nargs = len(args)
        i = 0
        while i < nargs:
            arg = args[i]
            if isinstance(arg, SUPPORTED_ARRAY_TYPES):
                # The next two items are data and a header
                data = args.pop(i)
                header = args.pop(i)
                args.insert(i, (data, header))
                nargs -= 1
            elif isinstance(arg, str) and is_url(arg):
                # Replace the URL string with a Request object to dispatch on later
                args[i] = Request(arg)
            elif possibly_a_path(arg):
                # Replace path strings with Path objects
                args[i] = pathlib.Path(arg)
            i += 1

        # Parse the arguments
        # Note that this list can also contain GenericMaps if they are directly given to the factory
        data_header_pairs = []
        for arg in args:
            try:
                data_header_pairs += self._parse_arg(arg, **kwargs)
            except NoMapsInFileError as e:
                if not silence_errors:
                    raise
                warn_user(
                    f"One of the arguments failed to parse with error: {e}")

        return data_header_pairs
Example #3
def copy_default_config(overwrite=False):
    """
    Copies the default sunpy config file to the user's config directory.

    Parameters
    ----------
    overwrite : `bool`
        If `True`, the existing config file will be backed up and overwritten.
    """
    config_filename = 'sunpyrc'
    config_file = Path(sunpy.__file__).parent / 'data' / config_filename
    user_config_dir = Path(_get_user_configdir())
    user_config_file = user_config_dir / config_filename

    if not _is_writable_dir(user_config_dir):
        raise RuntimeError(
            f'Could not write to SunPy config directory {user_config_dir}')

    if user_config_file.exists():
        if overwrite:
            message = "User config file already exists. " \
                      "This will be overwritten with a backup written in the same location."
            warn_user(message)
            os.rename(str(user_config_file), str(user_config_file) + ".bak")
            shutil.copyfile(config_file, user_config_file)
        else:
            message = "User config file already exists. " \
                      "To overwrite it use `copy_default_config(overwrite=True)`"
            warn_user(message)
    else:
        shutil.copyfile(config_file, user_config_file)
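
A usage sketch: refreshing the user's configuration while keeping a backup, per the overwrite branch above.

copy_default_config(overwrite=True)  # renames the old file to 'sunpyrc.bak', then copies the default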
Example #4
 def data(self):
     """
     A `pandas.DataFrame` representing one or more fields as a function of time.
     """
     warn_user("Using .data to access the dataframe is discouraged; "
               "use .to_dataframe() instead.")
     return self._data
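
The suggested migration, assuming ``ts`` is an existing time series object:

df = ts.to_dataframe()  # preferred; no warning
df = ts.data            # still works, but emits the warning above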
Example #5
    def _download_and_hash(self, urls, namespace=''):
        """
        Downloads a file from one of the given URLs and returns the path, hash and URL used.

        Parameters
        ----------
        urls : `list`
            List of URLs to try, in order.

        Returns
        -------
        `str`, `str`, `str`
            Path, hash and URL of the file.
        """
        def download(url):
            path = self._cache_dir / (namespace +
                                      get_filename(urlopen(url), url))
            self._downloader.download(url, path)
            shahash = hash_file(path)
            return path, shahash, url

        errors = []
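        # Note: the ``else`` below belongs to the ``for`` loop and runs only
        # if no download succeeded (a success returns out of the loop)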
        for url in urls:
            try:
                return download(url)
            except Exception as e:
                warn_user(f"{e}")
                errors.append(f"{e}")
        else:
            raise RuntimeError(errors)
Example #6
    def make_3d(self):
        """
        This method calculates the third coordinate of the Helioprojective
        frame. It assumes that the coordinate point is on the surface of the Sun.

        If a point in the frame is off limb then NaN will be returned.

        Returns
        -------
        new_frame : `~sunpy.coordinates.frames.Helioprojective`
            A new frame instance with all the attributes of the original but
            now with a third coordinate.
        """
        # Skip if we already are 3D
        if not self._is_2d:
            return self

        if not isinstance(self.observer, BaseCoordinateFrame):
            raise ConvertError("Cannot calculate distance to the Sun "
                               f"for observer '{self.observer}' "
                               "without `obstime` being specified.")

        rep = self.represent_as(UnitSphericalRepresentation)
        lat, lon = rep.lat, rep.lon

        # Check for the use of floats with lower precision than the native Python float
        if not set([lon.dtype.type, lat.dtype.type]).issubset(
            [float, np.float64, np.longdouble]):
            warn_user(
                "The Helioprojective component values appear to be lower "
                "precision than the native Python float: "
                f"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. "
                "To minimize precision loss, you may want to cast the values to "
                "`float` or `numpy.float64` via the NumPy method `.astype()`.")

        # Calculate the distance to the surface of the Sun using the law of cosines
        cos_alpha = np.cos(lat) * np.cos(lon)
        c = self.observer.radius**2 - self.rsun**2
        b = -2 * self.observer.radius * cos_alpha
        # Ignore sqrt of NaNs
        with np.errstate(invalid='ignore'):
            d = ((-1 * b) -
                 np.sqrt(b**2 - 4 * c)) / 2  # use the "near" solution

        if self._spherical_screen:
            sphere_center = self._spherical_screen['center'].transform_to(
                self).cartesian
            c = sphere_center.norm()**2 - self._spherical_screen['radius']**2
            b = -2 * sphere_center.dot(rep)
            # Ignore sqrt of NaNs
            with np.errstate(invalid='ignore'):
                dd = ((-1 * b) +
                      np.sqrt(b**2 - 4 * c)) / 2  # use the "far" solution

            d = np.fmin(d,
                        dd) if self._spherical_screen['only_off_disk'] else dd

        return self.realize_frame(
            SphericalRepresentation(lon=lon, lat=lat, distance=d))
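
A usage sketch (observer, obstime and coordinate values are illustrative): converting a 2D helioprojective point to 3D.

import astropy.units as u
from astropy.coordinates import SkyCoord
from sunpy.coordinates import Helioprojective

hpc = SkyCoord(123 * u.arcsec, 456 * u.arcsec,
               frame=Helioprojective(observer='earth', obstime='2020-01-01'))
hpc_3d = hpc.frame.make_3d()  # now carries a distance coordinate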
Example #7
def check_connection(url):
    try:
        return urlopen(url).getcode() == 200
    except (socket.error, socket.timeout, HTTPError, URLError) as e:
        warn_user(
            f"Connection to {url} failed with error {e}. Retrying with different url and port."
        )
        return None
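
Usage is a simple truthiness check (the URL is hypothetical):

if not check_connection("https://example.org/data"):
    print("falling back to a mirror")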
Example #8
def read(filepath, hdus=None, memmap=None, **kwargs):
    """
    Read a FITS file.

    Parameters
    ----------
    filepath : `str`
        The FITS file to be read.
    hdus : `int` or iterable
        The HDU indexes to read from the file.
    **kwargs : `dict`, optional
        Passed to `astropy.io.fits.open`.

    Returns
    -------
    `list`
        A list of (data, header) tuples

    Notes
    -----
    This routine reads all the HDUs in a FITS file and returns a list of the
    data and a FileHeader instance for each one.

    Also, all comments in the original file are concatenated into a single
    "comment" key in the returned FileHeader.
    """
    with fits.open(filepath, ignore_blank=True, memmap=memmap,
                   **kwargs) as hdulist:
        if hdus is not None:
            if isinstance(hdus, int):
                hdulist = hdulist[hdus]
            elif isinstance(hdus, collections.abc.Iterable):
                hdulist = [hdulist[i] for i in hdus]

        hdulist = fits.hdu.HDUList(hdulist)
        for h in hdulist:
            h.verify('silentfix+warn')

        headers = get_header(hdulist)
        pairs = []

        for i, (hdu, header) in enumerate(zip(hdulist, headers)):
            try:
                pairs.append(HDPair(hdu.data, header))
            except (KeyError, ValueError) as e:
                message = f"Error when reading HDU {i}. Skipping.\n"
                for line in traceback.format_tb(sys.exc_info()[2]):
                    message += line
                    message += '\n'
                message += repr(e)
                warn_user(message)

    return pairs
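
A usage sketch with a hypothetical file, reading only the first two HDUs:

pairs = read('observation.fits', hdus=[0, 1])
for data, header in pairs:
    print(header.get('comment', ''))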
Example #9
def _rotation_cv2(image, matrix, shift, order, missing, clip):
    """
    * Rotates using |cv2_warpAffine|_ from `OpenCV <https://opencv.org>`__
    * The ``order`` parameter selects from the following interpolation algorithms:

      * 0: nearest-neighbor interpolation
      * 1: bilinear interpolation
      * 3: bicubic interpolation

    * An input image with byte ordering that does not match the native byte order of
      the system (e.g., big-endian values on a little-endian system) will be
      copied and byte-swapped prior to rotation.
    * An input image with integer data is cast to floats prior to passing to
      |cv2_warpAffine|_.  The output image can be re-cast using
      :meth:`numpy.ndarray.astype` if desired.
    """
    try:
        import cv2
    except ImportError:
        raise ImportError(
            "The opencv-python package is required to use this rotation method."
        )

    _CV_ORDER_FLAGS = {
        0: cv2.INTER_NEAREST,
        1: cv2.INTER_LINEAR,
        3: cv2.INTER_CUBIC,
    }
    order_to_use = _CV_ORDER_FLAGS[order]

    if issubclass(image.dtype.type, numbers.Integral):
        warn_user("Integer input data has been cast to float64.")
        adjusted_image = image.astype(np.float64)
    else:
        adjusted_image = image.copy()

    trans = np.concatenate([matrix, shift[:, np.newaxis]], axis=1)

    # Swap the byte order if it is non-native (e.g., big-endian on a little-endian system)
    if adjusted_image.dtype.byteorder == ('>' if sys.byteorder == 'little' else
                                          '<'):
        adjusted_image = adjusted_image.byteswap().newbyteorder()

    # missing must be a Python float, not a NumPy float
    missing = float(missing)

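    # WARP_INVERSE_MAP tells OpenCV that ``trans`` maps output pixels back to
    # input pixels (an inverse mapping); BORDER_CONSTANT fills pixels that
    # fall outside the input with ``missing``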
    return cv2.warpAffine(adjusted_image,
                          trans,
                          np.flip(adjusted_image.shape),
                          flags=order_to_use | cv2.WARP_INVERSE_MAP,
                          borderMode=cv2.BORDER_CONSTANT,
                          borderValue=missing)
Example #10
def _get_new_observer(initial_obstime, observer, time):
    """
    Helper function that interprets the possible ways of specifying the
    input to the solar coordinate rotation function.

    If the "observer" argument is not `None`, it is used to specify the location
    of the new observer in space and time.

    If the "time" argument is not `None`, it is used to calculate the duration
    over which to the amount of solar rotation is calculated. Note that using
    the "time" keyword assumes that the new observer is on the Earth. This may
    be a reasonable assumption depending on the application.

    Either the "observer" or "time" argument must be specified, but both
    cannot be specified at the same time and both cannot be None.

    Parameters
    ----------
    initial_obstime : `~astropy.time.Time`
        The initial time before solar rotation has been applied.

    observer : `~astropy.coordinates.BaseCoordinateFrame`, `~astropy.coordinates.SkyCoord`, None
        The location of the new observer in space and time (the observer must have an
        interpretable obstime property).

    time : `~astropy.time.Time`, `~astropy.time.TimeDelta`, `~astropy.units.Quantity`, None
        Used to define the duration over which the amount of solar rotation is
        calculated.  If 'time' is an `~astropy.time.Time` then the time interval is
        "time - initial_obstime"; if 'time' is `~astropy.time.TimeDelta` or
        `~astropy.units.Quantity` then the calculation is "initial_obstime + time".

    Returns
    -------
    new_observer : `~astropy.coordinates.SkyCoord`, `~astropy.coordinates.BaseCoordinateFrame`
        The position of the observer in space and time. If the "time" keyword is used
        the output is an `~astropy.coordinates.SkyCoord`. If the "observer" keyword
        is not None the output has the same type as the "observer" keyword.  In all cases
        the output is specified in the heliographic Stonyhurst coordinate system.
    """
    _validate_observer_args(initial_obstime, observer, time)
    # Check the input and create the new observer
    if observer is not None:
        new_observer = observer
    elif time is not None:
        warn_user("Using 'time' assumes an Earth-based observer.")
        if isinstance(time, (TimeDelta, u.Quantity)):
            new_observer_time = initial_obstime + time
        else:
            new_observer_time = parse_time(time)
        new_observer = get_earth(new_observer_time)
    return new_observer
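
A sketch of the "time" calling convention for this helper (the date is illustrative; assumes the module context above):

import astropy.units as u
from sunpy.time import parse_time

t0 = parse_time('2020-01-01')
# Specifying an elapsed time assumes an Earth-based observer and warns
new_obs = _get_new_observer(t0, observer=None, time=1 * u.day)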
Example #11
    def _validate_meta(self):
        """
        Validates the meta-information associated with a
        `~sunpy.timeseries.TimeSeries`.

        This method includes very basic validation checks which apply to
        all of the kinds of files that SunPy can read. Datasource-
        specific validation should be handled in the relevant file in
        the "sunpy.timeseries.sources".
        """
        for meta_property in ('cunit1', 'cunit2', 'waveunit'):
            if (self.meta.get(meta_property) and
                    u.Unit(self.meta.get(meta_property),
                           parse_strict='silent').physical_type == 'unknown'):

                warn_user(f"Unknown value for {meta_property.upper()}.")
Example #12
    def _validate_units(self, units, **kwargs):
        """
        Validates the astropy unit-information associated with a
        `~sunpy.timeseries.TimeSeries`.

        This method includes very basic validation checks which apply to
        all of the kinds of files that SunPy can read. Datasource-
        specific validation should be handled in the relevant file in
        the "sunpy.timeseries.sources".
        """
        result = True
        for key in units:
            if not isinstance(units[key], astropy.units.UnitBase):
                # If this is not a unit then this can't be a valid units dict.
                result = False
                warn_user(f"Invalid unit given for {key}.")

        return result
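
A sketch of what passes and fails validation (``ts`` is a hypothetical time series instance):

import astropy.units as u

ts._validate_units({'flux': u.W / u.m**2})  # True
ts._validate_units({'flux': 'watts'})       # False; warns "Invalid unit given for flux."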
Example #13
 def __init__(self, pattern, regex=False, **kwargs):
     if regex:
         self.pattern = pattern
         if kwargs:
             warn_user('A regex pattern is being used; the extra keyword arguments are being ignored')
     else:
         self.pattern = pattern.format(**kwargs)
     self.domain = "{0.scheme}://{0.netloc}/".format(urlsplit(self.pattern))
     milliseconds = re.search(r'\%e', self.pattern)
     if not milliseconds:
         self.now = datetime.now().strftime(self.pattern)
     else:
         now = datetime.now()
         milliseconds_ = int(now.microsecond / 1000.)
         self.now = now.strftime('{start}{milli:03d}{end}'.format(
             start=self.pattern[0:milliseconds.start()],
             milli=milliseconds_,
             end=self.pattern[milliseconds.end():]
         ))
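
A usage sketch, assuming this is sunpy's ``Scraper`` class, with a hypothetical archive URL; the pattern is expanded with ``strftime`` codes, including the non-standard ``%e`` (milliseconds) handled above.

s = Scraper('https://example.org/archive/%Y/%m/%d/img_%Y%m%d_%H%M%S.fits')
print(s.domain)  # 'https://example.org/'
print(s.now)     # the pattern evaluated at the current time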
Example #14
    def _validate_meta(self, meta):
        """
        Validate a metadata argument.
        """
        # Checking for metadata that may overlap.
        indices = range(len(self.metadata))
        for i, j in itertools.combinations(indices, 2):
            # Check if the TimeRanges overlap
            if not ((self.metadata[i][0].end <= self.metadata[j][0].start) or
                    (self.metadata[i][0].start >= self.metadata[j][0].end)):
                # Check column headings overlap
                col_overlap = list(
                    set(self.metadata[i][1]) & set(self.metadata[j][1]))
                # If we have an overlap then show a warning
                if col_overlap:
                    warn_user(
                        f'Metadata entries {i} and {j} contain interleaved data.'
                    )

        # TODO: Check all entries are in tr.start time order.
        return True
Example #15
 def __init__(self, meta=None, timerange=None, colnames=None):
     self.metadata = []
     # Parse in arguments
     if meta is not None:
         if (isinstance(meta, (dict, MetaDict))
                 and isinstance(timerange, TimeRange)
                 and isinstance(colnames, list)):
             # Given a single metadata entry as a dictionary with additional timerange and colnames.
             self.metadata.append((timerange, colnames, meta))
         elif isinstance(meta, tuple):
             # Given a single metadata entry as a tuple.
             if isinstance(meta[0], TimeRange) and isinstance(
                     meta[1], list) and isinstance(meta[2],
                                                   (dict, MetaDict)):
                 self.metadata.append(meta)
             else:
                 raise ValueError("Invalid parameters passed in the meta")
         elif isinstance(meta, list):
             # Given a complex metadata list (of tuples)
             for meta_tuple in meta:
                 if isinstance(meta_tuple[0], TimeRange) and isinstance(
                         meta_tuple[1], list) and isinstance(
                             meta_tuple[2], (dict, MetaDict)):
                     self.metadata.append(meta_tuple)
                 else:
                     raise ValueError(
                         "Invalid parameters passed in the meta")
     else:
         # In the event no metadata dictionary is sent we default to something usable
         if isinstance(timerange, TimeRange):
             if isinstance(colnames, list):
                 self.metadata.append((timerange, colnames, MetaDict()))
             else:
                 self.metadata.append((timerange, [], MetaDict()))
                 warn_user("No time range given for metadata. "
                           "This will mean the metadata can't be linked "
                           "to columns in data.")
         else:
             raise ValueError("You cannot create a TimeSeriesMetaData "
                              "object without specifying a TimeRange")
Example #16
def _get_transform_method(method, use_scipy):
    # This is re-used in affine_transform and GenericMap.rotate
    if method not in _rotation_registry:
        raise ValueError(f'Method {method} not in supported methods: '
                         f'{_rotation_registry.keys()}')

    if use_scipy is not None:
        warn_deprecated("The 'use_scipy' argument is deprecated. "
                        "Specify the rotation method to the 'method' "
                        "keyword argument instead.")
        if use_scipy is True and method != 'scipy':
            warn_user(f"Using scipy instead of {method} for rotation.")
            method = 'scipy'

    if method == 'scikit-image':
        try:
            import skimage  # NoQA
        except ImportError:
            raise ImportError(
                "scikit-image must be installed to be usable for rotation.")

    return method
Example #17
def _get_transform_method(method, use_scipy):
    # This is re-used in affine_transform and GenericMap.rotate
    supported_methods = {'scipy', 'skimage'}
    if method not in supported_methods:
        raise ValueError(f'Method {method} not in supported methods: {supported_methods}')

    if use_scipy is not None:
        warn_deprecated("The 'use_scipy' argument is deprecated. "
                        "Specify the rotation method to the 'method' "
                        "keyword argument instead.")
        if use_scipy is True and method != 'scipy':
            warn_user(f"Using scipy instead of {method} for rotation.")
            method = 'scipy'

    if method == 'skimage':
        try:
            import skimage  # NoQA
        except ImportError:
            warn_user("scikit-image could not be imported. Image rotation will use scipy.")
            method = 'scipy'

    return method
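
A behaviour sketch for this variant: the scikit-image check degrades to scipy instead of raising.

method = _get_transform_method('skimage', use_scipy=None)
# -> 'skimage' if scikit-image is importable, otherwise 'scipy' (with a warning)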
Example #18
    def _sanitize_units(self, **kwargs):
        """
        Sanitizes the `collections.OrderedDict` used to store the units.

        Primarily this method will:

        * Remove entries that don't match up to a column.
        * Add unitless entries for columns with no units defined.
        * Re-arrange the order of the dictionary to match the columns.
        """
        # Populate unspecified units:
        for column in set(self.columns) - set(self.units.keys()):
            # For all columns not present in the units dictionary.
            self.units[column] = u.dimensionless_unscaled
            warn_user(f"Unknown units for {column}.")

        # Re-arrange so it's in the same order as the columns and remove unused entries.
        units = OrderedDict()
        for column in self.columns:
            units.update({column: self.units[column]})

        # Now use the amended units Ordered Dictionary
        self.units = units
Example #19
def check_for_nonfinite_entries(layer_image, template_image):
    """
    Issue a warning if there is any nonfinite entry in the layer or template images.

    Parameters
    ----------
    layer_image : `numpy.ndarray`
        A two-dimensional `numpy.ndarray`.
    template_image : `numpy.ndarray`
        A two-dimensional `numpy.ndarray`.
    """
    if not np.all(np.isfinite(layer_image)):
        warn_user('The layer image has nonfinite entries. '
                  'This could cause errors when calculating shift between two '
                  'images. Please make sure there are no infinity or '
                  'Not a Number values, for instance by replacing them with a '
                  'local mean.')

    if not np.all(np.isfinite(template_image)):
        warn_user('The template image has nonfinite entries. '
                  'This could cause errors when calculating shift between two '
                  'images. Please make sure there are no infinity or '
                  'Not a Number values, for instance by replacing them with a '
                  'local mean.')
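
A minimal trigger for the warning (arrays are illustrative):

import numpy as np

layer = np.array([[1.0, np.nan], [3.0, 4.0]])
template = np.ones((2, 2))
check_for_nonfinite_entries(layer, template)  # warns about the layer image only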
Example #20
    def _repr_html_(self):
        nmaps = len(self)

        # Output a warning about rendering time if there are more than 9 Maps
        if nmaps > 9:
            warn_user(
                f"Rendering the summary for a MapSequence of {nmaps} Maps "
                "may take a while.")

        # Assemble the individual HTML repr from each Map, all hidden initially
        repr_list = [
            f"<div style='display: none' index={i}>{m._repr_html_()}</div>"
            for i, m in enumerate(self.maps)
        ]

        # Unhide the first Map
        repr_list_html = "\n".join(repr_list).replace('display: none',
                                                      'display: ', 1)

        # Return HTML with Javascript-powered buttons
        # To avoid potential conflicts, the Javascript code does not use any user-defined functions
        return textwrap.dedent(f"""\
            <pre>{html.escape(self.__repr__())}</pre>
            <form cur_index=0 max_index={nmaps - 1}>
                <!-- Button to decrement index (always starts disabled) -->
                <input type=button value='&larr;' style='font-weight: bold' disabled onClick='
                    var form = this.parentElement;

                    // Decrement index if allowed
                    var cur_index = Math.max(
                                        parseInt(form.getAttribute("cur_index")) - 1,
                                        0
                                    );
                    form.setAttribute("cur_index", cur_index);

                    // Enable the decrement button if and only if this is not the first Map
                    form.children[0].disabled = (cur_index == 0);

                    // Always enable the increment button (because we just decremented)
                    form.children[1].disabled = false;

                    // Update string (which is children[2] of the form)
                    form.children[2].innerHTML = "Map at index " + cur_index;

                    // Update visibilities to show only the current index
                    // This avoids for...of syntax to retain support for ES5 browsers (e.g., IE11)
                    var array = Array.prototype.slice.call(form.lastElementChild.children);
                    array.forEach(function (elem)
                        {{
                            var form = elem.parentElement.parentElement;
                            elem.style.display = (elem.getAttribute("index") ==
                                                      form.getAttribute("cur_index") ? "" : "none"
                                                 );
                        }}
                    );
                '/>

                <!-- Button to increment index (starts enabled if there is more than one Map) -->
                <input type=button value='&rarr;' style='font-weight: bold'
                    {"" if nmaps > 1 else "disabled"} onClick='

                    var form = this.parentElement;

                    // Increment index if allowed
                    var cur_index = Math.min(
                                        parseInt(form.getAttribute("cur_index")) + 1,
                                        form.getAttribute("max_index")
                                    );
                    form.setAttribute("cur_index", cur_index);

                    // Always enable the decrement button (because we just incremented)
                    form.children[0].disabled = false;

                    // Enable the increment button if and only if this is not the last Map
                    form.children[1].disabled = (cur_index == form.getAttribute("max_index"));

                    // Update string (which is children[2] of the form)
                    form.children[2].innerHTML = "Map at index " + cur_index;

                    // Update visibilities to show only the current index
                    // This avoids for...of syntax to retain support for ES5 browsers (e.g., IE11)
                    var array = Array.prototype.slice.call(form.lastElementChild.children);
                    array.forEach(function (elem)
                        {{
                            var form = elem.parentElement.parentElement;
                            elem.style.display = (elem.getAttribute("index") ==
                                                      form.getAttribute("cur_index") ? "" : "none"
                                                 );
                        }}
                    );

                '/>

                <!-- This string is updated as the index is changed -->
                <span>Map at index 0</span>

                <!-- This element is at the end so that lastElementChild will point to it -->
                <div>
                    {repr_list_html}
                </div>
            </form>""")
Example #21
def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
                     recenter=False, missing=0.0, use_scipy=None, *, method='skimage'):
    """
    Rotates, shifts and scales an image.

    Will use `skimage.transform.warp` unless scikit-image can't be imported,
    in which case it will use `scipy.ndimage.affine_transform`.

    Parameters
    ----------
    image : `numpy.ndarray`
        2D image to be rotated.
    rmatrix : `numpy.ndarray` that is 2x2
        Linear transformation rotation matrix.
    order : `int` 0-5, optional
        Interpolation order to be used, defaults to 3. When using scikit-image this parameter
        is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
        When using scipy it is passed into
        `scipy.ndimage.affine_transform` where it controls the order of the spline.
    scale : `float`
        A scale factor for the image with the default being no scaling.
    image_center : tuple, optional
        The point in the image to rotate around (axis of rotation).
        Defaults to the center of the array.
    recenter : `bool` or array-like, optional
        Move the axis of rotation to the center of the array or recenter coords.
        Defaults to `False`, i.e., rotate around ``image_center``.
    missing : `float`, optional
        The value to replace any missing data after the transformation.
    method : {'skimage', 'scipy'}
        Transform function to use. Currently
        :func:`scipy.ndimage.affine_transform` and
        :func:`skimage.transform.warp` are supported.
        Defaults to 'skimage', unless scikit-image can't be imported.

    Returns
    -------
    `numpy.ndarray`:
        New rotated, scaled and translated image.

    Notes
    -----
    This algorithm uses an affine transformation as opposed to a polynomial
    geometrical transformation, which by default is `skimage.transform.warp`.
    One can specify using `scipy.ndimage.affine_transform` as
    an alternative affine transformation. The two transformations use different
    algorithms and thus do not give identical output.

    When using `skimage.transform.warp` with order >= 4 or using
    `scipy.ndimage.affine_transform` at all, "NaN" values will be replaced with
    zero prior to rotation. No attempt is made to retain the "NaN" values.

    Input arrays with integer data are cast to float64 and can be re-cast using
    `numpy.ndarray.astype` if desired.

    In the case of `skimage.transform.warp`, the image is normalized to [0, 1]
    before passing it to the function. It is later rescaled back to the original range.

    Although this function is analogous to IDL's ``rot`` function, it does not
    use the same algorithm.
    IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
    method to calculate the inverse mapping of original to target pixel
    coordinates. This is a polynomial geometrical transformation.
    Then optionally it uses a bicubic convolution interpolation
    algorithm to map the original to target pixel values.
    """
    rmatrix = rmatrix / scale
    array_center = (np.array(image.shape)[::-1] - 1) / 2.0

    # Make sure the image center is an array and is where it's supposed to be
    if image_center is not None:
        image_center = np.asanyarray(image_center)
    else:
        image_center = array_center

    # Determine center of rotation based on use (or not) of the recenter keyword
    if recenter:
        rot_center = array_center
    else:
        rot_center = image_center

    displacement = np.dot(rmatrix, rot_center)
    shift = image_center - displacement

    method = _get_transform_method(method, use_scipy)
    if method == 'scipy':
        if np.any(np.isnan(image)):
            warn_user("Setting NaNs to 0 for SciPy rotation.")
        # Transform the image using the scipy affine transform
        rotated_image = scipy.ndimage.affine_transform(
            np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
            mode='constant', cval=missing).T
    else:
        import skimage.transform

        # Make the rotation matrix 3x3 to include translation of the image
        skmatrix = np.zeros((3, 3))
        skmatrix[:2, :2] = rmatrix
        skmatrix[2, 2] = 1.0
        skmatrix[:2, 2] = shift
        tform = skimage.transform.AffineTransform(skmatrix)

        if issubclass(image.dtype.type, numbers.Integral):
            warn_user("Integer input data has been cast to float64.")
            adjusted_image = image.astype(np.float64)
        else:
            adjusted_image = image.copy()
        if np.any(np.isnan(adjusted_image)) and order >= 4:
            warn_user("Setting NaNs to 0 for higher-order scikit-image rotation.")
            adjusted_image = np.nan_to_num(adjusted_image)

        # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)
        is_nan_image = np.all(np.isnan(adjusted_image))
        if is_nan_image:
            adjusted_missing = missing
        else:
            im_min = np.nanmin(adjusted_image)
            adjusted_image -= im_min
            im_max = np.nanmax(adjusted_image)
            if im_max > 0:
                adjusted_image /= im_max
                adjusted_missing = (missing - im_min) / im_max
            else:
                # The input array is all one value (aside from NaNs), so no scaling is needed
                adjusted_missing = missing - im_min

        rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,
                                               mode='constant', cval=adjusted_missing)

        # Convert the image back to its original range if it is valid
        if not is_nan_image:
            if im_max > 0:
                rotated_image *= im_max
            rotated_image += im_min

    return rotated_image
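
A usage sketch rotating an image by 45 degrees (the image and angle are illustrative):

import numpy as np

image = np.random.default_rng(0).random((100, 100))
theta = np.deg2rad(45)
rmatrix = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
rotated = affine_transform(image, rmatrix, order=3, missing=np.nan)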
Example #22
def _rotation_skimage(image, matrix, shift, order, missing, clip):
    """
    * Rotates using :func:`skimage.transform.warp`
    * The ``order`` parameter selects from the following interpolation algorithms:

      * 0: nearest-neighbor
      * 1: bi-linear
      * 2: bi-quadratic
      * 3: bi-cubic
      * 4: bi-quartic
      * 5: bi-quintic

    * The implementation for higher orders of interpolation means that the pixels
      in the output image that are beyond the extent of the input image may not have
      exactly the value of the ``missing`` parameter.
    * An input image with byte ordering that does not match the native byte order of
      the system (e.g., big-endian values on a little-endian system) will be
      copied and byte-swapped prior to rotation.
    * An input image with integer data is cast to floats prior to passing to
      :func:`~skimage.transform.warp`.  The output image can be re-cast using
      :meth:`numpy.ndarray.astype` if desired.
    * Does not let :func:`~skimage.transform.warp` handle clipping due to
      inconsistent handling across interpolation orders
    * Does not let :func:`~skimage.transform.warp` handle image NaNs because they
      are not handled properly for some interpolation orders
    * Does not pass NaN as ``missing`` to :func:`~skimage.transform.warp` due to
      inconsistent handling across interpolation orders
    """
    import skimage.transform

    # Make the rotation matrix 3x3 to include translation of the image
    skmatrix = np.zeros((3, 3))
    skmatrix[:2, :2] = matrix
    skmatrix[2, 2] = 1.0
    skmatrix[:2, 2] = shift
    tform = skimage.transform.AffineTransform(skmatrix)

    if issubclass(image.dtype.type, numbers.Integral):
        warn_user("Integer input data has been cast to float64.")
        adjusted_image = image.astype(np.float64)
    else:
        adjusted_image = image.copy()

    # Scale image to range [0, 1]
    im_min = np.nanmin([missing, np.min(adjusted_image)])
    adjusted_image -= im_min
    adjusted_missing = missing - im_min
    im_max = np.nanmax([adjusted_missing, np.max(adjusted_image)])
    if im_max > 0:
        adjusted_image /= im_max
        adjusted_missing /= im_max

    # Swap the byte order if it is non-native (e.g., big-endian on a little-endian system)
    if adjusted_image.dtype.byteorder == ('>' if sys.byteorder == 'little' else
                                          '<'):
        adjusted_image = adjusted_image.byteswap().newbyteorder()

    # Be aware that even though mode is set to 'constant', when skimage 0.19 calls scipy,
    # it specifies the scipy mode to be 'grid-constant' rather than 'constant'
    rotated_image = skimage.transform.warp(adjusted_image,
                                           tform,
                                           order=order,
                                           mode='constant',
                                           cval=adjusted_missing,
                                           clip=clip)

    # Convert the image back to its original range
    if im_max > 0:
        rotated_image *= im_max
    rotated_image += im_min

    return rotated_image
Example #23
            def wrapper(*args, **kwargs):
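                # ``name``, ``urls`` and ``sha_hash`` are closure variables
                # captured from the enclosing data-manager decorator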
                self._namespace = self._get_module(func)
                replace = self._skip_file.get(name)
                if replace:
                    uri_parse = urlparse(replace['uri'])
                    if uri_parse.scheme in ("", "file"):
                        # If a relative file uri is specified (i.e.
                        # `file://sunpy/test`) this maintains compatibility
                        # with the original behaviour where this would be
                        # interpreted as `./sunpy/test` if no scheme is
                        # specified netloc will be '' by default.
                        file_path = uri_parse.netloc + uri_parse.path
                        file_hash = hash_file(file_path)
                    else:
                        file_path, file_hash, _ = self._cache._download_and_hash(
                            [replace['uri']], self._namespace)
                    if replace['hash'] and file_hash != replace['hash']:
                        # if hash provided to replace function doesn't match the hash of the file
                        # raise error
                        raise ValueError(
                            "Hash provided to override_file does not match hash of the file."
                        )
                elif self._skip_hash_check:
                    file_path = self._cache.download(urls,
                                                     self._namespace,
                                                     redownload=True)
                else:
                    details = self._cache.get_by_hash(sha_hash)
                    if not details:
                        # In case we are matching by hash and file does not exist
                        # That might mean the wrong hash is supplied to decorator
                        # We match by urls to make sure that is not the case
                        if self._cache_has_file(urls):
                            # If we can't find a file matching sha_hash, but the url is already
                            # in the database
                            raise ValueError(
                                f"{urls} has already been downloaded, but no file "
                                f"matching the hash {sha_hash} can be found.")
                        file_path = self._cache.download(urls, self._namespace)
                        file_hash = hash_file(file_path)
                        if file_hash != sha_hash:
                            # the hash of the file downloaded does not match provided hash
                            # this means the file has changed on the server.
                            # the function should be updated to use the new
                            # hash. Raise an error to notify.
                            raise RuntimeError(
                                f"Hash of local file ({file_hash}) does not match expected hash ({sha_hash}). "
                                "File may have changed on the remote server.")
                    else:
                        # This is to handle the case when the local file
                        # appears to be tampered/corrupted
                        if hash_file(
                                details['file_path']) != details['file_hash']:
                            warn_user(
                                "Hashes do not match, the file will be redownloaded "
                                "(could be be tampered/corrupted)")
                            file_path = self._cache.download(urls,
                                                             self._namespace,
                                                             redownload=True)
                            # Recheck the hash again, if this fails, we will exit.
                            if hash_file(file_path) != details['file_hash']:
                                raise RuntimeError(
                                    "Redownloaded file also has the incorrect hash."
                                    "The remote file on the server might have changed."
                                )
                        else:
                            file_path = details['file_path']

                if name not in self._file_cache:
                    self._file_cache[name] = {}
                self._file_cache[name][self._namespace] = file_path
                result = func(*args, **kwargs)
                self._namespace = None
                return result
Example #24
    def make_3d(self):
        """
        This method calculates the third coordinate of the Helioprojective
        frame. It assumes that the coordinate point is on the surface of the Sun.

        If a point in the frame is off limb then NaN will be returned.

        Returns
        -------
        new_frame : `~sunpy.coordinates.frames.Helioprojective`
            A new frame instance with all the attributes of the original but
            now with a third coordinate.
        """
        # Skip if we already are 3D
        if not self._is_2d:
            return self

        if not isinstance(self.observer, BaseCoordinateFrame):
            raise ConvertError("Cannot calculate distance to the Sun "
                               f"for observer '{self.observer}' "
                               "without `obstime` being specified.")

        rep = self.represent_as(UnitSphericalRepresentation)
        lat, lon = rep.lat, rep.lon

        # Check for the use of floats with lower precision than the native Python float
        if not set([lon.dtype.type, lat.dtype.type]).issubset([float, np.float64, np.longdouble]):
            warn_user("The Helioprojective component values appear to be lower "
                      "precision than the native Python float: "
                      f"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. "
                      "To minimize precision loss, you may want to cast the values to "
                      "`float` or `numpy.float64` via the NumPy method `.astype()`.")

        # Calculate the distance to the surface of the Sun using the law of cosines
        cos_alpha = np.cos(lat) * np.cos(lon)
        c = self.observer.radius**2 - self.rsun**2
        b = -2 * self.observer.radius * cos_alpha
        # Ignore sqrt of NaNs
        with np.errstate(invalid='ignore'):
            d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2  # use the "near" solution

        if self._spherical_screen:
            sphere_center = self._spherical_screen['center'].transform_to(self).cartesian
            c = sphere_center.norm()**2 - self._spherical_screen['radius']**2
            b = -2 * sphere_center.dot(rep)
            # Ignore sqrt of NaNs
            with np.errstate(invalid='ignore'):
                dd = ((-1*b) + np.sqrt(b**2 - 4*c)) / 2  # use the "far" solution

            d = np.fmin(d, dd) if self._spherical_screen['only_off_disk'] else dd

        # This warning can be triggered in specific draw calls when plt.show() is
        # called. We cannot easily prevent this, so we check whether the specific
        # function appears in the stack trace.
        stack_trace = traceback.format_stack()
        matching_string = 'wcsaxes.*_draw_grid'
        bypass = any([re.search(matching_string, string) for string in stack_trace])
        if not bypass and np.all(np.isnan(d)) and np.any(np.isfinite(cos_alpha)):
            warn_user("The conversion of these 2D helioprojective coordinates to 3D is all NaNs "
                      "because off-disk coordinates need an additional assumption to be mapped to "
                      "calculate distance from the observer. Consider using the context manager "
                      "`Helioprojective.assume_spherical_screen()`.")

        return self.realize_frame(SphericalRepresentation(lon=lon,
                                                          lat=lat,
                                                          distance=d))
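
The context manager mentioned in the warning, sketched for an off-disk point (values illustrative):

import astropy.units as u
from astropy.coordinates import SkyCoord
from sunpy.coordinates import Helioprojective

frame = Helioprojective(observer='earth', obstime='2020-01-01')
off_disk = SkyCoord(1500 * u.arcsec, 0 * u.arcsec, frame=frame)
with Helioprojective.assume_spherical_screen(frame.observer):
    off_disk_3d = off_disk.frame.make_3d()  # finite distances instead of NaN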
Example #25
    def search(self, *args, **kwargs):
        """
        The simple interface to query the wsdl service.

        This uses the service's TimeQuery() method and provides a simple
        interface between sunpy and the web service's API.

        .. note::
           By default the maximum records returned by the service are limited to 500.
           To obtain more results ``a.helio.MaxRecords`` must be set to a higher value.

        Examples
        --------
        >>> from sunpy.net.helio import attrs as ha
        >>> from sunpy.net import attrs as a, Fido
        >>> timerange = a.Time('2005/01/03', '2005/12/03')
        >>> res = Fido.search(timerange, ha.MaxRecords(10),
        ...                   ha.TableName('rhessi_hxr_flare'))  # doctest: +REMOTE_DATA
        >>> res  #doctest: +REMOTE_DATA
        <sunpy.net.fido_factory.UnifiedResponse object at ...>
        Results from 1 Provider:
        <BLANKLINE>
        10 Results from the HECClient:
        hec_id      time_start          time_peak      ... energy_kev flare_number
        ------ ------------------- ------------------- ... ---------- ------------
         31463 2005-01-03T01:37:36 2005-01-03T01:37:54 ...          6      5010320
         31464 2005-01-03T01:51:36 2005-01-03T01:59:18 ...         12      5010301
         31465 2005-01-03T03:26:28 2005-01-03T03:42:50 ...          6      5010332
         31466 2005-01-03T03:46:04 2005-01-03T04:07:10 ...         12      5010302
         31467 2005-01-03T05:00:24 2005-01-03T05:00:30 ...          6      5010313
         31468 2005-01-03T06:40:48 2005-01-03T06:42:46 ...          6      5010314
         31469 2005-01-03T08:27:56 2005-01-03T08:28:26 ...          6      5010334
         31470 2005-01-03T09:31:00 2005-01-03T09:33:34 ...          6      5010322
         31471 2005-01-03T09:34:52 2005-01-03T09:59:46 ...          6      5010336
         31472 2005-01-03T11:06:48 2005-01-03T11:07:18 ...         12      5010304
        <BLANKLINE>
        <BLANKLINE>
        """
        qrdict = {}
        for elem in args:
            if isinstance(elem, a.Time):
                qrdict['Time'] = elem
            elif isinstance(elem, ha.MaxRecords):
                qrdict['max_records'] = elem.value
            elif isinstance(elem, ha.TableName):
                qrdict['table_name'] = elem.value
            else:
                raise ValueError(
                    f"{elem.__class__.__name__} should be an ``attrs.Time``, ``attrs.helio.MaxRecords`` or ``attrs.helio.TableName`` attribute."
                )
        qrdict.update(kwargs)
        table = qrdict.get('table_name', None)
        if table:
            if isinstance(table, bytes):
                warn_deprecated(
                    'type `bytes` for table_name is deprecated, use `str` instead.'
                )
            else:
                table = str.encode(table)
        start_time = qrdict['Time'].start
        end_time = qrdict['Time'].end
        max_records = qrdict.get('max_records', 500)
        while table is None:
            table = self.select_table()
        start_time = parse_time(start_time)
        end_time = parse_time(end_time)
        results = self.hec_client.service.TimeQuery(STARTTIME=start_time.isot,
                                                    ENDTIME=end_time.isot,
                                                    FROM=table,
                                                    MAXRECORDS=max_records)
        results = votable_handler(etree.tostring(results))
        table = HECResponse(results.to_table(), client=self)
        if len(table) == max_records == 500:
            warn_user(
                "Number of results is the same as the default `max_records` of 500. "
                "It is possible your query has been truncated. "
                "If you want to change this, set `a.helio.MaxRecords` to a higher value."
            )
        return table
Example #26
    def get_request(self,
                    requests,
                    path=None,
                    overwrite=False,
                    progress=True,
                    downloader=None,
                    wait=True,
                    max_conn=default_max_conn,
                    **kwargs):
        """
        Query JSOC to check whether the request(s) are ready for download.

        If the request is ready for download, it will then download it.

        Parameters
        ----------
        requests : `~drms.client.ExportRequest`, `str`, `list`
            `~drms.client.ExportRequest` objects or `str` request IDs or lists
            returned by `~sunpy.net.jsoc.jsoc.JSOCClient.request_data`.
        path : `str`
            Path to save data to, defaults to SunPy download dir.
        progress : `bool`, optional
            If `True` show a progress bar showing how many of the total files
            have been downloaded. If `False`, no progress bar will be shown.
        overwrite : `bool` or `str`, optional
            Determine how to handle downloading if a file already exists with the
            same name. If `False` the file download will be skipped and the path
            returned to the existing file, if `True` the file will be downloaded
            and the existing file will be overwritten, if ``'unique'`` the filename
            will be modified to be unique.
        downloader : `parfive.Downloader`, optional
            The download manager to use.
        wait : `bool`, optional
           If `False` ``downloader.download()`` will not be called. Only has
           any effect if ``downloader`` is not `None`.

        Returns
        -------
        res: `parfive.Results`
            A `parfive.Results` instance or `None` if no URLs to download
        """
        c = drms.Client()

        kwargs['max_splits'] = kwargs.get('max_splits', 2)

        # Convert Responses to a list if not already
        if isinstance(requests, str) or not isiterable(requests):
            requests = [requests]

        # Ensure all the requests are drms ExportRequest objects
        for i, request in enumerate(requests):
            if isinstance(request, str):
                r = c.export_from_id(request)
                requests[i] = r

        # We only download if all are finished
        if not all([r.has_succeeded() for r in requests]):
            raise NotExportedError("Can not download as not all the requests "
                                   "have been exported for download yet.")

        # Ensure path has a {file} in it
        if path is None:
            default_dir = config.get("downloads", "download_dir")
            path = os.path.join(default_dir, '{file}')
        elif isinstance(path, Path):
            path = str(path)

        if isinstance(path, str) and '{file}' not in path:
            path = os.path.join(path, '{file}')

        paths = []
        for request in requests:
            if request.method == 'url-tar':
                fname = path.format(file=Path(request.tarfile).name)
                paths.append(os.path.expanduser(fname))
            else:
                for filename in request.data['filename']:
                    # Ensure we don't duplicate the file extension
                    ext = os.path.splitext(filename)[1]
                    if ext and path.endswith(ext):
                        # Remove the suffix; str.strip would drop arbitrary
                        # matching characters from both ends, not the extension
                        fname = path[:-len(ext)]
                    else:
                        fname = path
                    fname = fname.format(file=filename)
                    fname = os.path.expanduser(fname)
                    paths.append(fname)

        dl_set = True
        if not downloader:
            dl_set = False
            downloader = Downloader(progress=progress,
                                    overwrite=overwrite,
                                    max_conn=max_conn)

        if downloader.max_conn * kwargs['max_splits'] > 10:
            warn_user(
                "JSOC does not support more than 10 parallel connections. " +
                f"Changing the number of parallel connections to {2 * self.default_max_conn}."
            )
            kwargs['max_splits'] = 2
            downloader.max_conn = self.default_max_conn

        urls = []
        for request in requests:
            if request.status == 0:
                if request.protocol == 'as-is' or request.method == 'url-tar':
                    urls.extend(list(request.urls.url))
                else:
                    for index, data in request.data.iterrows():
                        url_dir = request.request_url + '/'
                        urls.append(
                            urllib.parse.urljoin(url_dir, data['filename']))

        if urls:
            if progress:
                print_message = "{0} URLs found for download. Full request totalling {1}MB"
                print(print_message.format(len(urls), request._d['size']))
            for aurl, fname in zip(urls, paths):
                downloader.enqueue_file(aurl, filename=fname, **kwargs)

        if dl_set and not wait:
            return Results()

        results = downloader.download()
        return results
Example #27
def read_cdf(fname):
    """
    Read a CDF file that follows the ISTP/IACG guidelines.

    Parameters
    ----------
    fname : path-like
        Location of single CDF file to read.

    Returns
    -------
    list[GenericTimeSeries]
        A list of time series objects, one for each unique time index within
        the CDF file.

    References
    ----------
    Space Physics Guidelines for CDF https://spdf.gsfc.nasa.gov/sp_use_of_cdf.html
    """
    cdf = cdflib.CDF(str(fname))

    # Extract the time varying variables
    cdf_info = cdf.cdf_info()
    meta = cdf.globalattsget()
    all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables']
    var_attrs = {key: cdf.varattsget(key) for key in all_var_keys}
    # Get keys that depend on time
    var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var]]

    # Get unique time index keys
    time_index_keys = sorted(set([var_attrs[var]['DEPEND_0'] for var in var_keys]))

    all_ts = []
    # For each time index, construct a GenericTimeSeries
    for index_key in time_index_keys:
        try:
            index = cdf.varget(index_key)
        except ValueError:
            # Empty index for cdflib >= 0.3.20
            continue
        if index is None:
            # Empty index for cdflib <0.3.20
            continue
        # TODO: use to_astropy_time() instead here when we drop pandas in timeseries
        index = CDFepoch.to_datetime(index)
        df = pd.DataFrame(index=pd.DatetimeIndex(name=index_key, data=index))
        units = {}

        for var_key in sorted(var_keys):
            attrs = var_attrs[var_key]
            if attrs['DEPEND_0'] != index_key:
                continue

            # Get data
            if cdf.varinq(var_key)['Last_Rec'] == -1:
                log.debug(f'Skipping {var_key} in {fname} as it has zero elements')
                continue

            data = cdf.varget(var_key)
            # Get units
            if 'UNITS' in attrs:
                unit_str = attrs['UNITS']
                try:
                    unit = u.Unit(unit_str)
                except ValueError:
                    if unit_str in _known_units:
                        unit = _known_units[unit_str]
                    else:
                        warn_user(f'astropy did not recognize units of "{unit_str}". '
                                  'Assigning dimensionless units. '
                                  'If you think this unit should not be dimensionless, '
                                  'please raise an issue at https://github.com/sunpy/sunpy/issues')
                        unit = u.dimensionless_unscaled
            else:
                warn_user(f'No units provided for variable "{var_key}". '
                          'Assigning dimensionless units.')
                unit = u.dimensionless_unscaled

            if data.ndim == 2:
                # Multiple columns, give each column a unique label
                for i, col in enumerate(data.T):
                    df[var_key + f'_{i}'] = col
                    units[var_key + f'_{i}'] = unit
            else:
                # Single column
                df[var_key] = data
                units[var_key] = unit

        all_ts.append(GenericTimeSeries(data=df, units=units, meta=meta))

    if not all_ts:
        log.debug(f'No data found in file {fname}')
    return all_ts
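
A minimal usage sketch of ``read_cdf``; ``omni.cdf`` is a hypothetical
ISTP-compliant file, not one shipped with sunpy:

    ts_list = read_cdf('omni.cdf')
    for ts in ts_list:
        # One GenericTimeSeries per unique time index in the file
        print(ts.columns, ts.units)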
Example #28
    def fetch(self,
              jsoc_response,
              path=None,
              progress=True,
              overwrite=False,
              downloader=None,
              wait=True,
              sleep=10,
              max_conn=default_max_conn,
              **kwargs):
        """
        Make the request for the data in a JSOC response and wait for it to be
        staged and then download the data.

        .. note::

            **Only complete searches can be downloaded from JSOC**; this means
            that no slicing operations performed on the results object will
            affect the number of files downloaded.

        Parameters
        ----------
        jsoc_response : `~sunpy.net.jsoc.jsoc.JSOCResponse` object
            A response object
        path : `str`, optional
            Path to save data to; defaults to the sunpy download directory.
        progress : `bool`, optional
            If `True` show a progress bar showing how many of the total files
            have been downloaded. If `False`, no progress bar will be shown.
        overwrite : `bool` or `str`, optional
            Determine how to handle downloading if a file already exists with the
            same name. If `False` the file download will be skipped and the path
            returned to the existing file, if `True` the file will be downloaded
            and the existing file will be overwritten, if ``'unique'`` the filename
            will be modified to be unique.
        downloader : `parfive.Downloader`, optional
            The download manager to use.
        wait : `bool`, optional
            If `False` ``downloader.download()`` will not be called. Only has
            any effect if ``downloader`` is not `None`.
        sleep : `int`, optional
            The number of seconds to wait between calls to JSOC to check the
            status of the request.
        max_conn : `int`, optional
            Maximum number of download connections.

        Returns
        -------
        results : `parfive.Results`
            A `parfive.Results` object containing the downloaded file paths
            and any download errors.

        """
        for resp in jsoc_response.query_args:
            if 'notify' not in resp:
                raise ValueError(
                    'A registered email is required to get data from JSOC. '
                    'Please supply an email with attrs.jsoc.Notify to Fido.search. '
                    'Then pass those new results back into Fido.fetch')

        if len(jsoc_response) != jsoc_response._original_num_rows:
            warn_user("Downloading of sliced JSOC results is not supported. "
                      "All the files present in the original response will "
                      "be downloaded when passed to fetch().")

        # Make staging request to JSOC
        responses = self.request_data(jsoc_response)

        defaults = {'max_splits': 1}
        defaults.update(kwargs)

        # Make response iterable
        if not isiterable(responses):
            responses = [responses]

        # Attach the staging requests to the response so they can be
        # inspected later
        jsoc_response.requests = list(responses)
        time.sleep(sleep / 2.)

        for response in responses:
            response.wait(verbose=progress)

        return self.get_request(responses,
                                path=path,
                                overwrite=overwrite,
                                progress=progress,
                                downloader=downloader,
                                wait=wait,
                                max_conn=max_conn,
                                **defaults)
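
A minimal end-to-end sketch using Fido, which calls this ``fetch()`` under
the hood; the series name and time range are illustrative, and the
``Notify`` email must already be registered with JSOC:

    from sunpy.net import Fido, attrs as a

    result = Fido.search(a.Time('2014-01-01T00:00:00', '2014-01-01T01:00:00'),
                         a.jsoc.Series('hmi.v_45s'),
                         a.jsoc.Notify('user@example.com'))
    files = Fido.fetch(result)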
Example #29
    def __call__(self,
                 *args,
                 composite=False,
                 sequence=False,
                 silence_errors=False,
                 **kwargs):
        """ Method for running the factory. Takes arbitrary arguments and
        keyword arguments and passes them to a sequence of pre-registered types
        to determine which is the correct Map-type to build.

        Arguments args and kwargs are passed through to the validation
        function and to the constructor for the final type. For Map types,
        validation function must take a data-header pair as an argument.

        Parameters
        ----------
        composite : `bool`, optional
            Indicates if collection of maps should be returned as a `~sunpy.map.CompositeMap`.
            Default is `False`.
        sequence : `bool`, optional
            Indicates if collection of maps should be returned as a `sunpy.map.MapSequence`.
            Default is `False`.
        silence_errors : `bool`, optional
            If `True`, ignore data-header pairs which raise an exception.
            Default is `False`.

        Notes
        -----
        Extra keyword arguments are passed through to `sunpy.io.read_file`,
        such as ``memmap`` for FITS files.
        """
        data_header_pairs = self._parse_args(*args,
                                             silence_errors=silence_errors,
                                             **kwargs)
        new_maps = []

        # For each data-header pair, find the registered Map type whose
        # validation function matches and instantiate it.
        for pair in data_header_pairs:
            if isinstance(pair, GenericMap):
                new_maps.append(pair)
                continue
            data, header = pair
            meta = MetaDict(header)

            try:
                new_map = self._check_registered_widgets(data, meta, **kwargs)
                new_maps.append(new_map)
            except (NoMatchError, MultipleMatchError, ValidationFunctionError,
                    MapMetaValidationError) as e:
                if not silence_errors:
                    raise
                warn_user(
                    f"One of the data, header pairs failed to validate with: {e}"
                )

        if not new_maps:
            raise RuntimeError('No maps loaded')

        # If the list is meant to be a sequence, instantiate a map sequence
        if sequence:
            return MapSequence(new_maps, **kwargs)

        # If the list is meant to be a composite map, instantiate one
        if composite:
            return CompositeMap(new_maps, **kwargs)

        if len(new_maps) == 1:
            return new_maps[0]

        return new_maps
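
A minimal usage sketch; ``sunpy.map.Map`` is the public entry point for this
factory, and the FITS filenames are hypothetical:

    import sunpy.map

    # A single file yields a single GenericMap subclass instance
    m = sunpy.map.Map('aia_171.fits')
    # Several files yield a MapSequence when sequence=True
    seq = sunpy.map.Map('aia_*.fits', sequence=True)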
Example #30
    def search(self, *query, response_format=None):
        """
        Query data from the VSO with the new API. Takes a variable number
        of attributes as parameter, which are chained together using AND.

        Parameters
        ----------
        response_format : {``"legacy"``, ``"table"``}, optional
            The response format from the search; this can be either
            ``"legacy"`` to return a list-like object of the zeep responses, or
            ``"table"`` to return the responses in a subclass of
            `~astropy.table.QTable`. Defaults to ``"table"``.

        Examples
        --------
        Query all data from eit or aia between 2010-01-01T00:00 and
        2010-01-01T01:00.

        >>> from datetime import datetime
        >>> from sunpy.net import vso, attrs as a
        >>> client = vso.VSOClient()  # doctest: +REMOTE_DATA
        >>> client.search(
        ...    a.Time(datetime(2010, 1, 1), datetime(2010, 1, 1, 1)),
        ...    a.Instrument.eit | a.Instrument.aia,
        ...    response_format="table")   # doctest:  +REMOTE_DATA
        <sunpy.net.vso.table_response.VSOQueryResponseTable object at ...>
            Start Time               End Time        Source ... Extent Type   Size
                                                            ...              Mibyte
        ----------------------- ----------------------- ------ ... ----------- -------
        2010-01-01 00:00:08.000 2010-01-01 00:00:20.000   SOHO ...    FULLDISK 2.01074
        2010-01-01 00:12:08.000 2010-01-01 00:12:20.000   SOHO ...    FULLDISK 2.01074
        2010-01-01 00:24:10.000 2010-01-01 00:24:22.000   SOHO ...    FULLDISK 2.01074
        2010-01-01 00:36:08.000 2010-01-01 00:36:20.000   SOHO ...    FULLDISK 2.01074
        2010-01-01 00:48:09.000 2010-01-01 00:48:21.000   SOHO ...    FULLDISK 2.01074


        Returns
        -------
        out : `~sunpy.net.vso.table_response.VSOQueryResponseTable` or `QueryResponse`
            The matched items: a
            `~sunpy.net.vso.table_response.VSOQueryResponseTable` when
            ``response_format="table"`` (the default), or a legacy
            ``QueryResponse`` when ``response_format="legacy"``.
        """
        if response_format is None:
            response_format = "table"
        query = and_(*query)
        QueryRequest = self.api.get_type('VSO:QueryRequest')
        VSOQueryResponse = self.api.get_type('VSO:QueryResponse')
        responses = []
        exceptions = []
        for block in walker.create(query, self.api):
            try:
                query_response = self.api.service.Query(
                    QueryRequest(block=block)
                )
                for resp in query_response:
                    if resp["error"]:
                        warn_user(resp["error"])
                responses.append(
                    VSOQueryResponse(query_response)
                )
            except Exception as ex:
                exceptions.append(ex)

        responses = self.merge(responses)
        if response_format == "legacy":
            response = QueryResponse.create(responses)
        else:
            response = VSOQueryResponseTable.from_zeep_response(responses, client=self)

        for ex in exceptions:
            response.add_error(ex)

        return response
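
A minimal sketch of the two response formats; the query is illustrative and
requires network access:

    from sunpy.net import vso, attrs as a

    client = vso.VSOClient()
    tbl = client.search(a.Time('2010/1/1', '2010/1/1 01:00'),
                        a.Instrument.eit, response_format="table")
    legacy = client.search(a.Time('2010/1/1', '2010/1/1 01:00'),
                           a.Instrument.eit, response_format="legacy")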