Example #1
0
 def __init__(self, *args, **kwargs):
     """
     Create a CompositeMap
     
     Parameters
     ----------
     *args : `sunpy.map.GenericMap`
         One or more maps to combine into the composite.
     """
     
     self._maps = expand_list(args)
     
     for m in self._maps:
         if not isinstance(m, GenericMap):
             raise ValueError(
                        'CompositeMap expects pre-constructed map objects.')
     
     # Default alpha, z-order and contour level values
     alphas = [1] * len(self._maps)
     zorders = range(0, 10 * len(self._maps), 10)
     levels = [False] * len(self._maps)
     
     # Set z-order, alpha and level values for each map
     for i, m in enumerate(self._maps):
         m.zorder = zorders[i]
         m.alpha = alphas[i]
         m.levels = levels[i]
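The stepped z-order defaults above (0, 10, 20, ...) presumably leave gaps so a later layer can be slotted between two existing maps without renumbering. A minimal, SunPy-independent sketch of that idea:

# Sketch only: z-orders advance in tens, leaving room for later overlays.
layers = ['map_a', 'map_b', 'map_c']
zorders = dict(zip(layers, range(0, 10 * len(layers), 10)))  # {'map_a': 0, 'map_b': 10, 'map_c': 20}
zorders['overlay'] = 15  # slots between map_b and map_c
print(sorted(zorders, key=zorders.get))  # ['map_a', 'map_b', 'overlay', 'map_c']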
Example #2
0
    def _parse_args(self, *args, silence_errors=False, **kwargs):
        """
        Parses an args list into data-header pairs.

        args can contain any mixture of the following entries:
        * tuples of data,header
        * data, header not in a tuple
        * data, wcs object in a tuple
        * data, wcs object not in a tuple
        * filename, as a str or pathlib.Path, which will be read
        * directory, as a str or pathlib.Path, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Examples
        --------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')
        """
        # Account for nested lists of items
        args = expand_list(args)

        # Sanitise the input so that each 'type' of input corresponds to a different
        # class, so single dispatch can be used later
        nargs = len(args)
        i = 0
        while i < nargs:
            arg = args[i]
            if isinstance(arg, SUPPORTED_ARRAY_TYPES):
                # The next two items are data and a header
                data = args.pop(i)
                header = args.pop(i)
                args.insert(i, (data, header))
                nargs -= 1
            elif isinstance(arg, str) and is_url(arg):
                # Replace URL string with a Request object to dispatch on later
                args[i] = Request(arg)
            elif possibly_a_path(arg):
                # Replace path strings with Path objects
                args[i] = pathlib.Path(arg)
            i += 1

        # Parse the arguments
        # Note that this list can also contain GenericMaps if they are directly given to the factory
        data_header_pairs = []
        for arg in args:
            try:
                data_header_pairs += self._parse_arg(arg, **kwargs)
            except NoMapsInFileError as e:
                if not silence_errors:
                    raise
                warn_user(
                    f"One of the arguments failed to parse with error: {e}")

        return data_header_pairs
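Several of these factories lean on `expand_list` to flatten nested argument lists before parsing. A minimal stand-in with the assumed flattening behaviour (not SunPy's actual implementation):

# Hypothetical stand-in for sunpy.util.expand_list: recursively flattens
# nested lists/tuples into a single flat list of items.
def expand_list(inp):
    out = []
    for item in inp:
        if isinstance(item, (list, tuple)):
            out.extend(expand_list(item))
        else:
            out.append(item)
    return out

print(expand_list([1, [2, 3], (4, [5])]))  # [1, 2, 3, 4, 5]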
Example #3
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""

        # Renaming mapcube functionality to mapsequence
        warnings.warn("Deprecated in favor of MapSequence. MapSequence has the same functionality as MapCube.",
                      SunpyDeprecationWarning, stacklevel=2)

        # Hack to get around Python 2.x not backporting PEP 3102.
        sortby = kwargs.pop('sortby', 'date')
        derotate = kwargs.pop('derotate', False)

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                           'MapCube expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()
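Note that the sort above calls `self._sort_by_date()` with parentheses: the method is assumed to return the key function itself, along these lines:

# Assumed shape of the helper (matching how it is called above): it returns
# a key *function* for list.sort rather than a sortable value.
def _sort_by_date(self):
    return lambda m: m.date  # order maps by observation date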
Example #4
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""

        # Hack to get around Python 2.x not backporting PEP 3102.
        orderby = kwargs.pop('orderby', 'date')
        derotate = kwargs.pop('derotate', False)
        self.ref_index = kwargs.pop('reference_index', 0)

        maps = expand_list(args)

        for m in maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                    'MapCubed expects pre-constructed map objects.')

        maps.sort(key=self._sort_by_date())

        # test if all maps have the same shape
        if not np.all(
            [m.data.shape == maps[self.ref_index].data.shape for m in maps]):
            raise ValueError("All Map data must have the same dimensions")

        # test if all maps have the same scale
        if not np.all([m.scale == maps[self.ref_index].scale for m in maps]):
            raise ValueError("All Map data must have the same scale")

        self.data = np.zeros((maps[self.ref_index].data.shape[0],
                              maps[self.ref_index].data.shape[1], len(maps)),
                             dtype=maps[self.ref_index].data.dtype)
        for i, m in enumerate(maps):
            self.data[:, :, i] = m.data

        self._meta = []
        for i, m in enumerate(maps):
            self._meta.append(m.meta)
Example #5
0
    def __init__(self, map3D, *args, **kwargs):
        # Use all the user parameters
        self.maps_list = map3D + expand_list(args)
        self.benchmark = kwargs.get('benchmark', 0) # Defaults to the first vector field in the list
        self.normalise = kwargs.get('normalise', False)

        # The table to store the test results
        self.results = Table(names=('extrapolator routine', 'extrapolation duration', 'fig of merit 1'), meta={'name': '3D field comparison table'}, dtype=('S24', 'f8', 'f8'))
        self.results['extrapolation duration'].unit = u.s  # 't' was undefined; the duration column is assumed

        # An empty table for the results:
        #N = len(self.maps_list)
        #t1, t2, t3, t4, t5, t6, t7 = [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N
        #self.results = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})
        #self.results_normalised = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})

        # Ensure that the input maps are all the same type and shape.
        for m in self.maps_list:#self.maps:
            # Check that this is a Map3D object.
            if not isinstance(m, Map3D):
                raise ValueError(
                         'Map3DComparer expects pre-constructed map3D objects.')

            # Compare the shape of this Map3D to the first in the Map3D list.
            if m.data.shape != self.maps_list[0].data.shape:
                raise ValueError(
                         'Map3DComparer expects map3D objects with identical dimensions.')
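For reference, a self-contained version of the results-table pattern used above, with the unit attached to the duration column via astropy:

import astropy.units as u
from astropy.table import Table

# Build an empty comparison table, then give the duration column a unit.
results = Table(names=('extrapolator routine', 'extrapolation duration', 'fig of merit 1'),
                meta={'name': '3D field comparison table'},
                dtype=('S24', 'f8', 'f8'))
results['extrapolation duration'].unit = u.s
results.add_row(('potential', 1.5, 0.9))
print(results)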
Example #6
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""

        # Renaming mapcube functionality to mapsequence
        warnings.warn(
            "Deprecated in favor of MapSequence. MapSequence has the same functionality as MapCube.",
            SunpyDeprecationWarning,
            stacklevel=2)

        # Hack to get around Python 2.x not backporting PEP 3102.
        sortby = kwargs.pop('sortby', 'date')
        derotate = kwargs.pop('derotate', False)

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                    'MapCube expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()
Example #7
0
    def __init__(self, map3D, *args, **kwargs):
        # Use all the user parameters
        self.maps_list = map3D + expand_list(args)
        self.benchmark = kwargs.get(
            'benchmark', 0)  # Defaults to the first vector field in the list
        self.normalise = kwargs.get('normalise', False)

        # The table to store the test results
        self.results = Table(names=('extrapolator routine',
                                    'extrapolation duration',
                                    'fig of merit 1'),
                             meta={'name': '3D field comparison table'},
                             dtype=('S24', 'f8', 'f8'))
        self.results['extrapolation duration'].unit = u.s  # 't' was undefined; the duration column is assumed

        # An empty table for the results:
        #N = len(self.maps_list)
        #t1, t2, t3, t4, t5, t6, t7 = [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N
        #self.results = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})
        #self.results_normalised = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})

        # Ensure that the input maps are all the same type and shape.
        for m in self.maps_list:  #self.maps:
            # Check that this is a Map3D object.
            if not isinstance(m, Map3D):
                raise ValueError(
                    'Map3DComparer expects pre-constructed map3D objects.')

            # Compare the shape of this Map3D to the first in the Map3D list.
            if m.data.shape != self.maps_list[0].data.shape:
                raise ValueError(
                    'Map3DComparer expects map3D objects with identical dimensions.'
                )
Example #8
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""

        # Hack to get around Python 2.x not backporting PEP 3102.
        sortby = kwargs.pop('sortby', 'date')
        coalign = kwargs.pop('coalign', False)
        derotate = kwargs.pop('derotate', False)

        self._maps = expand_list(args)

        for m in self._maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                    'CompositeMap expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self._maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        # Coalignment
        if coalign:
            if coalign == 'diff':
                self.coalign("diff")
            else:
                raise ValueError("That coalignment method is not supported")

        if derotate:
            self._derotate()
Example #9
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""
        
        # Hack to get around Python 2.x not backporting PEP 3102.
        sortby = kwargs.pop('sortby', 'date')
        coalign = kwargs.pop('coalign', False)
        derotate = kwargs.pop('derotate', False)
        
        self._maps = expand_list(args)
        
        for m in self._maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                           'CompositeMap expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self._maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")
        
        # Coalignment
        if coalign:
            if coalign == 'diff':
                self.coalign("diff")
            else:
                raise ValueError("That coalignment method is not supported")

        if derotate:
            self._derotate()
Example #10
0
 def __init__(self, *args, **kwargs):
     """
     Create a CompositeMap
     
     Parameters
     ----------
     *args : `sunpy.map.GenericMap`
         One or more maps to combine into the composite.
     """
     
     self._maps = expand_list(args)
     
     for m in self._maps:
         if not isinstance(m, GenericMap):
             raise ValueError(
                        'CompositeMap expects pre-constructed map objects.')
     
     # Default alpha, z-order and contour level values
     alphas = [1] * len(self._maps)
     zorders = range(0, 10 * len(self._maps), 10)
     levels = [False] * len(self._maps)
     
     # Set z-order, alpha and level values for each map
     for i, m in enumerate(self._maps):
         m.zorder = zorders[i]
         m.alpha = alphas[i]
         m.levels = levels[i]
Example #11
0
    def _sanitise_args(self, args):
        """
        Sanitise a list of args so that a single argument corresponds to either:

        - (data, header, units) tuple.
        - path-like `pathlib.Path` (e.g. a filename, directory, glob etc.).
        - `urllib.request.Request`.
        - `GenericTimeSeries`.
        """
        # Account for nested lists of items. Simply outputs a single list of
        # items, nested lists are expanded to element level.
        args = expand_list(args)

        # Sanitise the input so that each 'type' of input corresponds to a different
        # class, so single dispatch can be used later
        i = 0
        while i < len(args):
            arg = args[i]
            if isinstance(arg, (np.ndarray, Table, pd.DataFrame)):
                # Extract data and metadata
                # The next item is data
                data = args[i]
                meta = MetaDict()
                units = OrderedDict()
                if isinstance(data, Table):
                    # We have an Astropy Table:
                    data, new_meta, new_units = self._from_table(data)
                    units.update(new_units)
                    meta.update(new_meta)
                elif isinstance(data, np.ndarray):
                    # We have a numpy ndarray. We assume the first column is a dt index
                    data = pd.DataFrame(data=data[:, 1:],
                                        index=Time(data[:, 0]))

                # The next two could be metadata or units
                for _ in range(2):
                    j = i + 1
                    if j < len(args):
                        arg = args[j]
                        if self._is_units(arg):
                            units.update(arg)
                            args.pop(j)
                        elif self._is_metadata(arg):
                            meta.update(self._parse_meta(arg))
                            args.pop(j)

                args[i] = (data, meta, units)

            elif isinstance(arg, str) and is_url(arg):
                args[i] = Request(arg)
            elif possibly_a_path(arg):
                args[i] = pathlib.Path(arg)
            i += 1

        return args
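The sanitising pass exists so that each input type can then be routed through single dispatch, as the comment above notes. A hedged sketch of that dispatch step (handler bodies are placeholders, not SunPy's internals):

import pathlib
from functools import singledispatch
from urllib.request import Request

@singledispatch
def _parse_arg(arg):
    raise TypeError(f"Unsupported input type: {type(arg)}")

@_parse_arg.register
def _(arg: tuple):
    return [arg]  # already a (data, meta, units) triple

@_parse_arg.register
def _(arg: pathlib.Path):
    return [f"would read files from {arg}"]  # placeholder for file reading

@_parse_arg.register
def _(arg: Request):
    return [f"would download {arg.full_url}"]  # placeholder for URL handling

print(_parse_arg(pathlib.Path('data.fits')))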
Example #12
0
    def __init__(self, *args, **kwargs):

        # Hack to get around Python 2.x not backporting PEP 3102.
        #sortby = kwargs.pop('sortby', 'date')
        #derotate = kwargs.pop('derotate', False)

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, Map3D):
                raise ValueError(
                           'Expected pre-constructed Map3D objects.')
Example #13
0
    def __init__(self, *args, **kwargs):

        # Hack to get around Python 2.x not backporting PEP 3102.
        #sortby = kwargs.pop('sortby', 'date')
        #derotate = kwargs.pop('derotate', False)

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, Map3D):
                raise ValueError(
                    'Expected pre-constructed Map3D objects.')
Example #14
0
    def __init__(self, *args, sortby='date', derotate=False, **kwargs):
        """Creates a new Map instance"""

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError('MapSequence expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()
Example #15
0
    def __init__(self, *args, sortby='date', derotate=False, **kwargs):
        """Creates a new Map instance"""

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError('MapSequence expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()
Example #16
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""

        # Hack to get around Python 2.x not backporting PEP 3102.
        sortby = kwargs.pop("sortby", "date")
        derotate = kwargs.pop("derotate", False)

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError("MapCube expects pre-constructed map objects.")

        # Optionally sort data
        if sortby is not None:
            if sortby is "date":
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()
Example #17
0
    def __init__(self, *args, **kwargs):
        """Creates a new Map instance"""

        # Hack to get around Python 2.x not backporting PEP 3102.
        sortby = kwargs.pop('sortby', 'date')
        derotate = kwargs.pop('derotate', False)

        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                    'MapSequence expects pre-constructed map objects.')

        # Optionally sort data
        if sortby is not None:
            if sortby == 'date':
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()
Example #18
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs.  args can contain any
        mixture of the following entries:
        * tuples of data,header
        * data, header not in a tuple
        * data, wcs object in a tuple
        * data, wcs object not in a tuple
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')

        """

        data_header_pairs = list()
        already_maps = list()

        # Account for nested lists of items
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):

            arg = args[i]

            # Data-header or data-WCS pair
            if isinstance(arg, SUPPORTED_ARRAY_TYPES):
                arg_header = args[i+1]
                if isinstance(arg_header, WCS):
                    arg_header = args[i+1].to_header()

                if self._validate_meta(arg_header):
                    pair = (args[i], OrderedDict(arg_header))
                    data_header_pairs.append(pair)
                    i += 1    # an extra increment to account for the data-header pairing

            # File name
            elif (isinstance(arg, six.string_types) and
                  os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # Directory
            elif (isinstance(arg, six.string_types) and
                  os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Glob
            elif (isinstance(arg, six.string_types) and '*' in arg):
                files = glob.glob(os.path.expanduser(arg))
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Already a Map
            elif isinstance(arg, GenericMap):
                already_maps.append(arg)

            # A URL
            elif (isinstance(arg, six.string_types) and
                  _is_url(arg)):
                url = arg
                path = download_file(url, get_and_create_download_dir())
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # A database Entry
            elif isinstance(arg, DatabaseEntry):
                data_header_pairs += self._read_file(arg.path, **kwargs)

            else:
                raise ValueError("File not found or invalid input")

            i += 1

        # TODO:
        # In the end, if there are already maps they should be put in the same
        # order as the input; currently they are not.
        return data_header_pairs, already_maps
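A plausible shape for the `_is_url` helper these parsers rely on (an assumption; the real helper may differ):

from urllib.parse import urlparse

def _is_url(arg):
    # Treat a string as a URL only if it carries both a scheme and a host.
    parsed = urlparse(arg)
    return bool(parsed.scheme and parsed.netloc)

print(_is_url('https://example.com/image.fits'))  # True
print(_is_url('sub/dir/image.fits'))              # False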
Example #19
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs.  args can contain any mixture
        of the following entries:
        * tuples of data,header
        * data, header not in a tuple
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')

        """

        data_header_pairs = list()
        already_maps = list()

        # Account for nested lists of items
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):

            arg = args[i]

            # Data-header pair in a tuple
            if ((type(arg) in [tuple, list]) and
                 len(arg) == 2 and
                 isinstance(arg[0],np.ndarray) and
                 isinstance(arg[1],dict)):
                data_header_pairs.append(arg)

            # Data-header pair not in a tuple
            elif (isinstance(arg, np.ndarray) and
                  isinstance(args[i+1],dict)):
                pair = (args[i], args[i+1])
                data_header_pairs.append(pair)
                i += 1 # an extra increment to account for the data-header pairing

            # File name
            elif (isinstance(arg,basestring) and
                  os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # Directory
            elif (isinstance(arg,basestring) and
                  os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Glob
            elif (isinstance(arg,basestring) and '*' in arg):
                files = glob.glob( os.path.expanduser(arg) )
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Already a Map
            elif isinstance(arg, GenericMap):
                already_maps.append(arg)

            # A URL
            elif (isinstance(arg,basestring) and
                  _is_url(arg)):
                default_dir = sunpy.config.get("downloads", "download_dir")
                url = arg
                path = download_file(url, default_dir)
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # A database Entry
            elif isinstance(arg, DatabaseEntry):
                data_header_pairs += self._read_file(arg.path, **kwargs)

            else:
                raise ValueError("File not found or invalid input")

            i += 1
        #TODO:
        # In the end, if there are already maps they should be put in the same
        # order as the input; currently they are not.
        return data_header_pairs, already_maps
Example #20
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs.  args can contain any
        mixture of the following entries:
        * tuples of data,header
        * data, header not in a tuple
        * data, wcs object in a tuple
        * data, wcs object not in a tuple
        * filename, as a str or pathlib.Path, which will be read
        * directory, as a str or pathlib.Path, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')

        """

        data_header_pairs = list()
        already_maps = list()

        # Account for nested lists of items
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):

            arg = args[i]

            # Data-header or data-WCS pair
            if isinstance(arg, SUPPORTED_ARRAY_TYPES):
                arg_header = args[i+1]
                if isinstance(arg_header, WCS):
                    arg_header = args[i+1].to_header()

                if self._validate_meta(arg_header):
                    pair = (args[i], OrderedDict(arg_header))
                    data_header_pairs.append(pair)
                    i += 1    # an extra increment to account for the data-header pairing

            # A database Entry
            elif isinstance(arg, DatabaseEntryType):
                data_header_pairs += self._read_file(arg.path, **kwargs)

            # Already a Map
            elif isinstance(arg, GenericMap):
                already_maps.append(arg)

            # URL
            elif isinstance(arg, str) and _is_url(arg):
                url = arg
                path = str(cache.download(url).absolute())
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # File system path (file or directory or glob)
            elif _possibly_a_path(arg):
                path = pathlib.Path(arg).expanduser()
                if _is_file(path):
                    pairs = self._read_file(path, **kwargs)
                    data_header_pairs += pairs
                elif _is_dir(path):
                    for afile in sorted(path.glob('*')):
                        data_header_pairs += self._read_file(afile, **kwargs)
                elif glob.glob(os.path.expanduser(arg)):
                    for afile in sorted(glob.glob(os.path.expanduser(arg))):
                        data_header_pairs += self._read_file(afile, **kwargs)

                else:
                    raise ValueError(f'Did not find any files at {arg}')

            else:
                raise ValueError(f"Invalid input: {arg}")

            i += 1

        # TODO:
        # In the end, if there are already maps they should be put in the same
        # order as the input; currently they are not.
        return data_header_pairs, already_maps
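The `_possibly_a_path` guard above could be as simple as an os.fspath probe (again an assumption about a private helper):

import os

def _possibly_a_path(arg):
    # os.fspath accepts str and path-like objects and raises TypeError otherwise.
    try:
        os.fspath(arg)
        return True
    except TypeError:
        return False

print(_possibly_a_path('~/data/*.fits'))  # True
print(_possibly_a_path(42))               # False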
Example #21
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an `args` list for data-header pairs. `args` can contain any mixture of the following
        entries:

        * tuples of (data, header, unit) (1)
        * data, header not in a tuple (1)
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        (1) header/unit are optional and in either order, but data should be the first entry in each group.

        Examples
        --------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')
        """
        data_header_unit_tuples = list()
        data_header_pairs = list()
        already_timeseries = list()
        filepaths = list()

        # Account for nested lists of items. Simply outputs a single list of
        # items, nested lists are expanded to element level.
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):
            arg = args[i]

            # Data-header pair in a tuple
            if (isinstance(arg, (np.ndarray, Table, pd.DataFrame))):
                # and self._validate_meta(args[i+1])):
                # Assume a Pandas Dataframe is given
                data = arg
                units = OrderedDict()
                meta = MetaDict()

                # Convert the data argument into a Pandas DataFrame if needed.
                if isinstance(data, Table):
                    # We have an Astropy Table:
                    data, meta, units = self._from_table(data)
                elif isinstance(data, np.ndarray):
                    # We have a numpy ndarray. We assume the first column is a dt index
                    data = pd.DataFrame(data=data[:, 1:], index=Time(data[:, 0]))

                # If there are 1 or 2 more arguments:
                for _ in range(2):
                    if (len(args) > i+1):
                        # If that next argument isn't data but is metadata or units:
                        if not isinstance(args[i+1], (np.ndarray, Table, pd.DataFrame)):
                            if self._validate_units(args[i+1]):
                                units.update(args[i+1])
                                i += 1  # an extra increment to account for the units
                            elif self._validate_meta(args[i+1]):
                                # if we have an astropy.io FITS header then convert
                                # to preserve multi-line comments
                                if isinstance(args[i+1], astropy.io.fits.header.Header):
                                    args[i+1] = MetaDict(sunpy.io.header.FileHeader(args[i+1]))
                                meta.update(args[i+1])
                                i += 1  # an extra increment to account for the meta

                # Add a 3-tuple for this TimeSeries.
                data_header_unit_tuples.append((data, meta, units))

            # Filepath
            elif (isinstance(arg, str) and
                  os.path.isfile(os.path.expanduser(arg))):

                path = os.path.expanduser(arg)
                result = self._read_file(path, **kwargs)
                data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths, result)

            # Directory
            elif (isinstance(arg, str) and
                  os.path.isdir(os.path.expanduser(arg))):

                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    # returns a boolean telling us whether it was read, and either
                    # a tuple or the original filepath for reading by a source
                    result = self._read_file(afile, **kwargs)
                    data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths,
                                                                 result)

            # Glob
            elif isinstance(arg, str) and '*' in arg:

                files = glob.glob(os.path.expanduser(arg))
                for afile in files:
                    # returns a boolean telling us whether it was read, and either
                    # a tuple or the original filepath for reading by a source
                    result = self._read_file(afile, **kwargs)
                    data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths,
                                                                 result)

            # Already a TimeSeries
            elif isinstance(arg, GenericTimeSeries):
                already_timeseries.append(arg)

            # A URL
            elif (isinstance(arg, str) and
                  _is_url(arg)):
                url = arg
                path = download_file(url, get_and_create_download_dir())
                result = self._read_file(path, **kwargs)
                data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths, result)
            else:
                raise NoMatchError("File not found or invalid input")
            i += 1

        # TODO:
        # In the end, if there are already TimeSeries they should be put in the
        # same order as the input; currently they are not.
        return data_header_unit_tuples, data_header_pairs, already_timeseries, filepaths
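Example #22 below inlines the branching that `_apply_result` factors out here. Assuming `_read_file` returns a (read, payload) pair, the helper reduces to:

def _apply_result(data_header_pairs, filepaths, result):
    # result is assumed to be (read, payload): parsed content when read is
    # True, otherwise a filepath deferred to a source-specific reader.
    read, payload = result
    if read:
        data_header_pairs.append(payload)
    else:
        filepaths.append(payload)
    return data_header_pairs, filepaths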
Example #22
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs.  args can contain any
        mixture of the following entries:
        * tuples of (data, header, unit) (1)
        * data, header not in a tuple (1)
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        (1) Note that header/unit are optional and in either order, but data
        must be the first entry in each group.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')

        """

        data_header_unit_tuples = list()
        data_header_pairs = list()
        already_timeseries = list()
        filepaths = list()

        # Take source kwarg if defined
        source = kwargs.get('source', None)

        # Account for nested lists of items. Simply outputs a single list of
        # items, nested lists are expanded to element level.
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):
            arg = args[i]

            # Data-header pair in a tuple
            if isinstance(arg, (np.ndarray, Table, pd.DataFrame)):  # and self._validate_meta(args[i+1])):
                # Assume a Pandas Dataframe is given
                data = arg
                units = OrderedDict()
                meta = MetaDict()

                # Convert the data argument into a Pandas DataFrame if needed.
                if isinstance(data, Table):
                    # We have an AstroPy Table:
                    data, meta, units = self._from_table(data)
                elif isinstance(data, np.ndarray):
                    # We have a numpy ndarray. We assume the first column is a dt index
                    data = pd.DataFrame(data=data[:,1:], index=Time(data[:,0]))

                # If there are 1 or 2 more arguments:
                for _ in range(2):
                    if (len(args) > i+1):
                        # If that next argument isn't data but is metadata or units:
                        if not isinstance(args[i+1], (np.ndarray, Table, pd.DataFrame)):
                            if self._validate_units(args[i+1]):
                                units.update(args[i+1])
                                i += 1  # an extra increment to account for the units
                            elif self._validate_meta(args[i+1]):
                                # if we have an astropy.io FITS header then convert
                                # to preserve multi-line comments
                                if isinstance(args[i+1], astropy.io.fits.header.Header):
                                    args[i+1] = MetaDict(sunpy.io.header.FileHeader(args[i+1]))
                                meta.update(args[i+1])
                                i += 1  # an extra increment to account for the meta

                # Add a 3-tuple for this TimeSeries.
                data_header_unit_tuples.append((data, meta, units))

            # Filepath
            elif (isinstance(arg, six.string_types) and
                  os.path.isfile(os.path.expanduser(arg))):

                path = os.path.expanduser(arg)

                read, result = self._read_file(path, **kwargs)

                if read:
                    data_header_pairs.append(result)
                else:
                    filepaths.append(result)

            # Directory
            elif (isinstance(arg, six.string_types) and
                  os.path.isdir(os.path.expanduser(arg))):

                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    # returns a boolean telling us whether it was read, and either
                    # a tuple or the original filepath for reading by a source
                    read, result = self._read_file(afile, **kwargs)
                    if read:
                        data_header_pairs.append(result)
                    else:
                        filepaths.append(result)

            # Glob
            elif (isinstance(arg, six.string_types) and '*' in arg):

                files = glob.glob(os.path.expanduser(arg))

                for afile in files:
                    # data_header_unit_tuples += self._read_file(afile, **kwargs)
                    # returns a boolean telling us whether it was read, and either
                    # a tuple or the original filepath for reading by a source
                    read, result = self._read_file(afile, **kwargs)
                    if read:
                        data_header_pairs.append(result)
                    else:
                        filepaths.append(result)

            # Already a TimeSeries
            elif isinstance(arg, GenericTimeSeries):
                already_timeseries.append(arg)

            # A URL
            elif (isinstance(arg, six.string_types) and
                  _is_url(arg)):
                default_dir = sunpy.config.get("downloads", "download_dir")
                url = arg
                path = download_file(url, default_dir)
                # _read_file returns (read, result); route it like the other branches
                read, result = self._read_file(path, **kwargs)
                if read:
                    data_header_pairs.append(result)
                else:
                    filepaths.append(result)

            else:
                #raise ValueError("File not found or invalid input")
                raise NoMatchError("File not found or invalid input")
            i += 1

        # TODO:
        # In the end, if there are already TimeSeries they should be put in the
        # same order as the input; currently they are not.
        return data_header_unit_tuples, data_header_pairs, already_timeseries, filepaths
Example #23
0
def differential_rotate(smap, observer=None, time=None, **diff_rot_kwargs):
    """
    Warp a `~sunpy.map.GenericMap` to take into account both solar differential
    rotation and the changing location of the observer.

    .. warning::
        This function, while greatly improved in 1.0, is still experimental.
        Please validate that it gives you results you expect and report any
        discrepancies on the SunPy issue tracker.


    The function transforms the input map data pixels by first rotating each
    pixel according to solar differential rotation.  The amount of solar
    differential rotation applied is calculated from the time difference between the
    observation time of the map and the new observation time, as specified by either the
    "time" keyword or the "obstime" property of the "observer" keyword.
    The locations of the rotated pixels are then transformed to locations on the Sun
    as seen from the new observer position.  This is desirable since in most cases
    the observer does not remain at a fixed position in space. If
    the "time" keyword is used then the new observer position is assumed to
    be based on the location of the Earth.  If the "observer" keyword is used then
    this defines the new observer position.

    The function works with full disk maps and maps that contain portions of the
    solar disk (maps that are entirely off-disk will raise an error).  When the
    input map contains the full disk, the output map has the same dimensions as
    the input map.  When the input map images only part of the solar disk, only
    the on-disk pixels are differentially rotated and the output map can have
    different dimensions compared to the input map.  In this case any off-disk
    emission shown in the input map is not included in the output map.

    Parameters
    ----------
    smap : `~sunpy.map.GenericMap`
        Original map that we want to transform.
    observer : `~astropy.coordinates.BaseCoordinateFrame`, `~astropy.coordinates.SkyCoord`, `None`, optional
        The location of the new observer.
        Instruments in Earth orbit can be approximated by using the position
        of the Earth at the observation time of the new observer.
    time : sunpy-compatible time, `~astropy.time.TimeDelta`, `~astropy.units.Quantity`, `None`, optional
        Used to define the duration over which the amount of solar rotation is
        calculated.  If 'time' is an `~astropy.time.Time` then the time interval
        is the difference between 'time' and the map observation time. If 'time' is
        `~astropy.time.TimeDelta` or `~astropy.units.Quantity` then the calculation
        is "initial_obstime + time".

    Returns
    -------
    `~sunpy.map.GenericMap`
        A map with the result of applying solar differential rotation to the
        input map.
    """
    # If the entire map is off-disk, return an error so the user is aware.
    if is_all_off_disk(smap):
        raise ValueError(
            "The entire map is off disk. No data to differentially rotate.")

    # Get the new observer
    new_observer = _get_new_observer(smap.date, observer, time)

    # Only this function needs scikit image
    from skimage import transform

    # Check whether the input contains the full disk of the Sun
    is_sub_full_disk = not contains_full_disk(smap)
    if is_sub_full_disk:
        # Find the minimal submap of the input map that includes all the
        # on disk pixels. This is required in order to calculate how
        # much to pad the output (solar-differentially rotated) data array by
        # compared to the input map.
        # The amount of padding is dependent on the amount of solar differential
        # rotation and where the on-disk pixels are (since these pixels are the only ones
        # subject to solar differential rotation).
        if not is_all_on_disk(smap):
            # Get the bottom left and top right coordinates that are the
            # vertices that define a box that encloses the on disk pixels
            bottom_left, top_right = on_disk_bounding_coordinates(smap)

            # Create a submap that excludes the off disk emission that does
            # not need to be rotated.
            smap = smap.submap(bottom_left, top_right=top_right)
        bottom_left = smap.bottom_left_coord
        top_right = smap.top_right_coord

        # Get the edges of the minimal submap that contains all the on-disk pixels.
        edges = map_edges(smap)

        # Calculate where the output array moves to.
        # Rotate the top and bottom edges
        rotated_top = _rotate_submap_edge(smap,
                                          edges[0],
                                          observer=new_observer,
                                          **diff_rot_kwargs)
        rotated_bottom = _rotate_submap_edge(smap,
                                             edges[1],
                                             observer=new_observer,
                                             **diff_rot_kwargs)

        # Rotate the left and right hand edges
        rotated_lhs = _rotate_submap_edge(smap,
                                          edges[2],
                                          observer=new_observer,
                                          **diff_rot_kwargs)
        rotated_rhs = _rotate_submap_edge(smap,
                                          edges[3],
                                          observer=new_observer,
                                          **diff_rot_kwargs)

        # Calculate the bounding box of the rotated map
        rotated_bl, rotated_tr = _get_bounding_coordinates(
            [rotated_top, rotated_bottom, rotated_lhs, rotated_rhs])

        # Calculate the maximum distance in pixels the map has moved by comparing
        # how far the original and rotated bounding boxes have moved.
        diff_x = [(np.abs(rotated_bl.Tx - bottom_left.Tx)).value,
                  (np.abs(rotated_tr.Tx - top_right.Tx)).value]
        deltax = int(np.ceil(np.max(diff_x) / smap.scale.axis1).value)

        diff_y = [(np.abs(rotated_bl.Ty - bottom_left.Ty)).value,
                  (np.abs(rotated_tr.Ty - top_right.Ty)).value]
        deltay = int(np.ceil(np.max(diff_y) / smap.scale.axis2).value)

        # Create a new `smap` with the padding around it
        padded_data = np.pad(smap.data, ((deltay, deltay), (deltax, deltax)),
                             'constant',
                             constant_values=0)
        padded_meta = deepcopy(smap.meta)
        padded_meta['naxis2'], padded_meta['naxis1'] = padded_data.shape  # shape after padding

        padded_meta['crpix1'] += deltax
        padded_meta['crpix2'] += deltay

        # Create the padded map that will be used to create the rotated map.
        smap = smap._new_instance(padded_data, padded_meta)

    # Check for masked maps
    if smap.mask is not None:
        smap_data = np.ma.array(smap.data, mask=smap.mask)
    else:
        smap_data = smap.data

    # Create the arguments for the warp function.
    warp_args = {'smap': smap, 'new_observer': new_observer}
    warp_args.update(diff_rot_kwargs)

    # Apply solar differential rotation as a scikit-image warp
    out_data = transform.warp(smap_data,
                              inverse_map=_warp_sun_coordinates,
                              map_args=warp_args,
                              preserve_range=True,
                              cval=np.nan)

    # Update the meta information with the new date and time.
    out_meta = deepcopy(smap.meta)
    if out_meta.get('date_obs', False):
        del out_meta['date_obs']
    out_meta['date-obs'] = new_observer.obstime.strftime(
        "%Y-%m-%dT%H:%M:%S.%f")

    # Need to update the observer location for the output map.
    # Remove all the possible observer keys
    all_keys = expand_list(
        [e[0] for e in smap._supported_observer_coordinates])
    for key in all_keys:
        out_meta.pop(key)

    # Add a new HGS observer
    out_meta.update(get_observer_meta(new_observer,
                                      out_meta['rsun_ref'] * u.m))

    if is_sub_full_disk:
        # Define a new reference pixel and the value at the reference pixel.
        # Note that according to the FITS convention the first pixel in the
        # image is at (1.0, 1.0).
        center_rotated = solar_rotate_coordinate(smap.center,
                                                 observer=new_observer,
                                                 **diff_rot_kwargs)
        out_meta['crval1'] = center_rotated.Tx.value
        out_meta['crval2'] = center_rotated.Ty.value
        out_meta['crpix1'] = 1 + smap.data.shape[1]/2.0 + \
            ((center_rotated.Tx - smap.center.Tx)/smap.scale.axis1).value
        out_meta['crpix2'] = 1 + smap.data.shape[0]/2.0 + \
            ((center_rotated.Ty - smap.center.Ty)/smap.scale.axis2).value
        return smap._new_instance(out_data,
                                  out_meta).submap(rotated_bl,
                                                   top_right=rotated_tr)
    else:
        return smap._new_instance(out_data, out_meta)
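A short usage sketch for the function above (sample data download assumed available; in SunPy 1.x this function lived in sunpy.physics.differential_rotation):

import astropy.units as u
import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE  # downloads sample data on first use

aia_map = sunpy.map.Map(AIA_171_IMAGE)
# Rotate forward by one day; with only `time` given, the new observer is
# taken to be Earth at the new observation time, per the docstring above.
rotated_map = differential_rotate(aia_map, time=1 * u.day)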
Example #24
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs.  args can contain any mixture
        of the following entries:
        * tuples of data,header
        * data, header not in a tuple
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')

        """

        data_header_pairs = list()
        already_maps = list()

        # Account for nested lists of items
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):

            arg = args[i]

            # Data-header pair in a tuple
            if ((type(arg) in [tuple, list]) and len(arg) == 2
                    and isinstance(arg[0], np.ndarray)
                    and isinstance(arg[1], dict)):
                data_header_pairs.append(arg)

            # Data-header pair not in a tuple
            elif (isinstance(arg, np.ndarray)
                  and isinstance(args[i + 1], dict)):
                pair = (args[i], args[i + 1])
                data_header_pairs.append(pair)
                i += 1  # an extra increment to account for the data-header pairing

            # File name
            elif (isinstance(arg, basestring)
                  and os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # Directory
            elif (isinstance(arg, basestring)
                  and os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Glob
            elif (isinstance(arg, basestring) and '*' in arg):
                files = glob.glob(os.path.expanduser(arg))
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Already a Map
            elif isinstance(arg, GenericMap):
                already_maps.append(arg)

            # A URL
            elif (isinstance(arg, basestring) and _is_url(arg)):
                default_dir = sunpy.config.get("downloads", "download_dir")
                url = arg
                path = download_file(url, default_dir)
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # A database Entry
            elif isinstance(arg, DatabaseEntry):
                data_header_pairs += self._read_file(arg.path, **kwargs)

            else:
                raise ValueError("File not found or invalid input")

            i += 1
        #TODO:
        # In the end, if there are already maps they should be put in the same
        # order as the input; currently they are not.
        return data_header_pairs, already_maps
Example #25
0
    def _parse_args(self, *args, **kwargs):
        """
        Parses an args list for data-header pairs.  args can contain any
        mixture of the following entries:
        * tuples of data,header
        * data, header not in a tuple
        * data, wcs object in a tuple
        * data, wcs object not in a tuple
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        Example
        -------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')

        """

        data_header_pairs = list()
        already_maps = list()

        # Account for nested lists of items
        args = expand_list(args)

        # For each of the arguments, handle each of the cases
        i = 0
        while i < len(args):

            arg = args[i]

            # Data-header or data-WCS pair
            if isinstance(arg, SUPPORTED_ARRAY_TYPES):
                arg_header = args[i+1]
                if isinstance(arg_header, WCS):
                    arg_header = args[i+1].to_header()

                if self._validate_meta(arg_header):
                    pair = (args[i], OrderedDict(arg_header))
                    data_header_pairs.append(pair)
                    i += 1    # an extra increment to account for the data-header pairing

            # File name
            elif (isinstance(arg, str) and
                  os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # Directory
            elif (isinstance(arg, str) and
                  os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Glob
            elif (isinstance(arg, str) and '*' in arg):
                files = glob.glob(os.path.expanduser(arg))
                for afile in files:
                    data_header_pairs += self._read_file(afile, **kwargs)

            # Already a Map
            elif isinstance(arg, GenericMap):
                already_maps.append(arg)

            # A URL
            elif (isinstance(arg, str) and
                  _is_url(arg)):
                url = arg
                path = download_file(url, get_and_create_download_dir())
                pairs = self._read_file(path, **kwargs)
                data_header_pairs += pairs

            # A database Entry
            elif isinstance(arg, DatabaseEntry):
                data_header_pairs += self._read_file(arg.path, **kwargs)

            else:
                raise ValueError("File not found or invalid input")

            i += 1

        # TODO:
        # In the end, if there are already maps it should be put in the same
        # order as the input, currently they are not.
        return data_header_pairs, already_maps