Esempio n. 1
0
    def _parse_level_0cs(fp):
        """Parses an EVE Level 0CS file.

        Parameters
        ----------
        fp : file-like object
            Open handle positioned at the start of the file.

        Returns
        -------
        tuple
            ``(meta, data)`` — an OrderedDict of header key/value pairs and a
            pandas DataFrame of the columns, indexed by observation time.
        """
        is_missing_data = False      #boolean to check for missing data
        missing_data_val = numpy.nan
        header = []
        fields = []
        line = fp.readline()
        # Read header at top of file; header lines all start with ";".
        while line.startswith(";"):
            header.append(line)
            if '; Missing data:' in line :
                is_missing_data = True
                # Sentinel value used by the file for missing samples; kept as
                # a string here and converted with float() only when applied.
                missing_data_val = line.split(':')[1].strip()


            line = fp.readline()

        meta = OrderedDict()
        # Turn "; Key: value" header lines into metadata entries.
        for hline in header :
            if hline == '; Format:\n' or hline == '; Column descriptions:\n':
                continue
            elif ('Created' in hline) or ('Source' in hline):
                # These values may themselves contain ':' (e.g. timestamps),
                # so split only on the first colon.
                meta[hline.split(':',1)[0].replace(';',' ').strip()] = hline.split(':',1)[1].strip()
            elif ':' in hline :
                meta[hline.split(':')[0].replace(';',' ').strip()] = hline.split(':')[1].strip()

        # Column names are the header lines found between
        # "; Column descriptions:" and the following "; Format:" line.
        fieldnames_start = False
        for hline in header:
            if hline.startswith("; Format:"):
                fieldnames_start = False
            if fieldnames_start:
                fields.append(hline.split(":")[0].replace(';', ' ').strip())
            if hline.startswith("; Column descriptions:"):
                fieldnames_start = True

        # Next line is YYYY DOY MM DD
        date_parts = line.split(" ")

        year = int(date_parts[0])
        month = int(date_parts[2])
        day = int(date_parts[3])
        #last_pos = fp.tell()
        #line = fp.readline()
        #el = line.split()
        #len

        # function to parse date column (HHMM)
        parser = lambda x: datetime(year, month, day, int(x[0:2]), int(x[2:4]))

        # NOTE(review): sep="\s*" is a regex separator (and a non-raw string);
        # pandas treats it as a whitespace split here — confirm behaviour on
        # the targeted pandas version.
        data = read_csv(fp, sep="\s*", names=fields, index_col=0, date_parser=parser, header = None)
        if is_missing_data :   #If missing data specified in header
            # Replace the file's missing-data sentinel with NaN.
            data[data == float(missing_data_val)] = numpy.nan

        #data.columns = fields
        return meta, data
Esempio n. 2
0
    def _parse_fits(filepath):
        """Loads LYRA data from a FITS file.

        Parameters
        ----------
        filepath : str
            Path to the LYRA FITS file.

        Returns
        -------
        tuple
            ``(header, data)`` — the primary-HDU header as an OrderedDict and
            a pandas DataFrame of the time-series columns indexed by time.

        Raises
        ------
        ValueError
            If no observation-date keyword is present in the primary header,
            or if the time unit of the first column is not recognised.
        """
        # Open file with PyFITS
        hdulist = fits.open(filepath)
        fits_record = hdulist[1].data
        #secondary_header = hdulist[1].header

        # Start date.  Different LYRA FITS files have different tags for the
        # date obs, so try both spellings.
        if 'date-obs' in hdulist[0].header:
            start_str = hdulist[0].header['date-obs']
        elif 'date_obs' in hdulist[0].header:
            start_str = hdulist[0].header['date_obs']
        else:
            # BUGFIX: start_str was previously left undefined on this path,
            # causing a confusing NameError at the parse_time() call below.
            raise ValueError("No date-obs or date_obs keyword found in the "
                             "primary header of {0}".format(filepath))

        start = parse_time(start_str)

        # First column are times.  For level 2 data, the units are [s].
        # For level 3 data, the units are [min]
        if hdulist[1].header['TUNIT1'] == 's':
            times = [
                start + datetime.timedelta(seconds=int(n))
                for n in fits_record.field(0)
            ]
        elif hdulist[1].header['TUNIT1'] == 'MIN':
            times = [
                start + datetime.timedelta(minutes=int(n))
                for n in fits_record.field(0)
            ]
        else:
            raise ValueError("Time unit in LYRA fits file not recognised.  "
                             "Value = {0}".format(hdulist[1].header['TUNIT1']))

        # Rest of columns are the data
        table = {}

        for i, col in enumerate(fits_record.columns[1:-1]):
            # temporary patch for big-endian data bug on pandas 0.13;
            # read the field once instead of up to three times.
            field = fits_record.field(i + 1)
            if field.dtype.byteorder == '>' and sys.byteorder == 'little':
                table[col.name] = field.byteswap().newbyteorder()
            else:
                table[col.name] = field

        # Return the header and the data
        return OrderedDict(hdulist[0].header), pandas.DataFrame(table,
                                                                index=times)
Esempio n. 3
0
    def _parse_fits(filepath):
        """This method parses NoRH tca and tcz correlation files.

        Parameters
        ----------
        filepath : str
            Path to the NoRH FITS file.

        Returns
        -------
        tuple
            ``(header, data)`` — the primary-HDU header as an OrderedDict and
            a pandas DataFrame indexed by observation time.
        """
        hdulist = fits.open(filepath)
        header = OrderedDict(hdulist[0].header)
        # For these NoRH files, the time series data is recorded in the
        # primary HDU.
        data = hdulist[0].data

        # No explicit time array in FITS file, so construct the time array
        # from the FITS header: DATE-OBS + CRVAL1 give the start time and
        # CDELT1 the cadence.
        obs_start_time = parse_time(header['DATE-OBS'] + 'T' + header['CRVAL1'])
        length = len(data)
        # BUGFIX: np.float was removed from NumPy; use the builtin float.
        cadence = float(header['CDELT1'])
        # BUGFIX: np.linspace requires an integer sample count, but
        # length/cadence is a float under Python 3 true division.
        # NOTE(review): when cadence != 1 the index length differs from
        # len(data), which the DataFrame constructor would reject — confirm
        # CDELT1 is always 1 s for these files.
        sec_array = np.linspace(0, length - 1, int(length / cadence))

        norh_time = []
        for s in sec_array:
            norh_time.append(obs_start_time + datetime.timedelta(0, s))

        return header, pandas.DataFrame(data, index=norh_time)
Esempio n. 4
0
    def _parse_fits(filepath):
        """Loads LYRA data from a FITS file.

        Parameters
        ----------
        filepath : str
            Path to the LYRA FITS file.

        Returns
        -------
        tuple
            ``(header, data)`` — the primary-HDU header as an OrderedDict and
            a pandas DataFrame of the time-series columns indexed by time.

        Raises
        ------
        ValueError
            If no observation-date keyword is present in the primary header.
        """
        # Open file with PyFITS
        hdulist = fits.open(filepath)
        fits_record = hdulist[1].data
        #secondary_header = hdulist[1].header

        # Start date.  Different LYRA FITS files have different tags for the
        # date obs, so try both spellings.
        if 'date-obs' in hdulist[0].header:
            start_str = hdulist[0].header['date-obs']
        elif 'date_obs' in hdulist[0].header:
            start_str = hdulist[0].header['date_obs']
        else:
            # BUGFIX: start_str was previously left undefined on this path,
            # causing a confusing NameError at the parse_time() call below.
            raise ValueError("No date-obs or date_obs keyword found in the "
                             "primary header of {0}".format(filepath))

        start = parse_time(start_str)

        # First column are times, assumed to be offsets in seconds from the
        # start time.
        times = [
            start + datetime.timedelta(0, n) for n in fits_record.field(0)
        ]

        # Rest of columns are the data
        table = {}

        for i, col in enumerate(fits_record.columns[1:-1]):
            # temporary patch for big-endian data bug on pandas 0.13;
            # read the field once instead of up to three times.
            field = fits_record.field(i + 1)
            if field.dtype.byteorder == '>' and sys.byteorder == 'little':
                table[col.name] = field.byteswap().newbyteorder()
            else:
                table[col.name] = field

        # Return the header and the data
        return OrderedDict(hdulist[0].header), pandas.DataFrame(table,
                                                                index=times)
Esempio n. 5
0
 def update(self, d2):
     """Case-insensitively merge *d2* into this mapping.

     Every key of *d2* is lower-cased before it is stored, so the
     case-insensitive accessors keep working.
     """
     folded = {key.lower(): value for key, value in d2.items()}
     return OrderedDict.update(self, folded)
Esempio n. 6
0
 def get(self, key, default=None):
     """Case-insensitive .get(): *key* is lower-cased before lookup."""
     folded_key = key.lower()
     return OrderedDict.get(self, folded_key, default)
Esempio n. 7
0
 def __setitem__(self, key, value):
     """Overide [] indexing"""
     return OrderedDict.__setitem__(self, key.lower(), value)
Esempio n. 8
0
 def __getitem__(self, key):
     """Overide [] indexing"""
     return OrderedDict.__getitem__(self, key.lower())
Esempio n. 9
0
 def __init__(self, maxsize=float('inf')):
     self.maxsize = maxsize
     self._dict = OrderedDict()
Esempio n. 10
0
class BaseCache(object):
    """
    BaseCache is a class that saves and operates on an OrderedDict. It has a
    certain capacity, stored in the attribute `maxsize`. Whether this
    capacity is reached, can be checked by using the boolean property
    `is_full`. To implement a custom cache, inherit from this class and
    override the methods ``__getitem__`` and ``__setitem__``.
    Call the method `sunpy.database.caching.BaseCache.callback` as soon
    as an item from the cache is removed.
    """
    # NOTE(review): Python 2-style metaclass declaration. On Python 3 this
    # attribute is ignored, so abstract-method enforcement would require
    # ``class BaseCache(metaclass=ABCMeta)`` — confirm the targeted version.
    __metaclass__ = ABCMeta

    def __init__(self, maxsize=float('inf')):
        # maxsize defaults to infinity, i.e. an unbounded cache.
        self.maxsize = maxsize
        self._dict = OrderedDict()

    def get(self, key, default=None):  # pragma: no cover
        """Return the corresponding value to `key` if `key` is in the cache,
        `default` otherwise. This method has no side-effects, multiple calls
        with the same cache and the same passed key must always return the same
        value.

        """
        try:
            return self._dict[key]
        except KeyError:
            return default

    @abstractmethod
    def __getitem__(self, key):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if an item from the cache is
        attempted to be accessed.

        """
        return  # pragma: no cover

    @abstractmethod
    def __setitem__(self, key, value):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if a new value should be assigned
        to the given key. If the given key does already exist in the cache or
        not must be checked by the person who implements this method.
        """

    @abstractproperty
    def to_be_removed(self):
        """The item that will be removed on the next
        :meth:`sunpy.database.caching.BaseCache.remove` call.

        """

    @abstractmethod
    def remove(self):
        """Call this method to manually remove one item from the cache. Which
        item is removed, depends on the implementation of the cache. After the
        item has been removed, the callback method is called.

        """

    def callback(self, key, value):
        """This method should be called (by convention) if an item is removed
        from the cache because it is full. The passed key and value are the
        ones that are removed. By default this method does nothing, but it
        can be customized in a custom cache that inherits from this base class.

        """

    @property
    def is_full(self):
        """True if the number of items in the cache equals :attr:`maxsize`,
        False otherwise.

        """
        # With the default maxsize of float('inf') this comparison is never
        # true, so an unbounded cache never reports itself as full.
        return len(self._dict) == self.maxsize

    # The remaining methods simply delegate the standard mapping protocol to
    # the underlying OrderedDict.

    def __delitem__(self, key):
        self._dict.__delitem__(key)

    def __contains__(self, key):
        return key in self._dict.keys()

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        for key in self._dict.__iter__():
            yield key

    def __reversed__(self):  # pragma: no cover
        for key in self._dict.__reversed__():
            yield key

    def clear(self):  # pragma: no cover
        return self._dict.clear()

    def keys(self):  # pragma: no cover
        return self._dict.keys()

    def values(self):  # pragma: no cover
        return self._dict.values()

    def items(self):  # pragma: no cover
        return self._dict.items()

    # NOTE(review): iterkeys/itervalues/iteritems and viewkeys/viewvalues/
    # viewitems below delegate to Python 2-only dict methods; they raise
    # AttributeError on Python 3 — confirm this module still targets Py2.
    def iterkeys(self):  # pragma: no cover
        return self._dict.iterkeys()

    def itervalues(self):  # pragma: no cover
        for value in self._dict.itervalues():
            yield value

    def iteritems(self):  # pragma: no cover
        for key, value in self._dict.iteritems():
            yield key, value

    def update(self, *args, **kwds):  # pragma: no cover
        self._dict.update(*args, **kwds)

    def pop(self,
            key,
            # Reuses MutableMapping's private "no default given" sentinel via
            # its name-mangled attribute — fragile, as it depends on a
            # private implementation detail of the abc.
            default=MutableMapping._MutableMapping__marker
            ):  # pragma: no cover
        return self._dict.pop(key, default)

    def setdefault(self, key, default=None):  # pragma: no cover
        return self._dict.setdefault(key, default)

    def popitem(self, last=True):  # pragma: no cover
        return self._dict.popitem(last)

    def __reduce__(self):  # pragma: no cover
        return self._dict.__reduce__()

    def copy(self):  # pragma: no cover
        return self._dict.copy()

    def __eq__(self, other):  # pragma: no cover
        return self._dict.__eq__(other)

    def __ne__(self, other):  # pragma: no cover
        return self._dict.__ne__(other)

    def viewkeys(self):  # pragma: no cover
        return self._dict.viewkeys()

    def viewvalues(self):  # pragma: no cover
        return self._dict.viewvalues()

    def viewitems(self):  # pragma: no cover
        return self._dict.viewitems()

    @classmethod
    def fromkeys(cls, iterable, value=None):  # pragma: no cover
        # Returns a plain OrderedDict, not an instance of cls.
        return OrderedDict.fromkeys(iterable, value)

    def __repr__(self):  # pragma: no cover
        return '{0}({1!r})'.format(self.__class__.__name__, dict(self._dict))
Esempio n. 11
0
 def fromkeys(cls, iterable, value=None):  # pragma: no cover
     """Return a new OrderedDict whose keys come from *iterable*, each
     mapped to *value*."""
     result = OrderedDict()
     for key in iterable:
         result[key] = value
     return result
Esempio n. 12
0
 def __init__(self, data, meta=None):
     self.data = pandas.DataFrame(data)
     if meta == '' or meta is None:
         self.meta = OrderedDict()
     else:
         self.meta = OrderedDict(meta)
Esempio n. 13
0
 def setdefault(self, key, default=None):
     """Case-insensitive .setdefault(): *key* is lower-cased before use."""
     folded = key.lower()
     return OrderedDict.setdefault(self, folded, default)
Esempio n. 14
0
 def __setitem__(self, key, value):
     """Overide [] indexing"""
     return OrderedDict.__setitem__(self, key.lower(), value)
Esempio n. 15
0
 def setdefault(self, key, default=None):
     """Insert/fetch *key* case-insensitively, like dict.setdefault."""
     normalized = key.lower()
     return OrderedDict.setdefault(self, normalized, default)
Esempio n. 16
0
 def update(self, d2):
     """Override .update() so that every key of *d2* is stored lower-cased."""
     pairs = [(k.lower(), v) for k, v in d2.items()]
     return OrderedDict.update(self, dict(pairs))
Esempio n. 17
0
 def get(self, key, default=None):
     """Look up *key* case-insensitively, returning *default* if absent."""
     normalized = key.lower()
     return OrderedDict.get(self, normalized, default)
Esempio n. 18
0
class LightCurve(object):
    """
    LightCurve(filepath)

    A generic light curve object.

    Parameters
    ----------
    args : filepath, url, or start and end dates
        The input for a LightCurve object should either be a filepath, a URL,
        or a date range to be queried for the particular instrument.

    Attributes
    ----------
    meta : string, dict
        The comment string or header associated with the light curve input
    data : pandas.DataFrame
        A pandas DataFrame representing one or more fields as they vary
        with respect to time.

    Examples
    --------
    >>> import sunpy
    >>> import datetime
    >>> import numpy as np

    >>> base = datetime.datetime.today()
    >>> dates = [base - datetime.timedelta(minutes=x) for x in range(0, 24 * 60)]

    >>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / 24 * 60))

    >>> light_curve = sunpy.lightcurve.LightCurve.create(
    ...    {"param1": intensity}, index=dates
    ... )

    >>> light_curve.peek()

    References
    ----------
    | http://pandas.pydata.org/pandas-docs/dev/dsintro.html

    """
    # `create` dispatches to the from_* classmethods below based on the
    # types of the arguments the caller passes.
    _cond_dispatch = ConditionalDispatch()
    create = classmethod(_cond_dispatch.wrapper())

    def __init__(self, data, meta=None):
        self.data = pandas.DataFrame(data)
        # An absent or empty-string header means "no metadata".
        if meta == '' or meta is None:
            self.meta = OrderedDict()
        else:
            self.meta = OrderedDict(meta)

    @property
    def header(self):
        """
        Return the lightcurves metadata

        .. deprecated:: 0.4.0
            Use .meta instead
        """
        warnings.warn(
            """lightcurve.header has been renamed to lightcurve.meta
for compatability with map, please use meta instead""", Warning)
        return self.meta

    @classmethod
    def from_time(cls, time, **kwargs):
        '''Called by Conditional Dispatch object when valid time is passed as input to create method.'''
        date = parse_time(time)
        url = cls._get_url_for_date(date, **kwargs)
        filepath = cls._download(
            url, kwargs, err="Unable to download data for specified date")
        return cls.from_file(filepath)

    @classmethod
    def from_range(cls, start, end, **kwargs):
        '''Called by Conditional Dispatch object when start and end time are passed as input to create method.'''
        url = cls._get_url_for_date_range(parse_time(start), parse_time(end))
        filepath = cls._download(
            url,
            kwargs,
            err="Unable to download data for specified date range")
        result = cls.from_file(filepath)
        # Trim the downloaded file's contents to the requested window.
        result.data = result.data.truncate(start, end)
        return result

    @classmethod
    def from_timerange(cls, timerange, **kwargs):
        '''Called by Conditional Dispatch object when time range is passed as input to create method.'''
        url = cls._get_url_for_date_range(timerange)
        filepath = cls._download(
            url,
            kwargs,
            err="Unable to download data for specified date range")
        result = cls.from_file(filepath)
        result.data = result.data.truncate(timerange.start(), timerange.end())
        return result

    @classmethod
    def from_file(cls, filename):
        """Return a LightCurve object by reading the given file.

        Parameters
        ----------
        filename : str
            Path of the file to be read.
        """

        filename = os.path.expanduser(filename)
        meta, data = cls._parse_filepath(filename)
        if data.empty:
            raise ValueError("No data found!")
        else:
            return cls(data, meta)

    @classmethod
    def from_url(cls, url, **kwargs):
        """Download a file from the given url, read it and return a
        LightCurve object.

        Parameters
        ----------
        url : string
            Uniform Resource Locator pointing to the file.
        kwargs : dict
            Dict object containing other related parameters to assist in
            download.
        """
        try:
            filepath = cls._download(url, kwargs)
        except (urllib2.HTTPError, urllib2.URLError, ValueError):
            err = ("Unable to read location %s.") % url
            raise ValueError(err)
        return cls.from_file(filepath)

    @classmethod
    def from_data(cls, data, index=None, meta=None):
        """Called by the ConditionalDispatch object to create a LightCurve
        object when raw data (plus optional index) is passed to the create
        method.
        """

        return cls(pandas.DataFrame(data, index=index), meta)

    @classmethod
    def from_yesterday(cls):
        # Fetches the instrument's default URI (see _get_default_uri).
        return cls.from_url(cls._get_default_uri())

    @classmethod
    def from_dataframe(cls, dataframe, meta=None):
        """Called by the ConditionalDispatch object to create a LightCurve
        object when a pandas DataFrame is passed to the create method.
        """

        return cls(dataframe, meta)

    def plot(self, axes=None, **plot_args):
        """Plot a plot of the light curve

        Parameters
        ----------
        axes: matplotlib.axes object or None
            If provided the image will be plotted on the given axes. Else the 
            current matplotlib axes will be used.

        **plot_args : dict
            Any additional plot arguments that should be used
            when plotting the image.

        """

        #Get current axes
        if axes is None:
            axes = plt.gca()

        axes = self.data.plot(ax=axes, **plot_args)

        return axes

    def peek(self, **kwargs):
        """Displays the light curve in a new figure"""

        figure = plt.figure()

        self.plot(**kwargs)

        figure.show()

        return figure

    @staticmethod
    def _download(uri, kwargs, err='Unable to download data at specified URL'):
        """Attempts to download data at the specified URI"""

        # Strip any query string from the filename component of the URI.
        _filename = os.path.basename(uri).split("?")[0]

        # user specifies a download directory
        if "directory" in kwargs:
            download_dir = os.path.expanduser(kwargs["directory"])
        else:
            download_dir = sunpy.config.get("downloads", "download_dir")

        # overwrite the existing file if the keyword is present
        if "overwrite" in kwargs:
            overwrite = kwargs["overwrite"]
        else:
            overwrite = False

        # If the file is not already there, download it
        filepath = os.path.join(download_dir, _filename)

        if not (os.path.isfile(filepath)) or (overwrite
                                              and os.path.isfile(filepath)):
            try:
                # NOTE(review): urllib2 is the Python 2 stdlib module; on
                # Python 3 this would be urllib.request — confirm target.
                response = urllib2.urlopen(uri)
            except (urllib2.HTTPError, urllib2.URLError):
                raise urllib2.URLError(err)
            with open(filepath, 'wb') as fp:
                shutil.copyfileobj(response, fp)
        else:
            warnings.warn(
                "Using existing file rather than downloading, use overwrite=True to override.",
                RuntimeWarning)

        return filepath

    @classmethod
    def _get_default_uri(cls):
        """Default data to load when none is specified"""
        msg = "No default action set for %s"
        raise NotImplementedError(msg % cls.__name__)

    @classmethod
    def _get_url_for_date(cls, date, **kwargs):
        """Returns a URL to the data for the specified date"""
        msg = "Date-based downloads not supported for for %s"
        raise NotImplementedError(msg % cls.__name__)

    @classmethod
    def _get_url_for_date_range(cls, *args, **kwargs):
        """Returns a URL to the data for the specified date range"""
        msg = "Date-range based downloads not supported for for %s"
        raise NotImplementedError(msg % cls.__name__)

    @staticmethod
    def _parse_csv(filepath):
        """Place holder method to parse CSV files."""
        msg = "Generic CSV parsing not yet implemented for LightCurve"
        raise NotImplementedError(msg)

    @staticmethod
    def _parse_fits(filepath):
        """Place holder method to parse FITS files."""
        msg = "Generic FITS parsing not yet implemented for LightCurve"
        raise NotImplementedError(msg)

    @classmethod
    def _parse_filepath(cls, filepath):
        """Check the file extension to see how to parse the file"""
        filename, extension = os.path.splitext(filepath)

        if extension.lower() in (".csv", ".txt"):
            return cls._parse_csv(filepath)
        else:
            # Anything that is not CSV/TXT is assumed to be FITS.
            return cls._parse_fits(filepath)

    def truncate(self, a, b=None):
        """Returns a truncated version of the timeseries object"""
        # Accept either a TimeRange or a (start, end) pair.
        if isinstance(a, TimeRange):
            time_range = a
        else:
            time_range = TimeRange(a, b)

        truncated = self.data.truncate(time_range.start(), time_range.end())
        return self.__class__.create(truncated, self.meta.copy())

    def extract(self, a):
        """Extract a set of particular columns from the DataFrame"""
        # TODO allow the extract function to pick more than one column
        if isinstance(self, pandas.Series):
            return self
        else:
            return LightCurve(self.data[a], self.meta.copy())

    def time_range(self):
        """Returns the start and end times of the LightCurve as a TimeRange
        object"""
        return TimeRange(self.data.index[0], self.data.index[-1])
0
class LightCurve(object):
    """
    LightCurve(filepath)

    A generic light curve object.

    Parameters
    ----------
    args : filepath, url, or start and end dates
        The input for a LightCurve object should either be a filepath, a URL,
        or a date range to be queried for the particular instrument.

    Attributes
    ----------
    meta : string, dict
        The comment string or header associated with the light curve input
    data : pandas.DataFrame
        A pandas DataFrame representing one or more fields as they vary
        with respect to time.

    Examples
    --------
    >>> import sunpy
    >>> import datetime
    >>> import numpy as np

    >>> base = datetime.datetime.today()
    >>> dates = [base - datetime.timedelta(minutes=x) for x in range(0, 24 * 60)]

    >>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / 24 * 60))

    >>> light_curve = sunpy.lightcurve.LightCurve.create(
    ...    {"param1": intensity}, index=dates
    ... )

    >>> light_curve.peek()

    References
    ----------
    | http://pandas.pydata.org/pandas-docs/dev/dsintro.html

    """

    # `create` dispatches to the from_* classmethods below based on the
    # types of the arguments the caller passes.
    _cond_dispatch = ConditionalDispatch()
    create = classmethod(_cond_dispatch.wrapper())

    def __init__(self, data, meta=None):
        self.data = pandas.DataFrame(data)
        # An absent or empty-string header means "no metadata".
        if meta == "" or meta is None:
            self.meta = OrderedDict()
        else:
            self.meta = OrderedDict(meta)

    @property
    def header(self):
        """
        Return the lightcurves metadata

        .. deprecated:: 0.4.0
            Use .meta instead
        """
        warnings.warn(
            """lightcurve.header has been renamed to lightcurve.meta
for compatability with map, please use meta instead""",
            Warning,
        )
        return self.meta

    @classmethod
    def from_time(cls, time, **kwargs):
        """Called by Conditional Dispatch object when valid time is passed as input to create method."""
        date = parse_time(time)
        url = cls._get_url_for_date(date, **kwargs)
        filepath = cls._download(url, kwargs, err="Unable to download data for specified date")
        return cls.from_file(filepath)

    @classmethod
    def from_range(cls, start, end, **kwargs):
        """Called by Conditional Dispatch object when start and end time are passed as input to create method."""
        url = cls._get_url_for_date_range(parse_time(start), parse_time(end), **kwargs)
        filepath = cls._download(url, kwargs, err="Unable to download data for specified date range")
        result = cls.from_file(filepath)
        # Trim the downloaded file's contents to the requested window.
        result.data = result.data.truncate(start, end)
        return result

    @classmethod
    def from_timerange(cls, timerange, **kwargs):
        """Called by Conditional Dispatch object when time range is passed as input to create method."""
        url = cls._get_url_for_date_range(timerange, **kwargs)
        filepath = cls._download(url, kwargs, err="Unable to download data for specified date range")
        result = cls.from_file(filepath)
        result.data = result.data.truncate(timerange.start(), timerange.end())
        return result

    @classmethod
    def from_file(cls, filename):
        """Used to return Light Curve object by reading the given filename

        Parameters:
            filename: Path of the file to be read.

        """

        filename = os.path.expanduser(filename)
        meta, data = cls._parse_filepath(filename)
        if data.empty:
            raise ValueError("No data found!")
        else:
            return cls(data, meta)

    @classmethod
    def from_url(cls, url, **kwargs):
        """
        Downloads a file from the given url, reads and returns a Light Curve object.

        Parameters:
            url : string
                Uniform Resource Locator pointing to the file.

            kwargs :Dict
                Dict object containing other related parameters to assist in download.

        """
        try:
            filepath = cls._download(url, kwargs)
        except (urllib2.HTTPError, urllib2.URLError, ValueError):
            err = ("Unable to read location %s.") % url
            raise ValueError(err)
        return cls.from_file(filepath)

    @classmethod
    def from_data(cls, data, index=None, meta=None):
        """
        Called by Conditional Dispatch object to create Light Curve object when corresponding data is passed
        to create method.
        """

        return cls(pandas.DataFrame(data, index=index), meta)

    @classmethod
    def from_yesterday(cls):
        # Fetches the instrument's default URI (see _get_default_uri).
        return cls.from_url(cls._get_default_uri())

    @classmethod
    def from_dataframe(cls, dataframe, meta=None):
        """
        Called by Conditional Dispatch object to create Light Curve object when Pandas DataFrame is passed
        to create method.
        """

        return cls(dataframe, meta)

    def plot(self, axes=None, **plot_args):
        """Plot a plot of the light curve

        Parameters
        ----------
        axes: matplotlib.axes object or None
            If provided the image will be plotted on the given axes. Else the
            current matplotlib axes will be used.

        **plot_args : dict
            Any additional plot arguments that should be used
            when plotting the image.

        """

        # Get current axes
        if axes is None:
            axes = plt.gca()

        axes = self.data.plot(ax=axes, **plot_args)

        return axes

    def peek(self, **kwargs):
        """Displays the light curve in a new figure"""

        figure = plt.figure()

        self.plot(**kwargs)

        figure.show()

        return figure

    @staticmethod
    def _download(uri, kwargs, err="Unable to download data at specified URL"):
        """Attempts to download data at the specified URI"""

        # Strip any query string from the filename component of the URI.
        _filename = os.path.basename(uri).split("?")[0]

        # user specifies a download directory
        if "directory" in kwargs:
            download_dir = os.path.expanduser(kwargs["directory"])
        else:
            download_dir = config.get("downloads", "download_dir")

        # overwrite the existing file if the keyword is present
        if "overwrite" in kwargs:
            overwrite = kwargs["overwrite"]
        else:
            overwrite = False

        # If the file is not already there, download it
        filepath = os.path.join(download_dir, _filename)

        if not (os.path.isfile(filepath)) or (overwrite and os.path.isfile(filepath)):
            try:
                # NOTE(review): urllib2 is the Python 2 stdlib module; on
                # Python 3 this would be urllib.request — confirm target.
                response = urllib2.urlopen(uri)
            except (urllib2.HTTPError, urllib2.URLError):
                raise urllib2.URLError(err)
            with open(filepath, "wb") as fp:
                shutil.copyfileobj(response, fp)
        else:
            warnings.warn(
                "Using existing file rather than downloading, use overwrite=True to override.", RuntimeWarning
            )

        return filepath

    @classmethod
    def _get_default_uri(cls):
        """Default data to load when none is specified"""
        msg = "No default action set for %s"
        raise NotImplementedError(msg % cls.__name__)

    @classmethod
    def _get_url_for_date(cls, date, **kwargs):
        """Returns a URL to the data for the specified date"""
        msg = "Date-based downloads not supported for for %s"
        raise NotImplementedError(msg % cls.__name__)

    @classmethod
    def _get_url_for_date_range(cls, *args, **kwargs):
        """Returns a URL to the data for the specified date range"""
        msg = "Date-range based downloads not supported for for %s"
        raise NotImplementedError(msg % cls.__name__)

    @staticmethod
    def _parse_csv(filepath):
        """Place holder method to parse CSV files."""
        msg = "Generic CSV parsing not yet implemented for LightCurve"
        raise NotImplementedError(msg)

    @staticmethod
    def _parse_fits(filepath):
        """Place holder method to parse FITS files."""
        msg = "Generic FITS parsing not yet implemented for LightCurve"
        raise NotImplementedError(msg)

    @classmethod
    def _parse_filepath(cls, filepath):
        """Check the file extension to see how to parse the file"""
        filename, extension = os.path.splitext(filepath)

        if extension.lower() in (".csv", ".txt"):
            return cls._parse_csv(filepath)
        else:
            # Anything that is not CSV/TXT is assumed to be FITS.
            return cls._parse_fits(filepath)

    def truncate(self, a, b=None):
        """Returns a truncated version of the timeseries object"""
        # Accept either a TimeRange or a (start, end) pair.
        if isinstance(a, TimeRange):
            time_range = a
        else:
            time_range = TimeRange(a, b)

        truncated = self.data.truncate(time_range.start(), time_range.end())
        return self.__class__.create(truncated, self.meta.copy())

    def extract(self, a):
        """Extract a set of particular columns from the DataFrame"""
        # TODO allow the extract function to pick more than one column
        if isinstance(self, pandas.Series):
            return self
        else:
            return LightCurve(self.data[a], self.meta.copy())

    def time_range(self):
        """Returns the start and end times of the LightCurve as a TimeRange
        object"""
        return TimeRange(self.data.index[0], self.data.index[-1])
Esempio n. 20
0
class BaseCache(object):
    """
    BaseCache is a class that saves and operates on an OrderedDict. It has a
    certain capacity, stored in the attribute `maxsize`. Whether this
    capacity is reached, can be checked by using the boolean property
    `is_full`. To implement a custom cache, inherit from this class and
    override the methods ``__getitem__`` and ``__setitem__``.
    Call the method `sunpy.database.caching.BaseCache.callback` as soon
    as an item from the cache is removed.
    """
    # NOTE(review): ``__metaclass__`` is the Python 2 spelling. Under
    # Python 3 this assignment has no effect, so the abstract methods
    # below are not actually enforced there -- confirm the targeted
    # Python version.
    __metaclass__ = ABCMeta

    def __init__(self, maxsize=float('inf')):
        # maxsize: maximum number of entries; float('inf') means unbounded.
        self.maxsize = maxsize
        # Insertion-ordered backing store; concrete subclasses build their
        # eviction policy on top of it.
        self._dict = OrderedDict()

    def get(self, key, default=None):  # pragma: no cover
        """Return the corresponding value to `key` if `key` is in the cache,
        `default` otherwise. This method has no side-effects, multiple calls
        with the same cache and the same passed key must always return the same
        value.

        """
        # EAFP: a single lookup instead of a contains-then-index pair.
        try:
            return self._dict[key]
        except KeyError:
            return default

    @abstractmethod
    def __getitem__(self, key):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if an item from the cache is
        attempted to be accessed.

        """
        return  # pragma: no cover

    @abstractmethod
    def __setitem__(self, key, value):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if a new value should be assigned
        to the given key. If the given key does already exist in the cache or
        not must be checked by the person who implements this method.
        """

    @abstractproperty
    def to_be_removed(self):
        """The item that will be removed on the next
        :meth:`sunpy.database.caching.BaseCache.remove` call.

        """

    @abstractmethod
    def remove(self):
        """Call this method to manually remove one item from the cache. Which
        item is removed, depends on the implementation of the cache. After the
        item has been removed, the callback method is called.

        """

    def callback(self, key, value):
        """This method should be called (by convention) if an item is removed
        from the cache because it is full. The passed key and value are the
        ones that are removed. By default this method does nothing, but it
        can be customized in a custom cache that inherits from this base class.

        """

    @property
    def is_full(self):
        """True if the number of items in the cache equals :attr:`maxsize`,
        False otherwise.

        """
        # NOTE(review): strict equality -- a cache that somehow exceeds
        # maxsize would report False here.
        return len(self._dict) == self.maxsize

    # The remaining methods delegate straight to the backing OrderedDict so
    # that a BaseCache behaves like a mapping.

    def __delitem__(self, key):
        self._dict.__delitem__(key)

    def __contains__(self, key):
        return key in self._dict.keys()

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        # Yields keys in insertion order, mirroring OrderedDict iteration.
        for key in self._dict.__iter__():
            yield key

    def __reversed__(self):  # pragma: no cover
        for key in self._dict.__reversed__():
            yield key

    def clear(self):  # pragma: no cover
        return self._dict.clear()

    def keys(self):  # pragma: no cover
        return self._dict.keys()

    def values(self):  # pragma: no cover
        return self._dict.values()

    def items(self):  # pragma: no cover
        return self._dict.items()

    # NOTE(review): iterkeys/itervalues/iteritems and the view* methods
    # below exist only on Python 2 dicts; on Python 3 these delegations
    # would raise AttributeError -- confirm they are still needed.
    def iterkeys(self):  # pragma: no cover
        return self._dict.iterkeys()

    def itervalues(self):  # pragma: no cover
        for value in self._dict.itervalues():
            yield value

    def iteritems(self):  # pragma: no cover
        for key, value in self._dict.iteritems():
            yield key, value

    def update(self, *args, **kwds):  # pragma: no cover
        self._dict.update(*args, **kwds)

    # Reuses MutableMapping's private name-mangled sentinel so that pop()
    # without a default raises KeyError exactly like dict.pop does.
    def pop(self, key, default=MutableMapping._MutableMapping__marker):  # pragma: no cover
        return self._dict.pop(key, default)

    def setdefault(self, key, default=None):  # pragma: no cover
        return self._dict.setdefault(key, default)

    def popitem(self, last=True):  # pragma: no cover
        return self._dict.popitem(last)

    def __reduce__(self):  # pragma: no cover
        return self._dict.__reduce__()

    def copy(self):  # pragma: no cover
        # Returns a plain OrderedDict copy, not a new BaseCache instance.
        return self._dict.copy()

    def __eq__(self, other):  # pragma: no cover
        return self._dict.__eq__(other)

    def __ne__(self, other):  # pragma: no cover
        return self._dict.__ne__(other)

    def viewkeys(self):  # pragma: no cover
        return self._dict.viewkeys()

    def viewvalues(self):  # pragma: no cover
        return self._dict.viewvalues()

    def viewitems(self):  # pragma: no cover
        return self._dict.viewitems()

    @classmethod
    def fromkeys(cls, iterable, value=None):  # pragma: no cover
        # NOTE: returns an OrderedDict, not an instance of cls.
        return OrderedDict.fromkeys(iterable, value)

    def __repr__(self):  # pragma: no cover
        return '{0}({1!r})'.format(self.__class__.__name__, dict(self._dict))
Esempio n. 21
0
 def __init__(self, data, meta=None):
     """Store *data* as a DataFrame and normalise *meta* to an OrderedDict.

     A missing (``None``) or empty-string *meta* becomes an empty
     OrderedDict; anything else is converted via ``OrderedDict(meta)``.
     """
     self.data = pandas.DataFrame(data)
     if meta is None or meta == "":
         self.meta = OrderedDict()
     else:
         self.meta = OrderedDict(meta)
Esempio n. 22
0
 def __init__(self, maxsize=float('inf')):
     """Create an empty cache bounded by *maxsize* entries (default: unbounded)."""
     # Backing store keeps insertion order for eviction policies.
     self._dict = OrderedDict()
     self.maxsize = maxsize
Esempio n. 23
0
 def __contains__(self, key):
     """Case-insensitive membership test: keys are lower-cased before lookup."""
     lowered = key.lower()
     return OrderedDict.__contains__(self, lowered)
Esempio n. 24
0
 def fromkeys(cls, iterable, value=None):  # pragma: no cover
     """Return an OrderedDict whose keys come from *iterable*, each mapped to *value*."""
     result = OrderedDict.fromkeys(iterable, value)
     return result
Esempio n. 25
0
 def __getitem__(self, key):
     """Case-insensitive item access: the key is lower-cased before lookup."""
     lowered = key.lower()
     return OrderedDict.__getitem__(self, lowered)
Esempio n. 26
0
 def __init__(self, *args, **kwargs):
     # Pure delegation: initialise the underlying OrderedDict with whatever
     # positional/keyword arguments were given, exactly like OrderedDict().
     OrderedDict.__init__(self, *args, **kwargs)
Esempio n. 27
0
 def __contains__(self, key):
     """Membership test that ignores key case by lower-casing before lookup."""
     return OrderedDict.__contains__(self, key.lower())