Example #1
def print_table(lst, colsep=' ', linesep='\n'):
    width = [max(map(len, col)) for col in zip(*lst)]
    return linesep.join(
        colsep.join(
            col.ljust(n) for n, col in zip(width, row)
        ) for row in lst
    )
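A minimal usage sketch; every row passed to print_table must be a sequence of strings with the same number of columns:

rows = [['name', 'lang'], ['sunpy', 'python'], ['zip', 'builtin']]
print(print_table(rows))
# name  lang
# sunpy python
# zip   builtin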
Example #2
def print_table(lst, colsep=' ', linesep='\n'):
    """
    ?

    Parameters
    ----------
    lst : ?
        ?
    colsep : ?
        ?
    linesep : ?
        ?

    Returns
    -------
    ?

    .. todo::
        improve documentation.

    """
    width = [max(map(len, col)) for col in zip(*lst)]
    return linesep.join(
        colsep.join(
            col.ljust(n) for n, col in zip(width, row)
        ) for row in lst
    )
Example #3
    def _extractDateURL(self, url):
        """Extracts the date from a particular url following the pattern"""

        # remove the username and password from the url, if present:
        url = url.replace("anonymous:data@sunpy.org@", "")

        # url_to_list substitutes '/' for '.' and '_' to then create
        # a list of all the blocks in times - assuming they are all
        # separated with either '.', '_' or '/'
        url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
        pattern_list = url_to_list(self.pattern)
        url_list = url_to_list(url)
        time_order = [
            '%Y', '%y', '%b', '%B', '%m', '%d', '%j', '%H', '%I', '%M', '%S',
            '%e', '%f'
        ]
        final_date = []
        final_pattern = []
        # Find in directory and filename
        for pattern_elem, url_elem in zip(pattern_list, url_list):
            time_formats = [x for x in time_order if x in pattern_elem]
            if len(time_formats) > 0:
                # Find whether there's text that should not be here
                toremove = re.split('%.', pattern_elem)
                if len(toremove) > 0:
                    for bit in toremove:
                        if bit != '':
                            url_elem = url_elem.replace(bit, '', 1)
                            pattern_elem = pattern_elem.replace(bit, '', 1)
                final_date.append(url_elem)
                final_pattern.append(pattern_elem)
                for time_bit in time_formats:
                    time_order.remove(time_bit)
        # Find and remove repeated elements, e.g. %Y in ['%Y', '%Y%m%d']
        #   Join everything into single strings
        date_together = ''.join(final_date)
        pattern_together = ''.join(final_pattern)
        re_together = pattern_together
        for k, v in six.iteritems(TIME_CONVERSIONS):
            re_together = re_together.replace(k, v)

        #   Lists to contain the unique elements of the date and the pattern
        final_date = list()
        final_pattern = list()
        re_together = re_together.replace('[A-Z]', '\\[A-Z]')
        for p, r in zip(
                pattern_together.split('%')[1:],
                re_together.split('\\')[1:]):
            if p == 'e':
                continue
            regexp = r'\{}'.format(r) if not r.startswith('[') else r
            pattern = '%{}'.format(p)
            date_part = re.search(regexp, date_together)
            date_together = date_together[:date_part.start()] + \
                            date_together[date_part.end():]
            if pattern not in final_pattern:
                final_pattern.append('%{}'.format(p))
                final_date.append(date_part.group())
        return datetime.datetime.strptime(' '.join(final_date),
                                          ' '.join(final_pattern))
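TIME_CONVERSIONS is defined elsewhere in the module and maps strptime directives to regular-expression fragments. A hedged sketch of what such a mapping could look like (the exact entries in the project may differ):

# Hypothetical sketch: most fragments must begin with a backslash because
# the code above later splits the combined pattern on '\\'; the '[A-Z]'
# fragment used for %b is special-cased in the snippet.
TIME_CONVERSIONS = {
    '%Y': r'\d{4}', '%y': r'\d{2}',
    '%b': '[A-Z][a-z]{2}', '%B': r'\W',
    '%m': r'\d{2}', '%d': r'\d{2}', '%j': r'\d{3}',
    '%H': r'\d{2}', '%I': r'\d{2}', '%M': r'\d{2}',
    '%S': r'\d{2}', '%e': r'\d{3}', '%f': r'\d{6}',
}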
Example #4
    def _extractDateURL(self, url):
        """Extracts the date from a particular url following the pattern"""

        # remove the username and password from the url, if present:
        url = url.replace("anonymous:data@sunpy.org@", "")

        # url_to_list substitutes '/' for '.' and '_' to then create
        # a list of all the blocks in times - assuming they are all
        # separated with either '.', '_' or '/'
        url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
        pattern_list = url_to_list(self.pattern)
        url_list = url_to_list(url)
        time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',
                      '%H', '%I', '%M', '%S', '%e', '%f']
        final_date = []
        final_pattern = []
        # Find in directory and filename
        for pattern_elem, url_elem in zip(pattern_list, url_list):
            time_formats = [x for x in time_order if x in pattern_elem]
            if len(time_formats) > 0:
                # Find whether there's text that should not be here
                toremove = re.split('%.', pattern_elem)
                if len(toremove) > 0:
                    for bit in toremove:
                        if bit != '':
                            url_elem = url_elem.replace(bit, '', 1)
                            pattern_elem = pattern_elem.replace(bit, '', 1)
                final_date.append(url_elem)
                final_pattern.append(pattern_elem)
                for time_bit in time_formats:
                    time_order.remove(time_bit)
        # Find and remove repeated elements, e.g. %Y in ['%Y', '%Y%m%d']
        #   Join everything into single strings
        date_together = ''.join(final_date)
        pattern_together = ''.join(final_pattern)
        re_together = pattern_together
        for k, v in six.iteritems(TIME_CONVERSIONS):
            re_together = re_together.replace(k, v)

        #   Lists to contain the unique elements of the date and the pattern
        final_date = list()
        final_pattern = list()
        re_together = re_together.replace('[A-Z]', '\\[A-Z]')
        for p, r in zip(pattern_together.split('%')[1:], re_together.split('\\')[1:]):
            if p == 'e':
                continue
            regexp = r'\{}'.format(r) if not r.startswith('[') else r
            pattern = '%{}'.format(p)
            date_part = re.search(regexp, date_together)
            date_together = date_together[:date_part.start()] + \
                            date_together[date_part.end():]
            if pattern not in final_pattern:
                final_pattern.append('%{}'.format(p))
                final_date.append(date_part.group())
        return datetime.datetime.strptime(' '.join(final_date),
                                          ' '.join(final_pattern))
Example #5
    def add(self, fun, types, override=SILENT):
        """ Add fun to the multimethod. It will be executed if get returns
        values of the types passed as types. Must return tuples of same
        length for any input.

        Parameters
        ----------
        fun : function
            function to be added to the multimethod
        types : tuple of classes
            types for which the function is executed
        override : SILENT, WARN or FAIL
            control behaviour when overriding existing definitions.
            If it is set to SILENT, prior definitions are silently
            overridden, if it is set to WARN a TypeWarning
            will be issued, and with FAIL a TypeError is raised when
            attempting to override an existing definition.
        """
        if override not in (SILENT, WARN, FAIL):
            raise ValueError("Invalid value '{0}' for override.".format(override))

        overriden = False
        if override != SILENT:
            for signature, _ in self.methods:
                if all(issubclass(a, b) for a, b in zip(types, signature)):
                    overriden = True
        if overriden and override == FAIL:
            raise TypeError
        elif overriden and override == WARN:
            # pylint: disable=W0631
            warn('Definition ({0}) overrides prior definition ({1}).'.format(_fmt_t(types),
                                                                             _fmt_t(signature)),
                 TypeWarning, stacklevel=3)

        self.methods.append((types, fun))
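SILENT, WARN, FAIL and TypeWarning come from the surrounding module; a minimal sketch of plausible definitions, assumed here for illustration only:

# Assumed definitions: add() only needs three distinct override modes
# and a warning category to pass to warn().
SILENT, WARN, FAIL = range(3)

class TypeWarning(UserWarning):
    pass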
Example #6
def fmt_argspec_types(fun, types, start=0):
    args, varargs, keywords, defaults = correct_argspec(fun)

    args = args[start:]
    types = types[start:]

    NULL = object()
    if defaults is None:
        defaults = []
    defs = chain(repeat(NULL, len(args) - len(defaults)), defaults)

    spec = []
    for key, value, type_ in zip(args, defs, types):
        # This is a workaround for a bug introduced during the Python 3 port:
        # for some reason the type was being passed in as a length-1 tuple.
        # This extracts the type under that condition. SM 6/10/15
        if isinstance(type_, tuple) and len(type_) == 1:
            type_ = type_[0]
        if value is NULL:
            spec.append("{0}: {1}".format(key, type_.__name__))
        else:
            spec.append("{0}: {1} = {2}".format(key, type_.__name__, value))
    if varargs is not None:
        spec.append('*{!s}'.format(varargs))
    if keywords is not None:
        spec.append('**{!s}'.format(keywords))
    return '(' + ', '.join(spec) + ')'
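A hedged usage sketch; correct_argspec is not shown above, so this assumes it returns the same four fields as inspect.getfullargspec:

import inspect
from itertools import chain, repeat  # used by fmt_argspec_types above

def correct_argspec(fun):
    # assumed stand-in for the project's helper
    spec = inspect.getfullargspec(fun)
    return spec.args, spec.varargs, spec.varkw, spec.defaults

def greet(name, punctuation='!'):
    return name + punctuation

print(fmt_argspec_types(greet, (str, str)))
# prints: (name: str, punctuation: str = !)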
Example #7
    def add(self, fun, types, override=SILENT):
        """ Add fun to the multimethod. It will be executed if get returns
        values of the types passed as types. Must return tuples of same
        length for any input.

        Parameters
        ----------
        fun : function
            function to be added to the multimethod
        types : tuple of classes
            types for which the function is executed
        override : SILENT, WARN or FAIL
            control behaviour when overriding existing definitions.
            If it is set to SILENT, prior definitions are silently
            overridden, if it is set to WARN a TypeWarning
            will be issued, and with FAIL a TypeError is raised when
            attempting to override an existing definition.
        """
        if override not in (SILENT, WARN, FAIL):
            raise ValueError('Invalid value for override.')

        overriden = False
        if override != SILENT:
            for signature, _ in self.methods:
                if all(issubclass(a, b) for a, b in zip(types, signature)):
                    overriden = True
        if overriden and override == FAIL:
            raise TypeError
        elif overriden and override == WARN:
            # pylint: disable=W0631
            warn('Definition ({0}) overrides prior definition ({1}).'.format(
                _fmt_t(types), _fmt_t(signature)),
                 TypeWarning,
                 stacklevel=3)
        self.methods.append((types, fun))
Example #8
    def generate_docs(self):
        fns = (item[0] for item in chain(self.funcs, self.nones))
        return '\n\n'.join(
            "{0} -> :py:meth:`{1}`".format(sig, fun.__name__) for sig, fun in
            # The -1 prevents the cls from incorrectly being shown in the
            # documentation.
            zip(self.get_signatures("create", -1), fns))
Example #9
def matches_types(fun, types, args, kwargs):
    """ See if args and kwargs match are instances of types. types are given
    in the order they are defined in the function. kwargs are automatically
    converted into that order. """
    return all(
        isinstance(obj, cls)
        for obj, cls in zip(arginize(fun, args, kwargs), types))
Example #10
def fmt_argspec_types(fun, types, start=0):
    args, varargs, keywords, defaults = correct_argspec(fun)

    args = args[start:]
    types = types[start:]

    NULL = object()
    if defaults is None:
        defaults = []
    defs = chain(repeat(NULL, len(args) - len(defaults)), defaults)

    spec = []
    for key, value, type_ in zip(args, defs, types):
        # This is a workaround for a bug introduced during the Python 3 port:
        # for some reason the type was being passed in as a length-1 tuple.
        # This extracts the type under that condition. SM 6/10/15
        if isinstance(type_, tuple) and len(type_) == 1:
            type_ = type_[0]
        if value is NULL:
            spec.append("{0}: {1}".format(key, type_.__name__))
        else:
            spec.append("{0}: {1} = {2}".format(key, type_.__name__, value))
    if varargs is not None:
        spec.append('*{!s}'.format(varargs))
    if keywords is not None:
        spec.append('**{!s}'.format(keywords))
    return '(' + ', '.join(spec) + ')'
Example #11
    def super(self, *args, **kwargs):
        """ Like __call__, only that when you give it super(cls, obj) items,
        it will skip the multimethod for cls and use the one for its parent
        class. The normal __call__ does not consider this for performance
        reasons. """
        objs = self.get(*args, **kwargs)
        types = tuple([
            x.__thisclass__.__mro__[1] if isinstance(x, super) else type(x)
            for x in objs
        ])
        nargs = [x.__self__ if isinstance(x, super) else x for x in args]

        for k, elem in six.iteritems(kwargs):
            if isinstance(elem, super):
                kwargs[k] = elem.__self__

        # This code is duplicate for performance reasons.
        cached = self.cache.get(types, None)
        if cached is not None:
            return cached(*nargs, **kwargs)

        for signature, fun in reversed(self.methods):
            if all(issubclass(ty, sig) for ty, sig in zip(types, signature)):
                self.cache[types] = fun
                return fun(*nargs, **kwargs)
        raise TypeError
Example #12
    def super(self, *args, **kwargs):
        """ Like __call__, only that when you give it super(cls, obj) items,
        it will skip the multimethod for cls and use the one for its parent
        class. The normal __call__ does not consider this for performance
        reasons. """
        objs = self.get(*args, **kwargs)
        types = tuple(
            [
                x.__thisclass__.__mro__[1] if isinstance(x, super) else type(x)
                for x in objs
            ]
        )
        nargs = [
            x.__self__ if isinstance(x, super) else x
            for x in args
        ]

        for k, elem in six.iteritems(kwargs):
            if isinstance(elem, super):
                kwargs[k] = elem.__self__

        # This code is duplicate for performance reasons.
        cached = self.cache.get(types, None)
        if cached is not None:
            return cached(*nargs, **kwargs)

        for signature, fun in reversed(self.methods):
            if all(issubclass(ty, sig) for ty, sig in zip(types, signature)):
                self.cache[types] = fun
                return fun(*nargs, **kwargs)
        raise TypeError
Example #13
    def generate_docs(self):
        fns = (item[0] for item in chain(self.funcs, self.nones))
        return '\n\n'.join("{0} -> :py:meth:`{1}`".format(sig, fun.__name__)
            for sig, fun in
            # The -1 prevents the cls from incorrectly being shown in the
            # documentation.
            zip(self.get_signatures("create", -1), fns)
        )
Example #14
    def _extractDateURL(self, url):
        """Extracts the date from a particular url following the pattern"""
        # url_to_list substitutes '/' for '.' and '_' to then create
        # a list of all the blocks in times - assuming they are all
        # separated with either '.', '_' or '/'
        url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
        pattern_list = url_to_list(self.pattern)
        url_list = url_to_list(url)

        time_order = [
            '%Y', '%y', '%b', '%B', '%m', '%d', '%j', '%H', '%I', '%M', '%S'
        ]
        final_date = []
        final_pattern = []
        # Find in directory and filename
        for pattern_elem, url_elem in zip(pattern_list, url_list):
            time_formats = [x for x in time_order if x in pattern_elem]
            if len(time_formats) > 0:
                final_date.append(url_elem)
                final_pattern.append(pattern_elem)
                for time_bit in time_formats:
                    time_order.remove(time_bit)
        # Find and remove repeated elements, e.g. %Y in ['%Y', '%Y%m%d']
        #   Join everything into single strings
        date_together = ''.join(final_date)
        pattern_together = ''.join(final_pattern)
        re_together = pattern_together
        for k, v in six.iteritems(TIME_CONVERSIONS):
            re_together = re_together.replace(k, v)

        #   Create new empty lists
        final_date = list()
        final_pattern = list()
        for p, r in zip(
                pattern_together.split('%')[1:],
                re_together.split('\\')[1:]):
            regexp = '\\{}'.format(r)
            pattern = '%{}'.format(p)
            date_part = re.match(regexp, date_together)
            date_together = date_together[:date_part.start()] + \
                            date_together[date_part.end():]
            if pattern not in final_pattern:
                final_pattern.append('%{}'.format(p))
                final_date.append(date_part.group())
        return datetime.datetime.strptime(' '.join(final_date),
                                          ' '.join(final_pattern))
Example #15
def test_window(timerange_a):
    timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
    window = timerange.window(u.Quantity(12 * 60 * 60, 's'), u.Quantity(10, 's'))
    expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T00:00:10'),
              sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/1T12:00:10'),
              sunpy.time.TimeRange('2012/1/2T00:00:00', '2012/1/2T00:00:10')]
    assert isinstance(window, list)
    # Direct comparisons do not seem to work
    assert all([wi.start == ex.start and wi.end == ex.end for wi, ex in zip(window, expect)])
Example #16
def test_window_timedelta(timerange_a):
    timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
    window = timerange.window(datetime.timedelta(hours=12), datetime.timedelta(seconds=10))
    expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T00:00:10'),
              sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/1T12:00:10'),
              sunpy.time.TimeRange('2012/1/2T00:00:00', '2012/1/2T00:00:10')]
    assert isinstance(window, list)
    # Direct comparisons do not seem to work
    assert all([wi.start == ex.start and wi.end == ex.end for wi, ex in zip(window, expect)])
Example #17
def matches_types(fun, types, args, kwargs):
    """ See if args and kwargs match are instances of types. types are given
    in the order they are defined in the function. kwargs are automatically
    converted into that order. """
    return all(
        isinstance(obj, cls) for obj, cls in zip(
            arginize(fun, args, kwargs), types
        )
    )
Example #18
def test_window_timedelta(timerange_a):
    timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
    window = timerange.window(datetime.timedelta(hours=12), datetime.timedelta(seconds=10))
    expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T00:00:10'),
              sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/1T12:00:10'),
              sunpy.time.TimeRange('2012/1/2T00:00:00', '2012/1/2T00:00:10')]
    assert isinstance(window, list)
    # Direct comparisons do not seem to work
    assert all([wi.start == ex.start and wi.end == ex.end for wi, ex in zip(window, expect)])
Example #19
def test_window(timerange_a):
    timerange = sunpy.time.TimeRange(tbegin_str, tfin_str)
    window = timerange.window(u.Quantity(12 * 60 * 60, 's'), u.Quantity(10, 's'))
    expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T00:00:10'),
              sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/1T12:00:10'),
              sunpy.time.TimeRange('2012/1/2T00:00:00', '2012/1/2T00:00:10')]
    assert isinstance(window, list)
    # Direct comparisons do not seem to work
    assert all([wi.start == ex.start and wi.end == ex.end for wi, ex in zip(window, expect)])
Example #20
    def _extractDateURL(self, url):
        """Extracts the date from a particular url following the pattern"""
        # url_to_list substitutes '/' for '.' and '_' to then create
        # a list of all the blocks in times - assuming they are all
        # separated with either '.', '_' or '/'
        url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
        pattern_list = url_to_list(self.pattern)
        url_list = url_to_list(url)

        time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',
                      '%H', '%I', '%M', '%S']
        final_date = []
        final_pattern = []
        # Find in directory and filename
        for pattern_elem, url_elem in zip(pattern_list, url_list):
            time_formats = [x for x in time_order if x in pattern_elem]
            if len(time_formats) > 0:
                final_date.append(url_elem)
                final_pattern.append(pattern_elem)
                for time_bit in time_formats:
                    time_order.remove(time_bit)
        # Find and remove repeated elements, e.g. %Y in ['%Y', '%Y%m%d']
        #   Join everything into single strings
        date_together = ''.join(final_date)
        pattern_together = ''.join(final_pattern)
        re_together = pattern_together
        for k, v in six.iteritems(TIME_CONVERSIONS):
            re_together = re_together.replace(k, v)

        #   Create new empty lists
        final_date = list()
        final_pattern = list()
        for p, r in zip(pattern_together.split('%')[1:], re_together.split('\\')[1:]):
            regexp = '\\{}'.format(r)
            pattern = '%{}'.format(p)
            date_part = re.match(regexp, date_together)
            date_together = date_together[:date_part.start()] + \
                            date_together[date_part.end():]
            if pattern not in final_pattern:
                final_pattern.append('%{}'.format(p))
                final_date.append(date_part.group())
        return datetime.datetime.strptime(' '.join(final_date),
                                          ' '.join(final_pattern))
Example #21
def test_split(timerange_a):
    expect = [
        sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T12:00:00'),
        sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/2T00:00:00')
    ]
    split = timerange_a.split(n=2)
    # Direct comparisons do not seem to work
    assert all([
        wi.start == ex.start and wi.end == ex.end
        for wi, ex in zip(split, expect)
    ])
Example #22
def arginize(fun, a, kw):
    """ Turn args and kwargs into args by considering the function
    signature. """
    args, varargs, keywords, defaults = correct_argspec(fun)
    if varargs is not None:
        raise ValueError
    names = args[len(a):]
    if defaults:
        defs = dict(zip(args[-len(defaults):], defaults))
    else:
        defs = {}
    return list(a) + [kw.get(name, defs.get(name, None)) for name in names]
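A hedged usage sketch, again assuming correct_argspec wraps inspect.getfullargspec as sketched earlier:

def scale(data, factor=2, offset=0):
    return [x * factor + offset for x in data]

# Positional arguments stay in place; keyword arguments are slotted into
# signature order, and anything missing falls back to its default (or None).
print(arginize(scale, ([1, 2],), {'offset': 5}))
# prints: [[1, 2], 2, 5]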
Example #23
def print_table(lst, colsep=' ', linesep='\n'):
    """
    ?

    Parameters
    ----------
    lst : ?
        ?
    colsep : ?
        ?
    linesep : ?
        ?

    Returns
    -------
    ?

    .. todo::
        improve documentation.

    """
    width = [max(map(len, col)) for col in zip(*lst)]
    return linesep.join(
        colsep.join(col.ljust(n) for n, col in zip(width, row)) for row in lst)
Example #24
    def __call__(self, *args, **kwargs):
        objs = self.get(*args, **kwargs)

        # pylint: disable=W0141
        types = tuple(map(type, objs))

        # This code is duplicate for performance reasons.
        cached = self.cache.get(types, None)
        if cached is not None:
            return cached(*args, **kwargs)

        for signature, fun in reversed(self.methods):
            if all(issubclass(ty, sig) for ty, sig in zip(types, signature)):
                self.cache[types] = fun
                return fun(*args, **kwargs)
        raise TypeError('{0!r}'.format(types))
Example #25
def read(filepath, hdus=None, memmap=None, **kwargs):
    """
    Read a fits file

    Parameters
    ----------
    filepath : `str`
        The fits file to be read
    hdu: `int` or iterable
        The HDU indexes to read from the file

    Returns
    -------
    pairs : `list`
        A list of (data, header) tuples

    Notes
    -----
    This routine reads all the HDU's in a fits file and returns a list of the
    data and a FileHeader instance for each one.
    Also all comments in the original file are concatenated into a single
    'comment' key in the returned FileHeader.
    """
    with fits.open(filepath, ignore_blank=True, memmap=memmap) as hdulist:
        hdulist.verify('silentfix+warn')

        # Wrap a single index in a list so the selection below always
        # yields an iterable of HDUs.
        if hdus is not None:
            if isinstance(hdus, int):
                hdulist = [hdulist[hdus]]
            elif isinstance(hdus, collections.Iterable):
                hdulist = [hdulist[i] for i in hdus]

        headers = get_header(hdulist)
        pairs = []

        for i, (hdu, header) in enumerate(zip(hdulist, headers)):
            try:
                pairs.append(HDPair(hdu.data, header))
            except (KeyError, ValueError) as e:
                message = "Error when reading HDU {}. Skipping.\n".format(i)
                for line in traceback.format_tb(sys.exc_info()[2]):
                    message += line
                    message += '\n'
                message += repr(e)
                warnings.warn(message, Warning, stacklevel=2)

    return pairs
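A hedged usage sketch; 'image.fits' is a hypothetical file name:

# Hypothetical call: read only the primary HDU of a local FITS file.
pairs = read('image.fits', hdus=[0])
data, header = pairs[0]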
Example #26
File: fits.py Project: mirca/sunpy
def read(filepath, hdus=None, memmap=None, **kwargs):
    """
    Read a fits file

    Parameters
    ----------
    filepath : `str`
        The fits file to be read
    hdu: `int` or iterable
        The HDU indexes to read from the file

    Returns
    -------
    pairs : `list`
        A list of (data, header) tuples

    Notes
    -----
    This routine reads all the HDU's in a fits file and returns a list of the
    data and a FileHeader instance for each one.
    Also all comments in the original file are concatenated into a single
    'comment' key in the returned FileHeader.
    """
    with fits.open(filepath, memmap=memmap) as hdulist:
        hdulist.verify('silentfix+warn')

        # Wrap a single index in a list so the selection below always
        # yields an iterable of HDUs.
        if hdus is not None:
            if isinstance(hdus, int):
                hdulist = [hdulist[hdus]]
            elif isinstance(hdus, collections.Iterable):
                hdulist = [hdulist[i] for i in hdus]

        headers = get_header(hdulist)
        pairs = []

        for i, (hdu, header) in enumerate(zip(hdulist, headers)):
            try:
                pairs.append(HDPair(hdu.data, header))
            except (KeyError, ValueError) as e:
                message = "Error when reading HDU {}. Skipping.\n".format(i)
                for line in traceback.format_tb(sys.exc_info()[2]):
                    message += line
                    message += '\n'
                message += repr(e)
                warnings.warn(message, Warning, stacklevel=2)

    return pairs
Example #27
    def linearize_freqs(self, delta_freq=None):
        """Rebin frequencies so that the frequency axis is linear.

        Parameters
        ----------
        delta_freq : float
            Difference between consecutive values on the new frequency axis.
            Defaults to half of smallest delta in current frequency axis.
            Compare Nyquist-Shannon sampling theorem.
        """
        if delta_freq is None:
            # Nyquist–Shannon sampling theorem
            delta_freq = _min_delt(self.freq_axis) / 2.
        nsize = (self.freq_axis.max() - self.freq_axis.min()) / delta_freq + 1
        new = np.zeros((int(nsize), self.shape[1]), dtype=self.data.dtype)

        freqs = self.freq_axis - self.freq_axis.max()
        freqs = freqs / delta_freq

        midpoints = np.round((freqs[:-1] + freqs[1:]) / 2)
        fillto = np.concatenate(
            [midpoints - 1, np.round([freqs[-1]]) - 1]
        )
        fillfrom = np.concatenate(
            [np.round([freqs[0]]), midpoints - 1]
        )

        fillto = np.abs(fillto)
        fillfrom = np.abs(fillfrom)

        for row, from_, to_ in zip(self, fillfrom, fillto):
            new[int(from_): int(to_)] = row

        vrs = self._get_params()
        vrs.update({
            'freq_axis': np.linspace(
                self.freq_axis.max(), self.freq_axis.min(), nsize
            )
        })

        return self.__class__(new, **vrs)
Example #28
    def linearize_freqs(self, delta_freq=None):
        """Rebin frequencies so that the frequency axis is linear.

        Parameters
        ----------
        delta_freq : float
            Difference between consecutive values on the new frequency axis.
            Defaults to half of smallest delta in current frequency axis.
            Compare Nyquist-Shannon sampling theorem.
        """
        if delta_freq is None:
            # Nyquist–Shannon sampling theorem
            delta_freq = _min_delt(self.freq_axis) / 2.
        nsize = (self.freq_axis.max() - self.freq_axis.min()) / delta_freq + 1
        new = np.zeros((int(nsize), self.shape[1]), dtype=self.data.dtype)

        freqs = self.freq_axis - self.freq_axis.max()
        freqs = freqs / delta_freq

        midpoints = np.round((freqs[:-1] + freqs[1:]) / 2)
        fillto = np.concatenate(
            [midpoints - 1, np.round([freqs[-1]]) - 1]
        )
        fillfrom = np.concatenate(
            [np.round([freqs[0]]), midpoints - 1]
        )

        fillto = np.abs(fillto)
        fillfrom = np.abs(fillfrom)

        for row, from_, to_ in zip(self, fillfrom, fillto):
            new[int(from_): int(to_)] = row

        vrs = self._get_params()
        vrs.update({
            'freq_axis': np.linspace(
                self.freq_axis.max(), self.freq_axis.min(), nsize
            )
        })

        return self.__class__(new, **vrs)
Example #29
def read(filepath, hdus=None, memmap=None, **kwargs):
    """
    Read a fits file

    Parameters
    ----------
    filepath : `str`
        The fits file to be read
    hdu: `int` or iterable
        The HDU indexes to read from the file

    Returns
    -------
    pairs : `list`
        A list of (data, header) tuples

    Notes
    -----
    This routine reads all the HDU's in a fits file and returns a list of the
    data and a FileHeader instance for each one.
    Also all comments in the original file are concatenated into a single
    'comment' key in the returned FileHeader.
    """
    hdulist = fits.open(filepath, memmap=memmap)
    try:
        hdulist.verify('silentfix+warn')

        # Keep the original hdulist reference so the file can be closed in
        # the finally block; wrap a single index in a list so the selection
        # is always an iterable of HDUs.
        selected = hdulist
        if hdus is not None:
            if isinstance(hdus, int):
                selected = [hdulist[hdus]]
            elif isinstance(hdus, collections.Iterable):
                selected = [hdulist[i] for i in hdus]

        headers = get_header(selected)
        pairs = []
        for hdu, header in zip(selected, headers):
            pairs.append((hdu.data, header))
    finally:
        hdulist.close()

    return pairs
Example #30
def read(filepath, hdus=None, memmap=None, **kwargs):
    """
    Read a fits file

    Parameters
    ----------
    filepath : `str`
        The fits file to be read
    hdu: `int` or iterable
        The HDU indexes to read from the file

    Returns
    -------
    pairs : `list`
        A list of (data, header) tuples

    Notes
    -----
    This routine reads all the HDU's in a fits file and returns a list of the
    data and a FileHeader instance for each one.
    Also all comments in the original file are concatenated into a single
    'comment' key in the returned FileHeader.
    """
    hdulist = fits.open(filepath, memmap=memmap)
    try:
        hdulist.verify('silentfix+warn')

        # Keep the original hdulist reference so the file can be closed in
        # the finally block; wrap a single index in a list so the selection
        # is always an iterable of HDUs.
        selected = hdulist
        if hdus is not None:
            if isinstance(hdus, int):
                selected = [hdulist[hdus]]
            elif isinstance(hdus, collections.Iterable):
                selected = [hdulist[i] for i in hdus]

        headers = get_header(selected)
        pairs = []
        for hdu, header in zip(selected, headers):
            pairs.append((hdu.data, header))
    finally:
        hdulist.close()

    return pairs
Example #31
    def interpolate(self, frequency):
        """
        Interpolate the intensity at an unknown frequency by linear
        interpolation between its two neighbours.

        Parameters
        ----------
        frequency : float or int
            Unknown frequency for which to linearly interpolate the intensities.
            freq_axis[0] >= frequency >= freq_axis[-1]
        """
        lfreq, lvalue = None, None
        for freq, value in zip(self.freq_axis, self.data[:, :]):
            if freq < frequency:
                break
            lfreq, lvalue = freq, value
        else:
            raise ValueError("Frequency not in interpolation range")
        if lfreq is None:
            raise ValueError("Frequency not in interpolation range")
        diff = frequency - freq  # pylint: disable=W0631
        ldiff = lfreq - frequency
        return (ldiff * value + diff * lvalue) / (diff + ldiff)  # pylint: disable=W0631
Example #32
    def interpolate(self, frequency):
        """
        Interpolate the intensity at an unknown frequency by linear
        interpolation between its two neighbours.

        Parameters
        ----------
        frequency : float or int
            Unknown frequency for which to linearly interpolate the intensities.
            freq_axis[0] >= frequency >= freq_axis[-1]
        """
        lfreq, lvalue = None, None
        for freq, value in zip(self.freq_axis, self.data[:, :]):
            if freq < frequency:
                break
            lfreq, lvalue = freq, value
        else:
            raise ValueError("Frequency not in interpolation range")
        if lfreq is None:
            raise ValueError("Frequency not in interpolation range")
        diff = frequency - freq  # pylint: disable=W0631
        ldiff = lfreq - frequency
        return (ldiff * value + diff * lvalue) / (diff + ldiff)  # pylint: disable=W0631
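The return value is a standard linear interpolation, i.e. a weighted average of the two neighbouring intensities. A minimal numeric check with invented values, assuming freq_axis is descending as the loop above requires:

lfreq, lvalue = 10.0, 100.0   # neighbour above the target frequency
freq, value = 8.0, 80.0       # neighbour below the target frequency
frequency = 9.5
diff = frequency - freq       # 1.5
ldiff = lfreq - frequency     # 0.5
print((ldiff * value + diff * lvalue) / (diff + ldiff))  # 95.0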
Example #33
    def join_many(cls, specs, mk_arr=None, nonlinear=False,
                  maxgap=0, fill=JOIN_REPEAT):
        """Produce new Spectrogram that contains spectrograms
        joined together in time.

        Parameters
        ----------
        specs : list
            List of spectrograms to join together in time.
        nonlinear : bool
            If True, leave out gaps between spectrograms. Else, fill them with
            the value specified in fill.
        maxgap : float, int or None
            Largest gap to allow in second. If None, allow gap of arbitrary
            size.
        fill : float or int
            Value to fill missing values (assuming nonlinear=False) with.
            Can be LinearTimeSpectrogram.JOIN_REPEAT to repeat the values for
            the time just before the gap.
        mk_array: function
            Function that is called to create the resulting array. Can be set
            to LinearTimeSpectrogram.memap(filename) to create a memory mapped
            result array.
        """
        # XXX: Only load header and load contents of files
        # on demand.
        mask = None

        if mk_arr is None:
            mk_arr = cls.make_array

        specs = sorted(specs, key=lambda x: x.start)

        freqs = specs[0].freq_axis
        if not all(np.array_equal(freqs, sp.freq_axis) for sp in specs):
            raise ValueError("Frequency channels do not match.")

        # Smallest time-delta becomes the common time-delta.
        min_delt = min(sp.t_delt for sp in specs)
        dtype_ = max(sp.dtype for sp in specs)

        specs = [sp.resample_time(min_delt) for sp in specs]
        size = sum(sp.shape[1] for sp in specs)

        data = specs[0]
        start_day = data.start

        xs = []
        last = data
        for elem in specs[1:]:
            e_init = (
                SECONDS_PER_DAY * (
                    get_day(elem.start) - get_day(start_day)
                ).days + elem.t_init
            )
            x = int((e_init - last.t_init) / min_delt)
            xs.append(x)
            diff = last.shape[1] - x

            if maxgap is not None and -diff > maxgap / min_delt:
                raise ValueError("Too large gap.")

            # If we leave out undefined values, we do not want to
            # add values here if x > t_res.
            if nonlinear:
                size -= max(0, diff)
            else:
                size -= diff

            last = elem

        # The non existing element after the last one starts after
        # the last one. Needed to keep implementation below sane.
        xs.append(specs[-1].shape[1])

        # We do that here so the user can pass a memory mapped
        # array if they'd like to.
        arr = mk_arr((data.shape[0], size), dtype_)
        time_axis = np.zeros((size,))
        sx = 0
        # Amount of pixels left out due to non-linearity. Needs to be
        # considered for correct time axes.
        sd = 0
        for x, elem in zip(xs, specs):
            diff = x - elem.shape[1]
            e_time_axis = elem.time_axis

            elem = elem.data

            if x > elem.shape[1]:
                if nonlinear:
                    x = elem.shape[1]
                else:
                    # If we want to stay linear, fill up the missing
                    # pixels with placeholder zeros.
                    filler = np.zeros((data.shape[0], diff))
                    if fill is cls.JOIN_REPEAT:
                        filler[:, :] = elem[:, -1, np.newaxis]
                    else:
                        filler[:] = fill
                    minimum = e_time_axis[-1]
                    e_time_axis = np.concatenate([
                        e_time_axis,
                        np.linspace(
                            minimum + min_delt,
                            minimum + diff * min_delt,
                            diff
                        )
                    ])
                    elem = np.concatenate([elem, filler], 1)
            arr[:, sx:sx + x] = elem[:, :x]

            if diff > 0:
                if mask is None:
                    mask = np.zeros((data.shape[0], size), dtype=np.uint8)
                mask[:, sx + x - diff:sx + x] = 1
            time_axis[sx:sx + x] = e_time_axis[:x] + data.t_delt * (sx + sd)
            if nonlinear:
                sd += max(0, diff)
            sx += x
        params = {
            'time_axis': time_axis,
            'freq_axis': data.freq_axis,
            'start': data.start,
            'end': specs[-1].end,
            't_delt': data.t_delt,
            't_init': data.t_init,
            't_label': data.t_label,
            'f_label': data.f_label,
            'content': data.content,
            'instruments': _union(spec.instruments for spec in specs),
        }
        if mask is not None:
            arr = ma.array(arr, mask=mask)
        if nonlinear:
            del params['t_delt']
            return Spectrogram(arr, **params)
        return common_base(specs)(arr, **params)
Example #34
    c = np.cos(angle)
    s = np.sin(angle)
    rmatrix = np.array([[c, -s], [s, c]])
    expected = np.rot90(original, k=k)
    rot = affine_transform(original, rmatrix=rmatrix, use_scipy=True)
    assert compare_results(expected, rot, allclose=False)

    # TODO: Check incremental 360 degree rotation against original image

    # Check derotated image against original
    derot_matrix = np.array([[c, s], [-s, c]])
    derot = affine_transform(rot, rmatrix=derot_matrix, use_scipy=True)
    assert compare_results(original, derot, allclose=False)

dx_values, dy_values = list(range(-100, 101, 100))*3, list(range(-100, 101, 100))*3
dy_values.sort()
@pytest.mark.parametrize("dx, dy", list(zip(dx_values, dy_values)))
def test_shift(dx, dy):
    # Rotation center for all translation tests.
    image_center = np.array(original.shape)/2.0 - 0.5

    # No rotation for all translation tests.
    rmatrix = np.array([[1.0, 0.0], [0.0, 1.0]])

    # Check a shifted shape against expected outcome
    expected = np.roll(np.roll(original, dx, axis=1), dy, axis=0)
    rcen = image_center + np.array([dx, dy])
    shift = affine_transform(original, rmatrix=rmatrix, recenter=True, image_center=rcen)
    ymin, ymax = max([0, dy]), min([original.shape[1], original.shape[1]+dy])
    xmin, xmax = max([0, dx]), min([original.shape[0], original.shape[0]+dx])
    compare_results(expected[ymin:ymax, xmin:xmax], shift[ymin:ymax, xmin:xmax])
Example #35
    def join_many(cls, specs, mk_arr=None, nonlinear=False,
                  maxgap=0, fill=JOIN_REPEAT):
        """Produce new Spectrogram that contains spectrograms
        joined together in time.

        Parameters
        ----------
        specs : list
            List of spectrograms to join together in time.
        nonlinear : bool
            If True, leave out gaps between spectrograms. Else, fill them with
            the value specified in fill.
        maxgap : float, int or None
            Largest gap to allow in second. If None, allow gap of arbitrary
            size.
        fill : float or int
            Value to fill missing values (assuming nonlinear=False) with.
            Can be LinearTimeSpectrogram.JOIN_REPEAT to repeat the values for
            the time just before the gap.
        mk_array: function
            Function that is called to create the resulting array. Can be set
            to LinearTimeSpectrogram.memap(filename) to create a memory mapped
            result array.
        """
        # XXX: Only load header and load contents of files
        # on demand.
        mask = None

        if mk_arr is None:
            mk_arr = cls.make_array

        specs = sorted(specs, key=lambda x: x.start)

        freqs = specs[0].freq_axis
        if not all(np.array_equal(freqs, sp.freq_axis) for sp in specs):
            raise ValueError("Frequency channels do not match.")

        # Smallest time-delta becomes the common time-delta.
        min_delt = min(sp.t_delt for sp in specs)
        dtype_ = max(sp.dtype for sp in specs)

        specs = [sp.resample_time(min_delt) for sp in specs]
        size = sum(sp.shape[1] for sp in specs)

        data = specs[0]
        start_day = data.start

        xs = []
        last = data
        for elem in specs[1:]:
            e_init = (
                SECONDS_PER_DAY * (
                    get_day(elem.start) - get_day(start_day)
                ).days + elem.t_init
            )
            x = int((e_init - last.t_init) / min_delt)
            xs.append(x)
            diff = last.shape[1] - x

            if maxgap is not None and -diff > maxgap / min_delt:
                raise ValueError("Too large gap.")

            # If we leave out undefined values, we do not want to
            # add values here if x > t_res.
            if nonlinear:
                size -= max(0, diff)
            else:
                size -= diff

            last = elem

        # The non existing element after the last one starts after
        # the last one. Needed to keep implementation below sane.
        xs.append(specs[-1].shape[1])

        # We do that here so the user can pass a memory mapped
        # array if they'd like to.
        arr = mk_arr((data.shape[0], size), dtype_)
        time_axis = np.zeros((size,))
        sx = 0
        # Amount of pixels left out due to non-linearity. Needs to be
        # considered for correct time axes.
        sd = 0
        for x, elem in zip(xs, specs):
            diff = x - elem.shape[1]
            e_time_axis = elem.time_axis

            elem = elem.data

            if x > elem.shape[1]:
                if nonlinear:
                    x = elem.shape[1]
                else:
                    # If we want to stay linear, fill up the missing
                    # pixels with placeholder zeros.
                    filler = np.zeros((data.shape[0], diff))
                    if fill is cls.JOIN_REPEAT:
                        filler[:, :] = elem[:, -1, np.newaxis]
                    else:
                        filler[:] = fill
                    minimum = e_time_axis[-1]
                    e_time_axis = np.concatenate([
                        e_time_axis,
                        np.linspace(
                            minimum + min_delt,
                            minimum + diff * min_delt,
                            diff
                        )
                    ])
                    elem = np.concatenate([elem, filler], 1)
            arr[:, sx:sx + x] = elem[:, :x]

            if diff > 0:
                if mask is None:
                    mask = np.zeros((data.shape[0], size), dtype=np.uint8)
                mask[:, sx + x - diff:sx + x] = 1
            time_axis[sx:sx + x] = e_time_axis[:x] + data.t_delt * (sx + sd)
            if nonlinear:
                sd += max(0, diff)
            sx += x
        params = {
            'time_axis': time_axis,
            'freq_axis': data.freq_axis,
            'start': data.start,
            'end': specs[-1].end,
            't_delt': data.t_delt,
            't_init': data.t_init,
            't_label': data.t_label,
            'f_label': data.f_label,
            'content': data.content,
            'instruments': _union(spec.instruments for spec in specs),
        }
        if mask is not None:
            arr = ma.array(arr, mask=mask)
        if nonlinear:
            del params['t_delt']
            return Spectrogram(arr, **params)
        return common_base(specs)(arr, **params)
Example #36
def print_table(lst, colsep=' ', linesep='\n'):
    width = [max(map(len, col)) for col in zip(*lst)]
    return linesep.join(
        colsep.join(col.ljust(n) for n, col in zip(width, row)) for row in lst)
Example #37
def test_split(timerange_a):
    expect = [sunpy.time.TimeRange('2012/1/1T00:00:00', '2012/1/1T12:00:00'),
              sunpy.time.TimeRange('2012/1/1T12:00:00', '2012/1/2T00:00:00')]
    split = timerange_a.split(n=2)
    # Direct comparisons do not seem to work
    assert all([wi.start == ex.start and wi.end == ex.end for wi, ex in zip(split, expect)])