def __init__(self, key, mode, octave=4):
    if mode.lower() not in mode_map.keys():
        raise NotImplementedError("Currently only support the modes: {0}"
                                  .format(",".join(mode_map.keys())))
    self.mode = mode.lower()

    # parse key - note
    self.root_note = key.lower()
    if isiterable(octave):
        self._root_midi = _note_to_midi(self.root_note, octave[0])
        # list() so the octaves survive repeated iteration on Python 3
        self.octave = list(map(int, octave))
    else:
        self._root_midi = _note_to_midi(self.root_note, octave)
        self.octave = int(octave)

    # get valid notes
    if isiterable(self.octave):
        self.midi_notes = np.array([], dtype=int)
        for octv in self.octave:
            these_notes = _note_to_midi(self.root_note, octv) + \
                np.array(mode_map[self.mode])
            self.midi_notes = np.append(self.midi_notes, these_notes)
    else:
        self.midi_notes = _note_to_midi(self.root_note, self.octave) + \
            np.array(mode_map[self.mode])

    self.notes = [_midi_to_note(x) for x in self.midi_notes]

def radec2lb(ra, dec, radian=False, FK5=False):
    """
    Convert (ra, dec) into Galactic coordinates (l, b).

    Parameters
    ----------
    ra : float or list or array
        RA coordinates in degrees.
    dec : float or list or array
        DEC coordinates in degrees.

    Returns
    -------
    l : float or list or array
    b : float or list or array
    """
    # See if the input is a scalar or an array
    frame = 'fk5' if FK5 else 'icrs'
    if not (isiterable(ra) or isiterable(dec)):
        returnScalar = True
        raDec = [SkyCoord(ra, dec, frame=frame, unit='deg')]
    else:
        returnScalar = False
        # Use the per-element values, not the full input arrays
        raDec = [SkyCoord(rrr, ddd, frame=frame, unit='deg')
                 for rrr, ddd in zip(ra, dec)]

    # Convert to galactic coordinates.
    # Currently, coordinates do not support arrays; have to loop.
    gal_l = np.empty(len(raDec), dtype=float)
    gal_b = np.empty(len(raDec), dtype=float)
    for ii, cc in enumerate(raDec):
        gg = cc.galactic
        # Hack to support both astropy v0.2.4 and v0.3.dev
        # TODO: remove this hack once v0.3 is out (and array-ify this
        # whole thing)
        if radian:
            gal_l[ii] = gg.l.radian
            gal_b[ii] = gg.b.radian
        else:
            gal_l[ii] = gg.l.degree
            gal_b[ii] = gg.b.degree

    if returnScalar:
        return gal_l[0], gal_b[0]
    else:
        return gal_l, gal_b

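# A minimal usage sketch for radec2lb (not part of the original module);
# assumes numpy and astropy's SkyCoord are imported as above.  The M31 and
# Orion Nebula coordinates here are illustrative only.
def _example_radec2lb():
    l, b = radec2lb(10.6847, 41.2687)                          # scalar input
    ls, bs = radec2lb([10.6847, 83.8221], [41.2687, -5.3911])  # array input
    return l, b, ls, bs
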
def to_tree_transform(cls, model, ctx):
    node = OrderedDict()
    node['no_label'] = model.no_label
    if model.inputs_mapping is not None:
        node['inputs_mapping'] = model.inputs_mapping

    if isinstance(model, LabelMapperArray):
        node['mapper'] = model.mapper
    elif isinstance(model, LabelMapper):
        node['mapper'] = model.mapper
        node['inputs'] = list(model.inputs)
    elif isinstance(model, (LabelMapperDict, LabelMapperRange)):
        if hasattr(model, 'atol'):
            node['atol'] = model.atol
        mapper = OrderedDict()
        labels = list(model.mapper)
        transforms = []
        for k in labels:
            transforms.append(model.mapper[k])
        if isiterable(labels[0]):
            labels = [list(l) for l in labels]
        mapper['labels'] = labels
        mapper['models'] = transforms
        node['mapper'] = mapper
        node['inputs'] = list(model.inputs)
    else:
        raise TypeError("Unrecognized type of LabelMapper - {0}".format(model))

    return yamlutil.custom_tree_to_tagged_tree(node, ctx)

def de_density_scale(self, z):
    """ Evaluates the redshift dependence of the dark energy density.

    Parameters
    ----------
    z : array-like
        Input redshifts.

    Returns
    -------
    I : ndarray, or float if input scalar
        The scaling of the energy density of dark energy with redshift.

    Notes
    -----
    The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`.
    For this parametrization the equation of state is
    :math:`w(z) = w_0 + w_a \\left(\\frac{z}{1+z}\\right)^7`, and the
    polynomial in the exponential below is the closed form of the
    resulting integral :math:`3 \\int_0^z [1 + w(z')]/(1 + z')\\, dz'`
    (the :math:`(1+z)^{3(1 + w_0 + w_a)}` prefactor absorbs the
    logarithmic part).
    """
    if isiterable(z):
        z = np.asarray(z)
    zp1 = 1.0 + z
    aa = 1. / zp1
    return zp1 ** (3 * (1 + self._w0 + self._wa)) * \
        np.exp(-self._wa * (1 - aa) *
               (60 * aa**6 - 430 * aa**5 + 1334 * aa**4 - 2341 * aa**3 +
                2559 * aa**2 - 1851 * aa + 1089) / 140.)

def invert(self, dispersion_values):
    if not hasattr(dispersion_values, 'unit'):
        raise u.UnitsException('Must give a dispersion value with a valid '
                               'unit (i.e. quantity 5 * u.Angstrom)')

    if misc.isiterable(dispersion_values) and \
            not isinstance(dispersion_values, basestring):
        dispersion_values = np.array(dispersion_values)

    return float((dispersion_values - self.dispersion0) /
                 self.dispersion_delta) + self.pixel_index

def __getitem__(self, k):
    """ Passback abundance value as a float given an element input

    Parameters
    ----------
    k : overloaded
        * int -- Atomic number (6)
        * str -- Element name (e.g. 'C')

    Returns
    -------
    Abund or Abund difference for the ratio
    """
    # Iterate?
    if isiterable(k) and not isinstance(k, basestring):
        out_abnd = []
        for ik in k:
            out_abnd.append(self[ik])
        out_abnd = np.array(out_abnd)
        return out_abnd

    if isinstance(k, int):  # Atomic number
        mt = np.where(self._data["Z"] == k)[0]
        if len(mt) != 1:
            raise ValueError("Atomic Number not in Table: {:d}".format(k))
    elif isinstance(k, basestring):  # Name
        mt = np.where(self._data["Elm"] == k)[0]
        if len(mt) != 1:
            raise ValueError("Element not in Table: {:s}".format(k))
    else:
        raise IndexError("Not prepared for this type of input", k)

    # Return
    return self._data["Abund"][mt][0]

def do_photometry(hdu, extensions=None, threshold=5, fwhm=2.5):
    if extensions is None:
        extensions = np.arange(1, len(hdu))
    if not isiterable(extensions):
        extensions = (extensions, )

    output = {}
    for ext in extensions:
        header = hdu[ext].header
        data = hdu[ext].data
        image_wcs = WCS(header)
        background = mad_std(data)
        sources = daofind(data, threshold=threshold * background, fwhm=fwhm)
        positions = (sources['xcentroid'], sources['ycentroid'])
        sky_positions = pixel_to_skycoord(*positions, wcs=image_wcs)
        apertures = CircularAperture(positions, r=2.)
        photometry_table = aperture_photometry(data, apertures)
        photometry_table['sky_center'] = sky_positions
        output[str(ext)] = photometry_table

    return output

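# Usage sketch for do_photometry (hypothetical file name; assumes
# astropy.io.fits plus the photutils helpers imported by this module):
# from astropy.io import fits
# with fits.open('mosaic.fits') as hdu:
#     tables = do_photometry(hdu, extensions=[1, 2], threshold=5, fwhm=2.5)
#     print(tables['1'])
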
def subtract_background(data, background):
    """
    Subtract background from data and generate a 2D pixel-wise
    background image.

    Parameters
    ----------
    data : array_like or `~astropy.units.Quantity`
        The 2D data array from which to subtract ``background``.

    background : float, array_like, or `~astropy.units.Quantity`
        The background level of the input ``data``.  ``background`` may
        either be a scalar value or a 2D image with the same shape as
        the input ``data``.

    Returns
    -------
    data : 2D `~numpy.ndarray` or `~astropy.units.Quantity`
        The background subtracted data.

    background : 2D `~numpy.ndarray` or `~astropy.units.Quantity`
        The pixel-wise background array.
    """
    if not isiterable(background):
        # NOTE: np.broadcast_arrays() never returns a Quantity
        # background = np.broadcast_arrays(background, data)[0]
        background = np.zeros(data.shape) + background
    else:
        if background.shape != data.shape:
            raise ValueError('If input background is 2D, then it must '
                             'have the same shape as the input data.')

    return (data - background), background

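# A short sketch of subtract_background with a scalar and a 2D background
# (not part of the original module; assumes numpy imported as np):
def _example_subtract_background():
    data = np.ones((4, 4))
    sub, bkg = subtract_background(data, 0.1)  # scalar is broadcast to 2D
    sub2, bkg2 = subtract_background(data, np.full((4, 4), 0.1))  # 2D input
    return sub, bkg, sub2, bkg2
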
def w(self, z):
    """Returns dark energy equation of state at redshift ``z``.

    Parameters
    ----------
    z : array-like
        Input redshifts.

    Returns
    -------
    w : ndarray, or float if input scalar
        The dark energy equation of state

    Notes
    -----
    The dark energy equation of state is defined as
    :math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the pressure at
    redshift z and :math:`\\rho(z)` is the density at redshift z, both
    in units where c=1.  Here this is
    :math:`w(z) = w_0 + w_a (1 - a)^7 = w_0 + w_a \\frac{z^7}{(1+z)^7}`.
    """
    if isiterable(z):
        z = np.asarray(z)

    return self._w0 + self._wa * z**7 / ((1.0 + z)**7)

def __getitem__(self, k):
    """ Return abundance given an element

    Parameters
    ----------
    k : int or str or list/tuple
        * int -- Atomic number (6)
        * str -- Element name (e.g. 'C')

    Returns
    -------
    Abund : float
    """
    # Iterate?
    if isiterable(k) and not isinstance(k, basestring):
        out_abnd = []
        for ik in k:
            out_abnd.append(self[ik])
        out_abnd = np.array(out_abnd)
        return out_abnd

    if isinstance(k, numbers.Integral):  # Atomic number
        mt = np.where(self._data['Z'] == k)[0]
        if len(mt) != 1:
            raise ValueError('Atomic Number not in Table: {:d}'.format(k))
    elif isinstance(k, basestring):  # Name
        mt = np.where(self._data['Elm'] == k)[0]
        if len(mt) != 1:
            raise ValueError('Element not in Table: {:s}'.format(k))
    else:
        raise IndexError('Not prepared for this type of input', k)

    # Return
    return self._data['Abund'][mt][0]

def de_density_scale(self, z):
    """ Evaluates the redshift dependence of the dark energy density.

    Parameters
    ----------
    z : array-like
        Input redshifts.

    Returns
    -------
    I : ndarray, or float if input scalar
        The scaling of the energy density of dark energy with redshift.

    Notes
    -----
    The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
    and for this parametrization,
    :math:`w(z) = w_0 + w_a \\frac{z}{(1+z)^2}`, is given by

    .. math::

        I = \\left(1 + z\\right)^{3 \\left(1 + w_0\\right)}
            \\exp \\left(\\frac{3 w_a z^2}{2 (1+z)^2}\\right)
    """
    if isiterable(z):
        z = np.asarray(z)
    zp1 = 1.0 + z
    return zp1 ** (3 * (1 + self._w0)) * \
        np.exp(1.5 * self._wa * z**2 / zp1**2)

def from_tree_transform(cls, node, ctx):
    inputs_mapping = node.get('inputs_mapping', None)
    if inputs_mapping is not None \
            and not isinstance(inputs_mapping, models.Mapping):
        raise TypeError("inputs_mapping must be an instance "
                        "of astropy.modeling.models.Mapping.")
    mapper = node['mapper']
    atol = node.get('atol', 10**-8)
    no_label = node.get('no_label', np.nan)

    if isinstance(mapper, NDArrayType):
        if mapper.ndim != 2:
            raise NotImplementedError(
                "GWCS currently only supports 2D masks.")
        return LabelMapperArray(mapper, inputs_mapping)
    elif isinstance(mapper, Model):
        inputs = node.get('inputs')
        return LabelMapper(inputs, mapper, inputs_mapping=inputs_mapping,
                           no_label=no_label)
    else:
        inputs = node.get('inputs', None)
        if inputs is not None:
            inputs = tuple(inputs)
        labels = mapper.get('labels')
        transforms = mapper.get('models')
        if isiterable(labels[0]):
            labels = [tuple(l) for l in labels]
            dict_mapper = dict(zip(labels, transforms))
            return LabelMapperRange(inputs, dict_mapper, inputs_mapping)
        else:
            dict_mapper = dict(zip(labels, transforms))
            return LabelMapperDict(inputs, dict_mapper, inputs_mapping,
                                   atol=atol)

def set_log_level(args, loggers):
    if args.verbosity == 1:
        log_level = logging.DEBUG
    elif args.verbosity == 2:
        log_level = 1
    elif args.verbosity == 3:
        log_level = 0
    elif args.quietness == 1:
        log_level = logging.WARNING
    elif args.quietness == 2:
        log_level = logging.ERROR
    else:
        log_level = logging.INFO  # default

    if not isiterable(loggers):
        loggers = [loggers]
    for logger in loggers:
        logger.setLevel(log_level)

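# Sketch of set_log_level with a stand-in for the parsed argparse
# namespace; `_Args` is a hypothetical container, not part of the API.
def _example_set_log_level():
    class _Args:
        verbosity = 1
        quietness = 0

    logger = logging.getLogger('example')
    set_log_level(_Args(), logger)  # a single logger gets wrapped in a list
    return logger.level             # logging.DEBUG
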
def from_table(cls, table, dispersion_column='dispersion',
               flux_column='flux', uncertainty_column=None,
               flag_columns=None):
    """
    Initializes a `Spectrum1D`-object from an `~astropy.table.Table` object

    Parameters
    ----------
    table : ~astropy.table.Table object

    dispersion_column : str, optional
        name of the dispersion column. default is 'dispersion'

    flux_column : str, optional
        name of the flux column. default is 'flux'

    uncertainty_column : str, optional
        name of the uncertainty column. If set to None uncertainty is
        set to None. default is None

    flag_columns : str or list, optional
        name or names of flag columns. If multiple names are supplied a
        ~astropy.nddata.FlagCollection will be built. default is None
    """
    flux = table[flux_column]
    dispersion = table[dispersion_column]

    if uncertainty_column is not None:
        uncertainty = table[uncertainty_column]
        if uncertainty.unit != flux.unit:
            log.warning('"uncertainty"-column and "flux"-column do not '
                        'share the units (%s vs %s) ',
                        uncertainty.unit, flux.unit)
    else:
        uncertainty = None

    if isinstance(flag_columns, basestring):
        flags = table[flag_columns]
    elif misc.isiterable(flag_columns):
        flags = FlagCollection(shape=flux.shape)
        for flag_column in flag_columns:
            flags[flag_column] = table[flag_column]
    else:
        raise ValueError('flag_columns should either be a string or a '
                         'list (or iterable) of strings')

    return cls.from_array(flux=flux.data, dispersion=dispersion.data,
                          uncertainty=uncertainty,
                          dispersion_unit=dispersion.units,
                          unit=flux.units, mask=table.mask, flags=flags,
                          meta=table.meta)

def wcs_from_spec_footprints(wcslist, refwcs=None, transform=None,
                             domain=None):
    """
    Create a WCS from a list of spatial/spectral WCS.

    Build-7 workaround.
    """
    if not isiterable(wcslist):
        raise ValueError("Expected 'wcslist' to be an iterable of gwcs.WCS")
    if not all([isinstance(w, WCS) for w in wcslist]):
        raise TypeError("All items in 'wcslist' must be instances of "
                        "gwcs.WCS")
    if refwcs is None:
        refwcs = wcslist[0]
    else:
        if not isinstance(refwcs, WCS):
            raise TypeError("Expected refwcs to be an instance of gwcs.WCS.")

    # TODO: generalize an approach to do this for more than one wcs.  For
    # now, we just do it for one, using the api for a list of wcs.
    # Compute a fiducial point for the output frame at center of input data
    fiducial = compute_spec_fiducial(wcslist, domain=domain)
    # Create transform for output frame
    transform = compute_spec_transform(fiducial, refwcs)
    output_frame = refwcs.output_frame
    wnew = WCS(output_frame=output_frame, forward_transform=transform)

    # Build the domain in the output frame wcs object by running the input
    # wcs footprints through the backward transform of the output wcs
    sky = [spec_footprint(w) for w in wcslist]
    domain_grid = [wnew.backward_transform(*f) for f in sky]

    sky0 = sky[0]
    det = domain_grid[0]
    offsets = []
    input_frame = refwcs.input_frame
    for axis in input_frame.axes_order:
        axis_min = np.nanmin(det[axis])
        offsets.append(axis_min)
    transform = Shift(offsets[0]) & Shift(offsets[1]) | transform
    wnew = WCS(output_frame=output_frame, input_frame=input_frame,
               forward_transform=transform)

    domain = []
    for axis in input_frame.axes_order:
        axis_min = np.nanmin(domain_grid[0][axis])
        axis_max = np.nanmax(domain_grid[0][axis]) + 1
        domain.append({'lower': axis_min, 'upper': axis_max,
                       'includes_lower': True, 'includes_upper': False})
    wnew.domain = domain
    return wnew

def invert(self, dispersion_values):
    if not hasattr(dispersion_values, 'unit'):
        raise u.UnitsException(
            'Must give a dispersion value with a valid unit')

    if misc.isiterable(dispersion_values) and \
            not isinstance(dispersion_values, basestring):
        dispersion_values = np.array(dispersion_values)

    return float((dispersion_values - self.dispersion0) /
                 self.dispersion_delta) + self.pixel_index

def test_properties(self):
    props = source_properties(IMAGE, SEGM)
    assert props[0].id == 1
    assert_quantity_allclose(props[0].xcentroid, XCEN * u.pix, rtol=1.0e-2)
    assert_quantity_allclose(props[0].ycentroid, YCEN * u.pix, rtol=1.0e-2)
    assert_allclose(props[0].source_sum, IMAGE[IMAGE >= THRESHOLD].sum())
    assert_quantity_allclose(props[0].semimajor_axis_sigma,
                             MAJOR_SIG * u.pix, rtol=1.0e-2)
    assert_quantity_allclose(props[0].semiminor_axis_sigma,
                             MINOR_SIG * u.pix, rtol=1.0e-2)
    assert_quantity_allclose(props[0].orientation, THETA * u.rad,
                             rtol=1.0e-3)
    assert_allclose(props[0].bbox.value, [35, 25, 70, 77])
    assert_quantity_allclose(props[0].area, 1058.0 * u.pix ** 2)
    assert_allclose(len(props[0].values), props[0].area.value)
    assert_allclose(len(props[0].coords), 2)
    assert_allclose(len(props[0].coords[0]), props[0].area.value)

    properties = ["background_at_centroid", "background_mean",
                  "eccentricity", "ellipticity", "elongation",
                  "equivalent_radius", "max_value", "maxval_xpos",
                  "maxval_ypos", "min_value", "minval_xpos",
                  "minval_ypos", "perimeter", "cxx", "cxy", "cyy",
                  "covar_sigx2", "covar_sigxy", "covar_sigy2",
                  "xmax", "xmin", "ymax", "ymin"]
    for propname in properties:
        assert not isiterable(getattr(props[0], propname))

    properties = ["centroid", "covariance_eigvals", "cutout_centroid",
                  "maxval_cutout_pos", "minval_cutout_pos"]
    shapes = [getattr(props[0], p).shape for p in properties]
    for shape in shapes:
        assert shape == (2,)

    properties = ["covariance", "inertia_tensor"]
    shapes = [getattr(props[0], p).shape for p in properties]
    for shape in shapes:
        assert shape == (2, 2)

    properties = ["moments", "moments_central"]
    shapes = [getattr(props[0], p).shape for p in properties]
    for shape in shapes:
        assert shape == (4, 4)

def evaluate(self, x, y):  # region=None
    """
    Parameters
    ----------
    x : float
        Input pixel coordinate
    y : float
        Input pixel coordinate
    region : int or str
        Region id
    """
    # if region is not None:
    #     if not np.isnan(region):
    #         return self._selector[region.item()](x, y)

    # use input_values for indexing the output arrays
    input_values = (x, y)
    idx = np.empty(x.shape, dtype=int)
    idy = np.empty(y.shape, dtype=int)
    idx = np.around(x, out=idx)
    idy = np.around(y, out=idy)
    result = self.regions_mask[idy, idx]

    if not isiterable(result) or isinstance(result, np.string_):
        if result != 0 and result != '':
            result = self._selector[result](*input_values)
            if np.isscalar(result[0]):
                result = [np.array(ar) for ar in result]
            return result
        else:
            broadcast_missing_value = np.broadcast_arrays(
                self.undefined_transform_value, input_values[0])[0]
            return [broadcast_missing_value
                    for output in range(self.n_outputs)]

    unique_regions = self.get_unique_regions(result)
    if unique_regions == []:
        warnings.warn('There are no regions with valid transforms.')
        return
    nout = self._selector[unique_regions[0]].n_outputs
    output_arrays = [np.zeros(self.regions_mask.shape, dtype=float)
                     for i in range(nout)]
    for ar in output_arrays:
        ar[self.regions_mask == 0] = self.undefined_transform_value
    for i in unique_regions:
        transform = self._selector[i]
        indices = (self.regions_mask == i).nonzero()
        outputs = transform(*indices)
        for out, tr_out in zip(output_arrays, outputs):
            out[indices] = tr_out

    result = [ar[idx, idy] for ar in output_arrays]
    if np.isscalar(result[0]):
        result = [np.array(ar) for ar in result]
    return tuple(result)

def test_properties(self):
    with pytest.warns(AstropyDeprecationWarning):
        obj = source_properties(IMAGE, self.segm)[1]

    assert obj.id == 2
    assert_quantity_allclose(obj.xcentroid, XCEN * u.pix, rtol=1.e-2)
    assert_quantity_allclose(obj.ycentroid, YCEN * u.pix, rtol=1.e-2)
    assert_allclose(obj.source_sum, 16723.388)
    assert_quantity_allclose(obj.semimajor_axis_sigma,
                             MAJOR_SIG * u.pix, rtol=1.e-2)
    assert_quantity_allclose(obj.semiminor_axis_sigma,
                             MINOR_SIG * u.pix, rtol=1.e-2)
    assert_quantity_allclose(obj.orientation, THETA * u.rad, rtol=1.e-3)
    assert obj.bbox_xmin.value == 25
    assert obj.bbox_xmax.value == 77
    assert obj.bbox_ymin.value == 35
    assert obj.bbox_ymax.value == 70
    assert_quantity_allclose(obj.area, 1058.0 * u.pix**2)
    assert_allclose(len(obj.indices), 2)
    assert_allclose(len(obj.indices[0]), obj.area.value)

    properties = ['background_at_centroid', 'background_mean',
                  'eccentricity', 'ellipticity', 'elongation',
                  'equivalent_radius', 'max_value', 'maxval_xpos',
                  'maxval_ypos', 'min_value', 'minval_xpos',
                  'minval_ypos', 'perimeter', 'cxx', 'cxy', 'cyy',
                  'covar_sigx2', 'covar_sigxy', 'covar_sigy2',
                  'bbox_xmin', 'bbox_xmax', 'bbox_ymin', 'bbox_ymax']
    for propname in properties:
        assert not isiterable(getattr(obj, propname))

    properties = ['centroid', 'covariance_eigvals', 'cutout_centroid',
                  'maxval_cutout_pos', 'minval_cutout_pos']
    shapes = [getattr(obj, p).shape for p in properties]
    for shape in shapes:
        assert shape == (2, )

    properties = ['covariance', 'inertia_tensor']
    shapes = [getattr(obj, p).shape for p in properties]
    for shape in shapes:
        assert shape == (2, 2)

    properties = ['moments', 'moments_central']
    shapes = [getattr(obj, p).shape for p in properties]
    for shape in shapes:
        assert shape == (4, 4)

    assert obj.kron_radius.value < 1.3
    assert obj.kron_flux < 16700.
    assert obj.kron_fluxerr is None

def check_odd(val, name='value'):
    """ Raise Exception if `val` is not odd. """
    if isiterable(val):
        if np.any(np.asarray(val) % 2 == 0):
            raise Exception(f'{name} must be odd')
    else:
        if val % 2 == 0:
            raise Exception(f'{name} must be odd')

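# Usage sketch for check_odd with scalar and array input (not part of the
# original module):
def _example_check_odd():
    check_odd(3)                              # passes silently
    check_odd([3, 5, 7], name='kernel size')  # passes silently
    try:
        check_odd(4)
    except Exception as err:
        return err                            # 'value must be odd'
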
def _set_axis_physical_types(self, pht=None):
    """
    Set the physical type of the coordinate axes using VO UCD1+ v1.23
    definitions.
    """
    if pht is not None:
        if isinstance(pht, str):
            pht = (pht,)
        elif not isiterable(pht):
            raise TypeError("axis_physical_types must be of type string "
                            "or iterable of strings")
        if len(pht) != self.naxes:
            raise ValueError('"axis_physical_types" must be of length '
                             '{}'.format(self.naxes))
        ph_type = []
        for axt in pht:
            if axt not in VALID_UCDS and not axt.startswith("custom:"):
                ph_type.append("custom:{}".format(axt))
            else:
                ph_type.append(axt)
        validate_physical_types(ph_type)
        return tuple(ph_type)

    if isinstance(self, CelestialFrame):
        if isinstance(self.reference_frame, coord.Galactic):
            ph_type = "pos.galactic.lon", "pos.galactic.lat"
        elif isinstance(self.reference_frame,
                        (coord.GeocentricTrueEcliptic, coord.GCRS,
                         coord.PrecessedGeocentric)):
            ph_type = "pos.bodyrc.lon", "pos.bodyrc.lat"
        elif isinstance(self.reference_frame,
                        coord.builtin_frames.BaseRADecFrame):
            ph_type = "pos.eq.ra", "pos.eq.dec"
        elif isinstance(self.reference_frame,
                        coord.builtin_frames.BaseEclipticFrame):
            ph_type = "pos.ecliptic.lon", "pos.ecliptic.lat"
        else:
            ph_type = tuple("custom:{}".format(t) for t in self.axes_names)
    elif isinstance(self, SpectralFrame):
        if self.unit[0].physical_type == "frequency":
            ph_type = ("em.freq",)
        elif self.unit[0].physical_type == "length":
            ph_type = ("em.wl",)
        elif self.unit[0].physical_type == "energy":
            ph_type = ("em.energy",)
        else:
            ph_type = ("custom:{}".format(self.unit[0].physical_type),)
    elif isinstance(self, TemporalFrame):
        ph_type = ("time",)
    elif isinstance(self, Frame2D):
        if all(self.axes_names):
            ph_type = self.axes_names
        else:
            ph_type = self.axes_type
        ph_type = tuple("custom:{}".format(t) for t in ph_type)
    else:
        ph_type = tuple("custom:{}".format(t) for t in self.axes_type)

    validate_physical_types(ph_type)
    return ph_type

def fetch(self, jsoc_response, path=None, overwrite=False, progress=True,
          max_conn=5, downloader=None, sleep=10):
    """
    Make the request for the data in a JSOC response and wait for it to
    be staged and then download the data.

    Parameters
    ----------
    jsoc_response : `~sunpy.net.jsoc.jsoc.JSOCResponse` object
        A response object
    path : `str`
        Path to save data to, defaults to SunPy download dir
    overwrite : `bool`
        Replace files with the same name if True
    progress : `bool`
        Print progress info to terminal
    max_conn : `int`
        Maximum number of download connections.
    downloader : `~sunpy.net.download.Downloader` instance
        A Custom downloader to use
    sleep : `int`
        The number of seconds to wait between calls to JSOC to check the
        status of the request.

    Returns
    -------
    results : a `~sunpy.net.download.Results` instance
        A Results object
    """
    # Make staging request to JSOC
    responses = self.request_data(jsoc_response)
    # Make response iterable
    if not isiterable(responses):
        responses = [responses]
    # Add them to the response for good measure
    jsoc_response.requests = [r for r in responses]
    time.sleep(sleep / 2.)

    r = Results(lambda x: None,
                done=lambda maps: [v['path'] for v in maps.values()])

    for response in responses:
        response.wait(verbose=progress)
        r = self.get_request(response, path=path, overwrite=overwrite,
                             progress=progress, results=r)

    return r

def abs_line_data(wrest, datfil=None, ret_flg=0, tol=1e-3 * u.AA):
    """
    wrest : float or array -- Input wavelength (Ang)
    tol : float (1e-3)
        Tolerance for finding a match in wrest
    ret_flg : int (0)
        0: Return a dictionary
        1: Return an astropy Table
    """
    # Data file
    if datfil is None:
        datfil = xa_path + '/data/atomic/spec_atomic_lines.fits'
    # Read
    data = Table.read(datfil)

    if not isiterable(wrest):
        wrest = [wrest]

    # Loop
    all_row = []
    for iwrest in wrest:
        mt = np.where(np.fabs(data['wrest'] - iwrest) < tol)[0]
        nm = len(mt)
        # Found?
        if nm == 0:
            raise ValueError(
                'abs_line_data: {:.3f} not in our table {:s}'.format(
                    iwrest, datfil))
        elif nm == 1:  # Grab
            all_row.append(mt[0])
        else:
            raise ValueError(
                'abs_line_data: {:g} appears {:d} times in our table '
                '{:s}'.format(iwrest, nm, datfil))
    tab = data[all_row]

    # Return
    if ret_flg == 0:  # Dictionary(ies)
        adict = []
        for row in all_row:
            adict.append(dict(zip(data.dtype.names, data[row])))
        if len(wrest) == 1:
            return adict[0]
        else:
            return adict
    elif ret_flg == 1:
        return tab
    else:
        raise Exception('abs_line_data: Not ready for this..')

def __init__(self, *args, **kwargs):
    if 'shape' in kwargs:
        self.shape = kwargs.pop('shape')
        if not isiterable(self.shape):
            raise ValueError("FlagCollection shape should be "
                             "an iterable object")
    else:
        raise Exception("FlagCollection should be initialized with "
                        "the shape of the data")
    OrderedDict.__init__(self, *args, **kwargs)

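# Sketch: building a FlagCollection for a 2D dataset (assumes the class
# wrapping this __init__ subclasses OrderedDict, as the call above implies,
# and that numpy is imported as np):
def _example_flag_collection():
    flags = FlagCollection(shape=(100, 100))
    flags['saturated'] = np.zeros((100, 100), dtype=bool)
    return flags
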
def __init__(self, P_min, P_max, jitter=None, jitter_unit=None,
             poly_trend=1, linear_par_Vinv=None, anomaly_tol=1E-10,
             anomaly_maxiter=128):
    # the names of the default parameters
    self.default_params = ['P', 'M0', 'e', 'omega', 'jitter', 'K']

    self.poly_trend = int(poly_trend)
    self.default_params += ['v{0}'.format(i)
                            for i in range(self.poly_trend)]

    self.P_min = P_min
    self.P_max = P_max
    self.anomaly_tol = float(anomaly_tol)
    self.anomaly_maxiter = int(anomaly_maxiter)

    # K, then the linear parameters
    _n_linear = 1 + self.poly_trend
    if linear_par_Vinv is None:
        self.linear_par_Vinv = 1e-10 * np.eye(_n_linear)
    else:
        self.linear_par_Vinv = np.array(linear_par_Vinv)
        if self.linear_par_Vinv.shape != (_n_linear, _n_linear):
            raise ValueError("Linear parameter inverse variance prior "
                             "must have shape ({0}, {0})"
                             .format(_n_linear))

    # validate the input jitter specification
    if jitter is None:
        jitter = 0 * u.km / u.s

    if isiterable(jitter):
        if len(jitter) != 2:
            raise ValueError("If specifying parameters for the jitter "
                             "prior, you must pass in a length-2 "
                             "container containing the mean and standard "
                             "deviation of the Gaussian over "
                             "log(jitter^2)")

        if jitter_unit is None or not isinstance(jitter_unit, u.UnitBase):
            raise TypeError("If specifying parameters for the jitter "
                            "prior, you must also specify the units of "
                            "the jitter for evaluating the prior as an "
                            "astropy.units.UnitBase instance.")

        self._fixed_jitter = False
        self._jitter_unit = jitter_unit
        self.jitter = jitter
    else:
        self._fixed_jitter = True
        self._jitter_unit = jitter.unit
        self.jitter = jitter

def abs_line_data(wrest, datfil=None, ret_flg=0, tol=1e-3 * u.AA):
    """
    wrest : float or array -- Input wavelength (Ang)
    tol : float (1e-3)
        Tolerance for finding a match in wrest
    ret_flg : int (0)
        0: Return a dictionary
        1: Return an astropy Table
    """
    # Data file
    if datfil is None:
        datfil = xa_path + '/data/atomic/spec_atomic_lines.fits'
    # Read (cache the table at module level)
    global abs_data
    if abs_data is None:
        abs_data = Table.read(datfil)

    if not isiterable(wrest):
        wrest = [wrest]

    # Loop
    all_row = []
    for iwrest in wrest:
        mt = np.where(np.fabs(abs_data['wrest'] - iwrest) < tol)[0]
        nm = len(mt)
        # Found?
        if nm == 0:
            raise ValueError(
                'abs_line_data: {:.3f} not in our table {:s}'.format(
                    iwrest, datfil))
        elif nm == 1:  # Grab
            all_row.append(mt[0])
        else:
            raise ValueError(
                'abs_line_data: {:g} appears {:d} times in our table '
                '{:s}'.format(iwrest, nm, datfil))
    tab = abs_data[all_row]

    # Return
    if ret_flg == 0:  # Dictionary(ies)
        adict = []
        for row in all_row:
            adict.append(dict(zip(abs_data.dtype.names, abs_data[row])))
        if len(wrest) == 1:
            return adict[0]
        else:
            return adict
    elif ret_flg == 1:
        return tab
    else:
        raise Exception('abs_line_data: Not ready for this..')

def parallax_converter(x):
    x = np.asanyarray(x)
    d = 1 / x
    if isiterable(d):
        d[d < 0] = np.nan
        return d
    else:
        if d < 0:
            return np.array(np.nan)
        else:
            return d

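# Sketch: converting parallaxes to distances with parallax_converter; for
# array input, negative parallaxes map to NaN (not part of the original
# module):
def _example_parallax_converter():
    d_arr = parallax_converter(np.array([0.1, 0.01, -0.005]))
    d_scalar = parallax_converter(0.1)
    return d_arr, d_scalar
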
def get_m2_min(m1, mf):
    """Compute the minimum companion mass given the primary mass and the
    binary mass function.

    Parameters
    ----------
    m1 : quantity_like [mass]
        Primary mass.
    mf : quantity_like [mass]
        Binary mass function.

    Returns
    -------
    m2_min : `~astropy.units.Quantity` [mass]
        The minimum companion mass.
    """
    from scipy.optimize import root

    mf = mf.to(m1.unit)
    if isiterable(m1) and isiterable(mf):
        m2s = []
        for x, y in zip(m1, mf):
            try:
                res = root(_m2_func, x0=10., args=(x.value, 1., y.value))
                if not res.success:
                    raise RuntimeError('Unsuccessful')
                m2s.append(res.x[0])
            except Exception as e:
                m2s.append(np.nan)
        return m2s * m1.unit
    else:
        res = root(_m2_func, x0=10., args=(m1.value, 1., mf.value))
        if res.success:
            return res.x[0] * m1.unit
        else:
            return np.nan * m1.unit

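# Usage sketch for get_m2_min (values illustrative; assumes astropy.units
# is importable and the _m2_func helper used above is defined):
# import astropy.units as u
# m2 = get_m2_min(1.0 * u.Msun, 0.01 * u.Msun)
# m2s = get_m2_min([1.0, 1.2] * u.Msun, [0.01, 0.02] * u.Msun)
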
def get_m2_min(m1, mf):
    mf = mf.to(m1.unit)
    if isiterable(m1) and isiterable(mf):
        m2s = []
        for x, y in zip(m1, mf):
            try:
                res = root(m2_func, x0=10., args=(x.value, 1., y.value))
                if not res.success:
                    raise RuntimeError('Unsuccessful')
                m2s.append(res.x[0])
            except Exception as e:
                logger.debug('Failed to compute m2_min for sample: {0}'
                             .format(str(e)))
                m2s.append(np.nan)
        return m2s * m1.unit
    else:
        res = root(m2_func, x0=10., args=(m1.value, 1., mf.value))
        if res.success:
            return res.x[0] * m1.unit
        else:
            return np.nan * m1.unit

def _quantitiesToValue(quantity):
    if isiterable(quantity):
        if isinstance(quantity[0], Quantity):
            value = np.array(
                [quantity[i].si.value for i in range(len(quantity))])
        else:
            value = np.array(
                [quantity[i] for i in range(len(quantity))])
    else:
        if isinstance(quantity, Quantity):
            value = quantity.si.value
        else:
            value = quantity
    return value

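# Sketch: _quantitiesToValue strips units (to SI) from scalars or
# sequences and passes plain numbers through (not part of the original):
def _example_quantities_to_value():
    import astropy.units as u
    v1 = _quantitiesToValue(u.Quantity(1.0, u.km))      # -> 1000.0
    v2 = _quantitiesToValue([1.0 * u.km, 2.0 * u.km])   # -> [1000., 2000.]
    v3 = _quantitiesToValue(3.5)                        # passed through
    return v1, v2, v3
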
def from_table(cls, table, dispersion_column='dispersion',
               flux_column='flux', uncertainty_column=None,
               flag_columns=None):
    """
    Initializes a `Spectrum1D`-object from an `~astropy.table.Table` object

    Parameters
    ----------
    table : ~astropy.table.Table object

    dispersion_column : str, optional
        name of the dispersion column. default is 'dispersion'

    flux_column : str, optional
        name of the flux column. default is 'flux'

    uncertainty_column : str, optional
        name of the uncertainty column. If set to None uncertainty is
        set to None. default is None

    flag_columns : str or list, optional
        name or names of flag columns. If multiple names are supplied a
        ~astropy.nddata.FlagCollection will be built. default is None
    """
    flux = table[flux_column]
    dispersion = table[dispersion_column]

    if uncertainty_column is not None:
        uncertainty = table[uncertainty_column]
        if uncertainty.unit != flux.unit:
            log.warning('"uncertainty"-column and "flux"-column do not '
                        'share the units (%s vs %s) ',
                        uncertainty.unit, flux.unit)
    else:
        uncertainty = None

    if isinstance(flag_columns, six.string_types):
        flags = table[flag_columns]
    elif misc.isiterable(flag_columns):
        flags = FlagCollection(shape=flux.shape)
        for flag_column in flag_columns:
            flags[flag_column] = table[flag_column]
    else:
        raise ValueError('flag_columns should either be a string or a '
                         'list (or iterable) of strings')

    return cls.from_array(flux=flux.data, dispersion=dispersion.data,
                          uncertainty=uncertainty,
                          dispersion_unit=dispersion.units,
                          unit=flux.units, mask=table.mask, flags=flags,
                          meta=table.meta)

def _constantrotating_static_helper(frame_r, frame_i, w, t=None, sign=1.):
    # TODO: use representation arithmetic instead
    Omega = -frame_r.parameters['Omega'].decompose(frame_i.units).value

    if not isinstance(w, Orbit) and t is None:
        raise ValueError("Time array must be provided if not passing an "
                         "Orbit subclass.")

    if t is None:
        t = w.t
    elif not hasattr(t, 'unit'):
        t = t * frame_i.units['time']

    if t is None:
        raise ValueError('Time must be supplied either through the input '
                         'Orbit class instance or through the t argument.')
    t = t.decompose(frame_i.units).value

    # HACK: this is a little bit crazy...this makes it so that !=3D
    # representations will work here
    if hasattr(w.pos, 'xyz'):
        pos = w.pos
        vel = w.vel
    else:
        cart = w.cartesian
        pos = cart.pos
        vel = cart.vel

    pos = pos.xyz.decompose(frame_i.units).value
    vel = vel.d_xyz.decompose(frame_i.units).value

    # get rotation angle, axis vs. time
    if isiterable(Omega):  # 3D
        vec = Omega / np.linalg.norm(Omega)
        theta = np.linalg.norm(Omega) * t
        x_i2r = rodrigues_axis_angle_rotate(pos, vec, sign * theta)
        v_i2r = rodrigues_axis_angle_rotate(vel, vec, sign * theta)
    else:  # 2D
        vec = Omega * np.array([0, 0, 1.])
        theta = sign * Omega * t
        x_i2r = z_angle_rotate(pos, theta)
        v_i2r = z_angle_rotate(vel, theta)

    return (x_i2r * frame_i.units['length'],
            v_i2r * frame_i.units['length'] / frame_i.units['time'])

def _prepare_data(data, error=None, effective_gain=None, background=None):
    """
    Prepare the data, error, and background arrays.

    If any of ``data``, ``error``, and ``background`` have units, then
    they all are checked that they have units and the units are the
    same.

    If ``effective_gain`` is input, then the total error array including
    source Poisson noise is calculated.

    If ``background`` is input, then it is returned as a 2D array with
    the same shape as ``data`` (if necessary).  It is *not* subtracted
    from the input ``data``.

    Notes
    -----
    ``data``, ``error``, and ``background`` must all have the same units
    if they are `~astropy.units.Quantity`\\s.

    If ``effective_gain`` is a `~astropy.units.Quantity`, then it must
    have units such that ``effective_gain * data`` is in units of counts
    (e.g. counts, electrons, or photons).
    """
    inputs = [data, error, background]
    _check_units(inputs)

    # generate a 2D background array, if necessary
    if background is not None:
        if not isiterable(background):
            background = np.zeros(data.shape) + background
        else:
            if background.shape != data.shape:
                raise ValueError('If input background is 2D, then it must '
                                 'have the same shape as the input data.')

    if error is not None:
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape')
        if effective_gain is not None:
            error = calc_total_error(data, error, effective_gain)

    return data, error, background

def check_request(self, requests):
    """
    Check the status of a request and print out a message about it.

    Parameters
    ----------
    requests : `~drms.ExportRequest` object or a list of
        `~drms.ExportRequest` objects, returned by
        `~sunpy.net.jsoc.jsoc.JSOCClient.request_data`

    Returns
    -------
    status : `int` or `list`
        A status or list of statuses that were returned by JSOC.
    """
    # Convert IDs to a list if not already
    if not isiterable(requests) or isinstance(requests,
                                              drms.ExportRequest):
        requests = [requests]

    allstatus = []
    for request in requests:
        status = request.status
        if status == request._status_code_ok:  # Data ready to download
            print("Request {0} was exported at {1} and is ready to "
                  "download.".format(request.id, request._d['exptime']))
        elif status in request._status_codes_pending:
            print_message = "Request {0} was submitted {1} seconds ago, " \
                            "it is not ready to download."
            print(print_message.format(request.id, request._d['wait']))
        else:
            print_message = "Request returned status: {0} with error: {1}"
            json_status = request.status
            json_error = request._d.get('error', '')
            print(print_message.format(json_status, json_error))
        allstatus.append(status)

    if len(allstatus) == 1:
        return allstatus[0]
    return allstatus

def ion_name(ion, flg=0, nspace=None):
    """ Convert an ion into a string

    JXP on 16 Nov 2014

    Parameters
    ----------
    ion : tuple
        (Z, ion)
    nspace : int (0)
        Number of spaces to insert

    Returns
    -------
    name : string
        e.g. Si II, {\\rm Si}^{+}
    """
    if isiterable(ion):
        elm = ELEMENTS[ion[0]]
        str_elm = elm.symbol
    else:
        raise ValueError('ionization.ion_name: Not ready for this input yet.')

    # Ion state
    if flg == 0:  # Roman
        if nspace is None:
            nspace = 0
        str_ion = roman.toRoman(ion[1])
        spc = ' ' * nspace
        outp = str_elm + spc + str_ion
    elif flg == 1:  # LaTeX
        if ion[1] == 0:
            raise ValueError('ionization.ion_name: Not ready for this '
                             'input yet.')
        elif ion[1] == 1:
            str_ion = '^0'
        elif ion[1] == 2:
            str_ion = '^{+}'
        elif ion[1] == 3:
            str_ion = '^{++}'
        else:
            str_ion = '^{+' + str(ion[1] - 1) + '}'
        outp = '{\\rm ' + str_elm + '}' + str_ion
    else:
        raise ValueError('ionization.ion_name: Not ready for this flg.')

    return outp

def check_request(self, requestIDs):
    """
    Check the status of a request and print out a message about it

    Parameters
    ----------
    requestIDs : list or string
        A list of requestIDs to check

    Returns
    -------
    status : list
        A list of statuses that were returned by JSOC
    """
    # Convert IDs to a list if not already
    if not isiterable(requestIDs) or isinstance(requestIDs,
                                                six.string_types):
        requestIDs = [requestIDs]

    allstatus = []
    for request_id in requestIDs:
        u = self._request_status(request_id)
        status = int(u.json()['status'])

        if status == 0:  # Data ready to download
            print("Request {0} was exported at {1} and is ready to "
                  "download.".format(u.json()['requestid'],
                                     u.json()['exptime']))
        elif status == 1:
            print_message = "Request {0} was submitted {1} seconds ago, " \
                            "it is not ready to download."
            print(print_message.format(u.json()['requestid'],
                                       u.json()['wait']))
        else:
            print_message = "Request returned status: {0} with error: {1}"
            json_status = u.json()['status']
            json_error = u.json()['error']
            print(print_message.format(json_status, json_error))

        allstatus.append(status)

    return allstatus

def abund(Z, dat_file=None, table=None):
    """ Report back the solar abundance

    Parameters
    ----------
    Z : int or string (can be an array of either)
        Atomic number or name

    Returns
    -------
    out_abnd : float (scalar or an array)
        Solar abundance.  Meteoritic if available.

    JXP on 21 Nov 2014
    """
    if table is None:
        # Data file
        if dat_file is None:
            dat_file = xa_path + '/data/abund/solar_Apslund09.dat'
        # Read table
        names = ('name', 'abund', 'Z')
        table = ascii.read(dat_file, format='no_header', names=names)

    # Iterate?
    if isiterable(Z):
        out_abnd = []
        for iZ in Z:
            out_abnd.append(abund(iZ, table=table))
        out_abnd = np.array(out_abnd)
    else:
        if isinstance(Z, int):
            idx = np.where(table['Z'] == Z)[0]
            if len(idx) == 0:
                raise ValueError('abund.solar.abund: Z={:d} not in '
                                 '{:s}'.format(Z, dat_file))
            out_abnd = table['abund'][idx[0]]
        else:
            raise ValueError('abund.solar.abund: Not ready for this '
                             'input yet.')

    return out_abnd

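# Usage sketch for abund (the data file path is resolved inside; not part
# of the original module).  Plain Python ints are used because the int
# branch above checks isinstance(Z, int):
# fe = abund(26)              # single atomic number
# several = abund([6, 8, 26])  # list of atomic numbers -> array
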
def to_tree_transform(cls, model, ctx):
    node = OrderedDict()
    if isinstance(model, LabelMapperArray):
        node['mapper'] = model.mapper
    if isinstance(model, (LabelMapperDict, LabelMapperRange)):
        mapper = OrderedDict()
        labels = list(model.mapper)
        transforms = []
        for k in labels:
            transforms.append(model.mapper[k])
        if isiterable(labels[0]):
            labels = [list(l) for l in labels]
        mapper['labels'] = labels
        mapper['models'] = transforms
        node['mapper'] = mapper
        node['inputs'] = list(model.inputs)
    if model.inputs_mapping is not None:
        node['inputs_mapping'] = model.inputs_mapping
    return yamlutil.custom_tree_to_tagged_tree(node, ctx)

def physical_distance(self, z):
    """ Physical line-of-sight distance in Mpc at a given redshift.

    Parameters
    ----------
    z : array_like
        Input redshifts.  Must be 1D or scalar.

    Returns
    -------
    d : ndarray, or float if input scalar
        Physical distance in Mpc to each input redshift.
    """
    from astropy import units as u
    from astropy import constants as const

    if not isiterable(z):
        return self.lookback_time(z) * const.c.to(u.Mpc / u.Gyr)

    out = [(self.lookback_time(redshift) * const.c.to(u.Mpc / u.Gyr))
           for redshift in z]
    return u.Mpc * np.array([tmp.value for tmp in out])

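# Usage sketch (``cosmo`` is a hypothetical instance of the cosmology
# class that defines lookback_time and this method):
# d = cosmo.physical_distance(0.5)              # scalar -> Quantity in Mpc
# ds = cosmo.physical_distance([0.1, 0.5, 1.0])  # array -> Quantity array
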
def wcs_from_footprints(wcslist, refwcs=None, transform=None, domain=None):
    """
    Create a WCS from a list of WCS objects.

    A fiducial point in the output coordinate frame is created from the
    footprints of all WCS objects.  For a spatial frame this is the
    center of the union of the footprints.  For a spectral frame the
    fiducial is in the beginning of the footprint range.
    If ``refwcs`` is not specified, the first WCS object in the list is
    considered a reference.  The output coordinate frame and projection
    (for celestial frames) is taken from ``refwcs``.
    If ``transform`` is not supplied, a compound transform comprised of
    scaling and rotation is copied from ``refwcs``.
    If ``domain`` is not supplied, the domain of the new WCS is computed
    from the domains of all input WCSs.

    Parameters
    ----------
    wcslist : list of `~gwcs.wcs.WCS`
        A list of WCS objects.
    refwcs : `~gwcs.wcs.WCS`, optional
        Reference WCS.  The output coordinate frame, the projection and a
        scaling and rotation transform is created from it.  If not
        supplied the first WCS in the list is used as ``refwcs``.
    transform : `~astropy.modeling.core.Model`, optional
        A transform, passed to :meth:`~gwcs.WCS.wcs_from_fiducial`.
        If not supplied Scaling | Rotation is computed from ``refwcs``.
    domain : list of dicts, optional
        Domain of the new WCS.  If not supplied it is computed from the
        domain of all inputs.
    """
    if not isiterable(wcslist):
        raise ValueError("Expected 'wcslist' to be an iterable of WCS "
                         "objects.")
    if not all([isinstance(w, WCS) for w in wcslist]):
        raise TypeError("All items in wcslist are expected to be "
                        "instances of gwcs.WCS.")
    if refwcs is None:
        refwcs = wcslist[0]
    else:
        if not isinstance(refwcs, WCS):
            raise TypeError("Expected refwcs to be an instance of gwcs.WCS.")

    fiducial = compute_fiducial(wcslist, domain)
    prj = np.array([isinstance(m, projections.Projection)
                    for m in refwcs.forward_transform]).nonzero()[0]
    if prj:
        # TODO: Fix the compound model indexing with numpy integers in
        # astropy.  Remove the workaround for this issue from here.
        prj = refwcs.forward_transform[int(prj[0])]
    else:
        prj = None
    trans = []
    scales = [m for m in refwcs.forward_transform
              if isinstance(m, astmodels.Scale)]
    if scales:
        trans.append(functools.reduce(lambda x, y: x & y, scales))
    rotation = [m for m in refwcs.forward_transform
                if isinstance(m, astmodels.AffineTransformation2D)]
    if rotation:
        trans.append(rotation[0])
    if trans:
        tr = functools.reduce(lambda x, y: x | y, trans)
    else:
        tr = None
    out_frame = getattr(refwcs, getattr(refwcs, 'output_frame'))
    wnew = wcs_from_fiducial(fiducial, coordinate_frame=out_frame,
                             projection=prj, transform=tr)

    # domain_bounds = np.hstack([gwutils._domain_to_bounds(d) for d in
    #                            [w.domain for w in wcslist]])
    domain_footprints = [w.footprint() for w in wcslist]
    domain_bounds = np.hstack([wnew.backward_transform(*f)
                               for f in domain_footprints])
    for axs in domain_bounds:
        axs -= axs.min()
    domain = []
    for axis in out_frame.axes_order:
        axis_min, axis_max = (domain_bounds[axis].min(),
                              domain_bounds[axis].max())
        domain.append({'lower': axis_min, 'upper': axis_max,
                       'includes_lower': True, 'includes_upper': True})
    wnew.domain = domain
    return wnew

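# Usage sketch (``wcs_list`` is a hypothetical list of gwcs.WCS objects
# built elsewhere):
# combined = wcs_from_footprints(wcs_list)
# combined = wcs_from_footprints(wcs_list, refwcs=wcs_list[0])
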
def animate_source(source, label=None, fps=30, length=20.,
                   phase_range=(None, None), wave_range=(None, None),
                   match_peakphase=True, match_peakflux=True,
                   peakwave=4000., fname=None, still=False):
    """Animate spectral timeseries of model(s) using matplotlib.animation.

    *Note:* Requires matplotlib v1.1 or higher.

    Parameters
    ----------
    source : `~sncosmo.Source` or str or iterable thereof
        The Source to animate or list of sources to animate.
    label : str or list of str, optional
        If given, label(s) for Sources, to be displayed in a legend on
        the animation.
    fps : int, optional
        Frames per second. Default is 30.
    length : float, optional
        Movie length in seconds. Default is 20.
    phase_range : (float, float), optional
        Phase range to plot (in the timeframe of the first source if
        multiple sources are given). `None` indicates to use the
        maximum extent of the source(s).
    wave_range : (float, float), optional
        Wavelength range to plot. `None` indicates to use the maximum
        extent of the source(s).
    match_peakflux : bool, optional
        For multiple sources, scale fluxes so that the peak of the
        spectrum at the reference phase matches that of the first
        source. Default is True.
    match_peakphase : bool, optional
        For multiple sources, shift additional sources so that the
        source's reference phase matches that of the first source.
    peakwave : float, optional
        Wavelength used in match_peakflux and match_peakphase. Default
        is 4000.
    fname : str, optional
        If not `None`, save animation to file `fname`. Requires ffmpeg
        to be installed with the appropriate codecs: If `fname` has the
        extension '.mp4' the libx264 codec is used. If the extension is
        '.webm' the VP8 codec is used. Otherwise, the 'mpeg4' codec is
        used. The first frame is also written to a png.
    still : bool, optional
        When writing to a file, also save the first frame as a png file.
        This is useful for displaying videos on a webpage.

    Returns
    -------
    ani : `~matplotlib.animation.FuncAnimation`
        Animation object that can be shown or saved.

    Examples
    --------
    Compare the salt2 and hsiao sources:

    >>> import matplotlib.pyplot as plt  # doctest: +SKIP
    >>> ani = animate_source(['salt2', 'hsiao'], phase_range=(None, 30.),
    ...                      wave_range=(2000., 9200.))  # doctest: +SKIP
    >>> plt.show()  # doctest: +SKIP

    Compare the salt2 source with ``x1=1`` to the same source with
    ``x1=0.``:

    >>> m1 = sncosmo.get_source('salt2')  # doctest: +SKIP
    >>> m1.set(x1=1.)  # doctest: +SKIP
    >>> m2 = sncosmo.get_source('salt2')  # doctest: +SKIP
    >>> m2.set(x1=0.)  # doctest: +SKIP
    >>> ani = animate_source([m1, m2],
    ...                      label=['salt2, x1=1', 'salt2, x1=0'])
    ...  # doctest: +SKIP
    >>> plt.show()  # doctest: +SKIP
    """
    from matplotlib import pyplot as plt
    from matplotlib import animation

    # Convert input to a list (if it isn't already).
    if (not isiterable(source)) or isinstance(source, six.string_types):
        sources = [source]
    else:
        sources = source

    # Check that all entries are Source or strings.
    for m in sources:
        if not (isinstance(m, six.string_types) or isinstance(m, Source)):
            raise ValueError('str or Source instance expected for '
                             'source(s)')
    sources = [get_source(m) for m in sources]

    # Get the source labels
    if label is None:
        labels = [None] * len(sources)
    elif isinstance(label, six.string_types):
        labels = [label]
    else:
        labels = label
    if len(labels) != len(sources):
        raise ValueError('if given, length of label must match '
                         'that of source')

    # Get a wavelength array for each source.
    waves = [np.arange(m.minwave(), m.maxwave(), 10.) for m in sources]

    # Phase offsets needed to match peak phases.
    peakphases = [m.peakphase(peakwave) for m in sources]
    if match_peakphase:
        phase_offsets = [p - peakphases[0] for p in peakphases]
    else:
        phase_offsets = [0.] * len(sources)

    # Determine phase range to display.
    minphase, maxphase = phase_range
    if minphase is None:
        minphase = min([sources[i].minphase() - phase_offsets[i]
                        for i in range(len(sources))])
    if maxphase is None:
        maxphase = max([sources[i].maxphase() - phase_offsets[i]
                        for i in range(len(sources))])

    # Determine the wavelength range to display.
    minwave, maxwave = wave_range
    if minwave is None:
        minwave = min([m.minwave() for m in sources])
    if maxwave is None:
        maxwave = max([m.maxwave() for m in sources])

    # source time interval between frames
    phase_interval = (maxphase - minphase) / (length * fps)

    # maximum flux density of entire spectrum at the peak phase
    # for each source
    max_fluxes = [np.max(m.flux(phase, w))
                  for m, phase, w in zip(sources, peakphases, waves)]

    # scaling factors
    if match_peakflux:
        peakfluxes = [m.flux(phase, peakwave)  # Not the same as max_fluxes!
                      for m, phase in zip(sources, peakphases)]
        scaling_factors = [peakfluxes[0] / f for f in peakfluxes]
        global_max_flux = max_fluxes[0]
    else:
        scaling_factors = [1.] * len(sources)
        global_max_flux = max(max_fluxes)

    ymin = -0.06 * global_max_flux
    ymax = 1.1 * global_max_flux

    # Set up the figure, the axis, and the plot element we want to animate
    fig = plt.figure()
    ax = plt.axes(xlim=(minwave, maxwave), ylim=(ymin, ymax))
    plt.axhline(y=0., c='k')
    plt.xlabel('Wavelength ($\\AA$)')
    plt.ylabel('Flux Density ($F_\\lambda$)')
    phase_text = ax.text(0.05, 0.95, '', ha='left', va='top',
                         transform=ax.transAxes)
    empty_lists = 2 * len(sources) * [[]]
    lines = ax.plot(*empty_lists, lw=1)
    if label is not None:
        for line, l in zip(lines, labels):
            line.set_label(l)
        legend = plt.legend(loc='upper right')

    def init():
        for line in lines:
            line.set_data([], [])
        phase_text.set_text('')
        return tuple(lines) + (phase_text,)

    def animate(i):
        current_phase = minphase + phase_interval * i
        for j in range(len(sources)):
            y = sources[j].flux(current_phase + phase_offsets[j], waves[j])
            lines[j].set_data(waves[j], y * scaling_factors[j])
        phase_text.set_text('phase = {0:.1f}'.format(current_phase))
        return tuple(lines) + (phase_text,)

    ani = animation.FuncAnimation(fig, animate, init_func=init,
                                  frames=int(fps * length),
                                  interval=(1000. / fps), blit=True)

    # Save the animation as an mp4 or webm file.
    # This requires that ffmpeg is installed.
    if fname is not None:
        # compute the extension index up front so it is available even
        # when `still` is False
        i = fname.rfind('.')
        if still:
            stillfname = fname[:i] + '.png'
            plt.savefig(stillfname)
        ext = fname[i + 1:]
        codec = {'mp4': 'libx264', 'webm': 'libvpx'}.get(ext, 'mpeg4')
        ani.save(fname, fps=fps, codec=codec,
                 extra_args=['-vcodec', codec],
                 writer='ffmpeg_file', bitrate=1800)
        plt.close()
    else:
        return ani

def calc_total_error(data, bkg_error, effective_gain):
    """
    Calculate a total error array, combining a background-only error
    array with the Poisson noise of sources.

    Parameters
    ----------
    data : array_like or `~astropy.units.Quantity`
        The data array.

    bkg_error : array_like or `~astropy.units.Quantity`
        The pixel-wise Gaussian 1-sigma background-only errors of the
        input ``data``.  ``bkg_error`` should include all sources of
        "background" error but *exclude* the Poisson error of the
        sources.  ``bkg_error`` must have the same shape as ``data``.

    effective_gain : float, array-like, or `~astropy.units.Quantity`
        Ratio of counts (e.g., electrons or photons) to the units of
        ``data`` used to calculate the Poisson error of the sources.

    Returns
    -------
    total_error : `~numpy.ndarray` or `~astropy.units.Quantity`
        The total error array.  If ``data``, ``bkg_error``, and
        ``effective_gain`` are all `~astropy.units.Quantity` objects,
        then ``total_error`` will also be returned as a
        `~astropy.units.Quantity` object.  Otherwise, a
        `~numpy.ndarray` will be returned.

    Notes
    -----
    To use units, ``data``, ``bkg_error``, and ``effective_gain`` must
    *all* be `~astropy.units.Quantity` objects.  A `ValueError` will be
    raised if only some of the inputs are `~astropy.units.Quantity`
    objects.

    The total error array, :math:`\\sigma_{\\mathrm{tot}}` is:

    .. math:: \\sigma_{\\mathrm{tot}} = \\sqrt{\\sigma_{\\mathrm{b}}^2 +
              \\frac{I}{g}}

    where :math:`\\sigma_b`, :math:`I`, and :math:`g` are the
    ``bkg_error`` image, ``data`` image, and ``effective_gain``,
    respectively.

    Pixels where ``data`` (:math:`I_i`) is negative do not contribute
    additional Poisson noise to the total error, i.e.
    :math:`\\sigma_{\\mathrm{tot}, i} = \\sigma_{\\mathrm{b}, i}`.  Note
    that this is different from `SExtractor`_, which sums the total
    variance in the segment, including pixels where :math:`I_i` is
    negative.  In such cases, `SExtractor`_ underestimates the total
    errors.  Also note that ``data`` should be background-subtracted to
    match SExtractor's errors.

    ``effective_gain`` can either be a scalar value or a 2D image with
    the same shape as the ``data``.  A 2D image is useful with mosaic
    images that have variable depths (i.e., exposure times) across the
    field.  For example, one should use an exposure-time map as the
    ``effective_gain`` for a variable depth mosaic image in count-rate
    units.

    If your input ``data`` are in units of ADU, then ``effective_gain``
    should represent electrons/ADU.  If your input ``data`` are in
    units of electrons/s then ``effective_gain`` should be the exposure
    time or an exposure time map (e.g., for mosaics with non-uniform
    exposure times).

    .. _SExtractor: http://www.astromatic.net/software/sextractor
    """
    data = np.asanyarray(data)
    bkg_error = np.asanyarray(bkg_error)
    inputs = [data, bkg_error, effective_gain]
    has_unit = [hasattr(x, 'unit') for x in inputs]
    use_units = all(has_unit)
    if any(has_unit) and not all(has_unit):
        raise ValueError('If any of data, bkg_error, or effective_gain '
                         'has units, then they must all have units.')

    if use_units:
        count_units = [u.electron, u.photon]
        datagain_unit = (data * effective_gain).unit
        if datagain_unit not in count_units:
            raise u.UnitsError('(data * effective_gain) has units of '
                               '"{0}", but it must have count units '
                               '(u.electron or u.photon).'
                               .format(datagain_unit))

    if not isiterable(effective_gain):
        effective_gain = np.zeros(data.shape) + effective_gain
    else:
        effective_gain = np.asanyarray(effective_gain)
        if effective_gain.shape != data.shape:
            raise ValueError('If input effective_gain is 2D, then it must '
                             'have the same shape as the input data.')
    if np.any(effective_gain <= 0):
        raise ValueError('effective_gain must be strictly positive '
                         'everywhere.')

    if use_units:
        source_variance = np.maximum(
            (data * data.unit) / effective_gain.value,
            0. * bkg_error.unit**2)
    else:
        source_variance = np.maximum(data / effective_gain, 0)

    return np.sqrt(bkg_error**2 + source_variance)

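# Minimal sketch of calc_total_error with plain (unitless) arrays; the
# numbers are illustrative only:
def _example_calc_total_error():
    data = np.array([[1., 4.], [9., 16.]])
    bkg_error = np.full((2, 2), 0.5)
    # total error = sqrt(bkg_error**2 + data / effective_gain)
    return calc_total_error(data, bkg_error, effective_gain=2.0)
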
def read_spec(ispec, exten=None, norm=True, **kwargs):
    """Parse spectrum out of the input

    If 2 spectra are given, the 2nd is scaled to the first.

    Parameters
    ----------
    ispec : XSpectrum1D, str, list of files (ordered blue to red),
        or tuple of arrays
    exten : int, optional
        FITS extension

    Returns
    -------
    spec : XSpectrum1D
    spec_file : str
    """
    from linetools.spectra import xspectrum1d as lsx
    from linetools.spectra import utils as ltsu
    from astropy.utils.misc import isiterable

    if isinstance(ispec, basestring):
        spec_fil = ispec
        if 'rsp_kwargs' in kwargs.keys():
            spec = lsx.XSpectrum1D.from_file(spec_fil, exten=exten,
                                             **kwargs['rsp_kwargs'])
        else:
            spec = lsx.XSpectrum1D.from_file(spec_fil, exten=exten)
    elif isinstance(ispec, lsx.XSpectrum1D):
        spec = ispec
        spec_fil = spec.filename  # Grab from Spectrum1D
    elif isinstance(ispec, tuple):
        spec = lsx.XSpectrum1D.from_tuple(ispec)
        spec_fil = 'none'
    elif isinstance(ispec, list):  # Multiple file names
        # Loop on the files
        for kk, ispecf in enumerate(ispec):
            if isiterable(exten):
                iexten = exten[kk]
            else:
                iexten = exten
            jspec = lsx.XSpectrum1D.from_file(ispecf, exten=iexten)
            if kk == 0:
                spec = jspec
                _, xper1 = ltsp.get_flux_plotrange(spec.flux, perc=0.9)
            else:
                # Scale flux for convenience of plotting (sig is not scaled)
                _, xper2 = ltsp.get_flux_plotrange(jspec.flux, perc=0.9)
                scl = xper1 / xper2
                # Splice
                spec = ltsu.splice_two(spec, jspec)  # , scale=scl)
        # Filename
        spec_fil = ispec[0]
        spec.filename = spec_fil
    else:
        raise ValueError('Bad input to read_spec: {}'.format(type(ispec)))

    # Normalize?
    if norm:
        if spec.co_is_set:
            spec.normed = True

    # Demand AA for wavelength unit (unless over-ridden)
    if spec.wavelength.unit != u.AA:
        wvAA = spec.wavelength.to('AA')
        spec.wavelength = wvAA

    # Return
    return spec, spec_fil