Example #1
    def classify(self, lattice):
        """Determine lattice element indices which might contain the TRISO particle.

        Parameters
        ----------
        lattice : openmc.RectLattice
            Lattice to check

        Returns
        -------
        list of tuple
            (z,y,x) lattice element indices which might contain the TRISO
            particle.

        """

        ll, ur = self.region.bounding_box
        if lattice.ndim == 2:
            (i_min, j_min), p = lattice.find_element(ll)
            (i_max, j_max), p = lattice.find_element(ur)
            return list(np.broadcast(*np.ogrid[
                j_min:j_max+1, i_min:i_max+1]))
        else:
            (i_min, j_min, k_min), p = lattice.find_element(ll)
            (i_max, j_max, k_max), p = lattice.find_element(ur)
            return list(np.broadcast(*np.ogrid[
                k_min:k_max+1, j_min:j_max+1, i_min:i_max+1]))
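A quick way to see what the ogrid/broadcast idiom above produces — a minimal standalone sketch (not from the OpenMC source), with hypothetical bounds as if find_element had returned them:

import numpy as np

i_min, i_max = 0, 1   # x
j_min, j_max = 1, 2   # y
k_min, k_max = 0, 1   # z
print(list(np.broadcast(*np.ogrid[
    k_min:k_max+1, j_min:j_max+1, i_min:i_max+1])))
# [(0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
#  (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]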
Example #2
    def _validate_mask_and_error(self):
        """
        Raise ValueError if the mask or error shape does not match the data
        (checked via `~numpy.broadcast`).
        """

        try:
            if self.mask is not None:
                np.broadcast(self.data, self.mask)
            maskmatch = True
        except ValueError:
            maskmatch = False

        try:
            if self.error is not None:
                np.broadcast(self.data, self.error)
            errmatch = True
        except ValueError:
            errmatch = False

        if not errmatch and not maskmatch:
            raise ValueError('NDData error and mask do not match data')
        elif not errmatch:
            raise ValueError('NDData error does not match data')
        elif not maskmatch:
            raise ValueError('NDData mask does not match data')
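The try/except pattern above relies on np.broadcast raising ValueError for incompatible shapes without allocating any result array. A minimal illustration of that behaviour:

import numpy as np

data = np.zeros((3, 4))
np.broadcast(data, np.zeros((1, 4)))      # OK: (1, 4) broadcasts to (3, 4)
try:
    np.broadcast(data, np.zeros((2, 4)))  # incompatible along axis 0
except ValueError:
    print('mask/error shape does not match data')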
Example #3
 def indices(self):
     if self.ndim == 2:
         return list(np.broadcast(*np.ogrid[
             :self.shape[1], :self.shape[0]]))
     else:
         return list(np.broadcast(*np.ogrid[
             :self.shape[2], :self.shape[1], :self.shape[0]]))
Example #4
def check_mean_sigma_keepdims(a, axis):
    mu1, sigma1 = mean_sigma(a, axis, keepdims=False)
    mu2, sigma2 = mean_sigma(a, axis, keepdims=True)

    assert_array_equal(mu1.ravel(), mu2.ravel())
    assert_array_equal(sigma1.ravel(), sigma2.ravel())

    assert_array_equal(np.broadcast(a, mu2).shape, a.shape)
    assert_array_equal(np.broadcast(a, sigma2).shape, a.shape)
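The shape assertions hold because keepdims=True keeps a size-1 axis where the reduction happened, so the statistic still broadcasts against the original array. A sketch of the same check with plain np.mean (mean_sigma itself is assumed to come from the host project, e.g. astroML):

import numpy as np

a = np.random.random((4, 5, 6))
mu = a.mean(axis=1, keepdims=True)             # shape (4, 1, 6)
assert np.broadcast(a, mu).shape == a.shape    # broadcasts back to (4, 5, 6)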
Example #5
 def _set_error(self, value):
     if value is not None:
         try:
             np.broadcast(self.data, value)
         except ValueError:
             raise ValueError("dimensions of `error` do not match data")
         else:
             self._error = value
     else:
         self._error = value
Example #6
import numpy as np
from numpy import log, exp
from scipy.special import logsumexp

def logsubtrexp(minuend, subtrahend, sign_minuend=None, sign_subtrahend=None):

    if sign_minuend is None:
        sign_minuend = np.ones(minuend.shape)
    if sign_subtrahend is None:
        sign_subtrahend = np.ones(subtrahend.shape)
    if not (minuend.shape == sign_minuend.shape and subtrahend.shape == sign_subtrahend.shape):
        raise ValueError("sign arguments expected to be of the same shape as the corresponding log-matrices")
    if not (np.all(np.abs(sign_minuend) == 1) and np.all(np.abs(sign_subtrahend) == 1)):
        raise ValueError("sign arguments expected to contain only +1 or -1 elements")
        
    b = np.broadcast(minuend, subtrahend)
    s_b = np.broadcast(sign_minuend, sign_subtrahend)
    abs_res = np.empty(b.shape)
    sign_res = np.empty(b.shape)
    
    for i in range(b.size):
        (m, s) = next(b)
        (sign_m, sign_s) = next(s_b)
        if sign_m > sign_s: # sign_m == 1 and sign_s == -1
            # this is equivalent to logsumexp(m, s)
            #print("sign_m > sign_s")
            sign_res.flat[i] = 1
            abs_res.flat[i] = logsumexp((m,s))
        elif sign_m < sign_s: # sign_m == -1 and sign_s == 1
            #print("sign_m < sign_s")
            sign_res.flat[i] = -1
            abs_res.flat[i] = logsumexp((m,s))
        else:
            #signs are equal
            if m == s:                
                sign_res.flat[i] = 1
                abs_res.flat[i] = -np.inf  # log(0)
            else:
                if sign_m == -1:
                    if m > s:
                        #print("m >= s")
                        sign_res.flat[i] = -1
                        abs_res.flat[i] = log(1 - exp(s - m)) + m
                    elif m < s:
                        #print("m < s")
                        sign_res.flat[i] = 1
                        abs_res.flat[i] = log(1 - exp(m - s)) + s
                else:# sign_m == 1
                    if m > s:
                        #print("m >= s")
                        sign_res.flat[i] = 1
                        abs_res.flat[i] = log(1 - exp(s - m)) + m
                    elif m < s:
                        #print("m < s")
                        sign_res.flat[i] = -1
                        abs_res.flat[i] = log(1 - exp(m - s)) + s
        #print(sign_m*exp(m),  sign_s*exp(s),  sign_m*exp(m) - sign_s*exp(s), sign_res.flat[i] * exp(abs_res.flat[i]))
    
    return (abs_res, sign_res)
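A quick sanity check of the routine above against direct arithmetic, assuming the imports added at the top of the example; exp(2) - exp(1) is positive, so the returned sign should be +1:

import numpy as np

abs_res, sign_res = logsubtrexp(np.array([2.0]), np.array([1.0]))
direct = np.exp(2.0) - np.exp(1.0)
assert np.isclose(sign_res[0] * np.exp(abs_res[0]), direct)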
Example #7
    def getPsfAtPoints(self, bandnum, x, y):
        '''
        Reconstruct the SDSS model PSF from KL basis functions.

        x,y can be scalars or 1-d numpy arrays.

        Return value:
        if x,y are scalars: a PSF image
        if x,y are arrays:  a list of PSF images
        '''
        rtnscalar = np.isscalar(x) and np.isscalar(y)
        x = np.atleast_1d(x)
        y = np.atleast_1d(y)
        psf = fits_table(self.hdus[bandnum+1].data)
        psfimgs = None
        (outh, outw) = (None,None)

        # From the IDL docs:
        # http://photo.astro.princeton.edu/photoop_doc.html#SDSS_PSF_RECON
        #   acoeff_k = SUM_i{ SUM_j{ (0.001*ROWC)^i * (0.001*COLC)^j * C_k_ij } }
        #   psfimage = SUM_k{ acoeff_k * RROWS_k }
        for k in range(len(psf)):
            nrb = psf.nrow_b[k]
            ncb = psf.ncol_b[k]
            c = psf.c[k].reshape(5, 5)
            c = c[:nrb,:ncb]
            (gridi,gridj) = np.meshgrid(range(nrb), range(ncb))

            if psfimgs is None:
                psfimgs = [np.zeros_like(psf.rrows[k]) for xy
                        in np.broadcast(x,y)]
                (outh,outw) = (psf.rnrow[k], psf.rncol[k])
            else:
                assert(psf.rnrow[k] == outh)
                assert(psf.rncol[k] == outw)

            for i,(xi,yi) in enumerate(np.broadcast(x,y)):
                #print 'xi,yi', xi,yi
                acoeff_k = np.sum(((0.001 * xi)**gridi * (0.001 * yi)**gridj * c))
                if False: # DEBUG
                    print('coeffs:', (0.001 * xi)**gridi * (0.001 * yi)**gridj)
                    print('c:', c)
                    for (coi,ci) in zip(((0.001 * xi)**gridi * (0.001 * yi)**gridj).ravel(), c.ravel()):
                        print('co %g, c %g' % (coi,ci))
                    print('acoeff_k', acoeff_k)

                #print 'acoeff_k', acoeff_k.shape, acoeff_k
                #print 'rrows[k]', psf.rrows[k].shape, psf.rrows[k]
                psfimgs[i] += acoeff_k * psf.rrows[k]

        psfimgs = [img.reshape((outh,outw)) for img in psfimgs]
        if rtnscalar:
            return psfimgs[0]
        return psfimgs
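The np.broadcast(x, y) iteration above is what lets x and y be any mix of scalars and 1-d arrays after atleast_1d: the broadcast object yields paired (xi, yi) scalars. A minimal illustration:

import numpy as np

x = np.atleast_1d(100.0)             # scalar promoted to shape (1,)
y = np.atleast_1d([10.0, 20.0])      # shape (2,)
for xi, yi in np.broadcast(x, y):
    print(xi, yi)                    # 100.0 10.0, then 100.0 20.0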
Example #8
def test_mean_sigma_keepdims(axis):
    np.random.seed(0)
    a = np.random.random((4, 5, 6))
    mu1, sigma1 = mean_sigma(a, axis, keepdims=False)
    mu2, sigma2 = mean_sigma(a, axis, keepdims=True)

    assert_array_equal(mu1.ravel(), mu2.ravel())
    assert_array_equal(sigma1.ravel(), sigma2.ravel())

    assert_array_equal(np.broadcast(a, mu2).shape, a.shape)
    assert_array_equal(np.broadcast(a, sigma2).shape, a.shape)
Example #9
 def _assert_incompatible_broadcast(self, shape1, shape2):
   if shape1.dims is not None and shape2.dims is not None:
     zeros1 = np.zeros(shape1.as_list())
     zeros2 = np.zeros(shape2.as_list())
     with self.assertRaises(ValueError):
       np.broadcast(zeros1, zeros2)
     with self.assertRaises(ValueError):
       np.broadcast(zeros2, zeros1)
   with self.assertRaises(ValueError):
     common_shapes.broadcast_shape(shape1, shape2)
   with self.assertRaises(ValueError):
     common_shapes.broadcast_shape(shape2, shape1)
Example #10
def test_broadcastable():
    for ndim1 in range(1, 4):
        for ndim2 in range(1, 4):
            for shape1 in itertools.permutations(range(1, 4), ndim1):
                for shape2 in itertools.permutations(range(1, 4), ndim2):
                    try:
                        np.broadcast(np.zeros(shape1),
                                     np.zeros(shape2))
                        result = True
                    except ValueError:
                        result = False
                    assert result == broadcastable(shape1, shape2)
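broadcastable here is the function under test, defined in the host project. A pure-Python version consistent with NumPy's rules might look like this sketch: align the shapes from the right, and require each aligned dimension pair to be equal or to contain a 1:

def broadcastable(shape1, shape2):
    # Missing leading dimensions behave like 1s, so only compare
    # the trailing dimensions that both shapes actually have.
    for d1, d2 in zip(reversed(shape1), reversed(shape2)):
        if d1 != d2 and d1 != 1 and d2 != 1:
            return False
    return True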
Example #11
 def _set_flags(self, value):
     if value is not None:
         if isinstance(value, np.ndarray):
             try:
                 np.broadcast(self.data, value)
             except ValueError:
                 raise ValueError("dimensions of `flags` do not match data")
             else:
                 self._flags = value
         else:
             raise TypeError("`flags` should be a Numpy array")
     else:
         self._flags = value
Example #12
    def __init__(self, w, mu, *args, **kwargs):
        _, sd = get_tau_sd(tau=kwargs.pop('tau', None),
                           sd=kwargs.pop('sd', None))

        distshape = np.broadcast(mu, sd).shape
        self.mu = mu = tt.as_tensor_variable(mu)
        self.sd = sd = tt.as_tensor_variable(sd)

        if not distshape:
            distshape = np.broadcast(mu.tag.test_value, sd.tag.test_value).shape

        super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=distshape),
                                            *args, **kwargs)
Example #13
 def near(x, l, h):
     #RESULT: array(3) & array([3, 4])
     a = broadcast(x,l,h)
     has_iterable = a.shape
     x,l,h = tuple(atleast_1d(i) for i in (x,l,h))
     b = broadcast(x,l,h)
     _x,_l,_h = (empty(b.shape), empty(b.shape), empty(b.shape))
     _x.flat = [i for i in x]
     _l.flat = [i for i in l]
     _h.flat = [i for i in h]
     result = asarray(list(map(_near, _x, _l, _h)))
     if not has_iterable:
         result = asarray(result[0])
     return result
Example #14
    def _natural_indices(self):
        """Iterate over all possible (x,y) or (x,y,z) lattice element indices.

        This property is used when constructing distributed cell and material
        paths. Most importantly, the iteration order matches that used on the
        Fortran side.

        """
        if self.ndim == 2:
            nx, ny = self.shape
            return np.broadcast(*np.ogrid[:nx, :ny])
        else:
            nx, ny, nz = self.shape
            return np.broadcast(*np.ogrid[:nx, :ny, :nz])
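The iteration order matters here: np.broadcast walks the broadcast result in C (row-major) order, so the last index varies fastest. For a 2x3 lattice:

import numpy as np

print(list(np.broadcast(*np.ogrid[:2, :3])))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]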
Example #15
 def _assert_broadcast(self, expected, shape1, shape2):
   if shape1.dims is not None and shape2.dims is not None:
     expected_np = expected.as_list()
     zeros1 = np.zeros(shape1.as_list())
     zeros2 = np.zeros(shape2.as_list())
     self.assertAllEqual(expected_np, np.broadcast(zeros1, zeros2).shape)
     self.assertAllEqual(expected_np, np.broadcast(zeros2, zeros1).shape)
     self.assertEqual(
         expected, common_shapes.broadcast_shape(shape1, shape2))
     self.assertEqual(
         expected, common_shapes.broadcast_shape(shape2, shape1))
   else:
     self.assertEqual(expected, common_shapes.broadcast_shape(shape1, shape2))
     self.assertEqual(expected, common_shapes.broadcast_shape(shape2, shape1))
Example #16
    def __call__(self, p, cols='all'):
        if cols == 'all':
            icols = np.arange(self.n_columns)
        else:
            icols = np.array([self.column_index[col] for col in cols])
        args = (p, self.grid, icols, self.index_columns)

        if self.ndim == 2:
            args = (p[0], p[1], self.grid, icols,
                    self.index_columns[0], self.index_columns[1])
            if ((isinstance(p[0], float) or isinstance(p[0], int)) and
                    (isinstance(p[1], float) or isinstance(p[1], int))):
                values = interp_value_2d(*args)
            else:
                b = np.broadcast(*p)
                pp = [np.atleast_1d(np.resize(x, b.shape)).astype(float) for x in p]
                args = (*pp, self.grid, icols, *self.index_columns)
                # print([(a, type(a)) for a in args])
                values = interp_values_2d(*args)
        elif self.ndim == 3:
            args = (p[0], p[1], p[2], self.grid, icols,
                    self.index_columns[0], self.index_columns[1],
                    self.index_columns[2])
            if ((isinstance(p[0], float) or isinstance(p[0], int)) and
                    (isinstance(p[1], float) or isinstance(p[1], int)) and
                    (isinstance(p[2], float) or isinstance(p[2], int))):
                values = interp_value_3d(*args)
            else:
                b = np.broadcast(*p)
                pp = [np.atleast_1d(np.resize(x, b.shape)).astype(float) for x in p]
                args = (*pp, self.grid, icols, *self.index_columns)
                # print([(a, type(a)) for a in args])
                values = interp_values_3d(*args)
        elif self.ndim == 4:
            args = (p[0], p[1], p[2], p[3], self.grid, icols,
                    self.index_columns[0], self.index_columns[1],
                    self.index_columns[2], self.index_columns[3])
            if ((isinstance(p[0], float) or isinstance(p[0], int)) and
                    (isinstance(p[1], float) or isinstance(p[1], int)) and
                    (isinstance(p[2], float) or isinstance(p[2], int)) and
                    (isinstance(p[3], float) or isinstance(p[3], int))):
                values = interp_value_4d(*args)
            else:
                b = np.broadcast(*p)
                pp = [np.atleast_1d(np.resize(x, b.shape)).astype(float) for x in p]
                values = interp_values_4d(*pp, self.grid, icols, *self.index_columns)

        return values
Example #17
    def creategrid(self,mode='full',boundary=[],_fileini=''):
        """ create a grid

        Parameters
        ----------

        full : boolean
            default (True) use all the layout area
        boundary : (xmin,ymin,xmax,ymax)
            if full is False the boundary argument is used

        """
        if mode=="file":
            self.RN = RadioNode(name='',
                               typ='rx',
                               _fileini = _fileini,
                               _fileant = 'def.vsh3')
            self.grid =self.RN.position[0:2,:].T
        else:
            if mode=="full":
                mi=np.min(list(self.L.Gs.pos.values()),axis=0)+0.01
                ma=np.max(list(self.L.Gs.pos.values()),axis=0)-0.01
            if mode=="zone":
                assert boundary != []
                mi = np.array([boundary[0],boundary[1]])
                ma = np.array([boundary[2],boundary[3]])

            x = np.linspace(mi[0],ma[0],self.nx)
            y = np.linspace(mi[1],ma[1],self.ny)

            self.grid=np.array((list(np.broadcast(*np.ix_(x, y)))))

        self.ng = self.grid.shape[0]
Example #18
 def binoutput(self, value, mask):
     binoutput = self._base.binoutput
     value = np.asarray(value)
     mask = np.asarray(mask)
     result = [binoutput(x, m)
               for x, m in np.broadcast(value.flat, mask.flat)]
     return _empty_bytes.join(result)
Example #19
def check_chisquare(f_obs, f_exp, ddof, axis, expected_chi2):
    # Use this only for arrays that have no masked values.
    f_obs = np.asarray(f_obs)
    if axis is None:
        num_obs = f_obs.size
    else:
        if axis == 'no':
            use_axis = 0
        else:
            use_axis = axis
        b = np.broadcast(f_obs, f_exp)
        num_obs = b.shape[use_axis]

    if axis == 'no':
        chi2, p = mstats.chisquare(f_obs, f_exp=f_exp, ddof=ddof)
    else:
        chi2, p = mstats.chisquare(f_obs, f_exp=f_exp, ddof=ddof, axis=axis)
    assert_array_equal(chi2, expected_chi2)

    ddof = np.asarray(ddof)
    expected_p = stats.distributions.chi2.sf(expected_chi2, num_obs - 1 - ddof)
    assert_array_equal(p, expected_p)

    # Also compare to stats.chisquare
    if axis == 'no':
        stats_chisq, stats_p = stats.chisquare(f_obs, f_exp=f_exp, ddof=ddof)
    else:
        stats_chisq, stats_p = stats.chisquare(f_obs, f_exp=f_exp, ddof=ddof,
                                               axis=axis)
    assert_array_almost_equal(chi2, stats_chisq)
    assert_array_almost_equal(p, stats_p)
Example #20
    def _fbasis_coeffs_for(self, ftype, fproj, fdjacs, nffpts):
        # Suitable quadrature rules for various face types
        qrule_map = {
            'line': ('gauss-legendre', self._order + 1),
            'quad': ('gauss-legendre', (self._order + 1)**2),
            'tri': ('williams-shunn', 36)
        }

        # Obtain a quadrature rule for integrating on the face
        qrule = get_quadrule(ftype, *qrule_map[ftype])

        # Project the rule points onto the various faces
        proj = fproj(*np.atleast_2d(qrule.np_points.T))
        qfacepts = np.vstack([list(np.broadcast(*p)) for p in proj])

        # Obtain a nodal basis on the reference face
        fname = self._cfg.get('solver-interfaces-' + ftype, 'flux-pts')
        ffpts = get_quadrule(ftype, fname, nffpts)
        nodeb = get_polybasis(ftype, self._order + 1, ffpts.np_points)

        L = nodeb.nodal_basis_at(qrule.np_points)

        M = self.ubasis.ortho_basis_at(qfacepts)
        M = M.reshape(-1, len(proj), len(qrule.np_points))

        # Do the quadrature
        S = np.einsum('i...,ik,jli->lkj', qrule.np_weights, L, M)

        # Account for differing face areas
        S *= np.asanyarray(fdjacs)[:,None,None]

        return S.reshape(-1, self.nupts)
Example #21
def harmonic_trajectory(mass, omega, gamma, ts, init_ps, init_qs) -> '(g nm/ps mol, nm, 1, kJ ps/mol)':
    """
    Analytically-determined time-propagated quantities for a harmonic
    oscillator trajectory.

    Parameters:
      mass: Mass of particle (g/mol).
      omega: Angular frequency of oscillator (1/ps).
      gamma: Coherent state width (1/nm^2).
      ts: NumPy array of times at which to evaluate the trajectory (ps).
      init_ps: NumPy array of initial momenta (g nm/ps mol).
      init_qs: NumPy array of initial positions (nm).

    Returns:
      Momenta, positions, Herman-Kluk prefactors, classical actions.
    """

    mesh_ts, mesh_ps, mesh_qs = meshgrid(ts, init_ps, init_qs)
    shape = np.broadcast(init_ps, init_qs).shape

    c = np.cos(omega * mesh_ts)
    s = np.sin(omega * mesh_ts)

    ps = mesh_ps * c - mass * omega * mesh_qs * s  # g nm/ps mol
    qs = mesh_ps * s / (mass * omega) + mesh_qs * c  # nm

    if gamma is not None:
        Rs = harmonic_hk(mass, omega, gamma, mesh_ts, shape)
    else:
        Rs = None

    Ss = 0.5 * ((mesh_ps * mesh_ps / (mass * omega) - mass * omega * mesh_qs * mesh_qs) * c - 2. * mesh_ps * mesh_qs * s) * s  # kJ ps/mol

    return ps, qs, Rs, Ss
Example #22
def kernel_reconstruct(kernels_theta, kernels_phi, weights,
                       grid_theta, grid_phi, kernel, N=8):
    """Kernel reconstruction of the PDF, on a grid on the sphere.

    Parameters
    ----------
    kernels_theta, kernels_phi : (K,) ndarray
        Positions of the kernels.
    weights : (K,) ndarray
        Weights of the kernels.
    grid_theta : (M,) ndarray
        Inclination angles of grid.
    grid_phi : (N,) ndarray
        Azimuth angles of grid.
    kernel : callable
        Kernel function used in the reconstruction.
    N : int
        Maximum order of kernel polynomials.

    Returns
    -------
    pdf : (M, N) ndarray
        Reconstruction of the PDF on the specified grid.

    """
    PDF_recon = np.zeros(np.broadcast(grid_theta, grid_phi).shape)

    for (k_theta, k_phi, w) in zip(kernels_theta, kernels_phi, weights):
        cos_theta = cos_inc_angle(grid_theta, grid_phi,
                                  k_theta, k_phi)

        PDF_recon += w * kernel(cos_theta, N)

    return PDF_recon
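The np.broadcast(...).shape call sizes the output grid without computing anything: if grid_theta and grid_phi arrive with orthogonal singleton axes, the accumulator comes out (M, N). A sketch of how such grids might be prepared (assumed usage, not from the source):

import numpy as np

grid_theta = np.linspace(0, np.pi, 64)[:, None]       # shape (64, 1)
grid_phi = np.linspace(0, 2 * np.pi, 128)[None, :]    # shape (1, 128)
print(np.broadcast(grid_theta, grid_phi).shape)       # (64, 128)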
Example #23
def broadcast_shape(a_shape, b_shape):
    global broadcast_shape_cache

    raise Exception('This function is probably a bad idea, because shape is not cached and overquerying can occur.')

    uid = (a_shape, b_shape)

    if uid not in broadcast_shape_cache:
        la = len(a_shape)
        lb = len(b_shape)
        ln = la if la > lb else lb

        ash = np.ones(ln, dtype=np.uint32)
        bsh = np.ones(ln, dtype=np.uint32)
        ash[-la:] = a_shape
        bsh[-lb:] = b_shape

        our_result = np.max(np.vstack((ash, bsh)), axis=0)

        if False:
            numpy_result = np.broadcast(np.empty(a_shape), np.empty(b_shape)).shape
            #print 'aaa' + str(our_result)
            #print 'bbb' + str(numpy_result)
            if not np.array_equal(our_result, numpy_result):
                import pdb; pdb.set_trace()
            assert(np.array_equal(our_result, numpy_result))

        broadcast_shape_cache[uid] = tuple(our_result)
    return broadcast_shape_cache[uid]
Example #24
def movetext(textid, x=None, y=None, dx=None, dy=None):
    '''Move the matplotlib Text instances selected by textid.

    textid   -- integer zero based index or a matplotlib Text instance.
                Can be also a list of indices or Text objects.
                When 'all', use all text objects in the current axis.
    x, y     -- new x, y positions of the specified text objects,
                floating point numbers or arrays.
                Positions are not changed when None.
    dx, dy   -- offsets in the x or y directions, numbers or arrays.
                These are applied after setting the x, y coordinates.

    Return a list of Text instances.
    '''
    import numpy
    texthandles = findtexts(textid)
    for hti, xi, yi, dxi, dyi in numpy.broadcast(texthandles, x, y, dx, dy):
        xt, yt = hti.get_position()
        if xi is not None:
            xt = xi
        if dxi is not None:
            xt += dxi
        if yi is not None:
            yt = yi
        if dyi is not None:
            yt += dyi
        hti.set_position((xt, yt))
    if texthandles:
        _redraw()
    return texthandles
Example #25
    def log_prior(self, theta_arr):
        theta_arr = np.array(theta_arr + [None])[:-1]

        return np.sum([log_norm(t.reshape(t.shape[:-1]), mu, sigma)
                       for (t, mu, sigma)
                       in np.broadcast(theta_arr, self.mu_params,
                                       self.sigma_params)], 0)
Example #26
 def _broadcast_shape(*args):
     """Returns the shape of the ararys that would result from broadcasting the
     supplied arrays against each other.
     """
     if not args:
         raise ValueError('must provide at least one argument')
     if len(args) == 1:
         # a single argument does not work with np.broadcast
         return np.asarray(args[0]).shape
     # use the old-iterator because np.nditer does not handle size 0 arrays
     # consistently
     b = np.broadcast(*args[:32])
     # unfortunately, it cannot handle 32 or more arguments directly
     for pos in range(32, len(args), 31):
         b = np.broadcast(b, *args[pos:(pos + 31)])
     return b.shape
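The chunked loop works around np.broadcast's historical 32-argument limit: each pass folds the running broadcast object b back in as a single argument, leaving room for 31 more. Example behaviour (treating the helper above as a free function):

import numpy as np

args = [np.zeros((1,))] * 40 + [np.zeros((5,))]   # 41 arguments
print(_broadcast_shape(*args))                    # (5,)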
Example #27
def busday_count_mask_NaT(begindates, enddates, out=None):
    """
    Simple wrapper around numpy.busday_count that returns `float` arrays rather
    than int arrays, and handles `NaT`s by returning `NaN`s where the inputs
    were `NaT`.

    Doesn't support custom weekdays or calendars, but probably should in the
    future.

    See Also
    --------
    np.busday_count
    """
    if out is None:
        out = empty(broadcast(begindates, enddates).shape, dtype=float)

    beginmask = begindates == NaTD
    endmask = enddates == NaTD

    out = busday_count(
        # Temporarily fill in non-NaT values.
        where(beginmask, _notNaT, begindates),
        where(endmask, _notNaT, enddates),
        out=out,
    )

    # Fill in entries where either comparison was NaT with nan in the output.
    out[beginmask | endmask] = nan
    return out
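A hypothetical call, assuming the module-level names used above (NaTD bound to np.datetime64('NaT') and _notNaT to any valid placeholder date):

import numpy as np

begindates = np.array(['2017-01-02', 'NaT'], dtype='datetime64[D]')
enddates = np.array(['2017-01-09', '2017-01-09'], dtype='datetime64[D]')
print(busday_count_mask_NaT(begindates, enddates))
# [ 5. nan] -- five business days, then NaN where the input was NaT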
Example #28
 def __init__(self, s, **kwargs):
     """."""
     super(DiagLinop, self).__init__(**kwargs)
     test_in = np.empty(self.inshape, self.indtype)
     test_out = np.empty(self.outshape, self.outdtype)
     try:
         np.broadcast(s, test_in)
         np.broadcast(s, test_out)
     except ValueError:
         errstr = (
             's must have shape that is broadcastable with inshape and'
             ' outshape (which must be the same)'
         )
         raise ValueError(errstr)
     self._s = s
     self._sconj = np.conj(s)
Example #29
    def creategrid(self,full=True,boundary=[]):
        """ create a grid

        Parameters
        ----------

        full : boolean
            default (True) use all the layout area
        boundary : (xmin,ymin,xmax,ymax)
            if full is False the boundary argument is used

        """

        if full:
            mi=np.min(list(self.L.Gs.pos.values()),axis=0)+0.01
            ma=np.max(list(self.L.Gs.pos.values()),axis=0)-0.01
        else:
            assert boundary != []
            mi = np.array([boundary[0],boundary[1]])
            ma = np.array([boundary[2],boundary[3]])

        x = np.linspace(mi[0],ma[0],self.nx)
        y = np.linspace(mi[1],ma[1],self.ny)

        self.grid=np.array((list(np.broadcast(*np.ix_(x, y)))))
Example #30
    def testHarmonicOscillator(self):
        """
        Check the semiclassical trajectory for a harmonic oscillator.
        """

        ps = np.linspace(-hp['init_p_max'], hp['init_p_max'], 7)  # g nm/ps mol
        qs = np.linspace(-hp['init_q_max'], hp['init_q_max'], 11)  # nm
        init_ps, init_qs = meshgrid(ps, qs)
        shape = np.broadcast(init_ps, init_qs).shape

        ho_fs = harmonic(m=hp['mass'], omega=hp['omega'])

        ts = np.linspace(0., 4. * np.pi / hp['omega'], 6000)  # ps
        dt = ts[1] - ts[0]  # ps
        omts = hp['omega'] * ts  # 1

        traj = SemiclassicalTrajectory(hp['gamma'], hp['mass'], dt, ho_fs, init_ps, init_qs, max_steps=len(ts))

        calculated_p = np.empty((len(ts), len(ps), len(qs)))  # g nm/ps mol
        calculated_q = np.empty_like(calculated_p)  # nm
        calculated_R = np.empty_like(calculated_p, dtype=complex)  # 1
        calculated_S = np.empty_like(calculated_p)  # kJ ps/mol

        for i, step in enumerate(traj):
            _, calculated_p[i], calculated_q[i], calculated_R[i], calculated_S[i] = step

        exact_ps, exact_qs, exact_Rs, exact_Ss = harmonic_trajectory(hp['mass'], hp['omega'], hp['gamma'], ts, init_ps, init_qs)

        assert_array_almost_equal(calculated_p, exact_ps)
        assert_array_almost_equal(calculated_q, exact_qs)
        assert_array_almost_equal(calculated_R, exact_Rs)
        assert_array_almost_equal(calculated_S, exact_Ss)
Example #31
def circles(x, y, s, ax, c='b', vmin=None, vmax=None, **kwargs):
    """
    Make a scatter of circles plot of x vs y, where x and y are sequence 
    like objects of the same lengths. The size of circles are in data scale.

    Parameters
    ----------
    x,y : scalar or array_like, shape (n, )
        Input data
    s : scalar or array_like, shape (n, ) 
        Radius of circle in data unit.
    c : color or sequence of color, optional, default : 'b'
        `c` can be a single color format string, or a sequence of color
        specifications of length `N`, or a sequence of `N` numbers to be
        mapped to colors using the `cmap` and `norm` specified via kwargs.
        Note that `c` should not be a single numeric RGB or RGBA sequence 
        because that is indistinguishable from an array of values
        to be colormapped. (If you insist, use `color` instead.)  
        `c` can be a 2-D array in which the rows are RGB or RGBA, however. 
    vmin, vmax : scalar, optional, default: None
        `vmin` and `vmax` are used in conjunction with `norm` to normalize
        luminance data.  If either are `None`, the min and max of the
        color array is used.
    kwargs : `~matplotlib.collections.Collection` properties
        Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), 
        norm, cmap, transform, etc.

    Returns
    -------
    paths : `~matplotlib.collections.PathCollection`

    Examples
    --------
    a = np.arange(11)
    circles(a, a, a*0.2, c=a, alpha=0.5, edgecolor='none')
    plt.colorbar()

    License
    --------
    This code is under [The BSD 3-Clause License]
    (http://opensource.org/licenses/BSD-3-Clause)
    """

    from matplotlib.patches import Circle
    from matplotlib.collections import PatchCollection
    import numpy as np
    import matplotlib.pyplot as plt
    #if np.isscalar(c):
    #    kwargs.setdefault('color', c)
    #    c = None
    if c is not None:
        kwargs.setdefault('color', c)
        c = None
    if 'zorder' in kwargs:
        kwargs.setdefault('zorder', kwargs.pop('zorder'))

    if 'fc' in kwargs: kwargs.setdefault('facecolor', kwargs.pop('fc'))
    if 'ec' in kwargs: kwargs.setdefault('edgecolor', kwargs.pop('ec'))
    if 'ls' in kwargs: kwargs.setdefault('linestyle', kwargs.pop('ls'))
    if 'lw' in kwargs: kwargs.setdefault('linewidth', kwargs.pop('lw'))

    patches = [Circle((x_, y_), s_) for x_, y_, s_ in np.broadcast(x, y, s)]
    collection = PatchCollection(patches, **kwargs)
    #if c is not None:
    #    collection.set_array(np.asarray(c))
    #    collection.set_clim(vmin, vmax)

    #ax = plt.gca()
    ax.add_collection(collection)
    ax.autoscale_view()
    if c is not None:
        plt.sci(collection)
    return collection


# example usage

# plt.figure(figsize=(6,4))
# ax = plt.subplot(aspect='equal')
#
# #plot a set of circle
# a = np.arange(11)
# out = circles(a, a, a*0.2, c=a, alpha=0.5, ec='none')
# plt.colorbar()
#
# #plot one circle (the lower-right one)
# circles(1, 0, 0.4, 'r', ls='--', lw=5, fc='none', transform=ax.transAxes)
# xlim(0,10)
# ylim(0,10)
# plt.show()
#
# exit(1)
Example #32
def sample_ppc_w(traces,
                 samples=None,
                 models=None,
                 weights=None,
                 random_seed=None,
                 progressbar=True):
    """Generate weighted posterior predictive samples from a list of models and
    a list of traces according to a set of weights.

    Parameters
    ----------
    traces : list
        List of traces generated from MCMC sampling. The number of traces should
        be equal to the number of weights.
    samples : int
        Number of posterior predictive samples to generate. Defaults to the
        length of the shorter trace in traces.
    models : list
        List of models used to generate the list of traces. The number of models
        should be equal to the number of weights and the number of observed RVs
        should be the same for all models.
        By default a single model will be inferred from `with` context, in this
        case results will only be meaningful if all models share the same
        distributions for the observed RVs.
    weights: array-like
        Individual weights for each trace. Default, same weight for each model.
    random_seed : int
        Seed for the random number generator.
    progressbar : bool
        Whether or not to display a progress bar in the command line. The
        bar shows the percentage of completion, the sampling speed in
        samples per second (SPS), and the estimated remaining time until
        completion ("expected time of arrival"; ETA).

    Returns
    -------
    samples : dict
        Dictionary with the variables as keys. The values corresponding to the
        posterior predictive samples from the weighted models.
    """
    np.random.seed(random_seed)

    if models is None:
        models = [modelcontext(models)] * len(traces)

    if weights is None:
        weights = [1] * len(traces)

    if len(traces) != len(weights):
        raise ValueError('The number of traces and weights should be the same')

    if len(models) != len(weights):
        raise ValueError('The number of models and weights should be the same')

    length_morv = len(models[0].observed_RVs)
    if not all(len(i.observed_RVs) == length_morv for i in models):
        raise ValueError(
            'The number of observed RVs should be the same for all models')

    weights = np.asarray(weights)
    p = weights / np.sum(weights)

    min_tr = min([len(i) for i in traces])

    n = (min_tr * p).astype('int')
    # ensure n sum up to min_tr
    idx = np.argmax(n)
    n[idx] = n[idx] + min_tr - np.sum(n)
    trace = np.concatenate(
        [np.random.choice(traces[i], j) for i, j in enumerate(n)])

    obs = [x for m in models for x in m.observed_RVs]
    variables = np.repeat(obs, n)

    lengths = list(
        set([np.shape(np.atleast_1d(o.distribution.default())) for o in obs]))

    if len(lengths) == 1:
        size = [None for i in variables]
    elif len(lengths) > 2:
        raise ValueError('Observed variables could not be broadcast together')
    else:
        size = []
        x = np.zeros(shape=lengths[0])
        y = np.zeros(shape=lengths[1])
        b = np.broadcast(x, y)
        for var in variables:
            l = np.shape(np.atleast_1d(var.distribution.default()))
            if l != b.shape:
                size.append(b.shape)
            else:
                size.append(None)
    len_trace = len(trace)

    if samples is None:
        samples = len_trace

    indices = np.random.randint(0, len_trace, samples)

    if progressbar:
        indices = tqdm(indices, total=samples)

    try:
        ppc = defaultdict(list)
        for idx in indices:
            param = trace[idx]
            var = variables[idx]
            ppc[var.name].append(
                var.distribution.random(point=param, size=size[idx]))

    except KeyboardInterrupt:
        pass

    finally:
        if progressbar:
            indices.close()

    return {k: np.asarray(v) for k, v in ppc.items()}
Example #33
    def _get_multi_index(self, arr, indices):
        """Mimic multi dimensional indexing.

        Parameters
        ----------
        arr : ndarray
            Array to be indexed.
        indices : tuple of index objects

        Returns
        -------
        out : ndarray
            An array equivalent to the indexing operation (but always a copy).
            `arr[indices]` should be identical.
        no_copy : bool
            Whether the indexing operation requires a copy. If this is `True`,
            `np.may_share_memory(arr, arr[indices])` should be `True` (with
            some exceptions for scalars and possibly 0-d arrays).

        Notes
        -----
        The function may mostly match the errors of normal indexing, but this
        is not guaranteed in general.
        """
        in_indices = list(indices)
        indices = []
        # if False, this is a fancy or boolean index
        no_copy = True
        # number of fancy/scalar indexes that are not consecutive
        num_fancy = 0
        # number of dimensions indexed by a "fancy" index
        fancy_dim = 0
        # NOTE: This is a funny twist (and probably OK to change).
        # The boolean array has illegal indexes, but this is
        # allowed if the broadcast fancy-indices are 0-sized.
        # This variable is to catch that case.
        error_unless_broadcast_to_empty = False

        # We need to handle Ellipsis and make arrays from indices, also
        # check if this is fancy indexing (set no_copy).
        ndim = 0
        ellipsis_pos = None  # define here mostly to replace all but first.
        for i, indx in enumerate(in_indices):
            if indx is None:
                continue
            if isinstance(indx, np.ndarray) and indx.dtype == bool:
                no_copy = False
                if indx.ndim == 0:
                    raise IndexError
                # boolean indices can have higher dimensions
                ndim += indx.ndim
                fancy_dim += indx.ndim
                continue
            if indx is Ellipsis:
                if ellipsis_pos is None:
                    ellipsis_pos = i
                    continue  # do not increment ndim counter
                raise IndexError
            if isinstance(indx, slice):
                ndim += 1
                continue
            if not isinstance(indx, np.ndarray):
                # This could be open for changes in numpy.
                # numpy should maybe raise an error if casting to intp
                # is not safe. It rejects np.array([1., 2.]) but not
                # [1., 2.] as index (same for ie. np.take).
                # (Note the importance of empty lists if changing this here)
                indx = np.array(indx, dtype=np.intp)
                in_indices[i] = indx
            elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
                raise IndexError('arrays used as indices must be of '
                                 'integer (or boolean) type')
            if indx.ndim != 0:
                no_copy = False
            ndim += 1
            fancy_dim += 1

        if arr.ndim - ndim < 0:
            # we can't take more dimensions than we have, not even for 0-d
            # arrays.  since a[()] makes sense, but not a[(),]. We will
            # raise an error later on, unless a broadcasting error occurs
            # first.
            raise IndexError

        if ndim == 0 and None not in in_indices:
            # Well we have no indexes or one Ellipsis. This is legal.
            return arr.copy(), no_copy

        if ellipsis_pos is not None:
            in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] *
                                                         (arr.ndim - ndim))

        for ax, indx in enumerate(in_indices):
            if isinstance(indx, slice):
                # convert to an index array
                indx = np.arange(*indx.indices(arr.shape[ax]))
                indices.append(['s', indx])
                continue
            elif indx is None:
                # this is like taking a slice with one element from a new axis:
                indices.append(['n', np.array([0], dtype=np.intp)])
                arr = arr.reshape((arr.shape[:ax] + (1, ) + arr.shape[ax:]))
                continue
            if isinstance(indx, np.ndarray) and indx.dtype == bool:
                if indx.shape != arr.shape[ax:ax + indx.ndim]:
                    raise IndexError

                try:
                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
                                                     arr.shape[ax:ax +
                                                               indx.ndim],
                                                     mode='raise')
                except Exception:
                    error_unless_broadcast_to_empty = True
                    # fill with 0s instead, and raise error later
                    flat_indx = np.array([0] * indx.sum(), dtype=np.intp)
                # concatenate axis into a single one:
                if indx.ndim != 0:
                    arr = arr.reshape(
                        (arr.shape[:ax] +
                         (np.prod(arr.shape[ax:ax + indx.ndim]), ) +
                         arr.shape[ax + indx.ndim:]))
                    indx = flat_indx
                else:
                    # This could be changed, a 0-d boolean index can
                    # make sense (even outside the 0-d indexed array case)
                    # Note that originally this could be interpreted as
                    # integer in the full integer special case.
                    raise IndexError
            else:
                # If the index is a singleton, the bounds check is done
                # before the broadcasting. This used to be different in <1.9
                if indx.ndim == 0:
                    if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
                        raise IndexError
            if indx.ndim == 0:
                # The index is a scalar. This used to be two fold, but if
                # fancy indexing was active, the check was done later,
                # possibly after broadcasting it away (1.7. or earlier).
                # Now it is always done.
                if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
                    raise IndexError
            if (len(indices) > 0 and indices[-1][0] == 'f'
                    and ax != ellipsis_pos):
                # NOTE: There could still have been a 0-sized Ellipsis
                # between them. Checked that with ellipsis_pos.
                indices[-1].append(indx)
            else:
                # We have a fancy index that is not after an existing one.
                # NOTE: A 0-d array triggers this as well, while one may
                # expect it to not trigger it, since a scalar would not be
                # considered fancy indexing.
                num_fancy += 1
                indices.append(['f', indx])

        if num_fancy > 1 and not no_copy:
            # We have to flush the fancy indexes left
            new_indices = indices[:]
            axes = list(range(arr.ndim))
            fancy_axes = []
            new_indices.insert(0, ['f'])
            ni = 0
            ai = 0
            for indx in indices:
                ni += 1
                if indx[0] == 'f':
                    new_indices[0].extend(indx[1:])
                    del new_indices[ni]
                    ni -= 1
                    for ax in range(ai, ai + len(indx[1:])):
                        fancy_axes.append(ax)
                        axes.remove(ax)
                ai += len(indx) - 1  # axis we are at
            indices = new_indices
            # and now we need to transpose arr:
            arr = arr.transpose(*(fancy_axes + axes))

        # We only have one 'f' index now and arr is transposed accordingly.
        # Now handle newaxis by reshaping...
        ax = 0
        for indx in indices:
            if indx[0] == 'f':
                if len(indx) == 1:
                    continue
                # First of all, reshape arr to combine fancy axes into one:
                orig_shape = arr.shape
                orig_slice = orig_shape[ax:ax + len(indx[1:])]
                arr = arr.reshape(
                    (arr.shape[:ax] + (np.prod(orig_slice).astype(int), ) +
                     arr.shape[ax + len(indx[1:]):]))

                # Check if broadcasting works
                if len(indx[1:]) != 1:
                    res = np.broadcast(*indx[1:])  # raises ValueError...
                else:
                    res = indx[1]
                # unfortunately the indices might be out of bounds. So check
                # that first, and use mode='wrap' then. However only if
                # there are any indices...
                if res.size != 0:
                    if error_unless_broadcast_to_empty:
                        raise IndexError
                    for _indx, _size in zip(indx[1:], orig_slice):
                        if _indx.size == 0:
                            continue
                        if np.any(_indx >= _size) or np.any(_indx < -_size):
                            raise IndexError
                if len(indx[1:]) == len(orig_slice):
                    if np.prod(orig_slice) == 0:
                        # Work around for a crash or IndexError with 'wrap'
                        # in some 0-sized cases.
                        try:
                            mi = np.ravel_multi_index(indx[1:],
                                                      orig_slice,
                                                      mode='raise')
                        except Exception:
                            # This happens with 0-sized orig_slice (sometimes?)
                            # here it is a ValueError, but indexing gives a:
                            raise IndexError('invalid index into 0-sized')
                    else:
                        mi = np.ravel_multi_index(indx[1:],
                                                  orig_slice,
                                                  mode='wrap')
                else:
                    # Maybe never happens...
                    raise ValueError
                arr = arr.take(mi.ravel(), axis=ax)
                arr = arr.reshape(
                    (arr.shape[:ax] + mi.shape + arr.shape[ax + 1:]))
                ax += mi.ndim
                continue

            # If we are here, we have a 1D array for take:
            arr = arr.take(indx[1], axis=ax)
            ax += 1

        return arr, no_copy
Example #34
def redshift(z_in, z_out, data_in=None, data_out=None, rules=[]):
    """Transform spectral data from redshift z_in to z_out.

    Each quantity X is transformed according to a power law::

        X_out = X_in * ((1 + z_out) / (1 + z_in))**exponent

    where exponents are specified with the ``rules`` argument. Exponents for
    some common cases are listed in the table below.

    ======== ================================================================
    Exponent Quantities
    ======== ================================================================
    0        flux density in photons/(s*cm^2*Ang)
    +1       wavelength, wavelength error, flux density in ergs/(s*cm^2*Hz)
    -1       frequency, frequency error, flux density in ergs/(s*cm^2*Ang)
    +2       inverse variance of flux density in ergs/(s*cm^2*Ang)
    -2       inverse variance of flux density in ergs/(s*cm^2*Hz)
    ======== ================================================================

    For example, to transform separate wavelength and flux arrays using the
    SDSS standard units of Ang and 1e-17 erg/(s*cm^2*Ang):

    >>> wlen = np.arange(4000., 10000.)
    >>> flux = np.ones(wlen.shape)
    >>> result = redshift(z_in=0, z_out=1, rules=[
    ... dict(name='wlen', exponent=+1, array_in=wlen),
    ... dict(name='flux', exponent=-1, array_in=flux)])
    >>> result.dtype
    dtype([('wlen', '<f8'), ('flux', '<f8')])
    >>> result['flux'][0]
    0.5

    The same calculation could be performed with the input data stored in
    a numpy structured array, in which case any additional fields are
    copied to the output array:

    >>> data = np.empty(6000, dtype=[
    ... ('wlen', float), ('flux', float), ('maskbits', int)])
    >>> data['wlen'] = np.arange(4000., 10000.)
    >>> data['flux'] = np.ones_like(data['wlen'])
    >>> result = redshift(z_in=0, z_out=1, data_in=data, rules=[
    ... dict(name='wlen', exponent=+1),
    ... dict(name='flux', exponent=-1)])
    >>> result.dtype
    dtype([('wlen', '<f8'), ('flux', '<f8'), ('maskbits', '<i8')])
    >>> result['flux'][0]
    0.5

    The transformed result is always a `numpy structured array
    <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`__, with field
    (column) names determined by the rules you provide.

    The usual `numpy broadcasting rules
    <http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`__ apply
    in the transformation expression above so, for example, the same redshift
    can be applied to multiple spectra, or different redshifts can be applied
    to the same spectrum with appropriate input shapes.

    Input arrays can have associated `masks
    <http://docs.scipy.org/doc/numpy/reference/maskedarray.html>`__ and these
    will be propagated to the output. Input arrays can also have `units
    <http://astropy.readthedocs.io/en/latest/units/index.html>`__ but these
    will not be used or propagated to the output since numpy structured arrays
    do not support per-column units.

    Parameters
    ----------
    z_in : float or numpy.ndarray
        Redshift(s) of the input spectral data, which must all be > -1.
    z_out : float or numpy.ndarray
        Redshift(s) of the output spectral data, which must all be > -1.
    data_in : numpy.ndarray
        Structured numpy array containing input spectrum data to transform. If
        none is specified, then all quantities must be provided as numpy arrays
        in the rules.
    data_out : numpy.ndarray
        Structured numpy array where output spectrum data should be written. If
        none is specified, then an appropriately sized array will be allocated
        and returned. Use this method to take control of the memory allocation
        and, for example, re-use the same output array for a sequence of
        transforms.
    rules : iterable
        An iterable object whose elements are dictionaries. Each dictionary
        specifies how one quantity will be transformed and must contain 'name'
        and 'exponent' values. If an 'array_in' value is also specified, it
        should refer to a numpy array containing the input values to transform.
        Otherwise, ``data_in[<name>]`` is assumed to contain the input values
        to transform.  If no ``rules`` are specified and ``data_in`` is
        provided, then ``data_out`` is just a copy of ``data_in``.

    Returns
    -------
    numpy.ndarray
        Array of spectrum data with the redshift transform applied. Equal to
        data_out when set, otherwise a new array is allocated. If ``data_in``
        is specified, then any fields not listed in ``rules`` are copied to
        ``data_out``, so effectively have an implicit exponent of zero.
    """

    if not isinstance(z_in, np.ndarray):
        z_in = float(z_in)
    if np.any(z_in <= -1):
        raise ValueError('Found invalid z_in <= -1.')
    if not isinstance(z_out, np.ndarray):
        z_out = float(z_out)
    if np.any(z_out <= -1):
        raise ValueError('Found invalid z_out <= -1.')
    z_factor = (1.0 + z_out) / (1.0 + z_in)

    if data_in is not None and not isinstance(data_in, np.ndarray):
        raise ValueError('Invalid data_in type: {0}.'.format(type(data_in)))
    if data_out is not None and not isinstance(data_out, np.ndarray):
        raise ValueError('Invalid data_out type: {0}.'.format(type(data_out)))

    if data_in is not None:
        shape_in = data_in.shape
        dtype_in = data_in.dtype
        masked_in = ma.isMA(data_in)
    else:
        shape_in = None
        dtype_in = []
        masked_in = False

    for i, rule in enumerate(rules):
        name = rule.get('name')
        if not isinstance(name, str):
            raise ValueError('Invalid name in rule: {0}'.format(name))
        try:
            exponent = float(rule.get('exponent'))
        except TypeError:
            raise ValueError('Invalid exponent for {0}: {1}.'.format(
                name, rule.get('exponent')))
        if data_in is not None and name not in dtype_in.names:
            raise ValueError('No such data_in field named {0}.'.format(name))
        if data_out is not None and name not in data_out.dtype.names:
            raise ValueError('No such data_out field named {0}.'.format(name))
        array_in = rule.get('array_in')
        if array_in is not None:
            if data_in is not None:
                raise ValueError(
                    'Cannot specify data_in and array_in for {0}.'.format(
                        name))
            if not isinstance(array_in, np.ndarray):
                raise ValueError('Invalid array_in type for {0}: {1}.'.format(
                    name, type(array_in)))
            if shape_in is None:
                shape_in = array_in.shape
            elif shape_in != array_in.shape:
                raise ValueError(
                    'Incompatible array_in shape for {0}: {1}. Expected {2}.'.
                    format(name, array_in.shape, shape_in))
            dtype_in.append((name, array_in.dtype))
            if ma.isMA(array_in):
                masked_in = True
        else:
            if data_in is None:
                raise ValueError(
                    'Missing array_in for {0} (with no data_in).'.format(name))
            # Save a view of the input data column associated with this rule.
            rules[i]['array_in'] = data_in[name]

    shape_out = np.broadcast(np.empty(shape_in), z_factor).shape
    if data_out is None:
        if masked_in:
            data_out = ma.empty(shape_out, dtype=dtype_in)
            data_out.mask = False
        else:
            data_out = np.empty(shape_out, dtype=dtype_in)
    else:
        if masked_in and not ma.isMA(data_out):
            raise ValueError('data_out discards data_in mask.')
        if data_out.shape != shape_out:
            raise ValueError(
                'Invalid data_out shape: {0}. Expected {1}.'.format(
                    data_out.shape, shape_out))
        if data_out.dtype != dtype_in:
            raise ValueError(
                'Invalid data_out dtype: {0}. Expected {1}.'.format(
                    data_out.dtype, dtype_in))

    if data_in is not None:
        # Copy data_in to data_out so that any columns not listed in the
        # rules are propagated to the output.
        data_out[...] = data_in

    for rule in rules:
        name = rule.get('name')
        exponent = float(rule.get('exponent'))
        array_in = rule.get('array_in')
        data_out[name][:] = array_in * z_factor**exponent
        if data_in is None and ma.isMA(array_in):
            data_out[name].mask[...] = array_in.mask

    return data_out
Example #35
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs):
    """
	Make a scatter of circles plot of x vs y, where x and y are sequence 
	like objects of the same lengths. The size of circles are in data scale.

	Parameters
	----------
	x,y : scalar or array_like, shape (n, )
	    Input data
	s : scalar or array_like, shape (n, ) 
	    Radius of circle in data unit.
	c : color or sequence of color, optional, default : 'b'
	    `c` can be a single color format string, or a sequence of color
	    specifications of length `N`, or a sequence of `N` numbers to be
	    mapped to colors using the `cmap` and `norm` specified via kwargs.
	    Note that `c` should not be a single numeric RGB or RGBA sequence 
	    because that is indistinguishable from an array of values
	    to be colormapped. (If you insist, use `color` instead.)  
	    `c` can be a 2-D array in which the rows are RGB or RGBA, however. 
	vmin, vmax : scalar, optional, default: None
	    `vmin` and `vmax` are used in conjunction with `norm` to normalize
	    luminance data.  If either are `None`, the min and max of the
	    color array is used.
	kwargs : `~matplotlib.collections.Collection` properties
	    Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), 
	    norm, cmap, transform, etc.

	Returns
	-------
	paths : `~matplotlib.collections.PathCollection`

	Examples
	--------
	a = np.arange(11)
	circles(a, a, a*0.2, c=a, alpha=0.5, edgecolor='none')
	plt.colorbar()

	License
	--------
	This code is under [The BSD 3-Clause License]
	(http://opensource.org/licenses/BSD-3-Clause)
	"""

    try:
        basestring
    except NameError:
        basestring = str

    if np.isscalar(c):
        kwargs.setdefault('color', c)
        c = None
    if 'fc' in kwargs: kwargs.setdefault('facecolor', kwargs.pop('fc'))
    if 'ec' in kwargs: kwargs.setdefault('edgecolor', kwargs.pop('ec'))
    if 'ls' in kwargs: kwargs.setdefault('linestyle', kwargs.pop('ls'))
    if 'lw' in kwargs: kwargs.setdefault('linewidth', kwargs.pop('lw'))

    patches = [
        Circle((x_, y_), s_, fill=False)
        for x_, y_, s_ in np.broadcast(x, y, s)
    ]
    collection = PatchCollection(patches, **kwargs)
    if c is not None:
        collection.set_array(np.asarray(c))
        collection.set_clim(vmin, vmax)

    ax = plt.gca()
    ax.add_collection(collection)
    ax.autoscale_view()
    if c is not None:
        plt.sci(collection)

    return collection
Example #36
 def __call__(self, theta_rad, phi_rad):
     out = 1.
     return reshape_broadcast(out, np.broadcast(theta_rad, phi_rad).shape)
Example #37
 def shape(self):
     return np.broadcast(*self.key.tuple).shape
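self.key.tuple here is assumed to be a tuple of broadcastable index arrays (the pattern matches vectorized indexer objects such as xarray's); the shape of their joint broadcast is the shape of the indexing result:

import numpy as np

key = (np.arange(3)[:, None], np.arange(4)[None, :])
print(np.broadcast(*key).shape)   # (3, 4)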
Example #38
def pmt(rate, nper, pv, fv=0, when='end'):
    """
    Compute the payment against loan principal plus interest.

    Given:
     * a present value, `pv` (e.g., an amount borrowed)
     * a future value, `fv` (e.g., 0)
     * an interest `rate` compounded once per period, of which
       there are
     * `nper` total
     * and (optional) specification of whether payment is made
       at the beginning (`when` = {'begin', 1}) or the end
       (`when` = {'end', 0}) of each period

    Return:
       the (fixed) periodic payment.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    nper : array_like
        Number of compounding periods
    pv : array_like
        Present value
    fv : array_like (optional)
        Future value (default = 0)
    when : {{'begin', 1}, {'end', 0}}, {string, int}
        When payments are due ('begin' (1) or 'end' (0))

    Returns
    -------
    out : ndarray
        Payment against loan plus interest.  If all input is scalar, returns a
        scalar float.  If any input is array_like, returns payment for each
        input element. If multiple inputs are array_like, they all must have
        the same shape.

    Notes
    -----
    The payment is computed by solving the equation::

     fv +
     pv*(1 + rate)**nper +
     pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0

    or, when ``rate == 0``::

      fv + pv + pmt * nper == 0

    for ``pmt``.

    Note that computing a monthly mortgage payment is only
    one use for this function.  For example, pmt returns the
    periodic deposit one must make to achieve a specified
    future balance given an initial deposit, a fixed,
    periodically compounded interest rate, and the total
    number of periods.

    References
    ----------
    .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
       Open Document Format for Office Applications (OpenDocument)v1.2,
       Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
       Pre-Draft 12. Organization for the Advancement of Structured Information
       Standards (OASIS). Billerica, MA, USA. [ODT Document].
       Available:
       http://www.oasis-open.org/committees/documents.php
       ?wg_abbrev=office-formulaOpenDocument-formula-20090508.odt

    Examples
    --------
    What is the monthly payment needed to pay off a $200,000 loan in 15
    years at an annual interest rate of 7.5%?

    >>> np.pmt(0.075/12, 12*15, 200000)
    -1854.0247200054619

    In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained
    today, a monthly payment of $1,854.02 would be required.  Note that this
    example illustrates usage of `fv` having a default value of 0.

    """
    when = _convert_when(when)
    (rate, nper, pv, fv, when) = map(np.asarray, [rate, nper, pv, fv, when])
    temp = (1+rate)**nper
    miter = np.broadcast(rate, nper, pv, fv, when)
    zer = np.zeros(miter.shape)
    fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
    return -(fv + pv*temp) / fact
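A quick sanity check (not part of the original source; assumes NumPy): the payment from the docstring example balances the annuity equation given in the Notes.

import numpy as np

rate, nper, pv, fv, when = 0.075 / 12, 12 * 15, 200000, 0, 0
pmt_val = -1854.0247200054619  # value from the docstring example
temp = (1 + rate) ** nper
residual = fv + pv * temp + pmt_val * (1 + rate * when) / rate * (temp - 1)
print(abs(residual) < 1e-6)  # True: the equation balances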
Example #39
0
def nper(rate, pmt, pv, fv=0, when='end'):
    """
    Compute the number of periodic payments.

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    pmt : array_like
        Payment
    pv : array_like
        Present value
    fv : array_like, optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))

    Notes
    -----
    The number of periods ``nper`` is computed by solving the equation::

     fv + pv*(1+rate)**nper + pmt*(1+rate*when)/rate*((1+rate)**nper-1) = 0

    but if ``rate = 0`` then::

     fv + pv + pmt*nper = 0

    Examples
    --------
    If you only had $150/month to pay towards the loan, how long would it take
    to pay-off a loan of $8,000 at 7% annual interest?

    >>> print(round(np.nper(0.07/12, -150, 8000), 5))
    64.07335

    So, just over 64 months are required to pay off the loan.

    The same analysis could be done with several different interest rates
    and/or payments and/or total amounts to produce an entire table.

    >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12,
    ...                    -150   : -99     : 50    ,
    ...                    8000   : 9001    : 1000]))
    array([[[  64.07334877,   74.06368256],
            [ 108.07548412,  127.99022654]],
           [[  66.12443902,   76.87897353],
            [ 114.70165583,  137.90124779]]])

    """
    when = _convert_when(when)
    (rate, pmt, pv, fv, when) = map(np.asarray, [rate, pmt, pv, fv, when])

    use_zero_rate = False
    with np.errstate(divide="raise"):
        try:
            z = pmt*(1.0+rate*when)/rate
        except FloatingPointError:
            use_zero_rate = True

    if use_zero_rate:
        return -(fv + pv) / (pmt + 0.0)  # rate == 0: solve fv + pv + pmt*nper == 0
    else:
        A = -(fv + pv)/(pmt+0.0)
        B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate)
        miter = np.broadcast(rate, pmt, pv, fv, when)
        zer = np.zeros(miter.shape)
        return np.where(rate==zer, A+zer, B+zer) + 0.0
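For the rate == 0 branch (corrected above to match the Notes), the equation reduces to fv + pv + pmt*nper == 0. A hedged one-liner, assuming nothing beyond plain Python:

# repay 8000 at 150/month with no interest: nper = -(fv + pv) / pmt
print(-(0 + 8000) / -150)  # 53.33... periods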
Example #40
0
def test_broadcast_shape():
    def shape_tuple(x, use_bcast=True):
        if use_bcast:
            return tuple(s if not bcast else 1
                         for s, bcast in zip(tuple(x.shape), x.broadcastable))
        else:
            return tuple(s for s in tuple(x.shape))

    x = np.array([[1], [2], [3]])
    y = np.array([4, 5, 6])
    b = np.broadcast(x, y)
    x_aet = aet.as_tensor_variable(x)
    y_aet = aet.as_tensor_variable(y)
    b_aet = broadcast_shape(x_aet, y_aet)
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    # Now, we try again using shapes as the inputs
    #
    # This case also confirms that a broadcast dimension will
    # broadcast against a non-broadcast dimension when they're
    # both symbolic (i.e. we couldn't obtain constant values).
    b_aet = broadcast_shape(
        shape_tuple(x_aet, use_bcast=False),
        shape_tuple(y_aet, use_bcast=False),
        arrays_are_shapes=True,
    )
    assert any(
        isinstance(node.op, Assert)
        for node in applys_between([x_aet, y_aet], b_aet))
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    b_aet = broadcast_shape(shape_tuple(x_aet),
                            shape_tuple(y_aet),
                            arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    # These are all constants, so there shouldn't be any asserts in the
    # resulting graph.
    assert not any(
        isinstance(node.op, Assert)
        for node in applys_between([x_aet, y_aet], b_aet))

    x = np.array([1, 2, 3])
    y = np.array([4, 5, 6])
    b = np.broadcast(x, y)
    x_aet = aet.as_tensor_variable(x)
    y_aet = aet.as_tensor_variable(y)
    b_aet = broadcast_shape(x_aet, y_aet)
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    b_aet = broadcast_shape(shape_tuple(x_aet),
                            shape_tuple(y_aet),
                            arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    # TODO: This will work when/if we use a more sophisticated `is_same_graph`
    # implementation.
    # assert not any(
    #     isinstance(node.op, Assert)
    #     for node in graph_ops([x_aet, y_aet], b_aet)
    # )

    x = np.empty((1, 2, 3))
    y = np.array(1)
    b = np.broadcast(x, y)
    x_aet = aet.as_tensor_variable(x)
    y_aet = aet.as_tensor_variable(y)
    b_aet = broadcast_shape(x_aet, y_aet)
    assert b_aet[0].value == 1
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    assert not any(
        isinstance(node.op, Assert)
        for node in applys_between([x_aet, y_aet], b_aet))
    b_aet = broadcast_shape(shape_tuple(x_aet),
                            shape_tuple(y_aet),
                            arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_aet], b.shape)

    x = np.empty((2, 1, 3))
    y = np.empty((2, 1, 1))
    b = np.broadcast(x, y)
    x_aet = aet.as_tensor_variable(x)
    y_aet = aet.as_tensor_variable(y)
    b_aet = broadcast_shape(x_aet, y_aet)
    assert b_aet[1].value == 1
    assert np.array_equal([z.eval() for z in b_aet], b.shape)
    # TODO: This will work when/if we use a more sophisticated `is_same_graph`
    # implementation.
    # assert not any(
    #     isinstance(node.op, Assert)
    #     for node in graph_ops([x_aet, y_aet], b_aet)
    # )
    b_aet = broadcast_shape(shape_tuple(x_aet),
                            shape_tuple(y_aet),
                            arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_aet], b.shape)

    x1_shp_aet = iscalar("x1")
    x2_shp_aet = iscalar("x2")
    y1_shp_aet = iscalar("y1")
    x_shapes = (1, x1_shp_aet, x2_shp_aet)
    x_aet = aet.ones(x_shapes)
    y_shapes = (y1_shp_aet, 1, x2_shp_aet)
    y_aet = aet.ones(y_shapes)
    b_aet = broadcast_shape(x_aet, y_aet)
    # TODO: This will work when/if we use a more sophisticated `is_same_graph`
    # implementation.
    # assert not any(
    #     isinstance(node.op, Assert)
    #     for node in graph_ops([x_aet, y_aet], b_aet)
    # )
    res = aet.as_tensor(b_aet).eval({
        x1_shp_aet: 10,
        x2_shp_aet: 4,
        y1_shp_aet: 2,
    })
    assert np.array_equal(res, (2, 10, 4))

    y_shapes = (y1_shp_aet, 1, y1_shp_aet)
    y_aet = aet.ones(y_shapes)
    b_aet = broadcast_shape(x_aet, y_aet)
    assert isinstance(b_aet[-1].owner.op, Assert)
Example #41
0
def roll(a, shift, axis=None):
    """
    Roll tensor elements along a given axis.

    Elements that roll beyond the last position are re-introduced at
    the first.

    Parameters
    ----------
    a : array_like
        Input tensor.
    shift : int or tuple of ints
        The number of places by which elements are shifted.  If a tuple,
        then `axis` must be a tuple of the same size, and each of the
        given axes is shifted by the corresponding number.  If an int
        while `axis` is a tuple of ints, then the same value is used for
        all given axes.
    axis : int or tuple of ints, optional
        Axis or axes along which elements are shifted.  By default, the
        tensor is flattened before shifting, after which the original
        shape is restored.

    Returns
    -------
    res : Tensor
        Output tensor, with the same shape as `a`.

    See Also
    --------
    rollaxis : Roll the specified axis backwards, until it lies in a
               given position.

    Notes
    -----

    Supports rolling over multiple dimensions simultaneously.

    Examples
    --------
    >>> import mars.tensor as mt

    >>> x = mt.arange(10)
    >>> mt.roll(x, 2).execute()
    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])

    >>> x2 = mt.reshape(x, (2,5))
    >>> x2.execute()
    array([[0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9]])
    >>> mt.roll(x2, 1).execute()
    array([[9, 0, 1, 2, 3],
           [4, 5, 6, 7, 8]])
    >>> mt.roll(x2, 1, axis=0).execute()
    array([[5, 6, 7, 8, 9],
           [0, 1, 2, 3, 4]])
    >>> mt.roll(x2, 1, axis=1).execute()
    array([[4, 0, 1, 2, 3],
           [9, 5, 6, 7, 8]])

    """
    from ..merge import concatenate

    a = astensor(a)
    raw = a

    if axis is None:
        a = ravel(a)
        axis = 0

    if not isinstance(shift, Iterable):
        shift = (shift,)
    else:
        shift = tuple(shift)
    if not isinstance(axis, Iterable):
        axis = (axis,)
    else:
        axis = tuple(axis)

    [validate_axis(a.ndim, ax) for ax in axis]
    broadcasted = np.broadcast(shift, axis)
    if broadcasted.ndim > 1:
        raise ValueError(
            "'shift' and 'axis' should be scalars or 1D sequences")

    shifts = {ax: 0 for ax in range(a.ndim)}
    for s, ax in broadcasted:
        shifts[ax] += s

    for ax, s in six.iteritems(shifts):
        if s == 0:
            continue

        s = -s
        s %= a.shape[ax]

        slc1 = (slice(None),) * ax + (slice(s, None),)
        slc2 = (slice(None),) * ax + (slice(s),)

        a = concatenate([a[slc1], a[slc2]], axis=ax)

    return a.reshape(raw.shape)
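A NumPy-only sketch (for illustration; not part of the mars source) of the same slice-and-concatenate trick that implements the roll above:

import numpy as np

x = np.arange(10)
shift = 2
s = -shift % x.shape[0]                # normalize the shift to a split point
print(np.concatenate([x[s:], x[:s]]))  # [8 9 0 1 2 3 4 5 6 7]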
Example #42
0
    def integrate(self, low, high, weight_power=None):
        """Integrate the function from low to high.

        Optionally weight the integral by x^weight_power.

        """
        limits = self._limits.flatten()
        coefficients = self._coefficients.flatten()
        powers = self._powers.flatten()
        if weight_power is not None:
            powers += weight_power
            # Integral of each piece over its domain.
            integrals = ((coefficients / (powers + 1.)) *
                         (limits[1:]**(powers + 1.) -
                          limits[:-1]**(powers + 1.)))
        else:
            integrals = self._integrals
        
        pairs = np.broadcast(low, high)
        integral = np.empty(pairs.shape)
        for (i, (x0, x1)) in enumerate(pairs):
            # Sort the integral limits.
            x0, x1 = list(np.sort([x0,x1]))

            # Select the pieces contained entirely in the interval.
            mask = np.logical_and(x0 < limits[:-1],
                                     x1 >= limits[1:]).flatten()
            indices = np.where(mask)
            if not np.any(mask):
                integral.flat[i] = 0

                # If the interval is outside the domain.
                if x0 > limits[-1] or x1 < limits[0]:
                    integral.flat[i] = 0
                    continue

                # Find out if any piece contains the entire interval:
                containedmask = np.logical_and(x0 >= limits[:-1],
                                                  x1 < limits[1:])
                # Three possibilities:
                if np.any(containedmask):
                    # The interval is contained in a single segment.
                    index = np.where(containedmask)[0][0]
                    integral.flat[i] = ((coefficients[index] /
                                         (powers[index] + 1.)) *
                                        (x1**(powers[index] + 1.) -
                                         x0**(powers[index] + 1.)))
                    continue
                elif x1 >= limits[0] and x1 < limits[1]:
                    # x1 is in the first segment.
                    highi = 0
                    lowi = -1
                elif x0 < limits[-1] and x0 >= limits[-2]:
                    # x0 is in the last segment:
                    lowi = len(limits) - 2
                    highi = len(limits)
                else:
                    # We must be spanning the division between a pair of pieces.
                    lowi = np.max(np.where(x0 >= limits[:-1]))
                    highi = np.min(np.where(x1 < limits[1:]))
                insideintegral = 0
            else:
                # Add up the integrals of the pieces totally inside the interval.
                insideintegral = np.sum(integrals[indices])

                lowi = np.min(indices) - 1
                highi = np.max(indices) + 1

            # Check that the integral limits are inside our domain.
            if x0 < limits[0] or lowi < 0:
                lowintegral = 0.
            else:
                lowintegral = ((coefficients[lowi] / (powers[lowi] + 1.)) *
                               (limits[lowi + 1]**(powers[lowi] + 1.) -
                                x0**(powers[lowi] + 1.)))
            if x1 > limits[-1] or highi > len(coefficients) - 1:
                highintegral = 0.
            else:
                highintegral = ((coefficients[highi] / (powers[highi] + 1.)) *
                                (x1**(powers[highi] + 1.) -
                                 limits[highi]**(powers[highi] + 1.)))
            integral.flat[i] = highintegral + insideintegral + lowintegral
        return integral
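A micro-sketch (NumPy assumed) of the pairing pattern the method relies on: np.broadcast(low, high) walks the integration limits element-wise while .flat[i] fills the preallocated output.

import numpy as np

low, high = 0.0, np.array([1.0, 2.0, 3.0])
pairs = np.broadcast(low, high)
out = np.empty(pairs.shape)
for i, (x0, x1) in enumerate(pairs):
    out.flat[i] = x1 - x0  # stand-in for the piecewise integral
print(out)                 # [1. 2. 3.]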
Example #43
0
def fv(rate, nper, pmt, pv, when='end'):
    """
    Compute the future value.

    Given:
     * a present value, `pv`
     * an interest `rate` compounded once per period, of which
       there are
     * `nper` total
     * a (fixed) payment, `pmt`, paid either
     * at the beginning (`when` = {'begin', 1}) or the end
       (`when` = {'end', 0}) of each period

    Return:
       the value at the end of the `nper` periods

    Parameters
    ----------
    rate : scalar or array_like of shape(M, )
        Rate of interest as decimal (not per cent) per period
    nper : scalar or array_like of shape(M, )
        Number of compounding periods
    pmt : scalar or array_like of shape(M, )
        Payment
    pv : scalar or array_like of shape(M, )
        Present value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0)).
        Defaults to {'end', 0}.

    Returns
    -------
    out : ndarray
        Future values.  If all input is scalar, returns a scalar float.  If
        any input is array_like, returns future values for each input element.
        If multiple inputs are array_like, they all must have the same shape.

    Notes
    -----
    The future value is computed by solving the equation::

     fv +
     pv*(1+rate)**nper +
     pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) == 0

    or, when ``rate == 0``::

     fv + pv + pmt * nper == 0

    References
    ----------
    .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
       Open Document Format for Office Applications (OpenDocument)v1.2,
       Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
       Pre-Draft 12. Organization for the Advancement of Structured Information
       Standards (OASIS). Billerica, MA, USA. [ODT Document].
       Available:
       http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
       OpenDocument-formula-20090508.odt

    Examples
    --------
    What is the future value after 10 years of saving $100 now, with
    an additional monthly savings of $100, if the interest rate is
    5% (annually) compounded monthly?

    >>> np.fv(0.05/12, 10*12, -100, -100)
    15692.928894335748

    By convention, the negative sign represents cash flow out (i.e. money not
    available today).  Thus, saving $100 a month at 5% annual interest leads
    to $15,692.93 available to spend in 10 years.

    If any input is array_like, returns an array of equal shape.  Let's
    compare different interest rates from the example above.

    >>> a = np.array((0.05, 0.06, 0.07))/12
    >>> np.fv(a, 10*12, -100, -100)
    array([ 15692.92889434,  16569.87435405,  17509.44688102])

    """
    when = _convert_when(when)
    (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when])
    temp = (1+rate)**nper
    miter = np.broadcast(rate, nper, pmt, pv, when)
    zer = np.zeros(miter.shape)
    fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
    return -(pv*temp + pmt*fact)
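A hedged check (NumPy assumed) that the closed form from the Notes reproduces the docstring value for payments due at the end of each period:

import numpy as np

rate, nper, pmt, pv = 0.05 / 12, 10 * 12, -100, -100
temp = (1 + rate) ** nper
print(-(pv * temp + pmt * (temp - 1) / rate))  # ~15692.93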
Example #44
0
def test_broadcast_shape_basic():
    def shape_tuple(x, use_bcast=True):
        if use_bcast:
            return tuple(
                s if not bcast else 1
                for s, bcast in zip(tuple(x.shape), x.broadcastable)
            )
        else:
            return tuple(s for s in tuple(x.shape))

    x = np.array([[1], [2], [3]])
    y = np.array([4, 5, 6])
    b = np.broadcast(x, y)
    x_at = at.as_tensor_variable(x)
    y_at = at.as_tensor_variable(y)
    b_at = broadcast_shape(x_at, y_at)
    assert np.array_equal([z.eval() for z in b_at], b.shape)
    # Now, we try again using shapes as the inputs
    #
    # This case also confirms that a broadcast dimension will
    # broadcast against a non-broadcast dimension when they're
    # both symbolic (i.e. we couldn't obtain constant values).
    b_at = broadcast_shape(
        shape_tuple(x_at, use_bcast=False),
        shape_tuple(y_at, use_bcast=False),
        arrays_are_shapes=True,
    )
    assert any(
        isinstance(node.op, Assert) for node in applys_between([x_at, y_at], b_at)
    )
    assert np.array_equal([z.eval() for z in b_at], b.shape)
    b_at = broadcast_shape(shape_tuple(x_at), shape_tuple(y_at), arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_at], b.shape)

    x = np.array([1, 2, 3])
    y = np.array([4, 5, 6])
    b = np.broadcast(x, y)
    x_at = at.as_tensor_variable(x)
    y_at = at.as_tensor_variable(y)
    b_at = broadcast_shape(x_at, y_at)
    assert np.array_equal([z.eval() for z in b_at], b.shape)
    b_at = broadcast_shape(shape_tuple(x_at), shape_tuple(y_at), arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_at], b.shape)

    x = np.empty((1, 2, 3))
    y = np.array(1)
    b = np.broadcast(x, y)
    x_at = at.as_tensor_variable(x)
    y_at = at.as_tensor_variable(y)
    b_at = broadcast_shape(x_at, y_at)
    assert b_at[0].value == 1
    assert np.array_equal([z.eval() for z in b_at], b.shape)
    b_at = broadcast_shape(shape_tuple(x_at), shape_tuple(y_at), arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_at], b.shape)

    x = np.empty((2, 1, 3))
    y = np.empty((2, 1, 1))
    b = np.broadcast(x, y)
    x_at = at.as_tensor_variable(x)
    y_at = at.as_tensor_variable(y)
    b_at = broadcast_shape(x_at, y_at)
    assert b_at[1].value == 1
    assert np.array_equal([z.eval() for z in b_at], b.shape)
    b_at = broadcast_shape(shape_tuple(x_at), shape_tuple(y_at), arrays_are_shapes=True)
    assert np.array_equal([z.eval() for z in b_at], b.shape)

    x1_shp_at = iscalar("x1")
    x2_shp_at = iscalar("x2")
    y1_shp_at = iscalar("y1")
    x_shapes = (1, x1_shp_at, x2_shp_at)
    x_at = at.ones(x_shapes)
    y_shapes = (y1_shp_at, 1, x2_shp_at)
    y_at = at.ones(y_shapes)
    b_at = broadcast_shape(x_at, y_at)
    res = at.as_tensor(b_at).eval(
        {
            x1_shp_at: 10,
            x2_shp_at: 4,
            y1_shp_at: 2,
        }
    )
    assert np.array_equal(res, (2, 10, 4))

    y_shapes = (y1_shp_at, 1, y1_shp_at)
    y_at = at.ones(y_shapes)
    b_at = broadcast_shape(x_at, y_at)
    assert isinstance(b_at[-1].owner.op, Assert)
Example #45
0
def gammaincc(a, x):
    r'''Regularized upper incomplete gamma function.

    This implementation of `gammaincc` allows :math:`a` real and :math:`x`
    nonnegative.

    Parameters
    ----------
    a : array_like
        Real parameter.

    x : array_like
        Nonnegative argument.

    Returns
    -------
    scalar or ndarray
        Values of the upper incomplete gamma function.

    Notes
    -----
    The function value is computed via a recurrence from the value of
    `scipy.special.gammaincc` for arguments :math:`a-n, x`, where :math:`n` is
    the largest integer such that :math:`a-n \ge 0`.

    See also
    --------
    scipy.special.gammaincc : Computes the start of the recurrence.

    Examples
    --------
    This implementation of `gammaincc` supports positive and negative values
    of `a`.

    >>> from skypy.utils.special import gammaincc
    >>> gammaincc([-1.5, -0.5, 0.5, 1.5], 1.2)
    array([ 0.03084582, -0.03378949,  0.12133525,  0.49363462])

    '''
    if np.broadcast(a, x).ndim == 0:
        return _gammaincc(a, x)
    a, x = np.broadcast_arrays(a, x)
    if np.any(x < 0):
        raise ValueError('negative x in gammaincc')

    # nonpositive a need special treatment
    i = a <= 0

    # find integer n such that a - n >= 0
    n = np.where(i, np.floor(a), 0)

    # compute gammaincc for a-n and x as usual
    g = np.where(a == n, 0, sc.gammaincc(a-n, x))

    # deal with nonpositive a
    # the number n keeps track of iterations still to do
    if np.any(i):
        # all x = inf are done
        n[i & np.isinf(x)] = 0

        # do x = 0 for nonpositive a; depends on Gamma(a)
        i = i & (x == 0)
        s = sc.gammasgn(a[i])
        g[i] = np.where(s == 0, 0, np.copysign(np.inf, s))
        n[i] = 0

        # these are still to do
        i = n < 0

        # recurrence
        f = np.empty_like(g)
        f[i] = np.exp(sc.xlogy(a[i]-n[i], x[i])-x[i]-sc.gammaln(a[i]-n[i]+1))
        while np.any(i):
            f[i] *= (a[i]-n[i])/x[i]
            g[i] -= f[i]
            n[i] += 1
            i[i] = n[i] < 0
    return g
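The recurrence behind this implementation is Q(a+1, x) = Q(a, x) + x**a * exp(-x) / Gamma(a+1). A numerical spot-check, assuming SciPy:

import numpy as np
import scipy.special as sc

a, x = 0.5, 1.2
lhs = sc.gammaincc(a + 1, x)
rhs = sc.gammaincc(a, x) + np.exp(sc.xlogy(a, x) - x - sc.gammaln(a + 1))
print(np.isclose(lhs, rhs))  # True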
Example #46
0
def _spectral_helper(x,
                     y,
                     fs=1.0,
                     window='hanning',
                     nperseg=256,
                     noverlap=None,
                     nfft=None,
                     detrend='constant',
                     return_onesided=True,
                     scaling='spectrum',
                     axis=-1,
                     mode='psd'):
    """
    Calculate various forms of windowed FFTs for PSD, CSD, etc.

    This is a helper function that implements the commonality between the
    psd, csd, and spectrogram functions. It is not designed to be called
    externally. The windows are not averaged over; the result from each window
    is returned.

    Parameters
    ----------
    x : array_like
        Array or sequence containing the data to be analyzed.
    y : array_like
        Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
        the extra computations are spared.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length will be used for nperseg.
        Defaults to 'hanning'.
    nperseg : int, optional
        Length of each segment.  Defaults to 256.
    noverlap : int, optional
        Number of points to overlap between segments. If None,
        ``noverlap = nperseg // 2``.  Defaults to None.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired.  If None,
        the FFT length is `nperseg`. Defaults to None.
    detrend : str or function or False, optional
        Specifies how to detrend each segment. If `detrend` is a string,
        it is passed as the ``type`` argument to `detrend`.  If it is a
        function, it takes a segment and returns a detrended segment.
        If `detrend` is False, no detrending is done.  Defaults to 'constant'.
    return_onesided : bool, optional
        If True, return a one-sided spectrum for real data. If False return
        a two-sided spectrum. Note that for complex data, a two-sided
        spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross spectrum
        ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
        measured in V and fs is measured in Hz.  Defaults to 'spectrum'.
    axis : int, optional
        Axis along which the periodogram is computed; the default is over
        the last axis (i.e. ``axis=-1``).
    mode : str, optional
        Defines what kind of return values are expected. Options are ['psd',
        'complex', 'magnitude', 'angle', 'phase'].

    Returns
    -------
    freqs : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of times corresponding to each data segment.
    result : ndarray
        Array of output data; contents depend on the *mode* kwarg.

    References
    ----------
    .. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
        http://stackoverflow.com/a/6811241
    .. [2] Stack Overflow, "Using strides for an efficient moving average
        filter", http://stackoverflow.com/a/4947453

    Notes
    -----
    Adapted from matplotlib.mlab

    .. versionadded:: 0.16.0
    """
    if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "'psd', 'complex', 'magnitude', "
                         "'angle', 'phase'" % mode)

    # If x and y are the same object we can save ourselves some computation.
    same_data = y is x

    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is not 'psd'")

    axis = int(axis)

    # Ensure we have np.arrays, get outdtype
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        outdtype = np.result_type(x, y, np.complex64)
    else:
        outdtype = np.result_type(x, np.complex64)

    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
        except ValueError:
            raise ValueError('x and y cannot be broadcast together.')

    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]), )
            emptyout = np.rollaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout, emptyout

    if x.ndim > 1:
        if axis != -1:
            x = np.rollaxis(x, axis, len(x.shape))
            if not same_data and y.ndim > 1:
                y = np.rollaxis(y, axis, len(y.shape))

    # Check if x and y are the same length, zero-pad if necessary
    if not same_data:
        if x.shape[-1] != y.shape[-1]:
            if x.shape[-1] < y.shape[-1]:
                pad_shape = list(x.shape)
                pad_shape[-1] = y.shape[-1] - x.shape[-1]
                x = np.concatenate((x, np.zeros(pad_shape)), -1)
            else:
                pad_shape = list(y.shape)
                pad_shape[-1] = x.shape[-1] - y.shape[-1]
                y = np.concatenate((y, np.zeros(pad_shape)), -1)

    # X and Y are same length now, can test nperseg with either
    if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
                      'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
        nperseg = x.shape[-1]

    nperseg = int(nperseg)
    if nperseg < 1:
        raise ValueError('nperseg must be a positive integer')

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg // 2
    elif noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    else:
        noverlap = int(noverlap)

    # Handle detrending and window functions
    if not detrend:

        def detrend_func(d):
            return d
    elif not hasattr(detrend, '__call__'):

        def detrend_func(d):
            return signaltools.detrend(d, type=detrend, axis=-1)
    elif axis != -1:
        # Wrap this function so that it receives a shape that it could
        # reasonably expect to receive.
        def detrend_func(d):
            d = np.rollaxis(d, -1, axis)
            d = detrend(d)
            return np.rollaxis(d, axis, len(d.shape))
    else:
        detrend_func = detrend

    if isinstance(window, string_types) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError('window must have length of nperseg')

    if np.result_type(win, np.complex64) != outdtype:
        win = win.astype(outdtype)

    if mode == 'psd':
        if scaling == 'density':
            scale = 1.0 / (fs * (win * win).sum())
        elif scaling == 'spectrum':
            scale = 1.0 / win.sum()**2
        else:
            raise ValueError('Unknown scaling: %r' % scaling)
    else:
        scale = 1

    if return_onesided is True:
        if np.iscomplexobj(x):
            sides = 'twosided'
        else:
            sides = 'onesided'
            if not same_data:
                if np.iscomplexobj(y):
                    sides = 'twosided'
    else:
        sides = 'twosided'

    if sides == 'twosided':
        num_freqs = nfft
    elif sides == 'onesided':
        if nfft % 2:
            num_freqs = (nfft + 1) // 2
        else:
            num_freqs = nfft // 2 + 1

    # Perform the windowed FFTs
    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
    result = result[..., :num_freqs]
    freqs = fftpack.fftfreq(nfft, 1 / fs)[:num_freqs]

    if not same_data:
        # All the same operations on the y data
        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
        result_y = result_y[..., :num_freqs]
        result = np.conjugate(result) * result_y
    elif mode == 'psd':
        result = np.conjugate(result) * result
    elif mode == 'magnitude':
        result = np.absolute(result)
    elif mode == 'angle' or mode == 'phase':
        result = np.angle(result)
    elif mode == 'complex':
        pass

    result *= scale
    if sides == 'onesided':
        if nfft % 2:
            result[..., 1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            result[..., 1:-1] *= 2

    t = np.arange(nperseg / 2, x.shape[-1] - nperseg / 2 + 1,
                  nperseg - noverlap) / float(fs)

    if sides != 'twosided' and not nfft % 2:
        # get the last value correctly, it is negative otherwise
        freqs[-1] *= -1

    # we unwrap the phase here to handle the onesided vs. twosided case
    if mode == 'phase':
        result = np.unwrap(result, axis=-1)

    result = result.astype(outdtype)

    # All imaginary parts are zero anyways
    if same_data and mode != 'complex':
        result = result.real

    # Output is going to have new last axis for window index
    if axis != -1:
        # Specify as positive axis index
        if axis < 0:
            axis = len(result.shape) - 1 - axis

        # Roll frequency axis back to axis where the data came from
        result = np.rollaxis(result, -1, axis)
    else:
        # Make sure window/time index is last axis
        result = np.rollaxis(result, -1, -2)

    return freqs, t, result
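A micro-sketch (NumPy assumed) of the outer-shape compatibility check used above: broadcasting small empty arrays reports shape conflicts early without touching the data.

import numpy as np

xouter, youter = [2, 1], [1, 3]
print(np.broadcast(np.empty(xouter), np.empty(youter)).shape)  # (2, 3)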
Example #47
0
def _pack_vector(*args):
    shape = np.broadcast(*args).shape
    out = np.empty(shape + (len(args),))
    for i, arg in enumerate(args):
        out[..., i] = arg
    return out
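A usage sketch for the helper above (NumPy assumed): the inputs broadcast to a common shape, then stack along a new trailing axis.

import numpy as np

out = _pack_vector(np.arange(3), 10.0)  # shapes (3,) and () broadcast to (3,)
print(out.shape)                        # (3, 2)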
Example #48
0
                       arr.dtype)
    kron[(slice(None), ) * axis + (pos, )] = arr
    return kron


class Broadcast1D:
    def __init__(self, arg):
        self.arg = numpy.asarray(arg)
        self.shape = self.arg.shape
        self.size = self.arg.size

    def __iter__(self):
        return ((item, ) for item in self.arg.flat)


broadcast = lambda *args: numpy.broadcast(*args) if len(
    args) > 1 else Broadcast1D(args[0])


def det_exact(A):
    # for some reason, numpy.linalg.det suffers from rounding errors
    A = numpy.asarray(A)
    assert A.ndim == 2 and A.shape[0] == A.shape[1]
    if len(A) == 0:
        det = 1.
    elif len(A) == 1:
        det = A[0, 0]
    elif len(A) == 2:
        ((a, b), (c, d)) = A
        det = a * d - b * c
    elif len(A) == 3:
Example #49
0
 def output(self, value, mask):
     output = self._base.output
     result = [output(x, m) for x, m in np.broadcast(value, mask)]
     return ' '.join(result)
Example #50
0
def synth_values(coeffs, radius, theta, phi, \
                 nmax=None, nmin=None, grid=None):
    """
    Based on chaosmagpy from Clemens Kloss (DTU Space, Copenhagen).
    Computes the radial, colatitude and azimuthal field components from the
    magnetic potential field in terms of spherical harmonic coefficients.
    A reduced version of the DTU synth_values chaosmagpy code.

    Parameters
    ----------

    coeffs : ndarray, shape (..., N)
        Coefficients of the spherical harmonic expansion. The last dimension is
        equal to the number of coefficients, `N` at the grid points.
    radius : float or ndarray, shape (...)
        Array containing the radius in kilometers.
    theta : float or ndarray, shape (...)
        Array containing the colatitude in degrees
        :math:`[0^\\circ,180^\\circ]`.
    phi : float or ndarray, shape (...)
        Array containing the longitude in degrees.
    nmax : int, positive, optional
        Maximum degree up to which the expansion is to be used (defaults to
        the degree implied by ``coeffs``, but can also be smaller if
        specified); the number of coefficients must satisfy
        :math:`N \\geq` ``nmax`` (``nmax`` + 2).
    nmin : int, positive, optional
        Minimum degree from which the expansion is to be used (defaults to 1).
        Note that it will just skip the degrees smaller than ``nmin``; the
        whole sequence of coefficients 1 through ``nmax`` must still be given
        in ``coeffs``.
    grid : bool, optional
        If ``True``, field components are computed on a regular grid. Arrays
        ``theta`` and ``phi`` must have one dimension less than the output grid
        since the grid will be created as their outer product (defaults to
        ``False``).

    Returns
    -------
    B_radius, B_theta, B_phi : ndarray, shape (...)
        Radial, colatitude and azimuthal field components.

    Notes
    -----
    The function can work with different grid shapes, but the inputs have to
    satisfy NumPy's `broadcasting rules \\
    <https://docs.scipy.org/doc/numpy-1.15.0/user/basics.broadcasting.html>`_
    (``grid=False``, default). This also applies to the dimension of the
    coefficients ``coeffs`` excluding the last dimension.

    The optional parameter ``grid`` is for convenience. If set to ``True``,
    a singleton dimension is appended (prepended) to ``theta`` (``phi``)
    for broadcasting to a regular grid. The other inputs ``radius`` and
    ``coeffs`` must then be broadcastable as before but now with the resulting
    regular grid.

    Examples
    --------
    The most straightforward computation uses a fully specified grid. For
    example, compute the magnetic field at :math:`N` grid points on the
    surface.

    .. code-block:: python

      import igrf_utils as iut
      import numpy as np

      N = 13
      coeffs = np.ones((3,))  # degree 1 coefficients for all points
      radius = 6371.2 * np.ones((N,))  # radius of N points in km
      phi = np.linspace(-180., 180., num=N)  # azimuth of N points in deg.
      theta = np.linspace(0., 180., num=N)  # colatitude of N points in deg.

      B = iut.synth_values(coeffs, radius, theta, phi)
      print([B[num].shape for num in range(3)])  # (N,) shaped output

    Instead of `N` points, compute the field on a regular
    :math:`N\\times N`-grid in azimuth and colatitude (slow).

    .. code-block:: python

      radius_grid = 6371.2 * np.ones((N, N))
      phi_grid, theta_grid = np.meshgrid(phi, theta)  # regular NxN grid

      B = iut.synth_values(coeffs, radius_grid, theta_grid, phi_grid)
      print([B[num].shape for num in range(3)])  # NxN output

    But this is slow since some computations on the grid are executed several
    times. The preferred method is to use NumPy's broadcasting rules (fast).

    .. code-block:: python

      radius_grid = 6371.2  # float, () or (1,)-shaped array broadcasted to NxN
      phi_grid = phi[None, ...]  # prepend singleton: 1xN
      theta_grid = theta[..., None]  # append singleton: Nx1

      B = iut.synth_values(coeffs, radius_grid, theta_grid, phi_grid)
      print([B[num].shape for num in range(3)])  # NxN output

    For convenience, you can do the same by using ``grid=True`` option.

    .. code-block:: python

      B = iut.synth_values(coeffs, radius_grid, theta, phi, grid=True)
      print([B[num].shape for num in range(3)])  # NxN output

    Remember that ``grid=False`` (or left out completely) will result in
    (N,)-shaped outputs as in the first example.

    """

    # ensure ndarray inputs (np.float was removed from NumPy; use the builtin)
    coeffs = np.array(coeffs, dtype=float)
    radius = np.array(radius, dtype=float) / 6371.2  # Earth's average radius
    theta = np.array(theta, dtype=float)
    phi = np.array(phi, dtype=float)

    if np.amin(theta) <= 0.0 or np.amax(theta) >= 180.0:
        if np.amin(theta) == 0.0 or np.amax(theta) == 180.0:
            warnings.warn('The geographic poles are included.')
        else:
            raise ValueError('Colatitude outside bounds [0, 180].')

    if nmin is None:
        nmin = 1
    else:
        assert nmin > 0, 'Only positive nmin allowed.'

    # handle optional argument: nmax
    nmax_coeffs = int(np.sqrt(coeffs.shape[-1] + 1) - 1)  # degree
    if nmax is None:
        nmax = nmax_coeffs
    else:
        assert nmax > 0, 'Only positive nmax allowed.'

    if nmax > nmax_coeffs:
        warnings.warn('Supplied nmax = {0} and nmin = {1} is '
                      'incompatible with number of model coefficients. '
                      'Using nmax = {2} instead.'.format(
                          nmax, nmin, nmax_coeffs))
        nmax = nmax_coeffs

    if nmax < nmin:
        raise ValueError(f'Nothing to compute: nmax < nmin ({nmax} < {nmin}.)')

    # handle grid option
    grid = False if grid is None else grid

    # manually broadcast input grid on surface
    if grid:
        theta = theta[..., None]  # first dimension is theta
        phi = phi[None, ...]  # second dimension is phi

    # get shape of broadcasted result
    try:
        b = np.broadcast(radius, theta, phi,
                         np.broadcast_to(0, coeffs.shape[:-1]))
    except ValueError:
        print('Cannot broadcast grid shapes (excl. last dimension of coeffs):')
        print(f'radius: {radius.shape}')
        print(f'theta:  {theta.shape}')
        print(f'phi:    {phi.shape}')
        print(f'coeffs: {coeffs.shape[:-1]}')
        raise

    grid_shape = b.shape

    # initialize radial dependence given the source
    r_n = radius**(-(nmin + 2))

    # compute associated Legendre polynomials as (n, m, theta-points)-array
    Pnm = legendre_poly(nmax, theta)

    # save sinth for fast access
    sinth = Pnm[1, 1]

    # calculate cos(m*phi) and sin(m*phi) as (m, phi-points)-array
    phi = radians(phi)
    cmp = np.cos(np.multiply.outer(np.arange(nmax + 1), phi))
    smp = np.sin(np.multiply.outer(np.arange(nmax + 1), phi))

    # allocate arrays in memory
    B_radius = np.zeros(grid_shape)
    B_theta = np.zeros(grid_shape)
    B_phi = np.zeros(grid_shape)

    num = nmin**2 - 1
    for n in range(nmin, nmax + 1):
        B_radius += (n + 1) * Pnm[n, 0] * r_n * coeffs[..., num]

        B_theta += -Pnm[0, n + 1] * r_n * coeffs[..., num]

        num += 1

        for m in range(1, n + 1):
            B_radius += (
                (n + 1) * Pnm[n, m] * r_n *
                (coeffs[..., num] * cmp[m] + coeffs[..., num + 1] * smp[m]))

            B_theta += (
                -Pnm[m, n + 1] * r_n *
                (coeffs[..., num] * cmp[m] + coeffs[..., num + 1] * smp[m]))

            with np.errstate(divide='ignore', invalid='ignore'):
                # handle poles using L'Hopital's rule
                div_Pnm = np.where(theta == 0., Pnm[m, n + 1],
                                   Pnm[n, m] / sinth)
                div_Pnm = np.where(theta == degrees(pi), -Pnm[m, n + 1],
                                   div_Pnm)

            B_phi += (
                m * div_Pnm * r_n *
                (coeffs[..., num] * smp[m] - coeffs[..., num + 1] * cmp[m]))

            num += 2

        r_n = r_n / radius  # equivalent to r_n = radius**(-(n+2))

    return B_radius, B_theta, B_phi
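A micro-sketch (NumPy assumed) of the ``grid=True`` trick used above: appending and prepending singleton axes turns two 1-D coordinate arrays into a regular broadcast grid.

import numpy as np

theta = np.linspace(0.0, 180.0, 4)[..., None]   # colatitude, shape (4, 1)
phi = np.linspace(-180.0, 180.0, 5)[None, ...]  # longitude, shape (1, 5)
print(np.broadcast(6371.2, theta, phi).shape)   # (4, 5)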
Example #51
0
 def test_inverse_shape(self):
     shape = self.scale.shape
     self.assertEqual(
         tuple(self._t.forward_shape(shape)),
         np.broadcast(np.random.random(shape), self.loc, self.scale).shape)
Example #52
0
    def __call__(self,
                 observer,
                 targets,
                 times=None,
                 time_range=None,
                 time_grid_resolution=0.5 * u.hour,
                 grid_times_targets=False):
        """
        Compute the constraint for this class

        Parameters
        ----------
        observer : `~astroplan.Observer`
            the observation location from which to apply the constraints
        targets : sequence of `~astroplan.Target`
            The targets on which to apply the constraints.
        times : `~astropy.time.Time`
            The times to compute the constraint.
            If both `times` and `time_range` are provided, `times` takes
            precedence and `time_range` is ignored.
        time_range : `~astropy.time.Time` (length = 2)
            Lower and upper bounds on time sequence.
        time_grid_resolution : `~astropy.units.Quantity`
            Time-grid spacing
        grid_times_targets : bool
            if True, grids the constraint result with targets along the first
            index and times along the second. Otherwise, we rely on broadcasting
            the shapes together using standard numpy rules.
        Returns
        -------
        constraint_result : 1D or 2D array of float or bool
            The constraints. If 2D, targets are along the first index and
            times along the second.
        """

        if times is None and time_range is not None:
            times = time_grid_from_range(time_range,
                                         time_resolution=time_grid_resolution)

        if grid_times_targets:
            targets = get_skycoord(targets)
            # TODO: these broadcasting operations are relatively slow
            # but there is potential for huge speedup if the end user
            # disables gridding and re-shapes the coords themselves
            # prior to evaluating multiple constraints.
            if targets.isscalar:
                # ensure we have a (1, 1) shape coord
                targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]
            else:
                targets = targets[..., np.newaxis]
        times, targets = observer._preprocess_inputs(times,
                                                     targets,
                                                     grid_times_targets=False)
        result = self.compute_constraint(times, observer, targets)

        # make sure the output has the same shape as would result from
        # broadcasting times and targets against each other
        if targets is not None:
            # broadcasting times v targets is slow due to
            # complex nature of these objects. We make
            # to simple numpy arrays of the same shape and
            # broadcast these to find the correct shape
            shp1, shp2 = times.shape, targets.shape
            x = np.array([1])
            a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
            b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
            output_shape = np.broadcast(a, b).shape
            if output_shape != np.array(result).shape:
                result = np.broadcast_to(result, output_shape)

        return result
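A micro-sketch (NumPy assumed) of the zero-stride trick above: as_strided builds dummy views of any shape from a one-element array, so the broadcast shape is found without copying the expensive Time/SkyCoord objects.

import numpy as np
from numpy.lib.stride_tricks import as_strided

x = np.array([1])
a = as_strided(x, shape=(5,), strides=(0,))
b = as_strided(x, shape=(3, 1), strides=(0, 0))
print(np.broadcast(a, b).shape)  # (3, 5)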
Example #53
0
def ellipses(x, y, w, h=None, rot=0.0, c='b', vmin=None, vmax=None, **kwargs):
    """
    Make a scatter plot of ellipses. 
    Parameters
    ----------
    x, y : scalar or array_like, shape (n, )
        Center of ellipses.
    w, h : scalar or array_like, shape (n, )
        Total length (diameter) of horizontal/vertical axis.
        `h` is set equal to `w` by default, i.e., a circle.
    rot : scalar or array_like, shape (n, )
        Rotation in degrees (anti-clockwise).
    c : color or sequence of color, optional, default : 'b'
        `c` can be a single color format string, or a sequence of color
        specifications of length `N`, or a sequence of `N` numbers to be
        mapped to colors using the `cmap` and `norm` specified via kwargs.
        Note that `c` should not be a single numeric RGB or RGBA sequence
        because that is indistinguishable from an array of values
        to be colormapped. (If you insist, use `color` instead.)
        `c` can be a 2-D array in which the rows are RGB or RGBA, however.
    vmin, vmax : scalar, optional, default: None
        `vmin` and `vmax` are used in conjunction with `norm` to normalize
        luminance data.  If either is `None`, the min and max of the
        color array is used.
    kwargs : `~matplotlib.collections.Collection` properties
        Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
        norm, cmap, transform, etc.
        
    Returns
    -------
    paths : `~matplotlib.collections.PathCollection`
    
    Examples
    --------
    a = np.arange(11)
    ellipses(a, a, w=4, h=a, rot=a*30, c=a, alpha=0.5, ec='none')
    plt.colorbar()
    
    License
    --------
    This code is under [The BSD 3-Clause License]
    (http://opensource.org/licenses/BSD-3-Clause)
    """
    if np.isscalar(c):
        kwargs.setdefault('color', c)
        c = None
    if 'fc' in kwargs:
        kwargs.setdefault('facecolor', kwargs.pop('fc'))
    if 'ec' in kwargs:
        kwargs.setdefault('edgecolor', kwargs.pop('ec'))
    if 'ls' in kwargs:
        kwargs.setdefault('linestyle', kwargs.pop('ls'))
    if 'lw' in kwargs:
        kwargs.setdefault('linewidth', kwargs.pop('lw'))
    # You can set `facecolor` with an array for each patch,
    # while you can only set `facecolors` with a value for all.

    if h is None:
        h = w
    patches = [
        Ellipse((x_, y_), w_, h_, rot_)
        for x_, y_, w_, h_, rot_ in np.broadcast(x, y, w, h, rot)
    ]
    collection = PatchCollection(patches, **kwargs)
    if c is not None:
        collection.set_array(np.asarray(c))
        collection.set_clim(vmin, vmax)

    ax = plt.gca()
    ax.add_collection(collection)
    ax.autoscale_view()
    plt.draw_if_interactive()
    if c is not None:
        plt.sci(collection)
    return collection
Example #54
0
print('x.shape and y.shape:')
print(x.shape, y.shape)
'''
numpy.broadcast
numpy.broadcast mimics broadcasting: it returns an object that encapsulates
the result of broadcasting one array against another.
The function takes two arrays as input parameters.
'''
print(
    "---------------------------numpy.broadcast---------------------------------"
)

x = np.array([[1], [2], [3]])
y = np.array([4, 5, 6])

# broadcast x against y
b = np.broadcast(x, y)
# it has an `iters` attribute: a tuple of iterators over its components

print('broadcasting x against y:')
r, c = b.iters

# Python 3.x uses next(it); Python 2.x used it.next()
print(next(r), next(c))
print(next(r), next(c))
print('\n')
# the shape attribute returns the shape of the broadcast object

print('shape of the broadcast object:')
print(b.shape)
print('\n')
# manually add x and y using broadcast
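A hedged completion of the truncated demo above (the original cuts off here): iterate a fresh broadcast object to add x and y element-wise, then compare with the built-in broadcasting of x + y.

b = np.broadcast(x, y)            # fresh object; the iterators above are spent
c = np.empty(b.shape)
c.flat = [u + v for (u, v) in b]
print(np.array_equal(c, x + y))   # True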
Example #55
0
def prism_gravity(
    coordinates, prisms, density, field, dtype="float64", disable_checks=False
):
    """
    Compute gravitational fields of right-rectangular prisms in Cartesian coordinates.

    The gravitational fields are computed through the analytical solutions given by
    [Nagy2000]_ and [Nagy2002]_, which are valid on the entire domain. This means that
    the computation can be done at any point, either outside or inside the prism.

    This implementation makes use of the modified arctangent function proposed by
    [Fukushima2019]_ (eq. 12) so that the potential field satisfies Poisson's
    equation in the entire domain. Moreover, the logarithm function was also
    modified in order to remove the singularities that the analytical solution
    has at some points (see [Nagy2000]_).

    .. warning::
        The **z direction points upwards**, i.e. positive and negative values of
        ``upward`` represent points above and below the surface, respectively. But
        remember that the ``g_z`` field returns the downward component of the gravity
        acceleration so that positive density contrasts produce positive anomalies.

    Parameters
    ----------
    coordinates : list or 1d-array
        List or array containing ``easting``, ``northing`` and ``upward`` of the
        computation points defined on a Cartesian coordinate system. All coordinates
        should be in meters.
    prisms : list, 1d-array, or 2d-array
        List or array containing the coordinates of the prism(s) in the following order:
        west, east, south, north, bottom, top in a Cartesian coordinate system. All
        coordinates should be in meters. Coordinates for more than one prism can be
        provided. In this case, *prisms* should be a list of lists or 2d-array (with one
        prism per line).
    density : list or array
        List or array containing the density of each prism in kg/m^3.
    field : str
        Gravitational field that wants to be computed.
        The available fields are:

        - Gravitational potential: ``potential``
        - Downward acceleration: ``g_z``

    dtype : data-type (optional)
        Data type assigned to the resulting gravitational field. Default to
        ``np.float64``.
    disable_checks : bool (optional)
        Flag that controls whether to perform a sanity check on the model. Should be set
        to ``True`` only when it is certain that the input model is valid and it does not
        need to be checked. Default to ``False``.

    Returns
    -------
    result : array
        Gravitational field generated by the prisms on the computation points.

    Examples
    --------

    Compute the field of a single prism located beneath the surface, with a
    density of 2670 kg/m³:

    >>> prism = [-34, 5, -18, 14, -345, -146]
    >>> density = 2670
    >>> # Define a computation point above its center, 30 meters above the surface
    >>> coordinates = (130, 75, 30)
    >>> # Or define three computation points along the easting axis, 30 m above
    >>> # the surface
    >>> coordinates = ([-40, 0, 40], [0, 0, 0], [30, 30, 30])
    >>> # Compute the downward component of the gravity acceleration that the prism
    >>> # generates on the computation points
    >>> gz = prism_gravity(coordinates, prism, density, field="g_z")
    >>> print("({:.5f}, {:.5f}, {:.5f})".format(*gz))
    (0.06551, 0.06628, 0.06173)

    Define two prisms with positive and negative density contrasts

    >>> prisms = [[-134, -5, -45, 45, -200, -50], [5, 134, -45, 45, -180, -30]]
    >>> densities = [-300, 300]
    >>> # Compute the g_z that the prisms generate on the computation points
    >>> gz = prism_gravity(coordinates, prisms, densities, field="g_z")
    >>> print("({:.5f}, {:.5f}, {:.5f})".format(*gz))
    (-0.05379, 0.02908, 0.11235)

    """
    kernels = {"potential": kernel_potential, "g_z": kernel_g_z}
    if field not in kernels:
        raise ValueError("Gravity field {} not recognized".format(field))
    # Figure out the shape and size of the output array
    cast = np.broadcast(*coordinates[:3])
    result = np.zeros(cast.size, dtype=dtype)
    # Convert coordinates, prisms and density to arrays with proper shape
    coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])
    prisms = np.atleast_2d(prisms)
    density = np.atleast_1d(density).ravel()
    # Sanity checks
    if not disable_checks:
        if density.size != prisms.shape[0]:
            raise ValueError(
                "Number of elements in density ({}) ".format(density.size)
                + "mismatch the number of prisms ({})".format(prisms.shape[0])
            )
        _check_prisms(prisms)
    # Compute gravitational field
    jit_prism_gravity(coordinates, prisms, density, kernels[field], result)
    result *= GRAVITATIONAL_CONST
    # Convert to more convenient units
    if field == "g_z":
        result *= 1e5  # SI to mGal
    return result.reshape(cast.shape)
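A micro-sketch (NumPy assumed) of the shape bookkeeping used above: np.broadcast fixes the output shape up front, the computation runs on raveled 1-D arrays, and the result is reshaped back at the end.

import numpy as np

easting, northing, upward = np.zeros((2, 3)), 0.0, np.ones(3)
cast = np.broadcast(easting, northing, upward)
result = np.zeros(cast.size)
# ... fill `result` from the raveled coordinates ...
print(result.reshape(cast.shape).shape)  # (2, 3)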
Example #56
0
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs):
    """
    See https://gist.github.com/syrte/592a062c562cd2a98a83 

    Make a scatter plot of circles. 
    Similar to plt.scatter, but the sizes of the circles are in data units.
    Parameters
    ----------
    x, y : scalar or array_like, shape (n, )
        Input data
    s : scalar or array_like, shape (n, ) 
        Radius of circles.
    c : color or sequence of color, optional, default : 'b'
        `c` can be a single color format string, or a sequence of color
        specifications of length `N`, or a sequence of `N` numbers to be
        mapped to colors using the `cmap` and `norm` specified via kwargs.
        Note that `c` should not be a single numeric RGB or RGBA sequence 
        because that is indistinguishable from an array of values
        to be colormapped. (If you insist, use `color` instead.)  
        `c` can be a 2-D array in which the rows are RGB or RGBA, however. 
    vmin, vmax : scalar, optional, default: None
        `vmin` and `vmax` are used in conjunction with `norm` to normalize
        luminance data.  If either is `None`, the min and max of the
        color array is used.
    kwargs : `~matplotlib.collections.Collection` properties
        Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), 
        norm, cmap, transform, etc.
    Returns
    -------
    paths : `~matplotlib.collections.PathCollection`
    Examples
    --------
    a = np.arange(11)
    circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
    plt.colorbar()
    License
    --------
    This code is under [The BSD 3-Clause License]
    (http://opensource.org/licenses/BSD-3-Clause)
    """

    if np.isscalar(c):
        kwargs.setdefault('color', c)
        c = None

    if 'fc' in kwargs:
        kwargs.setdefault('facecolor', kwargs.pop('fc'))
    if 'ec' in kwargs:
        kwargs.setdefault('edgecolor', kwargs.pop('ec'))
    if 'ls' in kwargs:
        kwargs.setdefault('linestyle', kwargs.pop('ls'))
    if 'lw' in kwargs:
        kwargs.setdefault('linewidth', kwargs.pop('lw'))
    # You can set `facecolor` with an array (one entry per patch), while
    # `facecolors` only accepts a single value applied to all patches.

    zipped = np.broadcast(x, y, s)
    patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped]
    collection = PatchCollection(patches, **kwargs)
    if c is not None:
        c = np.broadcast_to(c, zipped.shape).ravel()
        collection.set_array(c)
        collection.set_clim(vmin, vmax)

    ax = plt.gca()
    ax.add_collection(collection)
    ax.autoscale_view()
    plt.draw_if_interactive()
    if c is not None:
        plt.sci(collection)
    return collection
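A quick usage sketch mirroring the docstring example; the ``plt.show()`` call is added here for non-interactive backends:

import numpy as np
import matplotlib.pyplot as plt

a = np.arange(11)
coll = circles(a, a, s=a * 0.2, c=a, alpha=0.5, ec='none')
plt.colorbar(coll)  # works because circles() registers the collection via plt.sci
plt.show()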
Example #57
    def evaluatePrior(self, eta1, eta2, lnR):
        b = numpy.broadcast(eta1, eta2, lnR)
        p = numpy.zeros(b.shape, dtype=lsst.meas.modelfit.Scalar)
        for i, (eta1i, eta2i, lnRi) in enumerate(b):
            p.flat[i] = self.prior.evaluate(numpy.array([eta1i, eta2i, lnRi]),
                                            self.amplitudes)
        return p
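The loop above is the generic ``np.broadcast`` iteration pattern: the broadcast object yields tuples of paired scalars, letting a scalar-only function be mapped over array inputs. A minimal sketch with hypothetical names (``evaluate_elementwise`` is not part of the original):

import numpy as np

def evaluate_elementwise(func, *args):
    # np.broadcast yields tuples of paired scalars in broadcast order,
    # so a scalar-only function can be mapped over array inputs.
    b = np.broadcast(*args)
    out = np.empty(b.shape)
    for i, values in enumerate(b):
        out.flat[i] = func(*values)
    return out

# e.g. evaluate_elementwise(max, np.arange(3), 1.5) -> array([1.5, 1.5, 2.])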
Example #58
def search_frequencies(t,
                       y,
                       dy,
                       LS_func=lomb_scargle,
                       LS_kwargs=None,
                       initial_guess=25,
                       limit_fractions=[0.04, 0.3, 0.9, 0.99],
                       n_eval=10000,
                       n_retry=5,
                       n_save=50):
    """Utility Routine to find the best frequencies

    To find the best frequency with a Lomb-Scargle periodogram requires
    searching a large range of frequencies at a very fine resolution.
    This is an iterative routine that searches progressively finer
    grids to narrow in on the best result.

    Parameters
    ----------
    t : array_like
        observed times
    y : array_like
        observed fluxes or magnitudes
    dy : array_like
        observed errors on y

    Other Parameters
    ----------------
    LS_func : function
        Function used to perform Lomb-Scargle periodogram.  The call signature
        should be LS_func(t, y, dy, omega, **kwargs)
        (Default is astroML.periodogram.lomb_scargle)
    LS_kwargs : dict
        dictionary of keyword arguments to pass to LS_func in addition to
        (t, y, dy, omega)
    initial_guess : float
        the initial guess of the best period
    limit_fractions : array_like
        the list of fractions to use when zooming in on peak possibilities.
        On the i^th iteration, with f_i = limit_fractions[i], the range
        probed around each candidate will be
        (candidate * f_i, candidate / f_i).
    n_eval : integer or list
        The number of points to evaluate in the range on each iteration.
        If n_eval is a list, it should have the same length as limit_fractions.
    n_retry : integer or list
        Number of top points to search on each iteration. If n_retry is a list,
        it should have the same length as limit_fractions.
    n_save : integer or list
        Number of evaluations to save on each iteration.
        If n_save is a list, it should have the same length as limit_fractions.

    Returns
    -------
    omega_top, power_top: ndarrays
        The saved values of omega and power.  These will have size
        1 + n_save * (1 + n_retry * len(limit_fractions))
        as long as n_save > n_retry
    """
    if LS_kwargs is None:
        LS_kwargs = dict()

    omega_best = [initial_guess]
    power_best = LS_func(t, y, dy, omega_best, **LS_kwargs)

    for (Ne, Nr, Ns, frac) in np.broadcast(n_eval, n_retry, n_save,
                                           limit_fractions):
        # make sure we explore differing regions
        log_ob = np.log(omega_best)
        width = 0.1 * np.log(frac)
        log_ob = np.floor(-log_ob / width).astype(int)
        indices = np.arange(len(log_ob))

        for i in range(Nr):
            if len(indices) == 0:
                break
            omega_try = omega_best[indices[-1]]
            non_duplicates = (log_ob != log_ob[-1])
            log_ob = log_ob[non_duplicates]
            indices = indices[non_duplicates]

            omega = np.linspace(omega_try * frac, omega_try / frac, Ne)
            power = LS_func(t, y, dy, omega, **LS_kwargs)
            i = np.argsort(power)[-Ns:]
            power_best = np.concatenate([power_best, power[i]])
            omega_best = np.concatenate([omega_best, omega[i]])

        i = np.argsort(power_best)
        power_best = power_best[i]
        omega_best = omega_best[i]

    i = np.argsort(omega_best)
    return omega_best[i], power_best[i]
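Note the loop header above: ``np.broadcast`` pairs the (possibly scalar) n_eval, n_retry, and n_save settings with each entry of limit_fractions, so scalars are repeated automatically. For example:

import numpy as np

# Scalars broadcast against the list, so every iteration sees the same
# n_eval/n_retry/n_save paired with a different limit fraction.
for Ne, Nr, Ns, frac in np.broadcast(10000, 5, 50, [0.04, 0.3, 0.9, 0.99]):
    print(Ne, Nr, Ns, frac)  # 10000 5 50 0.04, then 0.3, 0.9, 0.99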
Example #59
    def __call__(self, theta_rad, phi_rad):
        if self.backward:
            theta_rad = pi - theta_rad
        coef = -0.5 / self.sigma_rad**2
        out = ne.evaluate('exp(coef * theta_rad**2)')
        return reshape_broadcast(out, np.broadcast(theta_rad, phi_rad).shape)
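``reshape_broadcast`` is not defined in this snippet; a plausible reading (an assumption, not the original helper) is that it expands the computed values to the full broadcast shape of the two angle arrays:

import numpy as np

def reshape_broadcast(out, shape):
    # Hypothetical stand-in: expand the computed values to the full
    # broadcast shape of (theta_rad, phi_rad) without copying.
    return np.broadcast_to(out, shape)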
Example #60
def pv(rate, nper, pmt, fv=0.0, when='end'):
    """
    Compute the present value.

    Given:
     * a future value, `fv`
     * an interest `rate` compounded once per period, of which
       there are
     * `nper` total
     * a (fixed) payment, `pmt`, paid either
     * at the beginning (`when` = {'begin', 1}) or the end
       (`when` = {'end', 0}) of each period

    Return:
       the value now

    Parameters
    ----------
    rate : array_like
        Rate of interest (per period)
    nper : array_like
        Number of compounding periods
    pmt : array_like
        Payment
    fv : array_like, optional
        Future value
    when : {'begin', 1}, {'end', 0}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0))

    Returns
    -------
    out : ndarray, float
        Present value of a series of payments or investments.

    Notes
    -----
    The present value is computed by solving the equation::

     fv +
     pv*(1 + rate)**nper +
     pmt*(1 + rate*when)/rate*((1 + rate)**nper - 1) = 0

    or, when ``rate = 0``::

     fv + pv + pmt * nper = 0

    for `pv`, which is then returned.

    References
    ----------
    .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
       Open Document Format for Office Applications (OpenDocument)v1.2,
       Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
       Pre-Draft 12. Organization for the Advancement of Structured Information
       Standards (OASIS). Billerica, MA, USA. [ODT Document].
       Available:
       http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
       OpenDocument-formula-20090508.odt

    Examples
    --------
    What is the present value (e.g., the initial investment)
    of an investment that needs to total $15692.93
    after 10 years of saving $100 every month?  Assume the
    interest rate is 5% (annually) compounded monthly.

    >>> np.pv(0.05/12, 10*12, -100, 15692.93)
    -100.00067131625819

    By convention, the negative sign represents cash flow out
    (i.e., money not available today).  Thus, to end up with
    $15,692.93 in 10 years saving $100 a month at 5% annual
    interest, one's initial deposit should also be $100.

    If any input is array_like, ``pv`` returns an array of equal shape.
    Let's compare different interest rates in the example above:

    >>> a = np.array((0.05, 0.04, 0.03))/12
    >>> np.pv(a, 10*12, -100, 15692.93)
    array([ -100.00067132,  -649.26771385, -1273.78633713])

    So, to end up with the same $15692.93 under the same $100 per month
    "savings plan," for annual interest rates of 4% and 3%, one would
    need initial investments of $649.27 and $1273.79, respectively.

    """
    when = _convert_when(when)
    (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when])
    temp = (1+rate)**nper
    miter = np.broadcast(rate, nper, pmt, fv, when)
    zer = np.zeros(miter.shape)
    fact = np.where(rate == zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
    return -(fv + pmt*fact)/temp
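As a quick sanity check (a sketch, not part of the original), the returned value satisfies the equation from the Notes section, with when = 0 for payments due at period end:

import numpy as np

rate, nper, pmt, fv = 0.05 / 12, 10 * 12, -100, 15692.93
value = pv(rate, nper, pmt, fv)  # payments due at period end, so when = 0
residual = fv + value * (1 + rate) ** nper + pmt * ((1 + rate) ** nper - 1) / rate
print(np.isclose(residual, 0.0))  # True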