def draw(self, animating=False):
        cols, rows = self.size
        minx, maxx = self.xlimits
        miny, maxy = self.ylimits

        cwidth, cheight = self.cell_size

        # Grid-line coordinates as lists, so the x[0]/x[-1] indexing below works
        x = [minx + cwidth*i for i in range(cols+1)]
        y = [miny + cheight*i for i in range(rows+1)]

        if not animating:
            f = plt.figure(figsize=self.figsize)
        else:
            plt.clf()
        hlines = np.column_stack(np.broadcast_arrays(x[0], y, x[-1], y))
        vlines = np.column_stack(np.broadcast_arrays(x, y[0], x, y[-1]))
        lines = np.concatenate([hlines, vlines]).reshape(-1, 2, 2)
        line_collection = LineCollection(lines, color="black", linewidths=0.5)
        ax = plt.gca()
        ax.add_collection(line_collection)
        ax.set_xlim(x[0]-1, x[-1]+1)
        ax.set_ylim(y[0]-1, y[-1]+1)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.axis('off')
        self._draw_obstacles(plt.gca())
        self._draw_start_goal(plt.gca())

        return plt.gca()
Example #2
def refractions(n1, n2, ray_dirs, normals):
	"""Generates directions of rays refracted according to Snells's law (in its vector
	form, [2]
	
	Arguments: 
	n1, n2 - respectively the refractive indices of the medium the unrefracted ray
		travels in and of the medium the ray is entering.
	ray_dirs, normals - each a row of 3-component vectors (as an array) with the
		direction of incoming rays and corresponding normals at the points of
		incidence with the refracting surface.
	
	Returns:
	refracted - a boolean array stating which of the incoming rays has not
		undergone total internal reflection.
	refr_dirs - new ray directions as the result of refraction, for the non-TIR
		rays in the input bundle.
	"""
	# Broadcast all necessary arrays to the larger size required:
	n = N.broadcast_arrays(n2/n1, ray_dirs[0])[0]
	normals = N.broadcast_arrays(normals, ray_dirs)[0]
	cos1 = (normals*ray_dirs).sum(axis=0)
	refracted = cos1**2 >= 1 - n**2
	
	# Throw away totally-reflected rays.
	cos1 = cos1[refracted]
	ray_dirs = ray_dirs[:,refracted]
	normals = normals[:,refracted]
	n = n[refracted]
	
	refr_dirs = (ray_dirs - cos1*normals)/n
	cos2 = N.sqrt(1 - 1./n**2*(1 - cos1**2))
	refr_dirs += normals*cos2*N.where(cos1 < 0, -1, 1)
	
	return refracted, refr_dirs
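
A minimal sketch (not from the original source) of the broadcasting pattern used above: the scalar index ratio expands to one entry per ray, so the per-ray masking `n[refracted]` works.

import numpy as np

ray_dirs = np.array([[0.0, 0.6, -0.8],
                     [0.0, -0.6, 0.6],
                     [-1.0, 0.53, 0.0]])   # 3 rows of components, one column per ray
n = np.broadcast_arrays(1.52 / 1.0, ray_dirs[0])[0]
print(n)  # [1.52 1.52 1.52] -- one ratio per ray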
Example #3
    def draw(self):
        cols, rows = self.size
        minx, maxx = self.xlimits
        miny, maxy = self.ylimits

        width, height = self.cell_dimensions

        x = [minx + width*i for i in range(cols+1)]
        y = [miny + height*i for i in range(rows+1)]

        f = plt.figure(figsize=self.figsize)

        hlines = np.column_stack(np.broadcast_arrays(x[0], y, x[-1], y))
        vlines = np.column_stack(np.broadcast_arrays(x, y[0], x, y[-1]))
        lines = np.concatenate([hlines, vlines]).reshape(-1, 2, 2)
        line_collection = LineCollection(lines, color="black", linewidths=0.5)
        ax = plt.gca()
        ax.add_collection(line_collection)
        ax.set_xlim(x[0]-1, x[-1]+1)
        ax.set_ylim(y[0]-1, y[-1]+1)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.axis('off')
        self.draw_obstacles(plt.gca())

        return plt.gca()
Example #4
 def test_simple(self):
     orig = np.ma.masked_array([[1], [2], [3]], mask=[[1], [0], [1]])
     result = BroadcastArray(orig, {1: 2}, (2,)).masked_array()
     expected, _ = np.broadcast_arrays(orig.data, result)
     expected_mask, _ = np.broadcast_arrays(orig.mask, result)
     expected = np.ma.masked_array(expected, mask=expected_mask)
     assert_array_equal(result.mask, expected.mask)
     assert_array_equal(result.data, expected.data)
Example #5
 def half_edge_align(p, pts, polys):
     poly = align_polys(p, polys)
     mid   = pts[poly].mean(1)
     left  = pts[poly[:,[0,2]]].mean(1)
     right = pts[poly[:,[0,1]]].mean(1)
     s1 = np.array(np.broadcast_arrays(pts[p], mid, left)).swapaxes(0,1)
     s2 = np.array(np.broadcast_arrays(pts[p], mid, right)).swapaxes(0,1)
     return np.vstack([s1, s2])
Example #6
    def axes_to_table(axes):
        """Fill the observation group axes into a table.

        Define one row for each possible combination of the
        observation group axis bins. Each row will represent
        an observation group.

        Parameters
        ----------
        axes : list of `~gammapy.data.ObservationGroupAxis`
            List of observation group axes.

        Returns
        -------
        table : `~astropy.table.Table`
            Table containing the observation group definitions.
        """
        # define table column data
        column_data_min = []
        column_data_max = []
        # loop over observation axes
        for i_axis in range(len(axes)):
            if axes[i_axis].fmt == 'values':
                column_data_min.append(axes[i_axis].bins)
                column_data_max.append(axes[i_axis].bins)
            elif axes[i_axis].fmt == 'edges':
                column_data_min.append(axes[i_axis].bins[:-1])
                column_data_max.append(axes[i_axis].bins[1:])

        # define grids of column data
        ndim = len(axes)
        s0 = (1,) * ndim
        expanding_arrays = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
                            for i, x in enumerate(column_data_min)]
        column_data_expanded_min = np.broadcast_arrays(*expanding_arrays)
        expanding_arrays = [x.reshape(s0[:i] + (-1,) + s0[i + 1::])
                            for i, x in enumerate(column_data_max)]
        column_data_expanded_max = np.broadcast_arrays(*expanding_arrays)

        # recover units
        for i_dim in range(ndim):
            column_data_expanded_min[i_dim] = _recover_units(column_data_expanded_min[i_dim],
                                                             column_data_min[i_dim])
            column_data_expanded_max[i_dim] = _recover_units(column_data_expanded_max[i_dim],
                                                             column_data_max[i_dim])

        # Make the table
        table = Table()
        for i_axis in range(len(axes)):
            if axes[i_axis].fmt == 'values':
                table[axes[i_axis].name] = column_data_expanded_min[i_axis].flatten()
            elif axes[i_axis].fmt == 'edges':
                table[axes[i_axis].name + "_MIN"] = column_data_expanded_min[i_axis].flatten()
                table[axes[i_axis].name + "_MAX"] = column_data_expanded_max[i_axis].flatten()

        ObservationGroups._add_group_id(table, axes)

        return table
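
A short sketch (hypothetical bin values) of the grid-expansion idiom above: each 1-D bin array is reshaped to occupy its own axis, and `np.broadcast_arrays` then tiles them into the full cross product.

import numpy as np

axes_bins = [np.array([1, 2]), np.array([10, 20, 30])]
s0 = (1,) * len(axes_bins)
expanding = [x.reshape(s0[:i] + (-1,) + s0[i + 1:])
             for i, x in enumerate(axes_bins)]
expanded = np.broadcast_arrays(*expanding)
print(expanded[0].flatten())  # [1 1 1 2 2 2]
print(expanded[1].flatten())  # [10 20 30 10 20 30]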
Example #7
def _bandflux(model, band, time_or_phase, zp, zpsys):
    """Support function for bandflux in Source and Model.
    This is necessary to have outside because ``phase`` is used in Source
    and ``time`` is used in Model.
    """

    if zp is not None and zpsys is None:
        raise ValueError('zpsys must be given if zp is not None')

    # broadcast arrays
    if zp is None:
        time_or_phase, band = np.broadcast_arrays(time_or_phase, band)
    else:
        time_or_phase, band, zp, zpsys = \
            np.broadcast_arrays(time_or_phase, band, zp, zpsys)

    # convert all to 1d arrays
    ndim = time_or_phase.ndim # save input ndim for return val
    time_or_phase = np.atleast_1d(time_or_phase)
    band = np.atleast_1d(band)
    if zp is not None:
        zp = np.atleast_1d(zp)
        zpsys = np.atleast_1d(zpsys)

    # initialize output arrays
    bandflux = np.zeros(time_or_phase.shape, dtype=float)

    # Loop over unique bands.
    for b in set(band):
        mask = band == b
        b = get_bandpass(b)

        # Raise an exception if bandpass is out of model range.
        if (b.wave[0] < model.minwave() or b.wave[-1] > model.maxwave()):
            raise ValueError(
                'bandpass {0!r:s} [{1:.6g}, .., {2:.6g}] '
                'outside spectral range [{3:.6g}, .., {4:.6g}]'
                .format(b.name, b.wave[0], b.wave[-1], 
                        model.minwave(), model.maxwave()))

        # Get the flux
        f = model._flux(time_or_phase[mask], b.wave)
        fsum = np.sum(f * b.trans * b.wave * b.dwave, axis=1) / HC_ERG_AA

        if zp is not None:
            zpnorm = 10.**(0.4 * zp[mask])
            bandzpsys = zpsys[mask]
            for ms in set(bandzpsys):
                mask2 = bandzpsys == ms
                ms = get_magsystem(ms)
                zpnorm[mask2] = zpnorm[mask2] / ms.zpbandflux(b)
            fsum *= zpnorm

        bandflux[mask] = fsum

    if ndim == 0:
        return bandflux[0]
    return bandflux
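
An illustration (hypothetical values) of the broadcast step at the top of `_bandflux`: a scalar band name and zero point expand to match the time array.

import numpy as np

time, band, zp = np.broadcast_arrays([55000.0, 55001.0, 55002.0], 'sdssg', 25.0)
print(time.shape, band.shape, zp.shape)  # (3,) (3,) (3,)
print(band)  # ['sdssg' 'sdssg' 'sdssg']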
Example #8
def _reduce_points_and_bounds(points, lower_and_upper_bounds=None):
    """
    Reduce the dimensionality of arrays of coordinate points (and optionally
    bounds).

    Dimensions over which all values are the same are reduced to size 1, using
    :func:`_collapse_degenerate_points_and_bounds`.
    All size-1 dimensions are then removed.
    If the bounds arrays are also passed in, then all three arrays must have
    the same shape or be capable of being broadcast to match.

    Args:

    * points (array-like):
        Coordinate point values.

    Kwargs:

    * lower_and_upper_bounds (pair of array-like, or None):
        Corresponding bounds values (lower, upper), if any.

    Returns:
        dims (iterable of ints), points(array), bounds(array)

        * 'dims' is the mapping from the result array dimensions to the
            original dimensions.  However, when 'array' is scalar, 'dims' will
            be None (rather than an empty tuple).
        * 'points' and 'bounds' are the reduced arrays.
            If no bounds were passed, None is returned.

    """
    orig_points_dtype = np.asarray(points).dtype
    bounds = None
    if lower_and_upper_bounds is not None:
        lower_bounds, upper_bounds = np.broadcast_arrays(
            *lower_and_upper_bounds)
        orig_bounds_dtype = lower_bounds.dtype
        bounds = np.vstack((lower_bounds, upper_bounds)).T

    # Attempt to broadcast points to match bounds to handle scalars.
    if bounds is not None and points.shape != bounds.shape[:-1]:
        points, _ = np.broadcast_arrays(points, bounds[..., 0])

    points, bounds = _collapse_degenerate_points_and_bounds(points, bounds)

    used_dims = tuple(i_dim for i_dim in range(points.ndim)
                      if points.shape[i_dim] > 1)
    reshape_inds = tuple([points.shape[dim] for dim in used_dims])
    points = points.reshape(reshape_inds)
    points = points.astype(orig_points_dtype)
    if bounds is not None:
        bounds = bounds.reshape(reshape_inds + (2,))
        bounds = bounds.astype(orig_bounds_dtype)

    if not used_dims:
        used_dims = None

    return used_dims, points, bounds
Example #9
def uniform_distribution_overlap(m1, s1, m2, s2):
    '''Find the overlap between two uniform distributions
    
    Compute the integral of two uniform distributions
    centered on m1 and m2
    with widths 2 * s1 and 2 * s2
    and heights 1 / (2 * s1) and 1 / (2 * s2)
    '''
    h1h2 = 1 / (4 * s1 * s2)
    return np.clip((np.min(np.broadcast_arrays(m1 + s1, m2 + s2), axis=0) -
                    np.max(np.broadcast_arrays(m1 - s1, m2 - s2), axis=0)),
                   0, None) / h1h2
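
A hedged usage sketch, assuming `uniform_distribution_overlap` as defined above: shaping one input as a column and the other as a row broadcasts to a full pairwise overlap matrix.

import numpy as np

m1 = np.array([[0.0], [1.0]])   # shape (2, 1)
m2 = np.array([0.0, 2.0])       # shape (2,)
overlap = uniform_distribution_overlap(m1, 0.5, m2, 0.5)
print(overlap.shape)  # (2, 2) -- one entry per (m1, m2) pair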
Example #10
def test_broadcast_arrays():
    # Currently arrayfire is missing support for int64
    x2 = numpy.array([[1,2,3]], dtype=numpy.float32)
    y2 = numpy.array([[1],[2],[3]], dtype=numpy.float32)
    x1 = afnumpy.array(x2)
    y1 = afnumpy.array(y2)
    iassert(afnumpy.broadcast_arrays(x1, y1), numpy.broadcast_arrays(x2, y2))
    x1 = afnumpy.array([2])
    y1 = afnumpy.array(2)
    x2 = numpy.array([2])
    y2 = numpy.array(2)
    iassert(afnumpy.broadcast_arrays(x1, y1), numpy.broadcast_arrays(x2, y2))
Example #11
def bkgsubtract(space, bkg):
    if space.dimension == bkg.dimension:
        bkg.photons = bkg.photons * space.contributions / bkg.contributions
        bkg.photons[bkg.contributions == 0] = 0
        bkg.contributions = space.contributions
        return space - bkg
    else:
        photons = numpy.broadcast_arrays(space.photons, bkg.photons)[1]
        contributions = numpy.broadcast_arrays(space.contributions, bkg.contributions)[1]
        bkg = Space(space.axes)
        bkg.photons = photons
        bkg.contributions = contributions
        return bkgsubtract(space, bkg)
Example #12
    def score(self, outcomes, modelparams, expparams, return_L=False):

        na = np.newaxis
        n_m = modelparams.shape[0]
        n_e = expparams.shape[0]
        n_o = outcomes.shape[0]
        n_p = self.n_modelparams
        
        m = expparams['m'].reshape((1, 1, 1, n_e))
        
        L = self.likelihood(outcomes, modelparams, expparams)[na, ...]
        outcomes = outcomes.reshape((1, n_o, 1, 1))
        
        if not self._il:

            p, A, B = modelparams.T[:, :, np.newaxis]
            p = p.reshape((1, 1, n_m, 1))
            A = A.reshape((1, 1, n_m, 1))
            B = B.reshape((1, 1, n_m, 1))
        
            q = (-1)**(1-outcomes) * np.concatenate(np.broadcast_arrays(
                A * m * (p ** (m-1)), p**m, np.ones_like(p),
            ), axis=0) / L
            
        else:
        
            p_tilde, p_ref, A, B = modelparams.T[:, :, np.newaxis]
            p_C = p_tilde * p_ref
            
            mode = expparams['reference'][np.newaxis, :]
            
            p = np.where(mode, p_ref, p_C)
            
            p = p.reshape((1, 1, n_m, n_e))
            A = A.reshape((1, 1, n_m, 1))
            B = B.reshape((1, 1, n_m, 1))
        
            q = (-1)**(1-outcomes) * np.concatenate(np.broadcast_arrays(
                np.where(mode, 0, A * m * (p_tilde ** (m - 1)) * (p_ref ** m)),
                np.where(mode,
                    A * m * (p_ref ** (m - 1)),
                    A * m * (p_ref ** (m - 1)) * (p_tilde ** m)
                ),
                p**m, np.ones_like(p)
            ), axis=0) / L
        
        if return_L:
            # Need to strip off the extra axis we added for broadcasting to q.
            return q, L[0, ...]
        else:
            return q
Example #13
    def __setitem__(self, key, x):
        row, col = self._validate_indices(key)

        if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES):
            x = np.asarray(x, dtype=self.dtype)
            if x.size != 1:
                raise ValueError('Trying to assign a sequence to an item')
            self._set_intXint(row, col, x.flat[0])
            return

        if isinstance(row, slice):
            row = np.arange(*row.indices(self.shape[0]))[:, None]
        else:
            row = np.atleast_1d(row)

        if isinstance(col, slice):
            col = np.arange(*col.indices(self.shape[1]))[None, :]
            if row.ndim == 1:
                row = row[:, None]
        else:
            col = np.atleast_1d(col)

        i, j = np.broadcast_arrays(row, col)
        if i.shape != j.shape:
            raise IndexError('number of row and column indices differ')

        from .base import isspmatrix
        if isspmatrix(x):
            if i.ndim == 1:
                # Inner indexing, so treat them like row vectors.
                i = i[None]
                j = j[None]
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError('shape mismatch in assignment')
            if x.size == 0:
                return
            x = x.tocoo(copy=True)
            x.sum_duplicates()
            self._set_arrayXarray_sparse(i, j, x)
        else:
            # Make x and i into the same shape
            x = np.asarray(x, dtype=self.dtype)
            x, _ = np.broadcast_arrays(x, i)
            if x.shape != i.shape:
                raise ValueError("shape mismatch in assignment")
            if x.size == 0:
                return
            self._set_arrayXarray(i, j, x)
Example #14
    def _index_to_arrays(self, i, j):
        if isinstance(i, np.ndarray) and i.dtype.kind == 'b':
            i = self._boolean_index_to_array(i)
            if len(i)==2:
                if isinstance(j, slice):
                    j = i[1]

                else:
                    raise ValueError('too many indices for array')
            i = i[0]
        if isinstance(j, np.ndarray) and j.dtype.kind == 'b':
            j = self._boolean_index_to_array(j)[0]

        i_slice = isinstance(i, slice)
        if i_slice:
            i = self._slicetoarange(i, self.shape[0])[:,None]
        else:
            i = np.atleast_1d(i)

        if isinstance(j, slice):
            j = self._slicetoarange(j, self.shape[1])[None,:]
            if i.ndim == 1:
                i = i[:,None]
            elif not i_slice:
                raise IndexError('index returns 3-dim structure')
        elif isscalarlike(j):
            # row vector special case
            j = np.atleast_1d(j)
            if i.ndim == 1:
                i, j = np.broadcast_arrays(i, j)
                i = i[:, None]
                j = j[:, None]
                return i, j
        else:
            j = np.atleast_1d(j)
            if i_slice and j.ndim>1:
                raise IndexError('index returns 3-dim structure')

        i, j = np.broadcast_arrays(i, j)

        if i.ndim == 1:
            # return column vectors for 1-D indexing
            i = i[None,:]
            j = j[None,:]
        elif i.ndim > 2:
            raise IndexError("Index dimension must be <= 2")

        return i, j
Example #15
    def noisepower(self, bl_indices, f_indices, ndays=None):
        """Calculate the instrumental noise power spectrum.

        Assume we are still within the regime where the power spectrum is white
        in `m` modes.

        Parameters
        ----------
        bl_indices : array_like
            Indices of baselines to calculate.
        f_indices : array_like
            Indices of frequencies to calculate. Must be broadcastable against
            `bl_indices`.
        ndays : integer
            The number of sidereal days observed.

        Returns
        -------
        noise_ps : np.ndarray
            The noise power spectrum.
        """

        ndays = self.ndays if not ndays else ndays # Set to value if not set.

        # Broadcast arrays against each other
        bl_indices, f_indices = np.broadcast_arrays(bl_indices, f_indices)

        bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6
        # bw = 1.0e6 * (self.freq_upper - self.freq_lower) / self.num_freq
        delnu = units.t_sidereal * bw / (2*np.pi)
        noisepower = self.tsys(f_indices)**2 / (2 * np.pi * delnu * ndays)
        noisebase = noisepower / self.redundancy[bl_indices]

        return noisebase
Example #16
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0):
    np.random.seed(1234)

    x = [-1, 0, 1]
    s = list(range(1, len(y_shape)+1))
    s.insert(axis % (len(y_shape)+1), 0)
    y = np.random.rand(*((3,) + y_shape)).transpose(s)

    # Cython code chokes on y.shape = (0, 3) etc, skip them
    if y.size == 0:
        return

    xi = np.zeros(x_shape)
    yi = interpolator_cls(x, y, axis=axis)(xi)

    target_shape = ((deriv_shape or ()) + y.shape[:axis]
                    + x_shape + y.shape[axis:][1:])
    assert_equal(yi.shape, target_shape)

    # check it works also with lists
    if x_shape and y.size > 0:
        interpolator_cls(list(x), list(y), axis=axis)(list(xi))

    # check also values
    if xi.size > 0 and deriv_shape is None:
        bs_shape = (y.shape[:axis] + ((1,)*len(x_shape)) + y.shape[axis:][1:])
        yv = y[((slice(None,None,None),)*(axis % y.ndim))+(1,)].reshape(bs_shape)

        yi, y = np.broadcast_arrays(yi, yv)
        assert_allclose(yi, y)
Example #17
def cart2sphere(x, y, z):
    r''' Return angles for Cartesian 3D coordinates `x`, `y`, and `z`

    See doc for ``sphere2cart`` for angle conventions and derivation
    of the formulae.

    $0\le\theta\mathrm{(theta)}\le\pi$ and $-\pi\le\phi\mathrm{(phi)}\le\pi$

    Parameters
    ------------
    x : array-like
       x coordinate in Cartesian space
    y : array-like
       y coordinate in Cartesian space
    z : array-like
       z coordinate

    Returns
    ---------
    r : array
       radius
    theta : array
       inclination (polar) angle
    phi : array
       azimuth angle
    '''
    r = np.sqrt(x*x + y*y + z*z)
    theta = np.arccos(z/r)
    phi = np.arctan2(y, x)
    r, theta, phi = np.broadcast_arrays(r, theta, phi)
    return r, theta, phi
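
A quick check (assumed inputs): the final `np.broadcast_arrays` call expands results computed from scalar `y` and `z` against the `x` array, so all three outputs share one shape.

import numpy as np

r, theta, phi = cart2sphere(np.array([1.0, 1.0]), 0.0, 1.0)
print(r.shape, theta.shape, phi.shape)  # (2,) (2,) (2,)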
Example #18
    def noise_variance(self, bl_indices, f_indices, nt_per_day, ndays=None):
        """Calculate the instrumental noise variance.

        Parameters
        ----------
        bl_indices : array_like
            Indices of baselines to calculate.
        f_indices : array_like
            Indices of frequencies to calculate. Must be broadcastable against
            `bl_indices`.
        nt_per_day : integer
            The number of time samples in one sidereal day.
        ndays : integer
            The number of sidereal days observed.

        Returns
        -------
        noise_var : np.ndarray
            The noise variance.
        """

        ndays = self.ndays if not ndays else ndays # Set to value if not set.
        t_int = ndays * units.t_sidereal / nt_per_day
        # bw = 1.0e6 * (self.freq_upper - self.freq_lower) / self.num_freq
        bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6

        # Broadcast arrays against each other
        bl_indices, f_indices = np.broadcast_arrays(bl_indices, f_indices)

        return 2.0*self.tsys(f_indices)**2 / (t_int * bw * self.redundancy[bl_indices]) # 2.0 for two pol
Example #19
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).
    For example,
    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
        return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan,  nan,  21.,  32.])

    Notice it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.

    """
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        else:
            fillvalue = np.nan
    else:
        if f2 is not None:
            raise ValueError("Only one of (fillvalue, f2) can be given.")

    arrays = np.broadcast_arrays(*arrays)
    temp = tuple(np.extract(cond, arr) for arr in arrays)
    tcode = np.mintypecode([a.dtype.char for a in arrays])
    out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode)
    np.place(out, cond, f(*temp))
    if f2 is not None:
        temp = tuple(np.extract(~cond, arr) for arr in arrays)
        np.place(out, ~cond, f2(*temp))

    return out
Example #20
def day_of_year_to_cal(year, N, gregorian=True):
    """Convert a day of year number to a month and day in the Julian or
    Gregorian calendars.

    Arguments:
      - `year`      : year
      - `N`         : day of year, 1..365 (or 366 for leap years)

    Keywords:
      - `gregorian` : If True, use Gregorian calendar, else use Julian calendar
        (default: True)

    Return:
      - (month, day) : (tuple)

    """
    year = np.atleast_1d(year)
    N = np.atleast_1d(N)
    year, N = np.broadcast_arrays(year, N)
    K = np.ones_like(N)
    K[:] = 2
    K[np.atleast_1d(is_leap_year(year, gregorian))] = 1
    mon = (9 * (K + N) / 275.0 + 0.98).astype(np.int64)
    mon[N < 32] = 1
    day = (N - (275 * mon / 9.0).astype(np.int64) +
           K * ((mon + 9) / 12.0).astype(np.int64) + 30).astype(np.int64)
    return _scalar_if_one(mon), _scalar_if_one(day)
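
A hedged spot check, assuming `is_leap_year` and `_scalar_if_one` are defined as in this module: day 60 of the leap year 2020 should map to February 29.

print(day_of_year_to_cal(2020, 60))  # expected: (2, 29)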
Example #21
def frac_yr_to_jd(year, gregorian=True):
    """Convert a date in the Julian or Gregorian fractional year to the
    Julian Day Number (Meeus 7.1).

    Arguments:
      - `year` : (int, float)  year

    Keywords:
      - `gregorian` : (bool, default=True) If True, use Gregorian calendar,
        else use Julian calendar

    Returns:
      - (float)

    """
    year = np.atleast_1d(year)
    day = np.atleast_1d(0.0).astype(np.float64)
    year, day = list(map(np.array, np.broadcast_arrays(year, day)))
    # For float years abuse the day variable
    fyear = year - year.astype('i')
    mask = fyear > 0
    if np.any(mask):
        year = year.astype('i')
        days_in_year = cal_to_jd(year[mask] + 1) - cal_to_jd(year[mask])
        day[mask] = days_in_year*fyear[mask]
        return _scalar_if_one(cal_to_jd(year) + day)
    return _scalar_if_one(cal_to_jd(year))
Example #22
def _ndim_coords_from_arrays(points, ndim=None):
    """
    Convert a tuple of coordinate arrays to a (..., ndim)-shaped array.

    """
    if isinstance(points, tuple) and len(points) == 1:
        # handle argument tuple
        points = points[0]
    if isinstance(points, tuple):
        p = np.broadcast_arrays(*points)
        for j in range(1, len(p)):
            if p[j].shape != p[0].shape:
                raise ValueError(
                    "coordinate arrays do not have the same shape")
        points = np.empty(p[0].shape + (len(points),), dtype=float)
        for j, item in enumerate(p):
            points[..., j] = item
    else:
        points = np.asanyarray(points)
        # XXX Feed back to scipy.
        if points.ndim <= 1:
            if ndim is None:
                points = points.reshape(-1, 1)
            else:
                points = points.reshape(-1, ndim)
    return points
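
A short sketch of the tuple branch (hypothetical coordinates): two broadcastable arrays are combined into a single (..., ndim)-shaped array.

import numpy as np

pts = _ndim_coords_from_arrays((np.array([[0.0], [1.0]]), np.array([0.0, 2.0])))
print(pts.shape)  # (2, 2, 2) -- a 2x2 grid of (x, y) pairs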
Example #23
def cal_to_day_of_year(year, mon, day, gregorian=True):
    """Convert a date in the Julian or Gregorian calendars to day of the year
    (Meeus 7.1).

    Arguments:
      - `year` : (int) year
      - `mon`  : (int) month
      - `day`  : (int) day

    Keywords:
      - `gregorian` : (bool, default=True) If True, use Gregorian calendar,
        else use Julian calendar

    Return:
      - day number : 1 = Jan 1...365 (or 366 for leap years) = Dec 31.

    """
    year = np.atleast_1d(year).astype(np.int64)
    mon = np.atleast_1d(mon).astype(np.int64)
    day = np.atleast_1d(day).astype(np.int64)
    year, mon, day = np.broadcast_arrays(year, mon, day)
    K = np.ones_like(year)
    K[:] = 2
    K[np.atleast_1d(is_leap_year(year, gregorian))] = 1
    return _scalar_if_one(
        (275 * mon / 9.0).astype(np.int64) -
        (K * ((mon + 9) / 12.0).astype(np.int64)) + day - 30)
Example #24
def test_broadcast_arrays():
    # Test user defined dtypes
    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
    result = np.broadcast_arrays(a, b)
    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
Example #25
def test_beta():
    np.random.seed(1234)

    b = np.r_[np.logspace(-200, 200, 4),
              np.logspace(-10, 10, 4),
              np.logspace(-1, 1, 4),
              -1, -2.3, -3, -100.3, -10003.4]
    a = b

    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T

    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 400

        assert_func_equal(sc.beta,
                          lambda a, b: float(mpmath.beta(a, b)),
                          ab,
                          vectorized=False,
                          rtol=1e-10)

        assert_func_equal(
            sc.betaln,
            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
            ab,
            vectorized=False,
            rtol=1e-10)
    finally:
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
Example #26
def hyp0f1(v, z):
    r"""Confluent hypergeometric limit function 0F1.

    Parameters
    ----------
    v, z : array_like
        Input values.

    Returns
    -------
    hyp0f1 : ndarray
        The confluent hypergeometric limit function.

    Notes
    -----
    This function is defined as:

    .. math:: _0F_1(v,z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.

    It's also the limit as q -> infinity of ``1F1(q; v; z/q)``, and satisfies
    the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
    """
    v = atleast_1d(v)
    z = atleast_1d(z)
    v, z = np.broadcast_arrays(v, z)
    arg = 2 * sqrt(abs(z))
    old_err = np.seterr(all='ignore')  # for z=0, a<1 and num=inf, next lines
    num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
    den = abs(z)**((v - 1.0) / 2)
    num *= gamma(v)
    np.seterr(**old_err)
    num[z == 0] = 1
    den[z == 0] = 1
    return num / den
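
A hedged spot check, assuming the special-function names used above (`iv`, `jv`, `gamma`, `atleast_1d`) are in scope from scipy/numpy: at z = 0 both `num` and `den` are forced to 1, recovering the limit value of the series.

print(hyp0f1(1.0, 0.0))  # expected: [1.]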
Example #27
    def transform(self, pixel):
        """
        Transform pixel to world coordinates. You should pass in a Nx2 array
        of (x, y) pixel coordinates to transform to world coordinates. This
        will then return an NxM array where M is the number of dimensions in
        the WCS
        """

        if self.slice is None:
            pixel_full = pixel.copy()
        else:
            pixel_full = []
            for index in self.slice:
                if index == 'x':
                    pixel_full.append(pixel[:, 0])
                elif index == 'y':
                    pixel_full.append(pixel[:, 1])
                else:
                    pixel_full.append(index)
            pixel_full = np.array(np.broadcast_arrays(*pixel_full)).transpose()

        pixel_full += 1

        world = self.wcs.wcs_pix2world(pixel_full, 1)

        # At the moment, one has to manually check that the transformation
        # round-trips, otherwise it should be considered invalid.
        pixel_check = self.wcs.wcs_world2pix(world, 1)
        with np.errstate(invalid='ignore'):
            invalid = np.any(np.abs(pixel_check - pixel_full) > 1., axis=1)
        world[invalid] = np.nan

        return world
Example #28
def validate_inputs(*arrays, **kwargs):
    """Validate input arrays

    This checks that
    - Arrays are mutually broadcastable
    - Broadcasted arrays are one-dimensional

    Optionally, arrays are sorted according to the ``sort_by`` argument.

    Parameters
    ----------
    *arrays : ndarrays
        All non-keyword arguments are arrays which will be validated
    sort_by : array
        If specified, sort all inputs by the order given in this array.
    """
    arrays = np.broadcast_arrays(*arrays)
    sort_by = kwargs.pop('sort_by', None)

    if kwargs:
        raise ValueError("unrecognized arguments: {0}".format(kwargs.keys()))

    if arrays[0].ndim != 1:
        raise ValueError("Input arrays should be one-dimensional.")

    if sort_by is not None:
        isort = np.argsort(sort_by)
        if isort.shape != arrays[0].shape:
            raise ValueError("sort shape must equal array shape.")
        arrays = tuple([a[isort] for a in arrays])
    return arrays
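
Sketch of use (hypothetical data): the scalar broadcasts against the 1-D array, then both outputs are reordered by `sort_by`.

import numpy as np

t = np.array([3.0, 1.0, 2.0])
t_sorted, y_sorted = validate_inputs(t, 0.5, sort_by=t)
print(t_sorted)  # [1. 2. 3.]
print(y_sorted)  # [0.5 0.5 0.5]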
Example #29
def max_pool_backward_reshape(dout, cache):
  """
  A fast implementation of the backward pass for the max pooling layer that
  uses some clever broadcasting and reshaping.

  This can only be used if the forward pass was computed using
  max_pool_forward_reshape.

  NOTE: If there are multiple argmaxes, this method will assign gradient to
  ALL argmax elements of the input rather than picking one. In this case the
  gradient will actually be incorrect. However this is unlikely to occur in
  practice, so it shouldn't matter much. One possible solution is to split the
  upstream gradient equally among all argmax elements; this should result in a
  valid subgradient. You can make this happen by uncommenting the line below;
  however this results in a significant performance penalty (about 40% slower)
  and is unlikely to matter in practice so we don't do it.
  """
  x, x_reshaped, out = cache

  dx_reshaped = np.zeros_like(x_reshaped)
  out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
  mask = (x_reshaped == out_newaxis)
  dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
  dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
  dx_reshaped[mask] = dout_broadcast[mask]
  dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
  dx = dx_reshaped.reshape(x.shape)

  return dx
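
A minimal shape check (assumed 2x2 pooling of a 4x4 input) of the broadcast used above: the upstream gradient gains two singleton window axes, then expands to the unrolled shape so it can be scattered through the argmax mask.

import numpy as np

dout = np.arange(4.0).reshape(1, 1, 2, 2)    # (N, C, H_out, W_out)
dx_reshaped = np.zeros((1, 1, 2, 2, 2, 2))   # pooling windows unrolled
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
print(dout_broadcast.shape)  # (1, 1, 2, 2, 2, 2)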
Example #30
    def aim(self, yo, yp=None, z=None, a=None, surface=None, filter=True):
        if z is None:
            z = self.pupil_distance
        yo = np.atleast_2d(yo)
        if yp is not None:
            if a is None:
                a = self.pupil_radius
                a = np.array(((-a, -a), (a, a)))
            a = np.arctan2(a, z)
            yp = np.atleast_2d(yp)
            yp = self.map_pupil(yp, a, filter)
            yp = z*np.tan(yp)
            yo, yp = np.broadcast_arrays(yo, yp)

        y = np.zeros((yo.shape[0], 3))
        y[..., :2] = -yo*self.radius
        if surface:
            y[..., 2] = -surface.surface_sag(y)
        uz = (0, 0, z)
        if self.telecentric:
            u = uz
        else:
            u = uz - y
        if yp is not None:
            s, m = sagittal_meridional(u, uz)
            u += yp[..., 0, None]*s + yp[..., 1, None]*m
        normalize(u)
        if z < 0:
            u *= -1
        return y, u
Example #31
    def dib(self, d1=None, d2=None):
        """Days in base according to day count convention of the object

        Inputs:
            d1  - Initial date

            d2  - Final date

        Returns:
            dib - Integer or integer array with days in base

        Note: unlike other functions in this obj, function will only return
        array if two conditions are simultaneously met:
            (1) User is passing an array (standard); AND
            (2) There is potential ambiguity in the answer, i.e. if DIB
            truly depends on the input dates.
        If one of the conditions above fails, function will return scalar.
        """

        # Handle fixed cases with dict
        dibd = {
            'NL/365': 365,
            'BUS/30': 30,
            'BUS/252': 252,
            'BUS/1': 1,
            'ACT/365': 365,
            'ACT/365F': 365,
            'ACT/364': 364,
            'ACT/360': 360,
            '30A/360': 360,
            '30E/360': 360,
            '30E+/360': 360,
            '30E/360 ISDA': 360,
            '30U/360': 360
        }
        # Simple cases end here
        try:
            return dibd[self.dc]
        except KeyError:
            pass
        # Throw error for ACT/ACT ICMA
        if self.dc == 'ACT/ACT ICMA':
            raise AttributeError('The concept of days in base does not apply '
                                 'to the ACT/ACT ICMA convention')
        # We worry about vectorization, so we will use pandas to do this
        if self.dc == 'BUS/BUS':
            # Error checking delegated to BDY
            return self.bdy(d2)
        elif self.dc == 'ACT/ACT ISDA':
            # Error checking delegated to DY
            return self.dy(d1)
        elif self.dc == 'ACT/365L':
            # Error checking delegated to DY
            return self.dy(d2)
        elif self.dc == 'ACT/365A':
            # Note that this is NOT the same as the FRENCH case below,
            # as the interval is standard (closed above, and not below)
            d1 = self.adjust(d1)
            d2 = self.adjust(d2)
            if isinstance(d1, Timestamp) and isinstance(d2, Timestamp):
                if d2.day == 29 and d2.month == 2:
                    return 366
                else:
                    if self.hasleap(d1, d2):
                        return 366
                    else:
                        return 365
            # For the vectorized case, we assume the ACT/ACT AFB logic and
            # then fix the boundary
            leap = self.hasleap(d1, d2)
            base = asarray(366 * leap + 365 * ~leap, dtype='int64')
            # Guarantee dimension conformity
            d2, base = broadcast_arrays(d2, base)
            d2 = DatetimeIndex(d2)
            mask = (d2.day == 29) & (d2.month == 2)
            base[mask] = 366
            return base
        elif self.dc == 'ACT/ACT AFB':
            # The bizarre french case. No surprise here.
            d1 = self.adjust(d1)
            d2 = self.adjust(d2)
            leap = self.hasleap(d1, d2)
            if isinstance(leap, bool):
                return 366 * leap + 365 * (not leap)
            else:
                return asarray(366 * leap + 365 * ~leap, dtype='int64')
        elif self.dc == '1/1':
            # There are two (seemingly?) very different definitions for this
            # guy. The "FBF Master Agreement for Financial Transactions,
            # Supplement to the Derivatives Annex, Edition 2004, section
            # 7a." document states that no matter what, this will return 1.
            # Same applies for the OpenGamma documentation. On the other
            # hand, there are places that say that this is equivalent to
            # DIB(ACT/ACT) unless d1 == d2, in which case DIB == 365.25
            d1 = self.adjust(d1)
            d2 = self.adjust(d2)
            if isinstance(d1, Timestamp) and isinstance(d2, Timestamp):
                if (d1.day == d2.day and d1.month == d2.month) \
                        or (d1.month == 2 and d2.month == 2 and
                            d1.day in [28, 29] and d2.day in [28, 29]):
                    return 365.25
                else:
                    return self.dy(d1)
            else:  # We have at least 1 array. Because we only accept the
                # combinations of equally sized arrays or array + scalar,
                # we don't care about the broadcast
                mask = ((d1.day == d2.day) & (d1.month == d2.month)) | \
                       ((d1.month == 2) & (d2.month == 2) &
                        ((d1.day == 28) | (d1.day == 29)) &
                        ((d2.day == 28) | (d2.day == 29)))
                base = self.dy(d1)
                if isinstance(base, int):
                    # We handle the mask as two separate cases as the
                    # negation ~True returns -2
                    if isinstance(mask, bool):
                        nmask = not mask
                    else:
                        nmask = ~mask
                    return asarray(base * nmask + 365.25 * mask,
                                   dtype='float64')
                else:
                    base = base.astype('float64')
                    base[mask] = 365.25
                    return asarray(base, dtype='float64')
        else:
            raise NotImplementedError('Day count %s not supported' % self.dc)
Example #32
def rsh(l, m, theta, phi, normalization='quantum', condon_shortley=True):
    """
    Compute the real spherical harmonic (RSH) S_l^m(theta, phi).

    The RSH are obtained from Complex Spherical Harmonics (CSH) as follows:
    if m < 0:
        S_l^m = i / sqrt(2) * (Y_l^m - (-1)^m Y_l^{-m})
    if m == 0:
        S_l^m = Y_l^0
    if m > 0:
        S_l^m = 1 / sqrt(2) * (Y_l^{-m} + (-1)^m Y_l^m)
     (see [1])

    Various normalizations for the CSH exist, see the CSH() function. Since the CSH->RSH change of basis is unitary,
    the orthogonality and normalization properties of the RSH are the same as those of the CSH from which they were
    obtained. Furthermore, the operation of changing normalization and that of changing field
    (complex->real or vice-versa) commute, because the ratio c_m of normalization constants is always the same for
    m and -m (to see that this implies commutativity, substitute Y_l^m * c_m for Y_l^m in the above formula).

    Pinchon & Hoggan [2] define a different change of basis for CSH -> RSH, but they also use an unusual definition
    of CSH. To obtain RSH as defined by Pinchon-Hoggan, use this function with normalization='quantum'.

    References:
    [1] http://en.wikipedia.org/wiki/Spherical_harmonics#Real_form
    [2] Rotation matrices for real spherical harmonics: general rotations of atomic orbitals in space-fixed axes.

    :param l: non-negative integer; the degree of the CSH.
    :param m: integer, -l <= m <= l; the order of the CSH.
    :param theta: the colatitude / polar angle,
    ranging from 0 (North Pole, (X,Y,Z)=(0,0,1)) to pi (South Pole, (X,Y,Z)=(0,0,-1)).
    :param phi: the longitude / azimuthal angle, ranging from 0 to 2 pi.
    :param normalization: how to normalize the RSH:
    'seismology', 'quantum', 'geodesy'.
    these are immediately passed to the CSH functions, and since the change of basis
    from CSH to RSH is unitary, the orthogonality and normalization properties are unchanged.
    :return: the value of the real spherical harmonic S^l_m(theta, phi)
    """
    l, m, theta, phi = np.broadcast_arrays(l, m, theta, phi)
    # Get the CSH for m and -m, using Condon-Shortley phase (regardless of whether CS is requested or not)
    # The reason is that the code that changes from CSH to RSH assumes CS phase.

    a = csh(l=l, m=m, theta=theta, phi=phi, normalization=normalization, condon_shortley=True)
    b = csh(l=l, m=-m, theta=theta, phi=phi, normalization=normalization, condon_shortley=True)

    #if m > 0:
    #    y = np.array((b + ((-1.)**m) * a).real / np.sqrt(2.))
    #elif m < 0:
    #    y = np.array((1j * a - 1j * ((-1.)**(-m)) * b).real / np.sqrt(2.))
    #else:
    #    # For m == 0, the complex spherical harmonics are already real
    #    y = np.array(a.real)

    y = ((m > 0) * np.array((b + ((-1.)**m) * a).real / np.sqrt(2.))
         + (m < 0) * np.array((1j * a - 1j * ((-1.)**(-m)) * b).real / np.sqrt(2.))
         + (m == 0) * np.array(a.real))

    if condon_shortley:
        return y
    else:
        # Cancel the CS phase of y (i.e. multiply by -1 when m is both odd and greater than 0)
        return y * ((-1.) ** (m * (m > 0)))
Example #33
def interp_grid(
    old_model_obj,
    new_model_obj,
    shift_east=0,
    shift_north=0,
    pad=1,
    dim="2d",
    smooth_kernel=None,
):
    """
    interpolate an old grid onto a new one
    """

    if dim == "2d":
        north, east = np.broadcast_arrays(
            old_model_obj.grid_north[:, None] + shift_north,
            old_model_obj.grid_east[None, :] + shift_east,
        )

        # 2) do a 2D interpolation for each layer, much faster
        new_res = np.zeros((
            new_model_obj.grid_north.shape[0],
            new_model_obj.grid_east.shape[0],
            new_model_obj.grid_z.shape[0],
        ))

        for zz in range(new_model_obj.grid_z.shape[0]):
            try:
                old_zz = np.where(
                    old_model_obj.grid_z >= new_model_obj.grid_z[zz])[0][0]
            except IndexError:
                old_zz = -1

            print "New depth={0:.2f}; old depth={1:.2f}".format(
                new_model_obj.grid_z[zz], old_model_obj.grid_z[old_zz])

            new_res[:, :, zz] = spi.griddata(
                (north.ravel(), east.ravel()),
                old_model_obj.res_model[:, :, old_zz].ravel(),
                (new_model_obj.grid_north[:, None],
                 new_model_obj.grid_east[None, :]),
                method="linear",
            )

            new_res[0:pad, pad:-pad, zz] = new_res[pad, pad:-pad, zz]
            new_res[-pad:, pad:-pad, zz] = new_res[-pad - 1, pad:-pad, zz]
            new_res[:, 0:pad,
                    zz] = (new_res[:, pad,
                                   zz].repeat(pad).reshape(new_res[:, 0:pad,
                                                                   zz].shape))
            new_res[:, -pad:,
                    zz] = (new_res[:, -pad - 1,
                                   zz].repeat(pad).reshape(new_res[:, -pad:,
                                                                   zz].shape))

            if smooth_kernel is not None:
                new_res[:, :, zz] = smooth_2d(new_res[:, :, zz], smooth_kernel)

    elif dim == "3d":
        # 1) first need to make x, y, z have dimensions (nx, ny, nz), similar to res
        north, east, vert = np.broadcast_arrays(
            old_model_obj.grid_north[:, None, None],
            old_model_obj.grid_east[None, :, None],
            old_model_obj.grid_z[None, None, :],
        )

        # 2) next interpolate onto the new mesh (3D interpolation, slow)
        new_res = spi.griddata(
            (north.ravel(), east.ravel(), vert.ravel()),
            old_model_obj.res_model.ravel(),
            (
                new_model_obj.grid_north[:, None, None],
                new_model_obj.grid_east[None, :, None],
                new_model_obj.grid_z[None, None, :],
            ),
            method="linear",
        )

    print "Shape of new res = {0}".format(new_res.shape)
    return new_res
Example #34
def implied_vol(underlying_price, strike, expiry, option_price, put=False,
                initial_guess=0.5, assert_no_arbitrage=True):
    """Implied volatility function

    Inverts the Black-Scholes formula to find the volatility that matches the
    given option price. The implied volatility is computed using Newton's
    method.

    Parameters
    ----------
    underlying_price : float
        Price of the underlying asset.
    strike : float
        Strike of the option.
    expiry : float
        Time remaining until the expiry of the option.
    option_price : float
        Option price according to Black-Scholes formula.
    put : bool, optional
        Whether the option is a put option. Defaults to `False`.
    initial_guess : float, optional
        Initial guess for the implied volatility for the Newton's method.

    Returns
    -------
    float
        Implied volatility.

    Example
    -------

    >>> import numpy as np
    >>> from fyne import blackscholes
    >>> call_price = 11.77
    >>> put_price = 1.77
    >>> underlying_price = 100.
    >>> strike = 90.
    >>> expiry = 0.5
    >>> implied_vol = blackscholes.implied_vol(underlying_price, strike,
    ...                                        expiry, call_price)
    >>> np.round(implied_vol, 2)
    0.2
    >>> implied_vol = blackscholes.implied_vol(underlying_price, strike,
    ...                                        expiry, put_price, put=True)
    >>> np.round(implied_vol, 2)
    0.2

    """
    call = common._put_call_parity_reverse(option_price, underlying_price,
                                           strike, put)
    if assert_no_arbitrage:
        common._assert_no_arbitrage(underlying_price, call, strike)

    k = np.array(np.log(strike/underlying_price))
    c = np.array(call/underlying_price)
    k, expiry, c, initial_guess = np.broadcast_arrays(k, expiry, c, initial_guess)
    noarb_mask = ~np.any(
        common._check_arbitrage(underlying_price, call, strike), axis=0)
    noarb_mask &= ~np.any(
        tuple(map(np.isnan, (k, expiry, c, initial_guess))), axis=0)

    iv = np.full(c.shape, np.nan)
    iv[noarb_mask] = _reduced_implied_vol(k[noarb_mask], expiry[noarb_mask],
                                          c[noarb_mask],
                                          initial_guess[noarb_mask])
    return iv
Example #35
    def test_complete_losses(self):
        # this is testing full broadcasting

        n = np.newaxis
        (freqs, h_tgs, h_rgs, time_percents, versions, G_ts,
         G_rs) = np.broadcast_arrays(
             np.array(self.cases_freqs)[:, n, n, n, n],
             np.array(self.cases_h_tgs)[n, :, n, n, n],
             np.array(self.cases_h_rgs)[n, :, n, n, n],
             np.array(self.cases_time_percents)[n, n, :, n, n],
             np.array(self.cases_versions, dtype=np.int32)[n, n, n, :, n],
             np.array(self.cases_G_ts)[n, n, n, n, :],
             np.array(self.cases_G_rs)[n, n, n, n, :],
         )
        results = pathprof.losses_complete(
            freqs * apu.GHz,
            self.temperature,
            self.pressure,
            self.lon_t,
            self.lat_t,
            self.lon_r,
            self.lat_r,
            h_tgs * apu.m,
            h_rgs * apu.m,
            self.hprof_step,
            time_percents * apu.percent,
            G_t=G_ts * cnv.dBi,
            G_r=G_rs * cnv.dBi,
            omega=self.omega,
            version=versions,
        )

        for tup in np.nditer([
                freqs,
                h_tgs,
                h_rgs,
                time_percents,
                G_ts,
                G_rs,
                versions,
                results['L_b0p'],
                results['L_bd'],
                results['L_bs'],
                results['L_ba'],
                results['L_b'],
                results['L_b_corr'],
        ]):

            with ZipFile(self.cases_zip_name) as myzip:

                loss_name = self.loss_template.format(
                    float(tup[0]),
                    float(tup[1]),
                    float(tup[2]),
                    float(tup[3]),
                    float(tup[4]),
                    float(tup[5]),
                    int(tup[6]),
                )
                with myzip.open(loss_name, 'r') as f:
                    loss_true = json.loads(f.read().decode('utf-8'))

                for i, k in enumerate([
                        'L_b0p',
                        'L_bd',
                        'L_bs',
                        'L_ba',
                        'L_b',
                        'L_b_corr',
                ]):
                    assert_quantity_allclose(tup[i + 7], loss_true[k + '_t'])
Example #36
def float_array_broadcast(*args):
    return num.broadcast_arrays(
        *[num.asarray(x, dtype=float) for x in args])
Example #37
    def T_s(self, s, p=None, d=None, debug=False):
        """Temperature as a function of entropy
    T = T_s(s)
        or
    T = T_s(s,p)
        or
    T = T_s(s,d)

Accepts unit_energy / unit_matter / unit_temperature
        unit_pressure
        unit_matter / unit_volume
Returns unit_temperature
"""
        if p is None and d is None:
            p = pm.config['def_p']

        s = pm.units.energy(np.asarray(s, dtype=float), to_units='kJ')
        s = pm.units.matter(s, self.data['mw'], to_units='kmol', exponent=-1)
        s = pm.units.temperature(s, to_units='K', exponent=-1)
        if s.ndim == 0:
            s = np.reshape(s, (1, ))

        # If isobaric
        if p is not None:
            p = pm.units.pressure(np.asarray(p, dtype=float), to_units='Pa')
            if p.ndim == 0:
                p = np.reshape(p, (1, ))

            s, p = np.broadcast_arrays(s, p)
            # Adjust s by the pressure term
            s += pm.units.const_Ru * np.log(p / self.data['pref'])

            I = np.ones_like(s, dtype=bool)
            T = np.full_like(
                s, 0.5 * (self.data['Tlim'][0] + self.data['Tlim'][-1]))
            self._iter1(self._s,
                        'T',
                        s,
                        T,
                        I,
                        self.data['Tlim'][0],
                        self.data['Tlim'][-1],
                        verbose=debug)
        # If isochoric
        else:
            d = pm.units.matter(np.asarray(d, dtype=float),
                                self.data['mw'],
                                to_units='kmol')
            d = pm.units.volume(d, to_units='m3', exponent=-1)

            s, d = np.broadcast_arrays(s, d)

            R = pm.units.const_Ru

            # Define a custom iterator function
            def fn(T, d, diff):
                sd = 0.
                s, sT = self._s(T, diff)

                s -= R * np.log(d * R * T * 1e3 / (self.data['pref']))
                if diff:
                    sT -= R / T
                    sd = -R / d

                return s, sT, sd

            I = np.ones_like(s, dtype=bool)
            T = np.full_like(
                s, 0.5 * (self.data['Tlim'][0] + self.data['Tlim'][-1]))
            self._iter1(fn,
                        'T',
                        s,
                        T,
                        I,
                        self.data['Tlim'][0],
                        self.data['Tlim'][-1],
                        param={'d': d},
                        verbose=debug)

        pm.units.temperature_scale(T, from_units='K')
        return T
Example #38
 def get_idx(self):
     idxs = (0, 0)
     if self.axes is not None:
         for ax in self.axes:
             idxs += (np.arange(ax.nbin).reshape((-1, 1, 1)),)
     return np.broadcast_arrays(*idxs)
Example #39
def chol_solve_numpy(t, b, diageps=None):
    """
    t (..., n)
    b (..., n, m) or (n,)
    t[0] += diageps
    m = toeplitz(t)
    l = chol(m)
    return solve(l, b)
    pure numpy, object arrays supported
    """

    t = numpy.copy(t, subok=True)
    n = t.shape[-1]

    b = numpy.asanyarray(b)
    vec = b.ndim < 2
    if vec:
        b = b[:, None]
    assert b.shape[-2] == n

    if n == 0:
        shape = numpy.broadcast_shapes(t.shape[:-1], b.shape[:-2])
        shape += (n, ) if vec else b.shape[-2:]
        dtype = numpy.result_type(t.dtype, b.dtype)
        return numpy.empty(shape, dtype)

    if diageps is not None:
        t[..., 0] += diageps

    if numpy.any(t[..., 0] <= 0):
        msg = '1-th leading minor is not positive definite'
        raise numpy.linalg.LinAlgError(msg)

    norm = numpy.copy(t[..., 0, None], subok=True)
    t /= norm
    invLb = numpy.copy(numpy.broadcast_arrays(b, t[..., None])[0], subok=True)
    prevLi = t
    g = numpy.stack([numpy.roll(t, 1, -1), t], -2)

    for i in range(1, n):

        assert numpy.all(g[..., 0, i] > 0)
        rho = -g[..., 1, i, None, None] / g[..., 0, i, None, None]

        if numpy.any(numpy.abs(rho) >= 1):
            msg = '{}-th leading minor is not positive definite'.format(i + 1)
            raise numpy.linalg.LinAlgError(msg)

        gamma = numpy.sqrt((1 - rho) * (1 + rho))
        g[..., :, i:] += g[..., ::-1, i:] * rho
        g[..., :, i:] /= gamma
        Li = g[..., 0, i:]  # i-th column of L from row i
        invLb[..., i:, :] -= invLb[..., i - 1, None, :] * prevLi[..., i:, None]
        invLb[..., i, :] /= Li[..., 0, None]
        prevLi[..., i:] = Li
        g[..., 0, i:] = numpy.roll(g[..., 0, i:], 1, -1)

    invLb /= numpy.sqrt(norm[..., None])
    if vec:
        invLb = numpy.squeeze(invLb, -1)
    return invLb
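
A hedged sanity check, assuming `chol_solve_numpy` as defined above: build the dense Toeplitz matrix, take its Cholesky factor, and compare with a dense triangular solve.

import numpy

t = numpy.array([4.0, 1.0, 0.5])   # first Toeplitz column (hypothetical)
b = numpy.array([1.0, 2.0, 3.0])
m = t[numpy.abs(numpy.arange(3)[:, None] - numpy.arange(3))]  # toeplitz(t)
l = numpy.linalg.cholesky(m)
print(numpy.allclose(chol_solve_numpy(t, b), numpy.linalg.solve(l, b)))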
Example #40
    #    plt.show()
    # %%
    #    plt.figure(figsize=(12, 12))
    #    for i in range(0, 9):
    #        plt.subplot(3, 3, i+1)
    #        plt.title('Distance {} SD'.format(i+1))
    #        plt.imshow(np.logical_and(Zm < -i, Zm > -i-1))
    #        plt.axis('off')
    #    plt.show()

    # Boundaries between center and surround and limit of surround
    inner_b = 2
    outer_b = 4

    center_mask = np.logical_not(Zm < inner_b)
    center_mask_3d = np.broadcast_arrays(sta, center_mask[..., None])[1]
    surround_mask = np.logical_not(np.logical_and(Zm > inner_b, Zm < outer_b))
    surround_mask_3d = np.broadcast_arrays(sta, surround_mask[..., None])[1]

    # %%

    plt.subplot(2, 3, 4)
    plt.imshow(center_mask)
    plt.title(r'Center (<{}$\sigma$)'.format(inner_b))
    plt.subplot(2, 3, 5)
    plt.imshow(surround_mask)
    plt.title(r'Surround (Between {}$\sigma$ and {}$\sigma$)'.format(
        inner_b, outer_b))
    #    plt.show()

    sta_center = np.ma.array(sta, mask=center_mask_3d)
Example #41
 def _set_arrayXarray_sparse(self, row, col, x):
     # Fall back to densifying x
     x = np.asarray(x.toarray(), dtype=self.dtype)
     x, _ = np.broadcast_arrays(x, row)
     self._set_arrayXarray(row, col, x)
Example #42
    def _argparse(self, T=None, p=None, d=None,\
        temperature=False, pressure=False, density=False):
        """Parse the arguments supplied to an IG2 property method
    T = _argparse(*varg, **kwarg)
        OR
    T,p,d = _argparse(*varg, **kwarg, temperature=True, pressure=True, 
                        density=True)
    
_ARGPARSE automatically applies the default temperature and pressure,
def_T or def_p, from the pyromat.config system to deal with unspecified
parameters.  All inputs are re-cast as numpy arrays of at least one 
dimension and inputs are automatically converted from the configured 
user units into kJ, kmol, m^3.

The returned variables are arrays of temperature, T, pressure, p, and 
the density, d.  The optional keywords TEMPERATURE, PRESSURE, and 
DENSITY are used to indicate which state variables should be returned.
They are always returned in the order T, p, d.  
"""
        nparam = ((0 if T is None else 1) + (0 if p is None else 1) +
                  (0 if d is None else 1))

        if nparam == 1:
            if T is None:
                T = pm.config['def_T']
            else:
                p = pm.config['def_p']
        elif nparam == 0:
            T = pm.config['def_T']
            p = pm.config['def_p']
        elif nparam > 2:
            raise utility.PMParameterError(
                'Specifying more than two simultaneous parameters is illegal.')

        # Perform the unit conversions, and format the arrays
        if T is not None:
            T = pm.units.temperature_scale(np.asarray(T, dtype=float),
                                           to_units='K')
            if T.ndim == 0:
                T = np.reshape(T, (1, ))

        if p is not None:
            p = pm.units.pressure(np.asarray(p, dtype=float), to_units='Pa')
            if p.ndim == 0:
                p = np.reshape(p, (1, ))

        if d is not None:
            d = pm.units.matter(np.asarray(d, dtype=float),
                                self.data['mw'],
                                to_units='kmol')
            pm.units.volume(d, to_units='m3', exponent=-1, inplace=True)
            if d.ndim == 0:
                d = np.reshape(d, (1, ))

        # Convert the IG constant to J/kmol/K
        R = 1000 * pm.units.const_Ru

        # Case out the specified state variables
        # There are three possible combinations
        if T is not None:
            # T,p
            if p is not None:
                # Broadcast the arrays
                T, p = np.broadcast_arrays(T, p)
                # Do we need density?
                if density:
                    d = p / (R * T)
            # T,d
            else:
                # Broadcast the arrays
                T, d = np.broadcast_arrays(T, d)
                # Do we need pressure?
                if pressure:
                    p = d * R * T
        # p,d
        else:
            # Broadcast the arrays
            p, d = np.broadcast_arrays(p, d)
            # Do we need temperature?
            if temperature:
                T = p / (R * d)

        out = []
        if temperature:
            out.append(T)
        if pressure:
            out.append(p)
        if density:
            out.append(d)

        if len(out) > 1:
            return tuple(out)
        elif len(out) == 1:
            return out[0]
        return
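# A standalone sketch of the T,p branch above: broadcast the two given state
# variables, then close the ideal-gas relation for the third. R matches the
# J/kmol/K conversion in the method; no pyromat machinery is assumed.
import numpy as np

R = 1000 * 8.314462618             # J/kmol/K (approximate universal constant)
T = np.asarray([300.0, 400.0])     # K
p = np.asarray(101325.0)           # Pa
T, p = np.broadcast_arrays(T, p)   # align shapes, exactly as _argparse does
d = p / (R * T)                    # kmol/m^3, the missing third state variable
print(d)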
Example #43
from typing import Optional

def logsumexp(a: np.ndarray,
              axis: Optional[int] = None,
              b: Optional[np.ndarray] = None,
              keepdims: bool = False,
              return_sign: bool = False,
              base: Optional[float] = 10.):
    r"""
    Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.

    b : array_like, optional
        Scaling factor for exp(`a`); must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.

    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.

    return_sign : bool, optional
        If this is set to True, the result will be a pair containing sign
        information; if False, results that are negative will be returned
        as NaN. Default is False (no sign information).

    base : float, optional
        The base used in the exponentiation and the logarithm:
        $\log_{base} \sum (base)^{a}$. If None, the natural base e is used.

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
        is returned.

    sgn : ndarray
        If return_sign is True, this will be an array of floating-point
        numbers matching res and +1, 0, or -1 depending on the sign
        of the result. If False, only one result is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    NumPy has a logaddexp function which is very similar to `logsumexp`, but
    only handles two arguments. `logaddexp.reduce` is similar to this
    function, but may be less stable.
    """
    if b is not None:
        a, b = np.broadcast_arrays(a, b)
        if np.any(b == 0):
            a = a + 0.  # promote to at least float
            a[b == 0] = -np.inf

    a_max = np.amax(a, axis=axis, keepdims=True)

    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0

    if b is not None:
        b = np.asarray(b)
        if base is None or base == np.e:
            tmp = b * np.exp(a - a_max)
        else:
            tmp = b * base**(a - a_max)
    else:
        if base is None or base == np.e:
            tmp = np.exp(a - a_max)
        else:
            tmp = base**(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = np.sign(s)
            s *= sgn  # /= makes more sense but we need zero -> zero

        if base is None or base == np.e:
            out = np.log(s)
        else:
            out = np.log(s) / np.log(base)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    out += a_max

    if return_sign:
        return out, sgn
    else:
        return out
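# Quick check of the `base` keyword, assuming the logsumexp defined above is
# in scope: with base=10 the stabilized result must agree with the direct
# computation (which would overflow for large inputs).
import numpy as np

a = np.array([1.0, 2.0, 3.0])
stable = logsumexp(a, base=10.)
direct = np.log10(np.sum(10.0 ** a))
print(np.allclose(stable, direct))   # True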
Example #44
 def simple_broadcast(self, *args):
     return np.broadcast_arrays(*args)
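# For reference, what the one-line wrapper above returns: arrays expanded to
# the common broadcast shape as views of the originals, not copies.
import numpy as np

x = np.arange(3).reshape(3, 1)    # shape (3, 1)
y = np.arange(4)                  # shape (4,)
bx, by = np.broadcast_arrays(x, y)
print(bx.shape, by.shape)         # (3, 4) (3, 4)
print(np.shares_memory(x, bx))    # True -- a view, not a copy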
Example #45
def geometry_area_weights(cube, geometry, normalize=False):
    """
    Returns the array of weights corresponding to the area of overlap between
    the cells of cube's horizontal grid, and the given shapely geometry.

    The returned array is suitable for use with :const:`iris.analysis.MEAN`.

    The cube must have bounded horizontal coordinates.

    .. note::
        This routine works in Euclidean space. Area calculations do not
        account for the curvature of the Earth. And care must be taken to
        ensure any longitude values are expressed over a suitable interval.

    .. note::
        This routine currently does not handle all out-of-bounds cases
        correctly. In cases where both the coordinate bounds and the
        geometry's bounds lie outside the physically realistic range
        (i.e., abs(latitude) > 90., as is commonly the case when
        bounds are constructed via guess_bounds()), the weights
        calculation might be wrong. In this case, a UserWarning will
        be issued.

    Args:

    * cube (:class:`iris.cube.Cube`):
        A Cube containing a bounded, horizontal grid definition.
    * geometry (a shapely geometry instance):
        The geometry of interest. To produce meaningful results this geometry
        must have a non-zero area. Typically a Polygon or MultiPolygon.

    Kwargs:

    * normalize:
        Calculate each individual cell weight as the cell area overlap between
        the cell and the given shapely geometry divided by the total cell area.
        Default is False.

    """

    # extract smallest subcube containing geometry
    shape = cube.shape
    extraction_results = _extract_relevant_cube_slice(cube, geometry)

    # test if there is overlap between cube and geometry
    if extraction_results is None:
        return np.zeros(shape)

    subcube, subx_coord, suby_coord, bnds_ix = extraction_results
    x_min_ix, y_min_ix, x_max_ix, y_max_ix = bnds_ix

    # prepare the weights array
    subshape = list(cube.shape)
    x_dim = cube.coord_dims(subx_coord)[0]
    y_dim = cube.coord_dims(suby_coord)[0]
    subshape[x_dim] = subx_coord.shape[0]
    subshape[y_dim] = suby_coord.shape[0]
    subx_bounds = subx_coord.bounds
    suby_bounds = suby_coord.bounds
    subweights = np.empty(subshape, np.float32)

    # calculate the area weights
    for nd_index in np.ndindex(subweights.shape):
        xi = nd_index[x_dim]
        yi = nd_index[y_dim]
        x0, x1 = subx_bounds[xi]
        y0, y1 = suby_bounds[yi]
        polygon = Polygon([(x0, y0), (x0, y1), (x1, y1), (x1, y0)])
        subweights[nd_index] = polygon.intersection(geometry).area
        if normalize:
            subweights[nd_index] /= polygon.area

    # pad the calculated weights with zeros to match original cube shape
    weights = np.zeros(shape, np.float32)
    slices = []
    for i in range(weights.ndim):
        if i == x_dim:
            slices.append(slice(x_min_ix, x_max_ix + 1))
        elif i == y_dim:
            slices.append(slice(y_min_ix, y_max_ix + 1))
        else:
            slices.append(slice(None))

    weights[tuple(slices)] = subweights

    # Fix for the limitation of iris.analysis.MEAN weights handling.
    # Broadcast the array to the full shape of the cube
    weights = np.broadcast_arrays(weights, cube.data)[0]

    return weights
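# The final broadcast step above, in isolation: 2-D horizontal weights are
# expanded (as a view) to the full cube shape so extra leading dimensions
# such as time are covered. A minimal sketch with plain arrays:
import numpy as np

weights = np.zeros((4, 5), np.float32)   # (y, x) weights on the horizontal grid
data = np.empty((3, 4, 5))               # e.g. a (time, y, x) data payload
full = np.broadcast_arrays(weights, data)[0]
print(full.shape)                        # (3, 4, 5)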
Example #46
def extirpolate(x, y, N=None, M=4):
    """
    Extirpolate the values (x, y) onto an integer grid range(N),
    using Lagrange polynomial weights on the M nearest points.
    Parameters
    ----------
    x : array_like
        array of abscissas
    y : array_like
        array of ordinates
    N : int
        number of integer bins to use. For best performance, N should be larger
        than the maximum of x
    M : int
        number of adjoining points on which to extirpolate.

    Returns
    -------
    yN : ndarray
         N extirpolated values associated with range(N)

    Example
    -------
    >>> rng = np.random.RandomState(0)
    >>> x = 100 * rng.rand(20)
    >>> y = np.sin(x)
    >>> y_hat = extirpolate(x, y)
    >>> x_hat = np.arange(len(y_hat))
    >>> f = lambda x: np.sin(x / 10)
    >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
    True

    Notes
    -----
    This code is based on the C implementation of spread() presented in
    Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
    """
    x, y = map(np.ravel, np.broadcast_arrays(x, y))

    if N is None:
        N = int(np.max(x) + 0.5 * M + 1)

    # Now use Lagrange polynomial weights to populate the results array;
    # This is an efficient recursive implementation (See Press et al. 1989)
    result = np.zeros(N, dtype=y.dtype)

    # first take care of the easy cases where x is an integer
    integers = (x % 1 == 0)
    add_at(result, x[integers].astype(int), y[integers])
    x, y = x[~integers], y[~integers]

    # For each remaining x, find the index describing the extirpolation range.
    # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
    # adjusted so that the limits are within the range 0...N
    ilo = np.clip((x - M // 2).astype(int), 0, N - M)
    numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
    denominator = factorial(M - 1)

    for j in range(M):
        if j > 0:
            denominator *= j / (j - M)
        ind = ilo + (M - 1 - j)
        add_at(result, ind, numerator / (denominator * (x - ind)))
    return result
Example #47
def lombscargle_fastchi2(t,
                         y,
                         dy,
                         f0,
                         df,
                         Nf,
                         normalization='standard',
                         fit_mean=True,
                         center_data=True,
                         nterms=1,
                         use_fft=True,
                         trig_sum_kwds=None):
    """Lomb-Scargle Periodogram

    This implements a fast chi-squared periodogram using the algorithm
    outlined in [4]_. The result is identical to the standard Lomb-Scargle
    periodogram. The advantage of this algorithm is the
    ability to compute multiterm periodograms relatively quickly.

    Parameters
    ----------
    t, y, dy : array_like  (NOT astropy.Quantities)
        times, values, and errors of the data points. These should be
        broadcastable to the same shape.
    f0, df, Nf : (float, float, int)
        parameters describing the frequency grid, f = f0 + df * arange(Nf).
    normalization : string (optional, default='standard')
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    fit_mean : bool (optional, default=True)
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in the
        case of incomplete phase coverage.
    center_data : bool (optional, default=True)
        if True, pre-center the data by subtracting the weighted mean
        of the input data. This is especially important if ``fit_mean = False``
    nterms : int (optional, default=1)
        Number of Fourier terms in the fit

    Returns
    -------
    power : array_like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [2] W. Press et al, Numerical Recipes in C (2002)
    .. [3] Scargle, J.D. ApJ 263:835-853 (1982)
    .. [4] Palmer, J. ApJ 695:496-502 (2009)
    """
    if nterms == 0 and not fit_mean:
        raise ValueError("Cannot have nterms = 0 without fitting bias")

    if dy is None:
        dy = 1

    # Validate and setup input data
    t, y, dy = np.broadcast_arrays(t, y, dy)
    if t.ndim != 1:
        raise ValueError("t, y, dy should be one dimensional")

    # Validate and setup frequency grid
    if f0 < 0:
        raise ValueError("Frequencies must be positive")
    if df <= 0:
        raise ValueError("Frequency steps must be positive")
    if Nf <= 0:
        raise ValueError("Number of frequencies must be positive")

    w = dy**-2.0
    ws = np.sum(w)

    # if fit_mean is true, centering the data now simplifies the math below.
    if center_data or fit_mean:
        y = y - np.dot(w, y) / ws

    yw = y / dy
    chi2_ref = np.dot(yw, yw)

    kwargs = dict.copy(trig_sum_kwds or {})
    kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)

    # Here we build-up the matrices XTX and XTy using pre-computed
    # sums. The relevant identities are
    # 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x
    # 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x
    # 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x

    yws = np.sum(y * w)

    SCw = [(np.zeros(Nf), ws * np.ones(Nf))]
    SCw.extend([
        trig_sum(t, w, freq_factor=i, **kwargs)
        for i in range(1, 2 * nterms + 1)
    ])
    Sw, Cw = zip(*SCw)

    SCyw = [(np.zeros(Nf), yws * np.ones(Nf))]
    SCyw.extend([
        trig_sum(t, w * y, freq_factor=i, **kwargs)
        for i in range(1, nterms + 1)
    ])
    Syw, Cyw = zip(*SCyw)

    # Now create an indexing scheme so we can quickly
    # build-up matrices at each frequency
    order = [('C', 0)] if fit_mean else []
    order.extend(sum([[('S', i), ('C', i)] for i in range(1, nterms + 1)], []))

    funcs = dict(S=lambda m, i: Syw[m][i],
                 C=lambda m, i: Cyw[m][i],
                 SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]),
                 CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]),
                 SC=lambda m, n, i: 0.5 *
                 (np.sign(m - n) * Sw[abs(m - n)][i] + Sw[m + n][i]),
                 CS=lambda m, n, i: 0.5 *
                 (np.sign(n - m) * Sw[abs(n - m)][i] + Sw[n + m][i]))

    def compute_power(i):
        XTX = np.array([[funcs[A[0] + B[0]](A[1], B[1], i) for A in order]
                        for B in order])
        XTy = np.array([funcs[A[0]](A[1], i) for A in order])
        return np.dot(XTy.T, np.linalg.solve(XTX, XTy))

    p = np.array([compute_power(i) for i in range(Nf)])

    if normalization == 'psd':
        p *= 0.5 * t.size / ws
    elif normalization == 'standard':
        p /= chi2_ref
    elif normalization == 'log':
        p = -np.log(1 - p / chi2_ref)
    elif normalization == 'model':
        p /= chi2_ref - p
    else:
        raise ValueError("normalization='{0}' "
                         "not recognized".format(normalization))
    return p
Example #48
def sphere2cart(r, theta, phi):
    """ Spherical to Cartesian coordinates

    This is the standard physics convention where `theta` is the
    inclination (polar) angle, and `phi` is the azimuth angle.

    Imagine a sphere with center (0,0,0).  Orient it with the z axis
    running south-north, the y axis running west-east and the x axis
    from posterior to anterior.  `theta` (the inclination angle) is the
    angle to rotate from the z-axis (the zenith) around the y-axis,
    towards the x axis.  Thus the rotation is counter-clockwise from the
    point of view of positive y.  `phi` (azimuth) gives the angle of
    rotation around the z-axis towards the y axis.  The rotation is
    counter-clockwise from the point of view of positive z.

    Equivalently, given a point P on the sphere, with coordinates x, y,
    z, `theta` is the angle between P and the z-axis, and `phi` is
    the angle between the projection of P onto the XY plane, and the X
    axis.

    Geographical nomenclature designates theta as 'co-latitude', and phi
    as 'longitude'

    Parameters
    ------------
    r : array_like
       radius
    theta : array_like
       inclination or polar angle
    phi : array_like
       azimuth angle

    Returns
    ---------
    x : array
       x coordinate(s) in Cartesian space
    y : array
       y coordinate(s) in Cartesian space
    z : array
       z coordinate(s) in Cartesian space

    Notes
    --------
    See these pages:

    * http://en.wikipedia.org/wiki/Spherical_coordinate_system
    * http://mathworld.wolfram.com/SphericalCoordinates.html

    for excellent discussion of the many different conventions
    possible.  Here we use the physics conventions, used in the
    wikipedia page.

    Derivations of the formulae are simple. Consider a vector x, y, z of
    length r (norm of x, y, z).  The inclination angle (theta) can be
    found from: cos(theta) == z / r -> z == r * cos(theta).  This gives
    the hypotenuse of the projection onto the XY plane, which we will
    call Q. Q == r*sin(theta). Now x / Q == cos(phi) -> x == r *
    sin(theta) * cos(phi) and so on.

    We have deliberately named this function ``sphere2cart`` rather than
    ``sph2cart`` to distinguish it from the Matlab function of that
    name, because the Matlab function uses an unusual convention for the
    angles that we did not want to replicate.  The Matlab function is
    trivial to implement with the formulae given in the Matlab help.
    """
    sin_theta = np.sin(theta)
    x = r * np.cos(phi) * sin_theta
    y = r * np.sin(phi) * sin_theta
    z = r * np.cos(theta)
    x, y, z = np.broadcast_arrays(x, y, z)
    return x, y, z
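# Spot check of the conventions described above, assuming sphere2cart is in
# scope: the north pole, a point on the x-axis, and a point on the y-axis.
import numpy as np

r = 1.0
theta = np.array([0.0, np.pi / 2, np.pi / 2])   # inclination from the z-axis
phi = np.array([0.0, 0.0, np.pi / 2])           # azimuth from the x-axis
x, y, z = sphere2cart(r, theta, phi)
print(np.round(x, 12))   # [0. 1. 0.]
print(np.round(y, 12))   # [0. 0. 1.]
print(np.round(z, 12))   # [1. 0. 0.]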
Example #49
    def __init__(self, selection, array):

        # some initial normalization
        selection = ensure_tuple(selection)
        selection = tuple([i] if is_integer(i) else i for i in selection)
        selection = replace_lists(selection)

        # validation
        if not is_coordinate_selection(selection, array):
            raise IndexError(
                'invalid coordinate selection; expected one integer '
                '(coordinate) array per dimension of the target array, '
                'got {!r}'.format(selection))

        # handle wraparound, boundscheck
        for dim_sel, dim_len in zip(selection, array.shape):

            # handle wraparound
            wraparound_indices(dim_sel, dim_len)

            # handle out of bounds
            boundscheck_indices(dim_sel, dim_len)

        # compute chunk index for each point in the selection
        chunks_multi_index = tuple(
            dim_sel // dim_chunk_len
            for (dim_sel, dim_chunk_len) in zip(selection, array._chunks))

        # broadcast selection - this will raise error if array dimensions don't match
        selection = np.broadcast_arrays(*selection)
        chunks_multi_index = np.broadcast_arrays(*chunks_multi_index)

        # remember shape of selection, because we will flatten indices for processing
        self.sel_shape = selection[0].shape if selection[0].shape else (1, )

        # flatten selection
        selection = [dim_sel.reshape(-1) for dim_sel in selection]
        chunks_multi_index = [
            dim_chunks.reshape(-1) for dim_chunks in chunks_multi_index
        ]

        # ravel chunk indices
        chunks_raveled_indices = np.ravel_multi_index(chunks_multi_index,
                                                      dims=array._cdata_shape)

        # group points by chunk
        if np.any(np.diff(chunks_raveled_indices) < 0):
            # optimisation, only sort if needed
            sel_sort = np.argsort(chunks_raveled_indices)
            selection = tuple(dim_sel[sel_sort] for dim_sel in selection)
        else:
            sel_sort = None

        # store attributes
        self.selection = selection
        self.sel_sort = sel_sort
        self.shape = selection[0].shape if selection[0].shape else (1, )
        self.drop_axes = None
        self.array = array

        # precompute number of selected items for each chunk
        self.chunk_nitems = np.bincount(chunks_raveled_indices,
                                        minlength=array.nchunks)
        self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
        # locate the chunks we need to process
        self.chunk_rixs = np.nonzero(self.chunk_nitems)[0]

        # unravel chunk indices
        # pass the shape positionally: the old ``dims=`` keyword of
        # np.unravel_index is gone in modern numpy
        self.chunk_mixs = np.unravel_index(self.chunk_rixs,
                                           array._cdata_shape)
Example #50
def trig_sum(t,
             h,
             df,
             N,
             f0=0,
             freq_factor=1,
             oversampling=5,
             use_fft=True,
             Mfft=4):
    """Compute (approximate) trigonometric sums for a number of frequencies
    This routine computes weighted sine and cosine sums:
        S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
        C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
    Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
    The sums can be computed either by a brute force O[N^2] method, or
    by an FFT-based O[Nlog(N)] method.

    Parameters
    ----------
    t : array_like
        array of input times
    h : array_like
        array weights for the sum
    df : float
        frequency spacing
    N : int
        number of frequency bins to return
    f0 : float (optional, default=0)
        The low frequency to use
    freq_factor : float (optional, default=1)
        Factor which multiplies the frequency
    use_fft : bool
        if True, use the approximate FFT algorithm to compute the result.
        This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
    oversampling : int (default = 5)
        oversampling factor for the approximation; roughly the number of
        time samples across the highest-frequency sinusoid. This parameter
        controls the tradeoff between accuracy and speed. Not referenced
        if use_fft is False.
    Mfft : int
        The number of adjacent points to use in the FFT approximation.
        Not referenced if use_fft is False.

    Returns
    -------
    S, C : ndarrays
        summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
    """
    df *= freq_factor
    f0 *= freq_factor

    if df <= 0:
        raise ValueError("df must be positive")
    t, h = map(np.ravel, np.broadcast_arrays(t, h))

    if use_fft:
        Mfft = int(Mfft)
        if Mfft <= 0:
            raise ValueError("Mfft must be positive")

        # required size of fft is the power of 2 above the oversampling rate
        Nfft = bitceil(N * oversampling)
        t0 = t.min()

        if f0 > 0:
            h = h * np.exp(2j * np.pi * f0 * (t - t0))

        tnorm = ((t - t0) * Nfft * df) % Nfft
        grid = extirpolate(tnorm, h, Nfft, Mfft)

        fftgrid = np.fft.ifft(grid)[:N]
        if t0 != 0:
            f = f0 + df * np.arange(N)
            fftgrid *= np.exp(2j * np.pi * t0 * f)

        C = Nfft * fftgrid.real
        S = Nfft * fftgrid.imag
    else:
        f = f0 + df * np.arange(N)
        C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
        S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))

    return S, C
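# The slow branch of trig_sum written out standalone, to make the definition
# of S_j and C_j concrete; with use_fft=True the function returns an
# extirpolation-based approximation of exactly these sums.
import numpy as np

t = np.array([0.0, 0.1, 0.35, 0.7])    # sample times
h = np.array([1.0, -0.5, 0.25, 2.0])   # weights
f = 0.5 + 0.25 * np.arange(8)          # f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
print(S.shape, C.shape)                # (8,) (8,)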
Example #51
def allclose(a,
             b,
             rtol=4 * np.finfo(float).eps,
             atol=0.0,
             equal_nan=False,
             verbose=False):
    """Returns True if two arrays are element-wise equal within a tolerance.

    This function is essentially a wrapper for the `quaternion.isclose`
    function, but returns a single boolean value of True if all elements
    of the output from `quaternion.isclose` are True, and False otherwise.
    This function also adds the option of verbose output.

    Note that this function has stricter tolerances than the
    `numpy.allclose` function, as well as the additional `verbose` option.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.
    verbose : bool
        If the return value is False, all the non-close values are printed,
        iterating through the non-close indices in order, displaying the
        array values along with the index, with a separate line for each
        pair of values.

    See Also
    --------
    isclose, numpy.all, numpy.any, numpy.allclose

    Returns
    -------
    allclose : bool
        Returns True if the two arrays are equal within the given
        tolerance; False otherwise.


    Notes
    -----
    If the following equation is element-wise True, then allclose returns
    True.

      absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    The above equation is not symmetric in `a` and `b`, so that
    `allclose(a, b)` might be different from `allclose(b, a)` in
    some rare cases.

    """
    close = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    result = np.all(close)
    if verbose and not result:
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        a, b = np.broadcast_arrays(a, b)
        print('Non-close values:')
        for i in zip(*np.nonzero(~close)):
            print('    a[{0}]={1}\n    b[{0}]={2}'.format(i, a[i], b[i]))
    return result
Example #52
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.

        .. versionadded:: 0.11.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.

        .. versionadded:: 0.15.0
    b : array-like, optional
        Scaling factor for exp(`a`) must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.

        .. versionadded:: 0.12.0
    return_sign : bool, optional
        If this is set to True, the result will be a pair containing sign
        information; if False, results that are negative will be returned
        as NaN. Default is False (no sign information).

        .. versionadded:: 0.16.0

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
        is returned.
    sgn : ndarray
        If return_sign is True, this will be an array of floating-point
        numbers matching res and +1, 0, or -1 depending on the sign
        of the result. If False, only one result is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    NumPy has a logaddexp function which is very similar to `logsumexp`, but
    only handles two arguments. `logaddexp.reduce` is similar to this
    function, but may be less stable.

    Examples
    --------
    >>> from scipy.special import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107

    With weights

    >>> a = np.arange(10)
    >>> b = np.arange(10, 0, -1)
    >>> logsumexp(a, b=b)
    9.9170178533034665
    >>> np.log(np.sum(b*np.exp(a)))
    9.9170178533034647

    Returning a sign flag

    >>> logsumexp([1,2],b=[1,-1],return_sign=True)
    (1.5413248546129181, -1.0)

    Notice that `logsumexp` does not directly support masked arrays. To use it
    on a masked array, convert the mask into zero weights:

    >>> a = np.ma.array([np.log(2), 2, np.log(3)],
    ...                  mask=[False, True, False])
    >>> b = (~a.mask).astype(int)
    >>> logsumexp(a.data, b=b), np.log(5)
    (1.6094379124341005, 1.6094379124341005)

    """
    a = _asarray_validated(a, check_finite=False)
    if b is not None:
        a, b = np.broadcast_arrays(a, b)
        if np.any(b == 0):
            a = a + 0.  # promote to at least float
            a[b == 0] = -np.inf

    a_max = np.amax(a, axis=axis, keepdims=True)

    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0

    if b is not None:
        b = np.asarray(b)
        tmp = b * np.exp(a - a_max)
    else:
        tmp = np.exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = np.sign(s)
            s *= sgn  # /= makes more sense but we need zero -> zero
        out = np.log(s)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    out += a_max

    if return_sign:
        return out, sgn
    else:
        return out
Example #53
def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
    """
    Compute the interest portion of a payment.

    Parameters
    ----------
    rate : scalar or array_like of shape(M, )
        Rate of interest as decimal (not per cent) per period
    per : scalar or array_like of shape(M, )
        Interest paid against the loan changes during the life of the loan.
        The `per` is the payment period to calculate the interest amount.
    nper : scalar or array_like of shape(M, )
        Number of compounding periods
    pv : scalar or array_like of shape(M, )
        Present value
    fv : scalar or array_like of shape(M, ), optional
        Future value
    when : {{'begin', 1}, {'end', 0}}, {string, int}, optional
        When payments are due ('begin' (1) or 'end' (0)).
        Defaults to {'end', 0}.

    Returns
    -------
    out : ndarray
        Interest portion of payment.  If all input is scalar, returns a scalar
        float.  If any input is array_like, returns interest payment for each
        input element. If multiple inputs are array_like, they all must have
        the same shape.

    See Also
    --------
    ppmt, pmt, pv

    Notes
    -----
    The total payment is made up of payment against principal plus interest.

    ``pmt = ppmt + ipmt``

    Examples
    --------
    What is the amortization schedule for a 1 year loan of $2500 at
    8.24% interest per year compounded monthly?

    >>> principal = 2500.00

    The 'per' variable represents the periods of the loan.  Remember that
    financial equations start the period count at 1!

    >>> per = np.arange(1*12) + 1
    >>> ipmt = np.ipmt(0.0824/12, per, 1*12, principal)
    >>> ppmt = np.ppmt(0.0824/12, per, 1*12, principal)

    Each element of the sum of the 'ipmt' and 'ppmt' arrays should equal
    'pmt'.

    >>> pmt = np.pmt(0.0824/12, 1*12, principal)
    >>> np.allclose(ipmt + ppmt, pmt)
    True

    >>> fmt = '{0:2d} {1:8.2f} {2:8.2f} {3:8.2f}'
    >>> for payment in per:
    ...     index = payment - 1
    ...     principal = principal + ppmt[index]
    ...     print(fmt.format(payment, ppmt[index], ipmt[index], principal))
     1  -200.58   -17.17  2299.42
     2  -201.96   -15.79  2097.46
     3  -203.35   -14.40  1894.11
     4  -204.74   -13.01  1689.37
     5  -206.15   -11.60  1483.22
     6  -207.56   -10.18  1275.66
     7  -208.99    -8.76  1066.67
     8  -210.42    -7.32   856.25
     9  -211.87    -5.88   644.38
    10  -213.32    -4.42   431.05
    11  -214.79    -2.96   216.26
    12  -216.26    -1.49    -0.00

    >>> interestpd = np.sum(ipmt)
    >>> np.round(interestpd, 2)
    -112.98

    """
    when = _convert_when(when)
    rate, per, nper, pv, fv, when = np.broadcast_arrays(
        rate, per, nper, pv, fv, when)
    total_pmt = pmt(rate, nper, pv, fv, when)
    ipmt = _rbl(rate, per, total_pmt, pv, when) * rate
    try:
        ipmt = np.where(when == 1, ipmt / (1 + rate), ipmt)
        ipmt = np.where(np.logical_and(when == 1, per == 1), 0.0, ipmt)
    except IndexError:
        pass
    return ipmt
Example #54
    def addEquations(self, op):
        """
        Function to generate equations corresponding to all types of add/subtraction operations
        Arguments:
            op: (tf.op) representing addition or subtraction operations
        """
        # We may have included the add equation with a prior matmul equation
        if op in self.varMap:
            return

        # Get inputs and outputs
        assert len(op.inputs) == 2
        input1, input1_isVar = self.getValues(op.inputs[0].op)
        input2, input2_isVar = self.getValues(op.inputs[1].op)
        outputVars = self.makeNewVariables(op).flatten()

        # Special case for BiasAdd with NCHW format. We need to add the bias along the channels dimension
        if op.node_def.op == 'BiasAdd':
            data_format = 'NHWC'
            if 'data_format' in op.node_def.attr:
                data_format = op.node_def.attr['data_format'].s.decode().upper()
            if data_format == 'NCHW':
                input2 = input2.reshape((1, len(input2), 1, 1))

        # Broadcast and flatten. Assert that lengths are all the same
        input1, input2 = np.broadcast_arrays(input1, input2)
        input1 = input1.flatten()
        input2 = input2.flatten()
        assert len(input1) == len(input2)
        assert len(outputVars) == len(input1)

        # Signs for addition/subtraction
        sgn1 = 1
        sgn2 = 1
        if op.node_def.op in ["Sub"]:
            sgn2 = -1

        # Create new equations depending on if the inputs are variables or constants
        # At least one input must be a variable, otherwise this operation is a constant,
        # which gets caught in makeGraphEquations.
        assert input1_isVar or input2_isVar

        # Always negate the scalar term because it changes sides in the equation, from
        # w1*x1 + ... + wk*xk + b = x_out
        # to
        # w1*x1 + ... + wk*xk - x_out = -b
        if input1_isVar and input2_isVar:
            for i in range(len(outputVars)):
                e = MarabouUtils.Equation()
                e.addAddend(sgn1, input1[i])
                e.addAddend(sgn2, input2[i])
                e.addAddend(-1, outputVars[i])
                e.setScalar(0.0)
                self.addEquation(e)
        elif input1_isVar:
            for i in range(len(outputVars)):
                e = MarabouUtils.Equation()
                e.addAddend(sgn1, input1[i])
                e.addAddend(-1, outputVars[i])
                e.setScalar(-sgn2 * input2[i])
                self.addEquation(e)
        else:
            for i in range(len(outputVars)):
                e = MarabouUtils.Equation()
                e.addAddend(sgn2, input2[i])
                e.addAddend(-1, outputVars[i])
                e.setScalar(-sgn1 * input1[i])
                self.addEquation(e)
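# The broadcast-and-flatten alignment used near the top of the method, in
# isolation: it yields one (input1, input2) pair per output variable.
import numpy as np

w = np.arange(6).reshape(2, 3)   # first input, e.g. per-variable weights
c = np.array([10, 20, 30])       # second input, e.g. a bias broadcast over rows
w_flat, c_flat = (arr.flatten() for arr in np.broadcast_arrays(w, c))
print(list(zip(w_flat, c_flat)))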
Example #55
    def plot_diagram(self, pers_dgm, skew=True, ax=None, out_file=None):
        """ Plot a persistence diagram.
        
        Parameters
        ----------
        pers_dgm : (-,2) numpy.ndarray
            A persistence diagram.
        skew : boolean 
            Flag indicating if diagram needs to first be converted to birth-persistence coordinates (default: True).
        ax : matplotlib.Axes
            Instance of a matplotlib.Axes object in which to plot (default: None, generates a new figure)
        out_file : str
            Path and file name including extension to save the figure (default: None, figure not saved).

        Returns
        -------
        matplotlib.Axes
            The matplotlib.Axes which contains the persistence diagram
        """
        pers_dgm = np.copy(pers_dgm)

        if skew:
            pers_dgm[:, 1] = pers_dgm[:, 1] - pers_dgm[:, 0]
            ylabel = 'persistence'
        else:
            ylabel = 'death'

        # setup plot range
        plot_buff_frac = 0.05
        bmin = np.min((np.min(pers_dgm[:, 0]), np.min(self._bpnts)))
        bmax = np.max((np.max(pers_dgm[:, 0]), np.max(self._bpnts)))
        b_plot_buff = (bmax - bmin) * plot_buff_frac
        bmin -= b_plot_buff
        bmax += b_plot_buff

        pmin = np.min((np.min(pers_dgm[:, 1]), np.min(self._ppnts)))
        pmax = np.max((np.max(pers_dgm[:, 1]), np.max(self._ppnts)))
        p_plot_buff = (pmax - pmin) * plot_buff_frac
        pmin -= p_plot_buff
        pmax += p_plot_buff

        ax = ax or plt.gca()
        ax.set_xlim(bmin, bmax)
        ax.set_ylim(pmin, pmax)

        # compute reasonable line width for pixel overlay (initially 1/50th of the width of a pixel)
        linewidth = (1/50 * self.pixel_size) * 72 * plt.gcf().bbox_inches.width * ax.get_position().width / \
                    np.min((bmax - bmin, pmax - pmin))

        # plot the persistence image grid
        if skew:
            hlines = np.column_stack(np.broadcast_arrays(self._bpnts[0], self._ppnts, self._bpnts[-1], self._ppnts))
            vlines = np.column_stack(np.broadcast_arrays(self._bpnts, self._ppnts[0], self._bpnts, self._ppnts[-1]))
            lines = np.concatenate([hlines, vlines]).reshape(-1, 2, 2)
            line_collection = LineCollection(lines, color='black', linewidths=linewidth)
            ax.add_collection(line_collection)       

        # plot persistence diagram
        ax.scatter(pers_dgm[:, 0], pers_dgm[:, 1])

        # plot diagonal if necessary
        if not skew:
            min_diag = np.min((np.min(ax.get_xlim()), np.min(ax.get_ylim())))
            max_diag = np.min((np.max(ax.get_xlim()), np.max(ax.get_ylim())))
            ax.plot([min_diag, max_diag], [min_diag, max_diag])

        # fix and label axes
        ax.set_aspect('equal')
        ax.set_xlabel('birth')
        ax.set_ylabel(ylabel)

        # optionally save figure
        if out_file:
            plt.savefig(out_file, bbox_inches='tight')
        
        return ax
Example #56
 def broadcast_to(arr, shape):
     "Broadcast an array to a desired shape. Returns a view."
     return np.broadcast_arrays(arr, np.empty(shape, dtype=bool))[0]
Example #57
avg_elev_index = 66

elev_mod = modem.Model()
elev_mod.read_model_file(elev_model_fn)

avg_mod = modem.Model()
avg_mod.read_model_file(avg_model_fn)

data_obj = modem.Data()
data_obj.read_data_file(data_fn)

avg_elev = elev_mod.grid_z[avg_elev_index]

elev_res = np.zeros_like(elev_mod.res_model)

avg_north, avg_east = np.broadcast_arrays(avg_mod.grid_north[:, None],
                                          avg_mod.grid_east[None, :])
elev_north, elev_east = np.broadcast_arrays(elev_mod.grid_north[:, None],
                                            elev_mod.grid_east[None, :])
# interpolate the avg grid onto elevation model
# assuming mono lake is 0 elevation
for zz in range(elev_mod.grid_z.shape[0]):
    try:
        avg_zz = np.where(
            avg_mod.grid_z >= elev_mod.grid_z[zz] - avg_elev)[0][0]
    except IndexError:
        avg_zz = -1

    print("New depth={0:.2f}; elev depth={1:.2f}".format(
        avg_mod.grid_z[avg_zz], elev_mod.grid_z[zz]))

    elev_res[:, :, zz] = avg_mod.res_model[:, :, avg_zz]
Example #58
    def subset(self, raster_data, **kwargs):
        # It is possible our user has drawn a polygon where part of the
        # shape is outside the dataset,  intersect with the rasterdata
        # shape to make sure we don't try to select/mask data that is
        # outside the bounds of our dataset.
        clipped = self.intersection(raster_data.shape)

        # Polygon is completely outside the dataset, return whatever
        # would have been returned by get_data()
        if not bool(clipped):
            ul = raster_data.index(self.bounds[0], self.bounds[1])
            lr = raster_data.index(self.bounds[2], self.bounds[3])
            window = self.get_data_window(ul[0], ul[1], lr[0], lr[1])

            return raster_data.get_data(window=window, **kwargs)

        ul = raster_data.index(clipped.bounds[0], clipped.bounds[1])
        lr = raster_data.index(clipped.bounds[2], clipped.bounds[3])
        window = self.get_data_window(ul[0], ul[1], lr[0], lr[1])

        data = raster_data.get_data(window=window, **kwargs)

        # out_shape must be determined from data's shape,  get_data
        # may have returned a bounding box of data that is smaller than
        # the implicit shape of the window we passed.  e.g. if the window
        # is partially outside the extent of the raster data. Note that
        # we index with negative numbers here because we may or may not
        # have a time dimension.
        num_bands = len(raster_data.band_indexes)

        if num_bands > 1:
            out_shape = data.shape[-3], data.shape[-2]
        else:
            out_shape = data.shape[-2], data.shape[-1]

        coordinates = []
        for lat, lon in clipped.exterior.coords:
            x, y = raster_data.index(lat, lon)
            coordinates.append((y - window[0][1], x - window[0][0]))

        # Mask the final polygon
        mask = rasterize([({
            'type': 'Polygon',
            'coordinates': [coordinates]
        }, 0)],
                         out_shape=out_shape,
                         fill=1,
                         all_touched=True,
                         dtype=np.uint8)

        # If we have more than one band,  expand the mask so it includes
        # A "channel" dimension (e.g.  shape is now (lat, lon, channel))
        if num_bands > 1:
            mask = mask[..., np.newaxis] * np.ones(num_bands)

        # Finally broadcast mask to data because data may be from a
        # Raster data collection and include a time component
        # (e.g.  shape could be (time, lat, lon),  or even
        #  (time, lat, lon, channels))
        _, mask = np.broadcast_arrays(data, mask)
        data[mask.astype(bool)] = raster_data.nodata

        return np.ma.masked_equal(data, raster_data.nodata)
Example #59
def _broadcast_to(array, shape):
    if hasattr(numpy, 'broadcast_to'):
        return numpy.broadcast_to(array, shape)
    dummy = numpy.empty(shape, array.dtype)
    return numpy.broadcast_arrays(array, dummy)[0]
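# Demo of the compatibility shim above: numpy.broadcast_to is used when
# available (NumPy >= 1.10), otherwise broadcast_arrays against a dummy
# array gives the same broadcast view.
import numpy

a = numpy.arange(3)
v = _broadcast_to(a, (4, 3))
print(v.shape)                    # (4, 3)
print(numpy.shares_memory(a, v))  # True -- a broadcast view, not a copy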
Example #60
def ndgrid(*args, **kwargs):
    """
    Form tensorial grid for 1, 2, or 3 dimensions.

    Returns as column vectors by default.

    To return as matrix input:

        ndgrid(..., vector=False)

    The inputs can be a list or separate arguments.

    e.g.::

        a = np.array([1, 2, 3])
        b = np.array([1, 2])

        XY = ndgrid(a, b)
            > [[1 1]
               [2 1]
               [3 1]
               [1 2]
               [2 2]
               [3 2]]

        X, Y = ndgrid(a, b, vector=False)
            > X = [[1 1]
                   [2 2]
                   [3 3]]
            > Y = [[1 2]
                   [1 2]
                   [1 2]]

    """

    # Read the keyword arguments, and only accept a vector=True/False
    vector = kwargs.pop('vector', True)
    assert isinstance(vector, bool), "'vector' keyword must be a bool"
    assert len(kwargs) == 0, "Only 'vector' keyword accepted"

    # you can either pass a list [x1, x2, x3] or each separately
    if isinstance(args[0], list):
        xin = args[0]
    else:
        xin = args

    # Each vector needs to be a numpy array
    assert np.all([isinstance(x, np.ndarray)
                   for x in xin]), "All vectors must be numpy arrays."

    if len(xin) == 1:
        return xin[0]
    elif len(xin) == 2:
        XY = np.broadcast_arrays(mkvc(xin[1], 1), mkvc(xin[0], 2))
        if vector:
            X2, X1 = [mkvc(x) for x in XY]
            return np.c_[X1, X2]
        else:
            return XY[1], XY[0]
    elif len(xin) == 3:
        XYZ = np.broadcast_arrays(mkvc(xin[2], 1), mkvc(xin[1], 2),
                                  mkvc(xin[0], 3))
        if vector:
            X3, X2, X1 = [mkvc(x) for x in XYZ]
            return np.c_[X1, X2, X3]
        else:
            return XYZ[2], XYZ[1], XYZ[0]