Example #1
def comp_a(dert__, rng):
    """
    Compute and compare a over predetermined range.
    """
    # Unpack dert__:
    if len(dert__) in (5, 12): # idert or full dert with m.
        i__, g__, m__, dy__, dx__ = dert__[:5]
    else: # idert or full dert without m.
        i__, g__, dy__, dx__ = dert__[:4]

    if len(dert__) > 10: # if ra+:
        a__ = dert__[-7:-5] # Computed angle (use reverse indexing to avoid m check).
        day__ = dert__[-4:-2] # Accumulated day__.
        dax__ = dert__[-2:] # Accumulated dax__.
    else: # if fa:
        # Compute angles:
        a__ = ma.stack((dy__, dx__), axis=0) / g__
        a__.mask = g__.mask

        # Initialize dax, day:
        day__, dax__ = [ma.zeros((2,) + i__.shape) for _ in range(2)]

    # Compute angle differences:
    da__ = translated_operation(a__, rng, angle_diff)
    comp_field = central_slice(rng)


    # Decompose and add to corresponding day and dax:
    day__[comp_field] = (da__ * Y_COEFFS[rng]).mean(axis=-1)
    dax__[comp_field] = (da__ * X_COEFFS[rng]).mean(axis=-1)

    # Apply mask:
    msq = np.ones(a__.shape, dtype=int) # Rim mask.
    msq[comp_field] = a__.mask[comp_field] + da__.mask.sum(axis=-1) # Summed d mask.
    imsq = msq.nonzero()
    day__[imsq] = dax__[imsq] = ma.masked # Apply mask.

    # Compute ga:
    ga__ = ma.hypot(
        ma.arctan2(*day__),
        ma.arctan2(*dax__)
    )[np.newaxis, ...] * SCALER_ga

    try: # dert with m is more common:
        return ma.concatenate( # Concatenate on the first dimension.
            (
                ma.stack((i__, g__, m__, dy__, dx__), axis=0),
                a__, ga__, day__, dax__,
            ),
            axis=0,
        )
    except NameError: # m doesn't exist:
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((i__, g__, dy__, dx__), axis=0),
                a__, ga__, day__, dax__,
            ),
            axis=0,
        )
Example #2
File: comp_i.py Project: zxlzr/CogAlg
def comp_a(gdert__, rng):
    """
    Compute and compare a over predetermined range.
    """
    # Unpack dert__:
    try:
        g, gg, m, dy, dx = gdert__
    except ValueError:  # Initial dert doesn't contain m.
        g, gg, dy, dx = gdert__

    # Initialize dax, day:
    day, dax = [ma.zeros((2, ) + g.shape) for _ in range(2)]

    # Compute angles:
    a = ma.stack((dy, dx), axis=0) / gg
    a.mask = gg.mask

    # Compute angle differences:
    da = translated_operation(a, rng, angle_diff)
    comp_field = central_slice(rng)

    # Decompose and add to corresponding day and dax:
    day[comp_field] = (da * Y_COEFFS[rng]).mean(axis=-1)
    dax[comp_field] = (da * X_COEFFS[rng]).mean(axis=-1)

    # Apply mask:
    msq = np.ones(a.shape, dtype=int)  # Rim mask.
    msq[comp_field] = a.mask[comp_field] + da.mask.sum(
        axis=-1)  # Summed d mask.
    imsq = msq.nonzero()
    day[imsq] = dax[imsq] = ma.masked  # Apply mask.

    # Compute ga:
    ga = ma.hypot(ma.arctan2(*day), ma.arctan2(*dax))[np.newaxis,
                                                      ...] * SCALER_ga

    try:
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((g, gg, m), axis=0),
                ga,
                day,
                dax,
            ),
            axis=0,
        )
    except NameError:  # m doesn't exist.
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((g, gg), axis=0),
                a,
                ga,
                day,
                dax,
            ),
            axis=0,
        )
Example #3
        def _compare(self):
            dy, dx = compare_slices(self._i, self._rng, operator=angle_diff)

            self._derts[1:][central_slice(self._rng)] += np.concatenate((dy, dx))
            self._derts[0] = ma.hypot(
                ma.arctan2(*self.dy),
                ma.arctan2(*self.dx),
            )

            self._derts[rim_mask(self._i.shape, self._rng)] = ma.masked
            return self
Example #4
 def _angles(self, U, V, eps=0.001):
     xy = self.ax.transData.transform(self.XY)
     uv = ma.hstack((U[:, np.newaxis], V[:, np.newaxis])).filled(0)
     xyp = self.ax.transData.transform(self.XY + eps * uv)
     dxy = xyp - xy
     ang = ma.arctan2(dxy[:, 1], dxy[:, 0])
     return ang
Example #5
    def calculatePosturalMeasurements(self):
        self.Xhead = ma.array(ma.squeeze(self.skeleton[:, 0, :]))
        self.Xhead = ((self.Xhead + self.h5ref['boundingBox'][:, :2]) /
                      self.pixelsPerMicron)
        self.Xhead[np.logical_not(self.orientationFixed), :] = ma.masked
        self.Xtail = ma.array(np.squeeze(self.skeleton[:, -1, :]))
        self.Xtail = ((self.Xtail + self.h5ref['boundingBox'][:, :2]) /
                      self.pixelsPerMicron)
        self.Xtail[np.logical_not(self.orientationFixed), :] = ma.masked
        self.psi = ma.arctan2(self.Xhead[:, 0]-self.X[:, 0],
                              self.Xhead[:, 1]-self.X[:, 1])
        dpsi = self.phi - self.psi
        self.dpsi = ma.mod(dpsi+np.pi, 2*np.pi)-np.pi
        self.psi[np.logical_not(self.orientationFixed)] = ma.masked
        self.dpsi[np.logical_or(np.logical_not(self.orientationFixed),
                                self.badFrames)] = ma.masked
        self.Xhead[np.logical_or(np.logical_not(self.orientationFixed),
                                 self.badFrames), :] = ma.masked
        self.Xtail[np.logical_or(np.logical_not(self.orientationFixed),
                                 self.badFrames), :] = ma.masked

        skeleton = ma.array(self.skeleton)
        skeleton[np.logical_not(self.orientationFixed), :, :] = ma.masked
        posture = ma.array(self.posture)
        posture[np.logical_not(self.orientationFixed), :] = ma.masked
        missing = np.any(posture.mask, axis=1)
        if np.all(missing):
            self.Ctheta = None
            self.ltheta = None
            self.vtheta = None
        else:
            posture = posture[~missing, :].T
            self.Ctheta = np.cov(posture)
            self.ltheta, self.vtheta = LA.eig(self.Ctheta)
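The dpsi line above wraps the phase difference back into [-pi, pi). A small standalone sketch of the same wrapping idiom (not part of the original class):

import numpy as np
import numpy.ma as ma

def wrap_angle(a):
    # Wrap an angle in radians into [-pi, pi), as done for dpsi above.
    return ma.mod(a + np.pi, 2 * np.pi) - np.pi

print(wrap_angle(3.5 * np.pi))   # -> about -1.5708 (-pi/2)
print(wrap_angle(-np.pi))        # -> -3.1416 (the lower bound is inclusive)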
Example #6
def _generate_dbc(lane_to_follow: Lane, dbe_file_name: str) -> str:
    from numpy.ma import arctan2
    from numpy import rad2deg, pi, sin, cos, array
    goal = lane_to_follow[-1]
    current = lane_to_follow[0]
    current_pos = array([current["x"], current["y"]])
    next = lane_to_follow[1]
    next_pos = array([next["x"], next["y"]])
    delta = next_pos - current_pos
    initial_orientation_rad = arctan2(delta[1], delta[0])
    initial_orientation_deg = float("{0:.2f}".format(
        rad2deg(initial_orientation_rad)))

    r2 = 0.25 * current["width"]
    theta2 = initial_orientation_rad - 0.5 * pi
    offset = array([
        2.5 * cos(initial_orientation_rad), 2.5 * sin(initial_orientation_rad)
    ])
    temp = current_pos + offset
    initial_position = temp + array([r2 * cos(theta2), r2 * sin(theta2)])
    initial_state = {"x": initial_position[0], "y": initial_position[1]}

    return TEMPLATE_ENV.get_template(DBC_TEMPLATE_NAME) \
        .render(initial_state=initial_state,
                initial_orientation=initial_orientation_deg,
                lane=lane_to_follow,
                dbe_file_name=dbe_file_name,
                goal=goal)
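A brief standalone sketch (hypothetical coordinates, not from the project) of the initial-orientation step used above: the heading of the segment from the current lane point to the next one, converted to degrees.

from numpy import array, rad2deg
from numpy.ma import arctan2

current_pos = array([10.0, 5.0])
next_pos = array([13.0, 8.0])
delta = next_pos - current_pos
heading_deg = float(rad2deg(arctan2(delta[1], delta[0])))  # 45.0 for this segment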
Example #7
File: test_old_ma.py Project: numpy/numpy
 def test_testUfuncs1(self):
     # Test various functions such as sin, cos.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     assert_(eq(np.cos(x), cos(xm)))
     assert_(eq(np.cosh(x), cosh(xm)))
     assert_(eq(np.sin(x), sin(xm)))
     assert_(eq(np.sinh(x), sinh(xm)))
     assert_(eq(np.tan(x), tan(xm)))
     assert_(eq(np.tanh(x), tanh(xm)))
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
         assert_(eq(np.log(abs(x)), log(xm)))
         assert_(eq(np.log10(abs(x)), log10(xm)))
     assert_(eq(np.exp(x), exp(xm)))
     assert_(eq(np.arcsin(z), arcsin(zm)))
     assert_(eq(np.arccos(z), arccos(zm)))
     assert_(eq(np.arctan(z), arctan(zm)))
     assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
     assert_(eq(np.absolute(x), absolute(xm)))
     assert_(eq(np.equal(x, y), equal(xm, ym)))
     assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
     assert_(eq(np.less(x, y), less(xm, ym)))
     assert_(eq(np.greater(x, y), greater(xm, ym)))
     assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
     assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
     assert_(eq(np.conjugate(x), conjugate(xm)))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
     assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
     assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
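These assertions check that the numpy.ma ufuncs agree with their plain numpy counterparts on the underlying data. A small standalone sketch (not part of the test suite) of how ma.arctan2 additionally propagates the mask:

import numpy as np
import numpy.ma as ma

y = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
x = ma.array([1.0, 1.0, 0.0])
theta = ma.arctan2(y, x)
print(theta.mask)   # [False  True False]: the masked element stays masked
print(theta[2])     # ~1.5708: arctan2(3, 0) is still well defined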
Example #8
    def calculateCentroidMeasurements(self):
        self.X[self.badFrames, :] = ma.masked
        if not self.useSmoothingFilterDerivatives:
            self.v[1:-1] = (self.X[2:, :] - self.X[0:-2])/(2.0/self.frameRate)
        else:
            # use a cubic polynomial filter to estimate the velocity
            self.v = ma.zeros(self.X.shape)
            halfWindow = int(np.round(self.filterWindow/2.*self.frameRate))
            for i in xrange(halfWindow, self.v.shape[0]-halfWindow):
                start = i-halfWindow
                mid = i
                finish = i+halfWindow+1
                if not np.any(self.X.mask[start:finish,:]):
                    px = np.polyder(np.polyfit(self.t[start:finish]-self.t[mid],
                                               self.X[start:finish, 0], 3))
                    py = np.polyder(np.polyfit(self.t[start:finish]-self.t[mid],
                                               self.X[start:finish, 1], 3))
                    self.v[i,:] = [np.polyval(px, 0), np.polyval(py, 0)]
                else:
                    self.v[i,:] = ma.masked

        self.s = ma.sqrt(ma.sum(ma.power(self.v, 2), axis=1))
        self.phi = ma.arctan2(self.v[:, 1], self.v[:, 0])
        self.t[self.badFrames] = ma.masked
        self.X[self.badFrames, :] = ma.masked
        self.v[self.badFrames, :] = ma.masked
        self.s[self.badFrames] = ma.masked
        self.phi[self.badFrames] = ma.masked
Example #11
File: statsmod.py Project: annemo1976/IWVS
def DD_FF(u,v):
    '''Calculate wind/current speed and direction from u and v components.

    If u and v are the easterly and northerly components,
    DD is the heading direction of the wind.
    To get the meteorological standard, call DD_FF(-u, -v).
    '''
    DD = ma.arctan2(u, v)*180/sp.pi
    DD[DD < 0] = 360 + DD[DD < 0]
    FF = ma.sqrt(u**2 + v**2)
    return DD, FF
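A minimal usage sketch (not part of the original file), assuming DD_FF and its module-level imports (ma, sp) are available:

import numpy.ma as ma
import scipy as sp  # DD_FF's body uses sp.pi

u = ma.array([0.0, 5.0, 3.0], mask=[False, False, True])   # easterly component
v = ma.array([-5.0, 0.0, 4.0], mask=[False, False, True])  # northerly component
DD, FF = DD_FF(u, v)
# DD ~ [180., 90., --] degrees (heading direction), FF ~ [5., 5., --]; masked elements stay masked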
Example #13
def cart2polar(x, y, degrees=True):
    """
    Convert cartesian X and Y to polar RHO and THETA.
    :param x: x cartesian coordinate
    :param y: y cartesian coordinate
    :param degrees: True = return theta in degrees, False = return theta in
        radians. [default: True]
    :return: r, theta
    """
    rho = ma.sqrt(x**2 + y**2)
    theta = ma.arctan2(y, x)
    if degrees:
        theta *= (180 / math.pi)

    return rho, theta
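A minimal usage sketch (not part of the original source), assuming cart2polar and its module-level imports (ma, math) are available; masked elements pass straight through the conversion:

import numpy.ma as ma

x = ma.array([1.0, 0.0, 3.0], mask=[False, False, True])
y = ma.array([1.0, 2.0, 4.0])
rho, theta = cart2polar(x, y)   # theta in degrees by default
# rho ~ [1.414, 2.0, --], theta ~ [45.0, 90.0, --]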
Example #15
def DD_FF(u,v,met=True):
    '''Calculate wind/current speed and direction from u and v components.

    If u and v are the easterly and northerly components, returns:
    FF : wind speed
    DD : wind direction in the meteorological standard (direction the wind is coming from)
    Call with met=False for the oceanographic standard.
    '''
    if met==False:
        u,v = -u, -v
    DD = ma.arctan2(-u, -v)*180/sp.pi
    DD[DD < 0] = 360 + DD[DD < 0]
    FF = ma.sqrt(u**2 + v**2)
    return DD, FF
Example #16
def outlier_detector_cmean(np_ma, Vny, Nprf_arr, Nmin=2):

    data = np_ma.data
    mask = np_ma.mask

    f_arr = np.ones(Nprf_arr.shape)
    f_arr[np.where(Nprf_arr == np.min(Nprf_arr))] = -1
    Vny_arr = Vny / Nprf_arr

    kH, kL = np.zeros((5, 5)), np.zeros((5, 5))
    kH[1::2] = 1
    kL[::2] = 1

    # Array with the number of valid neighbours at each point
    Nval_arr_H = local_valid(mask, kernel=kH)
    Nval_arr_L = local_valid(mask, kernel=kL)

    # Convert to angles and calculate trigonometric variables
    ang_ma = (np_ma * pi / Vny)
    cos_ma = ma.cos(ang_ma * Nprf_arr)
    sin_ma = ma.sin(ang_ma * Nprf_arr)

    # Average trigonometric variables in local neighbourhood
    dummy_cos = cos_ma.data * (~mask).astype(int)
    dummy_sin = sin_ma.data * (~mask).astype(int)

    ncols, cos_conv = dummy_cols(dummy_cos, kH, val=0)
    ncols, sin_conv = dummy_cols(dummy_sin, kH, val=0)

    cos_sumH = ndimage.convolve(cos_conv, weights=kH, mode='wrap')
    cos_sumL = ndimage.convolve(cos_conv, weights=kL, mode='wrap')

    sin_sumH = ndimage.convolve(sin_conv, weights=kH, mode='wrap')
    sin_sumL = ndimage.convolve(sin_conv, weights=kL, mode='wrap')

    # Remove added columns
    cos_sumH = cos_sumH[:, :int(cos_sumL.shape[1] - ncols)]
    cos_sumL = cos_sumL[:, :int(cos_sumL.shape[1] - ncols)]
    sin_sumH = sin_sumH[:, :int(sin_sumL.shape[1] - ncols)]
    sin_sumL = sin_sumL[:, :int(sin_sumL.shape[1] - ncols)]

    # Average angle in local neighbourhood
    cos_avgH_ma = ma.array(data=cos_sumH, mask=mask) / Nval_arr_H
    cos_avgL_ma = ma.array(data=cos_sumL, mask=mask) / Nval_arr_L
    sin_avgH_ma = ma.array(data=sin_sumH, mask=mask) / Nval_arr_H
    sin_avgL_ma = ma.array(data=sin_sumL, mask=mask) / Nval_arr_L

    BH = ma.arctan2(sin_avgH_ma, cos_avgH_ma)
    BL = ma.arctan2(sin_avgL_ma, cos_avgL_ma)

    # Average velocity ANGLE of neighbours (reference ANGLE for outlier detection):
    angref_ma = f_arr * (BL - BH)
    angref_ma[angref_ma < 0] = angref_ma[angref_ma < 0] + 2 * pi
    angref_ma[angref_ma > pi] = -(2 * pi - angref_ma[angref_ma > pi])
    angobs_ma = ma.arctan2(ma.sin(ang_ma), ma.cos(ang_ma))

    # Detector array (minimum ANGLE difference between observed and reference):
    diff = angobs_ma - angref_ma
    det_ma = (Vny / pi) * ma.arctan2(ma.sin(diff), ma.cos(diff))

    out_mask = np.zeros(det_ma.shape)
    out_mask[abs(det_ma) > 0.8 * Vny_arr] = 1
    out_mask[(Nval_arr_H < Nmin) | (Nval_arr_L < Nmin)] = 0

    # CORRECTION (2 STEP)

    # Convolution kernel
    kernel = np.ones(kH.shape)

    new_mask = (mask) | (out_mask.astype(bool))

    # Array with the number of valid neighbours at each point (outliers removed)
    Nval_arr = local_valid(new_mask, kernel=kernel)

    out_mask[Nval_arr < Nmin] = 0

    ref_arr = ref_val(data, new_mask, kernel, method='median')
    ref_ma = ma.array(data=ref_arr, mask=mask)

    return ref_ma, out_mask
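The BH/BL step above is the standard circular-mean construction: average the sines and cosines of the angles in a neighbourhood, then recover the mean angle with arctan2. A small standalone sketch (not from the original module):

import numpy.ma as ma

ang = ma.array([0.1, 0.2, 6.2])   # radians; 6.2 is just below 2*pi, i.e. about -0.08
naive_mean = ang.mean()           # ~2.17, distorted by the wrap-around
circ_mean = ma.arctan2(ma.sin(ang).mean(), ma.cos(ang).mean())
# circ_mean ~ 0.07 rad, the sensible average direction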
Example #17
 def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
     pivot, sizes, fill_empty, flip):
     '''
     This function actually creates the wind barbs.  *u* and *v*
     are components of the vector in the *x* and *y* directions,
     respectively.
     *nflags*, *nbarbs*, and *half_barb*, empty_flag* are,
     *respectively, the number of flags, number of barbs, flag for
     *half a barb, and flag for empty barb, ostensibly obtained
     *from :meth:`_find_tails`.
     *length* is the length of the barb staff in points.
     *pivot* specifies the point on the barb around which the
     entire barb should be rotated.  Right now, valid options are
     'head' and 'middle'.
     *sizes* is a dictionary of coefficients specifying the ratio
     of a given feature to the length of the barb. These features
     include:
         - *spacing*: space between features (flags, full/half
            barbs)
         - *height*: distance from shaft of top of a flag or full
            barb
         - *width* - width of a flag, twice the width of a full barb
         - *emptybarb* - radius of the circle used for low
            magnitudes
     *fill_empty* specifies whether the circle representing an
     empty barb should be filled or not (this changes the drawing
     of the polygon).
     *flip* is a flag indicating whether the features should be flipped to
     the other side of the barb (useful for winds in the southern
     hemisphere.
     This function returns list of arrays of vertices, defining a polygon for
     each of the wind barbs.  These polygons have been rotated to properly
     align with the vector direction.
     '''
     spacing = length * sizes.get('spacing', 0.125)
     full_height = length * sizes.get('height', 0.4)
     full_width = length * sizes.get('width', 0.25)
     empty_rad = length * sizes.get('emptybarb', 0.15)
     pivot_points = dict(tip=0.0, middle=-length/2.)
     if flip: full_height = -full_height
     endx = 0.0
     endy = pivot_points[pivot.lower()]
     angles = -(ma.arctan2(v, u) + np.pi/2)
     circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
     if fill_empty:
         empty_barb = circ
     else:
         empty_barb = np.concatenate((circ, circ[::-1]))
     barb_list = []
     for index, angle in np.ndenumerate(angles):
         if empty_flag[index]:
             barb_list.append(empty_barb)
             continue
         poly_verts = [(endx, endy)]
         offset = length
         for i in range(nflags[index]):
             if offset != length: offset += spacing / 2.
             poly_verts.extend([[endx, endy + offset],
                 [endx + full_height, endy - full_width/2 + offset],
                 [endx, endy - full_width + offset]])
             offset -= full_width + spacing
         for i in range(nbarbs[index]):
             poly_verts.extend([(endx, endy + offset),
                 (endx + full_height, endy + offset + full_width/2),
                 (endx, endy + offset)])
             offset -= spacing
         if half_barb[index]:
             if offset == length:
                 poly_verts.append((endx, endy + offset))
                 offset -= 1.5 * spacing
             poly_verts.extend([(endx, endy + offset),
                 (endx + full_height/2, endy + offset + full_width/4),
                 (endx, endy + offset)])
         poly_verts = transforms.Affine2D().rotate(-angle).transform(
             poly_verts)
         barb_list.append(poly_verts)
     return barb_list
Example #18
    def _OSGB36toWGS84(self):
        """ Convert between OSGB36 and WGS84. If we already have values available, we just return them.

        :rtype: float, float
        """
        if self.easting is None or self.northing is None:
            return False

        #E, N are the British national grid coordinates - eastings and northings
        a, b = 6377563.396, 6356256.909  #The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m)
        F0 = 0.9996012717  #scale factor on the central meridian
        lat0 = 49 * pi / 180  #Latitude of true origin (radians)
        lon0 = -2 * pi / 180  #Longitude of true origin and central meridian (radians)
        N0, E0 = -100000, 400000  #Northing & easting of true origin (m)
        e2 = 1 - (b * b) / (a * a)  #eccentricity squared
        n = (a - b) / (a + b)

        #Initialise the iterative variables
        lat, M = lat0, 0

        while self.northing - N0 - M >= 0.00001:  #Accurate to 0.01mm
            lat += (self.northing - N0 - M) / (a * F0)
            M1 = (1 + n + (5. / 4) * n**2 + (5. / 4) * n**3) * (lat - lat0)
            M2 = (3 * n + 3 * n**2 +
                  (21. / 8) * n**3) * sin(lat - lat0) * cos(lat + lat0)
            M3 = ((15. / 8) * n**2 +
                  (15. / 8) * n**3) * sin(2 * (lat - lat0)) * cos(2 *
                                                                  (lat + lat0))
            M4 = (35. / 24) * n**3 * sin(3 * (lat - lat0)) * cos(3 *
                                                                 (lat + lat0))
            #meridional arc
            M = b * F0 * (M1 - M2 + M3 - M4)

        #transverse radius of curvature
        nu = a * F0 / sqrt(1 - e2 * sin(lat)**2)

        #meridional radius of curvature
        rho = a * F0 * (1 - e2) * (1 - e2 * sin(lat)**2)**(-1.5)
        eta2 = nu / rho - 1

        secLat = 1. / cos(lat)
        VII = tan(lat) / (2 * rho * nu)
        VIII = tan(lat) / (24 * rho * nu**3) * (5 + 3 * tan(lat)**2 + eta2 -
                                                9 * tan(lat)**2 * eta2)
        IX = tan(lat) / (720 * rho * nu**5) * (61 + 90 * tan(lat)**2 +
                                               45 * tan(lat)**4)
        X = secLat / nu
        XI = secLat / (6 * nu**3) * (nu / rho + 2 * tan(lat)**2)
        XII = secLat / (120 * nu**5) * (5 + 28 * tan(lat)**2 +
                                        24 * tan(lat)**4)
        XIIA = secLat / (5040 * nu**7) * (
            61 + 662 * tan(lat)**2 + 1320 * tan(lat)**4 + 720 * tan(lat)**6)
        dE = self.easting - E0

        #These are on the wrong ellipsoid currently: Airy1830. (Denoted by _1)
        lat_1 = lat - VII * dE**2 + VIII * dE**4 - IX * dE**6
        lon_1 = lon0 + X * dE - XI * dE**3 + XII * dE**5 - XIIA * dE**7

        #Want to convert to the GRS80 ellipsoid.
        #First convert to cartesian from spherical polar coordinates
        H = 0  #Third spherical coord.
        x_1 = (nu / F0 + H) * cos(lat_1) * cos(lon_1)
        y_1 = (nu / F0 + H) * cos(lat_1) * sin(lon_1)
        z_1 = ((1 - e2) * nu / F0 + H) * sin(lat_1)

        #Perform Helmert transform (to go between Airy 1830 (_1) and GRS80 (_2))
        s = -20.4894 * 10**-6  #The scale factor -1
        tx, ty, tz = 446.448, -125.157, +542.060  #The translations along x,y,z axes respectively
        rxs, rys, rzs = 0.1502, 0.2470, 0.8421  #The rotations along x,y,z respectively, in seconds
        rx, ry, rz = rxs * pi / (180 * 3600.), rys * pi / (
            180 * 3600.), rzs * pi / (180 * 3600.)  #In radians
        x_2 = tx + (1 + s) * x_1 + (-rz) * y_1 + (ry) * z_1
        y_2 = ty + (rz) * x_1 + (1 + s) * y_1 + (-rx) * z_1
        z_2 = tz + (-ry) * x_1 + (rx) * y_1 + (1 + s) * z_1

        #Back to spherical polar coordinates from cartesian
        #Need some of the characteristics of the new ellipsoid
        a_2, b_2 = 6378137.000, 6356752.3141  #The GRS80 semi-major and semi-minor axes used for WGS84 (m)
        e2_2 = 1 - (b_2 * b_2) / (a_2 * a_2
                                  )  #The eccentricity of the GRS80 ellipsoid
        p = sqrt(x_2**2 + y_2**2)

        #Lat is obtained by an iterative procedure:
        lat = arctan2(z_2, (p * (1 - e2_2)))  #Initial value
        latold = 2 * pi
        while abs(lat - latold) > 10**-16:
            lat, latold = latold, lat
            nu_2 = a_2 / sqrt(1 - e2_2 * sin(latold)**2)
            lat = arctan2(z_2 + e2_2 * nu_2 * sin(latold), p)

        #Lon and height are then pretty easy
        lon = arctan2(y_2, x_2)
        H = p / cos(lat) - nu_2

        #Convert to degrees
        self.lat = lat * 180 / pi
        self.lon = lon * 180 / pi

        return True
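The latitude loop above is a fixed-point iteration for geodetic latitude from Cartesian (ECEF) coordinates. A compact standalone sketch of the same iteration, using the GRS80 constants from the method and an arbitrary sample point (with a slightly looser tolerance than above):

from numpy import sqrt, sin, pi
from numpy.ma import arctan2

a_2, b_2 = 6378137.000, 6356752.3141             # GRS80 semi-major and semi-minor axes (m)
e2_2 = 1 - (b_2 * b_2) / (a_2 * a_2)             # eccentricity squared
x_2, y_2, z_2 = 3875000.0, -110000.0, 5028000.0  # sample ECEF coordinates (m)

p = sqrt(x_2 ** 2 + y_2 ** 2)
lat = arctan2(z_2, p * (1 - e2_2))               # initial value
latold = 2 * pi
while abs(lat - latold) > 1e-12:
    lat, latold = latold, lat
    nu_2 = a_2 / sqrt(1 - e2_2 * sin(latold) ** 2)
    lat = arctan2(z_2 + e2_2 * nu_2 * sin(latold), p)
lon = arctan2(y_2, x_2)                          # longitude is direct
print(lat * 180 / pi, lon * 180 / pi)            # geodetic latitude and longitude in degrees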
Example #19
    def _WGS84toOSGB36(self):
        """Perform conversion from WGS84 to OSGB36. If the OSGB36 co-ords are available,
        just return those.
        """
        if self.lat is None or self.lon is None:
            return False

        #First convert to radians
        #These are on the wrong ellipsoid currently: GRS80. (Denoted by _1)
        lat_1 = self.lat * pi / 180
        lon_1 = self.lon * pi / 180

        #Want to convert to the Airy 1830 ellipsoid, which has the following:
        a_1, b_1 = 6378137.000, 6356752.3141  #The GRS80 semi-major and semi-minor axes used for WGS84 (m)
        e2_1 = 1 - (b_1 * b_1) / (a_1 * a_1
                                  )  #The eccentricity of the GRS80 ellipsoid
        nu_1 = a_1 / sqrt(1 - e2_1 * sin(lat_1)**2)

        #First convert to cartesian from spherical polar coordinates
        H = 0  #Third spherical coord.
        x_1 = (nu_1 + H) * cos(lat_1) * cos(lon_1)
        y_1 = (nu_1 + H) * cos(lat_1) * sin(lon_1)
        z_1 = ((1 - e2_1) * nu_1 + H) * sin(lat_1)

        #Perform Helmert transform (to go between GRS80 (_1) and Airy 1830 (_2))
        s = 20.4894 * 10**-6  #The scale factor -1
        tx, ty, tz = -446.448, 125.157, -542.060  #The translations along x,y,z axes respectively
        rxs, rys, rzs = -0.1502, -0.2470, -0.8421  #The rotations along x,y,z respectively, in seconds
        rx, ry, rz = rxs * pi / (180 * 3600.), rys * pi / (
            180 * 3600.), rzs * pi / (180 * 3600.)  #In radians
        x_2 = tx + (1 + s) * x_1 + (-rz) * y_1 + ry * z_1
        y_2 = ty + rz * x_1 + (1 + s) * y_1 + (-rx) * z_1
        z_2 = tz + (-ry) * x_1 + rx * y_1 + (1 + s) * z_1

        #Back to spherical polar coordinates from cartesian
        #Need some of the characteristics of the new ellipsoid
        a, b = 6377563.396, 6356256.909  #The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m)
        e2 = 1 - (b * b) / (a * a
                            )  #The eccentricity of the Airy 1830 ellipsoid
        p = sqrt(x_2**2 + y_2**2)

        #Lat is obtained by an iterative procedure:
        lat = arctan2(z_2, (p * (1 - e2)))  #Initial value
        latold = 2 * pi
        while abs(lat - latold) > 10**-16:
            lat, latold = latold, lat
            nu = a / sqrt(1 - e2 * sin(latold)**2)
            lat = arctan2(z_2 + e2 * nu * sin(latold), p)

        #Lon and height are then pretty easy
        lon = arctan2(y_2, x_2)
        H = p / cos(lat) - nu

        #E, N are the British national grid coordinates - eastings and northings
        F0 = 0.9996012717  #scale factor on the central meridian
        lat0 = 49 * pi / 180  #Latitude of true origin (radians)
        lon0 = -2 * pi / 180  #Longitude of true origin and central meridian (radians)
        N0, E0 = -100000, 400000  #Northing & easting of true origin (m)
        n = (a - b) / (a + b)

        #meridional radius of curvature
        rho = a * F0 * (1 - e2) * (1 - e2 * sin(lat)**2)**(-1.5)
        eta2 = nu * F0 / rho - 1

        M1 = (1 + n + (5 / 4) * n**2 + (5 / 4) * n**3) * (lat - lat0)
        M2 = (3 * n + 3 * n**2 +
              (21 / 8) * n**3) * sin(lat - lat0) * cos(lat + lat0)
        M3 = ((15 / 8) * n**2 +
              (15 / 8) * n**3) * sin(2 * (lat - lat0)) * cos(2 * (lat + lat0))
        M4 = (35 / 24) * n**3 * sin(3 * (lat - lat0)) * cos(3 * (lat + lat0))

        #meridional arc
        M = b * F0 * (M1 - M2 + M3 - M4)

        I = M + N0
        II = nu * F0 * sin(lat) * cos(lat) / 2
        III = nu * F0 * sin(lat) * cos(lat)**3 * (5 - tan(lat)**2 +
                                                  9 * eta2) / 24
        IIIA = nu * F0 * sin(lat) * cos(lat)**5 * (61 - 58 * tan(lat)**2 +
                                                   tan(lat)**4) / 720
        IV = nu * F0 * cos(lat)
        V = nu * F0 * cos(lat)**3 * (nu / rho - tan(lat)**2) / 6
        VI = nu * F0 * cos(lat)**5 * (5 - 18 * tan(lat)**2 + tan(lat)**4 + 14 *
                                      eta2 - 58 * eta2 * tan(lat)**2) / 120

        N = I + II * (lon - lon0)**2 + III * (lon - lon0)**4 + IIIA * (lon -
                                                                       lon0)**6
        E = E0 + IV * (lon - lon0) + V * (lon - lon0)**3 + VI * (lon - lon0)**5

        self.easting = E
        self.northing = N

        return True
Example #20
    def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
                    pivot, sizes, fill_empty, flip):
        """Monkey-patch _make_barbs. Allows pivot to be a float value."""
        # These control the spacing and size of barb elements relative to the
        # length of the shaft
        spacing = length * sizes.get('spacing', 0.125)
        full_height = length * sizes.get('height', 0.4)
        full_width = length * sizes.get('width', 0.25)
        empty_rad = length * sizes.get('emptybarb', 0.15)

        # Controls y point where to pivot the barb.
        pivot_points = dict(tip=0.0, middle=-length / 2.)

        # Check for flip
        if flip:
            full_height = -full_height

        endx = 0.0
        try:
            endy = float(pivot)
        except ValueError:
            endy = pivot_points[pivot.lower()]

        # Get the appropriate angle for the vector components.  The offset is
        # due to the way the barb is initially drawn, going down the y-axis.
        # This makes sense in a meteorological mode of thinking since there 0
        # degrees corresponds to north (the y-axis traditionally)
        angles = -(ma.arctan2(v, u) + np.pi / 2)

        # Used for low magnitude.  We just get the vertices, so if we make it
        # out here, it can be reused.  The center set here should put the
        # center of the circle at the location(offset), rather than at the
        # same point as the barb pivot; this seems more sensible.
        circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
        if fill_empty:
            empty_barb = circ
        else:
            # If we don't want the empty one filled, we make a degenerate
            # polygon that wraps back over itself
            empty_barb = np.concatenate((circ, circ[::-1]))

        barb_list = []
        for index, angle in np.ndenumerate(angles):
            # If the vector magnitude is too weak to draw anything, plot an
            # empty circle instead
            if empty_flag[index]:
                # We can skip the transform since the circle has no preferred
                # orientation
                barb_list.append(empty_barb)
                continue

            poly_verts = [(endx, endy)]
            offset = length

            # Add vertices for each flag
            for i in range(nflags[index]):
                # The spacing that works for the barbs is a little too much for
                # the flags, but this only occurs when we have more than 1
                # flag.
                if offset != length:
                    offset += spacing / 2.
                poly_verts.extend(
                    [[endx, endy + offset],
                     [endx + full_height, endy - full_width / 2 + offset],
                     [endx, endy - full_width + offset]])

                offset -= full_width + spacing

            # Add vertices for each barb.  These really are lines, but works
            # great adding 3 vertices that basically pull the polygon out and
            # back down the line
            for i in range(nbarbs[index]):
                poly_verts.extend(
                    [(endx, endy + offset),
                     (endx + full_height, endy + offset + full_width / 2),
                     (endx, endy + offset)])

                offset -= spacing

            # Add the vertices for half a barb, if needed
            if half_barb[index]:
                # If the half barb is the first on the staff, traditionally it
                # is offset from the end to make it easy to distinguish from a
                # barb with a full one
                if offset == length:
                    poly_verts.append((endx, endy + offset))
                    offset -= 1.5 * spacing
                poly_verts.extend(
                    [(endx, endy + offset),
                     (endx + full_height / 2, endy + offset + full_width / 4),
                     (endx, endy + offset)])

            # Rotate the barb according the angle. Making the barb first and
            # then rotating it made the math for drawing the barb really easy.
            # Also, the transform framework makes doing the rotation simple.
            poly_verts = transforms.Affine2D().rotate(-angle).transform(
                poly_verts)
            barb_list.append(poly_verts)

        return barb_list
Example #21
File: geo.py Project: arctellion/pywind
    def _WGS84toOSGB36(self):
        """Perform conversion from WGS84 to OSGB36. If the OSGB36 co-ords are available,
        just return those.
        """
        if self.lat is None or self.lon is None:
            return False

        #First convert to radians
        #These are on the wrong ellipsoid currently: GRS80. (Denoted by _1)
        lat_1 = self.lat * pi / 180
        lon_1 = self.lon * pi / 180

        #Want to convert to the Airy 1830 ellipsoid, which has the following:
        a_1, b_1 =6378137.000, 6356752.3141    #The GRS80 semi-major and semi-minor axes used for WGS84 (m)
        e2_1 = 1 - (b_1*b_1) / (a_1 * a_1)     #The eccentricity of the GRS80 ellipsoid
        nu_1 = a_1/sqrt(1-e2_1*sin(lat_1)**2)

        #First convert to cartesian from spherical polar coordinates
        H = 0 #Third spherical coord.
        x_1 = (nu_1 + H)*cos(lat_1)*cos(lon_1)
        y_1 = (nu_1 + H)*cos(lat_1)*sin(lon_1)
        z_1 = ((1-e2_1)*nu_1 + H)*sin(lat_1)

        #Perform Helmert transform (to go between GRS80 (_1) and Airy 1830 (_2))
        s = 20.4894*10**-6 #The scale factor -1
        tx, ty, tz = -446.448, 125.157, -542.060 #The translations along x,y,z axes respectively
        rxs, rys, rzs = -0.1502, -0.2470, -0.8421#The rotations along x,y,z respectively, in seconds
        rx, ry, rz = rxs*pi/(180*3600.), rys*pi/(180*3600.), rzs*pi/(180*3600.) #In radians
        x_2 = tx + (1+s)*x_1 + (-rz)*y_1 + ry * z_1
        y_2 = ty + rz * x_1 + (1+s)*y_1 + (-rx)*z_1
        z_2 = tz + (-ry) * x_1 + rx * y_1 +(1+s)*z_1

        #Back to spherical polar coordinates from cartesian
        #Need some of the characteristics of the new ellipsoid
        a, b = 6377563.396, 6356256.909 #The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m)
        e2 = 1 - (b*b)/(a*a) #The eccentricity of the Airy 1830 ellipsoid
        p = sqrt(x_2**2 + y_2**2)

        #Lat is obtained by an iterative procedure:
        lat = arctan2(z_2,(p*(1-e2))) #Initial value
        latold = 2*pi
        while abs(lat - latold)>10**-16:
            lat, latold = latold, lat
            nu = a/sqrt(1 - e2 * sin(latold) ** 2)
            lat = arctan2(z_2 + e2 * nu * sin(latold), p)

        #Lon and height are then pretty easy
        lon = arctan2(y_2,x_2)
        H = p/cos(lat) - nu

        #E, N are the British national grid coordinates - eastings and northings
        F0 = 0.9996012717 #scale factor on the central meridian
        lat0 = 49*pi/180  #Latitude of true origin (radians)
        lon0 = -2*pi/180  #Longitude of true origin and central meridian (radians)
        N0, E0 = -100000, 400000#Northing & easting of true origin (m)
        n = (a-b)/(a+b)

        #meridional radius of curvature
        rho = a*F0*(1-e2)*(1-e2*sin(lat)**2)**(-1.5)
        eta2 = nu*F0/rho-1

        M1 = (1 + n + (5/4)*n**2 + (5/4)*n**3) * (lat-lat0)
        M2 = (3*n + 3*n**2 + (21/8)*n**3) * sin(lat-lat0) * cos(lat+lat0)
        M3 = ((15/8)*n**2 + (15/8)*n**3) * sin(2*(lat-lat0)) * cos(2*(lat+lat0))
        M4 = (35/24)*n**3 * sin(3*(lat-lat0)) * cos(3*(lat+lat0))

        #meridional arc
        M = b * F0 * (M1 - M2 + M3 - M4)

        I = M + N0
        II = nu*F0*sin(lat)*cos(lat)/2
        III = nu*F0*sin(lat)*cos(lat)**3*(5- tan(lat)**2 + 9*eta2)/24
        IIIA = nu*F0*sin(lat)*cos(lat)**5*(61- 58*tan(lat)**2 + tan(lat)**4)/720
        IV = nu*F0*cos(lat)
        V = nu*F0*cos(lat)**3*(nu/rho - tan(lat)**2)/6
        VI = nu*F0*cos(lat)**5*(5 - 18* tan(lat)**2 + tan(lat)**4 + 14*eta2 - 58*eta2*tan(lat)**2)/120

        N = I + II*(lon-lon0)**2 + III*(lon- lon0)**4 + IIIA*(lon-lon0)**6
        E = E0 + IV*(lon-lon0) + V*(lon- lon0)**3 + VI*(lon- lon0)**5

        self.easting = E
        self.northing = N

        return True
Example #22
    def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
                    pivot, sizes, fill_empty, flip):
        """Monkey-patch _make_barbs. Allows pivot to be a float value."""
        # These control the spacing and size of barb elements relative to the
        # length of the shaft
        spacing = length * sizes.get('spacing', 0.125)
        full_height = length * sizes.get('height', 0.4)
        full_width = length * sizes.get('width', 0.25)
        empty_rad = length * sizes.get('emptybarb', 0.15)

        # Controls y point where to pivot the barb.
        pivot_points = dict(tip=0.0, middle=-length / 2.)  # noqa: C408

        # Check for flip
        if flip:
            full_height = -full_height

        endx = 0.0
        try:
            endy = float(pivot)
        except ValueError:
            endy = pivot_points[pivot.lower()]

        # Get the appropriate angle for the vector components.  The offset is
        # due to the way the barb is initially drawn, going down the y-axis.
        # This makes sense in a meteorological mode of thinking since there 0
        # degrees corresponds to north (the y-axis traditionally)
        angles = -(ma.arctan2(v, u) + np.pi / 2)

        # Used for low magnitude.  We just get the vertices, so if we make it
        # out here, it can be reused.  The center set here should put the
        # center of the circle at the location(offset), rather than at the
        # same point as the barb pivot; this seems more sensible.
        circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
        if fill_empty:
            empty_barb = circ
        else:
            # If we don't want the empty one filled, we make a degenerate
            # polygon that wraps back over itself
            empty_barb = np.concatenate((circ, circ[::-1]))

        barb_list = []
        for index, angle in np.ndenumerate(angles):
            # If the vector magnitude is too weak to draw anything, plot an
            # empty circle instead
            if empty_flag[index]:
                # We can skip the transform since the circle has no preferred
                # orientation
                barb_list.append(empty_barb)
                continue

            poly_verts = [(endx, endy)]
            offset = length

            # Add vertices for each flag
            for _ in range(nflags[index]):
                # The spacing that works for the barbs is a little too much for
                # the flags, but this only occurs when we have more than 1
                # flag.
                if offset != length:
                    offset += spacing / 2.
                poly_verts.extend(
                    [[endx, endy + offset],
                     [endx + full_height, endy - full_width / 2 + offset],
                     [endx, endy - full_width + offset]])

                offset -= full_width + spacing

            # Add vertices for each barb.  These really are lines, but works
            # great adding 3 vertices that basically pull the polygon out and
            # back down the line
            for _ in range(nbarbs[index]):
                poly_verts.extend([(endx, endy + offset),
                                   (endx + full_height,
                                    endy + offset + full_width / 2),
                                   (endx, endy + offset)])

                offset -= spacing

            # Add the vertices for half a barb, if needed
            if half_barb[index]:
                # If the half barb is the first on the staff, traditionally it
                # is offset from the end to make it easy to distinguish from a
                # barb with a full one
                if offset == length:
                    poly_verts.append((endx, endy + offset))
                    offset -= 1.5 * spacing
                poly_verts.extend([(endx, endy + offset),
                                   (endx + full_height / 2,
                                    endy + offset + full_width / 4),
                                   (endx, endy + offset)])

            # Rotate the barb according the angle. Making the barb first and
            # then rotating it made the math for drawing the barb really easy.
            # Also, the transform framework makes doing the rotation simple.
            poly_verts = transforms.Affine2D().rotate(-angle).transform(
                poly_verts)
            barb_list.append(poly_verts)

        return barb_list
Example #23
File: geo.py Project: arctellion/pywind
    def _OSGB36toWGS84(self):
        """ Convert between OSGB36 and WGS84. If we already have values available, we just return them.

        :rtype: float, float
        """
        if self.easting is None or self.northing is None:
            return False

        #E, N are the British national grid coordinates - eastings and northings
        a, b = 6377563.396, 6356256.909     #The Airy 1830 semi-major and semi-minor axes used for OSGB36 (m)
        F0 = 0.9996012717                   #scale factor on the central meridian
        lat0 = 49 * pi / 180                    #Latitude of true origin (radians)
        lon0 = -2 * pi / 180                    #Longitude of true origin and central meridian (radians)
        N0, E0 = -100000, 400000            #Northing & easting of true origin (m)
        e2 = 1 - (b*b)/(a*a)                #eccentricity squared
        n = (a-b) / (a+b)

        #Initialise the iterative variables
        lat, M = lat0, 0

        while self.northing - N0 - M >= 0.00001: #Accurate to 0.01mm
            lat += (self.northing - N0 - M) / (a * F0)
            M1 = (1 + n + (5./4)*n**2 + (5./4)*n**3) * (lat-lat0)
            M2 = (3*n + 3*n**2 + (21./8)*n**3) * sin(lat-lat0) * cos(lat+lat0)
            M3 = ((15./8)*n**2 + (15./8)*n**3) * sin(2*(lat-lat0)) * cos(2*(lat+lat0))
            M4 = (35./24)*n**3 * sin(3*(lat-lat0)) * cos(3*(lat+lat0))
            #meridional arc
            M = b * F0 * (M1 - M2 + M3 - M4)

        #transverse radius of curvature
        nu = a * F0 / sqrt(1 - e2 * sin(lat) ** 2)

        #meridional radius of curvature
        rho = a * F0 * (1 - e2) * (1 - e2 * sin(lat) ** 2) ** (-1.5)
        eta2 = nu / rho-1

        secLat = 1./cos(lat)
        VII = tan(lat)/(2*rho*nu)
        VIII = tan(lat)/(24*rho*nu**3)*(5+3*tan(lat)**2+eta2-9*tan(lat)**2*eta2)
        IX = tan(lat)/(720*rho*nu**5)*(61+90*tan(lat)**2+45*tan(lat)**4)
        X = secLat/nu
        XI = secLat/(6*nu**3)*(nu/rho+2*tan(lat)**2)
        XII = secLat/(120*nu**5)*(5+28*tan(lat)**2+24*tan(lat)**4)
        XIIA = secLat/(5040*nu**7)*(61+662*tan(lat)**2+1320*tan(lat)**4+720*tan(lat)**6)
        dE = self.easting - E0

        #These are on the wrong ellipsoid currently: Airy1830. (Denoted by _1)
        lat_1 = lat - VII*dE**2 + VIII*dE**4 - IX*dE**6
        lon_1 = lon0 + X*dE - XI*dE**3 + XII*dE**5 - XIIA*dE**7

        #Want to convert to the GRS80 ellipsoid.
        #First convert to cartesian from spherical polar coordinates
        H = 0 #Third spherical coord.
        x_1 = (nu/F0 + H)*cos(lat_1)*cos(lon_1)
        y_1 = (nu/F0+ H)*cos(lat_1)*sin(lon_1)
        z_1 = ((1-e2)*nu/F0 +H)*sin(lat_1)

        #Perform Helmert transform (to go between Airy 1830 (_1) and GRS80 (_2))
        s = -20.4894*10**-6 #The scale factor -1
        tx, ty, tz = 446.448, -125.157, + 542.060 #The translations along x,y,z axes respectively
        rxs,rys,rzs = 0.1502,  0.2470,  0.8421  #The rotations along x,y,z respectively, in seconds
        rx, ry, rz = rxs*pi/(180*3600.), rys*pi/(180*3600.), rzs*pi/(180*3600.) #In radians
        x_2 = tx + (1+s)*x_1 + (-rz)*y_1 + (ry)*z_1
        y_2 = ty + (rz)*x_1  + (1+s)*y_1 + (-rx)*z_1
        z_2 = tz + (-ry)*x_1 + (rx)*y_1 +  (1+s)*z_1

        #Back to spherical polar coordinates from cartesian
        #Need some of the characteristics of the new ellipsoid
        a_2, b_2 =6378137.000, 6356752.3141 #The GRS80 semi-major and semi-minor axes used for WGS84 (m)
        e2_2 = 1- (b_2*b_2)/(a_2*a_2)   #The eccentricity of the GRS80 ellipsoid
        p = sqrt(x_2**2 + y_2**2)

        #Lat is obtained by an iterative procedure:
        lat = arctan2(z_2,(p*(1-e2_2))) #Initial value
        latold = 2 * pi
        while abs(lat - latold)>10**-16:
            lat, latold = latold, lat
            nu_2 = a_2/sqrt(1-e2_2*sin(latold)**2)
            lat = arctan2(z_2+e2_2*nu_2*sin(latold), p)

        #Lon and height are then pretty easy
        lon = arctan2(y_2,x_2)
        H = p/cos(lat) - nu_2

        #Convert to degrees
        self.lat = lat*180/pi
        self.lon = lon*180/pi

        return True
Example #24
def scale_by_cal(Data,
                 scale_t_ave=True,
                 scale_f_ave=False,
                 sub_med=False,
                 scale_f_ave_mod=False,
                 rotate=False):
    """Puts all data in units of the cal temperature.
    
    Data is put into units of the cal temperature, thus removing dependence on
    the gain.  This can be done by dividing by the time average of the cal
    (scale_t_ave=True, default), thus removing dependence on the frequency-
    dependent gain.  Alternatively, you can scale by the frequency average to
    remove the time-dependent gain (scale_f_ave=True). Data is then in units of
    the frequency-averaged cal temperature. You can also do both (recommended).
    After some scaling the data ends up in units of the cal temperature as a
    function of frequency.

    Optionally you can also subtract the time median of the data off here
    (sub_med=True), since you might be done with the cal information at
    this point.
    """

    on_ind = 0
    off_ind = 1
    if (Data.field['CAL'][on_ind] != 'T' or Data.field['CAL'][off_ind] != 'F'):
        raise ce.DataError('Cal states not in expected order.')

    if tuple(Data.field['CRVAL4']) == (-5, -7, -8, -6):
        # Here we check the polarizations and cal indices
        xx_ind = 0
        yy_ind = 3
        xy_inds = [1, 2]

        # A bunch of calculations used to test phase closure.  Not actually
        # relevant to what is being done here.
        #a = (Data.data[5, xy_inds, on_ind, 15:20]
        #     - Data.data[5, xy_inds, off_ind, 15:20])
        #a /= sp.sqrt( Data.data[5, xx_ind, on_ind, 15:20]
        #              - Data.data[5, xx_ind, off_ind, 15:20])
        #a /= sp.sqrt( Data.data[5, yy_ind, on_ind, 15:20]
        #              - Data.data[5, yy_ind, off_ind, 15:20])
        #print a[0,:]**2 + a[1,:]**2

        diff_xx = Data.data[:, xx_ind, on_ind, :] - Data.data[:, xx_ind,
                                                              off_ind, :]
        diff_yy = Data.data[:, yy_ind, on_ind, :] - Data.data[:, yy_ind,
                                                              off_ind, :]

        if scale_t_ave:
            # Find the cal means (in time) and scale by them.
            # Means work much better than medians.  Medians seems to bias the
            # result by up to 10%.  This seems to be discretization noise.  Cal
            # switches fast enough that we shouldn't need this anyway.
            cal_tmed_xx = ma.mean(diff_xx, 0)
            cal_tmed_yy = ma.mean(diff_yy, 0)
            cal_tmed_xx[sp.logical_or(cal_tmed_xx <= 0,
                                      cal_tmed_yy <= 0)] = ma.masked
            cal_tmed_yy[cal_tmed_xx.mask] = ma.masked

            Data.data[:, xx_ind, :, :] /= cal_tmed_xx
            Data.data[:, yy_ind, :, :] /= cal_tmed_yy
            Data.data[:, xy_inds, :, :] /= ma.sqrt(cal_tmed_yy * cal_tmed_xx)

        if scale_f_ave:
            # The frequency gains have systematic structure to them,
            # they are not by any approximation gaussian distributed.  Use
            # means, not medians across frequency.
            operation = ma.mean
            cal_fmea_xx = operation(diff_xx, -1)
            cal_fmea_yy = operation(diff_yy, -1)

            # Flag data with weird cal power.  Still experimental.
            cal_fmea_xx[sp.logical_or(cal_fmea_xx <= 0,
                                      cal_fmea_yy <= 0)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            cal_xx = ma.mean(cal_fmea_xx)
            cal_yy = ma.mean(cal_fmea_yy)
            cal_fmea_xx[sp.logical_or(
                abs(cal_fmea_xx.anom()) >= 0.1 * cal_xx,
                abs(cal_fmea_yy.anom()) >= 0.1 * cal_yy)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked

            ntime = len(cal_fmea_xx)
            cal_fmea_xx.shape = (ntime, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1)
            Data.data[:, xx_ind, :, :] /= cal_fmea_xx
            Data.data[:, yy_ind, :, :] /= cal_fmea_yy
            cal_fmea_xx.shape = (ntime, 1, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1, 1)
            Data.data[:, xy_inds, :, :] /= ma.sqrt(cal_fmea_yy * cal_fmea_xx)

        if scale_f_ave_mod:
            # The frequency gains have systematic structure to them,
            # they are not by any approximation gaussian distributed.  Use
            # means, not medians across frequency.
            operation = ma.mean
            cal_fmea_xx = operation(diff_xx, -1)
            cal_fmea_yy = operation(diff_yy, -1)
            cal_fmea_xx_off = operation(Data.data[:, xx_ind, off_ind, :], -1)
            cal_fmea_yy_off = operation(Data.data[:, yy_ind, off_ind, :], -1)

            sys_xx = cal_fmea_xx_off / cal_fmea_xx
            sys_yy = cal_fmea_yy_off / cal_fmea_yy
            percent_ok = 0.03
            sys_xx_tmed = ma.median(sys_xx)
            sys_yy_tmed = ma.median(sys_yy)

            maskbad_xx = (sys_xx > sys_xx_tmed + sys_xx_tmed * percent_ok) | (
                sys_xx < sys_xx_tmed - sys_xx_tmed * percent_ok)
            maskbad_yy = (sys_yy > sys_yy_tmed + sys_yy_tmed * percent_ok) | (
                sys_yy < sys_yy_tmed - sys_yy_tmed * percent_ok)

            cal_fmea_xx[sp.logical_or(cal_fmea_xx <= 0,
                                      cal_fmea_yy <= 0)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            cal_fmea_xx[maskbad_xx] = ma.masked
            cal_fmea_yy[maskbad_yy] = ma.masked
            cal_xx = ma.mean(cal_fmea_xx)
            cal_yy = ma.mean(cal_fmea_yy)

            ntime = len(cal_fmea_xx)
            cal_fmea_xx.shape = (ntime, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1)
            Data.data[:, xx_ind, :, :] /= cal_fmea_xx
            Data.data[:, yy_ind, :, :] /= cal_fmea_yy
            cal_fmea_xx.shape = (ntime, 1, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1, 1)
            Data.data[:, xy_inds, :, :] /= ma.sqrt(cal_fmea_yy * cal_fmea_xx)

        if scale_f_ave and scale_t_ave:
            # We have divided out t_cal twice so we need to put one factor back
            # in.
            cal_xx = operation(cal_tmed_xx)
            cal_yy = operation(cal_tmed_yy)
            Data.data[:, xx_ind, :, :] *= cal_xx
            Data.data[:, yy_ind, :, :] *= cal_yy
            Data.data[:, xy_inds, :, :] *= ma.sqrt(cal_yy * cal_xx)

        if scale_f_ave_mod and scale_t_ave:
            #Same divide out twice problem.
            cal_xx = operation(cal_tmed_xx)
            cal_yy = operation(cal_tmed_yy)
            Data.data[:, xx_ind, :, :] *= cal_xx
            Data.data[:, yy_ind, :, :] *= cal_yy
            Data.data[:, xy_inds, :, :] *= ma.sqrt(cal_yy * cal_xx)

        if scale_f_ave and scale_f_ave_mod:
            raise ce.DataError("time averaging twice")

        if rotate:
            # Define the differential cal phase to be zero and rotate all data
            # such that this is true.
            cal_real_mean = ma.mean(
                Data.data[:, 1, 0, :] - Data.data[:, 1, 1, :], 0)
            cal_imag_mean = ma.mean(
                Data.data[:, 2, 0, :] - Data.data[:, 2, 1, :], 0)
            # Get the cal phase angle as a function of frequency.
            cal_phase = -ma.arctan2(cal_imag_mean, cal_real_mean)

            # Rotate such that the cal phase is zero. Imperative to have a
            # temporary variable.
            New_data_real = (ma.cos(cal_phase) * Data.data[:, 1, :, :] -
                             ma.sin(cal_phase) * Data.data[:, 2, :, :])
            New_data_imag = (ma.sin(cal_phase) * Data.data[:, 1, :, :] +
                             ma.cos(cal_phase) * Data.data[:, 2, :, :])
            Data.data[:, 1, :, :] = New_data_real
            Data.data[:, 2, :, :] = New_data_imag

    elif tuple(Data.field['CRVAL4']) == (1, 2, 3, 4):
        # For the short term, just divide everything by on-off in I.
        I_ind = 0
        cal_I_t = Data.data[:, I_ind, on_ind, :] - Data.data[:, I_ind,
                                                             off_ind, :]
        cal_I = ma.mean(cal_I_t, 0)

        Data.data /= cal_I
    else:
        raise ce.DataError("Unsupported polarization states.")

    # Subtract the time median if desired.
    if sub_med:
        Data.data -= ma.median(Data.data, 0)
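The rotate branch above treats the two cross-polarization channels as the real and imaginary parts of a complex signal and rotates them so that the cal phase is zero. A toy sketch of the same rotation (not from the original pipeline):

import numpy.ma as ma

real, imag = 3.0, 4.0                    # cal signal as a real/imaginary pair
cal_phase = -ma.arctan2(imag, real)      # rotation angle that zeroes the phase
new_real = ma.cos(cal_phase) * real - ma.sin(cal_phase) * imag   # -> 5.0 (the magnitude)
new_imag = ma.sin(cal_phase) * real + ma.cos(cal_phase) * imag   # -> 0.0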
Example #25
    def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
                    pivot, sizes, fill_empty, flip):
        '''
        This function actually creates the wind barbs.  *u* and *v*
        are components of the vector in the *x* and *y* directions,
        respectively.

        *nflags*, *nbarbs*, and *half_barb*, empty_flag* are,
        *respectively, the number of flags, number of barbs, flag for
        *half a barb, and flag for empty barb, ostensibly obtained
        *from :meth:`_find_tails`.

        *length* is the length of the barb staff in points.

        *pivot* specifies the point on the barb around which the
        entire barb should be rotated.  Right now, valid options are
        'head' and 'middle'.

        *sizes* is a dictionary of coefficients specifying the ratio
        of a given feature to the length of the barb. These features
        include:

            - *spacing*: space between features (flags, full/half
               barbs)

            - *height*: distance from shaft of top of a flag or full
               barb

            - *width* - width of a flag, twice the width of a full barb

            - *emptybarb* - radius of the circle used for low
               magnitudes

        *fill_empty* specifies whether the circle representing an
        empty barb should be filled or not (this changes the drawing
        of the polygon).

        *flip* is a flag indicating whether the features should be flipped to
        the other side of the barb (useful for winds in the southern
        hemisphere.

        This function returns list of arrays of vertices, defining a polygon
        for each of the wind barbs.  These polygons have been rotated to
        properly align with the vector direction.
        '''

        #These control the spacing and size of barb elements relative to the
        #length of the shaft
        spacing = length * sizes.get('spacing', 0.125)
        full_height = length * sizes.get('height', 0.4)
        full_width = length * sizes.get('width', 0.25)
        empty_rad = length * sizes.get('emptybarb', 0.15)

        #Controls y point where to pivot the barb.
        pivot_points = dict(tip=0.0, middle=-length / 2.)

        #Check for flip
        if flip:
            full_height = -full_height

        endx = 0.0
        endy = pivot_points[pivot.lower()]

        # Get the appropriate angle for the vector components.  The offset is
        # due to the way the barb is initially drawn, going down the y-axis.
        # This makes sense in a meteorological mode of thinking, where 0
        # degrees corresponds to north (traditionally the y-axis); a standalone
        # check of this convention follows the function.
        angles = -(ma.arctan2(v, u) + np.pi / 2)

        # Used for low magnitude.  We just get the vertices, so if we make it
        # out here, it can be reused.  The center set here should put the
        # center of the circle at the location(offset), rather than at the
        # same point as the barb pivot; this seems more sensible.
        circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
        if fill_empty:
            empty_barb = circ
        else:
            # If we don't want the empty one filled, we make a degenerate
            # polygon that wraps back over itself
            empty_barb = np.concatenate((circ, circ[::-1]))

        barb_list = []
        for index, angle in np.ndenumerate(angles):
            #If the vector magnitude is too weak to draw anything, plot an
            #empty circle instead
            if empty_flag[index]:
                #We can skip the transform since the circle has no preferred
                #orientation
                barb_list.append(empty_barb)
                continue

            poly_verts = [(endx, endy)]
            offset = length

            # Add vertices for each flag
            for i in range(nflags[index]):
                # The spacing that works for the barbs is a little too much for
                # the flags, but this only occurs when we have more than 1
                # flag.
                if offset != length:
                    offset += spacing / 2.
                poly_verts.extend(
                    [[endx, endy + offset],
                     [endx + full_height, endy - full_width / 2 + offset],
                     [endx, endy - full_width + offset]])

                offset -= full_width + spacing

            # Add vertices for each barb.  These really are lines, but it works
            # well to add 3 vertices that basically pull the polygon out and
            # back down the line.
            for i in range(nbarbs[index]):
                poly_verts.extend(
                    [(endx, endy + offset),
                     (endx + full_height, endy + offset + full_width / 2),
                     (endx, endy + offset)])

                offset -= spacing

            # Add the vertices for half a barb, if needed
            if half_barb[index]:
                # If the half barb is the first on the staff, traditionally it
                # is offset from the end to make it easy to distinguish from a
                # full barb.
                if offset == length:
                    poly_verts.append((endx, endy + offset))
                    offset -= 1.5 * spacing
                poly_verts.extend(
                    [(endx, endy + offset),
                     (endx + full_height / 2, endy + offset + full_width / 4),
                     (endx, endy + offset)])

            # Rotate the barb according to the angle. Making the barb first and
            # then rotating it made the math for drawing the barb really easy.
            # Also, the transform framework makes doing the rotation simple.
            poly_verts = transforms.Affine2D().rotate(-angle).transform(
                poly_verts)
            barb_list.append(poly_verts)

        return barb_list
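
# A small standalone check (illustrative values, not matplotlib internals) of the
# angle convention used above: barbs are drawn down the y-axis, so the pi/2 offset
# plus the sign flip makes a wind from the north come out as 0 radians.
import numpy as np
import numpy.ma as ma

u = ma.array([0.0, 10.0, 0.0])    # eastward components (made up)
v = ma.array([-10.0, 0.0, 10.0])  # northward components (made up)

angles = -(ma.arctan2(v, u) + np.pi / 2)
print(np.degrees(angles))  # the first entry (u=0, v=-10, a northerly wind) is 0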
Example #26
0
def scale_by_cal(Data, scale_t_ave=True, scale_f_ave=False, sub_med=False,
                 scale_f_ave_mod=False, rotate=False) :
    """Puts all data in units of the cal temperature.
    
    Data is put into units of the cal temperature, thus removing dependence on
    the gain.  This can be done by dividing by the time average of the cal
    (scale_t_ave=True, default), thus removing dependence on the frequency-
    dependent gain.  Alternatively, you can scale by the frequency average to
    remove the time-dependent gain (scale_f_ave=True). Data is then in units of
    the frequency-averaged cal temperature. You can also do both (recommended).
    After scaling, the data ends up in units of the cal temperature as a
    function of frequency.

    Optionally you can also subtract the time median of the data here
    (sub_med=True), since you might be done with the cal information at
    this point.
    """
    
    on_ind = 0
    off_ind = 1
    if (Data.field['CAL'][on_ind] != 'T' or
        Data.field['CAL'][off_ind] != 'F') :
            raise ce.DataError('Cal states not in expected order.')
    
    if tuple(Data.field['CRVAL4']) == (-5, -7, -8, -6) :
        # Here we check the polarizations and cal indices
        xx_ind = 0
        yy_ind = 3
        xy_inds = [1,2]
        
        # A bunch of calculations used to test phase closure.  Not actually
        # relevant to what is being done here.
        #a = (Data.data[5, xy_inds, on_ind, 15:20]
        #     - Data.data[5, xy_inds, off_ind, 15:20])
        #a /= sp.sqrt( Data.data[5, xx_ind, on_ind, 15:20] 
        #              - Data.data[5, xx_ind, off_ind, 15:20])
        #a /= sp.sqrt( Data.data[5, yy_ind, on_ind, 15:20] 
        #              - Data.data[5, yy_ind, off_ind, 15:20])
        #print a[0,:]**2 + a[1,:]**2
        
        diff_xx = Data.data[:,xx_ind,on_ind,:] - Data.data[:,xx_ind,off_ind,:]
        diff_yy = Data.data[:,yy_ind,on_ind,:] - Data.data[:,yy_ind,off_ind,:]
        
        if scale_t_ave :
            # Find the cal means (in time) and scale by them.
            # Means work much better than medians.  Medians seem to bias the
            # result by up to 10%.  This seems to be discretization noise.  Cal
            # switches fast enough that we shouldn't need this anyway.
            cal_tmed_xx = ma.mean(diff_xx, 0)
            cal_tmed_yy = ma.mean(diff_yy, 0)
            cal_tmed_xx[sp.logical_or(cal_tmed_xx<=0, cal_tmed_yy<=0)] = ma.masked
            cal_tmed_yy[cal_tmed_xx.mask] = ma.masked

            Data.data[:,xx_ind,:,:] /= cal_tmed_xx
            Data.data[:,yy_ind,:,:] /= cal_tmed_yy
            Data.data[:,xy_inds,:,:] /= ma.sqrt(cal_tmed_yy*cal_tmed_xx)

        if scale_f_ave :
            # The frequency gains have systematic structure to them;
            # they are not by any approximation Gaussian distributed.  Use
            # means, not medians, across frequency.
            operation = ma.mean
            cal_fmea_xx = operation(diff_xx, -1)
            cal_fmea_yy = operation(diff_yy, -1)
            
            # Flag data with weird cal power.  Still experimental.
            cal_fmea_xx[sp.logical_or(cal_fmea_xx<=0,cal_fmea_yy<=0)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            cal_xx = ma.mean(cal_fmea_xx)
            cal_yy = ma.mean(cal_fmea_yy)
            cal_fmea_xx[sp.logical_or(abs(cal_fmea_xx.anom()) >= 0.1*cal_xx,
                            abs(cal_fmea_yy.anom()) >= 0.1*cal_yy)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            
            ntime = len(cal_fmea_xx)
            cal_fmea_xx.shape = (ntime, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1)
            Data.data[:,xx_ind,:,:] /= cal_fmea_xx
            Data.data[:,yy_ind,:,:] /= cal_fmea_yy
            cal_fmea_xx.shape = (ntime, 1, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1, 1)
            Data.data[:,xy_inds,:,:] /= ma.sqrt(cal_fmea_yy*cal_fmea_xx)

        if scale_f_ave_mod :
            # The frequency gains have systematic structure to them;
            # they are not by any approximation Gaussian distributed.  Use
            # means, not medians, across frequency.
            operation = ma.mean
            cal_fmea_xx = operation(diff_xx, -1)
            cal_fmea_yy = operation(diff_yy, -1)
            cal_fmea_xx_off = operation(Data.data[:,xx_ind,off_ind,:], -1)
            cal_fmea_yy_off = operation(Data.data[:,yy_ind,off_ind,:], -1)


            sys_xx = cal_fmea_xx_off/cal_fmea_xx
            sys_yy = cal_fmea_yy_off/cal_fmea_yy
            percent_ok = 0.03
            sys_xx_tmed = ma.median(sys_xx)
            sys_yy_tmed = ma.median(sys_yy)

            maskbad_xx = (sys_xx > sys_xx_tmed + sys_xx_tmed*percent_ok)|(sys_xx < sys_xx_tmed - sys_xx_tmed*percent_ok)
            maskbad_yy = (sys_yy > sys_yy_tmed + sys_yy_tmed*percent_ok)|(sys_yy < sys_yy_tmed - sys_yy_tmed*percent_ok)

            cal_fmea_xx[sp.logical_or(cal_fmea_xx<=0,cal_fmea_yy<=0)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            cal_fmea_xx[maskbad_xx] = ma.masked
            cal_fmea_yy[maskbad_yy] = ma.masked  
            cal_xx = ma.mean(cal_fmea_xx)
            cal_yy = ma.mean(cal_fmea_yy)

            ntime = len(cal_fmea_xx)
            cal_fmea_xx.shape = (ntime, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1)
            Data.data[:,xx_ind,:,:] /= cal_fmea_xx
            Data.data[:,yy_ind,:,:] /= cal_fmea_yy
            cal_fmea_xx.shape = (ntime, 1, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1, 1)
            Data.data[:,xy_inds,:,:] /= ma.sqrt(cal_fmea_yy*cal_fmea_xx) 

        if scale_f_ave and scale_t_ave :
            # We have divided out t_cal twice, so we need to put one factor back
            # in.
            cal_xx = operation(cal_tmed_xx)
            cal_yy = operation(cal_tmed_yy)
            Data.data[:,xx_ind,:,:] *= cal_xx
            Data.data[:,yy_ind,:,:] *= cal_yy
            Data.data[:,xy_inds,:,:] *= ma.sqrt(cal_yy*cal_xx)

        if scale_f_ave_mod and scale_t_ave :
            # Same divided-out-twice problem as above.
            cal_xx = operation(cal_tmed_xx)
            cal_yy = operation(cal_tmed_yy)
            Data.data[:,xx_ind,:,:] *= cal_xx
            Data.data[:,yy_ind,:,:] *= cal_yy
            Data.data[:,xy_inds,:,:] *= ma.sqrt(cal_yy*cal_xx)
           
        if scale_f_ave and scale_f_ave_mod :
            raise ce.DataError("time averaging twice") 

        if rotate:
            # Define the differential cal phase to be zero and rotate all data
            # such that this is true.
            cal_real_mean = ma.mean(Data.data[:,1,0,:] - Data.data[:,1,1,:], 0)
            cal_imag_mean = ma.mean(Data.data[:,2,0,:] - Data.data[:,2,1,:], 0)
            # Get the cal phase angle as a function of frequency.
            cal_phase = -ma.arctan2(cal_imag_mean, cal_real_mean)

            # Rotate such that the cal phase is zero. Imperative to have a
            # temporary variable.
            New_data_real = (ma.cos(cal_phase) * Data.data[:,1,:,:]
                             - ma.sin(cal_phase) * Data.data[:,2,:,:])
            New_data_imag = (ma.sin(cal_phase) * Data.data[:,1,:,:]
                             + ma.cos(cal_phase) * Data.data[:,2,:,:])
            Data.data[:,1,:,:] = New_data_real
            Data.data[:,2,:,:] = New_data_imag

    elif tuple(Data.field['CRVAL4']) == (1, 2, 3, 4) :
        # For the short term, just divide everything by on-off in I.
        I_ind = 0
        cal_I_t = Data.data[:,I_ind,on_ind,:] - Data.data[:,I_ind,off_ind,:]
        cal_I = ma.mean(cal_I_t, 0)

        Data.data /= cal_I
    else :
        raise ce.DataError("Unsupported polarization states.")

    # Subtract the time median if desired.
    if sub_med :
        Data.data -= ma.median(Data.data, 0)
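
# A minimal usage sketch under stated assumptions: `FakeData` is a hypothetical
# stand-in for the data object scale_by_cal expects, with a masked .data array of
# shape (time, polarization, cal on/off, frequency) and the CAL/CRVAL4 fields the
# function checks.  Values are illustrative only.
import numpy.ma as ma

class FakeData(object):
    pass

Data = FakeData()
Data.field = {'CAL': ['T', 'F'], 'CRVAL4': (1, 2, 3, 4)}  # Stokes I, Q, U, V
Data.data = ma.ones((8, 4, 2, 16))  # (time, pol, cal, freq)
Data.data[:, 0, 0, :] += 2.0        # cal-on exceeds cal-off by 2 units in I

scale_by_cal(Data)                     # divides everything by the on-off cal in I
print(ma.mean(Data.data[:, 0, 0, :]))  # I is now in units of the cal temperature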