Example #1
def _apply_function(func, arg):
    # type: (QuilParser.FunctionContext, Any) -> Any
    if isinstance(arg, Expression):
        if func.SIN():
            return parameters.quil_sin(arg)
        elif func.COS():
            return parameters.quil_cos(arg)
        elif func.SQRT():
            return parameters.quil_sqrt(arg)
        elif func.EXP():
            return parameters.quil_exp(arg)
        elif func.CIS():
            return parameters.quil_cis(arg)
        else:
            raise RuntimeError("Unexpected function to apply: " + func.getText())
    else:
        if func.SIN():
            return sin(arg)
        elif func.COS():
            return cos(arg)
        elif func.SQRT():
            return sqrt(arg)
        elif func.EXP():
            return exp(arg)
        elif func.CIS():
            return cos(arg) + complex(0, 1) * sin(arg)
        else:
            raise RuntimeError("Unexpected function to apply: " + func.getText())
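Not part of the original parser code: a one-line check of the Euler identity that the CIS branch relies on, assuming numpy is available as np.

import numpy as np

# cos(x) + i*sin(x) equals exp(i*x) for any real x (Euler's formula).
x = 0.7
assert np.isclose(np.cos(x) + 1j * np.sin(x), np.exp(1j * x))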
Example #2
def compute_linear_ss(model_type: ModelType, e_op):
    """
    Computes the A and B matrices of the linear state space model at the specified operating point for all control
    algorithms that require it.

    :param model_type: the type that should be linearized
    :param e_op: elevation angle (rad) of the desired operating point
    :return: A, B
    """
    Vf_op, Vb_op = compute_feed_forward_static([e_op, 0, 0, 0, 0],
                                               [0, 0, 0, 0, 0])
    Vs_op = Vf_op + Vb_op

    if model_type == ModelType.EASY:
        A = np.array([[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0],
                      [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0],
                      [0, -L2 * sin(e_op) / Je, 0, 0, 0, 0],
                      [L4 * Vs_op * cos(e_op) / Jl, 0, 0, 0, 0, 0]])
    elif model_type == ModelType.FRICTION or model_type == ModelType.CENTRIPETAL:
        A = np.array(
            [[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1],
             [0, 0, 0, -mc.mu_phi / Jp, 0,
              0], [0, -L2 * sin(e_op) / Je, 0, 0, -mc.mu_eps / Je, 0],
             [L4 * Vs_op * cos(e_op) / Jl, 0, 0, 0, 0, -mc.mu_lamb / Jl]])
    else:
        # TODO: Implement for remaining model types
        print("Unsupported model type while trying to linearize system")
        A = np.zeros((6, 6))

    B = np.array([[0, 0], [0, 0], [0, 0], [L1 / Jp, -L1 / Jp],
                  [L3 / Je, L3 / Je], [0, 0]])

    return A, B
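Not from the original project: a minimal sketch of how the returned matrices might be sanity-checked with a standard Kalman controllability test, assuming only that A is n-by-n and B is n-by-m.

import numpy as np

def is_controllable(A, B):
    # Kalman rank test: (A, B) is controllable iff [B, AB, ..., A^(n-1)B] has full row rank.
    n = A.shape[0]
    ctrb = np.hstack([np.linalg.matrix_power(A, k) @ B for k in range(n)])
    return np.linalg.matrix_rank(ctrb) == n

# e.g. A, B = compute_linear_ss(ModelType.EASY, e_op); is_controllable(A, B)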
Example #3
 def gen_xy(self):
     for i in range(-7, 8):
         y_min = -abs(abs(i) - 7) * cos(pi / 3)
         y_max = abs(abs(i) - 7) * cos(pi / 3)
         for j in np.linspace(y_min, y_max, abs(abs(i) - 7) + 1):
             self.xs.append(int(self.s_x / 2 + i * scale))
             self.ys.append(int(self.s_y / 2 + j * scale))
             self.points[int(self.s_x / 2 + i * scale)] = int(self.s_y / 2 +
                                                              j * scale)
     return
Example #4
def _apply_function(func, arg):
    # type: (QuilParser.FunctionContext, Any) -> Any
    if func.SIN():
        return sin(arg)
    elif func.COS():
        return cos(arg)
    elif func.SQRT():
        return sqrt(arg)
    elif func.EXP():
        return exp(arg)
    elif func.CIS():
        return cos(arg) + complex(0, 1) * sin(arg)
    else:
        raise RuntimeError("Unexpected function to apply: " + str(func))
Example #5
 def getVectorDpsi(traj):
     dpsi = traj.getMaskedPosture(traj.dpsi)
     if float(len(dpsi.compressed()))/float(len(dpsi)) > 0.2:
         vdpsi = ma.array([ma.cos(dpsi), ma.sin(dpsi)]).T
         return vdpsi
     else:
         return ma.zeros((len(dpsi), 2))*ma.masked
Example #6
 def getVectorDpsi(traj):
     dpsi = traj.getMaskedPosture(traj.dpsi)
     if float(len(dpsi.compressed())) / float(len(dpsi)) > 0.2:
         vdpsi = ma.array([ma.cos(dpsi), ma.sin(dpsi)]).T
         return vdpsi
     else:
         return ma.zeros((len(dpsi), 2)) * ma.masked
Example #7
def get_wind_components(speed, wdir):
    r'''Calculate the U, V wind vector components from the speed and
    direction.

    Parameters
    ----------
    speed : array_like
        The wind speed (magnitude)
    wdir : array_like
        The wind direction in degrees, specified as the direction from which the
        wind is blowing.

    Returns
    -------
    u, v : tuple of array_like
        The wind components in the X (East-West) and Y (North-South)
        directions, respectively.

    See Also
    --------
    get_speed_dir
    '''

    wdir = np.deg2rad(wdir)
    u = -speed * sin(wdir)
    v = -speed * cos(wdir)
    return u, v
Example #8
def get_wind_components(speed, wdir):
    r'''Calculate the U, V wind vector components from the speed and
    direction.

    Parameters
    ----------
    speed : array_like
        The wind speed (magnitude)
    wdir : array_like
        The wind direction in degrees, specified as the direction from which the
        wind is blowing.

    Returns
    -------
    u, v : tuple of array_like
        The wind components in the X (East-West) and Y (North-South)
        directions, respectively.

    See Also
    --------
    get_speed_dir
    '''

    wdir = np.deg2rad(wdir)
    u = -speed * sin(wdir)
    v = -speed * cos(wdir)
    return u, v
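Illustrative usage of the convention above (the direction is where the wind blows from); the numbers are made up and the math simply mirrors the function body.

import numpy as np

speed = np.array([10.0, 5.0])
wdir = np.array([0.0, 90.0])               # a northerly and an easterly wind
u = -speed * np.sin(np.deg2rad(wdir))
v = -speed * np.cos(np.deg2rad(wdir))
# u, v -> (0, -10) for the northerly and (-5, ~0) for the easterly.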
Example #9
 def test_testUfuncs1(self):
     # Test various functions such as sin, cos.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     assert_(eq(np.cos(x), cos(xm)))
     assert_(eq(np.cosh(x), cosh(xm)))
     assert_(eq(np.sin(x), sin(xm)))
     assert_(eq(np.sinh(x), sinh(xm)))
     assert_(eq(np.tan(x), tan(xm)))
     assert_(eq(np.tanh(x), tanh(xm)))
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
         assert_(eq(np.log(abs(x)), log(xm)))
         assert_(eq(np.log10(abs(x)), log10(xm)))
     assert_(eq(np.exp(x), exp(xm)))
     assert_(eq(np.arcsin(z), arcsin(zm)))
     assert_(eq(np.arccos(z), arccos(zm)))
     assert_(eq(np.arctan(z), arctan(zm)))
     assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
     assert_(eq(np.absolute(x), absolute(xm)))
     assert_(eq(np.equal(x, y), equal(xm, ym)))
     assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
     assert_(eq(np.less(x, y), less(xm, ym)))
     assert_(eq(np.greater(x, y), greater(xm, ym)))
     assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
     assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
     assert_(eq(np.conjugate(x), conjugate(xm)))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
     assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
     assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
Example #10
 def test_testUfuncs1(self):
     # Test various functions such as sin, cos.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     assert_(eq(np.cos(x), cos(xm)))
     assert_(eq(np.cosh(x), cosh(xm)))
     assert_(eq(np.sin(x), sin(xm)))
     assert_(eq(np.sinh(x), sinh(xm)))
     assert_(eq(np.tan(x), tan(xm)))
     assert_(eq(np.tanh(x), tanh(xm)))
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
         assert_(eq(np.log(abs(x)), log(xm)))
         assert_(eq(np.log10(abs(x)), log10(xm)))
     assert_(eq(np.exp(x), exp(xm)))
     assert_(eq(np.arcsin(z), arcsin(zm)))
     assert_(eq(np.arccos(z), arccos(zm)))
     assert_(eq(np.arctan(z), arctan(zm)))
     assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
     assert_(eq(np.absolute(x), absolute(xm)))
     assert_(eq(np.equal(x, y), equal(xm, ym)))
     assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
     assert_(eq(np.less(x, y), less(xm, ym)))
     assert_(eq(np.greater(x, y), greater(xm, ym)))
     assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
     assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
     assert_(eq(np.conjugate(x), conjugate(xm)))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
     assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
     assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
Example #11
 def result(traj):
     psi = unwrapma(traj.getMaskedPosture(traj.psi))
     if float(len(psi.compressed()))/float(len(psi)) > 0.2:
         sel = ~ma.getmaskarray(psi)
         p = np.polyfit(traj.t[sel], psi[sel], 1)
         psi_corr = psi - np.polyval(p, xrange(psi.shape[0]))
         return dotacf(ma.array([ma.cos(psi),ma.sin(psi)]).T, lags, traj.excluded)
     else:
         return ma.zeros((len(lags),))*ma.masked
Example #12
def compute_feed_forward_static(e_and_derivatives, lambda_and_derivatives):
    e = e_and_derivatives[0]
    Vs = -L2 / L3 * cos(e)
    Vd = 0

    Vf = (Vs + Vd) / 2
    Vb = (Vs - Vd) / 2

    return Vf, Vb
Example #13
 def transform_non_affine(self, a):
     # With safeguards
     # TODO: Can improve this?
     a = np.deg2rad(a)  # convert to radians
     m = ma.masked_where((a < -self._thresh) | (a > self._thresh), a)
     if m.mask.any():
         return ma.log(np.abs(ma.tan(m) + 1 / ma.cos(m)))
     else:
         return np.log(np.abs(np.tan(a) + 1 / np.cos(a)))
Example #14
def delta(lon, lat):
    _dLon = transformLon(lon - 105, lat - 35)
    _dLat = transformLat(lon - 105, lat - 35)
    radLat = lat / 180 * PI
    magic = sin(radLat)
    magic = 1 - _ee * magic * magic
    sqrtMagic = sqrt(magic)
    dLon = (_dLon * 180) / (_a / sqrtMagic * cos(radLat) * PI)
    dLat = (_dLat * 180) / ((_a * (1 - _ee)) / (magic * sqrtMagic) * PI)
    return [dLon, dLat]
Example #15
    def calc_PA(self):
        """Calculates the telescope PA. requires LST to be either a field or 
        previously calculated array
        Outputs an  array of PA values for each time in radians.
        This requires the fields Ra = 'CRVAL2', Dec = 'CRVAL3' and 'DATE-OBS'
        to be set.
        """

        self.PA = sp.zeros(self.dims[0])
        for ii in range(self.dims[0]):
            RA = self.field['CRVAL2'][ii]
            DEC = self.field['CRVAL3'][ii]
            LST = utils.LSTatGBT(self.field['DATE-OBS'][ii])
            H = LST - RA
            Latit = 38.0 + 26.0 / 60
            tanPA = ma.sin(H * sp.pi / 180) / (
                ma.cos(DEC * sp.pi / 180) * ma.tan(Latit * sp.pi / 180) -
                ma.sin(DEC * sp.pi / 180) * ma.cos(H * sp.pi / 180))
            self.PA[ii] = ma.arctan(tanPA)
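For reference, the parallactic-angle expression above can be evaluated stand-alone with plain numpy; the hour angle and declination below are made-up values in degrees, and Latit is the GBT latitude used in the loop.

import numpy as np

H, DEC = 15.0, 30.0
Latit = 38.0 + 26.0 / 60
tanPA = np.sin(np.radians(H)) / (
    np.cos(np.radians(DEC)) * np.tan(np.radians(Latit))
    - np.sin(np.radians(DEC)) * np.cos(np.radians(H)))
PA = np.arctan(tanPA)                      # parallactic angle in radians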
Example #16
 def result(traj):
     psi = unwrapma(traj.getMaskedPosture(traj.psi))
     if float(len(psi.compressed())) / float(len(psi)) > 0.2:
         sel = ~ma.getmaskarray(psi)
         p = np.polyfit(traj.t[sel], psi[sel], 1)
         psi_corr = psi - np.polyval(p, xrange(psi.shape[0]))
         return dotacf(
             ma.array([ma.cos(psi), ma.sin(psi)]).T, lags,
             traj.excluded)
     else:
         return ma.zeros((len(lags), )) * ma.masked
Example #17
 def LinearizedSingleJointModel(self, exoskeletonGeneratedTorque, humanGeneratedTorque, position, velocity, restPosition, referencePosition):
     self.exoskeletonGeneratedTorque = exoskeletonGeneratedTorque
     self.humanGeneratedTorque = humanGeneratedTorque
     self.position = position
     self.referencePosition = referencePosition
     self.velocity = velocity
     self.restPosition = restPosition
     self.acceleration = 1 / self.inertia * (self.exoskeletonGeneratedTorque + self.humanGeneratedTorque - (
         self.damping * self.velocity + self.stiffness*(
             self.position - self.restPosition) + self.gravitationnalTorque * cos(self.referencePosition - self.restPosition)*(self.referencePosition - self.restPosition)))
     return self.acceleration
Example #18
def polar2cart(rho, theta):
    """
    Convert polar coordinates to Cartesian coordinates.

    :param rho: polar rho coordinate
    :param theta: polar theta coordinate in radians
    :return:
    """
    x = rho * ma.cos(theta)
    y = rho * ma.sin(theta)
    return x, y
Example #19
    def kinematicEquations(self, velocity):

        dq = np.zeros(10)

        dq[0] = velocity * cos(self.q[2]) * self.matrices.rearWheelRadius
        dq[1] = velocity * sin(self.q[2]) * self.matrices.rearWheelRadius
        dq[2] = (velocity * (self.q[6] / self.matrices.wheelBase)) * np.cos(self.matrices.frontFrameTilt)
        dq[3] = self.q[4]
        dq[9] = (velocity * (self.q[8] / self.matrices.wheelBase)) * np.cos(self.matrices.frontFrameTilt)

        return dq
Example #20
def polar2cart(rho, theta):
    """
    Convert polar coordinates to Cartesian coordinates.

    :param rho: polar rho coordinate
    :param theta: polar theta coordinate in radians
    :return:
    """
    x = rho * ma.cos(theta)
    y = rho * ma.sin(theta)
    return x, y
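A minimal usage sketch (illustrative only), assuming the polar2cart above is in scope, numpy is imported as np and numpy.ma as ma, and theta is given in radians as the function body expects.

import numpy as np
import numpy.ma as ma

x, y = polar2cart(ma.array([1.0, 2.0]), ma.array([0.0, np.pi / 2]))
# x is approximately (1, 0) and y approximately (0, 2).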
Example #21
def drunken_walk(N, step=1, x0=0, y0=0):
    """ Simulates a random walker

    For N iterations, it turns in any direction, equally distributed. Each
    time moves one step
    """
    rad = random(N) * 2 * np.pi
    dx = ma.sin(rad) * step
    dy = ma.cos(rad) * step
    x = dx.cumsum() + x0
    y = dy.cumsum() + y0
    return x, y
Example #22
def drunken_walk(N, step=1, x0=0, y0=0):
    """ Simulates a random walker

    For N iterations, it turns in any direction, equally distributed. Each
    time moves one step
    """
    rad = random(N)*2*np.pi
    dx = ma.sin(rad)*step
    dy = ma.cos(rad)*step
    x = dx.cumsum() + x0
    y = dy.cumsum() + y0
    return x, y
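A brief usage sketch, assuming drunken_walk above is importable with its module-level names (random = numpy.random.random, ma = numpy.ma) and numpy as np.

import numpy as np

x, y = drunken_walk(1000, step=0.5)
net = np.hypot(x[-1] - x[0], y[-1] - y[0])
# For an unbiased 2-D random walk the RMS displacement grows as step * sqrt(N),
# so `net` is typically of order 0.5 * sqrt(1000), i.e. around 16.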
Example #23
    def fitBearingDiffusion(self, trajectory, windowSize=None, plotFit=False):
        lags = np.round(
            np.linspace(0, np.round(100. * trajectory.frameRate),
                        200)).astype(int)
        tau = lags / trajectory.frameRate
        if windowSize is None:
            psi = unwrapma(trajectory.getMaskedPosture(trajectory.psi))
            sel = ~ma.getmaskarray(psi)
            p = np.polyfit(trajectory.t[sel], psi[sel], 1)
            psi_corr = psi - np.polyval(p, xrange(psi.shape[0]))
            C = dotacf(
                ma.array([ma.cos(psi), ma.sin(psi)]).T, lags,
                trajectory.excluded)
        else:

            def result(traj):
                psi = unwrapma(traj.getMaskedPosture(traj.psi))
                if float(len(psi.compressed())) / float(len(psi)) > 0.2:
                    sel = ~ma.getmaskarray(psi)
                    p = np.polyfit(traj.t[sel], psi[sel], 1)
                    psi_corr = psi - np.polyval(p, xrange(psi.shape[0]))
                    return dotacf(
                        ma.array([ma.cos(psi), ma.sin(psi)]).T, lags,
                        traj.excluded)
                else:
                    return ma.zeros((len(lags), )) * ma.masked

            C = ma.array(
                [result(traj) for traj in trajectory.asWindows(windowSize)]).T
            C = C.mean(axis=1)
        p, pcov = opt.curve_fit(self._bearingDiffusionFitFunction, tau, C,
                                [-1.])
        self.D_psi = 10**p[0]
        if plotFit:
            plt.plot(tau, C, 'k.')
            plt.plot(tau, self._bearingDiffusionFitFunction(tau, p[0]), 'r-')
            plt.xlabel(r'$\tau$ (s)')
            plt.ylabel(
                r'$\langle \cos \left( \psi(\tau) - \psi(0)\right) \rangle$')
            textstr = '$D_\psi=%.2f \mathrm{rad}^2/\mathrm{s}$' % (self.D_psi)
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            # place a text box in lower left in axes coords
            ax = plt.gca()
            ax.text(0.95,
                    0.95,
                    textstr,
                    transform=ax.transAxes,
                    fontsize=14,
                    horizontalalignment='right',
                    verticalalignment='top',
                    bbox=props)
            plt.show()
Example #24
 def transform_non_affine(self, a):
     # NOTE: Critical to truncate valid range inside transform *and*
     # in limit_range_for_scale or get weird duplicate tick labels. This
     # is not necessary for positive-only scales because it is harder to
     # run up right against the scale boundaries.
     with np.errstate(divide='ignore', invalid='ignore'):
         m = ma.masked_where((a <= -90) | (a >= 90), a)
         if m.mask.any():
             m = np.deg2rad(m)
             return ma.log(ma.abs(ma.tan(m) + 1 / ma.cos(m)))
         else:
             a = np.deg2rad(a)
             return np.log(np.abs(np.tan(a) + 1 / np.cos(a)))
Example #25
    def getBodyBearingAutocorrelation(self, maxT=50., windowSize=100.):
        lags = np.round(np.linspace(0, np.round(maxT*self.frameRate), 200)).astype(int)
        if windowSize is None:
            psi = self.getMaskedPosture(self.psi)
            C = dotacf(ma.array([ma.cos(psi),ma.sin(psi)]).T, lags, self.excluded)
        else:
            def result(traj):
                psi = traj.getMaskedPosture(traj.psi)
                return dotacf(ma.array([ma.cos(psi),ma.sin(psi)]).T, lags, traj.excluded)

            C = ma.array([result(traj)
                          for traj in self.asWindows(windowSize)]).T
            C = C.mean(axis=1)
        tau = lags / self.frameRate
        return tau, C
Example #26
 def transform_non_affine(self, a):
     """
     This transform takes a numpy array and returns a transformed copy.
     Since the range of the Mercator scale is limited by the
     user-specified threshold, the input array must be masked to
     contain only valid values.  Matplotlib will handle masked arrays
     and remove the out-of-range data from the plot.  However, the
     returned array *must* have the same shape as the input array, since
     these values need to remain synchronized with values in the other
     dimension.
     """
     masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
     if masked.mask.any():
         return ma.log(np.abs(ma.tan(masked) + 1 / ma.cos(masked)))
     else:
         return np.log(np.abs(np.tan(a) + 1 / np.cos(a)))
Example #27
 def calc_PA(self) :
     """Calculates the telescope PA. requires LST to be either a field or 
     previously calculated array
     Outputs an  array of PA values for each time in radians.
     This requires the fields Ra = 'CRVAL2', Dec = 'CRVAL3' and 'DATE-OBS'
     to be set.
     """
     
     self.PA = sp.zeros(self.dims[0])
     for ii in range(self.dims[0]) :
         RA = self.field['CRVAL2'][ii]
         DEC = self.field['CRVAL3'][ii]
         LST = utils.LSTatGBT(self.field['DATE-OBS'][ii])
         H = LST-RA
         Latit = 38.0+26.0/60
         tanPA = ma.sin(H*sp.pi/180)/(ma.cos(DEC*sp.pi/180)*ma.tan(Latit*sp.pi/180)-ma.sin(DEC*sp.pi/180)*ma.cos(H*sp.pi/180))
         self.PA[ii] = ma.arctan(tanPA)
Example #28
    def fitReversals(self, trajectory, windowSize=None, plotFit=False):
        lags = np.arange(0, np.round(10.*trajectory.frameRate))
        if windowSize is None:
            dpsi = trajectory.getMaskedPosture(trajectory.dpsi)
            vdpsi = ma.array([ma.cos(dpsi), ma.sin(dpsi)]).T
            C = dotacf(vdpsi, lags, trajectory.excluded)
        else:
            def getVectorDpsi(traj):
                dpsi = traj.getMaskedPosture(traj.dpsi)
                if float(len(dpsi.compressed()))/float(len(dpsi)) > 0.2:
                    vdpsi = ma.array([ma.cos(dpsi), ma.sin(dpsi)]).T
                    return vdpsi
                else:
                    return ma.zeros((len(dpsi), 2))*ma.masked

            C = ma.array([dotacf(getVectorDpsi(traj), lags, traj.excluded)
                          for traj in trajectory.asWindows(windowSize)]).T
            C = C.mean(axis=1)
        tau = lags / trajectory.frameRate

        # do the bounded fit
        params = lmfit.Parameters()
        params.add('log_tau_eff', value=0.)
        params.add('Cinf', value=0.5, min=0., max=1.)

        if C.compressed().shape[0]>0:
            p = lmfit.minimize(self._reversalFitResidual, params, args=(tau, C))

            f_rev = 0.5 - np.sqrt(params['Cinf']/4.)
            self.tau_rev = 10**params['log_tau_eff']/(1.-f_rev)
            self.tau_fwd = 10**params['log_tau_eff']/f_rev
        else:
            self.tau_rev = ma.masked
            self.tau_fwd = ma.masked
        if plotFit:
            plt.plot(tau, C, 'k.')
            plt.plot(tau, self._reversalFitFunction(tau, params['log_tau_eff'], params['Cinf']), 'r-')
            plt.xlabel(r'$\tau$ (s)')
            plt.ylabel(r'$\langle \cos\left(\Delta\psi(\tau) - \Delta\psi(0)\right) \rangle$')
            textstr = '$\\tau_{\mathrm{rev}}=%.2f$ s\n$\\tau_{\mathrm{fwd}}=%.2f$ s'%(self.tau_rev, self.tau_fwd)
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            # place a text box in lower left in axes coords
            ax = plt.gca()
            ax.text(0.95, 0.95, textstr, transform=ax.transAxes, fontsize=14,
                    horizontalalignment='right', verticalalignment='top', bbox=props)
            plt.show()
Example #29
 def transform_non_affine(self, a):
     """
     This transform takes an Nx1 ``numpy`` array and returns a
     transformed copy.  Since the range of the Mercator scale
     is limited by the user-specified threshold, the input
     array must be masked to contain only valid values.
     ``matplotlib`` will handle masked arrays and remove the
     out-of-range data from the plot.  Importantly, the
     ``transform`` method *must* return an array that is the
     same shape as the input array, since these values need to
     remain synchronized with values in the other dimension.
     """
     masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
     if masked.mask.any():
         return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
     else:
         return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a)))
Example #30
 def transform(self, a):
     """
     This transform takes an Nx1 ``numpy`` array and returns a
     transformed copy.  Since the range of the Mercator scale
     is limited by the user-specified threshold, the input
     array must be masked to contain only valid values.
     ``matplotlib`` will handle masked arrays and remove the
     out-of-range data from the plot.  Importantly, the
     ``transform`` method *must* return an array that is the
     same shape as the input array, since these values need to
     remain synchronized with values in the other dimension.
     """
     masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
     if masked.mask.any():
         return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
     else:
         return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a)))
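A quick numerical cross-check (not from the original source): ln|tan(a) + 1/cos(a)| is the inverse Gudermannian, so arctan(sinh(y)) recovers the input latitude.

import numpy as np

a = np.deg2rad(np.array([-60.0, 0.0, 45.0, 80.0]))
y = np.log(np.abs(np.tan(a) + 1 / np.cos(a)))
assert np.allclose(np.arctan(np.sinh(y)), a)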
Example #31
def get_wind_components(speed, wdir):
    '''
    Calculate the U, V wind vector components from the speed and
    direction (from which the wind is blowing).

    speed : scalar or array
        The wind speed (magnitude)

    wdir : scalar or array
        The wind direction in degrees

    Returns : tuple of scalars or arrays
        The tuple (U,V) corresponding to the wind components in the
        X (East-West) and Y (North-South) directions, respectively.
    '''
    wdir = wdir * degree
    u = -speed * sin(wdir)
    v = -speed * cos(wdir)
    return u, v
Example #32
def get_wind_components(speed, wdir):
    """
    Calculate the U, V wind vector components from the speed and
    direction (from which the wind is blowing).

    speed : scalar or array
        The wind speed (magnitude)

    wdir : scalar or array
        The wind direction in degrees

    Returns : tuple of scalars or arrays
        The tuple (U,V) corresponding to the wind components in the
        X (East-West) and Y (North-South) directions, respectively.
    """
    wdir = wdir * degree
    u = -speed * sin(wdir)
    v = -speed * cos(wdir)
    return u, v
Example #33
    def getBodyBearingAutocorrelation(self, maxT=50., windowSize=100.):
        lags = np.round(np.linspace(0, np.round(maxT * self.frameRate),
                                    200)).astype(int)
        if windowSize is None:
            psi = self.getMaskedPosture(self.psi)
            C = dotacf(
                ma.array([ma.cos(psi), ma.sin(psi)]).T, lags, self.excluded)
        else:

            def result(traj):
                psi = traj.getMaskedPosture(traj.psi)
                return dotacf(
                    ma.array([ma.cos(psi), ma.sin(psi)]).T, lags,
                    traj.excluded)

            C = ma.array([result(traj)
                          for traj in self.asWindows(windowSize)]).T
            C = C.mean(axis=1)
        tau = lags / self.frameRate
        return tau, C
Example #34
    def rebin_velocity(
        self,
        time: np.ndarray,
        time_new: np.ndarray,
        folding_velocity: Union[float, np.ndarray],
        sequence_indices: list,
    ) -> None:
        """Rebins Doppler velocity in polar coordinates.

        Args:
            time: 1D time array.
            time_new: 1D new time array.
            folding_velocity: Folding velocity (m/s). Can be float when it's the same for all
                altitudes, or np.ndarray when it matches different altitude regions
                (defined in `sequence_indices`).
            sequence_indices: List containing indices of different folding regions,
                e.g. [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10]].

        """

        def _get_scaled_vfold() -> np.ndarray:
            vfold_scaled = math.pi / folding_velocity
            if isinstance(vfold_scaled, float):
                vfold_scaled = np.array([float(vfold_scaled)])
            return vfold_scaled

        def _scale_by_vfold(data_in: np.ndarray, fun) -> np.ndarray:
            data_out = ma.copy(data_in)
            for i, ind in enumerate(sequence_indices):
                data_out[:, ind] = fun(data_in[:, ind], folding_velocity_scaled[i])
            return data_out

        folding_velocity_scaled = _get_scaled_vfold()
        data_scaled = _scale_by_vfold(self.data, np.multiply)
        vel_x = ma.cos(data_scaled)
        vel_y = ma.sin(data_scaled)
        vel_x_mean, _ = utils.rebin_2d(time, vel_x, time_new)
        vel_y_mean, _ = utils.rebin_2d(time, vel_y, time_new)
        mean_vel_scaled = np.arctan2(vel_y_mean, vel_x_mean)
        self.data = _scale_by_vfold(mean_vel_scaled, np.divide)
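Not part of the original class: a small worked example of the circular (vector) averaging that rebin_velocity applies to folded velocities, using an assumed folding velocity of 8 m/s.

import numpy as np

vfold = 8.0                                # m/s, assumed folding (Nyquist) velocity
v = np.array([7.5, -7.8])                  # two samples straddling the folding boundary
phase = v * np.pi / vfold                  # map velocities to angles
mean_phase = np.arctan2(np.sin(phase).mean(), np.cos(phase).mean())
v_mean = mean_phase * vfold / np.pi        # about +7.85 m/s, not the naive mean of -0.15 m/s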
Example #35
def compute_pitch_and_inputs_flatness_simple(e_and_derivatives,
                                             lambda_and_derivatives):
    e = e_and_derivatives[0]
    de1 = e_and_derivatives[1]
    de2 = e_and_derivatives[2]
    de3 = e_and_derivatives[3]
    de4 = e_and_derivatives[4]

    l = lambda_and_derivatives[0]
    dl1 = lambda_and_derivatives[1]
    dl2 = lambda_and_derivatives[2]
    dl3 = lambda_and_derivatives[3]
    dl4 = lambda_and_derivatives[4]

    b = L3 * Jl * dl2
    c = L4 * cos(e)
    d = Je * de2 - L2 * cos(e)
    a = b * c / d

    db1 = L3 * Jl * dl3
    db2 = L3 * Jl * dl4
    dc1 = -L4 * sin(e) * de1
    dc2 = -L4 * (cos(e) * de1**2 + sin(e) * de2)
    dd1 = Je * de3 + L2 * sin(e) * de1
    dd2 = Je * de4 + L2 * (cos(e) * de1**2 + sin(e) * de2)
    f = db1 * c * d
    g = dc1 * d + c * dd1
    h = c * c * d * d
    da1 = (f - b * g) / h

    df1 = db2 * c * d + db1 * g
    dg1 = dc2 * d + 2 * dc1 * dd1 + c * dd2
    dh1 = 2 * c * dc1 * d**2 + 2 * c**2 * d * dd1
    da2 = ((df1 - (db1 * g + b * dg1)) * h - (f - b * g) * dh1) / h**2

    p = arctan(a)
    dp1 = da1 / (1 + a**2)
    dp2 = (da2 * (1 + a**2) - 2 * a * da1**2) / (1 + a**2)**2

    Vs = ((Jl * dl2 / (L4 * cos(e)))**2 +
          ((Je * de2 - L2 * cos(e)) / L3)**2)**(1 / 2)
    Vd = Jp * dp2 / L1

    Vf = (Vs + Vd) / 2
    Vb = (Vs - Vd) / 2

    return np.array([p, dp1, dp2]), np.array([Vf, Vb])
Example #36
def _vec2comp(wdir, wspd):
    '''
    Underlying function that converts a vector to its components

    Parameters
    ----------
    wdir : number, masked_array
        Angle in meteorological degrees
    wspd : number, masked_array
        Magnitudes of wind vector

    Returns
    -------
    u : number, masked_array (same as input)
        U-component of the wind
    v : number, masked_array (same as input)
        V-component of the wind

    '''
    u = wspd * ma.sin(np.radians(wdir % 360.)) * -1
    v = wspd * ma.cos(np.radians(wdir % 360.)) * -1
    return u, v
Example #37
def _vec2comp(wdir, wspd):
    '''
    Underlying function that converts a vector to its components

    Parameters
    ----------
    wdir : number, masked_array
        Angle in meteorological degrees
    wspd : number, masked_array
        Magnitudes of wind vector

    Returns
    -------
    u : number, masked_array (same as input)
        U-component of the wind
    v : number, masked_array (same as input)
        V-component of the wind

    '''
    u = wspd * ma.sin(np.radians(wdir)) * -1
    v = wspd * ma.cos(np.radians(wdir)) * -1
    return u, v
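If the inverse mapping is needed, a hypothetical _comp2vec (not part of the original module) that undoes the conversion above could look like this.

import numpy as np
import numpy.ma as ma

def _comp2vec(u, v):
    # Recover meteorological direction (degrees) and speed from u, v components.
    wspd = ma.sqrt(u ** 2 + v ** 2)
    wdir = np.degrees(ma.arctan2(-u, -v)) % 360.
    return wdir, wspd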
Example #38
    def fitBearingDiffusion(self, trajectory, windowSize=None, plotFit=False):
        lags = np.round(np.linspace(0, np.round(100.*trajectory.frameRate), 200)).astype(int)
        tau = lags/trajectory.frameRate
        if windowSize is None:
            psi = unwrapma(trajectory.getMaskedPosture(trajectory.psi))
            sel = ~ma.getmaskarray(psi)
            p = np.polyfit(trajectory.t[sel], psi[sel], 1)
            psi_corr = psi - np.polyval(p, xrange(psi.shape[0]))
            C = dotacf(ma.array([ma.cos(psi),ma.sin(psi)]).T, lags, trajectory.excluded)
        else:
            def result(traj):
                psi = unwrapma(traj.getMaskedPosture(traj.psi))
                if float(len(psi.compressed()))/float(len(psi)) > 0.2:
                    sel = ~ma.getmaskarray(psi)
                    p = np.polyfit(traj.t[sel], psi[sel], 1)
                    psi_corr = psi - np.polyval(p, xrange(psi.shape[0]))
                    return dotacf(ma.array([ma.cos(psi),ma.sin(psi)]).T, lags, traj.excluded)
                else:
                    return ma.zeros((len(lags),))*ma.masked

            C = ma.array([result(traj)
                          for traj in trajectory.asWindows(windowSize)]).T
            C = C.mean(axis=1)
        p, pcov = opt.curve_fit(self._bearingDiffusionFitFunction, tau, C, [-1.])
        self.D_psi = 10**p[0]
        if plotFit:
            plt.plot(tau, C, 'k.')
            plt.plot(tau, self._bearingDiffusionFitFunction(tau, p[0]), 'r-')
            plt.xlabel(r'$\tau$ (s)')
            plt.ylabel(r'$\langle \cos \left( \psi(\tau) - \psi(0)\right) \rangle$')
            textstr = '$D_\psi=%.2f \mathrm{rad}^2/\mathrm{s}$'%(self.D_psi)
            props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
            # place a text box in lower left in axes coords
            ax = plt.gca()
            ax.text(0.95, 0.95, textstr, transform=ax.transAxes, fontsize=14,
                    horizontalalignment='right', verticalalignment='top', bbox=props)
            plt.show()
Example #39
def get_p_and_first_derivative_simple(model_type: ModelType, e_and_derivatives,
                                      lambda_and_derivatives):
    "Computes p and dp from the flat output and its derivatives."
    if model_type != ModelType.EASY:
        print("model_type = " + str(model_type))
        raise NotImplementedError(
            "get_p_and_first_derivative is not implemented for other model types than EASY."
        )

    p = np.arctan(
        (Jl * L3 * lambda_and_derivatives[2]) /
        (L4 * cos(e_and_derivatives[0]) *
         (Je * e_and_derivatives[2] - L2 * cos(e_and_derivatives[0]))))
    # the following part is partly copied from compute_feed_forward_flatness
    e = e_and_derivatives[0]
    de1 = e_and_derivatives[1]
    de2 = e_and_derivatives[2]
    de3 = e_and_derivatives[3]
    de4 = e_and_derivatives[4]

    l = lambda_and_derivatives[0]
    dl1 = lambda_and_derivatives[1]
    dl2 = lambda_and_derivatives[2]
    dl3 = lambda_and_derivatives[3]
    dl4 = lambda_and_derivatives[4]

    b = L3 * Jl * dl2
    c = L4 * cos(e)
    d = Je * de2 - L2 * cos(e)
    a = b * c / d

    db1 = L3 * Jl * dl3
    db2 = L3 * Jl * dl4
    dc1 = -L4 * sin(e) * de1
    dc2 = -L4 * (cos(e) * de1**2 + sin(e) * de2)
    dd1 = Je * de3 + L2 * sin(e) * de1
    dd2 = Je * de4 + L2 * (cos(e) * de1**2 + sin(e) * de2)
    f = db1 * c * d
    g = dc1 * d + c * dd1
    h = c * c * d * d
    da1 = (f - b * g) / h

    dp = (1 / (1 + a * a)) * da1
    return p, dp
Example #40
def drunken_drive(cfg):
    """ Simulates a standard normal change in course

    For N iterations it changes the course with a normal probability around
      the previous course. Each time moves one step distance. On the drunken
      drive there is a memory on the course followed, different from the
      drunken walk, and so it is more similar to real sensors (ships, drifters
      etc).

      Variance between -90 to 90 deg.
    """
    N = cfg['montecarlo']['Nsamples']
    Rlimit = cfg['montecarlo']['Rlimit']
    x0 = Rlimit * (random(1)[0] - 0.5) * 2
    y0 = Rlimit * (random(1)[0] - 0.5) * 2
    rad = 0.25 * np.pi * randn(N)
    rad[0] = random(1) * 2 * np.pi
    rad = rad.cumsum()
    step = cfg['montecarlo']['dt'] * cfg['montecarlo']['VSampler']
    dx = ma.sin(rad) * step
    dy = ma.cos(rad) * step
    x = dx.cumsum() + x0
    y = dy.cumsum() + y0
    return x, y
Example #41
def drunken_drive(cfg):
    """ Simulates a standard normal change in course

    For N iterations it changes the course with a normal probability around
      the previous course. Each time moves one step distance. On the drunken
      drive there is a memory on the course followed, different from the
      drunken walk, and so it is more similar to real sensors (ships, drifters
      etc).

      Variance between -90 to 90 deg.
    """
    N = cfg['montecarlo']['Nsamples']
    Rlimit = cfg['montecarlo']['Rlimit']
    x0 = Rlimit*(random(1)[0]-0.5)*2
    y0 = Rlimit*(random(1)[0]-0.5)*2
    rad = 0.25*np.pi*randn(N)
    rad[0] = random(1)*2*np.pi
    rad = rad.cumsum()
    step = cfg['montecarlo']['dt'] * cfg['montecarlo']['VSampler']
    dx = ma.sin(rad)*step
    dy = ma.cos(rad)*step
    x = dx.cumsum() + x0
    y = dy.cumsum() + y0
    return x, y
Example #42
def fun_dct1(x, i, n):
    return cos(pi*x*i/n)
Example #43
 def getBearingAutocorrelation(self, maxT=100):
     n = int(np.round(maxT*self.frameRate))
     tau = np.arange(n)/self.frameRate
     psi = self.getMaskedPosture(self.psi)
     C = dotacf(ma.array([ma.cos(psi), ma.sin(psi)]), n)
     return tau, C
Example #44
def fun_dct2(x, i, n):
    return cos(pi*x*(i+1/2)/n)
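A hedged cross-check: if x is read as the frequency index and i as the sample index, the kernel above is the DCT-II basis, and the transform it builds matches SciPy's unnormalized DCT-II up to a factor of 2 (assuming Python 3 division, as in the snippet, and SciPy available).

import numpy as np
from scipy.fft import dct

def fun_dct2(x, i, n):                     # restated with explicit numpy imports
    return np.cos(np.pi * x * (i + 1 / 2) / n)

signal = np.array([1.0, 2.0, 0.5, -1.0])
N = len(signal)
X = np.array([sum(signal[i] * fun_dct2(k, i, N) for i in range(N)) for k in range(N)])
assert np.allclose(2 * X, dct(signal, type=2))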
Example #45
def scale_by_cal(Data, scale_t_ave=True, scale_f_ave=False, sub_med=False,
                 scale_f_ave_mod=False, rotate=False) :
    """Puts all data in units of the cal temperature.
    
    Data is put into units of the cal temperature, thus removing dependence on
    the gain.  This can be done by dividing by the time average of the cal
    (scale_t_ave=True, Default) thus removing dependence on the frequency-
    dependant gain.  Alternatively, you can scale by the frequency average to
    remove the time-dependent gain (scale_f_ave=True). Data is then in units of
    the frequency averaged cal temperture. You can also do both (recommended).
    After some scaling the data ends up in units of the cal temperture as a
    funciton of frequency.

    Optionally you can also subtract the time average of the data off here
    (subtract_time_median), since you might be done with the cal information at
    this point.
    """
    
    on_ind = 0
    off_ind = 1
    if (Data.field['CAL'][on_ind] != 'T' or
        Data.field['CAL'][off_ind] != 'F') :
            raise ce.DataError('Cal states not in expected order.')
    
    if tuple(Data.field['CRVAL4']) == (-5, -7, -8, -6) :
        # Here we check the polarizations and cal indicies
        xx_ind = 0
        yy_ind = 3
        xy_inds = [1,2]
        
        # A bunch of calculations used to test phase closure.  Not actually
        # relevant to what is being done here.
        #a = (Data.data[5, xy_inds, on_ind, 15:20]
        #     - Data.data[5, xy_inds, off_ind, 15:20])
        #a /= sp.sqrt( Data.data[5, xx_ind, on_ind, 15:20] 
        #              - Data.data[5, xx_ind, off_ind, 15:20])
        #a /= sp.sqrt( Data.data[5, yy_ind, on_ind, 15:20] 
        #              - Data.data[5, yy_ind, off_ind, 15:20])
        #print a[0,:]**2 + a[1,:]**2
        
        diff_xx = Data.data[:,xx_ind,on_ind,:] - Data.data[:,xx_ind,off_ind,:]
        diff_yy = Data.data[:,yy_ind,on_ind,:] - Data.data[:,yy_ind,off_ind,:]
        
        if scale_t_ave :
            # Find the cal means (in time) and scale by them.
            # Means work much better than medians.  Medians seems to bias the
            # result by up to 10%.  This seems to be discretization noise.  Cal
            # switches fast enough that we shouldn't need this anyway.
            cal_tmed_xx = ma.mean(diff_xx, 0)
            cal_tmed_yy = ma.mean(diff_yy, 0)
            cal_tmed_xx[sp.logical_or(cal_tmed_xx<=0, cal_tmed_yy<=0)] = ma.masked
            cal_tmed_yy[cal_tmed_xx.mask] = ma.masked

            Data.data[:,xx_ind,:,:] /= cal_tmed_xx
            Data.data[:,yy_ind,:,:] /= cal_tmed_yy
            Data.data[:,xy_inds,:,:] /= ma.sqrt(cal_tmed_yy*cal_tmed_xx)

        if scale_f_ave :
            # The frequency gains have systematic structure to them,
            # they are not by any approximation gaussian distributed.  Use
            # means, not medians across frequency.
            operation = ma.mean
            cal_fmea_xx = operation(diff_xx, -1)
            cal_fmea_yy = operation(diff_yy, -1)
            
            # Flag data with weird cal power.  Still experimental.
            cal_fmea_xx[sp.logical_or(cal_fmea_xx<=0,cal_fmea_yy<=0)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            cal_xx = ma.mean(cal_fmea_xx)
            cal_yy = ma.mean(cal_fmea_yy)
            cal_fmea_xx[sp.logical_or(abs(cal_fmea_xx.anom()) >= 0.1*cal_xx,
                            abs(cal_fmea_yy.anom()) >= 0.1*cal_yy)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            
            ntime = len(cal_fmea_xx)
            cal_fmea_xx.shape = (ntime, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1)
            Data.data[:,xx_ind,:,:] /= cal_fmea_xx
            Data.data[:,yy_ind,:,:] /= cal_fmea_yy
            cal_fmea_xx.shape = (ntime, 1, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1, 1)
            Data.data[:,xy_inds,:,:] /= ma.sqrt(cal_fmea_yy*cal_fmea_xx)

        if scale_f_ave_mod :
            # The frequency gains have systematic structure to them,
            # they are not by any approximation gaussian distributed.  Use
            # means, not medians across frequency.
            operation = ma.mean
            cal_fmea_xx = operation(diff_xx, -1)
            cal_fmea_yy = operation(diff_yy, -1)
            cal_fmea_xx_off = operation(Data.data[:,xx_ind,off_ind,:], -1)
            cal_fmea_yy_off = operation(Data.data[:,yy_ind,off_ind,:], -1)


            sys_xx = cal_fmea_xx_off/cal_fmea_xx
            sys_yy = cal_fmea_yy_off/cal_fmea_yy
            percent_ok = 0.03
            sys_xx_tmed = ma.median(sys_xx)
            sys_yy_tmed = ma.median(sys_yy)

            maskbad_xx = (sys_xx > sys_xx_tmed + sys_xx_tmed*percent_ok)|(sys_xx < sys_xx_tmed - sys_xx_tmed*percent_ok)
            maskbad_yy = (sys_yy > sys_yy_tmed + sys_yy_tmed*percent_ok)|(sys_yy < sys_yy_tmed - sys_yy_tmed*percent_ok)

            cal_fmea_xx[sp.logical_or(cal_fmea_xx<=0,cal_fmea_yy<=0)] = ma.masked
            cal_fmea_yy[cal_fmea_xx.mask] = ma.masked
            cal_fmea_xx[maskbad_xx] = ma.masked
            cal_fmea_yy[maskbad_yy] = ma.masked  
            cal_xx = ma.mean(cal_fmea_xx)
            cal_yy = ma.mean(cal_fmea_yy)

            ntime = len(cal_fmea_xx)
            cal_fmea_xx.shape = (ntime, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1)
            Data.data[:,xx_ind,:,:] /= cal_fmea_xx
            Data.data[:,yy_ind,:,:] /= cal_fmea_yy
            cal_fmea_xx.shape = (ntime, 1, 1, 1)
            cal_fmea_yy.shape = (ntime, 1, 1, 1)
            Data.data[:,xy_inds,:,:] /= ma.sqrt(cal_fmea_yy*cal_fmea_xx) 

        if scale_f_ave and scale_t_ave :
            # We have divided out t_cal twice so we need to put one factor back
            # in.
            cal_xx = operation(cal_tmed_xx)
            cal_yy = operation(cal_tmed_yy)
            Data.data[:,xx_ind,:,:] *= cal_xx
            Data.data[:,yy_ind,:,:] *= cal_yy
            Data.data[:,xy_inds,:,:] *= ma.sqrt(cal_yy*cal_xx)

        if scale_f_ave_mod and scale_t_ave :
            #Same divide out twice problem.
            cal_xx = operation(cal_tmed_xx)
            cal_yy = operation(cal_tmed_yy)
            Data.data[:,xx_ind,:,:] *= cal_xx
            Data.data[:,yy_ind,:,:] *= cal_yy
            Data.data[:,xy_inds,:,:] *= ma.sqrt(cal_yy*cal_xx)
           
        if scale_f_ave and scale_f_ave_mod :
            raise ce.DataError("time averaging twice") 

        if rotate:
            # Define the differential cal phase to be zero and rotate all data
            # such that this is true.
            cal_real_mean = ma.mean(Data.data[:,1,0,:] - Data.data[:,1,1,:], 0)
            cal_imag_mean = ma.mean(Data.data[:,2,0,:] - Data.data[:,2,1,:], 0)
            # Get the cal phase angle as a function of frequency.
            cal_phase = -ma.arctan2(cal_imag_mean, cal_real_mean)

            # Rotate such that the cal phase is zero. Imperative to have a
            # temporary variable.
            New_data_real = (ma.cos(cal_phase) * Data.data[:,1,:,:]
                             - ma.sin(cal_phase) * Data.data[:,2,:,:])
            New_data_imag = (ma.sin(cal_phase) * Data.data[:,1,:,:]
                             + ma.cos(cal_phase) * Data.data[:,2,:,:])
            Data.data[:,1,:,:] = New_data_real
            Data.data[:,2,:,:] = New_data_imag

    elif tuple(Data.field['CRVAL4']) == (1, 2, 3, 4) :
        # For the short term, just divide everything by on-off in I.
        I_ind = 0
        cal_I_t = Data.data[:,I_ind,on_ind,:] - Data.data[:,I_ind,off_ind,:]
        cal_I = ma.mean(cal_I_t, 0)

        Data.data /= cal_I
    else :
        raise ce.DataError("Unsupported polarization states.")

    # Subtract the time median if desired.
    if sub_med :
        Data.data -= ma.median(Data.data, 0)
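Side check with scalar stand-ins (not from the original module): the rotate branch above picks cal_phase so that the rotated differential cal lands on the positive real axis.

import numpy as np

cal_real_mean, cal_imag_mean = 0.8, -0.6
cal_phase = -np.arctan2(cal_imag_mean, cal_real_mean)
new_real = np.cos(cal_phase) * cal_real_mean - np.sin(cal_phase) * cal_imag_mean
new_imag = np.sin(cal_phase) * cal_real_mean + np.cos(cal_phase) * cal_imag_mean
assert np.isclose(new_imag, 0.0) and new_real > 0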
Example #46
def diff(j, x, N):
    theta_j = theta(j, N)
    theta_j_1 = theta(j+1, N)
    a1 = (cos(x) - cos(theta_j)) / (cos(theta_j_1 - theta_j))
    a2 = (x - theta_j) / (theta_j_1 - theta_j)
    return a1 - a2
Example #47
def angleToVector(alpha):
    return ma.array([ma.cos(alpha), ma.sin(alpha)]).T
Example #48
def calibrate_pol(Data, m_total, flux_status):
    """Subtracts a Map out of Data."""

    # Data is a DataBlock object.  It holds everything you need to know about
    # the data in a single scan and IF.  You should get to know them very well.
    # Data.data is a numpy masked array (see numpy documentation) and holds the
    # actual data.  It is a 4 dimensional array.  The dimensions are (in
    # order): (time, pol, cal, freq).  Each dimension can be any length which
    # you can figure out by looking at Data.dims = sp.shape(Data.data).
    # Data.field is a python dictionary that holds all the other data that you
    # might care about from the original fits file.  For instance,
    # Data.field['CAL'] is an array with length dims[2].  It normally has
    # values ['T', 'F']. Data.field['CRVAL4'] tells you about the polarization
    # axis of Data.data.  By SDfits convension each polarization is represented
    # by an integer: 1=I, 2=Q, 3=U, 4=V, -5=XX, -6=YY, -7=XY, -8=YX.

    # Also this depends on having the polarizations rotated correctly to IQUV.
    # Some code to do this has been hacked together in the rotate_pol module,
    # but I don't trust it yet.

    # Some dimension checks.
    # We expect 4 polarizations.
    if not Data.dims[1] == 4:
        raise ce.DataError('Require 4 polarizations.')
    # We expect polarizations to be in order IQUV.
    if (Data.field['CRVAL4'][0] != 1 or Data.field['CRVAL4'][1] != 2
            or Data.field['CRVAL4'][2] != 3 or Data.field['CRVAL4'][3] != 4):
        raise ce.DataError('Expected the polarization basis to be IQUV.')

    # A useful function that might need:
    Data.calc_freq()
    # Now data has an atribute Data.freq which is an array that gives the
    # frequency along the last axis.

    # Data.field['CRVAL1'] is center frequency in Hz.
    # Data.data 4 dim array 2nd index polarization, 4th index frequency.

    # Need to get parallactic angle:
    Data.calc_PA()
    # This gives an array (Data.PA) of PA values of length = time dim.

    for time_index in range(0, Data.dims[0]):

        #Generate a sky matrix for this time index:
        m_sky = sp.zeros((4, 4))
        m_sky[0, 0] = 1
        m_sky[1, 1] = ma.cos(2 * Data.PA[time_index] * sp.pi / 180)
        m_sky[1, 2] = ma.sin(2 * Data.PA[time_index] * sp.pi / 180)
        m_sky[2, 1] = -ma.sin(2 * Data.PA[time_index] * sp.pi / 180)
        m_sky[2, 2] = ma.cos(2 * Data.PA[time_index] * sp.pi / 180)
        m_sky[3, 3] = 1

        M_sky = sp.mat(m_sky)
        M_sky = M_sky.I
        #        print M_sky

        for cal_index in range(0, Data.dims[2]):
            # Determines the Mueller Matrix to use
            for freq in range(0, Data.dims[3]):

                # Tells which mueller matrix to use.
                freq_limit = len(m_total[0, 0, :])
                frequency = int(Data.freq[freq] / 1000)
                #               print frequency
                bin = int((900000 - frequency) * freq_limit / 200000)
                #               print bin
                #               if freq_limit == 200:
                #                   bin = 900-frequency
                #Not setup to work with spectrometer data.
                #               elif freq_limit == 260:
                #                   bin = 929-frequency
                #               print bin
                # Converts files into matrix format
                STOKES = Data.data[time_index, :, cal_index, freq]
                #               print STOKES
                MUELLER = sp.mat(m_total[:, :, bin])
                #               print MUELLER

                # Next there is a matrix multiplication that will generate
                # a new set of stokes values.
                stokesmod = STOKES

                if flux_status == 'False':
                    stokesmod = np.dot(MUELLER, stokesmod)
                stokesmod = np.dot(M_sky, stokesmod)

                # You always want to include the M_sky matrix transformation, but if you just want the flux cal, comment out the MUELLER, STOKES dot product above and include the flux multiplication below instead.
                if flux_status == 'True':
                    stokesmod[0] = stokesmod[0] * MUELLER[0, 0]
                    stokesmod[1] = stokesmod[1] * MUELLER[0, 0]
                    stokesmod[2] = stokesmod[2] * MUELLER[0, 0]
                    stokesmod[3] = stokesmod[3] * MUELLER[0, 0]
#               print stokesmod

                for i in range(0, Data.dims[1]):
                    Data.data[time_index, i, cal_index, freq] = stokesmod[i]
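Illustrative check of the sky matrix built in the loop above: rotating Q and U by twice the parallactic angle is an orthogonal transform, so its inverse is simply its transpose (arbitrary angle used here).

import numpy as np

PA2 = 2 * np.deg2rad(33.0)
c, s = np.cos(PA2), np.sin(PA2)
m_sky = np.array([[1, 0, 0, 0],
                  [0, c, s, 0],
                  [0, -s, c, 0],
                  [0, 0, 0, 1]])
assert np.allclose(np.linalg.inv(m_sky), m_sky.T)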
Example #49
 def result(traj):
     psi = traj.getMaskedPosture(traj.psi)
     return dotacf(ma.array([ma.cos(psi),ma.sin(psi)]).T, lags, traj.excluded)
Example #50
def least_squares(obs, navs, init_pos='', vmf_coeffs=()):
    """
    x = (A^TA)^{-1}A^T l
    Takes an observation ``obs`` and all the data ``nav`` from navigation file.
    If we have a-priori information about rover's position,
        then we can filter low satellites and use tropospheric correction
    :return: rover's position in ecef [m]
    """
    c = 299792458  # speed of light, m/s
    elev_mask = 8  # satellite elevation mask
    now = obs.date
    # Find all possible satellites N
    sats = []
    for r in obs.PRN_number:
        # print r, "Data:", obs.obs_data['C1'][obs.prn(r)], obs.obs_data['P2'][obs.prn(r)]
        if obs.obs_data['C1'][obs.prn(r)] and obs.obs_data['P2'][obs.prn(r)] and ('G' in r):      # iono-free
        # if obs.obs_data['C1'][obs.prn(r)] and obs.obs_data['P1'][obs.prn(r)] and ('R' in r):      # iono-free
        # if obs.obs_data['C1'][i] and ('G' in r):                                # C1 only
            nnt = nav_nearest_in_time(now, navs[r])
            if len(init_pos):
                sat_coord = nnt.eph2pos(now)
                if sat_elev(init_pos, sat_coord) < elev_mask:
                    # print "\tSatellite %s excluded" % r
                    continue
            sats += [(r, nnt)]
    # Form matrix if N >= 4:
    if len(sats) > 3:
        # observed [iono-free] pseudoranges
        # P = np.array([obs.ionofree_pseudorange(s[0]) for s in sats])        # iono-free
        P = np.array([obs.obs_data['C1'][obs.prn(s[0])] for s in sats])     # C1 only
        # get XYZ-coords of satellites
        XYZs = np.array([s[1].eph2pos(now) for s in sats])  # len(XYZs[0]) = 3
        # print "XYZs =",XYZs
    # elif len(sats) <= 3 and len(init_pos):  # FIXME: revise this logic
    elif len(sats) <= 3:  # FIXME: revise this logic
        print "\n\tWarning: too few satellites:", len(sats)
        return None
    # else:
    #     print "\n\tWarning: bad measurement!"
    #     print "sats:", sats, init_pos
    #     return None
    # if err == {}: err = {s[0]:0. for s in sats}
    xyzt = [1e-10, 1e-10, 1e-10, 0.]   # initial point
    if len(init_pos):
        xyzt = init_pos + [0.]
    # print "initial position:", lla_string(ecef_to_lat_lon_alt(xyzt)), tuple(xyzt[:3])
    for itr in range(10):
        # print "\n*** iter = %d ***" % itr
        # geometrical ranges
        lla = ecef_to_lat_lon_alt(xyzt, deg=False)
        ## rho is geometric ranges i.e. distances between current position and every satellite in `sats`
        rho = np.array([np.sqrt(sum([(x - xyzt[i]) ** 2 for i, x in enumerate(XYZs[j])])) for j in xrange(len(sats))])
        # print "ρ =", rho
        # form l-vector (sometimes `l` is denoted as `b`)
        # print "cδt =", xyzt[3]
        # l = np.matrix([P[i] - rho[i] + c * s[1].time_offset(now + dt.timedelta(seconds=xyzt[3]))
        # print "time_offset(now + xyzt[3]) =",[s[1].time_offset(now + xyzt[3]) for s in sats]
        l = np.matrix([P[i] - rho[i] + c * s[1].time_offset(now + xyzt[3])
                       - tropmodel(lla, sat_elev(xyzt[:3], XYZs[i], deg=False), vmf_coeffs)
                       for i, s in enumerate(sats)]).transpose()
        # form A-matrix
        A = np.matrix([np.append((xyzt[:3] - XYZs[i]) / rho[i], [c]) for i in xrange(len(sats))])
        AT = A.transpose()
        # form x-vector
        x_hat_matrix = ((AT * A).I * AT * l)
        x_hat = x_hat_matrix.flatten().getA()[0]
        x_hat[3] /= c
        # x_hat[3] *= 1e9    # time in seconds again
        # print "(x,y,z,cδt) ="," m, ".join(map(lambda x: "%.f" %x, x_hat[:3])),"m, %.1e" % x_hat[3]
        xyzt += x_hat
        # print lla_string(ecef_to_lat_lon_alt(xyzt)),"%.4f"%xyzt[3]
        delta = np.sqrt(sum(map(lambda k: k ** 2, x_hat[:3])))
        if delta < 1.:
            # print "1 meter accuracy achieved, break"
            break
        # now += dt.timedelta(seconds=x_hat[3])
        # XYZs = np.array([s[1].eph2pos(now + dt.timedelta(seconds=x_hat[3])) for s in sats])
        XYZs = np.array([s[1].eph2pos(now + x_hat[3]) for s in sats])

    phi, t, h = ecef_to_lat_lon_alt(xyzt, deg=False)
    R = np.matrix([[-sin(phi) * cos(t), -sin(phi) * sin(t), cos(phi)],
                   [-sin(t), cos(t), 0],
                   [cos(phi) * cos(t), cos(phi) * sin(t), sin(phi)]])
    Q = (AT * A).I
    # S_T = R * Q[0:3, 0:3] * R.transpose()
    # GDOP = sqrt(sum(S_T.diagonal().getA()[0]) + Q[3, 3])
    # print "GDOP = %.3f, VDOP = %.3f" % (GDOP,sqrt(S_T[2,2]))
    return xyzt[:3]
Example #51
def angleToVector(alpha):
    return ma.array([ma.cos(alpha), ma.sin(alpha)]).T
Example #52
def outlier_detector_cmean(np_ma, Vny, Nprf_arr, Nmin=2):

    data = np_ma.data
    mask = np_ma.mask

    f_arr = np.ones(Nprf_arr.shape)
    f_arr[np.where(Nprf_arr == np.min(Nprf_arr))] = -1
    Vny_arr = Vny / Nprf_arr

    kH, kL = np.zeros((5, 5)), np.zeros((5, 5))
    kH[1::2] = 1
    kL[::2] = 1

    # Array with the number of valid neighbours at each point
    Nval_arr_H = local_valid(mask, kernel=kH)
    Nval_arr_L = local_valid(mask, kernel=kL)

    # Convert to angles and calculate trigonometric variables
    ang_ma = (np_ma * pi / Vny)
    cos_ma = ma.cos(ang_ma * Nprf_arr)
    sin_ma = ma.sin(ang_ma * Nprf_arr)

    # Average trigonometric variables in local neighbourhood
    dummy_cos = cos_ma.data * (~mask).astype(int)
    dummy_sin = sin_ma.data * (~mask).astype(int)

    ncols, cos_conv = dummy_cols(dummy_cos, kH, val=0)
    ncols, sin_conv = dummy_cols(dummy_sin, kH, val=0)

    cos_sumH = ndimage.convolve(cos_conv, weights=kH, mode='wrap')
    cos_sumL = ndimage.convolve(cos_conv, weights=kL, mode='wrap')

    sin_sumH = ndimage.convolve(sin_conv, weights=kH, mode='wrap')
    sin_sumL = ndimage.convolve(sin_conv, weights=kL, mode='wrap')

    # Remove added columns
    cos_sumH = cos_sumH[:, :int(cos_sumL.shape[1] - ncols)]
    cos_sumL = cos_sumL[:, :int(cos_sumL.shape[1] - ncols)]
    sin_sumH = sin_sumH[:, :int(sin_sumL.shape[1] - ncols)]
    sin_sumL = sin_sumL[:, :int(sin_sumL.shape[1] - ncols)]

    # Average angle in local neighbourhood
    cos_avgH_ma = ma.array(data=cos_sumH, mask=mask) / Nval_arr_H
    cos_avgL_ma = ma.array(data=cos_sumL, mask=mask) / Nval_arr_L
    sin_avgH_ma = ma.array(data=sin_sumH, mask=mask) / Nval_arr_H
    sin_avgL_ma = ma.array(data=sin_sumL, mask=mask) / Nval_arr_L

    BH = ma.arctan2(sin_avgH_ma, cos_avgH_ma)
    BL = ma.arctan2(sin_avgL_ma, cos_avgL_ma)

    # Average velocity ANGLE of neighbours (reference ANGLE for outlier detection):
    angref_ma = f_arr * (BL - BH)
    angref_ma[angref_ma < 0] = angref_ma[angref_ma < 0] + 2 * pi
    angref_ma[angref_ma > pi] = -(2 * pi - angref_ma[angref_ma > pi])
    angobs_ma = ma.arctan2(ma.sin(ang_ma), ma.cos(ang_ma))

    # Detector array (minimum ANGLE difference between observed and reference):
    diff = angobs_ma - angref_ma
    det_ma = (Vny / pi) * ma.arctan2(ma.sin(diff), ma.cos(diff))

    out_mask = np.zeros(det_ma.shape)
    out_mask[abs(det_ma) > 0.8 * Vny_arr] = 1
    out_mask[(Nval_arr_H < Nmin) | (Nval_arr_L < Nmin)] = 0

    # CORRECTION (2 STEP)

    # Convolution kernel
    kernel = np.ones(kH.shape)

    new_mask = (mask) | (out_mask.astype(bool))

    # Array with the number of valid neighbours at each point (outliers removed)
    Nval_arr = local_valid(new_mask, kernel=kernel)

    out_mask[Nval_arr < Nmin] = 0

    ref_arr = ref_val(data, new_mask, kernel, method='median')
    ref_ma = ma.array(data=ref_arr, mask=mask)

    return ref_ma, out_mask
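Stand-alone check (illustrative values) of the wrapping trick used for det_ma above: arctan2(sin(d), cos(d)) maps any angle difference into (-pi, pi], so the scaled detector stays within (-Vny, Vny].

import numpy as np

Vny = 8.0
diff = np.array([0.3, 3.5, -3.5, 6.0])               # raw angle differences in radians
wrapped = np.arctan2(np.sin(diff), np.cos(diff))     # each mapped into (-pi, pi]
det = (Vny / np.pi) * wrapped
assert np.all(np.abs(det) <= Vny)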
Example #53
def calibrate_pol(Data, m_total,RM_dir,R_to_sky,DP_correct,RM_correct) :
    """Subtracts a Map out of Data."""
        
    # Data is a DataBlock object.  It holds everything you need to know about
    # the data in a single scan and IF.  You should get to know them very well.
    # Data.data is a numpy masked array (see numpy documentation) and holds the
    # actual data.  It is a 4 dimensional array.  The dimensions are (in
    # order): (time, pol, cal, freq).  Each dimension can be any length which
    # you can figure out by looking at Data.dims = sp.shape(Data.data).
    # Data.field is a python dictionary that holds all the other data that you
    # might care about from the original fits file.  For instance,
    # Data.field['CAL'] is an array with length dims[2].  It normally has
    # values ['T', 'F']. Data.field['CRVAL4'] tells you about the polarization
    # axis of Data.data.  By SDfits convention each polarization is represented
    # by an integer: 1=I, 2=Q, 3=U, 4=V, -5=XX, -6=YY, -7=XY, -8=YX.

    # Also this depends on having the polarizations rotated correctly to IQUV.
    # Some code to do this has been hacked together in the rotate_pol module,
    # but I don't trust it yet.

    # Some dimension checks.
    # We expect 4 polarizations.
    if not Data.dims[1] == 4 :
        raise ce.DataError('Require 4 polarizations.')
    # We expect polarizations to be in order IQUV.
    if (Data.field['CRVAL4'][0] != -5 or Data.field['CRVAL4'][1] != -7 or
        Data.field['CRVAL4'][2] != -8 or Data.field['CRVAL4'][3] != -6) :
       	raise ce.DataError('Expected the polarization basis to be XY.')

    # A useful function that we might need:
    Data.calc_freq()
    # Now Data has an attribute Data.freq, which is an array that gives the
    # frequency along the last axis.
          
    # Data.field['CRVAL1'] is center frequency in Hz. 
    # Data.data 4 dim array 2nd index polarization, 4th index frequency. 

    # Need to get parallactic angle:
    Data.calc_PA()
    # This gives an array (Data.PA) of PA values of length = time dim.
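    # Note: Data.PA is in degrees; the sky-matrix code below converts it with
    # sp.pi/180 before taking sines and cosines.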
#   print Data.dims[0]



# This segment of the code is for Rotation Measure Component

    # Since the RM tables have half-hour time divisions and scans are shorter, we only need one table entry per scan.

    if RM_correct==True:
        Comp_Time = 0.0
        Full_date = Data.field['DATE-OBS'][Data.dims[0]/2]
        Date = Full_date.split('T')[0]
        Year = Date.split('-')[0]
        Month = Date.split('-')[1]
        Day = Date.split('-')[2]
        Full_time = Full_date.split('T')[1]
        Hour = Full_time.split(':')[0]
        Min = Full_time.split(':')[1]
        Sec = Full_time.split(':')[2]
        if int(Min)<=15:
            Comp_Time = float(Hour)+0.0
        elif int(Min)<=45:
            Comp_Time = float(Hour)+0.5
        else :
            Comp_Time = float(Hour)+1.0
    #Victor's tables have time in format Hour (xx.xx), Az (deg), El (deg), RM
    # Angle phi = RM*(wavelength)^2 where phi is in radians and wavelength is in meters
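    # Illustrative numbers (not from the pipeline): RM = 10 rad/m^2 at 800 MHz
    # (wavelength ~ 0.375 m) gives phi = 10 * 0.375**2 ~ 1.4 rad.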

        RM_file_name = RM_dir + Year + Month + Day + '_RM.txt'
        RM_data = np.loadtxt(RM_file_name)
        RA_RM = sp.zeros(len(RM_data[:,0]))
        DEC_RM = sp.zeros(len(RM_data[:,0]))
        for i in range(0,len(RM_data[:,0])):
            RM_Hr = int(RM_data[i,0])
            if RM_data[i,0]%1 == 0 :
                RM_Min = '00'
                minutes = 0.0
            else:
                RM_Min = '30'
                minutes = 0.5
            Test = float(RM_Hr)+minutes
            if str(Comp_Time) == str(Test):
                UT_RM = Year+'-'+Month+'-'+Day+'T'+str(RM_Hr)+':'+RM_Min+':00.00'
                EL_RM = RM_data[i,2]
                AZ_RM = RM_data[i,1]
                RA_RM[i], DEC_RM[i] = utils.elaz2radecGBT(EL_RM,AZ_RM,UT_RM)
    #Now have tables of RA/DEC to compare to actual RA/DEC
        RM = 0




#This segment of the code is for Differential Phase Correction Generation

# Can determine the differential phase prior to the loop:
    if DP_correct==True:
# Set up a table of data to examine (calon-caloff to get Tcal)
        Tcal = ma.mean(Data.data[:,:,0,:],axis=0)-ma.mean(Data.data[:,:,1,:],axis=0)
#    Tcal = ma.mean(Data.data[:,:,0,:]-Data.data[:,:,1,:],axis=0)



# This version arbitrarily set 4 possible phases and picked the closest match. There is
# enough variability in the phase within the four categories that this doesn't quite work.

    # Randomly pick frequency bin near one of the zero crossings to compare U's
#    U_test = Tcal[1,230]/sp.sqrt(Tcal[1,230]**2+Tcal[2,230]**2)
#    print Tcal[:,191]
#    print Tcal[1,:]
#    U_test = Tcal[1,191]
#    print U_test
#    chi_sq =sp.zeros(4)
#    dp_dat = sp.zeros((4,2))
#    dp_dat[0] = [0.1354,2.341]
#    dp_dat[1] = [0.0723, 2.4575]
#    dp_dat[1] = [0.0730,2.611] #calculated specifically for sess 81
#    dp_dat[2] = [0.1029,0.045]
#    dp_dat[3] = [0,0]
#    dp_dat[3] = [0.1669,5.609] # giving problems because closer for sess 81 at given freq
#    min = 10
#    val = 5
#    for i in range(0,4):
#       chi_sq[i] = U_test-sp.cos(dp_dat[i,0]*Data.freq[230]/1000000+dp_dat[i,1])/sp.sqrt(Tcal[1,230]**2+Tcal[2,230]**2)
#        print sp.cos(dp_dat[i,0]*Data.freq[191]/1000000+dp_dat[i,1])
#        chi_sq[i] = U_test-sp.cos(dp_dat[i,0]*Data.freq[191]/1000000+dp_dat[i,1])
#        if abs(chi_sq[i]) < min:
#            min = abs(chi_sq[i])
#            val = i
# val tells which of the correction functions to use.    
#    print chi_sq
#    print val
#    print Data.freq[191]



# Alternate code for solving differential phase for each scan.
        fitfunc = lambda p,x: sp.cos(p[0]*x+p[1])
        errfunc = lambda p,x,y: fitfunc(p,x)-y
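        # The model assumes the differential phase varies linearly with frequency,
        # so U/sqrt(U**2 + V**2) is fit below as cos(slope*freq + offset).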
        freqs = sp.zeros(Data.dims[3])
        U_data = sp.zeros(Data.dims[3])
        V_data = sp.zeros(Data.dims[3])
        R_data = sp.zeros(Data.dims[3])
        for i in range(0,Data.dims[3]):
            freqs[i] = Data.freq[Data.dims[3]-i-1]/1000000
            U_data[i] = Tcal[1,Data.dims[3]-i-1]
            V_data[i] = Tcal[2,Data.dims[3]-i-1]
            R_data[i] = U_data[i]/sp.sqrt(U_data[i]**2+V_data[i]**2)
#    print np.any(np.isnan(R_data))
#    print np.any(np.isinf(R_data))
#    print freqs    

        for j in range(0,Data.dims[3]):
            if int(freqs[j])==710:
                mask_num = j
            if int(freqs[j])==740:
                mask_num2 = j
        Datain = R_data[mask_num:mask_num2]
        fin = freqs[mask_num:mask_num2]
        bad_pts = np.logical_or(np.isnan(Datain),np.isinf(Datain))
        good_ind = np.where(np.logical_not(bad_pts))
        Datain = Datain[good_ind]
        fin = fin[good_ind]
        R0 = [0.18,1.0]
        if len(good_ind[0])>1:
#            print good_ind[0]
            R,success = optimize.leastsq(errfunc,R0[:],args=(fin,Datain),maxfev=10000)
            R[1] = R[1]%(2*sp.pi)
            print R
        else:
            R=[0.0,0.0]
            print "Not able to resolve a noise cal phase, setting phase to zero."
  

# This starts the actual data processing for the given scan
         
    for time_index in range(0,Data.dims[0]):

# Extra data needed for Rotation Measure Correction
        if RM_correct==True:
            RA = Data.field['CRVAL2'][time_index]
            DEC = Data.field['CRVAL3'][time_index]
#        print RA
#        print DEC
            RM = 0
            valid = []
            for i in range(0,len(RA_RM)):
                if RA_RM[i] != 0:
                    if abs(RA-RA_RM[i])<10.0:
                        if abs(DEC-DEC_RM[i])<10.0:
                            RM = RM_data[i,3]     
                            valid.append(i)
            RA_M = 10.0
            DEC_M = 10.0
            for j in range(0,len(valid)):
                # Keep the RM table entry closest in both RA and DEC.
                if abs(RA-RA_RM[valid[j]])<RA_M and abs(DEC-DEC_RM[valid[j]])<DEC_M:
                    RA_M = abs(RA-RA_RM[valid[j]])
                    DEC_M = abs(DEC-DEC_RM[valid[j]])
                    RM = RM_data[valid[j],3]
                         
#        print RM
 
    # Generate a sky matrix for this time index (assumes an XY basis):
        m_sky = sp.zeros((4,4))
        m_sky[0,0] = 0.5*(1+ma.cos(2*Data.PA[time_index]*sp.pi/180))
        m_sky[0,1] = -ma.sin(2*Data.PA[time_index]*sp.pi/180)
        m_sky[0,3] = 0.5*(1-ma.cos(2*Data.PA[time_index]*sp.pi/180))
        m_sky[1,0] = 0.5*ma.sin(2*Data.PA[time_index]*sp.pi/180)
        m_sky[1,1] = ma.cos(2*Data.PA[time_index]*sp.pi/180)
        m_sky[1,3] = -0.5*ma.sin(2*Data.PA[time_index]*sp.pi/180)
        m_sky[2,2] = 1
        m_sky[3,0] = 0.5*(1-ma.cos(2*Data.PA[time_index]*sp.pi/180))
        m_sky[3,1] = ma.sin(2*Data.PA[time_index]*sp.pi/180)
        m_sky[3,3] = 0.5*(1+ma.cos(2*Data.PA[time_index]*sp.pi/180))

        M_sky = sp.mat(m_sky)
        M_sky = M_sky.I
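        # The inverse rotates the observed XY products back to the sky frame;
        # it is applied below when R_to_sky is True.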
#        print M_sky

#        for cal_index in range(0,Data.dims[2]):
        for cal_index in range(0,2):
        # Determines the Gains to use   
            for freq in range(0,Data.dims[3]):
     # Tells which mueller matrix to use. 
               freq_limit = len(m_total[0,:])
               frequency = int(Data.freq[freq]/1000)
#               print frequency
               bin = int((900000-frequency)*freq_limit/200000)
#               print bin
#               if freq_limit == 200:
#                   bin = 900-frequency
#Not setup to work with spectrometer data.
#               elif freq_limit == 260:
#                   bin = 929-frequency
#               print bin

    # Generate a sky matrix for this time index:
    # With Faraday rotation the sky matrix is now frequency dependent.
               if RM_correct==True:
                   wavelength = 300000.0/float(frequency) # should be in meters given that frequency is in kHz
#               print wavelength
                   Phi = RM*wavelength*wavelength
#               print Phi
                   m_sky = sp.zeros((4,4)) 
                   m_sky[0,0] = 0.5*(1+ma.cos(2*Data.PA[time_index]*sp.pi/180+Phi))
                   m_sky[0,1] = -ma.sin(2*Data.PA[time_index]*sp.pi/180+Phi) 
                   m_sky[0,3] = 0.5*(1-ma.cos(2*Data.PA[time_index]*sp.pi/180+Phi))
                   m_sky[1,0] = 0.5*ma.sin(2*Data.PA[time_index]*sp.pi/180+Phi) 
                   m_sky[1,1] = ma.cos(2*Data.PA[time_index]*sp.pi/180+Phi) 
                   m_sky[1,3] = -0.5*ma.sin(2*Data.PA[time_index]*sp.pi/180+Phi)
                   m_sky[2,2] = 1
                   m_sky[3,0] = 0.5*(1-ma.cos(2*Data.PA[time_index]*sp.pi/180+Phi))
                   m_sky[3,1] = ma.sin(2*Data.PA[time_index]*sp.pi/180+Phi)
                   m_sky[3,3] = 0.5*(1+ma.cos(2*Data.PA[time_index]*sp.pi/180+Phi))
  
                   M_sky = sp.mat(m_sky)
                   M_sky = M_sky.I 
#               print M_sky 

    # Converts files into vector format 
               XY_params = Data.data[time_index,:,cal_index,freq]       
    # Next there is a matrix multiplication that will generate 
    # a new set of xy values. (Differential gain correction)
               XY_params[0] = XY_params[0]*m_total[0,bin]
               XY_params[3] = XY_params[3]*m_total[1,bin]
               XY_params[1] = XY_params[1]*sp.sqrt(m_total[0,bin]*m_total[1,bin])
               XY_params[2] = XY_params[2]*sp.sqrt(m_total[0,bin]*m_total[1,bin])

    # Add in correction for differential phase

               if DP_correct==True:
                   # Compute the rotation from the original XY/YX values (frequency in MHz to match the fit).
                   dp_phase = R[0]*(frequency/1000.0)+R[1]
                   XY_new = XY_params[1]*sp.cos(dp_phase)-XY_params[2]*sp.sin(dp_phase)
                   XY_params[2] = XY_params[1]*sp.sin(dp_phase)+XY_params[2]*sp.cos(dp_phase)
                   XY_params[1] = XY_new

    #Rotate to sky coordinates (and RM correct if set)

               if R_to_sky==True:
                   # Keep the result 1-D so the write-back loop below works.
                   XY_params = np.asarray(np.dot(M_sky, XY_params)).flatten()

    # Write the corrected data back into Data.

               for i in range(0,Data.dims[1]):
                    Data.data[time_index,i,cal_index,freq] = XY_params[i]	
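The differential-phase step above is just a two-dimensional rotation of the (XY, YX) cross products by the fitted phase. A minimal standalone sketch of that operation (hypothetical function name, plain NumPy, frequency in MHz as in the fit):

import numpy as np

def rotate_cross_products(xy, yx, slope, offset, freq_mhz):
    # Sketch only: rotate the cross-polarized products by the differential
    # phase model phase = slope * freq_mhz + offset fitted above.
    phase = slope * freq_mhz + offset
    c, s = np.cos(phase), np.sin(phase)
    return xy * c - yx * s, xy * s + yx * c

# Example: xy, yx = rotate_cross_products(0.2, -0.1, 0.18, 1.0, 750.0)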
Example #54
0
def calibrate_pol(Data, m_total):
    """Applies the Mueller-matrix and parallactic-angle corrections to Data."""

    # Data is a DataBlock object.  It holds everything you need to know about
    # the data in a single scan and IF.  You should get to know them very well.
    # Data.data is a numpy masked array (see numpy documentation) and holds the
    # actual data.  It is a 4-dimensional array.  The dimensions are (in
    # order): (time, pol, cal, freq).  Each dimension can be any length which
    # you can figure out by looking at Data.dims = sp.shape(Data.data).
    # Data.field is a python dictionary that holds all the other data that you
    # might care about from the original FITS file.  For instance,
    # Data.field['CAL'] is an array with length dims[2].  It normally has
    # values ['T', 'F']. Data.field['CRVAL4'] tells you about the polarization
    # axis of Data.data.  By the SDfits convention each polarization is represented
    # by an integer: 1=I, 2=Q, 3=U, 4=V, -5=XX, -6=YY, -7=XY, -8=YX.

    # Also this depends on having the polarizations rotated correctly to IQUV.
    # Some code to do this has been hacked together in the rotate_pol module,
    # but I don't trust it yet.

    # Some dimension checks.
    # We expect 4 polarizations.
    if Data.dims[1] != 4:
        raise ce.DataError('Require 4 polarizations.')
    # We expect polarizations to be in order IQUV.
    if (Data.field['CRVAL4'][0] != 1 or Data.field['CRVAL4'][1] != 2 or
        Data.field['CRVAL4'][2] != 3 or Data.field['CRVAL4'][3] != 4):
        raise ce.DataError('Expected the polarization basis to be IQUV.')

    # A useful function that we might need:
    Data.calc_freq()
    # Now Data has an attribute Data.freq, which is an array that gives the
    # frequency along the last axis.
          
    # Data.field['CRVAL1'] is center frequency in Hz. 
    # Data.data 4 dim array 2nd index polarization, 4th index frequency. 

    # Need to get parallactic angle:
    Data.calc_PA()
    # This gives an array (Data.PA) of PA values of length = time dim.

    for time_index in range(0,Data.dims[0]):

    #Generate a sky matrix for this time index:
        m_sky = sp.zeros((4,4))
        m_sky[0,0] = 1
        m_sky[1,1] = ma.cos(2*Data.PA[time_index]*sp.pi/180)
        m_sky[1,2] = ma.sin(2*Data.PA[time_index]*sp.pi/180)
        m_sky[2,1] = -ma.sin(2*Data.PA[time_index]*sp.pi/180)
        m_sky[2,2] = ma.cos(2*Data.PA[time_index]*sp.pi/180)
        m_sky[3,3] = 1

        M_sky = sp.mat(m_sky)
        M_sky = M_sky.I
#        print M_sky

        for cal_index in range(0,Data.dims[2]):
        # Determines the Inverse Mueller Matrix to use   
#            CenterFrequency = int(Data.field['CRVAL1']/1000000)
            for freq in range(0,Data.dims[3]):
#               if Data.freq[freq] in range(669,699):
#                  bin = 0
#               elif Data.freq[freq] in range(700,729):
#                  bin = 1
#               elif Data.freq[freq] in range(730,759):
#                  bin = 2
#               elif Data.freq[freq] in range(760,789):
#                  bin = 3
#               elif Data.freq[freq] in range(790,819):
#                  bin = 4
#               elif Data.freq[freq] in range(820,849):
#                  bin = 5
#               elif Data.freq[freq] in range(850,879):
#                  bin = 6
#               elif Data.freq[freq] in range(880,929):
#                  bin = 7
#               else :
#                  raise ce.DataError('The frequency outside viable window') 

     # Tells which mueller matrix to use. Assumes at least 1 MHz bins.
               frequency = int(Data.freq[freq]/1000) 
               freq_limit=len(m_total[0,0,:])
               bin = int((900000-frequency)*freq_limit/200000)
#               if freq_limit == 200:
#                   bin = 900-frequency
#               elif freq_limit == 260:
#                   bin = 929-frequency
#               print bin
    # Converts files into matrix format 
               STOKES = Data.data[time_index,:,cal_index,freq]       
#               print STOKES
               MUELLER = sp.mat(m_total[:,:,bin])
#               print MUELLER

    # Next there is a matrix multiplication that will generate 
    # a new set of stokes values.
               # Keep the results as flat 1-D arrays so the per-element write-back below works.
               stokesmod = np.asarray(np.dot(MUELLER, STOKES)).flatten()
               stokesmod = np.asarray(np.dot(M_sky, stokesmod)).flatten()
#               print stokesmod
               for i in range(0,Data.dims[1]):
                    Data.data[time_index,i,cal_index,freq] = stokesmod[i]
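The sky correction in this example is the standard rotation of Q and U by twice the parallactic angle, inverted and applied to every sample. A minimal self-contained sketch of that piece (hypothetical function name, plain NumPy):

import numpy as np

def parallactic_rotation(pa_deg):
    # Sketch only: Mueller matrix rotating Q and U by twice the parallactic
    # angle (in degrees), mirroring the m_sky construction above.
    c = np.cos(2 * np.radians(pa_deg))
    s = np.sin(2 * np.radians(pa_deg))
    return np.array([[1.0, 0.0, 0.0, 0.0],
                     [0.0,   c,   s, 0.0],
                     [0.0,  -s,   c, 0.0],
                     [0.0, 0.0, 0.0, 1.0]])

# Example: undo the sky rotation for one (I, Q, U, V) sample at PA = 30 deg.
stokes = np.array([1.0, 0.1, 0.05, 0.0])
corrected = np.dot(np.linalg.inv(parallactic_rotation(30.0)), stokes)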