Example #1
    def intersection(self, ray):
        # Solve |ray.p0 + lam*ray.d - cv|^2 = radcurv^2 for lam, a quadratic
        # with a = 1 assuming ray.d is a unit vector.
        cv = self.placement.location + self.placement.orientation*self.radcurv  # centre of curvature
        dv = ray.p0 - cv
        a = 1
        b = 2*pl.dot(ray.d, dv)
        c = pl.dot(dv, dv) - self.radcurv**2

        lam = None
        qs = b**2 - 4*a*c  # discriminant
        if qs == 0:
            lam = -b/(2*a)
        elif qs > 0:
            lamp = (-b + pl.sqrt(qs))/(2*a)
            lamn = (-b - pl.sqrt(qs))/(2*a)
            # a convex surface (radcurv > 0) takes the near intersection,
            # a concave one (radcurv < 0) the far intersection
            if self.radcurv > 0:
                lam = min(lamp, lamn)
            elif self.radcurv < 0:
                lam = max(lamp, lamn)

        # assign intersection (None when the ray misses the sphere)
        ray.p1 = ray.propagate(lam) if lam is not None else None
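The same quadratic as a self-contained NumPy sketch; the function name and the unit-direction assumption are illustrative, not part of the original class:

import numpy as np

def ray_sphere_lambda(p0, d, centre, R):
    # Nearest positive ray parameter of the ray/sphere intersection, or None.
    dv = np.asarray(p0, dtype=float) - np.asarray(centre, dtype=float)
    b = 2*np.dot(d, dv)
    c = np.dot(dv, dv) - R**2
    disc = b**2 - 4*c          # a == 1 because d is a unit vector
    if disc < 0:
        return None            # ray misses the sphere
    roots = ((-b - np.sqrt(disc))/2, (-b + np.sqrt(disc))/2)
    hits = [lam for lam in roots if lam > 0]
    return min(hits) if hits else None

print(ray_sphere_lambda([0, 0, 0], [0, 0, 1], [0, 0, 5], 1.0))  # -> 4.0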
Example #2
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0) 
        rdists=pylab.ones(len(t_keys))*float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])): # number of include keys
            ikey=[]
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels): # timbre channels
                ikey.append(ikeys[t_chan][i])
                try: 
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index( ikey[t_chan] ) # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except ValueError:
                    print("Key not found in result list: ", ikey, "for query:", qkeys[t_chan])
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index( ikey[0] ) # audiodb include-key index
            rdists[a_idx] = distance.bhatt(pylab.sqrt(pylab.absolute(dk)), pylab.sqrt(pylab.absolute(qk*rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)   # Sort fields into database order
        for r in self.ground_truth: # relevant keys
            ranks_list.append(pylab.where(sort_idx==r)[0][0]) # Rank of the relevant key
        return ranks_list, rdists
Example #3
def f(x, t):
    # State layout (2-D, all masses = 1.0): x = [vx, vy, x, y] with N entries
    # each, so the factor of 4 below only works for 2D.
    N = len(x) // 4
    xdot = pl.array([])

    for i in range(N):
        temp = 0.0
        for j in range(N):
            if i == j:
                continue
            temp += -(x[2 * N + i] - x[2 * N + j]) / (
                pl.sqrt((x[2 * N + i] - x[2 * N + j]) ** 2 + (x[3 * N + i] - x[3 * N + j]) ** 2) ** 3
            )
        xdot = pl.append(xdot, temp)
    for i in range(N):
        temp = 0.0
        for j in range(N):
            if i == j:
                continue
            temp += -(x[3 * N + i] - x[3 * N + j]) / (
                pl.sqrt((x[2 * N + i] - x[2 * N + j]) ** 2 + (x[3 * N + i] - x[3 * N + j]) ** 2) ** 3
            )
        xdot = pl.append(xdot, temp)
    for i in range(N):
        xdot = pl.append(xdot, x[i])
    for i in range(N):
        xdot = pl.append(xdot, x[N + i])

    print("len xdot is: " + str(len(xdot)))
    return xdot
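A minimal sketch of how this right-hand side might be driven, assuming pl is pylab and using SciPy's odeint; the circular two-body initial condition is illustrative:

from scipy.integrate import odeint
import pylab as pl

# Two unit masses on a circular orbit about their barycentre:
# separation 2, so each body needs speed 0.5 (G = m = 1).
# State layout matches f: [vx (N), vy (N), x (N), y (N)].
state0 = pl.array([0.0, 0.0, 0.5, -0.5, 1.0, -1.0, 0.0, 0.0])
times = pl.linspace(0.0, 10.0, 200)
trajectory = odeint(f, state0, times)  # shape (200, 8)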
Example #4
def displayData(X):
    print("Visualizing")
    m, n = X.shape
    width = round(sqrt(n))
    height = width
    display_rows = int(floor(sqrt(m)))
    display_cols = int(ceil(m/display_rows))

    print("Cell width:", width)
    print("Cell height:", height)
    print("Display rows:", display_rows)
    print("Display columns:", display_cols)
        
    display = zeros((display_rows*height,display_cols*width))

    # Iterate through the training examples; reshape each one and populate
    # the display matrix with the letter matrices.
    for xrow in range(0, m):
        rowindex = xrow // display_cols      # integer grid row
        columnindex = xrow % display_cols    # integer grid column
        rowstart = int(rowindex*height)
        rowend = int((rowindex+1)*height)
        colstart = int(columnindex*width)
        colend = int((columnindex+1)*width)
        display[rowstart:rowend, colstart:colend] = X[xrow,:].reshape(height,width).transpose()
         
    imshow(display, cmap=get_cmap('binary'), interpolation='none')
    
    # Show plot without blocking
    draw()    
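A quick way to exercise this, assuming the snippet's from pylab import * namespace (displayData itself uses zeros, imshow, etc. unqualified):

from pylab import *

X = rand(25, 400)   # 25 random 20x20 "images", one flattened per row
displayData(X)
show()              # draw() above does not block, so show the window here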
Example #5
    def calculateFDunc(self):
        #Calculates the uncertainty of the FFT according to:
        #   - J. M. Fornies-Marquina, J. Letosa, M. Garcia-Garcia, J. M. Artacho, "Error Propagation for the transformation of time domain into frequency domain", IEEE Trans. Magn, Vol. 33, No. 2, March 1997, pp. 1456-1459
        #return asarray _tdData
        #Assumes that the amplitude of each time sample is statistically independent
        #from the amplitudes of the other time samples

        # Calculates the uncertainty of the real and imaginary parts of the FFT and their covariance
        unc_E_real = []
        unc_E_imag = []
        cov = []
        for f in self.getfreqs():
            unc_E_real.append(py.sum((py.cos(2*py.pi*f*self._tdData.getTimes())*self._tdData.getUncEX())**2))
            unc_E_imag.append(py.sum((py.sin(2*py.pi*f*self._tdData.getTimes())*self._tdData.getUncEX())**2))
            cov.append(-0.5*py.sum(py.sin(4*py.pi*f*self._tdData.getTimes())*self._tdData.getUncEX()**2))
        
        unc_E_real = py.sqrt(py.asarray(unc_E_real))
        unc_E_imag = py.sqrt(py.asarray(unc_E_imag))
        cov = py.asarray(cov)
        
        # Calculates the uncertainty of the modulus and phase of the FFT
        unc_E_abs = py.sqrt((self.getFReal()**2*unc_E_real**2+self.getFImag()**2*unc_E_imag**2+2*self.getFReal()*self.getFImag()*cov)/self.getFAbs()**2)
        unc_E_ph = py.sqrt((self.getFImag()**2*unc_E_real**2+self.getFReal()**2*unc_E_imag**2-2*self.getFReal()*self.getFImag()*cov)/self.getFAbs()**4)
        
        t=py.column_stack((self.getfreqs(),unc_E_real,unc_E_imag,unc_E_abs,unc_E_ph))
        return self.getcroppedData(t)  
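Written out, the propagation the loop above implements (sigma_i is the time-domain uncertainty getUncEX() at sample t_i):

\sigma_{\mathrm{Re}\,E}^2(f) = \sum_i \cos^2(2\pi f t_i)\,\sigma_i^2,\qquad
\sigma_{\mathrm{Im}\,E}^2(f) = \sum_i \sin^2(2\pi f t_i)\,\sigma_i^2,\qquad
\mathrm{cov}(f) = -\frac{1}{2}\sum_i \sin(4\pi f t_i)\,\sigma_i^2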
Example #6
def ICeuklid_to_ICcircle(IC):
    """
    converts from IC in Euclidean space to IC in circle parameters (rotationally invariant).
    The formats are:
    IC_euklid: [x, y, z, vx, vy, vz]
    IC_circle: [y, vy, |v|, |l|, phiv], where |v| is the magnitude of CoM velocity, |l| 
        is the distance from leg1 (assumed to be at [0,0,0]) to CoM, and phiv the angle
        of the velocity in horizontal plane wrt x-axis
    *NOTE* for re-conversion, the leg position is additionally required
    
    :args:
        IC (6x float): the initial conditions in Euclidean space

    :returns:
        IC (5x float): the initial conditions in circular coordinates
    
    """
    x,y,z,vx,vy,vz = IC
    v = sqrt(vx**2 + vy**2 + vz**2)
    l = sqrt(x**2 + y**2 + z**2)
    #phiv = arctan2(vz, vx)
    #phiv = arctan2(-vz, vx)
    phiv = -arctan2(-vz, vx)
    #phix = arctan2(-z, -x)
    phix = arctan2(z, -x)
    # warnings.warn('TODO: fix phi_x (add)')
    # print "phix:", phix * 180 / pi
    return [y, vy, v, l, phiv + phix]
Example #7
def renormalize(x_unpurt,x_before,x_purt,epsilon,N):
    # BEFORE ANYTHING: make sure particles near boundaries are shuffled into places where the
    # seam is not between any perturbed and fiducial trajectories.
    x_unpurt,x_purt = shuff(x_unpurt,x_purt,N)

    # The trajectory we return becomes the fiducial one for the next run; call it x_new.
    x_new = pl.copy(x_unpurt)
    # Copied because we are going to add small amounts to it to perturb it.

    # Find a unit vector pointing along the trajectory's path. For this we need the
    # fiducial point at t-dt, given to us as x_before: take the vector from x_before
    # to x_unpurt and normalize it.
    traj_vec = x_unpurt-x_before
    traj_vec = traj_vec/pl.sqrt(pl.dot(traj_vec,traj_vec))
    print('traj_vec magnitude (should be 1): ' + str(pl.sqrt(pl.dot(traj_vec,traj_vec))))

    # Now check how close the fiducial-to-perturbed vector is to orthogonal to the
    # trajectory. As the run progresses it should align with the axis of greatest
    # expansion, which should be orthogonal, so the dot product should approach 0.
    # First normalize the difference vector.
    diff_vec = x_unpurt - x_purt
    diff_vec = diff_vec/pl.sqrt(pl.dot(diff_vec,diff_vec))
    print('diff_vec magnitude (should be 1): ' + str(pl.sqrt(pl.dot(diff_vec,diff_vec))))
    print('normalized(x_unpurt-x_purt)dot(traj_vec)  (should get close to 0): '+ str(pl.dot(diff_vec,traj_vec)))

    # For now just return a point moved back along the difference vector; no
    # Gram-Schmidt or anything.
    return x_new + epsilon*diff_vec
Example #8
def data_to_ch(data):
    ch = {}
    for ch_ind in range(1, 97):
        ch[ch_ind] = {}
        ch[ch_ind]["bl"] = data[ch_ind]["blanks"]
        ch[ch_ind]["bl_mu"] = pl.mean(ch[ch_ind]["bl"])
        ch[ch_ind]["bl_sem"] = pl.std(ch[ch_ind]["bl"]) / pl.sqrt(len(ch[ch_ind]["bl"]))
        for ind in sorted(data[ch_ind].keys()):
            if ind != "blanks":
                k = ind[0]
                if k not in ch[ch_ind]:
                    ch[ch_ind][k] = {}
                    ch[ch_ind][k]["fr"] = []
                    ch[ch_ind][k]["fr_mu"] = []
                    ch[ch_ind][k]["fr_sem"] = []
                    ch[ch_ind][k]["pos_y"] = []
                    ch[ch_ind][k]["dprime"] = []
                ch[ch_ind][k]["fr"].append(data[ch_ind][ind]["on"])
                ch[ch_ind][k]["fr_mu"].append(pl.mean(data[ch_ind][ind]["on"]))
                ch[ch_ind][k]["fr_sem"].append(pl.std(data[ch_ind][ind]["on"]) / pl.sqrt(len(data[ch_ind][ind]["on"])))
                ch[ch_ind][k]["pos_y"].append(ind[2])
                # print ch[ch_ind][k]['pos_y']
                # print pl.std(data[ch_ind][ind]['on'])
                ch[ch_ind][k]["dprime"].append(
                    (pl.mean(data[ch_ind][ind]["on"]) - ch[ch_ind]["bl_mu"])
                    / ((pl.std(ch[ch_ind]["bl"]) + pl.std(data[ch_ind][ind]["on"])) / 2)
                )
                # print ch[ch_ind]['OSImage_5']['pos_y']
    return ch
Example #9
def ICcircle_to_ICeuklid(IC):
    """
    converts from IC in circle parameters to IC in Euclidean space (rotationally invariant).
    The formats are:
    IC_euklid: [x, y, z, vx, vy, vz]
    IC_circle: [y, vy, |v|, |l|, phiv], where |v| is the magnitude of CoM velocity, |l| 
        is the distance from leg1 (assumed to be at [0,0,0]) to CoM, and phiv the angle
        of the velocity in horizontal plane wrt x-axis
    *NOTE* for re-conversion, the leg position is additionally required, assumed to be [0,0,0]
    Further, it is assumed that the axis foot-CoM points in x-axis
    
    :args:
        IC (5x float): the initial conditions in circular coordinates

    :returns:
        IC (6x float): the initial conditions in Euclidean space
    
    """
    y, vy, v, l, phiv = IC
    z = 0
    xsq = l**2 - y**2
    if xsq < 0:
        raise RuntimeError('Error in initial conditions: y > l!')
    x = -sqrt(xsq)
    vhsq = v**2 - vy**2
    if vhsq < 0:
        raise RuntimeError('Error in initial conditions: |vy| > |v|!')
    v_horiz = sqrt(vhsq)
    vx = v_horiz * cos(phiv)
    vz = v_horiz * sin(phiv)
    return [x, y, z, vx, vy, vz]
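A quick roundtrip check of the two converters (a sketch; it assumes sqrt, arctan2, cos and sin are in scope the same way the functions themselves expect, e.g. via pylab):

from pylab import sqrt, arctan2, cos, sin

IC_e = [-0.95, 1.0, 0.05, 0.3, -0.1, 0.2]      # [x, y, z, vx, vy, vz]
IC_c = ICeuklid_to_ICcircle(IC_e)
IC_e2 = ICcircle_to_ICeuklid(IC_c)
# y, vy, |v| and |l| survive the roundtrip; x and z are rotated into the x-z plane
print(IC_c)
print(IC_e2)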
Example #10
def haversine (latlong1, latlong2, r):

    deltaLatlong = latlong1 - latlong2
    
    dLat = deltaLatlong[0]
    dLon = deltaLatlong[1]

    lat1 = latlong1[0]
    lat2 = latlong2[0]

    a = (sin (dLat/2) * sin (dLat/2) +
         sin (dLon/2) * sin (dLon/2) * cos (lat1) * cos (lat2))
    c = 2 * arctan2 (sqrt (a), sqrt (1-a))
    d = r * c

    # initial bearing
    y = sin (dLon) * cos (lat2)
    x = (cos (lat1)*sin (lat2) -
         sin (lat1)*cos (lat2)*cos (dLon))
    b1 = arctan2 (y, x)

    # final bearing: swap the endpoints and negate the deltas
    dLon = -dLon
    dLat = -dLat
    lat1, lat2 = lat2, lat1
    y = sin (dLon) * cos (lat2)
    x = (cos (lat1) * sin (lat2) - 
         sin (lat1) * cos (lat2) * cos (dLon))
    b2 = arctan2 (y, x)
    b2 = mod ((b2 + pi), 2*pi)

    return (d, b1, b2)
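For instance (a sketch; this variant expects coordinates already in radians and array-like inputs, with sin, cos, arctan2, sqrt, mod and pi in scope, e.g. from pylab):

from pylab import array, deg2rad, sin, cos, arctan2, sqrt, mod, pi

london = deg2rad(array([51.5074, -0.1278]))
paris = deg2rad(array([48.8566, 2.3522]))
d, b1, b2 = haversine(london, paris, 6371.0)
print(d)   # great-circle distance, roughly 344 km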
Example #11
    def __calculate__(self):
        global USE_IDENTITY_LINE
        sd1 = (self.signal_plus - self.signal_minus) / pl.sqrt(2)
        if USE_IDENTITY_LINE:
            return pl.sqrt(pl.sum((sd1 ** 2)) / len(self.signal_plus))
        else:
            return pl.sqrt(pl.var(sd1))
Example #12
def simple_hierarchical_data(n):
    """ Generate data based on the simple one-way hierarchical model
    given in section 3.1.1::

        y[i,j] | alpha[j], sigma^2 ~ N(alpha[j], sigma^2) i = 1, ..., n_j, j = 1, ..., J;
        alpha[j] | mu, tau^2 ~ N(mu, tau^2) j = 1, ..., J.

        sigma^2 ~ Inv-Chi^2(5, 20)
        mu ~ N(5, 5^2)
        tau^2 ~ Inv-Chi^2(2, 10)

    Parameters
    ----------
    n : list, len(n) = J, n[j] = num observations in group j
    """

    inv_sigma_sq = mc.rgamma(alpha=2.5, beta=50.0)
    mu = mc.rnormal(mu=5.0, tau=5.0 ** -2.0)
    inv_tau_sq = mc.rgamma(alpha=1.0, beta=10.0)

    J = len(n)
    alpha = mc.rnormal(mu=mu, tau=inv_tau_sq, size=J)
    y = [mc.rnormal(mu=alpha[j], tau=inv_sigma_sq, size=n[j]) for j in range(J)]

    mu_by_tau = mu * pl.sqrt(inv_tau_sq)
    alpha_by_sigma = alpha * pl.sqrt(inv_sigma_sq)
    alpha_bar = alpha.sum()
    alpha_bar_by_sigma = alpha_bar * pl.sqrt(inv_sigma_sq)

    return vars()
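A minimal draw from this generator (a sketch; mc is assumed to be PyMC 2, whose rgamma/rnormal the function already uses):

draws = simple_hierarchical_data([5, 10, 20])    # J = 3 groups
print(draws['mu'], draws['alpha'])               # hyper-mean and group effects
print([len(y_j) for y_j in draws['y']])          # -> [5, 10, 20]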
Example #13
    def sample(self, model, evidence):
        g = evidence['g']
        h = evidence['h']
        C = evidence['C']
        z = evidence['z']
        shot_id = evidence['shot_id']
        noise_proportion = evidence['noise_proportion']
        observation_var_g = evidence['observation_var_g']
        observation_var_h = evidence['observation_var_h']

        canopy_cover = model.known_params['canopy_cover']
        z_min = model.known_params['z_min']
        z_max = model.known_params['z_max']

        prior_p = model.hyper_params['T']['p']

        N = len(z)
        T = zeros(N)
        noise_rv = stats.uniform(z_min, z_max - z_min)
        min_index = min(z.index)
        for i in shot_id.index:
            l = zeros(3)
            index = i-min_index
            shot_index = shot_id[i]-min(shot_id)
            l[0] = noise_proportion*noise_rv.pdf(z[i])
            g_norm = stats.norm(g[shot_index], sqrt(observation_var_g))
            C_i = canopy_cover[C[shot_index]]
            l[1] = (1-noise_proportion)*(1-C_i)*g_norm.pdf(z[i])
            h_norm = stats.norm(h[shot_index] + g[shot_index], sqrt(observation_var_h))
            if z[i] > g[shot_index]+3:
                l[2] = (1-noise_proportion)*(C_i)*h_norm.pdf(z[i])
            p = l/sum(l)
            T[index] = Categorical(p).rvs()

        return T
Example #14
def plot_track_props(tracks, nx, ny, len_cutoff=20):
    pl.ioff()
    wdist = wraparound_dist(nx, ny)
    val_fig = pl.figure()
    area_fig = pl.figure()
    psn_fig = pl.figure()
    delta_vals = []
    delta_dists = []
    for tr in tracks:
        if len(tr) < len_cutoff:
            continue
        idxs, regs = zip(*tr)
        delta_vals.extend([abs(regs[idx].val - regs[idx + 1].val) for idx in range(len(regs) - 1)])
        dists = [wdist(regs[i].loc, regs[i + 1].loc) for i in range(len(regs) - 1)]
        delta_dists.extend([abs(dists[idx] - dists[idx + 1]) for idx in range(len(dists) - 1)])
        pl.figure(val_fig.number)
        pl.plot(idxs, [reg.val for reg in regs], "s-")
        pl.figure(area_fig.number)
        pl.semilogy(idxs, [reg.area for reg in regs], "s-")
        pl.figure(psn_fig.number)
        pl.plot(idxs[:-1], dists, "s-")
    pl.figure(val_fig.number)
    pl.savefig("val_v_time.pdf")
    pl.figure(area_fig.number)
    pl.savefig("area_v_time.pdf")
    pl.figure(psn_fig.number)
    pl.savefig("psn_v_time.pdf")
    pl.figure()
    pl.hist(delta_vals, bins=int(pl.sqrt(len(delta_vals))))
    pl.savefig("delta_vals.pdf")
    pl.figure()
    pl.hist(delta_dists, bins=int(pl.sqrt(len(delta_dists))))
    pl.savefig("delta_dists.pdf")
    pl.close("all")
Example #15
def render_network(A):
    [L, M] = shape(A)
    sz = int(sqrt(L))
    buf = 1
    A = asarray(A)

    if floor(sqrt(M)) ** 2 != M:
        m = int(sqrt(M / 2))
        n = int(M / m)
    else:
        m = int(sqrt(M))
        n = m

    array = -ones([buf + m * (sz + buf), buf + n * (sz + buf)], "d")

    k = 0
    for i in range(m):
        for j in range(n):
            clim = max(abs(A[:, k]))
            x_offset = buf + i * (sz + buf)
            y_offset = buf + j * (sz + buf)
            array[x_offset : x_offset + sz, y_offset : y_offset + sz] = reshape(A[:, k], [sz, sz]) / clim

            k += 1
    return array
Example #16
def haversine(location1, location2=None):  # calculates great circle distance
    """Returns the great circle distance of the given
    coordinates.
    
    INPUT:  location1 = ((lat1, lon1), ..., n(lat1, lon1))
           *location2 = ((lat2, lon2), ..., n(lat2, lon2))
           *if location2 is not given, a square matrix of distances
             for location1 will be returned
    OUTPUT: distance in km
            (dist1  ...  ndist
              :            : 
             ndist1 ...  ndist)
            shape will depend on the input
    METHOD: a = sin(dLat / 2) * sin(dLat / 2) + 
                sin(dLon / 2) * sin(dLon / 2) * 
                cos(lat1) * cos(lat2)
            c = 2 * arctan2(sqrt(a), sqrt(1 - a))
            d = R * c
            
            where R is the earth's radius (6371 km)
            and d is the distance in km"""
    
    from itertools import product
    from pylab import deg2rad, sin, cos, arctan2, sqrt, array, arange
    
    if location2: 
        location1 = array(location1, ndmin=2)
        location2 = array(location2, ndmin=2)
    elif location2 is None:
        location1 = array(location1, ndmin=2)
        location2 = location1.copy()
    
    # get all combinations using indices
    ind1 = arange(location1.shape[0])
    ind2 = arange(location2.shape[0])
    ind  = array(list(product(ind1, ind2)))
    
    # using combination inds to get lats and lons
    lat1, lon1 = location1[ind[:,0]].T
    lat2, lon2 = location2[ind[:,1]].T
    
    # setting up variables for haversine
    R = 6371.
    dLat = deg2rad(lat2 - lat1)
    dLon = deg2rad(lon2 - lon1)
    lat1 = deg2rad(lat1)
    lat2 = deg2rad(lat2)
    
    # haversine formula
    a = sin(dLat / 2) * sin(dLat / 2) + \
        sin(dLon / 2) * sin(dLon / 2) * \
        cos(lat1) * cos(lat2)
    c = 2 * arctan2(sqrt(a), sqrt(1 - a))
    d = R * c
    
    # reshape according to the input
    D = d.reshape(location1.shape[0], location2.shape[0])
    
    return D
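Example use (a sketch; inputs are in degrees, since the function applies deg2rad itself):

locs = ((52.52, 13.405), (48.8566, 2.3522))   # Berlin, Paris
print(haversine(locs))                  # 2x2 symmetric distance matrix
print(haversine(locs[:1], locs[1:]))    # 1x1 matrix, roughly 878 km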
Example #17
File: sacm211.py Project: SDK/sacm
def measureDistance(lat1, lon1, lat2, lon2):
    R = 6383.137 # Radius of earth at Chajnantor aprox. in KM
    dLat = (lat2 - lat1) * np.pi / 180.
    dLon = (lon2 - lon1) * np.pi / 180.
    a = pl.sin(dLat/2.) * pl.sin(dLat/2.) + pl.cos(lat1 * np.pi / 180.) * pl.cos(lat2 * np.pi / 180.) * pl.sin(dLon/2.) * pl.sin(dLon/2.)
    c = 2. * pl.arctan2(pl.sqrt(a), pl.sqrt(1-a))
    d = R * c
    return d * 1000. # meters
Example #18
def exhaustVelocity(energyDensity=None, pressureOut=None, pressureIn=const_atmosphericPressure*25, temperature=3500, kappa=1.666666, molarMass=2):
    if energyDensity is not None:  # from kinetic energy
        return pylab.sqrt(2*energyDensity)
    else:  # Laval nozzle adiabatic expansion http://en.wikipedia.org/wiki/De_Laval_nozzle
        gamma = kappa/(kappa-1)
        ExpansionTerm = 1.0
        if pressureOut is not None:
            ExpansionTerm = 1.0 - (pressureOut/pressureIn)**(1/gamma)
        return pylab.sqrt(2000.0*const_universalGas*temperature*gamma*ExpansionTerm/molarMass)
Example #19
    def pkj1pk(self, r, z=0):
        """
        integral of the power spectrum*k over j1
        """
        return (
            M.sqrt(M.pi / 2.0)
            / r
            * self.besselInt.besselInt(lambda k: self.delta(k / r, z) / M.sqrt(k), 1.5, self.besselN, self.besselh)
        )
Example #20
def xyamb(xytab,qu,xyout=''):

    mytb=taskinit.tbtool()

    if not isinstance(qu,tuple):
        raise Exception('qu must be a tuple: (Q,U)')

    if xyout=='':
        xyout=xytab
    if xyout!=xytab:
        os.system('cp -r '+xytab+' '+xyout)

    QUexp=complex(qu[0],qu[1])
    print('Expected QU = ', qu)   # , '  (',pl.angle(QUexp)*180/pi,')'

    mytb.open(xyout,nomodify=False)

    QU=mytb.getkeyword('QU')['QU']
    P=pl.sqrt(QU[0,:]**2+QU[1,:]**2)

    nspw=P.shape[0]
    for ispw in range(nspw):
        st=mytb.query('SPECTRAL_WINDOW_ID=='+str(ispw))
        if (st.nrows()>0):
            q=QU[0,ispw]
            u=QU[1,ispw]
            qufound=complex(q,u)
            c=st.getcol('CPARAM')
            fl=st.getcol('FLAG')
            xyph0=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
            print('Spw = '+str(ispw)+': Found QU = '+str(QU[:,ispw]))  # +'   ('+str(pl.angle(qufound)*180/pi)+')'
            #if ( (abs(q)>0.0 and abs(qu[0])>0.0 and (q/qu[0])<0.0) or
            #     (abs(u)>0.0 and abs(qu[1])>0.0 and (u/qu[1])<0.0) ):
            if ( pl.absolute(pl.angle(qufound/QUexp)*180/pi)>90.0 ):
                c[0,:,:]*=-1.0
                xyph1=pl.angle(pl.mean(c[0,:,:][pl.logical_not(fl[0,:,:])]),True)
                st.putcol('CPARAM',c)
                QU[:,ispw]*=-1
                print('   ...CONVERTING X-Y phase from '+str(xyph0)+' to '+str(xyph1)+' deg')
            else:
                print('      ...KEEPING X-Y phase '+str(xyph0)+' deg')
            st.close()
    QUr={}
    QUr['QU']=QU
    mytb.putkeyword('QU',QUr)
    mytb.close()
    QUm=pl.mean(QU[:,P>0],1)
    QUe=pl.std(QU[:,P>0],1)
    Pm=pl.sqrt(QUm[0]**2+QUm[1]**2)
    Xm=0.5*atan2(QUm[1],QUm[0])*180/pi

    print('Ambiguity resolved (spw mean): Q=',QUm[0],'U=',QUm[1],'(rms=',QUe[0],QUe[1],')','P=',Pm,'X=',Xm)

    stokes=[1.0,QUm[0],QUm[1],0.0]
    print('Returning the following Stokes vector: '+str(stokes))
    
    return stokes
Example #21
def Av_d(myfile,maxstar=None):
    """run red-clump fitting on the photometry in file, being a 2mass table in a particular
    direction, either FITS or ascii"""
    if myfile[-4:]=="fits" or myfile[-3:]=='fit' or myfile[-3:]=="FTS":
        fred = pyfits.open(myfile)
        pylab.figure(1)
        pylab.clf()
        table = fred[1].data
        fred.close()
        J = table.field("Jmag")
        K = table.field("Kmag")
        flags = table.field("Qflg")
        r = table.field("_r")
        if maxstar:
            print("Max radius: %f arcmin" % r[maxstar])
        else:
            print("Max radius: %f arcmin" % max(r))
    else:   #assume ascii file, with columns id,ra,dec,j,jerr,h,herr,k,kerr,flag...
        fred = open(myfile)
        mylines = fred.readlines()
        J=[]
        K=[]
        flags=[]
        for aline in mylines:
            words = aline.split()
            if len(words)<10: continue
            J.append(float(words[3]))
            K.append(float(words[7]))
            flags.append(words[9])
        J=pylab.array(J)
        K=pylab.array(K)
    if maxstar:
        J = J[:maxstar]
        K = K[:maxstar]
        flags=flags[:maxstar]
    fred.close()
    pylab.figure(1)
    pylab.clf()
    pylab.plot(J-K,K,'k.')
    pylab.xlabel("$J-K$")
    pylab.ylabel("$K$")
    first = eval(input('Slice start values (python syntax): '))
    last = eval(input('Slice end values (python syntax): '))
    jk,k,sig,nstar = run_clump(J,K,flags,first,last)
    pylab.figure(1)
    sam = pylab.errorbar(jk,k,yerr=(first-last)/2,xerr=sig/pylab.sqrt(nstar),capsize=0)
    pylab.setp(sam[0],marker='d',mec='w',mfc='w',markersize=5,ls='None')
    pylab.setp(sam[1],c='w',lw=2)
    sam = pylab.errorbar(jk,k,yerr=(first-last)/2,xerr=sig/pylab.sqrt(nstar),capsize=0)
    pylab.setp(sam[0],marker='d',mec='c',mfc='c',markersize=4,ls='None')
    pylab.setp(sam[1],c='c',lw=1)
    pylab.axis([0,2.5,15,5])
    av,d,err = convert(jk,k,sig,nstar)
    pylab.figure(2)
    pylab.clf()
    pylab.errorbar(d,av,err)
    return jk,k,sig,nstar
Example #22
def plot_regression(x,y,smoothing=.3):
    # LOWESS fit plus a band of +/- one (smoothed) standard deviation
    fit=sm.nonparametric.lowess(y,x,frac=smoothing)
    df=(fit[:,1]-y)**2
    fit_var=sm.nonparametric.lowess(df,x,frac=smoothing)
    pl.plot(fit[:,0],fit[:,1])
    pl.fill_between(fit_var[:,0],fit[:,1]-1*pl.sqrt(fit_var[:,1]),fit[:,1]+1*pl.sqrt(fit_var[:,1]),color=((0,0,.99,.2),))
    pl.plot(x,y,'.')
Example #23
    def calculate(self):
        print("Intensity:Intensity2D:calculate")
        self.project()
        self.sum   = pl.sum(self.xproj)
        self.xmean = pl.sum(self.xproj*self.x)/pl.sum(self.xproj)
        self.ymean = pl.sum(self.yproj*self.y)/pl.sum(self.yproj)
        self.xrms  = pl.sqrt(pl.sum(self.xproj*self.x**2)/self.sum)
        self.yrms  = pl.sqrt(pl.sum(self.yproj*self.y**2)/self.sum)

        print("Intensity:Intensity2D:calculate ", self.xmean, self.ymean, self.xrms, self.yrms)
Example #24
def random_euler_angles():
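    # Draw a uniformly random rotation as a unit quaternion (Shoemake's method),
    # then convert it to Euler angles.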
    r1,r2,r3 = pylab.random(3)
    q1 = pylab.sqrt(1.0-r1)*pylab.sin(2.0*pylab.pi*r2)
    q2 = pylab.sqrt(1.0-r1)*pylab.cos(2.0*pylab.pi*r2)
    q3 = pylab.sqrt(r1)*pylab.sin(2.0*pylab.pi*r3)
    q4 = pylab.sqrt(r1)*pylab.cos(2.0*pylab.pi*r3)
    phi = math.atan2(2.0*(q1*q2+q3*q4), 1.0-2.0*(q2**2+q3**2))
    theta = math.asin(2.0*(q1*q3-q4*q2))
    psi = math.atan2(2.0*(q1*q4+q2*q3), 1.0-2.0*(q3**2+q4**2))
    return [phi,theta,psi]
Example #25
def getParamCovMat(prefix,dlogpower = 2, theoconstmult = 1.,dlogfilenames = ['dlogpnldloga.dat'],volume=256.**3,startki = 0, endki = 0, veff = [0.]):
    """
    Calculates parameter covariance matrix from the power spectrum covariance matrix and derivative term
    in the prefix directory
    """
    nparams = len(dlogfilenames)

    kpnl = M.load(prefix+'pnl.dat')
    k = kpnl[startki:,0]

    nk = len(k)
    if (endki == 0):
        endki = nk
        
    pnl = M.array(kpnl[startki:,1],M.Float64)
    covarwhole = M.load(prefix+'covar.dat')
    covar = covarwhole[startki:,startki:]
    if len(veff) > 1:
        sqrt_veff = M.sqrt(veff[startki:])
    else:
        sqrt_veff = M.sqrt(volume*M.ones(nk))

    dlogs = M.reshape(M.ones(nparams*nk,M.Float64),(nparams,nk))
    paramFishMat = M.reshape(M.zeros(nparams*nparams*(endki-startki),M.Float64),(nparams,nparams,endki-startki))
    paramCovMat = paramFishMat * 0.

    # Covariance matrices of dlog's
    for param in range(nparams):
        if len(dlogfilenames[param]) > 0:
            dlogs[param,:] = M.load(prefix+dlogfilenames[param])[startki:,1]

    normcovar = M.zeros(M.shape(covar),M.Float64)
    for i in range(nk):
        normcovar[i,:] = covar[i,:]/(pnl*pnl[i])

    M.save(prefix+'normcovar.dat',normcovar)

    f = k[1]/k[0]

    if (volume == -1.):
        volume = (M.pi/k[0])**3

    #theoconst = volume * k[1]**3 * f**(-1.5)/(12.*M.pi**2) #1 not 0 since we're starting at 1
    for ki in range(1,endki-startki):
        for p1 in range(nparams):
            for p2 in range(nparams):
                paramFishMat[p1,p2,ki] = M.sum(M.sum(\
                M.inverse(normcovar[:ki+1,:ki+1]) *
                M.outerproduct(dlogs[p1,:ki+1]*sqrt_veff[:ki+1],\
                               dlogs[p2,:ki+1]*sqrt_veff[:ki+1])))
                
                
        paramCovMat[:,:,ki] = M.inverse(paramFishMat[:,:,ki])

    return k[1:],paramCovMat[:,:,1:]
Example #26
def calculate_boxplot_stats ( x, **kwargs ):
    whis = kwargs.setdefault ( 'whis', 1.5 )
    bootstrap = kwargs.setdefault ( 'bootstrap', None )

    # Get median and quartiles
    q1,med,q3 = pl.prctile (x, [25,50,75] )
    # Get high extreme
    iq = q3-q1
    hi_val = q3+whis*iq
    wisk_hi = pl.compress ( x<=hi_val, x )
    if len(wisk_hi)==0:
        wisk_hi = q3
    else:
        wisk_hi = max(wisk_hi)
    # Get low extreme
    lo_val = q1-whis*iq
    wisk_lo = pl.compress ( x>=lo_val, x )
    if len(wisk_lo)==0:
        wisk_lo = q1
    else:
        wisk_lo = min(wisk_lo)

    # Get fliers
    flier_hi = pl.compress ( x>wisk_hi, x )
    flier_lo = pl.compress ( x<wisk_lo, x )

    if bootstrap is not None:
        # Do a bootstrap estimate of notch locations
        def bootstrapMedian ( data, N=5000 ):
            # determine 95% confidence intervals of the median
            M = len(data)
            percentile = [2.5,97.5]
            estimate = pl.zeros(N)
            for n in range(N):
                bsIndex = pl.randint ( 0, M, M )
                bsData = data[bsIndex]
                estimate[n] = pl.prctile ( bsData, 50 )
            CI = pl.prctile ( estimate, percentile )
            return CI
        CI = bootstrapMedian ( x, N=bootstrap )
        notch_max = CI[1]
        notch_min = CI[0]
    else:
        # Estimate notch locations using Gaussian-based asymptotic
        # approximation
        #
        # For discussion: McGill, R., Tukey, J.W., and
        # Larsen, W.A. (1978) "Variations of Boxplots", The
        # American Statistician, 32:12-16
        notch_max = med + 1.57*iq/pl.sqrt(len(x))
        notch_min = med - 1.57*iq/pl.sqrt(len(x))
    return {'main':(wisk_lo,q1,med,q3,wisk_hi),
            'fliers':(flier_lo,flier_hi),
            'notch':(notch_min,notch_max)}
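Example use (a sketch; it relies on the old pylab helpers prctile and randint that the function itself calls):

data = pl.randn(200)
bstats = calculate_boxplot_stats(data, whis=1.5, bootstrap=500)
print(bstats['main'])    # (low whisker, q1, median, q3, high whisker)
print(bstats['notch'])   # approximate 95% CI of the median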
Example #27
    def transform_vector(self,uin,vin,lons,lats,nx,ny,returnxy=False,preserve_magnitude=True):
        """
 transform a vector field (uin,vin) from a lat/lon grid with longitudes
 lons and latitudes lats to a (ny,nx) native map projection grid.
 The input vector field is defined in spherical coordinates (it
 has eastward and northward components) while the output
 vector field is defined in map projection coordinates (relative
 to x and y).
 if returnxy=True, the x and y values of the native map projection grid
 are also returned (default False).
 if preserve_magnitude=True (default), the vector magnitude is preserved
 (so that length of vectors represents magnitude of vector relative to
 spherical coordinate system, not map projection coordinates). 

 vectors on a lat/lon grid must be transformed to map projection coordinates
 before they can be plotted on the map (with the quiver class method).
        """
        lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
        # interpolate to map projection coordinates.
        uin = interp(uin,lons,lats,lonsout,latsout)
        vin = interp(vin,lons,lats,lonsout,latsout)
        if preserve_magnitude:
            # compute original magnitude.
            mag = pylab.sqrt(uin**2+vin**2)
        rad2dg = 180./math.pi
        tiny = 1.e-5
        delta = 0.1
        coslats = pylab.cos(latsout/rad2dg)
        # use dx/dlongitude, dx/dlatitude, dy/dlongitude and dy/dlatitude
        # to transform vector to map projection coordinates.
        # dlongitude is delta degrees at equator, dlatitude is delta degrees.
        xn,yn = self(lonsout,pylab.where(latsout+delta<90.,latsout+delta,latsout))
        # at poles, derivs w/respect to longitude will be zero.
        lonse = pylab.where(coslats>tiny,lonsout+(delta/coslats),lonsout)
        xe,ye = self(lonse,latsout)
        uout = uin*(xe-x)*(coslats/delta) + vin*(xn-x)/delta
        vout = uin*(ye-y)*(coslats/delta) + vin*(yn-y)/delta
        # make sure uout, vout not too small (quiver will raise
        # an exception when trying to rescale vectors).
        uout = pylab.where(pylab.fabs(uout)<tiny,tiny,uout)
        vout = pylab.where(pylab.fabs(vout)<tiny,tiny,vout)
        # fix units. 
        if self.projection != 'cyl':
            uout = uout*rad2dg/self.rsphere
            vout = vout*rad2dg/self.rsphere
        # rescale magnitude.
        if preserve_magnitude:
            magout = pylab.sqrt(uout**2+vout**2)
            uout = uout*mag/magout
            vout = vout*mag/magout
        if returnxy:
            return uout,vout,x,y
        else:
            return uout,vout
Example #28
    def calcunc(self, tdDatas):
        # Not used anymore; older version. Should we remove it?
        # tdDatas is a np array of tdData measurements
        if tdDatas.shape[0] == 1:
            repeatability = py.zeros((len(tdDatas[0,:,0]), 2))
        else:
            repeatability = py.std(py.asarray(tdDatas[:,:,1:3]), axis=0, ddof=1)/py.sqrt(self.numberOfDataSets)
        # this line is wrong
        elnNoise = tdDatas[0,:,3:]
        uncarray = py.sqrt(repeatability**2 + elnNoise**2)

        return uncarray
Example #29
    def __init__( self, t, x, y, A=[1., 0.85], a=[0.25, 0.85] ):
        '''
        
        Initializing generalized thalamo-cortical loop. Full
        functionality is only obtained in the subclasses, like DOG,
        full_eDOG, etc.
        
        Parameters
        ----------
        
        t : array
            1D Time vector
        x : np.array
            1D vector for x-axis sampling points
        y : np.array
            1D vector for y-axis sampling points

        Keyword arguments
        -----------------
        
        A : sequence (default A = [1., 0.85])
            Amplitudes for DOG receptive field for center and surround, 
            respectively
        a : sequence (default a = [0.25, 0.85])
            Width parameter for DOG receptive field for center and surround,
            respectively

        Usage
        -----
            Look at subclasses for example usage            
        '''
        # Set parameters as attributes
        self.name = 'pyDOG Toolbox'
        self.t = t
        self.A = A
        self.a = a
        self.x = x
        self.y = y
        # Find sampling rates and sampling frequencies
        self.nu_xs = 1./(x[1]-x[0])
        self.nu_ys = 1./(y[1]-y[0])
        self.fs = 1./(t[1]-t[0])
        self.f = fft.fftfreq(pl.asarray(t).size, t[1]-t[0])
        # spatial frequency axes
        self.nu_x = fft.fftfreq(pl.asarray(x).size, x[1]-x[0])
        self.nu_y = fft.fftfreq(pl.asarray(y).size, y[1]-y[0])
        # Make meshgrids, may come in handy
        self._xx, self._yy = pl.meshgrid(self.x, self.y)
        self._nu_xx, self._nu_yy = pl.meshgrid(self.nu_x, self.nu_y)
        # r is needed for all circular rfs
        self.r = pl.sqrt(self._xx**2 + self._yy**2)
        self.k = 2 * pl.pi * pl.sqrt(self._nu_xx**2 + self._nu_yy**2)
Example #30
def cartesian2polar(xyz):
    """cartesian2polar((x, y, z))
converts coordinates from (x, y, z) to spherical (r, theta, phi)"""
    x, y, z = xyz

    r = pylab.sqrt(sum(c**2 for c in [x, y, z]))
    theta = pylab.arccos(z/r)
    phi = pylab.arccos(x / pylab.sqrt(sum(c**2 for c in [x, y])))
    if y < 0: phi *= -1

    return (r, theta, phi)
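For example:

r, theta, phi = cartesian2polar((1.0, 1.0, 1.0))
print(r, theta, phi)   # sqrt(3), arccos(1/sqrt(3)) ~ 0.955, pi/4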
Example #31
def Main():
    options, _ = MakeOpts().parse_args(sys.argv)
    assert options.species_filename
    assert options.first_col and options.second_col
    print('Reading species list from', options.species_filename)

    filter_cols, filter_vals = [], []
    if options.filter_cols:
        assert options.filter_vals
        filter_cols = [s.strip() for s in options.filter_cols.split(',')]
        filter_vals = [s.strip() for s in options.filter_vals.split(',')]

    # Read and filter species data
    r = csv.DictReader(open(options.species_filename))
    first_col, second_col = options.first_col, options.second_col
    pairmap = {}
    for row in r:
        apply_filter = lambda x, y: x in row and row[x] == y
        # keep only rows that match every (column, value) filter pair
        passed_filter = all(map(apply_filter, filter_cols, filter_vals))
        if not passed_filter:
            continue

        a, b = row[first_col].strip(), row[second_col].strip()
        if not a or not b:
            continue
        key = (a, b)
        pairmap.setdefault(key, []).append(row)

    #for key, row_list in pairmap.iteritems():
    #	if len(row_list) < 5:
    #		print key, row_list

    # Find cross-product of column values (all pairs).
    all_a = list(set([x[0] for x in pairmap.keys()]))
    all_b = list(set([x[1] for x in pairmap.keys()]))
    a_to_num = dict((v, i) for i, v in enumerate(all_a))
    b_to_num = dict((v, i) for i, v in enumerate(all_b))
    all_possible_pairs = list(itertools.product(all_a, all_b))

    third_col = options.third_col or 'fake key'
    get_col = lambda x: x.get(third_col, None)
    col_vals = dict((k, [get_col(x) for x in v]) for k, v in pairmap.items())
    counts = {}
    totals = []
    all_vals = set()
    for k, v in col_vals.items():
        counter = Counter(v)
        all_vals.update(counter.keys())
        counts[k] = counter
        totals.append(sum(counter.values()))

    x_vals = []
    y_vals = []
    count_array = []
    z_vals = []
    max_val = max(totals)
    for pair in all_possible_pairs:
        a, b = pair
        x_vals.append(a_to_num[a])
        y_vals.append(b_to_num[b])
        z_vals.append(sum(counts.get(pair, {}).values()))
        count_array.append(counts.get(pair, {}))

    # Plot circle scatter.
    axes = pylab.axes()
    axes.grid(color='g', linestyle='--', linewidth=1)

    if options.third_col:
        colormap = ColorMap(all_vals)
        PieScatter(axes, x_vals, y_vals, count_array, max_val, colormap)

        handles, labels = axes.get_legend_handles_labels()
        mapped_labels = dict(zip(labels, handles))
        labels = sorted(mapped_labels.keys())
        handles = [mapped_labels[k] for k in labels]
        pylab.legend(handles, labels)
    else:
        scaled_z_vals = pylab.array([float(z) for z in z_vals])
        max_z = max(scaled_z_vals)
        scaled_z_vals /= (4.0 * max_z)
        scaled_z_vals = pylab.sqrt(scaled_z_vals)

        CircleScatter(axes, x_vals, y_vals, scaled_z_vals)
        for x, y, z in zip(x_vals, y_vals, z_vals):
            pylab.text(x + 0.1, y + 0.1, str(z))

    # Labels, titles and ticks.
    pylab.title('%s vs. %s' % (options.first_col, options.second_col))
    pylab.xlabel(options.first_col)
    pylab.ylabel(options.second_col)
    pylab.figtext(0.70, 0.02, '%d examples total.' % sum(totals))

    size_8 = FontProperties(size=8)
    a_labels = [a or "None given" for a in all_a]
    b_labels = [b or "None given" for b in all_b]
    pylab.xticks(range(0, len(a_labels)), a_labels, fontproperties=size_8)
    pylab.yticks(range(0, len(b_labels)), b_labels, fontproperties=size_8)

    # Scale and show.
    pylab.axis('scaled')
    pylab.axis([-1, len(all_a), -1, len(all_b)])
    pylab.show()
Example #32
import pylab as p

x = p.linspace(10, 100, 100000)
n = len(x)

#for i in range(10,15):
c = p.array([10] * n)
y = c + 1 / (p.sqrt(x))

p.figure("Series")
p.plot(x, y)

p.title("DC Motor analysis")
p.xlabel("Torque")
p.ylabel("Speed")
p.show()

p.figure("Shunt")
p.plot(x, y)

p.title("DC Motor analysis")
p.xlabel("Torque")
p.ylabel("Speed")
p.show()
Example #33
pylab.errorbar(t, x, dx, dt, "o", color="black") # add measurement errors if available

def fit_function(t, tau, a, omega, phi, k):
    return a*pylab.exp(-t/tau)*pylab.sin(omega*t+phi)+k

init = numpy.array([15, 450, 0.4, -3.14/2, 400]) # vary the initial parameters to obtain different omega.

popt, pcov = curve_fit(fit_function, t, x, init, error, absolute_sigma=True) # pass the measurement error as sigma
tau, a, omega, phi, k = popt
#dtau = 0
#da = 0
#domega = 0
#dphi = 0
#dk 
dtau, da, domega, dphi, dk = pylab.sqrt(pcov.diagonal())

print("Fit parameters:")
print("tau = ", tau, "a = ", a, "omega = ", 1000*omega, "phi = ",  phi, "k = ",  k)
print("dtau = ", dtau, "da = ", da, "domega = ", 1000*domega, "dphi = ", dphi, "dk = ", dk)
print(dtau, da, 1000*domega, dphi, dk)

div = 100000
bucket = numpy.array([0.0 for i in range(div)])
retta = numpy.array([0.0 for i in range(div)])
inc = (t.max())/div 
for i in range(len(bucket)):
        bucket[i]=float(i)*inc
        retta[i] = fit_function(bucket[i], tau, a, omega, phi, k)

pylab.plot(bucket, retta, color = "black")
Example #34
genome_size_std = p.zeros(tot_gain_at_repr.shape[0])
ASLD_means = p.zeros(tot_gain_at_repr.shape[0])
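# (tot_gain_means, intake_means and genome_size_means are assumed to be
# preallocated earlier in the script, like genome_size_std above)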

for i in range(0, tot_gain_at_repr.shape[0], 1):
    ASLD_means[i] = p.sum(ASLD[i, :] * ASLD_hist_ranges) / p.sum(ASLD[i, :])
    #    ASLD[i, :] = ASLD[i, :] / ASLD[i, :].sum()
    tot_gain_means[i] = p.sum(tot_gain_at_repr[i, :] * uptake_hist_ranges) \
        / p.sum(tot_gain_at_repr[i, :])
    intake_means[i] = p.sum(intake_at_repr[i, :] * uptake_hist_ranges) \
        / p.sum(intake_at_repr[i, :])
    genome_size_means[i] = p.sum(genome_size[i, :] * genome_size_bins) \
        / p.sum(genome_size[i, :])
    for j in range(0, genome_size[i, :].shape[0], 1):
        genome_size_std[i] += genome_size[i, j] \
            * (genome_size_bins[j] - genome_size_means[i])**2
    genome_size_std[i] = p.sqrt(genome_size_std[i] / p.sum(genome_size[i, :]))

Summ_ASLD = p.zeros(ASLD.shape[1])
ASLD_to_hist_mean = p.zeros(ASLD.shape[1])
ASLD_to_hist_std = p.zeros(ASLD.shape[1])
for j in range(0, ASLD.shape[1], 1):
    ASLD_to_hist_mean[j] = ASLD[:, j].mean()
    ASLD_to_hist_std[j] = ASLD[:, j].std()
    Summ_ASLD[j] = p.sum(ASLD[:, j])

#Summ_ASLD = Summ_ASLD[~p.isnan(Summ_ASLD).any(0)]
#The_summ_ASLD = Summ_ASLD.mean()
#ASLD_to_hist_mean = ASLD_to_hist_mean / Summ_ASLD
#ASLD_to_hist_std = ASLD_to_hist_std / Summ_ASLD

tot_gain_means = tot_gain_means[~p.isnan(tot_gain_means).any(0)]
Example #35
    def extract(self):
        Features.extract(self)
        self.X = P.sqrt((P.diff(self.X)**2).sum(0))/self.X.shape[0]