Example #1
def lowess(x, y, f=2./3., iter=3):
    """lowess(x, y, f=2./3., iter=3) -> yest
    Lowess smoother: Robust locally weighted regression.
    The lowess function fits a nonparametric regression curve to a scatterplot.
    The arrays x and y contain an equal number of elements; each pair
    (x[i], y[i]) defines a data point in the scatterplot. The function returns
    the estimated (smooth) values of y.
    The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by iter. The
    function will run faster with a smaller number of iterations."""
    n = len(x)
    r = int(ceil(f*n))
    h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
    w = np.clip(np.abs((x[:,None] - x[None,:]) / h), 0.0, 1.0)
    w = (1 - w**3)**3
    yest = np.zeros(n)
    delta = np.ones(n)
    for iteration in range(iter):
        for i in range(n):
            weights = delta * w[:,i]
            b = np.array([np.sum(weights*y), np.sum(weights*y*x)])
            A = np.array([[np.sum(weights), np.sum(weights*x)],
                   [np.sum(weights*x), np.sum(weights*x*x)]])
            beta = linalg.solve(A, b)
            yest[i] = beta[0] + beta[1]*x[i]

        residuals = y - yest
        s = np.median(np.abs(residuals))
        delta = np.clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta**2)**2

    return yest
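A minimal usage sketch, assuming import numpy as np, from math import ceil, and from scipy import linalg are in scope (the function above relies on ceil and linalg.solve):

import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x) + 0.3 * np.random.randn(100)   # noisy samples
yest = lowess(x, y, f=0.25, iter=3)          # smoothed estimate, same length as x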
Example #2
def find_horizon_offset(x, y, max_distance=1e4):
    '''Find minimum number of pixels to crop to guarantee all pixels are within specified distance

    Parameters
    ----------
    x : np.ndarray
        NxM matrix containing real-world x-coordinates
    y : np.ndarray
        NxM matrix containing real-world y-coordinates
    max_distance : float, optional
        Maximum distance from origin to be included in the plot.
        Larger numbers are considered to be beyond the horizon.

    Returns
    -------
    float
        Minimum crop distance in pixels (from the top of the image)
    '''

    offset = 0
    if max_distance is not None:
        try:
            th = (np.abs(x)>max_distance)|(np.abs(y)>max_distance)
            offset = np.max(np.where(np.any(th, axis=1))) + 1
        except:
            pass

    return offset
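A small illustrative call, assuming import numpy as np; the grid is laid out so that distance from the origin decreases down the image, so the two far rows at the top get cropped:

import numpy as np

rows, cols = np.mgrid[0:5, 0:4].astype(float)   # hypothetical 5x4 image grid
y = (4 - rows) * 4000.0                         # top rows are farthest from the origin
x = cols * 1000.0
print(find_horizon_offset(x, y, max_distance=1e4))   # -> 2 (crop two rows from the top)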
Example #3
def filterFunc():
    rects = []
    hsv_planes = [[[]]]
    if os.path.isfile(Image_File):
        BGR=cv2.imread(Image_File)
        gray = cv2.cvtColor(BGR, cv2.COLOR_BGR2GRAY)
        img = gray
        f = np.fft.fft2(img)
        fshift = np.fft.fftshift(f)
        magnitude_spectrum = 20*np.log(np.abs(fshift))
        
        plt.subplot(221),plt.imshow(img, cmap = 'gray')
        plt.title('Input Image'), plt.xticks([]), plt.yticks([])
        
        plt.subplot(222),plt.imshow(magnitude_spectrum, cmap = 'gray')
        plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
        
        FiltzeredFFT = HighPassFilter(fshift, 60)
        plt.subplot(223),plt.imshow(np.abs(FiltzeredFFT), cmap = 'gray')
        plt.title('Filtered'), plt.xticks([]), plt.yticks([])
        
		
        f_ishift = np.fft.ifftshift(FiltzeredFFT)
        img_back = np.fft.ifft2(f_ishift)
        img_back = np.abs(img_back)
        plt.subplot(224),plt.imshow(np.abs(img_back), cmap = 'gray')
        plt.title('Filtered Image'), plt.xticks([]), plt.yticks([])
        plt.show()
Example #4
def trilaterate3D(distances2):
    p1=np.array(distances2[0][:3])
    p2=np.array(distances2[1][:3])
    p3=np.array(distances2[2][:3])       
    p4=np.array(distances2[3][:3])
    r1=distances2[0][-1]
    r2=distances2[1][-1]
    r3=distances2[2][-1]
    r4=distances2[3][-1]
    e_x=(p2-p1)/np.linalg.norm(p2-p1)
    i=np.dot(e_x,(p3-p1))
    e_y=(p3-p1-(i*e_x))/(np.linalg.norm(p3-p1-(i*e_x)))
    e_z=np.cross(e_x,e_y)
    d=np.linalg.norm(p2-p1)
    j=np.dot(e_y,(p3-p1))
    x=((r1**2)-(r2**2)+(d**2))/(2*d)
    y=(((r1**2)-(r3**2)+(i**2)+(j**2))/(2*j))-((i/j)*(x))
    z1=np.sqrt(r1**2-x**2-y**2)
    z2=np.sqrt(r1**2-x**2-y**2)*(-1)
    ans1=p1+(x*e_x)+(y*e_y)+(z1*e_z)
    ans2=p1+(x*e_x)+(y*e_y)+(z2*e_z)
    dist1=np.linalg.norm(p4-ans1)
    dist2=np.linalg.norm(p4-ans2)
    if np.abs(r4-dist1)<np.abs(r4-dist2):
        return ans1
    else: 
        return ans2
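A quick self-check sketch with a hypothetical anchor layout (not from the original project): four anchors with exact distances to a known point should recover that point up to floating-point error.

import numpy as np

target = np.array([1.0, 2.0, 3.0])                       # point to recover
anchors = [(0.0, 0.0, 0.0), (4.0, 0.0, 0.0),
           (0.0, 4.0, 0.0), (0.0, 0.0, 4.0)]             # hypothetical anchor positions
distances2 = [list(p) + [np.linalg.norm(target - np.array(p))] for p in anchors]
print(trilaterate3D(distances2))                         # approximately [1. 2. 3.]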
Example #5
def cada_torrilhon_limiter(r,cfl,epsilon=1.0e-3):
    r"""
    Cada-Torrilhon modified
    
    Additional Input:
     - *epsilon* = 
    """
    a = np.ones((2,len(r))) * 0.95
    b = np.empty((3,len(r)))

    a[0,:] = cfl
    cfl = np.min(a)
    a[1,:] = 0.05
    cfl = np.max(a)
    
    # Multiply all parts except b[0,:] by (1.0 - epsilon) as well
    b[0,:] = 1.0 + (1+cfl) / 3.0 * (r - 1)
    b[1,:] = 2.0 * np.abs(r) / (cfl + epsilon)
    b[2,:] = (8.0 - 2.0 * cfl) / (np.abs(r) * (cfl - 1.0 - epsilon)**2)
    b[1:,:] *= (1.0 - epsilon)
    a[0,:] = np.min(b)
    a[1,:] = (-2.0 * (cfl**2 - 3.0 * cfl + 8.0) * (1.0-epsilon)
                    / (np.abs(r) * (cfl**3 - cfl**2 - cfl + 1.0 + epsilon)))
    
    return np.max(a)
Example #6
def test_UnitConverter_nodistance():

    mywcs = WCS(hdr)

    convert = UnitConverter(mywcs)

    assert convert.ang_size.value == np.abs(hdr['CDELT2'])
    assert convert.ang_size.unit == mywcs.wcs.cunit[0]

    twopix_ang = np.abs(hdr['CDELT2']) * 2 * u.deg
    assert convert.to_pixel(twopix_ang).value == 2.

    assert convert.to_pixel_area(twopix_ang**2) == 4 * u.pix**2

    assert convert.from_pixel(2 * u.pix, u.deg) == twopix_ang
    assert convert.to_angular(2 * u.pix, u.deg) == twopix_ang

    # Round trip
    assert convert.to_pixel(convert.from_pixel(2 * u.pix, u.deg)) == 2 * u.pix
    assert convert.to_pixel(convert.to_angular(2 * u.pix, u.deg)) == 2 * u.pix

    assert convert.from_pixel(convert.to_pixel(twopix_ang), u.deg) == twopix_ang
    assert convert.to_angular(convert.to_pixel(twopix_ang), u.deg) == twopix_ang

    # Physical conversions should fail
    with pytest.raises(AttributeError):
        convert.from_pixel(2 * u.pix, u.pc)
    with pytest.raises(AttributeError):
        assert convert.to_physical(2 * u.pix, u.pc)
Example #7
def check_vpd_ks2_astrometry():
    """
    Check the VPD and quiver plots for our KS2-extracted, re-transformed astrometry.
    """
    catFile = workDir + '20.KS2_PMA/wd1_catalog.fits'
    tab = atpy.Table(catFile)

    good = (tab.xe_160 < 0.05) & (tab.ye_160 < 0.05) & \
        (tab.xe_814 < 0.05) & (tab.ye_814 < 0.05) & \
        (tab.me_814 < 0.05) & (tab.me_160 < 0.05)

    tab2 = tab.where(good)

    dx = (tab2.x_160 - tab2.x_814) * ast.scale['WFC'] * 1e3
    dy = (tab2.y_160 - tab2.y_814) * ast.scale['WFC'] * 1e3

    py.clf()
    q = py.quiver(tab2.x_814, tab2.y_814, dx, dy, scale=5e2)
    py.quiverkey(q, 0.95, 0.85, 5, '5 mas', color='red', labelcolor='red')
    py.savefig(workDir + '20.KS2_PMA/vec_diffs_ks2_all.png')

    py.clf()
    py.plot(dy, dx, 'k.', ms=2)
    lim = 30
    py.axis([-lim, lim, -lim, lim])
    py.xlabel('Y Proper Motion (mas)')
    py.ylabel('X Proper Motion (mas)')
    py.savefig(workDir + '20.KS2_PMA/vpd_ks2_all.png')

    idx = np.where((np.abs(dx) < 10) & (np.abs(dy) < 10))[0]
    print('Cluster Members (within dx < 10 mas and dy < 10 mas)')
    print(('   dx = {dx:6.2f} +/- {dxe:6.2f} mas'.format(dx=dx[idx].mean(),
                                                        dxe=dx[idx].std())))
    print(('   dy = {dy:6.2f} +/- {dye:6.2f} mas'.format(dy=dy[idx].mean(),
                                                        dye=dy[idx].std())))
Example #8
def PrintSimResults(Options, Results, Method, Time):
    print('------------------------------------------------------------------')
    print('CPU Time: %.3f' % Time)
    for req in Options.Repo.PermRequests:
        req_str = req
        if hasattr(req,'__func__'):
            req_str = req.__func__.__name__
        if req in Results.TempStorage:
            try:
                print('%s: %g' % (req_str, Results.TempStorage[req][-1]))
            except TypeError:
                print('%s: %s' % (req_str, Results.TempStorage[req][-1]))
        else:
            try:
                print('%s: %g' % (req_str, Results.PermStorage[req][-1]))
            except TypeError:
                print('%s: %s' % (req_str, Results.PermStorage[req][-1]))
    print('Steps: %d' % Results.thisPermIndex)
    if 'Step' in Options.Repo.PermRequests:
        steps = Results.PermStorage['Step']
        if isinstance(steps[0],np.ndarray):
            steps = np.mean(steps,axis=1)
        print('Step Sum: %g' % sum(steps))
    print('Min |X*|: %.3f' % np.min(np.abs(Results.TempStorage['Data'][-1])))
    print('Max |X*|: %.3f' % np.max(np.abs(Results.TempStorage['Data'][-1])))
    print('------------------------------------------------------------------')
Example #9
    def zplane(self, title="", fontsize=18):
        """ Display filter in the complex plane

        Parameters
        ----------

        """
        rb = self.z
        ra = self.p

        t = np.arange(0, 2 * np.pi + 0.1, 0.1)
        plt.plot(np.cos(t), np.sin(t), "k")

        plt.plot(np.real(ra), np.imag(ra), "x", color="r")
        plt.plot(np.real(rb), np.imag(rb), "o", color="b")
        M1 = -10000
        M2 = -10000
        if len(ra) > 0:
            M1 = np.max([np.abs(np.real(ra)), np.abs(np.imag(ra))])
        if len(rb) > 0:
            M2 = np.max([np.abs(np.real(rb)), np.abs(np.imag(rb))])
        M = 1.6 * max(1.2, M1, M2)
        plt.axis([-M, M, -0.7 * M, 0.7 * M])
        plt.title(title, fontsize=fontsize)
        plt.show()
Example #10
def octahedron(radius, dtype=np.uint8):
    """
    Generates a octahedron-shaped structuring element of a given radius
    (the 3D equivalent of a diamond).  A pixel is part of the
    neighborhood (i.e. labeled 1) if the city block/manhattan distance
    between it and the center of the neighborhood is no greater than
    radius.

    Parameters
    ----------
    radius : int
        The radius of the octahedron-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------

    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # note that in contrast to diamond(), this method allows non-integer radii
    n = 2 * radius + 1
    Z, Y, X = np.mgrid[-radius:radius:n*1j,
                       -radius:radius:n*1j,
                       -radius:radius:n*1j]
    s = np.abs(X) + np.abs(Y) + np.abs(Z)
    return np.array(s <= radius, dtype=dtype)
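For instance (assuming import numpy as np), radius 1 yields a 3x3x3 element containing the centre voxel and its six face neighbours:

selem = octahedron(1)
print(selem.shape, selem.sum())   # (3, 3, 3) 7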
Example #11
def diamond(radius, dtype=np.uint8):
    """
    Generates a flat, diamond-shaped structuring element of a given
    radius.  A pixel is part of the neighborhood (i.e. labeled 1) if
    the city block/manhattan distance between it and the center of the
    neighborhood is no greater than radius.

    Parameters
    ----------
    radius : int
        The radius of the diamond-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------

    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    half = radius
    (I, J) = np.meshgrid(range(0, radius * 2 + 1), range(0, radius * 2 + 1))
    s = np.abs(I - half) + np.abs(J - half)
    return np.array(s <= radius, dtype=dtype)
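A short illustration, assuming import numpy as np:

print(diamond(2))
# [[0 0 1 0 0]
#  [0 1 1 1 0]
#  [1 1 1 1 1]
#  [0 1 1 1 0]
#  [0 0 1 0 0]]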
Example #12
def compare_objs(x, y):
    assert type(x) is type(y)
    if type(x) is dict:
        assert sorted(x.keys()) == sorted(y.keys())
        for ky in x:
            compare_objs(x[ky], y[ky])
    elif type(x) is list:
        assert len(x) == len(y)
        for ind in range(len(x)):
            compare_objs(x[ind], y[ind])
    elif type(x) is np.ndarray:
        assert x.shape == y.shape
        if not np.allclose(x, y, atol=1.0e-5, rtol=0.0):
            x = x.reshape(x.size)
            y = y.reshape(y.size)
            dd = x - y
            worst_case = np.max(np.abs(dd))
            print "worst case abs diff = %e" % worst_case
            ind = np.where((x != 0) | (y != 0))
            rel_err = np.abs(np.divide(dd[ind], np.abs(x[ind]) + np.abs(y[ind])))
            worst_case = np.max(rel_err)
            print "worst case rel diff = %e" % worst_case
            assert False
    else:
        assert x == y
Example #13
    def test_surface_evaluate(self):
        from sfepy.discrete import FieldVariable
        problem = self.problem

        us = problem.get_variables()['us']
        vec = nm.empty(us.n_dof, dtype=us.dtype)
        vec[:] = 1.0
        us.set_data(vec)

        expr = 'ev_surface_integrate.i.Left( us )'
        val = problem.evaluate(expr, us=us)
        ok1 = nm.abs(val - 1.0) < 1e-15
        self.report('with unknown: %s, value: %s, ok: %s'
                    % (expr, val, ok1))

        ps1 = FieldVariable('ps1', 'parameter', us.get_field(),
                            primary_var_name='(set-to-None)')
        ps1.set_data(vec)

        expr = 'ev_surface_integrate.i.Left( ps1 )'
        val = problem.evaluate(expr, ps1=ps1)
        ok2 = nm.abs(val - 1.0) < 1e-15
        self.report('with parameter: %s, value: %s, ok: %s'
                    % (expr, val, ok2))
        ok2 = True

        return ok1 and ok2
Example #14
def plot_marginal_pdfs( res, nbins=101, **kwargs):
    """ plot the results of a classification run
    :return:
    """
    from matplotlib import pyplot as pl
    import numpy as np

    nparam = len(res.vparam_names)
    # nrow = np.sqrt( nparam )
    # ncol = nparam / nrow + 1
    nrow, ncol = 1, nparam

    pdfdict = get_marginal_pdfs( res, nbins )

    fig = pl.gcf()
    for parname in res.vparam_names :
        iax = res.vparam_names.index( parname )+1
        ax = fig.add_subplot( nrow, ncol, iax )

        parval, pdf, mean, std = pdfdict[parname]
        ax.plot(  parval, pdf, **kwargs )
        if np.abs(std)>=0.1:
            ax.text( 0.95, 0.95, '%s  %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),
                     ha='right',va='top',transform=ax.transAxes )
        elif np.abs(std)>=0.01:
            ax.text( 0.95, 0.95, '%s  %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),
                     ha='right',va='top',transform=ax.transAxes )
        elif np.abs(std)>=0.001:
            ax.text( 0.95, 0.95, '%s  %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),
                     ha='right',va='top',transform=ax.transAxes )
        else :
            ax.text( 0.95, 0.95, '%s  %.3e +- %.3e'%( parname, mean, std),
                     ha='right',va='top',transform=ax.transAxes )

    pl.draw()
Example #15
def getOmega(dels):    
#    for k in range(1,dels.delta_d.shape[0])
    N = dels.delta_d.shape[1]
    delta_t = dels.delta_t
    delta_d = dels.delta_d
    
    a_t = np.diff(delta_t)
    a_t = a_t[:,0:-1]
    
    a_d = np.diff(delta_t[:,::-1])
    a_d = a_d[:,::-1]
    a_d = a_d[:,1::]
    
    b_t = np.diff(delta_d)
    b_t = b_t[:,0:-1]
    
    b_d = np.diff(delta_d[:,::-1])
    b_d = b_d[:,::-1]
    b_d = b_d[:,1::]    
    
    c_t = 0.25*(np.abs(a_t)+np.abs(b_t))*np.sign(a_t)*np.sign(b_t)*(np.sign(a_t)*np.sign(b_t)-1)
    c_d = 0.25*(np.abs(a_d)+np.abs(b_d))*np.sign(a_d)*np.sign(b_d)*(np.sign(a_d)*np.sign(b_d)-1)
    Omega = 1.0/(2*N)*(c_t.mean(axis=0) + c_d.mean(axis=0))

    return Omega
Example #16
def sideband(center_freq,sideband_freq,marker_value,markernum,sideband='up'):
    center_freq = center_freq*1E9
    sideband_freq = sideband_freq*1E9
    up_side = center_freq + sideband_freq
    down_side = center_freq - sideband_freq
    test = False    
    
    while (test == False):
        marker_center = np.abs(marker_value - center_freq)

        if (sideband == 'up'):
            marker_sideband = np.abs(marker_value - up_side)
            if (marker_center > marker_sideband):
                test = True
            else:     
                MXA.next_peak_right()
                qt.msleep(0.1)
                marker_value = MXA.marker_X_value()
        
        if (sideband == 'down'):
            marker_sideband = np.abs(marker_value - down_side)
            if (marker_center > marker_sideband):
                test = True
            else:             
                MXA.next_peak_left()
                qt.msleep(0.1)
                marker_value = MXA.marker_X_value()
Example #17
def isparallel(O1,O2):
    '''
    Judge whether two array-like vectors are parallel to each other.

    Parameters
    ----------
    O1,O2 : 1d array-like
        The input vectors.

    Returns
    -------
    int
        *  0: not parallel
        *  1: parallel
        * -1: anti-parallel
    '''
    norm1=nl.norm(O1)
    norm2=nl.norm(O2)
    if norm1<RZERO or norm2<RZERO:
        return 1
    elif O1.shape[0]==O2.shape[0]:
        buff=np.inner(O1,O2)/(norm1*norm2)
        if np.abs(buff-1)<RZERO:
            return 1
        elif np.abs(buff+1)<RZERO:
            return -1
        else:
            return 0
    else:
        raise ValueError("isparallel error: the shape of the array-like vectors does not match.")
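A usage sketch; nl is numpy.linalg and RZERO is a small tolerance defined elsewhere in the original module, so both are stubbed here as assumptions:

import numpy as np
import numpy.linalg as nl

RZERO = 1e-10   # assumed tolerance; the original module defines its own constant

print(isparallel(np.array([1.0, 2.0]), np.array([2.0, 4.0])))    #  1 (parallel)
print(isparallel(np.array([1.0, 0.0]), np.array([-3.0, 0.0])))   # -1 (anti-parallel)
print(isparallel(np.array([1.0, 0.0]), np.array([0.0, 1.0])))    #  0 (not parallel)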
Example #18
def test_known_parametrization():
    R = 1
    P = 1
    toll = 2.e-3

    n = 10
    ii = np.linspace(0,1,n+1)
    control_points_3d = np.asarray(np.zeros([n+1,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    print (control_points_3d.shape)
    control_points_3d[:,0] = np.array([R*np.cos(5*i * np.pi / (n + 1))for i in ii])
    control_points_3d[:,1] = np.array([R*np.sin(5*i * np.pi / (n + 1))for i in ii])
    control_points_3d[:,2] = np.array([P*i for i in range(n+1)])
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    arky = ArcLengthParametrizer(vsl, control_points_3d)
    new_control_points_3d = arky.reparametrize()

    #new_arky = ArcLengthParametrizer(vsl, new_control_points_3d)
    #new_new_control_points_3d = arky.reparametrize()
    tt = np.linspace(0, 1, 128)

    vals = vsl.element(control_points_3d)(tt)
    #print vals
    new_vals = vsl.element(new_control_points_3d)(tt)
    #print vals.shape, new_vals.shape
    print (np.amax((np.abs(vals-new_vals))))
    assert np.amax(np.abs(control_points_3d-new_control_points_3d))/P < toll
Example #19
def test_more_known_parametrization_together():
    R = 1
    P = 1
    toll = 7.e-3
    intervals = 5
    vs_order = 2
    n = (intervals*(vs_order)+1-1)

    #n = 18
    ii = np.linspace(0,1,n+1)
    n_1 = 2
    n_2 = 4
    control_points_3d = np.asarray(np.zeros([n+1,n_1,n_2,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    for k in range(n_1):
        for j in range(n_2):
            control_points_3d[:,k,j,0] = np.array([R*np.cos(5*i * np.pi / (n + 1))for i in ii])
            control_points_3d[:,k,j,1] = np.array([R*np.sin(5*i * np.pi / (n + 1))for i in ii])
            control_points_3d[:,k,j,2] = np.array([(k+j+1)*P*i for i in range(n+1)])
    #vsl = IteratedVectorSpace(UniformLagrangeVectorSpace(vs_order+1), np.linspace(0,1,intervals+1))
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    arky = ArcLengthParametrizer(vsl, control_points_3d)
    new_control_points_3d = arky.reparametrize()

    #print control_points_3d.shape, new_control_points_3d.shape
    tt = np.linspace(0,1,128)
    for k in range(n_1):
        for j in range(n_2):
            vals = vsl.element(control_points_3d)(tt)
            new_vals = vsl.element(new_control_points_3d)(tt)
            print (np.amax(np.abs(vals-new_vals))/(k+j+1)/P, (k+j+1))
            assert np.amax(np.abs(vals-new_vals))/(k+j+1)/P < toll
Example #20
    def DM(self, z):
        """Transverse Comoving Distance (Mpc)

        Parameters
        ----------
        z : float
            redshift
        
        Returns
        -------
        y : float
            The transverse comoving distance in Mpc, given by Hogg eqn 16
            
        Examples
        --------
        >>> cosmo = Cosmology()
        >>> cosmo.DM(1.0)
        3303.8288058874678
        """
        # Compute the transverse comoving distance in Mpc (Eqn 16)
        if self.OmegaK > 0.0:
            return self.DH / np.sqrt(self.OmegaK) * \
                    np.sinh(np.sqrt(self.OmegaK)*self.DC(z)/self.DH)
        elif self.OmegaK == 0.0:
            return self.DC(z)
        elif self.OmegaK < 0.0:
            return self.DH / np.sqrt(np.abs(self.OmegaK)) * \
                    np.sin(np.sqrt(np.abs(self.OmegaK))*self.DC(z)/self.DH)
Example #21
    def filter(self, intensity_stack, convert_dtype=False):
        I = intensity_stack
        I_dtype = I.dtype if not convert_dtype else self.dtype

        mask = self._get_mask(I.shape)

        I = np.abs(np.array(I).astype(float))
        if self.sci_psf is not None:
            I = self.sci_qe * conv(np.random.poisson(I).astype(float), self.sci_psf)
        if self.psf is not None:
            I = conv(I, self.psf)

        # convert to well counts
        Iel = np.random.poisson(I * self.qe).astype(float)
        # add shot noise
        Iel += np.abs(np.random.standard_normal(I.shape) * self.shot_noise)
        overexposed = Iel >= self.full_well
        Iel[overexposed] = self.full_well
        Iel[Iel < 0.0] = 0.0
        DU = Iel // self.adu
        dt = self.dtype
        if self.on_limit == "clip":
            mx = np.iinfo(dt).max
            DU[DU > mx] = mx

        DU[np.invert(mask)] = 0.0
        mask &= np.invert(overexposed)

        return DU.astype(I_dtype), mask
Example #22
    def _beam_map_single(self, bl_index, f_index):

        p_stokes = [ 0.5 * np.array([[1.0,   0.0], [0.0,  1.0]]),
                     0.5 * np.array([[1.0,   0.0], [0.0, -1.0]]),
                     0.5 * np.array([[0.0,   1.0], [1.0,  0.0]]),
                     0.5 * np.array([[0.0, -1.0J], [1.0J, 0.0]]) ]

        # Get beam maps for each feed.
        feedi, feedj = self.uniquepairs[bl_index]
        beami, beamj = self.beam(feedi, f_index), self.beam(feedj, f_index)

        # Get baseline separation and fringe map.
        uv = self.baselines[bl_index] / self.wavelengths[f_index]
        fringe = visibility.fringe(self._angpos, self.zenith, uv)

        pow_stokes = [ np.sum(beami * np.dot(beamj.conjugate(), polproj), axis=1) * self._horizon for polproj in p_stokes]

        # Calculate the solid angle of each beam
        pxarea = (4*np.pi / beami.shape[0])

        om_i = np.sum(np.abs(beami)**2 * self._horizon[:, np.newaxis]) * pxarea
        om_j = np.sum(np.abs(beamj)**2 * self._horizon[:, np.newaxis]) * pxarea

        omega_A = (om_i * om_j)**0.5

        # Calculate the complex visibility transfer function
        cv_stokes = [ p * (2 * fringe / omega_A) for p in pow_stokes ]

        return cv_stokes
Example #23
File: cmc.py Project: jmrv/nest
def evolve(part, logLstar):
	q = part.params2q(part.params)	# normalized array of thawed parameters
	n = len(q)
	qprime = copy.copy(q)
	m = g.masses[g.thawedIdxs]

	for t in range(g.T):		
		dq = part.stepsize * np.random.randn(n) / m
		dq[np.abs(dq)>g.maxStep] = g.maxStep
		dq[np.abs(dq)<g.minStep] = g.minStep

		qprime += dq

		# bounce if out of bounds:
		if part.outOfBounds(qprime):
			qprime = bounce(qprime)
		
		# check likelihood constraint:
		logL = part.measLoglike(qprime)
		if  logL < logLstar:
			part.reject()
			#print (logL, 'r', part.stepsize, qprime)
		else:
			part.accept(qprime)
			part.distance += np.linalg.norm(dq)
			#print (logL, 'a', part.stepsize, qprime)

	print '\ntraveled %.6f \n' % part.distance
Example #24
    def connect_edges(self):
        """Connect detected edges based on their slopes."""
        # Fitting a straight line to each edge.
        p0 = [0., 0.]
        radian2angle = 180. / np.pi
        for edge in self.edges:
            p1, s = leastsq(self.residuals, p0,
                            args=(edge['x'][:-1], edge['y'][:-1]))
            edge['slope'] = p1[0]
            edge['intercept'] = p1[1]
            edge['slope_angle'] = np.arctan(edge['slope']) * radian2angle

        # Connect by the slopes of two edges.
        len_edges = len(self.edges)
        for i in range(len_edges - 1):
            for j in range(i + 1, len_edges):
                if np.abs(self.edges[i]['slope_angle'] -
                          self.edges[j]['slope_angle']) <= \
                   self.connectivity_angle:
                    # Then, the slope between the centers of the two edges
                    # should be similar with the slopes of
                    # the two lines of the edges as well.
                    c_slope = (self.edges[i]['y_center'] -
                                    self.edges[j]['y_center']) / \
                                   (self.edges[i]['x_center'] -
                                    self.edges[j]['x_center'])
                    c_slope_angle = np.arctan(c_slope) * radian2angle

                    if np.abs(c_slope_angle - self.edges[i]['slope_angle']) <= \
                       self.connectivity_angle and \
                       np.abs(c_slope_angle - self.edges[j]['slope_angle']) <= \
                       self.connectivity_angle:
                        self.edges[i]['connectivity'] = self.edges[j]['index']
                        break
Example #25
def t2smap(catd,mask,tes):
	"""
	t2smap(catd,mask,tes)

	Input:

	catd  has shape (nx,ny,nz,Ne,nt)
	mask  has shape (nx,ny,nz)
	tes   is a 1d numpy array
	"""
	nx,ny,nz,Ne,nt = catd.shape
	N = nx*ny*nz

	echodata = fmask(catd,mask)
	Nm = echodata.shape[0]

	#Do Log Linear fit
	B = np.reshape(np.abs(echodata), (Nm,Ne*nt)).transpose()
	B = np.log(B)
	x = np.array([np.ones(Ne),-tes])
	X = np.tile(x,(1,nt))
	X = np.sort(X)[:,::-1].transpose()

	beta,res,rank,sing = np.linalg.lstsq(X,B)
	t2s = 1/beta[1,:].transpose()
	s0  = np.exp(beta[0,:]).transpose()

	#Goodness of fit
	alpha = (np.abs(B)**2).sum(axis=0)
	t2s_fit = blah = (alpha - res)/(2*res)
	
	out = unmask(t2s,mask),unmask(s0,mask),unmask(t2s_fit,mask)

	return out
Example #26
  def update(self):
    logging.debug("About to extract features for %d trips " % (len(self.trips)))
    trip_features, labels = self.extract_features()
    logging.debug("trip_features.size() = %s, nTrips = %d " % (trip_features.size, len(self.trips)))
    #TODO: why the hell is this happening..? absurd feature extraction
    trip_features = np.nan_to_num(trip_features)
    trip_features[np.abs(trip_features) < .001] = 0
    trip_features[np.abs(trip_features) > 1000000] = 0
    nonzero = ~np.all(trip_features==0, axis=1)
    logging.debug("nonzero list = %s" % nonzero)
    trip_features = trip_features[nonzero]
    labels = labels[nonzero]

    # logging.debug("Trip Features: %s" % trip_features)
    try:
       self.regression.fit(trip_features, labels)
       self.coefficients = self.regression.coef_
       self.save_coefficients()
    except ValueError as e:
       logging.warning("While fitting the regression, got error %s" % e)
       if ("%s" % e) == "The number of classes has to be greater than one":
          logging.warning("Training set has no alternatives!")
       raise e
       # else:
       #    np.save("/tmp/broken_array", trip_features)
       #    raise e
Example #27
def plot_robots_ratio_time_micmac(deploy_robots_mic, deploy_robots_mac, deploy_robots_desired, delta_t):
    plot_option = 0 # 0: ratio, 1: cost
    num_iter = deploy_robots_mic.shape[1]
    total_num_robots = np.sum(deploy_robots_mic[:,0,:])
    
    diffmic_sqs = np.zeros(num_iter)
    diffmac_sqs = np.zeros(num_iter)
    diffmic_rat = np.zeros(num_iter)
    diffmac_rat = np.zeros(num_iter)
    for t in range(num_iter):
        diffmic = np.abs(deploy_robots_mic[:,t,:] - deploy_robots_desired)    
        diffmac = np.abs(deploy_robots_mac[:,t,:] - deploy_robots_desired) 
        diffmic_rat[t] = np.sum(diffmic) / total_num_robots       
        diffmic_sqs[t] = np.sum(np.square(diffmic))
        diffmac_rat[t] = np.sum(diffmac) / total_num_robots 
        diffmac_sqs[t] = np.sum(np.square(diffmac))
        
    x = np.arange(0, num_iter) * delta_t
    if(plot_option==0):
        l1 = plt.plot(x,diffmic_rat)
        l2 = plt.plot(x,diffmac_rat)
    if(plot_option==1):
        l1 = plt.plot(x,diffmic_sqs)
        l2 = plt.plot(x,diffmac_sqs)
    
    plt.xlabel('time [s]')    
    plt.ylabel('ratio of misplaced robots')
    plt.legend((l1, l2),('Micro','Macro'))
    plt.show()
Example #28
def _gpinv(p, k, sigma):
    """Inverse Generalized Pareto distribution function"""
    x = np.full_like(p, np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)
    if np.all(ok):
        if np.abs(k) < np.finfo(float).eps:
            x = - np.log1p(-p)
        else:
            x = np.expm1(-k * np.log1p(-p)) / k
        x *= sigma
    else:
        if np.abs(k) < np.finfo(float).eps:
            x[ok] = - np.log1p(-p[ok])
        else:
            x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            x[p == 1] = np.inf
        else:
            x[p == 1] = - sigma / k

    return x
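A brief calling sketch (only import numpy as np is needed); for k >= 0 the p == 1 quantile maps to infinity, as handled above:

import numpy as np

p = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
print(_gpinv(p, k=0.1, sigma=1.0))   # monotone quantiles; last entry is inf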
Example #29
def savgol(x, window_size=3, order=2, deriv=0, rate=1):
    ''' Savitzky-Golay filter '''
        
    # Check the input
    try:
        window_size = np.abs(np.int(window_size))
        order = np.abs(np.int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size > len(x):
        raise TypeError("Not enough data points!")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 1:
        raise TypeError("window_size is too small for the polynomials order")
    if order <= deriv:
        raise TypeError("The 'deriv' of the polynomial is too high.")


    # Calculate some required parameters
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    num_data = len(x)
    
    # Construct Vandermonde matrix, its inverse, and the Savitzky-Golay coefficients   
    a = [[ii**jj for jj in order_range] for ii in range(-half_window, half_window+1)]
    pa = np.linalg.pinv(a)
    sg_coeff = pa[deriv] * rate**deriv * scipy.special.factorial(deriv)
      
    # Get the coefficients for the fits at the beginning and at the end of the data
    coefs = np.array(order_range)**np.sign(deriv)
    coef_mat = np.zeros((order+1, order+1))
    row = 0
    for ii in range(deriv,order+1):
        coef = coefs[ii]
        for jj in range(1,deriv):
            coef *= (coefs[ii]-jj)
        coef_mat[row,row+deriv]=coef
        row += 1
    coef_mat *= rate**deriv
    
    # Add the first and last point half_window times
    firstvals = np.ones(half_window) * x[0] 
    lastvals  = np.ones(half_window) * x[-1]
    x_calc = np.concatenate((firstvals, x, lastvals))

    y = np.convolve( sg_coeff[::-1], x_calc, mode='full')
    
    # chop away intermediate data
    y = y[window_size-1:window_size+num_data-1]

    # filtering for the first and last few datapoints
    y[0:half_window] = np.dot(np.dot(np.dot(a[0:half_window], coef_mat), \
                                   np.mat(pa)), x[0:window_size])
    y[len(y)-half_window:len(y)] = np.dot(np.dot(np.dot(a[half_window+1:window_size], \
                        coef_mat), pa), x[len(x)-window_size:len(x)])
    
    return y

    
Example #30
def max_lm(baselines, wavelengths, uwidth, vwidth=0.0):
    """Get the maximum (l,m) that a baseline is sensitive to.

    Parameters
    ----------
    baselines : np.ndarray
        An array of baselines.
    wavelengths : np.ndarray
        An array of wavelengths.
    uwidth : np.ndarray
        Width of the receiver in the u-direction.
    vwidth : np.ndarray
        Width of the receiver in the v-direction.

    Returns
    -------
    lmax, mmax : array_like
    """

    umax = (np.abs(baselines[:, 0]) + uwidth) / wavelengths
    vmax = (np.abs(baselines[:, 1]) + vwidth) / wavelengths

    mmax = np.ceil(2 * np.pi * umax).astype(np.int64)
    lmax = np.ceil((mmax**2 + (2*np.pi*vmax)**2)**0.5).astype(np.int64)

    return lmax, mmax
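Sketch of a call with two hypothetical baselines (in metres) at a 21 cm wavelength, assuming import numpy as np:

import numpy as np

baselines = np.array([[10.0, 0.0], [20.0, 5.0]])   # hypothetical u,v baseline components
wavelengths = np.array([0.21, 0.21])               # one wavelength per baseline
lmax, mmax = max_lm(baselines, wavelengths, uwidth=1.0)
print(lmax, mmax)                                  # per-baseline integer (l, m) limits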
Example #31
def _sgrid_func(fig=None, zeta=None, wn=None):
    if fig is None:
        fig = pylab.gcf()
    ax = fig.gca()
    xlocator = ax.get_xaxis().get_major_locator()

    ylim = ax.get_ylim()
    ytext_pos_lim = ylim[1] - (ylim[1] - ylim[0]) * 0.03
    xlim = ax.get_xlim()
    xtext_pos_lim = xlim[0] + (xlim[1] - xlim[0]) * 0.0

    if zeta is None:
        zeta = _default_zetas(xlim, ylim)

    angules = []
    for z in zeta:
        if (z >= 1e-4) and (z <= 1):
            angules.append(np.pi / 2 + np.arcsin(z))
        else:
            zeta.remove(z)
    y_over_x = np.tan(angules)

    # zeta-constant lines

    index = 0

    for yp in y_over_x:
        ax.plot([0, xlocator()[0]], [0, yp * xlocator()[0]],
                color='gray',
                linestyle='dashed',
                linewidth=0.5)
        ax.plot([0, xlocator()[0]], [0, -yp * xlocator()[0]],
                color='gray',
                linestyle='dashed',
                linewidth=0.5)
        an = "%.2f" % zeta[index]
        if yp < 0:
            xtext_pos = 1 / yp * ylim[1]
            ytext_pos = yp * xtext_pos_lim
            if np.abs(xtext_pos) > np.abs(xtext_pos_lim):
                xtext_pos = xtext_pos_lim
            else:
                ytext_pos = ytext_pos_lim
            ax.annotate(an,
                        textcoords='data',
                        xy=[xtext_pos, ytext_pos],
                        fontsize=8)
        index += 1
    ax.plot([0, 0], [ylim[0], ylim[1]],
            color='gray',
            linestyle='dashed',
            linewidth=0.5)

    angules = np.linspace(-90, 90, 20) * np.pi / 180
    if wn is None:
        wn = _default_wn(xlocator(), ylim)

    for om in wn:
        if om < 0:
            yp = np.sin(angules) * np.abs(om)
            xp = -np.cos(angules) * np.abs(om)
            ax.plot(xp, yp, color='gray', linestyle='dashed', linewidth=0.5)
            an = "%.2f" % -om
            ax.annotate(an, textcoords='data', xy=[om, 0], fontsize=8)
Example #32
def stencil_carve(depth, modelmat, occ_grid, rgb=None, rect=((0,0),(640,480))):
    """

    """
    global coords, b_total, b_occ, b_vac, depthB

    (L,T),(R,B) = rect
    L,T,R,B = map(int, (L,T,R,B))
    coords, depthB = render_blocks(occ_grid,
                                   modelmat,
                                   rect=rect)
    #print depth.mean(), occ_grid.mean(), rect, depthB.mean()
    assert coords.dtype == np.uint8
    assert depthB.dtype == np.float32
    assert depth.dtype == np.uint16
    assert coords.shape[2] == 4
    assert coords.shape[:2] == depthB.shape == depth.shape == (480,640)

    b_total = np.zeros(occ_grid.shape, 'f')
    b_occ = np.zeros(occ_grid.shape, 'f')
    b_vac = np.zeros(occ_grid.shape, 'f')

    gridmin = np.array(config.bounds[0])
    gridmax = np.array(config.bounds[1])
    gridlen = gridmax-gridmin

    if rgb is None:
        rgb = np.empty((480,640,3),'u1')

    global RGBacc, RGB, HSV
    RGBacc = np.zeros((b_total.shape[0],
                       b_total.shape[1],
                       b_total.shape[2], 3),'i')
    RGB = np.zeros((b_total.shape[0],
                    b_total.shape[1],
                    b_total.shape[2], 3),'u1')
    HSV = np.zeros((b_total.shape[0],
                    b_total.shape[1],
                    b_total.shape[2], 3),'u1')
    assert rgb.dtype == np.uint8

    if 1:
        speedup_cy.stencil_carve(depthB, depth, coords,
                                 gridlen[0], gridlen[1], gridlen[2],
                                 RGBacc, rgb, RGB,
                                 b_total, b_occ, b_vac,
                                 T, L, B, R)
    else:
        # This may be out of date - prefer the weave version
        bins = [np.arange(0,gridmax[i]-gridmin[i]+1)-0.5
                for i in range(3)]
        c = coords[:,:,:3].reshape(-1,3)
        w = ((depth>0)&(depthB<inf)).flatten()
        b_total,_ = np.histogramdd(c, bins, weights=w)
        b_occ,_ = np.histogramdd(c, bins, weights=w&
                                 (np.abs(depthB-depth)<10).flatten())
        b_vac,_ = np.histogramdd(c, bins, weights=w&
                                 (depthB+10<depth).flatten())

    # Color targets, defined as hues from 0 to 180
    # red, yellow, green, blue, red
    color_targets = np.array([0, 20, 50, 120, 180],'i')

    if 1:
        cv.CvtColor(RGB.reshape(1,-1,3), HSV.reshape(1,-1,3), cv.CV_RGB2HSV);
        #HSV[:,:,:,1:] = 255
        #Hdiff = np.abs(HSV[:,:,:,:1] - color_targets.reshape(1,1,1,-1))
        #HSV[:,:,:,0] = color_targets[np.argmin(Hdiff,axis=3)]
        speedup_cy.fix_colors(HSV, color_targets)
        cv.CvtColor(HSV.reshape(1,-1,3), RGB.reshape(1,-1,3), cv.CV_HSV2RGB);
    else:
        RGB = (RGB.astype('i')*4).clip(0,255)
    return b_occ, b_vac, b_total
Example #33
 def test_AmpPhasePropagation(self, multisine_testsignal):
     """Test Time2AmpPhase and AmpPhase2Time with noise variance as uncertainty"""
     testsignal, noise_std = multisine_testsignal
     A, P, UAP = Time2AmpPhase(testsignal, noise_std ** 2)
     x, ux = AmpPhase2Time(A, P, UAP)
     assert_almost_equal(np.max(np.abs(testsignal - x)), 0)
Example #34
        frame_max = get_env('FRAME_MAXIMUM', default=_frame_max, vartype=int)   # maximum number of frames

        attributes = {**attributes,
            **{'frame_fin': frame_fin, 'frame_per': frame_per,
            'frame_max': frame_max}}
        movie_dir = joinpath(data_dir,
            naming_standard.movie(folder=True).filename(**attributes)[0])   # movie directory name
        mkdir(movie_dir)                                                    # create movie directory
        mkdir(joinpath(movie_dir, 'frames'), replace=True)                  # create frames directory (or replaces it if existing)

        Nframes = np.min([Nentries, frame_fin]) - init_frame                    # number of frames available for the movie
        Ntimes = Nframes//frame_per                                             # maximum number of rendered frames
        frames = np.array(list(OrderedDict.fromkeys(map(int, init_frame +
            np.linspace(0, Nframes - 1, Ntimes, endpoint=False, dtype=int)))))  # usable frames

        if dt < 0: dt = np.min(np.abs(frames - np.roll(frames, shift=1)))

        frames = frames[frames + dt < Nentries - 1][:frame_max] # rendered frames

        fixed_frame = get_env('FIXED_FRAME', default=False, vartype=bool)   # FIXED_FRAME mode

        with open(wrap_file_name, 'rb') as wrap_file,\
            open(unwrap_file_name, 'rb') as unwrap_file:    # opens wrapped and unwrapped trajectory files

            w_traj = Gsd(wrap_file, prep_frames=prep_frames)    # wrapped trajectory object
            u_traj = Dat(unwrap_file, parameters['N'])			# unwrapped trajectory object

            for frame in frames:    # for rendered frames
                sys.stdout.write(
                    'Frame: %d' % (frames.tolist().index(frame) + 1)
                    + "/%d \r" % len(frames))
Example #35
def compute_mean_spectrogram(s, sample_rate, win_sizes, increment=None, num_freq_bands=100,
                             spec_estimator=GaussianSpectrumEstimator(nstd=6), mask=False, mask_gain=3.0):
    """
        Compute a spectrogram for each time window, and average across time windows to get better time-frequency
        resolution. Post-processing is done with applying the log to change the power spectrum to decibels, and
        then a hard threshold is applied to zero-out the lowest 10% of the pixels.
    """

    #compute spectrograms
    stime = time.time()
    timefreqs = list()
    for k,win_size in enumerate(win_sizes):
        if increment is None:
            inc = win_sizes[0] / 2
        else:
            inc = increment
        t,freq,tf = timefreq(s, sample_rate, win_size, inc, spec_estimator)
        ps = np.abs(tf)
        ps_log = log_spectrogram(ps)
        timefreqs.append( (t, freq, ps_log) )
    etime = time.time() - stime
    #print 'time to compute %d spectrograms: %0.6fs' % (len(win_sizes), etime)

    #compute the mean spectrogram across window sizes
    nyquist_freq = sample_rate / 2.0
    df = nyquist_freq / num_freq_bands
    f_smallest = np.arange(num_freq_bands)*df
    t_smallest = timefreqs[0][0]  # best temporal resolution
    df_smallest = f_smallest[1] - f_smallest[0]
    dt_smallest = t_smallest[1] - t_smallest[0]

    #resample the spectrograms so they all have the same frequency spacing
    stime = time.time()
    rs_specs = list()
    for t,freq,ps in timefreqs:
        rs_t,rs_freq,rs_ps = resample_spectrogram(t, freq, ps, dt_smallest, df_smallest)
        rs_specs.append(rs_ps)
    etime = time.time() - stime
    #print 'time to resample %d spectrograms: %0.6fs' % (len(win_sizes), etime)

    #get the shortest spectrogram length
    min_freq_len = np.min([rs_ps.shape[0] for rs_ps in rs_specs])
    min_t_len = np.min([rs_ps.shape[1] for rs_ps in rs_specs])
    rs_specs_arr = np.array([rs_ps[:min_freq_len, :min_t_len] for rs_ps in rs_specs])
    t_smallest = np.arange(min_t_len)*dt_smallest
    f_smallest = np.arange(min_freq_len)*df_smallest

    #compute mean, std, and zscored power spectrum across window sizes
    tf_mean = rs_specs_arr.mean(axis=0)

    if mask:
        #compute the standard deviation across window sizes
        tf_std = rs_specs_arr.std(axis=0, ddof=1)
        #compute something that is close to the maximum std. we use the 95th pecentile to avoid outliers
        tf_std /= np.percentile(tf_std.ravel(), 95)
        #compute a sigmoidal mask that will zero out pixels in tf_mean that have high standard deviations
        sigmoid = lambda x: 1.0 / (1.0 + np.exp(1)**(-mask_gain*x))
        sigmoid_mask = 1.0 - sigmoid(tf_std)
        #mask the mean time frequency representation
        tf_mean *= sigmoid_mask

    return t_smallest, f_smallest, tf_mean
Example #36
 def make_spectrum(signal, signal_weights):
     denom = (signal_weights**2).sum(axis=0)
     return (np.abs(signal * signal_weights)**2).sum(axis=0) / denom
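A minimal sketch (the enclosing class is not shown, so this assumes the function is callable at module level and that numpy is imported): four equally weighted channel spectra are combined into one power spectrum.

import numpy as np

signal = np.fft.fft(np.random.randn(4, 256), axis=1)   # per-channel spectra (hypothetical)
signal_weights = np.ones((4, 1))                        # equal weights
print(make_spectrum(signal, signal_weights).shape)      # (256,)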
Example #37
def jitter(rx_x, rx_y, taps, iter, mean=True):
    starttime = datetime.datetime.now()
    mean = mean
    rx_x_single = np.array(rx_x)
    rx_y_single = np.array(rx_y)
    datalength = len(rx_x)
    stepsizelist = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 6.409e-6, 1e-6, 1e-7]
    overhead = 1
    cmataps = taps
    center = int((cmataps - 1) / 2)
    iterator = iter
    earlystop = 0.0001
    stepsizeadjust = 0.95
    stepsize = stepsizelist[6]
    stepsize_x = stepsize
    stepsize_y = stepsize

    costfunx = np.zeros((1, iterator), dtype="complex_")
    costfuny = np.zeros((1, iterator), dtype="complex_")
    inputrx = rx_x_single
    inputry = rx_y_single
    hxx = np.zeros(cmataps, dtype="complex_")
    hyy = np.zeros(cmataps, dtype="complex_")
    hxx[center] = 1
    hyy[center] = 1
    exout = np.zeros(datalength, dtype="complex_")
    eyout = np.zeros(datalength, dtype="complex_")
    errx = np.zeros(datalength, dtype="complex_")
    erry = np.zeros(datalength, dtype="complex_")
    cost_x = np.zeros(datalength, dtype="complex_")
    cost_y = np.zeros(datalength, dtype="complex_")
    squ_Rx = np.zeros(datalength, dtype="complex_")
    squ_Ry = np.zeros(datalength, dtype="complex_")
    squ_Rx, squ_Ry = squ_Rx + 10, squ_Ry + 10

    if mean == True:
        squ_Rx = np.zeros(datalength, dtype="complex_")
        squ_Ry = np.zeros(datalength, dtype="complex_")
        for indx in range(center, datalength - center):
            inx = inputrx[indx - center:indx + center + 1]
            iny = inputry[indx - center:indx + center + 1]
            squ_Rx[indx] = np.mean(abs(inx) ** 4) / np.mean(abs(inx) ** 2)
            squ_Ry[indx] = np.mean(abs(iny) ** 4) / np.mean(abs(iny) ** 2)

    for it in range(iterator):
        for indx in range(center, datalength - center):
            exout[indx] = np.matmul(hxx, inputrx[indx - center:indx + center + 1])
            eyout[indx] = np.matmul(hyy, inputry[indx - center:indx + center + 1])
            if np.isnan(exout[indx]) or np.isnan(eyout[indx]):
                raise Exception("CMA Equaliser didn't converge at iterator {}".format(it))

            errx[indx] = exout[indx] * (squ_Rx[indx] - np.abs(exout[indx]) ** 2)
            erry[indx] = eyout[indx] * (squ_Ry[indx] - np.abs(eyout[indx]) ** 2)

            hxx = hxx + stepsize_x * errx[indx] * np.conj(
                inputrx[indx - center:indx + center + 1])
            hyy = hyy + stepsize_y * erry[indx] * np.conj(
                inputry[indx - center:indx + center + 1])

            cost_x[indx] = (abs(exout[indx])) ** 2 - squ_Rx[indx]
            cost_y[indx] = (abs(eyout[indx])) ** 2 - squ_Ry[indx]
        costfunx[0][it] = -1 * (np.mean(cost_x))
        costfuny[0][it] = -1 * (np.mean(cost_y))
        print('iteration = {}'.format(it))
        print(costfunx[0][it])
        print(costfuny[0][it])
        print('-------')

        if it >= 1:
            # if np.abs(self.costfunx[0][it] - self.costfunx[0][it - 1]) < self.earlystop * \
            #         self.costfunx[0][it]:
            #     print("Earlybreak at iterator {}".format(it))
            #     break
            if np.abs(costfunx[0][it] - costfunx[0][it - 1]) < earlystop:
                stepsize_x *= stepsizeadjust
                print('Stepsize_x adjust to {}'.format(stepsize_x))
            if np.abs(costfuny[0][it] - costfuny[0][it - 1]) < earlystop:
                stepsize_y *= stepsizeadjust
                print('Stepsize_y adjust to {}'.format(stepsize_y))

        # rx_x_cma = exout
        # rx_y_cma = eyout
    endtime = datetime.datetime.now()
    print(endtime - starttime)

    return exout, eyout
Example #38
#caffe_root ="/home/ydwu/work/caffe"
#os.chdir(caffe_root)
#print caffe_root
#sys.path.insert(0, caffe_root + 'python')
import caffe

caffe.set_mode_cpu()
net = caffe.Net(prototxt, source, caffe.TEST)
layers = net.params.keys()
linestyles = ['--', '-']

for idx, layer in enumerate(layers):
    #if not('bn' in layer) and not('scale' in layer): # do not include batch normalization and scaling layers in ResNets
    wT = 0.0
    w = net.params[layer][0].data
    wMax = np.max(np.abs(w))
    r = w / wMax  # normalize
    for n in range(0, N):
        qSgn = np.sign(r)
        qLog = np.log2(abs(r + 1e-32))
        qIdx = np.floor(qLog)
        bLog = qIdx + np.log2(1.5)
        bIdx = qLog > bLog  # border condition
        qIdx[bIdx] = qIdx[bIdx] + 1.0
        q = qSgn * 2**(qIdx)
        qIdxMem = qSgn * (-(n + 1) - qIdx + 2)
        sIdx = (2 - (n + 1) - qIdx) > (2**(B - 1) - 1)  # saturation condition
        q[sIdx] = 0
        qIdxMem[sIdx] = 0
        zIdx = q != 0
        wT += q
Example #39
emas_11=[]
eqju_11=[]
eqav_11=[0 for i in range(ene)]

deltat=5/ene
tiempo=[deltat*i for i in range (ene)]
#ecuacion maestra
for i in range (0,ene):
    mat = d1
    mat = mat -deltat*1j*( np.matmul(Hjc,mat)- np.matmul(mat,Hjc) )
    mat = mat +deltat*0.5*( 2*np.matmul(S1,np.matmul(mat,S1d)) -1*np.matmul(mat,np.matmul(S1d,S1)) -1*np.matmul(S1d,np.matmul(S1,mat)) )
    mat = mat +deltat*0.5*( 2*np.matmul(S2,np.matmul(mat,S2d)) -1*np.matmul(mat,np.matmul(S2d,S2)) -1*np.matmul(S2d,np.matmul(S2,mat)) )
    mat = mat +deltat*0.5*( 2*np.matmul(S3,np.matmul(mat,S3d)) -1*np.matmul(mat,np.matmul(S3d,S3)) -1*np.matmul(S3d,np.matmul(S3,mat)) )
    mat = mat +deltat*0.5*( 2*np.matmul(S4,np.matmul(mat,S4d)) -1*np.matmul(mat,np.matmul(S4d,S4)) -1*np.matmul(S4d,np.matmul(S4,mat)) )
    d1 = mat
    emas_gg.append(np.abs(np.matmul(mat,np.matmul(sigmas,sigmen)).trace()))
    emas_ee.append(np.abs(np.matmul(mat,np.matmul(sigmen,sigmas)).trace()))
    emas_00.append(np.abs(np.matmul(mat,np.matmul(adag,a)).trace()))
    emas_11.append(np.abs(np.matmul(mat,np.matmul(a,adag)).trace()))

#quantum jumps
for m in range(0,nst):
    for n in range(0,ene):
        vec = e1
        aver = np.array([0,0,0,0])
        rn1=np.random.random_sample()
        rn2=np.random.random_sample()
        deltap1= deltat*np.abs(np.dot(np.matmul(S1,vec),np.matmul(S1,vec)))
        deltap2= deltat*np.abs(np.dot(np.matmul(S2,vec),np.matmul(S2,vec)))
        deltap3= deltat*np.abs(np.dot(np.matmul(S3,vec),np.matmul(S3,vec)))
        deltap4= deltat*np.abs(np.dot(np.matmul(S4,vec),np.matmul(S4,vec)))
Example #40
def _default_gains(num, den, xlim, ylim):
    """Unsupervised gains calculation for root locus plot.

    References:
     Ogata, K. (2002). Modern control engineering (4th ed.). Upper Saddle River, NJ : New Delhi: Prentice Hall.."""

    k_break, real_break = _break_points(num, den)
    kmax = _k_max(num, den, real_break, k_break)
    kvect = np.hstack((np.linspace(0, kmax, 50), np.real(k_break)))
    kvect.sort()
    mymat = _RLFindRoots(num, den, kvect)
    mymat = _RLSortRoots(mymat)
    open_loop_poles = den.roots
    open_loop_zeros = num.roots

    if (open_loop_zeros.size != 0) and (open_loop_zeros.size <
                                        open_loop_poles.size):
        open_loop_zeros_xl = np.append(
            open_loop_zeros,
            np.ones(open_loop_poles.size - open_loop_zeros.size) *
            open_loop_zeros[-1])
        mymat_xl = np.append(mymat, open_loop_zeros_xl)
    else:
        mymat_xl = mymat
    singular_points = np.concatenate((num.roots, den.roots), axis=0)
    important_points = np.concatenate((singular_points, real_break), axis=0)
    important_points = np.concatenate((important_points, np.zeros(2)), axis=0)
    mymat_xl = np.append(mymat_xl, important_points)
    false_gain = den.coeffs[0] / num.coeffs[0]
    if false_gain < 0 and not den.order > num.order:
        raise ValueError(
            "Not implemented support for 0 degrees root "
            "locus with equal order of numerator and denominator.")

    if xlim is None and false_gain > 0:
        x_tolerance = 0.05 * (np.max(np.real(mymat_xl)) -
                              np.min(np.real(mymat_xl)))
        xlim = _ax_lim(mymat_xl)
    elif xlim is None and false_gain < 0:
        axmin = np.min(np.real(important_points)) - (np.max(
            np.real(important_points)) - np.min(np.real(important_points)))
        axmin = np.min(np.array([axmin, np.min(np.real(mymat_xl))]))
        axmax = np.max(np.real(important_points)) + np.max(
            np.real(important_points)) - np.min(np.real(important_points))
        axmax = np.max(np.array([axmax, np.max(np.real(mymat_xl))]))
        xlim = [axmin, axmax]
        x_tolerance = 0.05 * (axmax - axmin)
    else:
        x_tolerance = 0.05 * (xlim[1] - xlim[0])

    if ylim is None:
        y_tolerance = 0.05 * (np.max(np.imag(mymat_xl)) -
                              np.min(np.imag(mymat_xl)))
        ylim = _ax_lim(mymat_xl * 1j)
    else:
        y_tolerance = 0.05 * (ylim[1] - ylim[0])

    tolerance = np.max([x_tolerance, y_tolerance])
    distance_points = np.abs(np.diff(mymat, axis=0))
    indexes_too_far = np.where(distance_points > tolerance)

    while (indexes_too_far[0].size > 0) and (kvect.size < 5000):
        for index in indexes_too_far[0]:
            new_gains = np.linspace(kvect[index], kvect[index + 1], 5)
            new_points = _RLFindRoots(num, den, new_gains[1:4])
            kvect = np.insert(kvect, index + 1, new_gains[1:4])
            mymat = np.insert(mymat, index + 1, new_points, axis=0)

        mymat = _RLSortRoots(mymat)
        distance_points = np.abs(np.diff(
            mymat, axis=0)) > tolerance  # distance between points
        indexes_too_far = np.where(distance_points)

    new_gains = kvect[-1] * np.hstack((np.logspace(0, 3, 4)))
    new_points = _RLFindRoots(num, den, new_gains[1:4])
    kvect = np.append(kvect, new_gains[1:4])
    mymat = np.concatenate((mymat, new_points), axis=0)
    mymat = _RLSortRoots(mymat)
    return kvect, mymat, xlim, ylim
Example #41
# a = 0; b = 1
# exact_area = 1 - cos(1)

columns = ('h', 'trapezoid area', 'error')
approx = []
error = []
errorh = []
arr = [1, 2, 4, 8, 16, 32, 64, 128]
arr2 = [1, 1 / 2, 1 / 4, 1 / 8, 1 / 16, 1 / 32, 1 / 64, 1 / 128]
for n in arr:
    h = (b - a) / n
    total_area = 0
    for i in range(0, n):
        total_area += h * 0.5 * (f(a + i * h) + f(a + (i + 1) * h))
    approx.append(total_area)
    error.append(np.abs(total_area - exact_area))
    errorh.append(np.abs(total_area - exact_area) / (h * h))

title_text = 'Error in the Trapezoid Approximation Method, Function: 1-x^2'
fig_background_color = 'skyblue'
fig_border = 'steelblue'
data = [
    ['n', 'h', 'Trapezoid Approximation', 'Error', 'Error/h^2'],
    [arr[0], arr2[0], approx[0], error[0], errorh[0]],
    [arr[1], arr2[1], approx[1], error[1], errorh[1]],
    [arr[2], arr2[2], approx[2], error[2], errorh[2]],
    [arr[3], arr2[3], approx[3], error[3], errorh[3]],
    [arr[4], arr2[4], approx[4], error[4], errorh[4]],
    [arr[5], arr2[5], approx[5], error[5], errorh[5]],
    [arr[6], arr2[6], approx[6], error[6], errorh[6]],
    [arr[7], arr2[7], approx[7], error[7], errorh[7]],
Example #42
time = []   # initialized here; the scraped fragment starts mid-file and appends to it below
ax = []
ay = []
az = []
muscle = []
vec = []

with open(r'C:\Users\ebuba\Desktop\test.csv') as csvDatafile:
    csvReader = csv.reader(csvDatafile, delimiter=',')
    for row in csvReader:
        time.append(float(row[0]))
        ax.append(float(row[1]))
        ay.append(float(row[2]))
        az.append(float(row[3]))
        muscle.append(float(row[7]))

axmag = np.square(ax)
aymag = np.square(ay)
azmag = np.square(az)

magn = axmag+aymag+azmag


t = np.linspace(0,1,1000)
f = np.fft.fftfreq(len(magn),t[1]-t[0])

data_fft = np.abs(np.fft.fft(magn)) / len(magn)

print(f)


Example #43
def plot_tail_symbol_stats(tf, allwins, conf):

    tail_state = conf['tail_state']
    fini_state = conf['fini_state']

    tail_sym_latencies = []
    tail_sym_latencies_0 = []
    tail_sym_latencies_1 = []
    tail_sym_latencies_2 = []
    tail_sym_latencies_3 = []

    for win in allwins:
        dd0 = [d0 for d0, d1, d2, d3 in win]
        dd1 = [d1 for d0, d1, d2, d3 in win]
        dd2 = [d2 for d0, d1, d2, d3 in win]
        dd3 = [d3 for d0, d1, d2, d3 in win]

        c0_ts = [ts for (st, ts) in dd0]
        c0_st = [st for (st, ts) in dd0]

        #c1_ts = [ts for (st, ts) in dd1]
        #c1_st = [st for (st, ts) in dd1]

        #c2_ts = [ts for (st, ts) in dd2]
        #c2_st = [st for (st, ts) in dd2]

        c3_ts = [ts for (st, ts) in dd3]
        c3_st = [st for (st, ts) in dd3]

        for (st, ts) in dd0:
            if st == tail_state:
                c0_ts_tail_entry = ts
                break

        for (st, ts) in dd0:
            if st == fini_state:
                c0_ts_fini_exit = ts
        #---

        for (st, ts) in dd1:
            if st == tail_state:
                c1_ts_tail_entry = ts
                break

        for (st, ts) in dd1:
            if st == fini_state:
                c1_ts_fini_exit = ts
        #---

        for (st, ts) in dd2:
            if st == tail_state:
                c2_ts_tail_entry = ts
                break

        for (st, ts) in dd2:
            if st == fini_state:
                c2_ts_fini_exit = ts
        #---

        for (st, ts) in dd3:
            if st == tail_state:
                c3_ts_tail_entry = ts
                break

        for (st, ts) in dd3:
            if st == fini_state:
                c3_ts_fini_exit = ts
        #---

        tail_sym_latency = c3_ts_fini_exit - c0_ts_tail_entry
        tail_sym_latency_0 = c0_ts_fini_exit - c0_ts_tail_entry
        tail_sym_latency_1 = c1_ts_fini_exit - c1_ts_tail_entry
        tail_sym_latency_2 = c2_ts_fini_exit - c2_ts_tail_entry
        tail_sym_latency_3 = c3_ts_fini_exit - c3_ts_tail_entry

        tail_sym_latencies.append(tail_sym_latency)
        tail_sym_latencies_0.append(tail_sym_latency_0)
        tail_sym_latencies_1.append(tail_sym_latency_1)
        tail_sym_latencies_2.append(tail_sym_latency_2)
        tail_sym_latencies_3.append(tail_sym_latency_3)

    tail_sym_latency_mean = np.mean(tail_sym_latencies)
    tail_sym_latency_std = np.std(tail_sym_latencies)
    tail_sym_latency_max = max(tail_sym_latencies)
    tail_sym_latency_min = min(tail_sym_latencies)
    tail_sym_latency_diff = tail_sym_latency_max - tail_sym_latency_min

    tail_sym_latency_0_mean = np.mean(tail_sym_latencies_0)
    tail_sym_latency_0_diff = max(tail_sym_latencies_0) - min(
        tail_sym_latencies_0)
    tail_sym_latency_1_mean = np.mean(tail_sym_latencies_1)
    tail_sym_latency_1_diff = max(tail_sym_latencies_1) - min(
        tail_sym_latencies_1)
    tail_sym_latency_2_mean = np.mean(tail_sym_latencies_2)
    tail_sym_latency_2_diff = max(tail_sym_latencies_2) - min(
        tail_sym_latencies_2)
    tail_sym_latency_3_mean = np.mean(tail_sym_latencies_3)
    tail_sym_latency_3_diff = max(tail_sym_latencies_3) - min(
        tail_sym_latencies_3)

    width = 0.8
    plt.figure(figsize=(10, 10))
    #plt.bar(bincenters,y,width=width, color='r', yerr=menStd)
    #y = [tail_sym_latency_min, tail_sym_latency_mean, tail_sym_latency_max]
    #ystd = tail_sym_latency_std
    #ydiff = tail_sym_latency_diff
    #ymean = [tail_sym_latency_mean]
    #ymin = [tail_sym_latency_min]
    #ymax = [tail_sym_latency_max]
    #plt.bar([1, 2, 3],y, width=width, color='r', yerr=ystd)

    #xmin = [1,2]
    #xmean = [x + width for x in xmin]
    #xmax = [x + width for x in xmean]
    #plt.bar(xmin,ymin, width=width, color='g')
    #plt.bar(xmean,ymean, width=width, color='b', yerr=ystd)
    #plt.bar(xmax,ymax, width=width, color='r')

    ydiff = [
        tail_sym_latency_diff, tail_sym_latency_0_diff,
        tail_sym_latency_1_diff, tail_sym_latency_2_diff,
        tail_sym_latency_3_diff
    ]
    ymean = [
        tail_sym_latency_mean, tail_sym_latency_0_mean,
        tail_sym_latency_1_mean, tail_sym_latency_2_mean,
        tail_sym_latency_3_mean
    ]

    x = [-1.4, -0.4, 0.6, 1.6, 2.6]
    y = ymean
    ys = tail_sym_latencies
    errx = [v + 0.4 for v in x]
    erry = ydiff

    pltData = [x, y, errx, erry, ys]
    print(os.getcwd())
    tag = os.getcwd().split('trace')[1].split('/')[-1]
    print(tag)
    fname = '../hist_stats.pkl'
    try:
        f = open(fname, 'rb')
        try:
            d = pickle.load(f)
        except EOFError:
            d = {}
        f.close()
    except IOError:
        d = {}

    f = open(fname, 'wb')
    d[tag] = pltData
    pickle.dump(d, f)
    f.close()
    print('Saved plot data to %s' % fname)

    plt.bar(x, y, width=width, color='g', yerr=erry)

    #(_, caps, _) = plt.errorbar(errx, ymean, ydiff, capsize=20, elinewidth=3, linestyle='.')
    (_, caps, _) = plt.errorbar(errx,
                                y,
                                erry,
                                capsize=10,
                                elinewidth=6,
                                linestyle='.',
                                ecolor='black')

    for cap in caps:
        cap.set_color('black')
        cap.set_markeredgewidth(3)

    plt.xlabel('Core ID (-1 is for the total symbol latency)')
    #plt.xticks(['Net','Core1','Core2','Core3','Core4'])
    plt.ylabel('Cycles')
    plt.title('Mean tail symbol latency (err: [min, max])')
    plt.savefig('%s_plot_hist.png' % tf)

    plt.figure()
    plt.plot(ys, '.-g')
    plt.savefig('%s_plot_tail_scatter_timeorder.png' % tf)

    ys = np.abs(np.fft.fft(tail_sym_latencies))**2
    plt.figure()
    #plt.plot(ys, 'r')
    plt.plot(ys[1:], 'r')  #discard dc since it hides rest of it
    plt.savefig('%s_plot_tail_scatter_fft.png' % tf)

    ys = tail_sym_latencies
    ys.sort()
    plt.figure()
    plt.plot(ys, '.')
    plt.savefig('%s_plot_tail_scatter.png' % tf)
def mean_absolute_percentage_error(y_obs, y_pred): 
    #y_obs, y_pred = np.array(y_obs), np.array(y_pred)
    y_obs=y_obs.reshape(-1,1)
    #y_obs, y_pred =check_array(y_obs, y_pred)
    return np.mean(np.abs((y_obs - y_pred) / y_obs)) * 100
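
A minimal sanity check of the MAPE helper above, using hypothetical values. Both arrays are shaped as column vectors so the broadcasting inside the helper stays elementwise:

import numpy as np

y_obs = np.array([100.0, 200.0, 400.0]).reshape(-1, 1)
y_pred = np.array([110.0, 180.0, 440.0]).reshape(-1, 1)
print(mean_absolute_percentage_error(y_obs, y_pred))  # -> 10.0 (percent)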
Пример #45
0
				v_max=max(upper);
				x_max_max=np.zeros(N);
				x_max_min=np.zeros(N);
				
				v_min=max((beta_array/(alpha_array+1)).sum((0,2))-Reg+C);
				x_min_max=np.ones(N);
				x_min_min=np.zeros(N);
				x_min_min[np.argmax((beta_array/(alpha_array+1)).sum((0,2))-Reg+C)]=1;
				
				#iterate until v_max and v_min are sufficiently close
				while(v_max-v_min>0.0001):
					v_new=1.0/2*(v_max+v_min);
					#v is a convex function of x
					x_new_max=1.0/2*(x_min_max+x_max_max)
					if(not np.isfinite(v_max)):
						v_new=2*np.abs(v_min);
						x_new_max=np.copy(x_min_max)
					x_new_max[v_new>upper]=0 
					x_new_min=np.copy(x_max_min)
					x_new_min[v_new>upper]=0 
					
					# iterate on x to determine whether v is smaller or greater than the required value
					while((x_new_min.sum()-1)*(x_new_max.sum()-1)<0):
						x_new=1.0/2*(x_new_min+x_new_max);
						b_array=(beta_array/(alpha_array+x_new[:,np.newaxis])).sum((0,2))-Reg*x_new+C>v_new
						x_new_min[b_array]=x_new[b_array];
						x_new_max[~b_array]=x_new[~b_array];
					
					if x_new_min.sum()-1>=0 and x_new_max.sum()-1>=0:
						v_min=v_new;
						x_min_min=x_new_min;
Пример #46
0
 def convergence_criterion(self):
     convergence_criterion = np.average(np.abs(np.diff(self.last_losses)))
     return convergence_criterion
Пример #47
0
def find_nearest(array, value):
    array = np.asarray(array)
    idx = (np.abs(array - value)).argmin()
    return idx, array[idx]
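
For illustration, a small usage sketch for find_nearest with hypothetical data:

import numpy as np

grid = np.arange(10)               # 0, 1, ..., 9
idx, val = find_nearest(grid, 6.7)
print(idx, val)                    # -> 7 7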
Пример #48
0
def get_MAPE(actual, predicted):
    #y_true, y_pred = np.array(actual), np.array(predicted)
    return np.round(np.mean(np.abs((actual - predicted) / actual)) * 100, 2)
Пример #49
0
  def summary(self, level=0.9,
              threshold=0.0,
              tails=1,
              report='last',
              rescale=1.0):
    """Summarise the posterior of the cumulative causal effect, Delta.

    Args:
      level: `float` in (0,1). Determines width of CIs.
      threshold: `float`. Tests whether Delta is greater than threshold.
      tails: `int` in {1,2}. Specifies number of tails to use in tests.
      report: `str`, whether to report on 'all' or 'last' day in test period.
      rescale: `float`, an additional scaling factor for Delta.

    Returns:
      pd.DataFrame, a summary at level, with alpha=1-level, containing:
      - estimate, the mean of Delta.
      - precision, distance between the (1-level)/tails and 0.5 quantiles.
      - lower, the value of the (1-level)/tails quantile.
      - upper, the value of the 1-(1-level)/tails quantile if tails=2, otherwise inf.
      - scale, the scale parameter of Delta.
      - level, records the level parameter used to generate the report.
      - threshold, records the threshold parameter.
      - probability, the probability that Delta > threshold.

    Raises:
      ValueError: if tails is neither 1 nor 2.
      ValueError: if level is outside of the interval [0,1].
    """
    # Enforce constraints on the arguments.
    if tails not in (1, 2):
      raise ValueError('tails should be either 1 or 2.')

    if level < 0.0 or level > 1.0:
      raise ValueError('level should be between 0.0 and 1.0.')

    # Calculate the relevant points to evaluate.
    alpha = (1-level) / tails
    if tails == 1:
      pupper = 1.0
    elif tails == 2:
      pupper = 1.0 - alpha

    # Obtain the appropriate posterior distribution.
    delta = self.causal_cumulative_distribution(rescale=rescale)

    # Define periods to credit to test.
    if self.use_cooldown:
      periods = [self.periods.test, self.periods.cooldown]
    else:
      periods = [self.periods.test]

    # Facts about the date index.
    dates = self.causal_effect(periods).index
    ndates = len(dates)
    dates_ones = np.ones(ndates)

    # Data for the report.
    values = {
        'dates': dates,
        'estimate': delta.mean(),
        'precision': np.abs(delta.ppf(alpha) - delta.ppf(0.5)).reshape(ndates),
        'lower': delta.ppf(alpha).reshape(ndates),
        'upper': delta.ppf(pupper).reshape(ndates),
        'scale': delta.kwds['scale'].reshape(ndates),
        'level': level * dates_ones,
        'posterior_threshold': threshold * dates_ones,
        'probability': 1.0 - delta.cdf(threshold).reshape(ndates)
    }

    # Ordering for the report.
    ordering = ['estimate',
                'precision',
                'lower',
                'upper',
                'scale',
                'level',
                'probability',
                'posterior_threshold'
               ]

    # Construct the report, put it in the desired ordering.
    result = pd.DataFrame(values, index=dates)
    result = result[ordering]

    # Decide how much of the report to report.
    if report == 'all':
      lines = result.shape[0]
    elif report == 'last':
      lines = 1

    # Return the report for `lines` last days of the test period.
    return result.tail(lines)
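
A usage sketch for the summary method above; `analysis` is a hypothetical, already-fitted instance of the surrounding class:

report = analysis.summary(level=0.95, threshold=0.0, tails=2, report='all')
print(report[['estimate', 'lower', 'upper', 'probability']])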
Пример #50
0
    def aggregate_descriptors(self, descriptors_stats = {}):
        aggregated = {}
        for namespace in self.descriptors:
            aggregated[namespace] = {}
            descs = self.descriptors[namespace].keys()
            descs.sort()

            stats_default = [ 'mean' ,'var', 'min', 'max' ]

            for desc in descs:
                values = self.descriptors[namespace][desc]['values']

                if (namespace, desc) in self.statsBlackList:
                    # make sure there is only one value
                    if len(values) != 1:
                        raise EssentiaError('You declared %s as a global descriptor, but there is more than one value: %s' % (desc, values))

                    value = values[0]
                    try:
                        # if value is numeric
                        aggregated[namespace][desc] = { 'value': essentia.array(value) }
                    except:
                        # if value is not numeric
                        aggregated[namespace][desc] = { 'value': value }

                    continue

                aggregated[namespace][desc] = {}
                aggrDesc = aggregated[namespace][desc]

                stats = list(stats_default)
                if not isinstance(values[0], numpy.ndarray):
                    #stats += [ 'percentile_5', 'percentile_95' ]
                    stats += [ 'dmean', 'dmean2', 'dvar', 'dvar2']

                if namespace in descriptors_stats and desc in descriptors_stats[namespace]:
                    stats = descriptors_stats[namespace][desc]

                try:

                    if 'mean' in stats:
                        aggrDesc['mean'] = essentia.array(numpy.mean(values, axis=0))

                    if 'var' in stats:
                        aggrDesc['var'] = essentia.array(numpy.var(values, axis=0))

                    if 'min' in stats:
                        aggrDesc['min'] = essentia.array(numpy.min(values, axis=0))

                    if 'max' in stats:
                        aggrDesc['max'] = essentia.array(numpy.max(values, axis=0))

                    derived = None
                    derived2 = None

                    if 'dmean' in stats:
                        if not derived: derived = [a - b for a,b in izip(values[1:], values[:-1])]
                        aggrDesc['dmean'] = essentia.array(numpy.mean(numpy.abs(derived), axis=0))

                    if 'dvar' in stats:
                        if not derived: derived = [a - b for a,b in izip(values[1:], values[:-1])]
                        aggrDesc['dvar'] = essentia.array(numpy.var(derived, axis=0))

                    if 'dmean2' in stats:
                        if not derived: derived = [a - b for a,b in izip(values[1:], values[:-1])]
                        if not derived2: derived2 = [a - b for a,b in izip(derived[1:], derived[:-1])]
                        if derived2:
                            aggrDesc['dmean2'] = essentia.array(numpy.mean(numpy.abs(derived2), axis=0))
                        else:
                            aggrDesc['dmean2'] = 'undefined'

                    if 'dvar2' in stats:
                        if not derived: derived = [a - b for a,b in izip(values[1:], values[:-1])]
                        if not derived2: derived2 = [a - b for a,b in izip(derived[1:], derived[:-1])]
                        if derived2:
                            aggrDesc['dvar2'] = essentia.array(numpy.var(derived2, axis=0))
                        else:
                            aggrDesc['dvar2'] = 'undefined'


                    if 'frames' in stats:
                        aggrDesc['frames'] = essentia.array(values)

                    if 'single_gaussian' in stats:
                        single_gaussian = essentia.SingleGaussian()
                        (m, cov, icov) = single_gaussian(essentia.array(values))
                        aggrDesc['mean'] = m
                        aggrDesc['cov'] = cov
                        aggrDesc['icov'] = icov

                    for stat in stats:
                        if stat.startswith('percentile_'):
                            p = float(stat.split('_')[1])
                            aggrDesc[stat] = essentia.array(percentile(values, p))


                except (TypeError, ValueError): # values are not numeric

                    if len(values) == 1:
                       aggrDesc['value'] = values[0]
                    else:
                       aggrDesc['value'] = []
                       for value in values:
                           aggrDesc['value'].append(value)

        return aggregated
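
For intuition, the derivative statistics used above ('dmean', 'dvar', 'dmean2', 'dvar2') reduce to the following plain-numpy computations. This is only a sketch with hypothetical frame values; essentia.array and the descriptor bookkeeping are omitted:

import numpy as np

values = np.array([0.1, 0.4, 0.2, 0.9, 0.5])   # hypothetical frame-level descriptor
derived = np.diff(values)                       # first differences between frames
derived2 = np.diff(derived)                     # second differences

dmean = np.mean(np.abs(derived))    # 'dmean'  : mean absolute first difference
dvar = np.var(derived)              # 'dvar'   : variance of the first differences
dmean2 = np.mean(np.abs(derived2))  # 'dmean2' : mean absolute second difference
dvar2 = np.var(derived2)            # 'dvar2'  : variance of the second differences
print(dmean, dvar, dmean2, dvar2)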
Пример #51
0

def u_exact(t):
    return cos(t)


solver = odespy.RK4(f)
solver.set_initial_condition([1, 0])
N_P = 120  # no of periods
T = 2 * pi * N_P
Ns_per_period = [40, 80, 160, 320]
for N_per_period in Ns_per_period:
    N = N_per_period * N_P
    time_points = numpy.linspace(0, T, N + 1)
    u, t = solver.solve(time_points)

    x = u[:, 0]
    v = u[:, 1]
    error = u_exact(t) - x
    dt = t[1] - t[0]
    print(N_per_period, numpy.abs(error).max(), numpy.abs(error).max() / dt**4)

import sys
sys.exit(0)
from matplotlib.pyplot import *
i = -10 * N_per_period  # start index for the last 10 periods
plot(t[i:], error[i:], 'r-')
savefig('tmp.png')
savefig('tmp.pdf')
show()
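
The printout above divides the maximum error by dt**4 because RK4 is fourth-order accurate, so that ratio should stay roughly constant as dt is refined. A quick way to estimate the observed order from two such runs (the error values below are hypothetical):

import numpy as np

# hypothetical maximum errors for N_per_period = 40 and 80 (dt halved)
e_coarse, e_fine = 1.2e-4, 7.6e-6
order = np.log(e_coarse / e_fine) / np.log(2.0)
print(order)   # expect a value close to 4 for a fourth-order method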
Пример #52
0
def train():
    cfig = ConfigFactory()
    #set_gpu(0)
    dataset = 'A'
    # training dataset
    img_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/train_data/images/'
    gt_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/train_data/ground_truth/'
    # testing dataset
    val_img_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/test_data/images/'
    val_gt_root_dir = cfig.data_root_dir + r'part_' + dataset + r'_final/test_data/ground_truth/'

    cfig = ConfigFactory()

    # place holder
    input_img_placeholder = tf.placeholder(tf.float32,
                                           shape=(None, None, None, 3))
    density_map_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, None, None, 1))

    # network generation
    inference_density_map = multi_column_cnn(input_img_placeholder)

    # density map loss
    density_map_loss = 0.5 * tf.reduce_sum(
        tf.square(tf.subtract(density_map_placeholder, inference_density_map)))

    # joint training
    joint_loss = density_map_loss
    # optimizer = tf.train.MomentumOptimizer(configs.learing_rate, momentum=configs.momentum).minimize(joint_loss)
    # adam optimizer
    optimizer = tf.train.AdamOptimizer(cfig.lr).minimize(joint_loss)

    init = tf.global_variables_initializer()

    file_path = cfig.log_router

    # training log route
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    # model saver route
    if not os.path.exists(cfig.ckpt_router):
        os.makedirs(cfig.ckpt_router)
    log = open(cfig.log_router + cfig.name + r'_training.logs',
               mode='a+',
               encoding='utf-8')

    saver = tf.train.Saver(max_to_keep=cfig.max_ckpt_keep)
    ckpt = tf.train.get_checkpoint_state(cfig.ckpt_router)

    # start session
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        print('load model:', ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(init)

    data_loader = ImageDataLoader(img_root_dir,
                                  gt_root_dir,
                                  shuffle=True,
                                  downsample=True,
                                  pre_load=True)
    data_loader_val = ImageDataLoader(val_img_root_dir,
                                      val_gt_root_dir,
                                      shuffle=False,
                                      downsample=False,
                                      pre_load=True)
    # start training
    for i in range(cfig.start_iters, cfig.total_iters):
        # training
        index = 1
        for blob in data_loader:
            img, gt_dmp, gt_count = blob['data'], blob['gt_density'], blob[
                'crowd_count']
            feed_dict = {
                input_img_placeholder: (img - 127.5) / 128,
                density_map_placeholder: gt_dmp
            }
            _, inf_dmp, loss = sess.run(
                [optimizer, inference_density_map, joint_loss],
                feed_dict=feed_dict)
            format_time = str(
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            format_str = 'step %d, joint loss=%.5f, inference= %.5f, gt=%d'
            log_line = format_time, blob['fname'], format_str % (
                i * data_loader.num_samples + index, loss, inf_dmp.sum(),
                gt_count)
            log.writelines(str(log_line) + '\n')
            print(log_line)
            index = index + 1

        if i % 50 == 0:
            val_log = open(cfig.log_router + cfig.name + r'_validating_' +
                           str(i) + '_.logs',
                           mode='w',
                           encoding='utf-8')
            absolute_error = 0.0
            square_error = 0.0
            file_index = 1
            for blob in data_loader_val:
                img, gt_dmp, gt_count = blob['data'], blob['gt_density'], blob[
                    'crowd_count']
                feed_dict = {
                    input_img_placeholder: (img - 127.5) / 128,
                    density_map_placeholder: gt_dmp
                }
                inf_dmp, loss = sess.run([inference_density_map, joint_loss],
                                         feed_dict=feed_dict)
                format_time = str(
                    time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                format_str = 'step %d, joint loss=%.5f, inference= %.5f, gt=%d'
                absolute_error = absolute_error + np.abs(
                    np.subtract(gt_count, inf_dmp.sum())).mean()
                square_error = square_error + np.power(
                    np.subtract(gt_count, inf_dmp.sum()), 2).mean()
                log_line = format_time, blob['fname'], format_str % (
                    file_index, loss, inf_dmp.sum(), gt_count)
                val_log.writelines(str(log_line) + '\n')
                print(log_line)
                file_index = file_index + 1
            mae = absolute_error / data_loader_val.num_samples
            rmse = np.sqrt(square_error / data_loader_val.num_samples)
            val_log.writelines(
                str('MAE_' + str(mae) + '_MSE_' + str(rmse)) + '\n')
            val_log.close()
            print(str('MAE_' + str(mae) + '_MSE_' + str(rmse)))
            saver.save(sess, cfig.ckpt_router + '/v1', global_step=i + 1)
Пример #53
0
	total_dataset = form_pickle( dataset_loc, pickle_file )

train_x = total_dataset["train"]
test_x = total_dataset["test"]

scaler = MinMaxScaler()
scaled_train_x = scaler.fit_transform( train_x )
scaled_test_x = scaler.transform( test_x )

print("Loading model")
encoder = pickle.load( open(name,"rb") )

reconstructed, loss = encoder.encode_decode( scaled_test_x )

rec_rescaled = scaler.inverse_transform( reconstructed )
loss = np.abs( rec_rescaled - test_x )
text = "Average test set reconstruction error = {}".format(np.mean(loss))

reconstructed, loss = encoder.encode_decode( scaled_train_x )

rec_rescaled = scaler.inverse_transform( reconstructed )
loss = np.abs( rec_rescaled - train_x )
text += " \n Average train set reconstruction error = {}".format(np.mean(loss))
print( text )

layer = encoder.sparse_layer()
rep = layer.W[1,1:].reshape(28,28)
rep1 = layer.W[2,1:].reshape(28,28)

plt.imsave( arr=rep, cmap="gray", fname="internal_rep1"+outfile )
plt.imsave( arr=rep1, cmap="gray", fname="internal_rep2"+outfile )
Пример #54
0
def FBW(x, b, X):
    """ Force at position x for bulk+wall confinement """
    return -b * (np.abs(x) >= X).astype(int) * np.sign(x)
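
A short sketch of the piecewise behaviour of FBW: the force is zero in the bulk (|x| < X) and a constant restoring force -b*sign(x) at or beyond the wall. The parameter values are hypothetical:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(FBW(x, b=1.0, X=1.0))   # zero inside |x| < X, +/-b at or beyond the wall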
Пример #55
0
def geodgeo(p1, p2, j):
    """
    This subroutine (1) converts vertical local height (altitude) h and geodetic
    latitude xmu into geocentric coordinates r and theta (geocentric radial
    distance and colatitude, respectively; also known as ecef coordinates),
    as well as (2) performs the inverse transformation from {r,theta} to {h,xmu}.

    The subroutine uses World Geodetic System WGS84 parameters for the Earth's
    ellipsoid. The angular quantities (geo colatitude theta and geodetic latitude
    xmu) are in radians, and the distances (geocentric radius r and altitude h
    above the Earth's ellipsoid) are in kilometers.

    if j>0, the transformation is made from geodetic to geocentric coordinates using simple direct equations.
    if j<0, the inverse transformation from geocentric to geodetic coordinates is made by means of a fast iterative algorithm.

                  j>0            j<0
    input:    j, h,xmu        j, r,theta
    output:  j, r,theta        j, h,xmu

    Author:  N.A. Tsyganenko
    Date: Dec 5, 2007

    :param p1: Altitude h in km (if j>0) or geocentric distance r in km (if j<0).
    :param p2: Geodetic latitude xmu in radians (if j>0) or spherical co-latitude theta in radians (if j<0).
    :param j: Transformation direction: j>0 for geodetic-to-geocentric, j<0 for geocentric-to-geodetic.
    :return: (r, theta) if j>0, or (h, xmu) if j<0.
    """

    # r_eq is the semi-major axis of the earth's ellipsoid,
    # and beta is its second eccentricity squared
    r_eq, beta = 6378.137, 6.73949674228e-3

    if j > 0:  # Direct transformation(GEOD->GEO):
        h, xmu = [p1, p2]
        cosxmu = np.cos(xmu)
        sinxmu = np.sin(xmu)
        den = np.sqrt(cosxmu**2 + (sinxmu / (1 + beta))**2)
        coslam = cosxmu / den
        sinlam = sinxmu / (den * (1 + beta))
        rs = r_eq / np.sqrt(1 + beta * sinlam**2)
        x = rs * coslam + h * cosxmu
        z = rs * sinlam + h * sinxmu
        r = np.sqrt(x**2 + z**2)
        theta = np.arccos(z / r)
        return r, theta
    else:  # Inverse transformation(GEO->GEOD):
        r, theta = [p1, p2]
        phi = np.pi * 0.5 - theta
        phi1, dphi, h, xmu, tol = phi, 0, 0, 0, 1e-6
        for n in range(100):
            sp = np.sin(phi1)
            arg = sp * (1 + beta) / np.sqrt(1 + beta * (2 + beta) * sp**2)
            xmu = np.arcsin(arg)
            rs = r_eq / np.sqrt(1 + beta * np.sin(phi1)**2)
            cosfims = np.cos(phi1 - xmu)
            h = np.sqrt((rs * cosfims)**2 + r**2 - rs**2) - rs * cosfims
            z = rs * np.sin(phi1) + h * np.sin(xmu)
            x = rs * np.cos(phi1) + h * np.cos(xmu)
            rr = np.sqrt(x**2 + z**2)
            dphi = np.arcsin(z / rr) - phi
            phi1 -= dphi
            # Stop once the latest correction falls below the tolerance
            if np.abs(dphi) < tol:
                break
        return h, xmu
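
An illustrative round trip with geodgeo, converting a geodetic altitude/latitude pair to geocentric coordinates and back; the numeric inputs are hypothetical:

import numpy as np

h, xmu = 400.0, np.deg2rad(45.0)      # 400 km altitude, 45 deg geodetic latitude
r, theta = geodgeo(h, xmu, 1)         # j > 0: geodetic -> geocentric
h2, xmu2 = geodgeo(r, theta, -1)      # j < 0: geocentric -> geodetic
print(r, np.rad2deg(theta))           # geocentric radius (km) and colatitude (deg)
print(h2, np.rad2deg(xmu2))           # should closely match the inputs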
Пример #56
0
    u_train = u_train[idx, :]

    model = PhysicsInformedNN(X_u_train, u_train, X_f_train, layers, lb, ub, nu)

    start_time = time.time()
    model.train()
    elapsed = time.time() - start_time
    print("Training time: %.4f" % (elapsed))

    u_pred, f_pred = model.predict(X_star)

    error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2)
    print("Error u: %e" % (error_u))

    U_pred = griddata(X_star, u_pred.flatten(), (X, T), method="cubic")
    Error = np.abs(Exact - U_pred)

    ######################################################################
    ############################# Plotting ###############################
    ######################################################################

    fig, ax = newfig(1.0, 1.1)
    ax.axis("off")

    ####### Row 0: u(t,x) ##################
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1 - 0.06, bottom=1 - 1 / 3, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])

    h = ax.imshow(
        U_pred.T,
Пример #57
0
    # bootstrapped distribution of the CORRELATIONS between the original PANAS
    # scores and the projected X scores (like we estimated above!)
    #
    # We project the bootstrapped X matrix to the original component space and
    # then generate the cross-correlation matrix of the bootstrapped PANAS
    # scores with those projections
    Xbzs = zscore(Xb @ (U / np.linalg.norm(U, axis=0, keepdims=True)), ddof=1)
    y_corr_distrib[..., n] = (Ybz.T @ Xbzs) / (len(Xbzs) - 1)

    # Delete intermediate variables to reduce memory usage
    del Xbz, Ybz, Xbzs

# Calculate the standard error of the bootstrapped functional connection
# weights and generate bootstrap ratios from these values.
U_sum2 = (U_sum**2) / (n_boot + 1)
U_se = np.sqrt(np.abs(U_square - U_sum2) / (n_boot))
bootstrap_ratios = (U @ np.diag(sval)) / U_se

# Calculate the lower/upper confidence intervals of the bootstrapped PANAS scores
y_corr_ll, y_corr_ul = np.percentile(y_corr_distrib, [2.5, 97.5], axis=-1)

###############################################################################
# Now we can use these results to re-generate the entirety of Fig 1 from Mirchi
# et al., 2018!

# Community assignments for the MyConnectome dataset are stored online. We'll
# fetch and load those into an array so we can plot the bootstrap ratios sorted
# by communities.

from urllib.request import urlopen
Пример #58
0
                            prof_timeline = timeline.Timeline(run_metadata.step_stats)
                            prof_ctf = prof_timeline.generate_chrome_trace_format()
                            timeline_saver.update_timeline(prof_ctf)
                    #print('reduce '+str(c))
                end_time = time.time()

                if FLAGS.debug is True:
                    timeline_saver.save('./trace/reducer-'+str(task_index)+'-test'+str(repeat)+'.json')

                    if task_index == 0:
                        check = np.allclose(C_blocks[::2], truth_blocks[::2])
                        diff = np.subtract(C_blocks[::2], truth_blocks[::2])
                    else:
                        check = np.allclose(C_blocks[1::2], truth_blocks[1::2])
                        diff = np.subtract(C_blocks[1::2], truth_blocks[1::2]) 
                    print('correct: '+str(check)+' max err: '+str(np.amax(np.abs(diff.flatten()))))

                time_spent = (end_time - start_time)
                flops = ((N * N * N * 2 - N * N) / 10**9) / time_spent
                print('Test '+str(repeat)+' reducer: '+str(task_index)+': '+str(flops)+' Gflops/s '+str(time_spent)+' seconds')
                sess.run(barrier)
    else:
        config, cluster = create_cluster('reducer', FLAGS.num_reducers)
        server  = tf.train.Server(cluster.as_cluster_def(), job_name=job_name, task_index=task_index, config=config, protocol=FLAGS.protocol)
        with tf.train.MonitoredTrainingSession(master=server.target,
                                               is_chief=False,
                                               config=config) as sess:
            for test in range(FLAGS.num_tests):
                sess.run(barrier)
                sess.run(barrier)
Пример #59
0
def dics_source_power(info, forward, noise_csds, data_csds, reg=0.01,
                      label=None, pick_ori=None, verbose=None):
    """Dynamic Imaging of Coherent Sources (DICS).

    Calculate source power in time and frequency windows specified in the
    calculation of the data cross-spectral density matrix or matrices. Source
    power is normalized by noise power.

    NOTE : This implementation has not been heavily tested so please
    report any issues or suggestions.

    Parameters
    ----------
    info : dict
        Measurement info, e.g. epochs.info.
    forward : dict
        Forward operator.
    noise_csds : instance or list of instances of CrossSpectralDensity
        The noise cross-spectral density matrix for a single frequency or a
        list of matrices for multiple frequencies.
    data_csds : instance or list of instances of CrossSpectralDensity
        The data cross-spectral density matrix for a single frequency or a list
        of matrices for multiple frequencies.
    reg : float
        The regularization for the cross-spectral density.
    label : Label | None
        Restricts the solution to a given label.
    pick_ori : None | 'normal'
        If 'normal', rather than pooling the orientations by taking the norm,
        only the radial component is kept.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc : SourceEstimate | VolSourceEstimate
        Source power with frequency instead of time.

    Notes
    -----
    The original reference is:
    Gross et al. Dynamic imaging of coherent sources: Studying neural
    interactions in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
    """

    if isinstance(data_csds, CrossSpectralDensity):
        data_csds = [data_csds]

    if isinstance(noise_csds, CrossSpectralDensity):
        noise_csds = [noise_csds]

    def csd_shapes(x):
        return tuple(c.data.shape for c in x)

    if (csd_shapes(data_csds) != csd_shapes(noise_csds) or
       any(len(set(csd_shapes(c))) > 1 for c in [data_csds, noise_csds])):
        raise ValueError('One noise CSD matrix should be provided for each '
                         'data CSD matrix and vice versa. All CSD matrices '
                         'should have identical shape.')

    frequencies = []
    for data_csd, noise_csd in zip(data_csds, noise_csds):
        if not np.allclose(data_csd.frequencies, noise_csd.frequencies):
            raise ValueError('Data and noise CSDs should be calculated at '
                             'identical frequencies')

        # If CSD is summed over multiple frequencies, take the average
        # frequency
        if(len(data_csd.frequencies) > 1):
            frequencies.append(np.mean(data_csd.frequencies))
        else:
            frequencies.append(data_csd.frequencies[0])
    fmin = frequencies[0]

    if len(frequencies) > 2:
        fstep = []
        for i in range(len(frequencies) - 1):
            fstep.append(frequencies[i + 1] - frequencies[i])
        if not np.allclose(fstep, np.mean(fstep), 1e-5):
            warn('Uneven frequency spacing in CSD object, frequencies in the '
                 'resulting stc file will be inaccurate.')
        fstep = fstep[0]
    elif len(frequencies) > 1:
        fstep = frequencies[1] - frequencies[0]
    else:
        fstep = 1  # dummy value

    picks = _setup_picks(picks=None, info=info, forward=forward)

    is_free_ori, _, proj, vertno, G =\
        _prepare_beamformer_input(info, forward, label, picks=picks,
                                  pick_ori=pick_ori)

    n_orient = 3 if is_free_ori else 1
    n_sources = G.shape[1] // n_orient
    source_power = np.zeros((n_sources, len(data_csds)))
    n_csds = len(data_csds)

    logger.info('Computing DICS source power...')
    for i, (data_csd, noise_csd) in enumerate(zip(data_csds, noise_csds)):
        if n_csds > 1:
            logger.info('    computing DICS spatial filter %d out of %d' %
                        (i + 1, n_csds))

        Cm = data_csd.data

        # Calculating regularized inverse, equivalent to an inverse operation
        # after the following regularization:
        # Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
        Cm_inv = linalg.pinv(Cm, reg)

        # Compute spatial filters
        W = np.dot(G.T, Cm_inv)
        for k in range(n_sources):
            Wk = W[n_orient * k: n_orient * k + n_orient]
            Gk = G[:, n_orient * k: n_orient * k + n_orient]
            Ck = np.dot(Wk, Gk)

            if is_free_ori:
                # Free source orientation
                Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
            else:
                # Fixed source orientation
                Wk /= Ck

            # Noise normalization
            noise_norm = np.dot(np.dot(Wk.conj(), noise_csd.data), Wk.T)
            noise_norm = np.abs(noise_norm).trace()

            # Calculating source power
            sp_temp = np.dot(np.dot(Wk.conj(), data_csd.data), Wk.T)
            sp_temp /= max(noise_norm, 1e-40)  # Avoid division by 0

            if pick_ori == 'normal':
                source_power[k, i] = np.abs(sp_temp)[2, 2]
            else:
                source_power[k, i] = np.abs(sp_temp).trace()

    logger.info('[done]')

    subject = _subject_from_forward(forward)
    return _make_stc(source_power, vertices=vertno, tmin=fmin / 1000.,
                     tstep=fstep / 1000., subject=subject)
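
A hedged usage sketch for dics_source_power, assuming epochs, a forward solution, and precomputed noise/data cross-spectral densities already exist (all variable names below are placeholders):

stc = dics_source_power(epochs.info, forward,
                        noise_csds=noise_csd, data_csds=data_csd,
                        reg=0.05, pick_ori=None)
print(stc)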
Пример #60
0
def igrf_geo(r, theta, phi):
    """
    Calculates components of the main (internal) geomagnetic field in the spherical geographic
    (geocentric) coordinate system, using IAGA international geomagnetic reference model
    coefficients  (e.g., http://www.ngdc.noaa.gov/iaga/vmod/igrf.html, revised: 22 march, 2005)

    Before the first call of this subroutine, or if the time was changed,
    the model coefficients should be updated by calling the subroutine recalc

    Python version by Sheng Tian

    :param r: spherical geographic (geocentric) coordinates: radial distance r in units Re=6371.2 km
    :param theta: colatitude theta in radians
    :param phi: longitude phi in radians
    :return: br, btheta, bphi. Spherical components of the main geomagnetic field in nanotesla
        (positive br outward, btheta southward, bphi eastward)
    """

    # common /geopack2/ g(105),h(105),rec(105)
    global g, h, rec

    ct = np.cos(theta)
    st = np.sin(theta)
    minst = 1e-5
    # Flag near-polar points where sin(theta) is too small to divide by safely
    smlst = np.abs(st) < minst

    # In this new version, the optimal value of the parameter nm (maximal order of the spherical
    # harmonic expansion) is not user-prescribed, but calculated inside the subroutine, based
    # on the value of the radial distance r:
    irp3 = np.int64(r + 2)
    nm = np.int64(3 + 30 / irp3)
    if nm > 13: nm = 13
    k = nm + 1

    # r dependence is encapsulated here.
    a = np.empty(k)
    b = np.empty(k)
    ar = 1 / r  # a/r
    a[0] = ar * ar  # a[n] = (a/r)^(n+2).
    b[0] = a[0]  # b[n] = (n+1)(a/r)^(n+2)
    for n in range(1, k):
        a[n] = a[n - 1] * ar
        b[n] = a[n] * (n + 1)

    # t - short for theta, f - short for phi.
    br, bt, bf = [0.] * 3
    d, p = [0., 1]

    # m = 0. P^n,0
    m = 0
    smf, cmf = [0., 1]
    p1, d1, p2, d2 = [p, d, 0., 0]
    l0 = 0
    mn = l0
    for n in range(m, k):
        w = g[mn] * cmf + h[mn] * smf
        br += b[n] * w * p1  # p1 is P^n,m.
        bt -= a[n] * w * d1  # d1 is dP^n,m/dt.
        xk = rec[mn]
        # Eq 16c and its derivative on theta.
        d0 = ct * d1 - st * p1 - xk * d2  # dP^n,m/dt = ct*dP^n-1,m/dt - st*P_n-1,m - K^n,m*dP^n-2,m/dt
        p0 = ct * p1 - xk * p2  # P^n,m = ct*P^n-1,m - K^n,m*P^n-2,m
        d2, p2, d1 = [d1, p1, d0]
        p1 = p0
        mn += n + 1

    # Eq 16b and its derivative on theta.
    d = st * d + ct * p  # dP^m,m/dt = st*dP^m-1,m-1/dt + ct*P^m-1,m-1
    p = st * p  # P^m,m = st*P^m-1,m-1

    # Similarly for P^n,m
    l0 = 0
    for m in range(1, k):  # sum over m
        smf = np.sin(m * phi)  # sin(m*phi)
        cmf = np.cos(m * phi)  # cos(m*phi)
        p1, d1, p2, d2 = [p, d, 0., 0]
        tbf = 0.
        l0 += m + 1
        mn = l0
        for n in range(m, k):  # sum over n
            w = g[mn] * cmf + h[mn] * smf  # [g^n,m*cos(m*phi)+h^n,m*sin(m*phi)]
            br += b[n] * w * p1
            bt -= a[n] * w * d1
            tp = p1
            if smlst: tp = d1
            tbf += a[n] * (g[mn] * smf - h[mn] * cmf) * tp
            xk = rec[mn]
            d0 = ct * d1 - st * p1 - xk * d2  # dP^n,m/dt = ct*dP^n-1,m/dt - st*P_n-1,m - K^n,m*dP^n-2,m/dt
            p0 = ct * p1 - xk * p2  # P^n,m = ct*P^n-1,m - K^n,m*P^n-2,m
            d2, p2, d1 = [d1, p1, d0]
            p1 = p0
            mn += n + 1

        d = st * d + ct * p
        p = st * p

        # update B_phi.
        tbf *= m
        bf += tbf

    if smlst:
        if ct < 0.: bf = -bf
    else: bf /= st

    return br, bt, bf
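
Finally, a sketch of evaluating igrf_geo at a single point. It assumes the coefficient arrays g, h and rec have already been initialised (per the docstring, by calling recalc), and the numeric values are hypothetical:

import numpy as np

r = 1.1                        # geocentric distance in Earth radii (Re = 6371.2 km)
theta = np.deg2rad(60.0)       # colatitude in radians
phi = np.deg2rad(15.0)         # longitude in radians
br, btheta, bphi = igrf_geo(r, theta, phi)
print(br, btheta, bphi)        # field components in nT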