Example #1
File: growth_all.py Project: hellolj/randp
def iterate(v=U, 
        T=10,             # Number of iterates
        flag='chebyshev', # Approximation type
        cheby_order=12,   # Polynomial order in the Chebyshev case
        h=0.5):           # Smoothing parameter, kernel smoother case
    """
    Implements FVI with a given approximation scheme.  The flag and the code
    below create an object called 'approximator'.  This object must have the
    following two methods: set_vals() and __call__().  The first is used to
    set new function values (for approximating a new function), while the
    second is used to evaluate the approximation, taking grid points, function
    values, etc. as given.  The special name __call__ means that the object
    itself can be treated like a function.  For example, approximator(xvec) is
    equivalent to approximator.__call__(xvec).
    """
    if flag == 'chebyshev':
        approximator = CR(xmin, xmax, cheby_order, gridsize, extended=False)
    elif flag == 'lin_interp':
        approximator = LinInterp(grid, grid)
    elif flag == 'nearest':
        approximator = KNN(grid, grid)
    elif flag == 'ks':
        approximator = KS1D(grid, h=h, vals=grid)
    print """
    i = iteration
    e1 = distance from current iterate to true value function
    e2 = distance between current and last iterate
    """
    count = 0
    error_vals = []
    while count < T:
        # Record the function values of the current iterate, and plot
        current_vals = v(grid)
        grey = 1 - (count + 1.0) / T
        plt.plot(grid, current_vals, 'k-', color=str(grey), lw=2)
        # Update the approximator with new function values 
        if flag == 'chebyshev':
            new_vals = bellman(v, grid, integration_method='quadrature')
        else:
            new_vals = bellman(v, grid, integration_method='monte_carlo')
        approximator.set_vals(new_vals)
        v = approximator
        # Print errors
        e1 = max(abs(v(grid) - v_true(grid)))
        e2 = max(abs(v(grid) - current_vals))
        print "i = ", count, "; e1 = ", e1, "; e2 = ", e2
        error_vals.append(e2)
        count = count + 1
    return error_vals
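The docstring above specifies the approximator contract (set_vals() plus __call__()) without showing an implementation. A minimal sketch of an object satisfying it, assuming only NumPy (LinInterpSketch is a hypothetical stand-in for the LinInterp used above):

import numpy as np

class LinInterpSketch:
    """Piecewise-linear approximator exposing set_vals() and __call__()."""
    def __init__(self, grid, vals):
        self.grid = np.asarray(grid)
        self.vals = np.asarray(vals)

    def set_vals(self, new_vals):
        # Store values of the new function to be approximated
        self.vals = np.asarray(new_vals)

    def __call__(self, x):
        # Evaluate the approximation; lets the object be used like a function
        return np.interp(x, self.grid, self.vals)

approx = LinInterpSketch([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
print(approx(0.5))  # 0.5, equivalent to approx.__call__(0.5)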
Example #2
import numpy
import scipy
import scipy.ndimage

def skeletize_latecki(img, gaussian_filter):
    # Skeletonization following Latecki et al.:
    # http://www.cis.temple.edu/~latecki/Papers/icip__SSM07.pdf
    dt = numpy.float32
    if img.ndim == 3:
        img = img.mean(axis=2)    # collapse colour channels
    img = img.astype(dt)
    img /= img.max()              # normalise to [0, 1]
    gimg = scipy.ndimage.gaussian_filter(img, 2)
    ximg = img.max() - img        # inverted image
    dimg = scipy.ndimage.distance_transform_edt(ximg)
    #
    # Compute f from the smoothed gradient and the distance transform
    # (cgradient is assumed to return the complex gradient g[0] + 1j*g[1])
    fimg = 1 - scipy.abs(scipy.convolve(cgradient(gimg), dimg))
    #
    # The gradient of f seeds the gradient-vector-field diffusion
    u0, v0 = scipy.gradient(fimg)
    #
    # Diffusion and the SSM map; gvf holds the diffusion routine
    gvf = diffuse_gradient_vector
    #
    # Particular-point detection: keep local maxima of the distance
    # transform, weighted by the inverted image
    mimg1 = scipy.ndimage.maximum_filter(dimg, size=(3, 3))
    img = ((dimg - mimg1) == 0).astype(dt)
    img *= ximg
    return img
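The final particular-point step (local maxima of the distance transform) is useful on its own; a minimal runnable sketch, assuming only NumPy and SciPy:

import numpy as np
from scipy import ndimage

# Toy binary shape: a filled rectangle
shape = np.zeros((20, 40), dtype=np.float32)
shape[5:15, 5:35] = 1.0

# Distance from every foreground pixel to the nearest background pixel
dist = ndimage.distance_transform_edt(shape)

# Skeleton candidates: pixels equal to the maximum of their 3x3 neighbourhood
ridge = (dist == ndimage.maximum_filter(dist, size=(3, 3))) & (dist > 0)
print(np.argwhere(ridge))  # points along the rectangle's medial axis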
Example #3
def xenonlikefunc(x, mX, fracxsigmaT):  # flat-space erf fit version
    # x           = WIMP-nucleon SI cross section (pb, from micromegas)
    # mX          = WIMP mass
    # fracxsigmaT = fractional theory uncertainty in the cross section
    # For a given WIMP mass, work out the appropriate limit and sigma
    # from obslimitcurve and exp1sigmacurve. micromegas reports the
    # cross section in pb while the Xenon limit is stated in cm^2, so
    # convert before applying the limit.
    x = x * (10**-36)            # 1 pb = 10^-36 cm^2
    xsigmaT = x * fracxsigmaT    # absolute theory uncertainty in sigmaT

    if log10(mX) < minmX or log10(mX) > maxmX:
        # Do not apply constraints outside the mass range reported by
        # Xenon; what happens there is unknown.
        return 0
    if log10(mX) <= 1.3:
        # Below this mass the curves essentially merge and the cutoff is
        # sharp; the digitised exp+1sigma curve is only a linear
        # extrapolation here, so ignore it.
        limit = 10**obslimitcurve(log10(mX))  # x value at the observed limit
        sigma = 0                             # reduces the erf to a step function
    else:
        # Approximate the erf width in this region (in flat space this
        # time) by matching X = limit + sqrt(2)*sigma*Y through the
        # observed and expected+1sigma points.
        Y1 = Ytransf(Dchi2obs)
        X1 = 10**obslimitcurve(log10(mX))     # x value at the observed limit
        Y2 = Ytransf(Dchi2exp1sigma)
        X2 = 10**exp1sigmacurve(log10(mX))    # x value at the expected+1sigma limit
        limit = (X1*Y2 - X2*Y1) / (Y2 - Y1)               # mean of the matching erf
        sigma = (1/sqrt(2)) * abs((X2 - X1) / (Y2 - Y1))  # width of the matching erf
    # Theory error folded in in quadrature (computed but not returned)
    wtheory = LFs.logerfupper(x, limit, sqrt(sigma**2 + xsigmaT**2))
    bare = LFs.logerfupper(x, limit, sigma)
    return bare  # erf fit done in log space
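LFs.logerfupper is not shown in this listing; a minimal sketch of an erf-style upper-limit likelihood matching the calls above (the name, signature, and normalisation are assumptions inferred from usage):

import numpy as np
from scipy.special import erfc

def logerfupper(x, limit, sigma):
    # Log of a smoothed upper-limit likelihood: ~1 for x well below the
    # limit, falling off through 0.5 at x = limit with width sigma
    if sigma == 0:
        return 0.0 if x <= limit else -np.inf  # sharp step, as in the sigma = 0 branch
    return np.log(0.5 * erfc((x - limit) / (np.sqrt(2) * sigma)))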
Example #4
File: fpi.py Project: yimingli/edtc-code
def get_value(sigma, v):
    """Computes an approximation to v_sigma, the value
    of following policy sigma. Function v is a guess.
    """
    tol = 1e-2  # Error tolerance
    while True:
        new_v = T(sigma, v)
        err = max(abs(new_v(grid) - v(grid)))
        if err < tol:
            return new_v
        v = new_v
Example #5
def get_value(sigma, v):
    """Computes an approximation to v_sigma, the value
    of following policy sigma. Function v is a guess.
    """
    tol = 1e-5         # Error tolerance
    while True:
        new_v = T(sigma, v)
        err = max(abs(new_v(grid) - v(grid)))
        if err < tol:
            return new_v
        v = new_v
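get_value above iterates the operator T to a fixed point; a toy check with a contraction standing in for T (grid and T here are stand-ins, not the module's own):

import numpy as np

grid = np.linspace(0, 1, 5)

def T(sigma, v):
    # Toy policy-evaluation operator: a contraction of modulus 0.5 whose
    # fixed point is the constant function v*(x) = 2
    return lambda x: 1.0 + 0.5 * v(x)

v_sigma = get_value(None, lambda x: np.zeros_like(x))  # sigma is unused by the toy T
print(v_sigma(grid))  # ~[2. 2. 2. 2. 2.], within tol of the fixed point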
Example #6
File: PCA.py Project: Dfred/concept-robot
    def similarity(self,fir1,fir2):
        '''Compute the similarity of two faces'''
        assert self.trained

        if self.measure == PCA_L1:
            return (scipy.abs(fir1-fir2)).sum()

        if self.measure == PCA_L2:
            return scipy.sqrt(((fir1-fir2)*(fir1-fir2)).sum())

        if self.measure == PCA_COS:
            return (fir1*fir2).sum()
        
        raise NotImplementedError("Unknown distance measure: %d"%self.measure)
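For intuition, the three measures on a pair of unit-length feature vectors (plain NumPy standing in for the scipy aliases above):

import numpy as np

fir1 = np.array([0.6, 0.8])
fir2 = np.array([0.8, 0.6])
print(np.abs(fir1 - fir2).sum())          # PCA_L1: 0.4
print(np.sqrt(((fir1 - fir2)**2).sum()))  # PCA_L2: ~0.283
print((fir1 * fir2).sum())                # PCA_COS: 0.96 (cosine, since both have unit norm)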
Example #7
def PlotPowerSpectrum(g, x0, N, args=()):
    """
    Plot Fourier spectrum of N points (after 1000 transient points)
    """
    sig = IterateList(g, x0, N+1000, args)[1000:]
    sig_freq = sp.fftpack.fftfreq(sig.size, d=1)
    sig_fft  = sp.fftpack.fft(sig)
    pidxs    = sp.where(sig_freq > 0)
    freqs, power = sig_freq[pidxs], sp.abs(sig_fft)[pidxs]
    pylab.plot(freqs, power)
    pylab.xlabel('Frequency [Hz]')
    pylab.ylabel('Power')
    pylab.show()
    print("Peak Frequency:")
    print(freqs[power.argmax()])
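IterateList is not shown in this listing; a minimal sketch of what the call above assumes (name and signature inferred from usage):

import numpy as np

def IterateList(g, x0, N, args=()):
    # Return the orbit x0, g(x0), g(g(x0)), ... of the map g as an array
    out = np.empty(N)
    x = x0
    for i in range(N):
        out[i] = x
        x = g(x, *args)
    return out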
Example #8
File: PCA.py Project: wolfram2012/MOSSE
    def similarity(self, fir1, fir2):
        '''Compute the similarity of two faces'''
        assert self.trained

        if self.measure == PCA_L1:
            return (scipy.abs(fir1 - fir2)).sum()

        if self.measure == PCA_L2:
            return scipy.sqrt(((fir1 - fir2) * (fir1 - fir2)).sum())

        if self.measure == PCA_COS:
            return (fir1 * fir2).sum()

        raise NotImplementedError("Unknown distance measure: %d" %
                                  self.measure)
Example #9
    def __follow_car(self, previous):
        temp_a = MAX_ACC
        if not previous:
            # No car ahead: this car is the platoon leader
            if self.speed <= TURN_MAX_V:
                temp_a = car.__engine_speed_up_acc_curve(self,
                                                         self.speed,
                                                         p=0.3)
            elif self.speed > MAX_V:
                delta_v = sp.abs(self.speed - MAX_V)
                temp_a = -car.__engine_speed_up_acc_curve(
                    self, self.speed - delta_v, p=0.3) * 0.5
        else:
            v1 = self.speed  # own speed
            v2 = previous.speed  # speed of the car ahead
            if previous.acc < 0.0:
                v2 += AI_DT * previous.acc
            v1 = v1 if v1 > 0 else 0.0
            v2 = v2 if v2 > 0 else 0.0
            s = car.__calc_pure_interDistance(self, previous)
            safer_distance = DES_PLATOON_INTER_DISTANCE
            follow_dis = self.length / 4.47 * v1 + safer_distance
            s -= follow_dis
            if s <= 0.0:
                temp_a = MIN_ACC
            else:
                temp_a = 2.0 * (s / 2.0 - v1 * AI_DT) / (AI_DT * AI_DT)

            if s <= follow_dis:
                if temp_a > 0.0:
                    temp_a /= 2.0

        # Clamp the acceleration to [MIN_ACC, MAX_ACC]
        if temp_a > MAX_ACC:
            self.acc = MAX_ACC
        elif temp_a < MIN_ACC:
            self.acc = MIN_ACC
        if temp_a < self.acc:
            self.acc = temp_a
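The quadratic step above comes from aiming to close half of the remaining gap s in one planner step: s/2 = v1*AI_DT + a*AI_DT**2/2, solved for a. A quick numeric check (the constants are stand-in values, not the module's):

AI_DT = 0.5          # planner time step (stand-in)
v1, s = 10.0, 30.0   # own speed and gap left after the follow distance
a = 2.0 * (s / 2.0 - v1 * AI_DT) / (AI_DT * AI_DT)
print(a)  # 80.0, which the clamp at the end would cap at MAX_ACC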
Example #10
def SEAerror(x: np.ndarray, xhat: np.ndarray):

    assert x.shape == xhat.shape, "Arrays need to have the same shape"
    assert xhat.shape[0] == 2, \
        "Polygons have to be simple, only trivially guaranteed in 2d"
    #https://math.blogoverflow.com/2014/06/04/greens-theorem-and-area-of-polygons/

    N = x.shape[1]
    SEA = np.empty((N - 1, ))

    for i in range(N - 1):
        # The ith polygon has vertices xhat_i, xhat_{i+1}, x_{i+1}, x_i.
        # Green's theorem (shoelace): summing (X_a + X_b)*(Y_b - Y_a) over
        # the edges a -> b gives twice the signed area. Integration would
        # normally run ccw with the outward normal; the orientation here
        # is unknown, so the sign may flip, hence the abs() below.
        A = (xhat[0, i + 1] + xhat[0, i]) * (xhat[1, i + 1] - xhat[1, i])
        A += (x[0, i + 1] + xhat[0, i + 1]) * (x[1, i + 1] - xhat[1, i + 1])
        A += (x[0, i] + x[0, i + 1]) * (x[1, i] - x[1, i + 1])
        A += (xhat[0, i] + x[0, i]) * (xhat[1, i] - x[1, i])

        SEA[i] = abs(A) / 2.

    return SEA
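A quick sanity check of SEAerror: two polylines one unit apart in y make each quadrilateral between consecutive vertex pairs a unit square, so every entry of the result should be 1:

import numpy as np

x = np.array([[0., 1., 2.], [0., 0., 0.]])     # reference curve
xhat = np.array([[0., 1., 2.], [1., 1., 1.]])  # estimate shifted by 1 in y
print(SEAerror(x, xhat))  # [1. 1.]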
Example #11
def get_avg_sep():
    real = tau_class.TauClass('../../data/delta-2.fits.gz')
    real.get_data(skewers_perc=1., ylabel='DELTA')

    avg_r = real.pixel_data[:, 2].mean()
    print("Average distance is {}".format(avg_r))

    ra, dec = real.q_loc[:, 0], real.q_loc[:, 1]

    delta_ra = sp.abs(ra - ra[:, None])
    delta_dec = sp.abs(dec - dec[:, None])

    angle = 2 * sp.arcsin(
        sp.sqrt(
            sp.sin(delta_dec / 2.)**2 +
            sp.cos(dec) * sp.cos(dec[:, None]) * sp.sin(delta_ra / 2.)**2))

    # remove self-distances
    sp.fill_diagonal(angle, sp.inf)

    min_angle = angle.min(0) * 180 / sp.pi
    avg_angle = min_angle.mean()

    p_cmd = "Average distance is {} h^-1 Mpc \n".format(avg_r)
    p_cmd += "Average angle is {} degree \n".format(avg_angle)

    avg_sep = avg_angle * avg_r * sp.pi / 180
    p_cmd += "Average transverse separation is {} h^-1 Mpc \n".format(avg_sep)

    print(p_cmd)
    # *************************************************************************

    simul = tau_class.TauClass('../../data/simulations/'
                               'v6.0.1_delta_transmission_RMplate.fits')
    simul.get_data(skewers_perc=1.)

    avg_r = simul.pixel_data[:, 2].mean()
    print("Average distance is {}".format(avg_r))

    ra, dec = sp.deg2rad(simul.q_loc[:, 0]), sp.deg2rad(simul.q_loc[:, 1])

    delta_ra = sp.abs(ra - ra[:, None])
    delta_dec = sp.abs(dec - dec[:, None])

    angle = 2 * sp.arcsin(
        sp.sqrt(
            sp.sin(delta_dec / 2.)**2 +
            sp.cos(dec) * sp.cos(dec[:, None]) * sp.sin(delta_ra / 2.)**2))

    # remove self-distances
    sp.fill_diagonal(angle, sp.inf)

    min_angle = angle.min(0) * 180 / sp.pi
    avg_angle = min_angle.mean()

    p_cmd = "Average distance is {} h^-1 Mpc \n".format(avg_r)
    p_cmd += "Average angle is {} degree \n".format(avg_angle)

    avg_sep = avg_angle * avg_r * sp.pi / 180
    p_cmd += "Average transverse separation is {} h^-1 Mpc \n".format(avg_sep)

    print(p_cmd)
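Both halves of get_avg_sep repeat the same nearest-neighbour haversine computation; a factored sketch of that shared core (plain NumPy standing in for the scipy aliases, with ra and dec in radians):

import numpy as np

def avg_transverse_sep(ra, dec, avg_r):
    # Haversine angle between every pair of sightlines
    delta_ra = np.abs(ra - ra[:, None])
    delta_dec = np.abs(dec - dec[:, None])
    angle = 2 * np.arcsin(np.sqrt(
        np.sin(delta_dec / 2.)**2 +
        np.cos(dec) * np.cos(dec[:, None]) * np.sin(delta_ra / 2.)**2))
    np.fill_diagonal(angle, np.inf)              # drop self-pairs
    avg_angle = np.degrees(angle.min(0)).mean()  # mean nearest-neighbour angle, degrees
    return avg_angle * avg_r * np.pi / 180       # transverse separation, units of avg_r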
Example #12
    def show(self):
        # Display |T|^2 as a matrix image
        pylab.matshow(sp.abs(self.T)**2.0)
Example #13
    def find_nearest(self, a, a0):
        # Return the element of a whose value is closest to a0
        idx = scipy.abs(a - a0).argmin()
        return a.flat[idx]
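The same lookup as a standalone function, for a quick check:

import numpy as np

def find_nearest(a, a0):
    # Element of a whose value is closest to the scalar a0
    return a.flat[np.abs(a - a0).argmin()]

print(find_nearest(np.array([1.0, 2.5, 4.0]), 2.2))  # 2.5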
Example #14
def gradient_magnitude(img):
    # Pack the two gradient components into one complex array so that
    # abs() returns sqrt(g0**2 + g1**2)
    g = scipy.gradient(img)
    return scipy.abs(g[0] + 1j*g[1])
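The complex-number trick computes sqrt(g0**2 + g1**2); np.hypot gives the same result directly:

import numpy as np

def gradient_magnitude(img):
    gy, gx = np.gradient(img)  # row and column derivatives
    return np.hypot(gy, gx)    # identical to np.abs(gy + 1j*gx)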
Example #15
File: prog.py Project: marcocaccin/MarcoGP
    # Fit to data using Maximum Likelihood Estimation of the parameters
    gp.fit(eigX, y)
    print "TIMER teach", time.clock() - ttt

    # # Make the prediction on training set
    # y_pred, MSE = gp.predict(eigX, eval_MSE=True)
    # sigma = sp.sqrt(MSE)
    # print('\n training set:')
    # print('MAE:  %5.2f kcal/mol'%sp.abs(y_pred-y).mean(axis=0))
    # print('RMSE: %5.2f kcal/mol'%sp.square(y_pred-y).mean(axis=0)**.5)
    # Make the prediction on test set
    y_pred, MSE = gp.predict(eigt, eval_MSE=True)
    sigma = sp.sqrt(MSE)
    print('\n test set:')
    print('MAE:  %5.2f kcal/mol' % sp.abs(y_pred - Ttest.ravel()).mean(axis=0))
    print('RMSE: %5.2f kcal/mol' %
          sp.square(y_pred - Ttest.ravel()).mean(axis=0)**.5)
    alpha.append(gp.alpha)
    covmat.append(gp.K)

# P = dataset['P'][range(0,split)+range(split+1,5)].flatten()
# X = dataset['X'][P]
# T = dataset['T'][P]
# print "TIMER load_data", time.clock() - ttt

# # --------------------------------------------
# # Extract feature(s) from training data
# # --------------------------------------------
# # in this case, only sorted eigenvalues of Coulomb matrix
# ttt = time.clock()