Example #1
    def test_power_zero(self):
        # ticket #1271
        zero = np.array([0j])
        one = np.array([1 + 0j])
        cinf = np.array([complex(np.inf, 0)])
        cnan = np.array([complex(np.nan, np.nan)])

        def assert_complex_equal(x, y):
            x, y = np.asarray(x), np.asarray(y)
            assert_array_equal(x.real, y.real)
            assert_array_equal(x.imag, y.imag)

        # positive powers
        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
            assert_complex_equal(np.power(zero, p), zero)

        # zero power
        assert_complex_equal(np.power(zero, 0), one)
        assert_complex_equal(np.power(zero, 0 + 1j), cnan)

        # negative power
        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
            assert_complex_equal(np.power(zero, -p), cnan)
        assert_complex_equal(np.power(zero, -1 + 0.2j), cnan)

    def test_fast_power(self):
        x = np.array([1, 2, 3], np.int16)
        assert (x ** 2.00001).dtype is (x ** 2.0).dtype
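The behavior exercised by these tests can be reproduced directly; a minimal sketch, silencing the invalid-value warnings NumPy emits for the nan cases:

import numpy as np

zero = np.array([0j])
with np.errstate(all="ignore"):
    print(np.power(zero, 2))    # [0.+0.j]
    print(np.power(zero, 0))    # [1.+0.j]
    print(np.power(zero, -1))   # [nan+nanj]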
Example #2
    def test_convolve2d_king(self):
        
        gfn = lambda r, s: np.power(2*np.pi*s**2,-1)*np.exp(-r**2/(2*s**2))
        kfn = lambda r, s, g: np.power(2*np.pi*s**2,-1)*(1.-1./g)* \
            np.power(1+0.5/g*(r/s)**2,-g)

        kfn0 = lambda x, y, mux, muy, s, g: kfn(np.sqrt((x-mux)**2+(y-muy)**2),s,g)

        xaxis = Axis.create(-3,3,501)
        yaxis = Axis.create(-3,3,501)

        x, y = np.meshgrid(xaxis.center,yaxis.center)
        xbin, ybin = np.meshgrid(xaxis.width,yaxis.width)

        r = np.sqrt(x**2+y**2)

        # Scalar Input

        mux = 0.5
        muy = -0.2
        mur = (mux**2+muy**2)**0.5

        gsig = 0.1
        ksig = 0.2
        kgam = 4.0

        fval0 = np.sum(kfn0(x,y,mux,muy,ksig,kgam)*gfn(r,gsig)*xbin*ybin)
        fval1 = convolve2d_king(lambda t: gfn(t,gsig),mur,ksig,kgam,3.0,
                                nstep=10000)
#        fval2 = convolve2d_gauss(lambda t: kfn(t,ksig,kgam),mur,gsig,3.0,
#                                 nstep=1000)
#        print fval0, fval1, fval2, fval1/fval0

        assert_almost_equal(fval0,fval1,4)
Example #3
def updateMatrix(centroids, X_Features, n_points, n_clusters, n_attributes, alpha):

    # membership_mat was referenced but never defined in this snippet;
    # initialize it here so the function is self-contained
    membership_mat = np.zeros((n_points, n_clusters))

    exp = 1 / (float(alpha - 1))
    for x in range(n_clusters):
        centroid = centroids[x]
        for y in range(n_points):

            hammingDist = dissimilarityMeasure(centroid, X_Features[y])
            numerator = np.power(hammingDist, exp)
            denom = 0.0
            flag = 0

            for z in range(n_clusters):
                if (centroids[z] == X_Features[y]).all() and (centroids[z] == centroid).all():
                    membership_mat[y][x] = 1
                    flag = 1
                    break
                elif (centroids[z] == X_Features[y]).all():
                    membership_mat[y][x] = 0
                    flag = 1
                    break

                denom += np.power(dissimilarityMeasure(centroids[z], X_Features[y]), exp)

            if flag == 0:
                membership_mat[y][x] = 1 / (float(numerator) / float(denom))

    for row in range(len(membership_mat)):
        membership_mat[row] = membership_mat[row] / sum(membership_mat[row])

    cost_function = costFunction(membership_mat, n_clusters, n_points, alpha, centroids, X_Features)
    return membership_mat, cost_function
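dissimilarityMeasure and costFunction are helpers assumed by this snippet. For categorical features the dissimilarity is typically the simple-matching (Hamming) distance; a minimal, hypothetical stand-in could look like:

import numpy as np

def dissimilarityMeasure(centroid, point):
    # hypothetical stand-in: count of mismatched attributes
    return np.sum(centroid != point)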
Example #4
    def test_calculatePowerCoefficients(self):
        radius = 35.5
        self.pcPadded.calculatePowerCoefficients(radius)
        windSpeed = 10
        # pi assumed imported from math; .ix was removed from modern pandas, .loc assumed equivalent here
        availablePower = 0.5 * pi * np.power(radius, 2) * np.power(windSpeed, 3) * 1.225
        self.assertEqual(round(self.pcPadded.data.loc[20, 'powerCoefficient'], 2),
                         round(self.pcPadded.data.loc[20, 'powerInKilowatts'] * 1000 / availablePower, 2))
Example #5
  def compute_distances_no_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using no explicit loops.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train)) 
    #########################################################################
    # TODO:                                                                 #
    # Compute the l2 distance between all test points and all training      #
    # points without using any explicit loops, and store the result in      #
    # dists.                                                                #
    # HINT: Try to formulate the l2 distance using matrix multiplication    #
    #       and two broadcast sums.                                         #
    #########################################################################
    # squared norms of the test and train points, combined with two broadcast
    # sums, minus twice the cross products (matches the hint above)
    sums_AB = np.power(X, 2).sum(axis=1)[:, np.newaxis] + \
              np.power(self.X_train, 2).sum(axis=1)[np.newaxis, :]
    prod_AB = X.dot(self.X_train.T)
    dists = np.power(sums_AB - 2 * prod_AB, 0.5)
    #########################################################################
    #                         END OF YOUR CODE                              #
    #########################################################################
    return dists
Example #6
def evaluation_10_fold(root='./result/pytorch_result.mat'):
    ACCs = np.zeros(10)
    result = scipy.io.loadmat(root)
    for i in range(10):
        fold = result['fold']
        flags = result['flag']
        featureLs = result['fl']
        featureRs = result['fr']

        valFold = fold != i
        testFold = fold == i
        flags = np.squeeze(flags)

        mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0)
        mu = np.expand_dims(mu, 0)
        featureLs = featureLs - mu
        featureRs = featureRs - mu
        featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1)
        featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1)

        scores = np.sum(np.multiply(featureLs, featureRs), 1)
        threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000)
        ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold)
    #     print('{}    {:.2f}'.format(i+1, ACCs[i] * 100))
    # print('--------')
    # print('AVE    {:.2f}'.format(np.mean(ACCs) * 100))
    return ACCs
Example #7
    def second_order_score(y, mean, scale, shape, skewness):
        """ GAS Skew t Update term potentially using second-order information - native Python function

        Parameters
        ----------
        y : float
            datapoint for the time series

        mean : float
            location parameter for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Adjusted score of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        if (y-mean)>=0:
            return ((shape+1)/shape)*(y-mean)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
        else:
            return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
Example #8
def check_kurt_expect(distfn, arg, m, v, k, msg):
    if np.isfinite(k):
        m4e = distfn.expect(lambda x: np.power(x-m, 4), arg)
        npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5,
                err_msg=msg + ' - kurtosis')
    else:
        npt.assert_(np.isnan(k))
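As a concrete check, the identity E[(X-m)^4] = (k + 3) * v**2 holds for the standard normal distribution, whose excess kurtosis k is 0; a quick sketch:

import numpy as np
from scipy import stats

m, v, s, k = stats.norm.stats(moments='mvsk')
m4 = stats.norm.expect(lambda x: np.power(x - m, 4))
print(np.isclose(m4, (k + 3.) * np.power(v, 2)))  # True: the 4th central moment of N(0,1) is 3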
Example #9
def check_skew_expect(distfn, arg, m, v, s, msg):
    if np.isfinite(s):
        m3e = distfn.expect(lambda x: np.power(x-m, 3), arg)
        npt.assert_almost_equal(m3e, s * np.power(v, 1.5),
                decimal=5, err_msg=msg + ' - skew')
    else:
        npt.assert_(np.isnan(s))
Example #10
def std(f):
    x = np.array(range(len(f)))
    # normalize; we do not prefer attributes with many values
    x = x / x.mean()
    xf = np.multiply(f, x)
    x2f = np.multiply(f, np.power(x, 2))
    return np.sqrt((np.sum(x2f) - np.power(np.sum(xf), 2) / np.sum(f)) / (np.sum(f) - 1))
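Here f acts as a vector of frequencies over equally spaced attribute values, and the expression is the frequency-weighted sample standard deviation (ddof=1). A quick cross-check against expanding the frequencies into raw observations (toy values assumed):

import numpy as np

f = np.array([5., 3., 2.])                 # toy frequencies for three values
x = np.arange(len(f)) / np.arange(len(f)).mean()
raw = np.repeat(x, f.astype(int))          # expand counts into raw observations
print(np.isclose(std(f), np.std(raw, ddof=1)))  # True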
Example #11
    def reg_score_function(X, y, mean, scale, shape, skewness):
        """ GAS Skew t Regression Update term using gradient only - native Python function

        Parameters
        ----------
        X : float
            datapoint for the right hand side variable
    
        y : float
            datapoint for the time series

        mean : float
            location parameter for the Skew t distribution

        scale : float
            scale parameter for the Skew t distribution

        shape : float
            tail thickness parameter for the Skew t distribution

        skewness : float
            skewness parameter for the Skew t distribution

        Returns
        ----------
        - Score of the Skew t family
        """
        m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
        mean = mean + (skewness - (1.0/skewness))*scale*m1
        if (y-mean)>=0:
            return ((shape+1)/shape)*((y-mean)*X)/(np.power(skewness*scale,2) + (np.power(y-mean,2)/shape))
        else:
            return ((shape+1)/shape)*((y-mean)*X)/(np.power(scale,2) + (np.power(skewness*(y-mean),2)/shape))
Example #12
def EN_CID(y):
    """
    CID measure from Batista, G. E. A. P. A., Keogh, E. J., Tataw, O. M. & de
    Souza, V. M. A. CID: an efficient complexity-invariant distance for time
    series. Data Min Knowl. Disc. 28, 634-669 (2014).
    
    Arguments
    ---------

    y: a nitime time-series object, or numpy vector

    """

    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))

    # Prepare the output dictionary
    out = {}
    
    # Original definition (in Table 2 of paper cited above)
    out['CE1'] = np.sqrt(np.mean(np.power(np.diff(y), 2)))  # sum -> mean to deal with non-equal time-series lengths

    # Definition corresponding to the line segment example in Fig. 9 of the paper
    # cited above (using Pythagoras's theorem):
    out['CE2'] = np.mean(np.sqrt(1 + np.power(np.diff(y), 2)))

    return out
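The CE1 estimator can be exercised directly on raw vectors, sidestepping the makeRowVector/vectorize helpers the function assumes; a smooth series should score lower than a noisy one:

import numpy as np

t = np.linspace(0, 2 * np.pi, 500)
for y in (np.sin(t), np.random.randn(500)):
    ce1 = np.sqrt(np.mean(np.power(np.diff(y), 2)))
    print(ce1)  # noticeably larger for the random-noise series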
Example #13
def power_sutera_reweighing(Y, f_pow=lambda x: 1):
    """Re-weights the time series, giving more weight to its values at times
    of low global activity.

    References
    ---------
    .. [1] Antonio Sutera et al. Simple connectome inference from partial
    correlation statistics in calcium imaging

    """

    ## 0. Prepare variables needed
    m = Y.shape[1]
    global_y = np.sum(Y, axis=1)

    ## 1. Transformation
    Yt = np.zeros(Y.shape)
    for j in range(m):
        Yt[:, j] = np.power((Y[:, j] + 1.),
                            np.power((1.+np.divide(1., global_y)),
                                     f_pow(global_y)))
    # Correct global 0
    Yt[global_y == 0, :] = 1.

    return Yt
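A toy usage sketch, with random data standing in for calcium-imaging traces: frames with low global activity get a larger exponent, so their values are amplified relative to busy frames.

import numpy as np

Y = np.random.rand(100, 5)        # 100 frames x 5 neurons, illustrative only
Yt = power_sutera_reweighing(Y)
print(Yt.shape)                   # (100, 5)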
Example #14
def _fit_quad_to_peak(x, y):
    """
    Fits a quadratic to the data points handed in,
    of the form y = b[0](x-b[1])**2 + b[2]

    x -- locations
    y -- values

    returns (b, R2)

    """

    lenx = len(x)

    # some sanity checks
    if lenx < 3:
        raise Exception('insufficient points handed in ')
    # set up fitting array
    X = np.vstack((x ** 2, x, np.ones(lenx))).T
    # use linear least squares fitting
    beta, _, _, _ = np.linalg.lstsq(X, y, rcond=None)

    SSerr = np.sum(np.power(np.polyval(beta, x) - y, 2))
    SStot = np.sum(np.power(y - np.mean(y), 2))
    # re-map the returned value to match the form we want
    ret_beta = (beta[0],
                -beta[1] / (2 * beta[0]),
                beta[2] - beta[0] * (beta[1] / (2 * beta[0])) ** 2)

    return ret_beta, 1 - SSerr / SStot
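A usage sketch on synthetic data with a known peak (the coefficients here are arbitrary):

import numpy as np

x = np.linspace(-1, 1, 50)
y = -2.0 * (x - 0.3) ** 2 + 5.0 + 0.01 * np.random.randn(50)
(b0, b1, b2), r2 = _fit_quad_to_peak(x, y)
print(b0, b1, b2, r2)  # roughly -2.0, 0.3, 5.0, and an R^2 close to 1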
Example #15
    def molar_heat_capacity_p(self, pressure, temperature, volume, params):
        """
        Returns the heat capacity [J/K/mol] as a function of pressure [Pa]
        and temperature [K].
        """
        a, b, c = mt.tait_constants(params)
        T = temperature
        T_e = params['T_einstein']
        n = params['n']
        Pth = self.__relative_thermal_pressure(T, params)

        ksi_over_ksi_0 = einstein.molar_heat_capacity_v(T, T_e, n) \
                         / einstein.molar_heat_capacity_v(params['T_0'], T_e, n)

        dintVdpdT = (params['V_0'] * params['a_0'] * params['K_0'] * a * ksi_over_ksi_0) * (
            np.power((1. + b * (pressure - params['P_0'] - Pth)), 0. - c) - np.power((1. - b * Pth), 0. - c))

        dSdT0 = params['V_0'] * params['K_0'] * np.power((ksi_over_ksi_0 * params['a_0']), 2.0) * \
                (np.power((1. + b * (pressure - params['P_0'] - Pth)), -1. - c) -
                 np.power((1. + b * (-Pth)), -1. - c))

        x = T_e/T
        dCv_einstdT = -(einstein.molar_heat_capacity_v(T, T_e, n) *
                        ( 1 - 2./x + 2./(np.exp(x) - 1.) ) * x/T)

        dSdT1 = -dintVdpdT * dCv_einstdT \
                / einstein.molar_heat_capacity_v(T, T_e, n)

        dSdT = dSdT0 + dSdT1
        return self.molar_heat_capacity_p0(temperature, params) + temperature * dSdT
Example #16
def Error(mat):
    """Frobenius norm of a matrix."""
    rows, cols = mat.shape
    mat_2 = np.power(mat, 2)
    sum_ele = mat_2.cumsum()[-1]
    error = np.power(sum_ele, 0.5)
    return error
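Since the function reduces to the Frobenius norm, it can be sanity-checked against np.linalg.norm:

import numpy as np

mat = np.arange(6, dtype=float).reshape(2, 3)
print(np.isclose(Error(mat), np.linalg.norm(mat)))  # True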
Example #17
def p_jds(Z, L, U, C, M):
    """
    Input:
         coefficient matrix Z
         desired numbers of dynamic active sets L
         dictionary atom label vector U
         the number of classes C
         the number of views M
    Output:
         Index matrix I for top-L dynamic active sets
    """   
    #Initialize
    I = np.zeros((L, M))
    V = np.zeros((C, M))
    _I = np.zeros((C, M), dtype=int)   # holds indices, so use an integer dtype
    S = np.zeros(C)
    for l in range(L):
        for i in range(C):
            # c holds the index values for atoms with label i
            c = find(U, i)
            for m in range(M):
                v, t = Max(Z[c, m])
                V[i, m] = v
                _I[i, m] = c[t]
            tmp = np.cumsum(np.power(V[i], 2))[-1]
            S[i] = np.power(tmp, 0.5)
        _v, _t = Max(S)
        I[l, :] = _I[_t,:]
        Z[_I[_t,:]] = 0
    return I 
Example #18
    def energiemessung_hist(xmin, n, sigma):
        x_min = np.power(xmin, GAMMA)
        x_max = 0

        rnd1 = r.TRandom3(1)
        rnd2 = r.TRandom3(2)
        rnd3 = r.TRandom3(3)

        hist_energie = r.TH1D("hist_energie", "hist_energie", 20, 0, 5)
        hist_energie.GetXaxis().SetTitle("log(number of hits)")
        hist_energie.GetYaxis().SetTitle("number of events")
        
        for i in range(n):
            ereignis = 1./np.power(x_min + rnd1.Rndm()*(x_max - x_min), 1./GAMMA)
            y = np.power((1-m.exp(-1*ereignis/2)),3)
            y_rndm = rnd2.Rndm()
            
            if (y_rndm <= y):
                while(1):    # always true
                    v1 = rnd3.Rndm()*2 - 1
                    v2 = rnd3.Rndm()*2 - 1
                    q = v1*v1 + v2*v2
                    if ((q>=1) or (q==0)): # check whether the rejection condition is met
                        continue
                    else:
                        z1 = m.sqrt((-2*np.log(q))/q) # Gaussian-distributed random number
                        x1 = v1*z1
                        # transform the Gaussian from (0,1) to (E, 0.2E)
                        x1 = sigma*ereignis*x1 + ereignis
                        hits = np.int_(10*x1)
                        if(hits>0):
                            hist_energie.Fill(np.log10(hits))
                        break

        return hist_energie
Example #19
    def bcaQuantile(self, estimate, bootDist, data, mu0, mu1, nSamples, n):

        tmp = sum(y <= estimate for y in bootDist) / float(nSamples + 1)
        if tmp > 0 and tmp < 1:

            # bias correction
            z0 = s.norm.ppf(tmp)

            # acceleration
            x = np.zeros(n)
            for i in range(n):
                x[i] = self.estimateSingleFraction(mu1, np.delete(data, i), mu0, n - 1)
            xbar = np.mean(x)
            denom = np.power(np.sum(np.power(x - xbar, 2)), 1.5)
            if abs(denom) < 1e-4:
                q1 = 2.5
                q2 = 97.5
            else:
                a = np.divide(np.sum(np.power(x - xbar, 3)), denom) / 6.0

                # quantiles: (k1 and k2 are defined globally)
                q1 = 100 * s.norm.cdf(z0 + (z0 + k1) / (1 - a * (z0 + k1)))
                q2 = 100 * s.norm.cdf(z0 + (z0 + k2) / (1 - a * (z0 + k2)))

        elif tmp == 0.0:
            q1 = 0
            q2 = 0

        elif tmp == 1.0:
            q1 = 100
            q2 = 100

        return (q1, q2)
Example #20
def mjt_discrete_movement(amp=1.0, dur=1.0, loc=0.,
                          time=np.arange(-0.5, 0.5, 0.01)):
    """
    Generate a discrete Minimum Jerk Trajectory (MJT) movement speed profile
    of the given amplitude, duration and time location for the given time span.

    Parameters
    ----------
    amp         : float
                  Amplitude of the MJT discrete movement.
    dur         : float
                  Duration of the MJT discrete movement.
    loc         : float
                  The temporal location of the center of the MJT speed
                  profile.
    time        : np.array
                  The time values for which the speed profile values are to be
                  returned.
    Returns
    -------
    movement    : np.array
                  The movement speed profile of the MJT discrete movement.

    Notes
    -----

    Examples
    --------
    """
    t = np.array([np.min([np.max([(_t + 0.5 * dur - loc) / dur, 0.]), 1.])
                  for _t in time])
    return amp * (30 * np.power(t, 4) -
                  60 * np.power(t, 3) +
                  30 * np.power(t, 2))
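A quick usage sketch: for amp=1 and dur=1 the profile peaks at 1.875 (the classic minimum-jerk peak speed) and integrates to the movement amplitude:

import numpy as np

t = np.arange(-0.5, 0.5, 0.01)
v = mjt_discrete_movement(amp=1.0, dur=1.0, loc=0.0, time=t)
print(v.max())          # ~1.875
print(v.sum() * 0.01)   # ~1.0, i.e. the profile integrates to amp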
Example #21
    def function1D(self, t):
        A  = self.getParamValue(0)
        B  = self.getParamValue(1)
        R  = self.getParamValue(2)
        T0 = self.getParamValue(3)
        Scale = self.getParamValue(4)
        HatWidth  = self.getParamValue(5)
        KConv  = self.getParamValue(6)

        # A/2 Scale factor has been removed to make A and Scale independent
        f_int = Scale*((1-R)*np.power((A*(t-T0)),2)*
                       np.exp(-A*(t-T0))+2*R*A**2*B/np.power((A-B),3) *
                       (np.exp(-B*(t-T0))-np.exp(-A*(t-T0))*(1+(A-B)*(t-T0)+0.5*np.power((A-B),2)*np.power((t-T0),2))))
        f_int[t<T0] = 0

        mid_point_hat = len(f_int)//2
        gc_x = np.array(range(len(f_int))).astype(float)
        ppd = 0.0*gc_x
        lowIDX  = int(np.floor(np.max([mid_point_hat-np.abs(HatWidth),0])))
        highIDX = int(np.ceil(np.min([mid_point_hat+np.abs(HatWidth),len(gc_x)])))

        ppd[lowIDX:highIDX] = 1.0
        ppd = ppd/sum(ppd)

        gc_x = np.array(range(len(f_int))).astype(float)
        gc_x = 2*(gc_x-np.min(gc_x))/(np.max(gc_x)-np.min(gc_x))-1
        gc_f = np.exp(-KConv*np.power(gc_x,2))
        gc_f = gc_f/np.sum(gc_f)

        npad = len(f_int) - 1
        first = npad - npad//2
        f_int = np.convolve(f_int,ppd,'full')[first:first+len(f_int)]
        f_int = np.convolve(f_int,gc_f,'full')[first:first+len(f_int)]

        return f_int
Example #22
def find_nearby(targets, catalog, max_dist=10):
  '''
  Finds all the sources within a max distance of each target.  
  '''

  # Determines the squared separation between targets
  sep = lambda x0,y0,x,y: np.power((x-x0)*np.cos(y0*pi/180),2) + np.power((y-y0),2)
  
  # Convert max distance from arcseconds to squared degrees
  max_dist = (max_dist / 3600.)**2

  for each in targets:

    rvec = sep(each['degra'], each['degdec'], catalog['degra'], catalog['degdec'])


    # Ensure that target is recovered in catalog   
    nearby = np.where(rvec < max_dist)

    if each['hstid'] in catalog['hstid'][nearby]:
      write_cmd_file(catalog[nearby], each)

    else:
      print('ERROR: Failed to find %s.' % each['hstid'])
Example #23
    def velocidadlimpio(self):
        # otherwise it does not reach 2; this is how it works
        v = np.arange(-3, 3, 1)
        [x, y] = np.meshgrid(v, v)

        z = np.multiply(x, np.exp(-np.power(x, 2) - np.power(y, 2)))

        # Matplotlib inverts the order of the matrices, unlike MATLAB
        [py, px] = np.gradient(z, 1, 1)

        print('x ' + str(x))
        print('y ' + str(y))
        print('z ' + str(z))
        print('px ' + str(px))
        print('py ' + str(py))

        #q = plt.quiver(X, Y, u, v, angles='xy', scale=40, color=['r'])
        #p = plt.quiverkey(q,1,16.5,50,"50 m/s",coordinates='data',color='r')

        # first the ranges, then the values they contain
        q = plt.quiver(x, y, px, py)
        p = plt.quiverkey(q, 1, 16.5, 50, "50 m/s", coordinates='data', color='r')
        plt.title('Velocity')
        plt.show()
        print('Fourth plot loaded...')
Example #24
def lee_parallel_betachEq11(x,n):
    beta=x.copy()
    beta_ch=11.6
    rmsV=5.
    cs=1.
    mach_bh= lee_mach_bh(rmsV,cs)
    return np.log( mach_bh**-2*np.power(mach_bh**n+np.power(beta_ch/beta,n/2.), -1./n) )
Example #25
def read_excel(file='slab++.xlsx'):
    wb = open_workbook(file)
    sheet = wb.sheets()[0]
    number_of_rows = sheet.nrows
    number_of_columns = sheet.ncols
        
    slabs = Catalog()
    for row in range(4, number_of_rows):
        slab = Slab()
        for col in range(number_of_columns):
            par = str(sheet.cell(3, col).value)
            if par:
                print(par)
            value = sheet.cell(row, col).value
            print(par, value)
            try:
                slab.params[par] = float(value)
            except (TypeError, ValueError):
                slab.params[par] = str(value)
                
        # save temperature at 600 km using thermal parameter
        phi = slab.params['thermalPar']
        z = 6.0
        Ta = 1338.0
        Tz = Ta * (1.0 - ( (2.0/np.pi)*np.exp(-1.0*( (np.power(np.pi,2.0)*z)/ (np.power(2.32,2.0) * phi) )) ) )
        slab.params['Temp600'] = Tz
        print(Tz)
        print(slab.params)
        slabs.append(slab)
    return slabs
Example #26
def mdot_magnetic_neq8(x,beta_ch):
    beta=x.copy()
    n=8.
    rmsV=5.
    cs=1.
    mach_bh= lee_mach_bh(rmsV,cs)
    return mach_bh**-2*np.power(mach_bh**n+np.power(beta_ch/beta,n/2.), -1./n)
Example #27
def lee_parallel_neq8(x,beta_ch):
    beta=x.copy()
    n=8.
    rmsV=5.
    cs=1.
    mach_bh= lee_mach_bh(rmsV,cs)
    return np.log( mach_bh**-2*np.power(mach_bh**n+np.power(beta_ch/beta,n/2.), -1./n) )
Example #28
    def p_corr(self):
        """
        calculate pearson correlation between users
        """
        data = self.data
        rows = self.rows
        (nrows, ncols) = data.shape
        p_corr_dict = {}

        for row_i in range(nrows):
            for row_j in range(nrows):
                valide_data_i = [data[row_i, :][n] \
                            for n in range(ncols) if \
                            data[row_i, n] != 0 and data[row_j, n] != 0]
                valide_data_j = [data[row_j, :][n] \
                                for n in range(ncols) if \
                                data[row_i, n] != 0 and data[row_j, n] != 0] 
                valide_data_i = np.array(valide_data_i) - np.mean(valide_data_i)
                valide_data_j = np.array(valide_data_j) - np.mean(valide_data_j)
                #print np.dot(data[row_i, :], data[row_j, :])
                p_corr = np.dot(valide_data_i, valide_data_j)*1.0/\
                         np.sqrt(sum(np.power(valide_data_i,2))*\
                                 sum(np.power(valide_data_j,2)))
                p_corr_dict[(rows[row_i], rows[row_j])] = p_corr  
        return p_corr_dict
Example #29
def calc_v(pos, t, x_table, c_attract, dc_attract, c_repel, dc_repel):
    '''Calculate drift velocity as a function of c(pos,t) and dc(pos,t)'''
    v = 22. # µm/s
    chi = 50000. # µm^2/s
    chi2 = 50000. # µm^2/s
    k = 0.125 # mM
    k2 = 0.125 # mM
    # clamp the position to the table range, then interpolate the concentrations
    pos = min(max(pos, x_table[0]), x_table[-1])
    c, dc = int_tables(pos, t, x_table, c_attract, dc_attract)
    c2, dc2 = int_tables(pos, t, x_table, c_repel, dc_repel)
    v1 = ((8*v)/(3*np.pi))*np.tanh(((chi*np.pi)/(8*v))*(k/np.power((k+c),2))*dc)
    v2 = ((8*v)/(3*np.pi))*np.tanh(((chi2*np.pi)/(8*v))*(k2/np.power((k2+c2),2))*dc2)
    #print v1, v2
    if np.abs(v1) > np.abs(v2):
        return v1
    else:
        return v2
Example #30
    def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        if hyperno != 0:
            return EVzxVzxT_list_this * 0

        alpha = self.length_scale * self.length_scale

        I = np.identity(R)
        S = np.diag(B[0, :] * B[0, :])
        Sinv = np.diag(1 / (B[0, :] * B[0, :]))  # parentheses needed: inverse of S = diag(B^2)
        C = I * alpha
        Cinv = I * (1 / alpha)
        CinvSinv = 2 * Cinv + Sinv
        CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

        dC = self.length_scale * I
        dCinv = -Cinv.dot(dC).dot(Cinv)
        dCinvSinv = 2 * dCinv
        dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

        S1 = (
            dCinv
            - dCinv.dot(CinvSinv_inv).dot(Cinv)
            - Cinv.dot(dCinvSinv_inv).dot(Cinv)
            - Cinv.dot(CinvSinv_inv).dot(dCinv)
        )
        S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
        S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
        S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

        T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
        T1 = np.tile(T1s, [N, 1, 1])
        T2s = T1s.T
        T2 = np.tile(T2s, [N, 1, 1])
        T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
        T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
        T4 = np.expand_dims(T4, axis=2)
        T4 = np.repeat(T4, P, axis=2)
        T5 = A.dot(S3).dot(Z.T)
        T5 = np.expand_dims(T5, axis=2)
        T5 = np.repeat(T5, P, axis=2)
        T6 = np.swapaxes(T5, 1, 2)

        SCinvI = 2 * Cinv.dot(S) + I
        SCinvI_inv = np.diag(1 / SCinvI.diagonal())
        (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
        detSCinvI = np.exp(logDetSCinvI)
        dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

        expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

        res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm

        res = np.sum(res, axis=0)

        return res
Example #31
def bohachevsky(x):
    x = _transform_and_check(x, True)
    xx = np.power(x, 2)
    y = np.sum(xx[:-1] + 2 * xx[1:] - 0.3 * np.cos(3 * np.pi * x[:-1]) -
        0.4 * np.cos(4 * np.pi * x[1:]) + 0.7)
    return y
Example #32
    def __get_partial_sum_aproximation(self, buffer_size, batch_size, alpha):
        # approximation of the N-th harmonic number (partial sum of the generalized harmonic series)
        aprox_total = (np.power(buffer_size, 1 - alpha) - 1) / (1 - alpha) + self.gamma_s
        segment_len = aprox_total / batch_size
        return aprox_total, segment_len
Example #33
    yhat = np.dot(train_xn, W)
    loss = train_yn - yhat
    grad = -2 * np.dot(train_xn.T, loss)  # descent direction, consistent with grad_b below

    # Adagrad:
    adagrad += grad ** 2
    update = lr * grad / np.sqrt(adagrad + epsilon)
    # General (plain SGD) would be:
    # update = lr * grad

    W = W - update
    grad_b = -2*np.sum(loss)
    bias = bias - lr*grad_b

    if i % 50 == 0:
        nl = np.power(np.sum(np.power(loss, 2))/loss.shape[0], 0.5)
        print("i={0:06d}, Loss={1:.8f}, Gradient={2:.8f}".format(i, nl, update.mean()))
        # print("loss="+str(nl))
        epoch_list.append(i)
        lost_list.append(nl)
        if line is None:
            line = ax.plot(epoch_list, lost_list)[0]
        else:
            line.set_xdata(epoch_list)
            line.set_ydata(lost_list)
            ax.set_xlim(epoch_list[0], epoch_list[-1])
            ax.set_ylim(min(lost_list), max(lost_list))
        fig.canvas.draw()
        plt.show()
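The fragment above assumes pre-existing W, lr, adagrad, epsilon and plotting state. A self-contained sketch of the same Adagrad update on synthetic data (all names here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.0, -2.0, 0.5])

W = np.zeros(3)
adagrad = np.zeros(3)
lr, epsilon = 0.5, 1e-8
for _ in range(1000):
    loss = y - X @ W
    grad = -2 * X.T @ loss / len(y)     # gradient of the mean squared error
    adagrad += grad ** 2                # accumulate squared gradients
    W -= lr * grad / np.sqrt(adagrad + epsilon)
print(W)  # approaches [1.0, -2.0, 0.5]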

Example #34
def fitfuncnore(p, t):
    return 2 * p[0] * np.power(t, 1.0) + p[1]
Example #35
def fitfunc(p, t, y, ns):
    return (y - 2 * p[0] * np.power(t, 1.0) - p[1]) / (y / np.sqrt(ns - 1))
Example #36
    def cal_angle(position, hid_idx):
        return position / np.power(10000, 2 * (hid_idx) / d_model)
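cal_angle is the angle term of the sinusoidal positional encoding from the Transformer; the standard formulation pairs dimensions via hid_idx // 2 and applies sin to even and cos to odd indices. A sketch of the full table (d_model assumed even, value illustrative):

import numpy as np

d_model = 8  # illustrative

def get_sinusoid_table(n_position):
    table = np.array([[pos / np.power(10000, 2 * (j // 2) / d_model)
                       for j in range(d_model)] for pos in range(n_position)])
    table[:, 0::2] = np.sin(table[:, 0::2])  # even dimensions: sine
    table[:, 1::2] = np.cos(table[:, 1::2])  # odd dimensions: cosine
    return table

print(get_sinusoid_table(4).shape)  # (4, 8)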
Example #37
def renorm(EoS,IDs,MR,T,nd,nx,kij,nc,CR,en_auto,beta_auto,SM,n,estimate,L_est,phi_est):
    #nd    Size of density grid
    #nx    Size of mole fraction grid
    #n     Main loop iteration controller
    
    #If only 1 component is present mimic a binary mixture made of the same component
    if nc==1:
        IDs[1] = IDs[0]
    
    #Recover parameters
    L_rg = data.L(IDs)         #Vector with L parameters (cutoff length)
    phi_rg = data.phi(IDs)     #Vector with phi parameters
    Tc = data.Tc(IDs)
    
    #Components parameters
    a = eos.a_calc(IDs,EoS,T)
    b = eos.b_calc(IDs,EoS)
    Tr = T/np.array(Tc)
    
    #Main loop parameters
    x = np.array([0.0001,0.9999])
    stepx = (1/float(nx)) #Step to calculate change
    k = 0               #Vector fill counter
    i = 1               #Main loop counter
    r = 0               #Report counter
    count = 0
    rho = np.empty((nd))            #Density vector
    rhov = []                       #Density vector to export
    x0v = []                        #Mole fraction vector to export
    bmixv = []
    f = np.empty((nd))              #Helmholtz energy density vector
    fv = []                         #Helmholtz energy density vector to export
    fresv = []                      #Residual Helmholtz energy density vector to export
    Tv = []                         #Temperature values to export
    df = np.empty((nd))             #Changes in helmholtz energy density vector
    f_orig = np.empty((nd))         #Unmodified Helmholtz energy density vector
    rhob = []                       #Adimensional density vector
    u = np.empty((nd))
    X = np.ones((4*nc))
    Pv = []
    fmat = []
    Pmatv = np.empty((nx,nd))
    fmatres = []
    umat = []
    ures = np.empty((nd))
    uv = []
    df_vdw = np.empty((nd))
    
    df_vec2 = []
    f_vec2 = []
    P_vec2 = []
    u_vec2 = []
    aa2 = []
    
    if nc==1:
        X = np.ones((8))
    
    #Main loop*************************************
    while x[0]<1.0:
        if nc>1:
            print(x[0])
        if x[0]==0.006: #after first step
            x[0]=0.005
        x[1]=1-x[0]
        
        if nc==1:
            x[0] = 0.999999
            x[1] = 0.000001
        
        #Mixture parameters
        bmix = eos.bmix_calc(MR,b,x)
        amix = eos.amix_calc(MR,a,x,kij)
        Nav = 6.023e23
        rhomax = 0.999999
        
        #Mixture Renormalization parameters
        L = np.dot(x,np.power(L_rg,3.0))
        L = np.power(L,1.0/3.0)
        phi = np.dot(x,phi_rg)
        
        #print L
        #print phi
        
        
        pi = math.pi
        omega = data.omega(IDs)[0]
        sig = np.power(6/pi*b/Nav*np.exp(omega),1.0/3.0)[0]
        #sig = np.power(b/Nav,1.0/3.0)[0]
        #c1 = data.c1(IDs)[0]
        #en = data.en(IDs)[0]
        #sig = np.power(1.15798*b/Nav,1.0/3.0)[0]
        L = sig
        #L = 1.5*sig
        #L = 1/c1*sig
        #print L,phi
        #L = 0.5/c1*sig
        #PHI = 4*(pi**2.0)
        
        #PHI = 1.0/pi/4.0
        #lamda = 1.5
        #w_LJ = (9.0*sig/7.0) #lennard-jones
        #print 'LJ=',w_LJ
        #w_SW = np.sqrt((1./5.)*(sig**2.0)*(lamda**5.0-1)/(lamda**3.0-1)) #square-well potential
        #print 'SW=',w_SW
        #phi = PHI*(w_LJ**2)/2/(L**2)
        #phi = PHI*(w_SW**2)/2/(L**2)
        
        #om = data.omega(IDs)
        #phi = 2/np.power(np.exp(om),4)[0]
        #w = 0.575*sig*en/T/kB/b[0]*1e6
        #print 'w=',w
        #phi = 2/np.power(np.exp(c1),4)[0]
        
        #w = 100.0*1e-9/100 #van der waals wavelength 100nm
        #phi = PHI*(w**2)/2/(L**2)
        
        #print L
        #print phi
        #print '---------'
        
        #New parameters
        #L = 1.5*np.power(b/Nav,1.0/3.0)
        #h = 6.626e-34
        #kkB = 1.38e-23
        #MM = 0.034
        #deBroglie = h/np.sqrt(3*kkB*T*MM/Nav)
        #phi = (deBroglie**2.0)/(L**2.0)*150*3.14
        #L = L[0]
        #phi = phi[0]
        #print 'L=',L
        #print 'phi=',phi
        

        if estimate==True:
            L = L_est
            phi = phi_est

        for k in range(0,nd):
            rho[k] = np.array(float(k)/nd/bmix)
            if k==0:
                rho[0] = 1e-6
            if EoS==6:
                if k==0:
                    X = association.frac_nbs(nc,1/rho[k],CR,en_auto,beta_auto,b,bmix,X,0,x,0,T,SM)
                else:
                    X = association.frac_nbs(nc,1/rho[k],CR,en_auto,beta_auto,b,bmix,X,1,x,0,T,SM)
            #print X,k
            #raw_input('...')
            f[k] = np.array(helm_rep(EoS,R,T,rho[k],amix,bmix,X,x,nc))   #Helmholtz energy density
            k = k+1
            
        f_orig = f                                #Initial helmholtz energy density
        
        """
        #-------------------------------------------
        #Fluctuation Analysis-----------------------
        #-------------------------------------------
        drho = rho[int(nd/2)]-rho[int(nd/2)-1]
        for i in range(1,nd-2):
            u[i] = (f[i+1]-f[i-1])/(2*drho)
        u[nd-1] = (f[nd-1]-f[nd-2])/drho
        u[0] = (f[1]-f[0])/drho
        
        fspl = splrep(rho,f,k=3)         #Cubic Spline Representation
        f3 = splev(rho,fspl,der=0)
        u = splev(rho,fspl,der=1)        #Evaluate Cubic Spline First derivative

        P = -f3+rho*u
        
        P_vec2.append(P)
        u_vec2.append(u)
        #===========================================
        #===========================================
        """
    
        #Subtract attractive forces (due long range correlations)
        f = f + 0.5*amix*(rho**2)
        
        df_vec2.append(rho)
        f_vec2.append(rho)

        #Adimensionalization
        rho = rho*bmix
        f = f*bmix*bmix/amix
        T = T*bmix*R/amix
        
        f_vec2.append(f)

        rho1 = rho.flatten()

        #Main loop****************************************************************
        i = 1
        while i<=n:
            #print i
            #K = kB*T/((2**(3*i))*(L**3))
            #K = R*T/((L**3)*(2**(3*i)))
            K = T/(2**(3*i))/((L**3)/bmix*6.023e23)
            
            
            #Long and Short Range forces
            fl = helm_long(EoS,rho,f)
            fs = helm_short(EoS,rho,f,phi,i)

            #Calculate df
            width = rhomax/nd
            w = 0
            for w in range(0,nd):
                df[w] = renorm_df(w,nd,fl,fs,K,rho,width)
            
            #Update Helmholtz Energy Density
            df = np.array(df) #used to evaluate each step
            f = f + df
            df_vec2.append(list(df/bmix/bmix*amix*1e6/rho))
            f_vec2.append(list(f))
            #print 'i=',i,K,f[2],df[2],T,df_vec2[1][2]
            i = i+1
            #print i

        #Dimensionalization
        rho = rho/bmix
        f = f/bmix/bmix*amix
        T = T/bmix/R*amix
        
        #df_total = 
        #df = np.array(df)
        #df_vec.append(df)
        
        #Add original attractive forces
        f = f - 0.5*amix*(rho**2)
        
        #Store residual value of f
        #fres = f - rho*R*T*(np.log(rho)-1) #WRONG
        fres = f - rho*R*T*np.log(rho)
        #f = f + rho*R*T*(np.log(rho)-1) #Already accounting ideal gas energy
        
        #strT = str(T)
        #dfT = ('df_%s.csv' %strT)
        TT = np.zeros((nd))
        df_vdw = 0.5*((rho*bmix)**2)
        df_vec2.append(list(df_vdw))
        f_vec2.append(list(df_vdw))
        for i in range(0,nd):
            TT[i] = T
        df_vec2.append(TT)
        f_vec2.append(TT)
        envelope.report_df(df_vec2,'df.csv')
        envelope.report_df(f_vec2,'f.csv')
        #raw_input('----')

        #if(EoS==6):
        #    f = fres
        
        fv.append(f)
        fresv.append(fres)
        x0v.append(x[0])
        bmixv.append(bmix)
        
        if r==0:
            rhob.append(rho*bmix) #rhob vector is always the same
            rhov.append(rho) #in case the calculation is done for one-component
        r=1

        drho = rho[int(nd/2)]-rho[int(nd/2)-1]
        for i in range(1,nd-2):
            u[i] = (f[i+1]-f[i-1])/(2*drho)
        u[nd-1] = (f[nd-1]-f[nd-2])/drho
        u[0] = (f[1]-f[0])/drho
        
        fspl = splrep(rho,f,k=3)         #Cubic Spline Representation
        f = splev(rho,fspl,der=0)
        u = splev(rho,fspl,der=1)        #Evaluate Cubic Spline First derivative

        P = -f+rho*u
        Pv.append(P)
        for j in range(0,nd):
            Pmatv[count][j] = P[j]
            #print Pmatv[count][j],count,j,x[0]
        count = count+1
        
        """
        #Fluctuation Analysis-----------------------
        P_vec2.append(P)
        u_vec2.append(u)
        P_vec2.append(TT)
        u_vec2.append(TT)
        envelope.report_df(P_vec2,'P.csv')
        envelope.report_df(u_vec2,'u.csv')
        #===========================================
        """

        fmat.append(f)
        fmatres.append(fres)

        x[0] = x[0]+stepx
        #if nc>1:
        #    if abs(x[0]-1.0)<1e-5:
        #        x[0] = 0.9999
        
    if nc>1:
        Pmat = RectBivariateSpline(x0v,rhob,Pmatv)
    else:
        Pmat = 'NULL'

    # difference imposed by renormalization
    # (computed for both branches: dfv and avdw are appended to renorm_out below)
    dfv = []
    dff = f-f_orig
    dfv.append(dff)
    avdw = []
    aa2 = 0.5*amix*(rho**2)
    avdw.append(aa2)

    renorm_out = []
    renorm_out.append(fv)
    renorm_out.append(x0v)
    renorm_out.append(rhov)
    renorm_out.append(rhob)
    renorm_out.append(fmat)
    renorm_out.append(Pmat)
    if nc>1: #If binary mixture, report calculated values
        print('before report')
        ren_u = report_renorm_bin(rhob,x0v,fmatres,nx,nd,MR,IDs,EoS)
        renorm_out.append(ren_u)
    else:
        renorm_out.append(0)
    renorm_out.append(fresv)
    renorm_out.append(Pv)
    renorm_out.append(bmixv)
    renorm_out.append(dfv)
    renorm_out.append(avdw)
    return renorm_out
Example #38
# convert from data frames to numpy matrices
X = np.array(X.values)
y = np.array(y.values)

# convert np array to tensor objects
X_t = tf.convert_to_tensor(X, dtype=tf.float64)
y_t = tf.convert_to_tensor(y, dtype=tf.float64)

# create a placeholder variable for Y (output)
y_p = tf.placeholder(tf.float64, shape=(X_t.shape[0], 1))

# apply feature map to input features x1
X_feat = []
for i in range(1, degree + 1):
    X_feat.append(np.power(X, i))
X_feat_new = np.concatenate((X_feat), axis=1)
X_feat_new_t = tf.convert_to_tensor(X_feat_new, dtype=tf.float64)
X_feat_new_p = tf.placeholder(tf.float64,
                              shape=(X_feat_new_t.shape[0], degree))

# convert to numpy arrays and initialize the parameter array theta
w = np.zeros((1, X_feat_new.shape[1]))
b = np.array([0])
theta = (b, w)

#Converting w and b to tensors
w_t = tf.Variable(w, dtype=tf.float64, name="w")
b_t = tf.Variable(b, dtype=tf.float64, name="b")

init = tf.global_variables_initializer()
Example #39
    def backward(self, grad):
        curr_gradient = 1 - np.power(self.output, 2)
        return np.multiply(curr_gradient, grad)
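This is the tanh backward rule, d/dx tanh(x) = 1 - tanh(x)^2, applied to an upstream gradient; it can be verified against a central-difference estimate:

import numpy as np

x = np.linspace(-2, 2, 5)
output = np.tanh(x)                        # what self.output would hold
analytic = (1 - np.power(output, 2)) * np.ones_like(x)
eps = 1e-6
numeric = (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps)
print(np.allclose(analytic, numeric))      # True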
Example #40
    def _mesh_2d(self, mesh_args) -> Tuple[pp.GridBucket, pp.FractureNetwork2d]:
        """To create a local grid bucket in 2d, we should:
        1) Create the bounding surfaces, from self.surfaces
            i) Find coordinates of all surfaces
            ii) Remove duplicate nodes
            iii) Create a FractureNetwork2d object, create a mesh

        """
        # First, build points and edges for the domain boundary
        domain_pts = np.zeros((3, 0))
        domain_edges = np.zeros((2, 0), dtype=int)
        edge_2_surf = np.empty([], dtype=int)

        # Empty value for frac_edge - this will be filled in if relevant.
        frac_edge = np.zeros((3, 0))

        # Tag the region surfaces as (macro) fracture or not
        surface_is_fracture: List[bool] = []

        for surf_ind, (surf, node_type) in enumerate(
            zip(self.surfaces, self.surface_node_type)
        ):

            # Fracture tips need some special treatment in mpfa regions:
            # First, the region boundary will contain surfaces from the face centers
            # on the two fracture faces, to the tip node. These edges must be removed,
            # or else the domain for local meshing will contain a slit.
            # Second, the line of the macro fracture must still be represented in the
            # local grid, and the 2d domain must have their faces split along the
            # macro fracture. This is achieved by first adding a micro fracture along
            # what is the macro fractures, construct the full GridBucket, and then
            # remove it towards the end.
            # For Tpfa, no such adjustments are needed.
            if self.name == "mpfa":
                # We will only run into this for surfaces with two components
                if len(node_type) == 2:
                    # Checks
                    assert "face" in node_type
                    fi_edge = node_type.index("face")
                    if (
                        self.g.tags["fracture_faces"][surf[fi_edge]]
                        and "node" in node_type
                    ):
                        ni = node_type.index("node")

                        # Consistency check, for mpfa only the central node should be involved
                        # in the interaction region boundary
                        assert surf[ni] == self.reg_ind

                        # If this node is tagged as a tip in the macro grid, fetch the coordinates
                        # the line from face center to node
                        if self.g.tags["node_is_fracture_tip"][self.reg_ind]:
                            # Not sure if this will work for polyline fractures, where
                            # the tip is not really a tip
                            if self.is_tip:
                                # If we have already encountered the tip node,
                                # Check that the edge is the same as previously found
                                # If frac_edge has its initial empty value, something
                                # is really wrong.
                                assert np.allclose(
                                    frac_edge, self.coords(surf, node_type)
                                )
                            self.is_tip = True
                            # Store the coordinate of the macro fracture.
                            frac_edge = self.coords(surf, node_type)
                            # Store this as fracture surface
                            surface_is_fracture.append(True)

                            # No need to do more for this surface
                            continue
                        else:
                            # This is not a fracture tip, but internal to fracture.
                            # Standard treatment should suffice
                            pass

            e = np.vstack(
                (np.arange(len(node_type) - 1), 1 + np.arange(len(node_type) - 1))
            )
            # The new edges are offset by the number of previous points
            domain_edges = np.hstack((domain_edges, domain_pts.shape[1] + e))

            # Then add new points
            domain_pts = np.hstack((domain_pts, self.coords(surf, node_type)))

            # This is not a fracture surface
            surface_is_fracture.append(False)

            edge_2_surf = np.hstack((edge_2_surf, surf_ind + np.ones(e.shape[1])))

        # Next, build up the constraints
        # Todo: Expand this with fractures contained within the region
        edge_2_constraint = np.array([], dtype=int)
        constraint_edges = np.empty((2, 0), dtype=int)
        constraint_pts = np.empty((3, 0))

        for constraint_ind, (constraint, node_type) in enumerate(
            zip(self.constraints, self.constraint_node_type)
        ):
            e = np.vstack(
                (np.arange(len(node_type) - 1), 1 + np.arange(len(node_type) - 1))
            )
            # The new edges are offset by the number of previous points
            constraint_edges = np.hstack(
                (constraint_edges, constraint_pts.shape[1] + e)
            )
            # Then add new points
            constraint_pts = np.hstack(
                (constraint_pts, self.coords(constraint, node_type))
            )

            edge_2_constraint = np.hstack(
                (edge_2_constraint, constraint_ind * np.ones(e.shape[1], dtype=int))
            )

        # Uniquify points on the domain boundary
        unique_domain_pts, _, all_2_unique = pp.utils.setmembership.unique_columns_tol(
            domain_pts, tol=self.network.tol
        )
        unique_domain_edges = all_2_unique[domain_edges]
        # Also sort the boundary points to form a circle
        sorted_edges, sort_ind = pp.utils.sort_points.sort_point_pairs(
            unique_domain_edges
        )
        # Store the mapping from the ordering of the domain boundaries, as represented
        # in the FractureNetwork, to the ordering of surfaces in this region.
        # The mapping must be adjusted to ignore surfaces that are macro fractures
        incr_sort_ind = sort_ind
        for i in np.where(surface_is_fracture)[0]:
            hit = incr_sort_ind >= i
            incr_sort_ind[hit] += 1

        self.domain_edges_2_reg_surface = incr_sort_ind

        # Store boolean of which surfaces are macro fractures.
        self.surface_is_macro_fracture: List[bool] = surface_is_fracture

        constraint_edges += self.network.pts.shape[1]

        int_pts = np.hstack((self.network.pts, constraint_pts))
        int_edges = np.hstack((self.network.edges, constraint_edges))
        int_tags = self.network.tags
        for key, value in int_tags.items():
            int_tags[key] = np.hstack((value, [None] * constraint_edges.shape[1]))

        edge_2_constraint += self.network.edges.shape[1]

        # If this is a tip of a macro fracture, add the part of the macro fracture within
        # this network *at the beginning of the fracture list* (will be important later)
        if self.is_tip:
            int_pts = np.hstack((frac_edge, int_pts))
            int_edges = np.hstack((np.array([[0], [1]]), 2 + int_edges))
            edge_2_constraint += 1
            # Also expand the tags with a default value for the macro face-cum-fracture
            for key, value in int_tags.items():
                int_tags[key] = np.hstack(([None], value))

        # Similarly uniquify points in constraint description
        unique_int_pts, _, a2u = pp.utils.setmembership.unique_columns_tol(
            int_pts, tol=self.network.tol
        )
        unique_int_edges = a2u[int_edges]

        # Define a fracture network, using the surface specification as boundary,
        # and the constraints as points
        # Fractures will be added as edges
        network_for_meshing = pp.FractureNetwork2d(
            domain=unique_domain_pts[: self.dim, sorted_edges[0]],
            pts=unique_int_pts[: self.dim],
            edges=unique_int_edges,
            tol=self.network.tol,
        )
        network_for_meshing.tags = int_tags
        gmsh_data = network_for_meshing.prepare_for_gmsh(
            mesh_args=mesh_args,
            constraints=edge_2_constraint,
            remove_small_fractures=True,
        )

        decomp = network_for_meshing._decomposition

        physical_points = {}
        for p in decomp["domain_boundary_points"]:
            physical_points[p] = Tags.DOMAIN_BOUNDARY_POINT.value

        #        gmsh_data.physical_points.update(physical_points)

        #        breakpoint()
        gmsh_writer = GmshWriter(gmsh_data)

        # Generate the mesh. Also write .geo file to ease debugging;
        # we will delete this later on.
        gmsh_writer.generate(
            self.file_name, write_geo=True, clear_gmsh=True, finalize=False
        )

        grid_list = pp.fracs.simplex.triangle_grid_from_gmsh(
            self.file_name, constraints=edge_2_constraint
        )

        # preserve tags for the fractures from the network
        # we are assuming a coherent numeration between the network
        # and the created grids
        frac = np.setdiff1d(
            np.arange(network_for_meshing.edges.shape[1]),
            edge_2_constraint,
            assume_unique=True,
        )
        for idg, g in enumerate(grid_list[1]):
            for key in int_tags:
                if key not in g.tags:
                    g.tags[key] = int_tags[key][frac][idg]

        gb = pp.meshing.grid_list_to_grid_bucket(grid_list)

        #        gb = network.mesh(
        #            mesh_args=mesh_args,
        #            file_name=self.file_name,
        #            constraints=edge_2_constraint,
        #            preserve_fracture_tags=[k for k in int_tags.keys()],
        #        )

        if self.is_tip:
            # For 2d tip nodes, we added a macro fracture to the local fracture
            # network, so that the local 2d grid was split along the macro surface.
            # This fracture should not be part of the local problem, and must
            # be removed.

            # First identify the grid to remove. It should be at the start of the
            # list of fractures.
            gf = gb.grids_of_dimension(self.dim - 1)[0]

            # Check that the grid we have picked out indeed corresponds to the
            # macro scale fracture. This can be removed when we trust the implementation.
            nodes = gf.nodes
            for i in range(2):
                p = frac_edge[:, i].reshape((-1, 1))
                dist = np.min(np.sum(np.power(p - nodes, 2), axis=0))
                assert dist < 1e-8

                # Next remove all lower-dimensional neighbors that are formed by the
                # intersection with other (micro) fractures. Note that this will
                # also remove the interfaces (mortar grids) between intersection and
                # micro fractures. Furthermore, since the micro fractures have been
                # split to accommodate the macro fracture, this will remove the
                # connection of the micro fractures across the macro fracture.
                for low_neigh in gb.node_neighbors(gf, only_lower=True):
                    gb.remove_node(low_neigh)

            # Finally remove the node
            gb.remove_node(gf)
        return gb, network_for_meshing
Example #41
def ackley(x):
    x = _transform_and_check(x)
    y = -20 * np.exp(-0.2 * np.sqrt(np.sum(np.power(x, 2)) / x.size)) - \
        np.exp(np.sum(np.cos(2 * np.pi * x)) / x.size) + \
        20 + np.exp(1)
    return y
Example #42
def rastrigin(x):
    x = _transform_and_check(x)
    y = 10 * x.size + np.sum(np.power(x, 2) - 10 * np.cos(2 * np.pi * x))
    return y
Example #43
def rosenbrock(x):
    x = _transform_and_check(x, True)
    y = 100 * np.sum(np.power(x[1:] - np.power(x[:-1], 2), 2)) + \
        np.sum(np.power(x[:-1] - 1, 2))
    return y
Example #44
def griewank(x):
    x = _transform_and_check(x)
    y = np.sum(np.power(x, 2)) / 4000 - np.prod(np.cos(
        x / np.sqrt(np.arange(1, x.size+1)))) + 1
    return y
Example #45
def sharp_ridge(x):
    x = _transform_and_check(x, True)
    y = -x[0] + 100 * np.sqrt(np.sum(np.power(x[1:], 2)))
    return y
Example #46
def schwefel12(x):
    x = _transform_and_check(x, True)
    x = [np.sum(x[:i + 1]) for i in range(x.size)]
    y = np.sum(np.power(x, 2))
    return y
Example #47
def different_powers_beyer(x):
    x = _transform_and_check(x, True)
    y = np.sum(np.power(np.abs(x), 2 + 10 * np.linspace(0, 1, x.size)))
    return y
Example #48
def parabolic_ridge(x):
    x = _transform_and_check(x, True)
    y = -x[0] + 100 * np.sum(np.power(x[1:], 2))
    return y
Example #49
def cigar_discus(x):
    x = _transform_and_check(x, True)
    x = np.power(x, 2)
    y = x[0] + (10 ** 4) * np.sum(x[1:-1]) + (10 ** 6) * x[-1]
    return y
Example #50
def ellipsoid(x):
    x = _transform_and_check(x, True)
    weights = np.power(10, 6 * np.linspace(0, 1, x.size))
    y = np.dot(weights, np.power(x, 2))
    return y
Example #51
def cigar(x):
    x = _transform_and_check(x, True)
    x = np.power(x, 2)
    y = x[0] + (10 ** 6) * np.sum(x[1:])
    return y
Example #52
def discus(x): # also called tablet
    x = _transform_and_check(x, True)
    x = np.power(x, 2)
    y = (10 ** 6) * x[0] + np.sum(x[1:])
    return y
Example #53
print(numpy.sum(nd1))  # sum of all elements
print(numpy.max(nd1))  # maximum of all elements
print(numpy.min(nd1))  # minimum of all elements
print(numpy.std(nd1))  # standard deviation of all elements
print(numpy.var(nd1))  # variance of all elements
print(numpy.cumsum(nd1))  # 1-D array of running (cumulative) sums
print(numpy.cumprod(nd1))  # 1-D array of running (cumulative) products
print(numpy.sum(nd2, axis=0))  # column sums (reduce down the rows)
print(numpy.sum(nd2, axis=1))  # row sums (reduce across the columns)
print(numpy.argmax(nd2, axis=0))  # index of the maximum in each column
print(numpy.argmin(nd2, axis=1))  # index of the minimum in each row

print("--------------------------------------------")

## numpy element-wise computation functions:
print(numpy.power(nd2, 2))  # each element squared
print(numpy.add(nd2, 100))  # add 100 to each element
print(numpy.subtract(nd2, 100))  # subtract 100 from each element
print(numpy.multiply(nd2, 100))  # multiply each element by 100
print(numpy.divide(nd2, 100))  # divide each element by 100
print(numpy.ceil(nd2))  # round each element up to the nearest integer
print(numpy.floor(nd2))  # round each element down to the nearest integer
print(numpy.rint(nd2))  # round each element to the nearest integer
print(numpy.isnan(nd2))  # test each element for NaN
print(numpy.abs(nd2))  # absolute value of each element
print(numpy.where(nd2, 100, 200))  # element-wise ternary: x if condition else y

print("--------------------------------------------")

## numpy element-wise predicate functions:
print(numpy.any(nd1 > 0))  # True if at least one element satisfies the condition
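The prints above assume nd1 (a 1-D array) and nd2 (a 2-D array) are already defined; a setup along these lines makes the snippet runnable (values are illustrative):

import numpy

nd1 = numpy.array([1.5, -2.0, 3.0, 0.5])
nd2 = numpy.array([[1.0, 2.0], [3.0, 4.0]])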
Example #54
def sphere(x):
    x = _transform_and_check(x)
    y = np.sum(np.power(x, 2))
    return y
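These benchmark functions (sphere, cigar, discus, ackley, rastrigin, ...) all take their global minimum of 0 at the origin; a quick sanity sketch, assuming _transform_and_check merely validates and converts its input:

import numpy as np

x0 = np.zeros(10)
print(sphere(x0), rastrigin(x0))    # 0.0 0.0
print(np.isclose(ackley(x0), 0.0))  # True, up to floating point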
Example #55
def doPosPlots(posDict,
               settings,
               extraTitle,
               selectedCount,
               extraSettings,
               structDict2=None,
               trajectory=None):

    fileName = standardFileName(SCRIPT_DIR, settings.mySeq, extraTitle,
                                settings.trials)

    length = len(settings.mySeq) + 1

    # Make a grid...
    nrows, ncols = settings.cutOff, length
    image = np.zeros(nrows * ncols)

    for i in range(nrows):
        for j in range(ncols):
            image[i + nrows * j] = -990.0

    myMax = -5.0
    myMin = myMax - 7.0
    corrector = 1.0

    goodPosDict = dict(posDict)

    if ("winprob" in extraSettings):
        myMax = np.log10(2.0)  # np.float was removed from NumPy; plain float arithmetic suffices
        myMin = myMax - 1.5

    if ("binaryProb" in extraSettings):
        myMax = 0.1
        myMin = -2.0

    if ("M1" in extraSettings):
        extraTitle = "-ModeOne-" + extraTitle
        corrector = settings.trials

    for pos, val in goodPosDict.items():

        value = 0.0

        if (val > 0.0):
            value = np.log10(val / corrector)
        else:
            value = -99.9

        image[pos.posX + ncols * pos.posY] = np.power(value, 1.0)

    # Reshape things into a grid
    image = image.reshape((nrows, ncols))

    row_labels = range(nrows)
    col_labels = range(ncols)
    plt.matshow(image, cmap=plt.cm.gray_r, vmin=myMin, vmax=myMax)

    if ("M2" in extraSettings):
        pass


#         if not "test" in extraTitle:
#         mostFreq, maxX = plotMostFrequentStructure(goodPosDict, length)

    if ("M1" in extraSettings):
        if trajectory is not None:
            plotFirstTrajectory(fileName, trajectory, length)

    plt.title(
        standardTitle(settings.mySeq, extraTitle, selectedCount,
                      settings.trials))

    ax = plt.gca()

    ax.xaxis.set_ticks_position('bottom')
    ax.invert_yaxis()

    ax.set_ylabel('Basepairs within strands')
    ax.set_xlabel('Basepairs between strands')

    plt.xticks(range(ncols), col_labels)
    plt.yticks(range(nrows), row_labels)
    plt.colorbar(shrink=0.75)  # orientation='Horizontal'

    plt.savefig(fileName + '.pdf')
    plt.close()

    # Now generate a new plot for probability of success of the most common structures

    if ("M3" in extraSettings and
        (Literals.success in extraTitle or Literals.failure in extraTitle)
            and not "test" in extraTitle):

        fileName = standardFileName(SCRIPT_DIR, settings.mySeq,
                                    extraTitle + "", settings.trials)

        winProb = list()
        plotRange = range(1, maxX)

        f2 = open(fileName + "-mostPopularStructs.txt", 'w')

        for x in plotRange:

            y = mostFreq[x][0]
            myPos = position(x, y)

            winProb.append(100.0 *
                           computeWinProb(f2, myPos, structDict2, settings))

        f2.close()

        fig = plt.figure()

        plt.fill_between(plotRange,
                         0.0,
                         winProb,
                         facecolor='orange',
                         edgecolor='orange',
                         alpha=0.5)
        plt.savefig(fileName + '-structSample.pdf')
Example #56
0
    def perturb(self, params):
        """
        Unlike in C++, this takes a numpy array of parameters as input,
        and modifies it in-place. The return value is still logH.
        """
        logH = 0.0

        reps = 1
        if rng.rand() < 0.5:
            reps += int(np.power(100.0, rng.rand()))  # np.int was removed from NumPy; the builtin int suffices

        # print "going to perturb %d reps" % reps

        for i in range(reps):
            # print "   rep iteration %d" % i
            which = rng.randint(len(params))

            if which == 0:
              rad_idx = 0
              theta_idx =  2

              theta = params[theta_idx]

              #FIND THE MAXIMUM RADIUS STILL INSIDE THE DETECTOR
              theta_eq = np.arctan(detector.detector_length/detector.detector_radius)
              theta_taper = np.arctan(detector.taper_length/detector.detector_radius)
            #   print "theta: %f pi" % (theta / np.pi)
              if theta <= theta_taper:
                 z = np.tan(theta)*(detector.detector_radius - detector.taper_length) / (1-np.tan(theta))
                 max_rad = z / np.sin(theta)
              elif theta <= theta_eq:
                  max_rad = detector.detector_radius / np.cos(theta)
                #   print "max rad radius: %f" %  max_rad
              else:
                  theta_comp = np.pi/2 - theta
                  max_rad = detector.detector_length / np.cos(theta_comp)
                #   print "max rad length: %f" %  max_rad

              #AND THE MINIMUM (from PC dimple)
              #min_rad  = 1./ ( np.cos(theta)**2/detector.pcRad**2  +  np.sin(theta)**2/detector.pcLen**2 )

              min_rad = np.amax([detector.pcRad, detector.pcLen])

              total_max_rad = np.sqrt(detector.detector_length**2 + detector.detector_radius**2 )

              params[which] += total_max_rad*dnest4.randh()
              params[which] = dnest4.wrap(params[which] , min_rad, max_rad)

            elif which == 2:  # theta
              rad_idx = 0
              rad = params[rad_idx]

            #   print "rad: %f" % rad
              if rad < np.amin([detector.detector_radius - detector.taper_length, detector.detector_length]):
                  max_val = np.pi/2
                  min_val = 0
                #   print "theta: min %f pi, max %f pi" % (min_val, max_val)
              else:
                  if rad < detector.detector_radius - detector.taper_length:
                      #can't possibly hit the taper
                    #   print "less than taper adjustment"
                      min_val = 0
                  elif rad < np.sqrt(detector.detector_radius**2 + detector.taper_length**2):
                      #low enough that it could hit the taper region
                    #   print "taper adjustment"
                      a = detector.detector_radius - detector.taper_length
                      z = 0.5 * (np.sqrt(2*rad**2-a**2) - a)
                      min_val = np.arcsin(z/rad)
                  else:
                      #longer than could hit the taper
                    #   print "longer than taper adjustment"
                      min_val = np.arccos(detector.detector_radius/rad)

                  if rad < detector.detector_length:
                      max_val = np.pi/2
                  else:
                      max_val = np.pi/2 - np.arccos(detector.detector_length/rad)
                #   print "theta: min %f pi, max %f pi" % (min_val, max_val)

              params[which] += np.pi/2*dnest4.randh()
              params[which] = dnest4.wrap(params[which], min_val, max_val)

            # if which == 0:
            #     params[which] += (detector.detector_radius)*dnest4.randh()
            #     params[which] = dnest4.wrap(params[which] , 0, detector.detector_radius)
            elif which == 1:
                max_val = np.pi/4
                params[which] += np.pi/4*dnest4.randh()
                params[which] = dnest4.wrap(params[which], 0, max_val)
                if params[which] < 0 or params[which] > np.pi/4:
                    print "wtf phi"
                #params[which] = np.clip(params[which], 0, max_val)
            # elif which == 2:
            #     params[which] += (detector.detector_length)*dnest4.randh()
            #     params[which] = dnest4.wrap(params[which] , 0, detector.detector_length)

            elif which == 3: #scale
                min_scale = wf.wfMax - 0.01*wf.wfMax
                max_scale = wf.wfMax + 0.005*wf.wfMax
                params[which] += (max_scale-min_scale)*dnest4.randh()
                params[which] = dnest4.wrap(params[which], min_scale, max_scale)
            #   print "  adjusted scale to %f" %  ( params[which])

            elif which == 4: #t0
              params[which] += 1*dnest4.randh()
              params[which] = dnest4.wrap(params[which], min_maxt, max_maxt)
            elif which == 5: #smooth
              params[which] += 0.1*dnest4.randh()
              params[which] = dnest4.wrap(params[which], 0, 25)
                #   print "  adjusted smooth to %f" %  ( params[which])

                # elif which == 6: #wf baseline slope (m)
                #     logH -= -0.5*(params[which]/1E-4)**2
                #     params[which] += 1E-4*dnest4.randh()
                #     logH += -0.5*(params[which]/1E-4)**2
                # elif which == 7: #wf baseline incercept (b)
                #     logH -= -0.5*(params[which]/1E-2)**2
                #     params[which] += 1E-2*dnest4.randh()
                #     logH += -0.5*(params[which]/1E-2)**2

                #   params[which] += 0.01*dnest4.randh()
                #   params[which]=dnest4.wrap(params[which], -1, 1)
                #   print "  adjusted b to %f" %  ( params[which])

            else:  # velocity or rc params: can't be below 0, can be arbitrarily large
                print("which value %d not supported" % which)
                exit(0)


        return logH
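For context, the repeated move in the example above is the standard DNest4 proposal idiom: add heavy-tailed noise drawn from randh(), then fold the result back into the valid interval with wrap(). A minimal, self-contained sketch of the same idiom (the scalar setting and the bounds are illustrative):

import dnest4

def perturb_scalar(value, lo=0.0, hi=1.0):
    # heavy-tailed jump scaled to the interval width, folded back into [lo, hi)
    value += (hi - lo) * dnest4.randh()
    return dnest4.wrap(value, lo, hi)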
Example #57
0
    kernels = []
    for n in range(scales):
        kernel = np.zeros((image.shape[0], image.shape[1]))
        centerFrequency = 1 / wavelength
        for i in range(kernel.shape[0]):
            for j in range(kernel.shape[1]):
                y = i - (kernel.shape[0] // 2)  # integer offset from the kernel center
                x = j - (kernel.shape[1] // 2)

                normalizedY = y / (kernel.shape[0] / 2)
                normalizedX = x / (kernel.shape[1] / 2)

                normalizedRadius = math.sqrt(normalizedY * normalizedY +
                                             normalizedX * normalizedX)

                elementRadial = np.exp(-1 * np.power(
                    (normalizedRadius / (centerFrequency / 0.5)), 2) /
                                       (2 * (sigmaF) * (sigmaF)))
                theta = math.atan2(-y, x)
                deltaSin = math.sin(theta) * math.cos(centerAngle) - math.cos(
                    theta) * math.sin(centerAngle)
                deltaCosine = math.cos(theta) * math.cos(
                    centerAngle) + math.sin(theta) * math.sin(centerAngle)

                deltaTheta = abs(math.atan2(deltaSin, deltaCosine))

                elementAngular = np.exp(
                    (-1 * deltaTheta * deltaTheta) / (2 * thetaStd * thetaStd))

                kernel[i, j] = elementAngular * elementRadial
                if (kernel[i, j] < 0.001):
                    kernel[i, j] = 0
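The fragment ends before any per-scale bookkeeping; in a typical filter-bank construction each kernel would be collected and the wavelength advanced before the next scale. Both lines below are an assumed continuation, and scaleFactor is a hypothetical parameter:

        kernels.append(kernel)  # collect this scale's filter (assumed continuation)
        wavelength *= scaleFactor  # hypothetical multiplier advancing to the next scale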
Example #58
0
def refGroupNormFwd(inputs,
                    gamma,
                    beta,
                    groups,
                    mean=None,
                    inv_std_dev=None,
                    epsilon=0.0015,
                    data_format="NHWC"):
    if data_format == "NHWC":
        feature_index = 3
    elif data_format == "NCHW":
        feature_index = 1
    else:
        raise Exception("Unsupported data format " + data_format)

    num_channels = inputs.shape[feature_index]
    group_size = num_channels // groups
    original_shape = inputs.shape

    # Implementation detail - in Poplibs group norm, the groups are not
    # contiguous, but strided - we replicate that here
    # Move the channels to the first dimension for inputs, gamma and beta
    inputs = np.swapaxes(inputs, 0, feature_index)

    reshuffled_inputs = np.empty(inputs.shape, inputs.dtype)
    reshuffled_gamma = np.empty(gamma.shape, gamma.dtype)
    reshuffled_beta = np.empty(beta.shape, beta.dtype)

    for from_idx in range(num_channels):
        to_idx = (from_idx % groups) * group_size + from_idx // groups
        reshuffled_inputs[to_idx] = inputs[from_idx]
        reshuffled_gamma[to_idx] = gamma[from_idx]
        reshuffled_beta[to_idx] = beta[from_idx]
    inputs = np.swapaxes(reshuffled_inputs, 0, feature_index)
    gamma = reshuffled_gamma
    beta = reshuffled_beta

    if feature_index == 1:
        N, C, H, W = inputs.shape
        inputs = np.reshape(inputs, [N, groups, C // groups, H, W])
        gamma = np.reshape(gamma, [1, C, 1, 1])
        beta = np.reshape(beta, [1, C, 1, 1])
        moments_axes = (feature_index + 1, 3, 4)

        if mean is not None and inv_std_dev is not None:
            mean = np.reshape(mean, [N, groups, 1, 1, 1])
            inv_std_dev = np.reshape(inv_std_dev, [N, groups, 1, 1, 1])
    else:
        N, H, W, C = inputs.shape
        inputs = np.reshape(inputs, [N, H, W, groups, C // groups])
        gamma = np.reshape(gamma, [1, 1, 1, C])
        beta = np.reshape(beta, [1, 1, 1, C])
        moments_axes = (1, 2, feature_index + 1)

        if mean is not None and inv_std_dev is not None:
            mean = np.reshape(mean, [N, 1, 1, groups, 1])
            inv_std_dev = np.reshape(inv_std_dev, [N, 1, 1, groups, 1])

    if mean is None and inv_std_dev is None:
        mean = np.mean(inputs, moments_axes, dtype=np.float32, keepdims=True)
        variance = np.mean(np.power(inputs - mean, 2),
                           moments_axes,
                           dtype=np.float32,
                           keepdims=True)
    else:
        variance = np.power(inv_std_dev, -2) - epsilon

    input_whitened = (inputs - mean) * np.power(variance + epsilon, -0.5)
    input_whitened = np.reshape(input_whitened, original_shape)
    output = input_whitened * gamma + beta

    # Undo the shuffle.
    output = np.swapaxes(output, 0, feature_index)

    reshuffled_output = np.empty(output.shape, output.dtype)
    for to_idx in range(num_channels):
        from_idx = (to_idx % groups) * group_size + to_idx // groups
        reshuffled_output[to_idx] = output[from_idx]
    inv_std_dev = np.power(variance + epsilon, -0.5)
    return (np.swapaxes(reshuffled_output, 0, feature_index),
            np.reshape(np.squeeze(mean), (mean.size)),
            np.reshape(np.squeeze(inv_std_dev), (inv_std_dev.size)))
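A minimal smoke test, assuming NHWC input with 8 channels split into 2 groups (the data values are arbitrary):

x = np.random.rand(2, 4, 4, 8).astype(np.float32)
gamma = np.ones(8, dtype=np.float32)
beta = np.zeros(8, dtype=np.float32)
out, mean, inv_std_dev = refGroupNormFwd(x, gamma, beta, groups=2)
print(out.shape, mean.shape, inv_std_dev.shape)  # (2, 4, 4, 8) (4,) (4,)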
Example #59
0
def costFunc(X, y, theta, h=hypo):
    # mean squared error cost: J(theta) = (1 / (2m)) * sum((h(X, theta) - y) ** 2)
    m = np.shape(X)[0]
    diff = h(X, theta) - y
    cost_list = np.power(diff, 2)
    return np.sum(cost_list) / (2 * float(m))
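A usage sketch; hypo here is a hypothetical linear stand-in for the document's default hypothesis function:

def hypo(X, theta):
    return X.dot(theta)  # linear model prediction

X = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # bias column plus one feature
y = np.array([2.0, 3.0, 4.0])
theta = np.array([1.0, 1.0])
print(costFunc(X, y, theta, h=hypo))  # 0.0 for a perfect fit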
Example #60
0
        if is_kp:
            sift_descriptor += ret

    print("3)--- %s seconds ---" % (time.time() - start_time))
    # Plot the image
    plt.imshow(base, cmap='gray')

    i = np.array([])
    x = np.array([])
    y = np.array([])

    for index in range(len(extrema)):
        i = np.append(i, extrema[index][0])
        x = np.append(x, extrema[index][2])
        y = np.append(y, extrema[index][3])
    x = x * np.power(2, i)  # rescale extrema coordinates from octave i back to base-image resolution
    y = y * np.power(2, i)
    plt.scatter(x=y, y=x, c='r', s=10)  # x and y are interchanged in plotting
    print("4)--- %s seconds ---" % (time.time() - start_time))
    plt.show()
    print(len(keypoints))
    gray = img_orig
    sift = cv2.SIFT()  # older OpenCV API; newer releases use cv2.SIFT_create()
    kp_sift = sift.detect(gray, None)
    img1 = cv2.drawKeypoints(gray,
                             kp_sift,
                             flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    plt.imshow(img1)
    plt.show()
    print(len(kp_sift))