def loadData(path="../data/",k=5,log='add',pca_n=0,SEED=34):
	from pandas import DataFrame, read_csv
	from numpy import log as ln
	from sklearn.model_selection import KFold  # sklearn.cross_validation was removed; model_selection is the current module
	from sklearn.preprocessing import LabelEncoder
	from sklearn.preprocessing import StandardScaler
	train = read_csv(path+"train.csv")
	test = read_csv(path+"test.csv")
	id = test.id
	target = train.target
	encoder = LabelEncoder()
	target_nnet = encoder.fit_transform(target).astype('int32')
	feat_names = [x for x in train.columns if x.startswith('feat')]
	train = train[feat_names].astype(float)
	test = test[feat_names]
	if log == 'add':
		for v in train.columns:
			train[v+'_log'] = ln(train[v]+1)
			test[v+'_log'] = ln(test[v]+1)
	elif log == 'replace':
		for v in train.columns:
			train[v] = ln(train[v]+1)
			test[v] = ln(test[v]+1)      
	if pca_n > 0:
		from sklearn.decomposition import PCA
		pca = PCA(pca_n)
		train = pca.fit_transform(train)
		test = pca.transform(test)
	scaler = StandardScaler()
	scaler.fit(train)
	train = DataFrame(scaler.transform(train),columns=['feat_'+str(x) for x in range(train.shape[1])])
	test = DataFrame(scaler.transform(test),columns=['feat_'+str(x) for x in range(train.shape[1])])
	cv = list(KFold(n_splits=k, shuffle=True, random_state=SEED).split(train))  # list of (train_idx, valid_idx) folds
	return train, test, target, target_nnet, id, cv, encoder
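
A minimal usage sketch, not from the original project: it assumes Otto-Group-style train.csv/test.csv (an id column, feat_* columns, and a target column in train.csv) under the default ../data/ path, and simply iterates the returned folds.

# Hedged usage sketch: file paths and the Otto-style CSV layout are assumptions.
train, test, target, target_nnet, test_id, cv, encoder = loadData(
    path="../data/", k=5, log='add', pca_n=0, SEED=34)

for fold, (tr_idx, va_idx) in enumerate(cv):
    X_tr, X_va = train.iloc[tr_idx], train.iloc[va_idx]
    y_tr, y_va = target.iloc[tr_idx], target.iloc[va_idx]
    print("fold", fold, X_tr.shape, X_va.shape)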
Example #2
File: sebs.py Project: suredream/asema
def GKB_1(u_zref, zref, h, LAI, Wfol, Ta, pa):
   """Same as FKB_1, but then for spatial in- and output"""
   
   # Constants
   C_d = 0.2   # foliage drag coefficient
   C_t = 0.05  # heat transfer coefficient
   k = 0.41     # Von Karman constant
   Pr = 0.7    # Prandtl number
   hs = 0.009  # height of soil roughness obstacles (0.009-0.024)
   
   # Calculations
   Wsoil = 1.0 - Wfol
   h = ifthenelse(Wfol == 0.0, hs, h)
   
   z0 = 0.136 * h   # Brutsaert (1982)
   u_h0 = u_zref * ln(2.446) / ln ((zref - 0.667 * h)/z0) # wind speed at canopy height
   ust2u_h = 0.32 - 0.264 / exp(15.1 * C_d * LAI)
   ustarh = ust2u_h * u_h0
   nu0 = 1.327E-5 * (101325.0/pa) * (Ta / 273.15 + 1.0) ** 1.81 # kinematic viscosity
   n_h = C_d * LAI / (2.0 * ust2u_h ** 2.0)
   # First term
   F1st = ifthenelse(pcrne(n_h, 0.0), k * C_d / (4.0 * C_t * ust2u_h * (1.0 - exp(pcrumin(n_h)/2.0))) * Wfol ** 2.0, 0.0)
   # Second term
   S2nd = k * ust2u_h * 0.136 * Pr ** (2.0/3.0) * sqrt(ustarh * h / nu0) * Wfol ** 2.0 * Wsoil ** 2.0
   # Third term
   T3rd = (2.46 * (u_zref * k / ln(zref/hs) * hs / nu0) ** 0.25 - ln(7.4)) * Wsoil ** 2.0
   
   return F1st + S2nd + T3rd
Example #3
def binary_logistic_classification(X,y):
    """returns the weights vector"""
    from numpy import array, ones, zeros, c_, exp, log as ln
    #hyperparameters:
    λ = 0.0001
    η = 0.05
    max_iter = 1000
    tol = 0.1
    #initialize:
    m,n = X.shape
    x0 = ones(m)
    X = c_[x0, X]
    y = y.reshape(-1,1)
    θ = zeros(n+1).reshape(-1,1)
    
    for epoch in range(max_iter):
        z = X @ θ
        p = 1 / (1 + exp(-z))
        ε = p - y
        
        θ_ = array([0, *θ[1:]]).reshape(-1,1)   # no regularization for the bias
        g = (X.T @ ε + λ*θ_) / m
        θ = θ - η*g
        
        #check convergence
        J = -(ln(p)*y + ln(1-p)*(1-y)).sum() / m
        if J < tol: break
    else: print("increase the number of max iterations")
    return(θ)
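
A small self-contained check of the routine above on synthetic, linearly separable data; the data, variable names (rng, Xb, acc) and the 0.5 threshold are illustrative additions, not part of the original snippet.

# Illustrative usage on synthetic data.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
y = (X[:, 0] + X[:, 1] > 0).astype(float)      # linearly separable labels

w = binary_logistic_classification(X, y)       # (n+1, 1) weight vector, bias first

Xb = np.c_[np.ones(len(X)), X]                 # prepend the bias column, as the trainer does
p = 1 / (1 + np.exp(-Xb @ w))                  # sigmoid probabilities
acc = float(((p.ravel() > 0.5) == (y > 0.5)).mean())
print("training accuracy:", acc)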
Example #4
def pdf_x_0(L, l, delta, dv):
    if -L / 2 + l / 2 + dv < delta < L / 2 - l / 2 - dv:
        return 1 / l * ln(L / (L - l))
    if  L / 2 - l / 2 - dv < delta < L / 2 - l / 2 + dv:
        return 0
    if (delta > L / 2. - l / 2. + dv) and (delta > L / 2. - l / 2. - dv):
        return 1 / l * ln(L / (2 * delta))
Example #5
def q710(flow, dates):
    '''
    Compute the minimum 7-day flow with a 10-year return period (Q7,10).

    Parameters
    ----------
    flow : list
        Series of flows.
    dates : list
        Dates corresponding to each of the flows.

    Returns
    -------
    q : float
        Minimum 7-day flow with a 10-year return period.

    '''
    mean = [np.mean(flow[i:i + 7])
            for i in range(len(flow) - 7)]  # 7-day moving average
    dates = dates[7:]  # dates corresponding to the averages
    df = pd.DataFrame(mean, index=dates,
                      columns=['flow'])  # dataframe joining flows and dates
    df.index = pd.to_datetime(df.index)  # convert dates to datetime
    df['year'] = [x.year
                  for x in df.index]  # create a column with the year
    df_min = df.groupby(df.index.year).transform(
        'min').drop_duplicates()  # minimum flow of each year
    x_mean = df_min['flow'].mean()
    s = df_min['flow'].std()

    q = x_mean + s * (0.45 + 0.7797 * np.log(np.log(10 / 9)))  # natural log is np.log (np.ln does not exist)

    return q
Example #6
    def calc_fetch_factor(self):
        """
        Calculate fetch factor, defined as ratio between local friction 
        velocity and equilibrium friction velocity
        """

        X = self.X

        # Parameters relating to upwind site
        z0_X = self.z0_X
        SE_X = self.S_exposure_X

        # Parameters relating to site
        z0 = self.z0
        SE = self.S_exposure

        if not all([X, z0, z0_X, SE, SE_X]):
            print([X, z0, z0_X, SE, SE_X])
            raise ValueError("Not all parameters initialised!")

        # Determine m0
        m0 = calc_m0(X=X, z0=z0)

        # Determine fetch factor
        term1 = 1 - ln(z0_X / z0) / (0.42 + ln(m0))
        term2 = SE_X / ln(10 / z0_X)
        term3 = ln(10 / z0) / SE
        SX_X = term1 * term2 * term3

        return SX_X
Example #7
    def calc_iu(self, apply_cook_correction=True):
        """
        Calculate along-wind turbulence intensity per Deaves and Harris
        """

        zg = self.zg
        z0 = self.z0
        d = self.d
        z = self.get_z()

        # Define non-dimensional heights used in expression
        z_rel_g = (z - d) / zg
        z_rel_0 = (z - d) / z0

        num = 3 * (1 - z_rel_g) * ((0.538 + 0.09 * ln(z_rel_0))**(
            (1 - z_rel_g)**(16)))
        denom2 = 1 + 0.156 * ln(6 * zg / z0)
        denom1 = ln(z_rel_0)

        if apply_cook_correction:
            # Augment with additional terms per numerator
            # of eqn (9.12) in Cook Part 1
            denom1 += 5.75 * z_rel_g - 1.875 * z_rel_g**2 - (
                4 / 3) * z_rel_g**3 + (1 / 4) * z_rel_g**4

        i_u = num / (denom1 * denom2)

        self.i_u = i_u

        return i_u
Example #8
def compute_horizontal_vessel_purchase_cost(W, D, F_M):
    """
    Return the purchase cost [Cp; in USD] of a horizontal vessel,
    including the cost of platforms and ladders.
    
    Parameters
    ----------
    W : float
        Weight [lb].
    D : float
        Diameter [ft].
    F_M : float
        Vessel material factor.
    
    Notes
    -----
    The purchase cost is given by [1]_. See source code for details.
    The purchase cost is scaled according to BioSTEAM's Chemical
    Plant Cost Index, `biosteam.CE`.
    
    """
    # C_v: Vessel cost
    # C_pl: Platforms and ladders cost
    C_v = exp(5.6336 - 0.4599 * ln(W) + 0.00582 * ln(W)**2)
    C_pl = 2275 * D**0.20294
    return bst.CE / 567 * (F_M * C_v + C_pl)
Example #9
 def area_scheme_4(x1, x2, y1, y2):
     dx, dy = x2 - x1, y2 - y1
     dlny = ln(y2) - ln(y1)
     # m = dlny/dx
     return np.nan_to_num(
         dy / dlny, nan=y1, posinf=y1,
         neginf=y1) * dx  # nan_to_num is needed to take care of y2 == y1, where dlny == 0
Example #10
def pdf_x(L, l, delta, dv):
    if -L / 2 + l / 2 + dv < delta < L / 2 - l / 2 - dv:
        return 2 * dv / l * ln(L / (L - l))
    if  L / 2 - l / 2 - dv < delta < L / 2 - l / 2 + dv:
        return 1 / 2. + 1 / l * ((delta - dv) * ln(L - l) + (delta + dv) * (1 - ln(2 * (delta + dv))) + 2 * dv * ln(L) - L / 2.)
    if (delta > L / 2. - l / 2. + dv) and (delta > L / 2. - l / 2. - dv):
        return 1 / l * (2 * dv * (1 + ln(L / 2.)) - (delta + dv) * ln(delta + dv) + (delta - dv) * ln(delta - dv))
Example #11
def find_microstrip_width(er, d, z0=50.):
    """Calculate microstrip width required for a given characteristic 
    impedance.

    Args:
        er (float): relative permittivity
        d (float): thickness of the dielectric in [m]
        z0 (float): desired characterisitic impedance in [ohm]

    Returns:
        float: width of microstrip in [m]

    """

    a = z0 / 60 * sqrt((er + 1) / 2) + (er - 1) / (er + 1) * (0.23 + 0.11 / er)
    b = 60 * pi**2 / (z0 * sqrt(er))

    # Eqn. 3.197 in Pozar
    wd1 = 8 * exp(a) / (exp(2 * a) - 2)
    wd2 = 2 / pi * (b - 1 - ln(2 * b - 1) + (er - 1) / (2 * er) *
                    (ln(b - 1) + 0.39 - 0.61 / er))

    if isinstance(wd1, float):
        if a > 1.52:
            return wd1 * d
        elif wd2 >= 2:
            return wd2 * d
    elif isinstance(wd1, np.ndarray):
        wd = np.empty_like(wd1)
        mask = a > 1.52
        wd[mask] = wd1[mask]
        wd[~mask] = wd2[~mask]
        return wd * d
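
As a quick illustrative check (not from the source), the routine gives the familiar FR-4 result of W/d just under 2 at 50 Ω; this sketch assumes the function shares a module with NumPy bindings for sqrt, exp, pi, ln and np, as the snippet implies.

# Illustration: er/d describe generic 1.6 mm FR-4, values are assumptions.
import numpy as np
from numpy import sqrt, exp, pi, log as ln

w = find_microstrip_width(er=4.4, d=1.6e-3, z0=50.0)
print("microstrip width: %.2f mm" % (w * 1e3))   # roughly 3.1 mm, i.e. W/d ~ 1.9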
Example #12
def compute_vertical_vessel_purchase_cost(W, D, L, F_M):
    """
    Return the purchase cost [Cp; in USD] of a vertical vessel,
    including the cost of platforms and ladders.
    
    Parameters
    ----------
    W : float
        Weight [lb].
    D : float
        Diameter [ft].
    L : float
        Length [ft].
    F_M : float
        Vessel material factor.
    
    Notes
    -----
    The purchase cost is given by [1]_. See source code for details.
    The purchase cost is scaled according to BioSTEAM's Chemical
    Plant Cost Index, `biosteam.CE`.
    
    """
    # C_v: Vessel cost
    # C_pl: Platforms and ladders cost
    C_v = exp(7.1390 + 0.18255 * ln(W) + 0.02297 * ln(W)**2)
    C_pl = 410 * D**0.7396 * L**0.70684
    return bst.CE / 567 * (F_M * C_v + C_pl)
Example #13
 def SimhaEllipsoids(self, phi, d_p, p):
     # valid for p>>1
     lam = 1.8
     nu = 8 / 5 + p**2 * (15 * np.log(2 * p) -
                          lam)**-1 + p**2 * (5 * np.log(2 * p) - lam + 1)**-1
     visc = 1 + nu * phi
     return visc
Example #14
    def bisection_method(f, range2, eps):
        # print(range2)
        err = ((-1) * ln(eps / (range2[1] - range2[0]))) / ln(2)

        a = range2[0]
        ya = f(a)  # numpy.longdouble(f(a))

        b = range2[1]
        yb = f(b)  # numpy.longdouble(f(b))

        c = (b + a) / 2
        yc = f(c)  # numpy.longdouble(f(c))

        # c_prev = eps
        n = 1
        while n < err:
            c_prev = c

            if ya * yc < 0:
                b = c
                yb = yc
            # elif yc * yb < 0:
            elif ya * yc >= 0:
                a = c
                ya = yc
            else:
                return None

            c = (b + a) / 2
            yc = f(c)  # numpy.longdouble(f(c))

            n += 1  # advance the iteration counter (++n is a no-op in Python)

            if abs(c - c_prev) < eps:
                return c
Example #15
def _get_I_total(R0, d, t):
    from scipy.special import erf  # NumPy has no erf; SciPy's error function is assumed here
    A = np.log(R0)  # natural log is np.log (np.ln does not exist)
    B = np.log(1 + d)
    mu = 0.5 * A / B
    I = 0.5 * np.exp(0.25 * A**2 / B * np.sqrt(np.pi / B))
    I = I * np.sqrt(B) * (erf(t - mu) - erf(-mu))
    return I
Example #16
def compute_Stokes_law_York_Demister_K_value(P):
    """
    Return K-constant in Stoke's Law using the York-Demister equation.
    
    Parameters
    ----------
    P : float
        Pressure [psia].
    
    Examples
    --------
    >>> compute_Stokes_law_York_Demister_K_value(14)
    0.34409663
    >>> compute_Stokes_law_York_Demister_K_value(20)
    0.35
    >>> compute_Stokes_law_York_Demister_K_value(125)
    0.31894878
    
    Notes
    -----
    Equations are given by [2]_. See source code for details.
    
    """
    if P >= 0 and P <= 15.0:
        K = 0.1821 + (0.0029 * P) + (0.046 * ln(P))
    elif P > 15.0 and P <= 40.0:
        K = 0.35
    elif P > 40.0 and P <= 5500.0:
        K = 0.43 - 0.023 * ln(P)
    elif P < 0:
        raise ValueError('invalid negative Pressure')
    else:
        raise ValueError('invalid Pressure of over 5500 psia')
    return K
Example #17
 def SimhaRods(self, phi, d_p, p=10):
     # valid for p>>1
     lam = 1.5
     nu = 8 / 5 + p**2 * (15 * np.log(2 * p) -
                          lam)**-1 + p**2 * (5 * np.log(2 * p) - lam + 1)**-1
     visc = 1 + nu * phi
     return visc
Example #18
 def __init__(
         self,
         table = '1110', # NAND table (a two bit address is expected)
         width = 1,      # data bus width, one bit output
         name  = None):  # device name: None, use generic
     # check if table is a path to the table data
     if not self.tableCheck(table):
         # if table is a path, import table
         table = self.tableImport(table)
     # call parent class constructor
     Device.__init__(self, name)
     # find number of words
     words = ceil(len(table)/width)
     # size in power of 2
     size = ceil(ln(words)/ln(2))
     # complete table up to 2^size
     table += 'U'*(2**size-words)*width
     # record configuration
     self.configuration = size, width, table
     # instantiate output port
     self.Q = outPort(width, "Q")
     # register port
     self.outports.append(self.Q)
     # set default output port value
     self.Q.set(table[0:width])
     # done
     return
Example #19
def bisection_method(a, b, e, func):
    if func(a) * func(b) > 0.0:
        print('Try Again with different guess values')
    else:
        step = 1
        err = 10**-10
        error_check = -(ln(err / abs(b - a)) / (ln(2)))
        condition = True
        while condition:
            if step > error_check:
                print("The function does not match the bisection method")
                exit(0)
            m = (a + b) / 2

            if func(a) * func(m) < 0:
                b = m
            else:
                a = m

            step += 1
            condition = abs(func(m)) > e

        if func.__name__ == "derivative_of_f":
            if f(round(m, 1)) == 0.0:
                result.append(m)
        else:
            result.append(m)
Example #20
def B_func(Th33, Th1500):
    """calculates the coefficient of moisture-tension, used for water flux estimation"""

    if Th33 <= 0.0:
        Th33 = 0.4
        rwarn("Th33  < 0")
    if np.isneginf(Th33) or np.isposinf(Th33) or not Th33 == Th33:
        Th33 = 0.4
        rwarn("Th33 is NaN or inf")

    if Th1500 <= 0.0:
        Th1500 = 0.2
        rwarn("Th1500  <= 0")
    if np.isneginf(Th1500) or np.isposinf(Th1500) or not Th1500 == Th1500:
        Th1500 = 0.2
        rwarn("Th1500 is NaN or inf")

    D = ln(Th33) - ln(Th1500)
    B = (ln(1500) - ln(33)) / D

    def lbd_func(C):
        """processes the data from B_func, returning the slope of logarithmic tension-moisture curve"""
        if C == 0:
            return 0.0
        lbd = 1 / C
        return lbd

    return lbd_func(B)
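
An illustrative call with made-up moisture values; it assumes `ln` is the natural log (e.g. `from numpy import log as ln`), `np` is NumPy, and `rwarn` is the project's warning helper, which is only reached for invalid inputs.

# Illustration: field capacity 0.35 and wilting point 0.15 give a slope of about 0.22.
import numpy as np
from numpy import log as ln

print(round(B_func(0.35, 0.15), 3))   # ~0.222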
Example #21
def probability_cut_nooverlaps(lc, lf, delta):
    """
        Probability that we cut fiber of nooverlapped fibers
    """
    # type I and III
    if (lf <= lc / 2.0 - delta) and (lf <= lc / 2.0 + delta):
        # print "Varianta I or III ",
        return lc / lf * (ln(lc / (lc - lf))) - 1
    # type II
    if (lf < lc / 2.0 + delta) and (lf > lc / 2.0 - delta):
        # print "Varianta II ",
        return (
            1
            / 2.0
            / lf
            * (
                (lc + 2.0 * delta) * ln(2.0 / (lc + 2.0 * delta))
                - (ln(lc - lf) + 1) * (lc - 2.0 * delta)
                + 2.0 * lc * ln(lc)
            )
        )
    # type IV
    if (lf >= lc / 2.0 + delta) and (lf >= lc / 2.0 - delta):
        # print "Varianta IV ",
        return 1 + 1 / lf * (
            -lc + (lc / 2.0 - delta) * ln((lc + 2 * delta) / (lc - 2 * delta)) + lc * ln(2 * lc / (lc + 2 * delta))
        )
Example #22
def _get_I_total(R0, d, t):
    from scipy.special import erf  # NumPy has no erf; SciPy's error function is assumed here
    A = np.log(R0)  # natural log is np.log (np.ln does not exist)
    B = np.log(1 + d)
    mu = 0.5 * A / B
    I = 0.5 * np.exp(0.25 * A**2 / B * np.sqrt(np.pi / B))
    I = I * np.sqrt(B) * (erf(t - mu) - erf(-mu))
    return I
Example #23
def Gaussianfit(xlist, ylist):
    """Computes a gaussian fit in accordance to method of least square in terms of two list that are given."""
    if not isinstance((xlist, ylist), (np.generic, np.ndarray)):
        if isinstance((xlist, ylist), (list, tuple)):
            xlist = np.array(xlist)
            ylist = np.array([[arg] for arg in ylist])
        else:
            raise TypeError(
                "[GaussianFit] Can't make Gaussianfit with given input")
    if len(xlist) < 4 and len(xlist) == len(ylist):
        raise KeyError(
            "[GaussianFit] Can't make Gaussianfit due to too few values given")
    else:
        Error = lambda y, A, x, mu, sigma: np.log(y) - (np.log(A) - mu**2 / (
            2 * sigma**2) + 2 * mu * sigma / (2 * sigma**2) - x**2 /
            (2 * sigma**2))

        Constant_a = np.ones(len(xlist))
        line1 = np.ones(len(xlist))
        MatrixA = np.array([np.ones(len(xlist)), xlist, xlist**2]).T
        MatrixAT = MatrixA.T
        try:
            InverseA = np.linalg.inv(MatrixAT.dot(MatrixA))
            ylist2 = np.log(ylist)
            ylist3 = MatrixAT.dot(ylist2)
            Constants = InverseA.dot(ylist3)
        except Exception as E:
            raise E
        sigma = np.sqrt(-1 / (2 * Constants[2]))
        mu = Constants[1] * sigma**2
        A = np.exp(Constants[0] + mu**2 / (2 * sigma**2))
        print(A, mu, sigma)
        print(Constants[0])
        function = lambda x: A * np.exp(-(x - mu)**2 / (2 * sigma**2))
        return function
Example #24
def Ar(z, lf):
    #z is the dist between the 
    lf = float(lf)  # np.float alias was removed from NumPy; the builtin float is equivalent here
    #######################
            #int#
    #######################
    
    if type(z) == int:
        if z == 0:
            return 1
        if z == lf / 2:
            return 0
        return lf * (lf - 2. * z + 2. * z * (.6931471806 + ln((1. / lf) * z))) / (lf - 2. * z) ** 2
    
    #######################
            #ARRAY#
    #######################
    
    res = 2 * z / lf * (ln(2 * z / lf) - 1) + 1
    if any(z == lf / 2):
        z = list(z)
        ind_h = z.index(lf / 2)
        res[ind_h] = 0
        z = np.array(z)
    if any(z == 0):
        z = list(z)
        ind_z = z.index(0) 
        res[ind_z] = 1 
        z = np.array(z)
    return res 
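
A hedged example of calling the array branch (it assumes `ln` is NumPy's log and `np` is NumPy, as the snippet implies); the special cases z = 0 and z = lf/2 are patched to 1 and 0 respectively.

# Illustration only; expect a harmless divide-by-zero warning for the z = 0 entry.
import numpy as np
from numpy import log as ln

z = np.array([0.0, 1.0, 2.5, 5.0])
print(Ar(z, 10.0))   # first entry forced to 1, last entry (z == lf/2) forced to 0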
Example #25
def G_vonKarman(z,
                f,
                U_ref=20.0,
                z_ref=10.0,
                z0=0.05,
                Lx=120.0,
                make_plot=False):
    """
    Von Karman expression for along-wind turbulence autospectrum
    
    z       : Height above ground (m), 1D array expected
    f       : Frequency (Hz), 1D array expected
    
    U_10    : mean wind speed at height z_ref (m/s)
    z_ref   : reference height for mean wind speed (m) - see above
    z0      : ground roughness (m)
    
    Lx      : along-wind turbulence length scale    
    
    """

    # Determine shear velocity
    u_star = 0.4 * U_ref / ln(z_ref / z0)

    # Calculate mean wind speed at heights requested
    U_z = u_star / 0.4 * ln(z / z0)

    # Calculate Lx at each height
    if isinstance(Lx, float):
        Lx_z = Lx * numpy.ones((len(z), ))
    else:
        # Assume function provided
        Lx_z = Lx(z)

    # Determine along-wind turbulence spectrum at requested frequencies
    Gv = numpy.empty((len(f), len(U_z)))

    for i, (U, Lx) in enumerate(zip(U_z, Lx_z)):

        numerator = 4 * (5.7 * u_star**2) * (Lx / U)
        denominator = 1.339 * (1 + 39.48 * (f * Lx / U)**2)**(5 / 6)
        Gv[:, i] = numerator / denominator

    if make_plot:

        fig, axarr = plt.subplots(2, sharex=True)

        ax = axarr[0]
        ax.plot(f, Gv)

        ax = axarr[1]
        ax.plot(fm, numpy.abs((Gv.T * f).T))
        ax.set_xlim([-fs / 2, +fs / 2])
        ax.set_xlabel("Frequency f (Hz)")
        ax.set_ylabel("f.G(f)")
        #ax.set_xscale("log")#, nonposy='clip')
        ax.set_yscale("log")  #, nonposy='clip')

    return Gv, U_z
Example #26
 def chi(self, phi1, phi2):
     """calculate chi from ph1 and phi2
     this is a rearrangement of the equation dfmix_by_dphi(phi1,chi)=dfmix_by_dphi(phi2,chi)
     for chi where phi1 and phi2 are the volume fractions of protein in the dilute and condensed
     phases, respectively.
     """
     return (1. / self.N1 * ln(phi2 / phi1) + 1. / self.N2 * ln(
         (1 - phi1) / (1 - phi2))) / (2. * phi2 - 2. * phi1)
Example #27
File: sebs.py Project: suredream/asema
def PSIma(f, g):
   a = 0.33
   b = 0.41
   pi = 3.141592654
   tangens = scalar(atan((2.0 * g - 1.0) / sqrt(3.0))) * pi /180
   tangens = ifthenelse(tangens > pi/2.0, tangens - 2.0 * pi, tangens) 
   PSIma = ln(a + f) - 3.0 * b * f ** (1.0 / 3.0) + b * a ** (1.0 / 3.0) / 2.0 * ln((1 + g) ** 2.0 / (1.0 - g + sqrt(g))) + sqrt(3.0) * b * a ** (1.0 / 3.0) * tangens
   return PSIma
Example #28
def maxed_func(phi1, neg=True):
    ap_bin1, ap_bin2 = apriori / sum(apriori)
    c = y_intercept + 1 * sigma_rr * y_step  # upperline
    denom = (0.5 * phi1 + c)
    phi2 = (c - 0.5 * phi1)
    entr = 1 / denom * (phi1 * (ln(phi1 / denom) - ln(ap_bin1)) + phi2 *
                        (ln(phi2 / denom) - ln(ap_bin2)))
    return (entr if not neg else -entr)
Example #29
def log (a, b):
	try:
		try:
			return div(ln(a), ln(b))
		except AttributeError:
			return div(math.log(a), math.log(b))
	except ValueError:
		return nan
Example #30
def pascal(k, p):
    tr = 1
    qr = ln(1 - p)
    for _ in range(k):
        r = random()
        tr = tr * r
    x = floor(ln(tr) / qr)
    return x
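
A short sampling sketch (the parameter values are arbitrary); it assumes `ln`, `floor` and `random` come from `math`/`random` in the same module, as the body implies.

# Illustration only: draws samples from the inverse-transform generator above.
from math import log as ln, floor
from random import random, seed

seed(42)
samples = [pascal(k=3, p=0.4) for _ in range(10000)]
print("empirical mean:", sum(samples) / len(samples))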
Example #31
def calculate_democrat_and_republican_probability(probabilities, row_features):
    democrat_probability = 0
    republican_probability = 0
    for h in range(1, 17):
        democrat_probability += ln(probabilities['democrat_col' + str(h) +
                                                 '_' + row_features[h - 1]])
        republican_probability += ln(probabilities['republican_col' + str(h) +
                                                   '_' + row_features[h - 1]])
    return democrat_probability, republican_probability
Example #32
def aleatoria_normal_muller(cant_numeros, media, desviacion):
    numeros = []

    for i in range(round(cant_numeros/2)):
        n1 = (pow((-2*ln(random.uniform(0,1))),0.5)*math.cos(2*math.pi*random.uniform(0,1)))*desviacion + media
        n2 =  (pow((-2*ln(random.uniform(0,1))),0.5)*math.sin(2*math.pi*random.uniform(0,1)))*desviacion + media
        numeros.append(n1)
        numeros.append(n2)
    return numeros[:cant_numeros]
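
A quick sanity check of the Box-Muller generator above (it assumes `ln` is `math.log` and that `math`/`random` are imported in the same module); the parameter values are illustrative.

# Illustration: mean and standard deviation should land near the requested 10 and 2.
import math
import random
from math import log as ln

random.seed(0)
xs = aleatoria_normal_muller(10000, media=10.0, desviacion=2.0)
m = sum(xs) / len(xs)
s = (sum((x - m) ** 2 for x in xs) / len(xs)) ** 0.5
print(round(m, 2), round(s, 2))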
Example #33
 def area_scheme_3(x1, x2, y1, y2):
     """
     0<=x1<=x2
     """
     dx, dy = x2 - x1, y2 - y1
     dlnx = ln(x2) - ln(x1)
     # m = dy/dlnx
     return y1 * dx + dy * x2 - dy * np.nan_to_num(
         dx / dlnx, nan=x1, posinf=x1, neginf=x1)
Example #34
 def cross_entropy_loss(*args):
     #args[0] is the expected value
     #args[1] is the value received
     #args[2] False: calculate error. True: calculate derivative of error
     t = args[0]
     y = args[1]
     if not args[2]:
         return t * np.log(y) + (1 - t) * np.log(1 - y)
     elif args[2]:
         return y - t
Example #35
def Gb(Hb, Wb, LG):
    X = min(Hb, Wb)
    Y = max(Hb, Wb)
    A = 0.5 * (X / Y) * (4 / math.pi - X / Y)
    B = ln(1 + (Y**2) / (X**2))
    C = ln(X / 2)
    rb = math.exp(A * B + C)
    Ub = LG / rb
    Gb = ln(Ub + (Ub**2 - 1)**0.5)
    return Gb
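
An illustrative call (the dimensions and their units are assumptions, taken to be millimetres); it assumes `math` is imported and `ln` is bound to `math.log` in the same module.

# Illustration only: a 500 x 300 backfill cross-section with LG = 1000.
import math
from math import log as ln

print(Gb(Hb=500.0, Wb=300.0, LG=1000.0))   # ~2.3 for these proportions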
Example #36
def T4_flat_side(rt_s, L, s1, De):
    ### rt_s : Soil resistivity
    ### L : distance from cable centre to ground surface (mm)
    ### s1 : separation of two cable (mm)
    ### De : Cable external OD
    u = 2 * L / De
    A = ln(u + (u**2 - 1)**0.5)
    B = 0.5 * ln(1 + (2 * L / s1)**2)
    T4_side = (rt_s / (2 * math.pi)) * (A + B)
    return T4_side
Example #37
def wish_dist(Wi, Wj, k):
    """function to find the Wishart between two covariance matrice
    Returns distance

    Parameters
    ----------
    Wi, Wj : complex matrix
        covariance matrices
    k : int
        distance definition type to be used for calculation.
    Returns
    -------
    data : float
        distance
    """

    # sum of covariance matrices
    Wij = Wi + Wj

    # log of determinants, using the analytically reduced form of
    # log(det(W)) from Rignot and Chellappa (1992)
    log_j = np.log(
        Wj(1, 1) * Wj(2, 2) * Wj(3, 3) * (1 - (np.real(Wj(1, 3)) ** 2)))
    log_i = np.log(
        Wi(1, 1) * Wi(2, 2) * Wi(3, 3) * (1 - (np.real(Wi(1, 3)) ** 2)))
    log_ij = np.log(
        Wij(1, 1) * Wij(2, 2) * Wij(3, 3) * (1 - (np.real(Wij(1, 3)) ** 2)))

    # absolute of trace of inverse matrices
    tri = np.abs(alg.trace(alg.pinv(Wj) * Wi))
    trj = np.abs(alg.trace(alg.pinv(Wi) * Wj))

    if k == 1:
        # default Wishart distance
        dist = log_j + tri

    if k == 2:
        # symmetric Wishart distance
        dist = .5 * (log_i + log_j + tri + trj)

    if k == 3:
        # Bartlett distance
        dist = 2 * log_ij - log_i - log_j

    if k == 4:
        # revised Wishart distance
        dist = log_j - log_i + tri

    if k == 5:
        # another distance
        dist = tri + trj

    return dist
Example #38
File: sebs.py Project: suredream/asema
def Cw(hi, L, z0, z0h):
   alfa = 0.12
   beta = 125.0
   C0 = (alfa / beta) * hi
   C1 = pcrumin(z0h) / L
   C11 = -alfa * hi / L
   C21 = hi / (beta * z0)
   C22 = -beta * z0 / L
   C = ifthenelse(z0 < C0, pcrumin(ln(alfa)) + PSIh_y(C11) - PSIh_y(C1), ln(C21) + PSIh_y(C22) - PSIh_y(C1))
   Cw = ifthenelse(C < 0.0, 0.0, C) # This results from unfortunate parameter combination!
   return Cw
Example #39
def calculate_air_inleakage(V, P):
    """
    Return air in-leakage in kg/hr.
    
    Parameters
    ----------
    V : float
        Vacuum volume in m3
    P : float
        Suction pressure in Torr
    """
    return 5 + (0.0298 + 0.03088 * ln(P) - 5.733e-4 * ln(P)**2) * V**0.66
Example #40
def le(L, l):
    '''
        Solve embedded length l_e of fibers (including null values) (integral)
    '''
    if L < l:
        #print 'very short specimen',
        return L / 4.
    if L < 2. * l:
        #print 'short specimen',
        return -L / 4. - L / 4. * ln(L / 2.) + L / 4. * ln(L) + 1 / 2. * l * (1 - L / (2. * l)) + 1 / 4. * L * ln(L / (2. * l)) + l / 4.
    if L >= 2. * l:
        #print 'long specimen',
        return -1 / 4. * l - 1 / 4. * L * ln(L - l) + 1 / 4. * L * ln(L)
Example #41
def prob( lc, lf, delta ):
    #type I and III
    if ( lf <= lc / 2. - delta ) and ( lf <= lc / 2. + delta ):
        print "Varianta I or III ",
        return lc / lf * ( ln( lc / ( lc - lf ) ) ) - 1
    #type II
    if ( lf < lc / 2. + delta ) and ( lf > lc / 2. - delta ):
        print "Varianta II ",
        return 1 / 2. / lf * ( ( lc + 2. * delta ) * ln( 2. / ( lc + 2. * delta ) ) - ( ln( lc - lf ) + 1 ) * ( lc - 2. * delta ) + 2. * lc * ln( lc ) )
    #type IV
    if ( lf >= lc / 2. + delta ) and ( lf >= lc / 2. - delta ):
        print "Varianta IV ",
        return  1 + 1 / lf * ( -lc + ( lc / 2. - delta ) * ln( ( lc + 2 * delta ) / ( lc - 2 * delta ) ) + lc * ln( 2 * lc / ( lc + 2 * delta ) ) )
Example #42
File: sebs.py Project: suredream/asema
def u_pbl(NDVI):
   """Calculates Planetary Boundary Layer wind speed [m s-1] from NDVI
   
   NDVI Input PCRaster NDVI map (scalar, ratio between 0 and 1)"""
   
   z0m = 0.005 + 0.5 * (nd_mid/nd_max) ** 2.5
   assert z0m >= 0.0
   fc = ((nd_mid - nd_min) / nd_df) ** 2.0    # fractional vegetation cover == Wfol (-)
   assert fc >= 0.0
   h = z0m / 0.136                            # total height of vegetation (m)
   d = 2.0/3.0 * h			      # zero plane displacement (m)
   u_c = ln((z_pbl - d) / z0m) / ln((z_ms - d) / z0m)
   u_pbl = u_s * u_c
   return u_pbl, z0m, d, fc, h
Example #43
 def _get_results(self):
     """dividing the interval <a,b>,
     returns aprox x, error estimation, No. of steps ..."""
     int = [self.a, self.b]
     if self.f(self.a) * self.f(self.b) > 0:
         print "None or more than 1 roots in selected interval"
     else:
         while abs(int[0] - int[1])/2 > self.error:
             if self.f(int[0]) * self.f((int[0] + int[1])*0.5) < 0:
                 int.insert(1,(int[0]+int[1])*0.5), int.pop()
             else:
                 int.insert(1,(int[0]+int[1])*0.5), int.pop(0) 
         return [(int[0] + int[1])/2,
                 abs(int[0] - int[1])/2,
                 (ln(2)-ln( (abs( int[0] - int[1] ) /2)/(self.b-self.a)))/ln(2)]
Example #44
 def get_eps_x_reinf (self, crack_x):
     self.cbs_allocation(abs(crack_x[0]))
     self.weakest_cb_check()
     if  crack_x[0]  not in self.unsorted_crack_list:
         self.unsorted_crack_list.append(crack_x[0])
         le_array, phi_array, f_array , tau_array = self.fiber_data_tuple[2][self.crack_list.index(abs(crack_x[0]))] , self.fiber_data_tuple[1][self.crack_list.index(abs(crack_x[0]))], self.fiber_data_tuple[3][self.crack_list.index(abs(crack_x[0]))], self.fiber_data_tuple[4][self.crack_list.index(abs(crack_x[0]))]
         diff = self.sum_of_fiberforces(crack_x, le_array, phi_array , f_array, tau_array)
         self.diff_list.append(diff)
     index_lists = self.unsorted_crack_list.index(crack_x[0])
     F = self.P - self.diff_list[index_lists]
     #Ar_fibers = self.active_fibers_list[index_lists] * Pi * self.r **2
     #########TESTPLOT#######################
     Af = len(le_array) * Pi * self.r ** 2 * 2
     plt.plot(crack_x, self.ar_list[index_lists] / Af)
     lf = self.fiber_length
     z = crack_x
     r = self.r
     testAr = 2 * Pi * r ** 2 * (-2 * z + z * ln(2 * z / lf) + lf) / lf
     #testAcki = (10 * [10 - 2. * z + 2. * z * (.6931471806 + ln((1 / 10) * z))]) / (10 - 2. * z) ** 2
     #plt.plot(crack_x, testAcki / (2 * Pi * r ** 2))
     plt.plot(crack_x, testAr / (2 * Pi * r ** 2))
     plt.plot(crack_x, Ar_alc(z, self.fiber_length))
     #maxAr = np.max(self.ar_list[index_lists])
     #minAr = np.min(self.ar_list[index_lists])
     #x_gerade = [0, 4.5]
     #y_gerade = [maxAr, minAr]
     #plt.plot(x_gerade, y_gerade)
     #plt.plot()
     plt.show()
     ################################
     Ar_fibers = self.ar_list[index_lists]
     eps = F / self.Er / Ar_fibers
     return eps * H(eps)
Example #45
File: utils.py Project: sagittaeri/htt
def significance(signal, background, min_bkg=0, highstat=True):

    if isinstance(signal, (list, tuple)):
        signal = sum(signal)
    if isinstance(background, (list, tuple)):
        background = sum(background)
    sig_counts = np.array(list(signal.y()))
    bkg_counts = np.array(list(background.y()))
    # reverse cumsum
    S = sig_counts[::-1].cumsum()[::-1]
    B = bkg_counts[::-1].cumsum()[::-1]
    exclude = B < min_bkg
    with np.errstate(divide='ignore', invalid='ignore'):
        if highstat:
            # S / sqrt(S + B)
            sig = np.ma.fix_invalid(np.divide(S, np.sqrt(S + B)),
                fill_value=0.)
        else:
            # sqrt(2 * (S + B) * ln(1 + S / B) - S)
            sig = np.sqrt(2 * (S + B) * np.log(1 + S / B) - S)
    bins = list(background.xedges())[:-1]
    max_bin = np.argmax(np.ma.masked_array(sig, mask=exclude))
    max_sig = sig[max_bin]
    max_cut = bins[max_bin]
    return sig, max_sig, max_cut
Example #46
def calc_bethe_entropy(B):
    """Calculate the Bethe entropy given beliefs B"""
    Sbethe = 0
    for roti,rotj in rotedges:
        try:
            if B[(roti,rotj)]>0:
                Sbethe -= B[(roti,rotj)]* ln(B[(roti,rotj)])
        except RuntimeError:
            pass  # can't find edge. . .
    sumqi = sum([len(res2partners[resid])-1 for resid in resids])
    blogb = 0
    for roti in eg.GetVertexIDs():
        if B[roti]>0:
            blogb += B[roti]* ln(B[roti])
    Sbethe += sumqi*blogb
    return Sbethe
Example #47
File: sebs.py Project: suredream/asema
def Bw(hi, L, z0):
   # constants (Brutsaert, 1999)
   alfa = 0.12
   beta = 125.0
   
   # calculations
   B0 = (alfa / beta) * hi
   B1 = -1.0 *z0 / L
   B11 = -alfa * hi / L
   B21 = hi / (beta * z0)
   B22 = -beta * z0 / L
   tempB11 = PSIm_y(B11)
   tempB1 = PSIm_y(B1)
   B = ifthenelse(z0 < B0, -1.0 * ln(alfa) + PSIm_y(B11) - PSIm_y(B1), ln(B21) + PSIm_y(B22) - PSIm_y(B1))
   Bw = ifthenelse(B < 0.0, 0.0, B) # This results from unfortunate parameter combination!
   return Bw
Example #48
def calc_meanfield_entropy(B):
    """Calculate the mean-field entropy given beliefs B"""
    Smeanfield = 0
    for roti in eg.GetVertexIDs():
        if B[roti]>0:
            Smeanfield -= B[roti] * ln(B[roti])
    return Smeanfield
Example #49
File: sebs.py Project: suredream/asema
def FKB_1(u_zref, zref, h, LAI, Wfol, Ta, pa):
   """Initial determination of roughness length for heat transfer (non-spatial)
   KB-1 function according to Massman, 1999
   Convention of variable names:
   f_z = f(z)
   d2h = d/h
   
   u_zref Input wind speed at reference height [m s-1]
   zref Input reference height [m]
   h Input canopy height [m]
   LAI Input canopy total Leaf Area Index [-]
   Wfol Input Fractional canopy cover [-]
   Ta Input ambient temperature [degrees Celsius]
   pa Input ambient air pressure [Pa]"""

   # Constants
   C_d = 0.2   # foliage drag coefficient
   C_t = 0.01  # heat transfer coefficient
   k = 0.41     # Von Karman constant
   Pr = 0.7    # Prandtl number
   hs = 0.009  # height of soil roughness obstacles (0.009-0.024)
   
   # Calculations
   Wsoil = 1.0 - Wfol
   if Wfol == 0.0: # for bare soil take soil roughness
      h = hs
   assert Wfol >= 0.0 and Wfol <= 1.0 and Wsoil >= 0.0 and Wsoil <= 1.0
   z0 = 0.136 * h   # Brutsaert (1982)
   u_h0 = u_zref * ln(2.446) / ln ((zref - 0.667 * h) / z0) # wind speed at canopy height
   u_h0 = cellvalue(u_h0, 0, 0)
   u_h0 = u_h0[0]
   assert u_h0 >= 0.0
   ust2u_h = 0.32 - 0.264/exp(15.1 * C_d * LAI)
   ustarh = ust2u_h * u_h0
   nu0 = 1.327E-5 * (101325.0 / pa) * (Ta / 273.15 + 1.0) ** 1.81 # kinematic viscosity
   n_h = C_d * LAI / (2.0 * ust2u_h ** 2.0)
   # First term
   if n_h != 0.0:
      F1st = k * C_d / (4.0 * C_t * ust2u_h * (1.0 - exp(pcrumin(n_h)/2.0))) * Wfol ** 2.0
   else:
      F1st = 0.0
   # Second term
   S2nd = k * ust2u_h * 0.136 * Pr ** (2.0/3.0) * sqrt(ustarh * h / nu0) * Wfol ** 2.0 * Wsoil ** 2.0
   # Third term
   T3rd = (2.46 * (u_zref * k / ln(zref/hs) * hs / nu0) ** 0.25 - ln(7.4)) * Wsoil ** 2.0
   
   return F1st + S2nd + T3rd
Example #50
 def mean_approx(self,l):
     yarn = self._get_gbundle_props() 
     # bundle length
     l_b = yarn[0]
     # mean and stdev of Gaussian bundle
     mu_b = yarn[1]
     gamma_b = yarn[2]
     # No. of bundles in series 
     nb = l/l_b
     if nb == 1:
         return mu_b, mu_b, mu_b
     w = ln(nb)
     # approximation of the mean for k = (1;300) (Mirek)
     mu = mu_b + gamma_b * (-0.007 * w**3 + 0.1025 * w**2 - 0.8684 * w)
     ### approximation for the mean from extremes of Gaussian (tends to Gumbel as mb grows large)
     a = gamma_b / sqrt(2*w)
     b = mu_b + gamma_b * ( (ln(w) + ln(4*pi))/ sqrt(8 * w) - sqrt(2 * w))
     med = b + a * ln(ln(2))
     mean = b - 0.5772156649015328606 * a
     return mu, mean, med
Example #51
File: sebs.py Project: suredream/asema
def PSIh_y(Y):
   # Integrated stability correction function for heat
   # Inputs
   # Y = -z/L, z is the height, L the Obukhov length
   # constants (Brutsaert, 1999)
   c = 0.33
   d = 0.057
   n = 0.78
   # Calculation
   Y =  abs(Y)
   PSIh_y = (1.0 - d) / n * ln((c + Y ** n) / c)
   return PSIh_y
Example #52
def calc_bethe_avg_E(B):
    """Calculate the Bethe average energy given beliefs B"""
    Ubethe = 0
    for xi,xj in rotedges:
        try:
            Ubethe -= B[(xi,xj)] * ln(psi[xi,xj])
        except RuntimeError:
            pass  # can't find edge. . .
    for xi in phi.keys():
        #Ubethe -= B[xi]* ln(phi[xi])
        Ubethe -= B[xi] * eg.GetVertexEnergy(xi) 
    return Ubethe
Example #53
def inverse_boltzmann(P_q,kT=4.1e-21):
    """
    returns the inverse boltzman; given a probability of being at a given
    extension 

    Args:
        P_q: the probability at each extension
        kT: boltzmann's constant
    
    Returns:
        G_eqm(q), from Gupta, A. N. et al, Nat Phys 7, 631-634, 2011.
    """
    return -kT * np.log(P_q)  # use the P_q argument; natural log is np.log (np.ln does not exist)
Example #54
File: Pumps.py Project: danielct/Honours
    def __init__(self, sigma, P0, Pth):
        """
        Make a Gaussian pump with maximum power P0 * Pth, where Pth is the
        threshold value of P in the normalised GPE.

        Parameters:
            sigma: value of sigma for the Gaussian. Should be in SI units
            P0: Maximum pump power. In units of Pth.
            Pth: The value of the threshold power in the scaled GPE.
        """
        self.charSize = 2.0 * np.sqrt(2 * np.log(2)) * sigma  # FWHM of a Gaussian is 2*sqrt(2 ln 2)*sigma
        self.function = lambda x, y: (P0 * Pth *
                                      np.exp(-0.5 * (x**2 + y**2) / sigma**2))
Example #55
def gaussian_kernel(hpbw, dim=2):
    import numpy
    from numpy import log as ln
    from numpy import sqrt
    sigma = hpbw / 2. / sqrt(2 * ln(2))
    sigma2 = 2 * sigma**2.
    s = int(hpbw/2. * 3 + 1)
    if dim == 1:
        x = numpy.mgrid[-s:s+1]
        g = numpy.exp(-(x ** 2 / sigma2))
    elif dim == 2:
        x, y = numpy.mgrid[-s:s+1, -s:s+1]
        g = numpy.exp(-(x ** 2 / sigma2 + y ** 2 / sigma2))
    else:
        return None
    return numpy.array(g/g.sum(), dtype=numpy.float32)
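
For instance (an illustrative call, not from the source project), a 2-D kernel built for a 5-pixel half-power beam width comes out normalised to unit sum.

# Illustration only: prints the kernel's shape and verifies it is normalised.
k = gaussian_kernel(5.0, dim=2)
print(k.shape, float(k.sum()))   # (17, 17) and a sum of ~1.0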
Example #56
def model_eps(q,rs,ThomasFermi=True):

  kF = 3.63/(rs) # in A^-1, for a free electron gas
  k0 = 0.815*kF*(rs)**0.5 # this is from AM
  q  = q*kF # parser script is giving things in terms of q/kF

  if ( ThomasFermi == True):

    Fq = 1.0

  else:

    beta = q/(2*kF)
    Fq = (1.0/(8.0*beta))*(4*beta + 2*(1 - beta**2)*ln(abs((1 + beta)/(1 - beta))))

  eps = (1 + (k0**2.0)/(q**2.0)*Fq)
  return eps
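
A brief comparison call with illustrative inputs; the Lindhard branch needs `ln` bound to a log function, e.g. `from numpy import log as ln`, in the module that holds the snippet.

# Illustration: dielectric function at q = 0.5 kF for rs = 2, with and without Thomas-Fermi screening.
from numpy import log as ln

print(model_eps(0.5, 2.0, ThomasFermi=True))    # roughly 6.3
print(model_eps(0.5, 2.0, ThomasFermi=False))   # roughly 6.2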
Example #57
File: sebs.py Project: suredream/asema
def PSIm_y(Y):
   # Integrated stability correction function for momentum
   # Inputs
   # Y = -z/L, where z is the height, L the Obukhov length
   # test values
   
   # Constants (Brutsaert, 1999)
   a = 0.33
   b = 0.41
   m = 1.0
   pi= 3.141592654
   
   # Calculation
   #//HK 040902 
   Y = abs(Y) #abs(Y)
   x = (Y/a) ** (1.0/3.0)
   PSI0 = pcrumin(ln(a)) + sqrt(3.0) * b * a ** (1.0 / 3.0) * pi / 6.0
   b_3 = b ** -3.0
   PSIm_y = ifthenelse(Y <= b_3, PSIma(Y, x) + PSI0, PSIma(b_3, ((b_3/a)**(1.0/3.0))) + PSI0)
   #PSIm_y = ifthenelse(Y <= b_3, PSIma(Y, x) + PSI0, (1.0 / (PSIma(b_3, ((b_3/a)**(1.0/3.0))))) + PSI0)
   return PSIm_y
Example #58
def tex(hdu, tau, freq=None, Tbg=None):
    tau, tau_str = get_quantity(tau, '')    
    T = get_quantity(hdu.data, get_bunit(hdu))[0]
    freq = get_freq(hdu, freq)
    Tbg = get_Tbg(Tbg)
    
    logger.info('(tex) tau={tau_str}, freq={freq}, Tbg={Tbg}'.format(**locals()))
    logger.info('(tex) start calculation')

    hvk = h * freq / k_B
    A = T / hvk / (-1 * expm1(-tau))
    B = 1 / expm1(hvk / Tbg)
    tex = hvk * (ln(1 + (A + B)**-1))**-1
    
    logger.debug('(tex) hv/k : {0:.3e}'.format(hvk.to('Hz K s')))
    logger.debug('(tex) hv/k/expm1(hv/kTbg) : {0:.3e}'.format(B * hvk.to('Hz K s')))
    logger.info('(tex) done')
    
    new_header = hdu.header.copy()
    new_header['BUNIT'] = 'K'
    new_hdu = astropy.io.fits.PrimaryHDU(tex.to('K').value, new_header)
    return new_hdu
Example #59
def tau(hdu, Tex, freq=None, Tbg=None):
    tex, tex_str = get_quantity(Tex, 'K')
    T = get_quantity(hdu.data, get_bunit(hdu))[0]
    freq = get_freq(hdu, freq)
    Tbg = get_Tbg(Tbg)
    
    logger.info('(tau) Tex={tex_str}, freq={freq}, Tbg={Tbg}'.format(**locals()))
    logger.info('(tau) start calculation')
    
    hvk = h * freq / k_B
    A = 1 / expm1(hvk / tex) 
    B = 1 / expm1(hvk / Tbg)
    tau = -ln(1 - T/hvk * (A - B)**-1)
    
    logger.debug('(tau) hv/k : {0:.3e}'.format(hvk))
    logger.debug('(tau) 1/expm1(hv/kTbg) : {0:.3e}'.format(B))
    logger.info('(tau) done')
    
    new_header = hdu.header.copy()
    new_header['BUNIT'] = ''
    new_hdu = astropy.io.fits.PrimaryHDU(tau.to('').value, new_header)
    return new_hdu
Example #60
    for j in range( 0, n_spec ):
        cosO = rand( 1, n ) # technically it should be 1 - rand()
        lx = lf * cosO # n numbers
        volna = lf - lx
        sx = rand( 1, n ) * volna + lf / 2. * cosO
        vec_cut += cut_func( sx, lx, sec )
    v.append( sum( vec_cut ) )
print v

#for k in range (0,len(volno[0])): 
#    print volno[0][k]
    
    
N = n_spec * n  

p = ( 1. / 2. - delta / lf ) * ln( ( lf + 2. * delta ) / ( lf - 2. * delta ) ) + ln( 2. * lf / ( lf + 2. * delta ) )
print p
rv = binom( N, p )
x = linspace( 0, N, N + 1 )
plot( x, nsim * rv.pmf( x ) )

# plot histogram
pdf, bins, patches = hist( v, N, normed=0 ) #, facecolor='green', alpha=1
#print pdf/float(nsim)
#plot( bins[:-1], pdf/float(nsim), 'rx' )   # centroids
#print sum( pdf * diff( bins ) )
show()