Example #1
    def _update_activation(self, x, x_d):
        self.input_activation = np.exp(-np.pow(self.network[:, :, 0] - x, 2) /
                                       np.pow(self.sigma_x, 2) -
                                       np.pow(self.network[:, :, 1] - x_d, 2) /
                                       np.pow(self.sigma_x_dot, 2))

        return
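A note that applies to most of the examples below: NumPy only gained pow (together with acos, asin and atan) as an alias of power in NumPy 2.0, so on older releases np.pow raises AttributeError and the portable spelling is np.power. A minimal standalone sketch of the same Gaussian activation, with made-up grid and width values (the network, sigma_x and sigma_x_dot numbers are assumptions for illustration):

import numpy as np

# Hypothetical 2x2 grid of preferred (x, x_dot) pairs and assumed kernel widths.
network = np.array([[[0.0, 0.0], [0.5, 0.5]],
                    [[1.0, 1.0], [1.5, 1.5]]])
sigma_x, sigma_x_dot = 0.5, 0.5
x, x_d = 0.3, 0.2

# np.power works on every NumPy release; np.pow is only available as an alias since NumPy 2.0.
input_activation = np.exp(-np.power(network[:, :, 0] - x, 2) / np.power(sigma_x, 2) -
                          np.power(network[:, :, 1] - x_d, 2) / np.power(sigma_x_dot, 2))
print(input_activation)  # 2x2 array of activations in (0, 1]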
Example #2
def ComputePhi(X_grid, Y_grid, pd, p, sigma):
    # Number of points in the diagram
    num_points = len(pd)

    # Life span of generators
    x = np.array([b for (b, d) in pd])  # birth
    y = np.array([abs(d - b) for (b, d) in pd])  # lifespan

    # Define the weight of y
    omega_y = np.power(y, p)

    Phi = np.zeros(X_grid.shape)

    for k in range(0, num_points):
        Phi = Phi + omega_y[k] * np.exp(
            -(np.pow(X_grid - x[k], 2.0) + np.pow(Y_grid - y[k], 2.0)) /
            (2.0 * sigma**2.0))

    # Weight function to set to zero at the boundary and
    # to smoothly increase to 1 far from the boundary
    W = (2.0 / np.pi) * np.arctan(Y_grid)
    # Set to 0 if y is 0
    # W = (4 / pi^2) * atan(X_grid) .* atan(Y_grid);  # Set to 0 if x or y are 0

    # Multiply by W
    Phi = np.multiply(W, Phi)  # elementwise
    return Phi
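For context, ComputePhi builds a weighted persistence surface: each diagram point contributes a Gaussian centred at (birth, lifespan), weighted by lifespan**p, and the W factor damps the surface near the y = 0 boundary. A usage sketch with a made-up diagram (the pd, p and sigma values are illustrative assumptions, and it presumes the function above is in scope):

import numpy as np

pd = [(0.1, 0.4), (0.2, 0.9), (0.5, 0.7)]        # hypothetical (birth, death) pairs
xs = np.linspace(0.0, 1.0, 50)
ys = np.linspace(0.0, 1.0, 50)
X_grid, Y_grid = np.meshgrid(xs, ys)

surface = ComputePhi(X_grid, Y_grid, pd, p=1.0, sigma=0.05)
print(surface.shape)                             # (50, 50), same shape as the grid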
Example #3
    def evaluate_spline(header, row, spline, limits):
        limits = (max(min(spline.get_knots()), float(limits[0])),
                  min(max(spline.get_knots()), float(limits[1])))

        ys = np.linspace(limits[0], limits[1],
                         len(header) * SplineModel.samples)
        ps = np.exp(spline(ys)) * (limits[1] - limits[0]) / (
            len(header) * SplineModel.samples)
        ps = ps / sum(ps)
        cfs = np.cumsum(ps)

        if 'mean' in header or 'var' in header or 'skew' in header:
            mean = sum(ps * ys)
        if 'var' in header or 'skew' in header:
            var = sum(ps * np.square(ys - mean))

        error = 0
        for ii in range(1, len(header)):
            if isinstance(header[ii], float):
                error = error + np.abs(
                    SplineModelConditional.find_nearest(cfs, header[ii], ys) -
                    float(row[ii]))
            elif header[ii] == 'mean':
                error = error + np.abs(mean - float(row[ii]))
            elif header[ii] == 'mode':
                mode = ys[ps.argmax()]
                error = error + np.abs(mode - float(row[ii]))
            elif header[ii] == 'var':
                error = error + np.sqrt(np.abs(var - float(row[ii])))
            elif header[ii] == 'skew':
                skew = sum(ps * np.pow((ys - mean) / np.sqrt(var), 3))
                error = error + np.pow(np.abs(skew - float(row[ii])), 1.0 / 3)

        return error
Example #4
def find_rot(pts):
    global translation_offset  # Access to the translation accumulator
    global uniform_scale_factor
    v0 = pts[0, 0] - pts[0, 1]  # TODO: phase out v0 & v1 if possible  # v0 = [x,y,z] = [i<hat>, j<hat>, k<hat>]
    v1 = pts[1, 0] - pts[1, 1]  # v1 = [x,y,z] = [i<hat>, j<hat>, k<hat>]

    alpha = np.arctan2(v1[1], v1[0]) - np.arctan2(
        v0[1], v0[0])  # Alpha angle about x-axis of standard basis (rad)
    r = np.sqrt(pts[1, 0, 0]**2 + pts[1, 0, 1]**2)

    qx = np.cos(-alpha) * pts[1, 0, 0] - np.sin(-alpha) * pts[1, 0, 1]
    qy = np.sin(-alpha) * pts[1, 0, 0] + np.cos(-alpha) * pts[1, 0, 1]
    s2_align_pt = np.array(
        [qx, qy]
    )  # Calculate position of second alignment point after rotation of -alpha about (0,0,0)
    translation_offset += (
        -(s2_align_pt * uniform_scale_factor) + pts[0, 0]
    )  # Find difference between found alignment point and alignment point on first edge

    if (pts.shape == (2, 2, 3)):
        beta = (np.arctan2(v1[2], np.sqrt(np.pow(v1[0], 2) + np.pow(v1[1], 2))) -
                np.arctan2(v0[2], np.sqrt(np.pow(v0[0], 2) + np.pow(v0[1], 2))))  # Beta angle about y-axis of standard basis (rad)
        return np.array(
            [0, -beta, -alpha]
        )  # TODO: fix the locations of these, rotation about the: {x-axis, y-axis, z-axis}
    return np.array([0, 0, -alpha])
Example #5
    def acceleration(self, T, X, Y):
        # calculate the velocity of the object
        Tv, V, Vx, Vy = self.velocity(T, X, Y)

        # increase sample size of T (time) by twice
        Tvi = linspace(Tv[0], Tv[-1], 2 * Tv.size - 1)
        dt = Tvi[1] - Tvi[0]  # calculate delta time

        # calculate acceleration in x-direction
        fX = interp1d(Tv, Vx,
                      kind='quadratic')  # interpolate x-velocity function
        Vxi = fX(Tvi)
        dvx = Vxi[1:Vxi.size] - Vxi[0:Vxi.size - 1]
        dvxdt = dvx / dt
        ax = average(dvxdt[1:-1].reshape([int((dvxdt.size - 2) / 2), 2]),
                     axis=1)

        # calculate acceleration in y-direction
        fY = interp1d(Tv, Vy,
                      kind='quadratic')  # interpolate y-velocity function
        Vyi = fY(Tvi)
        dvy = Vyi[1:Vyi.size] - Vyi[0:Vyi.size - 1]
        dvydt = dvy / dt
        ay = average(dvydt[1:-1].reshape([int((dvydt.size - 2) / 2), 2]),
                     axis=1)

        # get the magnitude
        t = Tv[1:Tv.size - 1]
        a = sqrt(pow(ax, 2) + pow(ay, 2))

        return t, a, ax, ay
Example #7
 def get_shape(self, x, p, Q2):
     if conf['shape'] == 0:
         if conf['evo'] == 'yes':
             lam2 = conf['lam2evo']
             Q02 = conf['Q02evo']
             s = np.log(np.log(Q2 / lam2) / np.log(Q02 / lam2))
             return (p[0] + p[1] * s) * x**(p[2] + p[3] * s) * (1 - x)**(p[4] + p[5] * s) * (1 + (p[6] + p[7] * s) * x + (p[8] + p[9] * s) * x**2)
         else:
             return p[0] * x**p[1] * (1 - x)**p[2] * (1 + p[3] * x + p[4] * x**2)
     elif conf['shape'] == 1:
         if conf['evo'] == 'yes':
             lam2 = conf['lam2evo']
             Q02 = conf['Q02evo']
             s = np.log(np.log(Q2 / lam2) / np.log(Q02 / lam2))
             norm = self.beta(1 + (p[2] + p[3] * s), (p[4] + p[5] * s) + 1) + (p[6] + p[7] * s) * self.beta(1 + (p[2] + p[3] * s) + 1, (p[4] + p[5] * s) + 1) \
                 + (p[8] + p[9] * s) * self.beta(1 +
                                           (p[2] + p[3] * s) + 2, (p[4] + p[5] * s) + 1)
             return (p[0] + p[1] * s) * x**(p[2] + p[3] * s) * (1 - x)**(p[4] + p[5] * s) * (1 + (p[6] + p[7] * s) * x + (p[8] + p[9] * s) * x**2) / norm
         else:
             norm = self.beta(1 + p[1], p[2] + 1) + p[3] * self.beta(1 +
                                                             p[1] + 1, p[2] + 1) + p[4] * self.beta(1 + p[1] + 2, p[2] + 1)
             return p[0] * x**p[1] * (1 - x)**p[2] * (1 + p[3] * x + p[4] * x**2) / norm
     elif conf['shape'] == 2:
         norm = self.beta(1 + p[1], 1 + p[2]) + p[3] * self.beta(1 + p[1] + 1, 1 + p[2]) + \
             p[4] * self.beta(1 + p[1], 1 + p[2]) * (psi(p[1] + p[2] + 2) - psi(p[1] + 1))
         return p[0] * x**p[1] * (1 - x)**p[2] * (1 + p[3] * x + p[4] * np.log(1 / x)) / norm
     elif conf['shape'] == 3:
         norm = np.pow((p[1] + p[2]), p[1] + p[2]) / \
             (np.pow(p[1], p[1]) * np.pow(p[2], p[2]))
         return norm * p[0] * x**p[1] * (1 - x)**p[2]
     elif conf['shape'] == 4:
         norm = self.beta(2 + p[1], p[2] + 1) + p[3] * self.beta(2 +
                                                         p[1] + 1, p[2] + 1) + p[4] * self.beta(2 + p[1] + 2, p[2] + 1)
         return p[0] * x**p[1] * (1 - x)**p[2] * (1 + p[3] * x + p[4] * x**2) / norm
Example #8
    def fn(self, current_neuron, best_neuron):
        """
        Calculate the value for the multi RBF function.
        :param current_neuron: The current neuron.
        :param best_neuron: The best neuron.
        :return: A percent that determines the amount of training the current
        neuron should get.  Usually 100% when it is the bestNeuron.
        """
        vector = np.zeros(len(self.displacement))
        vector_current = self.translate_coordinates(current_neuron)
        vector_best = self.translate_coordinates(best_neuron)
        for i in range(len(vector_current)):
            vector[i] = vector_current[i] - vector_best[i]

        if self.hexagon:

            if len(self.size) != 2:
                raise Exception(
                    "Hexagon lattice can only be used in two dimensions.")

            row = vector[1]
            col = vector[0]
            even_indent = 1
            odd_indent = 2.5
            indent = odd_indent if row % 2 == 1 else even_indent

            vector[1] = int(NeighborhoodRBF.SQ75 +
                            (row * NeighborhoodRBF.SQ75))
            vector[0] = int(indent + (3 * col))

        if self.type == NeighborhoodRBF.TYPE_GAUSSIAN:
            value = 0

            for i in range(len(self.size)):
                value += np.power(vector[i],
                                  2) / (2.0 * self.width * self.width)
            return np.exp(-value)

        elif self.type == NeighborhoodRBF.TYPE_MULTIQUADRIC:
            value = 0

            for i in range(len(self.size)):
                value += np.pow(vector[i], 2) + (self.width * self.width)
            return np.sqrt(value)
        elif self.type == NeighborhoodRBF.TYPE_INVERSE_MULTIQUADRIC:
            value = 0
            for i in range(len(self.size)):
                value += np.pow(vector[i], 2) + (self.width * self.width)
            return 1 / np.sqrt(value)
        elif self.type == NeighborhoodRBF.TYPE_MEXICAN_HAT:
            # calculate the "norm", but don't take square root
            # don't square because we are just going to square it
            norm = 0
            for i in range(len(self.size)):
                norm += np.pow(vector[i], 2)

            # calculate the value
            return (1 - norm) * np.exp(-norm / 2)
        else:
            raise Exception("Invalid RBF function type: {}".format(self.type))
Example #9
File: Kroupa.py Project: pjs902/ssptools
    def __init__(self, a=[1.3, 2.35], mlim=[0.08, 0.5, 120.0]):

        a = np.array(a)
        mlim = np.array(mlim)
        self._a = a
        self._mlim = mlim

        norm = np.zeros(len(a))
        area = np.zeros(len(a))
        C = np.zeros(len(a))

        # Assure piecewise continuity
        C[0] = pow(1.0 / mlim[1], -a[0])  # i=0
        C[1] = pow(1.0 / mlim[1], -a[1])  # i=1
        for i in range(2, len(a)):  # i>1
            C[i] = pow(1.0 / mlim[i], -a[i])
            for j in range(1, i):
                C[i] *= pow((mlim[j + 1] / mlim[j]), -a[j])

        # Loop through pieces to find normalization
        for i in range(len(a)):
            area[i] = self._mom0(mlim[i], mlim[i + 1], a[i])
        norm = area * C
        self._norm = 1.0 / np.sum(norm)
        self._area = norm * self._norm
        self._C = C
Example #10
def tai(adi1, adi2, ati1, ati2, mai, pai):  #  TAI
    ndef = 3
    pai1 = numpy.arctan2(ati1, adi1)
    pai2 = numpy.arctan2(ati2, adi2)

    if ndef == 1:
        mai = numpy.sqrt(mai1 * mai2)
        if mai1 < eps:
            mai = mai2
        if mai2 < eps:
            mai = mai1
            pai = numpy.sqrt(pai1 * pai2)
    elif ndef == 2:
        sumwt = adi1 + adi2
        mai = (mai1 * adi1 + mai2 * adi2) / sumwt
        pai = numpy.sqrt(pai1 * pai2)
    elif ndef == 3:
        sums = mai1 * numpy.sin(pai1) + mai2 * numpy.sin(pai2)
        sumc = mai1 * numpy.cos(pai1) + mai2 * numpy.cos(pai2)
        mai = numpy.sqrt(numpy.pow(sums, 2) + numpy.pow(sumc, 2))
        pai = numpy.arctan2(sums, sumc)

    mai = rtod * numpy.arccos((1 - mai) / (1 + mai))
    pai = rtod * pai

    return atin
Example #11
File: shock.py Project: ptroyen/caeroc
    def beta(self,m1,d,g=1.4,i=0):
        p=-(m1*m1+2.)/m1/m1-g*np.sin(d)*np.sin(d)
        q=(2.*m1*m1+1.)/ np.pow(m1,4.)+((g+1.)*(g+1.)/4.+
                                          (g-1.)/m1/m1)*np.sin(d)*np.sin(d)
        r=-np.cos(d)*np.cos(d)/np.pow(m1,4.)

        a=(3.*q-p*p)/3.
        b=(2.*p*p*p-9.*p*q+27.*r)/27.

        test=b*b/4.+a*a*a/27.

        if (test>0.0):
            return -1.0
        elif (test==0.0):
          x1=np.sqrt(-a/3.)
          x2=x1
          x3=2.*x1
          if(b>0.0):
            x1*=-1.
            x2*=-1.
            x3*=-1.

        if(test<0.0):
          phi=np.acos(np.sqrt(-27.*b*b/4./a/a/a))
          x1=2.*np.sqrt(-a/3.)*np.cos(phi/3.)
          x2=2.*np.sqrt(-a/3.)*np.cos(phi/3.+np.pi*2./3.)
          x3=2.*np.sqrt(-a/3.)*np.cos(phi/3.+np.pi*4./3.)
          if(b>0.0):
            x1*=-1.
            x2*=-1.
            x3*=-1.

        s1=x1-p/3.
        s2=x2-p/3.
        s3=x3-p/3.

        if(s1<s2 and s1<s3):
          t1=s2
          t2=s3
        elif(s2<s1 and s2<s3):
          t1=s1
          t2=s3
        else:
          t1=s1
          t2=s2

        b1=np.asin(np.sqrt(t1))
        b2=np.asin(np.sqrt(t2))

        betas=b1
        betaw=b2
        if(b2>b1):
          betas=b2
          betaw=b1

        if(i==0):
            return betaw
        if(i==1):
            return betas
Example #13
def func_θ(θc, tf):
    hf = h(θc, tf)
    uf = u(θc, tf)
    vf = v(θc, tf)
    return (uθ(θc, tf) *
            (vf * sqrt(pow(vf, 2) + 2 * g * hf) + pow(vf, 2) + 2 * g * hf) +
            uf * (vθ(θc, tf) *
                  (vf + sqrt(pow(vf, 2) + 2 * g * hf)) + g * hθ(θc, tf)))
Example #14
 def lowpass_prototype_order(self, wpass, wstop):
     if self.btype == "butter":
         order = (np.log(np.pow(10, 0.1*self.gstop) - 1.0) - np.log(np.pow(10, 0.1*self.gpass) - 1.0)) / (np.log(abs(wstop/w0 - w0/wstop)) - np.log(abs(wpass/w0 - w0/wpass)))  # page 68
     elif self.ftype in ('cheby1', 'cheby2'):
         """TBI"""
     else:
         raise Exception("Unknown filter's approximation type.")
         sys.exit(1)
Example #15
def gamma_trans(mat, gamma):
    gamma_mean = np.pow(mean_rgb / 255, gamma)
    tmp_mat = np.pow(mat / 255, gamma)
    gamma_mat = np.zeros(mat.shape, dtype=float)
    gamma_mat[:, :, 0] = tmp_mat[:, :, 2] - gamma_mean[:, :, 2]
    gamma_mat[:, :, 1] = tmp_mat[:, :, 1] - gamma_mean[:, :, 1]
    gamma_mat[:, :, 2] = tmp_mat[:, :, 0] - gamma_mean[:, :, 0]
    return gamma_mat
Example #17
def struct_score(DSi, SSi):
    '''Calculate structure score based on Li et al 2012
    method.

    Args:
        DSi (float): Normalized dsRNA-seq (RNaseI resistant) coverage.
        SSi (float): Arcsinh transformed ssRNA-seq (RNaseVI resistant) coverage.
    '''
    return np.log2(DSi + np.sqrt(1 + np.pow(DSi, 2))) - np.log2(SSi + np.sqrt(1 + np.pow(SSi, 2)))
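Both terms are arcsinh written in base 2 (log2(z + sqrt(1 + z**2)) equals arcsinh(z)/ln 2), so the score can be sanity-checked against np.arcsinh. A quick check on made-up coverage arrays (values are illustrative only; np.pow needs NumPy 2.0 or newer, see the note under Example #1):

import numpy as np

DSi = np.array([0.0, 1.5, 10.0])   # hypothetical dsRNA-seq coverage
SSi = np.array([2.0, 1.5, 0.1])    # hypothetical ssRNA-seq coverage

score = struct_score(DSi, SSi)
check = (np.arcsinh(DSi) - np.arcsinh(SSi)) / np.log(2)
print(np.allclose(score, check))   # True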
Example #18
 def parseData(self, primaryData):
     data = SerialData(primaryData)
     st = b2i.bytes2Int32(data.payLoad)
     tt = (1 / (2.048 * st / 3.3 / numpy.pow(2, 23) + 1) - 1) * numpy.pow(
         10, 6)
     if tt < -50000 or tt > 50000:
         tt = 0
     self.telemetry.strain = tt
     self.attributes = b2i.bytes2UInt16(data.mcuId)
Example #19
def fourier_error(a, f, m, w=None):
    if w is None:
        w = np.ones(np.shape(a))
    efourier_nom = np.pow(
        (np.absolute(f[np.where(m)]) - np.absolute(a[np.where(m)])),
        2) * w[np.where(m)]
    efourier_den = np.sum(np.pow(np.absolute(a[np.where(m)]), 2)) + np.sum(
        np.pow(np.absolute(f[np.where(~m)]), 2))
    return np.sqrt(efourier_nom.sum() / efourier_den)
Example #21
File: profit.py Project: limu007/Charlay
def ll(e_i,e_m,delt=0.1):
    """

    :param e_i: inclusion dielectric
    :param e_m: bulk dielectric
    :param delt: fraction of inclusion
    :return:
    """
    from numpy import pow
    return pow(delt*pow(e_i,1/3.)+(1-delt)*pow(e_m,1/3.),3.)
Example #22
def strangulation(_x):
    x = _x[0]
    y = _x[1]
    return (
        0 -
        (0.2) * np.exp(-(np.pow(x - 0.25, 2) + np.pow(y - 0.25, 2)) / 0.001) -
        (0.2) * np.exp(-(np.pow(x - 0.25, 2) + np.pow(y - 0.75, 2)) / 0.001) -
        (0.2) * np.exp(-(np.pow(x - 0.75, 2) + np.pow(y - 0.25, 2)) / 0.001) -
        (0.2) * np.exp(-(np.pow(x - 0.75, 2) + np.pow(y - 0.75, 2)) / 0.001) +
        (1.0) * np.exp(-(np.pow(x - 0.50, 2) + np.pow(y - 0.50, 2)) / 0.125))
Example #23
File: profit.py Project: limu007/scanner
def ll(e_i, e_m, delt=0.1):
    """

    :param e_i: inclusion dielectric
    :param e_m: bulk dielectric
    :param delt: fraction of inclusion
    :return:
    """
    from numpy import pow
    return pow(delt * pow(e_i, 1 / 3.) + (1 - delt) * pow(e_m, 1 / 3.), 3.)
Example #24
 def __call__(self, eps, a, b, c, d, ee, f, g, h, i, k):
     '''
     Implements the response function with arrays as variables.
     first extract the variable discretizations from the orthogonal grid.
     '''
     return ( a + pow( b, 2 ) + pow( c, 3 ) + pow( d, 4 ) + pow( ee, 5 ) + pow( f, 6 ) \
             + pow( g, 7 ) + pow( h, 8 ) + pow( i, 9 ) + pow( k, 10 ) ) * eps
Example #26
    def calc_mag(self):
        tmp = 0.0
        for i in range(self.N):
            tmp = np.sqrt(
                np.pow(self.real_part[i], 2) + np.pow(self.imag_part[i], 2))
            self.mags.append(tmp)

        for i in range(self.N):
            print("Magnitudes: ", i, self.mags[i])

        return
Example #27
    def add_element(self, prediction):
        self.pre_hist.append(prediction)

        if self.in_concept_change:
            self.reset()

        self.miss_sum += prediction
        self.miss_prob = self.miss_sum / self.sample_count
        # self.m_s = math.sqrt(self.miss_prob * (1.0 - self.miss_prob) * self._lambda * (1.0 - math.pow(1.0 - self._lambda, 2.0 * self.sample_count)) / (2.0 - self._lambda))
        self.sample_count += 1

        self.fw_miss_prob = 0
        self.fw_miss_num = 1
        tmp = len(self.pre_hist) * self.fw_rate
        for i in range(len(self.pre_hist)):
            if tmp >= self.min_fw_size and i <= tmp:
                self.fw_miss_prob += self.pre_hist[i] * i / tmp
                self.fw_miss_num += i / tmp
                # self.f_z_t += self._lambda*(self.pre_hist[i]*i/tmp-self.f_z_t)
            else:
                self.fw_miss_prob += self.pre_hist[i]
                self.fw_miss_num += 1
                # self.f_z_t += self._lambda*(self.pre_hist[i]-self.f_z_t)

        self.fw_miss_prob /= self.fw_miss_num
        self.f_m_s = math.sqrt(self.fw_miss_prob * (1 - self.fw_miss_prob) /
                               self.fw_miss_num)

        self.z_t += self._lambda * (prediction - self.z_t)

        L_t = 3.97 - 6.56 * self.fw_miss_prob + 48.73 * math.pow(
            self.fw_miss_prob, 3) - 330.13 * math.pow(self.fw_miss_prob,
                                                      5) + 848.18 * math.pow(
                                                          self.fw_miss_prob, 7)

        self.estimation = self.miss_prob
        self.in_concept_change = False
        self.in_warning_zone = False
        self.delay = 0

        if self.sample_count < self.min_instances:
            return

        if self.z_t > self.fw_miss_prob + L_t * self.f_m_s:
            self.in_concept_change = True

        elif self.z_t > self.fw_miss_prob + self.warning_level * L_t * self.f_m_s:
            self.in_warning_zone = True

        else:
            self.in_warning_zone = False
Example #29
def newton_rafson_numeric(fun, x0, e, n):
    x = x0
    i = 0
    for _ in range(0, n):
        df = derivative(fun, x, n=1)
        ddf = derivative(fun, x, n=2)
        if np.abs(df) < e:
            i += 1
            return x, i
        else:
            xl = x - df / ddf
            dfxl = derivative(fun, xl, n=1)
            t = pow(df, 2) / (pow(df, 2) + pow(dfxl, 2))
            x = x - t * df / ddf
            i += 1
Example #30
def PFR_CSTR():
    #PFR
    ans, err = quad(integrate, 0, X1)
    V7 = (F / (k * pow(C0, n))) * ans
    #CSTR
    C1 = C0 * (1 - X1)
    C2 = C1 * (1 - X2)
    r = -k * (pow(C2, n))
    V8 = (F * (X2 - X1)) / (-r)

    print("\nResults:")
    print("Volume of PFR is: " + str(format(V7, '.3f')) + " Litres &", end=' ')
    print("Volume of CSTR is: " + str(format(V8, '.3f')) + " Litres.\n")
    V9 = V7 + V8
    return V9
Example #31
    def invTransform(self, value):
        """
        Inverse transformation function

        :param float value: Value
        :return: Modified value
        
        .. seealso::
        
            :py:meth:`transform()`
        """
        if value < 0.:
            return -np.pow(-value, self.__exponent)
        else:
            return np.pow(value, self.__exponent)
Example #32
def newton_rafson(fun_prime, fun_second, x0, e, n):
    x = x0
    i = 0
    for _ in range(0, n):
        df = fun_prime(x)
        ddf = fun_second(x)
        if np.abs(df) < e:
            i += 1
            return x, i
        else:
            xl = x - df / ddf
            dfxl = fun_prime(xl)
            t = pow(df, 2) / (pow(df, 2) + pow(dfxl, 2))
            x = x - t * df / ddf
            i += 1
Example #33
def geodetic_to_ECEF(lat, lon):
    D2R = pi / 180
    #meters
    a = 6378137.0
    #first eccentricity squared
    pow_e_2 = 6.69437999014 * pow(10, -3)
    #assume 0
    h = 0
    lat_rad = float(lat) * D2R
    lon_rad = float(lon) * D2R
    N = a / sqrt(1 - pow_e_2 * pow(sin(lat_rad), 2))
    x = (N + h) * cos(lat_rad) * cos(lon_rad)
    y = (N + h) * cos(lat_rad) * sin(lon_rad)
    z = (N * (1 - pow_e_2) + h) * sin(lat_rad)
    return [x, y, z]
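A usage sketch for geodetic_to_ECEF, assuming the bare pi, sqrt, sin and cos names come from a numpy star import in the original module (they are not defined in the snippet itself):

from numpy import pi, sqrt, sin, cos

# Coordinates roughly at the Greenwich Observatory (illustrative input).
x, y, z = geodetic_to_ECEF(51.4769, 0.0)
print(x, y, z)   # ECEF position in metres on the WGS-84 ellipsoid surface (h = 0)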
Example #35
def CSTR_PFR():
    #CSTR
    C1 = C0 * (1 - X1)
    r = -k * (pow(C1, n))
    V4 = (F * X1) / (-r)
    #PFR
    ans, err = quad(integrate, X1, X2)
    V5 = (F / (k * pow(C1, n))) * ans

    print("\nResults:")
    print("Volume of CSTR is: " + str(format(V4, '.3f')) + " Litres &",
          end=' ')
    print("Volume of PFR is: " + str(format(V5, '.3f')) + " Litres.\n")
    V6 = V4 + V5
    return V6
Example #37
    def backward(self, bottom_data, bottom_diff, top_data, top_diff):
        padded_ratio = Array.zeros(1, bottom_data.shape[1] + self.size - 1,
                                   bottom_data.shape[2], bottom_data.shape[3])
        accum_ratio = Array.zeros(1, 1, bottom_data.shape[2],
                                  bottom_data.shape[3])
        accum_ratio_times_bottom = Array.zeros(1, 1, bottom_data.shape[2],
                                               bottom_data.shape[3])
        cache_ratio_value = 2.0 * self.apha * self.beta / self.size
        bottom_diff = np.pow(self.scale, -self.beta)
        bottom_diff *= top_diff

        inverse_pre_pad = self.size - (self.size + 1) / 2
        for n in range(bottom_data.shape[0]):
            padded_ratio[0, inverse_pre_pad] = top_diff[n] * top_data[n]
            padded_ratio[0, inverse_pre_pad] /= self.scale[n]
            accum_ratio.fill(0)
            for c in range(self.size - 1):
                accum_ratio += padded_ratio[0, c]

            for c in range(bottom_data.shape[1]):
                accum_ratio += padded_ratio[0, c + self.size - 1]
                accum_ratio_times_bottom += bottom_data[n, c] * accum_ratio
                bottom_data[n, c] += -cache_ratio_value * \
                    accum_ratio_times_bottom
                accum_ratio += -1 * padded_ratio[0, c]
Example #38
def Batch():
    v = F / C0
    T = V1 / v
    t = T
    ans, err = quad(integrate, 0, X)
    V3 = (N / (t * k * pow(C0, n))) * ans
    return V3
Example #39
File: core.py Project: kpatton1/simpdf
    def process_chunk(self, data):

        moment_data = numpy.log(data)
        
        moments = numpy.zeros(self.mmax - self.mmin, dtype=numpy.float32)

        mean = numpy.nanmean(moment_data)

        moment_data = moment_data - mean

        if self.mmin == 1:
            temp = numpy.ones(len(moment_data), dtype=numpy.float32)
        elif self.mmin == 2:
            temp = moment_data
        else:
            temp = numpy.pow(moment_data, self.mmin-1)

        for i in range(0, self.mmax-self.mmin):
            temp = temp * moment_data
            moments[i] = numpy.nanmean(temp)

        if self.mmin == 1:
            moments[0] = mean

        return moments
Example #40
    def schecterFunctionL(self,L,phistar,Lstar,alpha):
        """ Schecter function for galaxy LF
        Default to redshift ??? LBG properties
        """
        phi=phistar*numpy.pow(L/Lstar,alpha)*numpy.exp(-L/Lstar)

        return phi
Example #41
 def _deriv_pow_0(x, y):
     if y == 0:
         return 0.0
     elif x != 0 or y % 1 == 0:
         return y * np.pow(x, y - 1)
     else:
         return float('nan')
Example #42
    def run_till_convergence(self, tol=.05):
        scale = np.mean(np.sum(np.pow(self.data, 2), 1), 0)
        runs = 0
        while 1:
            runs += 1
            means_0 = array(self.means)
            self._iterate()
            divergence = np.mean(np.sum(np.pow(means_0 - self.means, 2), 1), 0)
            if divergence / scale < tol:
                failed = False
                break
            if runs > self.max_runs:
                failed = True
                break

        return failed
Example #43
    def func(self, t, y):
        m = self.m
        k = self.k
        p = self.p

        x, v = y
        return v, (self.fext(t,x) - k*np.pow(x,p-1))/m
Example #44
def distance_geodetic(lat1, lon1, lat2, lon2):
    RADIUS = 6371  #KM

    D2R = pi / 180

    lat1_rad = float(lat1) * D2R
    lat2_rad = float(lat2) * D2R
    lon1_rad = float(lon1) * D2R
    lon2_rad = float(lon2) * D2R

    a = pow(sin(
        (lat1_rad - lat2_rad) / 2), 2) + cos(lat1_rad) * cos(lat2_rad) * pow(
            sin((lon1_rad - lon2_rad) / 2), 2)

    distance = abs(2 * RADIUS * arctan2(sqrt(a), sqrt(1 - a)))
    return distance * 1000
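distance_geodetic above is the haversine great-circle distance with a 6371 km Earth radius, returned in metres. A usage sketch, again assuming the bare pi, sin, cos, arctan2 and sqrt names come from a numpy star import as in the original module:

from numpy import pi, sin, cos, arctan2, sqrt

# Approximate city-centre coordinates (illustrative): Paris to London.
d = distance_geodetic(48.8566, 2.3522, 51.5074, -0.1278)
print(round(d / 1000.0, 1), "km")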
Example #45
def squared_error(y, est):
    num_classes = y.shape[1]
    rval = 0
    for c in range(num_classes):
        est_c = est[np.where(y[:, c] == 1)[0]]
        num_class_c = int(np.sum(y[:, c]))
        rval += np.sum(np.pow(np.subtract(np.ones([num_class_c, 1]), est_c), 2))
    return rval
Example #46
def generate_shear_layer_profile(conv_u, eta, u, u_y, u_yy):
    N = len(u) - 1
    for i in range(0, N + 1):
        u[i] = 1.0 + conv_u * np.tanh(eta[i])
        u_y[i] = conv_u * (1.0 - np.pow(np.tanh(eta[i]), 2.0))
        u_yy[i] = 0.0
    print("############# Need to calculate analytical function for u_yy ###############")
    return
Example #47
def generate_wake_profile(wake_half_width, u_deficit, eta, u, u_y, u_yy):
    N = len(u) - 1
    for i in range(0, N + 1):
        sech_n = 1.0 / np.cosh(eta[i] / wake_half_width)
        u[i] = 1.0 - u_deficit * pow(sech_n, 2.0)
        u_y[i] = 2.0 * u_deficit * np.pow(sech_n, 2.0) * np.tanh(eta[i])
        u_yy[i] = 0.0
    print("############# Need to calculate analytical function for u_yy ###############")
    return
Example #48
def distance(node1,node2,lat_a, lon_a):
	lat1 = float(lat_a[node1])
	lat2 = float(lat_a[node2])
	lon1 = float(lon_a[node1])
	lon2 = float(lon_a[node2])

	RADIUS = 6371 #KM

	D2R =  pi/180;

	lat1_rad = lat1*D2R;
	lat2_rad = lat2*D2R;
	lon1_rad = lon1*D2R;
	lon2_rad = lon2*D2R;

	a =    pow( sin((lat1_rad-lat2_rad)/2),2) +  cos(lat1_rad)* cos(lat2_rad)*   pow( sin((lon1_rad-lon2_rad)/2),2);

	distance =  abs(2*RADIUS* arctan2( sqrt(a),  sqrt(1-a)));
	return distance*1000; #meter
Example #49
    def inverse(self, value):
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = float(self.vmin), float(self.vmax)

        if cbook.iterable(value):
            val = np.ma.asarray(value)
            return vmin * np.ma.power((vmax/vmin), val)
        else:
            return vmin * np.pow((vmax/vmin), value)
Example #50
def ecef2geodetic(x, y, z):
    """Convert ECEF coordinates to geodetic.
    J. Zhu, "Conversion of Earth-centered Earth-fixed coordinates \
    to geodetic coordinates," IEEE Transactions on Aerospace and \
    Electronic Systems, vol. 30, pp. 957-961, 1994."""
    r =  sqrt(x * x + y * y)
    Esq = a * a - b * b
    F = 54 * b * b * z * z
    G = r * r + (1 - esq) * z * z - esq * Esq
    C = (esq * esq * F * r * r) / (  pow(G, 3))
    S = cbrt(1 + C +  sqrt(C * C + 2 * C))
    P = F / (3 *   pow((S + 1 / S + 1), 2) * G * G)
    Q =  sqrt(1 + 2 * esq * esq * P)
    r_0 =  -(P * esq * r) / (1 + Q) +  sqrt(0.5 * a * a*(1 + 1.0 / Q) - \
	P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)
    U =  sqrt(  pow((r - esq * r_0), 2) + z * z)
    V =  sqrt(  pow((r - esq * r_0), 2) + (1 - esq) * z * z)
    Z_0 = b * b * z / (a * V)
    h = U * (1 - b * b / (a * V))
    lat =  arctan((z + e1sq * Z_0) / r)
    lon =  arctan2(y, x)
    return  degrees(lat),  degrees(lon)
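ecef2geodetic depends on module-level constants (a, b, esq, e1sq) and helpers (cbrt, sqrt, arctan, arctan2, degrees) that are not part of the snippet. A sketch of the usual WGS-84 definitions they most plausibly stand for; treat the block below as an assumption, not the original module's code:

from numpy import sqrt, cbrt, arctan, arctan2, degrees

a = 6378137.0            # WGS-84 semi-major axis (m)
b = 6356752.314245       # WGS-84 semi-minor axis (m)
esq = 6.69437999014e-3   # first eccentricity squared
e1sq = 6.73949674228e-3  # second eccentricity squared

# An ECEF point roughly over Greenwich (illustrative input in metres).
lat, lon = ecef2geodetic(3980574.0, -8.0, 4966824.0)
print(lat, lon)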
Example #51
File: core.py Project: kpatton1/simpdf
    def process_chunk(self, data):

        moment_data = data / self.scale
        
        moments = numpy.zeros(self.mmax - self.mmin, dtype=numpy.float32)

        if self.mmin == 2:
            temp = moment_data
        else:
            temp = numpy.pow(moment_data, self.mmin-1)

        for i in range(0, self.mmax-self.mmin):
            temp = temp * moment_data
            moments[i] = numpy.mean(temp)

        return moments
Example #52
 def __call__(self, RA, Dec, energy):
     self.ncalls += 1
     Norm = self.parameters["Norm"].value
     # Map is in Galactic coords need to convert from Celestial
     lon, lat = return_lonlat(RA, Dec)
     # Get the pixel position
     px, py = coordToPixel(lon, lat, self.filename)
     # determine values at requested energie by PL interp on the nearest available in the model
     E_0 = np.max(np.where(energies <= np.log10(energy)), axis=1)[0]
     pos_0 = [E_0, py, px]
     vals_0 = fp_linear_interp(pos_0, self.cube, self.filename)
     pos_1 = [E_0 + 1, py, px]
     vals_1 = fp_linear_interp(pos_1, self.cube, self.filename)
     gamma = -(np.log10(vals_1) - np.log10(vals_0)) / self.w[0].header["CDELT3"]
     vals = vals_0 - gamma * (np.log(energy) - logE0)
     vals = np.pow(10, vals)
     return Norm * vals
Example #53
def log_sample(min_, max_, size=1, base='e'):
    """Sample from a log scale.

    :param min_: float, The minimum for the sample range.
    :param max_: float, The maximum for the sample range.
    :param size: int, The number of samples to draw.
    :param base: str or int, The base of the log function.
    """
    if base == 'e':
        a = math.log(min_)
        b = math.log(max_)
    else:
        a = math.log(min_, base)
        b = math.log(max_, base)
    r = np.random.uniform(a, b, size)
    if base == 'e':
        return np.exp(r)
    else:
        return np.pow(base, r)
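A usage sketch: drawing values that are uniform on a log scale between two bounds (the bounds and sizes below are illustrative; the base=10 branch uses np.pow, so it needs NumPy 2.0+ or np.power):

import math
import numpy as np

lr_candidates = log_sample(1e-4, 1e-1, size=3)               # natural-log scale by default
print(lr_candidates)                                         # three values spread across the decades
lr_candidates_10 = log_sample(1e-4, 1e-1, size=3, base=10)   # same idea with base-10 logs
print(lr_candidates_10)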
Example #54
def power(inputArray, exponente=3.0, scale_min=None, scale_max=None):
    print("[cvSpace]::power")
    img = np.array(inputArray, copy=True)

    if scale_min is None:
        scale_min = img.min()
    if scale_max is None:
        scale_max = img.max()
    factor = 1.0 / np.pow(scale_max, exponente)
    img = img + scale_min
    print("Factor: " + str(factor))
    indices0 = np.where(img < scale_min)
    indices1 = np.where((img >= scale_min) & (img <= scale_max))
    indices2 = np.where(img > scale_max)
    img[indices0] = 0.0
    img[indices2] = 1.0
    img[indices1] = np.power((img[indices1] - scale_min), exponente) * factor

    return 255.0 * img
Example #55
def get_rgh_hrd(beamdat,dep,absorp,c,nf,transfreq,equivbeam,maxW,pi,ft):
    peakstart = (int)((float(dep)/c)*76923)
    noiseend = (int)(round(0.9*peakstart))
    E1start = peakstart+int(ft/2) #27
    E1end = peakstart+(nf*3) #131
    E2start = (int)(2*peakstart)
    E2end = (int)(2*peakstart)+(nf*3) #131
    sum = 0

    try:
       for k in range(nf,noiseend): #80
          backstrength = ((30.0/255.0)*(float((np.squeeze(beamdat))[k])))+(20*log10(float(dep)))+(2*(absorp/1000)*(float(dep)))-(10*(log10(((maxW*(pow((c/(transfreq/1000)),2))*c*0.0007*equivbeam)/(32*(pow(pi,2)))))))
          backcoeff = pow(10,(backstrength/10))
          sum = sum + backcoeff
       n = noiseend - nf + 1 #80 + 1
       noise = (4*pi*(pow(1852.0,2))*(2*sum))/max(n,1)
       sum = 0
       for k in range(E1start,E1end):
          backstrength = ((30.0/255.0)*(float((np.squeeze(beamdat))[k])))+(20*log10(float(dep)))+(2*(absorp/1000)*(float(dep)))-(10*(log10(((maxW*(pow((c/(transfreq/1000)),2))*c*0.0007*equivbeam)/(32*(pow(pi,2)))))))
          backcoeff = pow(10,(backstrength/10))
          sum = sum + backcoeff
       sv_e1 = sum
       n = E1end - E1start + 1
       energy = (4*pi*(pow(1852.0,2))*(2*sum))-(max(n,1)*noise)
       if energy < 0:
          energy = 1.0
       rough = log10(energy)
    except:
       rough = np.nan
       sv_e1 = np.nan

    try:    
       sum = 0
       for k in range(E2start,E2end):
          backstrength = ((30.0/255.0)*(float((np.squeeze(beamdat))[k])))+(20*log10(float(dep)))+(2*(absorp/1000)*(float(dep)))-(10*(log10(((maxW*(pow((c/(transfreq/1000)),2))*c*0.0007*equivbeam)/(32*(pow(pi,2)))))))
          backcoeff = pow(10,(backstrength/10))
          sum = sum + backcoeff
       sv_e2 = sum
       n = E2end - E2start + 1
       energy = (4*pi*(pow(1852.0,2))*(2*sum))-(max(n,1)*noise)
       if energy < 0:
          energy = 1.0
       hard = log10(energy)
       sum = 0
    except:
       hard = np.nan
       sv_e2 = np.nan

    return rough, hard, sv_e1, sv_e2, E1start, E1end, E2start, E2end
Example #56
def CND(X):
    ''' 
    Cumulative normal distribution

    '''

    a1, a2, a3, a4, a5 = (0.31938153, -0.356563782, 1.781477937, 
                         -1.821255978, 1.330274429)
    L = np.abs(X)

    K = 1.0 / (1.0 + 0.2316419 * L)

    w1 = 1.0/np.sqrt(2*pi)*np.exp(-L*L/2.)
    
    w2 = (a1*K+a2*K*K+a3*np.pow(K,3) + a4*pow(K,4) + a5*pow(K,5))
    
    w = 1.0 - w1 * w2
    
    if X<0:
        w = 1.0-w

    return w
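CND is the classic Abramowitz and Stegun polynomial approximation of the standard normal CDF, accurate to about 1e-7. A quick check against scipy, assuming scipy is available and that pi is in scope (the function body uses a bare pi):

import numpy as np
from numpy import pi
from scipy.stats import norm

for X in (-1.96, 0.0, 0.5, 1.96):
    print(X, CND(X), norm.cdf(X))   # the two values agree to roughly seven decimal places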
Example #57
def generate_polynomials(data, degrees):
  """Creates a dictionary of orthonormal polynomial basis functions from a vector.
    ARGS
      x : numerical data <numpy array>
      degrees : list with degrees of polynomial <int>
    RETURN
      Z : dictionary with orthonormal polynomial basis functions {degree:basis_functions} <dictionary>  
  """
  if isinstance(degrees, int):
    degrees = [degrees]
  if not isinstance(degrees,list):
    raise Exception("degrees must be an int or a list, got %s" % degrees)
  
  polys = {}
  
  for degree in degrees:
    polys[degree] = np.empty((data.shape[0], data.shape[1] * degree))
    for i in range(data.shape[1]):
      for k in range(degree):
        polys[degree][:,i*degree + k] = np.pow(data[:,i], k)
        
  return polys
Example #58
File: layer.py Project: wOOL/deepnet
  def UpdateParams(self, deriv, step):
    """ Update the parameters associated with this layer.

    Update the bias.
    Args:
      deriv: Gradient w.r.t the inputs to this layer.
      step: Training step.
    """
    logging.debug('UpdateParams in %s', self.name)
    h = self.hyperparams

    # Linearly interpolate between initial and final momentum.
    if h.momentum_change_steps > step:
      f = float(step) / h.momentum_change_steps
      momentum = (1.0 - f) * h.initial_momentum + f * h.final_momentum
    else:
      momentum = h.final_momentum

    # Decide learning rate.
    if h.epsilon_decay == deepnet_pb2.Hyperparams.NONE:
      epsilon = h.base_epsilon
    elif h.epsilon_decay == deepnet_pb2.Hyperparams.INVERSE_T:
      epsilon = h.base_epsilon / (1 + float(step) / h.epsilon_decay_half_life)
    elif h.epsilon_decay == deepnet_pb2.Hyperparams.EXPONENTIAL:
      epsilon = h.base_epsilon / np.pow(2, float(step) / h.epsilon_decay_half_life)
    if step < h.start_learning_after:
      epsilon = 0.0

    b_delta = self.params['grad_bias']
    b = self.params['bias']

    # Update bias.
    b_delta.mult(momentum)
    b_delta.add_sums(deriv, axis=1, mult = (1.0 - momentum) / self.batchsize)
    if h.apply_l2_decay:
      b_delta.add_mult(b, (1-momentum) * h.l2_decay)
    b.add_mult(b_delta, -epsilon)
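The momentum interpolation and the three epsilon decay schedules above are simple to inspect in isolation. A standalone sketch with made-up hyperparameter values (the names mirror the fields used above but are assumptions here, not deepnet's config):

import numpy as np

base_epsilon = 0.1
epsilon_decay_half_life = 5000.0

for step in (0, 5000, 20000):
    eps_none = base_epsilon                                                          # NONE: constant learning rate
    eps_inverse_t = base_epsilon / (1 + float(step) / epsilon_decay_half_life)       # INVERSE_T decay
    eps_exponential = base_epsilon / np.power(2, float(step) / epsilon_decay_half_life)  # EXPONENTIAL: halves every half-life
    print(step, eps_none, eps_inverse_t, eps_exponential)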
Example #59
 def _get_real_memsize( self ):
     return pow( self.n_int, self.n_rv )