Example #1
 def sgrad(self, x, ndata=100, bound=True, average=True):
     low_no = sp.random.randint(0, high=50000 - ndata)
     high_no = low_no + ndata
     sli = slice(low_no, high_no)
     data2 = []
     data2.append(self.data.train[0][sli])
     data2.append(self.data.train[1][sli])
     gradient = self.gradEval(x, tuple(data2))
     m = int(sp.shape(gradient)[1])
     '''u is the random direction matrix'''
     u = sp.random.randint(0, high=m, size=sp.shape(gradient))
     '''taking the element-wise product of u and gradient and then the row-wise
     sum gives the dot product of the matrices; dotprod is a 10x1 matrix'''
     dotprod = (u * gradient).sum(axis=1, keepdims=True)
     stoc_grad = dotprod * u
     if bound == True:
         len_vec = sp.sqrt(
             sp.diagonal(sp.dot(stoc_grad, sp.transpose(stoc_grad))))
         #creating the list where len_vec is greater than desired value
         bool_list = list(map(lambda z: z > 10, len_vec))
         #converting boolean list to array of 1 0
         bool_array = sp.array(bool_list, dtype=int)
         #calculating factor to be divided with
         norm_factor = sp.divide(bool_array, float(m) * len_vec)
         norm_factor[norm_factor == 0] = 1  #replacing 0's with 1
         temp_norm = sp.reshape(norm_factor,
                                (len(norm_factor), 1)) * sp.ones(
                                    sp.shape(stoc_grad))
         stoc_grad = sp.divide(stoc_grad, temp_norm)
         '''alternatively we can use this
         for i in range(len(len_vec)): #this for loop is small as len_vec len is 10
             if len_vec[i] > 10:
                 stoc_grad[i,:] = stoc_grad[i,:]/(float(m)*float(len_vec[i]))'''
     return stoc_grad
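The core move in `sgrad` is a random-direction estimator: draw a direction matrix `u`, take row-wise dot products with the true gradient, and rescale `u` by them. A minimal, self-contained NumPy sketch of just that step (the shapes and `gradient` are illustrative stand-ins for the `gradEval` output):

import numpy as np

rng = np.random.default_rng(0)
gradient = rng.standard_normal((10, 784))            # stand-in for gradEval output
u = rng.integers(0, 784, size=gradient.shape)        # random direction matrix
dotprod = (u * gradient).sum(axis=1, keepdims=True)  # row-wise dot products, 10x1
stoc_grad = dotprod * u                              # stochastic gradient estimate
print(stoc_grad.shape)                               # (10, 784)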
Example #2
def indx_1dto3d(idx, sz):
    """
    Translate 1D vector coordinates to 3D matrix coordinates for a 3D matrix of size sz.

    Parameters
    ----------
    idx : array
        A 1D numpy coordinate vector.
    sz : array
        Shape of the 3D matrix.

    Returns
    -------
    x : int
        x-coordinate of 3D matrix coordinates.
    y : int
        y-coordinate of 3D matrix coordinates.
    z : int
        z-coordinate of 3D matrix coordinates.

    References
    ----------
    .. Adapted from PyClusterROI
    """
    from scipy import divide, prod
    # note: integer (floor) division is intended here; with NumPy/Python 3
    # true-division semantics a raw divide() would return float coordinates
    x = divide(idx, prod(sz[1:3])).astype(int)
    y = divide(idx - x * prod(sz[1:3]), sz[2]).astype(int)
    z = idx - x * prod(sz[1:3]) - y * sz[2]
    return x, y, z
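A quick round-trip check of the coordinate translation, assuming C-order (row-major) flattening, which is the convention `numpy.unravel_index` also uses:

import numpy as np

sz = np.array([4, 5, 6])
idx = np.array([0, 37, 119])
x, y, z = indx_1dto3d(idx, sz)
print(np.array_equal(np.array([x, y, z]),
                     np.array(np.unravel_index(idx, tuple(sz)))))  # True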
Example #3
 def __init__(self, fc, c_vel, alp_g, mu_los, mu_nlos, a, b, noise_var, hUAV, xUAV, yUAV, xUE, yUE):
     dist = sp.sqrt( sp.add(sp.square(sp.subtract(yUAV, yUE)), sp.square(sp.subtract(xUAV, xUE))) )
     R_dist = sp.sqrt( sp.add(sp.square(dist), sp.square(hUAV)) )
     temp1 = sp.multiply(10, sp.log10(sp.power(fc*4*sp.pi*R_dist/c_vel, alp_g)))
     temp2 = sp.multiply(sp.subtract(mu_los, mu_nlos), sp.divide(1, (1+a*sp.exp(-b*sp.arctan(hUAV/dist)-a))))
     temp3 = sp.add(sp.add(temp1, temp2), mu_nlos)
     self.pathloss = sp.divide(sp.real(sp.power(10, -sp.divide(temp3, 10))), noise_var)
Example #4
	def european_option_delta(self):
		numerator = sp.add(
			sp.log(
				sp.divide(
					self.spot_price,
					self.strike_price
				)
			),
			sp.multiply(
				( self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma,2)),
				self.time_to_maturity
			)
		)
		d1 = sp.divide(
			numerator,
			sp.prod(
				[
					self.sigma,
					sp.sqrt(self.time_to_maturity)
				],
				axis=0,
			)
		)
		call_delta = self.bls_erf_value(d1)
		put_delta = call_delta - 1 
		
		return call_delta, put_delta
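For reference, the quantity assembled above is the standard Black-Scholes d_1 (assuming `bls_erf_value` evaluates the standard normal CDF N(.)); in LaTeX:

d_1 = \frac{\ln(S/K) + (r - q + \sigma^2/2)\,T}{\sigma\sqrt{T}},
\qquad \Delta_{put} = \Delta_{call} - 1 \text{ since } N(d_1) - 1 = -N(-d_1)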
Example #5
	def european_option_rho(self):
		"Price of the call option"
		"the vectorized method can compute price of multiple options in array"
		numerator = sp.add(
			sp.log(
				sp.divide(
					self.spot_price,
					self.strike_price,
				)
			),
			sp.multiply(
				(
					self.interest_rate - self.dividend_yield +
					0.5*sp.power(self.sigma,2)
				),
				self.time_to_maturity)
		)
		d1 = sp.divide(
			numerator,
			sp.prod(
				[
					self.sigma,
					sp.sqrt(self.time_to_maturity)
				],
				axis=0,
			)
		)
		d2 = sp.add(
			d1,
			-sp.multiply(
				self.sigma,
				sp.sqrt(self.time_to_maturity)
			)
		)


		# Black-Scholes rho scales with the strike: K * T * exp(-r*T)
		j = sp.prod(
			[
				self.strike_price,
				self.time_to_maturity,
				sp.exp(
					sp.multiply(
						-self.interest_rate,
						self.time_to_maturity
					)
				),
			],
			axis=0
		)

		c_rho = j * self.bls_erf_value(d2)
		p_rho = -j * self.bls_erf_value(-d2)

		return c_rho, p_rho
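Again for reference, the textbook rho pair this method appears to target, and the reason the prefactor above uses the strike:

\rho_{call} = K\,T\,e^{-rT}\,N(d_2), \qquad \rho_{put} = -K\,T\,e^{-rT}\,N(-d_2)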
Example #6
def PointDipole(Freq,EpsB,Cell,NX,NY,NZ,Xs,Ys,Zs,XPol,YPol,ZPol):
    """Returns the incident field Ein over the computational domain,
       produced by a point dipole source polarized as (XPol,YPol,ZPol)
       and located at (Xs,Ys,Zs)
    """
    c0=299792458.0 # speed of light in vacuum
    Mu0=4.0*sci.pi*1.0e-7 # vacuum permeability
    Eps0=1.0/(Mu0*c0*c0) # vacuum permittivity
    Field=sci.zeros((NX,NY,NZ,3),complex)
    Omega=2.0*sci.pi*Freq
    EtaB=-1.0j*Omega*Eps0*EpsB
    ZetaB=-1.0j*Omega*Mu0
    KB=Omega*sci.sqrt(Eps0*EpsB*Mu0)
    # 3D arrays of x,y,z coordinates
    xx,yy,zz=sci.mgrid[0:NX*Cell:Cell,0:NY*Cell:Cell,0:NZ*Cell:Cell]
    # 3D arrays of distances
    dd=sci.sqrt((xx-Xs)**2+(yy-Ys)**2+(zz-Zs)**2)
    dd2=dd*dd
    Xd=xx-Xs
    Yd=yy-Ys
    Zd=zz-Zs
    # 3D arrays of components of the Q-matrix
    Q11=(Xd*Xd)/dd2
    Q12=(Xd*Yd)/dd2
    Q13=(Xd*Zd)/dd2
    Q21=Q12
    Q22=(Yd*Yd)/dd2
    Q23=(Yd*Zd)/dd2
    Q31=Q13
    Q32=Q23
    Q33=(Zd*Zd)/dd2
    QJ1=Q11*XPol+Q12*YPol+Q13*ZPol
    QJ2=Q21*XPol+Q22*YPol+Q23*ZPol
    QJ3=Q31*XPol+Q32*YPol+Q33*ZPol

    dd3=dd2*dd

    Field[0:NX,0:NY,0:NZ,0]=sci.exp(1.0j*KB*dd)*\
                (sci.divide((3.0*QJ1-XPol),(4.0*EtaB*sci.pi*dd3))\
                +sci.divide((3.0*QJ1-XPol)*(-1.0j*KB),(4.0*EtaB*sci.pi*dd2))\
                +sci.divide((QJ1-XPol),(4.0*ZetaB*sci.pi*dd)))

    Field[0:NX,0:NY,0:NZ,1]=sci.exp(1.0j*KB*dd)*\
                (sci.divide((3.0*QJ2-YPol),(4.0*EtaB*sci.pi*dd3))\
                +sci.divide((3.0*QJ2-YPol)*(-1.0j*KB),(4.0*EtaB*sci.pi*dd2))\
                +sci.divide((QJ2-YPol),(4.0*ZetaB*sci.pi*dd)))

    Field[0:NX,0:NY,0:NZ,2]=sci.exp(1.0j*KB*dd)*\
                (sci.divide((3.0*QJ3-ZPol),(4.0*EtaB*sci.pi*dd3))\
                +sci.divide((3.0*QJ3-ZPol)*(-1.0j*KB),(4.0*EtaB*sci.pi*dd2))\
                +sci.divide((QJ3-ZPol),(4.0*ZetaB*sci.pi*dd)))

    return Field
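A minimal call sketch (all values illustrative; the dipole is placed outside the grid so no cell has dd = 0):

import scipy as sci  # the snippet's old-style alias; modern code would use numpy

Ein = PointDipole(Freq=1.0e9, EpsB=1.0, Cell=5.0e-3,
                  NX=16, NY=16, NZ=16,
                  Xs=-0.1, Ys=-0.1, Zs=-0.1,     # source location, off-grid
                  XPol=0.0, YPol=0.0, ZPol=1.0)  # z-polarized dipole
print(Ein.shape)  # (16, 16, 16, 3)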
Example #7
 def __init__(self,WeakLearner,training_data, \
              bin_training_label,T,Twl):
     self.T = T
     self.weakLearner = WeakLearner
     numOfSamples = len(training_data)
     self.numOfFeatures = len(training_data[0])
     self.D = [1.0/numOfSamples for _ in xrange(0,numOfSamples)]
     self.w = [0.0 for _ in xrange(0,T)]
     self.ht = []
     for t in xrange(0,T):
         if t%10==0:
             print "getting weak learner number "+str(t)
         wl = self.weakLearner(training_data,bin_training_label,Twl,D=self.D)
         self.ht.append(wl)
         epsilon=0.00001 
         #we want to prevent the case where epsilon=0,
         #because the calculation of w relies on epsilon!=0
         for i in xrange(0,numOfSamples):
             h_t_xi = self.ht[t].Classify(training_data[i])
             y_xi = bin_training_label[i]
             if h_t_xi != y_xi :
                 epsilon+=self.D[i]
         if epsilon>0.5:
             print "        warning: for the "+str(t)+" iteration got "+\
                 "{0:.2f}".format(epsilon)+" error the training set "
         self.w[t]=0.5*math.log(1.0/epsilon-1)
         for i in xrange(0,numOfSamples):
             yi = labelTosign(bin_training_label[i])
             hti =labelTosign(self.ht[t].Classify(training_data[i]))
             self.D[i]=self.D[i]*math.exp(-1.0*self.w[t]*yi*hti)
         self.D = sp.divide(self.D,sp.sum(self.D))
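The loop above is the standard AdaBoost update; in LaTeX, with weighted error \epsilon_t:

w_t = \tfrac{1}{2}\ln\frac{1-\epsilon_t}{\epsilon_t}
    = \tfrac{1}{2}\ln\Big(\frac{1}{\epsilon_t}-1\Big),
\qquad D_{t+1}(i) \propto D_t(i)\,e^{-w_t\,y_i\,h_t(x_i)}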
Example #8
 def density(self, x):
     assert x.shape == self.dim, "Problem with the dimensionalities"
     assert x.dtype == int, "x has to be an integer array"
     theta = self.params['theta'].flatten()
     x = x.flatten()
     # return s.prod (stats.poisson.pmf(x,theta) )
     return s.prod(s.divide(theta**x * s.exp(-theta), s.misc.factorial(x)))
Example #9
def generate_stats(trueY, forecastY, missing=True):
    """ From TRMF code """
    nz_mask = trueY != 0
    diff = forecastY - trueY
    abs_true = sp.absolute(trueY)
    abs_diff = sp.absolute(diff)

    def my_mean(x):
        tmp = x[sp.isfinite(x)]
        assert len(tmp) != 0
        return tmp.mean()

    with sp.errstate(divide='ignore'):
        # rmse
        rmse = sp.sqrt((diff**2).mean())
        # normalized root mean squared error
        nrmse = sp.sqrt((diff**2).mean()) / abs_true.mean()

        # baseline
        abs_baseline = sp.absolute(trueY[1:, :] - trueY[:-1, :])
        mase = abs_diff.mean() / abs_baseline.mean()
        m_mase = my_mean(abs_diff.mean(axis=0) / abs_baseline.mean(axis=0))

        mape = my_mean(sp.divide(abs_diff, abs_true, where=nz_mask))

        return mape, mase, rmse
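A hedged usage sketch with synthetic data (shapes are time x series, as the baseline trueY[1:, :] - trueY[:-1, :] implies):

import numpy as np

rng = np.random.default_rng(0)
trueY = rng.random((100, 5)) + 0.5                    # keep values away from zero
forecastY = trueY + 0.05 * rng.standard_normal(trueY.shape)
mape, mase, rmse = generate_stats(trueY, forecastY)
print(mape, mase, rmse)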
Example #10
 def __init__(self,WeakLearner,training_data, \
              bin_training_label,T,Twl):
     self.T = T
     self.weakLearner = WeakLearner
     numOfSamples = len(training_data)
     self.numOfFeatures = len(training_data[0])
     self.D = [1.0 / numOfSamples for _ in xrange(0, numOfSamples)]
     self.w = [0.0 for _ in xrange(0, T)]
     self.ht = []
     for t in xrange(0, T):
         if t % 10 == 0:
             print "getting weak learner number " + str(t)
         wl = self.weakLearner(training_data,
                               bin_training_label,
                               Twl,
                               D=self.D)
         self.ht.append(wl)
         epsilon = 0.00001
         #we want to prevent the case where epsilon=0,
         #because the calculation of w relies on epsilon!=0
         for i in xrange(0, numOfSamples):
             h_t_xi = self.ht[t].Classify(training_data[i])
             y_xi = bin_training_label[i]
             if h_t_xi != y_xi:
                 epsilon += self.D[i]
         if epsilon > 0.5:
             print "        warning: for the "+str(t)+" iteration got "+\
                 "{0:.2f}".format(epsilon)+" error the training set "
         self.w[t] = 0.5 * math.log(1.0 / epsilon - 1)
         for i in xrange(0, numOfSamples):
             yi = labelTosign(bin_training_label[i])
             hti = labelTosign(self.ht[t].Classify(training_data[i]))
             self.D[i] = self.D[i] * math.exp(-1.0 * self.w[t] * yi * hti)
         self.D = sp.divide(self.D, sp.sum(self.D))
Example #11
def reflect1(v, u, c):
    print("Reflect by vector math variant 1:")
    c = 0
    center_ = eT(center(len(v)))
    print("center_:", center_)
    print("v:", v)
    v = scipy.subtract(v, center_)
    print("v:", v)
    print("u:", u)
    print("c:", c)
    v_dot_u = scipy.dot(v, u)
    print("v_dot_u:", v_dot_u)
    v_dot_u_minus_c = scipy.subtract(v_dot_u, c)
    print("v_dot_u_minus_c:", v_dot_u_minus_c)
    u_dot_u = scipy.dot(u, u)
    print("u_dot_u:", u_dot_u)
    quotient = scipy.divide(v_dot_u_minus_c, u_dot_u)
    print("quotient:", quotient)
    subtrahend = scipy.multiply((2 * quotient), u)
    print("subtrahend:", subtrahend)
    reflection = scipy.subtract(v, subtrahend)
    print("reflection:", reflection)
    reflection = scipy.add(reflection, center_)
    print("reflection:", reflection)
    return reflection
Example #12
def hyperplane_equation_from_dimensonality(dimensions,
                                           transpositional_equivalence=False,
                                           sector=1):
    center_ = center(dimensions)
    cyclical_region_vertices_ = cyclical_region_vertices(
        dimensions, transpositional_equivalence)
    for i in range(sector):
        front = cyclical_region_vertices_.pop(0)
        cyclical_region_vertices_.append(front)
    upper_point = midpoint(cyclical_region_vertices_[1],
                           cyclical_region_vertices_[2])
    lower_point = midpoint(cyclical_region_vertices_[0],
                           cyclical_region_vertices_[-1])
    print("sector:", sector)
    print("upper_point:", upper_point)
    print("lower_point:", lower_point)
    if transpositional_equivalence == True:
        center_ = eT(center_)
        #~ lower_point = eT(lower_point)
        #~ upper_point = eT(upper_point)
    normal_vector = scipy.subtract(upper_point, lower_point)
    norm = scipy.linalg.norm(normal_vector)
    unit_normal_vector = scipy.divide(normal_vector, norm)
    constant_term = unit_normal_vector.dot(center_)
    debug("hyperplane_equation_from_dimensonality for", dimensions, "voices:")
    print("center:")
    for e in center_:
        print(e)
    print("unit_normal_vector:")
    for e in unit_normal_vector:
        print(e)
    print("constant_term:", constant_term)
    print()
    return unit_normal_vector, constant_term
Example #13
def hyperplane_equation_by_svd_from_vectors(points, t_equivalence=True):
    t_ = []
    if t_equivalence:
        debug("original points:\n", points)
        for point in points:
            t_.append(eT(point))
        points = t_
    debug("points:\n", points)
    vectors = []
    subtrahend = points[-1]
    debug("subtrahend:", subtrahend)
    for i in range(len(points) - 1):
        vector = scipy.subtract(points[i], subtrahend)
        vectors.append(vector)
    # debug("vectors:\n", vectors)
    vectors = scipy.array(vectors)
    debug("vectors:\n", vectors)
    U, singular_values, V = scipy.linalg.svd(vectors)
    debug("U:\n", U)
    print("singular values:", singular_values)
    debug("V:\n", V)
    normal_vector = V[-1]
    debug("normal_vector:", normal_vector)
    norm = scipy.linalg.norm(normal_vector)
    debug("norm:", norm)
    unit_normal_vector = scipy.divide(normal_vector, norm)
    print("unit_normal_vector:")
    for e in unit_normal_vector:
        print(e)
    constant_term = scipy.dot(scipy.transpose(unit_normal_vector), subtrahend)
    print("constant_term:", constant_term)
    print()
    return unit_normal_vector, constant_term
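The SVD step is the standard null-space trick: the right-singular vector belonging to the smallest singular value of the difference-vector matrix is normal to the hyperplane through the points. A self-contained NumPy check on a known plane (hypothetical data):

import numpy as np

rng = np.random.default_rng(1)
n_true = np.array([1.0, 2.0, 2.0]) / 3.0             # known unit normal
basis = np.linalg.svd(n_true[None, :])[2][1:]        # two directions spanning the plane
pts = 0.5 * n_true + rng.standard_normal((4, 2)) @ basis  # points with n.x = 0.5
vecs = pts[:-1] - pts[-1]
n_est = np.linalg.svd(vecs)[2][-1]                   # smallest-singular-value direction
print(np.isclose(abs(n_est @ n_true), 1.0))          # True: normal recovered up to sign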
Example #14
def hyperplane_equation_by_nullspace_from_points(points, t_equivalence=True):
    t_ = []
    if t_equivalence:
        debug("original points:\n", points)
        for point in points:
            t_.append(eT(point))
        points = t_
    debug("points:\n", points)
    try:
        debug("determinant of points:", scipy.linalg.det(points))
    except Exception:
        pass
    homogeneous_points = []
    for point in points:
        homogeneous_points.append(scipy.append(point, [1]))
    debug("homogeneous points:", homogeneous_points)
    nullspace = scipy.linalg.null_space(homogeneous_points)
    nullspace = scipy.ndarray.flatten(nullspace[:-1])
    debug("nullspace from homogeneous points:", nullspace)
    constant_term = scipy.dot(scipy.transpose(nullspace), points[0])
    debug("constant_term:", constant_term)
    norm = scipy.linalg.norm(nullspace)
    debug("norm:", norm)
    unit_normal = scipy.divide(nullspace, norm)
    print("unit normal vector:", unit_normal)
    constant_term = scipy.dot(scipy.transpose(unit_normal), points[0])
    print("constant_term:", constant_term)
    return unit_normal, constant_term
Example #15
def hyperplane_equation_by_nullspace_from_vectors(points, t_equivalence=True):
    vectors = []
    t_ = []
    if t_equivalence:
        debug("original points:", points)
        for point in points:
            t_.append(eT(point))
        points = t_
    debug("points:", points)
    try:
        debug("determinant of points:", scipy.linalg.det(points))
    except Exception:
        pass
    subtrahend = points[-1]
    debug("subtrahend:", subtrahend)
    for i in range(len(points) - 1):
        vector = scipy.subtract(points[i], subtrahend)
        debug("vector[", i, "]:", vector)
        vectors.append(vector)
    nullspace = scipy.linalg.null_space(vectors)
    debug("nullspace from vectors:", nullspace)
    norm = scipy.linalg.norm(nullspace)
    debug("norm:", norm)
    unit_normal_vector = scipy.divide(nullspace, norm)
    print("unit_normal_vector:")
    for e in unit_normal_vector:
        print(e)
    constant_term = scipy.dot(scipy.transpose(unit_normal_vector), subtrahend)
    print("constant_term:", constant_term)
    return unit_normal_vector, constant_term
Example #16
def hyperplane_equation_by_cross_product(points, t_equivalence=True):
    vectors = []
    t_ = []
    if t_equivalence:
        debug("original points:", points)
        for point in points:
            t_.append(eT(point))
        points = t_
    debug("points:", points)
    debug("determinant of points:", scipy.linalg.det(points))
    subtrahend = points[-1]
    debug("subtrahend:", subtrahend)
    for i in range(len(points) - 1):
        vector = scipy.subtract(points[i], subtrahend)
        debug("vector[", i, "]:", vector)
        vectors.append(vector)
    debug("vectors:", vectors)
    product = generalized_cross_product(vectors)
    print("generalized_cross_product:", product)
    norm = scipy.linalg.norm(product)
    debug("norm:", norm)
    unit_normal_vector = scipy.divide(product, norm)
    print("unit_normal_vector:")
    for e in unit_normal_vector:
        print(e)
    constant_term = scipy.dot(scipy.transpose(unit_normal_vector), subtrahend)
    print("constant_term:", constant_term)
    return unit_normal_vector, constant_term
Example #17
def BodefromTwoTimeDomainVectors(timevector,output,input,truncfreq=100):
    """This function calculates the Bode response between two time
    domain signals.  The timevector is used to calculate the frequency
    vector, which is then used to truncate the Bode response to reduce
    calculation time and return only useful information.  Input and
    output are time domain vectors.

    The return values are
    freq, magnitude ratio, phase, complex

    The goal of this function is to be useful for small amounts of
    data and/or as part of a routine to calculate a Bode response from
    fixed sine data."""

    N=len(timevector)
    f=makefreqvect(timevector)
    co=thresh_py(f,truncfreq)
    f=f[0:co]
    curin_fft=fft(input,None,0)*2/N
    curout_fft=fft(output,None,0)*2/N
    curin_fft=curin_fft[0:co]
    curout_fft=curout_fft[0:co]
    curGxx=norm2(curin_fft)
    curGyy=norm2(curout_fft)
    curGxy=scipy.multiply(scipy.conj(curin_fft),curout_fft)
    H=scipy.divide(curGxy,curGxx)
    Hmag=abs(H)
    Hphase=mat_atan2(scipy.imag(H),scipy.real(H))*180.0/pi
    return f,Hmag,Hphase,H
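A sketch of the intended call pattern, assuming the module's helpers (`makefreqvect`, `thresh_py`, `norm2`, `mat_atan2`) and `fft` are in scope as in the snippet; the signals here are synthetic:

import numpy as np

t = np.arange(0.0, 10.0, 1e-3)                   # 10 s sampled at 1 kHz
u = np.sin(2 * np.pi * 5.0 * t)                  # input: 5 Hz sine
y = 0.5 * np.sin(2 * np.pi * 5.0 * t - 0.3)      # output: scaled and phase-shifted
f, Hmag, Hphase, H = BodefromTwoTimeDomainVectors(t, y, u, truncfreq=50)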
Example #18
 def get_F1Mean(self):
     """
         Compute F1 Mean
     """
     nl = sp.sum(self.confusionMatrix,axis=1,dtype=float)
     nc = sp.sum(self.confusionMatrix,axis=0,dtype=float)
     return 2*sp.mean( sp.divide( sp.diag(self.confusionMatrix), (nl + nc)) )
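Written out, with M the confusion matrix and n^r_k, n^c_k its k-th row and column sums (reference and predicted counts), this returns the macro-averaged F1 over the C classes:

\bar{F}_1 = \frac{1}{C} \sum_{k=1}^{C} \frac{2\,M_{kk}}{n^{r}_k + n^{c}_k}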
Example #19
    def gradEval(self, x, data1):
        '''gradient form is negative for the form shown in the image'''
        self.x_temp = x
        self.dat_temp = data1
        self.fn2 = None
        map(self.secondpart, [i for i in range(10)])
        gd2b = self.fn2  #DSX1

        #fn_stacked creates the first-part w's
        self.gd_stacked = sp.ones(
            784)  #next two steps are required before calling the stack fn
        map(self.stack, data1[1], ['gd_stacked' for i in range(len(data1[1]))])
        self.gd_stacked = sp.delete(
            self.gd_stacked, (0),
            axis=0)  #deleting the first row (of ones created above)

        gd2a1 = self.gd_stacked * self.dat_temp[0]
        gd2a = sp.exp(gd2a1.sum(axis=1, keepdims=True))  #DSX1

        temp = sp.divide(gd2a, gd2b) * self.dat_temp[0]

        self.grad_vec_l = -1 * self.dat_temp[0] + temp

        self.grad_vec = sp.ones((10, 1))
        iter_temp = sp.array([i for i in range(784)])
        map(self.grad_pooling, iter_temp)
        self.grad_vec = sp.delete(self.grad_vec, (0), axis=1)
        return self.grad_vec
Example #20
def array_factor(number_of_elements_x, number_of_elements_y, element_spacing_x,
                 element_spacing_y, frequency, scan_angle_theta,
                 scan_angle_phi, theta, phi):
    """
    Calculate the array factor for a planar uniform array.
    :param number_of_elements_x: The number of elements in the x-direction.
    :param number_of_elements_y: The number of elements in the y-direction.
    :param element_spacing_x: The spacing of the elements in the x-direction (m).
    :param element_spacing_y: The spacing of the elements in the y-direction (m).
    :param frequency: The operating frequency (Hz).
    :param scan_angle_theta: The scan angle in the theta-direction (rad).
    :param scan_angle_phi: The scan angle in the phi-direction (rad).
    :param theta: The pattern angle in theta (rad).
    :param phi: The pattern angle in phi (rad).
    :return: The array factor for a planar uniform array.
    """
    # Calculate the wave number
    k = 2.0 * pi * frequency / c

    # Calculate the phase
    psi_x = k * element_spacing_x * (
        sin(theta) * cos(phi) - sin(scan_angle_theta) * cos(scan_angle_phi))
    psi_y = k * element_spacing_y * (
        sin(theta) * sin(phi) - sin(scan_angle_theta) * sin(scan_angle_phi))

    # Break into numerator and denominator
    numerator = sin(0.5 * number_of_elements_x * psi_x) * sin(
        0.5 * number_of_elements_y * psi_y)
    denominator = number_of_elements_x * number_of_elements_y * sin(
        0.5 * psi_x) * sin(0.5 * psi_y)

    return divide(numerator,
                  denominator,
                  ones_like(psi_x),
                  where=(denominator != 0.0)), psi_x, psi_y
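A hedged usage sketch for a principal-plane cut (assumes `pi`, `sin`, `cos`, `divide`, `ones_like`, and the speed of light `c` are in the defining module's scope, e.g. via numpy and scipy.constants):

import numpy as np
from scipy.constants import pi

theta = np.linspace(0.0, pi / 2, 181)
af, psi_x, psi_y = array_factor(
    number_of_elements_x=8, number_of_elements_y=8,
    element_spacing_x=0.015, element_spacing_y=0.015,  # about half a wavelength at 10 GHz
    frequency=10e9, scan_angle_theta=0.0, scan_angle_phi=0.0,
    theta=theta, phi=np.zeros_like(theta))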
Example #21
    def generate(cls, trueY, forecastY, missing=True):
        nz_mask = trueY != 0
        diff = forecastY - trueY
        abs_true = sp.absolute(trueY)
        abs_diff = sp.absolute(diff)

        def my_mean(x):
            tmp = x[sp.isfinite(x)]
            assert len(tmp) != 0
            return tmp.mean()

        with sp.errstate(divide='ignore'):
            nrmse = sp.sqrt((diff**2).mean()) / abs_true.mean()
            m_nrmse = my_mean(
                sp.sqrt((diff**2).mean(axis=0)) / abs_true.mean(axis=0))

            nd = abs_diff.sum() / abs_true.sum()
            m_nd = my_mean(abs_diff.sum(axis=0) / abs_true.sum(axis=0))

            abs_baseline = sp.absolute(trueY[1:, :] - trueY[:-1, :])
            mase = abs_diff.mean() / abs_baseline.mean()
            m_mase = my_mean(abs_diff.mean(axis=0) / abs_baseline.mean(axis=0))

            mape = my_mean(sp.divide(abs_diff, abs_true, where=nz_mask))

        return cls(nd=nd,
                   mase=mase,
                   nrmse=nrmse,
                   m_nd=m_nd,
                   m_mase=m_mase,
                   m_nrmse=m_nrmse,
                   mape=mape)
Example #22
def processing_part3(flann, gray_roi, gray_img):
    orb = cv2.ORB(nfeatures=100, nlevels=4, scaleFactor=1.3)
    key_points, query_descriptors = orb.detectAndCompute(gray_roi, None)
    matches = flann.knnMatch(np.uint8(query_descriptors), k=6)
    vote_array = np.zeros((gray_img.shape[0] / 10, gray_img.shape[1] / 10),
                          dtype=np.uint8)

    for match in matches:
        for index in match:
            vectorx = (
                key_points[index.queryIdx].size *
                key_points_math_array[index.trainIdx].distance[0] *
                scipy.cos(key_points_math_array[index.trainIdx].distance[2] +
                          key_points[index.queryIdx].distance[2] -
                          key_points_math_array[index.trainIdx].distance[2])
            ) / key_points_math_array[index.trainIdx].size

            vectory = (
                key_points[index.queryIdx].size *
                key_points_math_array[index.trainIdx].distance[0] *
                scipy.sin(key_points_math_array[index.trainIdx].distance[2] +
                          key_points[index.imgIdx].distance[2] -
                          key_points_math_array[index.trainIdx].distance[2])
            ) / key_points_math_array[index.trainIdx].size

            kpx = int(
                scipy.divide(key_points[index.imgIdx].pt[0] + vectorx, 10))
            kpy = int(
                scipy.divide(key_points[index.imgIdx].pt[1] + vectory, 10))

            if (kpx > 0 and kpy > 0) and (kpx < vote_array.shape[1]
                                          and kpy < vote_array.shape[0]):
                vote_array[kpy, kpx] += 1

    vote_array = cv2.resize(vote_array,
                            None,
                            fx=10,
                            fy=10,
                            interpolation=cv2.INTER_NEAREST)
    max_index = np.unravel_index(vote_array.argmax(), vote_array.shape)
    position = (max_index[0], max_index[1])
    cv2.circle(gray_img,
               position,
               gray_img.shape[1] / 33, (0, 0, 255),
               thickness=2)
    return gray_img
Example #23
 def updateExpectations(self):
     a, b = self.params['a'], self.params['b']
     E = s.divide(a, a + b)
     lnE = special.digamma(a) - special.digamma(a + b)
     lnEInv = special.digamma(b) - special.digamma(
         a + b)  # expectation of ln(1-X)
     lnEInv[s.isinf(
         lnEInv)] = -s.inf  # there is a numerical error in lnEInv if E=1
     self.expectations = {'E': E, 'lnE': lnE, 'lnEInv': lnEInv}
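These are the standard Beta(a, b) expectations, with \psi the digamma function:

E[X] = \frac{a}{a+b}, \quad
E[\ln X] = \psi(a) - \psi(a+b), \quad
E[\ln(1-X)] = \psi(b) - \psi(a+b)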
Example #24
	def european_option_vega(self):
		numerator = sp.add(
			sp.log(
				sp.divide(
					self.spot_price,
					self.strike_price
				)
			),
			sp.multiply(
				( self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma,2)),
				self.time_to_maturity
			)
		)
		d1 = sp.divide(
			numerator,
			sp.prod(
				[
					self.sigma,
					sp.sqrt(self.time_to_maturity)
				],
				axis=0,
			)
		)
		
		val = sp.multiply(
			sp.multiply(
				self.spot_price,
				sp.exp(
					-sp.multiply(
						self.dividend_yield,
						self.time_to_maturity
					)
				)
			),
			sp.exp(-sp.square(d1)*0.5)
		)
		val = sp.multiply(
			val,
			sp.sqrt(self.time_to_maturity)
		)
		vega = (1/sqrt(2*pi))*val
		
		return vega
Example #25
def diffTaylor(v,i): 
    coeffs = polyfit(v,i,3)
    vFit = linspace(min(v),max(v),100) 
    iFit = polyval(coeffs, vFit) 
    polynomial = poly1d(coeffs)
   
    #didv = lambda ix: 6*coeffs[0]*((ix)**5) + 5*coeffs[1]*((ix)**4) + 4*coeffs[2]*((ix)**3) + 3*coeffs[3]*((ix)**2) + 2*coeffs[4]*(ix) + coeffs[5]
    didv = lambda ix:  3*coeffs[0]*((ix)**2) + 2*coeffs[1]*(ix) + coeffs[2]
    g = [float(didv(val)) for val in vFit]  
    rFit = divide(1,g)
    return vFit, iFit, rFit
Example #26
    def loss_to_pair(self, pair, atg_a, atg_b, pl_exp=4, gamma=1e2):
        dist = sp.sqrt(
            sp.add(sp.square(pair.tx_x),
                   sp.add(sp.square(pair.tx_y), sp.square(self.h))))
        phi = sp.multiply(sp.divide(180, sp.pi),
                          sp.arcsin(sp.divide(self.h, dist)))
        pr_LOS = sp.divide(
            1,
            sp.add(
                1,
                sp.multiply(
                    atg_a, sp.exp(sp.multiply(-atg_b, sp.subtract(phi,
                                                                  atg_a))))))
        pr_NLOS = sp.subtract(1, pr_LOS)

        total_loss = sp.add(
            sp.multiply(pr_LOS, sp.power(dist, -pl_exp)),
            sp.multiply(sp.multiply(pr_NLOS, gamma), sp.power(dist, -pl_exp)))

        return total_loss
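The expression assembled here is the usual air-to-ground LOS-probability channel model, with the elevation angle phi in degrees:

P_{LOS} = \frac{1}{1 + a\,e^{-b(\phi - a)}}, \qquad
\phi = \frac{180}{\pi}\arcsin\!\Big(\frac{h}{d}\Big), \qquad
L = P_{LOS}\,d^{-\alpha} + \gamma\,(1 - P_{LOS})\,d^{-\alpha}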
Example #27
 def feval(self, x, average=True):
     '''I'm considering the opt problem for the parameters of each digit
     independently, i.e., I have ten opt problems to solve.
     loss_fn is a 10x1 matrix or vector'''
     data = self.data.train
     loss_fn = self.funcEval(x, data)
     if average == True:
         n_vals = sp.bincount(self.data.train[1]).astype(
             float)  #counts no of 1's 2's ..... in the dataset
         loss_fn = sp.divide(loss_fn, sp.reshape(n_vals, sp.shape(loss_fn)))
         return loss_fn
     else:
         return loss_fn
Example #28
 def sfeval(self, x, ndata=100, average=True):
     '''slice the data, slice size is given by ndata.'''
     low_no = sp.random.randint(0, high=50000 - ndata)
     high_no = low_no + ndata
     sli = slice(low_no, high_no)
     data2 = []
     data2.append(self.data.train[0][sli])
     data2.append(self.data.train[1][sli])
     loss_fn = self.funcEval(x, tuple(data2))
     if average == True:
         '''assuming that among these 100 samples every digit occurs at least once'''
         n_vals = sp.bincount(data2[1]).astype(float)
         loss_fn = sp.divide(loss_fn, sp.reshape(n_vals, sp.shape(loss_fn)))
         return loss_fn
     else:
         return loss_fn
Example #29
def far_fields(radius, frequency, r, theta, phi):
    """
    Calculate the electric and magnetic fields in the far field of the aperture.
    :param radius: The radius of the aperture (m).
    :param frequency: The operating frequency (Hz).
    :param r: The range to the field point (m).
    :param theta: The theta angle to the field point (rad).
    :param phi: The phi angle to the field point (rad).
    :return: The electric and magnetic fields radiated by the aperture (V/m), (A/m).
    """
    # Calculate the wavenumber
    k = 2.0 * pi * frequency / c

    # Calculate the wave impedance
    eta = sqrt(mu_0 / epsilon_0)

    # Calculate the argument for the Bessel function
    z = k * radius * sin(theta)

    # Calculate the Bessel function term J1(z)/z, supplying `out` so entries
    # with z == 0 take the analytic limit 1/2 instead of uninitialized values
    # (full_like assumed available from the module's numpy imports)
    bessel_term = divide(jv(1, z), z, out=full_like(z, 0.5), where=(z != 0.0))

    # Define the radial-component of the electric far field (V/m)
    e_r = 0.0

    # Define the theta-component of the electric far field (V/m)
    e_theta = 1j * k * radius**2 * exp(
        -1j * k * r) / r * sin(phi) * bessel_term

    # Define the phi-component of the electric far field (V/m)
    e_phi = 1j * k * radius**2 * exp(-1j * k * r) / r * cos(phi) * bessel_term

    # Define the radial-component of the magnetic far field (A/m)
    h_r = 0.0

    # Define the theta-component of the magnetic far field (A/m)
    h_theta = 1j * k * radius**2 * exp(
        -1j * k * r) / r * -cos(theta) * cos(phi) / eta * bessel_term

    # Define the phi-component of the magnetic far field (A/m)
    h_phi = 1j * k * radius**2 * exp(
        -1j * k * r) / r * sin(phi) / eta * bessel_term

    # Return all six components of the far field
    return e_r, e_theta, e_phi, h_r, h_theta, h_phi
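A usage sketch for one pattern cut (values illustrative; assumes the snippet's imports, e.g. `jv` from scipy.special and `mu_0`, `epsilon_0`, `c` from scipy.constants):

import numpy as np

theta = np.linspace(0.0, np.pi / 2, 91)
phi = np.full_like(theta, np.pi / 2)   # E-plane cut
e_r, e_theta, e_phi, h_r, h_theta, h_phi = far_fields(
    radius=0.1, frequency=10e9, r=1000.0, theta=theta, phi=phi)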
Example #30
def diffTaylor(v, i):

    coeffs = polyfit(v, i, 3)
    vFit = linspace(min(v), max(v), 100)
    iFit = polyval(coeffs, vFit)
    polynomial = poly1d(coeffs)

    # Second order
    didv = lambda ix: 3 * coeffs[0] * (
        (ix)**2) + 2 * coeffs[1] * (ix) + coeffs[2]

    # Fifth order
    #didv = lambda ix:
    #		6*coeffs[0]*((ix)**5) + 5*coeffs[1]*((ix)**4) + 4*coeffs[2]*((ix)**3) +
    # 		3*coeffs[3]*((ix)**2) + 2*coeffs[4]*(ix) 	  + coeffs[5]

    rFit = divide(1, [float(didv(val)) for val in vFit])
    return vFit, iFit, rFit
Example #31
def main():
    wheel_graph = networkx.generators.classic.wheel_graph(10)
    model = glove.Glove(2, learning_rate=0.01, alpha=0.2, max_count=1000)

    adj_matrix = networkx.adjacency_matrix(wheel_graph)
    adj_matrix = adj_matrix.toarray().astype('d')
    normalized_adj_matrix = scipy.divide(adj_matrix,
                                         adj_matrix.sum(1)[:, scipy.newaxis])
    model.fit(scipy.sparse.coo_matrix(normalized_adj_matrix), epochs=1000)

    vertex_positions = {
        vertex_idx: tuple(model.word_vectors[vertex_idx])
        for vertex_idx in range(wheel_graph.order())
    }

    networkx.drawing.draw(wheel_graph, pos=vertex_positions)
    plt.savefig("asdf.png")
Example #32
def reflect(v, u, c):
    print("Reflect by vector math:")
    print("v:", v)
    print("u:", u)
    print("c:", c)
    v_dot_u = scipy.dot(v, u)
    print("v_dot_u:", v_dot_u)
    v_dot_u_minus_c = scipy.subtract(v_dot_u, c)
    print("v_dot_u_minus_c:", v_dot_u_minus_c)
    u_dot_u = scipy.dot(u, u)
    print("u_dot_u:", u_dot_u)
    quotient = scipy.divide(v_dot_u_minus_c, u_dot_u)
    print("quotient:", quotient)
    subtrahend = scipy.multiply(u, (2 * quotient))
    print("subtrahend:", subtrahend)
    reflection = scipy.subtract(v, subtrahend)
    print("reflection:", reflection)
    return reflection
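In vector form, this is the reflection of v across the hyperplane u . x = c:

v' = v - 2\,\frac{v \cdot u - c}{u \cdot u}\,u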
Example #33
 def build_W(self, N_x, sparsity_tuples, scaling_W):
     # N_x integer
     # sparsity between 0 and 1 inclusive
     # scaling_W >= 1
     sparsity_tuples_list = tuple([(a_row[0], a_row[1])
                                   for a_row in sparsity_tuples])
     if os.path.isfile('./W_(adjacency)/W_' + str(N_x) + '_' + str(N_x) +
                       '_' + str(sparsity_tuples_list) + '.txt'):
         print("Loading W")
         # If file already exists
         W = sp.loadtxt('./W_(adjacency)/W_' + str(N_x) + '_' + str(N_x) +
                        '_' + str(sparsity_tuples_list) + '.txt')
     else:
         # If file doesn't yet exist
         # No notion of locality here
         # Designate connection or no connection at each element of the (N_x) by (N_x) matrix:
         W_sparsity_list = []
         # Rows used for each sparsity
         rows_used_for_sparsity = (sp.around(
             sp.multiply(sparsity_tuples[:, 1], N_x))).astype(int)
         for i, sparsity_pair in enumerate(sparsity_tuples):
             this_density = sparsity_pair[0]
             W_sparsity_list.append(
                 sp.sparse.random(rows_used_for_sparsity[i],
                                  N_x,
                                  density=this_density).todense())
             #TODO: make sure there are no 'holes' in W
         # Build sparse adjacency matrix W:
         W_unnormalized = sp.multiply(
             sp.random.choice((-1, 1), size=(N_x, N_x)),
             sp.vstack(W_sparsity_list))
         # Normalize by largest eigenvalue and additional scaling factor
         # to control decrease of spectral radius.
         spectral_radius = sp.amax(abs(sp.linalg.eigvals(W_unnormalized)))
         print("SPECTRAL RADIUS IS IS " + str(spectral_radius))
         W = sp.multiply(scaling_W,
                         sp.divide(W_unnormalized, spectral_radius))
         sp.savetxt('W_(adjacency)/W_' + str(N_x) + '_' + str(N_x) + '_' +
                    str(sparsity_tuples_list) + '.txt',
                    W,
                    fmt='%.4f')
     return W
Example #34
def load_graph_adj_matrix():
    adj_list_in_ids = {}
    tag_name_to_idx = {}
    with open('graph.csv') as graph_file:
        graph_reader = csv.DictReader(graph_file)
        tag_names = set()
        for row in graph_reader:
            tag_name_1 = row['tag1']
            tag_name_2 = row['tag2']
            post_count = int(row['post_count'])

            tag_names.add(tag_name_1)
            tag_names.add(tag_name_2)
            try:
                adj_list_in_ids[tag_name_1].append((tag_name_2, post_count))
            except KeyError:
                adj_list_in_ids[tag_name_1] = [(tag_name_2, post_count)]

        tag_name_to_idx = {
            tag_name: idx
            for idx, tag_name in enumerate(sorted(tag_names))
        }

    # initialize empty matrix, sized by every tag seen (tags that appear only
    # as tag2 have an index in tag_name_to_idx but no key in adj_list_in_ids)
    tags_count = len(tag_name_to_idx)
    adj_matrix = scipy.zeros((tags_count, tags_count))
    for tag_name, adj_row in adj_list_in_ids.items():
        tag_namex = tag_name_to_idx[tag_name]
        for adj_tag, post_count in adj_row:
            adj_tag_namex = tag_name_to_idx[adj_tag]
            adj_matrix[tag_namex, adj_tag_namex] = post_count
            adj_matrix[adj_tag_namex, tag_namex] = post_count
    # normalization
    normalized_adj_matrix = scipy.divide(adj_matrix,
                                         adj_matrix.sum(1)[:, scipy.newaxis])

    #ordered_by_idx_tag_names = sorted(tag_name_to_idx.keys(), key=lambda tag_name: tag_name_to_idx[tag_name])
    return scipy.sparse.coo_matrix(normalized_adj_matrix), tag_name_to_idx
Example #35
def CalcBodesFromDataset(tddict, bodelist, description='', \
                         truncfreq=100, filto=2, wn=0.1,\
                         avefilt=True, jump=250):
#    pdb.set_trace()
    bodedict={}
    bodedict['description']=description

    tempfreq=makefreqvect(tddict['t'])
    co=thresh_py(tempfreq,truncfreq)
    bodedict['freq']=tempfreq[0:co]
    bodedict['xname']='freq'
    bodedict['bodes']=[]

    avedict=copy.deepcopy(bodedict)
    N=len(tddict['t'])

    fncopy=tddict['filenames']
    fnout=[]
    for fn in fncopy:
       folder,name=os.path.split(fn)
       fnout.append(name)
    bodedict['filenames']=fnout
    avedict['filenames']=fnout
    for i, curbode in enumerate(bodelist):
       outbode=copy.deepcopy(curbode)
       outbode.labels=bodedict['filenames']
       curavebode=copy.deepcopy(curbode)
       curavebode.averaged=True
       curavebode.labels=outbode.labels
       curin_t=tddict[curbode.input]
       curout_t=tddict[curbode.output]
       t1=time.time()
       curin_fft=fft(curin_t,None,0)*2/N
       curout_fft=fft(curout_t,None,0)*2/N
       t2=time.time()
       print('fft time='+str(t2-t1))
       curin_fft=curin_fft[0:co]
       curout_fft=curout_fft[0:co]
       curGxx=norm2(curin_fft)
       curGyy=norm2(curout_fft)
       curGxy=scipy.multiply(scipy.conj(curin_fft),curout_fft)
       H=scipy.divide(curGxy,curGxx)
       Hmag=abs(H)
       outbode.mag=Hmag
       Gxyave=scipy.mean(curGxy,1)
       Gxxave=scipy.mean(curGxx,1)
       Gyyave=scipy.mean(curGyy,1)
       cohnum=norm2(Gxyave)
       cohden=scipy.multiply(Gxxave,Gyyave)
       curavebode.coh=scipy.divide(cohnum,cohden)
       Have=scipy.divide(Gxyave,Gxxave)
       curavebode.mag=abs(Have)
       Hphase=mat_atan2(scipy.imag(H),scipy.real(H))
       Hphase_ave=mat_atan2(scipy.imag(Have),scipy.real(Have))
       Hphase=Hphase*180/scipy.pi
       Hphase_ave=Hphase_ave*180/scipy.pi
       if curbode.seedfreq and curbode.seedphase:
#           print('Massaging the phase')
            Hphase=PhaseMassage(Hphase,bodedict['freq'],curbode.seedfreq,curbode.seedphase)
            Hphase_ave=PhaseMassage(Hphase_ave,avedict['freq'],curbode.seedfreq,curbode.seedphase)
            if avefilt:
                Hphase=PhaseMassageFilter(Hphase,filto,wn,jump)
                Hphase_ave=PhaseMassageFilter(Hphase_ave,filto,wn,jump)
#           Hphase=AveMessage(Hphase,bodedict['freq'],curbode.seedfreq,curbode.seedphase,280.0,5)
#           Hphase_ave=AveMessage(Hphase_ave,avedict['freq'],curbode.seedfreq,curbode.seedphase,280.0,5)
       outbode.phase=Hphase
       curavebode.phase=Hphase_ave
       bodedict['bodes'].append(outbode)
       avedict['bodes'].append(curavebode)
    return bodedict,avedict
Example #36
def findColorRegions(picture, res):
	print "Counting Color Regions:", os.path.basename(picture) , '(', res, ')...', 
	
	struct = np.array([[0,1,0],
					   [1,1,1],
					   [0,1,0]])
	
	##open image
	im = Image.open(picture)

	#im.show()
	im = im.convert("L")

	xsize, ysize = im.size

	if (xsize*res < 60) or (ysize*res < 60):
		print "Image to small for given resolution..."
		res = 60.0 / min([xsize, ysize])
		print "New resolution for this image:", res
			
	im = im.resize((xsize*res, ysize*res),Image.ANTIALIAS)
	
	xsize, ysize = im.size
	
	im = fromimage(im)
	im = scipy.transpose(im)
	##reduce colors
	im = scipy.divide(im, 10)
	im = im *10
	
	##make markers
	mark = 0
	markers = np.zeros_like(im).astype('int')
	
	pd = 0
	opd = 0
	for x in range(xsize):
		pd = int(x/xsize*.5)
		if pd > opd: print '.',
		opd = pd
		
		for y in range(ysize):
			if (x%(int(xsize/30)) == 0) and (y%(int(ysize/30)) == 0):
				mark += 1
				markers[x,y] = mark
	##run watershed
	water = ndimage.watershed_ift(im.astype('uint8'), markers, structure = struct)

	##make some masks and count the size of each region
	sizecount = []
	
	marks = range(mark+1)
	for index in range(len(marks)):
		sizecount.append([])
	
	pd = 0
	opd = 0
	for x in range(0,xsize):
		for y in range(0,ysize):
			sizecount[marks.index(water[x,y])].append((x,y))
	
	##make markers based on large regions
	mark = 0
	shapes = 0
	markers = np.zeros_like(im).astype('int')
	for mark in marks:
		if len(sizecount[marks.index(mark)]) >= (xsize/30 + ysize/30)/2: #should be a better ratio
			shapes += 1
			
	print shapes
	return shapes
Example #37
def waterPanelize(picture):

	print "Water Panalizing:", os.path.basename(picture)
	
	struct = np.array([[0,1,0],
					   [1,1,1],
					   [0,1,0]])
	
	##open image
	im = Image.open(picture)
	imcopy = im
	#im.show()
	im = im.convert("L")
	
	xsize, ysize = im.size
	
	#prescaling
	im = im.resize((xsize*ops.scaling, ysize*ops.scaling))
	xsize, ysize = im.size
	
	print "xSize:", xsize, "ySize:", ysize

	print '!'
	
	##reduce colors
	im = scipy.divide(im, 30)
	im = im *30
	
	#transpose for scipy
	im = scipy.transpose(im)
	print "im shape:", im.shape

	
	##make markers
	mark = 0
	markers = np.zeros_like(im).astype('int')
	for x in range(xsize):
		for y in range(ysize):
			if (x%(ops.markRes) == 0) and (y%(ops.markRes) == 0):
				mark += 1
				markers[x,y] = mark
	
	##run watershed
	water = ndimage.watershed_ift(im.astype('uint8'), markers, structure = struct)
	#water[xm, ym] = water[xm-1, ym-1] # remove the isolate seeds
	
	#make diff mask for 'gutter'
	bgwc = water[1,1] ##assumes that (1,1) is part of the gutter
	blackbg = np.zeros_like(im).astype('uint8')
	
	for x in range(xsize):
		for y in range(ysize):
			if not (water[x,y] == bgwc):
				blackbg[x,y] = im[x,y]
	
	#subtract black bg mask
	im = im - blackbg

	##run watershed
	water = ndimage.watershed_ift(im.astype('uint8'), markers, structure = struct) 

	
	##make some masks and count the size of each region
	sizecount = []
	masks = []
	
	marks = range(mark+1)
	for index in range(len(marks)):
		sizecount.append(0)
		masks.append([])
	
	for x in range(0,xsize,ops.res):
		print int(float(x)/xsize*100), '%'
		for y in range(0,ysize,ops.res):
			sizecount[marks.index(water[x,y])] += 1
			masks[marks.index(water[x,y])].append((x,y))
	
	##make markers based on large regions
	mark = 0
	markers = np.zeros_like(im).astype('int')
	for mark in marks:
		if sizecount[marks.index(mark)] >= 200*200/ops.res/ops.res*ops.scaling:
			markers[masks[marks.index(mark)][int(200*200/ops.res/ops.res*ops.scaling)]] = mark
			print 'panel found'
	
	##run watershed
	water = ndimage.watershed_ift(im.astype('uint8'), markers, structure = struct) 
	
	##retranspose and postscale
	water = scipy.transpose(water)
	water = water/ops.scaling
	
	##save and view
	if ops.view: toimage(water).resize((xsize/2,ysize/2)).show()
	if ops.save: 
		print (os.path.join(ops.outDir,os.path.basename(picture)+'_mask.jpg'), "JPEG")
		toimage(water).convert('RGB').save(os.path.join(ops.outDir,os.path.basename(picture)+'_mask.jpg'), "JPEG")
		
	return os.path.join(ops.outDir,os.path.basename(picture)+'_mask.jpg'), "JPEG"
Example #38
def findShapes(picture, res):
	print "Counting Shapes:", os.path.basename(picture), '(', res, ')...', 
	
	struct = np.array([[0,1,0],
					   [1,1,1],
					   [0,1,0]])
	
	##open image
	im = Image.open(picture)

	#im.show()
	im = im.convert("L")

	xsize, ysize = im.size
	
	if (xsize*res < 60) or (ysize*res < 60):
		print "Image to small for given resolution..."
		res = 60.0 / min([xsize, ysize])
		print "New resolution for this image:", res
		
	im = im.resize((xsize*res, ysize*res), Image.ANTIALIAS)
	xsize, ysize = im.size
	
	im = im.filter(ImageFilter.FIND_EDGES)

	im = fromimage(im)
	im = scipy.transpose(im)
	im = scipy.divide(im, 10)
	im = im *10
	##make markers
	mark = 0
	markers = np.zeros_like(im).astype('int')
	
	for x in range(xsize):
		for y in range(ysize):
			if (x%(int(xsize/30)) == 0) and (y%(int(ysize/30)) == 0):
				mark += 1
				markers[x,y] = mark
	
	##run watershed
	water = ndimage.watershed_ift(im.astype('uint8'), markers, structure = struct)
	#toimage(water).save("sw"+ os.path.basename(picture)) #debug output
	
	##make some masks and count the size of each region
	sizecount = []
	
	marks = range(mark+1)
	for index in range(len(marks)):
		sizecount.append(0)
	
	for x in range(0,xsize):
		for y in range(0,ysize):
			sizecount[marks.index(water[x,y])] += 1
	
	##make markers based on large regions
	mark = 0
	shapes = 0
	markers = np.zeros_like(im).astype('int')
	for mark in marks:
		if sizecount[marks.index(mark)] >= (xsize/30 + ysize/30)/2:
			shapes += 1
			
	print shapes
	return shapes
Example #39
def greentensor(Freq,EpsB,Cell,NX,NY,NZ):
    """Returns the Fourier transform GF of the
       circular extension of the Green's tensor array
    """
    c0=299792458.0 # speed of light in vacuum
    Mu0=4.0*sci.pi*1.0e-7 # vacuum permeability
    Eps0=1.0/(Mu0*c0*c0) # vacuum permittivity
    Omega=2.0*sci.pi*Freq
    EtaB=-1.0j*Omega*Eps0*EpsB
    ZetaB=-1.0j*Omega*Mu0
    KB=Omega*sci.sqrt(Eps0*EpsB*Mu0)
    G=sci.zeros((NX,NY,NZ,3,3),complex)
    GC=sci.zeros((NX*2,NY*2,NZ*2,3,3),complex)
    GF=sci.zeros((NX*2,NY*2,NZ*2,3,3),complex)
    # 3D arrays of x,y,z coordinates
    xx,yy,zz=sci.mgrid[0:NX*Cell:Cell,0:NY*Cell:Cell,0:NZ*Cell:Cell]
    dd=sci.zeros((NX,NY,NZ),complex)
    alpha=sci.zeros((NX,NY,NZ),complex)
    beta=sci.zeros((NX,NY,NZ),complex)
    Q11=sci.zeros((NX,NY,NZ),complex)
    Q12=sci.zeros((NX,NY,NZ),complex)
    Q13=sci.zeros((NX,NY,NZ),complex)
    Q21=sci.zeros((NX,NY,NZ),complex)
    Q22=sci.zeros((NX,NY,NZ),complex)
    Q23=sci.zeros((NX,NY,NZ),complex)
    Q31=sci.zeros((NX,NY,NZ),complex)
    Q32=sci.zeros((NX,NY,NZ),complex)
    Q33=sci.zeros((NX,NY,NZ),complex)
    # 3D arrays of distances
    dd=sci.sqrt((xx)**2+(yy)**2+(zz)**2)
    dd2=dd*dd
    # 3D arrays of components of the Q-matrix
    Q11=sci.divide(xx*xx,dd2)
    Q12=sci.divide(xx*yy,dd2)
    Q13=sci.divide(xx*zz,dd2)
    Q21=Q12
    Q22=sci.divide(yy*yy,dd2)
    Q23=sci.divide(yy*zz,dd2)
    Q31=Q13
    Q32=Q23
    Q33=sci.divide(zz*zz,dd2)
    # alpha and beta scalar multipliers
    alpha=sci.divide(sci.exp(1.0j*KB*dd),4.0*sci.pi*dd)*\
    (-KB**2.0 - sci.divide(1.0j*3.0*KB,dd) + sci.divide(3.0,dd2))
    beta=sci.divide(sci.exp(1.0j*KB*dd),4.0*sci.pi*dd)*\
    (KB**2.0 + sci.divide(1.0j*KB,dd) - sci.divide(1.0,dd2))
    print('All divisions by zero are done')
    # Green's tensor without self-patch
    G[:,:,:,0,0]=Q11*alpha+beta
    G[:,:,:,0,1]=Q12*alpha
    G[:,:,:,0,2]=Q13*alpha
    G[:,:,:,1,0]=Q21*alpha
    G[:,:,:,1,1]=Q22*alpha+beta
    G[:,:,:,1,2]=Q23*alpha
    G[:,:,:,2,0]=Q31*alpha
    G[:,:,:,2,1]=Q32*alpha
    G[:,:,:,2,2]=Q33*alpha+beta
    G=G*(Cell**3) # multiplying by the elementary volume
    # self-patch
    G[0,0,0,0,0]=(2./3.)*(1.-1.j*KB*Cell*((3./(4.*sci.pi))**(1./3.)))*\
    sci.exp(1.j*KB*Cell*((3./(4.*sci.pi))**(1./3.)))-1.0
    G[0,0,0,0,1]=0.
    G[0,0,0,0,2]=0.
    G[0,0,0,1,0]=0.
    G[0,0,0,1,1]=G[0,0,0,0,0]
    G[0,0,0,1,2]=0.
    G[0,0,0,2,0]=0.
    G[0,0,0,2,1]=0.
    G[0,0,0,2,2]=G[0,0,0,0,0]
    #Circular extension of G
    GC[0:NX,0:NY,0:NZ,:,:]=G
    DeltaOp=sci.eye(3,3)
    s=0
    while s<=2:
      ss=0
      while ss<=2:
        GC[NX+1:,0:NY,0:NZ,s,ss]=(1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*G[:0:-1,:,:,s,ss]
        GC[NX+1:,NY+1:,0:NZ,s,ss]=(1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*\
                             (1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*G[:0:-1,:0:-1,:,s,ss]
        GC[NX+1:,NY+1:,NZ+1:,s,ss]=(1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*\
                             (1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*\
                             (1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:0:-1,:0:-1,:0:-1,s,ss]
        GC[0:NX,NY+1:,0:NZ,s,ss]=(1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*G[:,:0:-1,:,s,ss]
        GC[0:NX,NY+1:,NZ+1:,s,ss]=(1-2*DeltaOp[1,s])*(1-2*DeltaOp[1,ss])*\
                             (1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:,:0:-1,:0:-1,s,ss]
        GC[0:NX,0:NY,NZ+1:,s,ss]=(1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:,:,:0:-1,s,ss]
        GC[NX+1:,0:NY,NZ+1:,s,ss]=(1-2*DeltaOp[0,s])*(1-2*DeltaOp[0,ss])*\
                             (1-2*DeltaOp[2,s])*(1-2*DeltaOp[2,ss])*G[:0:-1,:,:0:-1,s,ss]
        ss=ss+1
      s=s+1
    # FFT of the Green's tensor array
    s=0
    while s<=2:
      ss=0
      while ss<=2:
        GF[:,:,:,s,ss]=fft.fftn(sci.squeeze(GC[:,:,:,s,ss]))
        ss=ss+1
      s=s+1

    return GF
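A minimal call sketch (grid values illustrative); the result is the FFT of the circularly extended tensor, so each spatial dimension is doubled:

GF = greentensor(Freq=1.0e9, EpsB=1.0, Cell=5.0e-3, NX=8, NY=8, NZ=8)
print(GF.shape)  # (16, 16, 16, 3, 3)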
Example #40
def SingleReceiver(E,EpsArr,Freq,EpsB,Cell,NX,NY,NZ,Xr,Yr,Zr):
    """Returns the scattered field 3-vector Esc
       at the location of the receiver (Xr,Yr,Zr)
    """
    c0 = 299792458.0 # speed of light in vacuum
    Mu0 = 4.0*sci.pi*1.0e-7 # vacuum permeability
    Eps0 = 1.0/(Mu0*c0*c0) # vacuum permittivity
    Omega = 2.0*sci.pi*Freq
    EtaB = -1.0j*Omega*Eps0*EpsB
    ZetaB = -1.0j*Omega*Mu0
    KB = Omega*sci.sqrt(Eps0*EpsB*Mu0)
    Esc = sci.zeros(3,complex)

    J = sci.zeros((NX,NY,NZ,3),complex)
    for s in range(3):
        J[0:NX,0:NY,0:NZ,s]=E[0:NX,0:NY,0:NZ,s]*(EpsArr[0:NX,0:NY,0:NZ]-1.0)

    G = sci.zeros((NX,NY,NZ,3,3),complex)
    # 3D arrays of x,y,z coordinates
    xx,yy,zz = sci.mgrid[0:NX*Cell:Cell,0:NY*Cell:Cell,0:NZ*Cell:Cell]
    dd = sci.zeros((NX,NY,NZ),complex)
    alpha = sci.zeros((NX,NY,NZ),complex)
    beta = sci.zeros((NX,NY,NZ),complex)
    Q11 = sci.zeros((NX,NY,NZ),complex)
    Q12 = sci.zeros((NX,NY,NZ),complex)
    Q13 = sci.zeros((NX,NY,NZ),complex)
    Q21 = sci.zeros((NX,NY,NZ),complex)
    Q22 = sci.zeros((NX,NY,NZ),complex)
    Q23 = sci.zeros((NX,NY,NZ),complex)
    Q31 = sci.zeros((NX,NY,NZ),complex)
    Q32 = sci.zeros((NX,NY,NZ),complex)
    Q33 = sci.zeros((NX,NY,NZ),complex)
    # 3D arrays of distances
    dd = sci.sqrt((Xr-xx)**2+(Yr-yy)**2+(Zr-zz)**2)
    dd2 = dd*dd
    # 3D arrays of components of the Q-matrix
    Q11 = sci.divide((Xr-xx)*(Xr-xx),dd2)
    Q12 = sci.divide((Xr-xx)*(Yr-yy),dd2)
    Q13 = sci.divide((Xr-xx)*(Zr-zz),dd2)
    Q21 = Q12
    Q22 = sci.divide((Yr-yy)*(Yr-yy),dd2)
    Q23 = sci.divide((Yr-yy)*(Zr-zz),dd2)
    Q31 = Q13
    Q32 = Q23
    Q33 = sci.divide((Zr-zz)*(Zr-zz),dd2)
    # alpha and beta scalar multipliers
    alpha = sci.divide(sci.exp(1.0j*KB*dd),4.0*sci.pi*dd)*\
    (-KB**2.0 - sci.divide(1.0j*3.0*KB,dd) + sci.divide(3.0,dd2))
    beta = sci.divide(sci.exp(1.0j*KB*dd),4.0*sci.pi*dd)*\
    (KB**2.0 + sci.divide(1.0j*KB,dd) - sci.divide(1.0,dd2))
    # Green's tensor
    G[:,:,:,0,0] = Q11*alpha+beta
    G[:,:,:,0,1] = Q12*alpha
    G[:,:,:,0,2] = Q13*alpha
    G[:,:,:,1,0] = Q21*alpha
    G[:,:,:,1,1] = Q22*alpha+beta
    G[:,:,:,1,2] = Q23*alpha
    G[:,:,:,2,0] = Q31*alpha
    G[:,:,:,2,1] = Q32*alpha
    G[:,:,:,2,2] = Q33*alpha+beta
    G = G*(Cell**3) # multiplying by the elementary volume

    Esc[0] = sci.sum(sci.squeeze(sci.multiply(G[:,:,:,0,0],J[:,:,:,0])+\
                                 sci.multiply(G[:,:,:,0,1],J[:,:,:,1])+\
                                 sci.multiply(G[:,:,:,0,2],J[:,:,:,2])))
    Esc[1] = sci.sum(sci.squeeze(sci.multiply(G[:,:,:,1,0],J[:,:,:,0])+\
                                 sci.multiply(G[:,:,:,1,1],J[:,:,:,1])+\
                                 sci.multiply(G[:,:,:,1,2],J[:,:,:,2])))
    Esc[2] = sci.sum(sci.squeeze(sci.multiply(G[:,:,:,2,0],J[:,:,:,0])+\
                                 sci.multiply(G[:,:,:,2,1],J[:,:,:,1])+\
                                 sci.multiply(G[:,:,:,2,2],J[:,:,:,2])))

    return Esc
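A hedged smoke test (uses the snippet's old-style `sci` alias for scipy; with EpsArr identically 1 the contrast current J vanishes, so the scattered field must come out zero):

import scipy as sci  # the snippet's alias; in modern code these names live in numpy

NX = NY = NZ = 8
E = sci.ones((NX, NY, NZ, 3), complex)    # stand-in for a solved total field
EpsArr = sci.ones((NX, NY, NZ))           # vacuum everywhere: no scatterer
Esc = SingleReceiver(E, EpsArr, 1.0e9, 1.0, 5.0e-3, NX, NY, NZ,
                     Xr=1.0, Yr=1.0, Zr=1.0)  # receiver well outside the grid
print(Esc)  # ~ [0, 0, 0]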