Example #1
    def test_chain_bernoulli(self):
        # runs the sampler on a distribution of independent bernoulli variables
        # and compares the mean
        d = 5
        ps = rand(d)
        ps /= norm(ps)
        distribution = Bernoulli(ps)

        num_history = 100
        Z = distribution.sample(num_history).samples
        threshold = 0.8
        spread = 0.2

        gamma = 0.2
        kernel = HypercubeKernel(gamma)

        mcmc_sampler = DiscreteKameleon(distribution, kernel, Z, threshold,
                                        spread)

        start = zeros(distribution.dimension, dtype=numpy.bool8)
        mcmc_params = MCMCParams(start=start, num_iterations=1000)
        chain = MCMCChain(mcmc_sampler, mcmc_params)

        chain.run()
        self.assertAlmostEqual(norm(mean(chain.samples, 0) - ps), 0, delta=0.2)
Example #2
 def alfaV(self, v0):
     """Return the alfa such that V(alfa) = v0. This does not correspond to the equilibrium."""
     #         return newton(lambda alfa : self.V(alfa)-v0, radians(6.0), full_output=True, disp=False)
     #         except RuntimeError as msg : return nan
     alfa, res = newton(lambda alfa: self.V(alfa) - v0,
                        radians(6.0),
                        tol=self.EPS,
                        full_output=True,
                        disp=False)
     if res.converged:
         return alfa % π
     else:
         alfa = res.root
         res = dict(root=fmt(degrees(res.root)),
                    iterations=res.iterations,
                    function_calls=res.function_calls,
                    converged=res.converged,
                    cause=res.flag)
         rn = norm(self.resultante(self.normalise(alfa)))
         r = norm(self.resultante(alfa))
         cm = fmt(self.CmT(alfa))
         debug('alfaV : non CV')
         print("alfa_normalisé = %s," % fmt(degrees(alfa % π)),
               'resultante = (%s,%s),' % fmt((rn, r)), 'CmT =', cm)
         print('résultats newton = ', fmt(res))
         print()
         if abs(r) < self.EPS:
             return alfa % π
Example #3
    def cosine_distance(vector1, vector2):
        """Cosine of the angle between two vectors (cosine similarity)
            In geometry, the cosine of the angle between two vectors measures how much their directions differ;
            machine learning borrows this notion to measure the difference between sample vectors.
            The cosine takes values in the range [-1, 1].
            The larger the cosine, the smaller the angle between the two vectors; the smaller the cosine, the larger the angle.
            It reaches its maximum of 1 when the two directions coincide, and its minimum of -1 when they are exactly opposite.
            """
        # numpy.multiply(v1, v2): element-wise product of the matrices v1 and v2
        # numpy.dot(v1, v2): matrix product of v1 and v2; equivalent to v1 * v2 when both are of type numpy.matrixlib.defmatrix.matrix
        dot = np.multiply(vector1, vector2)
        # print("dot", dot)

        # linalg.norm(v)  # distance of the matrix from the origin, i.e. its norm
        norm1 = linalg.norm(vector1)
        # print("norm1", str(norm1))
        norm2 = linalg.norm(vector2)
        # print("norm2", str(norm2))

        # product of the norms of the two matrices
        norm_vector_ = norm1 * norm2
        # print("norm_vector_", str(norm_vector_))

        result = (np.sum(dot) / norm_vector_)
        # print("result", str(result))
        return result
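A minimal usage sketch (not part of the original listing) for the routine above, assuming numpy is imported as np and linalg is numpy.linalg; the two vectors are made up for illustration:

import numpy as np
from numpy import linalg

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([3.0, 2.0, 1.0])
# the sum of the element-wise product equals the dot product, so this matches np.dot(v1, v2)
cos_sim = np.sum(np.multiply(v1, v2)) / (linalg.norm(v1) * linalg.norm(v2))
print(cos_sim)  # ~0.714 for these vectors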
Example #4
    def fit(self,
            input_matrix,
            sample=True,
            param_centres=None,
            two_pass=False):
        '''
            Function to input the data and call run_kmeans on it.

            PARAMETERS:
                input_matrix (scipy.sparse or numpy.ndarray): Matrix containing input samples. It can either be a scipy sparse matrix or a numpy 2darray.
                sample (boolean): By default set to True, this flag is used to sample centres from the input data. If set to False, a numpy array containing centre points should be passed to param_centres.
                param_centres (scipy.sparse or numpy.ndarray): Is set to None by default. Should be passed a sparse matrix or numpy 2darray containing centre points, if sample is set to False.
                two_pass (boolean): By default set to False, set this flag to True to execute a two-pass k-means. If set to True, this flag takes precedence over the sample flag and ignores its value.

            RETURNS: None

        '''
        input_matrix = input_matrix / norm(input_matrix, axis=1)

        if two_pass:
            self.sample_kmeans(input_matrix)
            return

        if sample:
            self.centres = self.sample_centres(input_matrix, self.no_clusters)
        else:
            if param_centres is None:  # explicit None check; truth-testing an array would be ambiguous
                raise ValueError(
                    "Must provide centre matrix if sample_centres is set to False."
                )
            self.centres = param_centres / norm(param_centres, axis=1)
        self.run_kmeans(input_matrix)
Example #5
def spiral_sphere(N, Om=2*pi, b=array((0, 0, 1))):
    """
    Internal helper function for the raycasting that returns an array of
    unit vectors (N, 3) giving equally distributed directions on a part of
    sphere given by the center direction b and the solid angle Om
    """
    # first produce 'equally' distributed directions in spherical coords
    o = 4*pi/Om
    h = -1+ 2*arange(N)/(N*o-1.)
    theta = arccos(h)
    phi = zeros_like(theta)
    for i, hk in enumerate(h[1:]):
        phi[i+1] = phi[i]+3.6/sqrt(N*o*(1-hk*hk)) % (2*pi)
    # translate to cartesian coords
    xyz = vstack((sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta)))
    # mirror everything on a plane so that b points into the center
    a = xyz[:, 0]
    b = b/norm(b)
    ab = (a-b)[:, newaxis]
    if norm(ab)<1e-10:
        return xyz
    # this is the Householder matrix for mirroring
    H = identity(3)-dot(ab, ab.T)/dot(ab.T, a)
    # actual mirroring
    return dot(H, xyz)
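A quick sanity-check sketch, assuming spiral_sphere and the numpy names it uses are in scope: the result has shape (3, N) and every column remains a unit vector, because the Householder mirroring preserves norms.

from numpy import pi, array, allclose
from numpy.linalg import norm

dirs = spiral_sphere(100, Om=pi, b=array((0.0, 0.0, 1.0)))
print(dirs.shape)                          # (3, 100)
print(allclose(norm(dirs, axis=0), 1.0))   # True: columns are unit direction vectors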
Example #6
def calibrateImages(images):
    for image in images:
        logging.info(f'IMAGE {image.id:02d}:Calibrating images...')
        ins = image.ins
        ex = image.ex
        pmat = ins @ ex
        R = np.array([[ex[0][0], ex[0][1], ex[0][2]],
                      [ex[1][0], ex[1][1], ex[1][2]],
                      [ex[2][0], ex[2][1], ex[2][2]]])
        t = np.array([ex[0][3], ex[1][3], ex[2][3]])
        center = -inv(R) @ t
        center = np.array([center[0], center[1], center[2], 1])
        zaxis = np.array(pmat[2])
        zaxis[3] = 0
        ftmp = norm(zaxis)
        zaxis /= ftmp
        zaxis = np.array([zaxis[0], zaxis[1], zaxis[2]])
        xaxis = np.array([pmat[0][0], pmat[0][1], pmat[0][2]])
        yaxis = cross(zaxis, xaxis)
        yaxis /= norm(yaxis)
        xaxis = cross(yaxis, zaxis)

        image.pmat = pmat
        image.center = center
        image.xaxis = xaxis
        image.yaxis = yaxis
        image.zaxis = zaxis
Example #7
    def pearson_distance(self, img2):
        items = self.items()
        x = [i[1] for i in items]
        items = img2.items()
        y = [i[1] for i in items]
        # print('val', values)
        # r, p = pearsonr(self, y) -- the body of scipy.stats.pearsonr(x, y) is inlined below

        n = len(x)
        if n != len(y):
            raise ValueError('x and y must have the same length.')

        if n < 2:
            raise ValueError('x and y must have length at least 2.')

        x = np.asarray(x)
        y = np.asarray(y)
        dtype = type(1.0 + x[0] + y[0])

        if n == 2:
            # with only two points, the correlation reduces to a sign agreement; return it as a distance
            return 1 - dtype(np.sign(x[1] - x[0]) * np.sign(y[1] - y[0]))

        xmean = x.mean(dtype=dtype)
        ymean = y.mean(dtype=dtype)

        xm = x.astype(dtype) - xmean
        ym = y.astype(dtype) - ymean

        normxm = linalg.norm(xm)
        normym = linalg.norm(ym)

        r = np.dot(xm / normxm, ym / normym)
        r = max(min(r, 1.0), -1.0)
        return 1 - r
Example #8
def spiral_sphere(N, Om=2 * pi, b=array((0, 0, 1))):
    """
    Internal helper function for the raycasting that returns an array of
    unit vectors (N, 3) giving equally distributed directions on a part of
    sphere given by the center direction b and the solid angle Om
    """
    # first produce 'equally' distributed directions in spherical coords
    o = 4 * pi / Om
    h = -1 + 2 * arange(N) / (N * o - 1.)
    theta = arccos(h)
    phi = zeros_like(theta)
    for i, hk in enumerate(h[1:]):
        phi[i + 1] = phi[i] + 3.6 / sqrt(N * o * (1 - hk * hk)) % (2 * pi)
    # translate to cartesian coords
    xyz = vstack((sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta)))
    # mirror everything on a plane so that b points into the center
    a = xyz[:, 0]
    b = b / norm(b)
    ab = (a - b)[:, newaxis]
    if norm(ab) < 1e-10:
        return xyz
    # this is the Householder matrix for mirroring
    H = identity(3) - dot(ab, ab.T) / dot(ab.T, a)
    # actual mirroring
    return dot(H, xyz)
Example #9
def cos_sim(A, B):
    if isinstance(A, list):
        A = np.array(A)
        B = np.array(B)
    num = float((A * B).sum())  # 若为列向量则 A.T  * B
    denom = linalg.norm(A) * linalg.norm(B)
    cos = num / denom if denom != 0 else 0  # 余弦值
    return cos
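A short usage sketch, assuming cos_sim is defined as above with numpy as np and linalg = numpy.linalg:

print(cos_sim([1, 0, 1], [0, 1, 1]))   # 0.5
print(cos_sim([0, 0, 0], [1, 2, 3]))   # 0, thanks to the zero-denominator guard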
Example #10
def projection_matrix(xyz0_m, normal_vector):
    r_unit = normal_vector
    meridian_normal = normal_vector_meridian_plane(xyz0_m)
    q_unit = cross(meridian_normal, r_unit)
    q_unit /= norm(q_unit)
    p_unit = cross(q_unit, r_unit)
    p_unit /= norm(p_unit)
    return array([p_unit, q_unit, r_unit]).T
Example #11
    def loss(self, i_param, j_param, x_uij):
        m = x_uij.shape[0]
        loss = -(1 / m) * np.sum(np.log(1 / (1 + np.exp(-x_uij))))
        reg = (0.5 / m) * (self.lambda_i *
                           (LA.norm(i_param)**2) + self.lambda_j *
                           (LA.norm(j_param)**2))

        return loss + reg
 def compute_cos_similarity(self, doc_vector, query_vector):
     '''
     Uses the dot produt to compute cosine similarity between vectors
     :param doc_vector: a document vector
     :param query_vector: a query vector
     :return: the cosine similarity between the document and query vectors
     '''
     return linalg.dot(doc_vector, query_vector) / (
         linalg.norm(doc_vector) * linalg.norm(query_vector))
Example #13
 def j(self, x, u):
     j = super(CtmAdmm, self).j(x, u)
     x_strip = self.strip_x(x)
     u_strip = self.strip_u(u)
     ramp_strip = self.strip_ramp(u)
     j += tog.t * self.yl.dot(x_strip) + self.beta / 2.0 * norm(x_strip - self.xbarl)
     j += tog.t * self.yr.dot(u_strip) + self.beta / 2.0 * norm(u_strip - self.xbarr)
     j += tog.t * self.yramp.dot(ramp_strip) + self.beta / 2.0 * norm(ramp_strip - self.xbarramp)
     return j
Example #14
 def similarity_cosine(self, vector1, vector2):
     cos1 = np.sum(vector1 * vector2)
     cos21 = linalg.norm(vector1)
     cos22 = linalg.norm(vector2)
     # cos21 = np.sqrt(sum(vector1 ** 2))
     # cos22 = np.sqrt(sum(vector2 ** 2))
     similarity = cos1 / float(cos21 * cos22)
     if str(similarity) == 'nan':
         return 0.0
     else:
         return similarity
def get_angle(vector_1, vector_2):
    # it assumes that the destination is in front of you
    unit_vector_1 = vector_1 / linalg.norm(vector_1)
    unit_vector_2 = vector_2 / linalg.norm(vector_2)
    dot_product = dot(unit_vector_1, unit_vector_2)
    angle = int(degrees(arccos(dot_product)))
    clockwise = not (cross(unit_vector_1, unit_vector_2) > 0)
    if angle > 90:
        angle = 180 - angle
        clockwise = not clockwise
        ##print("angle is greater than 180")
    return angle, clockwise
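A small usage sketch for get_angle, assuming dot, cross, degrees, arccos and linalg come from numpy as the function requires; with 2-D inputs numpy's cross returns a scalar whose sign decides the turn direction.

angle, clockwise = get_angle([1.0, 0.0], [0.0, 1.0])
print(angle, clockwise)   # 90 False: a counter-clockwise quarter turn
angle, clockwise = get_angle([1.0, 0.0], [0.0, -1.0])
print(angle, clockwise)   # 90 True: the same quarter turn, but clockwise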
 def test_3(self):
     kernel=GaussianKernel(sigma=10)
     X=randn(3000,10)
     K_chol, I, R, W=incomplete_cholesky(X, kernel, eta=0.001)
     K=kernel.kernel(X)
     
     self.assertEqual(shape(K_chol), (len(I), (len(I))))
     self.assertEqual(shape(R), (len(I), (len(X))))
     self.assertEqual(shape(W), (len(I), (len(X))))
     
     self.assertLessEqual(norm(K-R.T.dot(R)), 1)
     self.assertLessEqual(norm(K-W.T.dot(K_chol.dot(W))), 1)
Example #17
def create_square_from_two_points(a_point,b_point):
    """
    Given two points a and b, return a square (polygon) that has a_point at
    one corner and, at the opposite corner, a point lying in the direction of b.
    
    Parameters
    ----------
        
        a_point : geometry (shapely Point)
        b_point : geometry (shapely Point); only its direction matters
    
    """

        
    d = create_rectangle_from_two_points(a_point, b_point)
    
    apt = d['a']
    bpt = d['b']
    appt = d['a_p']
    bppt = d['b_p']

    a = asarray(apt)
    b = asarray(bpt)
    bp = asarray(bppt)
    ap = asarray(appt)
    
    a_m_bp = a - bp
    a_m_b = a - b
    a_m_ap = a - ap
    
    n_a_m_b = norm(a_m_b)
    n_a_m_ap = norm(a_m_ap)
    n_a_m_bp = norm(a_m_bp)
    
    if n_a_m_b < 0 :
        sig = -1.0
    elif n_a_m_b > 0 :
        sig = 1.0
    else:
        logger.error("The points are equal. It's not possible to generate an area with only one point.")
    
    
    p = (sig * ( n_a_m_ap / n_a_m_bp ) ) * a_m_bp

    pp = p + (a_m_ap) + a
    
    #n_chiqui = min((n_a_m_bp,n_a_m_ap))
    
    pp_pt = Point(pp)
    a_point = Point(a_point)
    new_dic = create_rectangle_from_two_points(a_point, pp_pt)
    return new_dic
Example #18
    def v(self, xx):
        """
        Provides the flow field as a function of the location. This is
        implemented here only for the component in the direction of :attr:`flow`;
        entrainment components are set to zero.

        Parameters
        ----------
        xx : array of floats of shape (3, )
            Location in the fluid for which to provide the data.

        Returns
        -------
        tuple with two elements
            The first element in the tuple is the velocity vector and the
            second is the Jacobian of the velocity vector field, both at the
            given location.
        """
        # TODO: better to make sure that self.flow and self.plane are indeed unit vectors before
        # normalize
        flow = self.flow / norm(self.flow)
        plane = self.plane / norm(self.plane)
        # additional axes of global co-ordinate system
        yy = -cross(flow, plane)
        zz = cross(flow, yy)
        # distance from slot exit plane
        xx1 = xx - self.origin
        # local co-ordinate system
        x = dot(flow, xx1)
        y = dot(yy, xx1)
        x1 = 0.109 * x
        h1 = abs(y) + sqrt(pi) * 0.5 * x1 - 0.5 * self.B
        if h1 < 0.0:
            # core jet
            Ux = self.v0
            Udx = 0
            Udy = 0
        else:
            # shear layer
            Ux = self.v0 * exp(-h1 * h1 / (2 * x1 * x1))
            Udx = (h1 * h1 / (x * x1 * x1) - sqrt(pi) * 0.5 * h1 /
                   (x * x1)) * Ux
            Udy = -sign(y) * h1 * Ux / (x1 * x1)
        # Jacobi matrix
        dU = array(((Udx, 0, 0), (Udy, 0, 0), (0, 0, 0))).T
        # rotation matrix
        R = array((flow, yy, zz)).T
        return dot(R, array((Ux, 0, 0))), dot(dot(R, dU), R.T)
Example #19
def nonlinear_cg(f, dims, start):
    '''The parameters to the nonlinear_cg function to compute minimum are
    f - the objective function
    dims - list of variables in the function
    start - starting point;
    Returns dictionary containing the initial starting point,
    minimum point, number of iterations and the final value'''
    fn = Matrix([f])
    fnc = lambdify([dims], fn, 'numpy')                                 #Lambdifying the function for numpy processing
    gradient = fn.jacobian([dims]).transpose()                          #Computing the gradient
    grad = lambdify([dims],gradient,'numpy')
    x_o = start.reshape(len(dims),1)                                    #Setting the values of x_o, d_o, r_o
    d_o = np.zeros((len(dims),1))
    r_o = -1*grad(start.squeeze().tolist())
    d_new = r_o
    beta_o = 0
    alpha = goldstein_armijo_line_search(f,dims,list(start.squeeze()))  #Using Goldstein-Armijo to determine alpha
    x_new = start.reshape(len(dims),1)+ alpha*d_new
    max_iter = 10
    n = 0
    pt_list = []
    for i in range(0,max_iter):
        if la.norm(grad(x_new)) < 1e-8: break                           #Checking if the norm of the gradient at the point is below a small tolerance
        else:
            beta_new = np.squeeze(grad(x_new).transpose(),axis=1).dot(np.squeeze(grad(x_new),axis=1))/(np.squeeze(grad(x_o).transpose(),axis=1).dot(np.squeeze(grad(x_o),axis=1)))  #Computing beta value
            d_new = -1*grad(x_new.squeeze().tolist()) + beta_new*(d_o)  #Computing new d value
            x_o = x_new
            d_o = d_new
            alpha = goldstein_armijo_line_search(f, dims, list(x_o.squeeze()))  #Computing new alpha using Goldstein-Armijo criteria
            x_new = x_new.reshape(len(dims),1) + alpha*d_new
            n = n+1
            pt_list.append(x_new)
    output_dict = {'start': start, 'prev': x_o, 'new': x_new, 'iterations': n, 'final_value': fnc(x_new.squeeze().tolist()), 'point_list': pt_list}
    return output_dict
Example #20
    def integrate_gyro(self):
        w_gyr = array( self.gyr.get_gyr(  ) ) \
                * self.deg_to_rad
        g_acc = array(self.acc.get_acc())
        dg = cross(self.g, w_gyr)

        is_to_suppress_acc                 \
            = norm( g_acc ) - self.g0_norm \
            > self.d_gravity_threashold
        #

        for i in range(3):
            if is_to_suppress_acc:
                self.kalman[i].y_innov_modulate = 0.
                print('Suppressed acc')
            else:
                self.kalman[i].y_innov_modulate = 1.
            #
            res = self.kalman[ i ] \
                  ( dg[ i ], g_acc[ i ], self.dt )
            if not isnan(res):
                self.g[i] = res
            #
        #

        self.g = self.normalize(self.g)
        return
Example #21
    def test_mode_newton_2d(self):
        X = asarray([-1, 1])
        X = reshape(X, (len(X), 1))
        y = asarray([+1 if x >= 0 else -1 for x in X])
        covariance = SquaredExponentialCovariance(sigma=1, scale=1)
        likelihood = LogitLikelihood()
        gp = GaussianProcess(y, X, covariance, likelihood)
        laplace = LaplaceApproximation(gp, newton_start=asarray([3, 3]))
        
        f_mode, _, steps = laplace.find_mode_newton(return_full=True)
        F = linspace(-10, 10, 20)
        D = zeros((len(F), len(F)))
        for i in range(len(F)):
            for j in range(len(F)):
                f = asarray([F[i], F[j]])
                D[i, j] = gp.log_posterior_unnormalised(f)
           
        idx = unravel_index(D.argmax(), D.shape)
        empirical_max = asarray([F[idx[0]], F[idx[1]]])
        
        pcolor(F, F, D)
        hold(True)
        plot(steps[:, 0], steps[:, 1])
        plot(f_mode[1], f_mode[0], 'mo', markersize=10)
        hold(False)
        colorbar()
        clf()
#        show()
           
        self.assertLessEqual(norm(empirical_max - f_mode), 1)
def on_mouse_move(event):
	if event.press_event is None:
		return

	modifiers = event.modifiers
	pos = event.press_event.pos
	if is_in_view(pos, view1.camera):
		if modifiers != ():
			if 1 in event.buttons and modifiers[0].name=='Control':
				# Translate: camera._scene_transform.imap(event.pos)
				p1 = np.array(pos)[:2]
				p2 = np.array(event.last_event.pos)[:2]
				p1 = p1 - view1.pos
				p2 = p2 - view1.pos
				# print p1,p2
				p1s = view1.camera._scene_transform.imap(p1)[:2]
				p2s = view1.camera._scene_transform.imap(p2)[:2]
				print(p1s, p2s)
				pos_ = np.vstack((p2s,p1s))
				# print pos_
				measure_line.set_data(pos=pos_)
				measure_line.visible = True
				d_pixel = norm(pos_[1,:]-pos_[0,:])
				d_um = d_pixel*get_mpp(_id)
				print('distance =', d_um)
				measure_text.visible = True
				measure_text.text = '%.2f um' % d_um
				measure_text.pos = pos_[1,:]
				measure_text.pos[0] -= 10
				event.handled = True
Example #23
 def calculate(self, chromosome: ndarray) -> float:
     labels = self.cluster.run(chromosome, self.samples)
     sse = 0
     for k in range(chromosome.shape[0]):
         members = self.samples[k == labels]
         sse += (linalg.norm(members - chromosome[k], axis=0)**2).sum()
     return 1 / (sse * chromosome.shape[0])
Example #24
def computePatch(f, fprime, ref) : 
    center = utils.triangulate(f, fprime, ref.pmat, fprime.image.pmat)
    normal = ref.center - center
    normal /= norm(normal)
    patch = Patch(center, normal, ref)

    return patch
Example #25
def example_function_4(x, y, z, t):
    '''
  sphere

  mode_volume_mum3 should be exactly (4/3)*pi*r^3 = 0.065450
  
  calculateMV function finds::

    allFilesFound =  1
    TotalEnergy =  1.8266
    MaximumEnergyDensity =  28
    mode_volume_mum3 =  0.065237
    normalized_mode_volume = NaN
    Lambda_mum =  299792458
    f0_MHz =  1
    first =  1
    snap_time_number = 0
    repetition =  524200
    Niterations =  1
    refractive_index_defect = NaN
  '''
    centro = numpy.array([0.5, 0.5, 0.5])
    radius = 0.25
    if norm(numpy.array([x, y, z]) - centro) <= radius:
        epsilon = 2
        E = array([1, 2, 3], dtype=complex)
        H = array([4, 5, 6], dtype=complex)
    else:
        epsilon = 1
        E = array([0, 0, 0], dtype=complex)
        H = array([0, 0, 0], dtype=complex)
    return (epsilon, E, H)
Example #26
def mag(x):
    if x.shape[0] == 1:
        return np.asscalar(np.sqrt(np.dot(x, x.T)))
    elif x.shape[1] == 1:
        return np.asscalar(np.sqrt(np.dot(x.T, x)))
    else:
        return linalg.norm(x)
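A usage sketch, assuming mag is in scope with numpy as np and linalg = numpy.linalg; note that np.asscalar is deprecated in recent numpy releases in favour of .item().

import numpy as np

row = np.array([[3.0, 4.0]])      # shape (1, 2) -> 5.0 via dot(x, x.T)
col = np.array([[3.0], [4.0]])    # shape (2, 1) -> 5.0 via dot(x.T, x)
mat = np.eye(3)                   # anything else falls back to linalg.norm -> sqrt(3)
print(mag(row), mag(col), mag(mat))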
Example #27
 def test_norm_vectors_in_matrix_row(self):
     n_vectors = 4
     n_rows = 8
     mx = complex_randn((n_rows, n_vectors))
     norm_vectors_in_matrix(mx, 1)
     for i_row in np.arange(n_rows):
         self.assertAlmostEqual(1, norm(mx[i_row, :]))
Example #28
    def _compute_colors(self, array_x, array_y):

        # compute the absolute maximum of all values, to form a square
        abs_maximum = max([max(map(abs,array_x)), max(map(abs,array_y))])
        diagonal_length = norm(array([abs_maximum, abs_maximum])) # length of the projection
        diag = array([diagonal_length, diagonal_length])
        anti_diag = array([-diagonal_length, diagonal_length])

        # instantiate the colour gradient based on the colour model of the centre
        linear_normalizer = mpl.colors.Normalize(vmin=-abs_maximum, vmax=abs_maximum)
        log_normalizer = mpl.colors.SymLogNorm(abs_maximum/5, vmin=-abs_maximum, vmax=abs_maximum)
        r_to_b_gradient = cm.ScalarMappable(norm=linear_normalizer, cmap=redtoblue)

        # compute the dot product of each value with the diagonal,
        # then derive the colour from the value of its projection onto the diagonal
        hex_color_values = []
        for i, x in enumerate(array_x):
            # compute the dot products of the point with the diagonal and the anti-diagonal
            scal_p_diag = dot(array([array_x[i], array_y[i]]), diag) / diagonal_length
            scal_p_antidiag = dot(array([array_x[i], array_y[i]]), anti_diag) / diagonal_length

            # compute the colour gradient along the diagonal
            on_diag_color = colorConverter.to_rgb(r_to_b_gradient.to_rgba(scal_p_diag))
            # then use this colour (in RGB) to define a gradient, whose value is evaluated
            # along the anti-diagonal
            on_diag_gradient = make_white_gradient(on_diag_color, log_normalizer)
            final_color = on_diag_gradient.to_rgba(scal_p_antidiag)

            # convert to HEX
            hex_color_values.append(rgb2hex(colorConverter.to_rgb(final_color)))

        return hex_color_values, abs_maximum
Example #29
def UniverseIsFullOfBalls(x, y, z, t):
    balls = [
        ((2, 2, 2), 1, 2),
        ((5, 2, 2), 1, 5),
        ((8, 2, 2), 1, 8),
    ]

    epsilon = 1
    Ex = 0
    Ey = 0
    Ez = 0
    Hx = 0
    Hy = 0
    Hz = 0
    for c_ball, r_ball, eps_ball in balls:
        r = norm(array([x, y, z]) - array(c_ball))
        if r <= r_ball:
            epsilon = eps_ball * (10**0)
            Ex = eps_ball * (10**1)
            Ey = eps_ball * (10**2)
            Ez = eps_ball * (10**3)
            Hx = eps_ball * (10**4)
            Hy = eps_ball * (10**5)
            Hz = eps_ball * (10**6)

    E = array([Ex, Ey, Ez], dtype=complex)
    H = array([Hx, Hy, Hz], dtype=complex)
    return (epsilon, E, H)
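A brief usage sketch, assuming norm and array are in scope as in the function above: a point inside the first ball picks up that ball's permittivity and scaled field components.

eps, E, H = UniverseIsFullOfBalls(2.0, 2.0, 2.5, 0.0)
print(eps)   # 2: the point lies within radius 1 of the ball centred at (2, 2, 2)
print(E)     # [20.+0.j, 200.+0.j, 2000.+0.j]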
Example #30
    def integrate_gyro( self ):
        w_gyr = array( self.gyr.get_gyr(  ) ) \
                * self.deg_to_rad;
        g_acc = array( self.acc.get_acc(  ) );
        dg    = cross( self.g, w_gyr );

        is_to_suppress_acc                 \
            = norm( g_acc ) - self.g0_norm \
            > self.d_gravity_threashold;
        #
        
        for i in range( 3 ):
            if is_to_suppress_acc:
                self.kalman[ i ].y_innov_modulate = 0.;
                print('Suppressed acc');
            else:
                self.kalman[ i ].y_innov_modulate = 1.;
            #
            res = self.kalman[ i ] \
                  ( dg[ i ], g_acc[ i ], self.dt );
            if not isnan( res ):
                self.g[ i ] = res;
            #
        #

        self.g = self.normalize( self.g );
        return;
    def test_adapt_does_nothing(self):
        dimension = 3
        ps = rand(dimension)
        distribution = Bernoulli(ps)
        kernel = HypercubeKernel(1.)
        Z = zeros((2, distribution.dimension))
        threshold = 0.5
        spread = .5
        sampler = DiscreteKameleon(distribution, kernel, Z, threshold, spread)
        
        # serialise, call adapt, load, compare
        f = NamedTemporaryFile()
        dump(sampler, f)
        f.seek(0)
        sampler_copy = load(f)
        f.close()
        
        sampler.adapt(None, None)
        
        # rough check for equality, don't do a proper one here
        self.assertEqual(type(sampler_copy.kernel), type(sampler.kernel))
        self.assertEqual(sampler_copy.kernel.gamma, sampler.kernel.gamma)
        
        self.assertEqual(type(sampler_copy.distribution), type(sampler.distribution))
        self.assertEqual(sampler_copy.distribution.dimension, sampler.distribution.dimension)
        
        self.assertEqual(type(sampler_copy.Z), type(sampler.Z))
        self.assertEqual(sampler_copy.Z.shape, sampler.Z.shape)
        self.assertAlmostEqual(norm(sampler_copy.Z - sampler.Z), 0)
        
        self.assertEqual(sampler_copy.spread, sampler.spread)

        # this is none, so just compare
        self.assertEqual(sampler.Q, sampler_copy.Q)
 def terminate(self, sigma):
     difference = np.subtract(self.xU, self.U)
     sum_square = linalg.norm(difference)
     # sum_square = math.sqrt(np.sum(difference ** 2))
     # print("sum_square   " + str(sum_square))
     # print("difference   " + str(np.amax(difference)))
     return sum_square < sigma
Example #33
    def v( self, xx):
        """
        Provides the flow field as a function of the location. This is
        implemented here only for the component in the direction of :attr:`flow`;
        entrainment components are set to zero.

        Parameters
        ----------
        xx : array of floats of shape (3, )
            Location in the fluid for which to provide the data.

        Returns
        -------
        tuple with two elements
            The first element in the tuple is the velocity vector and the
            second is the Jacobian of the velocity vector field, both at the
            given location.
        """
        # TODO: better to make sure that self.flow and self.plane are indeed unit vectors before
        # normalize
        flow = self.flow/norm(self.flow)
        plane = self.plane/norm(self.plane)
        # additional axes of global co-ordinate system
        yy = -cross(flow,plane)
        zz = cross(flow,yy)
        # distance from slot exit plane
        xx1 = xx-self.origin
        # local co-ordinate system 
        x = dot(flow,xx1)
        y = dot(yy,xx1)
        x1 = 0.109*x
        h1 = abs(y)+sqrt(pi)*0.5*x1-0.5*self.B
        if h1 < 0.0:
            # core jet
            Ux = self.v0
            Udx = 0
            Udy = 0
        else:
            # shear layer
            Ux = self.v0*exp(-h1*h1/(2*x1*x1))
            Udx = (h1*h1/(x*x1*x1)-sqrt(pi)*0.5*h1/(x*x1))*Ux
            Udy = -sign(y)*h1*Ux/(x1*x1)
        # Jacobi matrix
        dU = array(((Udx,0,0),(Udy,0,0),(0,0,0))).T
        # rotation matrix
        R = array((flow,yy,zz)).T
        return dot(R,array((Ux,0,0))), dot(dot(R,dU),R.T)
Example #34
    def equilibre(self, **kargs):
        """
        :param kargs: the variables to modify, among self.VARIABLES
        :return: float, the incidence alfa° such that CmT(alfa)=0
        >>> P = Parapente('diamir')
        >>> eq = Equilibriste(P)
        >>> alfa = eq.equilibre(c=29.0, mp=100.0, S=28.0, d=10.0)
        # alfa is the equilibrium incidence for the paraglider P,
        # with trim c, a pilot of mass mp, a wing area S, and 10% brakes
        """
        objectif = self.CmT

        def objectif(alfa):
            """
            The function to drive to zero in order to find the equilibrium.
            Perhaps simply returning self.CmT(alfa) would be enough...
            """
            alfa = alfa % π
            fx, fz = self.resultante(alfa)
            return self.CmT(alfa)**2 + fx**2 + fz**2
            return fx**2 + fz**2

        if kargs: self.load(kargs)
        # disp=False so that newton does not raise an exception
        alfa, res = newton(objectif,
                           radians(6.0),
                           tol=self.EPS,
                           full_output=True,
                           disp=False)
        if res.converged:
            return alfa % π
        else:
            alfa = res.root
            res = dict(root=fmt(degrees(res.root)),
                       iterations=res.iterations,
                       function_calls=res.function_calls,
                       converged=res.converged,
                       cause=res.flag)
            rn = norm(self.resultante(self.normalise(alfa)))
            r = norm(self.resultante(alfa))
            cm = self.CmT(alfa)
            debug('Non CV', resultante=(rn, r), CmT=cm, res=res)
            if abs(r) < self.EPS:
                return alfa % π
#             debug('Non CV', resultante=(fmt(rn,6), fmt(r,6)))
            return nan
Example #35
def eval_svs_main_(filename):

    ######## reference
    ref_iKalaURI = Parameters.iKalaURI + '/Wavfile/'
    ref_fileURI = os.path.join(ref_iKalaURI, filename)

    loader = essentia.standard.MonoLoader(filename=ref_fileURI, downmix='left')
    trueKaraoke = loader()

    loader = essentia.standard.MonoLoader(filename=ref_fileURI,
                                          downmix='right')
    trueVoice = loader()

    ref_iKalaURI = Parameters.iKalaURI + '/Wavfile/mono/'
    ref_file_mono_URI = os.path.join(ref_iKalaURI, filename)

    loader = essentia.standard.MonoLoader(filename=ref_file_mono_URI)
    trueMixed = loader()

    ######## estimated
    estim_iKalaURI = Parameters.iKalaURI + '/Wavfile_resynth/'

    estim_fileURI = os.path.join(estim_iKalaURI, filename)

    loader = essentia.standard.MonoLoader(filename=estim_fileURI)
    estimatedVoice = loader()

    estimatedVoice_resized = numpy.zeros((trueMixed.shape))

    diff_length = len(trueMixed) - len(estimatedVoice)
    estimatedVoice_resized[diff_length:] = estimatedVoice

    estimatedKaraoke = trueMixed - estimatedVoice_resized
    norm_est = norm(estimatedVoice_resized + estimatedKaraoke)
    norm_est = 229.0
    norm_true = norm(trueVoice + trueKaraoke)
    a = vstack((estimatedVoice_resized, estimatedKaraoke))
    b = vstack((trueVoice, trueKaraoke))
    (SDR, SIR, SAR,
     perm) = mir_eval.separation.bss_eval_sources(a / norm_est, b / norm_true)
    (NSDR,NSIR,NSAR, perm) = mir_eval.separation.bss_eval_sources( \
        [trueMixed,trueMixed] / norm(trueMixed + trueMixed),\
        [trueVoice,trueKaraoke] / norm(trueVoice + trueKaraoke))
    NSDR = SDR - NSDR

    return NSDR, SIR, SAR
Example #36
def get_angles(points):
    first_points = points[:-1]
    second_points = points[1:]
    vectors = second_points - first_points
    first_vectors = vectors[:-1]
    second_vectors = vectors[1:]
    DotProducts = (first_vectors[:, 0] * second_vectors[:, 0]
                   + first_vectors[:, 1] * second_vectors[:, 1])
    NormKronecker = norm(first_vectors, axis=1) * norm(second_vectors, axis=1)
    cosine_sim = -DotProducts / NormKronecker
    cosine_sim[np.where(cosine_sim > 1)] = 1
    cosine_sim[np.where(cosine_sim < -1)] = -1
    angles = np.arccos(cosine_sim) / np.pi * 180
    return angles
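A usage sketch, assuming get_angles is in scope with numpy as np and norm = numpy.linalg.norm: points describe a polyline, and the function returns the interior angle at each intermediate vertex (180 degrees means the path continues straight).

import numpy as np

points = np.array([[0.0, 0.0],
                   [1.0, 0.0],
                   [1.0, 1.0],
                   [2.0, 2.0]])
print(get_angles(points))   # ~[ 90. 135.]: a right-angle turn, then a 45-degree bend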
def print_matrix_with_meta_data(filename, matrix):
    np.save(os.path.join(DATA_DIR, filename), matrix)
    details_file = open(os.path.join(DATA_DIR, filename + '.txt'), 'w')
    try:
        details_file.write(np.array2string(matrix) + "\n")
        details_file.write("\ndeterminant:                " +
                           str(linalg.det(matrix)) + "\n")
        details_file.write("inverse matrix determinant: " +
                           str(linalg.det(linalg.inv(matrix))) + "\n")
        details_file.write("euclidean norm:            " +
                           str(linalg.norm(matrix, 'fro')) + "\n")
        details_file.write("euclidean norm \nof inverse matrix:         " +
                           str(linalg.norm(linalg.inv(matrix), 'fro')) + "\n")
        details_file.write("condition number:          " +
                           str(linalg.cond(matrix, 'fro')))
    finally:
        details_file.close()
Example #38
def sortF(F, f) : 
    for feat in F : 
        pt = utils.triangulate(f, feat, f.image.pmat, feat.image.pmat)
        res = f.image.pmat @ pt
        vec = pt - f.image.center
        depth = norm(vec)
        feat.depth = depth
    utils.insertionSort(F)
Example #39
	def move_axis( self , dist , axis ) :
		''' translate cursor in self.node space '''

		axis = np.array( axis )
		axis = axis / la.norm( axis )
		axis*= dist

		return self.move_vec( axis )
Example #40
def obj_func(thetas, predicted_price_change, actual_price_change):
    return norm(
        np.subtract(
            np.transpose(actual_price_change),
            np.add(
                thetas[0],
                np.dot(thetas[1:len(thetas)],
                       np.transpose(predicted_price_change)))))
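A hedged sketch of calling obj_func, assuming norm = numpy.linalg.norm and numpy as np, and assuming predicted_price_change holds one column per predictor while thetas[0] is an intercept and thetas[1:] the per-predictor weights; the function then returns the norm of the blending residual.

import numpy as np

predicted = np.array([[0.1, 0.2],
                      [0.0, 0.3],
                      [0.2, 0.1]])        # 3 samples, 2 predictors
actual = np.array([0.15, 0.15, 0.15])
thetas = np.array([0.0, 0.5, 0.5])        # intercept, then one weight per predictor
print(obj_func(thetas, predicted, actual))  # 0.0: the equal-weight blend reproduces actual exactly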
def dibujarpf(bdcha,cadena,pasos=1,ppo=0, fin = np.inf,multi = False):
    '''Draws, from data collected in arrays of boat-type and chain-type data
    (see the scheme for preparing the matrices used to save data); it also draws
    an anchored vessel: buque = [length, beam, position, heading, de     sup'''
    
    
    
    if fin > bdcha.shape[0]-1:
        fin = bdcha.shape[0]-1
        
    for i in range(ppo,fin,pasos):
        if multi:
            figure()
        
        cms = np.array([cadena[i,0,:],cadena[i,1,:]])
        para = np.array([cadena[i,-2,:],cadena[i,-1,:]])
        pl.plot(cms[0,:],cms[1,:],'bo')
        pl.hold(True)
        barrasi = cms + cadena[-1,1,0] * para
        barrasd = cms - cadena[-1,1,0] * para
        pl.plot([barrasi[0,:],barrasd[0,:]],[barrasi[1,:],barrasd[1,:]],'k')
                        

        
        

        vertices = np.array([[-bdcha[-1,6]/2.,-0.25*bdcha[-1,6]/2],\
        [-bdcha[-1,6]/2.,0.25*bdcha[-1,6]/2],\
        [-0.25*bdcha[-1,6]/2,0.35*bdcha[-1,6]/2],[bdcha[-1,6]/2.,0],\
        [-0.25*bdcha[-1,6]/2,-0.35*bdcha[-1,6]/2],[-bdcha[-1,6]/2.,\
        -0.25*bdcha[-1,6]/2]])        
        rot = np.array([[np.cos(bdcha[i,6]),- np.sin(bdcha[i,6])],[np.sin(bdcha[i,6]),\
        np.cos(bdcha[i,6])]])       
        vertrot = np.array([np.dot(rot,j) for j in vertices]) + [bdcha[i,0],bdcha[i,1]]
        codes = [Path.MOVETO,Path.LINETO,Path.CURVE3,Path.CURVE3,Path.CURVE3,\
        Path.CURVE3]
     
        pathd = Path(vertrot,codes)
        patchd = patches.PathPatch(pathd,facecolor = 'green') #'green'
        pl.gca().add_patch(patchd)
         #######################dibujar cable de arrastre derecha#######################
        rot = np.array([[np.cos(bdcha[i,6]),- np.sin(bdcha[i,6])],[np.sin(bdcha[i,6]),\
        np.cos(bdcha[i,6])]])  
        popad =  np.dot(rot, np.array([-bdcha[-1,6]/2.,0])) + [bdcha[i,0],bdcha[i,1]]  
        tipd = - para[:,-1] * cadena[-1,1,0] + cms[:,-1]
        
        
        distd = norm(popad - tipd)
        dd = distd/cadena[-1,3,1]
        #print dd
        if dd > 1: dd = 1
        r = bezier_cvr.bezier4p([[tipd[0]],[tipd[1]]],[[popad[0]],[popad[1]]],1,1,1.5,\
        (1-dd) * bdcha[i,6]\
         +dd * np.arctan2(popad[1]-tipd[1],popad[0] - tipd[0]),\
        (1-dd) * np.arctan2(-para[0,0],-para[0,1])\
         +dd * np.arctan2(popad[1]-tipd[1],popad[0] - tipd[0]),\
        100)
        bezier_cvr.pintar_bezier(r[0],color = 'b')
 def test_log_pdf_1d_2n(self):
     mu = asarray([0], dtype=numpy.bool8)
     spread = rand()
     dist = DiscreteRandomWalkProposal(mu, spread)
     X = asarray([[1], [0]], dtype=numpy.bool8)
     log_liks = dist.log_pdf(X)
     expected = asarray([log(1.), -inf])
     self.assertAlmostEqual(norm(log_liks[0] - expected[0]), 0)
     self.assertEqual(log_liks[1], expected[1])
 def test_log_lik_vector_multiple2(self):
     n=100
     y=randint(0,2,n)*2-1
     F=randn(10,n)
     
     lik=LogitLikelihood()
     multiples=lik.log_lik_vector_multiple(y, F)
     singles=asarray([lik.log_lik_vector(y, f) for f in F])
     
     self.assertLessEqual(norm(singles-multiples), 1e-10)
 def test_kernel_X_two_points_fixed(self):
     gamma = .2
     k = HypercubeKernel(gamma)
     X = asarray([[1, 0], [1, 1]], dtype=numpy.bool8)
     K = zeros((2, 2))
     for i in range(2):
         for j in range(2):
             dist = sum(X[i] != X[j])
             K[i, j] = tanh(gamma) ** dist
     self.assertAlmostEqual(norm(K - k.kernel(X)), 0)
def varify_gradient_decent(lambda_num):
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5
    input = generate_debug_input(m, input_layer_size - 1)
    print(input)
    output = 1 + np.mod(np.arange(m).reshape(1, m), num_labels).T
    print(output)
    theta1 = rand_initialize_weights(input_layer_size, hidden_layer_size)
    theta2 = rand_initialize_weights(hidden_layer_size, num_labels)
    nn_param = unroll_params(theta1, theta2)

    cost_func = lambda param: cost_function(param, input_layer_size, hidden_layer_size, input, output, lambda_num,
                                            num_labels)
    (cost, grad) = cost_func(nn_param)
    num_grad = compute_numerical_gradient(cost_func, nn_param)
    diff = linalg.norm(num_grad - grad, 2) / linalg.norm(num_grad + grad,2)
    print("The relative difference will be small (less than 1e-9)", diff)
def testFitNorm():
    X = coo_matrix((ones(4),([0, 1, 2, 2], [1, 1, 0, 1])), shape=(3, 3), dtype=np.uint8).tolil()
    A = np.array([[0.9, 0.1],
         [0.8, 0.2],
         [0.1, 0.9]])
    R = np.array([[0.9, 0.1],
         [0.1, 0.9]])
    expectedNorm = norm(X - dot(A,dot(R, A.T)))**2
    assert_almost_equal(fitNorm(X, A, R), expectedNorm)  
    assert_almost_equal(fitNormWithoutNormX(X, A, R) + squareFrobeniusNormOfSparse(X), expectedNorm)
 def test_log_lik_vector_multiple1(self):
     n=100
     y=randint(0,2,n)*2-1
     f=randn(n)
     
     lik=LogitLikelihood()
     multiple=lik.log_lik_vector_multiple(y, f.reshape(1,n))
     single=lik.log_lik_vector(y, f)
     
     self.assertLessEqual(norm(single-multiple), 1e-10)
Example #48
 def rknewton(self,f,t,x0,tol=1e-5, nmax=50, nmax_gss=100):
     F = lambda x,x0:  Matrix(lsolve(-self.J(t,x),f(x0)))
     #F = lambda x,x0:  Matrix(lsolve(-J(x),f(x0)))
     #F = lambda x,x0: -inv(J(x))*f(x0)
     if norm(f(x0),1) == 0:
         return [x0,f(x0), 0]
     else:
         for n in range(1,nmax+1):
             x = self.RK.RKX(lambda t,Y:F(Y,x0), 0, x0,1,1)[:,1]
             if norm(f(x),2) > norm(f(x0),2):
                 s = x - x0
                 f2 = lambda Y: (f(Y).T*f(Y))[0]
                 f2_= lambda alpha:f2(x0 + alpha*s)
                 alpha = gnewton(f2_,0,min(1.0*tol/norm(s,2),1e-2),nmax_gss)[0]
                 x = x0 + alpha*s
             if norm(x-x0, 1)<tol:
                 break
             else:
                 x0 = x
     return [x,f(x), n]
def create_matrix(m, k, p, N, iteration_number, starting_conditions):
    x = [random.choice([-1, 1]) for _ in range(0, N)]
    A = zeros(shape=(N, N))
    for i in range(0, N):
        for j in range(0, N):
            if i == j:
                A[i][j] = k
            elif j > i:
                A[i][j] = (-1) ** (j + 1) * (m / (j + 1))
            elif j == (i - 1):
                A[i][j] = m / (i + 1)
    x_copy = x
    b = dot(A, x)
    D = diag(A)
    R = A - diagflat(D)
    x = starting_conditions
    x_norm = p + 1
    i = 0
    B = R/D
    e_vals, e_vect = linalg.eig(B)
    print(";".join((str(N), str(max(abs(e_vals))))))
    # print "results for ||x(i+1) - x(i): "
    while (x_norm >= p) and (i < iteration_number):  # iterate until convergence or the iteration cap
        prev_x = x
        x = (b - dot(R, x)) / D
        x_norm = linalg.norm(x - prev_x, inf)  # infinity norm (maximum absolute component)
        i += 1

    # print ";".join((str(N), str("%.8f" % p), str(i), str("%.15f" % linalg.norm(x_copy - x)), str("%.15f" % linalg.norm(x_copy - x, inf)))) + ";",
    x = x_copy
    b = dot(A, x)
    D = diag(A)
    R = A - diagflat(D)
    x = starting_conditions
    b_norm = p + 1
    i = 0
    # print "results for ||Ax(i) -b ||"
    while (b_norm >= p) and (i < iteration_number):
        x = (b - dot(R, x)) / D
        b_norm = linalg.norm(dot(A, x) - b, inf)
        i += 1
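The loops above implement a plain Jacobi iteration; here is a compact standalone sketch of the same update rule, independent of the particular test matrix built by create_matrix:

import numpy as np

def jacobi(A, b, x0, tol=1e-10, max_iter=500):
    # x_{k+1} = D^{-1} (b - R x_k), with D the diagonal of A and R = A - diag(D)
    D = np.diag(A)
    R = A - np.diagflat(D)
    x = x0.astype(float)
    for _ in range(max_iter):
        x_new = (b - np.dot(R, x)) / D
        if np.linalg.norm(x_new - x, np.inf) < tol:
            return x_new
        x = x_new
    return x

A = np.array([[4.0, 1.0], [2.0, 5.0]])
b = np.array([1.0, 2.0])
print(jacobi(A, b, np.zeros(2)))   # close to np.linalg.solve(A, b) = [1/6, 1/3]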
Example #50
def generalized_lasso(X, y, D, alpha, niter=100, x0=None, tol=0.001):
    n = X.shape[0]
    p = X.shape[1]
    indices = arange(p)
    if x0 is None:
        res = rand(p)
    else:
        res = x0
    for it in range(niter):
        gap = 0
        for i in range(p):
            x_i = res[indices != i]
            X_i = X[:, indices != i]
            prev = res[i]
            res[i] = soft_thresh(alpha*D[:,i].sum()/norm(X[:,i]),
                    dot(X[:,i].T, y - dot(X_i, x_i))/dot(X[:,i].T, X[:,i]))
            gap += norm(res[i] - prev)
        if gap < tol:
            break
    print('yues')
    return res
Example #51
 def fr(x0, n0, rmax, dt, v, xyz, t):
     s0 = n0 / (c+dot(v(x0)[0], n0))
     y0 = hstack((x0, s0))
     oo = ode(f1)
     oo.set_f_params(v)
     oo.set_integrator('vode', rtol=1e-2)
     oo.set_initial_value(y0, 0)
     while oo.successful():
         xyz.append(oo.y[0:3])
         t.append(oo.t)
         if norm(oo.y[0:3]-x0)>rmax:
             break
         oo.integrate(oo.t+dt)
Example #52
 def TRX(self, f, t0, Y0, n, tf, tol=1e-5, nmax_gnr=50, nmax_gss=100):
     h = (tf-t0)/(1.0*n)
     Yrk = zeros((len(Y0), n+1))
     Y = zeros((len(Y0), n+1))
     Yrk[:,0] = Y0
     Y[:,0] = Y0
     t = t0
     F = lambda t,Y: Y - 0.5*h*f(t, Y)
     self.GNR = GNR(self.method_gnr, F ,Y0)
     for i in range(n):
         #print i
         Yrk[:,i+1] = self.RK.RKX(f, t, Yrk[:,i],1,t+h)[:,1]
         F0 = Y[:,i] + 0.5*h*f(t,Y[:,i])
         F = lambda Y: Y - 0.5*h*f(t+h, Y) - F0
         if( norm(F(Yrk[:,i+1]),1) < norm(F(Y[:,i]),1) ):
             sol = self.GNR.rknewton(F,t+h, Yrk[:,i+1], tol, nmax_gnr, nmax_gss)
         else:
             sol = self.GNR.rknewton(F,t+h, Y[:,i], tol, nmax_gnr, nmax_gss)
         Y[:,i+1] = sol[0]
         #print(sol[2])
         Yrk[:,i+1] = Y[:,i+1]
         t += h
     return Y
    def assert_file_matrix(self, filename, M):
        try:
            with open(filename):
                m = loadtxt(filename)

                # python loads vectors as 1d-arrays, but we want 2d-col-vectors
                if len(shape(m)) == 1:
                    m = reshape(m, (len(m), 1))

                self.assertEqual(M.shape, m.shape)
                self.assertLessEqual(norm(m - M), 1e-5)
                return True
        except IOError:
            return False
 def test_chain_bernoulli(self):
     # runs the sampler on a distribution of independent bernoulli variables
     # and compares the mean
     d = 5
     ps = rand(d)
     ps /= norm(ps)
     distribution = Bernoulli(ps)
     
     num_history = 100
     Z = distribution.sample(num_history).samples
     threshold = 0.8
     spread = 0.2
     
     gamma = 0.2
     kernel = HypercubeKernel(gamma)
     
     mcmc_sampler = DiscreteKameleon(distribution, kernel, Z, threshold, spread)
     
     start = zeros(distribution.dimension, dtype=numpy.bool8)
     mcmc_params = MCMCParams(start=start, num_iterations=1000)
     chain = MCMCChain(mcmc_sampler, mcmc_params)
 
     chain.run()
     self.assertAlmostEqual(norm(mean(chain.samples, 0) - ps), 0, delta=0.2)
 def test_kernel_X_many_points_random(self):
     gamma = .2
     n_X = 4
     d = 5
     num_runs = 100
     k = HypercubeKernel(gamma)
     
     for _ in range(num_runs):
         X = randint(0, 2, (n_X, d)).astype(numpy.bool8)
         K = zeros((n_X, n_X))
         for i in range(n_X):
             for j in range(n_X):
                 dist = sum(X[i] != X[j])
                 K[i, j] = tanh(gamma) ** dist
         self.assertAlmostEqual(norm(K - k.kernel(X)), 0)
Example #56
def powerMethodGPU(intFrog, itr = 1):
	'''
	Extracts the pulse and gate as functions of time from the FROG
	with the power method
	'''

	nbrPoints = shape(intFrog)[0]
	tn = linspace(-nbrPoints/2, nbrPoints/2, nbrPoints)
	U0 = (pulse.gaussianPulse(tn, nbrPoints/2, 1.0) * (1 + 0.4*rand(nbrPoints))).real

	for i in arange(itr):
		U0A = array(matrix(intFrog)*matrix(U0).T)
		U0 = U0A.T/(la.norm(U0A))

	return U0
Example #57
    def paint(self, painter, QStyleOptionGraphicsItem, QWidget_widget=None):
        painter.setBrush(Qt.darkGray)
        painter.drawLine(0, 0, self.vector[0], self.vector[1])
        vector = self._scale_vector(self.vector)
        arrow_point = QPointF(vector[0], vector[1])

        vector = self._scale_vector(vector)
        unit_vector = vector / (linalg.norm(vector) / 5)

        painter.drawPolygon(QPolygonF([
            QPointF(vector[0] - unit_vector[1], vector[1] + unit_vector[0]),
            QPointF(vector[0] + unit_vector[1], vector[1] - unit_vector[0]),
            arrow_point
        ]))
        if self.weight is not None:
            angle = numpy.arccos(self.vector[0] / linalg.norm(self.vector))
            translation = [x * 0.4 for x in self.vector]
            if vector[1] < 0:
                angle = -angle
                translation = [x * 0.7 for x in self.vector]
            painter.translate(translation[0], translation[1])
            painter.scale(0.5, 0.5)
            painter.rotate(numpy.degrees(angle))
            painter.drawText(0, 0, str(round(self.weight, 3)))
def testMatrixFitNorm():
    A = np.array([[0.1, 0.1, 0.1],
         [0.1, 0.1, 0.001],
         [0.2, 0.1, 0.1],
         [0.1, 0.3, 0.1],
         [0.4, 0.1, 0.1],
         [0.001, 0.01, 0.1]])
    V = np.array([[0.1, 0.4, 0.1, 0.1],
         [0.01, 0.3, 0.1, 0.3],
         [0.1, 0.01, 0.4, 0.001]])
    D = coo_matrix((ones(6),([0, 1, 2, 3, 4, 5], [0, 1, 1, 2, 3, 3])), shape=(6, 4), dtype=np.uint8).tocsr()
    expectedNorm = norm(D - dot(A,V))**2
    assert_almost_equal(matrixFitNorm(D, A, V), expectedNorm)
    assert_almost_equal(squareFrobeniusNormOfSparse(D) + matrixFitNormWithoutNormD(D, A, V), expectedNorm)
        
    
Example #59
 def planificador(self, puntos, rumbos, veloc=None, tiempos=None,
                  ndat=10.):
     ''' This planner joins the waypoints supplied in the array puntos with
     degree-three Bezier curves, so that the boat ends up oriented according
     to the angles given in rumbos. If velocities are assigned to the
     waypoints, the Bezier curve is adjusted so that the boat reaches those
     velocities; finally, if times are assigned, the trajectory is computed
     so that the boat passes through the waypoints at the prescribed times.
     For now the planner does not take into account the limits imposed by
     the boat's own dynamics, which makes it possible to plan unrealizable
     trajectories... '''
     
     if veloc is None:
          # if no velocities are specified at the waypoints, assume the boat
          # starts from rest and the remaining velocities are set to 5 m/s.
          # Of course these defaults could be changed; the most convenient
          # thing would probably be to define them as attributes of the boat...
          veloc = np.repeat([0.,5.],[1,puntos.shape[0]-1])
     if tiempos is None:
         # if the passage times are not specified, they are obtained from the
         # mean velocities and the distance between the points. This may mean
         # the boat has to accelerate or decelerate more than it should
         # between waypoints... but it is good enough for testing
         
         tiempos = 2 * norm(puntos[1:] - puntos[:-1],axis =1).squeeze() / \
         (veloc[1:] + veloc[:-1])
         
         print(tiempos)
         
     lstplan = []
         
     for i,j,k,l,m,n,o in zip(puntos[:-1],puntos[1:],veloc[:-1],
     veloc[1:],tiempos, rumbos[:-1], rumbos[1:]):
                 # assume the boat starts from rest at the first point
                 # choose a constant velocity, fixed at 5 m/s, actually
         print(i, '\n')
         print(j, '\n')
         print(k, '\n')
         print(l, '\n')
         print(m, '\n')
         
         lstplan.append(bezier_cvr.bezier4p(i,j,k,l,m,n,o,ndat))
     
     return lstplan