Example #1
    def locallogisticHessian(self, theta, weights, reg_param):
        """
        Hessian of the L2-regularized local logistic regression loss

        Args:
            theta (np.array): Current lwlr parameters of shape
                [1, n_features]
            weights (np.array): training set weights of shape
                [n_samples, 1]
            reg_param (float): L2 regularization weight. If 0, no
                regularization is used.

        Returns:
            Hessian (np.ndarray): Hessian of shape [n_features, n_features]
        """
        # Add bias to X
        X = np.insert(self.X, 0, 1, axis=1)
        
        D = []
        for row in range(np.shape(X)[0]):
            D.append(weights[row] *
                     self.logistic_function(np.dot(X[row, :],
                                                   np.transpose(theta))) *
                     (1 -
                      self.logistic_function(np.dot(X[row, :],
                                                    np.transpose(theta)))))
        D = np.diag(D)
        hessian = (np.matmul(np.matmul(X.T, D),
                             X) -
                   np.identity(np.shape(X)[1]) * reg_param)
        return hessian
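A Hessian like this is normally consumed by a Newton-Raphson step. Below is a minimal sketch of that update, assuming gradient and hessian are callables returning the regularized gradient and the Hessian above for a flat parameter vector (both names are hypothetical stand-ins, not part of the class):

import numpy as np

def newton_step(theta, gradient, hessian):
    # One Newton-Raphson update: theta' = theta - H^{-1} g,
    # solved as a linear system rather than forming the inverse.
    g = gradient(theta)   # shape [n_features]
    H = hessian(theta)    # shape [n_features, n_features]
    return theta - np.linalg.solve(H, g)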
Example #2
File: matmul.py Project: harshit98/onnx
    def export():  # type: () -> None
        node = onnx.helper.make_node(
            'MatMul',
            inputs=['a', 'b'],
            outputs=['c'],
        )

        # 2d
        a = np.random.randn(3, 4).astype(np.float32)
        b = np.random.randn(4, 3).astype(np.float32)
        c = np.matmul(a, b)
        expect(node, inputs=[a, b], outputs=[c],
               name='test_matmul_2d')

        # 3d
        a = np.random.randn(2, 3, 4).astype(np.float32)
        b = np.random.randn(2, 4, 3).astype(np.float32)
        c = np.matmul(a, b)
        expect(node, inputs=[a, b], outputs=[c],
               name='test_matmul_3d')

        # 4d
        a = np.random.randn(1, 2, 3, 4).astype(np.float32)
        b = np.random.randn(1, 2, 4, 3).astype(np.float32)
        c = np.matmul(a, b)
        expect(node, inputs=[a, b], outputs=[c],
               name='test_matmul_4d')
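The 3d and 4d cases work because np.matmul multiplies over the last two axes and treats everything in front of them as broadcastable batch dimensions. A quick shape check (illustrative only):

import numpy as np

a = np.random.randn(2, 3, 4)
b = np.random.randn(2, 4, 5)
assert np.matmul(a, b).shape == (2, 3, 5)  # batched over the leading axis

# Batch dimensions broadcast like ordinary NumPy shapes:
assert np.matmul(np.random.randn(1, 3, 4), np.random.randn(7, 4, 5)).shape == (7, 3, 5)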
Example #3
File: test.py Project: Bruslan/MV3D-1
def box3d_to_rgb_projections(boxes3d, Mt=None, Kt=None):

    if Mt is None: Mt = np.array(MATRIX_Mt)
    if Kt is None: Kt = np.array(MATRIX_Kt)

    num  = len(boxes3d)
    projections = np.zeros((num,8,2),  dtype=np.int32)
    for n in range(num):
        if 1:
            box3d = boxes3d[n]
            Ps = np.hstack(( box3d, np.ones((8,1))) )
            Qs = np.matmul(Ps,Mt)
            Qs = Qs[:,0:3]
            qs = np.matmul(Qs,Kt)
            zs = qs[:,2].reshape(8,1)
            qs = (qs/zs)
            projections[n] = qs[:, 0:2]
        else:
            _Kt = ([[200,      0., 0.],
                    [0.,      200, 0.],
                    [1368/2., 1096/2., 1.]])
            Kt = np.array(_Kt)
            box3d = boxes3d[n].copy()
            box3d[:,0],box3d[:,1],box3d[:,2]=box3d[:,1],box3d[:,2],box3d[:,0]
            Qs=box3d
            qs = np.matmul(Qs, Kt)
            zs = qs[:, 2].reshape(8, 1)
            qs = (qs / zs)
            projections[n] = qs[:, 0:2]
    print(projections)
    return projections
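The per-box pipeline above is a plain homogeneous projection written in row-vector convention, which is why Mt and Kt multiply from the right. For one box corner p = [x, y, z, 1]:

q = \big[(p\,M_t)_{1:3}\big]\,K_t, \qquad (u,\,v) = (q_1/q_3,\; q_2/q_3)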
Example #4
def MRlogL_sandwichCov(dt, Ic, Is):
    """
    Estimates the asymptotic covariance matrix with the sandwich method
    evaluated at the Maximum Likelihood Estimates for Ic, Is
    
    It's Cov_hessian * Cov_OPG^-1 * Cov_hessian
    
    INPUTS:
        dt: list of inter-arrival times [seconds]
        Ic: The maximum likelihood estimate of Ic [1/second]
        Is: The maximum likelihood estimate of Is [1/second]
    OUTPUTS:
        covariance matrix for mle Ic, Is from sandwich method
        [[cov(Ic,Ic), cov(Ic,Is)], [cov(Is,Ic), cov(Is,Is)]]
    """
    h_cov = MRlogL_hessianCov(dt, Ic, Is)
    
    grad_Ic = -1./(1./dt+Is) + 1./(Ic+Is+dt*Is**2.)
    grad_Is = dt**2.*Ic/(1.+dt*Is)**2. - 3.*dt/(1.+dt*Is) + (1.+2.*dt*Is)/(Ic+Is+dt*Is**2.)
    
    #n=1.0*len(dt)
    grad_Ic2 = np.sum(grad_Ic**2.)
    grad_Is2 = np.sum(grad_Is**2.)
    grad_IcIs = np.sum(grad_Ic*grad_Is)
    opg_cov_inv = np.asarray([[grad_Ic2, grad_IcIs], [grad_IcIs, grad_Is2]])
    
    return np.matmul(np.matmul(h_cov, opg_cov_inv),h_cov)
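In standard notation the returned product is the usual sandwich (robust) covariance estimator; reading h_cov as the Hessian-based covariance and the middle factor as the outer-product-of-gradients matrix built from grad_Ic and grad_Is (my reading of the variable names):

\hat\Sigma_{\mathrm{sandwich}} = \hat\Sigma_H \Big(\sum_i \nabla\ell_i \nabla\ell_i^{\top}\Big)\, \hat\Sigma_H, \qquad \nabla\ell_i = \big(\partial_{I_c}\ell_i,\ \partial_{I_s}\ell_i\big)^{\top}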
Example #5
File: nnarx.py Project: RJT1990/pyflux
    def predict_new(self, X, z):

        first_layer_output = np.zeros(self.units)
        
        for unit in range(self.units):
            first_layer_output[unit] = self.activation(np.matmul(np.transpose(X), z[unit*(self.ar+len(self.X_names)+1):((unit+1)*(self.ar+len(self.X_names)+1))]))

        params_used = ((self.units)*(self.ar+len(self.X_names)+1))

        # Hidden layers
        hidden_layer_output = np.zeros((self.units, self.layers-1))
        for layer in range(1, self.layers):
            for unit in range(self.units):
                if layer == 1:
                    hidden_layer_output[unit,layer-1] = self.activation(np.matmul(first_layer_output,
                        z[params_used+unit*(self.units)+((layer-1)*self.units**2):((params_used+(unit+1)*self.units)+((layer-1)*self.units**2))]))
                else:
                    hidden_layer_output[unit,layer-1] = self.activation(np.matmul(hidden_layer_output[:,layer-1],
                        z[params_used+unit*(self.units)+((layer-1)*self.units**2):((params_used+(unit+1)*self.units)+((layer-1)*self.units**2))]))

        params_used = params_used + (self.layers-1)*self.units**2

        # Output layer
        if self.layers == 1:
            mu = np.matmul(first_layer_output, z[params_used:params_used+self.units])
        else:
            mu = np.matmul(hidden_layer_output[:,-1], z[params_used:params_used+self.units])

        return mu
Example #6
def backPropogation(W,H,A,y_hat,inputDataVector,trueOutput):
	
	grad_aL_Loss = np.zeros((numClasses,1)) 
	
	if(loss=="ce"):
		grad_aL_Loss = -(trueClassVector(trueOutput,numClasses)-y_hat)
	else:
		#TODO squared error loss
		Identity = np.identity(numClasses)
		repeatMatrix = np.repeat(y_hat,numClasses,axis=1)
		changeI = Identity - repeatMatrix
		sumMatrix = np.multiply((y_hat - trueClassVector(trueOutput,numClasses)), y_hat)
		grad_aL_Loss = np.matmul(changeI,sumMatrix)

	grad_W_Loss = []
	grad_B_Loss = []

	for k in range(num_hidden,-1,-1):
		
		if k == 0:
			tempGrad_W_Loss = np.matmul(grad_aL_Loss,np.transpose(inputDataVector))
		else:
			tempGrad_W_Loss = np.matmul(grad_aL_Loss,np.transpose(H[k-1]))
		
		grad_W_Loss.insert(0,np.copy(tempGrad_W_Loss))
		grad_B_Loss.insert(0,np.copy(grad_aL_Loss))

		if(k>0):
			if(activation=="sigmoid"):
				grad_aL_Loss = (np.matmul(np.transpose(W[k]),grad_aL_Loss))*sigmoidDerivativeFunctionToVector(A[k-1])
			else:
				grad_aL_Loss = (np.matmul(np.transpose(W[k]),grad_aL_Loss))*tanhDerivativeFunctionToVector(A[k-1])

	return grad_W_Loss,grad_B_Loss
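The squared-error branch is a Jacobian-vector product with the softmax Jacobian diag(y_hat) - y_hat y_hat^T: changeI @ sumMatrix expands (up to the conventional 1/2 factor in the squared-error loss) to

\nabla_{a_L}\,\tfrac12\lVert\hat y - e_y\rVert^2 = \big(\operatorname{diag}(\hat y) - \hat y\,\hat y^{\top}\big)(\hat y - e_y) = \hat y \odot (\hat y - e_y) - \hat y\,\big(\hat y^{\top}(\hat y - e_y)\big)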
Example #7
 def computeJonesRes(self):
     """Compute the Jones that results from applying the E-Jones to the
     right.
     The structure of the jonesrbasis is [timeIdx, sphIdx, skycompIdx].
     """
     idxshape = self.jonesrbasis.shape[0:-2]
     jonesrbasis = np.reshape(self.jonesrbasis, (-1, 3, 3))
     jonesrbasis_to = np.matmul(np.asarray(self.stnRot.T), jonesrbasis)
     (az_from, el_from) = crt2sph(jonesrbasis[..., 0].squeeze().T)
     theta_phi_view = (np.pi/2-el_from.flatten(), az_from.flatten())
     ejones = self.dualPolElem.getJonesAlong(self.freqChan, theta_phi_view)
     #(theta_lcl, phi_lcl) = self.dualPolElem.getBuildCoordinates(math.pi/2-r_sph[1], r_sph[0])
     #print theta_lcl, phi_lcl
     r_lcl = crt2sph(jonesrbasis_to[..., 0].squeeze().T)
     #print np.rad2deg(r_lcl)
     jonesbasisMat = getSph2CartTransfMat(jonesrbasis_to[..., 0].squeeze())
     #paraRot = np.matmul(np.conjugate(jonesbasisMat), jonesrbasis_to)
     self.jonesbasis = np.reshape(jonesbasisMat,
                                  idxshape+jonesbasisMat.shape[1:])
     # This is the actual MEq multiplication:
     if ejones.ndim > 3:
         frqdimsz = (ejones.shape[0],)
     else:
         frqdimsz = ()
     self.jones = np.reshape(
                     np.matmul(ejones, np.reshape(self.jonesr, (-1, 2, 2))),
                     frqdimsz+idxshape+(2, 2)
                     )
     self.thisjones = np.reshape(ejones, frqdimsz+idxshape+(2, 2))
Example #8
    def get_pelecs_and_pions(self, convert_to_muC_per_cm2=False):
        """
        Get the electronic and ionic dipole moments / polarizations.

        convert_to_muC_per_cm2: Convert from electron * Angstroms to microCoulomb
            per centimeter**2
        """

        if not convert_to_muC_per_cm2:
            return self.p_elecs, self.p_ions

        if convert_to_muC_per_cm2:
            p_elecs = self.p_elecs.T
            p_ions = self.p_ions.T

            volumes = [s.lattice.volume for s in self.structures]
            e_to_muC = -1.6021766e-13
            cm2_to_A2 = 1e16
            units = 1.0 / np.array(volumes)
            units *= e_to_muC * cm2_to_A2

            p_elecs = np.matmul(units, p_elecs)
            p_ions = np.matmul(units, p_ions)

            p_elecs, p_ions = p_elecs.T, p_ions.T

            return p_elecs, p_ions
Example #9
def forwardPropogation(W,B,inputDataVector):

	A = []
	H = []

	A.append(np.add(B[0],np.matmul(W[0],inputDataVector)))
	
	if(activation=="sigmoid"):
		H.append(sigmoidFunctionToVector(A[0]))
	else:
		H.append(tanhFunctionToVector(A[0]))

	for k in range(1,num_hidden):
		A.append(np.add(B[k],np.matmul(W[k],H[k-1])))
		
		if(activation=="sigmoid"):
			H.append(sigmoidFunctionToVector(A[k]))
		else:
			H.append(tanhFunctionToVector(A[k]))

	A.append(np.add(B[-1],np.matmul(W[-1],H[-1])))
	
	y_hat = softmax(A[-1])

	return A,H,y_hat
Example #10
def TestBackpropMmt():
    X = np.array([[0, 0, 1],
                  [0, 1, 1],
                  [1, 0, 1],
                  [1, 1, 1]])
    
    D = np.array([[0],
                  [1],
                  [1],
                  [0]])
    
    W1 = 2*np.random.random((4, 3)) - 1
    W2 = 2*np.random.random((1, 4)) - 1
    
    for _epoch in range(10000):
        W1, W2 = BackPropMmt(W1, W2, X, D)
        
    N = 4
    for k in range(N):
        x  = X[k, :].T
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v  = np.matmul(W2, y1)
        y  = Sigmoid(v)
        print(y)
Example #11
  def _testSvdCorrectness(self, dtype, shape):
    np.random.seed(1)
    x_np = np.random.uniform(low=-1.0, high=1.0, size=shape).astype(dtype)
    m, n = shape[-2], shape[-1]
    _, s_np, _ = np.linalg.svd(x_np)
    with self.cached_session() as sess:
      x_tf = array_ops.placeholder(dtype)
      with self.test_scope():
        s, u, v = linalg_ops.svd(x_tf, full_matrices=True)
      s_val, u_val, v_val = sess.run([s, u, v], feed_dict={x_tf: x_np})
      u_diff = np.matmul(u_val, np.swapaxes(u_val, -1, -2)) - np.eye(m)
      v_diff = np.matmul(v_val, np.swapaxes(v_val, -1, -2)) - np.eye(n)
      # Check u_val and v_val are orthogonal matrices.
      self.assertLess(np.linalg.norm(u_diff), 1e-2)
      self.assertLess(np.linalg.norm(v_diff), 1e-2)
      # Check that the singular values are correct, i.e., close to the ones from
      # numpy.linalg.svd.
      self.assertLess(np.linalg.norm(s_val - s_np), 1e-2)
      # The tolerance is set based on our tests on numpy's svd. As our tests
      # have batch dimensions and all our operations are on float32, we set the
      # tolerance a bit larger. Numpy's svd calls LAPACK's svd, which operates
      # on double precision.
      self.assertLess(
          np.linalg.norm(self._compute_usvt(s_val, u_val, v_val) - x_np), 2e-2)

      # Check behavior with compute_uv=False.  We expect to still see 3 outputs,
      # with a sentinel scalar 0 in the last two outputs.
      with self.test_scope():
        no_uv_s, no_uv_u, no_uv_v = gen_linalg_ops.svd(
            x_tf, full_matrices=True, compute_uv=False)
      no_uv_s_val, no_uv_u_val, no_uv_v_val = sess.run(
          [no_uv_s, no_uv_u, no_uv_v], feed_dict={x_tf: x_np})
      self.assertAllClose(no_uv_s_val, s_val, atol=1e-4, rtol=1e-4)
      self.assertEqual(no_uv_u_val, 0.0)
      self.assertEqual(no_uv_v_val, 0.0)
Example #12
def angle_median(alpha, dt, speed, delta=1, offset=0.0, length=CAR_LENGTH):
    # Rotation radius
    # radius = length / np.sin(angle)
    # alpha = speed * dt / length * np.sin(angle)

    # Rotation matrices.
    rot_mat = np.zeros(shape=(len(alpha), 2, 2), dtype=np.float32)
    rot_mat[:, 1, 1] = np.cos(alpha)
    rot_mat[:, 0, 0] = np.cos(alpha)
    rot_mat[:, 0, 1] = np.sin(alpha)
    rot_mat[:, 1, 0] = -np.sin(alpha)

    # dx displacement vectors.
    dx = np.zeros(shape=(len(alpha), 2), dtype=np.float32)
    dx[:, 0] = speed * dt * cosc(alpha)
    dx[:, 1] = speed * dt * sinc(alpha)
    steps = np.ones(shape=(len(alpha), ), dtype=np.int8)

    # Local coordinate system. TODO: dense matrix notation...
    ax = np.zeros(shape=(len(alpha), 2, 1), dtype=np.float32)
    ay = np.zeros(shape=(len(alpha), 2, 1), dtype=np.float32)
    ax[:, 0, 0] = ay[:, 1, 0] = 1.0
    ax = np.matmul(rot_mat, ax)
    ay = np.matmul(rot_mat, ay)

    # Delta - Cumulative transformations and dx.
    cumul_dx = dx.copy()
    for j in range(1, delta):
        # Update cumulative dx.
        cumul_dx[:-j, 0] += dx[j:, 0] * ax[:-j, 0, 0]
        cumul_dx[:-j, 1] += dx[j:, 0] * ax[:-j, 1, 0]

        cumul_dx[:-j, 0] += dx[j:, 1] * ay[:-j, 0, 0]
        cumul_dx[:-j, 1] += dx[j:, 1] * ay[:-j, 1, 0]

        # Update local coordinate system.
        ax[:-j] = np.matmul(rot_mat[j:], ax[:-j])
        ay[:-j] = np.matmul(rot_mat[j:], ay[:-j])
        steps[:-j] += 1

    # Median fit...
    P0 = np.zeros(shape=(len(alpha), 2), dtype=np.float32)
    P0[:, 0] = offset
    P1 = cumul_dx

    mask = (P1[:, 1] < 0.)
    P1[mask, 1] = 0.0

    # Parameters in equation: ax - b = 0.
    a = P1 - P0
    m = (P0 + P1) / 2.
    b = a[:, 0] * m[:, 0] + a[:, 1] * m[:, 1]
    # Inverse radius and angle.
    kappa = a[:, 0] / b
    angle = np.arcsin(kappa * length)

    # Just in case...
    angle[np.isnan(angle)] = 0.0

    return angle
Example #13
def BackPropMmt(W1, W2, X, D):
    alpha = 0.9
    beta  = 0.9
    
    mmt1 = np.zeros_like(W1)
    mmt2 = np.zeros_like(W2)
    
    N = 4
    for k in range(N):
        x = X[k, :].T
        d = D[k]
        
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v  = np.matmul(W2, y1)
        y  = Sigmoid(v)
        
        e     = d - y
        delta = y*(1-y) * e
        
        e1     = np.matmul(W2.T, delta)
        delta1 = y1*(1-y1) * e1
        
        dW1  = (alpha*delta1).reshape(4, 1) * x.reshape(1, 3)
        mmt1 = dW1 + beta*mmt1
        W1   = W1 + mmt1
        
        dW2  = alpha * delta * y1
        mmt2 = dW2 + beta*mmt2
        W2   = W2 + mmt2
    
    return W1, W2
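The mmt1/mmt2 updates implement classical momentum; note that both buffers are re-zeroed on every call, so momentum only accumulates within one pass over the four samples:

m_t = \Delta W_t + \beta\, m_{t-1}, \qquad W_{t+1} = W_t + m_t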
Example #14
 def predict(self, X_test):
     self.check_fitted()
     if X_test.ndim != 2:
         raise Exception("X_test should have 2 dimensions! X_dim:{}"
                         .format(X_test.ndim))
     X_test = np.float32(GPRNP.check_array(X_test))
     test_size = X_test.shape[0]
     arr_offset = 0
     length_scale = self.length_scale
     yhats = np.zeros([test_size, 1])
     sigmas = np.zeros([test_size, 1])
     eips = np.zeros([test_size, 1])
     while arr_offset < test_size:
         if arr_offset + self.batch_size_ > test_size:
             end_offset = test_size
         else:
             end_offset = arr_offset + self.batch_size_
         xt_ = X_test[arr_offset:end_offset]
         K2 = self.magnitude * np.exp(-ed(self.X_train, xt_) / length_scale)
         K3 = self.magnitude * np.exp(-ed(xt_, xt_) / length_scale)
         K2_trans = np.transpose(K2)
         yhat = np.matmul(K2_trans, np.matmul(self.K_inv, self.y_train))
         sigma = np.sqrt(np.diag(K3 - np.matmul(K2_trans, np.matmul(self.K_inv, K2)))) \
             .reshape(xt_.shape[0], 1)
         u = (self.y_best - yhat) / sigma
         phi1 = 0.5 * special.erf(u / np.sqrt(2.0)) + 0.5
         phi2 = (1.0 / np.sqrt(2.0 * np.pi)) * np.exp(np.square(u) * (-0.5))
         eip = sigma * (u * phi1 + phi2)
         yhats[arr_offset:end_offset] = yhat
         sigmas[arr_offset:end_offset] = sigma
         eips[arr_offset:end_offset] = eip
         arr_offset = end_offset
     GPRNP.check_output(yhats)
     GPRNP.check_output(sigmas)
     return GPRResult(yhats, sigmas)
Example #15
def BackpropXOR(W1, W2, X, D):
    alpha = 0.9
    
    N = 4
    for k in range(N):
        x = X[k, :].T
        d = D[k]
        
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v  = np.matmul(W2, y1)
        y  = Sigmoid(v)
        
        e     = d - y
        delta = y*(1-y) * e
        
        e1     = np.matmul(W2.T, delta)
        delta1 = y1*(1-y1) * e1
        
        dW1 = (alpha*delta1).reshape(4, 1) * x.reshape(1, 3)
        W1  = W1 + dW1
        
        dW2 = alpha * delta * y1
        W2  = W2 + dW2
    
    return W1, W2
Example #16
def rotate_moment_tensor(mt,R):
    """
    rotates moment-tensor mt using rotation matrix R
    """
    # converts from eigenvectors & eigenvalues representation to harvard-convention
    # mt = [Mrr, Mtt, Mpp, Mrt, Mrp, Mtp ] (given in diagonal-form)
    M11 = mt[0]
    M22 = mt[1]
    M33 = mt[2]
    M12 = mt[3]
    M13 = mt[4]
    M23 = mt[5]

    # symmetric 3x3 moment-tensor
    mat = np.array([ [M11,M12,M13], [M12,M22,M23], [M13,M23,M33] ])

    # using rotation matrix R (is eigenvector matrix here):  R * M * transp(R)
    res = np.matmul(mat,np.transpose(R))
    mat = np.matmul(R,res)

    # note: rotation back to original coordinate-frame will introduce numerical round-off errors
    #print("  rotated mat:\n",R,mat)

    # mt = [Mrr, Mtt, Mpp, Mrt, Mrp, Mtp ]
    mt_rot = np.array([mat[0,0],mat[1,1],mat[2,2],mat[0,1],mat[0,2],mat[1,2]])

    return mt_rot
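A quick sanity check of the index convention (illustrative; an identity rotation must return the tensor unchanged):

import numpy as np

mt = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3])  # [M11, M22, M33, M12, M13, M23]
assert np.allclose(rotate_moment_tensor(mt, np.eye(3)), mt)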
Example #17
    def backward(self, y, all_x):
        """backward

        :param y:  the label, the actual class of the samples, in one-hot format
        :param all_x: input data and activation from every layer
        """
        
        # [TODO 1.5] Compute delta factor from the output
        delta = all_x[-1] - y
        delta /= y.shape[0]
        # print('last delta shape = ', delta.shape)
        
        # [TODO 1.5] Compute gradient of the loss function with respect to w of softmax layer, use delta from the output        
        grad_last = np.matmul(np.transpose(all_x[-2]), delta)

        grad_list = []
        grad_list.append(grad_last)
        
        for i in range(len(self.layers) - 1)[::-1]:
            prev_layer = self.layers[i+1]
            layer = self.layers[i]
            x = all_x[i]
            # [TODO 1.5] Compute delta_prev factor for previous layer (in backpropagation direction)
            # print('last layer shape = ', prev_layer.w.shape)
            delta_prev = np.matmul(delta, np.transpose(prev_layer.w))
            # Use delta_prev to compute the delta factor for the next layer (in backpropagation direction)
            grad_w, delta = layer.backward(x, delta_prev)
            grad_list.append(grad_w.copy())

        grad_list = grad_list[::-1]
        return grad_list
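In matrix form the factor handed to the previous layer is the standard backpropagation product (the activation derivative is applied inside layer.backward):

\delta_{\mathrm{prev}} = \delta\, W^{\top}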
Example #18
    def get_inverse_transform(self, im_ref, mode='affine'):
        aff_im_self = self.im_file.affine
        aff_im_ref = im_ref.im_file.affine
        if mode == 'affine':
            transform = np.matmul(np.linalg.inv(aff_im_ref), aff_im_self)
        else:
            T_self, R_self, Sc_self, Sh_self = affines.decompose44(aff_im_self)
            T_ref, R_ref, Sc_ref, Sh_ref = affines.decompose44(aff_im_ref)
            if mode == 'translation':
                T_transform = T_self - T_ref
                R_transform = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            elif mode == 'rigid':
                T_transform = T_self - T_ref
                R_transform = np.matmul(np.linalg.inv(R_ref), R_self)
                Sc_transform = np.array([1.0, 1.0, 1.0])
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            elif mode == 'rigid_scaling':
                T_transform = T_self - T_ref
                R_transform = np.matmul(np.linalg.inv(R_ref), R_self)
                Sc_transform = Sc_self / Sc_ref
                transform = affines.compose(T_transform, R_transform, Sc_transform)
            else:
                transform = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

        return transform
Example #19
    def get_Matrix_VC(self):
        H_x_to_xe0 = np.zeros([self.num_all_edges, self.n], np.float32)
        H_sum_by_V_to_C = np.zeros([self.num_all_edges, self.num_all_edges], dtype=np.float32)
        H_xe_last_to_y = np.zeros([self.n, self.num_all_edges], dtype=np.float32)
        Map_row_to_line = np.zeros([self.num_all_edges, 1])

        for i in range(0, self.num_all_edges):
            Map_row_to_line[i] = np.where(self.loc_nzero1 == self.loc_nzero2[i])

        map_H_row_to_line = np.zeros([self.num_all_edges, self.num_all_edges], dtype=np.float32)

        for i in range(0, self.num_all_edges):
            map_H_row_to_line[i, int(Map_row_to_line[i])] = 1

        count = 0
        for i in range(0, self.n):
            temp = count + self.H_sum_line[i]
            H_sum_by_V_to_C[count:temp, count:temp] = 1
            H_xe_last_to_y[i, count:temp] = 1
            H_x_to_xe0[count:temp, i] = 1
            for j in range(0, self.H_sum_line[i]):
                H_sum_by_V_to_C[count + j, count + j] = 0
            count = count + self.H_sum_line[i]
        print("return Matrics V-C successfully!\n")
        return H_x_to_xe0, np.matmul(H_sum_by_V_to_C, map_H_row_to_line), np.matmul(H_xe_last_to_y, map_H_row_to_line)
Example #20
    def calculate_discretization(self, X, U, sigma):
        """
        Calculate discretization for given states, inputs and total time.

        :param X: Matrix of states for all time points
        :param U: Matrix of inputs for all time points
        :param sigma: Total time
        :return: The discretization matrices
        """
        for k in range(self.K - 1):
            self.V0[self.x_ind] = X[:, k]
            V = np.array(odeint(self._ode_dVdt, self.V0, (0, self.dt),
                                args=(U[:, k], U[:, k + 1], sigma))[1, :])

            # using \Phi_A(\tau_{k+1},\xi) = \Phi_A(\tau_{k+1},\tau_k)\Phi_A(\xi,\tau_k)^{-1}
            # flatten matrices in column-major (Fortran) order for CVXPY
            Phi = V[self.A_bar_ind].reshape((self.n_x, self.n_x))
            self.A_bar[:, k] = Phi.flatten(order='F')
            self.B_bar[:, k] = np.matmul(Phi, V[self.B_bar_ind].reshape(
                (self.n_x, self.n_u))).flatten(order='F')
            self.C_bar[:, k] = np.matmul(Phi, V[self.C_bar_ind].reshape(
                (self.n_x, self.n_u))).flatten(order='F')
            self.S_bar[:, k] = np.matmul(Phi, V[self.S_bar_ind])
            self.z_bar[:, k] = np.matmul(Phi, V[self.z_bar_ind])

        return self.A_bar, self.B_bar, self.C_bar, self.S_bar, self.z_bar
Example #21
 def dlnprob(self, theta):
     
     if self.batchsize > 0:
         batch = [ i % self.N for i in range(self.iter * self.batchsize, (self.iter + 1) * self.batchsize) ]
         ridx = self.permutation[batch]
         self.iter += 1
     else:
         ridx = np.random.permutation(self.X.shape[0])
         
     Xs = self.X[ridx, :]
     Ys = self.Y[ridx]
     
     w = theta[:, :-1]  # logistic weights
     alpha = np.exp(theta[:, -1])  # the last column is logalpha
     d = w.shape[1]
     
     wt = np.multiply((alpha / 2), np.sum(w ** 2, axis=1))
     
     coff = np.matmul(Xs, w.T)
     y_hat = 1.0 / (1.0 + np.exp(-1 * coff))
     
     dw_data = np.matmul(((nm.repmat(np.vstack(Ys), 1, theta.shape[0]) + 1) / 2.0 - y_hat).T, Xs)  # Y \in {-1,1}
     dw_prior = -np.multiply(nm.repmat(np.vstack(alpha), 1, d) , w)
     dw = dw_data * 1.0 * self.X.shape[0] / Xs.shape[0] + dw_prior  # re-scale
     
     dalpha = d / 2.0 - wt + (self.a0 - 1) - self.b0 * alpha + 1  # the last term is the jacobian term
     
     return np.hstack([dw, np.vstack(dalpha)])  # first-order derivative
Example #22
    def rotate_smooth(self, current_up, current_angvel, target_rot, speed=0.01):
        for i in range(len(target_rot)):
            if target_rot[i] > 360:
                target_rot[i] -= 360
            if target_rot[i] < 0:
                target_rot[i] += 360
        # direction = (np.array(target_rot) - np.array(current_rot))
        # print(str(target_rot))
        # print(str(current_rot))
        # direction = speed * direction

        target_rot = np.array(target_rot)
        target_rot = np.deg2rad(target_rot)
        # x axis rotation
        th = target_rot[0]
        rx = np.array([[1, 0, 0], [0, np.cos(th), np.sin(th)], [0, -np.sin(th), np.cos(th)]])
        # y axis rotation
        th = target_rot[1]
        ry = np.array([[np.cos(th), 0, -np.sin(th)], [0, 1, 0], [np.sin(th), 0, np.cos(th)]])
        # z axis rotation
        th = target_rot[2]
        rz = np.array([[np.cos(th), np.sin(th), 0], [-np.sin(th), np.cos(th), 0], [0, 0, 1]])

        target_axis = np.matmul(np.matmul(np.matmul(rx, ry), rz), current_up)

        # z rotation only does not work with [0, 0, 1]; have to rotate around another axis
        # if (target_axis == np.array([0, 0, 1])).all():
        #     current_up = [0, 1, 0]
        #     target_axis = np.matmul(np.matmul(np.matmul(rx, ry), rz), current_up)
        return target_axis  # self.stabilize(current_up, current_angvel, target_axis)
Example #23
File: boxes3d.py Project: Bruslan/MV3D-1
def box3d_to_rgb_box(boxes3d, Mt=None, Kt=None):
    if (cfg.DATA_SETS_TYPE == 'kitti'):
        if Mt is None: Mt = np.array(MATRIX_Mt)
        if Kt is None: Kt = np.array(MATRIX_Kt)

        num  = len(boxes3d)
        projections = np.zeros((num,8,2),  dtype=np.int32)
        for n in range(num):
            box3d = boxes3d[n]
            Ps = np.hstack(( box3d, np.ones((8,1))) )
            Qs = np.matmul(Ps,Mt)
            Qs = Qs[:,0:3]
            qs = np.matmul(Qs,Kt)
            zs = qs[:,2].reshape(8,1)
            qs = (qs/zs)
            projections[n] = qs[:,0:2]
        return projections

    else:
        num = len(boxes3d)
        projections = np.zeros((num, 8, 2), dtype=np.int32)
        for n in range(num):
            box3d=boxes3d[n].copy()
            if np.sum(box3d[:,0]>0) >0:
                box2d = box3d_to_rgb_projection_cv2(box3d)
                box2d,out_range=convert_points_to_croped_image(box2d)
                if np.sum(out_range==False)>=2:
                    projections[n]=box2d
        return projections
Example #24
 def _mel_to_linear_matrix(self):
   """Get the inverse mel transformation matrix."""
   m = self._linear_to_mel_matrix()
   m_t = np.transpose(m)
   p = np.matmul(m, m_t)
   d = [1.0 / x if np.abs(x) > 1.0e-8 else x for x in np.sum(p, axis=0)]
   return np.matmul(m_t, np.diag(d))
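My reading of the construction: it approximates a pseudo-inverse of the mel matrix m by rescaling its transpose, leaving near-zero column sums untouched to avoid division blow-ups:

m^{+} \approx m^{\top}\operatorname{diag}(d), \qquad d_j = 1\Big/\sum_i (m\,m^{\top})_{ij} \ \text{ whenever } \Big|\textstyle\sum_i (m\,m^{\top})_{ij}\Big| > 10^{-8}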
Example #25
def geometric_distort (image0):

  assert image0.shape[0] == image0.shape[1], 'need a square on input'
  assert is_bgra(image0), image0.shape

  # warp
  shear1 = exp((np.random.rand()-0.5) * COEF_SHEAR)
  rot    = np.random.randn() * COEF_ROT
  shear2 = exp((np.random.rand()-0.5) * COEF_SHEAR)
  Shear1 = np.asarray([[shear1, 0], [0, 1.0/shear1]])
  Rot    = np.asarray([[cos(rot), sin(rot)], [-sin(rot), cos(rot)]])
  Shear2 = np.asarray([[shear2, 0], [0, 1.0/shear2]])    
  #print shear1, rot, shear2
  M = np.matmul(np.matmul(Shear2, Rot), Shear1)
  image = warp_patch (image0, M, 2)

  # crop to roi
  nnz = np.nonzero(image[:,:,3])
  # roi = (y1, x1, y2, x2)
  roi = (min(nnz[0].tolist()), min(nnz[1].tolist()),
         max(nnz[0].tolist()), max(nnz[1].tolist()))
  #print roi
  image = image[roi[0]:roi[2],roi[1]:roi[3],:]

  return image
Example #26
    def back_propagation(self, xs, ts):
        """
        calc back propagation of cross_entropy
        [in] xs: train image data
             ts: one hot vector corresponds the answer of x
        [out]
             partial derivatives of the cross-entropy loss
        """
        W1, W2 = self.parameters['W1'], self.parameters['W2']
        b1, b2 = self.parameters['b1'], self.parameters['b2']
        grads = {}

        batch_num = xs.shape[0]

        # forward
        z0 = xs
        u1 = np.matmul(z0, W1)+b1
        z1 = sigmoid(u1)
        u2 = np.matmul(z1, W2)+b2
        ys = softmax(u2)
        # backward
        # calc output layer error
        delta_output = (ys - ts) / batch_num
        # backward to hidden layer
        grads['W2'] = np.dot(z1.T, delta_output)
        grads['b2'] = np.sum(delta_output, axis=0)
        # take Hadamard product
        delta_1 = deriv_sigmoid(u1) * np.dot(delta_output, W2.T)
        # backward to hidden layer
        grads['W1'] = np.dot(z0.T, delta_1)
        grads['b1'] = np.sum(delta_1, axis=0)

        return grads
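The clean form of delta_output follows from the softmax-plus-cross-entropy identity, averaged over the batch of size N:

\frac{\partial}{\partial u_2}\Big[-\frac{1}{N}\sum_{n} t_n^{\top}\log\operatorname{softmax}(u_2)_n\Big] = \frac{1}{N}\,\big(\hat y - t\big)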
Example #27
def MultiClass(W1, W2, X, D):
    alpha = 0.9
    
    N = 5
    for k in range(N):
        x = np.reshape(X[:,:,k], (25, 1))
        d = D[k, :].T
        
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v  = np.matmul(W2, y1)
        y  = Softmax(v)
            
        e     = d - y
        delta = e
        
        e1     = np.matmul(W2.T, delta)
        delta1 = y1*(1-y1) * e1
        
        dW1 = alpha * delta1 * x.T
        W1  = W1 + dW1
        
        dW2 = alpha * delta * y1.T
        W2  = W2 + dW2
        
    return W1, W2
Example #28
def update_data(t):
    """
    Is run each step
    Calculates the seedbank size and plant population in the next step by
        multiplying M, the transition matrix, by X, the data matrix
    """

    global M
    global D
    global M_original
    global D_original

    if STEP_OUTPUT:
        print "[t: {}] Updating data...".format(t)
    # Manual changes in transition matrix and disperion matrix
    if t == 30:
        # Initial inundation of right side. Good graphs with N = 50, T = 75, 
        for cell_i in range(N-26, N):
            M[cell_i,0] = [ss*(1-g*0.001), 0.0]
            M[cell_i,1] = [g*0.001, l*0.001]

    # Migrate Seeds Produced
    X[t + 1] = np.transpose([np.matmul(M[c], X[t, :, c]) for c in range(0, int(N))]) + \
        np.matmul(e * np.transpose([[X[t, 1, c], 0]
                                    for c in range(0, int(N))]), D)
    if STEP_OUTPUT:
        print(X[t])
    if t == T - 2:
        print "Data Calculation finished"
Example #29
def projectBackBFM_withExpr(model, features, expr_paras):
	alpha = model.shapeEV * 0
	for it in range(0, 99):
		alpha[it] = model.shapeEV[it] * features[it]
	S = np.matmul(model.shapePC, alpha)

	expr = model.expEV * 0
	for it in range(0, 29):
		expr[it] = model.expEV[it] * expr_paras[it]
	E = np.matmul(model.expPC, expr)

	## Adding back average shape
	S = model.shapeMU + S + model.expMU + E
	numVert = S.shape[0] // 3  # integer division so the reshape below works under Python 3

	# (Texture)
	beta = model.texEV * 0
	for it in range(0, 99):
		beta[it] = model.texEV[it] * features[it+99]
	T = np.matmul(model.texPC, beta)
	## Adding back average texture
	T = model.texMU + T
	## Some filtering
	T = [truncateUint8(value) for value in T]
	## Final Saving for visualization
	S = np.reshape(S,(numVert,3))
	T = np.reshape(T,(numVert, 3))
	return S,T
Example #30
File: edf.py Project: kinshuk4/Coursera
 def backward(self):
     if self.x.grad is not None:
         self.x.grad = self.x.grad + np.matmul(self.y.value, self.grad.T).T
     if self.y.grad is not None:
         nabla = np.matmul(self.x.value.T.reshape(list(self.x.value.shape) + [1]),
                           self.grad.reshape([1] + list(self.grad.shape)))
         self.y.grad = self.y.grad + nabla
Example #31
 def Calculate_IDCT(self):
     self.TM = np.matmul(self.Tt, self.R)
     self.fin = np.matmul(self.TM, self.T)
     self.fin = self.fin.astype(int)
Example #32
                    feed_dict['output'][1]['output'],
                    weights=feed_dict['output'][2]['weights'],
                    nodes=layers[2]['nodes'],
                    activation=layers[2]['activation'])

            #cost calculation
            cost_per_epoch = cost_per_epoch - np.log(
                feed_dict['output'][2]['output'][np.argmax(
                    feed_dict['train_label'][i + batch * batch_size])])

            #calculating the gradients
            feed_dict['output'][2]['semi_update'] = feed_dict['output'][2][
                'output'] - feed_dict['train_label'][
                    i + batch * batch_size].reshape(-1, 1)
            feed_dict['output'][2]['update'] = np.matmul(
                feed_dict['output'][2]['semi_update'],
                np.transpose(np.vstack((feed_dict['output'][1]['output'], 1))))

            temp = feed_dict['output'][2][
                'weights'][:, 0:feed_dict['output'][2]['weights'].shape[1] - 1]
            feed_dict['output'][1]['semi_update'] = np.matmul(
                np.transpose(temp),
                feed_dict['output'][2]['semi_update']) * deriv(
                    feed_dict['output'][1]['output_raw'])
            feed_dict['output'][1]['update'] = np.matmul(
                feed_dict['output'][1]['semi_update'],
                np.transpose(np.vstack((feed_dict['output'][0]['output'], 1))))

            temp = feed_dict['output'][1][
                'weights'][:, 0:feed_dict['output'][1]['weights'].shape[1] - 1]
            feed_dict['output'][0]['semi_update'] = np.matmul(
Example #33
wage = train_data[0:2250, 10]
yeart = train_data[2250:3000, 0]
aget = train_data[2250:3000, 1]
edut = train_data[2250:3000, 4]
waget = train_data[2250:3000, 10]
error = 0
degree = 6  #input the degree here
Y = []
i = 0
x = np.linspace(20, 80, 400)
x = np.array(x)
X = np.ones(np.size(age))
X = np.transpose(X)
for i in range(degree):
    X = np.c_[X, age**(i + 1)]
W = np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)),
              np.matmul(np.transpose(X), wage))
Y = W[0] * np.ones(400)
for i in range(1, degree + 1):
    Y = Y + W[i] * (x**i)
for i in range(0, 750):
    a = 0
    for j in range(0, degree + 1):
        a = a + W[j] * (aget[i]**j)
    error = error + (a - waget[i])**2
print(error)
plt.xlabel("Wage")
plt.ylabel("Age")
plt.title("Polynomial regression for Age Vs Wage")  # error
plt.plot(age, wage, 'r.')  #points
plt.plot(x, Y)  #Plot of the curve
plt.show()
Example #34
def superitems_duals(superitems_pool, duals):
    """
    Compute Superitems dual from the given Items duals and SuperitemsPool
    """
    fsi, _, _ = superitems_pool.get_fsi()
    return np.matmul(duals, fsi.T)
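A minimal shape sketch, assuming fsi is a binary incidence matrix of shape [n_superitems, n_items], consistent with the transpose in the code (the values below are made up):

import numpy as np

fsi = np.array([[1, 0, 0],
                [0, 1, 1]])        # 2 superitems covering 3 items
duals = np.array([0.5, 1.0, 2.0])  # one dual per item
print(np.matmul(duals, fsi.T))     # [0.5 3. ]: each superitem sums the duals of its items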
Example #35
    def get_cloud_and_segments(self, dist=0.2, small_segment_size=250):
        """Get the point cloud and the segment values.

        Parameters
        ----------
        dist : float
            Threshold. If distance points in segments is greater than dist, than
            the segment will be split.
        small_segment_size : int
            Threshold to specify a small segments. The small segments will be
            deleted.

        Returns
        -------
        tuple(np.ndarray, np.ndarray, int)
            The point cloud, the segments and the scene id.

        """
        segments_filepath = "./ScannetScenes/" + self.id + "/" + self.id + "_segments.pcd"
        color_filepath = "./ScannetScenes/" + self.id + "/" + self.id + "_color.pcd"
        filtered_idxs_filepath = "./ScannetScenes/" + self.id + "/" + self.id + "_indices.txt"
        save_path = "./ScannetScenes/" + self.id + "/" + self.id + ".npz"
        segments_path = "./ScannetScenes/" + self.id + "/" + self.id + "_segments.npz"

        if not os.path.isfile(segments_path):
            P, C, orig_segments = load(
                segments_filepath=segments_filepath,
                color_filepath=color_filepath,
                filtered_idxs_filepath=filtered_idxs_filepath,
                save_path=save_path,
                verbose=self.verbose)

            uni_segments = np.unique(orig_segments)
            n_segments = uni_segments.shape[0]
            if self.verbose:
                print("n segments:", n_segments)
            P = np.hstack((P, C))
            if self.verbose:
                print("split segments")
            orig_segments, n_segments = split_segments(
                P=P,
                segments=orig_segments,
                dist=dist,
                verbose=self.verbose)
            if self.verbose:
                print("n segments", n_segments)
                print("sort points according to segment idxs")
            sorted_idxs = np.argsort(orig_segments)
            orig_segments = orig_segments[sorted_idxs]
            P = P[sorted_idxs]

            uni_segments, uni_idxs, uni_counts = np.unique(
                orig_segments, return_index=True, return_counts=True)
            n_segments = uni_segments.shape[0]
            if self.verbose:
                print("n segments:", n_segments)

            np.savez(segments_path, P=P, orig_segments=orig_segments)
            if self.verbose:
                print("segments saved")
        else:
            data = np.load(segments_path)
            P = data["P"]
            orig_segments = data["orig_segments"]
            if self.verbose:
                print("segments loaded")

        P[:, 3:] *= 255
        if self.verbose:
            print("filter small segments")
        idx_to_del = filter_small_segments(
            arr=orig_segments,
            small_segment_size=small_segment_size,
            verbose=self.verbose)
        P = np.delete(P, idx_to_del, axis=0)
        orig_segments = np.delete(orig_segments, idx_to_del)
        if self.verbose:
            print("reassgin segments")
        orig_segments, n_segments, _ = create_segments(arr=orig_segments)

        xyz_mean = np.mean(P[:, :3], axis=0)
        P[:, :3] = P[:, :3] - xyz_mean

        pca = PCA()
        pca.fit(P[:, :3])
        vec = pca.components_[0]
        ang = angle(vec, np.array([1, 0, 0]))
        if self.verbose:
            print("rotate P around", -np.rad2deg(ang), "°")
        R = get_rotation_mat(angle=-ang, axis=np.array([0, 0, 1]))
        P[:, :3] = np.transpose(np.matmul(R, np.transpose(P[:, :3])))

        self.P = P
        self.segments = orig_segments
        return self.P, self.segments, self.id
Example #36
 def backward(self):
     if self.x.grad is None:
         return
     gvdot = np.matmul(self.grad[..., np.newaxis, :],
                       self.value[..., np.newaxis]).squeeze(-1)
     self.x.grad = self.x.grad + self.value * (self.grad - gvdot)
Example #37
def master_problem(layer_pool, tlim=None, relaxation=True, enable_output=False):
    """
    Solve the master problem, either in its full version (MP)
    or in its relaxed version (RMP). Returns the following:
    - Objective value: minimization of sum(alpha[l] * h[l]), with h heights and l layer
    - Alpha values: alpha[l] represents layer selection
    - [RMP] Duals: one dual for each item
    """
    logger.info("RMP defining variables and constraints")

    # Solver
    if relaxation:
        slv = pywraplp.Solver("RMP", pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
    else:
        slv = pywraplp.Solver("MP", pywraplp.Solver.BOP_INTEGER_PROGRAMMING)

    # Enable verbose output from solver
    if enable_output:
        slv.EnableOutput()

    # Utility
    fsi, _, _ = layer_pool.superitems_pool.get_fsi()
    zsl = layer_pool.get_zsl()
    ol = layer_pool.get_ol()
    infinity = slv.infinity()
    n_layers = len(layer_pool)
    n_items = fsi.shape[-1]

    # Variables
    if relaxation:
        al = [slv.NumVar(0, infinity, f"alpha_{l}") for l in range(n_layers)]
    else:
        al = [slv.BoolVar(f"alpha_{l}") for l in range(n_layers)]

    # Constraints
    constraints = []
    coefficients = np.matmul(fsi.T, zsl)

    # Select each item at least once
    # sum(al[l] * zsl[s, l] * fsi[s, i])
    for i in range(n_items):
        c = slv.Constraint(1, infinity, f"c_{i}")
        for l in range(n_layers):
            if coefficients[i, l] > 0:
                c.SetCoefficient(al[l], float(coefficients[i, l]))
        constraints += [c]

    # Objective
    obj = slv.Objective()
    for l, h in enumerate(ol):
        obj.SetCoefficient(al[l], float(h))
    obj.SetMinimization()

    # Set a time limit in milliseconds
    if tlim is not None:
        slv.SetTimeLimit(1000 * tlim)

    # Solve
    logger.debug(f"RMP variables: {slv.NumVariables()}")
    logger.debug(f"RMP constraints: {slv.NumConstraints()}")
    status = slv.Solve()
    logger.debug(f"RMP iterations: {slv.iterations()}")

    # Extract results
    duals, alphas = None, None
    objective = float("inf")
    if status in (slv.OPTIMAL, slv.FEASIBLE):
        logger.info(f"RMP solved")

        # Extract alpha values
        alphas = [al[l].solution_value() for l in range(n_layers)]
        logger.debug(f"RMP alphas: {alphas}")
        if not all(alphas[l] in (0, 1) for l in range(n_layers)):
            logger.debug("RMP solution not feasible (at least one alpha value is not binary)")

        # Extract objective value
        objective = slv.Objective().Value()
        logger.debug(f"RMP objective: {objective}")

        # Extract duals
        if relaxation:
            duals = np.array([c.DualValue() for c in constraints])
            logger.debug(f"RMP duals: {duals}")
    else:
        logger.warning("RMP unfeasible")

    logger.debug(f"RMP time: {slv.WallTime() / 1000}")
    return objective, alphas, duals
Example #38
 def forward(self):
     self.value = np.matmul(self.x.value, self.y.value)
Example #39
true_vect = strf_to_vector(true_strf)

# # plots the stim, delayed lines version, STRF and vectorized STRF
# fig, axes = plt.subplots(2,2)
# axes = np.ravel(axes)
# axes[0].imshow(X, aspect='auto')
# axes[0].set_title('stimulus')
# axes[1].imshow(delayed, aspect='auto')
# axes[1].set_title('stim delayed lines')
# axes[2].imshow(true_strf, aspect='auto')
# axes[2].set_title('true STRF')
# axes[3].imshow(true_vect[:, None],)
# axes[3].set_title('true vector')

# uses matrix multiplication to get the STRF response(Y) to the dummy sound (X)
Y = np.matmul(true_vect, delayed)

# defines the linear model
model = Sequential()
model.add(
    Dense(1,
          input_dim=int(channels * delays),
          kernel_initializer='normal',
          activation='linear'))
model.compile(loss='mean_squared_error', optimizer='adam')

# fits the model and transforms the vector into the equivalent STRF
model.fit(delayed.T, Y, batch_size=100, epochs=10000, verbose=1)

fit_vect = model.layers[0].get_weights()[0].squeeze()
fit_strf = vector_to_strf(fit_vect, delays)
Example #40
    # Truth - each column of phi spans the global domain
    phi_trunc, cf_trunc = generate_pod_bases(sm, num_modes, tsteps)
    perfect_output = cf_trunc[:, -1]

    # # POD Galerkin - for comparison
    output_state_gp, state_tracker_gp = galerkin_projection(
        phi_trunc, cf_trunc, sm_mean, tsteps, Rnum, dt, dx, num_modes)

    # # LSTM network - note this will only give good predictions till the last three timesteps
    model = lstm_for_dynamics(cf_trunc, deployment_mode)
    output_state_lstm, state_tracker_lstm = evaluate_rom_deployment_lstm(
        model, cf_trunc, tsteps)
    np.save('Burgulence_LSTM_Coefficients.npy', state_tracker_lstm)

    #Visualization - Spectra
    u_true = sm_mean + (np.matmul(phi_trunc, perfect_output))[:]
    u_gp = sm_mean + (np.matmul(phi_trunc, output_state_gp))[:]
    u_lstm = sm_mean + (np.matmul(phi_trunc, output_state_lstm[:, 0]))[:]

    plt.figure()
    kx_plot = np.array([float(i) for i in list(range(0, nx // 2))])
    espec1 = spectra_calculation(u_true)
    espec2 = spectra_calculation(u_gp)
    espec3 = spectra_calculation(u_lstm)
    plt.loglog(kx_plot, espec1, label='Truth')
    plt.loglog(kx_plot, espec2, label='GP')
    plt.loglog(kx_plot, espec3, label='LSTM')
    plt.legend()
    plt.show()

    # Spectra residuals
Example #41
 def backward(self):
     if self.x.grad is not None:
         self.x.grad = self.x.grad + np.matmul(self.y.value, self.grad.T).T
     if self.y.grad is not None:
         self.y.grad = self.y.grad + np.matmul(self.x.value.T, self.grad)
Example #42
 def _mean(self, x):
     ks = self._kernel(x, self._x)
     return np.matmul(ks.T, self._kfmap)
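This matches the Gaussian-process posterior mean, assuming _kfmap caches the precomputed weights K^{-1} y (an inference from the name):

\mu(x_*) = k(x_*, X)^{\top} K^{-1}\, y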
Example #43
#print (x)
#print (s)
#print (v.shape)

su = 0
x_axis = []
y_axis = []

for i in range(0, 520, 1):
	su += s[i]
	per = su/x         # percentage of the image re-constructed
	x_axis.append(i+1) # x-axis: number of features used to reconstruct the image
	y_axis.append(per)

v = np.transpose(v)
coeff = np.matmul(im_matrix, v)  # coefficient matrix, reconstruction

a = coeff[:,0:1]
b = coeff[:,1:2]
c = coeff[:,2:3]
d = np.zeros(520)

plt.scatter(a,d)
plt.xlabel('PCA-1')
plt.title('1-Dimensional Plot')

plt.figure()
plt.scatter(a,b)
plt.xlabel('PCA-2')
plt.ylabel('PCA-3')
plt.title('2-Dimensional Plot')
Example #44
def simrank_similarity_numpy(
    G,
    source=None,
    target=None,
    importance_factor=0.9,
    max_iterations=100,
    tolerance=1e-4,
):
    """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``.

    The SimRank algorithm for determining node similarity is defined in
    [1]_.

    Parameters
    ----------
    G : NetworkX graph
        A NetworkX graph

    source : node
        If this is specified, the returned dictionary maps each node
        ``v`` in the graph to the similarity between ``source`` and
        ``v``.

    target : node
        If both ``source`` and ``target`` are specified, the similarity
        value between ``source`` and ``target`` is returned. If
        ``target`` is specified but ``source`` is not, this argument is
        ignored.

    importance_factor : float
        The relative importance of indirect neighbors with respect to
        direct neighbors.

    max_iterations : integer
        Maximum number of iterations.

    tolerance : float
        Error tolerance used to check convergence. When an iteration of
        the algorithm finds that no similarity value changes more than
        this amount, the algorithm halts.

    Returns
    -------
    similarity : numpy matrix, numpy array or float
        If ``source`` and ``target`` are both ``None``, this returns a
        Matrix containing SimRank scores of the nodes.

        If ``source`` is not ``None`` but ``target`` is, this returns an
        Array containing SimRank scores of ``source`` and that
        node.

        If neither ``source`` nor ``target`` is ``None``, this returns
        the similarity value for the given pair of nodes.

    Examples
    --------
        >>> from numpy import array
        >>> G = nx.cycle_graph(4)
        >>> sim = nx.simrank_similarity_numpy(G)

    References
    ----------
    .. [1] G. Jeh and J. Widom.
           "SimRank: a measure of structural-context similarity",
           In KDD'02: Proceedings of the Eighth ACM SIGKDD
           International Conference on Knowledge Discovery and Data Mining,
           pp. 538--543. ACM Press, 2002.
    """
    # This algorithm follows roughly
    #
    #     S = max{C * (A.T * S * A), I}
    #
    # where C is the importance factor, A is the column normalized
    # adjacency matrix, and I is the identity matrix.
    import numpy as np

    adjacency_matrix = nx.to_numpy_array(G)

    # column-normalize the ``adjacency_matrix``
    adjacency_matrix /= adjacency_matrix.sum(axis=0)

    newsim = np.eye(adjacency_matrix.shape[0], dtype=np.float64)
    for _ in range(max_iterations):
        prevsim = np.copy(newsim)
        newsim = importance_factor * np.matmul(
            np.matmul(adjacency_matrix.T, prevsim), adjacency_matrix
        )
        np.fill_diagonal(newsim, 1.0)

        if np.allclose(prevsim, newsim, atol=tolerance):
            break

    if source is not None and target is not None:
        return newsim[source, target]
    if source is not None:
        return newsim[source]
    return newsim
Example #45
File: test_tree.py Project: zakraicik/shap
def test_single_tree_nonlinear_transformations():
    """ Make sure Independent Tree SHAP single trees with non-linear
    transformations.
    """
    # Supported non-linear transforms
    # def sigmoid(x):
    #     return(1/(1+np.exp(-x)))

    # def log_loss(yt,yp):
    #     return(-(yt*np.log(yp) + (1 - yt)*np.log(1 - yp)))

    # def mse(yt,yp):
    #     return(np.square(yt-yp))

    try:
        import xgboost
    except ImportError:
        print("Skipping test_single_tree_nonlinear_transformations!")
        return
    import shap
    import numpy as np
    np.random.seed(10)

    n = 1000
    X = np.random.normal(size=(n, 7))
    b = np.array([-2, 1, 3, 5, 2, 20, -5])
    y = np.matmul(X, b)
    y = y + abs(min(y))
    y = np.random.binomial(n=1, p=y / max(y))
    max_depth = 6

    # train a model with single tree
    Xd = xgboost.DMatrix(X, label=y)
    model = xgboost.train(
        {
            'eta': 1,
            'max_depth': max_depth,
            'base_score': y.mean(),
            "lambda": 0,
            "objective": "binary:logistic"
        }, Xd, 1)
    pred = model.predict(Xd, output_margin=True)  # In margin space (log odds)
    trans_pred = model.predict(Xd)  # In probability space

    expl = shap.TreeExplainer(model, X, feature_perturbation="interventional")
    f = lambda inp: model.predict(xgboost.DMatrix(inp), output_margin=True)
    expl_kern = shap.KernelExplainer(f, X)

    x_ind = 0
    x = X[x_ind:x_ind + 1, :]
    itshap = expl.shap_values(x)
    kshap = expl_kern.shap_values(x, nsamples=300)
    assert np.allclose(itshap.sum() + expl.expected_value, pred[x_ind]), \
    "SHAP values don't sum to model output on explaining margin!"
    assert np.allclose(itshap, kshap), \
    "Independent Tree SHAP doesn't match Kernel SHAP on explaining margin!"

    model.set_attr(objective="binary:logistic")
    expl = shap.TreeExplainer(model,
                              X,
                              feature_perturbation="interventional",
                              model_output="probability")
    itshap = expl.shap_values(x)
    assert np.allclose(itshap.sum() + expl.expected_value, trans_pred[x_ind]), \
    "SHAP values don't sum to model output on explaining logistic!"
Example #46
import numpy as np

a = np.array([2,4,6,8]).reshape(2,2)
b = np.array([2,2,2,2]).reshape(2,2)

print(a)
print(b)

print(a+b)
print(a-b)
print(a*b)
print(a/b)

# matrix multiplication
print(np.matmul(a,b))
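Since Python 3.5 the @ operator dispatches to the same operation, so the last line could equivalently be written:

print(a @ b)  # [[12 12]
              #  [28 28]]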
Example #47
    def testSampleLarge(self):
        mu = np.array([-1., 1], dtype=np.float32)
        scale_tril = np.array([[3., 0], [1, -2]], dtype=np.float32) / 3.

        true_mean = mu
        true_scale = scale_tril
        true_covariance = np.matmul(true_scale, true_scale.T)
        true_variance = np.diag(true_covariance)
        true_stddev = np.sqrt(true_variance)

        dist = tfd.MultivariateNormalTriL(loc=mu,
                                          scale_tril=scale_tril,
                                          validate_args=True)

        # The following distributions will test the KL divergence calculation.
        mvn_chol = tfd.MultivariateNormalTriL(loc=np.array([0.5, 1.2],
                                                           dtype=np.float32),
                                              scale_tril=np.array(
                                                  [[3., 0], [1, 2]],
                                                  dtype=np.float32),
                                              validate_args=True)

        n = int(10e3)
        samps = dist.sample(n, seed=test_util.test_seed())
        sample_mean = tf.reduce_mean(input_tensor=samps, axis=0)
        x = samps - sample_mean
        sample_covariance = tf.matmul(x, x, transpose_a=True) / n

        sample_kl_chol = tf.reduce_mean(input_tensor=dist.log_prob(samps) -
                                        mvn_chol.log_prob(samps),
                                        axis=0)
        analytical_kl_chol = tfd.kl_divergence(dist, mvn_chol)

        scale = dist.scale.to_dense()

        [
            sample_mean_,
            analytical_mean_,
            sample_covariance_,
            analytical_covariance_,
            analytical_variance_,
            analytical_stddev_,
            sample_kl_chol_,
            analytical_kl_chol_,
            scale_,
        ] = self.evaluate([
            sample_mean,
            dist.mean(),
            sample_covariance,
            dist.covariance(),
            dist.variance(),
            dist.stddev(),
            sample_kl_chol,
            analytical_kl_chol,
            scale,
        ])

        sample_variance_ = np.diag(sample_covariance_)
        sample_stddev_ = np.sqrt(sample_variance_)

        tf1.logging.vlog(2, "true_mean:\n{}  ".format(true_mean))
        tf1.logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
        tf1.logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))

        tf1.logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
        tf1.logging.vlog(2,
                         "sample_covariance:\n{}".format(sample_covariance_))
        tf1.logging.vlog(
            2, "analytical_covariance:\n{}".format(analytical_covariance_))

        tf1.logging.vlog(2, "true_variance:\n{}".format(true_variance))
        tf1.logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
        tf1.logging.vlog(
            2, "analytical_variance:\n{}".format(analytical_variance_))

        tf1.logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
        tf1.logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
        tf1.logging.vlog(2,
                         "analytical_stddev:\n{}".format(analytical_stddev_))

        tf1.logging.vlog(2, "true_scale:\n{}".format(true_scale))
        tf1.logging.vlog(2, "scale:\n{}".format(scale_))

        tf1.logging.vlog(
            2, "kl_chol:      analytical:{}  sample:{}".format(
                analytical_kl_chol_, sample_kl_chol_))

        self.assertAllClose(true_mean, sample_mean_, atol=0., rtol=0.03)
        self.assertAllClose(true_mean, analytical_mean_, atol=0., rtol=1e-6)

        self.assertAllClose(true_covariance,
                            sample_covariance_,
                            atol=0.,
                            rtol=0.03)
        self.assertAllClose(true_covariance,
                            analytical_covariance_,
                            atol=0.,
                            rtol=1e-6)

        self.assertAllClose(true_variance,
                            sample_variance_,
                            atol=0.,
                            rtol=0.02)
        self.assertAllClose(true_variance,
                            analytical_variance_,
                            atol=0.,
                            rtol=1e-6)

        self.assertAllClose(true_stddev, sample_stddev_, atol=0., rtol=0.01)
        self.assertAllClose(true_stddev,
                            analytical_stddev_,
                            atol=0.,
                            rtol=1e-6)

        self.assertAllClose(true_scale, scale_, atol=0., rtol=1e-6)

        self.assertAllClose(sample_kl_chol_,
                            analytical_kl_chol_,
                            atol=0.,
                            rtol=0.02)
Example #48
	
	while not rospy.is_shutdown():

		if get == 1: #each time we have a new value
			get = 0
			# ----  Extended Kalman Filter ------------------------------------------------- #
			dt = vect_temps[2]
			F = np.array([[1,dt],[0,1]])
			B = np.array([[dt**2/2],[dt]])
			kf_x.kalman_set_F(F)
			kf_x.kalman_set_B(B)

			ax,ay,az = vect_imu[0,0],vect_imu[1,0],vect_imu[2,0]
			

			#[x,P] = kf_x.kalman_predict(acc_NED[0,0])

			display_mag()

			
			R_mat = rotation_matrix(pitch,roll,yaw)
			v = np.matmul(np.linalg.inv(R_mat),np.array([[ax],[ay],[az]]))
			


			#plt.xlim((-1,1))
			#plt.ylim((-1,1))
			#plt.plot([0,cos(wind_direction)],[0, sin(wind_direction)])
			#plt.plot([0,cos(vect_wind_direction[1])],[0, sin(vect_wind_direction[1])])
			#plt.pause(0.01)
			#plt.cla()
Example #49
    def calc_descent(self):
        time.sleep(5)

        self.initialization()

        twist = Twist()
        twist.linear.x = self.vel[0]
        twist.linear.y = 0.0
        twist.linear.z = 0.0
        twist.angular.x = 0.0
        twist.angular.y = 0.0
        twist.angular.z = self.vel[1]

        print(twist)
        self.move_pub.publish(twist)

        #        # Transmit the current velocity, then change it based on accel
        r = rospy.Rate(5)
        while not rospy.core.is_shutdown():
            # Add next state
            self.get_light()
            temp = np.append(self.loc, self.light)
            #            print(temp)
            if len(self.states) >= 20:
                self.states.popleft()
            self.states.append(temp)

            # Calculate descent direction
            sum = np.array([0.0, 0.0])
            weight = 0.0
            gamma = -6
            factor = 0.1
            curState = self.states[-1]
            #            print("weight")
            for i in range(0, len(self.states) - 1):
                dif = self.states[i][0:2] - curState[0:2]
                dif_norm = np.linalg.norm(dif)
                weight = (1 / np.power(2, dif_norm)) - np.power(
                    10, (gamma * dif_norm))
                light_dif = weight * (self.states[i][2] -
                                      self.light) / (dif_norm)
                vec = light_dif * dif
                sum += vec
            sum = sum / np.linalg.norm(sum)
            if (np.any(np.isnan(sum))):
                continue
            var_mag = 0.3
            sum += [random.gauss(0, 1) * var_mag, random.gauss(0, 1) * var_mag]
            sum = np.array(factor * sum / np.linalg.norm(sum))
            #            print("sum")
            #            print(sum)

            self.acc = np.matmul(np.array([[1.0, 0], [0, 0.3]]),
                                 np.transpose(np.array(self.transmit(sum))))
            #            print(self.acc)
            self.vel = self.vel + self.acc * 0.2
            if (np.abs(self.vel[0]) > 0.08):
                print('check')
                self.vel[0] = np.sign(self.vel[0]) * 0.08
            if (np.abs(self.vel[1]) > 1.00):
                self.vel[1] = np.sign(self.vel[1]) * 1.00

            twist = Twist()
            twist.linear.x = self.vel[0]
            twist.linear.y = 0.0
            twist.linear.z = 0.0
            twist.angular.x = 0.0
            twist.angular.y = 0.0
            twist.angular.z = self.vel[1]

            print(twist)
            self.move_pub.publish(twist)

            r.sleep()

        twist = Twist()
        twist.linear.x = 0.0
        twist.linear.y = 0.0
        twist.linear.z = 0.0
        twist.angular.x = 0.0
        twist.angular.y = 0.0
        twist.angular.z = 0.0
        self.move_pub.publish(twist)

        return