def PCA_transform(train_feature, test_feature, mat_file_path):
    """ Unsupervised dimension reduction
    Note: scikit-learn's PCA could not perform the decomposition on the current
    training set, so the decomposition result computed in MATLAB is used here.
    """
    #import ipdb; ipdb.set_trace()
    print('PCA fitting...')
    """
    from sklearn.decomposition import PCA
    pca = PCA(n_components, whiten=False).fit(model_train_data)
    print('PCA transformation...')
    model_train_data2 = pca.transform(model_train_data)
    model_test_data2 = pca.transform(model_test_data)
    test_data2 = pca.transform(test_data)
    """
    # Load the .mat file
    import scipy.io
    mat_dict = scipy.io.loadmat(mat_file_path)
    W = np.asmatrix(mat_dict['COEFF'])
    
    # train_feature_transformed contains all of the training data
    #train_feature_transformed = np.asmatrix(mat_dict['train']) # already transformed
    #test_feature_transformed = np.asmatrix(mat_dict['test'])
    
    train_feature_transformed = np.asmatrix(train_feature) * W # or this one
    test_feature_transformed = np.asmatrix(test_feature) * W
    
    return np.array(train_feature_transformed), np.array(test_feature_transformed)
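
If the MATLAB step is ever unavailable, the COEFF matrix can be reproduced in plain NumPy with an SVD of the mean-centered training data. A minimal sketch with made-up shapes (note MATLAB computes COEFF from centered data, while PCA_transform above multiplies the raw features by W):

import numpy as np

rng = np.random.default_rng(0)
train_feature = rng.normal(size=(100, 5))   # toy stand-in for the real features
test_feature = rng.normal(size=(20, 5))

Xc = train_feature - train_feature.mean(axis=0)
_, _, Vt = np.linalg.svd(Xc, full_matrices=False)
W = Vt.T                                    # columns play the role of COEFF

train_transformed = np.asarray(np.asmatrix(train_feature) * np.asmatrix(W))
test_transformed = np.asarray(np.asmatrix(test_feature) * np.asmatrix(W))
print(train_transformed.shape, test_transformed.shape)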
Example #2
def noRidgeGradientDescentWhole(train,alpha):
    train = np.asmatrix(train)
    train_x = train[0:, 0 : -1]
    train_y = train[0:, -1]
    train_m, train_n = train_x.shape
    w_c = [0]*train_n
    w_c = np.asmatrix(w_c).T
    # initialize the return values so the function is safe even if the
    # loop below never hits the break condition
    wo = w_c
    train_err_last = None
    train_err_list = []
    for j in range(1,100000):
        a_c = 1.0 / (j+ 1)
        gradient = 1.0/train_m *((train_x.T * train_x) * w_c - train_x.T * train_y) 
        w_n = w_c - alpha*a_c* gradient
        mm = w_n - w_c
        sum_m = np.sum(np.square(mm))
        sqrt = sum_m ** 0.5
        w_c = w_n
        train_err = ((train_y - train_x * w_c).T * (train_y - train_x * w_c))/2.0/train_m 
        # + lamb/2.0/train_m * w_c.T*w_c
        train_err = np.asarray(train_err)[0][0]
        train_err_list.append(train_err)
        if j % 100 == 0:
            print("tried " + str(j) + " times")
            # print("gradient", gradient)
            # print(sqrt, 'sqrt')
        if sqrt < 0.2:
            wo = w_c
            train_err_last = train_err
            break
    # print train_err_last,'training err on the whole data set'
    return wo,train_err_list,train_err_last
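
A quick way to exercise the routine is a synthetic regression problem. A minimal sketch with made-up data (keep in mind the loop stops on the step-size criterion sqrt < 0.2, which the decaying 1/(j+1) factor can trigger well before the gradient vanishes):

import numpy as np

rng = np.random.default_rng(1)
X = rng.normal(size=(200, 3))
w_true = np.array([[1.0], [-2.0], [0.5]])
y = X @ w_true + 0.01 * rng.normal(size=(200, 1))
train = np.hstack([X, y])                  # last column is the target

w, err_list, err_last = noRidgeGradientDescentWhole(train, alpha=1.0)
print(np.asarray(w).ravel(), err_last)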
Example #3
    def sum(self, axis=None):
        """Sum the matrix over the given axis.  If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # We use multiplication by an array of ones to achieve this.
        # For some sparse matrix formats more efficient methods are
        # possible -- these should override this function.
        m, n = self.shape

        # Mimic numpy's casting.
        if np.issubdtype(self.dtype, np.float_):
            res_dtype = np.float_
        elif (np.issubdtype(self.dtype, np.int_) or
              np.issubdtype(self.dtype, np.bool_)):
            res_dtype = np.int_
        elif np.issubdtype(self.dtype, np.complex_):
            res_dtype = np.complex_
        else:
            res_dtype = self.dtype

        if axis is None:
            # sum over rows and columns
            return (self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))).sum()

        if axis < 0:
            axis += 2
        if axis == 0:
            # sum over columns
            return np.asmatrix(np.ones((1, m), dtype=res_dtype)) * self
        elif axis == 1:
            # sum over rows
            return self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))
        else:
            raise ValueError("axis out of bounds")
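
The ones-vector trick in the comment is easy to verify on a small CSR matrix; a quick sketch:

import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1, 0, 2],
                         [0, 3, 0]]))
m, n = A.shape
col_sums = np.asmatrix(np.ones((1, m))) * A     # the axis=0 case
row_sums = A * np.asmatrix(np.ones((n, 1)))     # the axis=1 case
print(col_sums)        # [[1. 3. 2.]]
print(row_sums.T)      # [[3. 3.]]
print(row_sums.sum())  # 6.0, the axis=None case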
Example #4
 def init_random( self, scale = 1.0 ):
     """
     Randomly initialise the biases and weights
     """
     self.b = numpy.asmatrix( numpy.random.normal( loc = 0.0, scale = scale, size = self.H ) )
     self.c = numpy.asmatrix( numpy.random.normal( loc = 0.0, scale = scale, size = self.V ) )
     self.W = numpy.asmatrix( numpy.random.normal( loc = 0.0, scale = scale, size = (self.H, self.V) ) )
Example #5
def test_arclength_half_circle():
    """ Here we define the tests for the lenght computer of our ArcLengthParametrizer, we try it with a half a 
    circle and a fan. 
    We test it both in 2d and 3d."""


    # Number of interpolation points minus one
    n = 5
    toll = 1.e-6
    points = np.linspace(0, 1, (n+1) ) 
    R = 1
    P = 1
    control_points_2d = np.asmatrix(np.zeros([n+1,2]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_2d[:,0] = np.transpose(np.matrix([R*np.cos(1 * i * np.pi / (n + 1))for i in range(n+1)]))
    control_points_2d[:,1] = np.transpose(np.matrix([R*np.sin(1 * i * np.pi / (n + 1))for i in range(n+1)]))

    control_points_3d = np.asmatrix(np.zeros([n+1,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_3d[:,0] = np.transpose(np.matrix([R*np.cos(1 * i * np.pi / (n + 1))for i in range(n+1)]))
    control_points_3d[:,1] = np.transpose(np.matrix([R*np.sin(1 * i * np.pi / (n + 1))for i in range(n+1)]))
    control_points_3d[:,2] = np.transpose(np.matrix([P*i for i in range(n+1)]))

    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    dummy_arky_2d = ArcLengthParametrizer(vsl, control_points_2d)
    dummy_arky_3d = ArcLengthParametrizer(vsl, control_points_3d)
    length2d = dummy_arky_2d.compute_arclength()[-1,1]
    length3d = dummy_arky_3d.compute_arclength()[-1,1]
#    print (length2d)
#    print (n * np.sqrt(2))
    l2 = np.pi * R
    l3 = 2 * np.pi * np.sqrt(R * R + (P / (2 * np.pi)) * (P / (2 * np.pi)))
    print (length2d, l2)
    print (length3d, l3)
    assert abs(length2d - l2) < toll
    assert abs(length3d - l3) < toll
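
The reference values are closed-form arc lengths. In this test's notation (and assuming the 3D assertion is meant to check one full turn of a helix of radius $R$ and pitch $P$):

$$ l_2 = \pi R, \qquad l_3 = 2\pi \sqrt{R^2 + \left(\tfrac{P}{2\pi}\right)^2}, $$

that is, half the circumference of a circle of radius $R$, and the length of one helical turn.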
Example #6
    def solve(self, xk, iteraciones):
        bk_matrix = self.Identity(numpy.size(xk))
        #xk=numpy.transpose(numpy.asmatrix(xk))
        xk1 = xk[:]
        def f(xk):
            a=xk[0]*xk[0]
            return a

        self.function= f

        for x in range(iteraciones):
            #gxk = self.Gradiente(xk)
            gxk = scipy.optimize.approx_fprime(xk,self.function,0.01)
            gxkt=numpy.transpose(numpy.asmatrix(gxk))
            dk_vector = - bk_matrix * gxkt
            def deriv(punto):
                return scipy.optimize.approx_fprime(xk,self.function,0.01)
            tupla = scipy.optimize.line_search(self.function,deriv,numpy.transpose(numpy.asmatrix(xk)),dk_vector,numpy.asmatrix(gxk))
            ak=tupla[0]
            #ak, _, _ = scipy.optimize.line_search(self.function, xk, dk_vector, gxk, self.function(xk))
            #ak, fevals, gfevals = scipy.optimize.line_search(self.function, None, self.xk_vector, self.dk_vector, gxk)
            dkvec=numpy.array(numpy.transpose(dk_vector))
            vec0=dkvec[0]
            xk1 = xk + ak * vec0
            xk = numpy.array(xk1)
        return xk1
Example #7
    def handle_monocular(self, msg):

        (image, camera) = msg
        gray = self.mkgray(image)
        C = self.image_corners(gray)
        if C:
            linearity_rms = self.mc.linear_error(C, self.board)

            # Add in reprojection check
            image_points = C
            object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
            dist_coeffs = numpy.zeros((4, 1))
            camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2]  ],
                                           [ camera.P[4], camera.P[5], camera.P[6]  ],
                                           [ camera.P[8], camera.P[9], camera.P[10] ] ] )
            ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
            # Convert rotation into a 3x3 Rotation Matrix
            rot3x3, _ = cv2.Rodrigues(rot)
            # Reproject model points into image
            object_points_world = numpy.asmatrix(rot3x3) * (numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans))
            reprojected_h = camera_matrix * object_points_world
            reprojected   = (reprojected_h[0:2, :] / reprojected_h[2, :])
            reprojection_errors = image_points.squeeze().T - reprojected.T
            reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape))

            # Print the results
            print("Linearity RMS Error: %.3f Pixels      Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
        else:
            print('no chessboard')
Example #8
 def L(self, alpha, Q):
     a = alpha.reshape((len(alpha), 1))
     a = np.asmatrix(a)
     c = np.ones_like(a)
     c = np.asmatrix(c)
     a_T = a.getT()
     return 0.5*((a_T*Q)*a) - c.getT()*a
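
Written out, this is the quadratic objective of the SVM dual problem, with Q presumably the label-weighted kernel matrix $Q_{ij} = y_i y_j K(x_i, x_j)$ and $\mathbf{1}$ the all-ones vector:

$$ L(\alpha) = \tfrac{1}{2}\,\alpha^{\top} Q\,\alpha \;-\; \mathbf{1}^{\top}\alpha. $$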
Example #9
    def weights(self, X, Y, res):
        alphas = res.x

        #get weights from valid support vectors - probably should check the math here?
        w1 = 0.0
        w2 = 0.0
        sv_indexes = []
        for i in range(0, len(alphas)):
            if alphas[i] > 1.0e-03:
                w1 += alphas[i] * Y[i] * X[i][0]
                w2 += alphas[i] * Y[i] * X[i][1]
                self.sv_count += 1.0
                sv_indexes.append(i)

        W = [w1, w2]
        self.W = W
        #solve for b, or w0, using any SV
        Wm = np.asmatrix(W)
        try:
            n = sv_indexes[0]
        except IndexError:
            self.no_svs += 1
            return self.fit(X, Y)

        xn = np.asmatrix(X[n])
        xn = xn.getT()
        self.b = (1/Y[n]) - Wm*xn
Example #10
    def mulliken(self):
        """
        perform a mulliken population analysis
        """
        if self.overlap is not None:
            Smat_ao = np.asmatrix(self.overlap)
        else:
            raise Exception('mulliken analysis is missing the overlap matrix')

        population = {}
        for kind, mo in self.mo.items():
            population[kind] = np.zeros(len(self.basis_set.center_charges))
            Cmat = np.asmatrix(mo.coefficients)
            D = Cmat * np.diag(mo.occupations) * Cmat.T
            DS = np.multiply(D, Smat_ao)
            for i, (ao, basis_id) in enumerate(zip(np.asarray(DS), mo.basis_ids)):
                pop = np.sum(ao)
                cgto_tuple = mo.basis_set.contracted_ids[basis_id]
                center_id, l, n, m = cgto_tuple
                population[kind][center_id-1] += pop

        if self.unrestricted:
            population_total = population['alpha'] + population['beta']
        else:
            population_total = population['restricted']
        mulliken_charges = self.basis_set.center_charges - population_total
        return mulliken_charges
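
For reference, the accumulation above matches the standard Mulliken formula. With density matrix $D = C\,\mathrm{diag}(n)\,C^{\top}$ and overlap $S$, the charge assigned to atom $A$ is

$$ q_A = Z_A - \sum_{\mu \in A} (DS)_{\mu\mu}, $$

and since $S$ is symmetric, the row sums of the element-wise product $D \circ S$ used in the loop equal the diagonal entries of $DS$.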
Example #11
    def fit(self, bags, y):
        """
        @param bags : a sequence of n bags; each bag is an m-by-k array-like
                      object containing m instances with k features
        @param y : an array-like object of length n containing -1/+1 labels
        """
        self._bags = [np.asmatrix(bag) for bag in bags]
        y = np.asmatrix(y).reshape((-1, 1))
        bs = BagSplitter(self._bags, y)

        if self.verbose:
            print('Training initial sMIL classifier for sbMIL...')
        initial_classifier = sMIL(kernel=self.kernel, C=self.C, p=self.p, gamma=self.gamma,
                                  scale_C=self.scale_C, verbose=self.verbose,
                                  sv_cutoff=self.sv_cutoff)
        initial_classifier.fit(bags, y)
        if self.verbose:
            print('Computing initial instance labels for sbMIL...')
        f_pos = initial_classifier.predict(bs.pos_inst_as_bags)
        # Select nth largest value as cutoff for positive instances
        n = int(round(bs.L_p * self.eta))
        n = min(bs.L_p, n)
        n = max(bs.X_p, n)
        f_cutoff = sorted((float(f) for f in f_pos), reverse=True)[n - 1]

        # Label all except for n largest as -1
        pos_labels = -np.matrix(np.ones((bs.L_p, 1)))
        pos_labels[np.nonzero(f_pos >= f_cutoff)] = 1.0

        # Train on all instances
        if self.verbose:
            print('Retraining with top %d%% as positive...' % int(100 * self.eta))
        all_labels = np.vstack([-np.ones((bs.L_n, 1)), pos_labels])
        super(SIL, self).fit(bs.instances, all_labels)
Example #12
	def test_probabilities(self):
		grbm = GaussianRBM(7, 13)
		grbm.W = np.asmatrix(np.random.randn(grbm.X.shape[0], grbm.Y.shape[0]))
		grbm.b = np.asmatrix(np.random.rand(grbm.X.shape[0], 1))
		grbm.c = np.asmatrix(np.random.randn(grbm.Y.shape[0], 1))
		grbm.sigma = np.random.rand() * 0.5 + 0.5
		grbm.sigma = 1.

		examples_vis = np.asmatrix(np.random.randn(grbm.X.shape[0], 1000) * 2.)
		examples_hid = np.matrix(np.random.rand(grbm.Y.shape[0], 100) < 0.5)

		states_hid = utils.binary_numbers(grbm.Y.shape[0])

		# check that conditional probabilities are normalized
		logprobs = grbm._clogprob_hid_vis(examples_vis, states_hid, all_pairs=True)
		self.assertTrue(np.all(utils.logsumexp(logprobs, 1) < 1E-8))

		# test for consistency
		logprobs1 = grbm._ulogprob(examples_vis, examples_hid, all_pairs=True)
		logprobs2 = grbm._clogprob_vis_hid(examples_vis, examples_hid, all_pairs=True) \
		          + grbm._ulogprob_hid(examples_hid)
		logprobs3 = grbm._clogprob_hid_vis(examples_vis, examples_hid, all_pairs=True) \
		          + grbm._ulogprob_vis(examples_vis).T

		self.assertTrue(np.all(np.abs(logprobs1 - logprobs2) < 1E-10))
		self.assertTrue(np.all(np.abs(logprobs1 - logprobs3) < 1E-3))
Example #13
File: linalg.py Project: bjzu/MF
def elop(X, Y, op):
    """
    Compute element-wise operation of matrix :param:`X` and matrix :param:`Y`.
    
    :param X: First input matrix.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param Y: Second input matrix.
    :type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param op: Operation to be performed. 
    :type op: `func` 
    """
    try:
        zp1 = op(0, 1) if sp.isspmatrix(X) else op(1, 0)
        zp2 = op(0, 0) 
        zp = zp1 != 0 or zp2 != 0
    except:
        zp = 0
    if sp.isspmatrix(X) or sp.isspmatrix(Y):
        return _op_spmatrix(X, Y, op) if not zp else _op_matrix(X, Y, op)
    else:
        try:
            X[X == 0] = np.finfo(X.dtype).eps
            Y[Y == 0] = np.finfo(Y.dtype).eps
        except ValueError:
            return op(np.asmatrix(X), np.asmatrix(Y))
        return op(np.asmatrix(X), np.asmatrix(Y))
Example #14
    def _alpha_cal(self,observations):
        # Calculate alpha matrix and return it
        num_states = self.em_prob.shape[0]
        total_stages = len(observations)

        # Initialize values
        ob_ind = self.obs_map[ observations[0] ]
        alpha = np.asmatrix(np.zeros((num_states,total_stages)))
        c_scale = np.asmatrix(np.zeros((total_stages,1)))

        # Handle alpha base case
        alpha[:,0] = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ).transpose()
        # store scaling factors, scale alpha
        c_scale[0,0] = 1/np.sum(alpha[:,0])
        alpha[:,0] = alpha[:,0] * c_scale[0]
        # Iteratively calculate alpha(t) for all 't'
        for curr_t in range(1,total_stages):
            ob_ind = self.obs_map[observations[curr_t]]
            alpha[:,curr_t] = np.dot( alpha[:,curr_t-1].transpose() , self.trans_prob).transpose()
            alpha[:,curr_t] = np.multiply( alpha[:,curr_t].transpose() , np.transpose( self.em_prob[:,ob_ind] )).transpose()

            # Store scaling factors, scale alpha
            c_scale[curr_t] = 1/np.sum(alpha[:,curr_t])
            alpha[:,curr_t] = alpha[:,curr_t] * c_scale[curr_t]

        # return the computed alpha
        return (alpha,c_scale)
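
This is the scaled forward recursion for an HMM. In the usual notation (transition matrix $a_{ij}$, emissions $b_j(o_t)$, start distribution $\pi$), the code computes

$$ \alpha_1(i) = \pi_i\, b_i(o_1), \qquad \alpha_t(j) = b_j(o_t) \sum_i \hat\alpha_{t-1}(i)\, a_{ij}, \qquad c_t = \frac{1}{\sum_j \alpha_t(j)}, \qquad \hat\alpha_t = c_t\, \alpha_t, $$

storing the scale factors $c_t$ so that log-likelihoods can be recovered later without underflow.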
Example #15
    def _randomize(self):
        # Generate random transition, start and emission probabilities
        # Store observations and states
        num_obs = len(self.observations)
        num_states = len(self.states)

        # Generate a random list with sum of numbers = 1
        a = np.random.random(num_states)
        a /= a.sum()
        # Initialize start_prob
        self.start_prob = a

        # Initialize transition matrix
        # Fill each row with a list that sums up to 1
        self.trans_prob = np.asmatrix(np.zeros((num_states,num_states)))
        for i in range(num_states):
            a = np.random.random(num_states)
            a /= a.sum()
            self.trans_prob[i,:] = a

        # Initialize emission matrix
        # Fill each row with a list that sums up to 1
        self.em_prob = np.asmatrix(np.zeros((num_states,num_obs)))
        for i in range(num_states):
            a = np.random.random(num_obs)
            a /= a.sum()
            self.em_prob[i,:] = a

        return self.start_prob, self.trans_prob, self.em_prob 
Example #16
def calculate_transform_error(R, t, positions_1, positions_2):

  #TODO: Incorporate Bayes rule:

  # prob of R given point_correspondences =
  # prob of pts given R (if R is large, points are likely on one side of image?) *
  # prob of R (large R is more unlikely than small R for keypoints) /
  # prob of points (proportional to match strength, inversely prop. to distance)

  #add error to R if it is very oblique
  #(since large angles of R are unlikely)
  [axis, theta] = get_axis_angle(R)
  angle_likelihood = theta**2

  cum_error = 0
  num_points = positions_1.shape[0]

  for i in range(num_points):
    pt_1 = np.asmatrix(positions_1[i, :])
    pt_2 = np.asmatrix(positions_2[i, :])
    
    pt_1 = pt_1.transpose()
    pt_2 = pt_2.transpose()

    transformed_pt_1 = (np.dot(R, pt_1)) + t
    cum_error += np.linalg.norm((transformed_pt_1 - pt_2))**2

  return cum_error*angle_likelihood
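
In symbols, the returned score is the squared-residual sum weighted by the rotation-angle penalty that the comments describe:

$$ E(R, t) = \theta^2 \sum_i \big\lVert R\,p^{(1)}_i + t - p^{(2)}_i \big\rVert^2, $$

where $\theta$ is the angle extracted from $R$, so larger rotations are treated as less likely.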
Example #17
    def plot_pr(self, i0, rg, qmax=5., dmax=200., ax=None):
        """ calculate p(r) function
        use the given i0 and rg value to fill in the low q part of the gap in data
        truncate the high q end at qmax
        """
        if ax is None:
            ax = plt.gca()
        ax.set_xscale('linear')
        ax.set_yscale('linear')

        if self.qgrid[-1]<qmax: qmax=self.qgrid[-1]
        tqgrid = np.arange(0,qmax,qmax/len(self.qgrid))
        tint = np.interp(tqgrid,self.qgrid,self.data)

        tint[tqgrid*rg<1.] = i0*np.exp(-(tqgrid[tqgrid*rg<1.]*rg)**2/3.)
        #tint -= tint[-10:].sum()/10
        # Hanning window for reducing fringes in p(r)
        tw = np.hanning(2*len(tqgrid)+1)[len(tqgrid):-1]
        tint *= tw

        trgrid = np.arange(0,dmax,1.)
        kern = np.asmatrix([[rj**2*np.sinc(qi*rj/np.pi) for rj in trgrid] for qi in tqgrid])
        tt = np.asmatrix(tint*tqgrid**2).T
        tpr = np.reshape(np.array((kern.T*tt).T),len(trgrid))
        tpr /= tpr.sum()

        #plt.plot(tqgrid,tint,"g-")
        #tpr = np.fft.rfft(tint)
        #tx = range(len(tpr))
        ax.plot(trgrid,tpr,"g-")
        ax.set_xlabel("$r (\AA)$", fontsize='x-large')
        ax.set_ylabel("$P(r)$", fontsize='x-large')
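
The kernel built above discretizes the indirect Fourier transform from scattering intensity $I(q)$ to the pair-distance distribution; since np.sinc(x) is $\sin(\pi x)/(\pi x)$, the term $\mathrm{sinc}(qr/\pi)$ equals $\sin(qr)/(qr)$ and the matrix product implements

$$ p(r) \;\propto\; r^2 \int_0^{q_{\max}} q^2\, I(q)\, \frac{\sin(qr)}{qr}\, \mathrm{d}q, $$

after which p(r) is normalized to unit sum.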
Example #18
    def predict(self, image):
        """
        Predict the face
        """
        #image as row
        imageAsRow = np.asarray(image.reshape(image.shape[0]*image.shape[1],1),
                                np.float64)
        #project into the subspace
        p = self.pca.apply_to_feature_vector(RealFeatures(imageAsRow).get_feature_vector(0))

        #min value to find the face
        minDist = 1e100
        #class
        minClass = -1
        #search which face is the best match
        for sampleIdx in range(len(self._projections)):
            test = RealFeatures(np.asmatrix(p,np.float64).T)
            projection = RealFeatures(np.asmatrix(self._projections[sampleIdx],
                                        np.float64).T)
            dist = EuclideanDistance( test, projection).distance(0,0)

            if dist < minDist:
                minDist = dist
                minClass = self._labels[sampleIdx]

        return minClass
Example #19
def mk_stochastic(T):
    """
    % MK_STOCHASTIC Ensure the argument is a stochastic matrix, i.e., the sum over the last dimension is 1.
    % [T,Z] = mk_stochastic(T)
    %
    % If T is a vector, it will sum to 1.
    % If T is a matrix, each row will sum to 1.
    % If T is a 3D array, then sum_k T(i,j,k) = 1 for all i,j.
    
    % Set zeros to 1 before dividing
    % This is valid since S(j) = 0 iff T(i,j) = 0 for all j
    """

    T = np.asfarray(T)

    if T.ndim == 1 or (T.ndim == 2 and (T.shape[0] == 1 or T.shape[1] == 1)):  # isvector
        T, Z = normalise(T)
    elif T.ndim == 2:  # matrix
        T = np.asmatrix(T)
        Z = np.sum(T, 1)
        S = Z + (Z == 0)
        norm = np.tile(S, (1, T.shape[1]))
        T = np.divide(T, norm)
    else:  # multi-dimensional array
        ns = T.shape
        T = np.asmatrix(np.reshape(T, (np.prod(ns[0:-1]), ns[-1])))
        Z = np.sum(T, 1)
        S = Z + (Z == 0)
        norm = np.tile(S, (1, ns[-1]))
        T = np.divide(T, norm)
        T = np.reshape(np.asarray(T), ns)

    return T, Z
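
A quick check of the matrix branch; note the all-zero row is left at zero instead of causing a division by zero:

import numpy as np

T = np.array([[2., 2.],
              [0., 0.],
              [1., 3.]])
T2, Z = mk_stochastic(T)
print(T2)    # [[0.5  0.5 ], [0.   0.  ], [0.25 0.75]]
print(Z.T)   # row sums before normalization: [[4. 0. 4.]]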
Example #20
def dynamical_matrix(struc,protopairs,fconstants,k):
    """Calculate the dynamical matrix for a given k point.

    :struc: Crystal
    :protopairs: [(Site,Site)], (probably NN pairs)
    :fconstants: list of 4 floats, for each xx,xy,yx,yy forces
    :k: 2x1 matrix, k-point of interest
    :returns: 2x2 matrix
    """
    D=np.zeros((2*len(struc._basis),2*len(struc._basis)),dtype=complex)

    sg=struc.factor_group()
    dynbasisentries=dynamical_basis_entries(protopairs,sg,struc,fconstants)

    for stack,pair in dynbasisentries:
        #This is the 2x2 sum of the tensor basis with the force constants
        L=flatten_tensor_stack(stack)

        #This is the exponential part
        rn,tb0,tb1=dynamical_exp_inputs(pair[0],pair[1],struc._lattice)
        expstuff=dynamical_exp_elem(k,rn,tb0,tb1)

        sumvalue=L*expstuff
        D_entry=dynamical_pair_location(pair[0],pair[1],struc)

        D[D_entry]+=sumvalue

    D=np.asmatrix(D)
    assert is_hermitian(D)

    return np.asmatrix(D)
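
Schematically, the assembled object is the usual dynamical matrix (mass factors aside; the exact phase convention lives in dynamical_exp_elem, so this is only the generic form):

$$ D_{bb'}(\mathbf{k}) = \sum_n \Phi_{bb'}(\mathbf{r}_n)\, e^{\, i\,\mathbf{k}\cdot(\mathbf{r}_n + \boldsymbol\tau_{b'} - \boldsymbol\tau_b)}, $$

which is Hermitian by construction, hence the assertion before returning.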
Example #21
def main():
    fnm = 'prob3.data'
    data = md.read_data(fnm)
    D1 = data[0:8,].T
    D2 = data[8:,].T

    u1 = np.matrix((np.mean(D1[0,:]), np.mean(D1[1,:]))).T
    u2 = np.matrix((np.mean(D2[0,:]), np.mean(D2[1,:]))).T

    sigma1 = np.asmatrix(np.cov(D1, bias=1))
    sigma2 = np.asmatrix(np.cov(D2, bias=1))

    g1 = discrim_func(u1, sigma1)
    g2 = discrim_func(u2, sigma2)

    steps = 100
    x = np.linspace(-2,2,steps)
    y = np.linspace(-6,6,steps)

    X,Y = np.meshgrid(x,y)
    z = [g1(X[r,c], Y[r,c]) - g2(X[r,c], Y[r,c])
         for r in range(0,steps) for c in range(0,steps)]
    Z = np.array(z)
    px = X.ravel()
    py = Y.ravel()
    pz = Z.ravel()
    gridsize = 50
    plot = plt.subplot(111)
    plt.hexbin(px,py,C=pz, gridsize=gridsize, cmap=cm.jet, bins=None)
    cb = plt.colorbar()
    cb.set_label('g1 minus g2')
    return plot
Example #22
 def active(self, X):
     pre_h = np.zeros((1, self.h_size), dtype=theano.config.floatX)
     [R, Z, GH, H] = self.cell.active(X, pre_h)
     self.activation = np.asmatrix(H)
     self.R = np.asmatrix(R)
     self.Z = np.asmatrix(Z)
     self.GH = np.asmatrix(GH)
Example #23
    def train_map(self):
        if len(self.dataset) == 0:
            return
        
        X = self.dataset.inputs
        Y = self.dataset.targets
        
        # choose random center vectors from training set
        rnd_idx = np.random.permutation(X.shape[0])[:self.numCenters]
        self.centers = [X[i,:] for i in rnd_idx]

        # calculate activations of RBFs
        G = np.asmatrix(self._designMatrix(X))
        Y = np.asmatrix(Y)
        M = self.numCenters
        
        # create (reset) prior over weights w
        m0 = np.matrix(np.zeros((M, 1), float))
        S0 = np.matrix(self.alpha*np.eye(M))

        # calculate posterior (p. 153, eqns. 3.50, 3.51)
        self.SN = S0.I + self.beta*G.T*G
        self.mN = np.linalg.inv(self.SN) * (S0.I*m0 + self.beta*G.T*Y)

        self.W = np.asarray(self.mN)
Example #24
 def similarity(self):
     matrixvectout = numpy.asmatrix(self.vectout)
     # print("matrixvectout shape is ", matrixvectout.shape)
     matrixqvectsout = numpy.asmatrix(self.qvectsout.toarray())
     # print("matrix qvectsout shape is ", matrixqvectsout.shape)
     out = self.bm_vectobj.get_feature_names()
     self.similaritymatrix = numpy.asarray(matrixvectout*matrixqvectsout.T)
Example #25
 def similarity(self):
     matrixvectout = numpy.asmatrix(self.vectout)
     print("matrixvectout shape is ", matrixvectout.shape)
     matrixqvectsout = numpy.asmatrix(self.qvectsout.toarray())
     print("matrix qvectsout shape is ", matrixqvectsout.shape)
     self.similaritymatrix = numpy.asarray(matrixvectout*matrixqvectsout.T)
     print(self.similaritymatrix.shape)
Example #26
    def test_sum_squares(self):
        X = Variable(5, 4)
        P = np.asmatrix(np.random.randn(3, 5))
        Q = np.asmatrix(np.random.randn(4, 7))
        M = np.asmatrix(np.random.randn(3, 7))

        y = P*X*Q + M
        self.assertFalse(y.is_constant())
        self.assertTrue(y.is_affine())
        self.assertTrue(y.is_quadratic())
        self.assertTrue(y.is_dcp())

        s = sum_squares(y)
        self.assertFalse(s.is_constant())
        self.assertFalse(s.is_affine())
        self.assertTrue(s.is_quadratic())
        self.assertTrue(s.is_dcp())

        # Frobenius norm squared is indeed quadratic
        # but can't show quadraticity using recursive rules
        t = norm(y, 'fro')**2
        self.assertFalse(t.is_constant())
        self.assertFalse(t.is_affine())
        self.assertFalse(t.is_quadratic())
        self.assertTrue(t.is_dcp())
Example #27
def generate_dataset():
    global avg_trans_lis, type2_patient_lis, data_set, patient_ge_ag_lis, trans_dic, patient_smok_rec
    for it in avg_trans_lis:
        item_lis = it[1:6]
        # get the gender and age
        for a_it in patient_ge_ag_lis:
            if it[0] == a_it[0]:
                # gender and age
                item_lis.append(a_it[1])
                item_lis.append(a_it[2])
        # get the number of visiting doctors
        num_vis = len(trans_dic[it[0]])
        item_lis.append(num_vis)
        # get the smoking status record
        item_lis.append(patient_smok_rec[it[0]])
        # get the class tag
        for ite in type2_patient_lis:
            if it[0] == ite[0]:
                item_lis.append(int(ite[1]))
        # only store patients that have a smoking record
        # if int(patient_smok_rec[it[0]])>0:
        # print ".......................00000000000000000000000000000"
        data_set.append(item_lis)
    # transform the dataset to change the distance from euclidean distance
    # to mahalanobis distance
    # get the covariance matrix
    cov_matri = np.cov((np.asmatrix(data_set)[:, [0, 1, 2, 3, 4, 5, 6, 7, 8]]).transpose())
    temp_data_set = (np.dot(np.asmatrix(data_set)[:, [0, 1, 2, 3, 4, 5, 6, 7, 8]], cov_matri)).tolist()
    for it in range(0, len(data_set)):
        item_lis = temp_data_set[it][0:9]
        item_lis.append(data_set[it][9])
        data_set[it] = item_lis
Example #28
    def __init__(self, submod, V, eps_p_f, ti=None, tally=True, verbose=0):

        self.f_eval = submod.f_eval
        self.f = submod.f
        pm.StepMethod.__init__(self, [self.f, self.f_eval], tally=tally)

        self.children_no_data = copy.copy(self.children)
        if isinstance(eps_p_f, pm.Variable):
            self.children_no_data.discard(eps_p_f)
            self.eps_p_f = eps_p_f
        else:
            for epf in eps_p_f:
                self.children_no_data.discard(epf)
            self.eps_p_f = pm.Lambda("eps_p_f", lambda e=eps_p_f: np.hstack(e), trace=False)

        self.V = pm.Lambda("%s_vect" % V.__name__, lambda V=V: V * np.ones(len(submod.f_eval)))
        self.C_eval = submod.C_eval
        self.M_eval = submod.M_eval
        self.S_eval = submod.S_eval

        M_eval_shape = pm.utils.value(self.M_eval).shape
        C_eval_shape = pm.utils.value(self.C_eval).shape
        self.ti = ti or np.arange(M_eval_shape[0])

        # Work arrays
        self.scratch1 = np.asmatrix(np.empty(C_eval_shape, order="F"))
        self.scratch2 = np.asmatrix(np.empty(C_eval_shape, order="F"))
        self.scratch3 = np.empty(M_eval_shape)

        # Initialize hidden attributes
        self.accepted = 0.0
        self.rejected = 0.0
        self._state = ["rejected", "accepted", "proposal_distribution"]
        self._tuning_info = []
        self.proposal_distribution = None
Example #29
def GDA_N_D(X, M, cov1, cov2, cov3, classes,priors):
    dclass1 = []
    dclass2 = []
    dclass3 = []
    cov1 = np.asmatrix(cov1, dtype='float')
    cov2 = np.asmatrix(cov2, dtype='float')
    cov3 = np.asmatrix(cov3, dtype='float')
    X = np.asmatrix(X[:, 0:4], dtype='float')
    M = np.asmatrix(M, dtype='float')
    for i in range(0, len(X)):
        x = (X[i] - M[0])
        y = (X[i] - M[1])
        z = (X[i] - M[2])
        dclass1.append(-mth.log(np.linalg.det(cov1)) - 0.5 * (
            np.dot(np.dot(x, np.linalg.inv(cov1)), x.transpose())) + mth.log(priors[0]))
        dclass2.append(-mth.log(np.linalg.det(cov2)) - 0.5 * (
            np.dot(np.dot(y, np.linalg.inv(cov2)), y.transpose())) + mth.log(priors[1]))
        dclass3.append(-mth.log(np.linalg.det(cov3)) - 0.5 * (
            np.dot(np.dot(z, np.linalg.inv(cov3)), z.transpose())) + mth.log(priors[2]))
    predict_class = []
    for i, j, k in zip(dclass1, dclass2, dclass3):
        if i > j and i > k:
            predict_class.append(classes[0])
        elif j > i and j > k:
            predict_class.append(classes[1])
        else:
            predict_class.append(classes[2])
    return predict_class
Example #30
def k_means(data, k, init_center=None, max_iter=10, dist=lambda x, y: np.dot((x - y), (x - y).T), tau=1e-6):
    old_center = None
    if init_center is None:
        center = random.sample([np.asmatrix(d) for d in data], k)
    else:
        center = [np.asmatrix(c) for c in init_center]
    i = 0
    label = [
        np.argmin([dist(c, x) for c in center]) for x in data
        ]
    while i < max_iter and \
            (old_center is None or not all(np.allclose(o, c, tau) for o, c in zip(old_center, center))):
        old_center = center
        # find the mean of the cluster.
        center = [
            np.average([c[0] for c in cluster], axis=0)
            for _, cluster in itertools.groupby(sorted(zip(data, label),
                                                       key=lambda d: d[1]),
                                                key=lambda d: d[1])
            ]
        # in case the length of center does not equal k
        # (use a fresh variable so the outer iteration counter i is preserved)
        for _ in range(k - len(center)):
            center.append(max((min(dist(c, x) for c in center), x) for x in data)[1])
        # label the data.
        label = [
            np.argmin([dist(c, x) for c in center]) for x in data
            ]
        # increment iteration.
        i += 1
    return center, label
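
A quick smoke test on a made-up two-blob dataset (assuming the random, itertools, and numpy imports used by k_means are in scope):

import numpy as np

rng = np.random.RandomState(0)
data = np.vstack([rng.randn(20, 2) + 5.0,
                  rng.randn(20, 2) - 5.0])
centers, labels = k_means(data, k=2)
print(centers)
print(labels[:5], labels[-5:])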
Example #31
 def find_fiedler(L, x, normalized, tol):
     q = 2 if method == 'pcg' else min(4, L.shape[0] - 1)
     X = asmatrix(normal(size=(q, L.shape[0]))).T
     sigma, X = _tracemin_fiedler(L, X, normalized, tol, method)
     return sigma[0], X[:, 0]
Example #32
def Datahandler():
    global start_time, PI, pub, a, b, c, n, w1, w2, w3, mode, Veh_init_X, Veh_init_Y, X_offset, Y_offset, phase, STATES, start_state, Des_X, Des_Y, R_star, V_veh, Range, pub_vt  #,pub_Range#,vt
    WP = Trajectories()
    WP.Obj = [Trajectory()] * 1
    time_now = rospy.get_time()
    t = time_now - start_time
    Xv_init = start_state.x
    Yv_init = start_state.y

    #Veh_pos = np.asmatrix(np.zeros((3,1)))
    #Veh_pos[0] = STATES.x
    #Veh_pos[1] = STATES.y
    #Veh_pos[2] = STATES.z
    xx = STATES.x
    yy = STATES.y
    zz = STATES.z
    Veh_V = np.asmatrix(np.zeros((3, 1)))
    Veh_V[0] = STATES.u
    Veh_V[1] = STATES.v
    Veh_V[2] = STATES.w
    V_v = np.linalg.norm(Veh_V)
    #print V_v

    if t < 0.1:  # This time should be a multiple of 0.05 and cannot be less than 0.05, otherwise this node will shut down.
        RANGE = R_star  # Formula for Initial Range has to be defined here.
    else:
        RANGE = Range.data
    #print RANGE

    if mode == 0:

        #=================#
        #    Trajectory   #
        #=================#

        traj = Trajectory()
        traj.name = "WP"
        # Position
        traj.x = Veh_init_X
        traj.y = Veh_init_Y
        traj.z = n
        traj.psi = 0
        #-3.14/2
        # Velocity
        traj.xdot = 0
        traj.ydot = 0
        traj.zdot = 0
        traj.psidot = 0
        # Acceleration
        traj.xddot = 0
        traj.yddot = 0
        traj.zddot = 0
        traj.psiddot = 0
        vt = V_veh
    else:

        #Correction_X = (Xv_init-Des_X)
        #Correction_Y = (Yv_init-Des_Y)

        if RANGE < 0.008 * R_star:
            LOS = R_star
        else:
            LOS = RANGE

        #print LOS
        vt = (V_veh *
              R_star) / LOS  # CHECK IF (V_v) is Working or (V_veh) would work
        #if vt > 1.5*V_veh:
        #    Tar_vel = 0
        #else:
        #    Tar_vel = vt
        #print vt

        vt = np.convolve(vt, 0.5)
        w = vt / a

        #=================#
        #    Trajectory   #
        #=================#

        traj = Trajectory()
        traj.name = "TS"
        # Position

        traj.x = a * cos(phase + w * t)  #Correction_X+
        traj.y = b * sin(phase + w * t)  #Correction_Y+
        traj.z = n + c * sin(w * t)
        traj.psi = 0
        # Velocity
        traj.xdot = -a * w * sin(w * t)
        traj.ydot = b * w * cos(w * t)
        traj.zdot = c * w * cos(w * t)
        traj.psidot = 0
        # Acceleration
        traj.xddot = -a * w * w * cos(w * t)
        traj.yddot = -b * w * w * sin(w * t)
        traj.zddot = -c * w * w * sin(w * t)
        traj.psiddot = 0
        #print yy
        #Rxy   = sqrt((traj.x-xx)*(traj.x-xx)+(traj.y-yy)*(traj.y-yy))
    #==================#
    #     Publish      #
    #==================#

    WP.Obj = [traj]
    pub.publish(WP)
    pub_vt.publish(vt)
Example #33
#for all the directories
#for all the files in a directory
#wordLengthMatrix.py
#put matrix in a list for the author

#for each author's list of matrices
#compute centroid
#put each centroid in a list of centroids

#for each book
#calculate distance between the book and each of the centroids

import numpy as np
import analysis_functions as af

A = np.array([[1, 2], [3, 4]])
B = np.array([[0, 1], [3, 5]])
C = np.array([[1, 3], [6, 10]])

print(af.distance(np.asmatrix(A), np.asmatrix(C)))
#print(af.centroid([np.asmatrix(A),np.asmatrix(B),np.asmatrix(C)]))
Example #34

# In[66]:


# Generate equispaced floats in the interval [0, 2*pi]
x_train = np.linspace(0, 2*np.pi, N)
# Generate noise
mean = 0
std = 0.05
# Generate some numbers from the sine function
y = np.sin(x_train)
# Add noise
y += np.random.normal(mean, std, N)
#defining it as a matrix
y_train = np.asmatrix(y.reshape(N,1))


# # adding the bias and higher order terms to x

# In[72]:


x = x_train.reshape((N,1))
for i in range(0, poly_order-1):
    x = np.append(x, (x_train.reshape((N, 1)))**(i+2), axis=1)
x = np.asmatrix(x)
print(x.shape)
# print(x)

Example #35
 def objective(x):
     return (np.square(((np.linalg.lstsq(T, self.fkine(x))[0]) -
                        np.asmatrix(np.eye(4, 4))) * omega)).sum()
Example #36
def vec_linspace(a, b, num=10):
    return np.asmatrix([
        np.linspace(a[0, 0], b[0, 0], num=num),
        np.linspace(a[0, 1], b[0, 1], num=num),
        np.linspace(a[0, 2], b[0, 2], num=num)
    ])
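
For instance, interpolating between two row-vector waypoints (a small made-up example):

import numpy as np

a = np.asmatrix([0.0, 0.0, 0.0])
b = np.asmatrix([1.0, 2.0, 3.0])
print(vec_linspace(a, b, num=5))   # a 3x5 matrix, one coordinate per row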
Example #37
SHOULDER_LINK = 'shoulder_Link'
UPPER_ARM_LINK = 'upperArm_Link'
FOREARM_LINK = 'foreArm_Link'
WRIST1_LINK = 'wrist1_Link'
WRIST2_LINK = 'wrist2_Link'
WRIST3_LINK = 'wrist3_Link'

# Set end effector constants
INITIAL_JOINTS = [0, 0, 0, 0, 0, 0]

# Set the number of goal points. 1 by default for a single end effector tip.
NUM_EE_POINTS = 1
EE_POINTS = np.array([[0, 0, 0]])

# Specify a goal state in cartesian coordinates.
EE_POS_TGT = np.asmatrix([.30, -0.30, .80])
"""UR 10 Examples:
EE_POS_TGT = np.asmatrix([.29, .52, .62]) # Target where all joints are 0.
EE_POS_TGT = np.asmatrix([.65, .80, .30]) # Target in positive octant near ground.
EE_POS_TGT = np.asmatrix([.70, .70, .50]) # Target in positive octant used for debugging convergence.
The Gazebo sim converges to the above point with non-action costs: 
(-589.75, -594.71, -599.54, -601.54, -602.75, -603.28, -604.28, -604.79, -605.55, -606.29)
Distance from Goal: (0.014, 0.005, -0.017)
"""

# Set to identity unless you want the goal to have a certain orientation.
EE_ROT_TGT = np.asmatrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

# Only edit these when editing the robot joints and links.
# The lengths of these arrays define numerous parameters in GPS.
JOINT_ORDER = [
Example #38
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

x_train = np.linspace(-1, 1, 10)
y_train = np.asmatrix([0, 0, 0, 0, 1, 1, 1, 1, 1, 1]).T
x_train = np.asmatrix(x_train).T

n_dim = x_train.shape[1]

lr = tf.constant(0.01, dtype=tf.float32)
num_epochs = 3000

x = tf.placeholder(tf.float32, [x_train.shape[0], n_dim])
y = tf.placeholder(tf.float32, [x_train.shape[0], 1])
w = tf.Variable(np.ones([n_dim, 1]), dtype=tf.float32)
b = tf.Variable(0, dtype=tf.float32)
init = tf.global_variables_initializer()

yhat = 1. / (1 + tf.exp(-(tf.matmul(x, w) + b)))  # logistic sigmoid
loss = tf.reduce_mean(tf.square(yhat - y))
train_op = tf.train.GradientDescentOptimizer(lr).minimize(loss)

sess = tf.Session()
sess.run(init)

loss_history = []
for epoch in range(num_epochs):
    sess.run(train_op, feed_dict={x: x_train, y: y_train})
    loss_history.append(sess.run(loss, feed_dict={x: x_train, y: y_train}))
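
A small follow-up sketch to inspect the result (this assumes the TF1-style session and the tensors defined above are still live):

w_fit, b_fit = sess.run([w, b])
print('w =', w_fit.ravel(), 'b =', b_fit)
plt.plot(loss_history)
plt.xlabel('epoch')
plt.ylabel('MSE loss')
plt.show()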
Example #39
def getHomography(histogram, capturePoints, canonicalPoints):    
    
    NH = 0
    
    orderIdx = np.argsort(histogram)
    
    bestMarker = orderIdx[len(orderIdx)-1]
    secondBestMarker = orderIdx[len(orderIdx)-2]
    

    
    H1 = None
    H2 = None
    bestRatio = None
    
    mat_canonical_points1 = None
    mat_capture_points1 = None
    mat_canonical_points2 = None
    mat_capture_points2 = None
    
    #Get RANSAC homography for the best marker
    if histogram[bestMarker] >= 4:
        H1 = cv.CreateMat(3, 3, cv.CV_32FC1)
        
        mat_canonical_points1 = cv.CreateMat(len(canonicalPoints[bestMarker]), 3, cv.CV_32FC1)
        mat_capture_points1 = cv.CreateMat(len(canonicalPoints[bestMarker]), 3, cv.CV_32FC1)

       
              
        for i in range(0,len(canonicalPoints[bestMarker])):
            mat_canonical_points1[i,0] = canonicalPoints[bestMarker][i][0]
            mat_canonical_points1[i,1] = canonicalPoints[bestMarker][i][1]
            mat_canonical_points1[i,2] = 1
            
            mat_capture_points1[i,0] = capturePoints[bestMarker][i][0]
            mat_capture_points1[i,1] = capturePoints[bestMarker][i][1]
            mat_capture_points1[i,2] = 1
        
        
        
        
        cv.FindHomography(mat_canonical_points1, mat_capture_points1, H1, cv.CV_RANSAC, 10)
        
        NH+=1


        
    #Get RANSAC homography for the second best marker
    if histogram[secondBestMarker] >= 4:
        H2 = cv.CreateMat(3, 3, cv.CV_32FC1)
        mat_canonical_points2 = cv.CreateMat(len(canonicalPoints[secondBestMarker]), 3, cv.CV_32FC1)
        mat_capture_points2 = cv.CreateMat(len(canonicalPoints[secondBestMarker]), 3, cv.CV_32FC1)
        
        for i in range(0,len(canonicalPoints[secondBestMarker])):
            mat_canonical_points2[i,0] = canonicalPoints[secondBestMarker][i][0]
            mat_canonical_points2[i,1] = canonicalPoints[secondBestMarker][i][1]
            mat_canonical_points2[i,2] = 1
            
            mat_capture_points2[i,0] = capturePoints[secondBestMarker][i][0]
            mat_capture_points2[i,1] = capturePoints[secondBestMarker][i][1]
            mat_capture_points2[i,2] = 1
        
        
        
        
        cv.FindHomography(mat_canonical_points2, mat_capture_points2, H2, cv.CV_RANSAC, 10)
        
        NH+=1
    
    THRESHOLD = 10

    bestH = None
    bestImage = None 
    
    
    if H1 is not None:
        ##Calculate outliers and inliers for H1
        i1 = 0.
        o1 = 0.
        
        H1np = np.asarray(H1)
        
        
        
        for i in range(0,len(canonicalPoints[bestMarker])):
            
            
            tempPoint = np.dot(H1np,np.asmatrix(mat_canonical_points1[i]).T)
            
            dist = euclidDist([tempPoint[0]/tempPoint[2],tempPoint[1]/tempPoint[2]], capturePoints[bestMarker][i])
            
            if dist < THRESHOLD:
                i1+=1.
            else:
                o1+=1.           
    
    
    if H2 is not None:
        ##Calculate outliers and inliers for H2
        i2 = 0.
        o2 = 0.
        
        H2np = np.asarray(H2)
        
        for i in range(0,len(canonicalPoints[secondBestMarker])):
           
            tempPoint = np.dot(H2np,np.asmatrix(mat_canonical_points2[i]).T)
            
            dist = euclidDist([tempPoint[0]/tempPoint[2],tempPoint[1]/tempPoint[2]], capturePoints[secondBestMarker][i])
            
            if dist < THRESHOLD:
                i2+=1.
            else:
                o2+=1. 
        
        
        #print("I/O: " + str(i1) + ", " + str(o1) + ", " + str(i2) + ", " + str(o2))

        #Compare ratios of inliers and outliers (guard against H1 being None,
        #in which case i1/o1 were never computed)
        if H1 is not None:
            if i1/(i1+o1) < i2/(i2+o2):
                bestH = H2
                bestImage = secondBestMarker
                bestRatio = i2/(i2+o2)
            else:
                bestH = H1
                bestImage = bestMarker
                bestRatio = i1/(i1+o1)
        else:
            bestH = H2
            bestImage = secondBestMarker
            bestRatio = i2/(i2+o2)

    if H1 is not None and H2 is None:
        bestH = H1
        bestImage = bestMarker
        bestRatio = i1/(i1+o1)
        

    
    return bestH, bestImage, NH, bestRatio
Example #40
def _tracemin_fiedler(L, X, normalized, tol, method):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
    """
    n = X.shape[0]

    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector of
        # its nullspace.
        e = sqrt(L.diagonal())
        D = spdiags(1. / e, [0], n, n, format='csr')
        L = D * L * D
        e *= 1. / norm(e, 2)

    if not normalized:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= X[:, j].sum() / n
    else:

        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= dot(X[:, j], e) * e

    if method is None:
        method = 'pcg'
    if method == 'pcg':
        # See comments below for the semantics of P and D.
        def P(x):
            x -= asarray(x * X * X.T)[0, :]
            if not normalized:
                x -= x.sum() / n
            else:
                x = daxpy(e, x, a=-ddot(x, e))
            return x

        solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
    elif method == 'chol' or method == 'lu':
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = csc_matrix(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to modified. Changing to infinity forces a zero in the
        # corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float('inf')
        solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
    else:
        raise nx.NetworkXError('unknown linear system solver.')

    # Initialize.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    W = asmatrix(ndarray(X.shape, order='F'))

    while True:
        # Orthonormalize X.
        X = qr(X)[0]
        # Compute iteration matrix H.
        W[:, :] = L * X
        H = X.T * W
        sigma, Y = eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X *= Y
        # Test for convergence exploiting the fact that L * X == W * Y.
        res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
        if res < tol:
            break
        # Depending on the linear solver to be used, two mathematically
        # equivalent formulations are used.
        if method == 'pcg':
            # Compute X = X - (P * L * P) \ (P * L * X) where
            # P = I - [e X] * [e X]' is a projection onto the orthogonal
            # complement of [e X].
            W *= Y  # L * X == W * Y
            W -= (W.T * X * X.T).T
            project(W)
            # Compute the diagonal of P * L * P as a Jacobi preconditioner.
            D = L.diagonal().astype(float)
            D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
            D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
            D[D < tol * Lnorm] = 1.
            D = 1. / D
            # Since TraceMIN is globally convergent, the relative residual can
            # be loose.
            X -= solver.solve(W, 0.1)
        else:
            # Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
            # projection on the nullspace of L, which will be eliminated.
            W[:, :] = solver.solve(X)
            project(W)
            X = (inv(W.T * X) * W.T).T  # Preserves Fortran storage order.

    return sigma, asarray(X)
Example #41
 def test_boolean_indexing(self):
     A = np.arange(6)
     A.shape = (3, 2)
     x = asmatrix(A)
     assert_array_equal(x[:, np.array([True, False])], x[:, 0])
     assert_array_equal(x[np.array([True, False, False]),:], x[0,:])
Example #42
def main(env_name, seed=1, run_name=None):
    # Read hyperparameters
    try:
        globals().update(config_env[env_name])
    except KeyError as e:
        print()
        print('\033[93m No hyperparameters defined for \"' + env_name + '\". Using default one.\033[0m')
        print()
        pass

    # Init environment
    env = gym.make(env_name)
    if filter_env:
        env = make_filtered_env(env)

    # Init seeds
    seed = int(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)
    env.seed(seed)

    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth=True
    session = tf.Session(config=config_tf)

    # Init placeholders
    obs_size = env.observation_space.shape[0]
    act_size = env.action_space.shape[0]
    obs = tf.placeholder(dtype=precision, shape=[None, obs_size], name='obs')

    # Build pi
    act_bound = np.asscalar(env.action_space.high[0])
    assert act_bound == -np.asscalar(env.action_space.low[0])
    mean = MLP([obs], pi_sizes+[act_size], pi_activations+[None], 'pi_mean')
    with tf.variable_scope('pi_std'): std = tf.Variable(std_noise * tf.ones([1, act_size], dtype=precision), dtype=precision)
    pi = MVNPolicy(session, obs, mean.output[0], std, act_bound=act_bound)

    # Build V
    v = MLP([obs], v_sizes+[1], v_activations+[None], 'v')

    # V optimization
    target_v = tf.placeholder(dtype=precision, shape=[None, 1], name='target_v')
    loss_v = tf.losses.mean_squared_error(v.output[0], target_v)
    optimize_v = tf.train.AdamOptimizer(lrate_v).minimize(loss_v)

    # pi optimization
    advantage = tf.placeholder(dtype=precision, shape=[None, 1], name='advantage')
    td_reg = tf.placeholder(dtype=precision, shape=[None, 1], name='td_reg')
    old_log_probs = tf.placeholder(dtype=precision, shape=[None, 1], name='old_log_probs')
    prob_ratio = tf.exp(pi.log_prob - old_log_probs)
    clip_pr = tf.clip_by_value(prob_ratio, 1.-e_clip, 1.+e_clip)
    alpha = tf.placeholder(dtype=precision, name='alpha') # TD-regularization coefficient
    loss_pi = -tf.reduce_mean(tf.minimum(tf.multiply(prob_ratio, advantage), tf.multiply(clip_pr, advantage))) + tf.reduce_mean(tf.maximum(tf.multiply(prob_ratio, alpha*td_reg), tf.multiply(clip_pr, alpha*td_reg)))
    optimize_pi = tf.train.AdamOptimizer(lrate_pi).minimize(loss_pi)

    # Init variables
    session.run(tf.global_variables_initializer())
    mean.reset(session, 0.)
    v.reset(session, 0.)

    alpha_value = 0.1
    alpha_decay = 0.99999

    logger = LoggerData('ppo_tdreg_' + str(alpha_value) + '_' + str(alpha_decay), env_name, run_name)
    print()
    print('    V LOSS                         PI LOSS                        ENTROPY        RETURN          MSTDE')
    for itr in range(maxiter):
        paths = collect_samples(env, policy=pi.draw_action, min_trans=min_trans_per_iter)
        nb_trans = len(paths["rwd"])

        # Update V
        for epoch in range(epochs_v):
            v_values = session.run(v.output[0], {obs: paths["obs"]})
            a_values = gae(paths, v_values, gamma, lambda_trace) # compute the advantage
            target_values = v_values + a_values # generalized Bellman operator
            if epoch == 0:
                v_loss_before = session.run(loss_v, {obs: paths["obs"], target_v: target_values})
            for batch_idx in minibatch_idx_list(batch_size, nb_trans):
                session.run(optimize_v, {obs: paths["obs"][batch_idx], target_v: target_values[batch_idx]})
        v_loss_after = session.run(loss_v, {obs: paths["obs"], target_v: target_values})

        # Estimate advantages and TD error
        v_values = session.run(v.output[0], {obs: paths["obs"]})
        a_values = gae(paths, v_values, gamma, lambda_trace)
        td_values = gae(paths, v_values, gamma, 0)
        mstde = np.mean(td_values**2)

        a_values = (a_values - np.mean(a_values)) / np.std(a_values)
        td_values = (td_values - np.mean(td_values)) / np.std(td_values)
        reg_values = td_values**2
        reg_values = (reg_values - np.mean(reg_values)) / np.std(reg_values)

        # Update pi
        old_lp = pi.get_log_prob(paths["obs"], paths["act"])
        pi_loss_before = session.run(loss_pi, {obs: paths["obs"], pi.act: paths["act"], old_log_probs: old_lp, advantage: a_values, alpha: alpha_value, td_reg: reg_values})
        for epoch in range(epochs_pi):
            for batch_idx in minibatch_idx_list(batch_size, nb_trans):
                dct_pi = {obs: paths["obs"][batch_idx],
                            pi.act: paths["act"][batch_idx],
                            old_log_probs: old_lp[batch_idx],
                            advantage: a_values[batch_idx],
                            td_reg: reg_values[batch_idx],
                            alpha: alpha_value}
                session.run(optimize_pi, dct_pi)
        pi_loss_after = session.run(loss_pi, {obs: paths["obs"], pi.act: paths["act"], old_log_probs: old_lp, advantage: a_values, alpha: alpha_value, td_reg: reg_values})

        alpha_value *= alpha_decay

        # Evaluate pi
        # avg_rwd = evaluate_policy(env, policy=pi.draw_action_det, min_paths=paths_eval)
        avg_rwd = np.sum(paths["rwd"]) / paths["nb_paths"]
        entr = pi.estimate_entropy(paths["obs"])
        print('%d | %e -> %e   %e -> %e   %e   %e   %e   ' % (itr, v_loss_before, v_loss_after, pi_loss_before, pi_loss_after, entr, avg_rwd, mstde), flush=True)
        with open(logger.fullname, 'ab') as f:
            np.savetxt(f, np.asmatrix([v_loss_before, v_loss_after, pi_loss_before, pi_loss_after, entr, avg_rwd, mstde])) # save data
Example #43
 def test_scalar_indexing(self):
     x = asmatrix(np.zeros((3, 2), float))
     assert_equal(x[0, 0], x[0][0])
Example #44
 def test_list_indexing(self):
     A = np.arange(6)
     A.shape = (3, 2)
     x = asmatrix(A)
     assert_array_equal(x[:, [1, 0]], x[:, ::-1])
     assert_array_equal(x[[2, 1, 0],:], x[::-1,:])
Example #45
 def test_asmatrix(self):
     A = np.arange(100).reshape(10, 10)
     mA = asmatrix(A)
     A[0, 0] = -10
     assert_(A[0, 0] == mA[0, 0])
Example #46
 def test_row_column_indexing(self):
     x = asmatrix(np.eye(2))
     assert_array_equal(x[0,:], [[1, 0]])
     assert_array_equal(x[1,:], [[0, 1]])
     assert_array_equal(x[:, 0], [[1], [0]])
     assert_array_equal(x[:, 1], [[0], [1]])
Example #47
 def to_dense(self):
     t = self.contract_tags(...)
     t.fuse([('k', list(map(self.site_ind_id.format, self.sites)))],
            inplace=True)
     return np.asmatrix(t.data.reshape(-1, 1))
Example #48
 def test_basic(self):
     x = asmatrix(np.zeros((3, 2), float))
     y = np.zeros((3, 1), float)
     y[:, 0] = [0.8, 0.2, 0.3]
     x[:, 1] = y > 0.5
     assert_equal(x, [[0, 1], [0, 0], [0, 0]])
Example #49
def main():
    lqr.init()
    clock = pygame.time.Clock()

    pos = np.asmatrix([0., 1., 0.]).T
    vel = np.asmatrix([3., 0., 3.]).T

    visualizer = vis.Visualizer()

    fps = 60
    dt = 1.0 / fps
    t = 0.0
    i = 0

    maxon_motor = motor.Motor(
        resistance=1.03,
        torque_constant=0.0335,
        speed_constant=0.0335,
        rotor_inertia=135*1e-3*(1e-2**2))
    gearbox = motor.Gearbox(gear_ratio=20.0 / 60.0, gear_inertia=0)
    gear_motor = motor.GearMotor(maxon_motor, gearbox)
    our_robot = robot.Robot(
        robot_mass=6.5,
        robot_radius=0.085,
        gear_motor=gear_motor,
        robot_inertia=6.5*0.085*0.085*0.5,
        wheel_radius=0.029,
        wheel_inertia=2.4e-5,
        wheel_angles = np.deg2rad([45, 135, -135, -45]))
        # wheel_angles=np.deg2rad([60, 129, -129, -60]))

    controller = Controller(dt, our_robot)

    while not visualizer.close:
        visualizer.update_events()

        v = 3

        rx = np.asmatrix([np.sin(v * t), np.cos(v * t / 3), v * np.sin(t)]).T
        rv = v * np.asmatrix([np.cos(v * t), -np.sin(v * t / 3) / 3, np.cos(t)]).T
        ra = v ** 2 * np.asmatrix([-np.sin(v * t), -np.cos(v * t / 3) / 9, -np.sin(t) / v]).T

        vel_b = np.linalg.inv(rotation_matrix(pos[2,0])) * vel
        rv_b = np.linalg.inv(rotation_matrix(pos[2,0])) * rv

        rx_b = np.linalg.inv(rotation_matrix(pos[2,0])) * rx
        pos_b = np.linalg.inv(rotation_matrix(pos[2,0])) * pos

        # u = controller.feedforward_control(pos, vel, rx, rv, ra)
        state_vector = np.vstack((rx_b-pos_b,rv_b-vel_b))
        u = lqr.controlLQR(state_vector) # +=
        u = u[2:,0]
        # u = controller.control(pos, vel, rx, rv, ra)

        vdot_b = our_robot.forward_dynamics_body(vel_b, u)
        vdot = our_robot.forward_dynamics_world(pos, vel, u)

        # print(u - our_robot.inverse_dynamics_body(vel_b, vdot_b))

        robot.sysID(u[0,0], u[1,0], u[2,0], u[3,0], vel_b[0,0], vel_b[1,0], vel_b[2,0], vdot_b[0,0], vdot_b[1,0], vdot_b[2,0])
        
        visualizer.draw(pos, rx)
        pos += dt * vel + 0.5 * vdot * dt ** 2
        vel += dt * vdot
        # robot_data_line = [vdot_b[0,0], vdot_b[1,0], vdot_b[2,0], vel_b[0,0], vel_b[1,0], vel_b[2,0], u[0,0], u[1,0], u[2,0], u[3,0]]
        # robot_data.append(robot_data_line)

        clock.tick(60)
        t += dt
        i += 1
        if t > 20:
            print("t = 20")
            t = 0.0
            i = 0
            pos = np.asmatrix([0, 1, 0.]).T
            vel = np.asmatrix([3, 0, 3.]).T
            controller.reset()
    robot.main()
Example #50
 def to_dense(self):
     t = self.TN.contract_tags(...)
     t.fuse([('k', list(map(self.lower_ind_id.format, self.TN.sites))),
             ('b', list(map(self.upper_ind_id.format, self.TN.sites)))],
            inplace=True)
     return np.asmatrix(t.data)
Example #51
def test_PCA():
    '''(10 points) PCA'''

    #-------------------------------
    # an example matrix
    #X = np.random.random((100,10)) # generate an N = 100, D = 10 random data matrix
    X = np.mat([[0., 2.],
                [2., 0.],
                [1., 1.]])

    # call the function
    Xp, P = PCA(X)

    assert type(P) == np.matrixlib.defmatrix.matrix
    assert type(Xp) == np.matrixlib.defmatrix.matrix
    assert Xp.shape ==(3,1)
    assert P.shape ==(2,1)


    P_true = np.mat([[ .707],
                     [-.707]])
    Xp_true = np.mat([[-1.414],
                      [ 1.414],
                      [ 0    ]])

    # test the result
    assert np.allclose(P,P_true, atol=1e-2) or np.allclose(P,-P_true, atol=1e-2)
    assert np.allclose(Xp, Xp_true, atol=1e-2) or np.allclose(Xp, -Xp_true, atol=1e-2)

    #-------------------------------
    # an example matrix
    X = np.mat([[0., 2., 2.],
                [2., 0., 0]])

    # call the function
    Xp, P = PCA(X)

    assert Xp.shape ==(2,1)
    assert P.shape ==(3,1)

    # test the result
    P_true = np.mat([[ .577],
                     [-.577],
                     [-.577]])
    Xp_true = np.mat([[-1.73205081],
                      [ 1.73205081]])

    assert np.allclose(P,P_true, atol=1e-2) or np.allclose(P,-P_true, atol=1e-2)
    assert np.allclose(Xp,Xp_true, atol=1e-2) or np.allclose(Xp,-Xp_true, atol=1e-2)

    #-------------------------------
    # an example matrix
    #X = np.random.random((100,10)) # generate an N = 100, D = 10 random data matrix
    X = np.mat([[2., 2.],
                [0., 0]])


    # call the function
    Xp, P = PCA(X,2)

    assert Xp.shape ==(2,2)
    assert P.shape ==(2,2)

    P_true = np.mat([[.707, -.707],
                     [.707,  .707]])

    Xp_true = np.mat([[ 1.414, 0],
                      [-1.414, 0]])
    assert np.allclose(P,P_true, atol=1e-2) or np.allclose(P,-P_true, atol=1e-2)
    assert np.allclose(Xp,Xp_true, atol=1e-2) or np.allclose(Xp,-Xp_true, atol=1e-2)
    #-------------------------------
    # test on random matrix
    for _ in range(20):
        n = np.random.randint(3,40)
        p = np.random.randint(3,40)
        k = np.random.randint(p-1)+1
        X = np.random.random((n,p))
        X = np.asmatrix(X)
        # call the function
        Xp, P = PCA(X,k)

        # test the result
        assert Xp.shape == (n,k)
        assert P.shape == (p,k)
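
# The PCA function exercised above is implemented elsewhere; the following
# is only a minimal eigendecomposition sketch consistent with these
# assertions (top-k directions P as a p-by-k np.matrix, projections Xp as
# an n-by-k np.matrix, with each column determined only up to a sign flip):
def PCA_sketch(X, k=1):
    X = np.asmatrix(X)
    Xc = X - X.mean(axis=0)            # center each feature
    C = np.asarray(Xc.T * Xc)          # scatter (unnormalized covariance)
    vals, vecs = np.linalg.eigh(C)     # eigenvalues in ascending order
    idx = np.argsort(vals)[::-1][:k]   # pick the top-k components
    P = np.asmatrix(vecs[:, idx])
    Xp = Xc * P                        # project the centered data
    return Xp, P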
Example #52
def to_numpy_matrix(G,
                    nodelist=None,
                    dtype=None,
                    order=None,
                    multigraph_weight=sum,
                    weight='weight'):
    """Return the graph adjacency matrix as a NumPy matrix.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in `nodelist`.
       If `nodelist` is None, then the ordering is produced by G.nodes().

    dtype : NumPy data type, optional
        A valid single NumPy data type used to initialize the array. 
        This must be a simple type such as int or numpy.float64 and
        not a compound data type (see to_numpy_recarray)
        If None, then the NumPy default is used.

    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. If None, then the NumPy default 
        is used.

    multigraph_weight : {sum, min, max}, optional
        An operator that determines how weights in multigraphs are handled.
        The default is to sum the weights of the multiple edges.

    weight : string or None, optional (default='weight')
        The edge attribute that holds the numerical value used for 
        the edge weight.  If None then all edge weights are 1.


    Returns
    -------
    M : NumPy matrix
       Graph adjacency matrix.

    See Also
    --------
    to_numpy_recarray, from_numpy_matrix

    Notes
    -----
    The matrix entries are assigned with weight edge attribute. When
    an edge does not have the weight attribute, the value of the entry is 1.
    For multiple edges, the values of the entries are the sums of the edge
    attributes for each edge.

    When `nodelist` does not contain every node in `G`, the matrix is built 
    from the subgraph of `G` that is induced by the nodes in `nodelist`.

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0,1,weight=2)
    >>> G.add_edge(1,0)
    >>> G.add_edge(2,2,weight=3)
    >>> G.add_edge(2,2)
    >>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
    matrix([[ 0.,  2.,  0.],
            [ 1.,  0.,  0.],
            [ 0.,  0.,  4.]])
    """
    try:
        import numpy as np
    except ImportError:
        raise ImportError(
            "to_numpy_matrix() requires numpy: http://scipy.org/ ")

    if nodelist is None:
        nodelist = G.nodes()

    nodeset = set(nodelist)
    if len(nodelist) != len(nodeset):
        msg = "Ambiguous ordering: `nodelist` contained duplicates."
        raise nx.NetworkXError(msg)

    nlen = len(nodelist)
    undirected = not G.is_directed()
    index = dict(zip(nodelist, range(nlen)))

    if G.is_multigraph():
        # Handle MultiGraphs and MultiDiGraphs.
        # Start from an array of NaNs; any NaN left at the end is converted
        # to 0. NaNs allow nan-aware sum/min/max across parallel edges.
        M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
        # use numpy's nan-aware aggregation operations
        operator = {sum: np.nansum, min: np.nanmin, max: np.nanmax}
        try:
            op = operator[multigraph_weight]
        except KeyError:
            raise ValueError('multigraph_weight must be sum, min, or max')

        for u, v, attrs in G.edges_iter(data=True):
            if (u in nodeset) and (v in nodeset):
                i, j = index[u], index[v]
                e_weight = attrs.get(weight, 1)
                M[i, j] = op([e_weight, M[i, j]])
                if undirected:
                    M[j, i] = M[i, j]
        # convert any nans to zeros
        M = np.asmatrix(np.nan_to_num(M))
    else:
        # Graph or DiGraph, this is much faster than above
        M = np.zeros((nlen, nlen), dtype=dtype, order=order)
        for u, nbrdict in G.adjacency_iter():
            for v, d in nbrdict.items():
                try:
                    M[index[u], index[v]] = d.get(weight, 1)
                except KeyError:
                    pass
        M = np.asmatrix(M)
    return M
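
# A quick usage sketch of the multigraph_weight hook (assuming a networkx
# version that still ships to_numpy_matrix; later releases removed it):
# with multigraph_weight=min, parallel edges keep their smallest weight.
#
#     >>> G = nx.MultiGraph()
#     >>> G.add_edge(0, 1, weight=2)
#     >>> G.add_edge(0, 1, weight=7)
#     >>> nx.to_numpy_matrix(G, multigraph_weight=min)
#     matrix([[ 0.,  2.],
#             [ 2.,  0.]])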
Example #53
    def __init__(self, W1, W2, b1, b2):
        self.W1 = np.asmatrix(W1)
        self.W2 = np.asmatrix(W2)
        self.b1 = b1
        self.b2 = b2
        "/home/emily2h/Summer/cg-abstracts/data_encompassing/ar/ar_{}{}1".
        format(flag2, flag), "rb") as f:
    label1 = pickle.load(f)
with open(
        "/home/emily2h/Summer/cg-abstracts/data_encompassing/ar/ar_{}{}2".
        format(flag2, flag), "rb") as f:
    label2 = pickle.load(f)
with open(
        "/home/emily2h/Summer/cg-abstracts/data_encompassing/ar/ar_{}{}3".
        format(flag2, flag), "rb") as f:
    label3 = pickle.load(f)
with open(
        "/home/emily2h/Summer/cg-abstracts/data_encompassing/ar/ar_{}{}4".
        format(flag2, flag), "rb") as f:
    label4 = pickle.load(f)

print(label0.shape, label1.shape, label2.shape, label3.shape, label4.shape)

mat = np.asmatrix([label0, label1, label2, label3, label4]).transpose()
print(mat)
matfunc = np.vectorize(func)
mat = matfunc(mat)
print(mat.shape)
print(type(mat))
print(mat)

pickling_on = open(
    "data_encompassing/label_mat_{}/label_mat_{}{}.pickle".format(
        flag, flag2, flag), "wb")
pickle.dump(mat, pickling_on)
Example #55
def goal(t):
    return np.asmatrix([0., 300.]).T
Example #56
def Cart2Pixel(Q=None,
               A=None,
               B=None,
               dynamic_size=False,
               mutual_info=False,
               only_model=False,
               params=None):
    # TODO controls on input
    if A is not None:
        A = A - 1
    if B is not None:
        B = B - 1
    # to dataframe
    feat_cols = ["col-" + str(i + 1) for i in range(Q["data"].shape[1])]
    df = pd.DataFrame(Q["data"], columns=feat_cols)
    if Q["method"] == 'pca':
        pca = PCA(n_components=2)
        Y = pca.fit_transform(df)
    elif Q["method"] == 'tSNE':
        tsne = TSNE(n_components=2, method="exact")
        Y = tsne.fit_transform(df)
    elif Q["method"] == 'kpca':
        kpca = KernelPCA(n_components=2, kernel='linear')
        Y = kpca.fit_transform(df)

    x = Y[:, 0]
    y = Y[:, 1]
    n, n_sample = Q["data"].shape
    plt.scatter(x, y)
    bbox = minimum_bounding_rectangle(Y)
    plt.fill(bbox[:, 0], bbox[:, 1], alpha=0.2)
    # rotation
    grad = (bbox[1, 1] - bbox[0, 1]) / (bbox[1, 0] - bbox[0, 0])
    theta = np.arctan(grad)
    R = np.asmatrix([[np.cos(theta), np.sin(theta)],
                     [-np.sin(theta), np.cos(theta)]])
    bboxMatrix = np.matrix(bbox)
    zrect = (R.dot(bboxMatrix.transpose())).transpose()
    # zrect=R.dot(bboxMatrix)
    plt.fill(zrect[:, 0], zrect[:, 1], alpha=0.2)

    coord = np.array([x, y])
    rotatedData = np.array(R.dot(coord))  # Z

    # rotatedData = np.delete(rotatedData, [59], 1)
    # rotatedData=np.delete(rotatedData, [175],1)
    # rotatedData=np.delete(rotatedData, [184],1)
    # Q["data"] = np.delete(Q["data"], [59], axis=0)
    # Q["data"] = np.delete(Q["data"], [175], axis=0)
    # Q["data"] = np.delete(Q["data"], [184], axis=0)
    # n = n - 1
    plt.scatter(rotatedData[0, :], rotatedData[1, :])
    plt.axis('square')
    plt.show(block=False)

    # find duplicate
    for i in range(len(rotatedData[0, :])):
        for j in range(i + 1, len(rotatedData[0])):
            if rotatedData[0, i] == rotatedData[0, j] and rotatedData[
                    1, i] == rotatedData[1, j]:
                print("duplicate:" + str(i) + " " + str(j))

    # nearest point

    min_dist = np.inf
    min_p1 = 0
    min_p2 = 0
    for p1 in range(n):
        for p2 in range(p1 + 1, n):
            d = (rotatedData[0, p1] - rotatedData[0, p2])**2 + (
                rotatedData[1, p1] - rotatedData[1, p2])**2
            if min_dist > d > 0 and p1 != p2:
                min_p1 = p1
                min_p2 = p2
                min_dist = d
    # plt.scatter([rotatedData[0, min_p1], rotatedData[0, min_p2]], [rotatedData[1, min_p1], rotatedData[1, min_p2]])
    # plt.show(block=False)

    # euclidean distance
    dmin = np.linalg.norm(rotatedData[:, min_p1] - rotatedData[:, min_p2])
    rec_x_axis = abs(zrect[0, 0] - zrect[1, 0])
    rec_y_axis = abs(zrect[1, 1] - zrect[2, 1])
    if only_model:
        count_model_col(rotatedData, Q, 5, 50, params)

    if dynamic_size:
        precision_old = math.sqrt(2)
        A = math.ceil(rec_x_axis * precision_old / dmin)
        B = math.ceil(rec_y_axis * precision_old / dmin)
        print("Dynamic [A:" + str(A) + " ; B:" + str(B) + "]")
        if A > Q["max_A_size"] or B > Q["max_B_size"]:
            # precision = precision_old * Q["max_px_size"] / max([A, B])
            precision = precision_old * (Q["max_A_size"] /
                                         A) * (Q["max_B_size"] / B)
            A = math.ceil(rec_x_axis * precision / dmin)
            B = math.ceil(rec_y_axis * precision / dmin)
    # cartesian coordinates to pixels
    tot = []
    xp = np.round(1 + (A * (rotatedData[0, :] - min(rotatedData[0, :])) /
                       (max(rotatedData[0, :]) - min(rotatedData[0, :]))))
    yp = np.round(1 + (-B) * (rotatedData[1, :] - max(rotatedData[1, :])) /
                  (max(rotatedData[1, :]) - min(rotatedData[1, :])))
    # Modified Feature Position | custom cut
    cut = params["cut"]
    if cut is not None:
        xp[59] = cut
    zp = np.array([xp, yp])
    A = max(xp)
    B = max(yp)

    # find duplicates
    print("Collisioni: " + str(find_duplicate(zp)))

    # Training set

    images = []
    toDelete = 0
    name = "_" + str(int(A)) + 'x' + str(int(B))
    if params["No_0_MI"]:
        name = name + "_No_0_MI"
    if mutual_info:
        Q["data"], zp, toDelete = dataset_with_best_duplicates(
            Q["data"], Q["y"], zp)
        name = name + "_MI"
    else:
        name = name + "_Mean"
    if cut is not None:
        name = name + "_Cut" + str(cut)

    # save model to file
    image_model = {
        "xp": zp[0].tolist(),
        "yp": zp[1].tolist(),
        "A": A,
        "B": B,
        "custom_cut": cut,
        "toDelete": toDelete
    }
    j = json.dumps(image_model)
    f = open(params["dir"] + "model" + name + ".json", "w")
    f.write(j)
    f.close()

    if only_model:
        a = ConvPixel(Q["data"][:, 0], zp[0], zp[1], A, B)
        plt.imshow(a, cmap="gray")
        plt.show()
    else:  # custom_cut=range(0, cut),
        # preview the first sample before building the full image set
        a = ConvPixel(Q["data"][:, 0], zp[0], zp[1], A, B, index=0)
        plt.imshow(a, cmap="gray")
        plt.show()
        if cut is not None:
            images = [
                ConvPixel(Q["data"][:, i],
                          zp[0],
                          zp[1],
                          A,
                          B,
                          custom_cut=cut - 1,
                          index=i) for i in range(0, n_sample)
            ]
        else:
            # a=np.where(Q["y"]==0)
            # attacks=Q["data"][:,a]
            images = [
                ConvPixel(Q["data"][:, i], zp[0], zp[1], A, B, index=i)
                for i in range(0, n_sample)
            ]

        filename = params["dir"] + "train" + name + ".pickle"
        f_myfile = open(filename, 'wb')
        pickle.dump(images, f_myfile)
        f_myfile.close()

    return images, image_model, toDelete
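
# ConvPixel is defined elsewhere in this project and its real signature may
# differ; the sketch below only illustrates the rasterization idea used
# above: drop each feature value of one sample onto its assigned (xp, yp)
# pixel of a B-by-A canvas.
def ConvPixel_sketch(sample, xp, yp, A, B):
    import numpy as np
    img = np.zeros((int(B), int(A)))
    for j in range(len(sample)):
        # Pixel coordinates from Cart2Pixel are 1-based, hence the -1.
        # Colliding features simply overwrite one another, which is why
        # Cart2Pixel counts and reports collisions.
        img[int(yp[j]) - 1, int(xp[j]) - 1] = sample[j]
    return img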
Example #57
    A_d, B_d, Q_d, R_d = c2d(A_c, B_c, dt, Q_noise, R_noise)
    K = clqr(A_c, B_c, Q_controller, R_controller)
    Kff = feedforwards(A_d, B_d, Q_ff)
    L = place(A_d.T, C.T, [0.05, 0.12]).T

    gains = StateSpaceGains(name, dt, A_d, B_d, C, None, Q_d, R_noise, K, Kff,
                            L)
    gains.A_c = A_c
    gains.B_c = B_c
    gains.Q_c = Q_noise

    return gains


u_max = np.asmatrix([12.]).T
x0 = np.asmatrix([0., 0.]).T

gains = make_gains()

plant = StateSpacePlant(gains, x0)
controller = StateSpaceController(gains, -u_max, u_max)
observer = StateSpaceObserver(gains, x0)


def goal(t):
    return np.asmatrix([0., 300.]).T


if __name__ == '__main__':
    if len(sys.argv) == 3:
Example #58
    def removeInterference(self,
                           interf=2,
                           hei_interf=None,
                           nhei_interf=None,
                           offhei_interf=None):

        jspectra = self.dataOut.data_spc
        jcspectra = self.dataOut.data_cspc
        jnoise = self.dataOut.getNoise()
        num_incoh = self.dataOut.nIncohInt

        num_channel = jspectra.shape[0]
        num_prof = jspectra.shape[1]
        num_hei = jspectra.shape[2]

        # hei_interf: default to the upper half of the height range
        count_hei = int(num_hei / 2)
        if hei_interf is None:
            hei_interf = numpy.asmatrix(list(
                range(count_hei))) + num_hei - count_hei
            hei_interf = numpy.asarray(hei_interf)[0]
        # nhei_interf: default 5, clipped to [1, count_hei]
        if nhei_interf is None:
            nhei_interf = 5
        if nhei_interf < 1:
            nhei_interf = 1
        if nhei_interf > count_hei:
            nhei_interf = count_hei
        if offhei_interf is None:
            offhei_interf = 0

        ind_hei = list(range(num_hei))
        #         mask_prof = numpy.asarray(range(num_prof - 2)) + 1
        #         mask_prof[range(num_prof/2 - 1,len(mask_prof))] += 1
        mask_prof = numpy.asarray(list(range(num_prof)))
        num_mask_prof = mask_prof.size
        comp_mask_prof = [0, num_prof / 2]

        # noise_exist: whether jnoise has been defined and holds the noise
        # level of each channel
        if (jnoise.size < num_channel or numpy.isnan(jnoise).any()):
            jnoise = numpy.full(num_channel, numpy.nan)
        noise_exist = jnoise[0] < numpy.inf

        # Interference removal subroutine
        for ich in range(num_channel):
            # Sort the spectra by power (lowest to highest)
            power = jspectra[ich, mask_prof, :]
            power = power[:, hei_interf]
            power = power.sum(axis=0)
            psort = power.ravel().argsort()

            # Estimate the average interference in the power spectra from
            # the lowest-power heights
            junkspc_interf = jspectra[ich, :, hei_interf[psort[list(
                range(offhei_interf, nhei_interf + offhei_interf))]]]

            tmp_noise = 0.0
            if noise_exist:
                #    tmp_noise = jnoise[ich] / num_prof
                tmp_noise = jnoise[ich]
            junkspc_interf = junkspc_interf - tmp_noise
            #junkspc_interf[:,comp_mask_prof] = 0

            jspc_interf = junkspc_interf.sum(axis=0) / nhei_interf
            jspc_interf = jspc_interf.transpose()
            # Compute the average interference spectrum
            noiseid = numpy.where(
                jspc_interf <= tmp_noise / numpy.sqrt(num_incoh))
            noiseid = noiseid[0]
            cnoiseid = noiseid.size
            interfid = numpy.where(
                jspc_interf > tmp_noise / numpy.sqrt(num_incoh))
            interfid = interfid[0]
            cinterfid = interfid.size

            if (cnoiseid > 0):
                jspc_interf[noiseid] = 0

            # Expand the set of profiles to clean to their +/-1 neighbours
            if (cinterfid > 0):
                new_interfid = (numpy.r_[interfid - 1, interfid, interfid + 1]
                                + num_prof) % num_prof
                new_interfid = numpy.unique(new_interfid)
                new_cinterfid = new_interfid.size
            else:
                new_cinterfid = 0

            for ip in range(new_cinterfid):
                ind = junkspc_interf[:, new_interfid[ip]].ravel().argsort()
                jspc_interf[new_interfid[ip]] = junkspc_interf[
                    ind[nhei_interf // 2], new_interfid[ip]]

            jspectra[ich, :, ind_hei] = jspectra[
                ich, :, ind_hei] - jspc_interf  # correct indices

            # Remove the interference at the profile with the strongest interference
            ListAux = jspc_interf[mask_prof].tolist()
            maxid = ListAux.index(max(ListAux))

            if cinterfid > 0:
                for ip in range(cinterfid * (interf == 2) - 1):
                    ind = (jspectra[ich, interfid[ip], :] < tmp_noise *
                           (1 + 1 / numpy.sqrt(num_incoh))).nonzero()[0]
                    cind = len(ind)

                    if (cind > 0):
                        jspectra[ich, interfid[ip], ind] = tmp_noise * \
                            (1 + (numpy.random.uniform(size=cind) - 0.5) /
                             numpy.sqrt(num_incoh))

                # Rebuild the strongest-interference profile by cubic
                # interpolation from its neighbours at offsets -2, -1, 1, 2:
                # xx[r, c] = offset_c ** r is a (transposed) Vandermonde
                # matrix, so xx_inv[:, 0] holds the weights that evaluate
                # the interpolating cubic at offset 0.
                ind = numpy.array([-2, -1, 1, 2])
                xx = numpy.zeros([4, 4])

                for id1 in range(4):
                    xx[:, id1] = ind[id1]**numpy.asarray(list(range(4)))

                xx_inv = numpy.linalg.inv(xx)
                xx = xx_inv[:, 0]
                ind = (ind + maxid + num_mask_prof) % num_mask_prof
                yy = jspectra[ich, mask_prof[ind], :]
                jspectra[ich,
                         mask_prof[maxid], :] = numpy.dot(yy.transpose(), xx)

            indAux = (jspectra[ich, :, :] < tmp_noise *
                      (1 - 1 / numpy.sqrt(num_incoh))).nonzero()
            jspectra[ich, indAux[0], indAux[1]] = tmp_noise * \
                (1 - 1 / numpy.sqrt(num_incoh))

        # Interference removal in the cross-spectra
        if jcspectra is None:
            return jspectra, jcspectra
        num_pairs = int(jcspectra.size / (num_prof * num_hei))
        jcspectra = jcspectra.reshape(num_pairs, num_prof, num_hei)

        for ip in range(num_pairs):

            #-------------------------------------------

            cspower = numpy.abs(jcspectra[ip, mask_prof, :])
            cspower = cspower[:, hei_interf]
            cspower = cspower.sum(axis=0)

            cspsort = cspower.ravel().argsort()
            junkcspc_interf = jcspectra[ip, :, hei_interf[cspsort[list(
                range(offhei_interf, nhei_interf + offhei_interf))]]]
            junkcspc_interf = junkcspc_interf.transpose()
            jcspc_interf = junkcspc_interf.sum(axis=1) / nhei_interf

            ind = numpy.abs(jcspc_interf[mask_prof]).ravel().argsort()

            median_real = int(
                numpy.median(
                    numpy.real(junkcspc_interf[
                        mask_prof[ind[list(range(3 * num_prof // 4))]], :])))
            median_imag = int(
                numpy.median(
                    numpy.imag(junkcspc_interf[
                        mask_prof[ind[list(range(3 * num_prof // 4))]], :])))
            comp_mask_prof = [int(e) for e in comp_mask_prof]
            junkcspc_interf[comp_mask_prof, :] = complex(
                median_real, median_imag)

            for iprof in range(num_prof):
                ind = numpy.abs(junkcspc_interf[iprof, :]).ravel().argsort()
                jcspc_interf[iprof] = junkcspc_interf[iprof,
                                                      ind[nhei_interf // 2]]

            # Remove the interference
            jcspectra[ip, :,
                      ind_hei] = jcspectra[ip, :, ind_hei] - jcspc_interf

            ListAux = numpy.abs(jcspc_interf[mask_prof]).tolist()
            maxid = ListAux.index(max(ListAux))

            # Same cubic-interpolation rebuild as above, now applied to the
            # cross-spectra
            ind = numpy.array([-2, -1, 1, 2])
            xx = numpy.zeros([4, 4])

            for id1 in range(4):
                xx[:, id1] = ind[id1]**numpy.asarray(list(range(4)))

            xx_inv = numpy.linalg.inv(xx)
            xx = xx_inv[:, 0]

            ind = (ind + maxid + num_mask_prof) % num_mask_prof
            yy = jcspectra[ip, mask_prof[ind], :]
            jcspectra[ip, mask_prof[maxid], :] = numpy.dot(yy.transpose(), xx)

        # Save results
        self.dataOut.data_spc = jspectra
        self.dataOut.data_cspc = jcspectra

        return 1
Example #59
if __name__ == "__main__":
  if len(sys.argv) != 2:
    print >> sys.stderr, "Usage: linreg <datafile>"
    exit(-1)

  sc = SparkContext(appName="LinearRegression")

  # Input yx file has y_i as the first element of each line 
  # and the remaining elements constitute x_i
  yxinputFile = sc.textFile(sys.argv[1])
  yxlines = yxinputFile.map(lambda line: line.split(','))
  print yxlines
  
  # We need to calculate A: compute x_i * x_i^T for each line, then add
  # them up with the reduceByKey function
  A = np.asmatrix(yxlines.map(lambda line: ("KeyA",keyA(line))).reduceByKey(lambda x1,x2: np.add(x1,x2)).map(lambda l: l[1]).collect()[0])
  print A
  
  # We need to calculate B: compute x_i * y_i for each line, then add
  # them up with the reduceByKey function
  B = np.asmatrix(yxlines.map(lambda line: ("KeyB",keyB(line))).reduceByKey(lambda x1,x2: np.add(x1,x2)).map(lambda l: l[1]).collect()[0])
  print B
 

  yxfirstline = yxlines.first()
  #print yxfirstline[0]," ",yxfirstline[1]
  yxlength = len(yxfirstline)
  print "yxlength: ", yxlength

  # dummy floating point array for beta to illustrate desired output format
  beta = np.zeros(yxlength, dtype=float)
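
  # This snippet stops before the final solve. The closing step is the
  # usual normal-equation solution beta = A^-1 * B; a sketch, assuming the
  # A and B assembled above:
  #
  #   beta = np.squeeze(np.asarray(np.linalg.solve(A, B)))
  #   print "beta: ", beta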
Example #60
def make_gains():
    # x = |       Angle      |
    #     | Angular velocity |
    # u = voltage
    # y = encoder

    name = 'gains'

    # Parameters
    moment_inertia = 0.226796 * (1 * .0256)**2.0 / 2.0 + 0.226796 * (
        0.5 * 0.0256)**2.0
    gear_ratio = 1.0 / 4.0
    efficiency = .91

    # motor characteristics
    free_speed = 18700. / 60.0 * 6.28
    free_current = .67
    stall_torque = .71
    stall_current = 134.
    resistance = 12. / stall_current
    torque_constant = stall_torque / stall_current
    velocity_constant = (12. - free_current * resistance) / free_speed

    num_motors = 2.0
    sensor_ratio = 1.0

    # back emf torque
    emf = -(torque_constant * velocity_constant) / (
        resistance * gear_ratio**2. / num_motors)

    # motor torque
    mtq = efficiency * torque_constant / (gear_ratio * resistance / num_motors)

    # rotational acceleration
    t2a = 1. / moment_inertia

    A_c = np.asmatrix([[0., 1.], [0., t2a * emf]])

    B_c = np.asmatrix([[0.], [t2a * mtq]])

    C = np.asmatrix([[sensor_ratio, 0.]])

    # Controller weighting
    Q_controller = np.asmatrix([[0., 0.], [0., 5e-3]])

    R_controller = np.asmatrix([[1.]])

    # Noise
    Q_noise = np.asmatrix([[1e-2, 0.], [0., 1e3]])

    R_noise = np.asmatrix([[0.1]])

    Q_ff = np.asmatrix([[0., 0.], [0., 1.]])

    A_d, B_d, Q_d, R_d = c2d(A_c, B_c, dt, Q_noise, R_noise)
    K = clqr(A_c, B_c, Q_controller, R_controller)
    Kff = feedforwards(A_d, B_d, Q_ff)
    L = place(A_d.T, C.T, [0.05, 0.12]).T

    gains = StateSpaceGains(name, dt, A_d, B_d, C, None, Q_d, R_noise, K, Kff,
                            L)
    gains.A_c = A_c
    gains.B_c = B_c
    gains.Q_c = Q_noise

    return gains
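
# c2d, clqr, feedforwards, place and the StateSpace* classes come from this
# project's controls library and are not shown here. As a rough sketch, a
# zero-order-hold c2d computes the discrete (A_d, B_d) pair like this (the
# real helper also discretizes the process-noise covariance):
def zoh_discretize_sketch(A_c, B_c, dt):
    import numpy as np
    from scipy.linalg import expm
    n = A_c.shape[0]
    m = B_c.shape[1]
    # The matrix exponential of the augmented block [[A, B], [0, 0]] * dt
    # is [[A_d, B_d], [0, I]], giving both pieces in one shot.
    M = np.zeros((n + m, n + m))
    M[:n, :n] = A_c
    M[:n, n:] = B_c
    Md = expm(M * dt)
    return np.asmatrix(Md[:n, :n]), np.asmatrix(Md[:n, n:])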