Example no. 1
def PCA(X, k):
    """
        Compute PCA on the given matrix.

        Args:
                X - Matrix of dimensions (n,d), where n is the number of sample points and d is the dimension of each sample.
                For example, if we have 10 pictures and each picture is a vector of 100 pixels, then the dimension of the matrix would be (10,100).
                k - number of eigenvectors to return

        Returns:
          U - Matrix of dimension (k,d), composed of the k eigenvectors corresponding to the k largest eigenvalues
                        of the covariance matrix.
          S - k largest eigenvalues of the covariance matrix; vector of dimension (k, 1).
        """

    cov_mat = np.matmul(mat.transpose(X), X)
    w, v = lin.eig(cov_mat)
    v = mat.transpose(v)
    vec = []
    d = len(w)
    for i in range(d):
        vec.append((w[i], v[i]))
    vec.sort(key=lambda x: x[0], reverse=True)

    U = np.stack([vec[i][1] for i in range(k)], axis=0)
    #print(type(w))
    w[::-1].sort()
    w = w[:k]
    S = np.array([w]).T
    return U, S
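Since X^T X is symmetric, np.linalg.eigh is the more robust choice here: it returns real eigenvalues in ascending order, which also removes the manual sort. A minimal equivalent sketch, assuming only NumPy:

import numpy as np

def pca_sketch(X, k):
    w, v = np.linalg.eigh(X.T @ X)   # symmetric input: real eigenvalues, ascending order
    idx = np.argsort(w)[::-1][:k]    # indices of the k largest eigenvalues
    U = v[:, idx].T                  # (k, d): eigenvectors as rows
    S = w[idx].reshape(-1, 1)        # (k, 1): eigenvalues
    return U, S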
Example no. 2
def section_d():
    lfw_people = load_data()
    names = lfw_people.target_names
    data, Y_values, h, w = get_pictures_by_names(names)
    data = np.array(data)[:, :, 0]
    standardize(data)
    X_train, X_test, Y_train, Y_test = train_test_split(data,
                                                        Y_values,
                                                        test_size=0.25,
                                                        random_state=0)
    k_vals = [1, 5, 10, 30, 50, 100, 150, 300, len(data[0])]
    scores = []
    for k in k_vals:
        print("k is {}".format(k))
        U, S = PCA(X_train, k)
        A = np.matmul(X_train, mat.transpose(U))
        X_test_newDim = mat.transpose(np.matmul(U,
                                                np.matrix.transpose(X_test)))
        res = svm.SVC(kernel='rbf', C=1000, gamma=10**-7).fit(A, Y_train)
        scores.append(res.score(X_test_newDim, Y_test))
    plt.plot(k_vals, scores, color='blue')
    plt.xlabel('k values')
    plt.ylabel('Accuracy')
    plt.title('Accuracy through different dimensions')
    plt.show()
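For reference, the two projections above are the same operation written two ways; a sketch of the equivalent form (assuming NumPy arrays):

A = X_train @ U.T              # same as np.matmul(X_train, mat.transpose(U))
X_test_newDim = X_test @ U.T   # same as transposing np.matmul(U, X_test.T)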
Example no. 3
def run():
    error = 0
    final_g = [0 for i in range(7)]
    max_err = 100000000000000000000
    for i in range(1000):
        #error = 0
        p = [(uniform(-1, 1), uniform(-1, 1)), (uniform(-1, 1), uniform(-1,
                                                                        1))]
        target = [
            -p[0][0] + (p[0][0] - p[1][0]) * p[1][0] / (p[1][0] - p[1][1]), 1,
            -(p[0][0] - p[1][0]) / (p[1][0] - p[1][1])
        ]
        N = [(1, uniform(-1, 1), uniform(-1, 1)) for i in range(1000)]
        # list(...) so the labels can be flipped in place below (Python 3)
        classification = list(map(y_function,
                                  [(x[1]**2 + x[2]**2 - 0.6) > 0 for x in N]))
        flipped_nums = []
        for j in range(100):
            flip = randrange(0, 1000)
            while flip in flipped_nums:
                flip = randrange(0, 1000)
            flipped_nums.append(flip)
        for f in flipped_nums:
            classification[f] = -classification[f]

        M = [(1, x[1], x[2], x[1] * x[2], x[1]**2, x[2]**2) for x in N]
        g = matrix.transpose(
            matrix(pinv([list(m)
                         for m in M])) * matrix([[float(c)]
                                                 for c in classification]))

        N = [(1, uniform(-1, 1), uniform(-1, 1)) for i in range(1000)]
        classification = list(map(y_function,
                                  [(x[1]**2 + x[2]**2 - 0.6) > 0 for x in N]))
        flipped_nums = []
        for j in range(100):
            flip = randrange(0, 1000)
            while flip in flipped_nums:
                flip = randrange(0, 1000)
            flipped_nums.append(flip)
        for f in flipped_nums:
            classification[f] = -classification[f]

        M = [(1, x[1], x[2], x[1] * x[2], x[1]**2, x[2]**2) for x in N]
        g_classification = matrix(
            list(map(y_function2, [g * matrix.transpose(matrix(m)) for m in M])))
        #final_g = [a + b for (a,b) in zip(final_g,[x/abs(g.tolist()[0][0]) for x in g.tolist()[0]])]
        errors = [
            x != t
            for (x, t) in zip(g_classification.tolist()[0], classification)
        ]
        for e in errors:
            if e:
                error += 1
        #if error < max_err:
        #   max_err = error
        #   final_g = g
        #   print [x/abs(g.tolist()[0][0]) for x in g.tolist()[0]]
    error /= 1000.0 * 1000.0
    print(error)
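The pseudo-inverse step above is ordinary least squares on the transformed features; a compact NumPy sketch of the same fit (assuming import numpy as np):

Phi = np.array(M, dtype=float)               # (1000, 6) transformed features
y = np.array(classification, dtype=float)    # +/-1 labels after the noise flips
g, *_ = np.linalg.lstsq(Phi, y, rcond=None)  # length-6 weight vector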
Example no. 4
    def sample_z_i(self, i):
        
        """
        Samples from distribution of i'th value of z-vector given other
        parameters

        :type i: int
        :param i: number which indicates which feature's z value to sample 
        """
        
        if self.zs[i] == 1.0:
            # This is all on page 48 (Appendix A) of group feature selection
            # paper - pretty hard to explain without looking at that. 
            Theta = matrix(self.data)
            Theta_t = matrix.transpose(Theta)
            mult = np.zeros(self.d)
            mult = mult + self.vs
            mult[self.big_zeros] = 0.0
            for j in range(self.multi_dim):
                mult[i + j*self.g] = 0.0
                
            A_minusg = mult*np.identity(self.d)
            C_minusg = (self.sigma2*np.identity(self.size) +
                        Theta*A_minusg*Theta_t)
            X_g = np.zeros([self.size, self.multi_dim])
            for k in range(self.multi_dim):
                X_g[:, k] = self.data[:, i + k*self.g]
            X_g = matrix(X_g)
            X_g_t = matrix.transpose(X_g)
            C_inv = np.linalg.inv(C_minusg)
            M = X_g_t*C_inv*X_g
            eig_vals, eig_vecs = np.linalg.eigh(M)
            y = matrix(self.y)
            L = 0.0
            for j in range(self.multi_dim):
                s_j = eig_vals[j]
                e_j = eig_vecs[:, j]
                temp = y*C_inv*X_g
                q_j = float(temp*e_j) 
                L = (L + 0.5*(q_j**2.0/((1.0/self.vs) + s_j)
                              - np.log(1.0 + self.vs*s_j)))
            L_tot = np.exp(L)
        else:
            L_tot = 1.0
        test = np.random.uniform()
        
        if (test < (self.p0*L_tot)/(self.p0*L_tot + (1.0 - self.p0))
            or L_tot > 1000000000000.0):
            self.zs[i] = 1.0
        else:
            self.zs[i] = 0.0
        
        self.zeros, self.ones = find_zero(self.zs)
        self.big_zeros = np.zeros(len(self.zeros)*self.multi_dim, dtype = int)
        self.big_ones = np.zeros(len(self.ones)*self.multi_dim, dtype = int)
        self.fill_in_binary() 
 
        return
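The acceptance test at the end is a Gibbs draw for a binary inclusion variable: z_i is set to 1 with posterior probability p0·L / (p0·L + (1 − p0)), where L is the likelihood ratio computed above. The step in isolation, as a sketch:

import numpy as np

def gibbs_z_update(p0, L_tot, rng=np.random.default_rng()):
    # Posterior inclusion probability for a spike-and-slab indicator.
    p_include = (p0 * L_tot) / (p0 * L_tot + (1.0 - p0))
    return 1.0 if rng.uniform() < p_include else 0.0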
Example no. 5
def utility_kalman_filter_ball(ball):    
    
    if (t==0):  # initialize filter
        ball.xhat = matlib.zeros((8,1))
        ball.xhat_delayed = ball.xhat
        ball.S = np.diag([\
            globals.field_width/2, 	# initial variance of x-position of ball
            globals.field_width/2, 	# initial variance of y-position of ball 
            .01, 				# initial variance of x-velocity of ball 
            .01,				# initial variance of y-velocity of ball
            .001,				# initial variance of x-acceleration of ball 
            .001,				# initial variance of y-acceleration of ball
            .0001,				# initial variance of x-jerk of ball 
            .0001,				# initial variance of y-jerk of ball
            ]);
        ball.S_delayed=ball.S
    
    # prediction step between measurements
    N = 10
    for i in range(N):  # N Euler substeps (range(1,N) skipped one step)
        ball.xhat = ball.xhat + (globals.loop_rate/N)*globals.A_ball*ball.xhat
        ball.S = ball.S + (globals.loop_rate/N)*(globals.A_ball*ball.S+ball.S*matrix.transpose(globals.A_ball)+globals.Q_ball)
 
    # correction step at measurement
    # only update when the camera flag is one indicating a new measurement
    # case 1 does not compensate for camera delay
    # case 2 compensates for fixed camera delay
    if ball.camera_flag == 1:
            y = ball.position_camera  # actual measurement
            y_pred = globals.C_ball*ball.xhat  # predicted measurement
            L = ball.S*matrix.transpose(globals.C_ball)/(globals.R_ball+globals.C_ball*ball.S*matrix.transpose(globals.C_ball))
            ball.S = (np.eye(8)-L*globals.C_ball)*ball.S
            ball.xhat = ball.xhat + L*(y-y_pred)
    elif ball.camera_flag == 2:
            y = ball.position_camera  # actual measurement
            y_pred = globals.C_ball*ball.xhat_delayed;  # predicted measurement
            L = ball.S_delayed*matrix.transpose(globals.C_ball)/(globals.R_ball+globals.C_ball*ball.S_delayed*matrix.transpose(globals.C_ball))  
            ball.S_delayed = (np.eye(8)-L*globals.C_ball)*ball.S_delayed
            ball.xhat_delayed = ball.xhat_delayed + L*(y-y_pred);
            for i in range(N*int(globals.camera_sample_rate/globals.loop_rate)):  # re-propagate over the camera delay
                ball.xhat_delayed = ball.xhat_delayed + (globals.loop_rate/N)*(globals.A_ball*ball.xhat_delayed);
                ball.S_delayed = ball.S_delayed + (globals.loop_rate/N)*(globals.A_ball*ball.S_delayed+ball.S_delayed*matrix.transpose(globals.A_ball)+globals.Q_ball)
            ball.xhat = ball.xhat_delayed
            ball.S    = ball.S_delayed

    
    # output current estimate of state
    ball.position     = ball.xhat[0:2]
    ball.velocity     = ball.xhat[2:4]
    ball.acceleration = ball.xhat[4:6]
    ball.jerk         = ball.xhat[6:8]
    ball.S            = globals.S_ball
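For reference, the discrete-time Kalman recursion this function implements, as a self-contained sketch (the code above Euler-integrates a continuous-time model instead of using a discrete A):

import numpy as np

def kalman_step(xhat, S, y, A, C, Q, R):
    # Predict: propagate state estimate and covariance one step.
    xhat = A @ xhat
    S = A @ S @ A.T + Q
    # Correct: Kalman gain, then measurement update.
    K = S @ C.T @ np.linalg.inv(C @ S @ C.T + R)
    xhat = xhat + K @ (y - C @ xhat)
    S = (np.eye(S.shape[0]) - K @ C) @ S
    return xhat, S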
Example no. 6
def draw_object():
    glVertexAttribPointer(locations[b"vertex"], 3, GL_FLOAT, False, record_len,
                          vertex_offset)
    glVertexAttribPointer(locations[b"tex_coord"], 3, GL_FLOAT, False,
                          record_len, tex_coord_offset)
    glVertexAttribPointer(locations[b"normal"], 3, GL_FLOAT, False, record_len,
                          normal_offset)
    glVertexAttribPointer(locations[b"color"], 3, GL_FLOAT, False, record_len,
                          color_offset)

    modelview = m.identity()
    modelview = m.mul(modelview, m.scale(scale, scale, scale))
    modelview = m.mul(modelview, q.matrix(rotation))
    glUniformMatrix4fv(locations[b"modelview_matrix"], 1, GL_FALSE,
                       c_matrix(modelview))

    normal = m.transpose(m.inverse(m.top_left(modelview)))
    glUniformMatrix3fv(locations[b"normal_matrix"], 1, GL_FALSE,
                       c_matrix(normal))

    offset = 0
    for size in sizes:
        glDrawElements(GL_TRIANGLE_STRIP, size, GL_UNSIGNED_INT,
                       c_void_p(offset))
        offset += size * uint_size
Example no. 7
    def greedyBipartite(cls, cost_matrix):
        """
        Greedy algorithm for bipartite matching of two tracks.
        Takes in a len(track1) x len(track2) matrix, where the cells are weights between the nodes in
        the tracks.

        Finds a, b and c for the tracks, as defined above.
        """
        from numpy import zeros, shape, max, argmax, unravel_index, matrix

        dimensions = shape(cost_matrix)
        cost = {'a': 0.0, 'b': 0.0, 'c': 0.0, 'd': -1}

        cost['b'] += cls.getZeroOccurrences(cost_matrix)
        cost['c'] += cls.getZeroOccurrences(matrix.transpose(cost_matrix))

        while max(cost_matrix) > 0:
            pos = argmax(cost_matrix)
            row, col = unravel_index([pos], dimensions)
            dimensions = list(dimensions)

            val = max(cost_matrix)
            cost_matrix[row[0], :] = zeros(dimensions[1])
            cost_matrix[:, col[0]] = zeros(dimensions[0])
            cost['a'] += val

        return cost
Example no. 8
    def lapjvBipartite(cls, cost_matrix):
        """
        Optimal algorithm for bipartite matching of two tracks.
        Takes in len(track1) x len(track2) matrix, where the cells are weights between the nodes in
        the tracks.

        Finds a, b and c for the tracks, as defined above.
        """
        from quick.webtools.clustering.JonkerVolgenant import JonkerVolgenant
        from numpy import matrix
        matches = JonkerVolgenant.findJonkerVolgenant(cost_matrix)

        cost = {'a': 0, 'b': 0, 'c': 0, 'd': -1}

        track1Length = len(cost_matrix)
        track2Length = len(cost_matrix[0])

        cost['b'] += cls.getZeroOccurrences(cost_matrix)
        cost['c'] += cls.getZeroOccurrences(matrix.transpose(cost_matrix))

        if track1Length < track2Length:
            for matchID in range(0, track1Length):
                val = cost_matrix[matchID, matches[matchID]]
                cost['a'] += val
        else:
            for matchID in range(0, track2Length):
                val = cost_matrix[matches[matchID], matchID]
                cost['a'] += val

        return cost
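For comparison, SciPy ships an optimal assignment solver (a Jonker-Volgenant variant in recent SciPy, the same family as the JonkerVolgenant class above); a sketch of computing the matched weight sum with it, assuming the matrix fits in memory:

from scipy.optimize import linear_sum_assignment
import numpy as np

def optimal_match_cost(cost_matrix):
    cost = np.asarray(cost_matrix, dtype=float)
    rows, cols = linear_sum_assignment(-cost)  # negate: we maximize total weight
    return cost[rows, cols].sum()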
Example no. 9
 def SqrtCorrel(self):
     (w,v) = linalg.eig(self.Sigma)
     wreal = []
     for i in range(self.N):
         wreal += [sqrt(w[i].real)]
     wdiag = diag(wreal)
     self.Sqrt = wdiag.dot(matrix.transpose(v))
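An equivalent and usually cheaper factorization for a positive-definite correlation matrix is Cholesky; a sketch (assuming self.Sigma is positive definite and import numpy as np):

 Sqrt = np.linalg.cholesky(self.Sigma).T   # Sqrt.T @ Sqrt == Sigma, same contract as above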
Example no. 10
 def _inversion(self, P_mat_t, fft_v, m):
     b = np.zeros([self.L - m, 1], dtype=complex)
     for el in range(m, self.L, 1):
         b[el-m] = fft_v[m + el**2][0]
     P = P_mat_t[:, m:]
     flmt_t = np.linalg.lstsq(npmtrx.transpose(P), b, rcond=None)[0]
     return flmt_t
Example no. 12
def brkga_mutation(runsize, num_col=4):
    '''Create random mutant solutions with randomly chosen levels and numbers of factors.
    No mutation rate is required because we always generate a fixed number of mutant
    solutions based on the population size. Call this function repeatedly to generate more than one mutant.'''
    lev = find_levels(runsize)

    new_matrix = []
    temp_fact = []
    for l in range(num_col):
        lev_selected = lev[random.randint(0, len(lev)-1)]
        temp_fact.append(lev_selected)
        new_col = []
        for i in range(lev_selected):
            for j in range(int(runsize/lev_selected)):
                new_col.append(i)
        random.shuffle(new_col)
        new_matrix.append(new_col)

    new_matrix = matrix(new_matrix).reshape(num_col, runsize)
    new_matrix = matrix.transpose(new_matrix)

    temp = str(runsize)
    for i in list(remove_duplicates(temp_fact)):
        temp += ',' + str(i) + '^' + str(temp_fact.count(i))

    new_oa = OA(temp, new_matrix)

    return new_oa
Example no. 14
def regression_weights(x_t, head, exponential=False):
    """Does the matrix calculations needed for regression weights.

    Args:
        x_t (ndarray): X-transpose matrix
        head (ndarray): Y (training data) matrix
        exponential (bool, optional): whether a simple exponential regression is being fitted. Defaults to False.

    Returns:
        ndarray: calculated weights
    """
    product = np.copy(x_t)
    product = np.matmul(product, mt.transpose(product))  # X^T X
    product = la.inv(product)                            # (X^T X)^{-1}
    product = np.matmul(product, x_t)                    # (X^T X)^{-1} X^T
    # An exponential model must fit the logarithm of the records.
    product = np.matmul(product, np.log(mt.transpose(head)) if exponential else mt.transpose(head))
    return product
Example no. 15
 def to_compositions(self):
     '''
     Return corresponding compositional data object
     '''
     from Compositions import CompData
     matrix, row_labels, col_labels = self.to_matrix()
     f_mat = CompData(matrix.transpose(), dtype=float)
     return f_mat, row_labels, col_labels
Example no. 18
def Example():
    C = Copula([Currency.XBT, Currency.ETH, Currency.BCH, Currency.LTC, Currency.XRP])
    #C = Copula([Currency.BCH])
    C.ComputeCorrelation()
    print(C.Sigma)
    tab = C.Simulate(10000)
    cov = matrix.transpose(tab).dot(tab)/10000

    print(cov)
Example no. 19
 def sample_sigma2(self, k, sigma2_hat):
     
     """
     Samples from distribution of the error given other parameters.
     """
     
     alpha0 = k/2.0
     beta0 = k*sigma2_hat/2.0
     alpha = alpha0 + self.size/2.0
     data = np.copy(self.data)
     y = np.copy(self.y)
     w = np.copy(self.w)
     data = matrix(data)
     y = matrix.transpose(matrix(y))
     w = matrix.transpose(matrix(w))
     beta = beta0 + 0.5*matrix.transpose(y - data*w)*(y - data*w)
     self.sigma2 = invgamma.rvs(alpha, scale=beta)
     return
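This is the standard conjugate update for Gaussian noise variance: with an Inv-Gamma(alpha0, beta0) prior, the posterior is Inv-Gamma(alpha0 + n/2, beta0 + RSS/2), where RSS = (y − Xw)ᵀ(y − Xw) is exactly the quadratic form computed for beta above.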
Example no. 20
 def sample_w(self):
     
     """
     Samples from distribution of weights given other parameters.
     """
     
     nothing, eta_rem = self.regcalc()
     eta_rem = np.delete(eta_rem, self.big_zeros, 0)
     eta_rem = np.delete(eta_rem, self.big_zeros, 1)
     X = self.data
     X = np.delete(X, self.big_zeros, 1)
     y = matrix.transpose(matrix(self.y))
     mean_rem = (1.0/(self.sigma2))*eta_rem*matrix.transpose(matrix(X))*y
     mean_simp = np.zeros(len(self.big_ones))
     for i in range(len(self.big_ones)):
         mean_simp[i] = float(mean_rem[i])
     if len(mean_simp) > 0:
         self.w[self.big_ones] = multivariate_normal.rvs(mean_simp, eta_rem)
     self.w[self.big_zeros] = 0.0
     return
Example no. 21
 def plot_dist_mat(self, metric='euclidean', file=None, transpose=True, show_labels=False, **kwargs):
     import distances
     from heatmap_clust import clust_data, heatmap_clust
     matrix, row_labels, col_labels = self.to_matrix()
     if transpose:
         mat = matrix.transpose()
         labels = [s.split('_')[-1] for s in col_labels]
     else:
         mat = matrix
         labels = [s.split('_')[-1] for s in row_labels]
     D = distances.pdist(mat, metric)
     if show_labels:
         heatmap_clust(D, file=file, labels=labels, **kwargs)
     else:
         heatmap_clust(D, file=file, **kwargs)
Example no. 23
 def ComputeCorrelation(self):
     A = zeros(shape = (self.N, self.T))
     for i in range(self.N):
         A[i] = self.Densities[self.CurrencyList[i]].StdReturns
     temp = DataFrame(A)
     Sigma = A.dot(matrix.transpose(A))
     Sigma /= float(self.T)
     std = []
     for i in range(self.N):
         std += [sqrt(Sigma[i,i])]
     for i in range(self.N):
         for j in range(self.N):
             Sigma[i,j] /= std[i] * std[j]
     self.Sigma = Sigma
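If StdReturns are already standardized per currency, this loop is just the sample correlation matrix, and the method reduces to a one-line sketch (assuming import numpy as np):

Sigma = np.corrcoef(A)   # (N, N) correlation of the rows of A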
Example no. 24
def nmf_div(V, k, n_max_iterations=1000, random_seed=RANDOM_SEED):
    """
    Non-negative matrix factorization V ~ W·H with k components, using
    multiplicative updates for the KL-divergence objective.
    :param V: numpy array or pandas DataFrame; (n_samples, n_features); the matrix to be factorized by NMF
    :param k: int; number of components
    :param n_max_iterations: int;
    :param random_seed:
    :return: W, H, err; the factors and the final mean divergence
    """

    eps = finfo(float).eps

    N = V.shape[0]
    M = V.shape[1]
    V = matrix(V)
    seed(random_seed)
    W = rand(N, k)
    H = rand(k, M)
    for t in range(n_max_iterations):
        VP = dot(W, H)
        W_t = matrix.transpose(W)
        H = multiply(H, dot(W_t, divide(V, VP))) + eps
        for i in range(k):
            W_sum = 0
            for j in range(N):
                W_sum += W[j, i]
            for j in range(M):
                H[i, j] = H[i, j] / W_sum
        VP = dot(W, H)
        H_t = matrix.transpose(H)
        W = multiply(W, dot(divide(V, VP + eps), H_t)) + eps
        W = divide(W, ndarray.sum(H, axis=1, keepdims=False))

    err = sum(multiply(V, log(divide(V + eps, VP + eps))) - V + VP) / (M * N)

    return W, H, err
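A library alternative for the same objective, as a sketch (assuming scikit-learn is available):

from sklearn.decomposition import NMF

model = NMF(n_components=k, solver='mu', beta_loss='kullback-leibler', max_iter=1000)
W = model.fit_transform(V)   # multiplicative-update solver, KL divergence
H = model.components_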
Example no. 25
def MLE(X, y):
    """
    Maximum likelihood estimate of weights

    :type X: array of size #data points*4 by #dimensions*4 containing floats
    :param X: matrix containing features for each data point

    :type y: array of size #data points*4 containing floats
    :param y: vector filled with target variables
    """

    Theta = np.matrix(X)
    Theta_t = np.matrix.transpose(Theta)
    inv = np.linalg.inv(Theta_t * Theta)
    return inv * Theta_t * np.matrix.transpose(np.matrix(y))
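This is the closed-form OLS estimate w = (ΘᵀΘ)⁻¹Θᵀy. Forming the inverse explicitly is numerically fragile when ΘᵀΘ is near-singular; an equivalent least-squares sketch:

w_hat, *_ = np.linalg.lstsq(X, y, rcond=None)   # solves min ||X w - y||^2 without an explicit inverse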
Example no. 26
def GaussNewton(steps, X, Y, B):
    print('Running Gauss-Newton method for B =', B)
    J = linalg.norm(f(X, Y, B))**2
    Jo = J
    histB = [B]
    for i in range(steps):
        Jac = buildJ(X, Y, B)
        JacT = matrix.transpose(Jac)
        B = B - np.matmul(np.linalg.inv(np.matmul(JacT, Jac)),
                          np.matmul(JacT, f(X, Y, B)))
        J = linalg.norm(f(X, Y, B))**2
        histB.append(B)

    histB = np.matrix(histB)
    if (J > Jo):
        print("Diverged!")
    else:
        print("Converged to", B)
    return histB
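Each iteration applies the Gauss-Newton update B ← B − (JᵀJ)⁻¹ Jᵀ f(B). Solving the normal equations directly is cheaper and more stable than forming the inverse; a sketch of the same step:

B = B - np.linalg.solve(np.matmul(JacT, Jac), np.matmul(JacT, f(X, Y, B)))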
Example no. 27
    def regcalc(self):
        
        """
        Calculates the estimates for mean and covariance matrix of weights
        given a particular z-vector.
        """
        
        Theta = np.matrix(self.data)
        Theta_t = np.matrix.transpose(Theta)
        mult = np.zeros(self.d)
        mult = mult + 1.0/self.vs
        mult[self.big_zeros] = 10000000.0
        I_vs = mult*np.identity(self.d)
        Eta_z = np.linalg.inv((1.0/self.sigma2)*Theta_t*Theta + I_vs)
        ys = np.matrix.transpose(np.matrix(self.y))
        prelim = Theta_t * ys
        M_N = (1/self.sigma2) * Eta_z * prelim

        return M_N, Eta_z
Example no. 28
    def _spatial_elimination(self, P_mat_t, f_t, flm_t, m):
        P_mat = npmtrx.transpose(P_mat_t[:, :m])
        gm = np.dot(P_mat, flm_t)
        gm_neg = np.conj(gm)

        phi_weights = np.reshape(self._phi_weights[m][:m ** 2], [m ** 2, 1])
        phi_weights_neg = np.reshape(self._phi_weights_neg[m][:m ** 2], [m ** 2, 1])

        f_temp = np.zeros_like(phi_weights)
        f_temp_neg = np.zeros_like(phi_weights_neg)

        for ii in range(m):
            f_temp[ii**2 : (ii+1)**2, :] = gm[ii]
            f_temp_neg[ii ** 2 : (ii + 1) ** 2, :] = gm_neg[ii]

        f_temp = np.multiply(f_temp, phi_weights)
        f_temp_neg = np.multiply(f_temp_neg, phi_weights_neg)
        f_t = f_t - f_temp - f_temp_neg

        return f_t
Example no. 29
def section_c(k_vals=None):
    # Avoid a mutable default argument: the append below would otherwise
    # grow the shared default list across calls.
    if k_vals is None:
        k_vals = [1, 5, 10, 30, 50, 100, 150, 300]
    selected_images, h, w = get_pictures_by_name()
    data = np.array(selected_images)[:, :, 0]
    n = len(data)
    standardize(data)
    k_vals.append(len(data[0]))
    dist = []
    for k in k_vals:
        #print("k= "+str(k))
        l2 = 0
        U, S = PCA(data, k)
        V = np.matmul(data, np.matrix.transpose(U))
        xPrime = mat.transpose(np.matmul(np.transpose(U), np.transpose(V)))
        for i in range(5):
            rand_number = np.random.randint(0, n)  # high bound is exclusive
            #plot_vectors(np.array([data[rand_number], xPrime[rand_number]]), h, w, 1, 2)
            l2 += lin.norm(data[rand_number] - xPrime[rand_number])
        dist.append(l2)
    plt.plot(k_vals, dist, color='r', marker='o')
    plt.xlabel('dim')
    plt.ylabel('L2 norms')
    plt.title('L2_distances after changing dimensions')
    plt.show()
Example no. 30
    def LS(self, abscissa, observations, PARAM, x_fix, x_ini):
        # The initial parameters are the ones from DLT but where the radio button is set as free
        x = []
        for i in range(9):
            if (PARAM[i] == 1) or (PARAM[i] == 2):
                #Free or apriori values
                x.append(x_ini[i])
        x = array(x)

        # 2D coordinates are understood as observations
        observations = array(observations)
        # 3D coordinates are understood as the abscissas
        abscissa = array(abscissa)
        npoints = size(observations[:, 1])

        l_x = size(x)  # number of free parameters
        sigmaobservationservation = 1
        Kl = zeros(shape=(2 * npoints, 2 * npoints))

        # An error of "sigmaobservationservation" pixels is set a priori
        for i in range(npoints):
            Kl[2 * i - 1, 2 * i - 1] = sigmaobservationservation**2
            Kl[2 * i, 2 * i] = sigmaobservationservation**2

        # The P matrix is a weight matrix, useless if equal to identity (but can be used in some special cases)
        P = linalg.pinv(Kl)
        # A is the Jacobian matrix
        A = zeros(shape=(2 * npoints, l_x))
        # H is the hessian matrix
        H = zeros(shape=(l_x, l_x))
        # b is a transition matrix
        b = zeros(shape=(l_x))
        # v contains the residual errors between observations and predictions
        v = zeros(shape=(2 * npoints))
        # v_test contain the residual errors between observations and predictions after an update of H
        v_test = zeros(shape=(2 * npoints))
        # x_test is the updated parameters after an update of H
        x_test = zeros(shape=(l_x))
        # dx is the update vector of x and x_test
        dx = array([0.] * l_x)

        it = -1
        maxit = 1000
        # At least one iteration, dx > inc
        dx[0] = 1
        # Lambda is the weighting parameter in Levenberg-Marquardt between the gradient and the Gauss-Newton parts.
        Lambda = 0.01
        # increment used for the Jacobian and for the convergence criterion
        inc = 0.001
        while (max(abs(dx)) > inc) & (it < maxit):
            #new iteration, parameters updates are greater than the convergence criterion
            it = it + 1
            # For each observations, we compute the derivative with respect to each parameter
            # We form therefore the Jacobian matrix
            for i in range(npoints):
                #ubul and vbul are the prediction with current parameters
                ubul, vbul = self.dircal(x, abscissa[i, :], x_fix, PARAM)
                # The difference between the observation and prediction is used for parameters update
                v[2 * i - 1] = observations[i, 0] - ubul
                v[2 * i] = observations[i, 1] - vbul
                for j in range(l_x):
                    x_temp = copy(x)
                    x_temp[j] = x[j] + inc
                    u2, v2 = self.dircal(x_temp, abscissa[i, :], x_fix, PARAM)
                    A[2 * i - 1, j] = (u2 - ubul) / inc
                    A[2 * i, j] = (v2 - vbul) / inc
            # The sum of the square of residual (S0) must be as little as possible.
            # That's why we speak of "least square"... tadadam !
            S0 = sum(v**2)
            H = dot(dot(matrix.transpose(A), P), A)
            b = dot(dot(matrix.transpose(A), P), v)
            try:
                dx = dot(linalg.pinv(H + Lambda * diag(diag(H))), b)
                x_test = x + dx
            except linalg.LinAlgError:
                # The matrix is not always invertible.
                # In this case, we don't accept the update and go for another iteration
                S2 = S0
            else:
                for i in range(npoints):
                    # We check that the update has brought some improvement:
                    # that the sum of squares is less than before (a better least-squares fit!)
                    utest, vtest = self.dircal(x_test, abscissa[i, :], x_fix,
                                               PARAM)
                    v_test[2 * i - 1] = observations[i, 0] - utest
                    v_test[2 * i] = observations[i, 1] - vtest
                    S2 = sum(v_test**2)
            # Check if sum of square is less
            if S2 < S0:
                Lambda = Lambda / 10
                x = x + dx
            else:
                Lambda = Lambda * 10

        # Covariance matrix of parameters
        self.Qxx = sqrt(diag(linalg.inv(dot(dot(matrix.transpose(A), P), A))))

        p = zeros(shape=(len(PARAM)))
        m = 0
        #n = 0
        for k in range(len(PARAM)):
            if (PARAM[k] == 1) or (PARAM[k] == 2):
                p[k] = x[m]
                m = m + 1
            else:
                p[k] = x_fix[k]
                #n = n+1

        L1p = self.CoeftoMatrixProjection(p)

        x0 = p[0]
        y0 = p[1]
        z0 = p[2]
        tilt = p[3]
        azimuth = p[4]
        swing = p[5]
        focal = p[6]
        u0 = p[7]
        v0 = p[8]

        R = zeros((3, 3))
        R[0, 0] = -cos(azimuth) * cos(swing) - sin(azimuth) * cos(tilt) * sin(
            swing)
        R[0, 1] = sin(azimuth) * cos(swing) - cos(azimuth) * cos(tilt) * sin(
            swing)
        R[0, 2] = -sin(tilt) * sin(swing)
        R[1, 0] = cos(azimuth) * sin(swing) - sin(azimuth) * cos(tilt) * cos(
            swing)
        R[1, 1] = -sin(azimuth) * sin(swing) - cos(azimuth) * cos(tilt) * cos(
            swing)
        R[1, 2] = -sin(tilt) * cos(swing)
        R[2, 0] = -sin(azimuth) * sin(tilt)
        R[2, 1] = -cos(azimuth) * sin(tilt)
        R[2, 2] = cos(tilt)

        # Get "look at" vector for openGL pose
        ######################################

        #Generate vectors in camera system
        dirCam = array([0, 0, -focal])
        upCam = array([0, -1, 0])
        downCam = array([0, 1, 0])

        #Rotate in the world system
        dirWorld = dot(linalg.inv(R), dirCam.T)
        lookat = array(dirWorld) + array([x0, y0, z0])

        upWorld = dot(linalg.inv(R), upCam.T)
        #not_awesome_vector = array([0,0,-focal])
        #almost_awesome_vector = dot(linalg.inv(R),not_awesome_vector.T)
        #awesome_vector = array(almost_awesome_vector)+array([x0,y0,z0])

        return x, L1p, lookat, upWorld  #awesome_vector
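SciPy implements the same damped scheme; a sketch of replacing the manual loop, assuming a function residuals(x) that returns the v vector for free parameters x (method='lm' wraps MINPACK's Levenberg-Marquardt):

from scipy.optimize import least_squares

res = least_squares(residuals, x0=x, method='lm')
x_opt = res.x   # optimized free parameters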
Example no. 31
#!/usr/bin/env python3

from numpy import linalg
from numpy import matlib
from numpy import random
from numpy import matmul
from numpy import matrix

random.seed(1543)

amat = matlib.rand(10, 10)
bmat = linalg.inv(amat)
dmat = matrix.transpose(bmat)
cmat = matmul(amat, bmat)


def printmat(mat):
    rows = mat.shape[0]
    columns = mat.shape[1]
    print(rows, columns)
    for i in range(rows):
        for j in range(columns):
            print(mat.item((i, j)), end=" ")
        print()


printmat(amat)
printmat(dmat)
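Since bmat is the inverse of amat, cmat should be the 10×10 identity up to floating-point error; a quick check, as a sketch using the same import style:

from numpy import allclose, eye

print(allclose(cmat, eye(10)))   # expect True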
Example no. 32
import librosa
from python_speech_features import mfcc
import scipy.io.wavfile as wav
from pydub import AudioSegment
from numpy import matrix
from scipy.fftpack import dct

# Load the audio from 30 to 35 seconds into y
y, sr = librosa.load("./Tutorials/sample.wav", offset=30, duration=5.0)
onset_frames = librosa.onset.onset_detect(y=y, sr=sr)
actual_frames = librosa.frames_to_time(onset_frames, sr=sr)

# For getting the first onset from the audio
t1 = 30 * 1000  # start time
t2 = (30 + actual_frames[0]) * 1000  # end time
newAudio = AudioSegment.from_wav("./Tutorials/sample.wav")
newAudio = newAudio[t1:t2]
newAudio.export('./Tutorials/newSample.wav', format="wav")

# Getting the MFCC
(rate, sig) = wav.read("./Tutorials/newSample.wav")
mfcc_feat = matrix.transpose(
    mfcc(signal=sig, samplerate=rate, nfft=2048, ceplifter=4))

# for getting the first 4 dct of the mfcc we obtained
# TODO: In paper it said that we get 52 dimensional vector. Check that???
mfcc_feat_dct = []
for mfcc_coeff in mfcc_feat:
    mfcc_feat_dct.append(dct(x=mfcc_coeff, n=4))
Example no. 33
iterations = 0
error = 0
for i in range(10000):
    p = [(uniform(-1, 1), uniform(-1, 1)), (uniform(-1, 1), uniform(-1, 1))]
    target = [
        1, -(p[0][0] - p[1][0]) / (p[1][0] - p[1][1]),
        -p[0][0] + (p[0][0] - p[1][0]) * p[1][0] / (p[1][0] - p[1][1])
    ]
    N = [(uniform(-1, 1), uniform(-1, 1), 1) for i in range(10)]
    classification = list(map(
        y_function,
        [(x[0] * target[0] + x[1] * target[1] + x[2] * target[2]) > 0
         for x in N]))
    g = matrix.transpose(
        matrix(pinv([list(n) for n in N])) *
        matrix([[float(c)] for c in classification])).tolist()[0]
    g_classification = list(map(y_function,
                                [(x[0] * g[0] + x[1] * g[1] + x[2] * g[2]) > 0
                                 for x in N]))

    while g_classification != classification:
        #print g

        incorrect = [
            p for p in N
            if classification[N.index(p)] != g_classification[N.index(p)]
        ]
        #print len(incorrect)
        index = randrange(0, len(incorrect))
        #print [classification[N.index(incorrect[index])] * x for x in incorrect[index]]
Example no. 34
 def LS(self,abscissa,observations,PARAM,x_fix,x_ini):
     # The initial parameters are the ones from DLT but where the radio button is set as free
     x = []
     for i in range(9):
         if PARAM[i]:
             x.append(x_ini[i])
     x = array(x)
     # 2D coordinates are understood as observations
     observations = array(observations)
     # 3D coordinates are understood as the abscissas
     abscissa = array(abscissa)
     npoints = size(observations[:,1])
     
     l_x = int(sum(PARAM));
     sigmaobservationservation = 1
     Kl =  zeros(shape=(2*npoints,2*npoints))
     
     # An error of "sigmaobservationservation" pixels is set a priori
     for i in range (npoints):
         Kl[2*i-1,2*i-1]=sigmaobservationservation**2
         Kl[2*i,2*i]=sigmaobservationservation**2
     
     # The P matrix is a weight matrix, useless if equal to identity (but can be used in some special cases)    
     P=linalg.pinv(Kl);
     # A is the Jacobian matrix
     A = zeros(shape=(2*npoints,l_x))
     # H is the hessian matrix
     H = zeros(shape=(l_x,l_x))
     # b is a transition matrix
     b = zeros(shape=(l_x))
     # v contains the residual errors between observations and predictions
     v = zeros(shape=(2*npoints))
     # v_test contain the residual errors between observations and predictions after an update of H
     v_test = zeros(shape=(2*npoints))
     # x_test is the updated parameters after an update of H
     x_test = zeros(shape=(l_x))
     # dx is the update vector of x and x_test
     dx = array([0.]*l_x)
     
     
     it=-1;            
     maxit=1000;     
     # At least one iteration, dx > inc
     dx[0]=1
     # Lambda is the weighting parameter in Levenberg-Marquardt between the gradient and the Gauss-Newton parts.
     Lambda = 0.01
     # increment used for the Jacobian and for the convergence criterion
     inc = 0.001
     while (max(abs(dx))> inc) & (it<maxit):
         #new iteration, parameters updates are greater than the convergence criterion
         it=it+1;
         # For each observations, we compute the derivative with respect to each parameter
         # We form therefore the Jacobian matrix
         for i in range(npoints):
             #ubul and vbul are the prediction with current parameters
             ubul, vbul = self.dircal(x,abscissa[i,:],x_fix,PARAM)
             # The difference between the observation and prediction is used for parameters update
             v[2*i-1]=observations[i,0]-ubul
             v[2*i]=observations[i,1]-vbul
             for j in range(l_x):
                 x_temp = copy(x);
                 x_temp[j] = x[j]+inc
                 u2, v2 = self.dircal(x_temp,abscissa[i,:],x_fix,PARAM)
                 A[2*i-1,j]= (u2-ubul)/inc
                 A[2*i,j]= (v2-vbul)/inc
         # The sum of the square of residual (S0) must be as little as possible.        
         # That's why we speak of "least square"... tadadam !
         S0 = sum(v**2);
         H = dot(dot(matrix.transpose(A),P),A);
         b = dot(dot(matrix.transpose(A),P),v);
         try:
             dx = dot(linalg.pinv(H+Lambda*diag(diag(H))),b);
             x_test = x+dx;
         except linalg.LinAlgError:
             # The matrix is not always invertible.
             # In this case, we don't accept the update and go for another iteration
             S2 = S0
         else:
             for i in range(npoints):
                 # We check that the update has brought some improvement:
                 # that the sum of squares is less than before (a better least-squares fit!)
                 utest, vtest = self.dircal(x_test,abscissa[i,:],x_fix,PARAM);
                 v_test[2*i-1]=observations[i,0]-utest;
                 v_test[2*i]=observations[i,1]-vtest; 
                 S2 = sum(v_test**2);
         # Check if sum of square is less
         if S2<S0:
             Lambda = Lambda/10
             x = x + dx
         else:
             Lambda = Lambda*10
     
     # Covariance matrix of parameters
     self.Qxx = sqrt(diag(linalg.inv(dot(dot(matrix.transpose(A),P),A))))
     
     p = zeros(shape=(len(PARAM)))
     m = 0
     n = 0
     for k in range(len(PARAM)):
         if PARAM[k]:
             p[k] = x[m]
             m = m+1
         else:
             p[k] = x_fix[n]
             n = n+1
     L1p = self.CoeftoMatrixProjection(p)
     
     x0 = p[0];
     y0 = p[1];
     z0 = p[2];
     tilt = p[3];
     azimuth = p[4];
     swing = p[5];
     focal = p[6];
     u0 = p[7];
     v0 = p[8];
     
     R = zeros((3,3))
     R[0,0] = -cos(azimuth)*cos(swing)-sin(azimuth)*cos(tilt)*sin(swing)
     R[0,1] =  sin(azimuth)*cos(swing)-cos(azimuth)*cos(tilt)*sin(swing) 
     R[0,2] = -sin(tilt)*sin(swing)
     R[1,0] =  cos(azimuth)*sin(swing)-sin(azimuth)*cos(tilt)*cos(swing)
     R[1,1] = -sin(azimuth)*sin(swing)-cos(azimuth)*cos(tilt)*cos(swing) 
     R[1,2] = -sin(tilt)*cos(swing)
     R[2,0] = -sin(azimuth)*sin(tilt)
     R[2,1] = -cos(azimuth)*sin(tilt)
     R[2,2] =  cos(tilt)
     
     # Get "look at" vector for openGL pose
     not_awesome_vector = array([0,0,-focal])
     almost_awesome_vector = dot(linalg.inv(R),not_awesome_vector.T)
     awesome_vector = array(almost_awesome_vector)+array([x0,y0,z0])
     
     return x, L1p, awesome_vector
Example no. 35
                          usecols = [0, 1], 
                          dtype = {0: 'S30', 1: 'int'}, 
                          names = ['word', document], 
                          header = None)
        
        # merge with previous ones
        y = pd.merge(y, y_i, on = 'word', how = 'outer')

        # kill NaNs
        y = y.fillna(0)

# choose prior
print('')
priorChoice = int(input('Uninformative (1) or informative (2) prior? '))
if priorChoice == 1:
    alpha_i = m.transpose(m([0.01] * len(y)))
elif priorChoice == 2:
    priors = pd.read_csv(rpath + 'corpus.csv', # load global frequencies
                         usecols = [0, 1], 
                         names = ['word', 'gfreq'], 
                         header = None)
    y = pd.merge(y, priors, on = 'word', how = 'left') # merge w/ y
    y = y.fillna(y['gfreq'].min()) # replace missing by argmin(alphas)
    alpha_i = m.transpose(m(y.gfreq)) # extract alphas
    del y['gfreq'] # clean up y
else:
    sys.exit('Invalid choice')

# estimate p_i
yword = m.transpose(m(np.hstack((['word'], np.array(y.word))))) # word list
y_i = m(y.iloc[:, 1:])
Example no. 36
def EoM(t,x):
	'''
		FLIGHT Equations of Motion
	'''

	global m 
	global Ixx 
	global Iyy
	global Izz
	global Ixz
	global S
	global b
	global cBar
	global CONHIS
	global u
	global tuHis
	global deluHis
	global uInc
	global MODEL
	global RUNNING

	MODEL = 2
	CONHIS = 1
	RUNNING = 1
	tuHis	=	array([0, 33, 67, 100])
	deluHis	=	array(zeros(28)).reshape((4,7))
	u = array([0,0,0,0,0,0,0]).reshape((7,1)).ravel()

	print(f'tuHis = {tuHis}')
	print(f'deluHis = {deluHis}')
	print(f'u = {u}')

	print(f'x = {x}')

	if MODEL == 0:
		from AeroModelAlpha import AeroModelAlpha as AeroModel
	elif MODEL == 1:
		from AeroModelMach import AeroModelMach as AeroModel
	else:
		from AeroModelUser import AeroModelUser as AeroModel

	##### Event Function ####### ????????????????
	############################


	# Earth-to-Body-Axis Transformation Matrix
	HEB = DCM(x[9],x[10],x[11])
	# Atmospheric States
	x[5] = min(x[5], 0)     #Limit x[5] to <=0
	(airDens,airPres,temp,soundSpeed) = Atoms(-x[5])
	# Body-Axis Wind Field
	windb	=	WindField(-x[5],x[9],x[10],x[11])
	# Body-Axis Gravity Components
	gb = matmul(HEB, array([0,0,9.80665]).reshape((3,1))).ravel()

	print(f'windb = {windb}')


	# Air-Relative Velocity Vector
	x[0]    =   max(x[0],0)     # Limit axial velocity to >= 0 m/s
	Va		=   array([[x[0],x[1],x[2]]]).reshape(3,1).ravel() + windb

	print(f'Va 1st part = {array([[x[0],x[1],x[2]]]).reshape(3,1).ravel()}')
	print(f'windb.T = {matrix.transpose(windb)}')
	print(f'Va = {Va}')

	V		=	sqrt(matmul(matrix.transpose(Va), Va))

	print(f'V = {V}')

	alphar = arctan(Va[2]/abs(Va[0]))
	#alphar  =   min(alphar, (pi/2 - 1e-6))     # Limit angle of attack to <= 90 deg
	#alpha	=	57.2957795 * float(alphar)
	alpha = 57.2957795 * alphar
	betar	= 	arcsin(Va[1] / V)
	beta	= 	57.2957795 * betar
	Mach	= 	V / soundSpeed
	qbar	=	0.5 * airDens * V**2

	print(f'Mach = {Mach}')


	# Incremental Flight Control Effects
	if CONHIS >=1 and RUNNING ==1:
		## uInc = array([])
		uInc    =   interp(t, tuHis,deluHis[:, 0])
		uInc    =   matrix.transpose(array(uInc))   # Transpose
		uTotal  =   u + uInc
	else:
		uTotal  =   u
	
	# Force and Moment Coefficients; Thrust
	(CD,CL,CY,Cl,Cm,Cn,Thrust) = AeroModel(x,uTotal,Mach,alphar,betar,V)
	print(f'CD = {CD}')

	m               =   4800
	Ixx = 20950
	Iyy=49675
	Izz = 62525
	Ixz = -1710
	cBar            =   3.03
	b               =   10
	S               =   27.77
	lHT             =   5.2
	lVT             =   3.9
	StaticThrust    =   49000

	qbarS   =   qbar * S

	CX	=	-CD * cos(alphar) + CL * sin(alphar)	# Body-axis X coefficient
	CZ	= 	-CD * sin(alphar) - CL * cos(alphar)	# Body-axis Z coefficient 

	# State Accelerations
	Xb =	(CX * qbarS + Thrust) / m
	print(f'CX = {CX}')
	print(f'qbarS = {qbarS}')
	print(f'Thrust = {Thrust}')
	Yb =	CY * qbarS / m
	Zb =	CZ * qbarS / m
	Lb =	Cl * qbarS * b
	Mb =	Cm * qbarS * cBar
	Nb =	Cn * qbarS * b
	nz	=	-Zb / 9.80665               # Normal load factor

	# Dynamic Equations
	xd1 = Xb + gb[0] + x[8] * x[1] - x[7] * x[2]
	xd2 = Yb + gb[1] - x[8] * x[0] + x[6] * x[2]
	xd3 = Zb + gb[2] + x[7] * x[0] - x[6] * x[1]

	print(f'Xb = {Xb}')
	print(f'gb[0] = {gb[0]}')

	print(f'xd1 = {xd1}')

	# xd1 = xd1[0][0]
	# xd2 = xd2[0][0]
	# xd3 = xd3[0][0]

	HEB_T = matrix.transpose(HEB)
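	# HEB is orthonormal, so its transpose is its inverse: HEB_T maps body-axis
	# velocity back to earth axes for the position derivatives below.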
	y = matmul(HEB_T, array([x[0], x[1], x[2]]))
	#HEB_T = matrix.transpose(HEB)
	#y = matmul(HEB_T, (array(x[0],x[1],x[2]))


	xd4 = y[0]
	xd5 = y[1]
	xd6 = y[2]

	xd7	= 	((Izz * Lb + Ixz * Nb - (Ixz * (Iyy - Ixx - Izz) * x[6] + (Ixz**2 + Izz * (Izz - Iyy)) * x[8]) * x[7]) / (Ixx * Izz - Ixz**2))
	xd8 = 	((Mb - (Ixx - Izz) * x[6] * x[8] - Ixz * (x[6]**2 - x[8]**2)) / Iyy)
	xd9 =	((Ixz * Lb + Ixx * Nb + (Ixz * (Iyy - Ixx - Izz) * x[8] + (Ixz**2 + Ixx * (Ixx - Iyy)) * x[6]) * x[7]) / (Ixx * Izz - Ixz**2))

	# xd7 = xd7[0][0]
	# xd8 = xd8[0][0]
	# xd9 = xd9[0][0]

	cosPitch	=	cos(x[10])
	if abs(cosPitch)	<=	0.00001:
		cosPitch	=	0.00001 * (abs(cosPitch)/cosPitch)      # sign(cosPitch) == (abs(cosPitch)/cosPitch) for python
	tanPitch	=	sin(x[10]) / cosPitch
		
	xd10	=	x[6] + (sin(x[9]) * x[7] + cos(x[9]) * x[8]) * tanPitch
	xd11	=	cos(x[9]) * x[7] - sin(x[9]) * x[8]
	xd12	=	(sin(x[9]) * x[7] + cos(x[9]) * x[8]) / cosPitch

	
	xdot	=	array([xd1,xd2,xd3,xd4,xd5,xd6,xd7,xd8,xd9,xd10,xd11,xd12])

	for i in range(1,13):
		print(f'xd{str(i)} = {xdot[i-1]} ') 

	return xdot
Example no. 37
I = []
for i in range(len(M[0])):
   row = []
   for j in range(len(M[0])):
      if i == j:
         row.append(1.0)
      else:
         row.append(0.0)
   I.append(row)
I = matrix(I)
print((M.transpose()*M).diagonal(0))
print((M.transpose()*M + (float(10**k)*I)).diagonal(0))
g = (pinv(M.transpose()*M + (float(10**k)*I))*M.transpose()*matrix([[float(c)] for c in classification])).transpose()
print(g)
   
g_classification = matrix(list(map(y_function2, [g*matrix.transpose(matrix(m)) for m in M])))
errors = [x != t for (x, t) in zip(g_classification.tolist()[0], classification)]
for e in errors:
   if e:
      in_error += 1
in_error /= float(len(errors))
print(in_error)




N = []
classification = []
for line in out_sample:
   x1,x2,y = map(lambda x: float(x), line.split())
   N.append([1,x1,x2])
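This block is ridge regression (weight decay): g = (ZᵀZ + λI)⁻¹Zᵀy with λ = 10^k. A compact sketch, assuming Z is the feature matrix and y the label vector as NumPy arrays (import numpy as np):

lam = 10.0**k
g = np.linalg.solve(Z.T @ Z + lam*np.eye(Z.shape[1]), Z.T @ y)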
Example no. 38
    2: "linear-sinusoidal regression",
    3: "exponential regression"
}
##
x_t = np.arange(1, train + 1, 1)
x_t = [x_t, np.copy(x_t), np.copy(x_t), np.copy(x_t)]
##
x_t[0] = np.array([np.ones(train), x_t[0]])
x_t[1] = np.array([np.ones(train), x_t[1], x_t[1]**2])
x_t[2] = np.array([np.ones(train), x_t[2], np.sin(2 * ma.pi / 500 * x_t[2])])
x_t[3] = np.array([np.ones(train), x_t[3]])
## calculation
product = [np.ones(train), np.ones(train), np.ones(train), np.ones(train)]
for i in range(4):
    product[i] = np.copy(x_t[i])
    product[i] = np.matmul(product[i], mt.transpose(product[i]))
    product[i] = la.inv(product[i])
    product[i] = np.matmul(product[i], x_t[i])
    product[i] = np.matmul(
        product[i],
        (np.log(mt.transpose(head)) if (i == 3) else
         mt.transpose(head)))  # exponential must fit the logarithm of records
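The i == 3 branch is the classic log-linear trick: fit y = a·e^(bx) by ordinary least squares on ln y, so the recovered weights are (ln a, b). Recovering the original parameters, as a sketch (assuming head is a length-train vector):

ln_a, b = product[3][0], product[3][1]   # intercept and slope of the log fit
a = np.exp(ln_a)                         # back-transform the intercept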

# %%
product

# %% [markdown]
# ### plotting whole records and regression points through whole domain

# %%
domain = np.arange(1., train + 11, 1)

def y_function(b):
   if b:
      return 1
   else:
      return -1
     
iterations = 0
error = 0
for i in range(10000):
   p = [(uniform(-1,1), uniform(-1,1)), (uniform(-1,1), uniform(-1,1))]
   target = [1, -(p[0][0]-p[1][0])/(p[1][0]-p[1][1]), -p[0][0]+(p[0][0]-p[1][0])*p[1][0]/(p[1][0]-p[1][1])]
   N = [(uniform(-1,1),uniform(-1,1),1) for i in range(10)]
   classification = list(map(y_function, [(x[0]*target[0] + x[1]*target[1] + x[2]*target[2]) > 0 for x in N]))
   g = matrix.transpose(matrix(pinv([list(n) for n in N]))*matrix([[float(c)] for c in classification])).tolist()[0]
   g_classification = list(map(y_function, [(x[0]*g[0] + x[1]*g[1] + x[2]*g[2]) > 0 for x in N]))
   
   while g_classification != classification:
      #print g
     
      incorrect = [p for p in N if classification[N.index(p)] != g_classification[N.index(p)]]
      #print len(incorrect)
      index = randrange(0,len(incorrect))
      #print [classification[N.index(incorrect[index])] * x for x in incorrect[index]]
      g = [p[0] + p[1] for p in zip(g, [classification[N.index(incorrect[index])] * x for x in incorrect[index]])]
      g_classification = list(map(y_function, [(x[0]*g[0] + x[1]*g[1] + x[2]*g[2]) > 0 for x in N]))
      iterations += 1
      #print classification[N.index(incorrect[index])]
      #print incorrect[index], N.index(incorrect[index])
      #print classification, g_classification