Code example #1
    def train(self, numOfNeurons):
        # setting matrices to zero
        self.protos = np.zeros(shape=(0, len(self.scaledData[1])))
        self.weights = 0
        self.spread = np.zeros(shape=(1, numOfNeurons))

        #self.pickDatapoints(numOfNeurons)
        self.kmeans(numOfNeurons, self.scaledDatanoClassColumn)
        self.sigma(numOfNeurons)
        #self.std_dev(numOfNeurons)

        hiddenOut = np.zeros(shape=(0, numOfNeurons))
        for item in self.scaledDatanoClassColumn:
            out = []
            for i, proto in enumerate(self.protos):
                distance = np.square(euclidean_distance(item, proto))
                # avoid division by zero when a spread collapses to zero
                if np.square(self.spread[0, i]) == 0:
                    self.spread[0, i] += 0.000001
                    #print(np.square(self.spread[0, i]))
                neuronOut = np.exp(-(distance)/(2 * np.square(self.spread[0, i])))
                out.append(neuronOut)
            hiddenOut = np.vstack([hiddenOut, np.array(out)])
        #print ("hiddenOut:\n", hiddenOut)

        #print ("klase:\n", self.ClassLabels)
        #print ("pseudo inverz:\n", pinv(hiddenOut).shape)
        if hiddenOut.any():
            self.weights = np.dot(pinv(hiddenOut), self.ClassLabels)
Code example #2
    def train(self, numOfNeurons, training_method='k-means', spread_method='sigma'):
        # setting matrices to zero
        self.protos = np.zeros(shape=(0, len(self.scaledData[1])))
        self.weights = 0
        self.spread = np.zeros(shape=(1, numOfNeurons))

        #Set training variants for centers and spreads
        if training_method == 'random':
            self.pickDatapoints(numOfNeurons)
        elif training_method == 'k-means':
            self.kmeans(numOfNeurons, self.scaledDatanoClassColumn)
        if spread_method == 'sigma':
            self.sigma(numOfNeurons)
        elif spread_method == 'std_dev':
            self.std_dev(numOfNeurons)

        #calculate outputs from hidden layer
        hiddenOut = np.zeros(shape=(0, numOfNeurons))
        for item in self.scaledDatanoClassColumn:
            out = []
            for i, proto in enumerate(self.protos):
                distance = np.square(euclidean_distance(item, proto))
                # avoid division by zero when a spread collapses to zero
                if np.square(self.spread[0, i]) == 0:
                    self.spread[0, i] += 0.000001
                    #print(np.square(self.spread[0, i]))
                neuronOut = np.exp(-(distance) /
                                   (2 * np.square(self.spread[0, i])))
                out.append(neuronOut)
            hiddenOut = np.vstack([hiddenOut, np.array(out)])
        #print ("hiddenOut:\n", hiddenOut)

        #calculate second layer weights
        if hiddenOut.any():
            self.weights = np.dot(pinv(hiddenOut), self.ClassLabels)
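Both variants above end with the same closed-form step: stack the Gaussian activations of all training samples into a hidden-output matrix H, then set the second-layer weights to pinv(H) dot labels, the least-squares solution of H.W = labels. A minimal self-contained sketch of that step on toy data (the names `centers` and `spreads` are hypothetical stand-ins for the class attributes above):

import numpy as np
from numpy.linalg import pinv

# Toy data: 6 samples with 2 features, 3 RBF centers (hypothetical values).
X = np.random.rand(6, 2)
centers = np.random.rand(3, 2)
spreads = np.full(3, 0.5)
labels = np.eye(3)[np.random.randint(0, 3, size=6)]  # one-hot class targets

# Hidden layer: Gaussian of the squared distance to each center.
sq_dist = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
H = np.exp(-sq_dist / (2.0 * spreads ** 2))

# Closed-form output weights: least-squares solution via the pseudoinverse.
weights = pinv(H).dot(labels)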
Code example #3
    def testImage(self):
        C = numpy.random.rand(50, 50)
        information = pinv(C)
        T = numpy.random.rand(50, 50, 3)

        report = Node("rangefinder")
        report.data("Tx", T[:, :, 0])
        report.data("Ty", T[:, :, 1])
        report.data("Tz", T[:, :, 2])
        cov = report.data("covariance", C)
        report.data("information", information)

        pylab = get_pylab_instance()
        with cov.data_file("plot", MIME_PNG) as f:
            pylab.figure()
            pylab.plot(C)
            pylab.savefig(f)
            pylab.close()

        report.table("results", cols=["One", "Two"], data=[[1, 2], [3, 4]])

        f = report.figure(caption="Covariance and information matrix", cols=3)
        f.sub("covariance", "should default to plot")
        f.sub("covariance/plot", "Same as <-")

        f = report.figure("Tensors", cols=3)
        f.sub("Tx", display="posneg")
        f.sub("Ty", display="posneg")
        f.sub("Tz", display="posneg")

        self.node_serialization_ok(report)
Code example #4
    def testImage(self):
        C = numpy.random.rand(50, 50)
        information = pinv(C)
        T = numpy.random.rand(50, 50, 3)

        report = Node('rangefinder')
        report.data('Tx', T[:, :, 0])
        report.data('Ty', T[:, :, 1])
        report.data('Tz', T[:, :, 2])
        cov = report.data('covariance', C)
        report.data('information', information)

        pylab = get_pylab_instance()
        with cov.data_file('plot', 'image/png') as f:
            pylab.figure()
            pylab.plot(C)
            pylab.savefig(f)
            pylab.close()

        report.table('results',
                     cols=['One', 'Two'],
                     data=[[1, 2], [3, 4]])

        f = report.figure(caption='Covariance and information matrix', cols=3)
        f.sub('covariance', 'should default to plot')
        f.sub('covariance/plot', 'Same as <-')

        f = report.figure('Tensors', cols=3)
        f.sub('Tx', display='posneg')
        f.sub('Ty', display='posneg')
        f.sub('Tz', display='posneg')

        self.node_serialization_ok(report)
Code example #5
File: multilateration.py Project: Feriority/doppler
def multilaterate(mic_positions, time__mic_dart_distances_stream):
    """Take a stream of mic-dart distances and yield coordinates.

    mic_positions must be an N x 3 array of coordinates of each mic.
    time__mic_dart_distances_stream must be a generator of time + CHANNEL *
        distances from mic to dart.
    Yields time, 3D coords of dart.
    """
    mic_positions = array(mic_positions)
    origin = mic_positions[0]
    mic_positions = mic_positions - origin

    for time__mic_dart_distances in time__mic_dart_distances_stream:
        time_seconds = time__mic_dart_distances[0]
        mic_dart_distances = array(time__mic_dart_distances[1:])

        # The algorithm divides by distance differences, which are zero when
        # a mic's distance equals the reference mic's. Add some wiggle if so.
        degeneracies = (mic_dart_distances[1:] == mic_dart_distances[0])
        mic_dart_distances[1:] += 1.0e-12 * degeneracies

        vt = mic_dart_distances - mic_dart_distances[0]
        free_vt = vt[2:]

        A = 2.0 * mic_positions[2:, 0] / free_vt - 2.0 * mic_positions[1, 0] / vt[1]
        B = 2.0 * mic_positions[2:, 1] / free_vt - 2.0 * mic_positions[1, 1] / vt[1]
        C = 2.0 * mic_positions[2:, 2] / free_vt - 2.0 * mic_positions[1, 2] / vt[1]
        D = free_vt - vt[1] - sum(mic_positions[2:, :] ** 2, axis=1) / free_vt + sum(mic_positions[1] ** 2) / vt[1]

        M = concatenate([transpose_1D(A), transpose_1D(B), transpose_1D(C)], axis=1)

        try:
            yield time_seconds, pinv(M).dot(-transpose_1D(D)).reshape(3) + origin
        except LinAlgError:
            sys.stderr.write('Could not multilaterate at t = %f\n' % time_seconds)
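The numerical core above is a least-squares solve of the overdetermined system M.x = -D via the pseudoinverse. For a full-rank M this matches numpy.linalg.lstsq, which is a common, more direct alternative; a quick sketch with toy values:

import numpy as np

M = np.random.rand(8, 3)  # 8 equations, 3 unknowns (toy values)
D = np.random.rand(8)

x_pinv = np.linalg.pinv(M).dot(-D)
x_lstsq, *_ = np.linalg.lstsq(M, -D, rcond=None)
assert np.allclose(x_pinv, x_lstsq)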
Code example #6
File: affine.py Project: AndreaCensi/bvexp201007
    def update(self, value):
        if self.maximum is None:
            self.maximum = value
        else:
            self.maximum = numpy.maximum(value, self.maximum)

        if self.minimum is None:
            self.minimum = value
        else:
            self.minimum = numpy.minimum(value, self.minimum)

        self.mean_accum.update(value)
        self.mean = self.mean_accum.get_value()
        value_norm = value - self.mean
        P = outer(value_norm, value_norm)
        self.covariance_accum.update(P)
        self.covariance = self.covariance_accum.get_value()
        try:
            self.information = pinv(self.covariance, rcond=1e-2)
        except LinAlgError:
            filename = "pinv-failure"
            with open(filename + ".pickle", "wb") as f:  # pickle needs binary mode
                self.last_value = value
                pickle.dump(self, f)

            raise JobFailed("Did not converge; saved on %s" % filename)
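The rcond argument used here tells pinv to treat singular values below rcond times the largest one as zero, which regularises a near-singular covariance instead of blowing up. A tiny illustration:

import numpy as np

P = np.diag([1.0, 1e-6])                     # nearly singular "covariance"
info_full = np.linalg.pinv(P)                # inverts both directions
info_trunc = np.linalg.pinv(P, rcond=1e-2)   # small direction treated as zero
print(info_full[1, 1], info_trunc[1, 1])     # 1000000.0 vs 0.0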
Code example #7
File: linearreg.py Project: Dfred/concept-robot
    def train_least_squares(self, inputs, outputs):
        '''
        inputs  - is a matrix where each row is an input.
        outputs - is a matrix where each row is a corresponding output.
        
        based on: http://en.wikipedia.org/wiki/Linear_regression (2007/06/07)
        '''
        self.mat = []
        self.RMSE = []
        # create the data matrix
        for output in range(outputs.shape[1]):
            print "Training output ", output
            y = outputs[:,output]
            #print "y:\n",y
            X = inputs
            tmp = numpy.ones(shape=(X.shape[0],1))
            X = numpy.concatenate([tmp, X],axis=1)
            #print "X:\n",X
            B = dot(dot(pinv(dot(X.transpose(),X)),X.transpose()),y)
            #print "B:\n",B
            
            E = y - dot(X,B)
            self.RMSE.append(sqrt((E*E).sum()))
            #print "E:", E, E < 0.0001
            self.mat.append( B )

        self.mat =  numpy.array(self.mat)
        
        return self.RMSE
Code example #8
    def train_least_squares(self, inputs, outputs):
        '''
        inputs  - is a matrix where each row is an input.
        outputs - is a matrix where each row is a corresponding output.
        
        based on: http://en.wikipedia.org/wiki/Linear_regression (2007/06/07)
        '''
        self.mat = []
        self.RMSE = []
        # create the data matrix
        for output in range(outputs.shape[1]):
            print "Training output ", output
            y = outputs[:, output]
            #print "y:\n",y
            X = inputs
            tmp = numpy.ones(shape=(X.shape[0], 1))
            X = numpy.concatenate([tmp, X], axis=1)
            #print "X:\n",X
            B = dot(dot(pinv(dot(X.transpose(), X)), X.transpose()), y)
            #print "B:\n",B

            E = y - dot(X, B)
            self.RMSE.append(sqrt((E * E).sum()))
            #print "E:", E, E < 0.0001
            self.mat.append(B)

        self.mat = numpy.array(self.mat)

        return self.RMSE
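Both versions form the normal equations explicitly: B = pinv(X.T X) . X.T . y. Applying the pseudoinverse to X directly yields the same least-squares coefficients without squaring the condition number; a sketch on toy data:

import numpy as np
from numpy.linalg import pinv

X = np.concatenate([np.ones((20, 1)), np.random.rand(20, 3)], axis=1)
y = np.random.rand(20)

B_normal = pinv(X.T.dot(X)).dot(X.T).dot(y)  # normal-equations form, as above
B_direct = pinv(X).dot(y)                    # same coefficients, better conditioned
assert np.allclose(B_normal, B_direct)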
Code example #9
 def train(self, X, y):
     self.generateHiddenNeurons(X)
     self.sigma()
     hiddenOut = np.zeros(shape=(X.shape[0], self.hidden_shape))
     for i, item in enumerate(X):
         out = []
         for j, proto in enumerate(self.hidden_neurons):
             distance = np.square(np.linalg.norm(item - proto))
             hiddenOut[i, j] = np.exp(-(distance) / (np.square(self.spread)))
             #out.append(neuronOut)
         #hiddenOut = np.vstack([hiddenOut,np.array(out)])
     #print hiddenOut
     #print ('hiddenOut',hiddenOut.shape)
     print(pinv(hiddenOut).shape, 'X', y.shape)
     self.weights = np.dot(pinv(hiddenOut), y)
     print('weights = ', self.weights.shape)
Code example #10
def getcorrection_adding(old_tree, new_tree, sigma, branches, U_matrix):

    node_keys = sorted(get_leaf_keys(old_tree))

    A, _, _ = make_coefficient_matrix(old_tree,
                                      node_keys=node_keys,
                                      branch_keys=branches[:-3])
    B, _, _ = make_coefficient_matrix(new_tree,
                                      node_keys=node_keys,
                                      branch_keys=branches)

    x_A = get_specific_branch_lengths(old_tree, branches[:-3])
    x_B = get_specific_branch_lengths(new_tree, branches)

    B2 = B.dot(U_matrix)

    lambd = pinv(B2.dot(B2.T)).dot(A - B2).dot(x_A)

    mu_new = (B2.T).dot(lambd) + x_A

    x_new_reduced = mu_new + norm.rvs(scale=sigma, size=len(mu_new))

    q_forward = reduce(mul, norm.pdf(mu_new - x_new_reduced, scale=sigma))

    x_new = U_matrix.dot(x_new_reduced)
    print('x_A', x_A)
    print('x_B', x_B)
    print('mu_new', mu_new)
    print('x_new_reduced', x_new_reduced)
    print('x_new', x_new)

    reverse_lambd = pinv(A.dot(A.T)).dot(B2 - A).dot(x_new_reduced)
    reverse_mu_new = (A.T).dot(reverse_lambd) + x_new_reduced

    print('matrix_rank, dimension (A)', matrix_rank(A), A.shape)
    print('matrix_rank, dimension (B)', matrix_rank(B), B.shape)
    print('mu_reverse', reverse_mu_new)

    q_backward = reduce(mul, norm.pdf(reverse_mu_new - x_A, scale=sigma))

    # apply the new branch lengths
    #print branches

    new_tree = update_specific_branch_lengths(new_tree, branches, x_new)

    return new_tree, q_forward, q_backward
Code example #11
def normalEquation(X, y):
    """
    X: matrix of features, one sample per row (without bias unit)
    y: values (continuous) corresponding to rows (samples) in X
    """
    numSamples = y.size
    X = np.insert(X, 0, np.ones(numSamples), axis=1)

    # matrix product, not elementwise '*': theta = pinv(X) . y
    return pinv(X).dot(y)
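A quick sanity check of normalEquation on noiseless toy data (hypothetical values), where it should recover the generating parameters exactly:

import numpy as np
from numpy.linalg import pinv

X = np.random.rand(50, 2)
true_theta = np.array([1.0, 2.0, -3.0])      # bias, then one weight per feature
y = true_theta[0] + X.dot(true_theta[1:])

theta = normalEquation(X, y)
assert np.allclose(theta, true_theta)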
Code example #12
def regularised_ml_weights(inputmtx, targets, reg_coeff):
    """
    This method returns the regularised weights that give the best linear fit between
    the processed inputs and the targets.
    """
    Phi = np.matrix(inputmtx)
    targets = np.matrix(targets).reshape((len(targets), 1))
    I = np.identity(Phi.shape[1])
    weights = linalg.pinv(I * reg_coeff + Phi.transpose() * Phi) * Phi.transpose() * targets
    return np.array(weights).flatten()
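This is ridge regression in closed form, w = pinv(reg_coeff * I + Phi.T Phi) . Phi.T . t; as reg_coeff grows the weights shrink toward zero, and as it vanishes the fit approaches the unregularised ml_weights of example #14 below. A toy usage sketch (hypothetical data):

import numpy as np

Phi = np.random.rand(30, 4)
targets = np.random.rand(30)

w_heavy = regularised_ml_weights(Phi, targets, reg_coeff=10.0)
w_light = regularised_ml_weights(Phi, targets, reg_coeff=1e-8)
# Heavier regularisation shrinks the weight vector toward zero.
print(np.linalg.norm(w_heavy), np.linalg.norm(w_light))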
Code example #13
def calculate_scores(results):
    teams = extract_teams(results)
    selection_matrix = build_selection_matrix(results, teams)
    result_vector = build_result_vector(results)

    score_vector = pinv(selection_matrix).dot(result_vector.T)
    error_vector = calculate_errors(score_vector, result_vector,
                                    selection_matrix)

    return map_scores_to_teams(score_vector, teams), error_vector
Code example #14
def ml_weights(inputmtx, targets):
    """
    This method returns the weights that give the best linear fit between
    the processed inputs and the targets.
    """
    Phi = np.matrix(inputmtx)
    PhiT = Phi.transpose()
    targets = np.matrix(targets).reshape((len(targets), 1))
    weights = linalg.pinv(PhiT * Phi) * PhiT * targets
    return np.array(weights).flatten()
Code example #15
File: imp.py Project: AndreaCensi/astatsa
 def get_information(self, rcond=1e-2):
     self.assert_some_data()
     try:
         P = self.get_covariance()
         return pinv(P, rcond=rcond)
     except LinAlgError:
         filename = 'pinv-failure'
         import pickle
         with open(filename + '.pickle', 'wb') as f:  # pickle needs binary mode
             pickle.dump(self, f)
Code example #16
 def train(self):
     self.generatePrototypes()
     self.sigma()
     hiddenOut = np.zeros(shape=(0, self.pTypes * 3))
     for item in self.scaledData:
         out = []
         for proto in self.protos:
             distance = np.square(np.linalg.norm(item - proto))
             neuronOut = np.exp(-(distance) / (np.square(self.spread)))
             out.append(neuronOut)
         hiddenOut = np.vstack([hiddenOut, np.array(out)])
     # print(hiddenOut)
     self.weights = np.dot(pinv(hiddenOut), self.labels)
Code example #17
 def train(self, data, target, deep):
     '''This function runs 10-fold CV to train the network with an expansion of between 20 and 75%.
     The training algorithm is Stochastic Gradient Descent.'''
     # 10-Fold Cross Validation
     folds = 10
     iters = 10
     kf = KFold(data.shape[0], n_folds=folds)
     if deep:
         hiddenNodes = np.arange(data.shape[1], 2 * data.shape[1]) + 1
     else:
         hiddenNodes = np.arange(data.shape[1], 10 * data.shape[1]) + 1
     hiddenNodes = hiddenNodes[hiddenNodes > 0]
     Error_HNodes = []
     Nets_HNodes = []
     for j in hiddenNodes:
         self.setHiddenNodes([j])
         Mean_error_iter = []
         Mean_nets_iter = []
         for train_index, val_index in kf:
             X, Xval = data[train_index], data[val_index]
             T, Tval = target[train_index], target[val_index]
             Error_iter = []
             Nets_iter = []
             for i in np.arange(iters):
                 self.initialization()  # common initializations
                 Out, H, N = self.sim(X)
                 H = H[-1]
                 self.Weights[-1] = np.dot(pinv(H), T)
                 # Validation
                 Out_val, H_val, N_val = self.sim(Xval)
                 # store the error and the network
                 # MSE = [mean_squared_error(Tval,Out_val)]
                 # classification error
                 Error = [accuracy_score(Tval, Out_val)]
                 #Error = [f1_score(Tval, Out_val)]
                 Networks = [self.Weights]
                 Error_iter.append(np.min(Error))
                 Nets_iter.append(Networks[np.argmin(Error)])
             Mean_error_iter.append(np.mean(Error_iter))
             Mean_nets_iter.append(Nets_iter[np.argmin(Error_iter)])
         Error_HNodes.append(np.mean(Mean_error_iter))
         Nets_HNodes.append(Mean_nets_iter[np.argmin(Mean_error_iter)])
     self.Weights = Nets_HNodes[np.argmin(Error_HNodes)]
     Final_Error = np.min(Error_HNodes)
     selected_Nodes = hiddenNodes[np.argmin(Error_HNodes)]
     self.setHiddenNodes([selected_Nodes])
     return Final_Error
Code example #18
 def train(self, data, target, deep):
     '''This function runs 10-fold CV to train the network with an expansion of between 20 and 75%.
     The training algorithm is Stochastic Gradient Descent.'''
     # 10-Fold Cross Validation
     folds = 10
     iters = 10
     kf = KFold(data.shape[0], n_folds=folds)
     if deep:
         hiddenNodes = np.arange(data.shape[1],2*data.shape[1])+1
     else:
         hiddenNodes = np.arange(data.shape[1],10*data.shape[1])+1
     hiddenNodes = hiddenNodes[hiddenNodes>0]
     Error_HNodes = []
     Nets_HNodes = []
     for j in hiddenNodes:
         self.setHiddenNodes([j])
         Mean_error_iter = []
         Mean_nets_iter = []
         for train_index, val_index in kf:
             X, Xval = data[train_index], data[val_index]
             T, Tval = target[train_index], target[val_index]
             Error_iter = []
             Nets_iter = []
             for i in np.arange(iters):
                 self.initialization() # common initializations
                 Out,H,N = self.sim(X)
                 H = H[-1]
                 self.Weights[-1] = np.dot(pinv(H),T)
                 # Validation
                 Out_val,H_val,N_val = self.sim(Xval)
                 # store the error and the network
                 # MSE = [mean_squared_error(Tval,Out_val)]
                 # classification error
                 Error = [accuracy_score(Tval, Out_val)]
                 #Error = [f1_score(Tval, Out_val)]
                 Networks = [self.Weights]
                 Error_iter.append(np.min(Error))
                 Nets_iter.append(Networks[np.argmin(Error)])
             Mean_error_iter.append(np.mean(Error_iter))
             Mean_nets_iter.append(Nets_iter[np.argmin(Error_iter)])
         Error_HNodes.append(np.mean(Mean_error_iter))
         Nets_HNodes.append(Mean_nets_iter[np.argmin(Mean_error_iter)])
     self.Weights = Nets_HNodes[np.argmin(Error_HNodes)]
     Final_Error = np.min(Error_HNodes)
     selected_Nodes = hiddenNodes[np.argmin(Error_HNodes)]
     self.setHiddenNodes([selected_Nodes])
     return Final_Error
Code example #19
def multilaterate(mic_positions, time__mic_dart_distances_stream):
    """Take a stream of mic - dart distances and yield coordinates.

	mic_positions must be a a N x 3 array of coordinates of each mic.
	time__mic_dart_distances_stream must be a generator of time + CHANNEL *
		distances from mic to dart.
	Yields time, 3D coords of dart.
	"""
    mic_positions = array(mic_positions)
    origin = mic_positions[0]
    mic_positions = mic_positions - origin

    for time__mic_dart_distances in time__mic_dart_distances_stream:
        time_seconds = time__mic_dart_distances[0]
        mic_dart_distances = array(time__mic_dart_distances[1:])

        # The algorithm divides by distance differences, which are zero when
        # a mic's distance equals the reference mic's. Add some wiggle if so.
        degeneracies = (mic_dart_distances[1:] == mic_dart_distances[0])
        mic_dart_distances[1:] += 1.0e-12 * degeneracies

        vt = mic_dart_distances - mic_dart_distances[0]
        free_vt = vt[2:]

        A = 2.0 * mic_positions[2:, 0] / free_vt - 2.0 * mic_positions[1, 0] / vt[1]
        B = 2.0 * mic_positions[2:, 1] / free_vt - 2.0 * mic_positions[1, 1] / vt[1]
        C = 2.0 * mic_positions[2:, 2] / free_vt - 2.0 * mic_positions[1, 2] / vt[1]
        D = (free_vt - vt[1]
             - sum(mic_positions[2:, :] ** 2, axis=1) / free_vt
             + sum(mic_positions[1] ** 2) / vt[1])

        M = concatenate([transpose_1D(A), transpose_1D(B), transpose_1D(C)], axis=1)

        try:
            yield time_seconds, pinv(M).dot(-transpose_1D(D)).reshape(3) + origin
        except LinAlgError:
            sys.stderr.write('Could not multilaterate at t = %f\n' % time_seconds)
Code example #20
 def train(self, data, training):
     '''This function runs 10-fold CV to train the network with an expansion of between 20 and 75%.
     The training algorithm is Stochastic Gradient Descent or Extreme Learning Machine.'''
     # 10-Fold Cross Validation
     folds = 10
     iters = 10
     kf = KFold(data.shape[0], n_folds=folds)
     hiddenNodes = arange(2*data.shape[1])+1
     Error_HNodes = []
     Nets_HNodes = []
     for j in hiddenNodes:
         self.setHiddenNodes([j])
         Mean_error_iter = []
         Mean_nets_iter = []
         for train_index, val_index in kf:
             X, Xval = data[train_index], data[val_index]
             Error_iter = []
             Nets_iter = []
             for i in np.arange(iters):
                 self.initialization() # common initializations
                 if training == 'elm':
                     Out,H,N = self.sim(X)
                     H = H[-1]
                     pseudoinverse = pinv(H)
                     beta = np.dot(pseudoinverse,X)
                     self.Weights[-1] = beta
                     # Validation
                     Out_val,H_val,N_val = self.sim(Xval)
                     # store the error and the network
                     MSE = [mean_squared_error(Xval,Out_val)]
                     Networks = [self.Weights]
                 Error_iter.append(np.min(MSE))
                 Nets_iter.append(Networks[np.argmin(MSE)])
             Mean_error_iter.append(np.mean(Error_iter))
             Mean_nets_iter.append(Nets_iter[np.argmin(Error_iter)])
         Error_HNodes.append(np.mean(Mean_error_iter))
         Nets_HNodes.append(Mean_nets_iter[np.argmin(Mean_error_iter)])
     self.Weights = Nets_HNodes[np.argmin(Error_HNodes)]
     Final_Error = np.min(Error_HNodes)
     selected_Nodes = hiddenNodes[np.argmin(Error_HNodes)]
     self.setHiddenNodes([selected_Nodes])
     return Final_Error
Code example #21
File: ik.py Project: stephane-caron/icra-2015
    def _enforce(self, q_warm):
        self.converged = False
        self.robot.rave.SetDOFValues(q_warm)
        self.robot.rave.SetActiveDOFs(self.active_indexes)
        q_max = array([DOF_SCALE * dof.ulim for dof in self.active_dofs])
        q_min = array([DOF_SCALE * dof.llim for dof in self.active_dofs])
        I = eye(self.nb_active_dof)

        q = full_to_active(q_warm, self.active_dofs)
        self.robot.rave.SetActiveDOFValues(q)

        for itnum in range(self.max_iter):
            conv_vect = array([norm(task.f()) for task in self.tasks])
            if numpy.all(conv_vect < self.conv_thres):
                self.converged = True
                break
            if DEBUG_IK:
                conv = ["%10.8f" % x for x in conv_vect]
                print "   %4d: %s" % (itnum, ' '.join(conv))

            ker_proj = eye(self.nb_active_dof)
            dq = zeros(self.nb_active_dof)
            qd_max_reg = self.gain * (q_max - q)
            qd_min_reg = self.gain * (q - q_min)
            for i, task in enumerate(self.tasks):
                J = task.J()
                Jn = dot(J, ker_proj)
                b = -self.gain * task.f() - dot(J, dq)
                In = eye(Jn.shape[0])
                sr_inv = dot(Jn.T, linalg.inv(dot(Jn, Jn.T) + 1e-8 * In))
                dq += dot(sr_inv, b)
                ker_proj = dot(ker_proj, I - dot(linalg.pinv(Jn), Jn))

            qd_max_reg = self.gain * (q_max - q)
            qd_min_reg = self.gain * (q - q_min)
            q += solve_ineq(I, dq, I, qd_max_reg, qd_min_reg)
            self.robot.rave.SetActiveDOFValues(q)

        return self.robot.rave.GetDOFValues()
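The inner loop combines a damped least-squares inverse, J.T (J J.T + lambda*I)^-1, with a nullspace projector I - pinv(J).J, so each lower-priority task only moves the joints within the nullspace of the tasks above it. A standalone sketch of the two operators (toy Jacobian):

import numpy as np

J = np.random.rand(3, 7)     # toy task Jacobian: 3 task dims, 7 joints
lam = 1e-8                   # damping term, as in the loop above

# Damped (singularity-robust) least-squares inverse.
sr_inv = J.T.dot(np.linalg.inv(J.dot(J.T) + lam * np.eye(3)))

# Projector onto the nullspace of J: motions it produces do not affect the task.
P = np.eye(7) - np.linalg.pinv(J).dot(J)
assert np.allclose(J.dot(P), 0.0, atol=1e-8)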
Code example #22
 def fineTuning(self, data, target):
     # Once all the weights are set, proceed with the fine tuning
     epoch = 0
     Error = []
     Networks = []
     while epoch <= 10:
         Out, H, N = self.sim(data)
         H = H[-1]
         pseudoinverse = pinv(H)
         beta = np.dot(pseudoinverse, target)
         self.Weights[-1] = beta
         # Validation
         Out, H, N = self.sim(data)
         # regression error (MSE)
         #Error.append(mean_squared_error(data,Out))
         Networks.append(self.Weights)
         # classification error
         Error.append(accuracy_score(target, Out))
         #Error.append(f1_score(target, Out))
         epoch += 1
     Final_Error = np.min(Error)
     self.Weights = Networks[np.argmin(Error)]
     return Final_Error
Code example #23
 def fineTuning(self,data,target):
     # Once all the weights are set, proceed with the fine tuning
     epoch = 0
     Error = []
     Networks = []
     while epoch <= 10:
         Out,H,N = self.sim(data)
         H = H[-1]
         pseudoinverse = pinv(H)
         beta = np.dot(pseudoinverse,target)
         self.Weights[-1] = beta
         # Validation
         Out,H,N = self.sim(data)
         # regression error (MSE)
         #Error.append(mean_squared_error(data,Out))
         Networks.append(self.Weights)
         # classification error
         Error.append(accuracy_score(target, Out))
         #Error.append(f1_score(target, Out))
         epoch += 1
     Final_Error = np.min(Error)
     self.Weights = Networks[np.argmin(Error)]
     return Final_Error
Code example #24
 def test_ica_unmixing(self):
     sk_w = self.sk_ica.components_.dot(linalg.pinv(self.sk_ica.whitening_))
     assert numpy.allclose(numpy.absolute(self.W),
                           numpy.absolute(sk_w),
                           atol=1e-03)
Code example #25
File: classifiers.py Project: yk/patternhs12
 def __train__(self, data, labels):
     o = ones((data.shape[0], 1))
     h = hstack((data, o))
     pseudoX = pinv(h)
     self.w = dot(pseudoX, labels.reshape((-1, 1)))
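The same pseudoinverse classifier end to end on toy two-class data (a hypothetical setup; the linear output is thresholded at zero):

import numpy as np
from numpy import dot, hstack, ones
from numpy.linalg import pinv

# Two separable point clouds with labels +1 / -1 (hypothetical data).
data = np.vstack([np.random.randn(20, 2) + 2.0, np.random.randn(20, 2) - 2.0])
labels = np.hstack([np.ones(20), -np.ones(20)])

h = hstack((data, ones((data.shape[0], 1))))  # append a bias column
w = dot(pinv(h), labels.reshape((-1, 1)))     # least-squares weights
preds = np.sign(h.dot(w)).ravel()
print('training accuracy:', (preds == labels).mean())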