def parcel_plv(x, y, source_identities):
    """Function for computing the complex phase-locking value at parcel level.

    Input:
    x : complex
        Source time series.
    y : complex
        Parcel time series.
    source_identities : int
        Vector mapping of source parcel identities.
    Output:
    cplv : complex
        PLV of parcels, sorted by identity.
    """
    """Change to amplitude 1, keep angle using Euler's formula."""
    x = np.exp(1j * (asmatrix(np.angle(x))))
    y = np.exp(1j * (asmatrix(np.angle(y))))

    """Get cPLV needed for flips and weighting."""
    cplv = np.zeros(len(source_identities), dtype='complex')

    for i, identity in enumerate(source_identities):
        """Compute cPLV only for parcel-source pairs where the source belongs
        to that parcel. Each source belongs to exactly one parcel."""
        if source_identities[i] >= 0:
            cplv[i] = np.sum(np.asarray(y[identity]) *
                             np.conjugate(np.asarray(x[i])))

    cplv /= np.shape(x)[1]
    return cplv
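# A minimal usage sketch with synthetic data (hypothetical, not from the
# original source): two sources phase-locked to one parcel. `np` is numpy and
# `asmatrix` is numpy's asmatrix, as assumed by the function above.
import numpy as np
from numpy import asmatrix

rng = np.random.RandomState(0)
n_samples = 1000
phase = rng.uniform(0, 2 * np.pi, n_samples)
jitter = rng.normal(0, 0.1, (2, n_samples))        # small phase noise
source_series = np.exp(1j * (phase + jitter))      # 2 sources
parcel_series = np.exp(1j * phase)[np.newaxis, :]  # 1 parcel
identities = np.array([0, 0])                      # both sources map to parcel 0

cplv = parcel_plv(source_series, parcel_series, identities)
print(np.abs(cplv))  # both values close to 1: strong phase locking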
def load_data(fname, delimiter=','):
    """Return the features x and result y as matrices."""
    data = sp.loadtxt(fname, delimiter=delimiter)
    m, n = data.shape
    x = sp.asmatrix(data[:, range(0, n - 1)].reshape(m, n - 1))
    y = sp.asmatrix(data[:, n - 1].reshape(m, 1))
    return x, y
def dare(A, B, Q, R, S=None, E=None, stabilizing=True):
    """(X, L, G) = dare(A, B, Q, R) solves the discrete-time algebraic Riccati
    equation

        :math:`A^T X A - X - A^T X B (B^T X B + R)^{-1} B^T X A + Q = 0`

    where A and Q are square matrices of the same dimension. Further, Q is a
    symmetric matrix. The function returns the solution X, the gain matrix
    :math:`G = (B^T X B + R)^{-1} B^T X A` and the closed loop eigenvalues L,
    i.e., the eigenvalues of A - B G.

    (X, L, G) = dare(A, B, Q, R, S, E) solves the generalized discrete-time
    algebraic Riccati equation

        :math:`A^T X A - E^T X E - (A^T X B + S) (B^T X B + R)^{-1} (B^T X A + S^T) + Q = 0`

    where A, Q and E are square matrices of the same dimension. Further, Q and
    R are symmetric matrices. The function returns the solution X, the gain
    matrix :math:`G = (B^T X B + R)^{-1} (B^T X A + S^T)` and the closed loop
    eigenvalues L, i.e., the eigenvalues of A - B G, E.
    """
    if S is not None or E is not None or not stabilizing:
        return dare_old(A, B, Q, R, S, E, stabilizing)
    else:
        Rmat = asmatrix(R)
        Qmat = asmatrix(Q)
        X = solve_discrete_are(A, B, Qmat, Rmat)
        G = solve(B.T.dot(X).dot(B) + Rmat, B.T.dot(X).dot(A))
        L = eigvals(A - B.dot(G))
        return X, L, G
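# A minimal usage sketch for the standard branch of dare() above (S, E unset),
# assuming its unqualified names come from these imports:
import numpy as np
from numpy import asmatrix
from numpy.linalg import solve, eigvals
from scipy.linalg import solve_discrete_are

A = np.array([[1.0, 1.0], [0.0, 1.0]])  # discrete double integrator
B = np.array([[0.0], [1.0]])
Q = np.eye(2)
R = np.eye(1)

X, L, G = dare(A, B, Q, R)
print(np.abs(L))  # closed-loop eigenvalues should lie inside the unit circle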
def dare(A, B, Q, R, S=None, E=None):
    """(X, L, G) = dare(A, B, Q, R) solves the discrete-time algebraic Riccati
    equation

        A^T X A - X - A^T X B (B^T X B + R)^-1 B^T X A + Q = 0

    where A and Q are square matrices of the same dimension. Further, Q is a
    symmetric matrix. The function returns the solution X, the gain matrix
    G = (B^T X B + R)^-1 B^T X A and the closed loop eigenvalues L, i.e., the
    eigenvalues of A - B G.

    (X, L, G) = dare(A, B, Q, R, S, E) solves the generalized discrete-time
    algebraic Riccati equation

        A^T X A - E^T X E - (A^T X B + S) (B^T X B + R)^-1 (B^T X A + S^T) + Q = 0

    where A, Q and E are square matrices of the same dimension. Further, Q and
    R are symmetric matrices. The function returns the solution X, the gain
    matrix G = (B^T X B + R)^-1 (B^T X A + S^T) and the closed loop
    eigenvalues L, i.e., the eigenvalues of A - B G, E.
    """
    if S is not None or E is not None:
        return dare_old(A, B, Q, R, S, E)
    else:
        Rmat = asmatrix(R)
        Qmat = asmatrix(Q)
        X = solve_discrete_are(A, B, Qmat, Rmat)
        G = inv(B.T.dot(X).dot(B) + Rmat) * B.T.dot(X).dot(A)
        L = eigvals(A - B.dot(G))
        return X, L, G
def rows_array2(my_m, n_iter):
    my_m = asmatrix(my_m)  # not strictly needed, but should be a no-op
    # The following is valid as long as we assume a fixed matrix size
    # (and not in a general function).
    v1 = asmatrix(full((my_m.shape[1], 1), 1.0 / my_m.shape[1]))
    for i in xrange(n_iter):
        # v1 = asmatrix(full((my_m.shape[1], 1), 1.0 / my_m.shape[1]))
        m1 = my_m * v1  # row means
        m1[0] = 0  # just to avoid complaints
def count_hops(data, definition, def_args, year, A, B):
    scipy_matrix = scipy.asmatrix(scipy.array(
        unsigned_adjacency_matrix(data, definition, def_args, year)))
    multiplied_matrix = scipy.asmatrix(scipy.array(
        unsigned_adjacency_matrix(data, definition, def_args, year)))
    hop_count = 1
    while hop_count < len(data.countries()):
        if multiplied_matrix.tolist()[index_of_country(A)][index_of_country(B)] != 0:
            return hop_count
        multiplied_matrix = memoize_matrix_mult(multiplied_matrix, scipy_matrix)
        hop_count += 1
    return INFINITE_HOPS
def rankOneMatrix(vec1, *args):
    """Create rank one matrices (dyadics) from vectors.

    r1mat = rankOneMatrix(vec1)
    r1mat = rankOneMatrix(vec1, vec2)

    vec1 is m1 x n, an array of n hstacked m1 vectors
    vec2 is m2 x n, (optional) another array of n hstacked m2 vectors

    r1mat is n x m1 x m2, an array of n rank one matrices
    formed as c1*c2' from columns c1 and c2

    With one argument, the second vector is taken to be the same as the first.

    Notes:
    *) This routine loops on the dimension m, assuming this is
       much smaller than the number of points, n.
    """
    if len(vec1.shape) > 2:
        raise RuntimeError("input vec1 is the wrong shape")
    if len(args) == 0:
        vec2 = vec1.copy()
    else:
        vec2 = args[0]
        if len(vec2.shape) > 2:
            raise RuntimeError("input vec2 is the wrong shape")
    m1, n1 = asmatrix(vec1).shape
    m2, n2 = asmatrix(vec2).shape
    if n1 != n2:
        raise RuntimeError("Number of vectors differ in arguments.")
    m1m2 = m1 * m2
    r1mat = zeros((m1m2, n1), dtype='float64')
    mrange = asarray(list(range(m1)), dtype='int')
    for i in range(m2):
        r1mat[mrange, :] = vec1 * tile(vec2[i, :], (m1, 1))
        mrange = mrange + m1
    r1mat = reshape(r1mat.T, (n1, m2, m1)).transpose(0, 2, 1)
    return squeeze(r1mat)
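# A minimal usage sketch, assuming the unqualified names in the function come
# from numpy as imported here:
import numpy as np
from numpy import asmatrix, asarray, zeros, tile, reshape, squeeze

v = np.array([[1.0, 0.0],
              [2.0, 1.0],
              [3.0, 2.0]])  # two hstacked 3-vectors (m1=3, n=2)
dyads = rankOneMatrix(v)    # shape (2, 3, 3): one outer product per column
print(np.allclose(dyads[0], np.outer(v[:, 0], v[:, 0])))  # True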
def main():
    saved_handler = sp.seterrcall(err_handler)
    saved_err = sp.seterr(all='call')

    print('============ Part 1: Plotting =============================')
    x, y = load_data('ex2/ex2data1.txt')
    plot_data(x, y)
    pl.show()

    print('============ Part 2: Compute Cost and Gradient ============')
    m, n = x.shape
    x = sp.column_stack((sp.ones((m, 1)), x))
    init_theta = sp.asmatrix(sp.zeros((n + 1, 1)))
    cost, grad = cost_function(init_theta, x, y)
    print('Cost at initial theta: %s' % cost)
    print('Gradient at initial theta:\n %s' % grad)

    print('============ Part 3: Optimizing minimize ====================')
    # res = op.minimize(cost_function, init_theta, args=(x, y), jac=True, method='Newton-CG')
    res = op.minimize(cost_function_without_grad, init_theta, args=(x, y), method='Powell')
    # print('Cost at theta found by fmin: %s' % cost)
    print('Result by minimize:\n%s' % res)
    plot_decision_boundary(res.x, x, y)
    pl.show()

    print('============ Part 4: Optimizing fmin ====================')
    res = op.fmin(cost_function_without_grad, init_theta, args=(x, y))
    # print('Cost at theta found by fmin: %s' % cost)
    print('Result by fmin:\n%s' % res)
    plot_decision_boundary(res, x, y)
    pl.show()

    sp.seterrcall(saved_handler)
    sp.seterr(**saved_err)
def __init__(self, d):
    Layer.__init__(self, 1, d)
    sigma = s.sqrt(1.0 / d)
    self.w = s.asmatrix(s.random.normal(0.0, sigma, (1, d)))
    self.b = s.random.normal(0.0, sigma, 1)
def test():
    '''a = mx('1,2,3;0,4,5;9,0,8')
    print a.shape
    print a.I
    print mx.A'''
    a = mx('1,2;3,2')
    b = mx('1,0,0;0,1,1')
    c = mx('1,0;0,1;1,0')
    # print c*(a*b)
    print a.shape[0]
    ai = sp.identity(a.shape[1])
    # aif = ai.flat
    ail = ai.tolist()
    newit = ail[0]
    ail.append(newit)
    print ail  # .repeat(2,1)  # .reshape((2,))
    ailm = sp.asmatrix(ail)
    print ailm
    # c = mx('1,2;0,4;9,2')
    # d = mx('1,2,0;4,9,2')
    # print a*b
    # Each time a node is added, build an identity matrix of A's shape plus
    # one; update Isi += 1 and multiply with A to get the new output matrix.
    linec = 0
def run():
    input_layer_size = 400
    hidden_layer_size = 25
    num_labels = 10

    print('Loading and Visualizing Data...')
    X, y = digit_data['X'], digit_data['y']
    m = X.shape[0]

    # Randomly select 100 data points to display
    sel = sp.random.permutation(m)
    multiclass.displayData(X[sel[:100], :])

    print('Program paused. Press enter to continue.')
    raw_input()

    Theta1, Theta2 = weights_data['Theta1'], weights_data['Theta2']
    neurnet.predict(Theta1, Theta2, X)

    print('Program paused. Press enter to continue.')
    raw_input()

    rp = sp.random.permutation(m)
    for i in range(0, m):
        pred = neurnet.predict(Theta1, Theta2, sp.asmatrix(X[rp[i], :]))
        print("Neural Network Prediction: %d (digit %d)\n"
              % (pred[0], sp.mod(pred[0], 10)))
def costFunctionReg(flattendTheta, X, y, lmbda):
    """
    Calculate the cost and gradient for logistic regression
    using regularization (helps prevent overfitting when there
    are many features)
    """
    # scipy's fmin-style optimizers only accept flattened arrays instead of
    # matrices, so theta has to be converted back every time
    flattendTheta = sp.asmatrix(flattendTheta)
    (a, b) = flattendTheta.shape
    if a < b:
        theta = flattendTheta.T
    else:
        theta = flattendTheta
    m = sp.shape(y)[0]
    (J, grad) = costFunction(theta, X, y)
    # f is a filter vector that will disregard regularization for theta0
    f = sp.ones((theta.shape[0], 1))
    f[0, 0] = 0
    thetaFiltered = sp.multiply(theta, f)
    J = J + (lmbda / (2.0 * m)) * (thetaFiltered.T.dot(thetaFiltered))
    grad = grad + ((lmbda / m) * thetaFiltered).T
    return (J, grad)
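# A minimal usage sketch (hypothetical data), assuming sp is scipy/numpy and
# that costFunction and sigmoid from this codebase are in scope: the penalty
# should grow the cost for a nonzero theta while leaving theta[0] unpenalized.
import scipy as sp

X = sp.asmatrix([[1.0, 0.5, 1.2],
                 [1.0, -0.3, 0.4],
                 [1.0, 0.9, -1.1]])  # intercept column first
y = sp.asmatrix([[1.0], [0.0], [1.0]])
theta = sp.array([2.0, 1.0, -1.0])   # flattened, as an optimizer passes it

J0, _ = costFunctionReg(theta, X, y, 0.0)
J1, _ = costFunctionReg(theta, X, y, 10.0)
print(J0, J1)  # J1 > J0: the penalty applies to theta[1:] only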
def run():
    theta = sp.zeros((3, 1))
    data = sp.copy(admission_data)
    X = data[:, [0, 1]]
    y = data[:, [2]]
    m = sp.shape(y)[0]

    # Add intercept term to x
    X = sp.concatenate((sp.ones((m, 1)), X), axis=1)

    """ Part 1: Plotting """
    print('Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
    logres.plotData(data)
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.legend(['Admitted', 'Not admitted'])
    plt.show()
    print('Program paused. Press enter to continue.')
    raw_input()

    """ Part 2: Compute Cost and Gradient """
    (m, n) = X.shape
    initial_theta = sp.zeros((n, 1))
    (cost, grad) = logres.costFunction(initial_theta, X, y)
    print('Cost at initial theta (zeros): ', cost)
    print('Gradient at initial theta (zeros): ', grad)
    print('Program paused. Press enter to continue.')
    raw_input()

    """ Part 3: Optimizing using fminunc """
    (theta, cost) = logres.find_minimum_theta(theta, X, y)
    print('Cost at theta found by fmin: ', cost)
    print('Theta: ', theta)
    logres.plotDecisionBoundary(data, X, theta)
    plt.show()

    """ Part 4: Predict and Accuracies """
    prob = logres.sigmoid(sp.asmatrix([1, 45, 85]).dot(theta))
    print('For a student with scores 45 and 85, we predict an admission probability of ', prob[0, 0])
    print('Program paused. Press enter to continue.')
def __init__(self, neurons_num, d):
    Layer.__init__(self, neurons_num, d)
    links = 2 * self.h
    sigma = s.sqrt(1.0 / d)
    self.w = s.asmatrix(s.random.normal(0.0, sigma, (links, d)))
    self.b = s.random.normal(0.0, sigma, links)
def __init__(self, nodes, edges):
    degree = dict(Counter([n for e in edges for n in e]))
    self.vol = sum(degree.values())
    self.n = N = len(nodes)  # dimension of all matrices
    self.A = sp.asmatrix(sp.zeros([N, N]))
    self.D = sp.asmatrix(sp.zeros([N, N]))
    for edge in edges:  # fill affinity matrix
        i = nodes.index(edge[0])  # source
        j = nodes.index(edge[1])  # target
        self.A[i, j] = self.A[j, i] = 1.0
    for node in nodes:  # fill degree matrix
        i = nodes.index(node)
        self.D[i, i] = degree[node]
    self.L = sp.asmatrix(self.D - self.A)  # unnormalized graph Laplacian
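# A minimal sketch of the same construction on a toy graph, assuming sp is
# scipy/numpy and that this __init__ belongs to the Cluster class
# instantiated later in this file:
import scipy as sp
from collections import Counter

toy_nodes = ['a', 'b', 'c']
toy_edges = [['a', 'b'], ['b', 'c']]
g = Cluster(toy_nodes, toy_edges)
print(g.L)
# [[ 1. -1.  0.]
#  [-1.  2. -1.]
#  [ 0. -1.  1.]]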
def _generateRateMatrix(states, transitions, rates):
    n_states = len(states)
    Q = asmatrix(zeros((n_states, n_states)))
    for (src, transition, dst) in transitions:
        Q[src, dst] = rates[transition]
    for i in xrange(n_states):
        row = Q[i, :]
        Q[i, i] = -sum(row)
    return Q
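# A minimal usage sketch, assuming the unqualified names come from a
# star-import of numpy (so `sum` here is numpy's sum, which reduces the
# matrix row to a scalar):
from numpy import asmatrix, zeros, sum

states = [0, 1]
transitions = [(0, 'up', 1), (1, 'down', 0)]  # (src, transition, dst)
rates = {'up': 2.0, 'down': 0.5}
Q = _generateRateMatrix(states, transitions, rates)
print(Q)  # rows sum to zero, as required of a CTMC rate matrix
# [[-2.   2. ]
#  [ 0.5 -0.5]]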
def setVauleofMatrix(self, preoutputmatrix, setpositionlist, valuelist=None):
    netDimNow = preoutputmatrix.shape[0]
    poslen = mx.max(sp.asmatrix(setpositionlist))
    # sp.maximum(setpositionlist)+1  # len(setpositionlist)
    outmat = self.addrowcol_matrix(preoutputmatrix,
                                   poslen - netDimNow + 1,
                                   poslen - netDimNow + 1)
    # print outmat.shape,
    for pos in setpositionlist:
        if pos:
            outmat.itemset(pos, outmat.item(pos) + 1)
    return outmat
def cols_array2(my_m, n_iter):
    my_m = asmatrix(my_m)  # not strictly needed, but should be a no-op
    # The following is valid as long as we assume a fixed matrix size
    # (and not in a general function).
    # v1 = asmatrix(full((1, my_m.shape[0]), 1.0 / my_m.shape[0]))
    v1 = full((1, my_m.shape[0]), 1.0 / my_m.shape[0])
    for i in xrange(n_iter):
        # v1 = asmatrix(full((1, my_m.shape[0]), 1.0 / my_m.shape[0]))
        # m1 = v1 * my_m
        m1 = np.dot(v1, my_m)
        m1[0] = 0  # just to avoid complaints
def pPca(data, dim):
    """Return a matrix which contains the first `dim` principal components of
    data. data is a matrix whose rows correspond to datapoints.

    Implementation of the 'probabilistic PCA' algorithm.
    """
    num = data.shape[1]
    data = asmatrix(makeCentered(data))
    # Pick a random reduction
    W = asmatrix(standard_normal((num, dim)))
    # Save for convergence check
    W_ = W[:]
    while True:
        E = inv(W.T * W) * W.T * data.T
        W, W_ = data.T * E.T * inv(E * E.T), W
        if abs(W - W_).max() < 0.001:
            break
    return W.T
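# A minimal usage sketch, assuming makeCentered subtracts the column means
# (a hypothetical stand-in for the codebase's helper) and that the remaining
# unqualified names come from numpy:
import numpy as np
from numpy import asmatrix
from numpy.linalg import inv
from numpy.random import standard_normal

def makeCentered(data):
    return data - data.mean(axis=0)

rng = np.random.RandomState(0)
# Points strongly elongated along the [3, 0.3] direction, plus small noise
points = rng.randn(200, 1) * np.array([3.0, 0.3]) + rng.randn(200, 2) * 0.1
W = pPca(np.asmatrix(points), 1)  # one principal direction, shape (1, 2)
print(W / np.linalg.norm(W))      # roughly ±[0.995, 0.1]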
def addrow_mat(a, rows2addcnt=1):
    ai = a  # sp.identity(a.shape[1])
    # aif = ai.flat
    ail = ai.tolist()
    for i in range(rows2addcnt):
        rowtoadd = [0 for i in range(1, len(ail[0]) + 1)]
        ail.append(rowtoadd)
    ailm = sp.asmatrix(ail)
    return ailm
def reduceDim(data, dim, func='pca'):
    """Reduce the dimension of datapoints to dim via principal component
    analysis.

    A matrix of shape (n, d) specifies n points of dimension d.
    """
    try:
        pcaFunc = globals()[func]
    except KeyError:
        raise ValueError('Unknown function to calc principal components')
    pc = pcaFunc(data, dim)
    return (pc * asmatrix(makeCentered(data)).T).T
def getoutMatrix(personcnt, ma_fans, cbm_frs, inv_mention, act_micorcnt, worksfolder):
    preoutmat = mx('1')
    for i in range(1, personcnt + 1):
        # print '-------------------', i
        input = zip(*[ma_fans[:i], cbm_frs[:i], inv_mention[:i], act_micorcnt[:i]])
        inputm = sp.asmatrix(input, long)
        min2mout_pat_mat = min2mout_mat(inputm, preoutmat,
                                        supposeMatrixDim=personcnt)
        preoutmat = min2mout_pat_mat
    return preoutmat
def compute_weighted_operator(fwd, inv, source_identities):
    """Function for computing a fidelity-weighted inverse operator.

    Input arguments:
    ================
    fwd : ndarray
        The forward operator.
    inv : ndarray
        The original inverse operator.
    source_identities : ndarray
        Vector mapping sources to parcels or labels.

    Output argument:
    ================
    weighted_inv : ndarray
        The fidelity-weighted inverse operator.
    """

    """Maybe one should test if unique non-negative values == max+1.
    This is expected in the code."""
    n_parcels = max(source_identities) + 1

    """Samples. Peaks at about 20 GB ram with 30 000 samples.
    Using too few samples will give poor results."""
    time_output = 6000

    """Samples to remove from ends to get rid of border effects."""
    time_cut = 20

    """Original values 1, 31. Higher number, wider span."""
    widths = scipy.arange(5, 6)

    """Make and clone parcel time series to source time series."""
    fwd, inv = asmatrix(fwd), asmatrix(inv)
    parcel_series = make_series(n_parcels, time_output, time_cut, widths)
    source_series = parcel_series[source_identities]
    source_series[source_identities < 0] = 0

    """Forward then inverse model the source series."""
    source_series = inv * (fwd * source_series)

    weighted_inv = _compute_weights(source_series, parcel_series,
                                    source_identities, inv)
    return weighted_inv
def deserialize_weights(self, w):
    """Takes array w, reshapes it and stores it as internal weights and
    bias parameters.

    Note: this is not a serialization of the whole class; this
    reconstruction uses dimensions already stored in the object.
    """
    assert len(w) == self.get_weights_len()
    (wrows, wcols) = self.w.shape
    wtotal = wrows * wcols
    self.w = s.asmatrix(w[:wtotal]).reshape(wrows, wcols)
    self.b = w[wtotal:]
def plv(x, y, source_identities):
    """Function for computing the complex phase-locking value.

    x : Source time series
    y : Parcel time series
    source_identities : ndarray [sources]
        Expected ids for parcels are 0 to n-1, where n is the number of
        parcels, and -1 for sources that do not belong to any parcel.
    """
    """Change to amplitude 1, keep angle using Euler's formula."""
    x = np.exp(1j * (asmatrix(np.angle(x))))
    y = np.exp(1j * (asmatrix(np.angle(y))))

    """Get cPLV needed for flips and weighting."""
    cplv = np.zeros(len(source_identities), dtype='complex')

    for i, identity in enumerate(source_identities):
        """Compute cPLV only for parcel-source pairs where the source belongs
        to that parcel. Each source belongs to exactly one parcel."""
        if source_identities[i] >= 0:
            cplv[i] = np.sum(np.asarray(y[identity]) *
                             np.conjugate(np.asarray(x[i])))

    cplv /= np.shape(x)[1]
    return cplv
def logistic_regression():
    """
    Predicts the probability that a student will be admitted to a
    university based on how well they did on two exams

    Params:
        exam1: Integer score
        exam2: Integer score
    """
    exam1 = int(request.args.get('exam1'))
    exam2 = int(request.args.get('exam2'))
    prob = sigmoid(sp.asmatrix([1, exam1, exam2]).dot(theta))
    return jsonify({
        'probability_accepted': prob[0, 0]
    })
def get_relationship_matrix(data, year, relationship_definition, def_args):
    array_data = []
    for export_country in countries.countries:
        row = []
        for import_country in countries.countries:
            if (relationship_definition(data, year, export_country,
                                        import_country, def_args)
                    and export_country != import_country):
                row.append(1)
            else:
                row.append(0)
        array_data.append(row)
    a = scipy.array(array_data)
    b = scipy.asmatrix(a)
    return b
def plv(x, y, identities):
    """Function for computing phase-locking values between x and y.

    Output arguments:
    =================
    cplv : ndarray
        Complex-valued phase-locking values.
    """
    """Change to amplitude 1, keep angle using Euler's formula."""
    x = scipy.exp(1j * (asmatrix(scipy.angle(x))))
    y = scipy.exp(1j * (asmatrix(scipy.angle(y))))

    """Get cPLV needed for flips and weighting."""
    cplv = scipy.zeros(len(identities), dtype='complex')

    for i, identity in enumerate(identities):
        """Compute cPLV only for parcel-source pairs where the source belongs
        to that parcel. Each source belongs to exactly one parcel."""
        if identities[i] >= 0:
            cplv[i] = scipy.sum(scipy.asarray(y[identity]) *
                                scipy.conjugate(scipy.asarray(x[i])))

    cplv /= np.shape(x)[1]
    return cplv
def costFunction(flattendTheta, X, y):
    """Calculate the cost and gradient for logistic regression"""
    # scipy's fmin-style optimizers only accept flattened arrays instead of
    # matrices, so theta has to be converted back every time
    flattendTheta = sp.asmatrix(flattendTheta)
    (a, b) = flattendTheta.shape
    if a < b:
        theta = flattendTheta.T
    else:
        theta = flattendTheta
    m = sp.shape(y)[0]
    J = (1.0 / m) * ((-y).T.dot(sp.log(sigmoid(X.dot(theta)))) -
                     (-y + 1).T.dot(sp.log(-sigmoid(X.dot(theta)) + 1)))
    grad = (1.0 / m) * (sigmoid(X.dot(theta)) - y).T.dot(X)
    return (J, grad)
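# A quick sanity check, assuming sp is scipy/numpy and a standard logistic
# sigmoid (a stand-in for the codebase's own): with theta = 0 every
# prediction is 0.5, so the cost must be ln(2) ~= 0.693.
import scipy as sp

def sigmoid(z):
    return 1.0 / (1.0 + sp.exp(-z))

X = sp.asmatrix([[1.0, 2.0], [1.0, -1.0]])
y = sp.asmatrix([[1.0], [0.0]])
J, grad = costFunction(sp.zeros(2), X, y)
print(J)  # [[0.69314718]]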
def covar(samples):
    """
    Calculate the covariance matrix as used by Estimator.

    This is not the same as the Octave/Matlab function cov(), but is instead
    equal to Mean [ sample.H * sample ], where sample is a single sample.
    I.e., it is actually the second moment matrix.

    Parameters
    ----------
    samples : K x Numel complex ndarray
        Complex samples for each of Numel antennas sampled at K times.

    Returns
    -------
    return : Numel x Numel complex ndarray
        Second moment matrix for complex random vector samples. Used by
        Estimator.
    """
    samples = sp.asmatrix(samples)
    return (samples.H * samples) / samples.shape[0]
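# A minimal usage sketch, assuming sp is scipy/numpy: two antennas sampled at
# 1000 instants; the result is a 2 x 2 Hermitian second-moment matrix.
import numpy as np
import scipy as sp

rng = np.random.RandomState(0)
K, numel = 1000, 2
samples = rng.randn(K, numel) + 1j * rng.randn(K, numel)
R = covar(samples)
print(R.shape)              # (2, 2)
print(np.allclose(R, R.H))  # True: second moment matrix is Hermitian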
def isHermitian(M, tol):
    MM = asmatrix(M)
    return norm(MM - MM.H) < tol
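# A minimal usage sketch, assuming asmatrix comes from numpy and norm from
# numpy.linalg:
import numpy as np
from numpy import asmatrix
from numpy.linalg import norm

H = np.array([[2.0, 1 - 1j], [1 + 1j, 3.0]])
print(isHermitian(H, 1e-12))                   # True
print(isHermitian(H + 1j * np.eye(2), 1e-12))  # False: imaginary diagonal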
    # pos = sum(x > 0 for x in guidance_matrix)
    print(guidance_matrix)
    ward = Ward(n_clusters=6, n_components=2, connectivity=guidance_matrix)
    predicts = ward.fit_predict(self.A)
    print(predicts)
    # print(circles.keys(), len(circles.itervalues()))


if __name__ == "__main__":
    sp.set_printoptions(precision=2, suppress=True)
    nodes = [1, 2, 3, 4, 5, 6]
    edges = [[1, 2], [1, 3], [2, 3], [3, 4], [4, 5], [4, 6], [5, 6]]
    Q1 = sp.asmatrix(sp.identity(6))
    Q = sp.mat([[1.0, 1.0, 1.0, 1.0, -1.0, -1.0],
                [1.0, 1.0, 1.0, 1.0, -1.0, -1.0],
                [1.0, 1.0, 1.0, 1.0, -1.0, -1.0],
                [1.0, 1.0, 1.0, 1.0, -1.0, -1.0],
                [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0],
                [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0]])
    c = Cluster(nodes, edges)
    vec = c.spectral(2)
    print(vec)
    # vec_idc = c.csp(Q1, 0, 3)

'''
# Constrained spectral clustering
def csp(self, Q, beta, K):
"""Collection of functions for converting between magnitude systems.""" import scipy _usno_to_sdss_matrix = scipy.array([ [1, 0, 0, 0, 0], #u [0, 1.06, -0.06, 0, 0], #g [0, 0, 1.035, -0.035, 0], #r [0, 0, 0.041, 1.0 - 0.041, 0], #i [0, 0, 0, -0.03, 1.03] #z ]) _sdss_to_usno_matrix = scipy.asarray(scipy.asmatrix(_usno_to_sdss_matrix).I) _sdss_to_usno_offset = scipy.array([[0], [0.06 * 0.53], [0.035 * 0.21], [0.041 * 0.21], [-0.03 * 0.09]]) def sdss_to_usno(sdss_ugriz): """ Return the estimated USNO 1m estimated magnitudes from SDSS 2.5m ones. Args: sdss_ugriz(5xN scipy.array): The values of the u, g, r, and z magnitudes in the SDSS 2.5m system. Each magnitude is a column. Returns: 5 x N scipy array: The values of the u', g', r', i', and z' magnitudes in the USNO 1m system in the same format as the input. """
import csv
import scipy as sc
import matplotlib.pyplot as plt
import networkx as nx

# IMPORT PROVIDED NETWORK DATA FROM FILES
nodes_file = open("../Data/QMEE_Net_Mat_nodes.csv", 'rb')
edges_file = open("../Data/QMEE_Net_Mat_edges.csv", 'rb')

nodes = []
csvread = csv.reader(nodes_file)
csvread.next()  # skip header row
for row in csvread:
    nodes.append(tuple(row))
nodes_file.close()

csvread = csv.reader(edges_file)
AdjNames = list(csvread.next())
tmp = csvread.next()
tmp = map(int, tmp)
Adj = sc.asmatrix(tmp)
for row in csvread:
    tmp = map(int, row)
    Adj = sc.append(Adj, [tmp], axis=0)
edges_file.close()

###### PLOTTING #######
plt.close('all')
G = nx.Graph(Adj)
nx.draw_circular(G)
plt.show()
def generateMotion(self):
    self.vecs = self.pcs * scipy.asmatrix(self.weights).T
    self.vecs = self.vecs.T
    self.vecs = scipy.asarray(self.vecs)[0]
import scipy
import numpy
from scipy.sparse import csc_matrix

a = scipy.array([[1, 2, 3], [4, 5, 6], [4, 5, 6]])
b = scipy.asmatrix(a)
print "b"
print b
print "b.tolist()[1][1]"
print b.tolist()[1][1]
print "numpy.dot(b, b)"
print numpy.dot(b, b)

squared_matrix = csc_matrix(b) * csc_matrix(b)
squared_matrix1 = csc_matrix(b).getrow(1).getcol(1)
# print squared_matrix
print "squared_matrix.tobsr()"
print squared_matrix.tobsr()
print "abc"
print "squared_matrix.todense()"
print squared_matrix.todense()
print "squared_matrix.todense().tolist()[1][1]"
print squared_matrix.todense().tolist()[1][1]
def find_pattern_rotated(PF, pattern, image, rescale=1.0, rotate=(-60, 61, 120),
                         roi_center=None, roi_size=(41, 41), plot=False):
    # Get current time to determine runtime of search
    start_time = time.time()

    # Initialize values needed later on
    result = []
    vmax = 0.0
    vmin = sp.Inf

    # Set region of interest
    if roi_center is None:
        roi_center = sp.array(image.shape[:2]) / 2.0 - 0.5
    roi = center_roi_around(roi_center * rescale, roi_size)

    # Give user some feedback on what is happening
    print("Rescaling image and target by scale={rescale}.\n"
          "    image {0}x{1} px to {2:.2f}x{3:.02f} px."
          .format(image.shape[0], image.shape[1],
                  image.shape[0] * rescale, image.shape[1] * rescale,
                  rescale=rescale), flush=True)
    print("ROI: center={0}, {1}, in unscaled image.\n"
          "     height={2}, width={3} in scaled image"
          .format(roi_center[0], roi_center[1], roi_size[0], roi_size[1]))
    if rotate[2] > 1:
        print("Now correlating rotations from {0}° to {1}° in {2} steps:"
              .format(*rotate))
    else:
        print("Rotation is kept constant at {0}°".format(rotate[0]))

    # Create rescaled copies of image and pattern, determine center coordinates of both
    pattern_scaled = transform.rescale(pattern, rescale)
    image_scaled = transform.rescale(image, rescale)
    PF.set_image(image_scaled)
    cols_scaled, rows_scaled = pattern_scaled.shape[:2]
    pattern_scaled_center = sp.array((rows_scaled, cols_scaled)) / 2. - 0.5
    cols, rows = pattern.shape[:2]
    pattern_center = sp.array((rows, cols)) / 2. - 0.5

    # Launch PatternFinder for all rotations defined in function input
    rotations = sp.linspace(*rotate)
    for r in rotations:
        # Calculate transformation matrix for rotation around center of scaled pattern
        rotation_matrix = rotation_transform_center(pattern_scaled, r,
                                                    center_xy=pattern_scaled_center)
        # Launch Patternfinder
        out, min_coords, value = PF.find(transform.warp(pattern_scaled, rotation_matrix),
                                         image=None, roi=roi)
        # Collect min and max values for plotting later on
        outmax = out.max()
        outmin = out.min()
        if outmax > vmax:
            vmax = outmax
        if outmin < vmin:
            vmin = outmin
        # Undo the rescale for the coordinates
        min_coords = min_coords.astype(sp.float64) / rescale
        # Create a list of results for all rotations
        result.append([r, min_coords, value, out])
        print(".", end="", flush=True)  # progress bar... kind of :)
    print("")
    print("took {0} seconds.".format(time.time() - start_time))

    # Select the best result from the result list and extract its parameters
    best_param_set = result[sp.argmin([r[2] for r in result])]
    best_angle = best_param_set[0]  # the rotation angle is the 0-th element
    best_coord = best_param_set[1]  # the coordinates are the 1-st element
    best_value = best_param_set[2]  # the actual value is the 2-nd element

    # Calculate transformation to transform image onto pattern
    move_to_center = transform.AffineTransform(translation=-(best_coord)[::-1])
    move_back = transform.AffineTransform(translation=(best_coord[::-1]))
    rotation = transform.AffineTransform(rotation=-sp.deg2rad(best_angle))
    translation = transform.AffineTransform(
        translation=sp.asmatrix((best_coord - pattern_center)[::-1]))
    T = translation + move_to_center + rotation + move_back

    # Create a plot showing error over angle
    if plot and rotate[2] > 1:
        fig, ax = plt.subplots(1)
        ax.plot([a[0] for a in result], [a[2] for a in result])
        ax.set_xlabel('Angle (rotation)')
        ax.set_ylabel('difference image-target')
        plt.show()

    # Create heat plot of where target is in image
    if plot == 'all':
        n_rows = int(sp.sqrt(len(result)))
        n_cols = int(sp.ceil(len(result) / n_rows))
        fig, ax = plt.subplots(n_rows, n_cols, squeeze=False,
                               figsize=(2 * n_cols, 2 * n_rows))
        fig.tight_layout(rect=[0, 0.03, 1, 0.97])
        fig.suptitle("Correlation map of where target is in image\n", size=16)
        n = 0
        for i in range(n_rows):
            for j in range(n_cols):
                ax[i, j].axis("off")
                if n < len(result):
                    ax[i, j].imshow(result[n][3], interpolation="nearest",
                                    cmap='cubehelix', vmin=vmin, vmax=vmax)
                    ax[i, j].annotate('Angle:{0:.2f}; Value:{1:.2f}'
                                      .format(result[n][0], result[n][2]), [0, 0])
                    n += 1
        plt.show()

    return T, best_value
    sourceTimeSeries[i] = parcelTimeSeries[identity]  # Clone parcel time series to source space.

checkSourceTimeSeries = scipy.real(sourceTimeSeries[:])  # For checking

########## Forward then inverse model source series
# sourceTimeSeries = inverseOperator*(forwardOperator * sourceTimeSeries)  # this didn't work
sourceTimeSeries = np.dot(inverseOperator,
                          np.dot(forwardOperator, sourceTimeSeries))  # this works

########## Change to amplitude 1, keep angle using Euler's formula.
sourceTimeSeries = scipy.exp(1j * (scipy.asmatrix(scipy.angle(sourceTimeSeries))))
parcelTimeSeries = scipy.exp(1j * (scipy.asmatrix(scipy.angle(parcelTimeSeries))))

########## Get cPLV needed for flips and weighting
cPLVArray = 1j * scipy.zeros(len(sourceIdentities), dtype=float)  # Initialize as zeros (complex).

# Compute cPLV only for parcel-source pairs where the source belongs to that
# parcel. Each source belongs to exactly one parcel.
for i, identity in enumerate(sourceIdentities):
    if sourceIdentities[i] >= 0:  # Skip negative values: sources not belonging to any parcel.
        cPLVArray[i] = scipy.sum((scipy.asarray(parcelTimeSeries[identity])) *
                                 scipy.conjugate(scipy.asarray(sourceTimeSeries[i])))