import numpy as np
from numpy import argsort, size
from numpy.linalg import eig


def princomp_B(A, numpc=0):
    """
    Compute 1st Principal Component.

    Function modified from:
    http://glowingpython.blogspot.ch/2011/07/principal-component-analysis-with-numpy.html

    Parameters
    ----------
    A : array
        2-D data matrix (observations x channels).
    numpc : int
        Number of principal components to keep.

    Returns
    -------
    coeff : array
        The first `numpc` principal component(s) as columns.
    """
    M = A.T
    a = np.dot(M, M.T)  # covariance-like matrix: data multiplied by its transpose
    latent, coeff = eig(a)
    p = size(coeff, axis=1)
    idx = argsort(latent)   # sort the eigenvalues
    idx = idx[::-1]         # in descending order
    # sort the eigenvectors according to the sorted eigenvalues
    coeff = coeff[:, idx]
    # latent = latent[idx]  # sorted eigenvalues (not returned)
    if 0 <= numpc < p:
        coeff = coeff[:, :numpc]  # keep only the first numpc components
    # score = np.dot(coeff.T, M)  # projection of the data onto the new space
    return coeff
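# Minimal usage sketch of princomp_B (the 64-channel, 1000-sample random array below
# is made up for illustration; it is not part of the original module):
if __name__ == "__main__":
    _demo_data = np.random.randn(64, 1000)    # hypothetical channels x samples matrix
    _pc = princomp_B(_demo_data.T, numpc=1)   # rows as observations -> component over channels
    print(_pc.shape)                          # expected: (64, 1)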
from numpy import argsort
from six import iteritems, itervalues


def get_prediction(self, a, b, c, ok_index, restrict_vocab=30000):
    # restrict the evaluation to the `restrict_vocab` most frequent words
    # (note: the ok_index argument is recomputed below from this restricted vocabulary)
    ok_vocab = dict(sorted(iteritems(self.model.vocab),
                           key=lambda item: -item[1].count)[:restrict_vocab])
    ok_index = set(v.index for v in itervalues(ok_vocab))
    ignore = set(self.model.vocab[v].index for v in [a, b, c])  # indexes of the input words
    positive = [b, c]
    negative = [a]
    predicted = None
    # topn=False makes most_similar return the similarity to every word in the vocabulary
    for index in argsort(self.model.most_similar(positive=positive, negative=negative,
                                                 topn=False))[::-1]:
        if index in ok_index and index not in ignore:
            predicted = self.model.index2word[index]
            break
    return predicted
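# Minimal usage sketch, assuming get_prediction is attached to a small evaluator class
# that wraps an old-style gensim Word2Vec model as self.model (the class name, model
# path, and example words below are hypothetical):
#
#   class AnalogyEvaluator(object):
#       def __init__(self, model):
#           self.model = model
#       get_prediction = get_prediction
#
#   evaluator = AnalogyEvaluator(gensim.models.Word2Vec.load("vectors.model"))
#   # "a is to b as c is to ?" -- e.g. man : king :: woman : ?
#   print(evaluator.get_prediction("man", "king", "woman", None))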
elementSizeAll.append(elementSize)
numElementsAll.append(numElements)

print(np.array(numElementsAll))
print(np.array(elementSizeAll))

heaveforceAll = np.array(heaveforceAll)
pitchmomentAll = np.array(pitchmomentAll)
elementSizeAll = np.array(elementSizeAll)

# hydrostatics: longitudinal metacentric height from the centre of buoyancy,
# waterplane second moment and displaced volume
metaCenter = zB + Sxx / V0
GML = metaCenter - zG

# convert the integrated heave force and pitch moment to sinkage and trim angle
sinkage = heaveforceAll / (g * density * A)
pitch = pitchmomentAll / (g * density * V0 * GML)

perm = np.argsort(Frh)

# numerical (grid-convergence) uncertainty of the sinkage for each Froude number
sink_unc = 0 * sinkage
for i in range(sinkage.shape[0]):
    sink_unc[i, :] = NumericalUncertainty(elementSizeAll[i, :], sinkage[i, :],
                                          showNumericalUncertainty)

cnt = sinkage.shape[1] - 1  # index of the finest mesh
if showHeaveForce:
    plt.figure(1)
    plt.plot(Frh[perm], heaveforceAll[perm, cnt], 'o-', label="BEM")
    plt.xlabel(r"$Fr_h$")
    plt.ylabel(r"$F_z [N]$")
    plt.grid(True)
    plt.legend()
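# Sanity-check sketch of the hydrostatic relations used above, with made-up numbers
# (rho, grav, Awp, vol, gml, Fz, My below are hypothetical, not results from the BEM run):
#
#   rho, grav = 1000.0, 9.81             # water density [kg/m^3], gravity [m/s^2]
#   Awp, vol, gml = 12.0, 6.0, 25.0      # waterplane area [m^2], volume [m^3], GM_L [m]
#   Fz, My = 2.0e4, 1.5e4                # heave force [N], pitch moment [N m]
#   sinkage_demo = Fz / (grav * rho * Awp)        # ~0.17 m
#   trim_demo = My / (grav * rho * vol * gml)     # ~1.0e-2 rad (small-angle trim)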