def get_angle_potential_value(atoms, angle):
    i = angle.atomi
    j = angle.atomj
    k = angle.atomk
    rij = rel_pos_pbc(atoms, i, j)
    dij = linalg.norm(rij)
    eij = rij / dij
    rkj = rel_pos_pbc(atoms, k, j)
    dkj = linalg.norm(rkj)
    ekj = rkj / dkj
    eijekj = np.dot(eij, ekj)
    if np.abs(eijekj) > 1.0:
        eijekj = np.sign(eijekj)
    a = np.arccos(eijekj)
    if angle.cos:
        da = np.cos(a) - np.cos(angle.a0)
    else:
        da = a - angle.a0
        da = da - np.around(da / np.pi) * np.pi
    v = 0.5 * angle.k * da ** 2
    angle.a = a
    return i, j, k, v
def checkNumericalGradient():
    # This code can be used to check your numerical gradient implementation
    # in computeNumericalGradient().
    # It analytically evaluates the gradient of a very simple function called
    # simpleQuadraticFunction (see below) and compares the result with your
    # numerical solution. Your numerical gradient implementation is incorrect
    # if your numerical solution deviates too much from the analytical solution.

    # Evaluate the function and gradient at x = [4, 10]. (Here, x is a 2d vector.)
    x = np.array([4, 10])
    value, grad = simpleQuadraticFunction(x)

    # Use your code to numerically compute the gradient of simpleQuadraticFunction at x.
    # (The notation "lambda x: simpleQuadraticFunction(x)[0]" creates a function
    # that only returns the cost and not the grad of simpleQuadraticFunction.)
    numgrad = computeNumericalGradient(lambda x: simpleQuadraticFunction(x)[0], x)

    # Visually examine the two gradient computations. The two columns
    # you get should be very similar.
    print(np.array([numgrad, grad]).T)
    print("The above two columns you get should be very similar.")
    print("(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n")

    # Evaluate the norm of the difference between the two solutions.
    # If you have a correct implementation, and assuming you used EPSILON = 0.0001
    # in computeNumericalGradient(), then diff below should be 2.1452e-12
    diff = norm(numgrad - grad) / norm(numgrad + grad)
    print(diff)
    print("Norm of the difference between numerical and analytical gradient (should be < 1e-9)\n\n")
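# The check above relies on computeNumericalGradient(), which is defined
# elsewhere. A minimal sketch of such a helper, assuming the usual
# central-difference scheme (the name, signature, and default epsilon here
# are assumptions, not the original implementation):
import numpy as np

def computeNumericalGradient(J, theta, epsilon=1e-4):
    """Approximate the gradient of J at theta by central differences."""
    numgrad = np.zeros_like(theta, dtype=float)
    perturb = np.zeros_like(theta, dtype=float)
    for i in range(theta.size):
        perturb[i] = epsilon
        # slope of J along the i-th coordinate direction
        numgrad[i] = (J(theta + perturb) - J(theta - perturb)) / (2.0 * epsilon)
        perturb[i] = 0.0
    return numgrad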
def plot(self, string):
    """Publish walls with cubes."""
    walls = decode_string_to_tuplearray(string)
    for i, (p1, p2) in enumerate(walls):
        marker = get_marker('wall_%d' % i, Marker.CUBE)
        # set scale
        marker.scale.x = norm(p1 - p2)
        marker.scale.y = 20
        marker.scale.z = 1e3
        # set color
        marker.color.r = 1.0
        marker.color.a = 1.0
        # set position
        p = marker.pose.position
        p.x, p.y = .5 * (p1 + p2)
        p.z = .5e3
        # set orientation
        dp = p2 - p1
        psi = acos(dp[0] / norm(dp))
        o = marker.pose.orientation
        o.x, o.y, o.z, o.w = quaternion_from_euler(0, 0, psi)
        # publish
        publisher.publish(marker)
    # hide any markers left over from a previous, longer wall list
    for i in range(len(walls), self.prev_num_plotted_walls):
        marker = get_marker('wall_%d' % i, Marker.CUBE)
        p = marker.pose.position
        p.x = -1e20
        publisher.publish(marker)
    self.prev_num_plotted_walls = len(walls)
def _rank1(X, profunc1, args1, profunc2, args2, niter=500, eps=1e-6):
    u = X.sum(axis=1)
    u /= norm(u)
    v = X.sum(axis=0)
    v /= norm(v)
    rho_old = dot(dot(u, X), v)
    for i in range(niter):
        alpha = dot(X, v)
        if profunc1 is None:
            u = alpha
        else:
            u = profunc1(alpha, **args1)
        u /= norm(u)
        beta = dot(X.T, u)
        if profunc2 is None:
            v = beta
        else:
            v = profunc2(beta, **args2)
        rho = norm(v)
        v /= rho
        if abs(rho - rho_old) <= eps:
            break
        else:
            rho_old = rho
    return u, rho, v
def test3():
    import spacy.en
    from spacy.attrs import IS_LOWER
    from spacy.parts_of_speech import ADV
    nlp = spacy.en.English()

    # Find log probability of Nth most frequent word
    probs = [lex.prob for lex in nlp.vocab]
    probs.sort()
    is_adverb = lambda tok: tok.pos == ADV and tok.prob < probs[-1000]
    tokens = nlp(u"‘Give it back,’ he pleaded abjectly, ‘it’s mine.’")
    o = u''.join(tok.string.upper() if is_adverb(tok) else tok.string for tok in tokens)
    assert o == u'‘Give it back,’ he pleaded ABJECTLY, ‘it’s mine.’'

    pleaded = tokens[7]
    assert pleaded.repvec.shape == (300,)
    o = pleaded.repvec[:5]
    assert sum(o) != 0

    from numpy import dot
    from numpy.linalg import norm
    cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
    words = [w for w in nlp.vocab if w.check(IS_LOWER) and w.has_repvec]
    words.sort(key=lambda w: cosine(w.repvec, pleaded.repvec))
    words.reverse()
    o = [w.orth_ for w in words[0:20]]
    assert o == [u'pleaded', u'pled', u'plead', u'confessed', u'interceded',
                 u'pleads', u'testified', u'conspired', u'motioned', u'demurred',
                 u'countersued', u'remonstrated', u'begged', u'apologised',
                 u'consented', u'acquiesced', u'petitioned', u'quarreled',
                 u'appealed', u'pleading']
    o = [w.orth_ for w in words[50:60]]
    assert o == [u'counselled', u'bragged', u'backtracked', u'caucused', u'refiled',
                 u'dueled', u'mused', u'dissented', u'yearned', u'confesses']
    o = [w.orth_ for w in words[100:110]]
    assert o == [u'cabled', u'ducked', u'sentenced', u'perjured', u'absconded',
                 u'bargained', u'overstayed', u'clerked', u'confided', u'sympathizes']
def __init__(self, v1, v2):
    self.v1 = v1
    self.v2 = v2
    self.a = la.norm(v1)
    self.b = la.norm(v2)
    self.alpha = np.arcsin(np.dot(v1, v2) / (self.a * self.b))
    self.area = self.v1.dot(self.v2)
def isparallel(O1, O2):
    '''
    Judge whether two array-like vectors are parallel to each other.

    Parameters
    ----------
    O1, O2 : 1d array-like
        The input vectors.

    Returns
    -------
    int
        * 0: not parallel
        * 1: parallel
        * -1: anti-parallel
    '''
    norm1 = nl.norm(O1)
    norm2 = nl.norm(O2)
    if norm1 < RZERO or norm2 < RZERO:
        return 1
    elif O1.shape[0] == O2.shape[0]:
        buff = np.inner(O1, O2) / (norm1 * norm2)
        if np.abs(buff - 1) < RZERO:
            return 1
        elif np.abs(buff + 1) < RZERO:
            return -1
        else:
            return 0
    else:
        raise ValueError("isparallel error: the shape of the array-like vectors does not match.")
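# A usage sketch for isparallel(), assuming nl is numpy.linalg and RZERO is a
# small tolerance (e.g. 1e-10) defined elsewhere in the module:
# >>> isparallel(np.array([1.0, 0.0]), np.array([2.0, 0.0]))
# 1
# >>> isparallel(np.array([1.0, 0.0]), np.array([-3.0, 0.0]))
# -1
# >>> isparallel(np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# 0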
def project_aln(alignment, weighted_3d_algn, weight):
    '''computes the projected weighted alignment based on the alignment in
    ascii format, the weighted 3D alignment tensor (weighted_3d_algn) and the
    weight matrix (weight); it does so by computing the frequencies first
    (see aa_freq)
    '''
    algn_shape = get_algn_shape(alignment)
    no_seq = algn_shape.no_seq
    no_pos = algn_shape.no_pos
    no_aa = algn_shape.no_aa

    algn_aa_freq = aa_freq(alignment)
    proj_vect = zeros((no_pos, no_aa))
    wt_mat = zeros((no_aa, no_pos))
    for i in range(0, no_pos):
        for aa in range(0, no_aa):
            wt_mat[aa, i] = weight[i, aa] * algn_aa_freq[aa, i]
        if LA.norm(wt_mat[:, i]) > 0:
            proj_vect[i, :] = wt_mat[:, i] / LA.norm(wt_mat[:, i])

    prj_wt_aln = zeros((no_seq, no_pos))
    for i in range(0, no_pos):
        for aa in range(0, no_aa):
            prj_wt_aln[:, i] = prj_wt_aln[:, i] \
                + proj_vect[i, aa] * weighted_3d_algn[:, i, aa]
    return ProjectionMatrices(prj_wt_aln, proj_vect)
def test_retrieve_data(self):
    ptree = PropertyTree()
    ptree.put_string('type', 'SeriesRC')
    ptree.put_double('series_resistance', 100e-3)
    ptree.put_double('capacitance', 2.5)
    device = EnergyStorageDevice(ptree)

    ptree = PropertyTree()
    ptree.put_string('type', 'ElectrochemicalImpedanceSpectroscopy')
    ptree.put_double('frequency_upper_limit', 1e+2)
    ptree.put_double('frequency_lower_limit', 1e-1)
    ptree.put_int('steps_per_decade', 1)
    ptree.put_int('steps_per_cycle', 64)
    ptree.put_int('cycles', 2)
    ptree.put_int('ignore_cycles', 1)
    ptree.put_double('dc_voltage', 0)
    ptree.put_string('harmonics', '3')
    ptree.put_string('amplitudes', '5e-3')
    ptree.put_string('phases', '0')
    eis = Experiment(ptree)

    with File('trash.hdf5', 'w') as fout:
        eis.run(device, fout)
    spectrum_data = eis._data
    with File('trash.hdf5', 'r') as fin:
        retrieved_data = retrieve_impedance_spectrum(fin)
    print(spectrum_data['impedance'] - retrieved_data['impedance'])
    print(retrieved_data)
    self.assertEqual(linalg.norm(spectrum_data['frequency'] -
                                 retrieved_data['frequency'], inf), 0.0)
    # not sure why we don't get equality for the impedance
    self.assertLess(linalg.norm(spectrum_data['impedance'] -
                                retrieved_data['impedance'], inf), 1e-10)
def problem8():
    "problem set 2.1, problem 8, page 56"
    import LUdecomp
    A = np.array([[-3, 6, -4], [9, -8, 24], [-12, 24, -26]], dtype=float)
    A_orig = A.copy()
    LU = LUdecomp.LUdecomp(A)
    b = np.array([-3, 65, -42], dtype=float)
    b_orig = b.copy()
    x = LUdecomp.LUsolve(LU, b)
    # extract L and U for verification
    U = np.triu(LU)
    L = np.tril(LU)
    L[np.diag_indices_from(L)] = 1.0
    print("""
Problem 8:
A =
{}
LU decomposition A = LU, LU (in one matrix) =
{}
Solving Ax=b, with b = {}
Solution x = {}
Verifying solution:
    residual ||Ax-b||_2  = {}
    ||A - dot(L,U)||_inf = {}
""".format(A_orig, LU, b_orig, x,
           la.norm(np.dot(A_orig, x) - b_orig, 2),
           la.norm(A_orig - np.dot(L, U), np.inf)))
def sparse_calculate_error(mat, sketch, normalized=True):
    cov = (mat.transpose()).dot(mat).todense()
    cov_sketch = np.dot(sketch.T, sketch)
    if normalized:
        return (ln.norm(cov - cov_sketch, ord=2) /
                sparse_squared_frobenius_norm(mat))
    else:
        return ln.norm(cov - cov_sketch, ord=2)
def genGraph(S_actual, S_est, S_previous, empCov_set, nodeID, e1, e2, e3, e4, display=False):
    D = np.where(S_est != 0)[0].shape[0]
    T = np.where(S_actual != 0)[0].shape[0]
    TandD = float(np.where(np.logical_and(S_actual, S_est) == True)[0].shape[0])
    P = TandD / D
    R = TandD / T
    offDiagDiff = S_actual - S_est
    offDiagDiff = offDiagDiff - np.diag(np.diag(offDiagDiff))
    S_diff = (S_est - S_previous)
    S_diff = S_diff - np.diag(np.diag(S_diff))
    ind = (S_diff < 1e-2) & (S_diff > -1e-2)
    S_diff[ind] = 0
    K = np.count_nonzero(S_diff)
    e1.append(alg.norm(offDiagDiff, 'fro'))
    e2.append(2 * P * R / (P + R))
    K = float(np.where(np.logical_and((S_est > 0) != (S_previous > 0), S_est > 0) == True)[0].shape[0])
    e3.append(-np.log(alg.det(S_est)) + np.trace(np.dot(S_est, empCov_set[nodeID])) + K)
    e4.append(alg.norm(S_est - S_previous, 'fro'))
    display = False
    if display == True:
        if (nodeID > timeShift - 10) and (nodeID < timeShift + 10):
            print('nodeID = ', nodeID)
            print('S_true = ', S_actual, '\nS_est', S_est)
            # print('S_error = ', S_actual - S_est, '\n its Fro error = ', alg.norm(S_actual - S_est, 'fro'))
            print('D = ', D, 'T = ', T, 'TandD = ', TandD, 'K = ', K,
                  'P = ', P, 'R = ', R, 'Score = ', 2 * P * R / (P + R))
    return e1, e2, e3, e4
def compute_TPS_K(ctrl_pts, landmarks=None, _lambda=0):
    """
    Compute the kernel matrix for thin-plate splines.
    Reference: Landmark-based Image Analysis, Karl Rohr, p195
    """
    # kernel_func = [lambda r,_lambda=0: 0 if r==0 else r*r*log(r),
    #                lambda r,_lambda=0: -r]
    # the above definition is not used because the if/else conditional
    # expression is not supported in Python 2.4
    def kernel_func_2d(r, _lambda=0):  # _lambda reserved for regularization
        if r == 0:
            return 0
        else:
            return r * r * log(r)

    def kernel_func_3d(r, _lambda=0):  # _lambda reserved for regularization
        return -r

    kernel_func = (kernel_func_2d, kernel_func_3d)

    [n, d] = ctrl_pts.shape
    K = [kernel_func[d - 2](norm(ctrl_pts[i] - ctrl_pts[j]), _lambda)
         for i in arange(n) for j in arange(n)]
    K = array(K).reshape(n, n)
    if landmarks is not None:
        [m, d] = landmarks.shape  # d should equal the dimension of ctrl_pts
        U = [kernel_func[d - 2](norm(landmarks[i] - ctrl_pts[j]), _lambda)
             for i in arange(m) for j in arange(n)]
        U = array(U).reshape(m, n)
    else:
        U = None
    return K, U
def Normal(a, b):
    """finds the unit normal vector of 2 vectors"""
    vector = cross(a, b)
    length = norm(vector)
    normal = vector / length
    assert allclose(norm(normal), 1.)
    return normal
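# Example: the unit normal of the x and y basis vectors is the z axis
# (assumes cross, norm, and allclose are the numpy functions imported by this module):
# >>> Normal(array([1., 0., 0.]), array([0., 1., 0.]))
# array([0., 0., 1.])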
def evaluate(self, locations):
    total = 0
    for i, layer in enumerate(self._all_edges):
        s = layer.shape
        for j1 in range(s[0]):
            if j1 == self.nodes[i].shape[1]:
                break
            for j2 in range(s[1]):
                # firstpoint = (i*hscale + hoff, j1*vscale + voff)
                # secondpoint = ((i+1)*hscale + hoff, j2*vscale + voff)
                one = locations[(i, j1)]
                two = locations[(i + 1, j2)]
                distance = linalg.norm(one - two)
                f = distance * distance
                influence = abs(layer[j1, j2])
                # quality is decreased when connected things are far apart
                total -= f * influence
    for _, v in locations.items():
        for _, v2 in locations.items():
            if v is not v2:
                d = linalg.norm(v - v2)
                total += 1000 / d  # this is like electric potential
    return total
def sphere_project(v, r):
    """
    sphere_project - project point X,Y,Z to the surface of sphere radius r

    V = sphere_project(v,r,c)

    Cartesian inputs:
    v is the vertex matrix, Nx3 (XYZ)
    r is the sphere radius, 1x1 (default 1)
    c is the sphere centroid, 1x3 (default 0,0,0)

    XYZ are converted to spherical coordinates and their radius is
    adjusted according to r, from c toward XYZ (defined with theta,phi)

    V is returned as Cartesian 3D coordinates
    """
    from numpy.linalg import norm
    for i in range(v.shape[0]):
        # NOTE: c0/c1/c2, R, X, Y, a, Z, and magn are computed but never used;
        # only the radial rescaling at the end of the loop takes effect.
        c0 = 0.207
        c1 = 2.000
        c2 = -1.123
        R = r
        X = v[0]
        Y = v[1]
        a = ((X ** 2) + (Y ** 2)) / (R ** 2)
        Z = (0.5 * R * (1.0 - a) ** 0.5) * (c0 + c1 * a + c2 * (a ** 2))
        magn = norm(v[i])
        vecnorm = v[i] / norm(v[i])
        v[i] = vecnorm * r
def AreaNormal(nodes):
    """
    Returns area, unitNormal

    n = Normal = a x b
    Area = 1/2 * |a x b|
    V = <v1, v2, v3>
    |V| = sqrt(v1^2 + v2^2 + v3^2) = norm(V)

    Area = 0.5 * |n|
    unitNormal = n / |n|
    """
    (n0, n1, n2) = nodes
    a = n0 - n1
    b = n0 - n2
    vector = cross(a, b)
    length = norm(vector)
    normal = vector / length
    area = 0.5 * length
    if not allclose(norm(normal), 1.):
        print("a = ", a)
        print("b = ", b)
        print("normal = ", normal)
        print("length = ", length)
        sys.exit('check...')
    return area, normal
def decompose(m):
    '''
    Factor 3x4 camera matrix into 3x3 internal and 3x4 external.

    This is an RQ decomposition, enforcing a --+ signature for the
    internal matrix and positive determinant for the rotation matrix.
    The internal matrix is also normalised.

    NumPy only provides QR, and the index gymnastics required to
    convert QR to RQ make it faster to use a custom routine.
    '''
    r = np.zeros_like(m[:, :-1])
    q = np.empty_like(m)
    r[2, 2] = la.norm(m[2, :-1])
    q[2, :] = m[2, :] / r[2, 2]
    r[1, 2] = inner(m[1, :-1], q[2, :-1])
    w = m[1, :] - r[1, 2] * q[2, :]
    r[1, 1] = -la.norm(w[:-1])
    q[1, :] = w / r[1, 1]
    r[0, 1:] = inner(m[0, :-1], q[1:, :-1])
    w = m[0, :] - dot(r[0, 1:], q[1:, :])
    r[0, 0] = -la.norm(w[:-1])
    q[0, :] = w / r[0, 0]
    q *= np.sign(la.det(q[:, :-1]))
    return r / r[2, 2], q
def getjvq():
    from numpy.linalg import norm
    unit = io.read("POSCAR_unit")
    atoms = io.read("POSCAR")
    supercell = list(map(int, norm(atoms.cell, axis=1) / norm(unit.cell, axis=1) + [.5] * 3))
    c = supercell
    q = []
    u = [int(x / 2) * 2 + 1 for x in c]
    for i in range(u[0]):
        for j in range(u[1]):
            for k in range(u[2]):
                b = np.array([float(i - c[0] / 2) / c[0],
                              float(j - c[1] / 2) / c[1],
                              float(k - c[2] / 2) / c[2]])
                q.append(b)
    allpos = np.load('allpos.npy')
    v = np.gradient(allpos)[0]
def subgradientPolyakCFM(f, sgf, x0, lowerBounder, gamma=1, maxIters=100, report=None):
    '''
    Use the Polyak step size and CFM direction update rule.
    '''
    xk = x0
    gk = sgf(x0)
    sk = gk
    lb = lowerBounder(xk)
    fb = f(xk)
    # Using optimal step size for fixed number of iterations
    for k in arange(maxIters) + 1:
        if report:
            report(xk, k)
        sko = sk
        gko = gk
        gk = sgf(xk)
        # betak = 0.25
        # betak = (- gamma * gk.dot(sko) / (norm(sko) ** 2)
        #          if sko.dot(gk) < 0 else 0)
        betak = (- gamma * gk.dot(gko) / (norm(gko) ** 2)
                 if gko.dot(gk) < 0 else 0)
        # sk = gk + betak * sko
        sk = gk + betak * gko
        nfb = f(xk)
        if nfb < fb:  # update the lower bound before overwriting the best value
            nlb = lowerBounder(xk)
            lb = lb if lb > nlb else nlb
        fb = fb if fb < nfb else nfb
        alphak = 0.5 * (fb - lb) / (norm(sk))  # **2 ) #* (k ** -0.66)
        xk = xk - alphak * sk
    return xk
def fastica_defl(X, nIC=None, guess=None, nonlinfn=pow3nonlin,
                 termtol=5e-7, maxiters=2e3):
    nPC, siglen = X.shape
    nIC = nIC or nPC - 1
    guess = guess or randn(nPC, nIC)
    if _orth_loaded:
        guess = orth(guess)
    B = zeros(guess.shape, np.float64)
    errvec = []
    icc = 0
    while icc < nIC:
        w = randn(nPC, 1) - 0.5
        w -= dot(dot(B, transp(B)), w)
        w /= norm(w)
        wprev = zeros(w.shape)
        for i in range(int(maxiters) + 1):
            w -= dot(dot(B, transp(B)), w)
            w /= norm(w)
            # wprev = w.copy()
            if (norm(w - wprev) < termtol) or (norm(w + wprev) < termtol):
                B[:, icc] = transp(w)
                icc += 1
                break
            wprev = w.copy()
    return B.real, errvec
def hexagon_bz(reciprocals=None, nk=100, vh='H'):
    '''
    The whole hexagonal BZ.
    '''
    if reciprocals is not None:
        b1 = reciprocals[0]
        b2 = reciprocals[1]
        temp = np.inner(b1, b2) / nl.norm(b1) / nl.norm(b2)
        assert np.abs(np.abs(temp) - 0.5) < RZERO
        if np.abs(temp + 0.5) < RZERO:
            b2 = -b2
    else:
        if vh in ('H', 'h'):
            b1 = np.array([np.sqrt(3.0) / 2, 0.5]) * 4 * np.pi / np.sqrt(3.0)
            b2 = np.array([np.sqrt(3.0) / 2, -0.5]) * 4 * np.pi / np.sqrt(3.0)
        else:
            b1 = np.array([1.0, 0.0]) * 4 * np.pi / np.sqrt(3.0)
            b2 = np.array([0.5, np.sqrt(3.0) / 2]) * 4 * np.pi / np.sqrt(3.0)
    p0, p1, p2, p3, p4 = (-(b1 + b2) / 3, (b1 + b2) / 3, (b1 + b2) * 2 / 3,
                          (b1 * 2 - b2) / 3, (b2 * 2 - b1) / 3)
    mesh = np.zeros((nk ** 2, b1.shape[0]))
    for i in range(nk):
        for j in range(nk):
            coords = b1 * i / nk + b2 * j / nk + p0
            if isintratriangle(coords, p1, p2, p3, vertexes=(False, True, False),
                               edges=(True, True, False)):
                coords = coords - b1
            if isintratriangle(coords, p1, p2, p4, vertexes=(False, True, False),
                               edges=(True, True, False)):
                coords = coords - b2
            mesh[i * nk + j, :] = coords
    volume = np.abs(np.cross(b1, b2))
    return BaseSpace(('k', mesh, volume))
def distance(self, other, method='euclidean'):
    """
    Distance between the center of this source and another.

    Parameters
    ----------
    other : Source, or array-like
        Either another source, or the center coordinates of another source

    method : str
        Specify a distance measure to use for spatial distance between source
        centers. Current options include Euclidean distance ('euclidean') and
        L1-norm ('l1').
    """
    from numpy.linalg import norm

    checkParams(method, ['euclidean', 'l1'])

    if method == 'l1':
        order = 1
    else:
        order = 2

    if isinstance(other, Source):
        return norm(self.center - other.center, ord=order)
    elif isinstance(other, list) or isinstance(other, ndarray):
        return norm(self.center - asarray(other), ord=order)
def reset(self, direction, speed):
    # initial location
    p = asarray([random.uniform(-1, 1) for i in range(3)])
    p /= norm(p)

    # orienting point
    o = [0, 0, 0]
    mini = min(range(len(p)), key=lambda i: abs(p[i]))
    o[mini] = 1 if p[mini] < 0 else -1

    # velocity vector
    v = cross(p, o)
    v /= norm(v)
    v *= speed * pi / 180

    r = 1.145
    shape = SphericalPolygon([rotate(rotate(p, o, r * random.uniform(0.9, 1.1)), p, th)
                              for th in [i * pi / 8 for i in range(16)]])

    for t in self.tiles.values():
        t.bottom = 0
        t.layers = [Layer('T', 1)] if shape.contains(t.vector) else []
        t.limit()

    self._indexedtiles = []
    for t in self.tiles.values():
        self._indexedtiles.append(t)

    self._index = PointTree(dict([[self._indexedtiles[i].vector, i]
                                  for i in range(len(self._indexedtiles))]))

    self._direction = direction
    self._velocity = v
def test_ordinary(self):
    N = 4
    np.random.seed()
    state, target = np.zeros((2,) * N), SQN(0.0)
    for index in QuantumNumbers.decomposition([SQNS(0.5)] * N, signs=[1] * N, target=target):
        state[index] = np.random.random()
    state = state.reshape((-1,))
    sites = [Label('S%s' % i, qns=SQNS(0.5), flow=1) for i in range(N)]
    bonds = [Label('B%s' % i, qns=None, flow=None) for i in range(N + 1)]
    bonds[+0] = bonds[+0].replace(qns=SQNS(0.0), flow=+1)
    bonds[-1] = bonds[-1].replace(qns=QuantumNumbers.mono(target), flow=-1)
    for cut in range(N + 1):
        mps = MPS.fromstate(state, sites, bonds, cut=cut, ttype='D')
        self.assertTrue(all(mps.iscanonical()))
        self.assertAlmostEqual(norm(state - mps.state), 0.0)
    for cut in range(N + 1):
        mps.canonicalize(cut)
        self.assertTrue(all(mps.iscanonical()))
    for cut in range(N + 1):
        mps = MPS.fromstate(state, sites, bonds, cut=cut, ttype='S')
        self.assertTrue(all(mps.iscanonical()))
        self.assertAlmostEqual(norm(state - mps.state), 0.0)
    for cut in range(N + 1):
        mps.canonicalize(cut)
        self.assertTrue(all(mps.iscanonical()))
def summarize_evaluation(query=None, url=None, summary=None):
    j = []
    if url:
        b = URL(url)
        a = Document(b.download(cached=True))
        for b in a.get_elements_by_tagname("p"):
            j.append(plaintext(b.content).encode("utf-8"))
        j = [word for sentence in j for word in sentence.split()
             if re.match("^[a-zA-Z_-]*$", word) or '.' in word or "'" in word or '"' in word]
        j = ' '.join(j)
        lsa = LSA(stopwords, ignore_characters)
        sentences = j.split('.')
        sentences = [sentence for sentence in sentences
                     if len(sentence) > 1 and sentence != '']
        for sentence in sentences:
            lsa.parse(sentence)
    else:
        lsa = LSA(stopwords, ignore_characters)
        for sentence in query:
            lsa.parse(sentence)
    lsa.build()
    lsa.calc()
    lsa2 = LSA(stopwords, ignore_characters)
    for sentence in summary:
        lsa2.parse(sentence)
    lsa2.build()
    lsa2.calc()
    vectors = [(dot(lsa.S, lsa.U[0, :]), dot(lsa.S, lsa.U[i, :]))
               for i in range(len(lsa.U))]
    vectors2 = [(dot(lsa2.S, lsa2.U[0, :]), dot(lsa2.S, lsa2.U[i, :]))
                for i in range(len(lsa2.U))]
    angles = [arccos(dot(a, b) / (norm(a, 2) * norm(b, 2)))
              for a in vectors for b in vectors2]
    return str(abs(1 - float(angles[1]) / float(pi / 2)))
def cosine_similarity(a, b):
    tn = np.inner(a, b)
    td = la.norm(a) * la.norm(b)
    if td != 0.0:
        return tn / td
    else:
        return 0.0
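# Example usage, assuming np is numpy and la is numpy.linalg as used above:
# >>> cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0]))
# 0.7071067811865475  (i.e. cos(45 degrees))
# >>> cosine_similarity(np.array([1.0, 0.0]), np.zeros(2))
# 0.0  (the zero denominator is handled explicitly)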
def fista(objfunc, dfunc, p, Profunc, regu, args={'lam': 100.0},
          theta=None, eta=2, L0=1.0, maxiter=500, eps=1e-6):
    if theta is None:
        theta = uniform(-0.1, 0.1, size=p)
    x_old, y = theta, theta
    tk_old, L, i = 1.0, L0, 0
    while i < maxiter:
        fy, dfy = objfunc(y), dfunc(y)
        # step one: backtracking search for the step size
        while True:
            xk = Profunc(y, dfy, L, regu, args)
            if objfunc(xk) <= fy + dot(xk - y, dfy) + (L / 2.0) * norm(xk - y) ** 2:
                break
            else:
                L *= eta
        # step two: momentum update
        tk = (1.0 + sqrt(1.0 + 4.0 * tk_old ** 2)) / 2.0
        dx = xk - x_old
        y = xk + (tk_old - 1.0) / tk * dx
        tk_old = tk
        x_old = xk
        i += 1
        if norm(dx) <= eps:
            return xk
    return xk
def check_cost_function(lambda_coef=0):
    X_t = np.random.rand(4, 3)
    Theta_t = np.random.rand(5, 3)
    Y = X_t.dot(Theta_t.T)
    Y[np.random.rand(*Y.shape) > 0.5] = 0
    R = np.zeros_like(Y)
    R[Y != 0] = 1

    X = np.random.randn(*X_t.shape)
    Theta = np.random.randn(*Theta_t.shape)
    num_movies, num_users = Y.shape
    num_features = Theta_t.shape[1]

    J = lambda t: cofi_cost_func(t, Y, R, num_users, num_movies,
                                 num_features, lambda_coef)[0]
    numgrad = compute_numerical_gradient(J, np.hstack((X.ravel(), Theta.ravel())))
    cost, grad = cofi_cost_func(np.hstack((X.ravel(), Theta.ravel())),
                                Y, R, num_users, num_movies, num_features, lambda_coef)
    for i, j in zip(numgrad, grad):
        print(i, j)
    print('The above two columns you get should be very similar.\n'
          '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n')
    diff = norm(numgrad - grad) / norm(numgrad + grad)
    print('If your backpropagation implementation is correct, then \n'
          'the relative difference will be small (less than 1e-9). \n'
          'Relative Difference: %s' % diff)
def __init__(self, reciprocals, path):
    '''
    Constructor.

    Parameters
    ----------
    reciprocals : iterable of 1d ndarray
        The translation vectors of the reciprocal lattice.
    path : str
        The str-formed path.
    '''
    path = path.replace(' ', '')
    assert path[0] in KMap.database and path[1] == ':'
    space, path, database, reciprocals = (path[0], path[2:].split(','),
                                          KMap.database[path[0]], np.asarray(reciprocals))
    if space == 'L':
        assert len(reciprocals) == 1
    elif space == 'S':
        assert len(reciprocals) == 2
        inner = np.inner(reciprocals[0], reciprocals[1]) / nl.norm(reciprocals[0]) / nl.norm(reciprocals[1])
        assert np.abs(inner) < RZERO
    elif space == 'H':
        assert len(reciprocals) == 2
        inner = np.inner(reciprocals[0], reciprocals[1]) / nl.norm(reciprocals[0]) / nl.norm(reciprocals[1])
        assert np.abs(np.abs(inner) - 0.5) < RZERO
        if np.abs(inner + 0.5) < RZERO:
            reciprocals[1] = -reciprocals[1]
    for segment in path:
        segment = segment.split('-')
        assert len(segment) == 2
        self.append([reciprocals.T.dot(database[segment[0]]),
                     reciprocals.T.dot(database[segment[1]])])
def reward_function(self, debug=0):
    """
    Reward function: works well with PPO.
    Shaping with some ideas based on the Continuous Lunar Lander v.2 gym environment:
    https://gym.openai.com/envs/LunarLanderContinuous-v2/
    """
    self.reward = 0

    velocity = self.state[1:6:2]
    euler_angles = self.ang
    psi = self.ang[2]
    body_ang_vel = self.state[-3:]
    action = self.action

    shaping = -SHAPING_WEIGHT / np.sum(SHAPING_INTERNAL_WEIGHTS) * (
        SHAPING_INTERNAL_WEIGHTS[0] * norm(velocity / BB_VEL) +
        SHAPING_INTERNAL_WEIGHTS[1] * norm(psi / 4) +
        SHAPING_INTERNAL_WEIGHTS[2] * norm(euler_angles[0:2] / BB_ANG))

    # CASCADING REWARDS
    r_state = np.concatenate((velocity, [psi]))
    for TR_i, TR_Pi in zip(TR, TR_P):
        if norm(r_state) < norm(np.ones(len(r_state)) * TR_i):
            shaping += TR_Pi
            if norm(euler_angles[0:2]) < norm(np.ones(2) * TR_i * 4):
                shaping += TR_Pi
            break

    if self.prev_shaping is not None:
        self.reward = shaping - self.prev_shaping
    self.prev_shaping = shaping

    # ABSOLUTE CONTROL PENALTY
    ## TOTAL REWARD SHAPING ##
    abs_control = -np.sum(np.square(action - self.zero_control)) * P_C
    self.reward += abs_control

    # SOLUTION ACHIEVED?
    self.target_state = 9 * (TR[0] ** 2)
    self.current_state = np.sum(np.square(np.concatenate((velocity, euler_angles, body_ang_vel))))

    if self.current_state < self.target_state:
        self.reward += SOLVED_REWARD
        self.solved = 1
        if self.ppo_training:
            self.done = True
    elif self.i >= self.n:
        self.solved = 0
        self.done = True
    elif self.done:
        self.reward += BROKEN_REWARD
        self.solved = 0
def gaussian_kernel(x, y, sigma=5.0):
    return np.exp(-linalg.norm(x - y) ** 2 / (2 * (sigma ** 2)))
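# Example usage, assuming np is numpy and linalg is numpy.linalg as used above:
# >>> gaussian_kernel(np.array([0.0, 0.0]), np.array([0.0, 0.0]))
# 1.0  (identical points: exp(0))
# >>> gaussian_kernel(np.array([0.0, 0.0]), np.array([5.0, 0.0]))
# 0.6065306597126334  (exp(-25 / 50))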
# Compute four eigenmodes:
index = [0, 2, 5, 8]
Lam, V = eig(-L)
ii = argsort(Lam)
Lam = Lam[ii]
# V = V[:, ii]
ii = ii[index]
V = real(V[:, ii])
Lam = sqrt(Lam[index] / Lam[0])

# Plot eigenmodes with nodal lines underneath:
[rr, tt] = meshgrid(r[0:N2 + 1], hstack([0, t]))
xx = rr * cos(tt)
yy = rr * sin(tt)
z = exp(1j * pi * arange(-100, 101) / 100)
fig = plt.figure()
for i in range(0, 4):
    u = reshape(V[:, i], (N2, M)).T
    u = hstack([zeros((M + 1, 1)), vstack([u[M - 1, :], u[0:M, :]])])
    u = u / norm(u)
    ax = fig.add_subplot(2, 2, i + 1, projection='3d')
    ax.plot_surface(xx, yy, u, rstride=1, cstride=1, cmap="coolwarm", alpha=0.3)
    ax.set_xlim(-1.05, 1.05)
    ax.set_ylim(-1.05, 1.05)
    ax.set_zlim(-1.05, 1.05)
    ax.set_axis_off()
    # plt.contour(xx, yy, u - 1)
    # plt.plot()
plt.show()
def Classo_R2(pb, lam, compute=True):
    pb_type = pb.type  # can be 'Path-Alg', 'P-PDS', 'PF-PDS' or 'DR'
    (m, d, k), (A, C, y) = pb.dim, pb.matrix
    lamb, rho = lam * pb.lambdamax, pb.rho
    if lam < 1e-5:
        pb_type = "DR"
        compute = True
        # here we simply refer to Classo_R1, which is called in the 'DR' branch below.

    # 'Path-Alg'
    # here we compute the path algo until our lambda, and just take the last beta
    if pb_type == "Path-Alg":
        if pb.intercept:
            AA, CC = A[:, 1:], C[:, 1:]
        else:
            AA, CC = A[:, :], C[:, :]
        out = solve_path((AA, CC, y), lam, False, rho, "R2", intercept=pb.intercept)
        if pb.intercept:
            beta0, beta = out[0][-1], out[1][-1]
            beta = np.array([beta0] + list(beta))
        else:
            beta = out[0][-1]
        return beta

    # 'DR':
    regpath = pb.regpath
    r = lamb / (2 * rho)
    if pb_type == "DR":
        if compute:
            pb.init_R1(r=r)
            x = Classo_R1(pb.prob_R1, lamb / pb.prob_R1.lambdamax)
            beta = x[:-m]
            if pb.intercept:
                betaO = pb.prob_R1.ybar - np.vdot(pb.prob_R1.Abar, x)
                beta = np.array([betaO] + list(beta))
            return beta
        else:
            pb.add_r(r=r)
            if len(pb.init) == 3:
                pb.prob_R1.init = pb.init
            x, warm_start = Classo_R1(pb.prob_R1, lamb / pb.prob_R1.lambdamax)
            beta = x[:-m]
            if pb.intercept:
                betaO = pb.prob_R1.ybar - np.vdot(pb.prob_R1.Abar, x)
                beta = np.array([betaO] + list(beta))
            return (beta, warm_start)

    tol = pb.tol * LA.norm(y) / LA.norm(A, "fro")  # tolerance rescaled
    if compute:
        pb.compute_param()
    tau, Proj, AtA, Aty = pb.tauN, proj_c(C, d), pb.AtA, pb.Aty
    gamma = pb.gam / (2 * (pb.AtAnorm + r ** 2))
    t = lamb * gamma
    w, tm, zerom, zerod = (
        t * pb.weights,
        t * np.ones(m),
        np.zeros(m),
        np.zeros(d),
    )
    o, xbar, x, v = pb.init
    # vectors useful to compute the prox of f(b) = sum(wi |bi|)

    # FORWARD BACKWARD
    if pb_type == "P-PDS":
        for i in range(pb.N):
            grad = AtA.dot(x) - Aty
            v = v + tau * C.dot(xbar)
            S = x - 2 * gamma * grad - 2 * gamma * r * (A.T).dot(o) - (C.T).dot(v)
            o = prox(
                o * (1 - 2 * gamma * r ** 2) + 2 * gamma * r * (y - A.dot(x)),
                tm,
                zerom,
            )
            p = prox(S, w, zerod)
            nw_x = Proj.dot(p)
            eps = nw_x - x
            xbar = p + eps
            if i % 10 == 2 and LA.norm(eps) < tol:  # 0.6
                if regpath:
                    return (x, (o, xbar, x, v))
                else:
                    return x
            x = nw_x
            if LA.norm(x) > 1e10:
                raise ValueError("The algorithm of P-PDS diverges")
        raise ValueError(
            "The algorithm of P-PDS did not converge after %i iterations " % pb.N)
    else:  # 'PF-PDS'
        for i in range(pb.N):
            grad = AtA.dot(x) - Aty
            S1 = x - 2 * gamma * grad - 2 * gamma * r * (A.T).dot(o) - (C.T).dot(v)
            S2 = o * (1 - 2 * gamma * r ** 2) + 2 * gamma * r * (y - A.dot(x))
            p1 = prox(S1, w, zerod)
            p2 = prox(S2, tm, zerom)
            v = v + tau * C.dot(p1)
            v2 = v + tau * C.dot(x)
            eps1 = (p1 + 2 * gamma * (Aty - AtA.dot(p1) - r * A.T.dot(o))
                    - C.T.dot(v2) - S1)
            eps2 = p2 + 2 * r * gamma * (y - r * p2 - A.dot(x)) - S2
            x = x + eps1
            o = o + eps2
            if i % 10 == 2 and LA.norm(eps1) + LA.norm(eps2) < tol:
                if regpath:
                    return (x, (o, xbar, x, v))
                else:
                    return x
            if LA.norm(x) + LA.norm(o) + LA.norm(v) > 1e6:
                raise ValueError("The algorithm of PF-PDS diverges")
        raise ValueError(
            "The algorithm of PF-PDS did not converge after %i iterations " % pb.N)
def euclidean(p, Q):
    return numpy.apply_along_axis(lambda q: linalg.norm(p - q), 0, Q)
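# Example: columns of Q are the points, so this returns one distance per column
# (assumes numpy and numpy.linalg are imported as in the function above):
# >>> p = numpy.array([0.0, 0.0])
# >>> Q = numpy.array([[3.0, 0.0],
# ...                  [4.0, 1.0]])  # columns are the points (3,4) and (0,1)
# >>> euclidean(p, Q)
# array([5., 1.])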
def hellinger(p, Q):
    factor = 1 / math.sqrt(2)
    sqrt_p = numpy.sqrt(p)
    return factor * numpy.apply_along_axis(
        lambda q: linalg.norm(sqrt_p - numpy.sqrt(q)), 0, Q)
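# Example: Hellinger distance from p to each column of Q; it is 0 for an
# identical distribution and grows toward 1 as the supports become disjoint
# (assumes numpy, math, and numpy.linalg are imported as above):
# >>> p = numpy.array([0.5, 0.5])
# >>> Q = numpy.array([[0.5, 1.0],
# ...                  [0.5, 0.0]])  # columns are distributions
# >>> hellinger(p, Q)
# array([0.        , 0.54119610...])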
def main():
    dimension_number, exp_number = 2, 20
    x_min_dist, x_max_dist, y_min_dist, y_max_dist, x0, dot_num, points_seq_style, way_style, \
        exact_solution_style, figsize, uniform_distr_low, uniform_distr_high, calc_epsilon, \
        min_dist_between_points, level_max_diff = \
        -10.0, 10.0, -10.0, 10.0, [0, 0], 500, 'ko', 'k-', 'ro', (15, 7.5), -5, 5, 1e-4, 1, 1000
    a, b, c = \
        rand(dimension_number, dimension_number) * (uniform_distr_high - uniform_distr_low) + uniform_distr_low, \
        rand(dimension_number) * (uniform_distr_high - uniform_distr_low) + uniform_distr_low, 0
    for j in range(exp_number):
        b = rand(dimension_number) * (uniform_distr_high - uniform_distr_low) + uniform_distr_low
        while True:
            a = rand(dimension_number, dimension_number) * \
                (uniform_distr_high - uniform_distr_low) + uniform_distr_low
            hessian_of_f = (a + a.T) / 2
            flag = False
            for i in range(dimension_number):
                if linalg.det(hessian_of_f[:i + 1, :i + 1]) < 1e-15:
                    flag = True
                    break
            if not flag:
                break
        exact_solution = linalg.solve(hessian_of_f, b)
        x0 = rand(dimension_number)
        x0[0] = x0[0] * (x_max_dist - x_min_dist) + exact_solution[0] + x_min_dist
        x0[1] = x0[1] * (y_max_dist - y_min_dist) + exact_solution[1] + y_min_dist
        points_seq, _ = r_algorithm(f, x0, args=(a, b, c), form='B', calc_epsilon=calc_epsilon,
                                    iter_lim=100, step_method='adaptive', default_step=10,
                                    step_red_mult=0.75, step_incr_mult=1.25, lim_num=3,
                                    reduction_epsilon=1e-15)
        argmin = points_seq[points_seq.shape[0] - 1]
        count = 0
        while count < points_seq.shape[0] - 1:
            if linalg.norm(points_seq[count] - points_seq[count + 1]) < min_dist_between_points:
                points_seq = np.delete(points_seq, count + 1, 0)
                count -= 1
            count += 1
        points_seq = np.append(points_seq, argmin).reshape(points_seq.shape[0] + 1, 2)
        levels = np.sort(f(np.array([points_seq[:, 0], points_seq[:, 1]]), (a, b, c)))
        count = 0
        while count < levels.size - 1:
            if levels[count + 1] - levels[count] > level_max_diff:
                levels = np.insert(levels, count + 1, (levels[count + 1] + levels[count]) / 2.0)
                count -= 1
            count += 1
        levels = np.array(list(set(levels)))
        levels.sort()
        x, y = \
            np.linspace(exact_solution[0] + x_min_dist, exact_solution[0] + x_max_dist, dot_num), \
            np.linspace(exact_solution[1] + y_min_dist, exact_solution[1] + y_max_dist, dot_num)
        xx, yy = np.meshgrid(x, y)
        z = f(np.array([xx, yy]), (a, b, c))
        plt.figure(figsize=figsize)
        plt.grid(True)
        numerical_contour = plt.contour(x, y, z, levels=levels)
        plt.clabel(numerical_contour, inline=1, fontsize=10)
        plt.plot(points_seq[:, 0], points_seq[:, 1], points_seq_style, label=u"Approximation")
        for i in range(points_seq.shape[0] - 1):
            plt.plot([points_seq[i][0], points_seq[i + 1][0]],
                     [points_seq[i][1], points_seq[i + 1][1]], way_style)
        plt.plot(exact_solution[0], exact_solution[1], exact_solution_style)
        plt.show()
        plt.close()
def _tracemin_fiedler(L, X, normalized, tol, method):
    """Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
    """
    n = X.shape[0]

    if normalized:
        # Form the normalized Laplacian matrix and determine the eigenvector
        # of its nullspace.
        e = sqrt(L.diagonal())
        D = spdiags(1. / e, [0], n, n, format='csr')
        L = D * L * D
        e *= 1. / norm(e, 2)

    if not normalized:
        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= X[:, j].sum() / n
    else:
        def project(X):
            """Make X orthogonal to the nullspace of L.
            """
            X = asarray(X)
            for j in range(X.shape[1]):
                X[:, j] -= dot(X[:, j], e) * e

    if method is None:
        method = 'pcg'
    if method == 'pcg':
        # See comments below for the semantics of P and D.
        def P(x):
            x -= asarray(x * X * X.T)[0, :]
            if not normalized:
                x -= x.sum() / n
            else:
                x = daxpy(e, x, a=-ddot(x, e))
            return x
        solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
    elif method == 'chol' or method == 'lu':
        # Convert A to CSC to suppress SparseEfficiencyWarning.
        A = csc_matrix(L, dtype=float, copy=True)
        # Force A to be nonsingular. Since A is the Laplacian matrix of a
        # connected graph, its rank deficiency is one, and thus one diagonal
        # element needs to be modified. Changing to infinity forces a zero in
        # the corresponding element in the solution.
        i = (A.indptr[1:] - A.indptr[:-1]).argmax()
        A[i, i] = float('inf')
        solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
    else:
        raise nx.NetworkXError('unknown linear system solver.')

    # Initialize.
    Lnorm = abs(L).sum(axis=1).flatten().max()
    project(X)
    W = asmatrix(ndarray(X.shape, order='F'))

    while True:
        # Orthonormalize X.
        X = qr(X)[0]
        # Compute iteration matrix H.
        W[:, :] = L * X
        H = X.T * W
        sigma, Y = eigh(H, overwrite_a=True)
        # Compute the Ritz vectors.
        X *= Y
        # Test for convergence exploiting the fact that L * X == W * Y.
        res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
        if res < tol:
            break
        # Depending on the linear solver to be used, two mathematically
        # equivalent formulations are used.
        if method == 'pcg':
            # Compute X = X - (P * L * P) \ (P * L * X) where
            # P = I - [e X] * [e X]' is a projection onto the orthogonal
            # complement of [e X].
            W *= Y  # L * X == W * Y
            W -= (W.T * X * X.T).T
            project(W)
            # Compute the diagonal of P * L * P as a Jacobi preconditioner.
            D = L.diagonal()
            D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
            D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
            D[D < tol * Lnorm] = 1.
            D = 1. / D
            # Since TraceMIN is globally convergent, the relative residual can
            # be loose.
            X -= solver.solve(W, 0.1)
        else:
            # Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
            # projection on the nullspace of L, which will be eliminated.
            W[:, :] = solver.solve(X)
            project(W)
            X = (inv(W.T * X) * W.T).T  # Preserves Fortran storage order.

    return sigma, asarray(X)
def gaussian(sigma):
    # standard Gaussian (RBF) kernel: exp(-||x - y||^2 / (2 * sigma^2))
    return lambda x, y: \
        np.exp(-la.norm(x - y) ** 2 / (2 * sigma ** 2))
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Cifar Example')
    use_cuda = True
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    transform_test = transforms.Compose([
        transforms.ToTensor()
    ])
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=False, num_workers=16)
    # classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    # test_loader = torch.utils.data.DataLoader(
    #     datasets.MNIST('../data', train=False, transform=transforms.Compose([
    #         transforms.ToTensor()
    #         # transforms.Normalize((0.1307,), (0.3081,))
    #     ])),
    #     batch_size=1000, shuffle=True, **kwargs)

    args = parser.parse_args()
    gpu = 0
    torch.manual_seed(1)
    device = torch.device("cuda" if use_cuda else "cpu")
    # device = "cpu"
    model = ResNet18_adv(w=1).to(device)
    model = AttackPGD(model)
    model = torch.nn.DataParallel(model)
    torch.cuda.set_device(gpu)
    model.cuda(gpu)
    criterion = CrossEntropyLossMaybeSmooth(smooth_eps=0.0).cuda(gpu)

    load_model = 'resnet18_adv_w1.pt'
    # unlike resume, load_model does not care about optimizer status or start_epoch
    print('==> Loading from {}'.format(load_model))
    model.load_state_dict(torch.load(load_model,
                                     map_location={'cuda:0': 'cuda:{}'.format(gpu)}))
    print(model)
    validate(model, testloader, criterion, device)

    for name, W in model.named_parameters():
        W = W.cpu().detach().numpy()
        shape = W.shape
        W2d = W.reshape(shape[0], -1)
        column_l2_norm = LA.norm(W2d, 2, axis=0)
        zero_column = np.sum(column_l2_norm == 0)
        nonzero_column = np.sum(column_l2_norm != 0)
        print("column sparsity of layer {} is {}".format(
            name, zero_column / (zero_column + nonzero_column)))

    for name, W in model.named_parameters():
        W = W.cpu().detach().numpy()
        shape = W.shape
        W2d = W.reshape(shape[0], -1)
        row_l2_norm = LA.norm(W2d, 2, axis=1)
        zero_row = np.sum(row_l2_norm == 0)
        nonzero_row = np.sum(row_l2_norm != 0)
        print('filter sparsity of layer {} is {}'.format(
            name, zero_row / (zero_row + nonzero_row)))
def normalize(vec):
    return vec / npl.norm(vec)
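# Example usage (npl is numpy.linalg, as imported by this module):
# >>> normalize(np.array([3.0, 4.0]))
# array([0.6, 0.8])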
def radial_basis(gamma=10):
    return lambda x, y: np.exp(-gamma * la.norm(np.subtract(x, y)))
vessel_d['specific_impulse'] = vessel.specific_impulse
# print(game_delta_time)
if nav_mode == 'gfold':  # follow the gfold path
    (tf, x, u, m, s, z) = gfold_path
    # n_i = max(n_i + game_delta_time * 0.2 * N/tf, find_nearest_index(x, error, vel))
    n_i = max(n_i - game_delta_time * 0.2 * N / tf, find_nearest_index(x, error, vel))
    print(n_i)
    # x_i = (x[0:3, n_i] + x[0:3, min(n_i+1, N-1)]) / 2
    # v_i = (x[3:6, n_i] + x[3:6, min(n_i+1, N-1)]) / 2
    # u_i = (u[:, n_i] + u[:, min(n_i+1, N-1)]) / 2
    (x_i, v_i, u_i) = sample_index(n_i)
    (x_i_, v_i_, u_i_) = sample_index(n_i + min(1.5 * N / tf, npl.norm(vel) / 50 * N / tf))
    # v_i_dir = v_i / npl.norm(v_i)
    # target_a = u_i_
    # target_v = v_i_
    # target_x = x_i  # + np.dot((error - x_i), v_i_dir) * v_i_dir
    # # print(n_i, target_a, target_v, target_x)
    # target_a += (target_v - vel) * k_v + (target_x - error) * k_x
    target_a = u_i + (v_i - vel) * k_v + (x_i - error) * k_x
    target_a_ = u_i_ + (v_i_ - vel) * k_v + (x_i - error) * k_x
    if debug_lines:
        target_line.start = error
        target_line.end = (x_i[0], x_i[1], x_i[2])
        target2_line.start = error
def test_empty(self):
    assert_equal(norm([]), 0.0)
    assert_equal(norm(array([], dtype=self.dt)), 0.0)
    assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def almost_equal_norm(self, other, tol, relative=True):
    """
    Compare whether two array types are almost equal, normwise.

    If the 'relative' parameter is 'True' (the default) then the
    'tol' parameter (which must be positive) is interpreted as a
    relative tolerance, and the comparison returns 'True' only if
    abs(norm(self-other)) <= tol*abs(norm(self)).

    If 'relative' is 'False', then 'tol' is an absolute tolerance,
    and the comparison is true only if
    abs(norm(self-other)) <= tol

    Other meta-data (type, dtype, and length) must be exactly equal.
    If either object's memory lives on the GPU it will be copied to
    the CPU for the comparison, which may be slow. But the original
    object itself will not have its memory relocated nor scheme changed.

    Parameters
    ----------
    other
        Another Python object, that should be tested for
        almost-equality with 'self', based on their norms.
    tol
        A non-negative number, the tolerance, which is interpreted
        as either a relative tolerance (the default) or an absolute
        tolerance.
    relative
        A boolean, indicating whether 'tol' should be interpreted as
        a relative tolerance (if True, the default if this argument
        is omitted) or as an absolute tolerance (if False).

    Returns
    -------
    boolean
        'True' if the data agree within the tolerance, as
        interpreted by the 'relative' keyword, and if the types,
        lengths, and dtypes are exactly the same.
    """
    # Check that the tolerance is non-negative and raise an
    # exception otherwise.
    if tol < 0:
        raise ValueError("Tolerance cannot be negative")
    # Check that the meta-data agree; the type check is written in
    # this way so that this method may be safely called from
    # subclasses as well.
    if type(other) != type(self):
        return False
    if self.dtype != other.dtype:
        return False
    if len(self) != len(other):
        return False

    # The numpy() method will move any GPU memory onto the CPU.
    # Slow, but the user was warned.
    diff = self.numpy() - other.numpy()
    dnorm = norm(diff)
    if relative:
        return (dnorm <= tol * norm(self))
    else:
        return (dnorm <= tol)
def clamp_mag(vec, maxmag):
    mag = npl.norm(vec)
    if mag > maxmag:
        return vec / mag * maxmag
    return vec
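# Example usage (npl is numpy.linalg, as imported by this module): vectors
# longer than maxmag are rescaled to maxmag; shorter ones pass through unchanged.
# >>> clamp_mag(np.array([3.0, 4.0]), 1.0)
# array([0.6, 0.8])
# >>> clamp_mag(np.array([0.3, 0.4]), 1.0)
# array([0.3, 0.4])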
def compute_z_phi(samples, alpha):
    return np.array([math.exp(-alpha * LA.norm(sample) ** 2)
                     for sample in samples]).sum()
def stupidNormals(S, hardCodedJac, dim):
    '''
    Hard-coded normal calculator for the different members.
    For dim = 1 there are no normals; for dim = 2 the normals are to the
    lines; for dim = 3 the normals are to the faces only.

    OUTPUT: normal - the normalized normal vector to a member,
    an np.array of the form [x, y, z]
    '''
    if dim == 1:
        normal = []
        print('error: no normals for dim=1')
    elif dim == 2:
        if S == 0:
            normal = []
            print('error: no normals for S = 0')
        elif S == 1:
            temp = np.cross(hardCodedJac[:, 0], np.array([0, 0, 1]))
            normal = 1 / LA.norm(temp) * temp
        elif S == 2:
            temp = np.cross(np.array([0, 0, 1]), hardCodedJac[:, 0])
            normal = 1 / LA.norm(temp) * temp
        elif S == 3:
            temp = np.cross(np.array([0, 0, 1]), hardCodedJac[:, 1])
            normal = 1 / LA.norm(temp) * temp
        elif S == 4:
            temp = np.cross(hardCodedJac[:, 1], np.array([0, 0, 1]))
            normal = 1 / LA.norm(temp) * temp
        else:
            normal = []
            print('error: S outside of range')
    elif dim == 3:
        if S == 0:
            normal = []
            print('error: no normals for S = 0')
        elif S == 6:
            temp = np.cross(hardCodedJac[:, 2], hardCodedJac[:, 1])
            normal = 1 / LA.norm(temp) * temp
        elif S == 5:
            temp = np.cross(hardCodedJac[:, 1], hardCodedJac[:, 2])
            normal = 1 / LA.norm(temp) * temp
        elif S == 4:
            temp = np.cross(hardCodedJac[:, 0], hardCodedJac[:, 2])
            normal = 1 / LA.norm(temp) * temp
        elif S == 3:
            temp = np.cross(hardCodedJac[:, 2], hardCodedJac[:, 0])
            normal = 1 / LA.norm(temp) * temp
        elif S == 2:
            temp = np.cross(hardCodedJac[:, 1], hardCodedJac[:, 0])
            normal = 1 / LA.norm(temp) * temp
        elif S == 1:
            temp = np.cross(hardCodedJac[:, 0], hardCodedJac[:, 1])
            normal = 1 / LA.norm(temp) * temp
        else:
            normal = []
            print('error: S outside of range')
    return normal
def displacement_stress(self, model, positions, q, dofs):
    n = self.n
    o1 = zeros(n, 'float64')
    e1 = zeros(n, 'float64')
    f1 = zeros(n, 'float64')
    o4 = zeros(n, 'float64')
    e4 = zeros(n, 'float64')
    f4 = zeros(n, 'float64')

    As = self.get_area_by_element_id(self.property_id)
    Es = self.get_E_by_element_id(self.property_id)
    Gs = self.get_G_by_element_id(self.property_id)
    Js = self.get_J_by_element_id(self.property_id)
    Cs = self.get_c_by_element_id(self.property_id)

    for i in range(n):
        A = As[i]
        E = Es[i]
        G = Gs[i]
        J = Js[i]
        C = Cs[i]
        n1, n2 = self.node_ids[i, :]

        p1 = positions[n1]
        p2 = positions[n2]

        v1 = p1 - p2
        L = norm(p1 - p2)
        if L == 0.0:
            msg = 'invalid CTUBE length=0.0\n%s' % (self.__repr__())
            raise ZeroDivisionError(msg)
        #========================
        #mat = self.get_material_from_index(i)
        #jmat = searchsorted(mat.material_id, self.material_id[i])
        #E = mat.E[jmat]
        #G = mat.G[jmat]
        #G = self.G()

        #print("A=%g E=%g G=%g J=%g L=%g" % (A, E, G, J, L))
        k_axial = A * E / L
        k_torsion = G * J / L
        #k_axial = 1.0
        #k_torsion = 2.0
        #k = array([[1., -1.], [-1., 1.]])  # 1D rod

        Lambda = _Lambda(v1, debug=False)

        #print("**dofs =", dofs)
        n11 = dofs[(n1, 1)]
        n21 = dofs[(n2, 1)]
        n12 = dofs[(n1, 2)]
        n22 = dofs[(n2, 2)]
        n13 = dofs[(n1, 3)]
        n23 = dofs[(n2, 3)]

        # moments
        n14 = dofs[(n1, 4)]
        n24 = dofs[(n2, 4)]
        n15 = dofs[(n1, 5)]
        n25 = dofs[(n2, 5)]
        n16 = dofs[(n1, 6)]
        n26 = dofs[(n2, 6)]

        q_axial = array([
            q[n11], q[n12], q[n13],
            q[n21], q[n22], q[n23]
        ])
        q_torsion = array([
            q[n14], q[n15], q[n16],
            q[n24], q[n25], q[n26]
        ])
        #print("type=%s n1=%s n2=%s" % (self.type, n1, n2))
        #print("n11=%s n12=%s n21=%s n22=%s" % (n11, n12, n21, n22))
        #print("q2[%s] = %s" % (self.eid, q2))
        #print("Lambda = \n" + str(Lambda))
        #print("Lsize = ", Lambda.shape)
        #print("qsize = ", q.shape)
        u_axial = dot(array(Lambda), q_axial)
        du_axial = -u_axial[0] + u_axial[1]
        u_torsion = dot(array(Lambda), q_torsion)
        du_torsion = -u_torsion[0] + u_torsion[1]

        #L = self.Length()
        #E = self.E()
        #A = self.area()
        #C = self.C()
        #J = self.J()
        #G = self.G()

        axial_strain = du_axial / L
        torsional_strain = du_torsion * C / L

        axial_stress = E * axial_strain
        torsional_stress = G * torsional_strain

        axial_force = axial_stress * A
        torsional_moment = du_torsion * G * J / L
        #print("axial_strain = %s [psi]" % axial_strain)
        #print("axial_stress = %s [psi]" % axial_stress)
        #print("axial_force = %s [lb]\n" % axial_force)
        o1[i] = axial_stress
        o4[i] = torsional_stress

        e1[i] = axial_strain
        e4[i] = torsional_strain

        f1[i] = axial_force
        f4[i] = torsional_moment
    return (e1, e4, o1, o4, f1, f4)
def is_unit_vector(vector):
    return math.isclose(LA.norm(vector), 1.0, rel_tol=1e-2)
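# Example usage (LA is numpy.linalg; note the fairly loose 1% relative tolerance):
# >>> is_unit_vector([0.6, 0.8])
# True
# >>> is_unit_vector([1.0, 1.0])
# False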
def main(ctx_factory, dim=2, order=4, visualize=False):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
        force_device_scalars=True,
    )

    # {{{ parameters

    # domain [-d/2, d/2]^dim
    d = 1.0
    # number of points in each dimension
    npoints = 20
    # final time
    final_time = 1.0
    # velocity field
    c = np.array([0.5] * dim)
    norm_c = la.norm(c)
    # flux
    flux_type = "central"

    # }}}

    # {{{ discretization

    from meshmode.mesh.generation import generate_box_mesh
    mesh = generate_box_mesh(
        [np.linspace(-d / 2, d / 2, npoints) for _ in range(dim)],
        order=order)

    from grudge import DiscretizationCollection
    dcoll = DiscretizationCollection(actx, mesh, order=order)

    # }}}

    # {{{ weak advection operator

    def f(x):
        return actx.np.sin(3 * x)

    def u_analytic(x, t=0):
        return f(-np.dot(c, x) / norm_c + t * norm_c)

    from grudge.models.advection import WeakAdvectionOperator
    adv_operator = WeakAdvectionOperator(
        dcoll,
        c,
        inflow_u=lambda t: u_analytic(thaw(dcoll.nodes(dd=BTAG_ALL), actx), t=t),
        flux_type=flux_type)

    nodes = thaw(dcoll.nodes(), actx)
    u = u_analytic(nodes, t=0)

    def rhs(t, u):
        return adv_operator.operator(t, u)

    dt = adv_operator.estimate_rk4_timestep(actx, dcoll, fields=u)

    logger.info("Timestep size: %g", dt)

    # }}}

    # {{{ time stepping

    from grudge.shortcuts import set_up_rk4
    dt_stepper = set_up_rk4("u", dt, u, rhs)
    plot = Plotter(actx, dcoll, order, visualize=visualize, ylim=[-1.1, 1.1])

    step = 0
    norm_u = 0.0
    for event in dt_stepper.run(t_end=final_time):
        if not isinstance(event, dt_stepper.StateComputed):
            continue

        if step % 10 == 0:
            norm_u = actx.to_numpy(op.norm(dcoll, event.state_component, 2))
            plot(event, "fld-weak-%04d" % step)

        step += 1
        logger.info("[%04d] t = %.5f |u| = %.5e", step, event.t, norm_u)

    # NOTE: These are here to ensure the solution is bounded for the
    # time interval specified
    assert norm_u < 1

    # }}}
def norm(self):
    """
    Quaternion norm.
    """
    n = norm(self.__quat)
    return n
def side_contact(poly1, poly2):
    '''
    Returns (length, normal) giving the length of "face-to-face" contact and,
    if so, left and right endpoints and a unit vector facing poly1 normal to
    the contact.
    '''
    # first check if the polygons can possibly be in contact
    # note: we don't need to factor in collision tol, since the radius is a
    # maximum reached at a corner point, never at a side point, so this is
    # always a conservative check
    com_dist = norm(poly1.pos - poly2.pos)
    if com_dist > max(poly1.radius, poly2.radius):
        return 0, None, None, None

    best_side = None
    best_score = .01
    for i1, (p1p1, p1p2, s1) in enumerate(poly1.side_pos_iter()):
        for i2, (p2p1, p2p2, s2) in enumerate(poly2.side_pos_iter()):
            anglediff = abs(np.dot(s1, s2)) / (norm(s1) * norm(s2))
            if 1 - anglediff < ANGLE_TOL:
                # I believe this always has to be minimized with p1p2
                # assuming coords entered consistently cw or ccw
                # Lines should be in reverse orientation to each other
                z = p1p1 - p2p2
                score = norm(np.cross(z / norm(z), s2 / norm(s2)))
                if score < best_score:
                    best_side = (i1, i2)
                    best_score = score
                    crosslen1 = norm(p1p1 - p2p1)
                    crosslen2 = norm(p2p2 - p1p2)
                    minlen = min(norm(s1), norm(s2))
                    contact_length = min(minlen, crosslen1, crosslen2)
                    # Again, with CW assumption, this turns toward object 1
                    normal = np.array([p2p2[1] - p2p1[1], p2p1[0] - p2p2[0]])
                    a, b, c, d = p1p1, p1p2, p2p1, p2p2
                    if a[0] > b[0]:
                        a, b = b, a
                    if c[0] > d[0]:
                        c, d = d, c
                    if a[0] < c[0] and c[0] < b[0]:
                        left = c
                    else:
                        left = a
                    if a[0] < d[0] and d[0] < b[0]:
                        right = d
                    else:
                        right = b
                    #assert(abs(contact_length - norm(right - left)) / abs(contact_length) < EPSILON)
    if not best_side:
        return 0, None, None, None
    unit_normal = -normal / norm(normal)
    return contact_length, left, right, unit_normal
def get_stiffness_matrix(self, i, model, positions, index0s, knorm=1.0):
    #print("----------------")
    pid = self.property_id[i]
    assert isinstance(pid, int), pid
    A = self.get_area_by_element_id(pid)
    E = self.get_E_by_element_id(pid)
    G = self.get_G_by_element_id(pid)
    J = self.get_J_by_element_id(pid)
    #print('A=%s E=%s G=%s J=%s' % (A, E, G, J))

    #========================
    #(n1, n2) = self.node_ids()
    n1 = self.node_ids[i, 0]
    n2 = self.node_ids[i, 1]

    i1 = index0s[n1]
    i2 = index0s[n2]

    #print("n0", n0)
    #print("n1", n1)
    n1 = positions[n1]
    n2 = positions[n2]
    #p1 = model.Node(n1).xyz

    v1 = n1 - n2
    L = norm(v1)
    if L == 0.0:
        msg = 'invalid CROD length=0.0\n%s' % (self.__repr__())
        raise ZeroDivisionError(msg)
    #========================
    #print("A=%g E=%g G=%g J=%g L=%g" % (A, E, G, J, L))
    k_axial = A * E / L
    k_torsion = G * J / L
    #k_axial = 1.0
    #k_torsion = 2.0

    k = array([[1., -1.], [-1., 1.]])  # 1D rod
    Lambda = _Lambda(v1, debug=True)
    K = dot(dot(transpose(Lambda), k), Lambda)
    Ki, Kj = K.shape

    # for testing
    #K = ones((Ki, Ki), 'float64')

    K2 = zeros((Ki * 2, Kj * 2), 'float64')
    if k_axial == 0.0 and k_torsion == 0.0:
        dofs = []
        n_ijv = []
        K2 = []
    elif k_torsion == 0.0:  # axial; 2D or 3D
        K2 = K * k_axial
        dofs = array([
            i1, i1 + 1, i1 + 2,
            i2, i2 + 1, i2 + 2,
        ], 'int32')
        n_ijv = [
            # axial
            (n1, 1), (n1, 2), (n1, 3),
            (n2, 1), (n2, 2), (n2, 3),
        ]
    elif k_axial == 0.0:  # torsion; assume 3D
        K2 = K * k_torsion
        dofs = array([
            i1 + 3, i1 + 4, i1 + 5,
            i2 + 3, i2 + 4, i2 + 5,
        ], 'int32')
        n_ijv = [
            # torsion
            (n1, 4), (n1, 5), (n2, 6),
            (n2, 4), (n2, 5), (n1, 6),
        ]
    else:  # axial + torsion; assume 3D
        # u1fx, u1fy, u1fz, u2fx, u2fy, u2fz
        K2[:Ki, :Ki] = K * k_axial

        # u1mx, u1my, u1mz, u2mx, u2my, u2mz
        K2[Ki:, Ki:] = K * k_torsion

        dofs = array([
            i1, i1 + 1, i1 + 2,
            i2, i2 + 1, i2 + 2,

            i1 + 3, i1 + 4, i1 + 5,
            i2 + 3, i2 + 4, i2 + 5,
        ], 'int32')
        n_ijv = [
            # axial
            (n1, 1), (n1, 2), (n1, 3),
            (n2, 1), (n2, 2), (n2, 3),

            # torsion
            (n1, 4), (n1, 5), (n1, 6),
            (n2, 4), (n2, 5), (n2, 6),
        ]

    #Fg = dot(dot(transpose(Lambda), grav), Lambda)
    #print("K=\n", K / knorm)
    #print("K2=\n", K2 / knorm)

    #========================
    #print(K / knorm)
    #print("K[%s] = \n%s\n" % (self.eid, list_print(K / knorm)))
    self.model.log.info('dofs = %s' % dofs)
    self.model.log.info('K =\n%s' % list_print(K / knorm))
    return (K2, dofs, n_ijv)
def update(self):
    '''
    This function is called once each time step. All position and velocity
    updates, and all collisions are handled within.
    '''
    if energylog:
        self.log.write(','.join([str(x) for x in self.energy()]) + '\n')

    # Apply linear damping force if exists
    if len(self.global_damping_force) == 0:
        gdf = []
    else:
        gdf = [self.global_damping_force]

    dt = self.time_disc
    max_collision_overlap = COLLISION_TOL + 1
    first_iter = True

    # Check there is at least one 'true' collision, AND time is stable
    if verbosity > 2:
        print('in:', self.objs[0].com.pos[1])
    if verbosity:
        hashes = [hash(obj) for obj in self.objs]
    k = 0
    while max_collision_overlap > COLLISION_TOL and dt > TIME_TOL:
        k += 1
        if k == 2:
            print('starting collision: ', self.state // self.time_disc)
        if k > 1:
            print(k, end=', ')
        if first_iter:
            first_iter = False
        else:
            dt /= 2
            for i, obj in enumerate(self.objs):
                obj.reverse_update()
                if verbosity:
                    if hash(obj) != hashes[i]:
                        print('WRONG, dt = ', dt)
                        print(obj)

        # Do first pass of position, velocity and acceleration updates for
        # each object in the world
        for obj in self.objs:
            #obj.forces = []
            obj.pre_update(gdf, dt)

        # check_collisions() compiles a list of collisions with information:
        # (1,2) the two objects contained in the collision
        # (3) the (signed) unit vector normal to the surface of collision
        # (4) (unsigned) magnitude of normal vector
        collisions = self.check_collisions()
        magnitudes = [abs(magnitude) for _, __, ___, magnitude in collisions]
        if collisions:
            max_collision_overlap = max(magnitudes)
        else:
            max_collision_overlap = 0

    if max_collision_overlap > 0:  # and dt < TIME_TOL:
        if verbosity == 1:
            print(dt, max_collision_overlap, self.objs[0].com.pos[1])
        if dt < TIME_TOL:
            print("Warning: time became too small while still in collision. "
                  "({0},{1})".format(dt, max_collision_overlap))
        for obj in self.objs:
            obj.reverse_update()

    collisions = self.check_collisions()
    while collisions:
        for obj1, obj2, unit_normal_vec, normal_vec_mag in collisions:
            # Makes things easier if the first object is never fixed
            if obj1.is_fixed:
                obj1, obj2 = obj2, obj1
            [side_length, left, right, normal] = side_contact(obj1, obj2)
            if side_length:
                # Check if torque is induced
                # (TODO: this is not gravity-direction neutral)
                if obj1.com.pos[0] < left[0] or obj1.com.pos[0] > right[0]:
                    pass  # induce torque

            # Reassemble normal vector with some added spacing so two
            # objects are definitely non-overlapping after collision
            # resolution
            normal_vec = unit_normal_vec * (normal_vec_mag + EPSILON)
            if verbosity > 1:
                print("normal vec: ", normal_vec)

            # obj1 is NEVER fixed, so this branch is for a collision with a
            # free object and a fixed object
            #
            # 1. The free object updates its position by the normal vector
            # 2. The free object updates its velocity by twice the (negated)
            #    component of its velocity normal to the surface
            #
            #print(obj1.is_fixed, obj2.is_fixed)
            if obj2.is_fixed:
                for i in range(len(obj1.points)):
                    obj1.points[i].pos += normal_vec
                    vdotN = np.dot(obj1.points[i].vel, unit_normal_vec)
                    obj1.points[i].vel -= (1 + ELASTICITY) * vdotN * unit_normal_vec
                # TODO: why is this here?
                if norm(normal_vec) > VIBRATE_TOL:
                    obj1.com.pos += normal_vec
                vdotN = np.dot(obj1.com.vel, unit_normal_vec)
                #if norm(vdotN) > VIBRATE_TOL:
                ##obj1.com.vel -= (1+ELASTICITY)*vdotN * unit_normal_vec
                #J = obj1.mass*
                K0 = .5 * obj1.mass * norm(obj1.com.vel) ** 2
                obj1.com.vel -= (1 + ELASTICITY) * vdotN * unit_normal_vec

                # Update heat energy, dispersed equally to each object
                H = .5 * obj1.mass * (1 - ELASTICITY ** 2) * (vdotN ** 2)
                K1 = .5 * obj1.mass * norm(obj1.com.vel) ** 2
                obj1.heat += H / 2
                obj2.heat += H / 2
                self.heat += H

                obj1.finish_update()
                obj2.finish_update()

            # Only other possible case is that both objects are free
            else:
                v1 = obj1.com.vel
                mtotal = obj1.mass + obj2.mass
                mprop1 = obj1.mass / mtotal
                mprop2 = 1 - mprop1

                obj1.com.pos += mprop1 * normal_vec
                vn1 = np.dot(obj1.com.vel, unit_normal_vec)
                vn2 = np.dot(obj2.com.vel, unit_normal_vec)
                v1_update = (mprop1 - mprop2) * vn1 + 2 * mprop2 * vn2
                obj1.com.vel += (v1_update - np.dot(obj1.com.vel, unit_normal_vec)) * unit_normal_vec
                for point in obj1.points:
                    point.pos += mprop1 * normal_vec
                    point.vel += (v1_update - np.dot(point.vel, unit_normal_vec)) * unit_normal_vec
                obj1.finish_update()

                obj2.com.pos -= mprop2 * normal_vec
                v2_update = 2 * mprop1 * vn1 + (mprop2 - mprop1) * vn2
                obj2.com.vel += (v2_update - np.dot(obj2.com.vel, unit_normal_vec)) * unit_normal_vec
                for point in obj2.points:
                    point.pos -= mprop2 * normal_vec
                    point.vel += (v2_update - np.dot(point.vel, unit_normal_vec)) * unit_normal_vec
                obj2.finish_update()

                if verbosity > 1:
                    print("inits", v0a, v0b)
                    print("a_update=", vn2, v1_update)
                    print("b_update=", vn1, v2_update)
                    print("va1 =", obj1.com.vel, v0a + vn1)
                    print("vb1 =", obj2.com.vel, v0b + vn2)
                    print('K1a=', K0a + .5 * obj1.mass * (vn1 ** 2 - vn2 ** 2),
                          .5 * obj1.mass * norm(obj1.com.vel) ** 2)
                    print('K1b=', K0b + .5 * obj2.mass * (vn2 ** 2 - vn1 ** 2),
                          .5 * obj2.mass * norm(obj2.com.vel) ** 2)
                    #print('mafter:', obj1.momentum(normal_vec), obj2.momentum(normal_vec))
        collisions = self.check_collisions()

    # Do second (final) pass for each object in the world
    for obj in self.objs:
        # Obj.com x,v,a attrs are given to Obj itself
        obj.finish_update()

    # Update the remainder of the time step;
    # ensures each frame transition is consistent time width
    collisions = self.check_collisions()
    if collisions:
        print('time eval: ', self.state // self.time_disc)
        magnitudes = [abs(magnitude) for _, __, ___, magnitude in collisions]
        print('1/2still {0} collisions (dt = {1}),{2},{3},{4}'.format(
            len(collisions), dt, magnitudes, collisions[0][0], collisions[0][1]))
    if dt != self.time_disc:
        for obj in self.objs:
            obj.pre_update(gdf, self.time_disc - dt)
            obj.finish_update()
        collisions = self.check_collisions()
        if collisions:
            magnitudes = [abs(magnitude) for _, __, ___, magnitude in collisions]
            print('2/2still {0} collisions (dt = {1}),{2}'.format(
                len(collisions), dt, magnitudes))

    # Advance time
    self.state += self.time_disc
def __init__(self, subcase_id, location, titles, headers, dxyz, linked_scale_factor,
             #xyz, scalar,
             scales, data_formats=None, nlabels=None, labelsize=None, ncolors=None,
             colormap='jet', set_max_min=False, uname='NastranGeometry'):
    """
    Defines a Displacement/Eigenvector result

    Parameters
    ----------
    subcase_id : int
        the flag that points to self.subcases for a message
    headers : List[str]
        the sidebar word
    titles : List[str]
        the legend title
    #xyz : (nnodes, 3)
    #    the nominal xyz locations
    #scalars : (nnodes, n) float ndarray
    #    the data to make a contour plot with; does nothing
    dxyz : (nnodes, 3)
        the delta xyz values
    linked_scale_factor : bool
        is the displacement scale factor linked;
        displacements/load steps should be,
        force/eigenvectors should not be
    scales : List[float]
        the table (e.g., deflection, SPC Forces) scale factors;
        nominally, this starts as an empty list and is filled later
    data_formats : List[str]
        the type of data result (e.g. '%i', '%.2f', '%.3f')
    ncolors : int; default=None
        sets the default for reverting the legend ncolors
    set_max_min : bool; default=False
        set default_mins and default_maxs

    Unused
    ------
    #deflects : bool; default=True
    #    seems to be an unused parameter...
    uname : str
        some unique name for ...
    """
    GuiResultCommon.__init__(self)

    self.subcase_id = subcase_id
    self.location = location
    assert location in ['node', 'centroid'], 'location=%r' % location
    self.linked_scale_factor = linked_scale_factor
    #assert self.subcase_id > 0, self.subcase_id

    self.dxyz = dxyz
    self.dim = len(self.dxyz.shape)

    self.uname = uname
    #self.dxyz_norm = norm(dxyz, axis=1)
    #self.deflects = deflects

    self.titles = titles
    self.headers = headers
    self.scales = scales
    self.subcase_id = subcase_id
    self.data_type = self.dxyz.dtype.str  # '<c8', '<f4'
    self.is_real = True if self.data_type in ['<f4', '<f8'] else False
    #print('self.data_type = %r' % self.data_type)
    self.is_complex = not self.is_real
    self.nlabels = nlabels
    self.labelsize = labelsize
    self.ncolors = ncolors
    self.colormap = colormap

    self.data_formats = data_formats
    self.titles_default = deepcopy(self.titles)
    self.headers_default = deepcopy(self.headers)
    self.scales_default = deepcopy(self.scales)
    self.data_formats_default = deepcopy(self.data_formats)
    if self.dim == 2:
        ntimes = 1
        self.default_mins = zeros(1, dtype=self.dxyz.dtype)
        self.default_maxs = zeros(1, dtype=self.dxyz.dtype)
        normi = norm(self.dxyz, axis=1)
        self.default_mins[0] = normi.min().real
        self.default_maxs[0] = normi.max().real
    elif self.dim == 3:
        ntimes = self.dxyz.shape[0]
        self.default_mins = zeros(ntimes)
        self.default_maxs = zeros(ntimes)
        for itime in range(ntimes):
            normi = norm(self.dxyz[itime, :, :], axis=1)
            self.default_mins[itime] = normi.min().real
            self.default_maxs[itime] = normi.max().real

        if not self.is_real:
            #: stored in degrees
            self.phases = np.zeros(ntimes)
    else:
        raise NotImplementedError('dim=%s' % self.dim)

    if set_max_min:
        self.min_values = deepcopy(self.default_mins)
        self.max_values = deepcopy(self.default_maxs)
    else:
        self.max_values = None
        self.min_values = None
def momentum(self, direction):
    return self.mass * np.dot(self.com.vel, direction) / norm(direction)
def polypoly_collision(poly1, poly2):
    '''
    Determine whether two polygons are currently intersecting each other
    '''
    # array of edges in the form
    # poly1_edges = [(Point1, Point2), (Point2, Point3), ..., (Point[n], Point1)]
    poly1_edges = zip(poly1.points, poly1.points[1:] + [poly1.points[0]])
    # array of outward-facing normals for each of the previous edges
    poly1_normals = [np.array([p2.pos[1] - p1.pos[1], p1.pos[0] - p2.pos[0]])
                     for p1, p2 in poly1_edges]
    poly2_edges = zip(poly2.points, poly2.points[1:] + [poly2.points[0]])
    poly2_normals = [np.array([p2.pos[1] - p1.pos[1], p1.pos[0] - p2.pos[0]])
                     for p1, p2 in poly2_edges]

    if verbosity >= 2:
        print('IN COLLISION')
        if verbosity >= 3:
            print([p.pos for p in poly1.points[:-1]])
            print([p.pos for p in poly2.points[:-1]])

    overlap = np.inf
    for axis, flag in list(zip(poly1_normals, [1] * len(poly1_normals))) + \
                      list(zip(poly2_normals, [2] * len(poly2_normals))):
        # Normalize
        normal_axis = axis / norm(axis)

        # Determine projection bounds onto separating axis (perpendicular to
        # separating line); initialize bounds with current edge
        poly1_bounds = [np.dot(normal_axis, poly1.points[0].pos)] * 2
        poly2_bounds = [np.dot(normal_axis, poly2.points[0].pos)] * 2

        # increase bounds as necessary if other edges increase projection line length
        for point in poly1.points:
            curr = np.dot(normal_axis, point.pos)
            if curr < poly1_bounds[0]:
                poly1_bounds[0] = curr
            if curr > poly1_bounds[1]:
                poly1_bounds[1] = curr
        for point in poly2.points:
            curr = np.dot(normal_axis, point.pos)
            if curr < poly2_bounds[0]:
                poly2_bounds[0] = curr
            if curr > poly2_bounds[1]:
                poly2_bounds[1] = curr

        mag_flag = 1
        if poly1_bounds[0] > poly2_bounds[0]:
            mag_flag = 2
            poly1_bounds, poly2_bounds = poly2_bounds, poly1_bounds

        # If a single check fails, then there is no collision
        if poly1_bounds[1] < poly2_bounds[0]:
            return [], None

        # Save axis and overlap magnitude if this is the smallest gap
        current_overlap = poly1_bounds[1] - poly2_bounds[0]
        if verbosity > 1:
            print('co:', current_overlap, poly1_bounds, poly2_bounds)
        if current_overlap < overlap:
            fix_axis = normal_axis
            fix_mag_flag = mag_flag
            overlap = current_overlap

    '''
    Sanity Check: This whole fix_axis direction makes physical sense. It is
    resolving the collision in a direction which *must* be orthogonal to one
    of the two objects' sides, which should be the only way a collision could
    happen, aside from some point-point intersection which is a.s. not the
    case.
    '''
    if fix_mag_flag:
        fix_axis *= -1
    return fix_axis, overlap
# Debug
import sys
sys.path.append("../")

from nn.sdnn import DNN
from nn.iris import IrisDF
from numpy.linalg import norm

# http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization

if __name__ == "__main__":
    df = IrisDF()
    Xs = df.X_train  # np.array([[0.68619022, 0.31670318, 0.61229281, 0.232249]])
    ys = df.y_train  # np.array([[1, 0, 0]])
    nn = DNN(shape=[4, 6, 3])
    calc_grad = nn.objective(nn.get_params(), Xs, ys)[1]
    num_grad = nn.compute_num_grads(Xs, ys)
    print(calc_grad, end="\n\n")
    print(num_grad, end="\n\n")
    print(norm(calc_grad - num_grad) / (norm(calc_grad + num_grad)))  # should be e-9 or less!
def k_energy(self):
    return .5 * self.mass * norm(self.vel) ** 2