def test_hermvander(self):
    # check for 1d x
    x = np.arange(3)
    v = herm.hermvander(x, 3)
    assert_(v.shape == (3, 4))
    for i in range(4):
        coef = [0] * i + [1]
        assert_almost_equal(v[..., i], herm.hermval(x, coef))

    # check for 2d x
    x = np.array([[1, 2], [3, 4], [5, 6]])
    v = herm.hermvander(x, 3)
    assert_(v.shape == (3, 2, 4))
    for i in range(4):
        coef = [0] * i + [1]
        assert_almost_equal(v[..., i], herm.hermval(x, coef))
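# A minimal standalone sketch (not part of the original test suite) of what the
# test above verifies: column i of the pseudo-Vandermonde matrix returned by
# numpy.polynomial.hermite.hermvander is the physicists' Hermite polynomial
# H_i evaluated at the sample points.
import numpy as np
from numpy.polynomial import hermite as herm

x = np.arange(3)           # sample points 0, 1, 2
V = herm.hermvander(x, 3)  # shape (3, 4); columns are H_0(x) .. H_3(x)
for i in range(4):
    e_i = [0] * i + [1]    # unit coefficient vector selecting H_i
    assert np.allclose(V[:, i], herm.hermval(x, e_i))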
def _mario_helper(bands, s, poly_order, opts, callback):
    # Build the Hermite polynomial basis over the bands.
    P = hermvander(bands, poly_order - 1)
    f = P.sum(axis=0)
    if HAS_CVXOPT:
        solvers.options['show_progress'] = opts['disp']
        solvers.options['maxiters'] = opts['maxiter']
        solvers.options['abstol'] = opts['tol']
        solvers.options['reltol'] = opts['tol']
        solvers.options['feastol'] = 1e-100  # For some reason this helps.
        try:
            res = solvers.lp(cvx_matrix(f), cvx_matrix(-P), cvx_matrix(-s))
        except ValueError as e:
            # This can be thrown when poly_order is too large for the data size.
            res = {'status': str(e), 'x': None}
        return res, P

    res = linprog(f, A_ub=-P, b_ub=-s, bounds=(-np.inf, np.inf),
                  options=opts, callback=callback)
    res = {'status': res.message, 'x': res.x if res.success else None}
    return res, P
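# A hedged usage sketch of the SciPy fallback path in _mario_helper above: fit a
# Hermite-basis polynomial P @ c that lies on or above a signal s while its total
# sum is minimized (the constraint P @ c >= s is written as -P @ c <= -s for
# linprog). The band/signal values below are made-up illustrative data, not taken
# from the original code.
import numpy as np
from numpy.polynomial.hermite import hermvander
from scipy.optimize import linprog

bands = np.linspace(-1, 1, 50)                        # hypothetical band axis
s = np.exp(-bands ** 2) + 0.05 * np.random.rand(50)   # hypothetical signal
poly_order = 4

P = hermvander(bands, poly_order - 1)   # basis matrix, shape (50, 4)
f = P.sum(axis=0)                       # objective: minimize the sum of P @ c
res = linprog(f, A_ub=-P, b_ub=-s, bounds=(None, None))
if res.success:
    envelope = P @ res.x                # polynomial hugging s from above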
def test_100(self):
    x, w = herm.hermgauss(100)

    # Test orthogonality. Note that the results need to be normalized,
    # otherwise the huge values that can arise from fast-growing
    # functions like Laguerre can be very confusing.
    v = herm.hermvander(x, 99)
    vv = np.dot(v.T * w, v)
    vd = 1 / np.sqrt(vv.diagonal())
    vv = vd[:, None] * vv * vd
    assert_almost_equal(vv, np.eye(100))

    # Check that the integral of 1 is correct.
    tgt = np.sqrt(np.pi)
    assert_almost_equal(w.sum(), tgt)
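# A small illustrative sketch of the quadrature rule exercised by test_100 above:
# hermgauss returns nodes and weights for integrals of the form
# integral f(x) * exp(-x**2) dx over the whole real line.
import numpy as np
from numpy.polynomial import hermite as herm

x, w = herm.hermgauss(100)
# integral of exp(-x**2) dx          = sqrt(pi)
assert np.isclose(w.sum(), np.sqrt(np.pi))
# integral of x**2 * exp(-x**2) dx   = sqrt(pi) / 2
assert np.isclose((w * x ** 2).sum(), np.sqrt(np.pi) / 2)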
def vandermonde(self, x):
    V = hermite.hermvander(x, self.shape(False) - 1)
    return V
def vandermonde(self, x):
    V = hermite.hermvander(x, self.N - 1)
    return V
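# A hedged sketch of how a Hermite Vandermonde matrix like the ones returned by the
# vandermonde() methods above is typically used: solving a least-squares problem for
# the expansion coefficients. The sample data and names here are illustrative only.
import numpy as np
from numpy.polynomial import hermite

x = np.linspace(-1, 1, 20)
y = np.exp(x)                        # some target samples
V = hermite.hermvander(x, 5)         # N - 1 = 5, i.e. a 6-term expansion
coef, *_ = np.linalg.lstsq(V, y, rcond=None)
y_fit = hermite.hermval(x, coef)     # reconstruct the fit from the coefficients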
M = 2
eta = 0.1            # learning rate / step size
# eta = lambda i: eta / (i ** 0.6)
nb_iter = 500 * 10
##
lb, ub = 0, 1
freq_sin = 4  # 2.3
f_target = lambda x: np.sin(2 * np.pi * freq_sin * x)
N_train = 10
X_train = np.linspace(lb, ub, N_train)
Y_train = f_target(X_train).reshape(N_train, 1)
x_horizontal = np.linspace(lb, ub, 1000).reshape(1000, 1)
## degree of mdl
Degree_mdl = N_train - 1
## Hermite features for the training points
Kern_train = hermvander(X_train, Degree_mdl)
print(f'Kern_train.shape={Kern_train.shape}')
Kern_train = Kern_train.reshape(N_train, Kern_train.shape[1])
## closed-form least-squares solution via the pseudo-inverse
Kern_train_pinv = np.linalg.pinv(Kern_train)
c_pinv = np.dot(Kern_train_pinv, Y_train)
nb_terms = c_pinv.shape[0]
##
condition_number_hessian = np.linalg.cond(Kern_train)
## TensorFlow graph for the same linear model in the Hermite basis
graph = tf.Graph()
with graph.as_default():
    X = tf.placeholder(tf.float32, [None, nb_terms])
    Y = tf.placeholder(tf.float32, [None, 1])
    w = tf.Variable(tf.zeros([nb_terms, 1]))
    # w = tf.Variable(tf.truncated_normal([Degree_mdl, 1], mean=0.0, stddev=1.0))
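# A hedged sketch of how the graph above might be completed and trained (the snippet
# stops after creating the weight variable). This assumes a standard TF1-style
# least-squares setup: predictions X @ w, squared-error loss, and plain gradient
# descent with step size eta; none of this is taken from the original source.
    y_pred = tf.matmul(X, w)                       # linear model in the Hermite basis
    loss = tf.reduce_mean(tf.square(Y - y_pred))   # mean squared error
    train_step = tf.train.GradientDescentOptimizer(eta).minimize(loss)
    init = tf.global_variables_initializer()

with tf.Session(graph=graph) as sess:
    sess.run(init)
    for i in range(nb_iter):
        sess.run(train_step, feed_dict={X: Kern_train, Y: Y_train})
    w_sgd = sess.run(w)                            # learned coefficients, comparable to c_pinv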