def f_solver():
    tmp = numx.array((1., 2.))
    sys = multiminimize.gsl_multimin_function(my_f, tmp, 2)
    solver = multiminimize.nmsimplex(sys, 2)
    start_point = numx.array((5., 7.))
    initial_steps = numx.array((0.1, 0.1))
    solver.set(start_point, initial_steps)
    print "Using solver ", solver.name()
    for i in range(100):
        status = solver.iterate()
        if status != errno.GSL_SUCCESS:
            break
        ssval = solver.size()
        rval = multiminimize.test_size(ssval, 1e-2)
        if rval == 0:
            print "converged to minimum at"
        fval = solver.getf()
        x = solver.getx()
        t = (i, x[0], x[1], fval, ssval)
        print "iter %3d x % 10.3e % 10.3e f() = %-10.3f ssize = %.3f" % t
        if rval == 0:
            break
    else:
        raise ValueError, "Number of Iterations exceeded!"

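
# The objective my_f is not part of this excerpt.  A minimal sketch, assuming
# the paraboloid from the GSL multimin documentation (minimum value 30 at
# (1, 2), which matches the tolerances asserted elsewhere in these tests):
def my_f(v, params):
    x, y = v[0], v[1]
    t1 = x - params[0]
    t2 = y - params[1]
    return 10.0 * t1 * t1 + 20.0 * t2 * t2 + 30.0
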
def run_fsolver():
    params = numx.array((1., 10.), numx.Float)
    #solver = multiroots.hybrids(mysys, 2)
    solver = multiroot.dnewton(2)
    tmp = numx.array((-10., -5.), numx.Float)
    solver.set(rosenbrock_f, tmp, params)
    #solver = multiroots.broyden(mysys, 2)
    #solver = multiroots.hybrid(mysys, 2)
    print "# Testing solver ", solver.name(), solver.type()
    print "# %5s %9s %9s %9s %10s" % ("iter", "x[0]", "x[1]", "f[0]", "f[1]")
    for iter in range(100):
        status = solver.iterate()
        x = solver.root()
        dx = solver.dx()
        f = solver.f()
        status = multiroot.test_residual(f, 1e-7)
        if status == 0:
            print "# Converged :"
        print " %5d % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], f[0], f[1])
        if status == 0:
            break
    else:
        raise ValueError, "Number of Iterations exceeded!"

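
# rosenbrock_f and the derivative callbacks used by the fdf solvers below are
# defined elsewhere.  A sketch of the Rosenbrock test system from the GSL
# multiroot documentation, with the root at (1, 1) that the tests elsewhere
# in this collection assert:
def rosenbrock_f(x, params):
    a, b = params[0], params[1]
    y0 = a * (1. - x[0])
    y1 = b * (x[1] - x[0] ** 2)
    return numx.array((y0, y1))

def rosenbrock_df(x, params):
    a, b = params[0], params[1]
    return numx.array(((-a, 0.), (-2. * b * x[0], b)))

def rosenbrock_fdf(x, params):
    return rosenbrock_f(x, params), rosenbrock_df(x, params)
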
def fdf_solver():
    params = numx.array((1., 2.))
    sys = multiminimize.gsl_multimin_function_fdf(my_f, my_df, my_fdf, params, 2)
    #solver = multiminimize.conjugate_pr(sys, 2)
    #solver = multiminimize.conjugate_fr(sys, 2)
    solver = multiminimize.vector_bfgs(sys, 2)
    #solver = multiminimize.steepest_descent(sys, 2)
    start = numx.array((5., 7.))
    solver.set(start, 0.01, 1e-4)
    print "Using solver ", solver.name()
    print "%5s %9s %9s %9s %9s %9s" % ("iter", "x", "y", "f", "dx", "dy")
    for iter in range(200):
        status = solver.iterate()
        gradient = solver.gradient()
        x = solver.getx()
        f = solver.getf()
        status = multiminimize.test_gradient(gradient, 1e-3)
        if status == errno.GSL_SUCCESS:
            print "Converged "
        print "%5d % .7f % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], f,
                                                     gradient[0], gradient[1])
        if status == errno.GSL_SUCCESS:
            break
    else:
        raise ValueError, "Number of Iterations exceeded!"

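
# Companion derivative callbacks for my_f, again only a sketch assuming the
# paraboloid form given above:
def my_df(v, params):
    x, y = v[0], v[1]
    return numx.array((20.0 * (x - params[0]), 40.0 * (y - params[1])))

def my_fdf(v, params):
    return my_f(v, params), my_df(v, params)
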
def run_fdfsolver():
    params = numx.array((1., 10.))
    mysys = multiroots.gsl_multiroot_function_fdf(rosenbrock_f, rosenbrock_df,
                                                  rosenbrock_fdf, params, 2)
    #solver = multiroots.newton(mysys, 2)
    solver = multiroots.gnewton(mysys, 2)
    #solver = multiroots.hybridj(mysys, 2)
    #solver = multiroots.hybridsj(mysys, 2)
    tmp = numx.array((-10., -5.))
    solver.set(tmp)
    print "# Testing solver ", solver.name()
    print "# %5s %9s %9s %9s %10s" % ("iter", "x[0]", "x[1]", "f[0]", "f[1]")
    for iter in range(100):
        status = solver.iterate()
        r = solver.root()
        x = solver.getx()
        f = solver.getf()
        status = multiroots.test_residual(f, 1e-7)
        if status == errno.GSL_SUCCESS:
            print "# Converged :"
        print " %5d % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], f[0], f[1])
        if status == errno.GSL_SUCCESS:
            break
    else:
        raise ValueError, "Number of Iterations exceeded!"

def test_wmean(self):
    self.failIf(wmean(numx.array([1, 1, 1]), numx.array([-1., -3., 1.])) != -1.0)
    self.failIf(wmean(numx.array([1, 1, 1]), [1, 2, 3]) != 2)
    data = numx.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    weight = numx.ones(data.shape)
    self.failIf(wmean(weight, data) != 5.5)
    self.failIf(wmean(weight[::2], data[::2]) != 5.0)
    self.failIf(wmean(weight[::-1], data[::-1]) != 5.5)
    self.failIf(wmean(weight[::-2], data[::-2]) != 6.0)

def test_mean(self):
    self.failIf(mean(numx.array([-1., -3., 1.])) != -1.0)
    self.failIf(mean([1, 2, 3]) != 2)
    # test stride != 1
    # These are only valid tests when using NumPy, since otherwise the
    # sequence is transformed to a contiguous array anyway...
    data = numx.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    self.failIf(mean(data) != 5.5)
    self.failIf(mean(data[::2]) != 5.0)
    self.failIf(mean(data[::-1]) != 5.5)
    self.failIf(mean(data[::-2]) != 6.0)

def run_fdfsolver():
    A = 1.
    lambda_ = .1
    b = .5
    n = 40
    p = 3
    t = numx.arange(n)
    y = testfunc(t, A, lambda_, b)
    sigma = numx.ones(n) * 0.1
    data = numx.array((t, y, sigma), numx.Float)
    #mysys = multifit_nlin.gsl_multifit_function_fdf(exp_f, exp_df, exp_fdf,
    #                                                data, n, p)
    pygsl.set_debug_level(0)
    solver = multifit_nlin.lmsder(n, p)
    pygsl.set_debug_level(0)
    #solver = multifit_nlin.lmder(mysys, n, p)
    x = numx.array((1.0, 0.0, 0.0))
    pygsl.set_debug_level(0)
    solver.set(exp_f, exp_df, exp_fdf, x, data)
    pygsl.set_debug_level(0)
    print "# Testing solver ", solver.name()
    print "# %5s %9s %9s %9s %10s" % ("iter", "A", "lambda", "b", "|f(x)|")
    for iter in range(20):
        status = solver.iterate()
        x = solver.x()
        dx = solver.dx()
        f = solver.f()
        J = solver.J()
        #tdx = multifit_nlin.gradient(J, f)
        #status = multifit_nlin.test_delta(dx, x, 1e-8, 1e-8)
        status = solver.test_delta(1e-8, 1e-8)
        fn = numx.sqrt(numx.sum(f * f))
        if status == 0:
            print "# Converged :"
        if status == 0:
            break
        print " %5d % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], x[2], fn)
    else:
        raise ValueError, "Number of Iterations exceeded!"
    J = solver.J()
    covar = multifit_nlin.covar(solver.J(), 0.0)
    print "# A = % .5f +/- % .5f" % (x[0], covar[0, 0])
    print "# lambda = % .5f +/- % .5f" % (x[1], covar[1, 1])
    print "# b = % .5f +/- % .5f" % (x[2], covar[2, 2])

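
# testfunc, exp_f and exp_fdf are defined elsewhere; only exp_df appears in
# this excerpt.  A sketch consistent with exp_df further below (three
# parameters A, lambda, b and weighted residuals (model - y) / sigma):
def testfunc(t, A, lambda_, b):
    return A * numx.exp(-lambda_ * t) + b

def exp_f(x, params):
    A, lambda_, b = x[0], x[1], x[2]
    t, yi, sigma = params[0], params[1], params[2]
    return (A * numx.exp(-lambda_ * t) + b - yi) / sigma

def exp_fdf(x, params):
    return exp_f(x, params), exp_df(x, params)
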
def run_fdfsolver():
    a = 1.0
    b = 0.0
    c = -5.0
    mysys = roots.gsl_function_fdf(quadratic, quadratic_deriv, quadratic_fdf,
                                   numx.array((a, b, c)))
    solver = roots.newton(mysys)
    #solver = roots.secant(mysys)
    #solver = roots.steffenson(mysys)
    x = 5.0
    solver.set(x)
    r_expected = numx.sqrt(5.0)
    print "# Using solver ", solver.name()
    print "# %5s %9s %10s %9s" % ("iter", "root", "err", "err(est)")
    ok = 1
    for iter in range(10):
        status = solver.iterate()
        x0 = x
        x = solver.root()
        status = roots.test_delta(x, x0, 0.0, 1e-3)
        r = solver.root()
        if status == errno.GSL_SUCCESS:
            print "# Converged :"
        print "%5d %.7f % .6f % .6f" % (iter, r, r - r_expected, x - x0)
        if status == errno.GSL_SUCCESS:
            break
    else:
        raise ValueError, "Exceeded maximum number of iterations!"

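
# quadratic, quadratic_deriv and quadratic_fdf are defined elsewhere.  A
# sketch matching the parameters used here (x**2 - 5, root at sqrt(5)):
def quadratic(x, params):
    a, b, c = params[0], params[1], params[2]
    return (a * x + b) * x + c

def quadratic_deriv(x, params):
    a, b = params[0], params[1]
    return 2. * a * x + b

def quadratic_fdf(x, params):
    return quadratic(x, params), quadratic_deriv(x, params)
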
def test_evolve_bsimp():
    dimension = 2
    step = odeiv.step_bsimp(dimension, func, jac, mu)
    control = odeiv.control_y_new(step, 1e-6, 1e-6)
    evolve = odeiv.evolve(step, control, dimension)
    step1 = odeiv.step_rkf45(dimension, func, jac, mu)
    control1 = odeiv.control_y_new(step1, 1e-6, 1e-6)
    evolve1 = odeiv.evolve(step1, control1, dimension)
    h = 1
    t = 0.0
    t1 = 100.0
    y = numx.array((1.0, 0.0))
    while t < t1:
        t, h, y = evolve.apply(t, t1, h, y)
    sys.stdout = file
    h = 1
    t = 0.0
    t1 = 100.0
    y = (1.0, 0.0)
    while t < t1:
        t, h, y = evolve1.apply(t, t1, h, y)

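
# func, jac and mu come from the surrounding module and are not shown here.
# Presumably this is the Van der Pol oscillator used in the GSL odeiv
# documentation; a sketch with the calling convention these tests assume
# (the Jacobian callback returns dfdy and dfdt):
mu = 10.0  # assumed stiffness parameter

def func(t, y, mu):
    return numx.array((y[1], -y[0] - mu * y[1] * (y[0] ** 2 - 1.)))

def jac(t, y, mu):
    dfdy = numx.array(((0., 1.),
                       (-2. * mu * y[0] * y[1] - 1., -mu * (y[0] ** 2 - 1.))))
    dfdt = numx.array((0., 0.))
    return dfdy, dfdt
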
def run():
    def f2(x, y):
        return numx.sin(x) / x
    sys2 = integrate.gsl_function(f2, None)

    def f1(x, y):
        return 1 / x
    sys1 = integrate.gsl_function(f1, None)

    def f3(x, y):
        return 1 / -x
    sys3 = integrate.gsl_function(f3, None)

    w = integrate.workspace(1000000)
    cyclew = integrate.workspace(1000000)
    table1 = integrate.qawo_table(1, 100, integrate.SINE, 100)
    table2 = integrate.qawo_table(-1, 100, integrate.SINE, 100)

    # Borders and singularity for qagp
    pts = numx.array((-numx.pi, 0, numx.pi))
    flag, result1, error = integrate.qagp(sys2, pts, 1e-8, 1e-8, 100000, w)
    flag, result2, error = integrate.qawf(sys1, numx.pi, 1e-8, 100, w, cyclew, table1)
    flag, result3, error = integrate.qawf(sys3, numx.pi, 1e-8, 100, w, cyclew, table2)
    # The three pieces together cover sin(x)/x over the whole real axis,
    # so the sum should come out as pi.
    print "Result of integration is :", result1 + result2 + result3

def generate_data():
    r = pygsl.rng.mt19937()
    a = numx.arange(20) / 10.  # + .1
    y0 = numx.exp(a)
    sigma = 0.1 * y0
    dy = numx.array(map(r.gaussian, sigma))
    return a, y0 + dy, sigma

def test_qagp(self):
    def f1(x, y):
        return x / x
    sys = integrate.gsl_function(f1, None)
    pts = Numeric.array((-1, 0, 1))
    flag, result, error = integrate.qagp(sys, pts, 1e-8, 1e-8, 100000, self.w)
    assert(Numeric.absolute(result - 2.) < 1e-3)
    assert(error < 1e-8)

def setUp(self):
    t = Numeric.arange(self._getn())
    y = testfunc(t, self.A, self.lambda_, self.b)
    sigma = Numeric.ones(self._getn()) * 0.1
    self.data = Numeric.array((t, y, sigma), Float)
    self.sys = multifit_nlin.gsl_multifit_function_fdf(exp_f, exp_df, exp_fdf,
                                                       self.data, self._getn(),
                                                       self._getp())

def setUp(self):
    self.dim = 100
    self.param = 2
    self.a = 1000.1
    self.b = 100
    self.x = Numeric.arange(self.dim)
    x = self.x
    self.y = self.a + self.b * self.x
    self.w = Numeric.ones((self.dim,))
    self.ws = multifit.linear_workspace(self.dim, self.param)
    self.X = Numeric.transpose(Numeric.array((Numeric.ones(self.dim,), x)))

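
# A sketch of the kind of test this setUp supports: fit the straight line
# with the weighted linear least-squares call used elsewhere in these
# examples and check that the coefficients come back as (a, b).  The method
# name is hypothetical; only the multifit.wlinear call is taken from the
# surrounding code.
def test_wlinear_line(self):
    c, cov, chisq = multifit.wlinear(self.X, self.w, self.y, self.ws)
    assert(Numeric.absolute(c[0] - self.a) < 1e-6)
    assert(Numeric.absolute(c[1] - self.b) < 1e-6)
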
def _run(self, solver):
    start_point = Numeric.array((5., 7.), Float)
    initial_steps = Numeric.array((0.1, 0.1), Float)
    solver.set(start_point, initial_steps)
    for i in range(100):
        status = solver.iterate()
        if status:
            break
        ssval = solver.size()
        rval = multiminimize.test_size(ssval, 1e-2)
        #if rval == 0:
        #    print "converged to minimum at"
        fval = solver.getf()
        x = solver.getx()
        t = (i, x[0], x[1], fval, ssval)
        #print "iter %3d x % 10.3e % 10.3e f() = %-10.3f ssize = %.3f" % t
        if rval == 0:
            break
    else:
        raise ValueError, "Number of Iterations exceeded!"

def exp_df(x, params):
    A = x[0]
    lambda_ = x[1]
    b = x[2]
    t = params[0]
    yi = params[1]
    sigma = params[2]
    e = exp(-lambda_ * t)
    e_s = e / sigma
    df = numx.array((e_s, -t * A * e_s, 1 / sigma))
    df = numx.transpose(df)
    return df

def test_evolve():
    dimension = 2
    step = odeiv.step_bsimp(dimension, func, jac, mu)
    control = odeiv.control_y_new(step, 1e-6, 1e-6)
    evolve = odeiv.evolve(step, control, dimension)
    h = 1
    t = 0.0
    t1 = 1.0
    y = numx.array([1.0, 0.0])
    print step.name(), step.order()
    while t < t1:
        t, h, y = evolve.apply(t, t1, h, y)

    dimension = 2
    steps = (odeiv.step_rk2, odeiv.step_rk4, odeiv.step_rkf45,
             odeiv.step_rkck, odeiv.step_rk8pd, odeiv.step_rk2imp,
             odeiv.step_rk4imp, odeiv.step_gear1, odeiv.step_gear2)
    for s in steps:
        step = s(dimension, func, None, mu)
        print step.name(), step.order()
        control = odeiv.control_y_new(step, 1e-6, 1e-6)
        print control.name()
        evolve = odeiv.evolve(step, control, dimension)
        h = 1
        t = 0.0
        t1 = 1.0
        y = (1.0, 0.0)
        while t < t1 / 2.0:
            t, h, y = evolve.apply(t, t1, h, y)
        y, yerr, dydt = step.apply(t, h, y, None)
        h, msg = control.hadjust(y, yerr, dydt, h)
        assert(msg == odeiv.HADJ_DEC or msg == odeiv.HADJ_INC or
               msg == odeiv.HADJ_NIL)
        step.reset()
        evolve.reset()
        while t < t1:
            t, h, y = evolve.apply(t, t1, h, y)
        y = y

def run():
    r = rng.rng()
    bw = bspline(4, nbreak)

    # Data to be fitted
    x = 15. / (N - 1) * numx.arange(N)
    y = numx.cos(x) * numx.exp(0.1 * x)
    sigma = .1
    w = 1.0 / sigma ** 2 * numx.ones(N)
    dy = r.gaussian(sigma, N)
    y = y + dy

    # use uniform breakpoints on [0, 15]
    bw.knots_uniform(0.0, 15.0)

    X = numx.zeros((N, ncoeffs))
    for i in range(N):
        B = bw.eval(x[i])
        X[i, :] = B

    # do the fit
    c, cov, chisq = multifit.wlinear(X, w, y,
                                     multifit.linear_workspace(N, ncoeffs))

    # output the smoothed curve
    res_y = []
    res_y_err = []
    for i in range(N):
        B = bw.eval(x[i])
        yi, yi_err = multifit.linear_est(B, c, cov)
        res_y.append(yi)
        res_y_err.append(yi_err)
        #print yi, yerr
    res_y = numx.array(res_y)
    res_y_err = numx.array(res_y_err)
    return (x, y,), (x, res_y), res_y_err

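
# run() above relies on module-level names that are not part of this
# excerpt.  A sketch of plausible values, following the GSL manual's
# B-spline fitting example (cubic splines, k = 4); treat them as assumptions:
N = 200                  # number of data points
ncoeffs = 12             # number of fit coefficients
nbreak = ncoeffs - 2     # nbreak = ncoeffs + 2 - k for k = 4
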
def _run(self, solver):
    tmp = Numeric.array((-10., -5.), Float)
    solver.set(tmp)
    for iter in range(100):
        status = solver.iterate()
        r = solver.root()
        x = solver.getx()
        f = solver.getf()
        status = multiroots.test_residual(f, 1e-7)
        if status == 0:
            break
    else:
        raise ValueError, "Number of Iterations exceeded!"
    assert(Numeric.absolute(x[0] - 1) < 1e-6)
    assert(Numeric.absolute(x[1] - 1) < 1e-6)
    assert(Numeric.absolute(f[0]) < 1e-6)
    assert(Numeric.absolute(f[1]) < 1e-6)

def _run(self, solver):
    #x = Numeric.array((1.0, .4, .1))
    x = Numeric.array((1.0, 0.0, 0.0))
    solver.set(x)
    #g.title('Start')
    #g.plot(Gnuplot.Data(self.data[0], self.data[1]),
    #       Gnuplot.Data(self.data[0], testfunc(self.data[0]),
    #                    with = 'line'),
    #       )
    #raw_input()
    #print "Testing solver ", solver.name()
    #print "%5s %9s %9s %9s %10s" % ("iter", "A", "lambda", "b", "|f(x)|")
    for iter in range(20):
        status = solver.iterate()
        assert(status == 0 or status == -2)
        x = solver.getx()
        dx = solver.getdx()
        f = solver.getf()
        J = solver.getJ()
        tdx = multifit_nlin.gradient(J, f)
        status = multifit_nlin.test_delta(dx, x, 1e-8, 1e-8)
        #status = multifit_nlin.test_gradient(dx, 1e-4)
        fn = Numeric.sqrt(Numeric.sum(f * f))
        #g.title('Iteration')
        if status == 0:
            break
        #print "%5d % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], x[2], fn)
        #g.plot(Gnuplot.Data(self.data[0], self.data[1]),
        #       Gnuplot.Data(self.data[0],
        #                    testfunc(self.data[0], x[0], x[1], x[2]),
        #                    with = 'line', title='iteration ' + str(iter)),
        #       )
        #raw_input()
    else:
        raise ValueError, "Number of Iterations exceeded!"
    #print "Converged :"
    #print "%5d % .7f % .7f %.7f % .7f" % (iter, x[0], x[1], x[2], fn)
    assert(Numeric.absolute(x[0] - self.A) < _eps)
    assert(Numeric.absolute(x[1] - self.lambda_) < _eps)
    assert(Numeric.absolute(x[2] - self.b) < _eps)
    #J = solver.getJ()
    #print "shape = ", J.shape
    covar = multifit_nlin.covar(solver.getJ(), 0.0)

def test_qawf(self):
    def f2(x, y):
        return Numeric.sin(x) / x
    sys2 = integrate.gsl_function(f2, None)

    def f1(x, y):
        return 1 / x
    sys1 = integrate.gsl_function(f1, None)

    def f3(x, y):
        return 1 / -x
    sys3 = integrate.gsl_function(f3, None)

    pts = Numeric.array((-Numeric.pi, 0, Numeric.pi))
    flag, result1, error = integrate.qagp(sys2, pts, 1e-8, 1e-8, 100000, self.w)
    table1 = integrate.qawo_table(1, 100, integrate.SINE, 100)
    cyclew = integrate.workspace(1000000)
    flag, result2, error = integrate.qawf(sys1, Numeric.pi, 1e-8, 100, self.w,
                                          cyclew, table1)
    table2 = integrate.qawo_table(-1, 100, integrate.SINE, 100)
    flag, result3, error = integrate.qawf(sys3, Numeric.pi, 1e-8, 100, self.w,
                                          cyclew, table2)
    assert(Numeric.absolute(result1 + result2 + result3 - Numeric.pi) < 1e-8)

def _run(self, solver):
    tmp = Numeric.array((5., 7.), Float)
    solver.set(tmp, 0.01, 1e-4)
    #print "Testing solver ", solver.name()
    #print "%5s %9s %9s %9s %9s %9s" % ("iter", "x", "y", "f", "dx", "dy")
    for iter in range(200):
        status = solver.iterate()
        gradient = solver.gradient()
        x = solver.getx()
        f = solver.getf()
        status = multiminimize.test_gradient(gradient, 1e-3)
        if status == 0:
            break
        #print "%5d % .7f % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], f,
        #                                             gradient[0], gradient[1])
    else:
        raise ValueError, "Number of Iterations exceeded!"
    assert(Numeric.absolute(x[0] - 1) < 1e-3)
    assert(Numeric.absolute(x[1] - 2) < 1e-3)
    assert(Numeric.absolute(f - 30) < 1e-3)
    assert(Numeric.absolute(gradient[0]) < 1e-3)
    assert(Numeric.absolute(gradient[1]) < 1e-3)

def fdf_solver():
    #solver = multiminimize.conjugate_pr(sys, 2)
    #solver = multiminimize.conjugate_fr(sys, 2)
    #solver = multiminimize.vector_bfgs(sys, 2)
    #solver = multiminimize.vector_bfgs2(sys, 2)
    solver = multiminimize.steepest_descent(sys, 2)
    start = numx.array((5., 7.))
    solver.set(start, 0.01, 1e-4)
    print("Using solver ", solver.name())
    print("%5s %9s %9s %9s %9s %9s" % ("iter", "x", "y", "f", "dx", "dy"))
    for iter in range(200):
        status = solver.iterate()
        gradient = solver.gradient()
        x = solver.getx()
        f = solver.getf()
        status = multiminimize.test_gradient(gradient, 1e-3)
        if status == errno.GSL_SUCCESS:
            print("Converged ")
        print("%5d % .7f % .7f % .7f % .7f % .7f" % (iter, x[0], x[1], f,
                                                     gradient[0], gradient[1]))
        if status == errno.GSL_SUCCESS:
            break
    else:
        raise ValueError("Number of Iterations exceeded!")

def test_variance_long(self):
    self.failIf(long.variance(numx.array([-1, -3, 1])) != 4.0)
    return

def test_variance_m_long(self):
    self.failIf(long.variance_m(numx.array([-1, -3, 1]),
                                long.mean(numx.array([-1, -3, 1]))) != 4)
    return

def test_variance_m(self):
    self.failIf(variance_m(numx.array([-1., -3., 1.]),
                           mean(numx.array([-1., -3., 1.]))) != 4.0)
    return

def test_mean_uchar(self):
    self.failIf(uchar.mean(numx.array([1, 2, 3], UInt8)) != 2)
    self.failIf(uchar.mean([1, 2, 3]) != 2)

def test_min_long(self):
    self.failIf(long.min(numx.array([1, 2, 3])) != 1)

def test_sd_m(self):
    self.failIf(sd_m(numx.array([-1., -3., 1.]),
                     mean(numx.array([-1., -3., 1.]))) != 2.0)

def setUp(self):
    a = 1.0
    b = 0.0
    c = -5.0
    self.sys = roots.gsl_function_fdf(quadratic, quadratic_deriv, quadratic_fdf,
                                      Numeric.array((a, b, c)))

def test_max_index(self):
    self.failIf(long.max_index(numx.array([1, 2, 3])) != 2)

def test_max_long(self):
    self.failIf(long.max(numx.array([1, 2, 3])) != 3)

def test_mean_float(self):
    self.failIf(float.mean(numx.array([-1., -3., 1.], 'f')) != -1)
    self.failIf(float.mean([1., 2., 3.]) != 2)

def test_mean_char(self):
    tmp = (ord("1") + ord("2") + ord("3")) / 3
    self.failIf(char.mean(numx.array([1, 2, 3], Int8)) != 2)
    self.failIf(char.mean([1, 2, 3]) != 2)

def setUp(self):
    self._assertIsNotNone(self._t_type)
    tmp = numx.array((1., 2.))
    self._dim = 2
    self._sys = multiminimize.gsl_multimin_function(my_f, tmp, self._dim)
    self._solver = self._t_type(self._sys, self._dim)

def test_sd_long(self):
    self.failIf(long.sd(numx.array([-1, -3, 1])) != 2)

def test_sd(self):
    self.failIf(sd(numx.array([-1., -3., 1.])) != 2.0)

def setUp(self):
    tmp = Numeric.array((1., 2.), Float)
    self.sys = multiminimize.gsl_multimin_function(my_f, tmp, self._getsize())

City("Phoenix", -112.07, 33.54,), City("Albuquerque", -106.62, 35.12,), City("Clovis", -103.20, 34.41,), City("Durango", -107.87, 37.29,), City("Dallas", -96.77, 32.79,), City("Tesuque", -105.92, 35.77,), City("Grants", -107.84, 35.15,), City("Los Alamos", -106.28, 35.89,), City("Las Cruces", -106.76, 32.34,), City("Cortez", -108.58, 37.35,), City("Gallup", -108.74, 35.52,)) n_cities = len(cities) distance_matrix = numx.zeros((n_cities, n_cities)) cities_vec = numx.array(map(lambda x: x.GetData(), cities)) for i in range(len(cities)): cities[i].SetNumber(i) earth_radius = 6375.000 #n_cities = 6 # distance between two cities def city_distance(c): la = c[:,0] lo = c[:,1] sla = sin(la*pi/180) cla = cos(la*pi/180) slo = sin(lo*pi/180) clo = sin(lo*pi/180)
def test_sd_m_long(self):
    self.failIf(long.sd_m(numx.array([-1, -3, 1]),
                          long.mean(numx.array([-1, -3, 1]))) != 2)

def setUp(self):
    tmp = Numeric.array((1., 10.), Float)
    self.sys = multiroots.gsl_multiroot_function_fdf(
        rosenbrock_f, rosenbrock_df, rosenbrock_fdf, tmp, self._getsize())

def generate_data():
    r = pygsl.rng.mt19937()
    a = numx.arange(20) / 10.  # + .1
    y0 = numx.exp(a)
    sigma = 0.1 * y0
    tmp = tuple(map(r.gaussian, sigma))
    dy = numx.array(tmp)
    tmp = y0 + dy
    return a, tmp, sigma


if __name__ == '__main__':
    x, y, sigma = generate_data()
    data = (x, y, sigma)
    data = numx.array(data).transpose()
    for tmp in data:
        print("%e %e %e" % tuple(tmp))
    #c, cov, chisq = calculate(x, y, sigma)
    #import Gnuplot
    #g = Gnuplot.Gnuplot()
    #xref = numx.arange(100) / 50.
    #yref = c[0] + c[1] * xref + c[2] * xref ** 2
    #t1 = Gnuplot.Data(x, y, with='points')
    #t2 = Gnuplot.Data(xref, yref, with='line')
    #g.plot(t1, t2)
    #print "Press return !"
    #raw_input()

def test_mean_long(self):
    self.failIf(long.mean(numx.array([-1, -3, 1])) != -1)
    self.failIf(long.mean([1, 2, 3]) != 2)

def test_mean_short(self):
    self.failIf(short.mean([1, 2, 3]) != 2)
    self.failIf(short.mean(numx.array([-1, -3, 1], Int16)) != -1)