Example #1
    def testdifferential_evolution(self):
        def myrosen(pars):
            return rosen(list(pars.values()))

        self.m = matk.matk(model=myrosen)
        self.m.add_par('p1', min=0, max=2)
        self.m.add_par('p2', min=0, max=2)
        self.m.add_par('p3', min=0, max=2)
        self.m.add_par('p4', min=0, max=2)
        self.m.add_obs('o1', value=0)
        result = self.m.differential_evolution()
        self.assertTrue(
            result.fun < 1.e-8,
            'Objective function of Rosenbrock problem is larger than tolerance of 1.e-8: '
            + str(result.fun))

        def ackley(pars):
            x = list(pars.values())
            arg1 = -0.2 * numpy.sqrt(0.5 * (x[0]**2 + x[1]**2))
            arg2 = 0.5 * (numpy.cos(2. * numpy.pi * x[0]) +
                          numpy.cos(2. * numpy.pi * x[1]))
            return -20. * numpy.exp(arg1) - numpy.exp(arg2) + 20. + numpy.e

        self.m2 = matk.matk(model=ackley)
        self.m2.add_par('p1', min=-5, max=5)
        self.m2.add_par('p2', min=-5, max=5)
        self.m2.add_obs('o1', value=0)
        result2 = self.m2.differential_evolution()
        self.assertTrue(
            result2.fun < 1.e-8,
            'Objective function for Ackley problem is larger than tolerance of 1.e-8: '
            + str(result2.fun))
Example #2
 def setUp(self):
     # Sampling model
     self.p = matk.matk(model=dbexpl)
     self.p.add_par('par1', min=0, max=1)
     self.p.add_par('par2', min=0, max=0.2)
     self.p.add_par('par3', min=0, max=1)
     self.p.add_par('par4', min=0, max=0.2)
     # Calibration sine model
     # create data to be fitted
     self.x = numpy.linspace(0, 15, 301)
     self.c = matk.matk(model=sine_decay, model_args=(self.x, ))
     self.c.add_par('amp', value=5, min=0.)
     self.c.add_par('decay', value=0.025)
     self.c.add_par('shift',
                    value=-0.1,
                    min=-numpy.pi / 2.,
                    max=numpy.pi / 2.)
     self.c.add_par('omega', value=2.0)
     self.c.forward()
     self.c.obsvalues = self.c.simvalues
     self.c.parvalues = {
         'amp': 10.,
         'decay': 0.1,
         'shift': 0.,
         'omega': 3.0
     }
     # Model for testing jacobian
     self.j = matk.matk(model=fv)
     self.j.add_par('a0', value=0.7)
     self.j.add_par('a1', value=10.)
     self.j.add_par('a2', value=-0.4)
Example #3
 def testsobol(self):
     # This test is based on the test problem at: http://salib.readthedocs.io/en/latest/getting-started.html#testing-installation
     m = matk.matk(model=myIshigami)
     m.add_par('x1', min=-3.14159265359, max=3.14159265359)
     m.add_par('x2', min=-3.14159265359, max=3.14159265359)
     m.add_par('x3', min=-3.14159265359, max=3.14159265359)
     m.add_obs('res')
     # Generate samples
     ss = m.saltelli(1000)
     # Run model
     ss.run(verbose=False)
     # Perform analysis
     Si = ss.sobol('res', print_to_console=False)
     # Test results
     self.assertTrue(
         numpy.abs(Si['S1'][0] - 0.306) / 0.306 < 1.e-2,
         'First order sensitivity for parameter x1 should be around 0.306 but is '
         + str(Si['S1'][0]))
     self.assertTrue(
         numpy.abs(Si['S1'][1] - 0.448) / 0.448 < 1.e-2,
         'First order sensitivity for parameter x2 should be around 0.448 but is '
         + str(Si['S1'][1]))
     self.assertTrue(
         numpy.abs(Si['S1'][2]) < 0.01,
         'First order sensitivity for parameter x3 should be a very small number but is '
         + str(Si['S1'][2]))
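myIshigami itself is not included in this snippet. Assuming it is the standard Ishigami function from the SALib getting-started example (a = 7, b = 0.1), returning its value under the 'res' observation name used above, it would look roughly like this sketch:

import numpy

# Hypothetical stand-in for myIshigami (not shown in the snippet): the SALib Ishigami function
def myIshigami(pars, a=7., b=0.1):
    x1, x2, x3 = pars['x1'], pars['x2'], pars['x3']
    res = numpy.sin(x1) + a * numpy.sin(x2)**2 + b * x3**4 * numpy.sin(x1)
    return {'res': res}   # keyed to match m.add_obs('res')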
Example #4
def run():
	# Setup MATK model with parameters
	p = matk.matk(model=dbexpl)
	p.add_par('par1',min=0,max=1)
	p.add_par('par2',min=0,max=0.2)
	p.add_par('par3',min=0,max=1)
	vals = numpy.linspace(0,0.2,21)
	probs = [1./20.]*21

	p.add_par('par4',discrete_vals = (vals,probs))
	
	# Create LHS sample
	s = p.lhs('lhs', siz=500, seed=1000)
	
	# Look at sample parameter histograms, correlations, and panels
	s.samples.hist(ncols=2,title='Parameter Histograms by Counts')
	s.samples.hist(ncols=2,title='Parameter Histograms by Frequency',frequency=True)
	parcor = s.samples.corr(plot=True, title='Parameter Correlations')
	s.samples.panels(title='Parameter Panels')
	
	# Run model with parameter samples
	s.run( cpus=2, outfile='results.dat', logfile='log.dat',verbose=False)
	
	# Look at response histograms, correlations, and panels
	s.responses.hist(ncols=3,title='Model Response Histograms by Counts')
	s.responses.hist(ncols=3,title='Model Response Histograms by Frequency',frequency=True)
	rescor = s.responses.corr(plot=True, title='Model Response Correlations')
	s.responses.panels(title='Response Panels')
	
	# Print and plot parameter/response correlations
	print "\nPearson Correlation Coefficients:"
	pcorr = s.corr(plot=True,title='Pearson Correlation Coefficients') 
	print "\nSpearman Correlation Coefficients:"
	scorr = s.corr(plot=True,type='spearman',title='Spearman Rank Correlation Coefficients') 
	s.panels(figsize=(10,8))
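As an aside, the (vals, probs) pair passed to discrete_vals above simply defines a discrete probability mass function over the 21 values. A minimal numpy sketch of an equivalent random draw (illustration only, not how MATK's LHS routine works internally; the weights are normalized here because [1./20.]*21 sums to 1.05):

import numpy

vals = numpy.linspace(0, 0.2, 21)
probs = numpy.array([1. / 20.] * 21)
probs = probs / probs.sum()                            # normalize the weights to sum to 1
draws = numpy.random.choice(vals, size=500, p=probs)   # 500 draws from the same discrete PMF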
Example #5
File: parstudy.py  Project: dharp/matk
def run():
	# Setup MATK model with parameters
	p = matk.matk(model=dbexpl)
	p.add_par('par1',min=0,max=1)
	p.add_par('par2',min=0,max=0.2)
	p.add_par('par3',min=0,max=1)
	p.add_par('par4',min=0,max=0.2)
	
	# Create full factorial parameter study with 3 values for each parameter
	s = p.parstudy(nvals=[3,3,3,3])
	
	# Print values to make sure you got what you wanted
	print "\nParameter values:"
	print s.samples.values

	# Look at sample parameter histograms
	s.samples.hist(ncols=2,title='Parameter Histograms by Counts')
	s.samples.hist(ncols=2,title='Parameter Histograms by Frequency',frequency=True)
	
	# Run model with parameter samples
	s.run( cpus=2, outfile='results.dat', logfile='log.dat',verbose=False)
	
	# Look at response histograms, correlations, and panels
	s.responses.hist(ncols=2, bins=30, title='Model Response Histograms')
	rescor = s.responses.corr(plot=True, title='Model Response Correlations')
	s.responses.panels(title='Response Panels')
	
	# Print and plot parameter/response correlations
	print "\nPearson Correlation Coefficients:"
	pcorr = s.corr(plot=True,title='Pearson Correlation Coefficients') 
	print "\nSpearman Correlation Coefficients:"
	scorr = s.corr(plot=True,type='spearman',title='Spearman Rank Correlation Coefficients') 
	s.panels(figsize=(10,8))
Example #6
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=dbexpl)
    p.add_par('par1',min=0,max=1)
    p.add_par('par2',min=0,max=0.2)
    p.add_par('par3',min=0,max=1)
    p.add_par('par4',min=0,max=0.2)
    
    # Create LHS sample
    s = p.lhs(siz=500, seed=1000)
    
    # Look at sample parameter histograms, correlations, and panels
    s.samples.hist(ncols=2,title='Parameter Histograms')
    parcor = s.samples.corr(plot=True, title='Parameter Correlations')
    s.samples.panels(title='Parameter Panels')
    
    # Run model with parameter samples
    s.run( cpus=2, outfile='results.dat', logfile='log.dat',verbose=False)
    
    # Look at sample response histograms, correlations, and panels
    s.responses.hist(ncols=3,title='Model Response Histograms')
    
    # Copy sampleset and subset to only samples with nan responses
    snan = s.copy()
    ss = snan.subset(numpy.isnan, 'obs1')
    
    # Evaluate parameter combination resulting in nans
    # Note that it is easy to identify that the culprit is par1 with values less than 0.5
    ss.samples.hist(ncols=2,title='NAN Parameter Histograms')
    parcor = ss.samples.corr(plot=True, title='NAN Parameter Correlations')
    ss.samples.panels(title='NAN Parameter Panels')
Example #7
 def testrbd_fast(self):
     # This test is based on running the SALib example at https://github.com/SALib/SALib/blob/master/examples/rbd_fast/rbd_fast.py
     m = matk.matk(model=myIshigami)
     m.add_par('x1', min=-3.14159265359, max=3.14159265359)
     m.add_par('x2', min=-3.14159265359, max=3.14159265359)
     m.add_par('x3', min=-3.14159265359, max=3.14159265359)
     m.add_obs('res')
     # Generate samples
     ss = m.lhs(siz=1000)
     # Run model
     ss.run(verbose=False)
     # Perform analysis
     Si = ss.rbd_fast('res', print_to_console=False)
     # Test results
     self.assertTrue(
         numpy.abs(Si['S1'][0] - 0.32) / 0.32 < 5.e-1,
         'First order sensitivity for parameter x1 should be around 0.32 but is '
         + str(Si['S1'][0]))
     self.assertTrue(
         numpy.abs(Si['S1'][1] - 0.448) / 0.448 < 5.e-1,
         'First order sensitivity for parameter x2 should be around 0.448 but is '
         + str(Si['S1'][1]))
     self.assertTrue(
         numpy.abs(Si['S1'][2]) < 0.1,
         'First order sensitivity for parameter x3 should be a very small number but is '
         + str(Si['S1'][2]))
Example #8
 def testemcee2(self):
     self.m = matk.matk(model=fmcmc)
     # Add parameters with 'true' parameters
     self.m.add_par('a', min=0, max=10, value=2)
     self.m.add_par('c', min=0, max=30, value=5)
     # Run model using 'true' parameters
     self.m.forward()
     # Create 'true' observations with zero mean, 0.5 st. dev. gaussian noise added
     self.m.obsvalues = self.m.simvalues + numpy.random.normal(
         0, 0.5, len(self.m.simvalues))
     # Run emcee with 10000 samples, burning (discarding) the first 1000
     #lnprob = matk.logposteriorwithvariance(self.m)
     #print lnprob([2., 5., 10.])
     #print lnprob([2., 8., 10.])
     sampler = self.m.emcee(lnprob=matk.logposteriorwithvariance(self.m),
                            nwalkers=10,
                            nsamples=10000,
                            burnin=1000)
     samples = sampler.chain.reshape((-1, len(self.m.pars)))
     #print samples.shape
     mean_a, mean_c = numpy.mean(samples, 0)
     self.assertTrue(
         abs(mean_a - 2.) < 0.2,
         'Mean of parameter a is not close to 2: mean(a) = ' + str(mean_a))
     self.assertTrue(
         abs(mean_c - 5.) < 1.,
         'Mean of parameter c is not close to 5: mean(c) = ' + str(mean_c))
Example #9
    def testminimize(self):
        def fun(pars):
            o = (pars['x1'] - 1)**2 + (pars['x2'] - 2.5)**2
            return -o

        cons = ({
            'type': 'ineq',
            'fun': lambda x: x[0] - 2 * x[1] + 2
        }, {
            'type': 'ineq',
            'fun': lambda x: -x[0] - 2 * x[1] + 6
        }, {
            'type': 'ineq',
            'fun': lambda x: -x[0] + 2 * x[1] + 2
        })
        self.m = matk.matk(model=fun)
        self.m.add_par('x1', min=0, value=2)
        self.m.add_par('x2', min=0, value=0)
        self.m.add_obs('obs1', value=0)
        r = self.m.minimize(constraints=cons,
                            options={'eps': 1.4901161193847656e-08})
        self.assertTrue(
            abs(r['x'][0] - 1.4) < 1.e-8,
            'Calibrated parameter 1 should be 1.4 but is ' + str(r['x'][0]))
        self.assertTrue(
            abs(r['x'][1] - 1.7) < 1.e-8,
            'Calibrated parameter 2 should be 1.7 but is ' + str(r['x'][1]))
Example #10
 def testmcmc(self):
     try:
         import pymc
     except:
         print("\nPymc module not installed")
         print("Skipping mcmc unittest")
         return
     self.m = matk.matk(model=fmcmc)
     # Add parameters with 'true' parameters
     self.m.add_par('a', min=0, max=10, value=2)
     self.m.add_par('c', min=0, max=30, value=5)
     # Run model using 'true' parameters
     self.m.forward()
     # Create 'true' observations with zero mean, unit st. dev. gaussian noise added
     self.m.obsvalues = self.m.simvalues + numpy.random.normal(
         0, 1, len(self.m.simvalues))
     # Run MCMC with 10000 samples, burning (discarding) the first 1000
     M = self.m.MCMC(nruns=10000, burn=1000, verbose=-1)
     mean_a = M.trace('a').stats()['mean']
     mean_c = M.trace('c').stats()['mean']
     mean_sig = M.trace('error_std').stats()['mean']
     self.assertTrue(
         abs(mean_a - 2.) < 0.2,
         'Mean of parameter a is not close to 2: mean(a) = ' + str(mean_a))
     self.assertTrue(
         abs(mean_c - 5.) < 1.,
         'Mean of parameter c is not close to 5: mean(c) = ' + str(mean_c))
     self.assertTrue(
         abs(mean_sig - 1) < 1.,
         'Mean of model error std. dev. is not close to 1: mean(sig) = ' +
         str(mean_sig))
Example #11
def run():
    # create data to be fitted
    x = np.linspace(0, 15, 301)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
            np.random.normal(size=len(x), scale=0.2) )

    # Create MATK object
    p = matk.matk(model=sine_decay, model_args=(x,data,))

    # Create parameters
    p.add_par('amp', value=10, min=0.)
    p.add_par('decay', value=0.1)
    p.add_par('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
    p.add_par('omega', value=3.0)

    # Create observation names and set observation values
    for i in range(len(data)):
        p.add_obs('obs'+str(i+1), value=data[i])

    # Look at initial fit
    p.forward()
    plt.plot(x,data, 'k+')
    plt.plot(x,p.simvalues, 'r')
    plt.title("Before Calibration")
    plt.show(block=True)

    # Calibrate parameters to data, results are printed to screen
    p.calibrate(verbose=True,h=1.e-8)

    # Look at calibrated fit
    plt.plot(x,data, 'k+')
    plt.plot(x,p.simvalues, 'r')
    plt.title("After Calibration")
    plt.show()
Example #12
 def testdiscreteparstudy(self):
     # Ensure that discrete parameter parstudies are correct
     p = matk.matk()
     vals = list(range(5))
     probs = (.1, .2, .3, .2, .2)
     p.add_par('par1', discrete_vals=(vals, probs))
     ps = p.parstudy(1)
     self.assertEqual(ps.recarray['par1'][0], 2,
                      'Discrete parstudy of size 1 is incorrect')
     ps = p.parstudy(2)
     self.assertEqual(ps.recarray['par1'][0], 0.,
                      'Discrete parstudy of size 2 is incorrect')
     self.assertEqual(ps.recarray['par1'][1], 4.,
                      'Discrete parstudy of size 2 is incorrect')
     ps = p.parstudy(3)
     self.assertEqual(ps.recarray['par1'][0], 0.,
                      'Discrete parstudy of size 3 is incorrect')
     self.assertEqual(ps.recarray['par1'][1], 2.,
                      'Discrete parstudy of size 3 is incorrect')
     self.assertEqual(ps.recarray['par1'][2], 4.,
                      'Discrete parstudy of size 3 is incorrect')
     ps = p.parstudy(5)
     self.assertEqual(ps.recarray['par1'][0], 0.,
                      'Discrete parstudy of size 5 is incorrect')
     self.assertEqual(ps.recarray['par1'][1], 1.,
                      'Discrete parstudy of size 5 is incorrect')
     self.assertEqual(ps.recarray['par1'][2], 2.,
                      'Discrete parstudy of size 5 is incorrect')
     self.assertEqual(ps.recarray['par1'][3], 3.,
                      'Discrete parstudy of size 5 is incorrect')
     self.assertEqual(ps.recarray['par1'][4], 4.,
                      'Discrete parstudy of size 5 is incorrect')
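The expected values above are consistent with reading the discrete parameter study as an inverse-CDF lookup at evenly spaced quantiles, with a single-value study taking the median. A sketch of that interpretation (not necessarily MATK's actual implementation):

import numpy

def discrete_parstudy_values(vals, probs, n):
    # Inverse CDF evaluated at n evenly spaced quantiles; a single-value study takes the median
    cdf = numpy.cumsum(probs)
    qs = numpy.linspace(0., 1., n) if n > 1 else numpy.array([0.5])
    return [vals[numpy.searchsorted(cdf, q)] for q in qs]

# discrete_parstudy_values(list(range(5)), (.1, .2, .3, .2, .2), n) gives
# n=1 -> [2], n=2 -> [0, 4], n=3 -> [0, 2, 4], n=5 -> [0, 1, 2, 3, 4]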
Example #13
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=dbexpl)
    p.add_par('par1',min=0,max=1)
    p.add_par('par2',min=0,max=0.2)
    p.add_par('par3',min=0,max=1)
    p.add_par('par4',min=0,max=0.2)
    
    # Create LHS sample
    s = p.lhs(siz=500, seed=1000)
    
    # Look at sample parameter histograms, correlations, and panels
    s.samples.hist(ncols=2,title='Parameter Histograms')
    parcor = s.samples.corr(plot=True, title='Parameter Correlations')
    s.samples.panels(title='Parameter Panels')
    
    # Run model with parameter samples
    s.run( cpus=2, outfile='results.dat', logfile='log.dat',verbose=False)
    
    # Look at sample response histograms, correlations, and panels
    s.responses.hist(ncols=3,title='Model Response Histograms')
    
    # Copy sampleset and subset to only samples with nan responses
    snan = s.copy()
    snan.subset(numpy.isnan, obs='obs1')
    
    # Evaluate parameter combination resulting in nans
    # Note that it is easy to identify that the culprit is par1 with values less than 0.5
    snan.samples.hist(ncols=2,title='NAN Parameter Histograms')
    parcor = snan.samples.corr(plot=True, title='NAN Parameter Correlations')
    snan.samples.panels(title='NAN Parameter Panels')
Example #14
 def testemcee(self):
     self.m = matk.matk(model=femcee)
     self.m.add_par("k", value=.5, min=-10, max=10)
     self.m.obsvalues = numpy.array([1., 2., 3.])
     samples = self.m.emcee(nwalkers=100, nsamples=1000, burnin=100)
     mean = numpy.mean(samples)
     std = numpy.std(samples)
     self.assertTrue( abs(mean - 1.) < 0.1, 'Mean of parameter k is not close to 1: mean(samples) = ' + str(mean) )
     self.assertTrue( abs(std - 0.267) < 0.0267, 'Standard deviation is not close to 0.267: std(samples) = ' + str(std) )
Example #15
File: jacobian.py  Project: dharp/matk
def run():
    p = matk.matk(model=fv)
    p.add_par('a0', value=0.7)
    p.add_par('a1', value=10.)
    p.add_par('a2', value=-0.4)

    J = p.Jac()

    print np.dot(J.T,J)
Example #16
def run():

    nms, pars = matk.pest_io.read_par_files('*.par')

    p = matk.matk()
    for n in nms:
        p.add_par(n)

    s = p.create_sampleset(pars)

    s.savetxt('sampleset.matk')
Example #17
def run():

    nms, pars = matk.pest_io.read_par_files( '*.par' )

    p = matk.matk()
    for n in nms:
        p.add_par( n )
    
    s = p.create_sampleset( pars )

    s.savetxt('sampleset.matk')
Example #18
 def testdiscretesample(self):
     # Create 1,000,000 discrete samples and make sure they adhere to assigned probabilities
     p = matk.matk()
     vals = list(range(5))
     probs = (.1, .2, .3, .2, .2)
     p.add_par('par1', discrete_vals=(vals, probs))
     ss = p.lhs(siz=1000000)
     for i, prob in enumerate(probs):
         self.assertTrue(
             numpy.abs(
                 len(numpy.where(ss.recarray['par1'] == i)[0]) / 1000000. -
                 prob) / prob < 0.01, 'Discrete probability is incorrect')
Example #19
File: calibrate.py  Project: 5l1v3r1/matk
def run():
    p = matk.matk(model=fv)
    p.add_par('a0', value=0.7, min=-2000., max=2000.)
    #p.add_par('a0', value=0.7)
    #p.add_par('a1', value=10., min=-2000., max=2000.)
    p.add_par('a1', value=10.)
    #p.add_par('a2', value=-0.4, min=-20000., max=20000.)
    p.add_par('a2', value=-0.4)
    #p.forward()
    p.obsvalues = [5.308,7.24,9.638,12.866,17.069,23.192,31.443,38.558,50.156,62.948,75.995,91.972]

    p.calibrate(cpus=6,verbose=True)
Example #20
 def setUp(self):
     # Sampling model
     self.p = matk.matk(model=dbexpl)
     self.p.add_par('par1',min=0,max=1)
     self.p.add_par('par2',min=0,max=0.2)
     self.p.add_par('par3',min=0,max=1)
     self.p.add_par('par4',min=0,max=0.2)
     # Calibration sine model
     # create data to be fitted
     self.x = numpy.linspace(0, 15, 301)
     self.c = matk.matk(model=sine_decay, model_args=(self.x,))
     self.c.add_par('amp', value=5, min=0.)
     self.c.add_par('decay', value=0.025)
     self.c.add_par('shift', value=-0.1, min=-numpy.pi/2., max=numpy.pi/2.)
     self.c.add_par('omega', value=2.0)
     self.c.forward()
     self.c.obsvalues = self.c.simvalues
     self.c.parvalues = {'amp':10.,'decay':0.1,'shift':0.,'omega':3.0}
     # Model for testing jacobian
     self.j = matk.matk(model=fv)
     self.j.add_par('a0', value=0.7)
     self.j.add_par('a1', value=10.)
     self.j.add_par('a2', value=-0.4)
Example #21
File: ext_sim.py  Project: dharp/matk
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=fehm)
    p.add_par('por0',min=0.1,max=0.3)

    # Create LHS sample
    s = p.parstudy(nvals=[3])

    # Run model with parameter samples
    s.run( ncpus=2, workdir_base='workdir', outfile='results.dat', logfile='log.dat',verbose=False,reuse_dirs=True)

    # Look at response histograms, correlations, and panels
    print 'Parameter Response'
    for pa,re in zip(s.samples.values, s.responses.values): print pa[0], re[0]
    s.responses.hist(ncols=2,title='Model Response Histograms')
Example #22
 def testemcee(self):
     self.m = matk.matk(model=femcee)
     self.m.add_par("k", value=.5, min=-10, max=10)
     self.m.obsvalues = numpy.array([1., 2., 3.])
     sampler = self.m.emcee(nwalkers=100, nsamples=1000, burnin=100)
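     # sampler.chain has shape (nwalkers, nsteps, ndim); the reshape pools all walkers into one flat sample array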
     samples = sampler.chain.reshape((-1, len(self.m.pars)))
     mean = numpy.mean(samples)
     std = numpy.std(samples)
     self.assertTrue(
         abs(mean - 1.) < 0.1,
         'Mean of parameter k is not close to 1: mean(samples) = ' +
         str(mean))
     self.assertTrue(
         abs(std - 0.267) < 0.0267,
         'Standard deviation is not close to 0.267: std(samples) = ' +
         str(std))
Example #23
def run():
    # create data to be fitted
    x = np.linspace(0, 15, 301)
    np.random.seed(1000)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
            np.random.normal(size=len(x), scale=0.2) )

    # Create MATK object
    p = matk.matk(model=calibrate, model_args=(x,data,))

    # Create parameters
    p.add_par('amp', value=10, min=9., max=11.)
    p.add_par('decay', value=0.1, min=0.09, max=0.11)
    p.add_par('shift', value=0.0, min=-np.pi/6., max=np.pi/6.)
    p.add_par('omega', value=3.0, min=2.5, max=3.5)

    ## Create observation names and set observation values
    #for i in range(len(data)):
    #    p.add_obs('obs'+str(i+1), value=data[i])

    s = p.lhs(siz=30,seed=40)
    s.run(cpus=5)

    best_id = np.argmin(s.responses.values[:,-1])
    print "Lowest objective function value found:"
    print s.responses.values[best_id][-1]
    print "Best parameters found:"
    print s.responses.values[best_id][:-1]
    
    # Look at initial fit
    simvalues = sine_decay(dict(zip(p.parnames,s.samples.values[best_id])),x,data).values()
    f, (ax1,ax2) = plt.subplots(2,sharex=True)
    ax1.plot(x,data, 'k+')
    ax1.plot(x,simvalues, 'r')
    ax1.set_ylabel("Model Response")
    ax1.set_title("Before Calibration")

    # Look at calibrated fit
    simvalues = sine_decay(dict(zip(p.parnames,s.responses.values[best_id][:-1])),x,data).values()
    ax2.plot(x,data, 'k+')
    ax2.plot(x,simvalues, 'r')
    ax2.set_ylabel("Model Response")
    ax2.set_xlabel("x")
    ax2.set_title("After Calibration")
    plt.show(block=True)
Example #24
def run():
    # create data to be fitted
    x = np.linspace(0, 15, 301)
    np.random.seed(1000)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x * x * 0.025) +
            np.random.normal(size=len(x), scale=0.2))

    # Create MATK object
    p = matk.matk(model=sine_decay, model_args=(
        x,
        data,
    ))

    # Create parameters
    p.add_par('amp', value=10, min=0.)
    p.add_par('decay', value=0.1)
    p.add_par('shift', value=0.0, min=-np.pi / 2., max=np.pi / 2.)
    p.add_par('omega', value=3.0)

    # Create observation names and set observation values
    for i in range(len(data)):
        p.add_obs('obs' + str(i + 1), value=data[i])

    # Look at initial fit
    p.forward()
    f, (ax1, ax2) = plt.subplots(2, sharex=True)
    ax1.plot(x, data, 'k+')
    ax1.plot(x, p.simvalues, 'r')
    ax1.set_ylabel("Model Response")
    ax1.set_title("Before Calibration")

    # Calibrate parameters to data, results are printed to screen
    res, pars, evals = p.lmfit(cpus=2,
                               verbose=True,
                               save_evals=True,
                               difference_type='central')
    evals.savetxt('evals.txt', sse=True)

    # Look at calibrated fit
    ax2.plot(x, data, 'k+')
    ax2.plot(x, p.simvalues, 'r')
    ax2.set_ylabel("Model Response")
    ax2.set_xlabel("x")
    ax2.set_title("After Calibration")
    plt.show(block=True)
Example #25
File: mcmc.py  Project: dharp/matk
def run():
    # Create matk object
    prob = matk.matk(model=f)

    # Add parameters with 'true' parameters
    prob.add_par('a', min=0, max=10, value=2)
    prob.add_par('c', min=0, max=30, value=5)

    # Run model using 'true' parameters
    prob.forward()

    # Create 'true' observations with zero mean, 0.1 st. dev. gaussian noise added
    prob.obsvalues = prob.simvalues + random.normal(0,0.1,len(prob.simvalues))

    # Run MCMC with 100000 samples burning (discarding) the first 10000
    M = prob.MCMC(nruns=100000,burn=10000)

    # Plot results, PNG files will be created in current directory
    prob.MCMCplot(M)
Example #26
def calibrate(params,x,data):
    pc = matk.matk(model=sine_decay,model_args=(x,data,))

    # Create parameters
    pc.add_par('amp', value=10, min=0.)
    pc.add_par('decay', value=0.1)
    pc.add_par('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
    pc.add_par('omega', value=3.0)

    # Create observation names and set observation values
    for i in range(len(data)):
        pc.add_obs('obs'+str(i+1), value=data[i])

    # Set initial values
    pc.parvalues = params.values()

    # Calibrate
    pc.lmfit(report_fit=False)
    return pc.parvalues.tolist() + [pc.ssr]
Example #27
File: mcmc.py  Project: 5l1v3r1/matk
def run():
    # Create matk object
    prob = matk.matk(model=f)

    # Add parameters with 'true' parameters
    prob.add_par('a', min=0, max=10, value=2)
    prob.add_par('c', min=0, max=30, value=5)

    # Run model using 'true' parameters
    prob.forward()

    # Create 'true' observations with zero mean, 0.1 st. dev. gaussian noise added
    prob.obsvalues = prob.simvalues + random.normal(0, 0.1, len(
        prob.simvalues))

    # Run MCMC with 100000 samples burning (discarding) the first 10000
    M = prob.MCMC(nruns=100000, burn=10000)

    # Plot results, PNG files will be created in current directory
    prob.MCMCplot(M)
Example #28
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=dbexpl)
    p.add_par('par1', min=0, max=1)
    p.add_par('par2', min=0, max=0.2)
    p.add_par('par3', min=0, max=1)
    vals = numpy.linspace(0, 0.2, 21)
    probs = [1. / 20.] * 21

    p.add_par('par4', discrete_vals=(vals, probs))

    # Create LHS sample
    s = p.lhs('lhs', siz=500, seed=1000)

    # Look at sample parameter histograms, correlations, and panels
    s.samples.hist(ncols=2, title='Parameter Histograms by Counts')
    s.samples.hist(ncols=2,
                   title='Parameter Histograms by Frequency',
                   frequency=True)
    parcor = s.samples.corr(plot=True, title='Parameter Correlations')
    s.samples.panels(title='Parameter Panels')

    # Run model with parameter samples
    s.run(cpus=2, outfile='results.dat', logfile='log.dat', verbose=False)

    # Look at response histograms, correlations, and panels
    s.responses.hist(ncols=3, title='Model Response Histograms by Counts')
    s.responses.hist(ncols=3,
                     title='Model Response Histograms by Frequency',
                     frequency=True)
    rescor = s.responses.corr(plot=True, title='Model Response Correlations')
    s.responses.panels(title='Response Panels')

    # Print and plot parameter/response correlations
    print "\nPearson Correlation Coefficients:"
    pcorr = s.corr(plot=True, title='Pearson Correlation Coefficients')
    print "\nSpearman Correlation Coefficients:"
    scorr = s.corr(plot=True,
                   type='spearman',
                   title='Spearman Rank Correlation Coefficients')
    s.panels(figsize=(10, 8))
Example #29
File: ext_sim.py  Project: 5l1v3r1/matk
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=fehm)
    p.add_par('por0', min=0.1, max=0.3)

    # Create LHS sample
    s = p.parstudy(nvals=[3])

    # Run model with parameter samples
    s.run(ncpus=2,
          workdir_base='workdir',
          outfile='results.dat',
          logfile='log.dat',
          verbose=False,
          reuse_dirs=True)

    # Look at response histograms, correlations, and panels
    print 'Parameter Response'
    for pa, re in zip(s.samples.values, s.responses.values):
        print pa[0], re[0]
    s.responses.hist(ncols=2, title='Model Response Histograms')
Example #30
def run():
    # create data to be fitted
    x = np.linspace(0, 15, 301)
    np.random.seed(1000)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
            np.random.normal(size=len(x), scale=0.2) )

    # Create MATK object
    p = matk.matk(model=sine_decay, model_args=(x,data,))

    # Create parameters
    p.add_par('amp', value=10, min=0.)
    p.add_par('decay', value=0.1)
    p.add_par('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
    p.add_par('omega', value=3.0)

    # Create observation names and set observation values
    for i in range(len(data)):
        p.add_obs('obs'+str(i+1), value=data[i])

    # Look at initial fit
    p.forward()
    f, (ax1,ax2) = plt.subplots(2,sharex=True)
    ax1.plot(x,data, 'k+')
    ax1.plot(x,p.simvalues, 'r')
    ax1.set_ylabel("Model Response")
    ax1.set_title("Before Calibration")

    # Calibrate parameters to data, results are printed to screen
    res,pars,evals = p.lmfit(cpus=2,verbose=True,save_evals=True)
    evals.savetxt('evals.txt',sse=True)

    # Look at calibrated fit
    ax2.plot(x,data, 'k+')
    ax2.plot(x,p.simvalues, 'r')
    ax2.set_ylabel("Model Response")
    ax2.set_xlabel("x")
    ax2.set_title("After Calibration")
    plt.show(block=True)
Example #31
 def testemcee2(self):
     self.m = matk.matk(model=fmcmc)
     # Add parameters with 'true' parameters
     self.m.add_par('a', min=0, max=10, value=2)
     self.m.add_par('c', min=0, max=30, value=5)
     self.m.add_par('var', min=0, max=1)
     # Run model using 'true' parameters
     self.m.forward()
     # Create 'true' observations with zero mean, 0.5 st. dev. gaussian noise added
     self.m.obsvalues = self.m.simvalues + numpy.random.normal(0,0.5,len(self.m.simvalues))
     # Run emcee with 10000 samples, burning (discarding) the first 1000
     pos0 = [[2+numpy.random.normal(0, 1),5+numpy.random.normal(0, 1),0.5+numpy.random.normal(0, 0.1)] for i in range(10)]
     #lnprob = matk.logposteriorwithvariance(self.m)
     #print lnprob([2., 5., 10.])
     #print lnprob([2., 8., 10.])
     samples = self.m.emcee(lnprob=matk.logposteriorwithvariance(self.m), nwalkers=10, nsamples=10000, burnin=1000, pos0=pos0)
     #print samples.shape
     mean_a, mean_c, mean_sig = numpy.mean(samples, 0)
     mean_sig = numpy.sqrt(mean_sig)
     self.assertTrue( abs(mean_a - 2.) < 0.2, 'Mean of parameter a is not close to 2: mean(a) = ' + str(mean_a) )
     self.assertTrue( abs(mean_c - 5.) < 1., 'Mean of parameter c is not close to 5: mean(c) = ' + str(mean_c) )
     self.assertTrue( abs(mean_sig - 0.5) < 0.2, 'Mean of model error std. dev. is not close to 0.5: mean(sig) = ' + str(mean_sig) )
Example #32
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=dbexpl)
    p.add_par('par1', min=0, max=1)
    p.add_par('par2', min=0, max=0.2)
    p.add_par('par3', min=0, max=1)
    p.add_par('par4', min=0, max=0.2)

    # Create LHS sample
    s = p.lhs('lhs', siz=500, seed=1000)

    # Run model with parameter samples
    s.run(cpus=2, outfile='results.dat', logfile='log.dat', verbose=False)

    # Save stats for all parameters and responses, use default quantiles
    s.savestats('sampleset.stats')
    # Save stats just for parameters
    s.samples.savestats('parameters.stats')
    # Save stats just for responses
    s.responses.savestats('responses.stats')
    # Specify quantiles
    s.savestats('sampleset_qs.stats', q=[25, 50, 75])
Example #33
def run():
    # Setup MATK model with parameters
    p = matk.matk(model=dbexpl)
    p.add_par('par1',min=0,max=1)
    p.add_par('par2',min=0,max=0.2)
    p.add_par('par3',min=0,max=1)
    p.add_par('par4',min=0,max=0.2)

    # Create LHS sample
    s = p.lhs('lhs', siz=500, seed=1000)

    # Run model with parameter samples
    s.run( cpus=2, outfile='results.dat', logfile='log.dat',verbose=False)

    # Save stats for all parameters and responses, use default quantiles
    s.savestats('sampleset.stats')
    # Save stats just for parameters
    s.samples.savestats('parameters.stats')
    # Save stats just for responses
    s.responses.savestats('responses.stats')
    # Specify quantiles
    s.savestats('sampleset_qs.stats',q=[25,50,75])
Example #34
 def testmcmc(self):
     try:
         import pymc
     except:
         print "\nPymc module not installed"
         print "Skipping mcmc unittest"
         return
     self.m = matk.matk(model=fmcmc)
     # Add parameters with 'true' parameters
     self.m.add_par('a', min=0, max=10, value=2)
     self.m.add_par('c', min=0, max=30, value=5)
     # Run model using 'true' parameters
     self.m.forward()
     # Create 'true' observations with zero mean, unit st. dev. gaussian noise added
     self.m.obsvalues = self.m.simvalues + numpy.random.normal(0,1,len(self.m.simvalues))
     # Run MCMC with 10000 samples, burning (discarding) the first 1000
     M = self.m.MCMC(nruns=10000,burn=1000, verbose=-1)
     mean_a = M.trace('a').stats()['mean']
     mean_c = M.trace('c').stats()['mean']
     mean_sig = M.trace('error_std').stats()['mean']
     self.assertTrue( abs(mean_a - 2.) < 0.2, 'Mean of parameter a is not close to 2: mean(a) = ' + str(mean_a) )
     self.assertTrue( abs(mean_c - 5.) < 1., 'Mean of parameter c is not close to 5: mean(c) = ' + str(mean_c) )
     self.assertTrue( abs(mean_sig - 1) < 1., 'Mean of model error std. dev. is not close to 1: mean(sig) = ' + str(mean_sig) )
Example #35
def run_extern(params):
    pest_io.tpl_write(params, '../sine.tpl', 'sine.py')
    ierr = call('python sine.py', shell=True)
    out = pickle.load(open('sine.pkl', 'rb'))
    return out


# create data to be fitted
x = np.linspace(0, 15, 301)
np.random.seed(1000)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x * x * 0.025) +
        np.random.normal(size=len(x), scale=0.2))

# Create MATK object
p = matk(model=run_extern)

# Create parameters
p.add_par('amp', value=10, min=0.)
p.add_par('decay', value=0.1)
p.add_par('shift', value=0.0, min=-np.pi / 2., max=np.pi / 2.)
p.add_par('omega', value=3.0)

# Create observation names and set observation values
for i in range(len(data)):
    p.add_obs('obs' + str(i + 1), value=data[i])

# Look at initial fit
init_vals = p.forward(workdir='initial', reuse_dirs=True)
f, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(x, data, 'k+')
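sine.tpl and the generated sine.py are not shown in this listing. For orientation, here is a hypothetical sketch of what the generated sine.py might contain once pest_io.tpl_write has substituted concrete parameter values (the numbers below are placeholders, not values from the project):

# Hypothetical sine.py produced from sine.tpl; the real file comes from pest_io.tpl_write
import pickle
import numpy as np

amp, decay, shift, omega = 10.0, 0.1, 0.0, 3.0        # placeholder values written in by tpl_write
x = np.linspace(0, 15, 301)
model = amp * np.sin(omega * x + shift) * np.exp(-x * x * decay)
obs = dict(('obs' + str(i + 1), v) for i, v in enumerate(model))
pickle.dump(obs, open('sine.pkl', 'wb'))              # read back by run_extern above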
Example #36
File: sample.py  Project: amanzi/ats-dev
    # x,z = 7.17946807, 4.65764252
    # index for this location is 1733, see below how to find this
    # Create output dictionary that matches MATK observations
    out = {}
    out['Sl'] = f[u'saturation_liquid.cell.0/'+k[-1]][1733]
    out['T'] = f[u'temperature.cell.0/'+k[-1]][1733]

    # Return simulated values of interest
    return out

# Create host dictionary so that cpu sets can be explicitly defined for ats runs
# The host (dictionary key) 'dum' will be ignored in this case
# The list of strings will be used as the cpu sets for the runs
hosts = {'dum':['0,1,2,3','4,5,6,7','8,9,10,11']}
# Create MATK object specifying the 'model' function above as the model
p = matk(model=model)

# Add some parameters
# Mineral soil porosity
p.add_par('poro_m',min=0.586, max=0.606, value=0.596)
# Peat porosity
p.add_par('poro_p', min=0.866, max=0.886, value=0.876)
# Mineral soil permeability
p.add_par('perm_m',min=-13.5, max=-12.5, value=-13)
# Peat permeability
p.add_par('perm_p', min=-12.5, max=-11.5, value=-12)

# Create observations
# Saturation
p.add_obs('Sl',value=0.5)
# Temperature
Example #37
from matk import matk


# Create function
def fun(pars):
    o = (pars['x1'] - 1)**2 + (pars['x2'] - 2.5)**2
    return -o


# Set inequality constraints
cons = ({
    'type': 'ineq',
    'fun': lambda x: x[0] - 2 * x[1] + 2
}, {
    'type': 'ineq',
    'fun': lambda x: -x[0] - 2 * x[1] + 6
}, {
    'type': 'ineq',
    'fun': lambda x: -x[0] + 2 * x[1] + 2
})

p = matk(model=fun)

p.add_par('x1', min=0, value=2)
p.add_par('x2', min=0, value=0)
p.add_obs('obs1', value=0)
r = p.minimize(constraints=cons)

print "x1 should be 1.4: ", r['x'][0]
print "x2 should be 1.7: ", r['x'][1]
Example #38
import numpy as np
import matplotlib.pyplot as plt
import matk
import pyemu
from mat_handler import matrix, cov


def fv(a):
    a0 = a['a0']
    a1 = a['a1']
    a2 = a['a2']
    X = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
    out = a0 / (1. + a1 * np.exp(X * a2))
    return out
    #obsnames = ['obs'+str(i) for i in range(1,len(out)+1)]
    #return dict(zip(obsnames,out))


p = matk.matk(model=fv)
p.add_par('a0', value=0.7)
p.add_par('a1', value=10.)
p.add_par('a2', value=-0.4)

J = p.Jac()

print np.dot(J.T, J)

m = matrix(x=J, row_names=p.obsnames, col_names=p.parnames)
parcov = cov(np.linalg.inv(np.dot(J.T, J)), names=p.parnames)
obscov = cov(np.linalg.inv(np.dot(J, J.T)), names=p.obsnames)

la = pyemu.errvar(jco=m, parcov=parcov, obscov=obscov)

s = la.qhalfx.s
Example #39
from matk import matk
from scipy.optimize import rosen
import numpy as np

def myrosen(pars):
    return rosen(pars.values())

p = matk(model=myrosen)

p.add_par('p1',min=0,max=2)
p.add_par('p2',min=0,max=2)
p.add_par('p3',min=0,max=2)
p.add_par('p4',min=0,max=2)
p.add_obs('o1',value=0)

result = p.differential_evolution()

print "Rosenbrock problem:"
print "Parameters should be all ones: ", result.x
print "Objective function: ", result.fun

def ackley(pars):
    x = pars.values()
    arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
    arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
    return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e

p2 = matk(model=ackley)

p2.add_par('p1',min=-5,max=5)
p2.add_par('p2',min=-5,max=5)
Example #40
#hostnames.remove(host) # Remove host to use as designated master if desired

# Create dictionary of lists of processor ids to use keyed by hostname
hosts = {}
for h in hostnames:
    hosts[h] = range(0,16,6) # create lists of processor numbers for each host
print 'host dictionary: ', hosts

# create data to be fitted
x = np.linspace(0, 15, 301)
np.random.seed(1000)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
        np.random.normal(size=len(x), scale=0.2) )

# Create MATK object
p = matk(model=run_extern)

# Create parameters
p.add_par('amp', value=10, min=0.)
p.add_par('decay', value=0.1)
p.add_par('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
p.add_par('omega', value=3.0)

# Create observation names and set observation values
for i in range(len(data)):
    p.add_obs('obs'+str(i+1), value=data[i])

# Look at initial fit
init_vals = p.forward(workdir='initial',hostname=hosts.keys()[0],processor=0,reuse_dirs=True)
plt.plot(x,data, 'k+')
plt.plot(x,p.sim_values, 'r')
Example #41
from matk import matk
from scipy.optimize import rosen
import numpy as np


def myrosen(pars):
    return rosen(pars.values())


p = matk(model=myrosen)

p.add_par('p1', min=0, max=2)
p.add_par('p2', min=0, max=2)
p.add_par('p3', min=0, max=2)
p.add_par('p4', min=0, max=2)
p.add_obs('o1', value=0)

result = p.differential_evolution()

print "Rosenbrock problem:"
print "Parameters should be all ones: ", result.x
print "Objective function: ", result.fun


def ackley(pars):
    x = pars.values()
    arg1 = -0.2 * np.sqrt(0.5 * (x[0]**2 + x[1]**2))
    arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
    return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e

Example #42
    decay = params['decay']

    model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)

    obsnames = ['obs'+str(i) for i in range(1,len(data)+1)]
    return dict(zip(obsnames,model))


# create data to be fitted
x = np.linspace(0, 15, 301)
np.random.seed(1000)
data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
        np.random.normal(size=len(x), scale=0.2) )

# Create MATK object
p = matk.matk(model=sine_decay, model_args=(x,data,))

# Create parameters
p.add_par('amp', value=10, min=5., max=15.)
p.add_par('decay', value=0.1, min=0, max=10)
p.add_par('shift', value=0.0, min=-np.pi/2., max=np.pi/2.)
p.add_par('omega', value=3.0, min=0, max=10)

# Create observation names and set observation values
for i in range(len(data)):
    p.add_obs('obs'+str(i+1), value=data[i])

# Look at initial fit
p.forward()
f, (ax1,ax2) = plt.subplots(2,sharex=True)
ax1.plot(x,data, 'k+')
Example #43
from matk import matk

# Create function
def fun(pars):
    o = (pars['x1'] - 1)**2 + (pars['x2'] - 2.5)**2
    return -o

# Set inequality constraints
cons = ({'type': 'ineq', 'fun': lambda x:  x[0] - 2 * x[1] + 2},
    {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
    {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})

p = matk(model=fun)

p.add_par('x1',min=0,value=2)
p.add_par('x2',min=0,value=0)
p.add_obs('obs1',value=0)
r = p.minimize(constraints=cons)


print "x1 should be 1.4: ", r['x'][0]
print "x2 should be 1.7: ", r['x'][1]

Example #44
    atsxml.run(m, nproc=1, mpiexec='mpirun', stdout='stdout.out', stderr='stdout.err', cpuset=processor)
    return True


# Create cpusets, 4 cpus to a set
# This is necessary so that the ATS runs are spread evenly over processors and don't stack up on a few of them
# On our servers, the host key ('dum' below) isn't necessary since we run all jobs on the same server.
# On clusters, you may be sending different runs to different hosts (computers). 
# In that case, the dictionary keys are important for indicating the host.
# The dictionary values (lists of integers) identify which processors to put each ATS run.
njobs = 32
nparams = 5
hosts = {'dum': map(str, range(njobs))}

# Instantiate MATK object specifying the "model" function defined above as the MATK "model"
p = matk(model=model)

# Add parameters that you want to sample over and their ranges
p.add_par('bac', min=0.01, max=0.22, value=0.1)
p.add_par('bct',min=0.02, max=0.4, value=0.14)
p.add_par('Kac',min=1.03e-3, max=2.8e-3, value=1.92e-3)
p.add_par('Kct',min=2.52e-6, max=3.51e-5, value = 5e-6)
p.add_par('Kmn',min=2.09e-6, max=1.25e-5, value = 5e-6)

# Create matrix of parameter combinations
ac = [0.01,0.1]
ct = [0.32,0.5]

Kac = [1.05e-11,1.57e-10]
Kct = [1.70e-12,3.90e-12]
Kmn = [1.05e-15,7.11e-14]
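The snippet cuts off before the combination matrix is used. One plausible continuation, assembling all 2**5 = 32 combinations (matching njobs above) and handing them to MATK via create_sampleset as in the earlier examples, is sketched below; this is an assumption, not the project's actual code:

import itertools

# Hypothetical continuation: all 2**5 = 32 combinations of the two-value lists above
combos = list(itertools.product(ac, ct, Kac, Kct, Kmn))
s = p.create_sampleset(combos)    # one sample per combination, in add_par order
s.run(workdir_base='workdir', outfile='results.dat', logfile='log.dat', verbose=False)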
Example #45
from mat_handler import matrix,cov
import numpy as np
import matplotlib.pyplot as plt
import matk
import pyemu

def fv(a):
    a0 = a['a0']
    a1 = a['a1']
    a2 = a['a2']
    X = np.array([1.,2.,3.,4.,5.,6.,7.,8.,9.,10.,11.,12.])
    out = a0 / (1. + a1 * np.exp( X * a2))
    return out
    #obsnames = ['obs'+str(i) for i in range(1,len(out)+1)]
    #return dict(zip(obsnames,out))


p = matk.matk(model=fv)
p.add_par('a0', value=0.7)
p.add_par('a1', value=10.)
p.add_par('a2', value=-0.4)

J = p.Jac()

print np.dot(J.T,J)

m = matrix(x=J,row_names=p.obsnames,col_names=p.parnames)
parcov = cov(np.linalg.inv(np.dot(J.T,J)),names=p.parnames)
obscov = cov(np.linalg.inv(np.dot(J,J.T)),names=p.obsnames)

la = pyemu.errvar(jco=m,parcov=parcov,obscov=obscov)

s = la.qhalfx.s