Example #1
    def calibrate( self, cpus=1, maxiter=100, lambdax=0.001, minchange=1.0e-16, minlambdax=1.0e-6, verbose=False,
                  workdir=None, reuse_dirs=False, h=1.e-6):
        """ Calibrate MATK model using Levenberg-Marquardt algorithm based on 
            original code written by Ernesto P. Adorio PhD. 
            (UPDEPP at Clarkfield, Pampanga)

            :param cpus: Number of cpus to use
            :type cpus: int
            :param maxiter: Maximum number of iterations
            :type maxiter: int
            :param lambdax: Initial Marquardt lambda
            :type lambdax: fl64
            :param minchange: Minimum change between successive ChiSquares
            :type minchange: fl64
            :param minlambdax: Minimum lambda value
            :type minlambdax: fl64
            :param verbose: If True, additional information written to screen during calibration
            :type verbose: bool
            :param workdir: name of directory in which to run the model (used for parallel or file-based runs)
            :type workdir: str
            :param reuse_dirs: if True, existing run directories will be reused
            :type reuse_dirs: bool
            :param h: increment used in the finite-difference approximation of derivatives
            :type h: fl64
            :returns: best fit parameters found by routine
            :returns: best Sum of squares.
            :returns: covariance matrix
        """
        from minimizer import Minimizer
        fitter = Minimizer(self)
        return fitter.calibrate(cpus=cpus, maxiter=maxiter, lambdax=lambdax, minchange=minchange,
                                minlambdax=minlambdax, verbose=verbose, workdir=workdir,
                                reuse_dirs=reuse_dirs, h=h)
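As a usage sketch (not from the source): a calibrate call like this is normally driven by an MATK problem whose model returns a dict of observation values. The matk.matk, add_par, and add_obs names follow MATK's documented interface, but the decay model and all values below are made up.

import numpy as np
import matk

t = np.linspace(0, 10, 21)

def decay_model(p):
    # MATK models return a dict keyed by observation names.
    sim = p['amp'] * np.exp(-p['rate'] * t)
    return dict(('obs%d' % i, v) for i, v in enumerate(sim))

prob = matk.matk(model=decay_model)
prob.add_par('amp', value=0.5, min=0.0, max=2.0)
prob.add_par('rate', value=0.1, min=0.0, max=1.0)

# Synthetic "measured" data generated with amp=1.0, rate=0.3:
for i, v in enumerate(1.0 * np.exp(-0.3 * t)):
    prob.add_obs('obs%d' % i, value=v)

prob.calibrate(maxiter=100, lambdax=0.001, verbose=True)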
Example #2
	def test_three_dim_polynomial(self):
		class ThreeDimPolynomial(Function):
			"""
			2x^2 + x + 2y^2 - y = z
			Minimum where 4x+1 == 0 and 4y-1 == 0, i.e. (x, y) = (-0.25, 0.25)
			"""

			def value_and_gradient(self, point):
				x, y = point['x'], point['y']
				value = 2*x**2 + x + 2*y**2 - y
				gradient = Counter({'x': 4*x + 1, 'y': 4*y - 1})
				return (value, gradient)

			def value(self, point):
				x, y = point['x'], point['y']
				return 2*x**2 + x + 2*y**2 - y

		threedimfunc = ThreeDimPolynomial()

		start = Counter()
		start['x'] = 0
		start['y'] = 0

		min_point = Minimizer.minimize(threedimfunc, start, quiet=True)

		self.assertAlmostEqual(min_point['x'], -0.25, 3)
		self.assertAlmostEqual(min_point['y'], 0.25, 3)
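Taken together, these tests pin down the interface: Minimizer.minimize(function, start, quiet) repeatedly queries value_and_gradient and moves every coordinate against its partial derivative. A minimal fixed-step sketch of such a minimizer (the real class behind these tests presumably line-searches; plain dicts stand in for its Counter type):

class GradientDescentMinimizer:
    """Minimal stand-in for the Minimizer interface used in the tests above."""
    max_iterations = 1000
    step_size = 0.01    # fixed step; a production minimizer would line-search
    tolerance = 1e-12   # stop once the squared gradient norm is this small

    @staticmethod
    def minimize(function, start, quiet=False):
        point = dict(start)
        for iteration in range(GradientDescentMinimizer.max_iterations):
            value, gradient = function.value_and_gradient(point)
            if sum(g * g for g in gradient.values()) < GradientDescentMinimizer.tolerance:
                break
            if not quiet:
                print(iteration, value)
            # Move each coordinate downhill along its partial derivative.
            point = {k: v - GradientDescentMinimizer.step_size * gradient.get(k, 0.0)
                     for k, v in point.items()}
        return point

Run against ThreeDimPolynomial with start {'x': 0, 'y': 0}, this converges to (-0.25, 0.25), matching the assertions above.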
Example #3
    def test_three_dim_polynomial(self):
        class ThreeDimPolynomial(Function):
            """
			2x^2 - y - 2y^2 + x = z
			Minimum at (4x+1==0, 4y-1==0)
			"""
            def value_and_gradient(self, point):
                x, y = point['x'], point['y']
                value = 2 * x**2 + x + 2 * y**2 - y
                gradient = Counter({'x': 4 * x + 1, 'y': 4 * y - 1})
                return (value, gradient)

            def value(self, point):
                x, y = point['x'], point['y']
                return 2 * x**2 + x + 2 * y**2 - y

        threedimfunc = ThreeDimPolynomial()

        start = Counter()
        start['x'] = 0
        start['y'] = 0

        min_point = Minimizer.minimize(threedimfunc, start, quiet=True)

        self.assertAlmostEqual(min_point['x'], -0.25, 3)
        self.assertAlmostEqual(min_point['y'], 0.25, 3)
Example #4
    def train_with_features(self, labeled_features, sigma=None, quiet=False):
        print("Optimizing weights...")
        weight_function = MaxEntWeightFunction(labeled_features, self.labels, self.features)
        weight_function.sigma = sigma

        print("Building initial dictionary...")
        initial_weights = CounterMap()

        print("Training on %d labelled features" % len(labeled_features))

        print("Minimizing...")
        self.weights = Minimizer.minimize(weight_function, initial_weights, quiet=quiet)
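sigma is handed straight to MaxEntWeightFunction; in maximum-entropy training a parameter of that name conventionally sets the width of a Gaussian prior over the weights, i.e. an L2 penalty added to the objective. A sketch of that convention (the helper below is illustrative, not part of the source):

def gaussian_prior_penalty(weights, sigma):
    """L2 penalty sum(w^2) / (2 sigma^2) and its gradient w / sigma^2."""
    penalty = sum(w * w for w in weights.values()) / (2.0 * sigma ** 2)
    gradient = {k: w / sigma ** 2 for k, w in weights.items()}
    return penalty, gradient

# gaussian_prior_penalty({'f1': 2.0, 'f2': -1.0}, sigma=1.0) -> (2.5, {'f1': 2.0, 'f2': -1.0})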
Example #5
def feffit(params, datasets, _larch=None, rmax_out=10,
           path_outputs=True, **kws):

    def _resid(params, datasets=None, _larch=None, **kws):
        """ this is the residua function """
        # debug: uncomment to inspect the parameters
        # for i in dir(params):
        #     print(i, getattr(params, i))
        return concatenate([d.residual() for d in datasets])

    if isinstance(datasets, FeffitDataSet):
        datasets = [datasets]
    for ds in datasets:
        if not isinstance(ds, FeffitDataSet):
            print "feffit needs a list of FeffitDataSets"
            return
    fitkws = dict(datasets=datasets)
    fit = Minimizer(_resid, params, fcn_kws=fitkws, _larch=_larch)
    fit.leastsq()
    # scale uncertainties to sqrt(n_idp - n_varys)
    n_idp = 0
    for ds in datasets:
        n_idp += ds.transform.n_idp
    err_scale = sqrt(n_idp - params.nvarys)
    for name in dir(params):
        p = getattr(params, name)
        if isParameter(p) and p.vary:
            p.stderr *= err_scale

    # here we create outputs:
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)

    out = larch.Group(name='feffit fit results',
                      fit=fit,
                      params=params,
                      datasets=datasets)

    return out
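The err_scale loop above inflates the stderr values reported by leastsq so that they reflect the number of independent points (n_idp) rather than the raw number of fitted samples. A small numeric illustration of the rescaling (all values made up):

from math import sqrt

n_idp = 18.6       # sum of transform.n_idp over the datasets (made-up value)
n_varys = 7        # number of varied parameters (made-up value)
raw_stderr = 0.012 # uncertainty as reported by leastsq (made-up value)

print(raw_stderr * sqrt(n_idp - n_varys))  # ~0.041, the rescaled uncertainty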
Example #6
    def train_with_features(self, labeled_features, sigma=None, quiet=False):
        print("Optimizing weights...")
        weight_function = MaxEntWeightFunction(labeled_features, self.labels,
                                               self.features)
        weight_function.sigma = sigma

        print("Building initial dictionary...")
        initial_weights = CounterMap()

        print("Training on %d labelled features" % len(labeled_features))

        print("Minimizing...")
        self.weights = Minimizer.minimize(weight_function,
                                          initial_weights,
                                          quiet=quiet)
Example #7
	def test_two_dim_polynomial(self):
		class TwoDimPolynomial(Function):
			"""
			2y^2 - 10y + 27
			Minimum at 4y-10 == 0: y = 2.5
			"""
			def value_and_gradient(self, point):
				value = 2 * (point['y']-2.5)**2 + 14.5 # 2(y-2.5)^2+14.5 => 2y^2 - 10y + 27
				gradient = Counter()
				gradient['y'] = 4 * point['y'] - 10
				return (value, gradient)

			def value(self, point):
				return 2 * (point['y']-2.5)**2 + 14.5

		Minimizer.max_iterations = 1000

		twodimfunc = TwoDimPolynomial()
		start = Counter()
		start['y'] = 0.0
		min_point = Minimizer.minimize(twodimfunc, start, quiet=True)

		self.assertAlmostEqual(min_point['y'], 2.5, 3)
Example #8
    def test_two_dim_polynomial(self):
        class TwoDimPolynomial(Function):
            """
			2x^2 - 10x + 27
			Minimum at 4x-10 = 0: x = 2.5
			"""
            def value_and_gradient(self, point):
                value = 2 * (point['y'] - 2.5)**2 + 14.5  # 2(y-2.5)^2 + 14.5 == 2y^2 - 10y + 27
                gradient = Counter()
                gradient['y'] = 4 * point['y'] - 10
                return (value, gradient)

            def value(self, point):
                return 2 * (point['y'] - 2.5)**2 + 14.5

        Minimizer.max_iterations = 1000

        twodimfunc = TwoDimPolynomial()
        start = Counter()
        start['y'] = 0.0
        min_point = Minimizer.minimize(twodimfunc, start, quiet=True)

        self.assertAlmostEqual(min_point['y'], 2.5, 3)
Example #9
    def __init__(self, pathlist, params, _larch=None, **kws):
        Minimizer.__init__(self, self._feffit, params, _larch=_larch, **kws)
Example #10
# PLS, the lens parameters (L1..L4, Bp1..Bp4, rp1..rp4), and `test` are
# assumed to be defined earlier in the script this snippet was taken from.
r0 = 1
TL1 = PLS.Variable('TL1', varMin=.5, varMax=1.5)
TL2 = PLS.Variable('TL2', varMin=.5, varMax=1.5)

PLS.set_Track_Length(TL1=TL1, TL2=TL2)
PLS.begin_Lattice()

PLS.add_Bend(np.pi, r0, .45)
PLS.add_Drift(L=test)
PLS.add_Lens(L4, Bp4, rp4)
PLS.add_Drift()
PLS.add_Combiner()
PLS.add_Drift()
PLS.add_Lens(L1, Bp1, rp1)
PLS.add_Drift(L=.05)
PLS.add_Bend(np.pi, r0, .45)
PLS.add_Drift(L=.05)
PLS.add_Lens(L2, Bp2, rp2)
PLS.add_Drift()
PLS.add_Lens(L3, Bp3, rp3)
PLS.add_Drift(L=.05)
PLS.end_Lattice()

minimizer = Minimizer(PLS)
minimizer.find_Global_Min(mut=.75,
                          crossPop=.7,
                          iterations=100,
                          herds=1,
                          popPerDim=20,
                          strategy='best/1')
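find_Global_Min's knobs (mut, crossPop, popPerDim, strategy='best/1') read like differential-evolution settings. For comparison only, here is the analogous call with SciPy's differential_evolution, a stand-in rather than the PLS library's own routine; the objective and bounds are illustrative:

import numpy as np
from scipy.optimize import differential_evolution

def objective(x):
    # Stand-in cost over the two track-length knobs (illustrative only).
    TL1, TL2 = x
    return (TL1 - 1.0)**2 + (TL2 - 1.0)**2

result = differential_evolution(
    objective,
    bounds=[(0.5, 1.5), (0.5, 1.5)],  # varMin/varMax of TL1 and TL2 above
    strategy='best1bin',              # closest SciPy analogue of 'best/1'
    mutation=0.75,                    # cf. mut=.75
    recombination=0.7,                # cf. crossPop=.7
    popsize=20,                       # cf. popPerDim=20
    maxiter=100,                      # cf. iterations=100
)
print(result.x, result.fun)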
Example #11
    dataloader = load_images(True)

    netG = create_generator()
    netD = create_discriminator()
    optimizerD, optimizerG = create_optimizers(netD, netG)

    criterion = nn.BCELoss()
    fixed_noise = torch.randn(
        64, nz, 1, 1, device=device
    )  # Latent vectors to visualize the progress of the generator

    real_label = 1
    fake_label = 0

    D_losses, G_losses, img_list = Minimizer.train(dataloader, netD, netG,
                                                   optimizerD, optimizerG,
                                                   criterion, num_epochs,
                                                   device, nz, fixed_noise)

    plt.figure(figsize=(10, 5))
    plt.title("Generator and Discriminator Loss During Training")
    plt.plot(G_losses, label="G")
    plt.plot(D_losses, label="D")
    plt.xlabel("iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()

    # %%capture
    fig = plt.figure(figsize=(8, 8))
    plt.axis("off")
    ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)]
           for i in img_list]  # continuation reconstructed from the standard DCGAN-tutorial pattern
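    # -- Hypothetical continuation (not in the source): the ims frames are the
    # standard input to matplotlib's ArtistAnimation, DCGAN-tutorial style.
    import matplotlib.animation as animation

    ani = animation.ArtistAnimation(fig, ims, interval=1000,
                                    repeat_delay=1000, blit=True)
    ani.save("generator_progress.gif", writer="pillow")  # filename is illustrative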
Example #12
    def __init__(self, pathlist, params, _larch=None, **kws):
        Minimizer.__init__(self, self._feffit, params, _larch=_larch, **kws)
Example #13
def autobk(energy, mu, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, debug=False, _larch=None, **kws):

    """Use Autobk algorithm to remove XAFS background
    Options are:
      rbkg -- distance out to which the chi(R) is minimized
    """
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    if 'kw' in kws:
        kweight = kws['kw']

    energy = remove_dups(energy)

    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    if edge_step is None:
        if _larch.symtable.isgroup(group) and hasattr(group, 'edge_step'):
            edge_step = group.edge_step
    if e0 is None:
        if _larch.symtable.isgroup(group) and hasattr(group, 'e0'):
            e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        edge_step, e0 = pre_edge(energy, mu, group=group,
                                 _larch=_larch, **pre_kws)

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_nearest(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout),
    # and ftwin (k-weighting) for the FT in the residual
    kraw = np.sqrt(ETOK*(energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)

    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspline = max(4, min(60, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y  = np.zeros(nspline)
    spl_k  = np.zeros(nspline)
    spl_e  = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i*(kmax-kmin)/(nspline - 1)
        ik = index_nearest(kraw, q)

        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    ncoefs = len(coefs)
    params = Group()
    for i in range(ncoefs):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)

    fitkws = dict(ncoefs=len(coefs), chi_std=chi_std,
                  knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do fit
    fit = Minimizer(__resid, params, fcn_kws=fitkws, _larch=_larch, toler=1.e-4)
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(ncoefs)]

    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg  = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        group.bkg  = obkg
        group.chie = (mu-obkg)/edge_step
        group.k    = kout
        group.chi  = chi/edge_step
        if debug:
            group.spline_params = params
            ix_bkg = np.zeros(len(mu))
            ix_bkg[:ie0] = mu[:ie0]
            ix_bkg[ie0:] = initbkg
            group.init_bkg = ix_bkg
            group.init_chi = initchi/edge_step
            group.spline_e = spl_e
            group.spline_y = np.array([coefs[i] for i in range(nspline)])
            group.spline_yinit = spl_y
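For reference, the (knots, coefs, order) triple unpacked from splrep above is SciPy's standard tck B-spline representation, and perturbing entries of coefs, which is exactly what the fit parameters do here, reshapes the spline locally without moving the knots. A self-contained round trip:

import numpy as np
from scipy.interpolate import splrep, splev

x = np.linspace(0, 15, 12)
y = np.sin(x / 3.0)

knots, coefs, order = splrep(x, y)   # tck: knot vector, coefficients, degree (3)
dense = np.linspace(0, 15, 200)
smooth = splev(dense, (knots, coefs, order))

# Bump one coefficient, as the fit does when it varies the spline parameters:
bumped = np.array(coefs, dtype=float)
bumped[3] += 0.1
bumped_curve = splev(dense, (knots, bumped, order))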