Example 1
    def __init__(self, emunup, Imax=15, Jmax=21, Kmax=3):
        self.emunup = emunup
        self.zern = Zernike(self.emunup[0][0])
        self.Mu = self.emunup.shape[0]
        self.Nu = self.emunup.shape[1]
        self.Pmax = self.emunup.shape[2]
        self.Imax = Imax
        self.Jmax = Jmax
        self.Kmax = Kmax

        # get relevant matrices
        self.beta, self.alphaJ = self.make_matrices(self.Imax, self.Jmax)

        self.ndim = self.Imax * (self.Jmax + self.Kmax * self.Nu) + self.Mu
        p0 = 1e-2 * np.random.randn(self.ndim)
        p0[:3] = 1
        self.kmu, self.bij, self.cnuik = self.p_to_matrices(p0)
        self.loss_history = [np.inf]  # number, not object
        self.kmus = [self.kmu.copy()]
        self.bijs = [self.bij.copy()]
        self.cnuiks = [self.cnuik.copy()]
        self.logs = [[np.inf] * 3]
        self.ehat = fast_zernike.fast_calc(self.kmu, self.bij, self.cnuik, self.beta, self.alphaJ)
        self.ehats = [self.ehat.copy()]
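
The flat parameter vector p0 of length ndim = Imax * (Jmax + Kmax * Nu) + Mu is unpacked by p_to_matrices into kmu, bij, and cnuik. Below is a minimal sketch of one unpacking that is consistent with that length; the block ordering and axis layout are assumptions here, not the actual p_to_matrices implementation.

import numpy as np

def p_to_matrices_sketch(p, Mu, Nu, Imax, Jmax, Kmax):
    # hypothetical layout: kmu block, then bij block, then cnuik block
    kmu = p[:Mu]
    bij = p[Mu:Mu + Imax * Jmax].reshape(Imax, Jmax)
    cnuik = p[Mu + Imax * Jmax:].reshape(Nu, Imax, Kmax)
    return kmu, bij, cnuik

# quick shape check with the class defaults (Imax=15, Jmax=21, Kmax=3)
Mu, Nu, Imax, Jmax, Kmax = 3, 10, 15, 21, 3
p = np.random.randn(Imax * (Jmax + Kmax * Nu) + Mu)
kmu, bij, cnuik = p_to_matrices_sketch(p, Mu, Nu, Imax, Jmax, Kmax)
print(kmu.shape, bij.shape, cnuik.shape)  # (3,), (15, 21), (10, 15, 3)
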
Example 2
    def fit(self, xtol=1e-8, ftol=1e-6, maxiter=10000, step_size=None,
            maxfun=None, verbose=False, learning_rate_decay=0,
            **kwargs):
        """
        In the style of scipy.optimize:

        xtol : float, optional
            Relative error in xopt acceptable for convergence.
        ftol : float, optional
            Relative error in func(xopt) acceptable for convergence.
        maxiter : int, optional
            Maximum number of iterations to perform.
        maxfun : int, optional
            Maximum number of function evaluations to make.
            TODO: currently not implemented.
        step_size : float
            Gradient-descent step size (learning rate); required for the
            parameter updates below.
        learning_rate_decay : float, optional
            If step_size is specified, multiply step_size by
            (1 - learning_rate_decay) after every update.
        """
        # TODO: incorporate several different parameter update modes
        # (a standalone sketch of these rules follows this method)
        """
        if update == 'sgd':
          dx = -learning_rate * grads[p]
        elif update == 'momentum':
          if not p in self.step_cache:
            self.step_cache[p] = np.zeros(grads[p].shape)
          dx = np.zeros_like(grads[p]) # you can remove this after
          dx = momentum * self.step_cache[p] - learning_rate * grads[p]
          self.step_cache[p] = dx
        elif update == 'rmsprop':
          decay_rate = 0.99 # you could also make this an option
          if not p in self.step_cache:
            self.step_cache[p] = np.zeros(grads[p].shape)
          dx = np.zeros_like(grads[p]) # you can remove this after
          self.step_cache[p] = self.step_cache[p] * decay_rate + (1.0 - decay_rate) * grads[p] ** 2
          dx = -(learning_rate * grads[p]) / np.sqrt(self.step_cache[p] + 1e-8)
        """
        if step_size is None:
            raise ValueError('step_size must be specified for the gradient updates')
        for it in range(maxiter):
            if verbose:
                print(it, self.loss_history[-1])
            lnlike_old = self.loss_history[-1]

            self.ehat = fast_zernike.fast_calc(self.kmu, self.bij, self.cnuik, self.beta, self.alphaJ)
            self.dlogldb = fast_zernike.fast_calc_dlogldb(self.kmu, self.bij, self.cnuik, self.emunup, self.ehat, self.beta, self.alphaJ)
            self.dlogldc = fast_zernike.fast_calc_dlogldc(self.kmu, self.bij, self.cnuik, self.emunup, self.ehat, self.beta, self.alphaJ)
            self.dlogldk = fast_zernike.fast_calc_dlogldk(self.kmu, self.bij, self.cnuik, self.emunup, self.ehat, self.beta, self.alphaJ)
            lnlike = np.mean(np.square(self.emunup - self.ehat))
            # bail out if the loss increased appreciably (diverging run)
            if lnlike - lnlike_old > 1e-2 * lnlike_old:
                print(it, lnlike)
                return
            # record the current state, then take a gradient step
            self.kmus.append(self.kmu.copy())
            self.bijs.append(self.bij.copy())
            self.cnuiks.append(self.cnuik.copy())
            self.ehats.append(self.ehat.copy())
            self.logs.append([self.dlogldb.copy(), self.dlogldc.copy(), self.dlogldk.copy()])
            self.kmu -= step_size * self.dlogldk
            self.cnuik -= step_size * self.dlogldc
            self.bij -= step_size * self.dlogldb

            self.loss_history.append(lnlike)

            if step_size is not None:
                step_size *= 1 - learning_rate_decay

            # check changes
            if np.abs((lnlike - lnlike_old) / lnlike) < ftol:
                print('ftol reached')
                return
            # if np.all(asdf < xtol):
            #     print ('xtol reached')
            #     return
        print('maxiter reached')
        return
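
The TODO block inside fit() quotes pseudocode for three update rules (plain SGD, momentum, RMSProp). Here is a self-contained sketch of that pseudocode as a standalone helper; the function name, signature, and the step_cache dictionary are illustrative only and are not part of the class.

import numpy as np

def parameter_update(grad, key, step_cache, learning_rate,
                     update='sgd', momentum=0.9, decay_rate=0.99):
    # `key` identifies the parameter block (e.g. 'kmu', 'bij', 'cnuik')
    # inside the step_cache dict used by the stateful updates.
    if update == 'sgd':
        dx = -learning_rate * grad
    elif update == 'momentum':
        if key not in step_cache:
            step_cache[key] = np.zeros_like(grad)
        dx = momentum * step_cache[key] - learning_rate * grad
        step_cache[key] = dx
    elif update == 'rmsprop':
        if key not in step_cache:
            step_cache[key] = np.zeros_like(grad)
        step_cache[key] = decay_rate * step_cache[key] + (1.0 - decay_rate) * grad ** 2
        dx = -(learning_rate * grad) / np.sqrt(step_cache[key] + 1e-8)
    else:
        raise ValueError('unknown update mode: {0}'.format(update))
    return dx

# hypothetical use inside the fit loop, in place of the plain SGD steps above:
# self.kmu += parameter_update(self.dlogldk, 'kmu', self.step_cache, step_size, update='rmsprop')
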
Example 3
import numpy as np

# Assumes fast_zernike and GradientDissenter are importable, and that kmu, bij,
# cnuik and the dimensions Imax, Jmax were initialized earlier in the script
# (e.g. as zero arrays of the appropriate shapes).

# manually elevate important bij terms
bij[:3] = 0
bij[3, 3] = 1.
bij[4, 4] = 1.
bij[5, 5] = -1.
bij[6, 1] = 1.
bij[7, 2] = -1.
bij[8, 8] = 1
bij[9, 9] = 1
bij[10, 10] = 1
# set unphysical cnuik to zero
cnuik[:, :3] = 0
cnuik[:, 8:] = 0

beta, alphaJ = GradientDissenter.make_matrices(Imax, Jmax)
emunup = fast_zernike.fast_calc(kmu, bij, cnuik, beta, alphaJ)

x = np.linspace(-1, 1, 101)
y = np.linspace(-1, 1, 101)
x, y = np.meshgrid(x, y)
x = x.flatten()
y = y.flatten()
r = np.sqrt(x ** 2 + y ** 2)
conds = r < 1
x = x[conds]
y = y[conds]

gd = GradientDissenter(emunup)
# for i in range(3):
#     fig, ax = gd.plot(x, y, ep=emunup[i][0])
#     ax.set_title('e{0}'.format(i))
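
With the synthetic emunup built above, the fit could then be run along the lines below; the step size and iteration count are placeholder guesses, not values taken from the source.

gd.fit(step_size=1e-3, maxiter=2000, verbose=True)
print('final loss:', gd.loss_history[-1])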