Example #1
def test_gd_rosen():
    obj = Rosenbrock()
    opt = GradientDescent(obj.pars, obj.fprime, step_rate=0.01, momentum=.9)
    for i, info in enumerate(opt):
        if i > 5000:
            break
    assert (abs(1 - obj.pars) < 0.01).all(), 'did not find solution'
Example #2
def get_optimizer(wrt,
                  fprime,
                  optimize_method,
                  step_rate,
                  momentum=0.0,
                  decay=0.9,
                  **kwargs):
    '''Return a climin optimizer for `optimize_method` ('rmsprop', 'adam'
    or 'gd'); `decay` applies only to rmsprop, and extra keyword
    arguments are ignored.'''
    if optimize_method == 'rmsprop':
        optimizer = RmsProp(wrt=wrt,
                            fprime=fprime,
                            step_rate=step_rate,
                            decay=decay,
                            momentum=momentum)
    elif optimize_method == 'adam':
        optimizer = Adam(wrt=wrt, fprime=fprime, step_rate=step_rate)
    elif optimize_method == 'gd':
        optimizer = GradientDescent(wrt=wrt,
                                    fprime=fprime,
                                    step_rate=step_rate,
                                    momentum=momentum)
    else:
        raise ValueError('Cannot load predefined optimization method %s' %
                         optimize_method)
    return optimizer
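
A minimal usage sketch (assumed inputs: a flat numpy parameter array and the gradient of a simple quadratic; the factory itself is called exactly as defined above):

import numpy as np

pars = np.random.standard_normal(10)
grad = lambda x: 2 * x  # gradient of the quadratic (x**2).sum()
opt = get_optimizer(wrt=pars, fprime=grad, optimize_method='adam', step_rate=1e-2)
for i, info in enumerate(opt):
    if i > 100:
        break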
Example #3
def test_gd_quadratic():
    obj = Quadratic()
    opt = GradientDescent(obj.pars, obj.fprime, step_rate=0.01, momentum=.9)
    for i, info in enumerate(opt):
        if i > 500:
            break
    assert obj.solved(), 'did not find solution'
Example #4
def test_gd_continue():
    obj = LogisticRegression(n_inpt=2, n_classes=2)
    args = itertools.repeat(((obj.X, obj.Z), {}))
    opt = GradientDescent(
        obj.pars, obj.fprime, step_rate=0.01, momentum=.9,
        momentum_type='nesterov',
        args=args)

    continuation(opt)
Example #5
def test_gd_lr():
    obj = LogisticRegression()
    args = itertools.repeat(((obj.X, obj.Z), {}))
    opt = GradientDescent(
        obj.pars, obj.fprime, step_rate=0.01, momentum=.9, args=args)
    for i, info in enumerate(opt):
        if i > 500:
            break
    assert obj.solved(), 'did not find solution'
Example #6
    def test_carleo2D(self):
        el = []
        fname = 'data/eng-%s-%s%s.dat' % (self.nsite, self.model,
                                          'p' if self.periodic else 'o')
        # generate a random RBM and the corresponding vector v
        group = (TIGroup(self.nsite if not isinstance(self.h, HeisenbergH2D)
                         else 2 * [int(sqrt(self.nsite))])
                 if self.periodic else NoGroup())
        self.rbm = random_rbm(nin=self.nsite, nhid=self.nsite, group=group)
        self.rbm.var_mask = [False, True, True]

        # regularization for stochastic reconfiguration; alternatives:
        # reg_params = ('delta', {'lambda0': 1e-4})
        # reg_params = ('trunc', {'lambda0': 0.1, 'eps_trunc': 1e-5})
        # reg_params = ('identity', {})
        # reg_params = ('pinv', {})
        reg_params = ('carleo', {'lambda0': 100, 'b': 0.9})
        sr = SR(self.h, self.rbm, handler=self.vmc, reg_params=reg_params)
        # alternative optimizers:
        # optimizer = RmsProp(wrt=self.rbm.dump_arr(), fprime=sr.compute_gradient,
        #                     step_rate=1e-2, decay=0.9, momentum=0.9)
        # optimizer = Adam(wrt=self.rbm.dump_arr(), fprime=sr.compute_gradient,
        #                  step_rate=1e-2)
        optimizer = GradientDescent(wrt=self.rbm.dump_arr(),
                                    fprime=sr.compute_gradient,
                                    step_rate=1e-2, momentum=0.5)
        print('Running optimizer = %s, regularization = %s, nsite = %s, '
              'periodic = %s' % (optimizer, reg_params, self.nsite,
                                 self.periodic))
        self.rbm.a[...] = 0
        arr_old = self.rbm.dump_arr()
        for k, info in enumerate(optimizer):
            print('Running %s-th iteration.' % k)
            ei = sr._opq_vals[1] / self.nsite  # current energy per site
            print('E/site = %s' % ei)
            el.append(ei)
            if k > 500:
                break
            arr = self.rbm.dump_arr()
            print('diff rate = %s (norm = %s)' % (norm(arr - arr_old) / norm(arr_old),
                                                  norm(arr_old)))
            arr_old = arr
        savetxt(fname, el)
        # convergence proxy (an assumed tolerance check): relative fluctuation
        # of the last recorded energies; std/mean are assumed in scope from
        # numpy, like savetxt and sqrt above
        err = abs(std(el[-20:]) / mean(el[-20:]))
        assert_(err < 0.05)
Example #7
import numpy as np
from climin import GradientDescent

quadratic = lambda x: (x**2).sum()
quadraticprime = lambda x: 2 * x
dim = 10

### stop criteria in general, stops and pauses

# For this example, we use a very simple stop criterion,
# called `after_1000_iterations`, to illustrate how stop
# criteria work. There are two ways in which criteria can
# be used:

# The first is to check the stop criterion manually,
# which we will call a "pause".

# (assumed definition, so that the snippet runs: a criterion is
# just a callable that takes the info dictionary)
after_1000_iterations = lambda info: info['n_iter'] >= 1000

wrt = np.random.standard_normal((dim,)) * 10 + 5
opt = GradientDescent(wrt, quadraticprime, step_rate=0.01)

for info in opt:
    print("iteration %3i loss=%g" % (info['n_iter'], quadratic(wrt)))
    if after_1000_iterations(info):
        print("1000 iterations done.")
        break

# As you can see above, a stop criterion always takes the info
# dictionary as its argument and returns either True or False.
# Here we break if the criterion returns True, but that is not
# required. We could just as well react in some other way and
# afterwards continue the minimization. That is why this style
# is called a "pause".
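
# For instance, a "pause" could anneal the step rate and keep
# going instead of stopping. A minimal sketch under the same
# setup (the modulo condition here is ad hoc, not a climin API):
wrt = np.random.standard_normal((dim,)) * 10 + 5
opt = GradientDescent(wrt, quadraticprime, step_rate=0.01)
for info in opt:
    if info['n_iter'] > 0 and info['n_iter'] % 250 == 0:
        opt.step_rate *= 0.5          # react to the pause...
    if after_1000_iterations(info):   # ...and only stop here
        break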

# The second option is to run the optimizer until a stop
# criterion is met, without requiring control in between steps:
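
# A minimal sketch of that option, assuming climin's
# `minimize_until` helper, which takes a list of stop criteria
# and iterates until one of them fires (treat the exact
# signature as an assumption):
wrt = np.random.standard_normal((dim,)) * 10 + 5
opt = GradientDescent(wrt, quadraticprime, step_rate=0.01)
info = opt.minimize_until([after_1000_iterations])
print("stopped after %i iterations" % info['n_iter'])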