Example #1
File: main2.py  Project: kolia/subunits
def single_objective_u():
    # Symbolic inputs and the outputs to be compiled for the objective in u.
    arg     = ['u','STA','STC','V2','v1','N_spike','T']
    result  = ['LQLEP_wPrior','dLQLEP_wPrior','barrier']
    # Build the symbolic model: linear reparameterization -> thetaM -> LQLEP -> barrier.
    vardict = LQLEP_wBarrier( **LQLEP( **thetaM( **linear_reparameterization())))
    # Gradient of the objective with respect to u only; all other quantities
    # (STA, STC, V2, v1, N_spike, T) are treated as constants.
    vardict['dLQLEP_wPrior'] = th.grad(cost = vardict['LQLEP_wPrior'],
                                       wrt  = vardict['u'],
                                       consider_constant = extract( vardict, arg[1:]))
    print 'Simplifying single objective_u...'
    sys.stdout.flush()
    t0 = time.time()
    # Simplify the symbolic graph, returning its input and output variables.
    inputs, outputs = kolia_theano.simplify( extract(vardict,arg), extract(vardict,result) )
    t1 = time.time()
    print 'done simplifying single objective_u in ', t1-t0, ' sec.'
    sys.stdout.flush()
    return inputs, outputs
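A note on the th.grad call above: it relies on Theano's consider_constant argument, so the gradient is taken with respect to u only while the statistics are treated as fixed data. A minimal standalone sketch of that pattern (the variables and cost below are illustrative, not from the project):

import numpy
import theano
import theano.tensor as T

u   = T.dvector('u')    # parameter we differentiate with respect to
sta = T.dvector('STA')  # statistic treated as constant data
cost = T.sum((u - sta) ** 2)

# Gradient with respect to u only; sta contributes no gradient terms of its own.
du = theano.grad(cost=cost, wrt=u, consider_constant=[sta])

f = theano.function([u, sta], du)
print(f(numpy.array([1., 2.]), numpy.array([.5, .5])))  # -> [ 1.  3.]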
Example #2
File: main.py  Project: kolia/subunits
def _test_LNP( rgc_type='off parasol' ):
    # Symbolic LNP model built from the linear reparameterization.
    vardict   = LNP( **thetaM( **linear_reparameterization()))
    init_LNP  = LNP_model( init_sv1(rgc_type) )
    indices = extract( linear_stats( rgc_type, (5,0) ), ['sparse_index', 'subunit_index'] )
    indices['N_subunits'] = len(cones)
    # Only sv1 is optimized; u and V2 are held fixed at their initial values.
    unknown = extract(init_LNP,['sv1'])
    train_LNP = global_objective( unknown, extract(init_LNP,['u','V2']),
                                  vardict, run=linear_stats( rgc_type, (5,0) ),
                                  indices=indices)
    train_LNP.with_callback(callback)
    train_LNP.description = 'LNP'
    sv1 = optimize.optimizer( train_LNP )( init_params=unknown, maxiter=5000, gtol=1e-7 )
    model = LNP_model( train_LNP.unflat( sv1 )['sv1'] )
    # Held-out log-likelihood, evaluated on the test split of the data.
    model['LL'] = global_objective( unknown, extract(init_LNP,['u','V2']),
                             vardict, run=linear_stats( rgc_type, (-5,0)),
                             indices=indices).LL(sv1)
    save(model,'LNP_'+rgc_type)
    return model
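The training step hands the objective to a gradient-based optimizer controlled by maxiter and gtol. The project's optimize.optimizer wrapper is not shown in these snippets; a rough SciPy equivalent of that calling pattern, with a purely illustrative objective and gradient, would be:

import numpy
from scipy.optimize import minimize

def objective(params):           # stand-in for the flattened training objective
    return numpy.sum(params ** 2)

def gradient(params):            # stand-in for its gradient
    return 2.0 * params

init_params = numpy.ones(10)
result = minimize(objective, init_params, jac=gradient, method='L-BFGS-B',
                  options={'maxiter': 5000, 'gtol': 1e-7})
sv1 = result.x                   # optimized parameter vector, analogous to sv1 above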
Example #3
File: main.py  Project: kolia/subunits
def optimize_LQLEP( rgc_type, filename=None, maxiter=maxiter, indices=None, description='',
    unknowns=['sv1','V2','u','uc','c','ud','d'],
    vardict = LQLEP_wBarrier( **LQLEP( **thetaM( **u2c_parameterization())))):
#    unknowns = {'sv1':sv1, 'V2':init_V2 , 'u':old['u'], 'uc':old['u'], 'c':init_V2}
    # Default initializations, restricted to the requested unknowns that appear
    # in vardict (sv1 is always included).
    defaults = extract( { 'sv1':init_sv1( rgc_type ), 'V2':init_V2 , 'u':init_u,
                          'uc':numpy.zeros_like(init_u), 'c':init_V2,
                          'ud':0.001*numpy.ones_like(init_u), 'd':0.0001+init_V2},
                        list( set(unknowns).intersection( set(vardict.keys()) ) ) + ['sv1'])
    if filename is not None:
        # Warm start: reload previously optimized unknowns, drop any that are no
        # longer requested, and fill missing entries from the defaults.
        print 'Re-optimizing',filename
        unknowns = kb.load(filename)
        for name in unknowns.keys():
            if not defaults.has_key(name): del unknowns[name]
        default(unknowns,defaults)
    else:
        unknowns = defaults
#    if rgc_type[:3] == 'off':
#        unknowns['u'] = -0.01*numpy.abs(unknowns['u'])
    # With a positivity barrier on V1, start sv1 positive; otherwise shrink it.
    if vardict.has_key('barrier_positiveV1'):
        unknowns['sv1'] = numpy.abs(unknowns['sv1'])
    else:
        unknowns['sv1'] = unknowns['sv1']*0.01
    # Training and held-out test objectives over the same unknowns.
    train_LQLEP = global_objective( unknowns, {}, vardict, run=linear_stats(rgc_type,( 5,0)), indices=indices)
    test_LQLEP  = global_objective( unknowns, {}, vardict, run=linear_stats(rgc_type,(-5,0)), indices=indices)
    test_LQLEP.description = 'Test_LQLEP'
    train_LQLEP.with_callback(partial(callback,other=
                 {'Test_LNP':test_LNP(rgc_type)['LL'], 'nonlinearity':test_LQLEP.nonlinearity},
                  objectives=[test_LQLEP]))
    train_LQLEP.description = description+rgc_type
    # Shrink V2 before optimizing, then report the trained result via the callbacks.
    unknowns['V2'] = unknowns['V2']*0.001
    trained = optimize_objective( train_LQLEP, unknowns, gtol=1e-10 , maxiter=maxiter)
    print 'RGC type:', rgc_type
    test_global_objective( train_LQLEP, trained )
    train_LQLEP.callback( trained, force=True )
    train_LQLEP.callback( train_LQLEP.unflat( trained ), force=True )
    return trained
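The helpers extract and default used throughout these examples are project utilities whose implementations are not shown in the snippets. Assuming they behave the way their call sites suggest (select a subset of keys from a dict; fill in missing entries in place), a hypothetical minimal sketch would be:

def extract(d, names):
    # Return a new dict containing only the requested keys (absent keys are skipped).
    return dict((name, d[name]) for name in names if name in d)

def default(d, defaults):
    # Fill missing entries of d in place from defaults, keeping existing values.
    for name, value in defaults.items():
        d.setdefault(name, value)
    return d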