Exemplo n.º 1
0
def single_objective( param_templates , vardict , outputs ):
    arg     = set(['u','V2','STA','STC','v1','N_spike','T','Cm1','C','c','uc'])
    print 'Simplifying single objective...'
    sys.stdout.flush()
    t0 = time.time()
    params = extract(vardict,param_templates.keys())
    args   = extract(vardict,arg-set(param_templates.keys()))
    differentiate = []
    if outputs.has_key('f'): differentiate += ['f']
    obj = kolia_theano.Objective( params, param_templates, args, outputs,
                                  differentiate=differentiate, mode='FAST_RUN' )
    t1 = time.time()
    print 'done simplifying single objective for', param_templates.keys(),
    print 'in ', t1-t0, ' sec.'
    sys.stdout.flush()
    return obj
Exemplo n.º 2
0
def _test_LNP( rgc_type='off parasol' ):
    """Train an LNP model on training statistics, evaluate its test
    log-likelihood on held-out statistics, save and return the model."""
    vardict = LNP( **thetaM( **linear_reparameterization()))
    initial = LNP_model( init_sv1(rgc_type) )
    index_info = extract( linear_stats( rgc_type, (5,0) ),
                          ['sparse_index', 'subunit_index'] )
    index_info['N_subunits'] = len(cones)
    # Only sv1 is optimized; u and V2 are held fixed at their initial values.
    free = extract(initial, ['sv1'])
    trainer = global_objective( free, extract(initial, ['u','V2']),
                                vardict, run=linear_stats( rgc_type, (5,0) ),
                                indices=index_info )
    trainer.with_callback(callback)
    trainer.description = 'LNP'
    sv1 = optimize.optimizer( trainer )( init_params=free,
                                         maxiter=5000, gtol=1e-7 )
    model = LNP_model( trainer.unflat( sv1 )['sv1'] )
    # Test log-likelihood on the held-out run (skip pattern (-5,0)).
    model['LL'] = global_objective( free, extract(initial, ['u','V2']),
                                    vardict, run=linear_stats( rgc_type, (-5,0)),
                                    indices=index_info ).LL(sv1)
    save(model, 'LNP_'+rgc_type)
    return model
Exemplo n.º 3
0
def make_global_objective(unknowns,knowns,vardict,variables,outputs,indices,NRGC):
    t0 = time.time()
    print 'unknowns:',unknowns.keys()
    print 'knowns:',  knowns.keys()
    single_obj = single_objective( split_params( unknowns, 0, indices ), vardict, outputs)
    objectives = [single_obj.where({},**split_params(knowns,i,indices))
                                   for i in range(NRGC)]
    symbolic_params = extract(vardict,unknowns.keys())
    args   = extract(vardict,set(variables)-set(unknowns.keys()))
    global_obj = kolia_theano.Objective( symbolic_params, unknowns, args, {})
    global_obj = global_obj.where({'indices':indices}, **knowns)
#    ipdb.set_trace()
    for fname in outputs.keys():
        if hasattr(objectives[0],fname):
            setattr(global_obj,fname,partial(_sum_objectives, objectives, global_obj, fname))
            setattr(getattr(global_obj,fname),'__name__',fname)
#    test_global_objective( global_obj, unknowns )
    global_obj.description = ''
    print '... done preparing objective in', time.time()-t0,'sec.'
    return global_obj
Exemplo n.º 4
0
def load_model( filename=None, rgctype='off parasol' ):
#    filename += rgctype
    print 'Loading model', filename
    indices = extract( linear_stats(rgctype,(5,0)), ['sparse_index', 'subunit_index'] )
    indices['N_subunits'] = len(cones)
    spikes = which_spikes( rgctype )
    data_generator = retina.read_stimulus( spikes,
                                 stimulus_pattern='cone_input_%d.mat' ,
                                 skip_pattern=(-5,0) )
    stimdata = data_generator.next()
    print 'stimdata', stimdata.keys()    
    model = kb.load(filename)
    for n,v in model.items():
        if isinstance(v,type({})):
            model.update(v)
    return forward_LQLEP( stimdata['stimulus'], stimdata['spikes'], model, indices)
Exemplo n.º 5
0
def optimize_LQLEP( rgc_type, filename=None, maxiter=maxiter, indices=None, description='',
    unknowns=['sv1','V2','u','uc','c','ud','d'],
    vardict = LQLEP_wBarrier( **LQLEP( **thetaM( **u2c_parameterization())))):
    """Optimize an LQLEP model for one RGC type and return the trained params.

    If `filename` is given, previously saved parameters are loaded and used as
    a warm start (filled in with defaults where missing); otherwise defaults
    are used. Trains on stats with skip pattern (5,0) and monitors a test
    objective built on (-5,0) via the callback.

    NOTE(review): `maxiter=maxiter` captures a module-level value, and both
    the list default for `unknowns` and the `vardict` default are evaluated
    once at definition time — confirm this is intended.
    """
#    unknowns = {'sv1':sv1, 'V2':init_V2 , 'u':old['u'], 'uc':old['u'], 'c':init_V2}
    # Default initial values, restricted to the requested unknowns that the
    # parameterization actually uses; 'sv1' is always included.
    defaults = extract( { 'sv1':init_sv1( rgc_type ), 'V2':init_V2 , 'u':init_u, 
                          'uc':numpy.zeros_like(init_u), 'c':init_V2,
                          'ud':0.001*numpy.ones_like(init_u), 'd':0.0001+init_V2},
                        list( set(unknowns).intersection( set(vardict.keys()) ) ) + ['sv1'])
    if filename is not None:
        print 'Re-optimizing',filename
        # Warm start: keep only loaded entries that have a default counterpart,
        # then fill in any missing entries from the defaults.
        unknowns = kb.load(filename)
        for name in unknowns.keys():
            if not defaults.has_key(name): del unknowns[name]
        default(unknowns,defaults)
    else:
        unknowns = defaults
#    if rgc_type[:3] == 'off':
#        unknowns['u'] = -0.01*numpy.abs(unknowns['u'])
    # With a positivity barrier on V1, start sv1 positive; otherwise shrink it.
    if vardict.has_key('barrier_positiveV1'):
        unknowns['sv1'] = numpy.abs(unknowns['sv1'])
    else:
        unknowns['sv1'] = unknowns['sv1']*0.01
    train_LQLEP = global_objective( unknowns, {}, vardict, run=linear_stats(rgc_type,( 5,0)), indices=indices)
    test_LQLEP  = global_objective( unknowns, {}, vardict, run=linear_stats(rgc_type,(-5,0)), indices=indices)
    test_LQLEP.description = 'Test_LQLEP'
    # Callback reports the test objective and a baseline LNP test LL alongside
    # training progress.
    train_LQLEP.with_callback(partial(callback,other=
                 {'Test_LNP':test_LNP(rgc_type)['LL'], 'nonlinearity':test_LQLEP.nonlinearity},
                  objectives=[test_LQLEP]))
    train_LQLEP.description = description+rgc_type
    # Shrink the initial V2 before optimizing.
    unknowns['V2'] = unknowns['V2']*0.001
    trained = optimize_objective( train_LQLEP, unknowns, gtol=1e-10 , maxiter=maxiter)
    print 'RGC type:', rgc_type
    test_global_objective( train_LQLEP, trained )
    # NOTE(review): callback is invoked on both the flat and unflattened
    # result — presumably for final reporting/saving; confirm both are needed.
    train_LQLEP.callback( trained, force=True )
    train_LQLEP.callback( train_LQLEP.unflat( trained ), force=True )
    return trained
Exemplo n.º 6
0
def simulate_data( spike_generator, stim_generator=None ):
    """Generator: for each stimulus chunk from stim_generator, attach
    simulated spikes from spike_generator and yield a dict with only the
    'stimulus' and 'spikes' entries.

    NOTE(review): stim_generator defaults to None but is used
    unconditionally; calling with the default would raise — confirm callers
    always pass a generator.
    """
    while 1:
        stimdata = stim_generator.next()
#        try:
        stimdata['spikes'] = spike_generator( stimdata )
        yield kb.extract( stimdata, ['stimulus','spikes'] )