Ejemplo n.º 1
0
def load_model( filename=None, rgctype='off parasol' ):
#    filename += rgctype
    print 'Loading model', filename
    indices = extract( linear_stats(rgctype,(5,0)), ['sparse_index', 'subunit_index'] )
    indices['N_subunits'] = len(cones)
    spikes = which_spikes( rgctype )
    data_generator = retina.read_stimulus( spikes,
                                 stimulus_pattern='cone_input_%d.mat' ,
                                 skip_pattern=(-5,0) )
    stimdata = data_generator.next()
    print 'stimdata', stimdata.keys()    
    model = kb.load(filename)
    for n,v in model.items():
        if isinstance(v,type({})):
            model.update(v)
    return forward_LQLEP( stimdata['stimulus'], stimdata['spikes'], model, indices)
Ejemplo n.º 2
0
def optimize_LQLEP( rgc_type, filename=None, maxiter=maxiter, indices=None, description='',
    unknowns=['sv1','V2','u','uc','c','ud','d'],
    vardict = LQLEP_wBarrier( **LQLEP( **thetaM( **u2c_parameterization())))):
    """Fit (or re-fit) an LQLEP model for one RGC type.

    rgc_type    -- string naming the cell type, e.g. 'off parasol'
    filename    -- if given, previously optimized unknowns are loaded from
                   this file via kb.load and used as the starting point
    maxiter     -- optimizer iteration cap (default is the module-level
                   `maxiter`, defined outside this chunk)
    indices     -- index structure passed through to global_objective
    description -- prefix for the train objective's description string
    unknowns    -- names of parameters to optimize; only those present in
                   `vardict` are kept (plus 'sv1', always)
    vardict     -- symbolic model definition.  NOTE(review): this default
                   is built once at import time, not per call.

    Returns the optimized parameter vector from optimize_objective.
    """
#    unknowns = {'sv1':sv1, 'V2':init_V2 , 'u':old['u'], 'uc':old['u'], 'c':init_V2}
    # Default initial values for each optimizable parameter, restricted to
    # the requested unknowns that vardict actually contains; 'sv1' is
    # always included.
    defaults = extract( { 'sv1':init_sv1( rgc_type ), 'V2':init_V2 , 'u':init_u, 
                          'uc':numpy.zeros_like(init_u), 'c':init_V2,
                          'ud':0.001*numpy.ones_like(init_u), 'd':0.0001+init_V2},
                        list( set(unknowns).intersection( set(vardict.keys()) ) ) + ['sv1'])
    if filename is not None:
        print 'Re-optimizing',filename
        unknowns = kb.load(filename)
        # Drop loaded parameters we are not optimizing.  Deleting while
        # iterating .keys() is safe in Python 2 (.keys() returns a list).
        for name in unknowns.keys():
            if not defaults.has_key(name): del unknowns[name]
        # Fill in any missing parameters from the defaults.
        default(unknowns,defaults)
    else:
        unknowns = defaults
#    if rgc_type[:3] == 'off':
#        unknowns['u'] = -0.01*numpy.abs(unknowns['u'])
    # With a positivity barrier on V1, start sv1 positive; otherwise
    # shrink it to a small initial magnitude.
    if vardict.has_key('barrier_positiveV1'):
        unknowns['sv1'] = numpy.abs(unknowns['sv1'])
    else:
        unknowns['sv1'] = unknowns['sv1']*0.01
    # Train on the (5,0) data split, test on the held-out (-5,0) split.
    train_LQLEP = global_objective( unknowns, {}, vardict, run=linear_stats(rgc_type,( 5,0)), indices=indices)
    test_LQLEP  = global_objective( unknowns, {}, vardict, run=linear_stats(rgc_type,(-5,0)), indices=indices)
    test_LQLEP.description = 'Test_LQLEP'
    # Report test-set likelihood (and the LNP baseline) during training.
    train_LQLEP.with_callback(partial(callback,other=
                 {'Test_LNP':test_LNP(rgc_type)['LL'], 'nonlinearity':test_LQLEP.nonlinearity},
                  objectives=[test_LQLEP]))
    train_LQLEP.description = description+rgc_type
    # Shrink the initial V2 before optimizing.
    unknowns['V2'] = unknowns['V2']*0.001
    trained = optimize_objective( train_LQLEP, unknowns, gtol=1e-10 , maxiter=maxiter)
    print 'RGC type:', rgc_type
    test_global_objective( train_LQLEP, trained )
    # Fire the callback once on the flat result and once on the
    # unflattened (structured) result.
    train_LQLEP.callback( trained, force=True )
    train_LQLEP.callback( train_LQLEP.unflat( trained ), force=True )
    return trained
Ejemplo n.º 3
0
from   QuadPoiss import LQLEP_wBarrier, LQLEP, thetaM, UV12_input, UVi, \
                        linear_reparameterization

from IPython.Debugger import Tracer; debug_here = Tracer()

# Memoizing results using joblib;  makes life easier
from joblib import Memory
# NOTE(review): cache directory is hard-coded to one machine -- adjust per host.
memory = Memory(cachedir='/Users/kolia/Documents/joblibcache', verbose=0)

# Load the experimental dataset once at import time.
data = retina.read_data()

cones             = data['cone_locations']
# Every cone location is treated as a candidate subunit center.
possible_subunits = cones

from kolia_base import load
# Precomputed linear statistics; contains 'STA' entries used by fit_U_stats.
stats = load('Linear_localization')

def n_largest( values , keep=10 ):
    """Return the indices of the `keep` largest entries of `values` by
    absolute value.

    Ties at the cutoff magnitude are all included, so more than `keep`
    indices may be returned.  `keep` is clamped to len(values) instead of
    raising IndexError when it exceeds the array length, and an empty
    input yields an empty index array.
    """
    magnitudes = numpy.abs( values )
    if magnitudes.size == 0:
        return numpy.zeros( 0, dtype=int )
    # k-th largest magnitude is the inclusion threshold; numpy.sort
    # returns a sorted copy, leaving `values` untouched.
    cutoff = numpy.sort( magnitudes )[ -min( keep, magnitudes.size ) ]
    return numpy.nonzero( magnitudes >= cutoff )[0]
    
@memory.cache
def fit_U_stats( rgc_type='off midget', keep=15, stats=stats ):
    which_rgc = [i for i,ind in enumerate(data['rgc_ids'])
                   if  ind   in data['rgc_types'][rgc_type]]
    spikes = data['spikes'][which_rgc,:]
    sparse_index = [n_largest( sta, keep=keep ) for sta in stats['STA']]
    stats['rgc_type']  = rgc_type
    stats['rgc_index'] = which_rgc