Example 1
def STA_stats( spikes = data['spikes'] ):
    stats = retina.accumulate_statistics( 
            retina.read_stimulus( data['spikes'],
                                 stimulus_pattern='cone_input_%d.mat' ,
                                 skip_pattern=(5,0) ) )
    print 'calculated STAs'
    sys.stdout.flush()
    return stats
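
All of these examples follow the same pattern: retina.read_stimulus returns a generator of data chunks (dictionaries with at least 'stimulus' and 'spikes' entries, as Example 5 suggests), and retina.accumulate_statistics folds a per-chunk statistic over them. Below is a minimal sketch of that accumulation pattern, with a hypothetical chunks() generator standing in for retina.read_stimulus; the helper names are illustrative only.

import numpy

def chunks(n_chunks=3, T=100, n_stim=5, n_cells=2):
    # Hypothetical stand-in for retina.read_stimulus: yields data chunks
    # shaped like the dictionaries unpacked in Example 5.
    for _ in range(n_chunks):
        yield {'stimulus': numpy.random.randn(T, n_stim),
               'spikes':   numpy.random.poisson(1.0, (n_cells, T))}

def spike_triggered_sum(d):
    # Per-chunk contribution to a spike-triggered average:
    # stimulus frames weighted by each cell's spike counts.
    return numpy.dot(d['spikes'], d['stimulus'])

# Accumulating the statistic over the whole generator, as
# accumulate_statistics presumably does internally:
total = sum(spike_triggered_sum(d) for d in chunks())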
Example 2
def make_sparse_stats( rgc_type='off midget', stats={}, skip_pattern=None ):
    print 'Fitting sparse stats for',len(stats['rgc_index']),'RGCs'
    stats.update( retina.accumulate_statistics( 
        data_generator = retina.read_stimulus( stats['spikes'] , 
                                               skip_pattern=skip_pattern) ,
        feature        = lambda x : x                   ,
        pipelines      = retina.fit_U                    ,
        sparse_index   = stats['sparse_index']          ))
    return stats
Example 3
def fit_U_stats( rgc_type='off midget', keep=15, stats=stats ):
    which_rgc = [i for i,ind in enumerate(data['rgc_ids'])
                   if  ind   in data['rgc_types'][rgc_type]]
    spikes = data['spikes'][which_rgc,:]
    sparse_index = [n_largest( sta, keep=keep ) for sta in stats['STA']]
    stats['rgc_type']  = rgc_type
    stats['rgc_index'] = which_rgc
    stats.update( retina.accumulate_statistics( 
        data_generator = retina.read_stimulus( spikes ) ,
        feature        = lambda x : x                   ,
        pipelines      = retina.fit_U                    ,
        sparse_index   = sparse_index                   ))
    return stats
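
n_largest is not shown in these examples; it presumably returns the indices of the keep strongest entries of each STA, which then serve as the sparse_index passed to accumulate_statistics. A minimal sketch of such a helper, assuming selection by absolute magnitude:

import numpy

def n_largest_sketch(sta, keep=15):
    # Hypothetical stand-in for n_largest: indices of the `keep`
    # entries of the STA with the largest absolute value.
    sta = numpy.asarray(sta).ravel()
    return numpy.argsort(-numpy.abs(sta))[:keep]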
Example 4
def simulated_STAC( filename=None, rgctype='off parasol' ):
    print 'Calculating STAC for spikes generated with model', filename
    forward = load_model( filename, rgctype )
    def spike_generator( d ):
        return [numpy.random.poisson(r) for r in forward.rates(d)]
    stats = STA_stats()
    stats = make_sparse_indices( rgctype, stats )
    stats.update( retina.accumulate_statistics( 
        data_generator = retina.simulate_data( spike_generator,
                         retina.read_stimulus( which_spikes( rgctype) , 
                                               skip_pattern=(-5,0))) ,
        pipelines      = retina.fit_U                    ,
        sparse_index   = stats['sparse_index']          ))
    return stats
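
The inner spike_generator draws Poisson spike counts from the firing rates predicted by the loaded model. numpy.random.poisson also accepts an array of rates directly and returns one draw per entry, so the list comprehension could be replaced by a single vectorized call:

import numpy

rates  = numpy.array([0.5, 2.0, 4.0])   # example firing rates per bin
counts = numpy.random.poisson(rates)    # one Poisson draw per rate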
Example 5
def load_model( filename=None, rgctype='off parasol' ):
#    filename += rgctype
    print 'Loading model', filename
    indices = extract( linear_stats(rgctype,(5,0)), ['sparse_index', 'subunit_index'] )
    indices['N_subunits'] = len(cones)
    spikes = which_spikes( rgctype )
    data_generator = retina.read_stimulus( spikes,
                                 stimulus_pattern='cone_input_%d.mat' ,
                                 skip_pattern=(-5,0) )
    stimdata = data_generator.next()
    print 'stimdata', stimdata.keys()    
    model = kb.load(filename)
    for n,v in model.items():
        if isinstance(v,type({})):
            model.update(v)
    return forward_LQLEP( stimdata['stimulus'], stimdata['spikes'], model, indices)
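
The loop over model.items() promotes any dictionary-valued entries of the loaded model one level up, so nested parameter groups become directly accessible before forward_LQLEP is built. A small illustration of that flattening pattern (iterating over a copy so the dictionary can be modified safely):

model = {'a': 1, 'params': {'b': 2, 'c': 3}}
for n, v in list(model.items()):
    if isinstance(v, dict):
        model.update(v)
# model now also exposes 'b' and 'c' at the top level.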
Example 6
def exact_normalized_LLs( filename=None, rgctype='off parasol' ):
    def normalizer( spikes=None, **other ):
        lnfact  = numpy.sum( scipy.special.gammaln( numpy.vstack( spikes['spikes'] + 1 ) ) )
        N_total = numpy.sum( numpy.vstack( spikes['spikes'] ) )
        return numpy.array([lnfact, N_total])
    lnfact, N_total = sum( map( normalizer, 
                           retina.read_stimulus( which_spikes( rgctype ),
                                                stimulus_pattern='cone_input_%d.mat' ,
                                                skip_pattern=(-5,0) ) ) )
    LQLEP_LL = exact_LL( filename, rgctype )
    LNP_LL   = exact_LL( 'LNP_'+rgctype, rgctype )
    print 'LL', (LQLEP_LL+lnfact)/N_total,'LNPLL',(LNP_LL+lnfact)/N_total
    return {'LQLEP_LL':float((LQLEP_LL+lnfact)/N_total), 'LNP_LL':float((LNP_LL+lnfact)/N_total),
            'log_factorial':lnfact/N_total, 'N_total':N_total,
            'source':''''LQLEP_LL':float((LQLEP_LL+lnfact)/N_total), 
                        'LNP_LL':float((LNP_LL+lnfact)/N_total),
                        'log_factorial':lnfact/N_total, 'N_total':N_total'''}
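
The normalizer accumulates two quantities over all data chunks: the Poisson normalization constant, the sum of log(n!) over all spike counts n (computed as gammaln(n + 1)), and the total spike count N_total, which are then used to put the two models' log-likelihoods on a common per-spike scale. A self-contained check of the gammaln identity used here:

import math
import numpy
import scipy.special

n = numpy.arange(5)
logfact = numpy.array([math.log(math.factorial(int(k))) for k in n])
# gammaln(n + 1) equals log(n!) for non-negative integers n.
assert numpy.allclose(scipy.special.gammaln(n + 1), logfact)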
Example 7
def exact_LL( filename=None, rgctype='off parasol' ):
    print 'Calculating exact LL for', filename
    forward = load_model( filename, rgctype ).LL
    return sum( map( forward, retina.read_stimulus( which_spikes( rgctype ),
                                     stimulus_pattern='cone_input_%d.mat' ,
                                     skip_pattern=(-5,0) ) ) )