Example #1
def single_run(filter, spk_src, bg_src, params):
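    """Mix one cell's spikes with background noise, detect and cluster
    the spikes, and compute cluster-separability metrics."""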
    import evaluate as eval

    sp_win    = params['sp_win']
    spadd_win = params['sp_add_win']
    pow_frac  = params['pow_frac']
    f_filter  = params['f_filter']
    sp_type   = params['sp_type']
    thresh    = params['thresh']
    feats     = params['features']
    contacts  = params['contacts']
    n_pts     = params['n_pts']

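    # Add the cell's spikes to the background signal and filter the mixture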
    sp, stim, spt_real = eval.mix_cellbg(filter, spk_src, bg_src, 
                                         spadd_win, pow_frac)
    sp = eval.filter_data(sp, f_filter)

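    # Detect spikes and assign cluster labels; n_missing counts undetected spikes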
    spt, clust_idx, n_missing = eval.spike_clusters(sp, spt_real,
                                                    stim,
                                                    thresh,
                                                    sp_type, sp_win)
    
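    # Extract waveform features of the detected spikes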
    features = eval.calc_features(sp, spt, sp_win, feats, contacts)
    
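    # Univariate separability: mutual information between features and cluster labels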
    uni_metric = eval.univariate_metric(eval.mutual_information, 
                                        features, clust_idx)

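    # Multivariate separability: k-nearest-neighbour classification rate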
    multi_metric = eval.k_nearest(features, clust_idx, n_pts=n_pts)

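    # Collect the results together with the run parameters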
    n_total = len(spt_real['data'])

    result_dict = {"cell": spk_src,
                   "electrode": bg_src,
                   "spikes_total": n_total,
                   "spikes_missed": n_missing,
                   "mutual_information": uni_metric,
                   "k_nearest": multi_metric}

    result_dict.update(params)
    
    import socket
    #from sim_manager import get_version

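    # Record the host the run was executed on; dependency-version tracking is disabled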
    result_dict['host'] = socket.gethostname()
    #result_dict['dependencies'] ={'evaluate': get_version(eval),
    #                              'spike_sort': get_version(eval.sort)}
    
    return result_dict
Example #2
    h5filter = PyTablesFilter(h5_fname)

    dataset = "/TestSubject/sSession01/el1"
    sp_win = [-0.4, 0.8]
    f_filter = None
    thresh = 'auto'
    sp_type = 'max'
    
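    # Read the raw signal, the original spike times and the stimulus times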
    sp = h5filter.read_sp(dataset)
    spt_orig = h5filter.read_spt(dataset+"/cell1_orig")
    stim = h5filter.read_spt(dataset+"/stim")
    
    sp = eval.filter_data(sp, f_filter)

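    # Detect and cluster the spikes; n_missing counts spikes that were not detected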
    spt, clust_idx, n_missing = eval.spike_clusters(sp, spt_orig,
                                                    stim,
                                                    thresh,
                                                    sp_type, sp_win)
    
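    # Peak-to-peak (P2P) and principal-component (PCs) features from contacts 0 and 1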
    features = eval.calc_features(sp, spt, sp_win, ["P2P", "PCs"], [0,1])
    
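    # Univariate (mutual information) and multivariate (k-nearest) separability metrics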
    single_metric, knearest_metric = calc_metrics(features, clust_idx)
    
    n_total = len(spt_orig['data'])
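    # Report detection performance and the separability metrics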
    print "Total number of spikes:", n_total
    print "Number of undetected spikes: %d (%f)" % (n_missing,
                                                    n_missing*1./n_total)
    print "Univariate MI:", single_metric
    print "K-nearest class. rate:", knearest_metric
    
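    # Plot the feature distributions with the cluster labels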
    spike_sort.ui.plotting.plot_features(features, clust_idx)
    #plt.figure()