Example No. 1
def vl_look():
    mpl.rcParams['font.size'] = 26
    animal = 66
    session = 60
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    print 'npw'
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}
    _, good_clusters = get_good_clusters(0)
    print 'kw'
    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)
    print 'w'

    # Inspect the video-log samples in the window [L1, L2)
    L1 = 59
    L2 = 70
    # clusters[i] lists the (tetrode, cell) pairs that spiked at sample L1 + i
    clusters = [[] for _ in range(L2 - L1)]
    for cell, spk_i in t_cells.items():
        spk_i = np.unique(spk_i)
        for spk in np.nonzero((spk_i < L2) & (spk_i >= L1))[0]:
            clusters[spk_i[spk] - L1].append(cell)

    out = zip(clusters, vl['xs'][L1:L2], vl['ys'][L1:L2], vl['vxs'][L1:L2],
              vl['vys'][L1:L2])

    for tt in out:
        print '%s, (%.1f,%.1f), (%.1f,%.1f)' % ((str(tt[0]), ) + tt[1:])
    import pdb
    pdb.set_trace()
Example No. 2
def rate_graph():
    animal = 66
    session = 60  # This is August 7, 2013 run
    room_shape = [[-55, 55], [-55, 55]]
    tetrodes = range(1, 17)
    cluster_profile = 0
    bin_size = 5

    _, good_clusters = get_good_clusters(cluster_profile)

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in tetrodes}
    # Time per video-log point, in whatever units vl['Time'] uses
    tpp = 1.0 * np.mean(vl["Time"][1:] - vl["Time"][:-1])

    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    label_l = vl["Task"]

    # rates[cell, lbl, xbin, ybin] = firing rate
    rates = get_fracs(vl["xs"], vl["ys"], label_l, room_shape, bin_size, t_cells)
    rates /= tpp

    plot_rates(rates, label_l, t_cells)

    plt.show()
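Note: get_fracs and plot_rates are project helpers whose source isn't shown here. The rate map they return appears to follow the standard occupancy-normalized recipe; below is a minimal self-contained sketch of that recipe, not the project's implementation (all names are hypothetical, square bins assumed):

import numpy as np

def rate_map_sketch(xs, ys, spike_samples, room, bin_size, tpp):
    # Occupancy-normalized firing-rate map: spikes per bin divided by
    # time spent per bin.
    bins = [np.arange(lo, hi + bin_size, bin_size) for lo, hi in room]
    occupancy, _, _ = np.histogram2d(xs, ys, bins=bins)   # samples per bin
    spikes, _, _ = np.histogram2d(xs[spike_samples], ys[spike_samples],
                                  bins=bins)              # spikes per bin
    with np.errstate(invalid='ignore', divide='ignore'):
        return spikes / (occupancy * tpp)  # Hz; NaN where never visited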
Example No. 3
def rate_graph():
    animal = 66
    session = 60  # This is August 7, 2013 run
    room_shape = [[-55, 55], [-55, 55]]
    tetrodes = range(1, 17)
    cluster_profile = 0
    bin_size = 5

    _, good_clusters = get_good_clusters(cluster_profile)

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in tetrodes}
    tpp = 1.0 * np.mean(vl['Time'][1:] - vl['Time'][:-1])

    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    label_l = vl['Task']

    # rates[cell, lbl, xbin, ybin] = firing rate
    rates = get_fracs(vl['xs'], vl['ys'], label_l, room_shape, bin_size,
                      t_cells)
    rates /= tpp

    plot_rates(rates, label_l, t_cells)

    plt.show()
Example No. 4
def vl_look():
    mpl.rcParams['font.size'] = 26
    animal = 66
    session = 60
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    print 'npw'
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}
    _, good_clusters = get_good_clusters(0)
    print 'kw'
    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)
    print 'w'
    
    L1 = 59
    L2 = 70
    clusters = [[] for _ in range(L2 - L1)]
    for cell, spk_i in t_cells.items():
        spk_i = np.unique(spk_i)
        for spk in np.nonzero((spk_i < L2) & (spk_i >= L1))[0]:
            clusters[spk_i[spk] - L1].append(cell)

    out = zip(clusters,
              vl['xs'][L1:L2],
              vl['ys'][L1:L2],
              vl['vxs'][L1:L2],
              vl['vys'][L1:L2])

    for tt in out:
        print '%s, (%.1f,%.1f), (%.1f,%.1f)' % ((str(tt[0]),) + tt[1:])
    import pdb; pdb.set_trace()
Example No. 5
def run():
    logging.basicConfig(level=logging.INFO)

    good_trials = try_cache("Good trials")
    animal_sess_combs = [(animal, session) for animal in [66, 70] for session in good_trials[animal]]

    _, good_clusters = get_good_clusters(0)

    for animal, session in animal_sess_combs:
        fn, trigger_tm = load_mux(animal, session)
        vl = load_vl(animal, fn)
        cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}

        for tetrode, cl in cls.items():
            if tetrode not in good_clusters:
                # Debug stop: every tetrode is expected to have an entry here
                import pdb
                pdb.set_trace()
                continue
            for cell in good_clusters[tetrode]:
                logging.info(
                    "Finding spike locations for animal %i, session %i, tetrode %i, cell %i",
                    animal,
                    session,
                    tetrode,
                    cell,
                )
                cache_key = (cl["Label"][::10], vl["xs"][::10], trigger_tm, cell)
                spk_i = spike_loc(cl, vl, trigger_tm, cell, key=None)
                if spk_i is np.NAN:
                    break
                store_in_cache(cache_key, spk_i)
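try_cache and store_in_cache come from the project's Cache module, which isn't shown in these examples. For orientation only, here is one plausible minimal implementation, assuming pickle files named by a hash of the (possibly array-valued) key; the real module may differ:

import hashlib
import os
import pickle

CACHE_DIR = 'cache'  # assumed location

def _cache_path(key):
    # Keys here can contain numpy arrays, so hash their pickled bytes
    digest = hashlib.sha1(pickle.dumps(key)).hexdigest()
    return os.path.join(CACHE_DIR, digest + '.p')

def try_cache(key):
    try:
        with open(_cache_path(key), 'rb') as f:
            return pickle.load(f)
    except (IOError, OSError):
        return None

def store_in_cache(key, value):
    if not os.path.isdir(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    with open(_cache_path(key), 'wb') as f:
        pickle.dump(value, f)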
Example No. 6
def mk_grph(animal, session):
    room_shape = [[-60, 60], [-60, 60]]
    cntrx = cntry = 0

    fn, _ = load_mux(animal, session)
    vl = load_vl(animal, fn)

    if len(np.unique(vl['Task'])) <= 1:
        raise Exception('VL Task not right.')
    xs = np.linspace(0, len(vl['Task']) / vl_sample_rate / 60, len(vl['Task']))

    plt.figure()
    angls = count_angle(vl, room_shape)
    plt.plot(xs, angls, label='Angle')

    scale = (np.max(angls) - np.min(angls)) / 2.0
    plt.plot(xs, vl['Task'] * scale + (np.max(angls) - scale), label='Task')

    # Overwrite sections with discrepancies

    orient = get_orientation(vl, cntrx, cntry)
    #plt.plot(orient*np.max(angls),label='Orientation')
    discrep = np.sum(orient != vl['Task'])
    radial = np.sum(orient == 0)
    txt = ('Discrepancy data: %.3f of %i \n' + 'Radial data: %.3f of %i') % (
        1.0 * (discrep - radial) / (len(orient) - radial),
        len(orient) - radial, 1.0 * radial / len(orient), len(orient))
    txt2 = 'Filename: %s' % (fn, )
    plt.autoscale(axis='x', tight=True)
    plt.text(0, plt.ylim()[0], txt)
    plt.text(plt.xlim()[1], plt.ylim()[0], txt2, horizontalalignment='right')
    plt.ylabel('Angle (radians)')
    plt.xlabel('Time (min)')
    plt.legend()
    plt.title('Animal:%i  Session:%i Filename:%s' % (animal, session, fn))
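One subtlety in the discrepancy fraction above: get_orientation returns 0 for radial (ambiguous) samples, and the task labels appear to be ±1 (see the context in [1, -1] loops elsewhere), so every radial sample is also counted in discrep; subtracting radial from both numerator and denominator restricts the fraction to genuinely mislabeled non-radial samples. A toy check:

import numpy as np

orient = np.array([1, 1, 0, -1, 0, 1])   # 0 marks radial samples
task = np.array([1, -1, 1, -1, -1, 1])

radial = np.sum(orient == 0)                       # 2
discrep = np.sum(orient != task)                   # 3, includes both radial samples
frac = 1.0 * (discrep - radial) / (len(orient) - radial)
print(frac)                                        # 0.25: 1 of 4 non-radial samples disagrees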
Example No. 7
def run():
    logging.basicConfig(level=logging.INFO)
    
    good_trials = try_cache('Good trials')
    animal_sess_combs = [(animal,session) for animal in [66,70] 
                         for session in good_trials[animal]]
    
    _, good_clusters = get_good_clusters(0)
    
    for animal, session in animal_sess_combs:
        fn, trigger_tm = load_mux(animal, session)
        vl = load_vl(animal,fn)
        cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in range(1,17)}
        
        
        for tetrode,cl in cls.items():
            if tetrode not in good_clusters: 
                import pdb; pdb.set_trace()
                continue
            for cell in good_clusters[tetrode]:
                logging.info('Finding spike locations for animal %i, session %i, tetrode %i, cell %i',animal, session, tetrode,cell)
                cache_key = (cl['Label'][::10],vl['xs'][::10],trigger_tm,cell)
                spk_i = spike_loc(cl, vl, trigger_tm, cell, key=None)
                if spk_i is np.NAN: break
                store_in_cache(cache_key,spk_i)
Example No. 8
def find_ambiguous_data():
    # Final data structure will be a dictionary:
    #  amb[animal][session] = (radial, discrepancy, total)
    
    # First load cache
    
    cache = try_cache(cache_key)
    if cache is not None:
        amb = cache
    else:
        amb = {}

    # Fix center
    cntrx = cntry = 0
    
    # Animal range
    animals = range(65,74)
    
    not_task_trials = []
    for animal in animals:
    
        # Add to dictionary
        if animal not in amb:
            amb[animal] = {}
        
        for session in range(1,100):
            if animal in amb and session in amb[animal]: #and amb[animal][session]:
                logging.info('Found (Animal %i, Session %i) in cache',animal,session)
                continue
            try:
                fn, _ = load_mux(animal, session)
            except:
                logging.info('Animal %i has no sessions greater than %i',animal,session-1)
                break
            try:
                vl = load_vl(animal,fn)
            except:
                traceback.print_exc()
                logging.info('No data found for (Animal %i, Session %i)',animal,session)
                amb[animal][session] = None
                not_task_trials.append([animal,session])
                continue
            
            logging.info('Checking ambiguous data for (Animal %i, Session %i)',animal,session)
            
            orientation = get_orientation(vl, cntrx, cntry)

            # Assume that orientation and task labels are matched correctly
            radial = np.sum(0 == orientation)
            discrepancy = np.sum(vl['Task'] != orientation)
            tot = len(vl['xs'])

            amb[animal][session] = (radial, discrepancy, tot)
        
    # Store to cache
    store_in_cache(cache_key, amb)
    
    return amb
Example No. 9
def rate_graph():
    #mpl.rcParams['axes.titlesize'] = 18
    #mpl.rcParams['axes.labelsize'] = 18
    mpl.rcParams['font.size'] = 26
    
    
    animal = 66
    session = 60 # This is August 7, 2013 run
    room_shape = [[-55,55],[-55,55]]
    tetrodes = [1]
    cluster_profile = 0
    bin_size = 5
    
    
    _, good_clusters = get_good_clusters(cluster_profile)
    
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal,fn)
    cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in tetrodes}
    
    t_cells = count_cells(vl,cls,trigger_tm,good_clusters)
    
    label_l = vl['Task']
    
    # rates[cell, lbl, xbin, ybin] = firing rate
    rates = get_fracs(vl['xs'], vl['ys'], label_l, room_shape, bin_size, t_cells)

    for lbl in range(len(np.unique(label_l))):
        
        plt.figure(figsize=(10,10))
        x = np.concatenate([np.arange(room_shape[0][0],room_shape[0][1],bin_size),[room_shape[0][1]]])
        y = np.concatenate([np.arange(room_shape[1][0],room_shape[1][1],bin_size),[room_shape[1][1]]])
        Xs, Ys = np.meshgrid(x, y)
        cntr = plt.pcolor(Ys,Xs,rates[3,lbl])  # hard-coded: rate map of cell index 3
        
        t=plt.colorbar(cntr, extend='both')
        t.set_label('Frequency (Hz)')
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')
        if lbl == 0: plt.title('Clockwise')
        else: plt.title('Counterclockwise')
        
        #plt.axis('equal')
        plt.xlim(room_shape[0])
        plt.ylim(room_shape[1])

        '''
        plt.figure()
        x = np.arange(room_shape[0][0],room_shape[0][1],bin_size)
        y = np.arange(room_shape[1][0],room_shape[1][1],bin_size)
        Xs, Ys = np.meshgrid(x, y)
        cntr = plt.contourf(Ys,Xs,rate_dict[contxt][2])
        t = plt.colorbar(cntr, extend='both')
        t.set_label('Frequency (Hz)')
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')'''
    plt.show()
    
Example No. 10
def dp_accuracy():
    logging.basicConfig(level=10) # 5 for more stuff
    CL = CL3
    animal = 66
    session = 60 
    
    room =[[-55,55],[-55,55]]
    bin_size = 5
    K =  50      # Segment length used to calculate firing rates
    CL.delt_t = K*.02
    cluster_profile = 0
    label = 'Task'
    
    cl_prof_name, good_clusters = get_good_clusters(cluster_profile)
    try:
        adat = try_cache('One big data structure')
        correct_dp = adat[CL.name][animal][session][cl_prof_name][bin_size][label][K]
        logging.info('Got data from Cache.cache.')
    except:
        logging.info('Calculating classifications...')
        CL.delt_t=K
        
        fn, trigger_tm = load_mux(animal, session)
        vl = load_vl(animal,fn)
        cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in range(1,17)}
        label_l = vl['Task']
        
        t_cells = count_cells(vl,cls,trigger_tm,good_clusters)
    
        logging.info('About to generate population vector.')
        #X, Y = gpv(vl, t_cells, label_l, K)
        s=time()
        X, Y = gpv(vl, t_cells, label_l, K, bin_size, room)
        logging.info('%i population vectors generated in %.3f.',X.shape[0],time()-s)
        Y = Y.reshape([-1])
        
        correct_dp = check_classifier(range(X.shape[0]),range(X.shape[0]), 
                                      X, Y, CL, room, bin_size) 

    # Accuracy meter
    plt.figure()
    plt.hist(correct_dp,normed=True)
    plt.xlabel('Accuracy')
    tt = '%s,  K: %i, ClPr: %s, Label:%s'%(CL.name,K,cl_prof_name,
                                                   label)
    plt.title(tt)

    msg = []
    for i in [1,50,75,90,95,99]:
        perc = 1.0*np.sum(correct_dp > i/100.0)/len(correct_dp)*100.0
        msg.append('>%i%%:  %.1f%%'%(i,perc))
    msg = '\n'.join(msg)
    plt.xlim([0,1])
    xcoord = plt.xlim()[0] + (plt.xlim()[1]-plt.xlim()[0])*.1
    ycoord = plt.ylim()[0] + (plt.ylim()[1]-plt.ylim()[0])*.5
    plt.text(xcoord,ycoord,msg)
    plt.show()
    
Example No. 11
def cluster_graphs():
    animal = 70
    session = 8

    # Filenames (fn) are named descriptively:
    # session 18:14:04 on day 10/25/2013
    # load virmenLog75\20131025T181404.cmb.mat

    #for tetrode in range(1,17):
    for tetrode in [13]:
        for context in [1, -1]:
            global clrs
            clrs = ['b', 'g', 'r', 'c', 'm', 'k', 'b', 'g', 'r', 'c', 'm', 'k']

            fn, trigger_tm = load_mux(animal, session)
            cl = load_cl(animal, fn, tetrode)
            vl = load_vl(animal, fn)

            spk_is = []
            #for cell in range(2,100):
            for cell in [3]:
                cache_key = (cl['Label'][::10], vl['xs'][::10], trigger_tm,
                             cell)
                spk_i = spike_loc(cl, vl, trigger_tm, cell, cache_key)
                if spk_i is np.NAN: break
                cntx_is = np.nonzero(vl['Task'] == context)[0]
                spk_i = np.intersect1d(cntx_is, spk_i)
                spk_is.append(spk_i)

            tot_spks = len(spk_is)
            if tot_spks == 0: continue
            subp_x, subp_y = get_subplot_size(tot_spks)
            #plt.figure()
            for spk_i, i in zip(spk_is, range(tot_spks)):
                plt.subplot(subp_x, subp_y, i + 1)
                if context == 1:
                    plt.plot(vl['xs'], vl['ys'], zorder=1, color='k')
                    plt.scatter(vl['xs'][spk_i],
                                vl['ys'][spk_i],
                                zorder=2,
                                color='b',
                                label='Clockwise')
                else:
                    plt.scatter(vl['xs'][spk_i],
                                vl['ys'][spk_i],
                                zorder=2,
                                color='r',
                                label='Counterclockwise')

                #plot_spks(vl, spk_i, i+2)
            #plt.suptitle('Animal %i, Tetrode %i, Session %i, Context:%i'%(animal,tetrode,session,context))

            plt.xlim([-60, 60])
            plt.ylim([-60, 60])
            plt.xlabel('Position (in)')
            plt.ylabel('Position (in)')
        plt.show()
Example No. 12
def smoothing():
    logging.basicConfig(level=logging.INFO)
    room = [[-55, 55], [-55, 55]]
    bin_size = 5
    xs = range(room[0][0], room[0][1], bin_size)
    ys = range(room[1][0], room[1][1], bin_size)
    X, Y = np.meshgrid(xs, ys)

    session = 60
    animal = 66
    _, good_clusters = get_good_clusters(0)
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}
    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)
    x = vl['xs']
    y = vl['ys']
    # vl['Time'] is apparently in days (MATLAB datenum), so convert to seconds
    tpp = np.mean(vl['Time'][1:] - vl['Time'][:-1]) * 24 * 60 * 60
    label_l = vl['Task']

    # rates[cell id, lbl, xbin, ybin] = rate
    rates1 = get_fracs(x,
                       y,
                       label_l,
                       room,
                       bin_size,
                       t_cells,
                       smooth_flag=True)
    rates1 /= tpp
    logging.info('Got smoothed rates')

    rates2 = get_fracs(x,
                       y,
                       label_l,
                       room,
                       bin_size,
                       t_cells,
                       smooth_flag=False)
    rates2 /= tpp
    logging.info('Got unsmoothed rates')

    for i in range(5):  # or rates1.shape[0]
        logging.info('Cell %i', i)
        plt.figure()
        plt.pcolor(X, Y, rates1[i, 0])
        plt.colorbar()
        plt.autoscale(tight=True)
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')

        plt.figure()
        plt.pcolor(X, Y, rates2[i, 0])
        plt.autoscale(tight=True)
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')
        plt.show()
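The smooth_flag implementation inside get_fracs isn't shown; a common choice for rate maps is a small Gaussian kernel that ignores unvisited bins. A sketch with SciPy (the kernel width is an arbitrary assumption, not the project's value):

import numpy as np
from scipy.ndimage import gaussian_filter

def smooth_rate_map(rate, sigma_bins=1.0):
    # Gaussian-smooth a 2D rate map while ignoring NaN (unvisited) bins:
    # smooth the zero-filled map and a visit mask, then renormalize.
    nan = np.isnan(rate)
    smoothed = gaussian_filter(np.where(nan, 0.0, rate), sigma_bins)
    weight = gaussian_filter((~nan).astype(float), sigma_bins)
    with np.errstate(invalid='ignore', divide='ignore'):
        out = smoothed / weight
    out[nan] = np.nan
    return out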
Example No. 13
def cluster_graphs():
    animal = 70
    session = 8
    
    # Filenames (fn) are named descriptively:
    # session 18:14:04 on day 10/25/2013
    # load virmenLog75\20131025T181404.cmb.mat
    
    #for tetrode in range(1,17):  
    for tetrode in [13]:  
        for context in [1,-1]:
            global clrs
            clrs = ['b','g','r','c','m','k','b','g','r','c','m','k']
            
            fn, trigger_tm = load_mux(animal, session)
            cl = load_cl(animal,fn,tetrode)
            vl = load_vl(animal,fn)
        
            spk_is = []
            #for cell in range(2,100):
            for cell in [3]:
                cache_key = (cl['Label'][::10],vl['xs'][::10],trigger_tm,cell)
                spk_i = spike_loc(cl, vl, trigger_tm, cell,cache_key)
                if spk_i is np.NAN: break
                cntx_is = np.nonzero(vl['Task']==context)[0]
                spk_i = np.intersect1d(cntx_is, spk_i)
                spk_is.append(spk_i)
    
            tot_spks = len(spk_is)
            if tot_spks == 0: continue
            subp_x, subp_y = get_subplot_size(tot_spks)
            #plt.figure()
            for spk_i, i in zip(spk_is, range(tot_spks)):
                plt.subplot(subp_x,subp_y, i+1)
                if context==1: 
                    plt.plot(vl['xs'],vl['ys'],zorder=1,color='k')
                    plt.scatter(vl['xs'][spk_i],vl['ys'][spk_i],zorder=2,color='b',
                                label='Clockwise')
                else:
                    plt.scatter(vl['xs'][spk_i],vl['ys'][spk_i],zorder=2,color='r',
                                label='Counterclockwise')
    
                #plot_spks(vl, spk_i, i+2)
            #plt.suptitle('Animal %i, Tetrode %i, Session %i, Context:%i'%(animal,tetrode,session,context))
            
            plt.xlim([-60,60])
            plt.ylim([-60,60])
            plt.xlabel('Position (in)')
            plt.ylabel('Position (in)')
        plt.show()
Example No. 14
def smoothing():
    logging.basicConfig(level=logging.INFO)
    room = [[-55,55],[-55,55]]
    bin_size = 5
    xs = range(room[0][0],room[0][1],bin_size)
    ys = range(room[1][0],room[1][1],bin_size)
    X,Y = np.meshgrid(xs,ys)
    
    session = 60
    animal=66
    _, good_clusters = get_good_clusters(0)
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal,fn)
    cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in range(1,17)}
    t_cells = count_cells(vl,cls,trigger_tm,good_clusters)
    x = vl['xs']
    y = vl['ys']
    tpp = np.mean(vl['Time'][1:]-vl['Time'][:-1])*24*60*60
    label_l = vl['Task']
    
    # rates[cell id, lbl, xbin, ybin] = rate
    rates1 = get_fracs(x,y,label_l, room, bin_size, t_cells, smooth_flag=True)
    rates1 /= tpp
    logging.info('Got smoothed rates')
    
    rates2 = get_fracs(x,y,label_l, room, bin_size, t_cells, smooth_flag=False)
    rates2 /= tpp
    logging.info('Got unsmoothed rates')
    
    for i in range(5): # or rates1.shape[0]
        logging.info('Cell %i',i)
        plt.figure()
        plt.pcolor(X,Y,rates1[i,0])
        plt.colorbar()
        plt.autoscale(tight=True)
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')


        plt.figure()
        plt.pcolor(X,Y,rates2[i,0])
        plt.autoscale(tight=True)
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')
        plt.show()
Example No. 15
def gpv_rates():
    logging.basicConfig(level=logging.INFO)
    animal = 66
    session = 60
    room_shape = [[-55, 55], [-55, 55]]
    tetrodes = [1]
    cells = range(2, 10)
    bin_size = 5
    K = 50  # Segment length used to calculate firing rates

    #xbins = ybins = (room_shape[0][1]-room_shape[0][0])/bin_size
    good_clusters = {tetrode: cells for tetrode in tetrodes}
    #good_clusters = get_good_clusters(0)

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in tetrodes}

    label_l = vl['Task']
    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    logging.info('About to generate population vector.')
    X, Y = gpv(vl, t_cells, label_l, K, bin_size, room_shape)
    logging.info('%i population vectors generated.', X.shape[0])
    Y = Y.reshape([-1])

    # GPV rates
    rates = fracs_from_pv(X, Y, bin_size, room_shape, smooth_flag=True)

    # Now compute the rates the normal way
    real_rates = get_fracs(vl['xs'],
                           vl['ys'],
                           label_l,
                           room_shape,
                           bin_size,
                           t_cells,
                           smooth_flag=True)

    try:
        assert np.all(rates == real_rates)
    except:
        print 'DOESNT WORK!!'
    plot_rates(rates, Y, t_cells)
    plot_rates(real_rates, Y, t_cells)
    plt.show()
Example No. 16
def count():
    logging.basicConfig(level=logging.INFO)
    
    animals = [66,70]
    sessions = range(1,100)
    tetrodes = range(1,17)
    
    good_trials = try_cache('Good trials')
    
    key = (animals,sessions,tetrodes,'count clusters')
    cache = try_cache(key)
    cache = None  # force a recount; drop this line to reuse the cached result
    if cache is not None:
        cls = cache
    else:
        cls = []
        for animal in animals:
            print 'Animal ', animal
            for session in good_trials[animal]:
                print 'Session', session
                fn, _ = load_mux(animal, session)
                vl = load_vl(animal,fn)
                if len(np.unique(vl['Task'])) != 2: continue

                cells = 0
                for tetrode in tetrodes:
                    cl = load_cl(animal, fn, tetrode)
                    cells += len(np.unique(cl['Label'])) - 1
                if cells == 0: continue
                cls.append((animal, session, cells, len(vl['xs'])))
        store_in_cache(key,cls)
    
    cls.sort(key=lambda x:x[2])
    txt = '%i    %i    %i    %i'
    print 'Animal    Session    Cells    Length'
    for animal,session,cells, length in cls:
        print txt%(animal,session,cells, length)
    
    import pdb; pdb.set_trace()
    
    print 'Mean length:', np.mean([cl[3] for cl in cls])
Example No. 17
def run():
    logging.basicConfig(level=logging.INFO)
    cache_key = 'Good trials'
    animals = [66, 73]
    sessions = range(100)
    _, good_clusters = goodClusters.get_good_clusters(0)

    good_trials = try_cache(cache_key)

    if good_trials is None: good_trials = {}

    for animal in animals:
        if animal not in good_trials: good_trials[animal] = []
        for session in sessions:
            if session in good_trials[animal]: continue
            try:
                fn, trigger_tm = load_mux(animal, session)
            except:
                logging.info('Animal %i has no sessions greater than %i',
                             animal, session + 1)
                break

            try:
                vl = load_vl(animal, fn)
            except:
                logging.info('Animal %i session %i is not a task trial',
                             animal, session + 1)
                continue

            cls = {
                tetrode: load_cl(animal, fn, tetrode)
                for tetrode in range(1, 17)
            }

            try:
                count_cells(vl, cls, trigger_tm, good_clusters)
            except:
                # No cells found
                continue

            if session not in good_trials[animal]:
                good_trials[animal].append(session)
    store_in_cache(cache_key, good_trials)
Example No. 18
def count():
    logging.basicConfig(level=logging.INFO)

    animals = [66, 70]
    sessions = range(1, 100)
    tetrodes = range(1, 17)

    good_trials = try_cache('Good trials')

    key = (animals, sessions, tetrodes, 'count clusters')
    cache = try_cache(key)
    cache = None  # force a recount; drop this line to reuse the cached result
    if cache is not None:
        cls = cache
    else:
        cls = []
        for animal in animals:
            print 'Animal ', animal
            for session in good_trials[animal]:
                print 'Session', session
                fn, _ = load_mux(animal, session)
                vl = load_vl(animal, fn)
                if len(np.unique(vl['Task'])) != 2: continue

                cells = 0
                for tetrode in tetrodes:
                    cl = load_cl(animal, fn, tetrode)
                    cells += len(np.unique(cl['Label'])) - 1
                if cells == 0: continue
                cls.append((animal, session, cells, len(vl['xs'])))
        store_in_cache(key, cls)

    cls.sort(key=lambda x: x[2])
    txt = '%i    %i    %i    %i'
    print 'Animal    Session    Cells    Length'
    for animal, session, cells, length in cls:
        print txt % (animal, session, cells, length)

    import pdb
    pdb.set_trace()

    print 'Mean length:', np.mean([cl[3] for cl in cls])
Example No. 19
def gpv_rates():
    logging.basicConfig(level=logging.INFO)
    animal = 66
    session = 60 
    room_shape = [[-55,55],[-55,55]]
    tetrodes = [1]
    cells = range(2,10)
    bin_size = 5
    K = 50  # Segment length used to calculate firing rates
    
    #xbins = ybins = (room_shape[0][1]-room_shape[0][0])/bin_size
    good_clusters = {tetrode:cells for tetrode in tetrodes}
    #good_clusters = get_good_clusters(0)
    
    
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal,fn)
    cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in tetrodes}
    
    label_l = vl['Task']
    t_cells = count_cells(vl,cls,trigger_tm, good_clusters)
    
    
    logging.info('About to generate population vector.')
    X, Y = gpv(vl, t_cells, label_l, K, bin_size, room_shape)
    logging.info('%i population vectors generated.',X.shape[0])
    Y = Y.reshape([-1])
            
    # GPV rates
    rates = fracs_from_pv(X,Y,bin_size,room_shape,smooth_flag=True)
    
    # Now compute the rates the normal way
    real_rates = get_fracs(vl['xs'],vl['ys'],label_l,room_shape,bin_size, t_cells,smooth_flag=True)
    
    try:
        assert np.all(rates == real_rates)
    except:
        print 'DOESNT WORK!!'
    plot_rates(rates,Y,t_cells)
    plot_rates(real_rates,Y,t_cells)
    plt.show()
    
Example No. 20
def count_vl():
    logging.basicConfig(level=logging.INFO)
    
    llist = []
    good_trials = try_cache('Good trials').items()
    for animal, sessions in good_trials:
        print 'Animal', animal
        for session in sessions:
            print 'Session', session
            fn, _ = load_mux(animal, session)
            vl = load_vl(animal,fn)
            llist.append(len(vl['xs']))
            
    print np.mean(llist)
    print len(llist)
    plt.hist(llist)
    plt.xlabel('Number of recorded points')
    plt.ylabel('Count')
    plt.title('Recorded Points per Session')
    plt.show()
    import pdb; pdb.set_trace()   
Example No. 21
def view_PCA():
    animal = 66
    session = 60
    bin_size = 5

    K = 50  # Segment length used to calculate firing rates
    label = 'Task'
    room = [[-55, 55], [-55, 55]]
    _, good_clusters = get_good_clusters(0)
    xbins = (room[0][1] - room[0][0]) // bin_size
    ybins = (room[1][1] - room[1][0]) // bin_size

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}

    if label == 'Task':
        label_l = vl['Task']
    else:
        raise Exception('Not implemented yet.')

    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    logging.info('About to generate population vector.')
    #X, Y = gpv(vl, t_cells, label_l, K)
    X, Y = gpv(vl, t_cells, label_l, K, bin_size, room)

    # PCA objects cannot be stored in a float array; use a dict keyed by bin
    pcas = {}
    for xbin, ybin in product(range(xbins), range(ybins)):
        pca = PCA()
        X = pca.fit_transform(X[:, :len(t_cells)])
        pcas[(xbin, ybin)] = pca

        plt.plot(pca.explained_variance_ratio_)
        print pca.components_
        plt.show()
Example No. 22
def mk_grph(animal, session):
    room_shape = [[-60,60],[-60,60]]
    cntrx = cntry = 0
    
    fn, _ = load_mux(animal, session)
    vl = load_vl(animal,fn)
    
    if len(np.unique(vl['Task'])) <= 1:
        raise Exception('VL Task not right.')
    xs = np.linspace(0,len(vl['Task'])/vl_sample_rate/60,len(vl['Task']))

    plt.figure()
    angls = count_angle(vl,room_shape)
    plt.plot(xs,angls,label='Angle')
    
    
    scale = (np.max(angls)-np.min(angls))/2.0
    plt.plot(xs,vl['Task']*scale+(np.max(angls)-scale),label='Task')
    
    # Overwrite sections with discrepancies
    
    
    orient = get_orientation(vl,cntrx,cntry)
    #plt.plot(orient*np.max(angls),label='Orientation')
    discrep = np.sum(orient != vl['Task'])
    radial = np.sum(orient == 0)
    txt = ('Discrepancy data: %.3f of %i \n'+
           'Radial data: %.3f of %i')%(1.0*(discrep-radial)/(len(orient)-radial), 
                                       len(orient)-radial,
                                       1.0*radial/len(orient), len(orient))
    txt2 = 'Filename: %s'%(fn,)
    plt.autoscale(axis='x',tight=True)
    plt.text(0,plt.ylim()[0],txt)
    plt.text(plt.xlim()[1],plt.ylim()[0],txt2,horizontalalignment='right')
    plt.ylabel('Angle (radians)')
    plt.xlabel('Time (min)')
    plt.legend()
    plt.title('Animal:%i  Session:%i Filename:%s'%(animal,session, fn))
Example No. 23
def run():
    logging.basicConfig(level=logging.INFO)
    cache_key = 'Good trials'
    animals = [66,73]
    sessions = range(100)
    _, good_clusters = goodClusters.get_good_clusters(0)
    
    good_trials = try_cache(cache_key)
    
    if good_trials is None: good_trials = {}
    
    for animal in animals:
        if animal not in good_trials: good_trials[animal] = []
        for session in sessions:
            if session in good_trials[animal]: continue
            try:
                fn, trigger_tm = load_mux(animal, session)
            except:
                logging.info('Animal %i has no sessions greater than %i',animal,session+1)
                break
            
            try:
                vl = load_vl(animal,fn)
            except:
                logging.info('Animal %i session %i is not a task trial',animal,session+1)
                continue
            
            cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in range(1,17)}
            
            try:
                count_cells(vl,cls,trigger_tm, good_clusters)
            except:
                # No cells found
                continue
            
            if session not in good_trials[animal]:
                good_trials[animal].append(session)
    store_in_cache(cache_key,good_trials)
Example No. 24
def view_PCA():
    animal = 66
    session = 60
    bin_size = 5

    K = 50  # Segment length used to calculate firing rates
    label = "Task"
    room = [[-55, 55], [-55, 55]]
    _, good_clusters = get_good_clusters(0)
    xbins = (room[0][1] - room[0][0]) // bin_size
    ybins = (room[1][1] - room[1][0]) // bin_size

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}

    if label == "Task":
        label_l = vl["Task"]
    else:
        raise Exception("Not implemented yet.")

    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    logging.info("About to generate population vector.")
    # X, Y = gpv(vl, t_cells, label_l, K)
    X, Y = gpv(vl, t_cells, label_l, K, bin_size, room)

    # PCA objects cannot be stored in a float array; use a dict keyed by bin
    pcas = {}
    for xbin, ybin in product(range(xbins), range(ybins)):
        pca = PCA()
        X = pca.fit_transform(X[:, : len(t_cells)])
        pcas[(xbin, ybin)] = pca

        plt.plot(pca.explained_variance_ratio_)
        print pca.components_
        plt.show()
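The scikit-learn calls above are standard: fit_transform projects the population vectors onto the principal axes, explained_variance_ratio_ gives the variance fraction captured by each component, and components_ holds the per-cell loadings. A self-contained demonstration on synthetic data (the sizes are arbitrary):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
# 200 fake population vectors from 12 cells with one dominant mode
base = 3.0 * rng.randn(200, 1)
X = base * rng.randn(1, 12) + 0.5 * rng.randn(200, 12)

pca = PCA()
Z = pca.fit_transform(X)                 # vectors in component space
print(pca.explained_variance_ratio_)     # first entry should dominate
print(pca.components_[0])                # loading of each cell on PC 1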
Example No. 25
def view_simulation():
    logging.basicConfig(level=logging.INFO)
    mpl.rcParams['font.size'] = 26
    lw = 3
    
    CLs = [CL6,CL7, CL2]
    Folds = 6
    
    # Good trials is a dictionary
    #  good_trials[animal] = [list of sessions that are task trials
    #                         and have at least one labeled cluster]
    #good_trials = try_cache('Good trials')
    #animal_sess_combs = [(animal,session) for animal in range(65,74) 
    #                     for session in good_trials[animal]]
    animal_sess_combs = [(66,60)]
    
    bin_sizes = [5]
    cl_profs = [0]
    label = 'Task'
    exceptions = []
    
    room = [[-55,55],[-55,55]]
    
    adat = try_cache('One big data structure for %i folds'%(Folds,))
    #adat = try_cache('One big data structure')
    if adat is None: raise Exception()

    print adat.keys()
    good_trials = try_cache('Good trials')

    for animal, session in animal_sess_combs:
        # Get time per point
        fn, _ = load_mux(animal, session)
        vl = load_vl(animal,fn)
        tms = vl['Time']*24*60*60
        tpp = np.mean(tms[1:]-tms[:-1])
        print tpp
        
        plt.figure()
        for CL, cluster_profile, bin_size in product(CLs, cl_profs, bin_sizes):
            if (CL,cluster_profile) in exceptions: continue
            if animal not in adat[CL.name] or session not in adat[CL.name][animal]:continue
            
            cl_prof_name, _ = get_good_clusters(cluster_profile)
            pts = []
            try:
                for K, correct_dp in adat[CL.name][animal][session][cl_prof_name][bin_size][label].items():
                    if len(correct_dp) == 0: continue
                    y = 100.0*np.sum(correct_dp>.5)/len(correct_dp)     
                    pts.append((K,y))
            except:
                logging.warning('Something fishy with %s',CL.name)
            if len(pts) == 0: continue
            
            pts.sort(key=lambda x: x[0])
            
            # Get the right color
            CL_i = CLs.index(CL)
            cp_i = cl_profs.index(cluster_profile)
            b_i = bin_sizes.index(bin_size)
            clr_i = CL_i*len(cl_profs)+cp_i
            
            clr_str = clrs[clr_i]+ln_typs[b_i]
            xs,ys = zip(*pts)
            plt.plot(np.array(xs)*tpp,ys,clr_str,label=CL.name,
                     linewidth=lw)

        plt.legend(fontsize='x-small',loc='lower right')
        plt.xlabel('Segment Length (s)')
        plt.ylabel('Percent Correct')
        #plt.title('Accuracy vs Segment Size, Animal %i Session %i'%(animal, session))
    plt.ylim([60,95])
    plt.title('%i-Fold Validation'%(Folds,))
    plt.show()
            
Example No. 26
def checkGPV():
    logging.basicConfig(level=5)
    animal = 66
    session = 60
    room_shape = [[-55, 55], [-55, 55]]
    tetrodes = [1]
    cells = range(2, 10)
    bin_size = 5
    K = 1  # Segment length used to calculate firing rates

    maxs = 10000

    assert maxs % K == 0

    #xbins = ybins = (room_shape[0][1]-room_shape[0][0])/bin_size
    good_clusters = {tetrode: cells for tetrode in tetrodes}
    #good_clusters = get_good_clusters(0)

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in tetrodes}

    label_l = vl['Task']
    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)
    # For debugging purposes, make sure it's not too big
    if len(label_l) > maxs:
        vl['xs'] = vl['xs'][:maxs * 10:10]
        vl['ys'] = vl['ys'][:maxs * 10:10]
        label_l = label_l[:maxs * 10:10]
        for key in t_cells:
            tmp = np.array(t_cells[key])
            tmp = tmp[tmp < maxs]
            t_cells[key] = tmp

    for gpv in [gpv1]:

        logging.info('About to generate population vector.')
        X, Y = gpv(vl, t_cells, label_l, K, bin_size, room_shape)
        logging.info('%i population vectors generated.', X.shape[0])
        Y = Y.reshape([-1])

        # Debug code
        # This is only true if no points are thrown away
        if X.shape[0] * K == maxs:
            tot_spks = np.sum(
                [len(np.unique(spki)) for spki in t_cells.values()])
            assert tot_spks == np.sum(X[:, :len(t_cells)]) * K

        # GPV rates
        rates = fracs_from_pv(X, Y, bin_size, room_shape, smooth_flag=False)

        # Now get normally calculate rates
        real_rates = get_fracs(vl['xs'],
                               vl['ys'],
                               label_l,
                               room_shape,
                               bin_size,
                               t_cells,
                               smooth_flag=False)

        try:
            assert np.all(rates == real_rates)
        except:
            print 'DOESNT WORK!!'
            plot_rates(rates, Y, t_cells)
            plot_rates(real_rates, Y, t_cells)
            plt.show()
Example No. 27
# The scraped snippet starts mid-function; the signature and the two shifted
# views below are reconstructed from the comment and the call in __main__.
import numpy as np

def find_runs(task):
    task_i = task[:-1]
    task_ip1 = task[1:]

    # sgn are the indices of task_i where
    #  task_i[j] != task_i[j+1]
    sgn = np.nonzero(task_ip1 != task_i)[0]

    sgn_i = np.concatenate([[sgn[0]], sgn])
    sgn_ip1 = np.concatenate([sgn, [len(task)]])

    run_len = (sgn_ip1 - sgn_i)[1:]

    return sgn, run_len

if __name__ == '__main__':
    from matplotlib import pyplot as plt
    from Data.readData import load_mux, load_vl
    num = 66
    session = 60
    
    fn, _= load_mux(num,session)
    vl = load_vl(num,fn)
    task = get_orientation(vl,0,0)
    sgn, run_len = find_runs(task)
    
    n,bins, _ = plt.hist(run_len,bins=range(1,np.max(run_len)+1))
    plt.title('Run length')
    
    import pdb; pdb.set_trace()

    plt.figure()
    plt.hist(run_len,bins=range(1,np.max(run_len)+1),cumulative=-1)
    plt.title('Reverse Cumulative run length')
    plt.show()
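A quick trace of the change-point arithmetic on a toy label sequence:

import numpy as np

task = np.array([1, 1, 1, -1, -1, 1, 1, 1, 1])
task_i, task_ip1 = task[:-1], task[1:]
sgn = np.nonzero(task_ip1 != task_i)[0]        # [2, 4]: label changes after these indices
sgn_i = np.concatenate([[sgn[0]], sgn])        # [2, 2, 4]
sgn_ip1 = np.concatenate([sgn, [len(task)]])   # [2, 4, 9]
run_len = (sgn_ip1 - sgn_i)[1:]                # [2, 5]: gaps between change points,
                                               # with the last run measured to len(task)
print(sgn, run_len)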
Example No. 28
def rate_graph():
    #mpl.rcParams['axes.titlesize'] = 18
    #mpl.rcParams['axes.labelsize'] = 18
    mpl.rcParams['font.size'] = 26

    animal = 66
    session = 60  # This is August 7, 2013 run
    room_shape = [[-55, 55], [-55, 55]]
    tetrodes = [1]
    cluster_profile = 0
    bin_size = 5

    _, good_clusters = get_good_clusters(cluster_profile)

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in tetrodes}

    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    label_l = vl['Task']

    # rates[cell, lbl, xbin, ybin] = firing rate
    rates = get_fracs(vl['xs'], vl['ys'], label_l, room_shape, bin_size,
                      t_cells)

    for lbl in range(len(np.unique(label_l))):

        plt.figure(figsize=(10, 10))
        x = np.concatenate([
            np.arange(room_shape[0][0], room_shape[0][1], bin_size),
            [room_shape[0][1]]
        ])
        y = np.concatenate([
            np.arange(room_shape[1][0], room_shape[1][1], bin_size),
            [room_shape[1][1]]
        ])
        Xs, Ys = np.meshgrid(x, y)
        cntr = plt.pcolor(Ys, Xs, rates[3, lbl])

        t = plt.colorbar(cntr, extend='both')
        t.set_label('Frequency (Hz)')
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')
        if lbl == 0: plt.title('Clockwise')
        else: plt.title('Counterclockwise')

        #plt.axis('equal')
        plt.xlim(room_shape[0])
        plt.ylim(room_shape[1])
        '''
        plt.figure()
        x = np.arange(room_shape[0][0],room_shape[0][1],bin_size)
        y = np.arange(room_shape[1][0],room_shape[1][1],bin_size)
        Xs, Ys = np.meshgrid(x, y)
        cntr = plt.contourf(Ys,Xs,rate_dict[contxt][2])
        t = plt.colorbar(cntr, extend='both')
        t.set_label('Frequency (Hz)')
        plt.xlabel('Position (in)')
        plt.ylabel('Position (in)')'''
    plt.show()
Example No. 29
def run(Folds):
    # Toggle-able parameters
    #CLs = [CL2,CL6,CL5]
    #CLs = [CL6, CL7]
    CLs = [CL10]
    Ks = np.arange(10,200,20) # Segment length used to calculate firing rates
    

    # Sort of toggle-able parameters
    #animal_sess_combs = [(66,60),(70,8),(70,10),(66,61)]
    animal_sess_combs = [(66,60)]
    #good_trials = try_cache('Good trials')
    #animal_sess_combs = [(animal,session) for animal in range(65,74) 
    #                     for session in good_trials[animal]]
    bin_sizes = [5]
    label = 'Task'
    exceptions = []
    cl_profs = [0]
    
    # Not really toggle-able parameters
    room = [[-55,55],[-55,55]]
    
    
    
    cache = try_cache('One big data structure for %i folds'%(Folds,))
    adat = ({} if cache is None else cache)

    for animal, session in animal_sess_combs:
        fn, trigger_tm = load_mux(animal, session)
        vl = load_vl(animal,fn)
        cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in range(1,17)}
        
        if label == 'Task': label_l = vl['Task']
        else: raise Exception('Not implemented yet.')
        
        for clust_prof in cl_profs:
            cl_prof_name, good_clusters = get_good_clusters(clust_prof)
            t_cells = count_cells(vl,cls,trigger_tm,good_clusters)
            
            for bin_size, K in product(bin_sizes,Ks):
                cached = np.zeros(len(CLs))
                for CL in CLs:
                    i = CLs.index(CL)
                    try:
                        raise Exception  # debug: force recomputation, skipping the cache check below
                        adat[CL.name][animal][session][cl_prof_name][bin_size][label][K]
                        cached[i] = True
                    except:
                        cached[i] = False
                
                if np.sum(cached) == len(CLs): 
                    print 'Everything already cached'
                    continue # Everything is already cached!
                
                
                logging.info('About to generate population vector.')
                X, Y = gpv(vl, t_cells, label_l, K, bin_size, room)
                
                
                # The main data structure
                dps = {CL: [] for CL in CLs if not cached[CLs.index(CL)]}
                
                if Folds >0: kf = cross_validation.KFold(len(Y),n_folds=Folds,shuffle=True)
                else: kf = [(range(len(Y)),range(len(Y)))]
                for train_index, test_index in kf:
                    logging.warning('Training/testing: %i/%i',len(train_index),len(test_index))
                    for CL in CLs:
                        if cached[CLs.index(CL)]: continue
                        logging.warning('%s, %i seg, (%i, %i)',CL.name, K, animal, session)
                        if (CL,clust_prof) in exceptions: continue
                        CL.delt_t = K
                        correct_dp = check_classifier(train_index,test_index,X,Y,CL, room, bin_size)
        
                        dps[CL].extend(correct_dp.tolist())
                for CL in CLs:
                    if cached[CLs.index(CL)]: continue
                    to_add = np.array(dps[CL]).reshape([-1])
                    add(adat, CL.name, animal, session, cl_prof_name, bin_size, label, K, to_add)

    store_in_cache('One big data structure for %i folds'%(Folds,),adat)
Example No. 30
def run(Folds):
    # Toggle-able parameters
    #CLs = [CL2,CL6,CL5]
    #CLs = [CL6, CL7]
    CLs = [CL10]
    Ks = np.arange(10, 200,
                   20)  # Segment length used to calculate firing rates

    # Sort of toggle-able parameters
    #animal_sess_combs = [(66,60),(70,8),(70,10),(66,61)]
    animal_sess_combs = [(66, 60)]
    #good_trials = try_cache('Good trials')
    #animal_sess_combs = [(animal,session) for animal in range(65,74)
    #                     for session in good_trials[animal]]
    bin_sizes = [5]
    label = 'Task'
    exceptions = []
    cl_profs = [0]

    # Not really toggle-able parameters
    room = [[-55, 55], [-55, 55]]

    cache = try_cache('One big data structure for %i folds' % (Folds, ))
    adat = ({} if cache is None else cache)

    for animal, session in animal_sess_combs:
        fn, trigger_tm = load_mux(animal, session)
        vl = load_vl(animal, fn)
        cls = {
            tetrode: load_cl(animal, fn, tetrode)
            for tetrode in range(1, 17)
        }

        if label == 'Task': label_l = vl['Task']
        else: raise Exception('Not implemented yet.')

        for clust_prof in cl_profs:
            cl_prof_name, good_clusters = get_good_clusters(clust_prof)
            t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

            for bin_size, K in product(bin_sizes, Ks):
                cached = np.zeros(len(CLs))
                for CL in CLs:
                    i = CLs.index(CL)
                    try:
                        raise Exception  # debug: force recomputation, skipping the cache check below
                        adat[CL.name][animal][session][cl_prof_name][bin_size][
                            label][K]
                        cached[i] = True
                    except:
                        cached[i] = False

                if np.sum(cached) == len(CLs):
                    print 'Everything already cached'
                    continue  # Everything is already cached!

                logging.info('About to generate population vector.')
                X, Y = gpv(vl, t_cells, label_l, K, bin_size, room)

                # The main data structure
                dps = {CL: [] for CL in CLs if not cached[CLs.index(CL)]}

                if Folds > 0:
                    kf = cross_validation.KFold(len(Y),
                                                n_folds=Folds,
                                                shuffle=True)
                else:
                    kf = [(range(len(Y)), range(len(Y)))]
                for train_index, test_index in kf:
                    logging.warning('Training/testing: %i/%i',
                                    len(train_index), len(test_index))
                    for CL in CLs:
                        if cached[CLs.index(CL)]: continue
                        logging.warning('%s, %i seg, (%i, %i)', CL.name, K,
                                        animal, session)
                        if (CL, clust_prof) in exceptions: continue
                        CL.delt_t = K
                        correct_dp = check_classifier(train_index, test_index,
                                                      X, Y, CL, room, bin_size)

                        dps[CL].extend(correct_dp.tolist())
                for CL in CLs:
                    if cached[CLs.index(CL)]: continue
                    to_add = np.array(dps[CL]).reshape([-1])
                    add(adat, CL.name, animal, session, cl_prof_name, bin_size,
                        label, K, to_add)

    store_in_cache('One big data structure for %i folds' % (Folds, ), adat)
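cross_validation.KFold(n, n_folds=...) is the pre-0.18 scikit-learn API. Anyone rerunning this on a current scikit-learn would use model_selection instead; a minimal equivalent of the fold loop above:

import numpy as np
from sklearn.model_selection import KFold

Y = np.arange(120)  # stand-in labels
kf = KFold(n_splits=6, shuffle=True, random_state=0)
for train_index, test_index in kf.split(Y):
    # yields the same kind of train/test index pairs the old API did
    print(len(train_index), len(test_index))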
Example No. 31
def generate_DP_accuracy_graph():
    
    animal = 66
    session = 60 # This is August 7, 2013 run
    
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal,fn)
    cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in range(1,17)}
    
    
    room_shape = [[-60,60],[-60,60]]
    bin_size = 8
    # Actual Contexts
    
    
    '''
    cached = try_cache(Classifier,Classifier.name,vl,cls,trigger_tm,label_is,room_shape,bin_size)
    if cached is not None:
        classifier, Xs, Ys = cached
        logging.info('Got classifier and population vectors from Cache.cache.')
    else:
        classifier = Classifier(vl,cls,trigger_tm, label_is, room_shape, bin_size)
        Xs, Ys = classifier.generate_population_vectors()
        store_in_cache(Classifier,Classifier.name,vl,cls,trigger_tm,label_is,room_shape,bin_size,
                       [classifier,Xs,Ys])'''
    
    # Label based on task
    '''
    labels = np.unique(vl['Task'])
    label_is = {contxt: np.nonzero(vl['Task']==contxt)[0] for contxt in labels}
    label_l = vl['Task']'''
    
    label_l = get_orientation(vl,cntrx=0,cntry=0)
    labels = np.unique(label_l)
    label_is = {contxt: np.nonzero(label_l==contxt)[0] for contxt in labels}
    
    
    classifier = Classifier(vl,cls,trigger_tm, label_is, room_shape, bin_size)
    Xs, Ys = classifier.generate_population_vectors(label_l)
    
    correct_dp = []
    incorrect_dp = []
    
    for (xbin,ybin),vecs,lbls in zip(Xs.keys(),Xs.values(),Ys.values()):
        for vec,lbl in zip(vecs,lbls):
            if lbl == 0:
                crct, incrct = classifier.classifiy(xbin, ybin, vec)
            else:
                incrct, crct  = classifier.classifiy(xbin, ybin, vec)
            correct_dp.append(crct)
            incorrect_dp.append(incrct)
    
    # Process
    correct_dp = np.array(correct_dp)
    incorrect_dp = np.array(incorrect_dp)
    nonzero_is = (correct_dp > 0) | (incorrect_dp > 0)
    correct_dp = correct_dp[np.nonzero(nonzero_is)[0]]
    incorrect_dp = incorrect_dp[np.nonzero(nonzero_is)[0]]
    
    from matplotlib import pyplot as plt
    
    # 2d Histogram
    plt.figure()
    hist,xedges,yedges = np.histogram2d(correct_dp, incorrect_dp, 150)
    Xs, Ys = np.meshgrid(xedges, yedges)
    grph = plt.pcolor(Xs,Ys,hist)
    plt.xlim([0,xedges[-1]])
    plt.ylim([0,yedges[-1]])
    plt.colorbar(grph, extend='both')
    plt.title('Dot Product Classifier Accuracy')
    plt.xlabel('Population vector x Correct Template')
    plt.ylabel('Population vector x Incorrect Template')
    
    # Accuracy meter
    plt.figure()
    accuracy = correct_dp / np.sqrt(correct_dp**2+incorrect_dp**2)
    plt.hist(accuracy,normed=True)
    plt.xlabel('Accuracy')
    plt.title(classifier.name)

    msg = []
    for i in [1,50,75,90,95,99]:
        perc = 1.0*np.sum(accuracy > i/100.0)/len(accuracy)*100.0
        msg.append('>%i%%:  %.1f%%'%(i,perc))
    msg = '\n'.join(msg)
    plt.xlim([0,1])
    xcoord = plt.xlim()[0] + (plt.xlim()[1]-plt.xlim()[0])*.1
    ycoord = plt.ylim()[0] + (plt.ylim()[1]-plt.ylim()[0])*.5
    plt.text(xcoord,ycoord,msg)
    plt.show()
    
Example No. 32
#from ContextPredictors.DotProduct import DotProduct

timeout_rate = 15
animation_step_size = 10

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    
    room_shape = [[-55,55],[-55,55]]
    animal = 66
    session = 60
    tetrode=3
    
    
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal,fn)
    cl = load_cl(animal,fn,tetrode)
    wv = load_wv(animal,fn,tetrode)
    wv_iters = match_cl_to_vl(cl['Time'],vl, trigger_tm)
    
    #WAVE_L = WR.read(vl[2],vl[3], is_clockwise(*vl[2:6]))
    #actual_prediction = np.array([is_clockwise(x,y,vx,vy) for 
    #                              x,y,vx,vy in zip(*vl.values()[2:6])])
    #logging.info('Starting to generate waveforms...')
    #global WAVE_L
    #WAVE_L = WR.read(vl[2],vl[3],actual_prediction)
    #WAVE_L = np.array([WR.read(x,y,actual) for actual, x,y in zip(actual_prediction, *vl[2:4])])
    #logging.info('Finished generating waveform.')
    
    '''
    HMM = PiecewiseHMM(vl[2],vl[3],WAVE_L,actual_prediction)
    '''
Example No. 33
def find_ambiguous_data():
    # Final data structure will be a dictionary:
    #  amb[animal][session] = (radial, discrepancy, total)

    # First load cache

    cache = try_cache(cache_key)
    if cache is not None:
        amb = cache
    else:
        amb = {}

    # Fix center
    cntrx = cntry = 0

    # Animal range
    animals = range(65, 74)

    not_task_trials = []
    for animal in animals:

        # Add to dictionary
        if animal not in amb:
            amb[animal] = {}

        for session in range(1, 100):
            if animal in amb and session in amb[
                    animal]:  #and amb[animal][session]:
                logging.info('Found (Animal %i, Session %i) in cache', animal,
                             session)
                continue
            try:
                fn, _ = load_mux(animal, session)
            except:
                logging.info('Animal %i has no sessions greater than %i',
                             animal, session - 1)
                break
            try:
                vl = load_vl(animal, fn)
            except:
                traceback.print_exc()
                logging.info('No data found for (Animal %i, Session %i)',
                             animal, session)
                amb[animal][session] = None
                not_task_trials.append([animal, session])
                continue

            logging.info('Checking ambiguous data for (Animal %i, Session %i)',
                         animal, session)

            orientation = get_orientation(vl, cntrx, cntry)

            # Assume that orientation and task labels are matched correctly
            radial = np.sum(0 == orientation)
            discrepancy = np.sum(vl['Task'] != orientation)
            tot = len(vl['xs'])

            amb[animal][session] = (radial, discrepancy, tot)

    # Store to cache
    store_in_cache(cache_key, amb)

    return amb
Example No. 34
# The scraped snippet starts mid-function; the signature and the two shifted
# views below are reconstructed from the duplicate of this snippet in
# Example No. 27 and the call in __main__.
import numpy as np

def find_runs(task):
    task_i = task[:-1]
    task_ip1 = task[1:]

    # sgn marks the indices where the label sequence changes value
    sgn = np.nonzero(task_ip1 != task_i)[0]

    sgn_i = np.concatenate([[sgn[0]], sgn])
    sgn_ip1 = np.concatenate([sgn, [len(task)]])

    run_len = (sgn_ip1 - sgn_i)[1:]

    return sgn, run_len


if __name__ == '__main__':
    from matplotlib import pyplot as plt
    from Data.readData import load_mux, load_vl
    num = 66
    session = 60

    fn, _ = load_mux(num, session)
    vl = load_vl(num, fn)
    task = get_orientation(vl, 0, 0)
    sgn, run_len = find_runs(task)

    n, bins, _ = plt.hist(run_len, bins=range(1, np.max(run_len) + 1))
    plt.title('Run length')

    import pdb
    pdb.set_trace()

    plt.figure()
    plt.hist(run_len, bins=range(1, np.max(run_len) + 1), cumulative=-1)
    plt.title('Reverse Cumulative run length')
    plt.show()
Example No. 35
def checkGPV():
    logging.basicConfig(level=5)
    animal = 66
    session = 60 
    room_shape = [[-55,55],[-55,55]]
    tetrodes = [1]
    cells = range(2,10)
    bin_size = 5
    K = 1  # Segment length used to calculate firing rates
    
    
    maxs = 10000
    
    assert maxs%K==0
    
    #xbins = ybins = (room_shape[0][1]-room_shape[0][0])/bin_size
    good_clusters = {tetrode:cells for tetrode in tetrodes}
    #good_clusters = get_good_clusters(0)
    
    
    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal,fn)
    cls = {tetrode:load_cl(animal,fn,tetrode) for tetrode in tetrodes}
    
    label_l = vl['Task']
    t_cells = count_cells(vl,cls,trigger_tm, good_clusters)
    
    # For debugging purposes, make sure it's not too big
    if len(label_l) > maxs:
        vl['xs'] = vl['xs'][:maxs*10:10]
        vl['ys'] = vl['ys'][:maxs*10:10]
        label_l = label_l[:maxs*10:10]
        for key in t_cells:
            tmp = np.array(t_cells[key])
            tmp = tmp[tmp<maxs]
            t_cells[key] = tmp
    
    for gpv in [gpv1]:
    
        logging.info('About to generate population vector.')
        X, Y = gpv(vl, t_cells, label_l, K, bin_size, room_shape)
        logging.info('%i population vectors generated.',X.shape[0])
        Y = Y.reshape([-1])
        
        # Debug code
        # This is only true if no points are thrown away
        if X.shape[0]*K == maxs:
            tot_spks = np.sum([len(np.unique(spki)) for spki in t_cells.values()])
            assert tot_spks == np.sum(X[:,:len(t_cells)])*K

        # GPV rates
        rates = fracs_from_pv(X,Y,bin_size,room_shape,smooth_flag=False)
        
        # Now get normally calculate rates
        real_rates = get_fracs(vl['xs'],vl['ys'],label_l,room_shape,bin_size, t_cells,smooth_flag=False)
        
        try:
            assert np.all(rates == real_rates)
        except:
            print 'DOESNT WORK!!'
            plot_rates(rates,Y,t_cells)
            plot_rates(real_rates,Y,t_cells)
            plt.show()
Example No. 36
def generate_DP_accuracy_graph():

    animal = 66
    session = 60  # This is August 7, 2013 run

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}

    room_shape = [[-60, 60], [-60, 60]]
    bin_size = 8
    # Actual Contexts
    '''
    cached = try_cache(Classifier,Classifier.name,vl,cls,trigger_tm,label_is,room_shape,bin_size)
    if cached is not None:
        classifier, Xs, Ys = cached
        logging.info('Got classifier and population vectors from Cache.cache.')
    else:
        classifier = Classifier(vl,cls,trigger_tm, label_is, room_shape, bin_size)
        Xs, Ys = classifier.generate_population_vectors()
        store_in_cache(Classifier,Classifier.name,vl,cls,trigger_tm,label_is,room_shape,bin_size,
                       [classifier,Xs,Ys])'''

    # Label based on task
    '''
    labels = np.unique(vl['Task'])
    label_is = {contxt: np.nonzero(vl['Task']==contxt)[0] for contxt in labels}
    label_l = vl['Task']'''
    label_l = get_orientation(vl, cntrx=0, cntry=0)
    labels = np.unique(label_l)
    label_is = {contxt: np.nonzero(label_l == contxt)[0] for contxt in labels}

    classifier = Classifier(vl, cls, trigger_tm, label_is, room_shape,
                            bin_size)
    Xs, Ys = classifier.generate_population_vectors(label_l)

    correct_dp = []
    incorrect_dp = []

    for (xbin, ybin), vecs, lbls in zip(Xs.keys(), Xs.values(), Ys.values()):
        for vec, lbl in zip(vecs, lbls):
            if lbl == 0:
                crct, incrct = classifier.classifiy(xbin, ybin, vec)
            else:
                incrct, crct = classifier.classifiy(xbin, ybin, vec)
            correct_dp.append(crct)
            incorrect_dp.append(incrct)

    # Process
    correct_dp = np.array(correct_dp)
    incorrect_dp = np.array(incorrect_dp)
    nonzero_is = (correct_dp > 0) | (incorrect_dp > 0)
    correct_dp = correct_dp[np.nonzero(nonzero_is)[0]]
    incorrect_dp = incorrect_dp[np.nonzero(nonzero_is)[0]]

    from matplotlib import pyplot as plt

    # 2d Histogram
    plt.figure()
    hist, xedges, yedges = np.histogram2d(correct_dp, incorrect_dp, 150)
    Xs, Ys = np.meshgrid(xedges, yedges)
    grph = plt.pcolor(Xs, Ys, hist)
    plt.xlim([0, xedges[-1]])
    plt.ylim([0, yedges[-1]])
    plt.colorbar(grph, extend='both')
    plt.title('Dot Product Classifier Accuracy')
    plt.xlabel('Population vector x Correct Template')
    plt.ylabel('Population vector x Incorrect Template')

    # Accuracy meter
    plt.figure()
    accuracy = correct_dp / np.sqrt(correct_dp**2 + incorrect_dp**2)
    plt.hist(accuracy, normed=True)
    plt.xlabel('Accuracy')
    plt.title(classifier.name)

    msg = []
    for i in [1, 50, 75, 90, 95, 99]:
        perc = 1.0 * np.sum(accuracy > i / 100.0) / len(accuracy) * 100.0
        msg.append('>%i%%:  %.1f%%' % (i, perc))
    msg = '\n'.join(msg)
    plt.xlim([0, 1])
    xcoord = plt.xlim()[0] + (plt.xlim()[1] - plt.xlim()[0]) * .1
    ycoord = plt.ylim()[0] + (plt.ylim()[1] - plt.ylim()[0]) * .5
    plt.text(xcoord, ycoord, msg)
    plt.show()
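The accuracy statistic above is the cosine of each (correct, incorrect) dot-product pair measured against the correct-template axis: 1.0 means all the projection fell on the correct template, and 1/sqrt(2) ≈ 0.707 is a tie. For instance:

import numpy as np

correct_dp = np.array([0.9, 0.5, 0.1])
incorrect_dp = np.array([0.1, 0.5, 0.9])
accuracy = correct_dp / np.sqrt(correct_dp**2 + incorrect_dp**2)
print(accuracy)  # [0.994, 0.707, 0.110]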
Example No. 37
# (fragment: the tail of a count_cells-style helper that records, for each
#  accepted (tetrode, cell) pair, the video-log indices of its spikes)
            t_cells[(tetrode, cell)] = spk_i
    return t_cells


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    animal = 66
    session = 60

    room_shape = [[-60, 60], [-60, 60]]
    bin_size = 8
    K = 32  # Segment length used to calculate firing rates

    fn, trigger_tm = load_mux(animal, session)
    vl = load_vl(animal, fn)
    cls = {tetrode: load_cl(animal, fn, tetrode) for tetrode in range(1, 17)}
    # Label with task
    labels = np.unique(vl['Task'])
    label_is = {
        contxt: np.nonzero(vl['Task'] == contxt)[0]
        for contxt in labels
    }
    label_l = vl['Task']

    # good_clusters is not defined in this fragment; in the other examples it
    # comes from: _, good_clusters = get_good_clusters(0)
    t_cells = count_cells(vl, cls, trigger_tm, good_clusters)

    X, Y = gpv(vl, t_cells, room_shape, bin_size, label_l, K=32)

    classifier = Classifier(X, Y)
Example No. 38
def view_simulation():
    logging.basicConfig(level=logging.INFO)
    mpl.rcParams['font.size'] = 26
    lw = 3

    CLs = [CL6, CL7, CL2]
    Folds = 6

    # Good trials is a dictionary
    #  good_trials[animal] = [list of sessions that are task trials
    #                         and have at least one labeled cluster]
    #good_trials = try_cache('Good trials')
    #animal_sess_combs = [(animal,session) for animal in range(65,74)
    #                     for session in good_trials[animal]]
    animal_sess_combs = [(66, 60)]

    bin_sizes = [5]
    cl_profs = [0]
    label = 'Task'
    exceptions = []

    room = [[-55, 55], [-55, 55]]

    adat = try_cache('One big data structure for %i folds' % (Folds, ))
    #adat = try_cache('One big data structure')
    if adat is None: raise Exception()

    print adat.keys()
    good_trials = try_cache('Good trials')

    for animal, session in animal_sess_combs:
        # Get time per point
        fn, _ = load_mux(animal, session)
        vl = load_vl(animal, fn)
        tms = vl['Time'] * 24 * 60 * 60
        tpp = np.mean(tms[1:] - tms[:-1])
        print tpp

        plt.figure()
        for CL, cluster_profile, bin_size in product(CLs, cl_profs, bin_sizes):
            if (CL, cluster_profile) in exceptions: continue
            if animal not in adat[CL.name] or session not in adat[
                    CL.name][animal]:
                continue

            cl_prof_name, _ = get_good_clusters(cluster_profile)
            pts = []
            try:
                for K, correct_dp in adat[CL.name][animal][session][
                        cl_prof_name][bin_size][label].items():
                    if len(correct_dp) == 0: continue
                    y = 100.0 * np.sum(correct_dp > .5) / len(correct_dp)
                    pts.append((K, y))
            except:
                logging.warning('Something fishy with %s', CL.name)
            if len(pts) == 0: continue

            pts.sort(key=lambda x: x[0])

            # Get the right color
            CL_i = CLs.index(CL)
            cp_i = cl_profs.index(cluster_profile)
            b_i = bin_sizes.index(bin_size)
            clr_i = CL_i * len(cl_profs) + cp_i

            clr_str = clrs[clr_i] + ln_typs[b_i]
            xs, ys = zip(*pts)
            plt.plot(np.array(xs) * tpp,
                     ys,
                     clr_str,
                     label=CL.name,
                     linewidth=lw)

        plt.legend(fontsize='x-small', loc='lower right')
        plt.xlabel('Segment Length (s)')
        plt.ylabel('Percent Correct')
        #plt.title('Accuracy vs Segment Size, Animal %i Session %i'%(animal, session))
    plt.ylim([60, 95])
    plt.title('%i-Fold Validation' % (Folds, ))
    plt.show()