def test_ppf_vf(self):
        te = dbfn.TaskEntry(2425, dbname='testing')
        dec_record = te.decoder_record
        dec = dec_record.load()
        
        training_block = dbfn.TaskEntry(dec_record.entry_id, dbname='testing')
        files = training_block.datafiles
        
        from riglib.bmi import extractor
        extractor_cls = extractor.BinnedSpikeCountsExtractor
        extractor_kwargs = dict()
        extractor_kwargs['units'] = dec.units
        extractor_kwargs['n_subbins'] = dec.n_subbins 

        dec_new = train.train_PPFDecoder(
            files, extractor_cls, extractor_kwargs, train.get_plant_pos_vel,
            dec.ssm, dec.units, update_rate=dec.binlen, kin_source='task',
            pos_key='cursor', vel_key=None, tslice=dec.tslice)
        for attr in dec.filt.model_attrs:
            old_attr = getattr(dec.filt, attr)
            new_attr = getattr(dec_new.filt, attr)
            self.assertTrue(np.all(np.abs(old_attr - new_attr) < 1e-10))

        self.assertTrue(np.array_equal(dec_new.units, dec.units))
        self.assertTrue(np.array_equal(dec_new.bounding_box[0], dec.bounding_box[0]))
        self.assertTrue(np.array_equal(dec_new.bounding_box[1], dec.bounding_box[1]))
        self.assertTrue(dec_new.states == dec.states)
        self.assertTrue(dec_new.states_to_bound == dec.states_to_bound)
        self.assertTrue(np.array_equal(dec_new.drives_neurons, dec.drives_neurons))
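
# A reusable version of the tolerance check in the test above -- a minimal
# sketch, assuming both filters expose the same model_attrs list as dec.filt:
def filts_close(filt_old, filt_new, tol=1e-10):
    '''True if every model attribute matches elementwise within tol.'''
    return all(np.all(np.abs(getattr(filt_old, a) - getattr(filt_new, a)) < tol)
               for a in filt_old.model_attrs)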
def save_new_dec(task_entry_id, dec_obj, suffix):
    '''
    Summary: Method to save a new decoder to the DB -- saves it under the task
        entry (TE) that the original decoder came from
    Input param: task_entry_id: TE to save the new decoder under
    Input param: dec_obj: the new Decoder object (e.g. a modified KF decoder)
    Input param: suffix: string appended to the original decoder name
    Output param: None
    '''

    te = dbfn.TaskEntry(task_entry_id)
    try:
        te_id = te.te_id
    except:
        # No te_id on the record: recover the training TE id from the decoder
        # name, which embeds it as '...te<ID>...'
        dec_nm = te.name
        te_ix = re.search('te[0-9]', dec_nm)
        ix = te_ix.start() + 2
        sub_dec_nm = dec_nm[ix:]
        
        te_ix_end = sub_dec_nm.find('_')
        if te_ix_end == -1:
            te_ix_end = len(sub_dec_nm)
        te_id = int(sub_dec_nm[:te_ix_end])

    old_dec_obj = te.decoder_record
    if old_dec_obj is None:
        old_dec_obj = faux_decoder_obj(task_entry_id)
    trainbmi.save_new_decoder_from_existing(dec_obj, old_dec_obj, suffix=suffix)
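
# Usage sketch (hypothetical id and suffix): re-save a modified decoder under
# the TE its original came from, with the suffix appended to the decoder name.
#
#     new_dec = dbfn.TaskEntry(2425).decoder   # any modified Decoder object
#     save_new_dec(2425, new_dec, '_edited')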
def _get_te(te, **kwargs):
    # dbname = kwargs.pop('dbname', 'default')
    te = dbfn.TaskEntry(te, **kwargs)
    try:
        # Prefer a task-specific TaskEntry subclass if one is registered
        return tasks[te.record.task.name](te.record.id, **kwargs)
    except:
        return te
def get_seed_block(te, max_depth=np.inf):
    '''
    Determine how much time was spent on CLDA for a given decoder on a particular day
    '''
    decoder_record = te.decoder_record
    from db.tracker import models
    depth = 0
    total_time = 0
    while depth < max_depth:
        rec = models.TaskEntry.objects.using(
            te.record._state.db).get(id=decoder_record.entry_id)
        # Convert the 'H:M:S' runtime string to a timedelta by subtracting
        # the strptime epoch, datetime(1900, 1, 1)
        t = datetime.datetime.strptime(
            rec.offline_report()['Runtime'],
            r'%H:%M:%S') - datetime.datetime.strptime('', '')
        total_time += t.seconds
        try:
            new_decoder_record = dbfn.TaskEntry(
                rec, dbname=te.record._state.db).decoder_record
            if new_decoder_record == decoder_record:
                print('circle')
                break
            else:
                decoder_record = new_decoder_record

            if decoder_record.entry_id is None:
                break
        except:
            break

        depth += 1
    return rec
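
# Usage sketch (entry id 2425 and dbname 'testing' reused from the test above):
# walk the chain of CLDA training blocks back to the seed block's record.
te = dbfn.TaskEntry(2425, dbname='testing')
seed_rec = get_seed_block(te, max_depth=10)
print(seed_rec.id)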
Example #5
def get(id, task_specific=True, **kwargs):
	if task_specific:
		import performance
		return performance._get_te(id, **kwargs)
	else:
		from db import dbfunctions
		return dbfunctions.TaskEntry(id)
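
# Usage sketch (id 77 reused from the script in Example #9 below): fall back
# to the generic dbfunctions.TaskEntry when no task-specific wrapper applies.
te = get(77, task_specific=False)
print(te.date)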
Example #6
def get_decoder_corr(task_entry_id, decoder_entry_id, get_dec_used=True):
    '''
    Summary: get a KF decoder, either one TRAINED in a block (pass
        get_dec_used=False; decoder_entry_id is needed if the block trained
        more than one decoder), or the one USED during task_entry_id (default)
    Input param: task_entry_id: dbname task entry ID
    Input param: decoder_entry_id: decoder entry id (models.Decoder.objects.get(entry=entry))
    Output param: KF Decoder
    '''
    ld = True
    if get_dec_used is False:
        decoder_entries = dbfn.TaskEntry(
            task_entry_id).get_decoders_trained_in_block()
        if len(decoder_entries) > 0:
            print('Loading decoder TRAINED from task %d' % task_entry_id)
            if isinstance(decoder_entries, models.Decoder):
                decoder = decoder_entries
                ld = False
            else:  # list of decoders -- search for the right one
                try:
                    dec_ids = [de.pk for de in decoder_entries]
                    _ix = dec_ids.index(decoder_entry_id)
                    decoder = decoder_entries[_ix]
                    ld = False
                except:
                    if decoder_entry_id is None:
                        print('Too many decoder entries trained from this TE, specify decoder_entry_id')
                    else:
                        print('Too many decoder entries trained from this TE, no match to decoder_entry_id %d' % decoder_entry_id)
    if ld is False:
        kfdec = decoder.load()
    else:
        try:
            kfdec = dbfn.TaskEntry(task_entry_id).decoder
            print('Loading decoder USED in task %s' % dbfn.TaskEntry(
                task_entry_id).task)
        except:
            raise Exception('Cannot load decoder from TE%d' % task_entry_id)
    return kfdec
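
# Usage sketch (hypothetical ids): load either the decoder USED in a block, or
# one TRAINED in it when the block trained several.
kf_used = get_decoder_corr(4558, None)                       # decoder used in TE 4558
kf_trained = get_decoder_corr(4558, 12, get_dec_used=False)  # trained decoder with pk 12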
def get_decoder_bias(decoder_name):
    if isinstance(decoder_name, int):
        decoder_name = dbf.TaskEntry(decoder_name,
                                     db_name=dbf.db_name).decoder_record.name
    decoder = models.Decoder.objects.using(dbf.db_name).get(name=decoder_name)
    decoder = decoder.load(db_name=dbf.db_name)
    if 'sskf' in decoder_name:
        # Assumes the biased decoder's name is the unbiased decoder's name
        # plus a 6-character suffix
        unbiased_name = decoder_name[:-6]
        unbiased_decoder = models.Decoder.objects.get(
            name=unbiased_name).load()
        bias_vec = decoder.filt.F[3:6, -1] - unbiased_decoder.filt.F[3:6, -1]
    else:
        bias_vec = np.asarray(decoder.filt.A)[3:6, -1]
    gain = np.linalg.norm(bias_vec)
    angle = np.arctan2(bias_vec[-1], bias_vec[0])
    if angle < 0: angle += 2 * np.pi
    return dict(gain=gain, angle=angle * 180 / np.pi)
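
# Worked sketch of the gain/angle conversion above on a made-up bias vector
# (the three velocity components picked out by the [3:6, -1] slice):
import numpy as np

bias_vec = np.array([0.3, 0.0, -0.4])          # hypothetical (vx, vy, vz) bias
gain = np.linalg.norm(bias_vec)                # 0.5
angle = np.arctan2(bias_vec[-1], bias_vec[0])  # angle in the x-z plane
if angle < 0: angle += 2 * np.pi
print(dict(gain=gain, angle=angle * 180 / np.pi))  # gain 0.5, angle ~306.9 deg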
    def offline_report(self):
        Exp = self.task.get(self.feats.all())
        
        if len(self.report) == 0:
            return dict()
        else:
            report = json.loads(self.report)
            rpt = Exp.offline_report(report)

            ## If this is a BMI block, add the decoder name to the report (doesn't show up properly in drop-down menu for old blocks)
            try:
                from db import dbfunctions
                te = dbfunctions.TaskEntry(self.id, dbname=self._state.db)
                rpt['Decoder name'] = te.decoder_record.name + ' (trained in block %d)' % te.decoder_record.entry_id
            except AttributeError:
                pass
            except:
                import traceback
                traceback.print_exc()
            return rpt
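
# Usage sketch (id 77 reused from the script in Example #9 below):
#
#     rpt = models.TaskEntry.objects.get(id=77).offline_report()
#     print(rpt.get('Runtime'), rpt.get('Decoder name'))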
Example #9
#!/usr/bin/python
'''
A set of tests to ensure that all the dbfunctions are still functional
'''
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'db.settings')
django.setup()

from db.tracker import models
import db.dbfunctions as dbfn

id = 77
te = dbfn.TaskEntry(id)

print(dbfn.get_date(id))
print(dbfn.get_notes(id))
print(dbfn.get_subject(id))
print(dbfn.get_length(id))
print(dbfn.get_success_rate(id))

#print(dbfn.get_plx_file(id))
#print(dbfn.get_decoder_name(id))
#print(dbfn.get_decoder_name_full(id))
#print(dbfn.get_decoder(id))
#print(dbfn.get_params(id))
#print(dbfn.get_param(id, 'decoder'))

#id = 1956
#print(dbfn.get_bmiparams_file(id))
Example #10
import os
import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'db.settings')
django.setup()

from db.tracker import models

fe = models.Feature.objects.all()
print(fe)

ID_NUMBER = 77
te = models.TaskEntry.objects.get(id=ID_NUMBER)
print(te.subject)

# try a different method
# need to take out the dot in the .tracker import
# this method is deprecated
import db.dbfunctions as dbfn
te_1 = dbfn.TaskEntry(ID_NUMBER)
print(te_1.date)
#Open new file:
f = open(os.path.expandvars('$HOME/files_to_backup'), 'w')

#Append to previous file:
f_list = open(os.path.expandvars('$HOME/files_to_backup_te_list'), 'a')

for k, te in enumerate(backed_up_tes):

    #Check if already backed up:
    if te.id in already_te_added:
        pass
    else:
        #Array for datafile names:
        datafiles = []
        try:
            te = dbfn.TaskEntry(te)
            te_datafiles = te.datafiles
            for sysname in systems_to_back_up:
                if sysname in te_datafiles:
                    datafiles.append(te_datafiles[sysname])

            #Close HDF file to free memory:
            try:
                te.hdf.close()
            except:
                pass

            if k % 100 == 0:
                print(k)

            if te.decoder_record is not None:
    def training_tau(self):
        try:
            return dbfn.TaskEntry(self.decoder_record.entry).params['tau']
        except:
            return np.nan
Example #13
    def __init__(self,
                 n_iter=None,
                 entry_id=None,
                 hdf_filename=None,
                 decoder_filename=None,
                 params=dict(),
                 *args,
                 **kwargs):
        if entry_id is None and (hdf_filename is None
                                 or decoder_filename is None):
            raise ValueError(
                "Not enough data to reconstruct a BMI! Specify a database entry OR an HDF file + decoder file"
            )
        if entry_id is not None:
            from db import dbfunctions as dbfn
            te = dbfn.TaskEntry(entry_id)
            self.hdf_ref = te.hdf
            self.decoder = te.decoder
            self.params = te.params

            if self.hdf_ref is None:
                raise ValueError("Database is unable to locate HDF file!")
            if self.decoder is None:
                raise ValueError("Database is unable to locate decoder file!")
        elif hdf_filename is not None:
            self.hdf_ref = tables.open_file(hdf_filename)
            self.decoder = pickle.load(
                open(decoder_filename, 'rb'),
                encoding='latin1')  # extra args to get py3 to read py2 pickles
            self.params = params

        if n_iter is None:
            n_iter = len(self.hdf_ref.root.task)
        self.n_iter = min(n_iter, len(self.hdf_ref.root.task))

        try:
            self.starting_pos = self.hdf_ref.root.task[0]['decoder_state'][0:3, 0]
        except:
            # The statement above appears to not always work...
            self.starting_pos = self.hdf_ref.root.task[0]['cursor']  # (0, 0, 0)

        # if 'plant_type' in te.params:
        #     self.plant_type = te.params['plant_type']
        # elif 'arm_class' in te.params:
        #     plant_type = te.params['arm_class']
        #     if plant_type == 'CursorPlant':
        #         self.plant_type = 'cursor_14x14'
        #     else:
        #         self.plant_type = plant_type
        # else:
        #     self.plant_type = 'cursor_14x14'

        # TODO overly specific
        self.plant = plants.CursorPlant(
            endpt_bounds=(-14, 14, 0., 0., -14, 14))

        ## Set the target radius because the old assist method changes the assist speed
        # when the cursor is inside the target
        self.target_radius = self.params['target_radius']
        self.cursor_radius = self.params['cursor_radius']
        self.assist_level = self.params['assist_level']

        self.idx = 0
        gen = sim_target_seq_generator_multi(8, 1000)

        super(BMIReconstruction, self).__init__(gen, *args, **kwargs)

        self.hdf = SimHDF()
        self.learn_flag = True

        task_msgs = self.hdf_ref.root.task_msgs[:]
        self.update_bmi_msgs = task_msgs[task_msgs['msg'] == 'update_bmi']
        task_msgs = list(
            filter(lambda x: x['msg'] not in ['update_bmi'], task_msgs))
        # print(task_msgs)
        self.task_state = np.array([None] * n_iter)
        for msg, next_msg in zip(task_msgs[:-1], task_msgs[1:]):
            self.task_state[msg['time']:next_msg['time']] = msg['msg']

        self.update_bmi_inds = np.zeros(len(self.hdf_ref.root.task))
        self.update_bmi_inds[self.update_bmi_msgs['time']] = 1
        self.recon_update_bmi_inds = np.zeros(len(self.hdf_ref.root.task))

        self.target_hold_msgs = list(
            filter(lambda x: x['msg'] in ['target', 'hold'],
                   self.hdf_ref.root.task_msgs[:]))
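
    # Construction sketch (paths and ids are hypothetical placeholders):
    # rebuild a BMI either from a database entry or from an HDF file plus a
    # pickled decoder file.
    #
    #     recon = BMIReconstruction(n_iter=1000, entry_id=77)
    #     recon = BMIReconstruction(n_iter=1000,
    #                               hdf_filename='/storage/rawdata/test.hdf',
    #                               decoder_filename='/storage/decoders/dec.pkl',
    #                               params=dict(target_radius=2., cursor_radius=.4,
    #                                           assist_level=0.))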
    def generate_FA_matrices(self,
                             training_task_entry,
                             plot=False,
                             hdf=None,
                             dec=None,
                             bin_spk=None):

        import utils.fa_decomp as pa
        if bin_spk is None:
            if training_task_entry is not None:
                from db import dbfunctions as dbfn
                te = dbfn.TaskEntry(training_task_entry)
                hdf = te.hdf
                dec = te.decoder

            bin_spk, targ_pos, targ_ix, z, zz = self.extract_trials_all(
                hdf, dec)

        #Zscore is in time x neurons
        zscore_X, mu = self.zscore_spks(bin_spk)

        # #Find optimal number of factors:
        LL, psv = pa.find_k_FA(zscore_X, iters=3, max_k=10, plot=False)

        # nan-mean of LL across iterations
        nan_ix = np.isnan(LL)
        samp = np.sum(~nan_ix, axis=0)
        ll = np.nansum(LL, axis=0)
        LL_new = np.divide(ll, samp)

        num_factors = 1 + (np.argmax(LL_new))
        print('optimal LL factors: ', num_factors)

        FA = skdecomp.FactorAnalysis(n_components=num_factors)

        #Samples x features:
        FA.fit(zscore_X)

        #FA matrices:
        U = np.mat(FA.components_).T
        i = np.diag_indices(U.shape[0])
        Psi = np.mat(np.zeros((U.shape[0], U.shape[0])))
        Psi[i] = FA.noise_variance_
        A = U * U.T
        B = np.linalg.inv(U * U.T + Psi)
        mu_vect = np.array([mu[0, :]]).T  #Size = N x 1
        sharL = A * B

        #Calculate shared / priv scaling:
        bin_spk_tran = bin_spk.T
        mu_mat = np.tile(np.array([mu[0, :]]).T, (1, bin_spk_tran.shape[1]))
        demn = bin_spk_tran - mu_mat
        shared_bin_spk = (sharL * demn)
        priv_bin_spk = bin_spk_tran - mu_mat - shared_bin_spk

        #Scaling:
        eps = 1e-15
        x_var = np.var(np.mat(bin_spk_tran), axis=1) + eps
        pr_var = np.var(priv_bin_spk, axis=1) + eps
        sh_var = np.var(shared_bin_spk, axis=1) + eps

        priv_scalar = np.sqrt(np.divide(x_var, pr_var))
        shared_scalar = np.sqrt(np.divide(x_var, sh_var))

        if plot:
            tmp = np.diag(U.T * U)
            plt.plot(np.arange(1, num_factors + 1),
                     np.cumsum(tmp) / np.sum(tmp), '.-')
            plt.plot([0, num_factors + 1], [.9, .9], '-')

        #Get main shared space:
        u, s, v = np.linalg.svd(A)
        s_red = np.zeros_like(s)
        s_hd = np.zeros_like(s)

        ix = np.nonzero(np.cumsum(s**2) / float(np.sum(s**2)) > .90)[0]
        if len(ix) > 0:
            n_dim_main_shared = ix[0] + 1
        else:
            n_dim_main_shared = len(s)
        if n_dim_main_shared < 2:
            n_dim_main_shared = 2
        print "main shared: n_dim: ", n_dim_main_shared, np.cumsum(s) / float(
            np.sum(s))
        s_red[:n_dim_main_shared] = s[:n_dim_main_shared]
        s_hd[n_dim_main_shared:] = s[n_dim_main_shared:]

        main_shared_A = u * np.diag(s_red) * v
        hd_shared_A = u * np.diag(s_hd) * v
        main_shared_B = np.linalg.inv(main_shared_A + hd_shared_A + Psi)

        uut_psi_inv = main_shared_B.copy()
        u_svd = u[:, :n_dim_main_shared]

        main_sharL = main_shared_A * main_shared_B

        main_shar = main_sharL * demn
        main_shar_var = np.var(main_shar, axis=1) + eps
        main_shar_scal = np.sqrt(np.divide(x_var, main_shar_var))

        main_priv = demn - main_shar
        main_priv_var = np.var(main_priv, axis=1) + eps
        main_priv_scal = np.sqrt(np.divide(x_var, main_priv_var))

        # #Get PCA decomposition:
        #LL, ax = pa.FA_all_targ_ALLms(hdf, iters=2, max_k=20, PCA_instead=True)
        #num_PCs = 1+(np.argmax(np.mean(LL, axis=0)))

        # Main PCA space:
        # Get cov matrix:
        cov_pca = np.cov(zscore_X.T)
        eig_val, eig_vec = np.linalg.eig(cov_pca)

        tot_var = sum(eig_val)
        cum_var_exp = np.cumsum(
            [i / tot_var for i in sorted(eig_val, reverse=True)])
        n_PCs = np.nonzero(cum_var_exp > 0.9)[0][0] + 1

        proj_mat = eig_vec[:, :n_PCs]
        proj_trans = np.mat(proj_mat) * np.mat(proj_mat.T)

        #PC matrices:
        return dict(fa_sharL=sharL,
                    fa_mu=mu_vect,
                    fa_shar_var_sc=shared_scalar,
                    fa_priv_var_sc=priv_scalar,
                    U=U,
                    Psi=Psi,
                    training_task_entry=training_task_entry,
                    FA_iterated_power=FA.iterated_power,
                    FA_score=FA.score(zscore_X),
                    FA_LL=np.array(FA.loglike_),
                    fa_main_shared=main_sharL,
                    fa_main_shared_sc=main_shar_scal,
                    fa_main_private_sc=main_priv_scal,
                    fa_main_shar_n_dim=n_dim_main_shared,
                    sing_vals=s,
                    own_pc_trans=proj_trans,
                    FA_model=FA,
                    uut_psi_inv=uut_psi_inv,
                    u_svd=u_svd)
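
# Minimal numpy sketch of the shared/private split computed above, run on
# synthetic data (mirrors the U, Psi, A, B algebra; not the full pipeline):
import numpy as np
from sklearn import decomposition as skdecomp

X = np.random.randn(500, 20)               # time x neurons, already z-scored
FA = skdecomp.FactorAnalysis(n_components=3).fit(X)
U = np.mat(FA.components_).T               # N x k factor loadings
Psi = np.mat(np.diag(FA.noise_variance_))  # N x N diagonal private noise
B = np.linalg.inv(U * U.T + Psi)           # inverse of shared + private covariance
shared = (U * U.T) * B * np.mat(X).T       # shared-variance estimate per bin
private = np.mat(X).T - shared             # private residual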
def zscore_units(task_entry_id, calc_zscore_from_te, pos_key='cursor', decoder_entry_id=None,
    training_method=train.train_KFDecoder, retrain_flag=False, **kwargs):
    '''
    Summary: Method to 'convert' a trained decoder that uses z-scoring into one
        whose z-scoring statistics come from another session (e.g. you train a
        decoder from visual feedback, but you want to z-score units according to
        an earlier passive / still session). Use the task_entry_id that trained
        the decoder OR an entry that used the decoder; 'calc_zscore_from_te' is
        the task entry ID used to compute the z-scoring statistics. You can
        either retrain the decoder with the new z-scored units, or not.

    Input param: task_entry_id: TE that trained or used the decoder
    Input param: decoder_entry_id: decoder entry id (needed if the TE trained > 1 decoder)
    Input param: calc_zscore_from_te: TE whose spike counts set mFR / sdFR
    Output param: (decoder, suffix) if task_entry_id is None; otherwise the
        decoder is saved to the DB via save_new_dec
    '''
    if 'decoder_path' in kwargs:
        decoder = pickle.load(open(kwargs['decoder_path'], 'rb'))
    else:
        decoder = get_decoder_corr(task_entry_id, decoder_entry_id)

    assert hasattr(decoder, 'zscore') and decoder.zscore is True, \
        "Cannot update mFR/sdFR of a decoder that was not trained as a z-scored decoder. Retrain!"

    # Init mFR / sdFR
    if 'hdf_path' in kwargs:
        hdf = tables.open_file(kwargs['hdf_path'])
    else:
        hdf = dbfn.TaskEntry(calc_zscore_from_te).hdf
    
    # Get the HDF update rate from the hdf file.
    try:
        hdf_update_rate = np.round(np.mean(hdf.root.task[:]['loop_time']) * 1000.) / 1000.
    except:
        from config import config
        if config.recording_system == 'blackrock':
            hdf_update_rate = .05
        elif config.recording_system == 'plexon':
            hdf_update_rate = 1 / 60.

    spk_counts = hdf.root.task[:]['spike_counts'][:, :, 0]
    
    # Make sure the file does not contain repeated entries: six consecutive
    # per-bin spike-count totals should not all equal the first nonzero one
    sum_spk_counts = np.sum(spk_counts, axis=1)
    ix = np.nonzero(sum_spk_counts)[0][0]
    sample = 1 + sum_spk_counts[ix:ix + 6] - sum_spk_counts[ix]
    assert np.sum(sample) != 6

    decoder_update_rate = decoder.binlen
    bin_spks, _ = bin_(None, spk_counts.T, hdf_update_rate, decoder_update_rate, only_neural=True)
    mFR = np.squeeze(np.mean(bin_spks, axis=1))
    sdFR = np.std(bin_spks, axis=1)
    kwargs2 = dict(mFR=mFR, sdFR=sdFR)

    #Retrain decoder w/ new zscoring:
    if 'te_id' in kwargs:
        training_id = kwargs['te_id']
    else:
        training_id = decoder.te_id
    

    if retrain_flag:
        raise NotImplementedError("Need to test retraining with real data")
        saved_files = models.DataFile.objects.filter(entry_id=training_id)
        files = {}
        for fl in saved_files:
            files[fl.system.name] = fl.get_path()
        import bmilist
        decoder = training_method(files, decoder.extractor_cls, decoder.extractor_kwargs, bmilist.kin_extractors[''], decoder.ssm, 
            decoder.units, update_rate=decoder.binlen, tslice=decoder.tslice, pos_key=pos_key, zscore=True, **kwargs2)
        suffx = '_zscore_set_from_'+str(calc_zscore_from_te)+'_retrained'
    else:
        decoder.mFR = 0.
        decoder.sdFR = 1.
        decoder.init_zscore(mFR, sdFR)
        decoder.mFR = mFR
        decoder.sdFR = sdFR

        suffx = '_zscore_set_from_'+str(calc_zscore_from_te)

    if task_entry_id is not None:
        save_new_dec(task_entry_id, decoder, suffx)
    else:
        return decoder, suffx
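
# Usage sketch (hypothetical TE ids): re-zscore the decoder from TE 4558 using
# the firing statistics of block 4550, without retraining, and save it back
# under TE 4558 with a '_zscore_set_from_4550' suffix:
#
#     zscore_units(4558, 4550, retrain_flag=False)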