def load_decoder(self):
        """Create the simulation PPF decoder from the full encoding matrix.

        Builds ``self.decoder`` by calling the known-beta PPF trainer with
        ``self.beta_full`` and the simulated encoder's unit list at a 180 Hz
        update rate (dt = 1/180 s).

        NOTE(review): a stale block of commented-out code (loading a pickled
        decoder from ``/storage/decoders/...`` and shuffling its beta rows)
        was removed here; it was dead code with a hard-coded absolute path.
        """
        self.decoder = train._train_PPFDecoder_sim_known_beta(self.beta_full, self.encoder.units, dt=1./180)
# ---------------------------------------------------------------------------
# Script fragment: build known-beta PPF decoders plus a CLDA batch learner and
# smoothbatch/continuous updaters.
# NOTE(review): relies on names defined outside this view (`data`, `np`,
# `train`, `sim_neurons`, `clda`, `fname`) — presumably a simulation script.
# ---------------------------------------------------------------------------
X = data['hand_vel'].T  # hand velocities; assumes 'hand_vel' is (dims, time) — TODO confirm

beta = data['beta']
# Move the first row of beta to the end, then transpose so rows index neurons.
beta = np.vstack([beta[1:, :], beta[0,:]]).T
n_neurons = beta.shape[0]
dt = 0.005 # 5 ms update interval, hard-coded (originally truedata['T_loop'][0,0])

encoder = sim_neurons.load_ppf_encoder_2D_vel_tuning(fname, dt=dt)
states = ['hand_px', 'hand_py', 'hand_pz', 'hand_vx', 'hand_vy', 'hand_vz', 'offset']
decoding_states = ['hand_vx', 'hand_vz', 'offset'] 

# Initialize the estimate of beta: zero the first two columns (presumably the
# velocity terms — TODO confirm column ordering) and expand the matrix from
# the decoded sub-state space to the full 7-dim state space.
beta_est = beta.copy()
beta_est[:,0:2] = 0
beta_est = train.inflate(beta_est, decoding_states, states, axis=1)
decoder_sb = train._train_PPFDecoder_sim_known_beta(beta_est, encoder.units, dt=dt, dist_units='m')
decoder_rml = train._train_PPFDecoder_sim_known_beta(beta_est, encoder.units, dt=dt, dist_units='m')

# Initialize learner and updater
batch_time = 60.  # seconds of data per CLDA batch
batch_size = batch_time/dt  # samples per batch (float — NOTE(review): may need int)
half_life = 120.  # seconds for the smoothbatch weight to decay to 0.5
# Per-batch forgetting factor: rho ** (half_life / batch_time) == 0.5
rho = np.exp(np.log(0.5) / (half_life/batch_time))

learner = clda.BatchLearner(batch_size)
updater_sb = clda.PPFSmoothbatchSingleThread()
updater_sb.rho = rho


updater_cont = clda.PPFContinuousBayesianUpdater(decoder_rml)
updater_cont.rho = -1  # sentinel; semantics defined by the updater — TODO confirm
# ---- Exemple #3 (0) ---- scraped-example separator, commented out so the file parses
# ---------------------------------------------------------------------------
# Script fragment (near-duplicate of the setup above, auto-formatted): builds
# smoothbatch/RML decoders from a known beta.  Relies on `beta`, `fname`,
# `np`, `train`, `sim_neurons`, `clda` defined outside this view.
# ---------------------------------------------------------------------------
# Move the first row of beta to the end, then transpose so rows index neurons.
beta = np.vstack([beta[1:, :], beta[0, :]]).T
n_neurons = beta.shape[0]
dt = 0.005  # 5 ms update interval, hard-coded (originally truedata['T_loop'][0,0])

encoder = sim_neurons.load_ppf_encoder_2D_vel_tuning(fname, dt=dt)
states = [
    'hand_px', 'hand_py', 'hand_pz', 'hand_vx', 'hand_vy', 'hand_vz', 'offset'
]
decoding_states = ['hand_vx', 'hand_vz', 'offset']

# Initialize the estimate of beta: zero the first two columns (presumably the
# velocity terms — TODO confirm column ordering) and expand to the full
# 7-dim state space.
beta_est = beta.copy()
beta_est[:, 0:2] = 0
beta_est = train.inflate(beta_est, decoding_states, states, axis=1)
decoder_sb = train._train_PPFDecoder_sim_known_beta(beta_est,
                                                    encoder.units,
                                                    dt=dt,
                                                    dist_units='m')
decoder_rml = train._train_PPFDecoder_sim_known_beta(beta_est,
                                                     encoder.units,
                                                     dt=dt,
                                                     dist_units='m')

# Initialize learner and updater
batch_time = 60.  # seconds of data per CLDA batch
batch_size = batch_time / dt  # samples per batch (float — NOTE(review): may need int)
half_life = 120.  # seconds for the smoothbatch weight to decay to 0.5
# Per-batch forgetting factor: rho ** (half_life / batch_time) == 0.5
rho = np.exp(np.log(0.5) / (half_life / batch_time))

learner = clda.BatchLearner(batch_size)
updater_sb = clda.PPFSmoothbatchSingleThread()
updater_sb.rho = rho
# ---- Exemple #4 (0) ---- scraped-example separator, commented out so the file parses
# ---------------------------------------------------------------------------
# Script fragment: unpack a recorded CLDA session from `data` (a loaded .mat
# dict — defined outside this view), rebuild the initial PPF decoder from the
# first beta_hat slice, and set up the continuous Bayesian updater.
# Fixed here: the Python-2 `print` statement (syntax error under Python 3)
# and the dangling `except:` that had no handler body (syntax error; the
# complete twin of this fragment later in the file shows the intended `pass`).
# ---------------------------------------------------------------------------
spike_counts = data['spike_counts'].astype(np.float64)
intended_kin = data['intended_kin']
beta_hat = data['beta_hat']
aimPos = data['aimPos']
n_iter = data['n_iter'][0, 0]
stimulant_index = data['stimulant_index']
param_noise_variances = data['param_noise_variances'].ravel()
stoch_beta_index = data['stoch_beta_index']
det_beta_index = data['det_beta_index']

## Create the object representing the initial decoder
init_beta = beta_hat[:, :, 0]
# Move the first row to the end, then transpose so rows index neurons.
init_beta = np.vstack([init_beta[1:, :], init_beta[0, :]]).T
decoder = train._train_PPFDecoder_sim_known_beta(init_beta,
                                                 units=[],
                                                 dist_units='cm')

updater = clda.PPFContinuousBayesianUpdater(decoder, units='cm')
m_to_cm = 100.
cm_to_m = 0.01

dt = 0.005
beta_hat_recon_error = np.nan * np.ones(beta_hat.shape)
inds = []
n_iter = 20000  # NOTE: overrides the n_iter loaded from `data`
for idx in range(1, n_iter):
    # Periodic progress report of the max reconstruction error so far.
    if idx % 1000 == 0:
        try:
            print(idx, np.max(np.abs(beta_hat_recon_error[:, :, inds])))
        except Exception:
            pass  # np.max raises while `inds` is still empty
# ---------------------------------------------------------------------------
# Script fragment: simulate point-process spikes from hand kinematics, run a
# known-beta PPF decoder over them, and plot decoded vs. true velocity.
# Relies on `beta`, `n_cells`, `hand_vel`, `np`, `sim_neurons`, `train`,
# `savemat`, `plt`, `plotutil` defined outside this view.
# ---------------------------------------------------------------------------
dt = 0.005  # 5 ms bins
encoder = sim_neurons.PointProcessEnsemble(beta, dt)

# simulate spike counts, one column per time step
T = 10000 # number of simulated samples (a full run would use hand_vel.shape[1])
spike_counts = np.zeros([n_cells, T])
for t in range(T):
    spike_counts[:,t] = encoder(hand_vel[:,t])


savemat('sample_spikes_and_kinematics.mat', dict(spike_counts=spike_counts))

# run the decoder over the simulated spikes
# Embed the 3-column beta into a 7-state matrix; columns 3, 5, 6 are
# presumably hand_vx, hand_vz, offset — TODO confirm the state ordering.
beta_full = np.zeros([n_cells, 7])
beta_full[:,[3,5,6]] = beta
dec = train._train_PPFDecoder_sim_known_beta(beta_full, encoder.units, dt=dt, dist_units='m')

dec_output = np.zeros([7, T])
for t in range(T):
    dec_output[:,t] = dec.predict(spike_counts[:,t])

# decoded velocity (states 3 and 5) overlaid on the true hand velocity traces
plt.figure()
axes = plotutil.subplots2(2, 1)
axes[0,0].plot(dec_output[3,:])
axes[0,0].plot(hand_vel[0,:])
axes[1,0].plot(dec_output[5,:])
axes[1,0].plot(hand_vel[1,:])
plotutil.set_axlim(axes, [0, T], axis='x')
plt.show()
plt.draw()
# ---------------------------------------------------------------------------
# Script fragment (near-duplicate of the replay fragment above): unpack the
# recorded CLDA session from `data`, rebuild the initial decoder, and iterate.
# Fixed here: the Python-2 `print` statement (syntax error under Python 3)
# and the bare `except:` (narrowed to `except Exception` so it no longer
# swallows KeyboardInterrupt/SystemExit).
# ---------------------------------------------------------------------------
spike_counts = data['spike_counts'].astype(np.float64)
intended_kin = data['intended_kin']
beta_hat = data['beta_hat']
aimPos = data['aimPos']
n_iter = data['n_iter'][0,0]
stimulant_index = data['stimulant_index']
param_noise_variances = data['param_noise_variances'].ravel()
stoch_beta_index = data['stoch_beta_index']
det_beta_index = data['det_beta_index']


## Create the object representing the initial decoder
init_beta = beta_hat[:,:,0]
# Move the first row to the end, then transpose so rows index neurons.
init_beta = np.vstack([init_beta[1:,:], init_beta[0,:]]).T
decoder = train._train_PPFDecoder_sim_known_beta(init_beta, units=[], dist_units='cm')

updater = clda.PPFContinuousBayesianUpdater(decoder, units='cm')
m_to_cm = 100.
cm_to_m = 0.01

dt = 0.005
beta_hat_recon_error = np.nan * np.ones(beta_hat.shape)
inds = []
n_iter = 20000  # NOTE: overrides the n_iter loaded from `data`
for idx in range(1, n_iter):
    # Periodic progress report of the max reconstruction error so far.
    if idx % 1000 == 0:
        try:
            print(idx, np.max(np.abs(beta_hat_recon_error[:,:,inds])))
        except Exception:
            pass  # np.max raises while `inds` is still empty