# In[ ]:

from afs_plotting import plot_prefered_directions

figure_K_mat, axes_K_mat = plt.subplots(
    1,
    2,
    figsize=(GLOBAL_FIGURE_VERTICAL_SIZE * 2, GLOBAL_FIGURE_VERTICAL_SIZE))

# Unpack the two panels: heatmap of the KF gain, and its direction plot.
ax_heatmap, ax_directions = axes_K_mat

# Left panel: the last KF gain matrix rendered as an image.
ax_heatmap.imshow(kf_K_last)
ax_heatmap.set_ylabel('States')
ax_heatmap.set_xlabel('Neurons')
ax_heatmap.set_title('KF matrix')

# Right panel: preferred directions computed from the transposed gain.
plot_prefered_directions(kf_K_last.T, ax=ax_directions)
ax_directions.set_title('Preferred directions')

# ## Batch analyses

# In[ ]:

batch_of_interest = 0
# Slice out one batch: a (neurons x frames) count matrix.
spike_count_one_batch_in_neuron_by_frame = spike_counts_batch[
    batch_of_interest, :, :]

# Was an f-string with no placeholders; a plain string is equivalent.
print('we just count one batch')

# Sum over frames (axis=1) to get the total spike count per neuron.
spike_count_sum_in_neuron = np.sum(spike_count_one_batch_in_neuron_by_frame,
                                   axis=1)

figure_neuron_count_sum, axes_neuron_count_sum = plt.subplots()
# The figure was created but never populated in the original cell; plot the
# per-neuron sums so the cell produces a meaningful output.
axes_neuron_count_sum.bar(np.arange(len(spike_count_sum_in_neuron)),
                          spike_count_sum_in_neuron)
axes_neuron_count_sum.set_xlabel('Neuron index')
axes_neuron_count_sum.set_ylabel('Spike count (summed over frames)')
axes_neuron_count_sum.set_title(
    f'Spike counts per neuron, batch {batch_of_interest}')

# # Pre-experiment check: check the Kalman filter before training

# In[19]:


# we plot the encoder directions
from afs_plotting import plot_prefered_directions

figure_encoder_direction, axes_encoder = plt.subplots()

# Encoder observation matrix C from the first experiment.
encoder_mat_C = exps_np[0].encoder.C

# Draw the encoder preferred-direction vectors, then label the panel.
plot_prefered_directions(encoder_mat_C, ax=axes_encoder)
axes_encoder.set(
    title='Distributions of encoder preferred directions',
    xlabel='C matrix weights \n a lot of vectors are clustered around ')


# In[20]:


print('we replace the encoder using the weights')
print('assume, they are all randomly initialized get the first decoder')

# Use the first experiment's decoder as the shared reference for C and Q.
first_decoder = exps_np[0].decoder
target_C = first_decoder.filt.C
target_Q = np.copy(first_decoder.filt.Q)

# NOTE(review): the next call was an orphaned indented statement in the
# original (an IndentationError, with `e` undefined). It presumably belonged
# to a loop over the experiments — confirm against the original notebook.
for e in exps_np:
    e.record_feature_active_set(e.decoder)

print('we check the new decoder C matrix:')

# One column per experiment; row 0 holds the pre-training C matrices.
figure_decoder_C, axs_decoder_C = plt.subplots(
    nrows=2,
    ncols=NUM_EXP,
    figsize=[
        GLOBAL_FIGURE_VERTICAL_SIZE * NUM_EXP, GLOBAL_FIGURE_VERTICAL_SIZE * 2
    ],
    squeeze=False)
figure_decoder_C.suptitle('KF C Matrix Before Training ')

for i, e in enumerate(exps_np):
    C = e.decoder.filt.C
    plot_prefered_directions(C, ax=axs_decoder_C[0, i])
    #axs_decoder_C[0,i].set_title(exp_conds[i])

#

# In[41]:

from graphviz import Digraph

sim_diagram = Digraph()

#TODO make this into a function
sim_diagram.node('encoder', f'{exps_np[0].encoder}')
sim_diagram.node('decoder', f'{exps_np[0].decoder}')
# NOTE(review): the 'learner' node call was truncated and corrupted by pasted
# text in the original (a syntax error); reconstructed here to follow the
# encoder/decoder pattern above — confirm against the original notebook.
sim_diagram.node('learner', f'{exps_np[0].learner}')

figure_compare_kf, axes_compare_kf = plt.subplots(1, 3, figsize=(12, 4))

# In[ ]:

time_of_interest = 0
# C matrix at the first recorded update (drop the leading time axis).
kf_slice_start = np.squeeze(kf_C[time_of_interest, :, :])
# Original print duplicated the phrase "kf_slice has".
print(f'kf_slice_start has shape {kf_slice_start.shape}')

# C matrix at the final recorded update.
kf_slice_end = np.squeeze(kf_C[-1, :, :])

# plot the trajectory of the L2 norms across update batches
axes_compare_kf[0].plot(history_of_L2_norms)

# plot the beginning
plot_prefered_directions(kf_slice_start, ax=axes_compare_kf[1])
# Typo fixes in displayed titles: 'Prefered'/'Preffered' -> 'Preferred'.
axes_compare_kf[0].set_title('Preferred direction vector L2 norm')
axes_compare_kf[0].set_xlabel('update batch number')

# plot the end
plot_prefered_directions(kf_slice_end, ax=axes_compare_kf[2])
axes_compare_kf[2].set_title('Preferred directions at conclusion')

figure_compare_kf

# # Measure ssms

# In[ ]:

# Pull the recorded CLDA quantities out of the parameter dictionary.
spike_counts_batch, intended_kin = (
    clda_params_dict['spike_counts_batch'],
    clda_params_dict['intended_kin'],
)
# In[ ]:

from afs_plotting import plot_prefered_directions

TEXT_OFFSET_VERTICAL = -0.2

figure_decoder_C.suptitle('KF C matrix before and after CLDA')

print('steady state tuning curves:')

# Row 1 of the figure: each experiment's decoder C matrix after CLDA.
# (Removed the redundant `e = exps[i]` — enumerate already binds `e`.)
for i, e in enumerate(exps):
    C = e.decoder.filt.C
    plot_prefered_directions(C, ax=axs_decoder_C[1, i])
    axs_decoder_C[1, i].set_title(f'{exp_conds[i]}')

figure_decoder_C

# ## Decoder compared to the encoder
#
#

# In[ ]:

# Quick sanity check: display how many experiment objects are loaded.
len(exps)

# In[ ]:

import convergence_analysis