def mutual_information(net_file, net_key, readout_window, nbins=350):
    """Estimate the mutual information between network state and stimulus class.

    Computes MI = H(state) - H(state | class) from the samples returned by
    ``get_samples`` for the given network and readout window.

    Args:
        net_file: Path/handle of the stored network (passed to get_samples).
        net_key: Key identifying the network within net_file.
        readout_window: Window over which states are read out.
        nbins: Number of histogram bins used for both entropy estimates.

    Returns:
        Tuple ``(mi, H)`` where ``mi`` is the mutual-information estimate and
        ``H`` is the unconditional state entropy.
    """
    (samps, all_stim_classes, Ninternal) = get_samples(net_file, net_key, readout_window)
    all_samps = np.array([x[0] for x in samps])
    class_indx = np.array([x[1] for x in samps])

    # Conditional entropy H(state | class), binned with nbins.
    Hc = conditional_entropy(all_samps, class_indx, nbins=nbins)
    # BUG FIX: the unconditional entropy must use the same bin count as the
    # conditional one — the original called entropy() with its default nbins,
    # which biases H - Hc. (Assumes entropy() accepts an nbins kwarg, as it is
    # called that way elsewhere in this file.)
    H = entropy(all_samps, nbins=nbins)

    return (H - Hc, H)
def plot_readout_data(net_file, net_key, readout_window):
    """Scatter-plot the first three state dimensions, colored by stimulus class.

    Fetches post-stimulus state samples via ``get_samples`` and renders a 3-D
    scatter plot with one color per stimulus class.

    Args:
        net_file: Path/handle of the stored network (passed to get_samples).
        net_key: Key identifying the network within net_file.
        readout_window: Window over which states are read out.

    Note:
        Only the first five classes get a distinct color; more classes would
        index past the color list.
    """
    color_cycle = ['r', 'g', 'b', 'k', 'c']
    (samps, all_stim_classes, Ninternal) = get_samples(net_file, net_key, readout_window)

    # Each sample is a (state_vector, stimulus_class) pair.
    states = np.array([pair[0] for pair in samps])
    stim_classes = np.array([pair[1] for pair in samps])

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')

    for idx, stim_class in enumerate(all_stim_classes):
        mask = stim_classes == stim_class
        subset = states[mask, :]
        # Plot the first three coordinates of every state in this class.
        ax.scatter(subset[:, 0], subset[:, 1], subset[:, 2], c=color_cycle[idx])

    ax.set_title('Post-Stimulus Network State')
    plt.axis('tight')
    plt.show()
def entropy_ratio(net_file, net_key, readout_window, nbins=27):
    """Compute the ratio of mean class-conditional entropy to total entropy.

    Args:
        net_file: Path/handle of the stored network (passed to get_samples).
        net_key: Key identifying the network within net_file.
        readout_window: Window over which states are read out.
        nbins: Number of histogram bins for the entropy estimates.

    Returns:
        Tuple ``(ratio, uc_entropy, sc_entropy)``: the entropy ratio, the
        unconditional entropy, and a dict mapping stimulus class to its
        conditional entropy.
    """
    (samps, all_stim_classes, Ninternal) = get_samples(net_file, net_key, readout_window)
    nsamps = len(samps)

    # Pack each sample into one row: columns 0..Ninternal-1 hold the state
    # vector, the last column holds its stimulus class label.
    all_samps = np.zeros([nsamps, Ninternal + 1])
    for k, (state, sc) in enumerate(samps):
        all_samps[k, 0:Ninternal] = state
        all_samps[k, -1] = sc

    # Unconditional entropy over all state samples.
    uc_entropy = entropy(all_samps[:, 0:Ninternal], nbins=nbins)

    # Entropy conditioned on each stimulus class.
    sc_entropy = {}
    for sc in all_stim_classes:
        indx = all_samps[:, -1] == sc
        sc_entropy[sc] = entropy(all_samps[indx, 0:Ninternal], nbins=nbins)

    # BUG FIX: dict.values() is a view in Python 3, so np.array(...) on it
    # produces a 0-d object array and the subsequent sum/division break.
    # Materialize the values as a list first.
    sc_entropies = np.array(list(sc_entropy.values()))
    mean_sc_entropy = sc_entropies.sum() / len(all_stim_classes)

    # Renamed the result local (was `entropy_ratio`) so it no longer shadows
    # this function's own name.
    ratio = mean_sc_entropy / uc_entropy
    return (ratio, uc_entropy, sc_entropy)