示例#1
0
    def information(self, test_var, normalize=False):
        """
        Mutual information I(a; b) between the decimal-coded task output `a`
        and the binary variable `b` defined by membership in `test_var`.

        `test_var` lists the conditions on which b is "positive"; its
        complement within range(self.num_cond) is the "negative" side.
        `normalize` is accepted but currently unused (kept for interface
        compatibility).

        NOTE(review): both conditional distributions are normalized by
        len(test_var), and the two sides are weighted 0.5 each — this
        assumes a balanced split (len(test_var) == self.num_cond / 2).
        Confirm with callers.
        """
        def _output_dist(conds):
            # empirical distribution of decimal-coded outputs over `conds`
            outs = util.decimal(self(conds)).numpy()
            return np.unique(outs, return_counts=True)[1] / len(test_var)

        # P(a | b positive) and P(a | b negative)
        p_pos = _output_dist(test_var)
        neg_conds = np.setdiff1d(range(self.num_cond), test_var)
        p_neg = _output_dist(neg_conds)

        # marginal entropy H(a) over all conditions (outputs may be degenerate)
        all_outs = util.decimal(self(np.arange(self.num_cond)))
        p_marg = np.unique(all_outs, return_counts=True)[1] / self.num_cond
        marginal_entropy = -np.sum(p_marg * np.log2(p_marg))

        # I(a, b) = H(a) - H(a | b), with P(b)=0.5 for each side
        neg_cond_entropy = 0.5 * (np.sum(p_pos * np.log2(p_pos))
                                  + np.sum(p_neg * np.log2(p_neg)))
        return marginal_entropy + neg_cond_entropy
示例#2
0
 def __call__(self, labels, noise=None):
     """Map condition labels to their dichotomy code.

     Each dichotomy in self.positives contributes one membership bit; the
     bits are packed into a single decimal code per label. `noise` is
     accepted but unused (interface compatibility).
     """
     membership = [np.isin(labels, pos) for pos in self.positives]
     bits = np.array(membership).astype(float)
     codes = util.decimal(bits.T)
     if not self.use_mse:
         # integer class codes for cross-entropy-style training
         return torch.tensor(codes).int()
     # one-hot (indicator) targets for MSE training
     indicator = assistants.Indicator(self.dim_output, self.dim_output)
     return indicator(codes.astype(int)).float()
示例#3
0
if which_data == 'assoc':

    # Association task: pair each training pattern with a random discrete
    # "action" drawn from allowed_actions.
    p = 2**num_var
    allowed_actions = [0, 1, 2]
    # allowed_actions = [0,1,2,4]
    # allowed_actions = [0]
    p_action = [0.7, 0.15, 0.15]  # sampling probability per allowed action
    # p_action = [0.61, 0.13, 0.13, 0.13]
    # p_action = [1.0]

    # rescale the +-1 patterns into {0, 1}
    # NOTE(review): output_states and input_states are identical here —
    # presumably an autoencoding setup; confirm downstream usage.
    output_states = (this_exp.train_data[0][:ndat, :].data + 1) / 2
    # output_states = this_exp.train_data[1][:ndat,:].data

    input_states = (this_exp.train_data[0][:ndat, :].data + 1) / 2

    # decimal code of the binary condition labels -> abstract condition id
    abstract_conds = util.decimal(this_exp.train_data[1])[:ndat]
    cond_set = np.unique(abstract_conds)

    # draw the "actions" for each data point
    actns = torch.tensor(np.random.choice(allowed_actions, ndat,
                                          p=p_action)).int()
    # unpack each action integer into num_var binary digits (one column per bit)
    actions = torch.stack([(actns & (2**i)) / 2**i
                           for i in range(num_var)]).float().T

    # act_rep = assistants.Indicator(p,p)(util.decimal(actions).int())
    act_rep = actions.data

    # inputs = np.concatenate([input_states,act_rep], axis=1)
    # # inputs = np.concatenate([input_states, this_exp.train_data[1]], axis=1)
    inputs = input_states.float()
示例#4
0
#%% Projecting onto specific dichotomy subspaces
these_dichotomies = (0,9)  # indices of the coding vectors to project onto
this_network = -1          # which trained network to inspect

model = all_nets[0][this_network]
args = all_args[0][this_network]
vecs = coding_vectors[this_network]

# restore the experiment state that produced this network
this_exp.load_other_info(args)
this_exp.load_data(SAVE_DIR)

# latent representation of the training inputs
# NOTE(review): assumes model(...) returns a tuple whose third element is
# the hidden activity — confirm against the model definition.
z = model(this_exp.train_data[0])[2].detach().numpy()

fake_task = util.RandomDichotomies(d=this_exp.task.positives)
colorby = util.decimal(fake_task(this_exp.train_conditions)[:5000])

# vals = util.cosine_sim()

# orthonormal basis for the span of the selected coding vectors
orth_vecs = la.orth(vecs[these_dichotomies,...].squeeze().T) # basis for subspace
Proj = orth_vecs@orth_vecs.T  # orthogonal projector onto that subspace

z_proj = [email protected]

# principal axes of the projected activity (mean-centred over samples)
U, S, _ = la.svd(z_proj-z_proj.mean(1)[:,None], full_matrices=False)

# top-3 PCs of (a subset of) the projected activity, for plotting
pcs = z_proj.T[:5000,:]@U[:3,:].T

# decoder output on the projected representation, averaged over the
# positive class of the fake task
proj_perf = spc.expit(model.dec(torch.tensor(z_proj).float().T).detach())[fake_task(this_exp.train_conditions)==1].mean()

# plt.figure()
示例#5
0
# apply_rotation = True

# three input dichotomies over the 8 conditions
input_task = util.RandomDichotomies(d=[(0, 1, 2, 3), (0, 2, 4, 6), (0, 1, 4,
                                                                    5)])
# output_task = util.RandomDichotomies(d=[(0,1,6,7), (0,2,5,7)]) # xor of first two
output_task = util.RandomDichotomies(d=[(0, 3, 5, 6)])  # 3d xor
# output_task = util.RandomDichotomies(d=[(0,1,6,7), (0,4,5,6)]) # 3d xor
# output_task = util.RandomDichotomies(d=[(0,1,2,3),(0,2,4,6)])
# output_task = util.RandomDichotomies(d=[(0,1,4,5),(0,2,5,7),(0,1,6,7)]) # 3 incompatible dichotomies
# input_task = util.RandomDichotomies(d=[(0,1),(0,2)])
# output_task = util.RandomDichotomies(d=[(0,1)])

# sample conditions uniformly and read off their binary variable values
inp_condition = np.random.choice(2**num_var, num_data)
# var_bit = (np.random.rand(num_var, num_data)>0.5).astype(int)
var_bit = input_task(inp_condition).numpy().T  # (num_var, num_data) binary
# decimal code over the first num_recoded bits selects a categorical cluster
inp_subcondition = util.decimal(var_bit[:num_recoded, :].T).astype(int)

means = np.random.randn(num_var, dim_inp)  # one mean pattern per variable
cat_means = np.random.randn(2**num_recoded, dim_inp * num_recoded)  # one per recoded category

# signed means: +mean where the bit is 1, -mean where it is 0
mns = (means[:, None, :] * var_bit[:, :, None]) - (means[:, None, :] *
                                                   (1 - var_bit[:, :, None]))

# concatenate per-variable patterns into a (num_data, num_var*dim_inp) matrix
clus_mns = np.reshape(mns.transpose((0, 2, 1)), (dim_inp * num_var, -1)).T
# overwrite the first num_recoded variables with categorical cluster means
clus_mns[:, :num_recoded * dim_inp] = cat_means[inp_subcondition, :]

if apply_rotation:
    # mix the axes with the orthogonal factor (Q of QR) of a random matrix
    C = np.random.rand(num_var * dim_inp, num_var * dim_inp)
    clus_mns = clus_mns @ la.qr(C)[0][:num_var * dim_inp, :]
inputs = torch.tensor(clus_mns + np.random.randn(num_data, num_var * dim_inp) *
示例#6
0
p_action = [0.8,0.1,0.1]  # sampling probability per allowed action
# p_action = [1.0]

# output_states = this_exp.train_data[1].data
# output_states = util.decimal(this_exp.train_data[1])
output_states = this_exp.train_data[0].data
# output_states = ContinuousEmbedding(N_, 1.0)(this_exp.train_data[1])

# output_states = assistants.Indicator(p,p)(util.decimal(this_exp.train_data[1]).int())

# NOTE(review): input and output states reference the same tensor here —
# presumably an autoencoding setup; confirm downstream usage.
input_states = this_exp.train_data[0].data
# input_states = 1*this_exp.train_data[1].data
# input_states = assistants.Indicator(p,p)(util.decimal(this_exp.train_data[1]).int())@W2.T+np.random.randn(56000,N_)*0.2
# input_states = this_exp.train_data[1]@W1.T + np.random.randn(56000,N_)*0.2

# decimal code of the binary condition labels -> abstract condition id
abstract_conds = util.decimal(this_exp.train_data[1])
cond_set = np.unique(abstract_conds)

# draw the "actions" for each data point
actns = torch.tensor(np.random.choice(allowed_actions, this_exp.train_data[0].shape[0], p=p_action)).int()
# unpack each action integer into num_var binary digits (one column per bit)
actions = torch.stack([(actns&(2**i))/2**i for i in range(num_var)]).float().T

# act_rep = assistants.Indicator(p,p)(util.decimal(actions).int())
act_rep = actions.data

# inputs = np.concatenate([input_states,act_rep], axis=1)
# # inputs = np.concatenate([input_states, this_exp.train_data[1]], axis=1)
inputs = input_states.float().detach().numpy()

# # sample the successor states, i.e. input + action
# successor = condition bits XOR action bits (mod-2 addition)
successors = np.mod(this_exp.train_data[1]+actions, 2)
示例#7
0
# fake_task.positives = [(0,1,2,6)]

# random subsample of the data for MDS (full set would be slow)
idx = np.random.choice(inputs.shape[0], n_compute, replace=False)

z = model(inputs[idx,...].float()).detach().numpy()

# ans = this_exp.train_data[1][idx,...]
# ans = output_task(this_exp.train_conditions)[idx]

# cond = util.decimal(ans)
# cond = this_exp.train_conditions[idx]
cond = inp_condition[idx]

# colorby = cond
# colorby = this_exp.train_conditions[idx]
point_col = util.decimal(fake_task(cond))  # colour code per sampled point

centr_col = util.decimal(fake_task(np.unique(cond)))  # colour per condition


mds = manifold.MDS(n_components=n_mds)

# emb = mds.fit_transform(la.norm(z.T[:,:,None] - z.T[:,None,:],axis=0))
# emb = mds.fit(z)
emb = mds.fit_transform(np.round(z,2))

if n_mds == 2:
    # NOTE(review): `colorby` is not defined in this cell (point_col is) —
    # it presumably survives from an earlier cell; confirm or use point_col.
    scat = plt.scatter(emb[:,0],emb[:,1], c=colorby)
    plt.xlabel('MDS1')
    plt.ylabel('MDS2')
elif n_mds == 3:
示例#8
0
#     print('Epoch %d: loss=%.3f'%(epoch, running_loss/(i+1)))

#%%
n_mds = 2        # embedding dimensionality for MDS
n_compute = 500  # number of sampled points

idx = np.random.choice(this_exp.train_data[0].shape[0],
                       n_compute,
                       replace=False)

# linear readout of the sampled inputs (W and b fit in an earlier cell)
z = (W @ (this_exp.train_data[0][idx, ...].detach().numpy().T) + b).T
# z = lin(this_exp.train_data[0])[idx,...].detach().numpy()
# z = targ[idx,...]
# ans = this_exp.train_conditions[idx,...]
ans = this_exp.train_data[1][idx, ...]
cond = util.decimal(ans)  # decimal condition code, used for colouring

mds = manifold.MDS(n_components=2)

emb = mds.fit_transform(z)

scat = plt.scatter(emb[:, 0], emb[:, 1], c=cond)
plt.xlabel('MDS1')
plt.ylabel('MDS2')
cb = plt.colorbar(scat,
                  ticks=np.unique(cond),
                  drawedges=True,
                  values=np.unique(cond))
cb.set_ticklabels(np.unique(cond) + 1)  # display 1-based condition labels
cb.set_alpha(1)
# NOTE(review): Colorbar.draw_all() is deprecated since matplotlib 3.6
# (removed in 3.8); newer code uses fig.draw_without_rendering() instead.
cb.draw_all()
示例#9
0
                       ax=axs[0])
# second animated trace: projection dims 0 and 1 on the right-hand axes
an2 = dicplt.LineAnime(weights_proj[:, :, 0].T,
                       weights_proj[:, :, 1].T,
                       colors=cm.bwr(grp / 1),
                       ax=axs[1])

# render both animations together into a single movie
# NOTE(review): an1 is defined in an earlier cell (not visible here)
dicplt.AnimeCollection(an1, an2).save(SAVE_DIR + '/vidya/tempmovie.mp4',
                                      fps=30)

#%%
this_grp = 0  # which weight-cluster group to animate

# scatter the (scaled) input coefficient directions as reference markers
ax = dicplt.scatter3d(8 * inp_coefs.T,
                      s=200,
                      marker='*',
                      c=cm.viridis(util.decimal(y_.T) / 7))
# 3-D trajectory of the selected group's projected weights over training
dicplt.LineAnime3D(
    weights_proj[:, grp == this_grp, 0].T,
    weights_proj[:, grp == this_grp, 1].T,
    weights_proj[:, grp == this_grp, 2].T,
    view_period=100,
    ax=ax,
    colors=cm.viridis(grp[grp == this_grp] / 7),
    rotation_period=500).save(SAVE_DIR + 'vidya/tempmovie_%s.mp4' %
                              grp_weights[:, this_grp])

#%%

p_abs = 0.0

inp_align = []
示例#10
0
dim_inp = 32  # dimension per variable
num_data = 5000  # total
noise = 0.05

switch_fraction = 0.1

input_task = util.StandardBinary(3)
# output_task = util.RandomDichotomies(d=[(0,1,6,7), (0,2,5,7)]) # xor of first two
output_task = util.RandomDichotomies(d=[(0, 3, 5, 6)])  # 3d xor

# sample 3-bit conditions; first two bits are action/outcome, third is context
inp_condition = np.random.choice(2**3, num_data)
var_bit = input_task(inp_condition).numpy().T
action_outcome = var_bit[:2, :]
context = var_bit[2, :]

# stimulus id = decimal code of (action, outcome), shifted by 1 (mod 4)
# whenever context == 1
stimulus = util.decimal(action_outcome.T).astype(int)
stimulus[context == 1] = np.mod(stimulus[context == 1] + 1,
                                4)  # effect of context

means_pos = np.random.randn(2, dim_inp)  # pattern used where a bit is 1
means_neg = np.random.randn(2, dim_inp)  # pattern used where a bit is 0
stim_pattern = np.random.randn(4, dim_inp)  # one pattern per stimulus id

# per-variable means: means_pos where the bit is 1, means_neg where it is 0
mns = (means_pos[:, None, :] *
       action_outcome[:, :, None]) + (means_neg[:, None, :] *
                                      (1 - action_outcome[:, :, None]))

# (num_data, 2*dim_inp) action/outcome representation
ao_t = np.reshape(mns.transpose((0, 2, 1)), (dim_inp * 2, -1)).T
s_t = stim_pattern[stimulus, :]  # stimulus representation per trial

prev_trial = np.arange(num_data)
#%%
num_cond = 2**10  # number of abstract conditions
num_var = 10      # number of binary task variables

task = util.RandomDichotomies(num_cond,num_var,0)
# task = util.ParityMagnitude()

# this_exp = exp.mnist_multiclass(task, SAVE_DIR, abstracts=abstract_variables)
this_exp = exp.random_patterns(task, SAVE_DIR, 
                               num_class=num_cond,
                               dim=100,
                               var_means=1)

#%% Rotation from square to tetrahedron
abstract_conds = util.decimal(this_exp.train_data[1])  # decimal condition code
dim = 50     # embedding dimensionality
noise = 0.2

ndat = 2000

# lin_clf = svm.LinearSVC
# lin_clf = linear_model.LogisticRegression
# lin_clf = linear_model.Perceptron
# lin_clf = linear_model.RidgeClassifier

# NOTE(review): per the cell title, ContinuousEmbedding(dim, t) presumably
# interpolates between geometries as t varies; here t=0.0 — confirm.
emb = util.ContinuousEmbedding(dim, 0.0)
z = emb(this_exp.train_data[1])
# principal axes of the mean-centred embedded representation
_, _, V = la.svd(z-z.mean(0)[None,:],full_matrices=False)
# V = V[:3,:]
cond = this_exp.train_conditions