Example #1
    def __init__(self):
        super(ParityMagnitudeFourunit, self).__init__()
        self.num_var = 4
        self.dim_output = 4

        # four binary outputs, modelled with a Bernoulli observation distribution
        self.obs_distribution = students.Bernoulli(4)
        self.link = None
Example #2
    def __init__(self):
        super(ParityMagnitude, self).__init__()
        self.num_var = 2
        self.dim_output = 2

        self.obs_distribution = students.Bernoulli(2)
        self.link = None

        # positive conditions for each dichotomy over digits 0-7:
        # parity (the even digits) and magnitude (the digits below 4)
        self.positives = [np.array([0, 2, 4, 6]), np.array([0, 1, 2, 3])]
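For reference, the same two dichotomies can be derived programmatically; a minimal sketch, assuming the eight conditions are the digits 0-7:

import numpy as np

conds = np.arange(8)
parity_pos = conds[conds % 2 == 0]    # even digits: [0, 2, 4, 6]
magnitude_pos = conds[conds < 4]      # low digits:  [0, 1, 2, 3]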
Example #3
    def __init__(self, c=None, n=None, overlap=0, d=None, use_mse=False):
        """overlap is given as the log2 of the dot product on their +/-1 representation"""
        super(RandomDichotomies, self).__init__()

        if d is None:
            if c is None:
                raise ValueError('Must supply either (c,n), or d')
            if n > c:
                raise ValueError(
                    'Cannot have more dichotomies than conditions!!')

            if overlap == 0:
                # generate uncorrelated dichotomies; only works when c is a power of 2
                H = la.hadamard(c)[:, 1:]
                pos = np.nonzero(
                    H[:, np.random.choice(c - 1, n, replace=False)] > 0)
                self.positives = [pos[0][pos[1] == i] for i in range(n)]
            elif overlap == 1:
                prot = 2 * (np.random.permutation(c) >= (c / 2)) - 1
                pos = np.where(prot > 0)[0]
                neg = np.where(prot < 0)[0]
                idx = np.random.choice((c // 2)**2, n - 1, replace=False)
                swtch = np.stack((pos[idx % (c // 2)], neg[idx // (c // 2)])).T
                ps = np.ones((n - 1, 1)) * prot
                ps[np.arange(n - 1), swtch[:, 0]] *= -1
                ps[np.arange(n - 1), swtch[:, 1]] *= -1
                pos = [np.nonzero(p > 0)[0] for p in ps]
                pos.append(np.nonzero(prot > 0)[0])
                self.positives = pos
        else:
            self.positives = d
            n = len(self.positives)
            if c is None:
                c = 2 * len(self.positives[0])

        self.__name__ = 'RandomDichotomies_%d-%d-%d' % (c, n, overlap)
        self.num_var = n
        self.dim_output = n
        self.num_cond = c

        if use_mse:
            self.obs_distribution = students.GausId(n)
        else:
            self.obs_distribution = students.Bernoulli(n)
        self.link = None
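The Hadamard construction above yields balanced, pairwise-uncorrelated dichotomies; a minimal standalone sketch (scipy.linalg provides hadamard):

import numpy as np
from scipy import linalg as la

c = 8                          # number of conditions; must be a power of 2
H = la.hadamard(c)[:, 1:]      # drop the constant all-ones column
# the remaining +/-1 columns are mutually orthogonal, so any two of the
# induced dichotomies agree on exactly half of the conditions
print(H.T @ H)                 # c on the diagonal, 0 off the diagonal
positives = [np.nonzero(H[:, j] > 0)[0] for j in range(H.shape[1])]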
Example #4
    def __init__(self, d, function_class, use_mse=False):
        """overlap is given as the log2 of the dot product on their +/-1 representation"""
        super(LogicalFunctions, self).__init__()

        self.__name__ = 'LogicalFunctions_%dbit-%d' % (len(d), function_class)
        self.num_var = 1
        self.dim_output = 1
        self.num_cond = 2**len(d)

        self.bits = d
        self.function_class = function_class

        self.positives = [
            np.nonzero(self(np.arange(self.num_cond).squeeze()).numpy())[0]
        ]

        if use_mse:
            self.obs_distribution = students.GausId(1)
        else:
            self.obs_distribution = students.Bernoulli(1)
        self.link = None
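The positives pattern above evaluates the task on every condition index and keeps the indices mapped to 1; a minimal sketch using 3-bit parity (XOR) as a hypothetical stand-in for the class's __call__:

import numpy as np

bits = np.array([0, 1, 2])          # which bits of the condition index to read
num_cond = 2 ** len(bits)

def xor_of_bits(c):
    # pull out the selected bits of each condition index and XOR them
    return np.bitwise_xor.reduce((c[:, None] >> bits) & 1, axis=1)

positives = [np.nonzero(xor_of_bits(np.arange(num_cond)))[0]]
print(positives)                    # conditions with odd bit-parity: [1, 2, 4, 7]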
Example #5
    def __init__(self, n, q=None, use_mse=False):
        """overlap is given as the log2 of the dot product on their +/-1 representation"""
        super(StandardBinary, self).__init__()

        if q is None:
            q = n

        # read off the binary code of each condition; each variable's positive
        # class is the set of conditions on which the corresponding bit is 0
        bits = np.nonzero(
            1 -
            np.mod(np.arange(2**n)[:, None] // (2**np.arange(n)[None, :]), 2))
        pos_conds = np.split(bits[0][np.argsort(bits[1])], n)[:q]

        self.positives = pos_conds
        self.__name__ = 'StandardBinary%d-%d' % (n, q)
        self.num_var = q
        self.dim_output = q
        self.num_cond = 2**n

        # the observation distribution's dimension should match dim_output (q)
        if use_mse:
            self.obs_distribution = students.GausId(q)
        else:
            self.obs_distribution = students.Bernoulli(q)
        self.link = None
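The indexing expression above is a vectorised way of tabulating the binary code of every condition; a minimal standalone sketch:

import numpy as np

n = 3
conds = np.arange(2 ** n)
# bit j of condition c is (c // 2**j) mod 2
codes = np.mod(conds[:, None] // (2 ** np.arange(n)[None, :]), 2)
print(codes)      # row c holds the n-bit binary code of condition c
pos_conds = [np.nonzero(codes[:, j] == 0)[0] for j in range(n)]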
Example #6
    def __init__(self, n=3):
        super(DigitsBitwise, self).__init__()
        self.num_var = n
        self.dim_output = n
        self.obs_distribution = students.Bernoulli(n)
        self.link = None
Example #7
#     ax.plot(U[c,0],U[c,1],U[c,2],'k')
util.set_axes_equal(ax)

# plt.title('PCA dimension: %.2f'%((np.sum(S**2)**2)/np.sum(S**4)))

#%%
N = 100

# net = students.Feedforward([inputs.shape[1],100,2],['ReLU',None])
# net = students.MultiGLM(students.Feedforward([inputs.shape[1], N], ['ReLU']),
#                         students.Feedforward([N, targets.shape[1]], [None]),
#                         students.GausId(targets.shape[1]))
net = students.MultiGLM(
    students.Feedforward([dim_inp * num_var, N], ['ReLU']),
    students.Feedforward([N, output_task.dim_output], [None]),
    students.Bernoulli(output_task.dim_output))
# net = students.MultiGLM(students.Feedforward([inputs.shape[1],N], ['ReLU']),
#                         students.Feedforward([N, p], [None]),
#                         students.Categorical(p))

n_trn = int(0.8 * num_data)
trn = np.random.choice(num_data, n_trn, replace=False)
tst = np.setdiff1d(range(num_data), trn)

optimizer = optim.Adam(net.enc.parameters(), lr=1e-4)
dset = torch.utils.data.TensorDataset(inputs[trn, :].float(),
                                      outputs[trn, :].float())
dl = torch.utils.data.DataLoader(dset, batch_size=64, shuffle=True)

n_compute = np.min([len(tst), 1000])
Example #8
                             input_task=input_task,
                             SAVE_DIR=SAVE_DIR,
                             time_between=empty_time)

num_data = this_exp.ntrain

inputs = this_exp.train_data[0]

outputs = this_exp.train_data[1]

which_inp = inputs.abs().cumsum(1).detach().numpy()

#%%
net = students.GenericRNN(inputs.shape[-1],
                          N,
                          students.Bernoulli(1),
                          rnn_type=nonlin)
# net = recurrent.GenericRNN(1, N, students.Bernoulli(1), rnn_type=nonlin)

n_trn = int(0.8 * num_data)
trn = np.random.choice(num_data, n_trn, replace=False)
tst = np.setdiff1d(range(num_data), trn)

optimizer = optim.Adam(net.rnn.parameters(), lr=1e-4)
# dset = torch.utils.data.TensorDataset(inputs[trn,:,None].float(),
#                                       outputs[trn,:].float())
dset = torch.utils.data.TensorDataset(inputs[trn, :, :].float(),
                                      outputs[trn, :].float())
dl = torch.utils.data.DataLoader(dset, batch_size=200, shuffle=True)
n_compute = np.min([len(tst), 100])
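Both the feedforward and recurrent setups stop after building the optimizer and DataLoader; a minimal sketch of the training loop they imply, assuming net(x) returns logits of shape (batch, dim_output) (the students API may differ):

import torch

loss_fn = torch.nn.BCEWithLogitsLoss()   # matches the Bernoulli outputs

for epoch in range(100):
    for x, y in dl:
        optimizer.zero_grad()
        loss = loss_fn(net(x), y)
        loss.backward()
        optimizer.step()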
Example #9
#     c = [int(i), int(np.mod(i+1,U.shape[0]))]
#     ax.plot(U[c,0],U[c,1],U[c,2],'k')
util.set_axes_equal(ax)

# plt.title('PCA dimension: %.2f'%((np.sum(S**2)**2)/np.sum(S**4)))

#%%
N = 200

# net = students.Feedforward([inputs.shape[1],100,2],['ReLU',None])
# net = students.MultiGLM(students.Feedforward([inputs.shape[1], N], ['ReLU']),
#                         students.Feedforward([N, targets.shape[1]], [None]),
#                         students.GausId(targets.shape[1]))
net = students.MultiGLM(
    students.Feedforward([dim_inp * 4, N, N], ['ReLU', 'ReLU']),
    students.Feedforward([N, 2], [None]), students.Bernoulli(2))
# net = students.MultiGLM(students.Feedforward([inputs.shape[1],N], ['ReLU']),
#                         students.Feedforward([N, p], [None]),
#                         students.Categorical(p))

n_trn = int(0.8 * num_data)
trn = np.random.choice(num_data, n_trn, replace=False)
tst = np.setdiff1d(range(num_data), trn)

optimizer = optim.Adam(net.enc.parameters(), lr=1e-4)
dset = torch.utils.data.TensorDataset(inputs[trn, :].float(),
                                      outputs[trn, :].float())
dl = torch.utils.data.DataLoader(dset, batch_size=128, shuffle=True)

n_compute = np.min([len(tst), 1000])
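n_compute caps how many held-out trials enter the evaluation; a minimal sketch of the implied test-accuracy computation, under the same assumption that net(x) returns logits:

with torch.no_grad():
    idx = tst[:n_compute]
    probs = torch.sigmoid(net(inputs[idx, :].float()))
    preds = (probs > 0.5).float()
    acc = (preds == outputs[idx, :].float()).float().mean().item()
print('held-out accuracy: %.3f' % acc)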