Example #1
def compute_p_wa(w, x, y, alpha):
    # Gibbs step: probability that the weights in row alpha of w take the
    # value 1, given the data (x, y) and the rest of w.
    w_0 = tt.set_subtensor(w[alpha], 0)  # (n_dim_in, n_dim_out)
    w_1 = tt.set_subtensor(w[alpha], 1)  # (n_dim_in, n_dim_out)
    z_0 = tt.nnet.sigmoid(x.dot(w_0))  # (n_samples, n_dim_out)
    z_1 = tt.nnet.sigmoid(x.dot(w_1))  # (n_samples, n_dim_out)
    log_likelihood_ratio = tt.sum(tt.log(bernoulli(y, z_1)) - tt.log(bernoulli(y, z_0)), axis=0)  # (n_dim_out, )
    p_wa = tt.nnet.sigmoid(log_likelihood_ratio)  # (n_dim_out, )
    return p_wa
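
All of the snippets on this page assume theano.tensor imported as tt and a bernoulli likelihood helper, neither of which is shown. A minimal sketch of that shared setup (the bernoulli definition is an assumption, inferred from how it is called):

import numpy as np
import theano
import theano.tensor as tt


def bernoulli(y, p):
    # Assumed helper: elementwise Bernoulli likelihood of the binary
    # targets y under success probability p.
    return p ** y * (1 - p) ** (1 - y)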
Example #2
def train(x, y):
    # One Gibbs sweep step: resample the weights in row alpha of w, then
    # advance alpha to the next row.  w, alpha, rng and n_dim_in are assumed
    # to be defined in the enclosing scope (see the sketch below).
    w_0 = tt.set_subtensor(w[alpha], 0)  # (n_dim_in, n_dim_out)
    w_1 = tt.set_subtensor(w[alpha], 1)  # (n_dim_in, n_dim_out)
    z_0 = tt.nnet.sigmoid(x.dot(w_0))  # (n_samples, n_dim_out)
    z_1 = tt.nnet.sigmoid(x.dot(w_1))  # (n_samples, n_dim_out)
    log_likelihood_ratio = tt.sum(tt.log(bernoulli(y, z_1)) - tt.log(bernoulli(y, z_0)), axis=0)  # (n_dim_out, )
    p_wa = tt.nnet.sigmoid(log_likelihood_ratio)  # (n_dim_out, )
    w_sample = rng.binomial(p=p_wa)  # (n_dim_out, )
    w_new = tt.set_subtensor(w[alpha], w_sample)  # (n_dim_in, n_dim_out)
    return [(w, w_new), (alpha, (alpha + 1) % n_dim_in)]
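
train closes over w, alpha, rng and n_dim_in, and returns a Theano updates list. A hedged sketch of how that state might be declared and the step compiled, continuing the setup above (all sizes and names are illustrative, not from the original source):

from theano.tensor.shared_randomstreams import RandomStreams

n_dim_in, n_dim_out = 20, 4
w = theano.shared(np.zeros((n_dim_in, n_dim_out), dtype='int64'), name='w')  # binary weights
alpha = theano.shared(0, name='alpha')  # index of the row currently being resampled
rng = RandomStreams(seed=1234)

x_sym, y_sym = tt.matrix('x'), tt.matrix('y')
gibbs_step = theano.function([x_sym, y_sym], updates=train(x_sym, y_sym))
# Each call gibbs_step(x_data, y_data) resamples one row of w in place.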
Example #3
def train(x, y):
    # Same sampling step as the previous example, but the updates are
    # registered via add_update rather than returned to the caller.
    w_0 = tt.set_subtensor(w[alpha], 0)  # (n_dim_in, n_dim_out)
    w_1 = tt.set_subtensor(w[alpha], 1)  # (n_dim_in, n_dim_out)
    z_0 = tt.nnet.sigmoid(x.dot(w_0))  # (n_samples, n_dim_out)
    z_1 = tt.nnet.sigmoid(x.dot(w_1))  # (n_samples, n_dim_out)
    log_likelihood_ratio = tt.sum(tt.log(bernoulli(y, z_1)) - tt.log(bernoulli(y, z_0)), axis=0)  # (n_dim_out, )
    p_wa = tt.nnet.sigmoid(log_likelihood_ratio)  # (n_dim_out, )
    w_sample = rng.binomial(p=p_wa)  # (n_dim_out, )
    w_new = tt.set_subtensor(w[alpha], w_sample)  # (n_dim_in, n_dim_out)
    add_update(w, w_new)
    add_update(alpha, (alpha + 1) % n_dim_in)
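
The sampling logic is identical to the previous example; the difference is purely in how state changes are communicated. Here add_update, presumably a helper from the surrounding framework, registers each (shared_variable, new_value) pair implicitly, so the caller no longer has to thread an updates list into theano.function itself.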
Example #4
def compute_p_wa(w, x, y, alpha):
    """
    Compute the probability of the weights at index alpha taking on
    the value 1.
    """
    w_0 = tt.set_subtensor(w[alpha], 0)  # (n_dim_in, n_dim_out)
    w_1 = tt.set_subtensor(w[alpha], 1)  # (n_dim_in, n_dim_out)
    z_0 = tt.nnet.sigmoid(x.dot(w_0))  # (n_samples, n_dim_out)
    z_1 = tt.nnet.sigmoid(x.dot(w_1))  # (n_samples, n_dim_out)
    log_likelihood_ratio = tt.sum(tt.log(bernoulli(y, z_1)) - tt.log(bernoulli(y, z_0)), axis=0)  # (n_dim_out, )
    p_wa = tt.nnet.sigmoid(log_likelihood_ratio)  # (n_dim_out, )
    return p_wa
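
The final sigmoid is Bayes' rule in disguise: with a uniform prior over the values {0, 1}, p(w_alpha = 1 | data) = L_1 / (L_0 + L_1) = 1 / (1 + exp(-(log L_1 - log L_0))), which is exactly the logistic sigmoid applied to the summed log-likelihood ratio computed above.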
Example #5
def compute_p_wa(w, x, y, alpha, possible_ws=np.array([0, 1])):
    """
    Compute the probability of the weights at index alpha taking on
    each of the values in possible_ws.
    """
    # Shape sanity checks via test values (requires Theano's
    # compute_test_value mode to be switched on).
    assert x.tag.test_value.ndim == y.tag.test_value.ndim == 2
    assert x.tag.test_value.shape[0] == y.tag.test_value.shape[0]
    assert w.get_value().shape[1] == y.tag.test_value.shape[1]
    v_current = x.dot(w)  # (n_samples, n_dim_out)
    # Pre-sigmoid activations with the alpha rows of w zeroed out...
    v_0 = v_current[None, :, :] - w[alpha, None, :] * x.T[alpha, :, None]  # (n_alpha, n_samples, n_dim_out)
    # ... then with each candidate weight value substituted in.
    possible_vs = v_0[:, :, :, None] + possible_ws[None, None, None, :] * x.T[alpha, :, None, None]  # (n_alpha, n_samples, n_dim_out, n_possible_ws)
    all_zs = tt.nnet.sigmoid(possible_vs)  # (n_alpha, n_samples, n_dim_out, n_possible_ws)
    log_likelihoods = tt.sum(tt.log(bernoulli(y[None, :, :, None], all_zs)), axis=1)  # (n_alpha, n_dim_out, n_possible_ws)
    # Question: do the log-likelihoods need shifting for numerical stability
    # here, or does Theano's softmax take care of that internally?
    # Theano's softmax only accepts 2D input, so flatten, apply, reshape back.
    p_wa = tt.nnet.softmax(log_likelihoods.reshape([alpha.shape[0] * w.shape[1], possible_ws.shape[0]]))
    return p_wa.reshape([alpha.shape[0], w.shape[1], possible_ws.shape[0]])  # (n_alpha, n_dim_out, n_possible_ws)
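
Since the asserts read x.tag.test_value, this version is meant to run with Theano's test-value mechanism enabled. A hedged usage sketch, reusing the shared w from the earlier setup (shapes and values are illustrative):

theano.config.compute_test_value = 'warn'

x_sym = tt.matrix('x')
x_sym.tag.test_value = np.random.rand(5, n_dim_in).astype(theano.config.floatX)
y_sym = tt.matrix('y')
y_sym.tag.test_value = np.random.randint(0, 2, (5, n_dim_out)).astype(theano.config.floatX)
alpha_sym = tt.lvector('alpha')  # indices of the rows of w to resample
alpha_sym.tag.test_value = np.array([0, 3])

p_wa = compute_p_wa(w, x_sym, y_sym, alpha_sym)  # (n_alpha, n_dim_out, n_possible_ws)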