Example #1
    def forward(self, X, temp, phi_prior):
        """
            For one Monte Carlo sample

            :param X: [batch_size, input_dim]
            :return: output for one MC sample, size = [batch_size, output_dim]
        """
        # sample weights and biases; softplus keeps the scale parameters positive
        sigma_w = torch.log(1 + torch.exp(self.w_rho))
        sigma_b = torch.log(1 + torch.exp(self.b_rho))
        sigma_prior = torch.log(1 + torch.exp(self.rho_prior))

        u_w = torch.rand(self.w_theta.shape)
        u_b = torch.rand(self.b_theta.shape)
        u_w = u_w.to(self.device)
        u_b = u_b.to(self.device)
        self.gamma_w = gumbel_softmax(self.w_theta, u_w, temp, hard=True)
        self.gamma_b = gumbel_softmax(self.b_theta, u_b, temp, hard=True)

        epsilon_w = Normal(0, 1).sample(self.w_mu.shape)
        epsilon_b = Normal(0, 1).sample(self.b_mu.shape)
        epsilon_w = epsilon_w.to(self.device)
        epsilon_b = epsilon_b.to(self.device)

        self.w = self.gamma_w * (self.w_mu + sigma_w * epsilon_w)
        self.b = self.gamma_b * (self.b_mu + sigma_b * epsilon_b)
        output = torch.mm(X, self.w) + self.b.expand(X.size()[0],
                                                     self.output_dim)

        # record KL at sampled weight and bias
        w_phi = sigmoid(self.w_theta)
        b_phi = sigmoid(self.b_theta)

        kl_w = w_phi * (torch.log(w_phi) - torch.log(phi_prior)) + \
               (1 - w_phi) * (torch.log(1 - w_phi) - torch.log(1 - phi_prior)) + \
               w_phi * (torch.log(sigma_prior) - torch.log(sigma_w) +
                        0.5 * (sigma_w ** 2 + self.w_mu ** 2) / sigma_prior ** 2 - 0.5)

        kl_b = b_phi * (torch.log(b_phi) - torch.log(phi_prior)) + \
               (1 - b_phi) * (torch.log(1 - b_phi) - torch.log(1 - phi_prior)) + \
               b_phi * (torch.log(sigma_prior) - torch.log(sigma_b) +
                        0.5 * (sigma_b ** 2 + self.b_mu ** 2) / sigma_prior ** 2 - 0.5)

        self.kl = torch.sum(kl_w) + torch.sum(kl_b)

        return output
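Note: the forward pass above relies on a gumbel_softmax helper that is not shown in this example. What follows is a minimal sketch of a binary Gumbel-softmax (concrete) gate with a straight-through hard option; it matches the call signature above but is an assumption, not the original helper.

import torch

# Hypothetical sketch of the binary Gumbel-softmax gate assumed above
# (not the original helper).
def gumbel_softmax(theta, u, temp, hard=False):
    # theta: logits of the inclusion probabilities; u: Uniform(0, 1) noise of
    # the same shape. The logistic reparameterisation turns u into the Gumbel
    # noise difference for the two-class (keep / drop) case.
    soft = torch.sigmoid((theta + torch.log(u) - torch.log(1.0 - u)) / temp)
    if hard:
        # Straight-through estimator: hard 0/1 gate in the forward pass,
        # gradient of the soft relaxation in the backward pass.
        hard_gate = (soft > 0.5).float()
        return (hard_gate - soft).detach() + soft
    return soft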
Example #2
def costFunction(X, Y, Theta):
    m = X.shape[0]

    H = sigmoid(np.matmul(X, np.transpose(Theta)))
    # print(H.shape)
    # Small epsilon inside log() avoids log(0) when H saturates.
    J = -1 / m * np.sum(
        Y * np.log(H + 0.001) + (1 - Y) * np.log(1 - H + 0.001), axis=0)
    J = np.reshape(J, (Theta.shape[0], 1))
    return J
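Several NumPy examples in this collection, including the one above, call a free-standing sigmoid helper without defining it. A minimal sketch of the function they presumably assume:

import numpy as np

# Minimal sigmoid helper assumed by the NumPy examples (not part of the
# original snippets).
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))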
Example #3
File: lr.py  Project: bbz6810/myNlp
 def train(self, x_train, y_train):
     w = np.ones((x_train.shape[1], 1))
     learning_rate = 0.001
     for i in range(100000):
         h = sigmoid(np.dot(x_train, w))
         error = y_train - h
         w = w + learning_rate * np.dot(x_train.transpose(), error)
     self.w = w
     print('weights w:', self.w, self.w.shape)
Example #4
    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        item = self.items[question.user_id, question.place_id]
        prediction = tools.sigmoid(item.knowledge)
        return self.respect_guess(prediction, question.options)
Example #5
def _logistic_regression(x, weight, bias):
    """
        逻辑回归
    :param x: input data
    :param weight:
    :param bias:
    :return:
    """
    # numpy.matmul 函数返回两个数组的矩阵乘积
    return tools.sigmoid(np.matmul(x, weight) + bias)
Example #6
    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        user = self.users[question.user_id]
        place = self.places[question.place_id]

        prediction = tools.sigmoid(user.skill - place.difficulty)
        return self.respect_guess(prediction, question.options)
Example #7
    def reset(self, angles=None, p=None, noise=0, source_bandit=None):
        lin = np.linspace(0,
                          self.complexity,
                          self.precision + 1,
                          endpoint=True)
        x, y = np.meshgrid(lin, lin)

        self.grid_data, self.angles, self.p, self.unsmoothed = perlin(
            x, y, angles=angles, pre_p=p, noise=noise)

        if self.smooth_function == 'sigmoid':
            self.grid_data = sigmoid(1 * self.grid_data)
        if self.smooth_function == 'strongsigmoid':
            self.grid_data = sigmoid(10 * self.grid_data)
        self.grid_data = normalize(self.grid_data,
                                   offset=np.min(self.grid_data),
                                   scale=np.ptp(self.grid_data))

        self._value_landscape = None
        self.cached_contexts = None
Example #8
def gradientDescent(X, Y, Theta, learninRate, numIter):

    m = X.shape[0]
    for i in range(numIter):
        
        H = sigmoid(np.matmul(X, np.transpose(Theta)))
        Theta = Theta - learninRate / m * np.matmul(np.transpose(H - Y), X)

        cost = costFunction(X, Y, Theta)
        if i % 100 == 0:
            print(i, ":", cost)
    return Theta
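A purely illustrative call with synthetic data, assuming X already carries a bias column and Y has one column per class so the shapes line up with costFunction above; none of these names or values come from the original project.

import numpy as np

# Synthetic data for illustration only.
rng = np.random.default_rng(0)
X = np.hstack([np.ones((100, 1)), rng.normal(size=(100, 2))])  # bias column + 2 features
Y = (X[:, 1:2] + X[:, 2:3] > 0).astype(float)                  # [100, 1] binary labels
Theta = np.zeros((1, 3))                                       # one row of parameters per class
Theta = gradientDescent(X, Y, Theta, learninRate=0.1, numIter=1000)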
Example #9
def predict(theta, X):
    '''Predict label using learned logistic regression parameters'''
    # m, n = X.shape
    p = np.zeros(len(X))
    # print(X.dot(theta.T))
    h = sigmoid(X.dot(theta.T))
    # print(h)
    for it in range(len(h)):
        if h[it] > 0.5:
            p[it] = 1
        else:
            p[it] = 0
    return p
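The element-wise thresholding above can also be collapsed into a single vectorized expression; an equivalent sketch:

# Vectorized equivalent of the thresholding loop in predict().
p = (sigmoid(X.dot(theta.T)) > 0.5).astype(float)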
Example #10
    def __backpropogation(self, x, y):
        """ Backpropogation method used with supplied input and output.

            Returns a tuple of gradients for weights and biases: (nabla_b, nabla_w)

            Each element is a column vector containing gradients for the respective layer.

            REF: http://neuralnetworksanddeeplearning.com/chap2.html
        """

        # Empty matrices with bias shapes / sizes.
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        # The initial activation.
        activation = x

        # List of activations for each layer.
        activations = [x]

        # List of outputs.
        zs = []

        # Iterating through tuples of biases and weights.
        for b, w in zip(self.biases, self.weights):
            # Calculating the neuron output:

            # Dot product of the weights and activation, plus the neuron bias.
            z = np.dot(w, activation) + b
            zs.append(z)

            # New activation.
            activation = t.sigmoid(z)
            activations.append(activation)

        # Backpropagation part.
        delta = self.cost_function_prime(activations[-1], y) * t.sigmoid_prime(
            zs[-1])

        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())

        for l in range(2, self.number_of_layers):
            z = zs[-l]
            sp = t.sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())

        return (nabla_b, nabla_w)
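The helpers t.sigmoid and t.sigmoid_prime come from a module that is not shown here. Assuming the standard definitions, sigmoid_prime would look like the sketch below (the matching sigmoid sketch appears after Example #2).

import numpy as np

# Presumed t.sigmoid_prime, assuming the standard derivative of the sigmoid.
def sigmoid_prime(z):
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)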
Example #11
    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        item = self.items[question.user_id, question.place_id]

        if item.any_incorrect:
            strength = self.memory_strength(question)
        else:
            strength = 0

        prediction = tools.sigmoid(item.knowledge + strength)
        return self.respect_guess(prediction, question.options)
Example #12
    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        item = self.items[question.user_id, question.place_id]

        knowledge = (
            item.knowledge +
            self.gamma * len(item.correct) +
            self.delta * len(item.incorrect)
        )

        return tools.sigmoid(knowledge)
Example #13
    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        item = self.items[question.user_id, question.place_id]

        if item.practices:
            seconds = tools.time_diff(question.inserted, item.last_inserted)
            time_effect = self.time_effect(seconds)
        else:
            time_effect = 0

        prediction = tools.sigmoid(item.knowledge + time_effect)
        return self.respect_guess(prediction, question.options)
Example #14
    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        item = self.items[question.user_id, question.place_id]
        correct_weight, incorrect_weight = self.get_weights(item, question)

        knowledge = (
            item.knowledge +
            self.gamma * correct_weight +
            self.delta * incorrect_weight
        )

        prediction = tools.sigmoid(knowledge)
        return self.respect_guess(prediction, question.options)
Example #15
    def process(self, a):
        """ Feed forward processing of inputs.

            Input to the system is processed through the layers of the network,
            and an appropriate output is produced.

            a   Input to the system. Should be in the form of a column vector,
            just as np.random.randn(3, 1) would produce a (3x1)
            column vector.
        """

        # With each step, activation of layer is updated to represent
        # the activation of the next layer.
        # For a [2,3,1] neural network.
        #   Step 1: Input to Hidden Layer   w(3x2) x a(2x1) + b(3x1) -> sigmoid -> a' (3x1)
        #   Step 2: Hidden Layer to Output  w(1x3) x a(3x1) + b(1x1) -> sigmoid -> a' which is the output.
        for b, w in zip(self.biases, self.weights):
            a = t.sigmoid(np.dot(w, a) + b)

        # We basically flow the information through the layers
        # until the output is reached.
        return a
Example #16
File: lr.py  Project: bbz6810/myNlp
 def predict(self, x):
     wx = np.dot(x, self.w)
     return sigmoid(wx)
Example #17
 def predict(self, input):
     pred_1 = t.sigmoid(np.dot(input, self.weights1.T))
     pred_2 = t.relu(np.dot(pred_1, self.weights2.T))
     return t.relu(np.dot(pred_2, self.weights3.T))
Example #18
    def feedforward(self):

        self.layer1 = t.sigmoid(np.dot(self.input, self.weights1.T))
        self.layer2 = t.relu(np.dot(self.layer1, self.weights2.T))
        self.output = t.relu(np.dot(self.layer2, self.weights3.T))
Example #19
            one1_w = (net.l1.w != 0).float()
            one1_b = (net.l1.b != 0).float()
            one2_w = (net.l2.w != 0).float()
            one2_b = (net.l2.b != 0).float()
            one3_w = (net.l3.w != 0).float()
            one3_b = (net.l3.b != 0).float()
            one4_w = (net.l4.w != 0).float()
            one4_b = (net.l4.b != 0).float()
            sparsity = (torch.sum(one1_w) + torch.sum(one2_w) + torch.sum(one3_w) + torch.sum(one4_w) +
                        torch.sum(one1_b) + torch.sum(one2_b) + torch.sum(one3_b) + torch.sum(one4_b)) / total
            print('Epoch {}, Train_Loss: {}, phi_prior: {}, sparsity: {}'.format(epoch, np.mean(train_losses), phi_prior,
                                                                                 sparsity))
    print('Finished Training')

    # sparsity level
    one1_w = (sigmoid(net.l1.w_theta) > 0.5).float()
    one1_b = (sigmoid(net.l1.b_theta) > 0.5).float()
    one2_w = (sigmoid(net.l2.w_theta) > 0.5).float()
    one2_b = (sigmoid(net.l2.b_theta) > 0.5).float()
    one3_w = (sigmoid(net.l3.w_theta) > 0.5).float()
    one3_b = (sigmoid(net.l3.b_theta) > 0.5).float()
    one4_w = (sigmoid(net.l4.w_theta) > 0.5).float()
    one4_b = (sigmoid(net.l4.b_theta) > 0.5).float()
    sparse_overall = (torch.sum(one1_w) + torch.sum(one2_w) + torch.sum(one3_w) + torch.sum(one4_w) +
                      torch.sum(one1_b) + torch.sum(one2_b) + torch.sum(one3_b) + torch.sum(one4_b)) / total
    sparse_overalls.append(sparse_overall)
    sparse_overall2 = (torch.sum(sigmoid(net.l1.w_theta)) + torch.sum(sigmoid(net.l1.b_theta)) +\
                       torch.sum(sigmoid(net.l2.w_theta)) + torch.sum(sigmoid(net.l2.b_theta)) +\
                       torch.sum(sigmoid(net.l3.w_theta)) + torch.sum(sigmoid(net.l3.b_theta)))/total
    sparse_overalls2.append(sparse_overall2)
    torch.set_printoptions(profile="full")
Example #20
 for index, neuron in enumerate(network.layers[0].neurons):
     neuron.output = X[index]
 network.layers[0].add_outputs()
 network.layers[1].add_weights()
 network.layers[2].add_weights()
 network.layers[3].add_weights()
 network.layers[1].add_bias()
 network.layers[2].add_bias()
 network.layers[3].add_bias()
 ns_probs = [0 for _ in range(len(Y[:, 0]))]
 val_loss, loss, lr_val_auc, lr_auc = [], [], [], []
 for i in range(epochs):
     ############# feedforward
     ### Hidden layer 1 ###
     z_h_1 = X.dot(network.layers[1].weights.T) + network.layers[1].bias
     network.layers[1].outputs = sigmoid(z_h_1)
     ### Hidden layer 2 ###
     z_h_2 = network.layers[1].outputs.dot(
         network.layers[2].weights.T) + network.layers[2].bias
     network.layers[2].outputs = sigmoid(z_h_2)
     ###output###
     z_o = np.dot(network.layers[2].outputs,
                  network.layers[3].weights.T) + network.layers[3].bias
     network.layers[3].outputs = softmax(z_o)
     ### Backpropagation
     ## Output layer
     delta_z_o = network.layers[3].outputs - Y
     delta_w13 = network.layers[2].outputs
     dw_o = np.dot(delta_z_o.T, delta_w13) / X.shape[0]
     db_o = np.sum(delta_z_o, axis=0, keepdims=True) / X.shape[0]
     ## Hidden layer 2