def __contrastive_divergence_rsm__(self, vis, hid, D):
        """
        One negative (reconstruction) step of contrastive divergence for a
        Replicated Softmax Model (RSM).

        @param vis: Visible-layer batch (bag-of-words counts), one row per document.
        @param hid: Hidden-layer activations for the batch.
        @param D: Per-row totals used as the multinomial sample size — presumably
            word counts per document; verify against caller.
        @return: Tuple (neg_vis, neg_hid_prob, D, perplexity): the sampled
            reconstruction, hidden probabilities driven by it, the updated
            per-row totals, and the log-likelihood term nansum(vis * log(softmax)).
        """
        # Top-down visible activation from the hidden sample.
        neg_vis = dot(hid, self.weights.T) + self.visible_biases
        softmax_value = dbn.softmax(neg_vis)
        # Zero the buffer, then sample one multinomial reconstruction per row.
        neg_vis *= 0
        # range (not the Python-2-only xrange) keeps this runnable on Python 3.
        for i in range(len(vis)):
            neg_vis[i] = random.multinomial(D[i], softmax_value[i], size=1)
        D = sum(neg_vis, axis=1)

        # nansum ignores NaNs from log(0) in zero-probability cells.
        perplexity = nansum(vis * log(softmax_value))

        # D scales the hidden biases per document (replicated-softmax formulation).
        neg_hid_prob = dbn.sigmoid(
            dot(neg_vis, self.weights) + outer(D, self.hidden_biases))

        return neg_vis, neg_hid_prob, D, perplexity
# Esempio n. 2  (scraper artifact — kept as a comment so the file stays parseable)
# 0
    def __contrastive_divergence_rsm__(self, vis, hid, D):
        """
        One negative (reconstruction) step of contrastive divergence for a
        Replicated Softmax Model (RSM).

        @param vis: Visible-layer batch (bag-of-words counts), one row per document.
        @param hid: Hidden-layer activations for the batch.
        @param D: Per-row totals used as the multinomial sample size — presumably
            word counts per document; verify against caller.
        @return: Tuple (neg_vis, neg_hid_prob, D, perplexity): the sampled
            reconstruction, hidden probabilities driven by it, the updated
            per-row totals, and the log-likelihood term nansum(vis * log(softmax)).
        """
        # Top-down visible activation from the hidden sample.
        neg_vis = dot(hid, self.weights.T) + self.visible_biases
        softmax_value = dbn.softmax(neg_vis)
        # Zero the buffer, then sample one multinomial reconstruction per row.
        neg_vis *= 0
        # range (not the Python-2-only xrange) keeps this runnable on Python 3.
        for i in range(len(vis)):
            neg_vis[i] = random.multinomial(D[i], softmax_value[i], size=1)
        D = sum(neg_vis, axis=1)

        # nansum ignores NaNs from log(0) in zero-probability cells.
        perplexity = nansum(vis * log(softmax_value))

        # D scales the hidden biases per document (replicated-softmax formulation).
        neg_hid_prob = dbn.sigmoid(
            dot(neg_vis, self.weights) + outer(D, self.hidden_biases))

        return neg_vis, neg_hid_prob, D, perplexity
def generate_output_data(x,
                         weight_matrices_added_biases,
                         binary_output=False,
                         sampled_noise=None):
    """
    Compute forwards-pass in the deep autoencoder and compute the output.

    @param x: The BOW (bag-of-words matrix; last column is the appended bias).
    @param weight_matrices_added_biases: The weight matrices added biases.
    @param binary_output: If the output of the DBN must be binary. If so, Gaussian noise will be added to bottleneck.
    @param sampled_noise: The gaussian noise matrix in case of binary output units.
    @return: Tuple (xout, z_values): the softmax output (zeros replaced by
        machine epsilon) and the per-layer activations with bias columns.
    """
    z_values = []
    # Per-document word counts, used to scale the first layer's bias row.
    NN = sum(x, axis=1)
    num_layers = len(weight_matrices_added_biases)
    # Floor division: the bottleneck index must be an integer in Python 3 too.
    bottleneck = num_layers // 2 - 1
    for i in range(num_layers - 1):
        if i == 0:
            # Input layer: strip x's bias column; the weight matrix's last
            # row is its bias, scaled by the word count NN.
            z = dbn.sigmoid(
                dot(x[:, :-1], weight_matrices_added_biases[i][:-1, :]) +
                outer(NN, weight_matrices_added_biases[i][-1, :]))
        elif i == bottleneck:
            # Bottleneck is linear; optionally perturbed with Gaussian noise
            # when binary codes are requested.
            act = dot(z_values[i - 1], weight_matrices_added_biases[i])
            if binary_output and sampled_noise is not None:
                z = act + sampled_noise
            else:
                z = act
        else:
            z = dbn.sigmoid(
                dot(z_values[i - 1], weight_matrices_added_biases[i]))

        # Append a column of ones so the next layer's added-bias row applies.
        z = append(z, ones((len(x), 1), dtype=float64), axis=1)
        z_values.append(z)

    neg_vis = dot(z_values[-1], weight_matrices_added_biases[-1])
    xout = dbn.softmax(neg_vis)
    # Vectorized: replace exact zeros with machine epsilon so downstream
    # log() stays finite (no-op when there are no zeros).
    xout[xout == 0] = finfo(float).eps
    return xout, z_values
def generate_output_data(x, weight_matrices_added_biases, binary_output=False, sampled_noise=None):
    """
    Compute forwards-pass in the deep autoencoder and compute the output.

    @param x: The BOW (bag-of-words matrix; last column is the appended bias).
    @param weight_matrices_added_biases: The weight matrices added biases.
    @param binary_output: If the output of the DBN must be binary. If so, Gaussian noise will be added to bottleneck.
    @param sampled_noise: The gaussian noise matrix in case of binary output units.
    @return: Tuple (xout, z_values): the softmax output (zeros replaced by
        machine epsilon) and the per-layer activations with bias columns.
    """
    z_values = []
    # Per-document word counts, used to scale the first layer's bias row.
    NN = sum(x, axis=1)
    num_layers = len(weight_matrices_added_biases)
    # Floor division: the bottleneck index must be an integer in Python 3 too.
    bottleneck = num_layers // 2 - 1
    for i in range(num_layers - 1):
        if i == 0:
            # Input layer: strip x's bias column; the weight matrix's last
            # row is its bias, scaled by the word count NN.
            z = dbn.sigmoid(
                dot(x[:, :-1], weight_matrices_added_biases[i][:-1, :]) +
                outer(NN, weight_matrices_added_biases[i][-1, :]))
        elif i == bottleneck:
            # Bottleneck is linear; optionally perturbed with Gaussian noise
            # when binary codes are requested.
            act = dot(z_values[i - 1], weight_matrices_added_biases[i])
            if binary_output and sampled_noise is not None:
                z = act + sampled_noise
            else:
                z = act
        else:
            z = dbn.sigmoid(
                dot(z_values[i - 1], weight_matrices_added_biases[i]))

        # Append a column of ones so the next layer's added-bias row applies.
        z = append(z, ones((len(x), 1), dtype=float64), axis=1)
        z_values.append(z)

    neg_vis = dot(z_values[-1], weight_matrices_added_biases[-1])
    xout = dbn.softmax(neg_vis)
    # Vectorized: replace exact zeros with machine epsilon so downstream
    # log() stays finite (no-op when there are no zeros).
    xout[xout == 0] = finfo(float).eps
    return xout, z_values