Example 1
    def _forward(self, inp, train):
        """Forward pass of a denoising autoencoder with tied weights."""
        if train:
            # Randomly zero out inputs with probability `denoise_prob` (denoising corruption).
            mask = 1.0 - np.random.binomial(1, self.denoise_prob, len(inp))
        else:
            # No corruption at test time.
            mask = np.ones(len(inp))
        masked_inp = mask * inp
        hid = sigmoid(masked_inp.dot(self.weight) + self.bias_enc)   # encode
        rec = sigmoid(hid.dot(self.weight.T) + self.bias_dec)        # decode / reconstruct
        return masked_inp, hid, rec
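A self-contained sketch of the same corrupt, encode, decode path on toy arrays; the sigmoid, the shapes (5 visible units, 3 hidden units) and denoise_prob = 0.3 are assumptions for illustration:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(0)
inp = rng.random(5)                           # 5 visible units
weight = rng.standard_normal((5, 3)) * 0.1    # tied encoder/decoder weights
bias_enc, bias_dec = np.zeros(3), np.zeros(5)

mask = 1.0 - rng.binomial(1, 0.3, len(inp))   # drop each input with probability 0.3
masked_inp = mask * inp
hid = sigmoid(masked_inp.dot(weight) + bias_enc)
rec = sigmoid(hid.dot(weight.T) + bias_dec)
print(masked_inp.shape, hid.shape, rec.shape)  # (5,) (3,) (5,)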
Example 2
def varbvsnormupdate(X, sigma, sa, logodds, xy, d, alpha0, mu0, Xr0, i):
    """Coordinate-ascent updates of the variational parameters (alpha, mu)
    for Bayesian variable selection in linear regression."""
    n, p = X.shape
    alpha, mu, Xr = alpha0.copy(), mu0.copy(), Xr0.copy()
    for j in i:
        # Posterior variance of coefficient j given that it is included.
        s = sa * sigma / (sa * d[j] + 1)
        # Update the posterior mean, removing variable j's current contribution to Xr.
        r = alpha[j] * mu[j]
        mu[j] = s / sigma * (xy[j] + d[j] * r - np.sum(X[:, j] * Xr))
        # Update the posterior inclusion probability.
        alpha[j] = misc.sigmoid(logodds[j] + (np.log(s / (sa * sigma)) + mu[j]**2 / s) / 2)
        # Refresh the fitted values Xr = X @ (alpha * mu).
        Xr = Xr + (alpha[j] * mu[j] - r) * X[:, j]
    return {'alpha': alpha, 'mu': mu, 'Xr': Xr}
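A small driving sketch, assuming misc.sigmoid is an element-wise logistic, and building the precomputed quantities xy = X'y and d = diag(X'X) directly; all values here are toy assumptions:

import numpy as np
rng = np.random.default_rng(0)

n, p = 100, 10
X = rng.standard_normal((n, p))
y = rng.standard_normal(n)

sigma, sa = 1.0, 1.0                      # residual and prior variances (assumed)
logodds = np.full(p, np.log(0.1 / 0.9))   # prior log-odds of inclusion
xy = y @ X                                # X'y
d = np.sum(X**2, axis=0)                  # diag(X'X)

alpha0 = np.full(p, 0.1)
mu0 = np.zeros(p)
Xr0 = X @ (alpha0 * mu0)                  # current fitted values

out = varbvsnormupdate(X, sigma, sa, logodds, xy, d, alpha0, mu0, Xr0, range(p))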
Example 3
    def compute_node_output(self, input):
        """Compute the output of the node layer, i.e. the activation f(w'x + b)
        for each node, using the dot product."""
        a = np.zeros(self.n_nodes)
        for i, (weights, bias) in enumerate(zip(self.input_node_weights, self.node_bias)):
            z = np.dot(input, weights) + bias
            a[i] = misc.sigmoid(z)
        return a
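For reference, the per-node loop above is equivalent to a single vectorized expression once the weight vectors are stacked column-wise into a matrix; a small sketch with assumed shapes and a local sigmoid:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

x = np.random.rand(4)             # one input vector
W = np.random.randn(4, 3)         # column j plays the role of input_node_weights[j]
b = np.zeros(3)                   # node_bias

a_loop = np.array([sigmoid(np.dot(x, W[:, j]) + b[j]) for j in range(3)])
a_vec = sigmoid(x @ W + b)        # same result in one shot
assert np.allclose(a_loop, a_vec)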
Example 4
File: losses.py Project: osdf/utils
def mia(z, targets, predict=False, error=False, addon=0):
    """
    Multiple independent attributes.

    Feed model output _z_ through the logistic sigmoid to get
    one Bernoulli probability per attribute.
    """
    bern = sigmoid(z)
    if predict:
        return bern
    n, _ = bern.shape
    # Loss is the binary cross entropy, summed over
    # attributes and averaged over samples.
    bce = -(targets * np.log(bern) + (1 - targets) * np.log(1 - bern))
    bce = np.mean(np.sum(bce, axis=1))
    if error:
        # Also return the gradient of the loss with respect to z.
        return bce + addon, (bern - targets) / n
    else:
        return bce + addon
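A quick sketch of the three calling modes on toy data, assuming mia (and the sigmoid it uses) is importable from the project's losses.py:

import numpy as np
from losses import mia

rng = np.random.default_rng(0)
z = rng.standard_normal((8, 4))                        # 8 samples, 4 independent attributes
targets = rng.integers(0, 2, size=(8, 4)).astype(float)

probs = mia(z, targets, predict=True)      # per-attribute Bernoulli probabilities
loss = mia(z, targets)                     # scalar binary cross-entropy
loss, grad = mia(z, targets, error=True)   # loss plus d(loss)/dz, shape (8, 4)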
Example 5
def score(weights, structure, inputs, predict=False, error=False, **params):
    """
    Score of a tied-weight autoencoder with a contractive (CAE)
    penalty on the hidden representation.
    """
    hdim = structure["hdim"]
    _, idim = inputs.shape
    ih = idim * hdim

    # Flat parameter vector: encoder weights, then hidden bias, then visible bias.
    w = weights[:ih].reshape(idim, hdim)
    hddn = sigmoid(np.dot(inputs, w) + weights[ih:ih+hdim])
    z = np.dot(hddn, w.T) + weights[ih+hdim:]

    # Contractive penalty: squared Frobenius norm of the Jacobian of the hidden units.
    cae = np.sum(np.mean(Dsigmoid(hddn)**2, axis=0) * np.sum(w**2, axis=0))
    cae *= structure["cae"]

    if error:
        structure["hiddens"] = hddn
    return structure["score"](z, inputs, predict=predict, error=error, addon=cae)
Example 6
    def ApproxPlot(self, update=False):
        if update:
            print('input generated sample number')
            s_num = int(input())
            print('input number of components used for the approximation')
            n_comp = int(input())
            self.NormApprox(sample_num=s_num, n_comp=n_comp)

        Norm = Norm2Dmix(mus=self.params['mus'],
                         covs=self.params['covs'],
                         pi=self.params['pi'])

        # Evaluate the mixture density on the grid, then squash it through a sigmoid.
        q = np.array([[Norm.pdf(x=np.array([x, y])) for x in self.xgrid]
                      for y in self.ygrid])

        g = sigmoid(self.a * q + self.b)

        plt.figure(figsize=(12, 8))

        plt.subplot(221)
        plt.title('original shade ratio')
        sns.heatmap(self.f, annot=False, cmap='YlGnBu_r', vmin=0, vmax=1)

        plt.subplot(222)
        plt.title('approximated shade ratio')
        sns.heatmap(g, annot=False, cmap='YlGnBu_r', vmin=0, vmax=1)

        plt.subplot(223)
        plt.title('extrapolated shade ratio')
        sns.heatmap(self.fouter, annot=False, cmap='YlGnBu_r', vmin=0, vmax=1)

        plt.subplot(224)
        plt.title('approximated probability density')
        sns.heatmap(q, annot=False, cmap='YlGnBu_r')

        plt.show()
Example 7
def varbvsnormindep(X, y, sigma, sa, logodds):
    """Variational estimates of alpha and mu, treating the variables as independent."""
    s = sa * sigma / (sa * misc.diagsq(X) + 1)
    mu = s * np.dot(y, X) / sigma
    alpha = misc.sigmoid(logodds + (np.log(s / (sa * sigma)) + mu**2 / s) / 2)
    return {"alpha": alpha, "mu": mu, "s": s}
Example 8
def forward_propagation_with_dropout(X, parameters, keep_prob=0.5):
    """
    Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (20, 2)
                    b1 -- bias vector of shape (20, 1)
                    W2 -- weight matrix of shape (3, 20)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1,1)
    cache -- tuple, information stored for computing the backward propagation
    """
    import numpy as np
    from misc import relu, sigmoid

    np.random.seed(1)

    # retrieve parameters
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    ### START CODE HERE ### (approx. 4 lines)
    D1 = np.random.rand(A1.shape[0], A1.shape[1])  # Step 1: initialize matrix D1
    D1 = (D1 < keep_prob)                          # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
    A1 = A1 * D1                                   # Step 3: shut down some neurons of A1
    A1 = A1 / keep_prob                            # Step 4: scale the values of neurons that haven't been shut down (inverted dropout)
    ### END CODE HERE ###
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    ### START CODE HERE ### (approx. 4 lines)
    D2 = np.random.rand(A2.shape[0], A2.shape[1])  # Step 1: initialize matrix D2
    D2 = (D2 < keep_prob)                          # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
    A2 = A2 * D2                                   # Step 3: shut down some neurons of A2
    A2 = A2 / keep_prob                            # Step 4: scale the values of neurons that haven't been shut down (inverted dropout)
    ### END CODE HERE ###
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)

    return A3, cache
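A sketch of exercising the function with randomly initialized parameters of the shapes listed in the docstring; the initialization and the batch size are assumptions, and the misc helpers the function imports are assumed to be available:

import numpy as np
np.random.seed(3)

m = 5                                   # number of examples (assumed)
X = np.random.randn(2, m)
parameters = {
    "W1": np.random.randn(20, 2) * 0.01, "b1": np.zeros((20, 1)),
    "W2": np.random.randn(3, 20) * 0.01, "b2": np.zeros((3, 1)),
    "W3": np.random.randn(1, 3) * 0.01,  "b3": np.zeros((1, 1)),
}

A3, cache = forward_propagation_with_dropout(X, parameters, keep_prob=0.86)
print(A3.shape)   # (1, 5)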
Example 9
    def forward(self, in_train):
        # Cache the input for the backward pass, then apply the affine map followed by a sigmoid.
        self.in_train = in_train
        self.out_act = sigmoid(in_train.dot(self.weight) + self.bias)
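For context, a self-contained sketch of a minimal layer built around this forward pass; the shapes, the local sigmoid, and the added return value are assumptions for illustration:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

class SigmoidLayer:
    def __init__(self, n_in, n_out, seed=0):
        rng = np.random.default_rng(seed)
        self.weight = rng.standard_normal((n_in, n_out)) * 0.1
        self.bias = np.zeros(n_out)

    def forward(self, in_train):
        # Cache the input, apply the affine map plus sigmoid, and expose the activation.
        self.in_train = in_train
        self.out_act = sigmoid(in_train.dot(self.weight) + self.bias)
        return self.out_act

layer = SigmoidLayer(4, 2)
out = layer.forward(np.random.rand(3, 4))   # batch of 3 inputs -> activations of shape (3, 2)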