Code example #1
0
 def __call__(self, x):
     """Return the total weight-decay penalty for tensor ``x``.

     Adds an L1 term (``self.l1 * |x|``) and/or an L2 term
     (``self.l2 * x**2``) whenever the corresponding coefficient
     is truthy (non-zero).
     """
     penalty = 0.
     if self.l1:
         penalty = penalty + B.sum(self.l1 * B.abs(x))
     if self.l2:
         penalty = penalty + B.sum(self.l2 * B.square(x))
     return penalty
Code example #2
0
def _test_optimizer(optimizer):
    """Smoke-test ``optimizer`` on a small quadratic loss.

    Builds two weight vectors over one minibatch of the 'probability'
    dataset, wires ``optimizer.get_updates`` into a training function,
    runs one step, and checks that a single scalar loss comes back.
    """
    batch_size = 10

    dataset = random.Random('probability')
    data = B.eval(dataset.train.data[0:batch_size])
    n_pixels = data.shape[1]

    # Two independent weight vectors, one per name.
    weights = [B.variable(np.random.normal(size=(n_pixels, )),
                          dtype=B.floatx(),
                          name=name)
               for name in ('W0', 'W1')]

    inputs = B.placeholder((batch_size, n_pixels), dtype=B.floatx())
    loss = B.sum(B.dot(inputs, B.square(weights[0]) + B.square(weights[1])))

    updates = optimizer.get_updates(weights, loss)
    train_step = B.function([inputs], [loss], updates=updates)

    output = train_step(data)
    assert len(output) == 1
    assert output[0].size == 1
Code example #3
0
 def get_gradients(self, loss, params):
     """Compute gradients of ``loss`` with respect to ``params``.

     When the optimizer defines a positive ``clipnorm`` attribute the
     gradients are rescaled by their joint global norm; a positive
     ``clipvalue`` attribute clips each gradient element-wise.
     """
     grads = B.gradients(loss, params)
     if getattr(self, 'clipnorm', 0) and self.clipnorm > 0:
         # Global norm across all parameter gradients, not per-tensor.
         global_norm = B.sqrt(sum(B.sum(B.square(g)) for g in grads))
         grads = [clip_norm(g, self.clipnorm, global_norm) for g in grads]
     if getattr(self, 'clipvalue', 0) and self.clipvalue > 0:
         grads = [B.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
     return grads
Code example #4
0
def exact_logZ(dbm):
    """Exactly calculate the log partition function of an RBM.

    Enumerates the states of the smaller of the two layers and sums the
    other layer out analytically via the softplus identity.

    # Arguments:
        dbm: DBM object; must have exactly two layers (i.e. be an RBM).

    # Returns:
        logZ: float; log of the exact partition function.
    """
    if len(dbm.layers) != 2:
        raise ValueError('Exact log partition assumes a RBM')

    visible, hidden = dbm.layers
    W = dbm.synapses[0].W

    # Enumerate states of whichever layer is smaller; the other layer
    # is summed out analytically below.
    if visible.dim < hidden.dim:
        width, b_in, b_z = visible.dim, visible.b, hidden.b
    else:
        width, b_in, b_z = hidden.dim, hidden.b, visible.b
        W = W.T

    # NOTE(review): the placeholder has width**2 rows, matching whatever
    # _pylearn2_allocation(width) produces; enumerating every binary
    # state would need 2**width rows — confirm the helper's output shape.
    inputs = B.placeholder(shape=(width**2, width), name='input')

    bias_term = B.dot(inputs, b_in)
    z = b_z + B.dot(inputs, W)

    # log-sum over the analytic layer for each enumerated state, then
    # logsumexp across states gives the log partition function.
    per_state = bias_term + B.sum(B.log(1 + B.exp(z)), axis=1)
    log_partition = B.logsumexp(per_state)

    fn = B.function([inputs], log_partition)

    states = _pylearn2_allocation(width)
    return fn(states)
Code example #5
0
    def _free_energy_sumover_parity(self, parity, input_ls, beta):
        """Free energy with every layer of the given parity summed out.

        Layers ``self.layers[parity::2]`` are integrated out analytically
        (softplus of their total input); the remaining layers contribute
        their bias energy at the supplied states in ``input_ls``.
        """
        beta = B.cast(beta, B.floatx())

        # Analytic sum over the chosen parity: -sum log(1 + exp(beta*z)).
        exact_z = B.concatenate(
            [layer.input_z(input_ls) for layer in self.layers[parity::2]],
            axis=1)
        free_energy = -B.sum(B.log(1 + B.exp(beta * exact_z)), axis=1)

        # Bias contribution of the clamped (opposite-parity) layers.
        other = int(not parity)
        for state, layer in zip(input_ls[other::2], self.layers[other::2]):
            free_energy = free_energy - beta * B.dot(state, layer.b)

        return free_energy