Example #1
def _test_optimizer(optimizer):

    mbs = 10  # minibatch size

    # library-specific dataset object exposing a train split
    dataset = random.Random('probability')
    data = B.eval(dataset.train.data[0:mbs])
    pixels = data.shape[1]

    W0 = B.variable(np.random.normal(size=(pixels, )),
                    dtype=B.floatx(),
                    name='W0')
    W1 = B.variable(np.random.normal(size=(pixels, )),
                    dtype=B.floatx(),
                    name='W1')
    params = [W0, W1]
    inputs = B.placeholder((mbs, pixels), dtype=B.floatx())
    loss = B.sum(B.dot(inputs, B.square(W0) + B.square(W1)))

    updates = optimizer.get_updates(params, loss)

    f = B.function([inputs], [loss], updates=updates)

    output = f(data)
    assert len(output) == 1
    assert output[0].size == 1
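A minimal usage sketch; `optimizers.SGD` and its `lr` argument are illustrative assumptions, not names confirmed by this snippet — any object exposing get_updates(params, loss) fits:

    # hypothetical optimizer class; only the get_updates interface is assumed
    opt = optimizers.SGD(lr=0.01)
    _test_optimizer(opt)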
Example #2
    def run_logZ(self):
        """Performs calculatations of AIS runs.
        
        Must be called before estimates.
        """

        # initial sample
        sample_ls = self.init_sample_ls

        # this is the initial beta=0 case
        log_ais_w = B.eval(self.dbm_a.free_energy_sumover_even(sample_ls, 1.0))

        log_ais_w = B.variable(log_ais_w, name='log_ais_w')
        index = B.variable(1, name='index', dtype=B.intx())

        scan_out, updates = B.scan(self._update,
                                   outputs_info=[log_ais_w, index] + sample_ls,
                                   n_steps=self.n_betas - 2,
                                   name='scan_ais')

        log_ais_w = scan_out[0][-1]
        # scan_out[1] is the step index; the chain samples follow it
        sample_ls = [s[-1] for s in scan_out[2:]]

        # this is the final beta=1 case
        log_ais_w -= self.dbm_b.free_energy_sumover_even(sample_ls, 1.0)

        logZ_fn = B.function([], [log_ais_w], updates=updates)

        # B.function returns a list of outputs, so take the single element
        self.logZ = self.logZa + logZ_fn()[0]
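What the scan accumulates, written out: AIS estimates the ratio Z_b / Z_a by averaging importance weights gathered along inverse temperatures \beta_0 = 0 < \dots < \beta_K = 1,

    \log w = \sum_{k=0}^{K-1} \left[ \log p^*_{\beta_{k+1}}(x_k) - \log p^*_{\beta_k}(x_k) \right]

where p^*_\beta is the unnormalized intermediate distribution. With p^*_\beta = e^{-F_\beta}, this matches the free energy of dbm_a added at beta=0 and the free energy of dbm_b subtracted at beta=1; the scanned _update presumably supplies the intermediate terms, and self.logZa is, by all appearances, log Z_a of the tractable base model.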
Example #3
    def _make_function(self, index, data, updates, name):
        givens = {self.inputs: data[index]}
        fn = B.function([index],
                        self.loss,
                        updates=updates,
                        givens=givens,
                        name=name)

        return fn
Example #4
def test_training_loss(nnet_type, training_type):
    nb_pos_steps = 2
    nb_neg_steps = 2
    batch_size = 6

    nnet = nnet_for_testing(nnet_type)
    train = _init_training(training_type, nnet, nb_pos_steps, nb_neg_steps,
                           batch_size)
    inputs, data = _init_data(batch_size)

    loss = train.loss_fn()
    fn = B.function([inputs], loss(inputs), updates=train.updates)

    output = fn(data)
    assert output.size == 1
Example #5
def exact_logZ(dbm):
    """
    Exactly calculate the partition function for a RBM.
    
    # Arguments:
        dbm: DBM object; must be a RBM.
        
    # Returns:
        logZ: float; log of the exact partition function
    """

    if len(dbm.layers) != 2:
        raise ValueError('Exact log partition assumes an RBM')

    n0 = dbm.layers[0].dim
    n1 = dbm.layers[1].dim
    b0 = dbm.layers[0].b
    b1 = dbm.layers[1].b
    W = dbm.synapses[0].W

    # Enumerate the states of the smaller layer and sum over the larger
    # one analytically.
    if n0 < n1:
        width = n0
        b_in = b0
        b_z = b1
    else:
        width = n1
        b_in = b1
        b_z = b0
        W = W.T

    # one row per binary configuration of the enumerated layer
    inputs = B.placeholder(shape=(2**width, width), name='input')

    b_logZ = B.dot(inputs, b_in)
    z = b_z + B.dot(inputs, W)

    # all 2**width binary configurations (assumed to be what
    # _pylearn2_allocation returns)
    logZ_data = _pylearn2_allocation(width)

    logZ_all = b_logZ + B.sum(B.log(1 + B.exp(z)), axis=1)

    logZ_output = B.logsumexp(logZ_all)

    fn = B.function([inputs], logZ_output)

    logZ = fn(logZ_data)

    return logZ
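In LaTeX, the quantity being computed: with the possible transpose above, the larger layer is summed out analytically, so only the 2^width binary states v of the smaller layer are enumerated:

    \log Z = \operatorname{logsumexp}_{v \in \{0,1\}^{width}} \Big[ b_{in}^\top v + \sum_j \log\big(1 + e^{(b_z + v^\top W)_j}\big) \Big]

The rows of `inputs` are those states, `b_logZ` is the bias term, and the softplus sum over axis 1 marginalizes the other layer.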
Example #6
    def estimate_log_error_Z(self):
        """Error bars on estimate of partition function.
        
        The output is the mean and +- 3 standard deviations of the true
        (ie not logged) partition function. This is why the standard deviations
        are not symmetric about the mean.
        
        Returns:
            * mean logZ: float
            * -3 std logZ: float
            * +3 std logZ: float
        """

        if not hasattr(self, 'logZ'):
            raise RuntimeError(
                'You must run_logZ before calculating the final estimates.')

        logZ = B.placeholder(shape=self.logZ.shape)

        # log of the mean AIS weight, computed stably with logsumexp
        logZ_mean = B.logsumexp(logZ) - B.log(self.n_runs)

        # log standard error of the mean, with the max factored out for
        # numerical stability
        m = B.max(logZ)
        logstd_AIS = B.log(B.std(
            B.exp(logZ - m))) + m - B.log(self.n_runs) / 2.0

        # find +- 3 std
        l_input = B.stack([B.log(3) + logstd_AIS, logZ_mean])
        logZ_high = B.logsumexp(l_input)
        logZ_low = B.logdiffexp(l_input)

        # actually calculate the estimates
        logZ_est_fn = B.function([logZ], [logZ_mean, logZ_low, logZ_high])
        logZ_out, logZ_low_out, logZ_high_out = logZ_est_fn(self.logZ)

        # convert to floats
        logZ_out = logZ_out.item()
        logZ_low_out = logZ_low_out.item()
        logZ_high_out = logZ_high_out.item()

        # logdiffexp is NaN when mean - 3*std is negative; fall back to 0.0
        if np.isnan(logZ_low_out):
            logZ_low_out = 0.0

        return logZ_out, logZ_low_out, logZ_high_out
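With M = n_runs importance weights w_i kept as logZ_i = \log w_i and m = \max_i \log w_i, the code evaluates the stable forms

    \log \hat{Z} = \operatorname{logsumexp}_i(\log w_i) - \log M
    \log \hat{s} = m + \log \operatorname{std}_i\big(e^{\log w_i - m}\big) - \tfrac{1}{2}\log M

so \hat{s} is the standard error of the mean weight, and the reported bounds \log(\hat{Z} \pm 3\hat{s}) are formed with logsumexp and logdiffexp without leaving the log domain.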
Example #7
def test_training_prob(nnet_type, training_type, pos_neg):
    nb_pos_steps = 2
    nb_neg_steps = 2
    batch_size = 6

    nnet = nnet_for_testing(nnet_type)
    train = _init_training(training_type, nnet, nb_pos_steps, nb_neg_steps,
                           batch_size)
    inputs, data = _init_data(batch_size)

    prob = train.pos_stats(inputs)
    if pos_neg == 'neg':
        prob = train.neg_stats(prob)

    fn = B.function([inputs], prob, updates=train.updates)

    output = fn(data)
    assert len(output) == len(nnet.layers)
    for out, size in zip(output, nnet.layer_size_list):
        assert out.shape == (batch_size, size)
Example #8
def test_Sampler(nnet_type, constant, sampler_type):
    beta = 1.0
    nnet = nnet_for_testing(nnet_type)
    batch_size = 30

    constant_ls = []
    if constant is not None:
        constant_ls = [constant]

    if sampler_type == 'meanfield':
        sampler = samplers.Meanfield(nnet)
    elif sampler_type == 'gibbs':
        sampler = samplers.Gibbs(nnet)
    elif sampler_type == 'gibbs_prob':
        sampler = samplers.GibbsProb(nnet)
    else:
        raise NotImplementedError

    input_ls = [np.ones((batch_size, size)) for size in nnet.layer_size_list]
    input_ls_placeholder = [B.placeholder(in_np.shape) for in_np in input_ls]

    sampler.set_param(beta=beta, constant=constant_ls)

    prob_ls, updates = sampler.run_chain(input_ls_placeholder,
                                         beta=beta,
                                         constant=constant_ls)
    fn = B.function(input_ls_placeholder, prob_ls, updates=updates)
    prob_ls = fn(*input_ls)

    assert len(prob_ls) == len(input_ls)
    for i, p in enumerate(prob_ls):
        if i in constant_ls:
            assert_allclose(p, input_ls[i])
        else:
            m = np.ones((batch_size, nnet.layer_size_list[i]))
            if sampler_type == 'gibbs':
                # binary samples: p + NOT(p) is exactly all ones
                assert_allclose((p + np.logical_not(p)), m)
            else:
                # mean-field / probability outputs settle at 0.5
                assert_allclose(p, 0.5 * m)
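The gibbs branch relies on an identity that holds only for exact 0/1 arrays; a self-contained NumPy sketch of why it separates samples from probabilities:

    import numpy as np

    p = np.array([0., 1., 1., 0.])                 # binary sample
    assert np.all(p + np.logical_not(p) == 1)      # 0 -> +1, 1 -> +0

    q = np.array([0.5, 1.0])                       # probabilities, not samples
    assert not np.all(q + np.logical_not(q) == 1)  # 0.5 is truthy: 0.5 + 0 != 1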