Code example #1
File: models.py  Project: alexhunterlang/natural_bm
    def fit(self,
            x,
            batch_size=100,
            n_epoch=10,
            callbacks=None,
            validation_data=None,
            shuffle=True,
            initial_epoch=0):
        """Trains the model for a fixed number of epochs (iterations on a dataset).
        
        # Arguments
            x: Theano shared array of training data
            batch_size: integer. Number of samples per gradient update.
            n_epoch: integer, the number of times to iterate
                over the training data.
            callbacks: list of callbacks to be called during training.
            validation_data: Theano shared array of data on which to evaluate
                the loss and any model metrics at the end of each epoch.
                The model will not be trained on this data.
            shuffle: boolean, whether to shuffle the training data
                before each epoch.
            initial_epoch: epoch at which to start training
                (useful for resuming a previous training run)
        
        # Returns
            A `History` instance. Its `history` attribute contains
            all information collected during training.
        """
        self.train_data = x
        self.n_train_sample = B.eval(x.shape[0])
        self.validation_data = validation_data

        # makes the generic indices to access data
        self.train_index = B.placeholder(shape=(batch_size,),
                                         dtype=B.intx(), name='train_index')

        # makes the training functions
        self._make_train_function()
        f = self.train_function

        # preps for validation
        out_labels = ['cost']
        if validation_data:
            self.valid_index = B.placeholder(shape=(batch_size,),
                                             dtype=B.intx(), name='valid_index')
            callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
            self._make_validation_function()
            val_f = self.validation_function
        else:
            callback_metrics = copy.copy(out_labels)
            val_f = None

        # delegate logic to _fit_loop
        return self._fit_loop(f, out_labels=out_labels,
                              batch_size=batch_size, n_epoch=n_epoch,
                              callbacks=callbacks,
                              val_f=val_f, shuffle=shuffle,
                              callback_metrics=callback_metrics,
                              initial_epoch=initial_epoch)
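A minimal usage sketch for the `fit` method above, assuming the surrounding model class has been built as in example #5; the variable names, epoch count, and the `history.history['cost']` key are illustrative assumptions based on `out_labels`, not code from the project:

# Hypothetical call; x_train and x_valid are assumed to be Theano shared
# arrays, e.g. wrapped with B.variable(...) as in the optimizer test below.
history = model.fit(x_train,
                    batch_size=100,
                    n_epoch=25,
                    validation_data=x_valid,
                    shuffle=True)

# Given out_labels = ['cost'], history.history should hold per-epoch 'cost'
# values (and 'val_cost' when validation_data is supplied).
print(history.history['cost'][-1])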
Code example #2
def _init_data(batch_size):

    shape = (batch_size, 10)
    inputs = B.placeholder(shape=shape)
    data = np.random.uniform(size=shape)

    return inputs, data
Code example #3
def _test_optimizer(optimizer):

    mbs = 10

    dataset = random.Random('probability')
    data = B.eval(dataset.train.data[0:mbs])
    pixels = data.shape[1]

    W0 = B.variable(np.random.normal(size=(pixels, )),
                    dtype=B.floatx(),
                    name='W0')
    W1 = B.variable(np.random.normal(size=(pixels, )),
                    dtype=B.floatx(),
                    name='W1')
    params = [W0, W1]
    inputs = B.placeholder((mbs, pixels), dtype=B.floatx())
    loss = B.sum(B.dot(inputs, B.square(W0) + B.square(W1)))

    updates = optimizer.get_updates(params, loss)

    f = B.function([inputs], [loss], updates=updates)

    output = f(data)
    assert len(output) == 1
    assert output[0].size == 1
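A sketch of how a helper like `_test_optimizer` is usually driven from a test; the `optimizers` import path and the `SGD(lr=...)` constructor are assumptions and may not match the project exactly:

# Hypothetical driver; the import path and optimizer arguments are assumptions.
import pytest
from natural_bm import optimizers

@pytest.mark.parametrize('optimizer', [optimizers.SGD(lr=0.01)])
def test_optimizer(optimizer):
    _test_optimizer(optimizer)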
Code example #4
File: models.py  Project: alexhunterlang/natural_bm
    def predict_on_batch(self, x):
        """Runs a single gradient update on a single batch of data.
        # Arguments
            x: Numpy array of training data,
                or list of Numpy arrays if the model has multiple inputs.
                If all inputs in the model are named,
                you can also pass a dictionary
                mapping input names to Numpy arrays.
        # Returns
            Scalar training loss
            (if the model has a single output and no metrics)
            or list of scalars (if the model has multiple outputs
            and/or metrics).
        """

        # makes the generic indices to access data
        batch_size = B.eval(x.shape)[0]
        self.test_index = B.placeholder(shape=(batch_size,),
                                        dtype=B.intx(), name='test_index')
        self.test_data = x
        index = np.arange(batch_size)

        self._make_test_function()
        outputs = self.test_function(index)

        return outputs
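A usage sketch for `predict_on_batch`; because the method reads the batch size symbolically from `x.shape`, the data is wrapped with `B.variable` here, and the shape is illustrative (it must match the model's input layer size):

# Hypothetical usage; wrapping the NumPy batch with B.variable so that
# B.eval(x.shape) inside predict_on_batch can resolve the number of rows.
import numpy as np

x_test = B.variable(np.random.uniform(size=(50, 784)))
outputs = model.predict_on_batch(x_test)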
Code example #5
File: models.py  Project: alexhunterlang/natural_bm
    def __init__(self, nnet, optimizer, trainer):

        self.nnet = nnet
        self.optimizer = optimizer
        self.trainer = trainer

        self.inputs = B.placeholder(shape=(None, self.nnet.layer_size_list[0]), name='x')
        self.loss_fn = trainer.loss_fn()
        loss = self.loss_fn(self.inputs)
        for part in self.nnet.parts:
            for pl in part.losses:
                loss += pl                
        self.loss = loss

        self.trainable_weights = self.nnet.trainable_weights
        self._updates = self.trainer.updates
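A rough sketch of what this constructor expects from its collaborators. The stub below only mirrors the interfaces actually touched in `__init__` (a `loss_fn()` method returning a callable and an `updates` attribute); the class name `Model`, the `nnet_for_testing('rbm')` call, and passing `optimizer=None` are assumptions for illustration:

# Stub trainer illustrating the interface __init__ relies on; it is not one
# of the project's real trainer classes.
class StubTrainer:
    updates = []
    def loss_fn(self):
        # trivial loss: sum of the input batch
        return lambda x: B.sum(x)

# nnet_for_testing is the project's test-utility network builder (see the
# sampler test below); 'rbm' as an accepted argument is an assumption.
nnet = nnet_for_testing('rbm')
model = Model(nnet, optimizer=None, trainer=StubTrainer())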
Code example #6
def exact_logZ(dbm):
    """
    Exactly calculate the partition function for a RBM.
    
    # Arguments:
        dbm: DBM object; must be a RBM.
        
    # Returns:
        logZ: float; log of the exact partition function
    """

    if len(dbm.layers) != 2:
        raise ValueError('Exact log partition assumes an RBM')

    n0 = dbm.layers[0].dim
    n1 = dbm.layers[1].dim
    b0 = dbm.layers[0].b
    b1 = dbm.layers[1].b
    W = dbm.synapses[0].W

    # Pick whether to iterate over visible or hidden states.
    if n0 < n1:
        width = n0
        b_in = b0
        b_z = b1
    else:
        width = n1
        b_in = b1
        b_z = b0
        W = W.T

    inputs = B.placeholder(shape=(width**2, width), name='input')

    b_logZ = B.dot(inputs, b_in)
    z = b_z + B.dot(inputs, W)

    logZ_data = _pylearn2_allocation(width)

    logZ_all = b_logZ + B.sum(B.log(1 + B.exp(z)), axis=1)

    logZ_output = B.logsumexp(logZ_all)

    fn = B.function([inputs], logZ_output)

    logZ = fn(logZ_data)

    return logZ
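The marginalization used above is logZ = logsumexp_v [ b_in . v + sum_j log(1 + exp(b_z_j + (v W)_j)) ]: the larger layer is summed out analytically while every binary state of the smaller layer is enumerated. A pure NumPy cross-check of that identity on a tiny RBM (all names are local to this sketch and independent of the backend):

# NumPy cross-check of the analytic marginalization; not project code.
import itertools
import numpy as np

def brute_force_logZ(b0, b1, W):
    # enumerate every joint (v, h) configuration of a tiny RBM
    energies = []
    for v in itertools.product([0, 1], repeat=len(b0)):
        for h in itertools.product([0, 1], repeat=len(b1)):
            v, h = np.array(v), np.array(h)
            energies.append(b0 @ v + b1 @ h + v @ W @ h)
    return np.logaddexp.reduce(energies)

def marginalized_logZ(b0, b1, W):
    # enumerate only the smaller layer, marginalizing the other analytically
    states = np.array(list(itertools.product([0, 1], repeat=len(b0))))
    z = b1 + states @ W
    log_terms = states @ b0 + np.sum(np.log1p(np.exp(z)), axis=1)
    return np.logaddexp.reduce(log_terms)

rng = np.random.RandomState(0)
b0, b1, W = rng.randn(3), rng.randn(4), rng.randn(3, 4)
assert np.isclose(brute_force_logZ(b0, b1, W), marginalized_logZ(b0, b1, W))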
Code example #7
    def estimate_log_error_Z(self):
        """Error bars on estimate of partition function.
        
        The output is the mean and +- 3 standard deviations of the true
        (ie not logged) partition function. This is why the standard deviations
        are not symmetric about the mean.
        
        Returns:
            * mean logZ: float
            * -3 std logZ: float
            * +3 std logZ: float
        """

        if not hasattr(self, 'logZ'):
            raise RuntimeError(
                'You must run_logZ before calculating the final estimates.')

        logZ = B.placeholder(shape=self.logZ.shape)

        # this is the mean factor from the weights
        logZ_mean = B.logsumexp(logZ) - B.log(self.n_runs)

        # this is the standard deviation
        m = B.max(logZ)
        logstd_AIS = B.log(B.std(
            B.exp(logZ - m))) + m - B.log(self.n_runs) / 2.0

        # find +- 3 std
        l_input = B.stack([B.log(3) + logstd_AIS, logZ_mean])
        logZ_high = B.logsumexp(l_input)
        logZ_low = B.logdiffexp(l_input)

        # actually calculate the estimates
        logZ_est_fn = B.function([logZ], [logZ_mean, logZ_low, logZ_high])
        logZ_out, logZ_low_out, logZ_high_out = logZ_est_fn(self.logZ)

        # convert to floats
        logZ_out = logZ_out.item()
        logZ_low_out = logZ_low_out.item()
        logZ_high_out = logZ_high_out.item()

        # fix any nans
        if np.isnan(logZ_low_out):
            logZ_low_out = 0.0

        return logZ_out, logZ_low_out, logZ_high_out
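The same error-bar arithmetic written out in plain NumPy, to make the backend graph above concrete; `logZ_runs` stands in for `self.logZ`, the values are illustrative, and `B.logdiffexp` is assumed to return the log of the difference of the exponentials (mean minus the 3-sigma term):

# Illustrative AIS run results standing in for self.logZ; not project code.
import numpy as np

logZ_runs = np.array([10.2, 10.5, 9.9, 10.1])
n_runs = len(logZ_runs)

# mean of the (non-logged) partition function estimates, kept in log space
logZ_mean = np.logaddexp.reduce(logZ_runs) - np.log(n_runs)

# standard deviation of the estimates, also in log space
m = logZ_runs.max()
logstd_AIS = np.log(np.std(np.exp(logZ_runs - m))) + m - np.log(n_runs) / 2.0

# +-3 standard deviations around the mean, still in log space
logZ_high = np.logaddexp(np.log(3) + logstd_AIS, logZ_mean)
logZ_low = logZ_mean + np.log1p(-np.exp(np.log(3) + logstd_AIS - logZ_mean))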
Code example #8
def test_Sampler(nnet_type, constant, sampler_type):
    beta = 1.0
    nnet = nnet_for_testing(nnet_type)
    batch_size = 30

    constant_ls = []
    if constant is not None:
        constant_ls = [constant]

    if sampler_type == 'meanfield':
        sampler = samplers.Meanfield(nnet)
    elif sampler_type == 'gibbs':
        sampler = samplers.Gibbs(nnet)
    elif sampler_type == 'gibbs_prob':
        sampler = samplers.GibbsProb(nnet)
    else:
        raise NotImplementedError

    input_ls = [np.ones((batch_size, size)) for size in nnet.layer_size_list]
    input_ls_placeholder = [B.placeholder(in_np.shape) for in_np in input_ls]

    sampler.set_param(beta=beta, constant=constant_ls)

    prob_ls, updates = sampler.run_chain(input_ls_placeholder,
                                         beta=beta,
                                         constant=constant_ls)
    fn = B.function(input_ls_placeholder, prob_ls, updates=updates)
    prob_ls = fn(*input_ls)

    assert len(prob_ls) == len(input_ls)
    for i, p in enumerate(prob_ls):
        if i in constant_ls:
            assert_allclose(p, input_ls[i])
        else:
            m = np.ones((batch_size, nnet.layer_size_list[i]))
            if sampler_type == 'gibbs':
                assert_allclose((p + np.logical_not(p)), m)
            else:
                assert_allclose(p, 0.5 * m)
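Direct invocations of the test above serve as a usage sketch; the network-type strings accepted by `nnet_for_testing` and the valid `constant` values are assumptions:

# Hypothetical direct calls; 'rbm' as a valid nnet_type is an assumption.
# constant=0 holds layer 0 fixed, so the sampler must return it unchanged.
test_Sampler('rbm', constant=None, sampler_type='gibbs')
test_Sampler('rbm', constant=0, sampler_type='meanfield')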