Example #1
    def activation(self, bottom_up_states, top_down_states, bottom_up_pre=None, top_down_pre=None):
        ''' Calculates the pre- and post-synaptic activation.

        :Parameters:
            bottom_up_states: Activation coming from the previous layer.
                             -type: numpy array [batch_size, input dim]

            top_down_states:  Activation coming from the next layer.
                             -type: numpy array [batch_size, input dim]

            bottom_up_pre:    Pre-activation coming from the previous layer or None.
                              If given, this pre-activation is used to avoid re-calculations.
                             -type: None or numpy array [batch_size, input dim]

            top_down_pre:     Pre-activation coming from the next layer or None.
                              If given, this pre-activation is used to avoid re-calculations.
                             -type: None or numpy array [batch_size, input dim]

        :Returns:
            Post- and pre-synaptic activation for this layer.
           -type: numpy array [batch_size, input dim]

        '''
        pre_act = self.bias
        if self.input_weight_layer is not None:
            if bottom_up_pre is None:
                pre_act = self.input_weight_layer.propagate_up(bottom_up_states) + pre_act
            else:
                pre_act = bottom_up_pre + pre_act
        if self.output_weight_layer is not None:
            if top_down_pre is None:
                pre_act = self.output_weight_layer.propagate_down(top_down_states) + pre_act
            else:
                pre_act = top_down_pre + pre_act
        return numx.exp(pre_act - numxExt.log_sum_exp(pre_act, axis=1).reshape(pre_act.shape[0], 1)), pre_act
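The return line is a numerically stable softmax: subtracting the row-wise log-sum-exp before exponentiating prevents overflow for large pre-activations. A minimal standalone numpy sketch of the same computation (the function name is illustrative, not part of the library):

import numpy as np

def stable_softmax(pre_act):
    # Shift by the row maximum so exp() cannot overflow, then
    # normalize via log-sum-exp; each output row sums to 1.
    m = pre_act.max(axis=1, keepdims=True)
    log_z = m + np.log(np.exp(pre_act - m).sum(axis=1, keepdims=True))
    return np.exp(pre_act - log_z)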
Example #2
def partition_function_factorize_v(model, 
                                   beta=None, 
                                   batchsize_exponent='AUTO', 
                                   status=False):
    ''' Computes the true partition function for the given model by factoring
        over the visible units.

    :Info:
        Computation increases exponentially with the number of visible units
        (16 units usually take ~ 20 seconds).

    :Parameters:
        model:              The model.
                           -type: Valid RBM model.

        beta:               Inverse temperature(s) for the model's energy.
                           -type: None, float, numpy array [batchsize,1]

        batchsize_exponent: 2^batchsize_exponent will be the batch size.
                           -type: int

        status:             If true, prints the progress to the console.
                           -type: bool

    :Returns:
        Log partition function for the model.
       -type: float

    '''
    if status is True:
        print("Calculating the partition function by factoring over v: ")
        print('%3.2f' % 0.0, '%')
        
    bit_length = model.input_dim
    if batchsize_exponent == 'AUTO' or batchsize_exponent > 20:
        batchsize_exponent = numx.min([model.input_dim, 12])
    batchSize = numx.power(2, batchsize_exponent)
    num_combinations = numx.power(2, bit_length)

    num_batches = num_combinations // batchSize
    bitCombinations = numx.zeros((batchSize, model.input_dim))
    log_prob_vv_all = numx.zeros(num_combinations)
    
    for batch in range(1, num_batches + 1):
        # Generate current batch
        bitCombinations = npExt.generate_binary_code(bit_length, 
                                                     batchsize_exponent, 
                                                     batch - 1)

        # calculate LL
        log_prob_vv_all[(batch - 1) * batchSize:batch * batchSize] = \
            model.unnormalized_log_probability_v(bitCombinations, beta).reshape(bitCombinations.shape[0])

        # print status if wanted
        if status is True:
            print('%3.2f' % (100 * numx.double(batch) / numx.double(num_batches)), '%')
    
    # return the log_sum of values
    return npExt.log_sum_exp(log_prob_vv_all)
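The loop enumerates all 2^bit_length visible states batch-wise and log-sum-exps their unnormalized log probabilities. The same idea without the library helpers, as a self-contained sketch (`unnorm_log_prob` is a stand-in for `model.unnormalized_log_probability_v`, not the library API):

import numpy as np

def log_partition_brute_force(unnorm_log_prob, num_units):
    # All 2^num_units binary states, one per row.
    states = ((np.arange(2 ** num_units)[:, None]
               >> np.arange(num_units)) & 1).astype(np.float64)
    log_probs = unnorm_log_prob(states)
    # Numerically stable log(sum(exp(log_probs))).
    m = log_probs.max()
    return m + np.log(np.exp(log_probs - m).sum())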
Example #3
    def f(cls, x):
        """ Calculates the value of the SoftMax function for a given input x.

        :param x: Input data.
        :type x: numpy array [batch size, input dim]

        :return: Value of the SoftMax function for x.
        :rtype: numpy array with the same shape as x.
        """
        return numx.exp(x - log_sum_exp(x, axis=1).reshape(x.shape[0], 1))
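A brief usage sketch, assuming `f` is a classmethod on a `SoftMax` activation class as the `cls` argument suggests (the class name is an assumption):

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [10.0, 10.0, 10.0]])
y = SoftMax.f(x)       # hypothetical binding of f to a SoftMax class
print(y.sum(axis=1))   # -> [1. 1.]; each row is a probability distribution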
Example #4
def partition_function_factorize_h(model,
                                   beta=None,
                                   batchsize_exponent='AUTO',
                                   status=False):
    """ Computes the true partition function for the given model by factoring over the hidden units.

        :Info: Exponential increase of computations by the number of visible units. (16 usually ~ 20 seconds)

    :param model: The model.
    :type model: Valid RBM model.

    :param beta: Inverse temperature(s) for the model's energy.
    :type beta: None, float, numpy array [batchsize,1]

    :param batchsize_exponent: 2^batchsize_exponent will be the batch size.
    :type batchsize_exponent: int

    :param status: If true, prints the progress to the console.
    :type status: bool

    :return: Log Partition function for the model.
    :rtype: float
    """
    if status is True:
        print("Calculating the partition function by factoring over h: ")
        print('%3.2f' % 0.0, '%')

    bit_length = model.output_dim
    if batchsize_exponent == 'AUTO' or batchsize_exponent > 20:
        batchsize_exponent = numx.min([model.output_dim, 12])
    batchsize = numx.power(2, batchsize_exponent)
    num_combinations = numx.power(2, bit_length)

    num_batches = num_combinations // batchsize
    log_prob_vv_all = numx.zeros(num_combinations)

    for batch in range(1, num_batches + 1):
        # Generate current batch
        bitcombinations = numxext.generate_binary_code(bit_length,
                                                       batchsize_exponent,
                                                       batch - 1)

        # calculate LL
        log_prob_vv_all[(batch - 1) * batchsize:batch * batchsize] = \
            model.unnormalized_log_probability_h(bitcombinations, beta).reshape(bitcombinations.shape[0])

        # print status if wanted
        if status is True:
            print('%3.2f' % (100 * numx.double(batch) / numx.double(num_batches)), '%')

    # return the log_sum of values
    return numxext.log_sum_exp(log_prob_vv_all)
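Both enumeration routines compute the same exact log partition function, so they can cross-check each other; since the cost grows as 2^n, factor over whichever side has fewer units. A hedged usage sketch (assumes a small trained RBM `model` as in the examples above; the tolerance is an assumption):

log_z_v = partition_function_factorize_v(model)
log_z_h = partition_function_factorize_h(model)
assert abs(log_z_v - log_z_h) < 1e-8   # exact enumerations must agree up to float error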
Example #5
def _partition_function_exact_check(model, batchsize_exponent='AUTO'):
    ''' Computes the true partition function for the given model by factoring
        over the visible and hidden2 units.

        This is just a proof of concept; use _partition_function_exact()
        instead, it is much faster!

    :Parameters:
        model:              The model.
                           -type: Valid DBM model

        batchsize_exponent: 2^batchsize_exponent will be the batch size.
                           -type: int

    :Returns:
        Log Partition function for the model.
       -type: float

    '''
    bit_length = model.W1.shape[1]
    if batchsize_exponent == 'AUTO' or batchsize_exponent > 20:
        batchsize_exponent = numx.min([model.W1.shape[1], 12])
    batchSize = numx.power(2, batchsize_exponent)
    num_combinations = numx.power(2, bit_length)
    num_batches = num_combinations // batchSize
    bitCombinations = numx.zeros((batchSize, model.W1.shape[1]))
    log_prob_vv_all = numx.zeros(num_combinations)

    for batch in range(1, num_batches + 1):
        # Generate current batch
        bitCombinations = npExt.generate_binary_code(bit_length,
                                                     batchsize_exponent,
                                                     batch - 1)
        # calculate LL
        log_prob_vv_all[(batch - 1) * batchSize:batch * batchSize] = \
            model.unnormalized_log_probability_h1(bitCombinations).reshape(bitCombinations.shape[0])
    # return the log_sum of values
    return npExt.log_sum_exp(log_prob_vv_all)
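Since the docstring positions this routine as a proof of concept, its natural use is as a sanity check against the faster routine it mirrors (a sketch; `_partition_function_exact` is the function named in the docstring above, `model` a small DBM, and the tolerance an assumption):

log_z_slow = _partition_function_exact_check(model)
log_z_fast = _partition_function_exact(model)   # faster routine referenced above
assert abs(log_z_slow - log_z_fast) < 1e-8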
Example #6
def reverse_annealed_importance_sampling(model,
                                         num_chains=100,
                                         k=1,
                                         betas=10000,
                                         status=False,
                                         data=None):
    """ Approximates the partition function for the given model using reverse annealed importance sampling.

    .. seealso:: Accurate and Conservative Estimates of MRF Log-likelihood using Reverse Annealing \
                 http://arxiv.org/pdf/1412.8566.pdf

    :param model: The model.
    :type model: Valid RBM model.

    :param num_chains: Number of AIS runs.
    :type num_chains: int

    :param k: Number of Gibbs sampling steps.
    :type k: int

    :param betas: Number or a list of inverse temperatures to sample from.
    :type betas: int, numpy array [num_betas]

    :param status: If true, prints the progress to the console.
    :type status: bool

    :param data: If data is given, initial sampling is started from data samples.
    :type data: numpy array

    :return: | Mean estimated log partition function,
             | Mean +3std estimated log partition function,
             | Mean -3std estimated log partition function.
    :rtype: float, float, float
    """
    # Set up temperatures if not given
    if numx.isscalar(betas):
        betas = numx.linspace(0.0, 1.0, betas)

    if data is None:
        data = numx.zeros((num_chains, model.output_dim))
    else:
        data = model.sample_h(model.probability_h_given_v(numx.random.permutation(data)[0:num_chains]))

    # Sample the first time from the true model
    v = model.probability_v_given_h(data, betas[betas.shape[0] - 1], True)
    v = model.sample_v(v, betas[betas.shape[0] - 1], True)

    # Calculate the unnormalized probabilities of v
    lnpvsum = model.unnormalized_log_probability_v(v, betas[betas.shape[0] - 1], True)

    # Print initial progress if wanted
    if status is True:
        t = 1
        print("Calculating the partition function using AIS: ")
        print('%3.2f%%' % (0.0))
        print('%3.2f%%' % (100.0 * numx.double(t) / numx.double(betas.shape[0])))

    for beta in reversed(betas[1:betas.shape[0] - 1]):

        if status is True:
            t += 1
            print('%3.2f%%' % (100.0 * numx.double(t) / numx.double(betas.shape[0])))

        # Calculate the unnormalized probabilities of v
        lnpvsum -= model.unnormalized_log_probability_v(v, beta, True)

        # Sample k times from the intermediate distribution
        for _ in range(0, k):
            h = model.sample_h(model.probability_h_given_v(v, beta, True), beta, True)
            v = model.sample_v(model.probability_v_given_h(h, beta, True), beta, True)

        # Calculate the unnormalized probabilities of v
        lnpvsum += model.unnormalized_log_probability_v(v, beta, True)

    # Calculate the unnormalized probabilities of v
    lnpvsum -= model.unnormalized_log_probability_v(v, betas[0], True)

    lnpvsum = numx.longdouble(lnpvsum)

    # Calculate an estimate of log Z
    logz = numxext.log_sum_exp(lnpvsum) - numx.log(num_chains)

    # Calculate +/- 3 standard deviations
    lnpvmean = numx.mean(lnpvsum)
    lnpvstd = numx.log(numx.std(numx.exp(lnpvsum - lnpvmean))) + lnpvmean - numx.log(num_chains) / 2.0
    lnpvstd = numx.vstack((numx.log(3.0) + lnpvstd, logz))

    # Calculate partition function of base distribution
    baselogz = model._base_log_partition(True)

    # Add the base partition function
    logz = logz + baselogz
    logz_up = numxext.log_sum_exp(lnpvstd) + baselogz
    logz_down = numxext.log_diff_exp(lnpvstd) + baselogz

    if status is True:
        print('%3.2f%%' % 100.0)

    return logz, logz_up, logz_down
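A hedged usage sketch: the three return values bracket log Z, so a common pattern is to report the estimate together with its +/-3-std interval (assumes a trained RBM `model` and a `train_data` array; both names are assumptions):

logz, logz_up, logz_down = reverse_annealed_importance_sampling(
    model, num_chains=100, k=1, betas=10000, data=train_data)
print('log Z estimate: %f, +/-3 std interval: [%f, %f]' % (logz, logz_down, logz_up))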