Example #1
0
    def use(self,dataset):
        """Compute predicted outputs for every example in ``dataset``.

        Each example is an ``(input, target)`` pair; the target is ignored.
        The single-variable output marginals are computed either directly
        from the visible-to-output bias (when ``self.n_inference_iterations``
        is 0) or via the approximate-inference scheme selected by
        ``self.approximate_inference`` ('mean_field' or
        'loopy_belief_propagation').

        Returns a list with one array of marginals per example, thresholded
        at 0.5 when ``self.binary_outputs`` is True.

        Raises ValueError for an unknown ``self.approximate_inference``.
        """
        outputs = []
        for example in dataset:
            input,dummy = example
            # Promote a 1-d input to a (1, n_features) row matrix.
            if len(input.shape) == 1:
                input = input.reshape((1,-1))

            data_vis_bias = np.dot(input,self.V)+self.b
            if self.n_inference_iterations > 0:
                if self.approximate_inference == 'mean_field':
                    # Mean field: iterate the sigmoid fixed-point equations,
                    # with optional damping for stability.
                    target_singles = 1./(1+np.exp(-data_vis_bias))
                    for k in range(self.n_inference_iterations):
                        if self.damping_factor > 0:
                            target_singles *= self.damping_factor
                            target_singles += (1.-self.damping_factor)/(1+np.exp(-data_vis_bias-np.dot(target_singles,self.U)))
                        else:
                            target_singles = 1./(1+np.exp(-data_vis_bias-np.dot(target_singles,self.U)))
                elif self.approximate_inference == 'loopy_belief_propagation':
                    target_singles = np.zeros((input.shape[0],self.target_size))

                    # Work buffers, preallocated once and reused per example.
                    log_messages = np.zeros((self.target_size,self.target_size))
                    softplus_out1 = np.zeros((self.target_size,self.target_size))
                    softplus_out2 = np.zeros((self.target_size,self.target_size))
                    for i in range(input.shape[0]):
                        # Loopy BP, run independently for each example.
                        data_bias = data_vis_bias[i]
                        log_messages[:] = 0
                        for k in range(self.n_inference_iterations):
                            # Update messages: accumulate the bias plus all
                            # incoming messages except the one on each edge.
                            acc_log_messages = data_bias + log_messages.sum(axis=1) - log_messages
                            if self.damping_factor > 0:
                                log_messages *= self.damping_factor
                                mlnonlin.softplus(acc_log_messages,softplus_out2)
                                acc_log_messages += self.U
                                mlnonlin.softplus(acc_log_messages,softplus_out1)
                                log_messages += (1-self.damping_factor) * (softplus_out1-softplus_out2)
                            else:
                                mlnonlin.softplus(acc_log_messages,softplus_out2)
                                acc_log_messages += self.U
                                mlnonlin.softplus(acc_log_messages,softplus_out1)
                                log_messages = softplus_out1-softplus_out2

                        # Single marginals from the final messages.
                        target_singles[i] = 1./(1+np.exp(-data_bias-log_messages.sum(axis=1)))
                else:
                    # Fixed: the format specifier was a bare '%', which itself
                    # raises "unsupported format character"; '%s' is intended.
                    raise ValueError('approximate_inference \'%s\' unknown' % self.approximate_inference)

            else:
                # No inference iterations: marginals come straight from the
                # visible bias.
                target_singles = 1./(1+np.exp(-data_vis_bias))

            if self.binary_outputs:
                outputs += [ target_singles > 0.5 ]
            else: 
                outputs += [ target_singles ]

        return outputs
def test_softplus():
    """
    Testing nonlinear softplus.
    """

    input = np.random.randn(20)
    output = np.zeros((20))
    nonlinear.softplus(input,output)
    print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(output-np.log(1+np.exp(input))))
    
    print 'Testing nonlinear reclin'
    input = np.random.randn(30,20)
    output = np.zeros((30,20))
    nonlinear.reclin(input,output)
    print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(output-(input>0)*input))
    
    print 'Testing nonlinear reclin deriv.'
    dinput = np.zeros((30,20))
    doutput = np.random.randn(30,20)
    nonlinear.dreclin(output,doutput,dinput)
    print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(dinput-(input>0)*doutput))
Example #3
0
def test_softplus():
    """
    Testing nonlinear softplus.
    """

    input = np.random.randn(20)
    output = np.zeros((20))
    nonlinear.softplus(input, output)
    print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
        np.abs(output - np.log(1 + np.exp(input))))

    print 'Testing nonlinear reclin'
    input = np.random.randn(30, 20)
    output = np.zeros((30, 20))
    nonlinear.reclin(input, output)
    print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
        np.abs(output - (input > 0) * input))

    print 'Testing nonlinear reclin deriv.'
    dinput = np.zeros((30, 20))
    doutput = np.random.randn(30, 20)
    nonlinear.dreclin(output, doutput, dinput)
    print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
        np.abs(dinput - (input > 0) * doutput))
Example #4
0
    def train(self,trainset):
        """Train the model on ``trainset`` by gradient updates.

        Runs ``self.n_stages - self.stage`` passes over the training set.
        For each ``(input, target)`` example, single (and, when inference
        iterations are used, pair-wise) output marginals are estimated with
        the configured approximate-inference scheme ('mean_field' or
        'loopy_belief_propagation'), then the biases ``b``, weights ``V``
        and pair-wise weights ``U`` are updated with learning rate
        ``self.lr`` (scaled by the mini-batch size).

        Raises ValueError for an unknown ``self.approximate_inference``.
        """
        if self.stage == 0:
            self.initialize(trainset)

        for it in range(self.stage,self.n_stages):
            for example in trainset:
                input,target = example
                # Promote 1-d examples to (1, n) row matrices.
                if len(input.shape) == 1:
                    input = input.reshape((1,-1))
                    target = target.reshape((1,-1))

                data_vis_bias = np.dot(input,self.V)+self.b
                
                if self.n_inference_iterations > 0:
                    if self.approximate_inference == 'mean_field':
                        # Mean field: iterate the sigmoid fixed-point
                        # equations, with optional damping.
                        target_singles = 1./(1+np.exp(-data_vis_bias))
                        for k in range(self.n_inference_iterations):
                            if self.damping_factor > 0:
                                target_singles *= self.damping_factor
                                target_singles += (1.-self.damping_factor)/(1+np.exp(-data_vis_bias-np.dot(target_singles,self.U)))
                            else:
                                target_singles = 1./(1+np.exp(-data_vis_bias-np.dot(target_singles,self.U)))
                        # Pair-wise statistics from the product of singles.
                        sum_target_pairs = np.dot(target_singles.T,target_singles)
                    elif self.approximate_inference == 'loopy_belief_propagation':
                        target_singles = np.zeros((input.shape[0],self.target_size))
                        sum_target_pairs = np.zeros((self.target_size,self.target_size))

                        # Work buffers, preallocated once and reused.
                        log_messages = np.zeros((self.target_size,self.target_size))
                        softplus_out1 = np.zeros((self.target_size,self.target_size))
                        softplus_out2 = np.zeros((self.target_size,self.target_size))
                        for i in range(input.shape[0]):
                            # Loopy BP, run independently per example.
                            data_bias = data_vis_bias[i]
                            log_messages[:] = 0
                            for k in range(self.n_inference_iterations):
                                # Update messages: accumulate the bias plus
                                # all incoming messages except each edge's own.
                                acc_log_messages = data_bias + log_messages.sum(axis=1) - log_messages
                                if self.damping_factor > 0:
                                    log_messages *= self.damping_factor
                                    mlnonlin.softplus(acc_log_messages,softplus_out2)
                                    acc_log_messages += self.U
                                    mlnonlin.softplus(acc_log_messages,softplus_out1)
                                    log_messages += (1-self.damping_factor) * (softplus_out1-softplus_out2)
                                else:
                                    mlnonlin.softplus(acc_log_messages,softplus_out2)
                                    acc_log_messages += self.U
                                    mlnonlin.softplus(acc_log_messages,softplus_out1)
                                    log_messages = softplus_out1-softplus_out2

                            # Single marginals
                            target_singles[i] = 1./(1+np.exp(-data_bias-log_messages.sum(axis=1)))
                            # Pair-wise marginals
                            acc_log_messages = data_bias + log_messages.sum(axis=1) - log_messages
                            p11 = np.exp(self.U+acc_log_messages.T+acc_log_messages)
                            p10 = np.exp(acc_log_messages)
                            p01 = np.exp(acc_log_messages.T)
                            sum_p = 1 + p11 + p10 + p01
                            sum_target_pairs += p11 / sum_p
                    else:
                        # Fixed: the format specifier was a bare '%', which
                        # itself raises "unsupported format character";
                        # '%s' is intended.
                        raise ValueError('approximate_inference \'%s\' unknown' % self.approximate_inference)

                else:
                    # No inference iterations: marginals straight from bias.
                    target_singles = 1./(1+np.exp(-data_vis_bias))

                # apply CRF gradient update
                db = np.sum(target_singles,axis=0) - np.sum(target,axis=0)
                dV = np.dot(input.T,target_singles-target)
                self.b -= self.lr/input.shape[0] * db
                self.V -= self.lr/input.shape[0] * dV

                if self.n_inference_iterations > 0:
                    dU = sum_target_pairs - np.dot(target.T,target)
                    self.U -= self.lr/input.shape[0] * dU 
                    # Ensuring symmetry and 0 diagonal
                    self.U = 0.5*(self.U+self.U.T)
                    self.U -= np.diag(np.diag(self.U))

        self.stage = self.n_stages
Example #5
0
# Smoke tests comparing mathutils.nonlinear routines against NumPy reference
# formulas; each step prints the summed absolute difference (should be ~0).
# NOTE(review): `output` and `dinput` are defined earlier in the full script,
# before this chunk — here they carry the state of the previous test.
doutput = np.random.randn(30, 20)
nonlinear.dsigmoid(output, doutput, dinput)
print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
    np.abs(dinput - doutput * output * (1 - output)))

print 'Testing nonlinear softmax'
input = np.random.randn(20)
output = np.zeros((20))
nonlinear.softmax(input, output)
print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
    np.abs(output - np.exp(input) / np.sum(np.exp(input))))

print 'Testing nonlinear softplus'
input = np.random.randn(20)
output = np.zeros((20))
nonlinear.softplus(input, output)
print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
    np.abs(output - np.log(1 + np.exp(input))))

print 'Testing nonlinear reclin'
input = np.random.randn(30, 20)
output = np.zeros((30, 20))
nonlinear.reclin(input, output)
print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
    np.abs(output - (input > 0) * input))

print 'Testing nonlinear reclin deriv.'
dinput = np.zeros((30, 20))
doutput = np.random.randn(30, 20)
nonlinear.dreclin(output, doutput, dinput)
# NOTE(review): the final print statement below is truncated in this chunk —
# the `np.sum(` call is left unclosed by the extraction.
print 'NumPy vs mathutils.nonlinear diff. output:', np.sum(
Example #6
0
# Smoke tests comparing mathutils.nonlinear routines against NumPy reference
# formulas; each step prints the summed absolute difference (should be ~0).
print 'Testing nonlinear sigmoid deriv.'
dinput = np.zeros((30,20))
doutput = np.random.randn(30,20)
# NOTE(review): `output` is defined earlier in the full script, before this
# chunk — presumably the sigmoid output from the preceding test.
nonlinear.dsigmoid(output,doutput,dinput)
print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(dinput-doutput*output*(1-output)))

print 'Testing nonlinear softmax'
input = np.random.randn(20)
output = np.zeros((20))
nonlinear.softmax(input,output)
print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(output-np.exp(input)/np.sum(np.exp(input))))

print 'Testing nonlinear softplus'
input = np.random.randn(20)
output = np.zeros((20))
nonlinear.softplus(input,output)
print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(output-np.log(1+np.exp(input))))

print 'Testing nonlinear reclin'
input = np.random.randn(30,20)
output = np.zeros((30,20))
nonlinear.reclin(input,output)
print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(output-(input>0)*input))

print 'Testing nonlinear reclin deriv.'
dinput = np.zeros((30,20))
doutput = np.random.randn(30,20)
# Derivative of reclin uses the forward output: gradient passes where output > 0.
nonlinear.dreclin(output,doutput,dinput)
print 'NumPy vs mathutils.nonlinear diff. output:',np.sum(np.abs(dinput-(input>0)*doutput))