Example #1
 def test_power_complex(self):
     inf = float('inf')
     ninf = -float('inf')
     nan = float('nan')
     cmpl = complex
     from math import copysign
     from numpypy import power, array, complex128, complex64
     # note: in some settings (namely a x86-32 build without the JIT),
     # gcc optimizes the code in rlib.rcomplex.c_pow() to not truncate
     # the 10-byte values down to 8-byte values.  It ends up with more
     # imprecision than usual (hence 2e-13 instead of 2e-15).
     for c, rel_err in ((complex128, 2e-13), (complex64, 4e-7)):
         a = array([cmpl(-5., 0), cmpl(-5., -5.), cmpl(-5., 5.),
                    cmpl(0., -5.), cmpl(0., 0.), cmpl(0., 5.),
                    cmpl(-0., -5.), cmpl(-0., 0.), cmpl(-0., 5.),
                    cmpl(-0., -0.), cmpl(inf, 0.), cmpl(inf, 5.),
                    cmpl(inf, -0.), cmpl(ninf, 0.), cmpl(ninf, 5.),
                    cmpl(ninf, -0.), cmpl(ninf, inf), cmpl(inf, inf),
                    cmpl(ninf, ninf), cmpl(5., inf), cmpl(5., ninf),
                    cmpl(nan, 5.), cmpl(5., nan), cmpl(nan, nan),
                  ], dtype=c)
         for p in (3, -1, 10000, 2.3, -10000, 10+3j):
             b = power(a, p)
             for i in range(len(a)):
                 try:
                     r = self.c_pow((float(a[i].real), float(a[i].imag)),
                             (float(p.real), float(p.imag)))
                 except ZeroDivisionError:
                     r = (nan, nan)
                 except OverflowError:
                     r = (inf, -copysign(inf, a[i].imag))
                 except ValueError:
                     r = (nan, nan)
                 msg = 'result of %r(%r)**%r got %r expected %r\n ' % \
                    (c, a[i], p, b[i], r)
                 t1 = float(r[0])
                 t2 = float(b[i].real)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
                 t1 = float(r[1])
                 t2 = float(b[i].imag)
                 self.rAlmostEqual(t1, t2, rel_err=rel_err, msg=msg)
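The helpers self.c_pow and self.rAlmostEqual are defined elsewhere in the test class: the first is a reference complex power, the second an approximate comparison under a relative tolerance. As a rough standalone sketch (the name, tolerances and absolute-error floor below are illustrative, modeled on the CPython cmath test helper rather than the actual PyPy code), such a check might look like:

    import math

    def rel_almost_equal(a, b, rel_err=2e-15, abs_err=5e-323):
        # NaN only matches NaN.
        if math.isnan(a) or math.isnan(b):
            return math.isnan(a) and math.isnan(b)
        # Infinities must match exactly, including sign.
        if math.isinf(a) or math.isinf(b):
            return a == b
        # Accept either a tiny absolute error (near zero) or a small relative error.
        return abs(a - b) <= max(abs_err, rel_err * max(abs(a), abs(b)))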
Example #2
    def test_power_int(self):
        import math
        from numpypy import power, array
        a = array([1, 2, 3])
        b = power(a, 3)
        for i in range(len(a)):
            assert b[i] == a[i] ** 3

        a = array([1, 2, 3])
        b = array([1, 2, 3])
        c = power(a, b)
        for i in range(len(a)):
            assert c[i] == a[i] ** b[i]

        # assert power(12345, 12345) == -9223372036854775808
        # assert power(-12345, 12345) == -9223372036854775808
        # assert power(-12345, 12346) == -9223372036854775808
        assert power(2, 0) == 1
        assert power(2, -1) == 0
        assert power(2, -2) == 0
        assert power(-2, -1) == 0
        assert power(-2, -2) == 0
        assert power(12345, -12345) == 0
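The zero results for negative exponents above are consistent with the output having an integer dtype: any value with magnitude below 1 truncates toward zero. A rough scalar model of that behavior (illustrative only, not the numpypy implementation):

    def int_power(base, exp):
        # Negative exponents yield |result| < 1 for |base| >= 2; an integer
        # result type truncates that toward zero.
        if exp < 0:
            return int(float(base) ** exp)
        return base ** exp

    assert int_power(2, 0) == 1
    assert int_power(2, -1) == 0
    assert int_power(-2, -2) == 0
    assert int_power(12345, -12345) == 0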
Example #3
    def backpropagation(self,
                        trainingset,
                        ERROR_LIMIT=1e-3,
                        learning_rate=0.3,
                        momentum_factor=0.9):

        assert trainingset[0].features.shape[0] == self.n_inputs, \
                "ERROR: input size varies from the defined input setting"

        assert trainingset[0].targets.shape[0]  == self.n_outputs, \
                "ERROR: output size varies from the defined output setting"

        training_data = np.array(
            [instance.features for instance in trainingset])
        training_targets = np.array(
            [instance.targets for instance in trainingset])

        MSE = float('inf')  # start above ERROR_LIMIT so the loop runs at least once
        neterror = None
        momentum = collections.defaultdict(int)

        batch_size = (self.batch_size if self.batch_size != 0
                      else training_data.shape[0])

        epoch = 0
        while MSE > ERROR_LIMIT:
            epoch += 1

            for start in xrange(0, len(training_data), batch_size):
                batch = training_data[start:start + batch_size]
                batch_targets = training_targets[start:start + batch_size]
                # Forward pass on the current mini-batch; trace=True returns the
                # signal at every layer (the final entry is the network output).
                input_layers = self.update(batch, trace=True)
                out = input_layers[-1]

                error = out - batch_targets
                delta = error
                MSE = np.mean(np.power(error, 2))

                loop = itertools.izip(
                    xrange(len(self.weights) - 1, -1, -1),
                    reversed(self.weights),
                    reversed(input_layers[:-1]),
                )

                for i, weight_layer, input_signals in loop:
                    # Loop over the weight layers in reversed order to calculate the deltas

                    if i == 0:
                        dropped = dropout(
                            add_bias(input_signals).T,
                            self.input_layer_dropout)
                    else:
                        dropped = dropout(
                            add_bias(input_signals).T,
                            self.hidden_layer_dropout)

                    # Calculate weight change
                    dW = learning_rate * np.dot(
                        dropped, delta) + momentum_factor * momentum[i]

                    if i != 0:
                        """Do not calculate the delta unnecessarily."""
                        # Skipping the bias weight during calculation.
                        weight_delta = np.dot(delta, weight_layer[1:, :].T)

                        # Delta propagated back to the previous (earlier) layer
                        delta = np.multiply(
                            weight_delta,
                            self.activation_functions[i - 1](input_signals,
                                                             derivative=True))

                    # Store the momentum
                    momentum[i] = dW

                    # Update the weights
                    self.weights[i] -= dW

            if epoch % 1000 == 0:
                # Show the current training status
                print "* current network error (MSE):", MSE

        print "* Converged to error bound (%.4g) with MSE = %.4g." % (
            ERROR_LIMIT, MSE)
        print "* Trained for %d epochs." % epoch
Example #5
    def test_power_float(self):
        import math
        from numpypy import power, array
        a = array([1., 2., 3.])
        b = power(a, 3)
        for i in range(len(a)):
            assert b[i] == a[i] ** 3

        a = array([1., 2., 3.])
        b = array([1., 2., 3.])
        c = power(a, b)
        for i in range(len(a)):
            assert c[i] == a[i] ** b[i]

        assert power(2, float('inf')) == float('inf')
        assert power(float('inf'), float('inf')) == float('inf')
        assert power(12345.0, 12345.0) == float('inf')
        assert power(-12345.0, 12345.0) == float('-inf')
        assert power(-12345.0, 12346.0) == float('inf')
        assert math.isnan(power(-1, 1.1))
        assert math.isnan(power(-1, -1.1))
        assert power(-2.0, -1) == -0.5
        assert power(-2.0, -2) == 0.25
        assert power(12345.0, -12345.0) == 0
        assert power(float('-inf'), 2) == float('inf')
        assert power(float('-inf'), 2.5) == float('inf')
        assert power(float('-inf'), 3) == float('-inf')
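The NaN expectations for power(-1, 1.1) and power(-1, -1.1) mirror the real-domain restriction on fractional powers of negative numbers; the standard library reports the same situation as a domain error. A small illustration using math.pow (not the numpypy power above):

    import math

    try:
        result = math.pow(-1.0, 1.1)   # no real-valued result exists
    except ValueError:                 # "math domain error"
        result = float('nan')
    assert math.isnan(result)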