def test_multiply(self):
    from numpypy import array, multiply
    a = array([-5.0, -0.0, 1.0])
    b = array([3.0, -2.0, -3.0])
    c = multiply(a, b)
    for i in range(3):
        assert c[i] == a[i] * b[i]

def test_multiply(self):
    from numpypy import array, multiply, arange
    a = array([-5.0, -0.0, 1.0])
    b = array([3.0, -2.0, -3.0])
    c = multiply(a, b)
    for i in range(3):
        assert c[i] == a[i] * b[i]
    a = arange(15).reshape(5, 3)
    assert (multiply.reduce(a) == array([0, 3640, 12320])).all()

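For reference, a minimal sketch with standard numpy (rather than numpypy) of what the multiply.reduce assertion above checks; ufunc.reduce defaults to axis 0, so multiply.reduce computes a column-wise product:

import numpy as np

a = np.arange(15).reshape(5, 3)
# Columns are [0, 3, 6, 9, 12], [1, 4, 7, 10, 13] and [2, 5, 8, 11, 14];
# their products are 0, 3640 and 12320.
print(np.multiply.reduce(a))
assert (np.multiply.reduce(a) == np.array([0, 3640, 12320])).all()
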
def backpropagation(self, trainingset, ERROR_LIMIT=1e-3, learning_rate=0.3,
                    momentum_factor=0.9):
    assert trainingset[0].features.shape[0] == self.n_inputs, \
        "ERROR: input size varies from the defined input setting"
    assert trainingset[0].targets.shape[0] == self.n_outputs, \
        "ERROR: output size varies from the defined output setting"

    training_data = np.array([instance.features for instance in trainingset])
    training_targets = np.array([instance.targets for instance in trainingset])

    MSE = float('inf')  # start above the error limit so the loop runs at least once
    neterror = None
    momentum = collections.defaultdict(int)
    batch_size = self.batch_size if self.batch_size != 0 else training_data.shape[0]

    epoch = 0
    while MSE > ERROR_LIMIT:
        epoch += 1

        for start in xrange(0, len(training_data), batch_size):
            batch = training_data[start:start + batch_size]
            # NOTE: the batch slice above is unused; the full training set is
            # propagated on every iteration, preserving the original behaviour.
            input_layers = self.update(training_data, trace=True)
            out = input_layers[-1]

            error = out - training_targets
            delta = error
            MSE = np.mean(np.power(error, 2))

            # Loop over the weight layers in reverse order to calculate the deltas
            loop = itertools.izip(
                xrange(len(self.weights) - 1, -1, -1),
                reversed(self.weights),
                reversed(input_layers[:-1]),
            )

            for i, weight_layer, input_signals in loop:
                if i == 0:
                    dropped = dropout(add_bias(input_signals).T,
                                      self.input_layer_dropout)
                else:
                    dropped = dropout(add_bias(input_signals).T,
                                      self.hidden_layer_dropout)

                # Calculate the weight change
                dW = learning_rate * np.dot(dropped, delta) + \
                    momentum_factor * momentum[i]

                if i != 0:
                    # Do not calculate the delta unnecessarily.
                    # Skip the bias weight during the calculation.
                    weight_delta = np.dot(delta, weight_layer[1:, :].T)

                    # Calculate the delta for the next layer visited in this
                    # reversed loop (the preceding layer of the network).
                    delta = np.multiply(
                        weight_delta,
                        self.activation_functions[i - 1](input_signals,
                                                         derivative=True))

                # Store the momentum
                momentum[i] = dW

                # Update the weights
                self.weights[i] -= dW

        if epoch % 1000 == 0:
            # Show the current training status
            print "* current network error (MSE):", MSE

    print "* Converged to error bound (%.4g) with MSE = %.4g." % (ERROR_LIMIT, MSE)
    print "* Trained for %d epochs." % epoch

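As a rough, self-contained sketch of the single-layer update performed inside the loop above (the names, shapes and values here are illustrative, not this project's actual API): the gradient term learning_rate * dot(dropped, delta) is combined with a momentum term carried over from the previous step.

import numpy as np

learning_rate, momentum_factor = 0.3, 0.9

# Illustrative shapes: 4 inputs plus a bias row, 8 samples, 3 output units.
np.random.seed(0)
dropped = np.random.rand(5, 8)        # stands in for dropout(add_bias(input_signals).T, p)
delta = np.random.rand(8, 3)          # error signal for this layer, one row per sample
weights = np.random.rand(5, 3)
previous_momentum = np.zeros_like(weights)

# Same form as the loop body: gradient step plus momentum, then the weight update.
dW = learning_rate * np.dot(dropped, delta) + momentum_factor * previous_momentum
previous_momentum = dW
weights -= dW
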
def c_loop(a, b, c):
    return numpy.add(a, numpy.multiply(b, c))

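A minimal usage sketch of c_loop on plain NumPy arrays (the values are illustrative); the call is simply the element-wise equivalent of a + b * c:

import numpy

def c_loop(a, b, c):
    return numpy.add(a, numpy.multiply(b, c))

a = numpy.array([1.0, 2.0, 3.0])
b = numpy.array([4.0, 5.0, 6.0])
c = numpy.array([7.0, 8.0, 9.0])

result = c_loop(a, b, c)
assert (result == a + b * c).all()
print(result)  # 1 + 4*7, 2 + 5*8, 3 + 6*9 -> 29, 42, 57
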
def dropout(X, p=0.):
    if p > 0:
        retain_p = 1 - p
        # Zero each unit with probability p, then rescale the survivors by
        # 1 / retain_p so the expected activation is unchanged (inverted dropout).
        X = np.multiply(bernoulli.rvs(retain_p, size=X.shape), X)
        X /= retain_p
    return X

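A minimal sketch of calling the dropout helper above, assuming np is numpy and bernoulli is scipy.stats.bernoulli, which is how the snippet appears to use them:

import numpy as np
from scipy.stats import bernoulli

activations = np.ones((4, 3))
retained = dropout(activations, p=0.5)
# Each entry is now either 0.0 (dropped) or 2.0: survivors are rescaled by
# 1 / (1 - p) so the expected value of each unit is preserved.
print(retained)
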
def test_basic(self):
    import sys
    from numpypy import (dtype, add, array, subtract as sub, multiply,
                         divide, negative, absolute as abs, floor_divide,
                         real, imag, sign)
    from numpypy import (equal, not_equal, greater, greater_equal, less,
                         less_equal, isnan)
    assert real(4.0) == 4.0
    assert imag(0.0) == 0.0
    a = array([complex(3.0, 4.0)])
    b = a.real
    b[0] = 1024
    assert a[0].real == 1024
    assert b.dtype == dtype(float)
    a = array(complex(3.0, 4.0))
    b = a.real
    assert b == array(3)
    assert a.imag == array(4)
    a.real = 1024
    a.imag = 2048
    assert a.real == 1024 and a.imag == 2048
    assert b.dtype == dtype(float)
    a = array(4.0)
    b = a.imag
    assert b == 0
    assert b.dtype == dtype(float)
    exc = raises(TypeError, 'a.imag = 1024')
    assert str(exc.value).startswith("array does not have imaginary")
    exc = raises(ValueError, 'a.real = [1, 3]')
    assert str(exc.value) == \
        "could not broadcast input array from shape (2) into shape ()"
    a = array('abc')
    assert str(a.real) == 'abc'
    assert str(a.imag) == ''
    for t in 'complex64', 'complex128', 'clongdouble':
        complex_ = dtype(t).type
        O = complex(0, 0)
        c0 = complex_(complex(2.5, 0))
        c1 = complex_(complex(1, 2))
        c2 = complex_(complex(3, 4))
        c3 = complex_(complex(-3, -3))
        assert equal(c0, 2.5)
        assert equal(c1, complex_(complex(1, 2)))
        assert equal(c1, complex(1, 2))
        assert equal(c1, c1)
        assert not_equal(c1, c2)
        assert not equal(c1, c2)
        assert less(c1, c2)
        assert less_equal(c1, c2)
        assert less_equal(c1, c1)
        assert not less(c1, c1)
        assert greater(c2, c1)
        assert greater_equal(c2, c1)
        assert not greater(c1, c2)
        assert add(c1, c2) == complex_(complex(4, 6))
        assert add(c1, c2) == complex(4, 6)
        assert sub(c0, c0) == sub(c1, c1) == 0
        assert sub(c1, c2) == complex(-2, -2)
        assert negative(complex(1, 1)) == complex(-1, -1)
        assert negative(complex(0, 0)) == 0
        assert multiply(1, c1) == c1
        assert multiply(2, c2) == complex(6, 8)
        assert multiply(c1, c2) == complex(-5, 10)
        assert divide(c0, 1) == c0
        assert divide(c2, -1) == negative(c2)
        assert divide(c1, complex(0, 1)) == complex(2, -1)
        n = divide(c1, O)
        assert repr(n.real) == 'inf'
        assert repr(n.imag).startswith('inf')  # can be inf*j or infj
        assert divide(c0, c0) == 1
        res = divide(c2, c1)
        assert abs(res.real - 2.2) < 0.001
        assert abs(res.imag + 0.4) < 0.001
        assert floor_divide(c0, c0) == complex(1, 0)
        assert isnan(floor_divide(c0, complex(0, 0)).real)
        assert floor_divide(c0, complex(0, 0)).imag == 0.0
        assert abs(c0) == 2.5
        assert abs(c2) == 5
        assert sign(complex(0, 0)) == 0
        assert sign(complex(-42, 0)) == -1
        assert sign(complex(42, 0)) == 1
        assert sign(complex(-42, 2)) == -1
        assert sign(complex(42, 2)) == 1
        assert sign(complex(-42, -3)) == -1
        assert sign(complex(42, -3)) == 1
        assert sign(complex(0, -42)) == -1
        assert sign(complex(0, 42)) == 1
        inf_c = complex_(complex(float('inf'), 0.))
        assert repr(abs(inf_c)) == 'inf'
        assert repr(abs(complex(float('nan'), float('nan')))) == 'nan'
        # numpy actually raises an AttributeError,
        # but numpypy raises a TypeError
        if '__pypy__' in sys.builtin_module_names:
            exct, excm = TypeError, 'readonly attribute'
        else:
            exct, excm = AttributeError, 'is not writable'
        exc = raises(exct, 'c2.real = 10.')
        assert excm in exc.value[0]
        exc = raises(exct, 'c2.imag = 10.')
        assert excm in exc.value[0]
        assert real(c2) == 3.0
        assert imag(c2) == 4.0

def test_basic(self):
    from numpypy import (complex128, complex64, add, array, dtype,
                         subtract as sub, multiply, divide, negative,
                         absolute as abs, floor_divide, real, imag, sign)
    from numpypy import (equal, not_equal, greater, greater_equal, less,
                         less_equal, isnan)
    complex_dtypes = [complex64, complex128]
    try:
        from numpypy import clongfloat
        complex_dtypes.append(clongfloat)
    except:
        pass
    assert real(4.0) == 4.0
    assert imag(0.0) == 0.0
    a = array([complex(3.0, 4.0)])
    b = a.real
    b[0] = 1024
    assert a[0].real == 1024
    assert b.dtype == dtype(float)
    a = array(complex(3.0, 4.0))
    b = a.real
    assert b == array(3)
    assert a.imag == array(4)
    a.real = 1024
    a.imag = 2048
    assert a.real == 1024 and a.imag == 2048
    assert b.dtype == dtype(float)
    a = array(4.0)
    b = a.imag
    assert b == 0
    assert b.dtype == dtype(float)
    exc = raises(TypeError, 'a.imag = 1024')
    assert str(exc.value).startswith("array does not have imaginary")
    exc = raises(ValueError, 'a.real = [1, 3]')
    assert str(exc.value) == \
        "could not broadcast input array from shape (2) into shape ()"
    a = array('abc')
    assert str(a.real) == 'abc'
    # numpy imag for flexible types returns self
    assert str(a.imag) == 'abc'
    for complex_ in complex_dtypes:
        O = complex(0, 0)
        c0 = complex_(complex(2.5, 0))
        c1 = complex_(complex(1, 2))
        c2 = complex_(complex(3, 4))
        c3 = complex_(complex(-3, -3))
        assert equal(c0, 2.5)
        assert equal(c1, complex_(complex(1, 2)))
        assert equal(c1, complex(1, 2))
        assert equal(c1, c1)
        assert not_equal(c1, c2)
        assert not equal(c1, c2)
        assert less(c1, c2)
        assert less_equal(c1, c2)
        assert less_equal(c1, c1)
        assert not less(c1, c1)
        assert greater(c2, c1)
        assert greater_equal(c2, c1)
        assert not greater(c1, c2)
        assert add(c1, c2) == complex_(complex(4, 6))
        assert add(c1, c2) == complex(4, 6)
        assert sub(c0, c0) == sub(c1, c1) == 0
        assert sub(c1, c2) == complex(-2, -2)
        assert negative(complex(1, 1)) == complex(-1, -1)
        assert negative(complex(0, 0)) == 0
        assert multiply(1, c1) == c1
        assert multiply(2, c2) == complex(6, 8)
        assert multiply(c1, c2) == complex(-5, 10)
        assert divide(c0, 1) == c0
        assert divide(c2, -1) == negative(c2)
        assert divide(c1, complex(0, 1)) == complex(2, -1)
        n = divide(c1, O)
        assert repr(n.real) == 'inf'
        assert repr(n.imag).startswith('inf')  # can be inf*j or infj
        assert divide(c0, c0) == 1
        res = divide(c2, c1)
        assert abs(res.real - 2.2) < 0.001
        assert abs(res.imag + 0.4) < 0.001
        assert floor_divide(c0, c0) == complex(1, 0)
        assert isnan(floor_divide(c0, complex(0, 0)).real)
        assert floor_divide(c0, complex(0, 0)).imag == 0.0
        assert abs(c0) == 2.5
        assert abs(c2) == 5
        assert sign(complex(0, 0)) == 0
        assert sign(complex(-42, 0)) == -1
        assert sign(complex(42, 0)) == 1
        assert sign(complex(-42, 2)) == -1
        assert sign(complex(42, 2)) == 1
        assert sign(complex(-42, -3)) == -1
        assert sign(complex(42, -3)) == 1
        assert sign(complex(0, -42)) == -1
        assert sign(complex(0, 42)) == 1
        inf_c = complex_(complex(float('inf'), 0.))
        assert repr(abs(inf_c)) == 'inf'
        assert repr(abs(complex(float('nan'), float('nan')))) == 'nan'
        # numpy actually raises an AttributeError,
        # but numpypy raises a TypeError
        exc = raises((TypeError, AttributeError), 'c2.real = 10.')
        assert str(exc.value) == "readonly attribute"
        exc = raises((TypeError, AttributeError), 'c2.imag = 10.')
        assert str(exc.value) == "readonly attribute"
        assert real(c2) == 3.0
        assert imag(c2) == 4.0
