def test_multivariate_rectified_linear_complete_fit(self):
    from stackly import xpy, normalize, Constant, Variable, Concat, ReLU, FullyConnected, SquaredLoss, Adam
    numpy.random.seed(0)
    xpy.random.seed(0)
    x = Variable('x', (3, ), dtype=xpy.float32)
    b = Constant(xpy.array([1], dtype=xpy.float32))
    y1 = FullyConnected(Concat((x, b)), 3)
    y2 = ReLU(y1)
    #y2 = FullyConnected(Concat((y2, b)), 1)
    y2 = FullyConnected(y2, 1)
    self.assertEqual(
        str(y2),
        'FullyConnected(ReLU(FullyConnected(Concat((Variable(x,(3,),<class \'numpy.float32\'>), Constant([ 1.]))),3,True)),1,True)'
    )
    optim = Adam(y2, SquaredLoss)
    beta = xpy.array([1, 2, 3], dtype=xpy.float32).reshape(1, -1)
    n = 1000
    data_x = normalize(
        xpy.arange(n * beta.size, dtype=xpy.float32).reshape(n, -1))
    data_y = xpy.abs(1 + xpy.tensordot(data_x, beta, axes=([1], [1])))
    batch_size = 100
    nbatches = data_x.shape[0] // batch_size
    for t in range(1, 751):
        p = numpy.random.permutation(data_x.shape[0])
        data_x, data_y = data_x[p], data_y[p]
        loss = 0.0
        for batch in range(nbatches):
            start, end = batch * batch_size, (batch + 1) * batch_size
            batch_loss = optim.run({'x': data_x[start:end]},
                                   data_y[start:end])
            loss += batch_loss
        loss /= nbatches
        #print("t={:4d}: loss={:.5f}: W1={}, W2={}".format(t, loss, y1.w, y2.w))
    self.assertAlmostEqual(loss, 0.0, places=FLOAT32_COARSE_PRECISION)
def test_normalize(self):
    from stackly import xpy, asnumpy, normalize
    X = xpy.array([
        [-1, 9, 1],
        [1, 1, 1],
        [3, -7, 1],
    ], dtype=xpy.float32)
    Xnormalized = xpy.array([
        [-1, 1, 0],
        [0, 0, 0],
        [1, -1, 0],
    ], dtype=xpy.float32)
    numpy.testing.assert_almost_equal(asnumpy(normalize(X)),
                                      asnumpy(Xnormalized),
                                      decimal=FLOAT32_FINE_PRECISION)
def test_multivariate_linear_complete_fit(self):
    from stackly import xpy, normalize, Variable, FullyConnected, SquaredLoss, Adam
    numpy.random.seed(0)
    xpy.random.seed(0)
    x = Variable('x', (3, ), dtype=xpy.float32)
    y = FullyConnected(x, 1)
    optim = Adam(y, SquaredLoss)
    self.assertEqual(
        str(y),
        'FullyConnected(Variable(x,(3,),<class \'numpy.float32\'>),1,True)'
    )
    beta = xpy.array([1, 2, 3], dtype=xpy.float32).reshape(1, -1)
    n = 1000
    data_x = normalize(
        xpy.arange(n * beta.size, dtype=xpy.float32).reshape(n, -1))
    data_y = xpy.tensordot(data_x, beta, axes=([1], [1]))
    batch_size = 100
    nbatches = data_x.shape[0] // batch_size
    for t in range(1, 201):
        p = numpy.random.permutation(data_x.shape[0])
        data_x, data_y = data_x[p], data_y[p]
        loss = 0.0
        for batch in range(nbatches):
            start, end = batch * batch_size, (batch + 1) * batch_size
            batch_loss = optim.run({'x': data_x[start:end]},
                                   data_y[start:end])
            loss += batch_loss
        loss /= nbatches
        #print("t={:4d}: loss={:.5f}: W={}".format(t, loss, y.w))
    self.assertAlmostEqual(loss, 0.0, places=FLOAT32_COARSE_PRECISION)
def test_dropout_forward_backward(self):
    import numpy
    from stackly import xpy, asnumpy, asxpy, Variable, Dropout
    numpy.random.seed(0)
    xpy.random.seed(0)
    x = Variable('x', (2, 5), dtype=xpy.float32)
    y = Dropout(x)
    data_x = asxpy(
        numpy.tile(numpy.arange(5, dtype=xpy.float32), 2).reshape(2, 5))
    # With the RNGs seeded above, the dropout mask is deterministic,
    # so the exact forward and backward outputs can be asserted.
    data_y = y.forward((data_x, ), training=True)
    data_y_expected = xpy.array([[0, 2, 4, 6, 0], [0, 0, 4, 6, 0]],
                                dtype=xpy.float32)
    numpy.testing.assert_equal(asnumpy(data_y), asnumpy(data_y_expected))
    ddata_y = xpy.arange(10, dtype=xpy.float32).reshape(2, 5)
    ddata_x = y.backward((data_x, ), data_y, ddata_y, None)[0]
    ddata_x_expected = xpy.array([[0, 1, 2, 3, 0], [5, 0, 7, 8, 0]],
                                 dtype=xpy.float32)
    numpy.testing.assert_equal(asnumpy(ddata_x), asnumpy(ddata_x_expected))
def test_multivariate_multiclass_logistic_complete_fit(self):
    from stackly import xpy, normalize, Constant, Variable, Concat, FullyConnected, NegativeSoftmaxCrossEntropyLoss, Adam
    numpy.random.seed(0)
    xpy.random.seed(0)
    x = Variable('x', (3, ), dtype=xpy.float32)
    b = Constant(xpy.array([1], dtype=xpy.float32))
    y = FullyConnected(Concat((x, b)), 3)
    self.assertEqual(
        str(y),
        'FullyConnected(Concat((Variable(x,(3,),<class \'numpy.float32\'>), Constant([ 1.]))),3,True)'
    )
    optim = Adam(y, NegativeSoftmaxCrossEntropyLoss)
    nclasses = 3
    n = 300
    data_x, data_y = None, None
    for classid in range(nclasses):
        beta = xpy.array([classid * 3, classid * 3 + 1, classid * 3 + 2],
                         dtype=xpy.float32).reshape(1, -1)
        x_ = normalize(
            xpy.arange(n * beta.size, dtype=xpy.float32).reshape(n, -1))
        if data_x is None:
            data_x = x_
        else:
            data_x = xpy.concatenate((data_x, x_))
        # One label row per sample in x_, so data_y stays aligned with data_x.
        y_ = xpy.full((n, 1), classid)
        if data_y is None:
            data_y = y_
        else:
            data_y = xpy.concatenate((data_y, y_))
    batch_size = 30
    nbatches = data_x.shape[0] // batch_size
    for t in range(1, 251):
        p = numpy.random.permutation(data_x.shape[0])
        data_x, data_y = data_x[p], data_y[p]
        loss = 0.0
        for batch in range(nbatches):
            start, end = batch * batch_size, (batch + 1) * batch_size
            batch_loss = optim.run({'x': data_x[start:end]},
                                   data_y[start:end])
            loss += batch_loss
        loss /= nbatches
        #print("t={:4d}: loss={:.5f}: W={}".format(t, loss, y.w))
    self.assertAlmostEqual(loss, 0.0, places=FLOAT32_COARSE_PRECISION)
def test_max_pooling_2d_forward_backward(self):
    import numpy
    from stackly import xpy, asnumpy, asxpy, Variable, MaxPooling2D
    x = Variable('x', (3, 4, 3), dtype=xpy.float32)
    y = MaxPooling2D(x, (2, 2), (2, 1))
    data_x = numpy.zeros((1, 3, 4, 3), dtype=numpy.float32)
    data_x[0, :, 2, 0] = data_x[0, :, 0, 1] = 1.0
    data_x = asxpy(data_x)
    data_y = y.forward((data_x, ), training=True)
    data_y_expected = xpy.array([[[[1, 1], [1, 0]]]],
                                dtype=xpy.float32).repeat(3, axis=1)
    numpy.testing.assert_equal(asnumpy(data_y), asnumpy(data_y_expected))
    # Upstream gradient: values 1..4 per channel, one per pooling window.
    ddata_y = asxpy(
        numpy.tile(
            numpy.arange(1, 1 + numpy.prod(data_y.shape[2:]),
                         dtype=xpy.float32),
            data_y.shape[1]).reshape(data_y.shape))
    ddata_x = y.backward((data_x, ), data_y, ddata_y, None)[0]
    ddata_x_expected = xpy.array(
        [[[[0, 3, 0], [0, 0, 0], [3, 4, 4], [0, 4, 4]]]],
        dtype=xpy.float32).repeat(3, axis=1)
    numpy.testing.assert_equal(asnumpy(ddata_x), asnumpy(ddata_x_expected))
def __init__(self, path):
    super(MNISTDataset, self).__init__(path)
    self.data = {}
    for key in self.FILENAMES:
        filename = self.FILENAMES[key]
        self.download_file(os.path.join(self.SERVERURL, filename))
        self.decompress_file(filename)
        filename, _ = self.detect_compression(filename)
        filedata = self.read_file(filename)
        if len(filedata) < 8:
            raise Exception('illegal header for file {}'.format(filename))
        # IDX header: big-endian magic number followed by the item count.
        magic, nitems = unpack('>II', filedata[:8])
        p = 8
        if key.endswith('images'):
            print('reading {} image(s) in {}'.format(nitems, filename))
            if not magic == 0x00000803:
                raise Exception(
                    'illegal magic for image file {}'.format(filename))
            if len(filedata) - p < 8:
                raise Exception(
                    'illegal header for image file {}'.format(filename))
            nrows, ncols = unpack('>II', filedata[p:p + 8])
            p += 8
            if not len(filedata) - p == nitems * nrows * ncols:
                raise Exception('malformed image file {}'.format(filename))
            self.data[key] = xpy.array(bytearray(filedata[p:]),
                                       dtype=xpy.float16).reshape(
                                           nitems, nrows, ncols)
        else:
            print('reading {} label(s) in {}'.format(nitems, filename))
            if not magic == 0x00000801:
                raise Exception(
                    'illegal magic for label file {}'.format(filename))
            if not nitems == len(filedata) - p:
                raise Exception('malformed label file {}'.format(filename))
            self.data[key] = xpy.array(bytearray(filedata[p:]))