def test_max_pooling_layer():
    l = MaxPoolingLayer(2)
    x = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]],
                  [[9, 10, 11, 12], [13, 14, 15, 16]],
                  [[17, 18, 19, 20], [21, 22, 23, 24]]])

    print("Testing forward computation...")
    output = l.forward(x)
    target = np.array([[[6, 8]], [[14, 16]], [[22, 24]]])
    assert output.shape == target.shape, "Wrong output size"
    assert close_enough(output, target), "Wrong values in layer output"
    print("Forward computation implemented ok!")

    output_err = output

    print("Testing backward computation...")
    g = l.backward(x, output_err)
    print(g)

    print("Testing gradients")
    in_target = np.array([[[0, 0, 0, 0], [0, 6, 0, 8]],
                          [[0, 0, 0, 0], [0, 14, 0, 16]],
                          [[0, 0, 0, 0], [0, 22, 0, 24]]])
    assert g.shape == in_target.shape, "Wrong size"
    assert close_enough(g, in_target), "Wrong values in gradients"
    print(" OK")
    print("Backward computation implemented ok!")
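
# A minimal sketch of the behaviour the test above expects (an assumption, not
# the reference MaxPoolingLayer): forward() takes the maximum of each
# non-overlapping pool_size x pool_size window per channel, and backward()
# routes every output error back to the position that produced the maximum.
import numpy as np

class MaxPoolingLayerSketch:
    def __init__(self, pool_size):
        self.pool_size = pool_size

    def forward(self, x):
        p = self.pool_size
        depth, height, width = x.shape
        out = np.zeros((depth, height // p, width // p))
        for c in range(depth):
            for i in range(height // p):
                for j in range(width // p):
                    out[c, i, j] = x[c, i * p:(i + 1) * p, j * p:(j + 1) * p].max()
        return out

    def backward(self, x, output_err):
        p = self.pool_size
        g = np.zeros(x.shape)
        depth, out_h, out_w = output_err.shape
        for c in range(depth):
            for i in range(out_h):
                for j in range(out_w):
                    window = x[c, i * p:(i + 1) * p, j * p:(j + 1) * p]
                    # position of the maximum inside the window
                    r, s = np.unravel_index(np.argmax(window), window.shape)
                    g[c, i * p + r, j * p + s] = output_err[c, i, j]
        return g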
def test_relu_layer():
    l = ReluLayer()
    x = np.array([-100.0, -10.0, -1.0, -.1, -.01, .0,
                  .1, .01, .1, 1.0, 10.0, 100.0])

    print("Testing forward computation...")
    output = l.forward(x)
    target = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.1, 0.01, 0.1, 1.0, 10.0, 100.0])
    assert output.shape == target.shape, "Wrong output size"
    assert close_enough(output, target), "Wrong values in layer output"
    print("Forward computation implemented ok!")

    output_err = np.array([-760.0, -154.0, -145.0, -45.1, -.7601, .0,
                           3.1, 23.01, 1.1, 14.0, 150.0, 1.5])

    print("Testing backward computation...")
    g = l.backward(x, output_err)

    print("Testing gradients")
    in_target = np.array([.0, .0, .0, .0, .0, .0,
                          3.1, 23.01, 1.1, 14., 150., 1.5])
    assert g.shape == in_target.shape, "Wrong size"
    assert close_enough(g, in_target), "Wrong values in gradients"
    print(" OK")
    print("Backward computation implemented ok!")
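
# A two-line sketch of the ReLU behaviour checked above (an assumption, not
# necessarily how ReluLayer is written): the forward pass clamps negatives to
# zero, and the backward pass lets gradients through only where the input was
# strictly positive.
class ReluLayerSketch:
    def forward(self, x):
        return np.maximum(x, 0.0)

    def backward(self, x, output_err):
        return output_err * (x > 0)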
def test_starts_out_with_no_rates_or_count():
    meter = Meter()
    assert meter.count == 0
    assert close_enough(meter.mean_rate, 0)
    assert close_enough(meter.one_minute_rate, 0)
    assert close_enough(meter.five_minute_rate, 0)
    assert close_enough(meter.fifteen_minute_rate, 0)
def test_linearize_layer():
    l = LinearizeLayer(2, 3, 4)
    x = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                  [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]])

    print("Testing forward computation...")
    output = l.forward(x)
    target = np.array([[1], [2], [3], [4], [5], [6], [7], [8], [9], [10],
                       [11], [12], [13], [14], [15], [16], [17], [18], [19],
                       [20], [21], [22], [23], [24]])
    assert output.shape == target.shape, "Wrong output size"
    assert close_enough(output, target), "Wrong values in layer output"
    print("Forward computation implemented ok!")

    output_err = output

    print("Testing backward computation...")
    g = l.backward(x, output_err)

    print("Testing gradients")
    in_target = x
    assert g.shape == in_target.shape, "Wrong size"
    assert close_enough(g, in_target), "Wrong values in gradients"
    print(" OK")
    print("Backward computation implemented ok!")
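
# A minimal sketch of the flattening layer exercised above (an assumption):
# the forward pass reshapes a (depth, height, width) volume into a column
# vector in row-major order, and the backward pass undoes the reshape.
class LinearizeLayerSketch:
    def __init__(self, depth, height, width):
        self.volume_shape = (depth, height, width)

    def forward(self, x):
        return x.reshape((-1, 1))

    def backward(self, x, output_err):
        return output_err.reshape(self.volume_shape)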
def test_linear_layer():
    from transfer_functions import hyperbolic_tangent

    l = Layer(4, 5, hyperbolic_tangent)
    l.weights = np.array([[0.00828426, 0.35835909, -0.26848058, 0.37474081],
                          [0.17125137, -0.10246062, 0.301141, -0.02042449],
                          [0.3111425, -0.04866925, -0.04644496, 0.05068646],
                          [-0.36114934, 0.40810522, -0.18082862, 0.01905515],
                          [0.06907316, -0.1069273, -0.35200473, -0.29067378]])
    l.biases = np.array([[-0.4146], [0.0982], [-0.3392], [0.4674], [0.0317]])
    x = np.array([[0.123], [-0.124], [0.231], [-0.400]])

    print("Testing forward computation...")
    output = l.forward(x)
    target = np.array([[-0.58493574], [0.20668163], [-0.31483002],
                       [0.31219906], [0.08818176]])
    assert output.shape == target.shape, "Wrong output size"
    assert close_enough(output, target), "Wrong values in layer output"
    print("Forward computation implemented ok!")

    output_err = np.array([[.001], [.001], [.99], [.001], [.001]])

    print("Testing backward computation...")
    g = l.backward(x, output_err)
    print(l.g_biases)
    print(l.g_weights)
    print(g)

    print(" i. testing gradients w.r.t. the bias terms...")
    gbias_target = np.array([[0.001], [0.001], [0.99], [0.001], [0.001]])
    assert l.g_biases.shape == gbias_target.shape, "Wrong size"
    assert close_enough(l.g_biases, gbias_target), "Wrong values"
    print(" OK")

    print(" ii. testing gradients w.r.t. the weights...")
    gweights_target = np.array(
        [[1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04],
         [1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04],
         [1.21770000e-01, -1.22760000e-01, 2.28690000e-01, -3.96000000e-01],
         [1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04],
         [1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04]])
    assert l.g_weights.shape == gweights_target.shape, "Wrong size"
    assert close_enough(l.g_weights, gweights_target), "Wrong values"
    print(" OK")

    print(" iii. testing gradients w.r.t. the inputs...")
    in_target = np.array([[0.30326003], [-0.04689319], [-0.04400043],
                          [0.04222033]])
    assert g.shape == in_target.shape, "Wrong size"
    assert close_enough(g, in_target), "Wrong values in input gradients"
    print(" OK")
    print("Backward computation implemented ok!")
def test_meter():
    clock = ManualClock()
    meter = Meter(clock)
    meter.mark()
    clock.add_seconds(10)
    meter.mark(2)
    assert close_enough(meter.mean_rate, 0.3)
    assert close_enough(meter.one_minute_rate, 0.1840)
    assert close_enough(meter.five_minute_rate, 0.1966)
    assert close_enough(meter.fifteen_minute_rate, 0.1988)
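
# A hedged sketch of the Meter behaviour this test relies on (the real Meter
# may differ): mean_rate is total count over elapsed seconds, while the
# one/five/fifteen-minute rates come from EWMAs that are ticked once for every
# five-second interval that has elapsed on the supplied clock.  The clock is
# assumed to expose a hypothetical time() accessor (ManualClock's actual read
# method may have another name), and the no-argument Meter() used earlier
# presumably falls back to wall-clock time; that default is omitted here.
class MeterSketch:
    TICK_INTERVAL = 5.0  # seconds between EWMA ticks

    def __init__(self, clock):
        self.clock = clock
        self.start_time = clock.time()
        self.last_tick = self.start_time
        self.count = 0
        self.m1 = EWMA.one_minute_ewma()
        self.m5 = EWMA.five_minute_ewma()
        self.m15 = EWMA.fifteen_minute_ewma()

    def mark(self, n=1):
        self._tick_if_necessary()
        self.count += n
        for ewma in (self.m1, self.m5, self.m15):
            ewma.update(n)

    def _tick_if_necessary(self):
        elapsed = self.clock.time() - self.last_tick
        ticks = int(elapsed // self.TICK_INTERVAL)
        for _ in range(ticks):
            for ewma in (self.m1, self.m5, self.m15):
                ewma.tick()
        self.last_tick += ticks * self.TICK_INTERVAL

    @property
    def mean_rate(self):
        elapsed = self.clock.time() - self.start_time
        return self.count / elapsed if elapsed > 0 else 0.0

    @property
    def one_minute_rate(self):
        self._tick_if_necessary()
        return self.m1.rate
    # five_minute_rate and fifteen_minute_rate would follow the same pattern
    # with self.m5 and self.m15.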
def test_linear_layer():
    l = FcLayer(4, 5, identity)
    l.weights = np.array([[0.00828426, 0.35835909, -0.26848058, 0.37474081],
                          [0.17125137, -0.10246062, 0.301141, -0.02042449],
                          [0.3111425, -0.04866925, -0.04644496, 0.05068646],
                          [-0.36114934, 0.40810522, -0.18082862, 0.01905515],
                          [0.06907316, -0.1069273, -0.35200473, -0.29067378]])
    l.biases = np.array([[-0.4146], [0.0982], [-0.3392], [0.4674], [0.0317]])
    x = np.array([[0.123], [-0.124], [0.231], [-0.400]])

    print("Testing forward computation...")
    output = l.forward(x)
    target = np.array([[-0.6699329], [0.2097024], [-0.32589786],
                       [0.32298011], [0.0884114]])
    assert output.shape == target.shape, "Wrong output size"
    assert close_enough(output, target), "Wrong values in layer output"
    print("Forward computation implemented ok!")

    output_err = np.array([[.001], [.001], [.99], [.001], [.001]])

    print("Testing backward computation...")
    g = l.backward(x, output_err)

    print(" i. testing gradients w.r.t. the bias terms...")
    gbias_target = np.array([[0.001], [0.001], [0.99], [0.001], [0.001]])
    assert l.g_biases.shape == gbias_target.shape, "Wrong size"
    assert close_enough(l.g_biases, gbias_target), "Wrong values"
    print(" OK")

    print(" ii. testing gradients w.r.t. the weights...")
    gweights_target = np.array(
        [[1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04],
         [1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04],
         [1.21770000e-01, -1.22760000e-01, 2.28690000e-01, -3.96000000e-01],
         [1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04],
         [1.23000000e-04, -1.24000000e-04, 2.31000000e-04, -4.00000000e-04]])
    assert l.g_weights.shape == gweights_target.shape, "Wrong size"
    assert close_enough(l.g_weights, gweights_target), "Wrong values"
    print(" OK")

    print(" iii. testing gradients w.r.t. the inputs...")
    in_target = np.array([[0.30791853], [-0.04762548], [-0.04648068],
                          [0.05026229]])
    assert g.shape == in_target.shape, "Wrong size"
    assert close_enough(g, in_target), "Wrong values in input gradients"
    print(" OK")
    print("Backward computation implemented ok!")
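
# A minimal sketch of the fully connected layer behaviour implied by the
# targets in the two linear-layer tests above (an assumption inferred from
# those targets, not necessarily how Layer/FcLayer is written): forward()
# computes f(W x + b); backward() treats output_err as the gradient w.r.t.
# the pre-activation, so g_biases = output_err, g_weights = output_err x^T,
# and the returned input gradient is W^T output_err scaled element-wise by
# the transfer derivative evaluated at the layer's input (a no-op for the
# identity transfer function).
class FcLayerSketch:
    def __init__(self, inputs_no, outputs_no, transfer_function):
        self.f = transfer_function
        self.weights = np.random.randn(outputs_no, inputs_no)
        self.biases = np.random.randn(outputs_no, 1)

    def forward(self, x):
        return self.f(np.dot(self.weights, x) + self.biases)

    def backward(self, x, output_err):
        self.g_biases = output_err
        self.g_weights = np.dot(output_err, x.T)
        return np.dot(self.weights.T, output_err) * self.f(x, True)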
def test_decorator():
    clock = ManualClock()

    @metered(clock=clock)
    def foo():
        pass

    for _ in range(10):
        foo()
    clock.add_seconds(10)

    assert close_enough(foo.meter.mean_rate, 0.1)
    assert close_enough(foo.meter.one_minute_rate, 0.119)
    assert close_enough(foo.meter.five_minute_rate, 0.174)
    assert close_enough(foo.meter.fifteen_minute_rate, 0.190)
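
# A hedged sketch of what the @metered decorator might look like (the real
# decorator may well differ, and the exact rates asserted above depend on
# Meter and ManualClock internals not shown here): it attaches a Meter to the
# wrapped function and marks one event per call.
import functools

def metered_sketch(clock=None):
    def decorator(fn):
        meter = Meter(clock) if clock is not None else Meter()

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            meter.mark()
            return fn(*args, **kwargs)

        wrapper.meter = meter
        return wrapper
    return decorator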
def update_alpha(M, ips, alpha):
    """Refine alpha with damped Newton steps until the estimate stops
    changing or the iteration budget is exhausted."""
    old_alpha = alpha
    max_iter = 10
    iteration = 0
    while True:
        iteration += 1
        if iteration >= max_iter:
            break
        # Gradient and the structured Hessian (diagonal h plus a constant z
        # added to every entry) at the current estimate.
        g = gradient_g(M, old_alpha, ips)
        h, z = hessian_h_and_z(M, old_alpha)
        # Newton step for a Hessian of the form diag(h) + z * ones, computed
        # with the Sherman-Morrison identity so the full matrix is never
        # formed: H^{-1} g = (g - c) / h.
        c = (g / h).sum() / (1. / z + (1. / h).sum())
        new_alpha = old_alpha - (g - c) / h
        if close_enough(old_alpha, new_alpha):
            break
        old_alpha = new_alpha
    return new_alpha
def check_tanh():
    tanh_layer = TanHLayer()
    inputs = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).reshape((10, 1))

    outputs = tanh_layer.forward(inputs)
    outputs_target = np.array([[0.76159416], [0.96402758], [0.99505475],
                               [0.9993293], [0.9999092], [0.99998771],
                               [0.99999834], [0.99999977], [0.99999997],
                               [1.]])
    assert close_enough(outputs, outputs_target), \
        "Wrong values for Tanh forward"

    output_errors = inputs
    result = tanh_layer.backward(inputs, output_errors)
    target_result = np.array([[4.19974342e-01], [1.41301650e-01],
                              [2.95981115e-02], [5.36380273e-03],
                              [9.07916155e-04], [1.47459284e-04],
                              [2.32827654e-05], [3.60112478e-06],
                              [5.48279254e-07], [8.24461455e-08]])
    assert close_enough(result, target_result), \
        "Wrong values for Tanh backward"
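
# A minimal sketch of the layer behaviour checked above (an assumption): the
# forward pass applies tanh element-wise, and the backward pass scales the
# output errors by the derivative 1 - tanh(x)^2.
class TanHLayerSketch:
    def forward(self, inputs):
        return np.tanh(inputs)

    def backward(self, inputs, output_errors):
        a = np.tanh(inputs)
        return output_errors * (1.0 - a ** 2)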
def check_softmax():
    softmax = SoftMaxLayer()
    inputs = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).reshape((10, 1))

    outputs = softmax.forward(inputs)
    outputs_target = np.array([[7.80134161e-05], [2.12062451e-04],
                               [5.76445508e-04], [1.56694135e-03],
                               [4.25938820e-03], [1.15782175e-02],
                               [3.14728583e-02], [8.55520989e-02],
                               [2.32554716e-01], [6.32149258e-01]])
    assert close_enough(outputs, outputs_target), \
        "Wrong values for SoftMax forward"

    output_errors = inputs
    result = softmax.backward(inputs, output_errors)
    target_result = np.array([[-0.00065675], [-0.00157318], [-0.0036999],
                              [-0.00849044], [-0.01882001], [-0.03957987],
                              [-0.07611639], [-0.12135371], [-0.09731887],
                              [0.36760914]])
    assert close_enough(result, target_result), \
        "Wrong values for SoftMax backward"
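
# A minimal sketch of the softmax behaviour checked above (an assumption): the
# forward pass exponentiates and normalises a column vector, and the backward
# pass applies the softmax Jacobian to the output errors without forming the
# full Jacobian matrix: dL/dx_i = y_i * (err_i - y . err).
class SoftMaxLayerSketch:
    def forward(self, inputs):
        shifted = inputs - inputs.max()  # shift for numerical stability
        e = np.exp(shifted)
        return e / e.sum()

    def backward(self, inputs, output_errors):
        y = self.forward(inputs)
        return y * (output_errors - np.dot(y.T, output_errors))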
def test_transfer_functions():
    X = np.array([-100.0, -10.0, -1.0, -.1, -.01, .0,
                  .1, .01, .1, 1.0, 10.0, 100.0])
    logX = np.array([3.7200759760208555e-44, 4.539786870243442e-05,
                     0.2689414213699951, 0.47502081252106,
                     0.49750002083312506, 0.5, 0.52497918747894,
                     0.5024999791668749, 0.52497918747894,
                     0.7310585786300049, 0.9999546021312976, 1.0])
    dlogX = np.array([3.7200759760208555e-44, 4.53958077359517e-05,
                      0.19661193324148185, 0.24937604019289197,
                      0.2499937501041652, 0.25, 0.24937604019289197,
                      0.2499937501041652, 0.24937604019289197,
                      0.19661193324148185, 4.5395807735907655e-05, 0.0])
    tanhX = np.array([-1.0, -0.99999999587769273, -0.76159415595576485,
                      -0.099667994624955819, -0.0099996666799994603, 0.0,
                      0.099667994624955819, 0.0099996666799994603,
                      0.099667994624955819, 0.76159415595576485,
                      0.99999999587769273, 1.0])
    dtanhX = np.array([0.0, 8.2446145466263943e-09, 0.41997434161402614,
                       0.9900662908474398, 0.99990000666628887, 1.0,
                       0.9900662908474398, 0.99990000666628887,
                       0.9900662908474398, 0.41997434161402614,
                       8.2446145466263943e-09, 0.0])

    # Test the identity transfer function
    print("Testing the identity transfer function ...")
    assert close_enough(identity(X), X), \
        "Identity function not good!"
    assert close_enough(identity(X, True), np.ones(X.shape)), \
        "Identity derivative not good"
    print("Identity transfer function implemented ok!")

    # Test the logistic (sigmoid) transfer function
    print("Testing the logistic transfer function ...")
    assert close_enough(logistic(X), logX), \
        "Logistic function not good"
    assert close_enough(logistic(logX, True), dlogX), \
        "Logistic derivative not good"
    print("Logistic transfer function implemented ok!")

    # Test the hyperbolic tangent transfer function
    print("Testing the hyperbolic tangent transfer function ...")
    assert close_enough(hyperbolic_tangent(X), tanhX), \
        "Hyperbolic tangent function not good"
    assert close_enough(hyperbolic_tangent(tanhX, True), dtanhX), \
        "Hyperbolic tangent derivative not good"
    print("Hyperbolic tangent transfer function implemented ok!")
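
# Minimal reference implementations consistent with the targets above (an
# assumption about the intended API): each function returns the activation by
# default, and when the second argument is True it returns the derivative
# expressed in terms of the activation value, which is why the test passes
# logX and tanhX (not X) to the derivative calls.
def identity_sketch(x, derivative=False):
    return np.ones(x.shape) if derivative else x

def logistic_sketch(x, derivative=False):
    # derivative form: s * (1 - s), where s is the logistic activation
    return x * (1.0 - x) if derivative else 1.0 / (1.0 + np.exp(-x))

def hyperbolic_tangent_sketch(x, derivative=False):
    # derivative form: 1 - a**2, where a is the tanh activation
    return 1.0 - x ** 2 if derivative else np.tanh(x)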
def test_one_minute_ewma_with_a_value_of_three():
    ewma = EWMA.one_minute_ewma()
    ewma.update(3)
    ewma.tick()
    assert close_enough(ewma.rate, 0.6, offset=0.000001)

    expected_rates = [0.22072766, 0.08120117, 0.02987224, 0.01098938,
                      0.00404277, 0.00148725, 0.00054713, 0.00020128,
                      0.00007405, 0.00002724, 0.00001002, 0.00000369,
                      0.00000136, 0.00000050, 0.00000018]
    for minutes, expected in enumerate(expected_rates, start=1):
        elapse_one_minute(ewma)
        assert close_enough(ewma.rate, expected, offset=0.000001), \
            "wrong rate after %d minute(s)" % minutes
def test_five_minute_ewma_with_a_value_of_three():
    ewma = EWMA.five_minute_ewma()
    ewma.update(3)
    ewma.tick()
    assert close_enough(ewma.rate, 0.6, offset=0.000001)

    expected_rates = [0.49123845, 0.40219203, 0.32928698, 0.26959738,
                      0.22072766, 0.18071653, 0.14795818, 0.12113791,
                      0.09917933, 0.08120117, 0.06648190, 0.05443077,
                      0.04456415, 0.03648604, 0.02987224]
    for minutes, expected in enumerate(expected_rates, start=1):
        elapse_one_minute(ewma)
        assert close_enough(ewma.rate, expected, offset=0.000001), \
            "wrong rate after %d minute(s)" % minutes
def test_fifteen_minute_ewma_with_a_value_of_three():
    ewma = EWMA.fifteen_minute_ewma()
    ewma.update(3)
    ewma.tick()
    assert close_enough(ewma.rate, 0.6, offset=0.000001)

    expected_rates = [0.56130419, 0.52510399, 0.49123845, 0.45955700,
                      0.42991879, 0.40219203, 0.37625345, 0.35198773,
                      0.32928698, 0.30805027, 0.28818318, 0.26959738,
                      0.25221023, 0.23594443, 0.22072766]
    for minutes, expected in enumerate(expected_rates, start=1):
        elapse_one_minute(ewma)
        assert close_enough(ewma.rate, expected, offset=0.000001), \
            "wrong rate after %d minute(s)" % minutes
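
# A hedged sketch of the EWMA the three tests above exercise (the real class
# may differ): update() accumulates events, tick() is called once per
# five-second interval and folds that interval's instantaneous rate into the
# moving average with alpha = 1 - exp(-5 / (60 * minutes)); the test helper
# elapse_one_minute presumably just calls tick() twelve times.
import math

class EWMASketch:
    TICK_INTERVAL = 5.0  # seconds

    def __init__(self, minutes):
        self.alpha = 1.0 - math.exp(-self.TICK_INTERVAL / (60.0 * minutes))
        self.uncounted = 0.0
        self.rate = 0.0
        self.initialized = False

    @classmethod
    def one_minute_ewma(cls):
        return cls(1)

    @classmethod
    def five_minute_ewma(cls):
        return cls(5)

    @classmethod
    def fifteen_minute_ewma(cls):
        return cls(15)

    def update(self, n):
        self.uncounted += n

    def tick(self):
        instant_rate = self.uncounted / self.TICK_INTERVAL
        self.uncounted = 0.0
        if self.initialized:
            self.rate += self.alpha * (instant_rate - self.rate)
        else:
            self.rate = instant_rate
            self.initialized = True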
def test_convolutional_layer():
    np.random.seed(0)
    l = ConvolutionalLayer(2, 3, 4, 3, 2, 1)
    l.weights = np.random.rand(3, 2, 2, 2)
    l.biases = np.random.rand(3, 1)
    x = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                  [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]])

    print("Testing forward computation...")
    output = l.forward(x)
    target = np.array([[[34.55043437, 38.95942899, 43.36842361],
                        [52.18641284, 56.59540746, 61.00440208]],
                       [[30.72457988, 34.08923073, 37.45388158],
                        [44.18318328, 47.54783413, 50.91248498]],
                       [[28.2244684, 31.30220961, 34.37995083],
                        [40.53543326, 43.61317448, 46.69091569]]])
    assert output.shape == target.shape, "Wrong output size"
    assert close_enough(output, target), "Wrong values in layer output"
    print("Forward computation implemented ok!")

    output_err = np.random.rand(3, 2, 3)

    print("Testing backward computation...")
    g = l.backward(x, output_err)

    print(" i. testing gradients w.r.t. the bias terms...")
    gbias_target = np.array([[2.4595299], [3.86207926], [1.17504241]])
    assert l.g_biases.shape == gbias_target.shape, "Wrong size"
    assert close_enough(l.g_biases, gbias_target), "Wrong values"
    print(" OK")

    print(" ii. testing gradients w.r.t. the weights...")
    gweights_target = np.array([[[[12.19071134, 14.65024124],
                                  [22.02883093, 24.48836083]],
                                 [[41.70507011, 44.1646],
                                  [51.54318969, 54.00271959]]],
                                [[[17.14269456, 21.00477382],
                                  [32.59101161, 36.45309087]],
                                 [[63.4876457, 67.34972496],
                                  [78.93596275, 82.79804201]]],
                                [[[5.38434096, 6.55938337],
                                  [10.08451061, 11.25955302]],
                                 [[19.4848499, 20.65989231],
                                  [24.18501955, 25.36006196]]]])
    assert l.g_weights.shape == gweights_target.shape, "Wrong size"
    assert close_enough(l.g_weights, gweights_target), "Wrong values"
    print(" OK")

    print(" iii. testing gradients w.r.t. the inputs...")
    in_target = np.array([[[0.1873886, 1.2046128, 1.46196328, 0.55403977],
                           [1.60530819, 2.33479767, 2.57498862, 1.37639801],
                           [1.04216109, 0.97715783, 1.17609438, 0.71793208]],
                          [[0.1055839, 0.66864782, 0.87158109, 0.36204295],
                           [0.96613275, 1.97814629, 2.12435555, 0.97276618],
                           [1.26778987, 1.01822369, 1.45453542, 0.45243098]]])
    assert g.shape == in_target.shape, "Wrong size"
    assert close_enough(g, in_target), "Wrong values in input gradients"
    print(" OK")
    print("Backward computation implemented ok!")
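
# A forward-only sketch of the operation the test above exercises: a "valid"
# 2-D cross-correlation over a (depth, height, width) volume with per-filter
# biases.  The constructor signature, weight layout (filters, depth, k, k) and
# stride handling of the real ConvolutionalLayer are assumptions, so this
# sketch is illustrative rather than guaranteed to reproduce the exact targets.
def conv_forward_sketch(x, weights, biases):
    n_filters, depth, k, _ = weights.shape
    _, height, width = x.shape
    out = np.zeros((n_filters, height - k + 1, width - k + 1))
    for f in range(n_filters):
        for i in range(height - k + 1):
            for j in range(width - k + 1):
                window = x[:, i:i + k, j:j + k]
                out[f, i, j] = np.sum(weights[f] * window) + biases[f, 0]
    return out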