# --- Final test-set evaluation -------------------------------------------
# NOTE(review): the metrics below are (re)assigned on every batch, so the
# values printed/logged reflect only the LAST batch yielded by
# `test_loader` — presumably the loader serves the whole test set in a
# single batch; confirm against the DataLoader construction.
for x, y in test_loader:
    # `Tensor.to(device)` is a no-op when the tensor already lives on
    # `device`, so the previous `torch.cuda.is_available()` branch (whose
    # else-arm was the dead code `x = x; y = y`) is unnecessary.
    x = x.to(device)
    y = y.to(device)

    y_pred, _ = model(x)

    # Undo input normalization so that MSE/RMSE/bias are reported in the
    # original (denormalized) units.  `.cpu()` because denormalize_data /
    # the metric helpers operate on host tensors.
    y_pred_dnorm = denormalize_data(
        y_pred.view(-1, opt.n_inp, opt.n_points).cpu(), min_value, max_value)
    y_dnorm = denormalize_data(
        y.view(-1, opt.n_inp, opt.n_points).cpu(), min_value, max_value)

    loss_test = loss_fn(y_pred_dnorm, y_dnorm)

    # Collect scalar metrics for the experiment logger.
    logs_test['mse'] = loss_test.item()
    logs_test['rmse'] = np.sqrt(loss_test.item())
    logs_test['bias'] = bias(y_pred_dnorm, y_dnorm)
    logs_test['err-rel'] = rel_error(y_pred_dnorm, y_dnorm)
    logger.log('test', logs_test)

# Human-readable summary of the final metrics, then persist the model.
print("\n\n================================================")
print(" * Test MSE: ", logs_test['mse'],
      "\n * Test RMSE: ", logs_test['rmse'],
      "\n * Test Bias: ", logs_test['bias'],
      "\n * Test Rel-Err (%): ", logs_test['err-rel'])
print("================================================\n")
logger.save(model)
def test_conv_layer_3_update(self):
    """Verify ConvolutionLayer's parameter update.

    Runs one forward/backward pass followed by ``update(learning_rate=0.05)``
    and checks both the pre-update and post-update weights/biases against
    precomputed reference values.  The RNG seed makes the layer's random
    initialization and the random upstream gradient reproducible.
    """
    print('\n==================================')
    print(' Test conv layer update ')
    print('==================================')
    # Fixed seed: parameters and upstream gradient are deterministic.
    np.random.seed(123)
    in_dim, out_dim = 3, 3
    kernel_size, stride, pad = 3, 1, 1
    x = np.random.randn(1, in_dim, 5, 5)
    conv_layer = ConvolutionLayer(in_dim, out_dim, kernel_size, stride, pad)
    # Snapshot parameters as copies (not views) before any mutation.
    before_w = np.array(conv_layer.w, copy=True)
    before_b = np.array(conv_layer.b, copy=True)
    conv_out = conv_layer.forward(x)
    d_prev = np.random.randn(*conv_out.shape)  # random upstream gradient
    dx = conv_layer.backward(d_prev, 0.01)
    conv_layer.update(learning_rate=0.05)
    after_w = conv_layer.w
    after_b = conv_layer.b
    # Reference weights at initialization (shape: out_dim x in_dim x 3 x 3).
    correct_before_w = [[[[1.03972709, -0.40336604, -0.12602959],
                          [-0.83751672, -1.60596276, 1.25523737],
                          [-0.68886898, 1.66095249, 0.80730819]],
                         [[-0.31475815, -1.0859024, -0.73246199],
                          [-1.21252313, 2.08711336, 0.16444123],
                          [1.15020554, -1.26735205, 0.18103513]],
                         [[1.17786194, -0.33501076, 1.03111446],
                          [-1.08456791, -1.36347154, 0.37940061],
                          [-0.37917643, 0.64205469, -1.97788793]]],
                        [[[0.71226464, 2.59830393, -0.02462598],
                          [0.03414213, 0.17954948, -1.86197571],
                          [0.42614664, -1.60540974, -0.4276796]],
                         [[1.24286955, -0.73521696, 0.50124899],
                          [1.01273905, 0.27874086, -1.37094847],
                          [-0.33247528, 1.95941134, -2.02504576]],
                         [[-0.27578601, -0.55210807, 0.12074736],
                          [0.74821562, 1.60869097, -0.27023239],
                          [0.81234133, 0.49974014, 0.4743473]]],
                        [[[-0.56392393, -0.99732147, -1.10004311],
                          [-0.75643721, 0.32168658, 0.76094939],
                          [0.32346885, -0.5489551, 1.80597011]],
                         [[1.51886562, -0.35400011, -0.82343141],
                          [0.13021495, 1.26729865, 0.33276498],
                          [0.5565487, -0.21208012, 0.4562709]],
                         [[1.54454445, -0.23966878, 0.14330773],
                          [0.25381648, 0.28372536, -1.41188888],
                          [-1.87686866, -1.01965507, 0.1679423]]]]
    # Biases are expected to start at zero.
    correct_before_b = [0., 0., 0.]
    # Reference weights after one SGD step with lr=0.05.
    correct_after_w = [[[[1.09689866, -0.54913364, 0.21465505],
                         [-0.9246368, -1.56644397, 1.44112153],
                         [-0.54805894, 1.841536, 0.74062891]],
                        [[-0.37891291, -1.22891861, -0.5019351],
                         [-1.23865077, 2.34514794, 0.07762718],
                         [1.16021246, -1.3052236, 0.37142506]],
                        [[1.37873781, -0.39730636, 1.03114803],
                         [-1.0563443, -1.35941582, 0.4329204],
                         [-0.43808719, 0.68949022, -1.96722015]]],
                       [[[0.87755776, 2.22487363, -0.1994678],
                         [-0.23072609, -0.15658328, -1.8894498],
                         [0.49572071, -1.51721865, -0.148514]],
                        [[1.33705152, -0.37684592, 0.43399247],
                         [0.71835273, -0.27620022, -1.59097982],
                         [-0.41237174, 2.27693752, -1.59355336]],
                        [[-0.20864999, -0.46612975, 0.21899841],
                         [0.5920708, 2.11845969, -0.37245876],
                         [0.81519196, 0.7327657, 0.27502067]]],
                       [[[-0.37367795, -0.73655095, -0.75468722],
                         [-0.81425165, 0.46619831, 1.02432303],
                         [0.76078817, -0.54626009, 1.76599841]],
                        [[1.18035314, -0.38952389, -0.8655608],
                         [0.11394674, 1.43136953, 0.39462669],
                         [0.74596375, -0.1424926, 0.39133621]],
                        [[1.54246971, 0.15246189, -0.02921384],
                         [-0.01189834, 0.40150057, -1.33588103],
                         [-1.95854526, -1.22778415, 0.20867483]]]]
    correct_after_b = [-0.07718945, 0.15356871, 0.38646942]
    # Compare computed vs reference; rel_error is the project's scalar
    # relative-difference helper.
    before_w_e = rel_error(correct_before_w, before_w)
    before_b_e = rel_error(correct_before_b, before_b)
    after_w_e = rel_error(correct_after_w, after_w)
    after_b_e = rel_error(correct_after_b, after_b)
    print('Relative difference before_w:', before_w_e)
    print('Relative difference before_b:', before_b_e)
    print('Relative difference after_w :', after_w_e)
    print('Relative difference after_b :', after_b_e)
    # before_b is exactly zero, hence the much tighter 1e-11 bound.
    self.assertTrue(before_w_e <= 5e-6)
    self.assertTrue(before_b_e <= 1e-11)
    self.assertTrue(after_w_e <= 5e-6)
    self.assertTrue(after_b_e <= 5e-6)
def error(model, X, Y):
    """Convenience wrapper: relative error of ``model`` on ``(X, Y)``.

    Delegates to ``rel_error`` using the module-level ``mean_out`` as the
    normalizing mean of the outputs.
    """
    result = rel_error(model, X, Y, meanY=mean_out)
    return result
def test_fc_layer_3_update(self):
    """Verify FCLayer's parameter update.

    Runs one forward/backward pass followed by ``update(learning_rate=0.05)``
    and checks both the pre-update and post-update weights/biases against
    precomputed reference values.  The RNG seed makes the layer's random
    initialization and the random upstream gradient reproducible.
    """
    print('\n==================================')
    print(' Test fc layer update ')
    print('==================================')
    # Fixed seed: parameters and upstream gradient are deterministic.
    np.random.seed(123)
    in_dim, out_dim = 5, 7
    x = np.random.randn(5, in_dim)
    fc_layer = FCLayer(in_dim, out_dim)
    # Snapshot parameters as copies (not views) before any mutation.
    before_w = np.array(fc_layer.w, copy=True)
    before_b = np.array(fc_layer.b, copy=True)
    fc_out = fc_layer.forward(x)
    d_prev = np.random.randn(*fc_out.shape)  # random upstream gradient
    dx = fc_layer.backward(d_prev, 0.01)
    fc_layer.update(learning_rate=0.05)
    after_w = fc_layer.w
    after_b = fc_layer.b
    # Reference weights at initialization (shape: in_dim x out_dim).
    correct_before_w = [[-0.40334947, 0.5737037, -0.90357701, -0.08858724,
                         -0.54502165, -0.16166788, -1.76998316],
                        [-1.12041591, -0.44264123, 0.58657875, -0.10981685,
                         0.00179992, 0.43527026, -0.55626763],
                        [0.17938167, -0.50935851, -1.09267413, -0.24722674,
                         0.36290669, 0.21414252, -0.00748226],
                        [1.51306465, 0.26114858, 0.619007, 1.41552614,
                         -0.81845142, -0.65698735, 1.10282044],
                        [-0.50473919, 0.01877332, 0.6762948, 0.56333218,
                         1.10988747, 0.94592841, 0.67634331]]
    # Biases are expected to start at zero.
    correct_before_b = [0., 0., 0., 0., 0., 0., 0.]
    # Reference weights after one SGD step with lr=0.05.
    correct_after_w = [[-0.58982108, 0.78081109, -0.94110661, -0.16360302,
                        -0.45787255, -0.32758083, -1.6474044],
                       [-1.19924514, -0.81798274, 0.41813681, 0.14179691,
                        -0.06741367, 0.52120198, -0.52673421],
                       [0.51652469, -0.79749342, -1.09666714, -0.23378007,
                        0.51450328, 0.48249497, -0.00835405],
                       [1.31292175, 0.33689368, 0.5087007, 1.37156548,
                        -0.64157188, -0.75320515, 1.29581094],
                       [-0.43992258, -0.00885285, 0.74126341, 0.46880701,
                        1.12804097, 1.0715126, 0.66004662]]
    correct_after_b = [0.08653284, -0.12308412, -0.10339719, 0.12829831,
                       0.057812, -0.04044389, 0.06720047]
    # Compare computed vs reference; rel_error is the project's scalar
    # relative-difference helper.
    before_w_e = rel_error(correct_before_w, before_w)
    before_b_e = rel_error(correct_before_b, before_b)
    after_w_e = rel_error(correct_after_w, after_w)
    after_b_e = rel_error(correct_after_b, after_b)
    print('Relative difference before_w:', before_w_e)
    print('Relative difference before_b:', before_b_e)
    print('Relative difference after_w :', after_w_e)
    print('Relative difference after_b :', after_b_e)
    # before_b is exactly zero, hence the much tighter 1e-11 bound.
    self.assertTrue(before_w_e <= 5e-6)
    self.assertTrue(before_b_e <= 1e-11)
    self.assertTrue(after_w_e <= 5e-7)
    self.assertTrue(after_b_e <= 5e-8)
# --- Sanity-check a TwoLayerNet: forward scores, training loss, and a
# numerical gradient check against the analytic gradients. ---------------
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C,
                    weight_scale=std, loss_name=loss)
print('loss use {}'.format(loss))
print('----------------')
print('Testing test-time forward pass')
# Overwrite the random initialization with deterministic linspace values so
# the resulting scores are reproducible and comparable to references.
model.params['W1'] = np.linspace(-0.7, 0.3, num=D * H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H * C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
# Deterministic input batch of shape (N, D).
X = np.linspace(-5.5, 4.5, num=N * D).reshape(D, N).T
# Without labels, model.loss returns just the class scores.
scores = model.loss(X)
print('scores are {}'.format(scores))
# test backward
print('----------------')
print('Testing training loss')
y = np.asarray([0, 1, 0])
# With labels, model.loss returns (loss, grads).
# NOTE(review): this rebinds the name `loss`, which above held the loss
# *name* passed as `loss_name=loss` — confusing but harmless here since
# the constructor has already been called; verify nothing later in the
# file still expects the original string.
loss, grads = model.loss(X, y)
print('loss is {}'.format(loss))
# Numerical gradient check: compare each analytic gradient against a
# finite-difference estimate of the loss w.r.t. that parameter.
for name in sorted(grads):
    f = lambda _: model.loss(X, y)[0]  # loss as a function of the (mutated in place) parameter
    grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
    print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
# --- Benchmark Newton-based patch intersection against the triangle
# reference results for one mesh size. ------------------------------------
estimate = .5 * np.ones((3, 1))  # initial Newton guess, reused for every ray
i = 0
newton_time = perf_counter()
for direction in ray_points:
    newton_iteration = perf_counter()  # per-ray timer (value unused below — presumably leftover instrumentation)
    intersect, U, V, t = bpatch.intersect(Cx, Cy, Cz, viewpoint, direction,
                                          estimate=estimate)
    results_newton[i] = intersect
    i += 1
# Total wall-clock time for all rays at this mesh size.
times_newton[size_index] = perf_counter() - newton_time
# Element-wise error of the Newton results vs. the triangle reference.
rel_errs = [rel_error(a, b) for (a, b) in zip(results_newton, results_tri)]
abs_errs = [abs_error(a, b) for (a, b) in zip(results_newton, results_tri)]
# Store [max, min, mean] for both relative and absolute errors.
errors[size_index] = [max(rel_errs), min(rel_errs), np.average(rel_errs),
                      max(abs_errs), min(abs_errs), np.average(abs_errs)]
size_index += 1
# Column labels matching the errors[size_index] layout above.
# NOTE(review): this list literal continues past the visible chunk — the
# closing bracket is not in view.
names = ['Max Relative Error', 'Min Relative Error', 'Average Relative Error',
         'Max Absolute Error', 'Min Absolute Error', 'Average Absolute Error'
def tv_loss_test(correct):
    """Evaluate tv_loss on the test content image and print its relative
    error against the precomputed reference value ``correct``."""
    weight = 2e-2  # total-variation weight used by the reference computation
    loss_node = tv_loss(model.image, weight)
    value = sess.run(loss_node, feed_dict={model.image: content_img_test})
    print('Error is {}'.format(rel_error(correct, value)))
def gram_matrix_test(correct):
    """Evaluate gram_matrix on feature map #5 of the style image and print
    the relative error against the precomputed reference ``correct``."""
    features = model.extract_features()[5]
    gram_node = gram_matrix(features)
    value = sess.run(gram_node, feed_dict={model.image: style_img_test})
    print('Maximum error is {}'.format(rel_error(correct, value)))
def test_conv_layer_2_backward(self):
    """Verify ConvolutionLayer's backward pass.

    Runs a forward pass on a seeded random input, backpropagates a seeded
    random upstream gradient, and checks the resulting input gradient (dx),
    weight gradient (dw), and bias gradient (db) against precomputed
    reference values.
    """
    print('\n==================================')
    print(' Test conv layer backward ')
    print('==================================')
    # Fixed seed: input, initialization, and upstream gradient are
    # deterministic.
    np.random.seed(123)
    in_dim, out_dim = 3, 3
    kernel_size, stride, pad = 3, 1, 1
    x = np.random.randn(1, in_dim, 5, 5)
    conv_layer = ConvolutionLayer(in_dim, out_dim, kernel_size, stride, pad)
    conv_out = conv_layer.forward(x)
    d_prev = np.random.randn(*conv_out.shape)  # random upstream gradient
    dx = conv_layer.backward(d_prev, 0.1)
    dw = conv_layer.dw
    db = conv_layer.db
    # Reference input gradient (shape: 1 x in_dim x 5 x 5).
    correct_dx = [[[[-1.79921076, -4.01635575, -4.51317017, 3.1799483, 0.60601338],
                    [-3.0409217, -3.03893501, 0.263502, -1.77473826, -0.66048859],
                    [-2.70680836, 7.95821241, -0.05199919, 3.71902007, -1.95724218],
                    [4.63502915, 10.48786242, 3.23741466, -1.10458816, 3.64821246],
                    [1.74179073, 0.89016108, -5.95489576, -2.90265459, 3.82054991]],
                   [[2.12426007, -8.61633801, -1.10075962, 0.69954115, 1.41973446],
                    [-1.21497191, -0.76472356, -4.03381736, -1.49888202, -0.12346752],
                    [5.45687502, -1.69046871, 2.26612641, -4.9606816, -7.63459002],
                    [-2.22526131, -1.22114106, 1.1399924, -0.33830405, -2.6194636],
                    [0.90182683, 0.23853391, -5.56596295, -1.43509046, -1.05353328]],
                   [[3.18771876, -0.02184037, -2.03027136, -0.05773355, -3.82179085],
                    [1.34224997, -2.91348884, -1.58173337, -5.66629621, 3.45049871],
                    [-2.67463988, 2.86761068, 2.64368971, -3.71621604, -7.67662767],
                    [-2.91132903, -4.52401895, 2.17502747, -4.60068269, -2.16018264],
                    [1.36481716, 6.13043585, 1.7706074, 2.06804708, 0.96884191]]]]
    # Reference weight gradient (shape: out_dim x in_dim x 3 x 3).
    correct_dw = [[[[-1.04985591, 2.87904911, -6.82503529],
                    [1.66702503, -0.93491251, -3.60471169],
                    [-2.87819899, -3.46218444, 1.40624326]],
                   [[1.25476706, 2.76259291, -4.67645933],
                    [0.41342563, -4.97285148, 1.75108074],
                    [-0.09661992, 0.64336928, -3.79150539]],
                   [[-3.91150984, 1.21576097, 0.09212882],
                    [-0.66208335, -0.20382688, -1.03624965],
                    [1.14408925, -0.89092561, -0.39136559]]],
                  [[[-3.24175872, 7.70245334, 3.49461994],
                    [5.30043722, 6.73881481, 0.38190406],
                    [-1.35312823, -1.90830871, -5.62180321]],
                   [[-1.77178115, -7.2335903, 1.3902429],
                    [5.978873, 11.12390811, 4.27724157],
                    [1.56800653, -6.17417643, -8.81210226]],
                   [[-1.36754116, -1.76925626, -1.95415376],
                    [3.19023575, -10.05059218, 2.02020642],
                    [0.01609822, -4.61553449, 4.02922391]]],
                  [[[-3.85567269, -5.30516932, -7.00612174],
                    [1.08820947, -2.86128285, -5.19898729],
                    [-8.71727415, -0.10330606, 0.96197135]],
                   [[6.90694753, 0.67861547, 0.76847915],
                    [0.33708355, -3.16736077, -1.20728537],
                    [-3.73821152, -1.41083771, 1.33975801]],
                   [[0.18050385, -7.86418369, 3.46332905],
                    [5.33713985, -2.32996899, -1.64722694],
                    [1.46461391, 4.07081265, -0.7995358]]]]
    # Reference bias gradient (one value per output channel).
    correct_db = [1.5437889, -3.07137414, -7.72938847]
    # Compare computed vs reference; rel_error is the project's scalar
    # relative-difference helper.
    dx_e = rel_error(correct_dx, dx)
    dw_e = rel_error(correct_dw, dw)
    db_e = rel_error(correct_db, db)
    print('Relative difference dx:', dx_e)
    print('Relative difference dw:', dw_e)
    print('Relative difference db:', db_e)
    self.assertTrue(dx_e <= 5e-6)
    self.assertTrue(dw_e <= 5e-6)
    self.assertTrue(db_e <= 5e-6)