def test_multiply_uint32_conversion(transformer_factory):
    x = ng.placeholder(axes=(), dtype=np.uint32)
    multiplier = 1
    ng_mul = 0.5 * x * 0.5

    with executor(ng_mul, x) as ex:
        ng_result = ex(multiplier)
        assert ng_result == 0.25
def test_tensor_const_sum():
    op_map = parse_prototxt(join(PROTO_PATH, "tensor_const_sum.prototxt"))
    op = op_map.get("C")

    with executor(op) as ex:
        res = ex()
        a = np.full((2, 3), 4.)
        b = np.full((2, 3), 3.)
        c = a + b
        assert np.array_equal(res, c)
def test_constant_add(transformer_factory):
    """TODO."""
    a = ng.constant(1)
    b = ng.constant(2)
    c = a + b

    with executor(c) as ex:
        result = ex()
        print(result)
        assert result == 3
def template_two_placeholders(tuple_values, ng_fun, ng_placeholder1, ng_placeholder2,
                              expected_values, description):
    with executor(ng_fun, ng_placeholder1, ng_placeholder2) as const_executor:
        print(description)
        for values, expected_value in zip(tuple_values, expected_values):
            flex = const_executor(values[0], values[1])
            print("flex_value: ", flex)
            print("expected_value: ", expected_value)
            print(flex - expected_value)
            assert flex == expected_value
def template_one_placeholder(values, ng_fun, ng_placeholder, expected_values, description):
    with executor(ng_fun, ng_placeholder) as const_executor:
        print(description)
        for value, expected_value in zip(values, expected_values):
            flex = const_executor(value)
            print("flex_value: ", flex)
            print("expected_value: ", expected_value)
            print(flex - expected_value)
            assert (flex == expected_value).all()
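# A minimal usage sketch for template_one_placeholder above. This is a
# hypothetical illustration: the axis length, input values, and the
# element-wise squaring op are assumptions, not taken from the real
# parametrized tests.
def example_template_one_placeholder_usage():
    N = ng.make_axis(length=3)
    ng_placeholder = ng.placeholder([N])
    values = [np.array([1.0, 2.0, 3.0], dtype=np.float32)]
    expected_values = [np.array([1.0, 4.0, 9.0], dtype=np.float32)]
    template_one_placeholder(values, ng_placeholder * ng_placeholder,
                             ng_placeholder, expected_values,
                             "square each element")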
def test_scalar(transformer_factory):
    """TODO."""
    # Simple evaluation of a scalar
    val = 5
    x = ng.constant(val)

    with executor(x) as ex:
        cval = ex()
        assert cval.shape == ()
        ng.testing.assert_allclose(cval, val)
def baseline_value(self, y, t):
    '''
    Use the constructed ngraph computation to evaluate the cost function
    on inputs y and t
    '''
    N = ng.make_axis(length=y.shape[0])
    Y, T = ng.placeholder([N]), ng.placeholder([N])

    with executor(self.ng_computation(Y, T), Y, T) as ex:
        return ex(y, t)
def test_uniform_range_pos(transformer_factory, input_tensor):
    """TODO."""
    ng_a = ng.uniform(input_tensor, low=0.0, high=0.5)

    with executor(ng_a) as ex:
        result = ex()
        print(result)

        assert np.all(result < 0.5)
        assert np.all(result >= 0.0)
        assert not np.all(result == 0.0)
def test_uniform_range_posneg(input_tensor):
    """TODO."""
    ng_a = ng.uniform(input_tensor, low=-0.5, high=0.5)

    with executor(ng_a) as ex:
        result = ex()
        print(result)

        assert np.all(result < 0.5)
        assert np.all(result >= -0.5)
        assert not np.all(result >= 0.0)
def test_elementwise_fp16_out(transformer_factory):
    axes = ng.make_axes([ng.make_axis(length=2), ng.make_axis(length=2)])

    a = ng.constant(np.array([[1.0, 2.0], [4.0, 12.0]], dtype='float32'), axes)
    b = ng.constant(np.array([[1.0, 2.0], [6.0, 12.0]], dtype='float32'), axes)

    c = ng.multiply(a, b, dtype=np.dtype(np.float16))

    with executor(c) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [[1.0, 4.0], [24.0, 144.0]])
def template_two_placeholders(operands, ng_fun):
    first_operand = operands[0][0]
    second_operand = operands[0][1]

    ng_placeholder1, axes = get_placeholder_from_operand(first_operand)
    ng_placeholder2, _ = get_placeholder_from_operand(second_operand, axes=axes)

    with executor(ng_fun(ng_placeholder1, ng_placeholder2),
                  ng_placeholder1, ng_placeholder2) as const_executor:
        execute_calculation(operands, first_operand, const_executor)
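# A hypothetical sketch of the helper used above. The real
# get_placeholder_from_operand lives elsewhere in the test suite; the body
# below is an assumption based only on its call signature: build a
# placeholder whose axes match the operand, reusing any axes passed in so
# that both operands share the same axes.
def get_placeholder_from_operand_sketch(operand, axes=None):
    if axes is None:
        shape = np.asarray(operand).shape
        axes = ng.make_axes([ng.make_axis(length=dim) for dim in shape])
    return ng.placeholder(axes), axes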
def test_constant_tensor_multiply(transformer_factory):
    Y = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])
    b = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [Y, N])

    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [[1.0, 1.0], [1.0, 1.0]])
def test_tensor_sum_single_reduction_axes(transformer_factory):
    """TODO."""
    Y = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [N, Y])

    b = ng.sum(a, reduction_axes=Y)

    with executor(b) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [2.0, 2.0])
def test_4d_elementwise(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    y_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    out = ng.add(x, y)

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.add(x_val, y_val)
    np.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
def execute_convolution(image_height, image_width, filter_height, filter_width, channel=16,
                        batch_size=32, filter_count=8, image_3rd_dim=1, filter_3rd_dim=1,
                        padding=(0, 0, 0), stride=(1, 1, 1), dilation=1, np_comparison=False):

    pad_h, pad_w, pad_d = padding
    str_h, str_w, str_d = stride
    cf = ConvParams(C=channel, N=batch_size, K=filter_count,
                    D=image_3rd_dim, H=image_height, W=image_width,
                    T=filter_3rd_dim, R=filter_height, S=filter_width,
                    pad_d=pad_d, pad_h=pad_h, pad_w=pad_w,
                    str_d=str_d, str_h=str_h, str_w=str_w,
                    dil_d=dilation, dil_h=dilation, dil_w=dilation)

    inputs = ng.placeholder(cf.ax_i)
    filters = ng.placeholder(cf.ax_f)
    rng = RandomTensorGenerator(0, np.float32)
    input_value = rng.uniform(-4, 4, cf.ax_i, dtype=int)
    filter_value = rng.uniform(-4, 4, cf.ax_f, dtype=int)
    error_value = rng.uniform(-0.5, 0.5, cf.ax_o)

    with executor(ng.convolution(cf.conv_params, inputs, filters, axes=cf.ax_o),
                  inputs, filters) as const_executor:
        out = const_executor(input_value, filter_value)

    if np_comparison:
        np_out, gradInp, gradF_np = \
            reference_conv(cf.dimI, cf.dimF, cf.dimO, cf.conv_params,
                           input_value, filter_value, error_value)
        return out, np_out
    return out
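# A minimal usage sketch for execute_convolution, checking the transformer
# output against the numpy reference implementation. The 7x7 image and 3x3
# filter sizes are illustrative assumptions, not taken from the real
# parametrized tests.
def example_execute_convolution_usage():
    out, np_out = execute_convolution(image_height=7, image_width=7,
                                      filter_height=3, filter_width=3,
                                      np_comparison=True)
    np.testing.assert_allclose(out, np_out, rtol=1e-4)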
def test_execute_non_placeholder():
    """
    Expect a failure if a non-input (Variable) is used as an argument to
    executor.
    """
    N = ng.make_axis(length=1)

    x = ng.temporary([N])
    y = ng.variable([N])

    with pytest.raises(ValueError):
        with executor(x + y, x, y) as ex:
            ex
def test_4d_chained(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    y_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)
    np.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
def test_4d_reduction(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)

    out1 = ng.sum(x, reduction_axes=input_axes[1])
    out2 = ng.sum(x, reduction_axes=input_axes[3])

    with executor([out1, out2]) as ex:
        graph_val1, graph_val2 = ex()

    np_val1 = np.sum(x_val, 1)
    np_val2 = np.sum(x_val, 3)
    np.testing.assert_allclose(graph_val1, np_val1, rtol=1e-4)
    np.testing.assert_allclose(graph_val2, np_val2, rtol=1e-4)
def test_missing_arguments_to_execute():
    """
    Expect a failure if the wrong number of arguments are passed to a
    computation.
    """
    N = ng.make_axis(length=1)

    x = ng.placeholder([N])
    y = ng.placeholder([N])

    with executor(x + y, x, y) as f:
        with pytest.raises(ValueError):
            f(1)
def test_4d_chained(transformer_factory, input_axes):
    # Limiting the maximum absolute value of the tensor elements to 7.9.
    # See the description in test_exit_condition above.
    # Limiting the minimum absolute value of the tensor fed to the reciprocal
    # operation to 1/7.9.
    #
    # This is a consequence of the above and of flexpoint accuracy.
    # Very small numbers have poor absolute accuracy. When their reciprocal is
    # calculated, the result becomes very large and has even worse accuracy.
    # If small numbers were accepted as input to reciprocal in this test, the
    # absolute maximum value of the result would be unbounded, and so would
    # the absolute tolerance.
    # To make it possible to set an atol that the test can pass with, the
    # minimum element of the tensor fed to the reciprocal operation has to be
    # bounded.
    is_flex = is_flex_factory(transformer_factory)
    clip_val_max = 7.9 if is_flex else 0
    clip_val_min = 1.0 / 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_min=clip_val_min, clip_max=clip_val_max)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val_max)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)

    # atol_multiplier = 15 * x_val.shape[0]
    #
    # x_val.shape[0] is the number of elements added together in the operation
    # ng.sum(X, reduction_axes=input_axes[0])
    #
    # 15 is calculated as follows:
    #
    # The input tensor has values from the range 1/7.9 - 7.9.
    # For DEC=12 the absolute error is equal to 0.5*2^-12 = 0.000122.
    # 1/7.9 = 0.126582; with this error it becomes 0.126704.
    # The reciprocal of 1/7.9 is 7.9.
    # The reciprocal of 1/7.9 + err is 7.892389.
    # The absolute difference is 0.007611.
    # It is 15.2 times larger than the atol limit of 5e-4 from the Argon
    # transformer.
    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4,
                               atol_multiplier=15 * x_val.shape[0])
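# A quick standalone check of the error-amplification arithmetic in the
# comments above (a sketch for illustration, not part of the test suite):
# for DEC=12 the absolute representation error is 0.5 * 2**-12, and pushing
# the perturbed minimum input 1/7.9 through a reciprocal amplifies it to
# roughly 15.2 times the 5e-4 atol limit quoted for the Argon transformer.
def check_reciprocal_error_amplification():
    err = 0.5 * 2 ** -12           # absolute error for DEC=12: 0.000122
    x = 1.0 / 7.9                  # smallest allowed input: 0.126582
    perturbed = 1.0 / (x + err)    # reciprocal of perturbed input: 7.892389
    diff = abs(7.9 - perturbed)    # absolute difference: 0.007611
    assert 15 < diff / 5e-4 < 16   # ~15.2x the 5e-4 atol limit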
def test_cputensor_add(transformer_factory):
    """TODO."""
    Y = ng.make_axis(length=2)
    M = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    b = ng.constant(np.array([3, 5], dtype=np.float32), [Y])
    c = a + b

    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, [6, 10])

    np_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_b = np.array([[1, 2], [3, 4]], dtype=np.float32)
    np_c = np_a + np_b

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = a + b

    with executor(c) as ex:
        result = ex()
    assert np.array_equal(result, np_c)
def test_assign(transformer_factory, operands, test_name):
    v = ng.variable(())
    ng_placeholder = ng.placeholder(())
    vset = ng.sequential([ng.assign(v, ng_placeholder), v])
    iterations = len(operands) != 1

    with executor(vset, ng_placeholder) as ex:
        for i in operands:
            flex_result = ex(i[0])
            print("flex: ", flex_result)
            print("expected: ", i[1])
            if iterations:
                assert_allclose(flex_result, i[1])
            else:
                assert flex_result == i[1]
def get_fprop_bprop(self, input_value):
    ip = ng.placeholder(axes=self.ax_i)
    ep = ng.placeholder(axes=self.ax_o)

    iv = np.array(input_value).astype(np.float32).reshape(self.dimI)
    ev = np.ones(self.dimO) * 4

    output = ng.pooling(self.pool_params, ip, axes=self.ax_o)
    delta = BpropPoolOp(ep, ip, output)

    with executor([output, delta], ip, ep) as pool_executor:
        output_value, delta_value = pool_executor(iv, ev)
    return output_value, delta_value
def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width):
    """ Test that causal convolutions only operate on leftward inputs"""
    conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal")
    output = conv_layer(conv1d_placeholder)
    output_width = output.axes.find_by_name("W")[0].length
    assert output_width == width, "Causal convolution output width != " \
                                  "input width: {} != {}".format(output_width, width)
    with executor(output, conv1d_placeholder) as comp:
        output_val = comp(spatial_onehot)
        # First 1 is at width // 2, so anything before that should be 0
        assert (output_val[:, :width // 2] == 0).all(), \
            "Acausal outputs in causal convolution"
def test_tensor_size(transformer_factory):
    n, m = 3, 4

    N = ng.make_axis(length=n)
    M = ng.make_axis(length=m)

    aaxes = ng.make_axes([N, M])
    x = ng.placeholder(aaxes)
    size_fun = ng.tensor_size(x)
    nptensor = np.arange(n * m).reshape(n, m)

    with executor(size_fun, x) as ex:
        assert ex(nptensor) == n * m
def test_cross_entropy_binary_logistic_shortcut(input_tensor):
    """TODO."""
    p_u = input_tensor
    p_v = ng.placeholder(p_u.axes)
    u = rng.uniform(-3.0, 3.0, p_u.axes)
    v = np_softmax(rng.uniform(-3.0, 3.0, p_u.axes), 0)

    cel = cross_entropy_binary_logistic(u, v)
    cel_shortcut = cross_entropy_binary_logistic_shortcut(u, v)
    ng.testing.assert_allclose(cel, cel_shortcut, rtol=1e-5)

    with executor(ng.cross_entropy_binary_inner(ng.sigmoid(p_u), p_v), p_u, p_v) as ex:
        cel_graph = ex(u, v)
    ng.testing.assert_allclose(cel, cel_graph, rtol=1e-5)
def test_constant_multiply(transformer_factory):
    # TODO: better error message when missing axes length in cases where it
    # is needed
    Y = ng.make_axis(length=1)

    # TODO: don't require axes
    a = ng.constant(np.array([4.0], dtype='float32'), [Y])
    b = ng.constant(np.array([2.0], dtype='float32'), [Y])

    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [8])
def test_mean(transformer_factory, input_tensor):
    inputs = input_tensor
    targets = ng.placeholder(inputs.axes)

    inp_stat = ng.mean(inputs, reduction_axes=inputs.axes.batch_axes())
    err = ng.sum(inp_stat - targets, out_axes=())

    with executor(err, inputs, targets) as comp_func:
        input_value = rng.uniform(-1, 1, inputs.axes)
        target_value = rng.uniform(-1, 1, targets.axes)
        ng_f_res = comp_func(input_value, target_value)

        np_f_res = np.sum(np.mean(input_value, axis=1, keepdims=True) - target_value)

        ng.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)
def test_cputensor_add_constant(transformer_factory):
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.add(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.add(a, b)

    with executor(c) as ex:
        result = ex()
        print(result)
        assert np.array_equal(result, np_c)
def test_extract_op():
    # set up an op and Assign a value to it so we can read it out
    axes = ng.make_axes([
        ng.make_axis(name='A', length=2),
        ng.make_axis(name='B', length=3),
    ])
    x_op = ng.variable(axes)
    assign_op = ng.AssignOp(x_op, 1)

    # extract values out of it and make sure they match expected results
    with executor(assign_op) as comp_assignment:
        t = comp_assignment.transformer
        comp_assignment()
        x_out = serde_weights.extract_op(t, x_op)

    assert (x_out == np.ones(axes.lengths)).all()