def test_negative_3(self):
    """
    Input graph:
    input1(1, 5, 3, 4) -----> stack(axis=1) -----> reshape(shape=(-1, 2, 5, 4, 3)) ---> out(1, 2, 5, 4, 3)
                                 ^
                                 |
    input2(1, 5, 3, 4) ----------

    Output graph:
    Unchanged -- the reshape permutes the trailing dimensions instead of merging
    the stack axis into a neighbor, so this graph is not equivalent to a concat.
    """
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 3, 4)),
                             mb.TensorSpec(shape=(1, 5, 3, 4))])
    def prog(x1, x2):
        a = mb.stack(values=[x1, x2], axis=1)
        a = mb.reshape(x=a, shape=[-1, 2, 5, 4, 3])
        return a

    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::replace_stack_reshape"
    )
    self.assertEqual(get_op_types_in_program(prev_prog), ["stack", "reshape"])
    self.assertEqual(get_op_types_in_program(prog), ["stack", "reshape"])
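# A minimal numpy sketch (not part of the original suite; the helper name is
# made up) of the rule this negative test exercises: the pass only fires when
# the reshape exactly merges the stack axis into an adjacent axis, i.e. when
# the result already *is* a concat output. Here the target shape keeps rank 5
# and swaps the trailing (3, 4) dims, so no concat of the inputs matches it.
def _replace_stack_reshape_negative_demo():
    x1 = np.random.rand(1, 5, 3, 4)
    x2 = np.random.rand(1, 5, 3, 4)
    # Positive pattern (would be rewritten): merge the stack axis into its
    # neighbor, which is exactly a concat along that axis.
    merged = np.reshape(np.stack([x1, x2], axis=1), (1, 10, 3, 4))
    assert np.array_equal(merged, np.concatenate([x1, x2], axis=1))
    # Negative pattern (this test): no concat of the inputs has the target shape.
    concat_shapes = {np.concatenate([x1, x2], axis=ax).shape for ax in range(4)}
    assert (1, 2, 5, 4, 3) not in concat_shapes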
def test_sanitize_var_names_with_two_functions(self):
    """
    Input:
        main(%x: (1, 3, 20, fp32)(Tensor)) {
          block0() {
            %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!")
          } -> (%var_1!)
        }
        main_2(%x: (1, 3, 20, fp32)(Tensor)) {
          block0() {
            %var_1!: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1!")
          } -> (%var_1!)
        }

    Output:
        main(%x: (1, 3, 20, fp32)(Tensor)) {
          block0() {
            %var_1_: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_")
          } -> (%var_1_)
        }
        main_2(%x: (1, 3, 20, fp32)(Tensor)) {
          block0() {
            %var_1_: (1, 3, 20, fp32)(Tensor) = relu(x=%x, name="var_1_")
          } -> (%var_1_)
        }
    """
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))])
    def prog(x):
        z = mb.relu(x=x, name="var_1!")
        return z

    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 3, 20))])
    def prog_2(x):
        z = mb.relu(x=x, name="var_1!")
        return z

    prog.add_function("main_2", prog_2.functions["main"])
    PASS_REGISTRY["mil_backend::sanitize_name_strings"](prog)

    block = prog.functions["main"]
    assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_"
    assert prog["main"].outputs[0].name == "var_1_"
    assert block.find_ops(op_type="relu")[0].name == "var_1_"

    block = prog.functions["main_2"]
    assert block.find_ops(op_type="relu")[0].outputs[0].name == "var_1_"
    assert prog["main_2"].outputs[0].name == "var_1_"
    assert block.find_ops(op_type="relu")[0].name == "var_1_"
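# A minimal sketch (an assumption mirroring the rename this test verifies, not
# the pass's actual implementation; the regex and helper name are illustrative)
# of the sanitization rule: characters outside [a-zA-Z0-9_] become "_", so
# "var_1!" -> "var_1_".
def _sanitize_name_demo(name):
    import re
    return re.sub(r"[^a-zA-Z0-9_]", "_", name)

assert _sanitize_name_demo("var_1!") == "var_1_"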
def test_multiple(self):
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4)),
                             mb.TensorSpec(shape=(1, 2, 3, 4)),
                             mb.TensorSpec(shape=(1, 2, 3, 4)),
                             mb.TensorSpec(shape=(1, 2, 3, 4))])
    def prog(x1, x2, x3, x4):
        a = mb.stack(values=[x1, x2], axis=1)
        a = mb.reshape(x=a, shape=[1, 4, 3, 4])

        b = mb.stack(values=[x3, x4], axis=1)
        b = mb.reshape(x=b, shape=[1, 4, 3, 4])

        c = mb.stack(values=[a, b], axis=2)
        c = mb.reshape(x=c, shape=[1, 4, 6, 4])
        return c

    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::replace_stack_reshape"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        ["stack", "reshape", "stack", "reshape", "stack", "reshape"],
    )
    self.assertEqual(get_op_types_in_program(prog), ["concat", "concat", "concat"])

    inputs = {"x1": (1, 2, 3, 4), "x2": (1, 2, 3, 4), "x3": (1, 2, 3, 4), "x4": (1, 2, 3, 4)}
    assert_model_is_valid(
        prog,
        inputs,
        expected_output_shapes={block.outputs[0].name: (1, 4, 6, 4)},
    )

    output_name = block.outputs[0].name
    mlmodel = ct.convert(prog, source="milinternal", convert_to="neuralnetwork")

    if not _IS_MACOS:
        # Can not get predictions unless on macOS.
        return

    input_dict = dict()
    for name, shape in inputs.items():
        input_dict[name] = np.random.rand(*shape)

    branch_1 = np.reshape(np.stack([input_dict["x1"], input_dict["x2"]], axis=1), newshape=[1, 4, 3, 4])
    branch_2 = np.reshape(np.stack([input_dict["x3"], input_dict["x4"]], axis=1), newshape=[1, 4, 3, 4])
    old_prediction = np.reshape(np.stack([branch_1, branch_2], axis=2), newshape=[1, 4, 6, 4])

    prediction = mlmodel.predict(input_dict, useCPUOnly=True)
    np.testing.assert_allclose(old_prediction, prediction[output_name], atol=1e-04, rtol=1e-05)
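# Numpy sketch (not part of the original suite; helper name is made up) of the
# rewrite checked above: each stack(axis=k) + reshape that merges the stack
# axis into its neighbor collapses to a single concat on that axis, so the
# three-pair graph becomes three concats with identical numerics.
def _multiple_concat_equivalence_demo():
    x1, x2, x3, x4 = (np.random.rand(1, 2, 3, 4) for _ in range(4))
    a = np.reshape(np.stack([x1, x2], axis=1), (1, 4, 3, 4))
    b = np.reshape(np.stack([x3, x4], axis=1), (1, 4, 3, 4))
    c = np.reshape(np.stack([a, b], axis=2), (1, 4, 6, 4))
    # Concat-only form equivalent to what the pass produces:
    c_concat = np.concatenate(
        [np.concatenate([x1, x2], axis=1), np.concatenate([x3, x4], axis=1)],
        axis=2,
    )
    assert np.array_equal(c, c_concat)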
def test_loop_invariant_elimination2():
    """
    Invariant pattern: the block returns a var (`b`) defined outside of the block.
    """
    @mb.program(input_specs=[
        mb.TensorSpec(shape=(1, 2)),
        mb.TensorSpec(shape=(1, 2)),
    ])
    def prog(a, b):
        def body(a, bx):
            return mb.add(x=a, y=b), b

        def cond(a, bx):
            a_mean = mb.reduce_mean(x=a, axes=[0, 1])
            b_mean = mb.reduce_mean(x=bx, axes=[0, 1])
            return mb.less(x=a_mean, y=b_mean)

        # b is loop invariant
        return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))

    while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
    assert len(while_op.blocks[0].inputs) == 2
    assert len(while_op.outputs) == 2
    assert len(while_op.loop_vars) == 2
    assert while_op.blocks[0].inputs[0].name == "a.x"
    assert while_op.blocks[0].inputs[1].name == "b.x"

    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY["common::loop_invariant_elimination"](prog)
    assert_same_output_names(prev_prog, prog)

    while_op = prog.find_ops(op_type="while_loop", exactly_one=True)[0]
    assert len(while_op.blocks[0].inputs) == 1
    assert len(while_op.outputs) == 1
    assert len(while_op.loop_vars) == 1
    assert while_op.blocks[0].inputs[0].name == "a.x"

    if validate_model:
        assert_model_is_valid(prog, {"a": (1, 2), "b": (1, 2)})
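# Plain-Python analogue (an illustration, not the pass's implementation; the
# helper name is made up) of the invariant being eliminated: the body returns
# `b` untouched, so `b` need not be loop-carried, and the pass drops it from
# loop_vars, leaving the while_loop with a single input and output.
def _loop_invariant_analogue(a, b):
    while a.mean() < b.mean():  # cond reads b, but never rebinds it
        a = a + b               # body updates only a
    return a                    # b can be read straight from the outer scope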
def test_single_input_to_single_operation(self):
    @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
    def prog(x):
        x = mb.square(x=x)
        return x

    self.assertEqual(get_op_types_in_program(prog), ["square"])

    apply_pass_and_basic_check(
        prog, transform.FP16ComputePrecision(op_selector=lambda op: True)
    )
    _, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")

    self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast"])

    # Asserting first cast configuration
    cast_1 = block.find_ops(op_type="cast")[0]
    self.assertEqual(cast_1.dtype.val, "fp16")
    self.assertEqual(len(cast_1.outputs), 1)
    self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
    self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square")

    # Asserting second cast configuration
    cast_2 = block.find_ops(op_type="cast")[1]
    self.assertEqual(cast_2.dtype.val, "fp32")
    self.assertEqual(len(cast_2.outputs), 1)
    self.assertEqual(len(cast_2.outputs[0].child_ops), 0)

    assert_model_is_valid(
        prog,
        {"x": (10, 20)},
        expected_output_shapes={block.outputs[0].name: (10, 20)},
    )
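# Numeric sketch (not part of the original suite; helper name is made up) of
# the cast sandwich the pass inserts around `square`:
# x(fp32) -> cast(fp16) -> square -> cast(fp32). The fp16 path should agree
# with fp32 compute to within fp16 precision.
def _fp16_cast_sandwich_demo():
    x = np.random.rand(10, 20).astype(np.float32)
    via_fp16 = np.square(x.astype(np.float16)).astype(np.float32)
    np.testing.assert_allclose(via_fp16, np.square(x), atol=1e-2, rtol=1e-2)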
def test_0(self, reverse_order):
    x_shape = tuple(np.random.randint(low=1, high=4, size=5))

    @mb.program(input_specs=[mb.TensorSpec(shape=x_shape)])
    def program(x):
        sigmoid_x = mb.sigmoid(x=x)
        if not reverse_order:
            x = mb.mul(x=x, y=sigmoid_x)
        else:
            x = mb.mul(x=sigmoid_x, y=x)
        return x

    prev_prog, prev_block, block = apply_pass_and_basic_check(
        program, "mil_backend::fuse_activation_silu"
    )

    assert get_op_types_in_program(prev_prog) == ["sigmoid", "mul"]
    assert get_op_types_in_program(program) == ["silu"]

    assert_model_is_valid(
        program=program,
        inputs={"x": x_shape},
        backend=("mlprogram", "fp32"),
        expected_output_shapes={block.outputs[0].name: tuple(x_shape)},
    )
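# Numpy sketch (not part of the original suite; helper name is made up) of the
# semantics behind the fusion: silu(x) == x * sigmoid(x), and elementwise
# multiplication is commutative, which is why both operand orders above fuse
# to a single `silu` op.
def _silu_equivalence_demo():
    x = np.random.randn(2, 3).astype(np.float32)
    sig = 1.0 / (1.0 + np.exp(-x))
    np.testing.assert_allclose(x * sig, sig * x)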