    def test_single_input_to_single_operation(self):
        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
        def prog(x):
            x = mb.square(x=x)
            return x

        self.assertEqual(get_op_types_in_program(prog), ["square"])

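        # FP16ComputePrecision casts each selected op's inputs to fp16 and its
        # output back to fp32; dead_code_elimination then removes any ops
        # orphaned by the rewrite, leaving cast -> square -> cast.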
        apply_pass_and_basic_check(
            prog, transform.FP16ComputePrecision(op_selector=lambda op: True))
        _, _, block = apply_pass_and_basic_check(
            prog, "common::dead_code_elimination")

        self.assertEqual(get_op_types_in_program(prog),
                         ["cast", "square", "cast"])

        # First cast: fp32 input -> fp16, feeding the square op
        cast_1 = block.find_ops(op_type="cast")[0]
        self.assertEqual(cast_1.dtype.val, "fp16")
        self.assertEqual(len(cast_1.outputs), 1)
        self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
        self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square")

        # Second cast: fp16 result -> fp32 for the block output
        cast_2 = block.find_ops(op_type="cast")[1]
        self.assertEqual(cast_2.dtype.val, "fp32")
        self.assertEqual(len(cast_2.outputs), 1)
        self.assertEqual(len(cast_2.outputs[0].child_ops), 0)

        assert_model_is_valid(
            prog,
            {"x": (10, 20)},
            expected_output_shapes={block.outputs[0].name: (10, 20)},
        )

    def test_divide_by_zero_operation(self):
        @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
        def prog(x):
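            # 1e-10 is below the smallest positive fp16 value (~6e-8), so
            # naively casting eps to fp16 would underflow it to 0 and turn
            # the division into x / 0.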
            eps = mb.const(val=1e-10)
            x = mb.real_div(x=x, y=eps)
            return x

        prev_prog, prev_block, block = apply_pass_and_basic_check(
            prog, transform.FP16ComputePrecision(op_selector=lambda op: True))

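        # Build a Core ML model from the in-memory MIL program so the fp16
        # graph is actually executed.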
        mlmodel = ct.convert(prog, source="milinternal")
        input_dict = {"x": np.random.rand(10, 20)}

        if _IS_MACOS:
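            # Core ML prediction is only supported on macOS.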
            prediction = mlmodel.predict(input_dict, useCPUOnly=True)
            assert not np.isnan(prediction["real_div_0"]).any()
            assert np.isfinite(prediction["real_div_0"]).all()