Example #1
    def test_skipped_ops(self):
        with context.eager_mode():
            x = constant_op.constant(np.ones((1, 1, 1, 1)).astype(np.float32))

            # Cast is on the hardcoded list of ops to skip
            gen_math_ops.cast(x, dtypes.float64)
            self.assertEmpty(self._get_new_node_defs())

            gen_nn_ops.conv2d(x, x, [1, 1, 1, 1], 'SAME')
            y = constant_op.constant(np.zeros((1, 1, 1, 1)).astype(np.float32))
            # Duplicate ops are skipped, even if input values are different
            gen_nn_ops.conv2d(x, y, [1, 1, 1, 1], 'SAME')
            if not IsMklEnabled():
                self.assertLen(self._get_new_node_defs(), 1)
            else:
                ndefs = self._get_new_node_defs()
                if len(ndefs) >= 2 and ndefs[0].op != ndefs[1].op:
                    # One of the ops got rewritten by oneDNN optimization pass
                    self.assertLen(ndefs, 2)
                else:
                    self.assertLen(ndefs, 1)

            x = constant_op.constant(
                np.ones((1, 1, 1, 1, 1, 1)).astype(np.float32))
            paddings = constant_op.constant(np.ones((6, 2)).astype(np.int32))
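            # Note: paddings holds 6 * 2 = 12 int32 elements, exceeding the
            # 10-element limit mentioned below.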
            constant_values = constant_op.constant(0.)
            # If a host int32 input has more than 10 elements, the op is skipped
            gen_array_ops.pad_v2(x, paddings, constant_values)
            self.assertEmpty(self._get_new_node_defs())
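
As an aside, gen_nn_ops.conv2d used above is the generated low-level wrapper for the Conv2D op. A minimal public-API sketch of the same call, for context only (not part of the original test):

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.ones((1, 1, 1, 1), dtype=np.float32))
    # x serves as both the NHWC input and the 1x1 HWIO filter, mirroring
    # gen_nn_ops.conv2d(x, x, [1, 1, 1, 1], 'SAME') from the test.
    y = tf.nn.conv2d(x, x, strides=[1, 1, 1, 1], padding='SAME')
    print(y.numpy())  # [[[[1.]]]], a 1x1 convolution of ones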
Example #2
    def test_host_int32_inputs(self):
        with context.eager_mode():
            x = constant_op.constant(np.ones((2, 2)).astype(np.float32))
            paddings = constant_op.constant([[1, 2], [3, 4]])
            constant_values = constant_op.constant(0.)
            gen_array_ops.pad_v2(x, paddings, constant_values)
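            # PadV2's paddings input is a host int32 tensor, so the writer
            # should record its value (checked below).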
            node_defs = self._get_new_node_defs()
            self.assertLen(node_defs, 1)
            (node_def,) = node_defs  # pylint: disable=unbalanced-tuple-unpacking
            self.assertEqual(node_def.op, 'PadV2')
            self.assertEqual(self._get_input_dtypes(node_def),
                             [dtypes.float32, dtypes.int32, dtypes.float32])
            self.assertEqual(self._get_input_shapes(node_def),
                             [(2, 2), (2, 2), ()])
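            # Only the int32 paddings input (input 1) has its concrete value
            # recorded; the float32 inputs are captured as dtype/shape only.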
            self.assertIsNone(self._get_input_tensor(node_def, 0))
            self.assertAllEqual(self._get_input_tensor(node_def, 1),
                                np.array([[1, 2], [3, 4]]))
            self.assertIsNone(self._get_input_tensor(node_def, 2))
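
For context, gen_array_ops.pad_v2 is the generated wrapper behind TensorFlow's padding ops; the public tf.pad API performs the same operation. A minimal sketch (not part of the original test):

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.ones((2, 2), dtype=np.float32))
    paddings = tf.constant([[1, 2], [3, 4]])
    padded = tf.pad(x, paddings, constant_values=0.0)
    print(padded.shape)  # rows: 2 + 1 + 2 = 5, cols: 2 + 3 + 4 = 9 -> (5, 9)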
Example #3
    def test_skipped_ops(self):
        with context.eager_mode():
            x = constant_op.constant(np.ones((1, 1, 1, 1)).astype(np.float32))

            # Cast is on the hardcoded list of ops to skip
            gen_math_ops.cast(x, dtypes.float64)
            self.assertEmpty(self._get_new_node_defs())

            gen_nn_ops.conv2d(x, x, [1, 1, 1, 1], 'SAME')
            y = constant_op.constant(np.zeros((1, 1, 1, 1)).astype(np.float32))
            # Duplicate ops are skipped, even if input values are different
            gen_nn_ops.conv2d(x, y, [1, 1, 1, 1], 'SAME')
            self.assertLen(self._get_new_node_defs(), 1)

            x = constant_op.constant(
                np.ones((1, 1, 1, 1, 1, 1)).astype(np.float32))
            paddings = constant_op.constant(np.ones((6, 2)).astype(np.int32))
            constant_values = constant_op.constant(0.)
            # If a host int32 input has more than 10 elements, the op is skipped
            gen_array_ops.pad_v2(x, paddings, constant_values)
            self.assertEmpty(self._get_new_node_defs())