def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
  with self.test_session():
    input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)
    dequantized = array_ops.dequantize(input_op, min_range, max_range)
    tf_ans = dequantized.eval()

    # TODO(vrv): Add support for DT_QINT32 quantization if needed.
    type_dict = {
        dtypes.quint8: np.uint8,
        dtypes.qint8: np.int8,
        dtypes.quint16: np.uint16,
        dtypes.qint16: np.int16
    }
    self.assertTrue(dtype in type_dict.keys())
    v_max = np.iinfo(type_dict[dtype]).max
    v_min = np.iinfo(type_dict[dtype]).min
    self.assertTrue(min_range >= v_min)
    self.assertTrue(max_range <= v_max)
    type_range = v_max - v_min
    if v_min < 0:
      half_range = (type_range + 1) / 2
    else:
      half_range = 0.0

    np_ans = ((inputs.astype(np.float32) + half_range) *
              (max_range - min_range) / type_range) + min_range
    self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)

def _testDequantizeOp(self, inputs, min_range, max_range, dtype):
  with self.cached_session():
    input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)
    dequantized = array_ops.dequantize(input_op, min_range, max_range)
    tf_ans = dequantized.eval()

    # TODO(vrv): Add support for DT_QINT32 quantization if needed.
    type_dict = {
        dtypes.quint8: np.uint8,
        dtypes.qint8: np.int8,
        dtypes.quint16: np.uint16,
        dtypes.qint16: np.int16
    }
    self.assertTrue(dtype in type_dict.keys())
    v_max = np.iinfo(type_dict[dtype]).max
    v_min = np.iinfo(type_dict[dtype]).min
    self.assertTrue(min_range >= v_min)
    self.assertTrue(max_range <= v_max)
    type_range = v_max - v_min
    if v_min < 0:
      half_range = (type_range + 1) / 2
    else:
      half_range = 0.0

    np_ans = ((inputs.astype(np.float32) + half_range) *
              (max_range - min_range) / type_range) + min_range
    self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)

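# Standalone NumPy sketch (illustrative only, not part of the test class):
# the MIN_COMBINED reference formula used by the helper above shifts signed
# inputs by half the type range before rescaling, so the qint8 endpoints
# -128 and 127 land exactly on min_range and max_range. The default limits
# below assume qint8.
import numpy as np

def min_combined_reference(q, min_range, max_range, v_min=-128, v_max=127):
  type_range = v_max - v_min                               # 255 for qint8
  half_range = (type_range + 1) / 2 if v_min < 0 else 0.0  # 128 for qint8
  return ((q.astype(np.float32) + half_range) *
          (max_range - min_range) / type_range) + min_range

q = np.array([-128, 0, 127], dtype=np.int32)
print(min_combined_reference(q, -1.0, 2.0))  # -> [-1.  0.50588  2.]
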
def testAxis(self):
  # Generates a tensor of the specified `shape` using values from `values`
  # scaled by (slice_idx + 1) along `axis` dimension.
  def scale_per_slice(shape, axis, values):
    # Note: repeats the values if the shape is larger than values.
    out = np.take(values, np.remainder(np.arange(np.prod(shape)),
                                       len(values))).reshape(shape)
    if axis is not None:
      scale_shape = [1] * len(shape)
      scale_shape[axis] = shape[axis]
      out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
    return out

  shape = np.array([2, 3, 4, 5])
  values = np.array([-128, -64, 0, 38, 102, 71, 64], dtype=np.int32)
  dequant_values = np.array([-2, -1.0, 0, 0.59375, 1.59375, 1.109375, 1.0],
                            dtype=np.float32)
  for axis in [None, 0, 1, 2, 3]:
    inputs = constant_op.constant(
        scale_per_slice(shape, None, values), dtype=dtypes.qint8)
    expected_dequantized = scale_per_slice(shape, axis, dequant_values)
    if axis is None:
      min_range, max_range = -2.0, 1.6
    else:
      num_slices = shape[axis]
      min_range, max_range = [], []
      for slice_idx in range(num_slices):
        min_range.append(-2.0 * (slice_idx + 1))
        max_range.append(1.6 * (slice_idx + 1))
    dequantized = self.evaluate(
        array_ops.dequantize(
            inputs, min_range, max_range, mode="SCALED", axis=axis))
    self.assertAllEqual(dequantized, expected_dequantized)
    if axis is not None:
      dequantized = self.evaluate(
          array_ops.dequantize(
              inputs, min_range, max_range, mode="SCALED", axis=(axis - 4)))
      self.assertAllClose(dequantized, expected_dequantized)

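# Standalone NumPy sketch (illustrative only, not part of the test class):
# reproduces the `dequant_values` used in testAxis from the SCALED-mode
# reference formula checked by the _testDequantizeOp helper, assuming qint8
# limits of [-128, 127]. With min_range=-2.0 and max_range=1.6 the scale is
# max(2.0/128, 1.6/127) = 1/64, and per-slice ranges simply multiply it.
import numpy as np

def scaled_dequantize_reference(q, min_range, max_range, v_min=-128, v_max=127):
  scale = max(min_range / v_min, max_range / v_max)
  return q.astype(np.float32) * scale

q = np.array([-128, -64, 0, 38, 102, 71, 64], dtype=np.int32)
print(scaled_dequantize_reference(q, -2.0, 1.6))
# -> [-2. -1. 0. 0.59375 1.59375 1.109375 1.]  (matches `dequant_values`)
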
def testDequantizeOp(self):
  expected_output = [1.0, 2.0, 4.0, 8.0, 16.0, 255.0]
  inp = np.array([1, 2, 4, 8, 16, 255]).astype(np.uint8)
  with self.session(use_gpu=False):
    x = constant_op.constant(inp, shape=[6], dtype=dtypes.quint8)
    x_min = 0.0
    x_max = 255.0
    op = array_ops.dequantize(x, x_min, x_max, mode="MIN_FIRST")
    value = self.evaluate(op)
    self.assertArrayNear(expected_output, value, 0.1)

def testDequantizeOp(self):
  expected_output = [1.0, 2.0, 4.0, 8.0, 16.0, 255.0]
  inp = np.array([1, 2, 4, 8, 16, 255]).astype(np.uint8)
  with self.test_session(use_gpu=False) as sess:
    x = constant_op.constant(inp, shape=[6], dtype=dtypes.quint8)
    x_min = 0.0
    x_max = 255.0
    op = array_ops.dequantize(x, x_min, x_max, mode="MIN_FIRST")
    value = sess.run(op)
    self.assertArrayNear(expected_output, value, 0.1)

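# Sketch of the expectation above (an assumption stated for illustration,
# not code from the test file): with x_min=0.0 and x_max=255.0 the quint8
# codes map one-to-one onto [0, 255], so MIN_FIRST dequantization is, within
# the 0.1 tolerance used above, just a cast to float.
import numpy as np

inp = np.array([1, 2, 4, 8, 16, 255], dtype=np.uint8)
expected_output = inp.astype(np.float32)  # [1., 2., 4., 8., 16., 255.]
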
def _testDequantizeOp(self,
                      inputs,
                      min_range,
                      max_range,
                      dtype,
                      mode="MIN_COMBINED",
                      narrow_range=False):
  with self.cached_session():
    input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)
    dequantized = array_ops.dequantize(
        input_op, min_range, max_range, mode=mode, narrow_range=narrow_range)
    tf_ans = self.evaluate(dequantized)

    # TODO(vrv): Add support for DT_QINT32 quantization if needed.
    type_dict = {
        dtypes.quint8: np.uint8,
        dtypes.qint8: np.int8,
        dtypes.quint16: np.uint16,
        dtypes.qint16: np.int16
    }
    self.assertIn(dtype, type_dict.keys())
    v_max = np.iinfo(type_dict[dtype]).max
    v_min = np.iinfo(type_dict[dtype]).min
    self.assertGreaterEqual(min_range, v_min)
    self.assertLessEqual(max_range, v_max)
    type_range = v_max - v_min

    if mode == "MIN_COMBINED":
      if v_min < 0:
        half_range = (type_range + 1) / 2
      else:
        half_range = 0.0
      np_ans = ((inputs.astype(np.float32) + half_range) *
                (max_range - min_range) / type_range) + min_range
    elif mode == "SCALED":
      if narrow_range:
        v_min += 1
      scale_factor = max(min_range / v_min, max_range / v_max)
      np_ans = inputs.astype(np.float32) * scale_factor

    self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)

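# Hypothetical callers of the helper above (illustrative sketches; the
# concrete input arrays and ranges in the real suite may differ). Each test
# method drives _testDequantizeOp once per quantized dtype and mode, and the
# helper recomputes the expected values with NumPy.
def testBasicQuint8(self):
  self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, dtypes.quint8)

def testScaledQint8(self):
  self._testDequantizeOp(
      np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8, mode="SCALED")
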