Code example #1
File: quantizer.py  Project: yanboliang/pytorch
    def dequantize(self, apot_tensor) -> Tensor:
        apot_tensor_data = apot_tensor.data

        # map apot_to_float over the elements of apot_tensor_data
        # (Tensor.apply_ modifies the tensor in place and only works on CPU tensors)
        result = apot_tensor_data.apply_(lambda x: float(apot_to_float(x, self.quantization_levels, self.level_indices)))

        return result
Code example #2
File: quantizer.py  Project: tongxin/pytorch
    def dequantize(self, float2apot: Tensor):  # type: ignore[override]
        float2apot = float2apot.float()

        quantization_levels = self.quantization_levels
        level_indices = self.level_indices

        # map apot_to_float over the elements of float2apot
        # (Tensor.apply_ modifies the tensor in place and only works on CPU tensors)
        result = float2apot.apply_(lambda x: float(
            apot_to_float(x, quantization_levels, level_indices)))

        return result
Code example #3
    def quant_levels_visualization(self, obs_result, filename):
        # sweep the interval [0, 1) and plot each input against its
        # quantize -> dequantize round trip; obs_result[1] and obs_result[2]
        # are the observer's quantization levels and level indices
        xs = [float(x) / 1000.0 for x in range(1000)]
        ys = [apot_to_float(float_to_apot(x, obs_result[1], obs_result[2]),
                            obs_result[1], obs_result[2]).item() for x in xs]

        plt.figure(figsize=(15, 10))

        plt.plot(xs, ys)
        plt.title("APoT Quantization Plot")
        plt.xlabel("Full Precision")
        plt.ylabel("Quantized")
        plt.show()
Code example #4
    def dequantize(self, apot_tensor) -> Tensor:
        apot_tensor_data = apot_tensor.data

        # map apot_to_float over the elements of the quantized tensor
        # (assumes a 1-D tensor; np.append returns a new array, so fill by index instead)
        result_temp = np.empty(apot_tensor_data.size())
        for i, ele in enumerate(apot_tensor_data):
            result_temp[i] = apot_to_float(ele, self.quantization_levels,
                                           self.level_indices)

        # keep the dequantized values in floating point rather than truncating to int
        result = torch.from_numpy(result_temp).float()

        return result
Code example #5
File: observer.py  Project: huaxz1986/pytorch
    def quant_levels_visualization(self, signed=False):
        alpha, gamma, quantization_levels, level_indices = self.calculate_qparams(signed)

        # sweep the interval [0, 1) and plot each input against its
        # quantize -> dequantize round trip
        xs = [float(x) / 1000.0 for x in range(1000)]
        ys = [apot_to_float(float_to_apot(x, quantization_levels, level_indices, alpha),
                            quantization_levels, level_indices).item() for x in xs]

        plt.figure(figsize=(15, 10))

        plt.plot(xs, ys)
        plt.title("APoT Quantization Plot")
        plt.xlabel("Full Precision")
        plt.ylabel("Quantized")
        plt.show()
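
The two visualization examples above also round-trip through float_to_apot, which is not reproduced on this page. A minimal sketch, assuming it saturates the input to [-alpha, alpha] when alpha is given and returns the APoT code of the nearest quantization level (parameter names follow the calls above; the body is an illustration, not the project's actual implementation):

def float_to_apot(x, quantization_levels, level_indices, alpha=None):
    # assumption: saturate to the observed range when alpha is provided
    if alpha is not None:
        x = max(-alpha, min(alpha, x))
    # pick the quantization level closest to x ...
    best_idx = min(range(len(quantization_levels)),
                   key=lambda i: abs(float(quantization_levels[i]) - x))
    # ... and return its APoT code from the index table
    return level_indices[best_idx]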
Code example #6
File: quantizer.py  Project: huaxz1986/pytorch
    def dequantize(self, apot_tensor) -> Tensor:
        orig_size = apot_tensor.data.size()
        apot_tensor_data = apot_tensor.data.flatten()

        # map apot_to_float over the flattened tensor's elements
        result_temp = np.empty(shape=apot_tensor_data.size())
        for i in range(len(apot_tensor_data)):
            new_ele = apot_to_float(apot_tensor_data[i],
                                    self.quantization_levels,
                                    self.level_indices)
            result_temp[i] = new_ele

        # restore the original shape of the input tensor
        result = torch.from_numpy(result_temp).reshape(orig_size)

        return result
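
Every example on this page relies on apot_to_float, whose definition is not shown. A minimal sketch, assuming the APoT value is a code stored in level_indices and dequantization is a table lookup into quantization_levels (signature inferred from the calls above; not the actual torch.ao.quantization.experimental implementation):

def apot_to_float(apot_val, quantization_levels, level_indices):
    # assumption: level_indices is a 1-D tensor that contains the APoT code;
    # locate the code in the index table ...
    idx = int((level_indices == float(apot_val)).nonzero()[0].item())
    # ... and return the matching full-precision quantization level
    return quantization_levels[idx]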