Example #1
import torch
import torch.nn as nn


def backward_hook(module: nn.Module, grad_input: tuple,
                  grad_output: tuple) -> None:
    # grad_input and grad_output arrive as tuples of tensors, one entry per
    # input/output of the module.
    if isinstance(module, nn.Conv2d):
        # Look up the activation captured by the forward hook for this layer.
        # QuantizationCrunch and quantize are defined elsewhere in the source.
        inp = QuantizationCrunch[str(module.__hash__())]["input"]
        quant_out, quant_weights = quantize(module, module.weight, module.bias,
                                            inp)
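
For context, a minimal registration sketch under assumptions not stated in the source: backward_hook is the function above, model is an arbitrary placeholder nn.Module, and register_full_backward_hook is chosen because it delivers grad_input/grad_output as tuples, matching the hook's signature (the original code may have used the older register_backward_hook instead).

import torch.nn as nn

# Placeholder model; the real layer sizes come from whatever network is quantized.
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 16, 3))

handles = []
for m in model.modules():
    if isinstance(m, nn.Conv2d):
        # Attach the quantizing backward hook to every Conv2d layer and keep
        # the handle so the hook can be detached later.
        handles.append(m.register_full_backward_hook(backward_hook))

# ... run the forward and backward passes here ...

for h in handles:
    h.remove()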
Example #2
def forward_hook(module: nn.Module, input: tuple,
                 output: torch.Tensor) -> None:
    # Cache the pre-quantization input and output activations of each Conv2d
    # layer, keyed by the module's hash, so the backward hook can reuse them.
    # QuantizationCrunch is a module-level dict defined elsewhere in the source.
    if isinstance(module, nn.Conv2d):
        QuantizationCrunch[str(module.__hash__())] = {
            "input": input[0],
            "output": output[0]
        }
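
A short usage sketch, again under assumptions not present in the source: QuantizationCrunch is taken to be a plain dict defined in the same module as forward_hook, and the model and input shapes are arbitrary placeholders.

import torch
import torch.nn as nn

QuantizationCrunch = {}  # assumed: a plain dict used as the activation cache

# Placeholder model; any network containing Conv2d layers would work the same way.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
for m in model.modules():
    if isinstance(m, nn.Conv2d):
        m.register_forward_hook(forward_hook)

x = torch.randn(1, 3, 32, 32)
_ = model(x)

# Each Conv2d layer now has its last input/output activations cached under
# its hash key, ready for the backward hook to consume.
for key, tensors in QuantizationCrunch.items():
    print(key, tensors["input"].shape, tensors["output"].shape)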