import torch
import torch.nn as nn
import torch.nn.intrinsic as nni

def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
    # Return the detached weight of a plain nn.Linear, a fused LinearReLU
    # (index 0 is the underlying Linear), or a quantized linear module.
    if isinstance(mod, nn.Linear):
        return mod.weight.detach()
    elif isinstance(mod, nni.LinearReLU):
        return mod[0].weight.detach()
    else:
        # Quantized linear: _weight_bias() returns a (weight, bias) pair.
        return mod._weight_bias()[0]  # type: ignore[operator]
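A minimal usage sketch (module sizes chosen arbitrarily), assuming the imports and helper defined just above:

float_linear = nn.Linear(4, 8)
print(get_linear_mod_weight(float_linear).shape)   # torch.Size([8, 4])

# Fused Linear + ReLU: the helper reads the weight from mod[0], the inner Linear.
fused = nni.LinearReLU(nn.Linear(4, 8), nn.ReLU())
print(get_linear_mod_weight(fused).shape)          # torch.Size([8, 4])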
def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
    # Near-duplicate of the first helper above; requires the same torch / nn / nni imports.
    # TODO(future PR): make more generic, handle everything
    if isinstance(mod, nn.Linear):
        return mod.weight.detach()
    elif isinstance(mod, nni.LinearReLU):
        return mod[0].weight.detach()
    else:
        return mod._weight_bias()[0]  # type: ignore
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni

def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
    # Return the detached weight of a plain ConvNd, a fused ConvReLUNd
    # (index 0 is the underlying conv), or a quantized conv module.
    if isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return mod.weight.detach()
    elif isinstance(mod, (nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d)):
        return mod[0].weight.detach()
    else:
        # Quantized conv: _weight_bias() returns a (weight, bias) pair.
        return mod._weight_bias()[0]  # type: ignore[operator]
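A minimal usage sketch for the float and fused branches (shapes chosen arbitrarily), assuming the imports and helper just above:

conv = nn.Conv2d(3, 16, kernel_size=3)
print(get_conv_mod_weight(conv).shape)             # torch.Size([16, 3, 3, 3])

fused_conv = nni.ConvReLU2d(nn.Conv2d(3, 16, 3), nn.ReLU())
print(get_conv_mod_weight(fused_conv).shape)       # torch.Size([16, 3, 3, 3])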
Example #4
def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
    # Variant of the conv helper above; requires the same torch / nn / nni imports.
    # TODO(future PR): handle QAT variants
    if isinstance(mod, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        return mod.weight.detach()
    elif isinstance(mod, (nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d)):
        return mod[0].weight.detach()
    else:
        return mod._weight_bias()[0]  # type: ignore
Example #5
def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
    # Assumes mod is a quantized module whose private _weight_bias() method
    # returns a (weight, bias) pair; element 0 is the weight.
    return mod._weight_bias()[0]  # type: ignore[operator]
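A hedged sketch of the quantized path, assuming torch.nn.quantized.Linear exposes the private _weight_bias() method these helpers rely on and a quantized backend is available:

import torch.nn.quantized as nnq

qlinear = nnq.Linear(4, 8)                  # quantized Linear with default-initialized weights
qweight = mod_weight_bias_0(qlinear)        # element 0 of _weight_bias(): the quantized weight
print(qweight.shape)                        # torch.Size([8, 4])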
Example #6
def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
    # Simpler variant (same torch / nn imports): only float nn.Conv2d is
    # special-cased; everything else is assumed to expose _weight_bias().
    # TODO(future PR): make more generic, handle everything
    if isinstance(mod, nn.Conv2d):
        return mod.weight.detach()
    else:
        return mod._weight_bias()[0]  # type: ignore
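A hedged sketch of the fallback branch, assuming a quantized conv module such as torch.nn.quantized.Conv2d that exposes _weight_bias() and a quantized backend is available:

import torch.nn.quantized as nnq

qconv = nnq.Conv2d(3, 16, kernel_size=3)    # quantized Conv2d, default-initialized
print(get_conv_mod_weight(qconv).shape)     # torch.Size([16, 3, 3, 3]), quantized weight tensor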