Code example #1
def test_conv2d():
    """
    Test the Conv2d module to ensure that it produces the exact same
    output as the primary torch implementation, in the same order.
    """

    model2 = nn2.Conv2d(1, 32, 3, bias=True)

    model = nn.Conv2d(
        in_channels=1,
        out_channels=32,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
    )

    model2.weight = model.weight
    model2.bias = model.bias

    data = th.rand(10, 1, 28, 28)

    out = model(data)

    out2 = model2(data)

    assert th.isclose(out, out2, atol=1e-6).all()
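A minimal setup sketch for the snippet above, since the listing omits its imports: `th` and `nn` are taken to be the standard PyTorch aliases, and `nn2` is assumed to be the Syft-side Conv2d implementation under test. The exact import path for `nn2` is an assumption and may differ across PySyft versions.

import torch as th
from torch import nn

# Assumed location of the Syft Conv2d implementation; adjust to your PySyft version.
import syft.frameworks.torch.nn as nn2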
Code example #2
File: test_nn.py  Project: gmuraru/PySyft
def test_conv2d(workers):
    """
    Test the nn.Conv2d module to ensure that it produces the exact same
    output as the primary torch implementation, in the same order.
    """
    # Truncation might not always work, so fix the random seed
    torch.manual_seed(121)

    # Disable mkldnn to avoid rounding errors due to implementation differences
    mkldnn_enabled_init = torch._C._get_mkldnn_enabled()
    torch._C._set_mkldnn_enabled(False)

    # Direct Import from Syft
    model = syft_nn.Conv2d(1, 2, 3, bias=True)
    model_1 = nn.Conv2d(1, 2, 3, bias=True)
    model.weight = model_1.weight.fix_prec()
    model.bias = model_1.bias.fix_prec()
    data = torch.rand(10, 1, 28, 28)  # e.g. MNIST-shaped data

    out = model(data.fix_prec()).float_prec()
    out_1 = model_1(data)

    assert torch.allclose(out, out_1, atol=1e-2)

    # Fixed Precision Tensor
    model_2 = model_1.copy().fix_prec()
    out_2 = model_2(data.fix_prec()).float_prec()

    # Note: absolute tolerance can be reduced by increasing precision_fractional of fix_prec()
    assert torch.allclose(out_1, out_2, atol=1e-2)

    # Additive Shared Tensor
    bob, alice, james = (workers["bob"], workers["alice"], workers["james"])
    shared_data = data.fix_prec().share(bob, alice, crypto_provider=james)

    model_3 = model_2.share(bob, alice, crypto_provider=james)
    out_3 = model_3(shared_data).get().float_prec()

    assert torch.allclose(out_1, out_3, atol=1e-2)

    # Reset mkldnn to the original state
    torch._C._set_mkldnn_enabled(mkldnn_enabled_init)
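The `workers` argument is a pytest fixture supplied by the PySyft test suite. A rough sketch of what it is assumed to provide follows (three VirtualWorkers created after hooking torch); the actual fixture lives in the project's conftest and may differ.

import torch
import syft as sy

hook = sy.TorchHook(torch)  # hook torch so tensors gain fix_prec() / share()
workers = {
    "bob": sy.VirtualWorker(hook, id="bob"),
    "alice": sy.VirtualWorker(hook, id="alice"),
    "james": sy.VirtualWorker(hook, id="james"),  # used as the crypto provider
}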
Code example #3
File: test_conv.py  Project: youben11/PySyft
def test_conv2d():
    """
    Test the Conv2d module to ensure that it produces the exact same
    output as the primary torch implementation, in the same order.
    """

    # Disable mkldnn to avoid rounding errors due to implementation differences
    mkldnn_enabled_init = th._C._get_mkldnn_enabled()
    th._C._set_mkldnn_enabled(False)

    model2 = nn2.Conv2d(1, 32, 3, bias=True)

    model = nn.Conv2d(
        in_channels=1,
        out_channels=32,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        padding_mode="zeros",
    )

    model2.weight = model.weight
    model2.bias = model.bias

    data = th.rand(10, 1, 28, 28)

    out = model(data)

    out2 = model2(data)

    # Reset mkldnn to the original state
    th._C._set_mkldnn_enabled(mkldnn_enabled_init)

    assert th.eq(out, out2).all()
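A design note on examples #2 and #3: if an assertion fails partway through, the mkldnn flag is never restored. A small sketch of a more defensive pattern, using only the calls already shown above and wrapping them in try/finally:

import torch as th

def run_with_mkldnn_disabled(test_body):
    # Remember the current flag, disable mkldnn, and restore it even if the test fails.
    mkldnn_enabled_init = th._C._get_mkldnn_enabled()
    th._C._set_mkldnn_enabled(False)
    try:
        test_body()
    finally:
        th._C._set_mkldnn_enabled(mkldnn_enabled_init)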