Example #1
def conv2d_relu_forward(input,
                        weight,
                        bias,
                        output,
                        dilation,
                        block_size=(0, 16, 16)):
    cc.conv_relu_forward(input,
                         weight,
                         bias,
                         output,
                         dilation,
                         block_size=block_size)
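A usage sketch (assuming cc is the compiled CUDA extension and all tensors live on the GPU; the kernel writes its result into a preallocated output tensor with the same spatial size as the input):

import torch

x = torch.randn(1, 3, 64, 64, dtype=torch.double).cuda()  # input
w = torch.randn(5, 3, 3, 3, dtype=torch.double).cuda()    # (C_out, C_in, 3, 3)
b = torch.randn(5, dtype=torch.double).cuda()             # one bias per output channel
y = torch.zeros(1, 5, 64, 64, dtype=torch.double).cuda()  # preallocated output
conv2d_relu_forward(x, w, b, y, dilation=1)               # y now holds relu(conv(x))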
Example #2
def conv3d_relu_forward(input,
                        weight,
                        bias,
                        output,
                        dilation,
                        block_size=(2, 2, 32)):
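    # Same fused kernel as the 2D wrapper above; only the default CUDA
    # block size differs, tuned for 3D data.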
    cc.conv_relu_forward(input,
                         weight,
                         bias,
                         output,
                         dilation,
                         block_size=block_size)
Example #3
def test_conv_values():
    """Compare to a PyTorch convolution

    Check that the custom convolution agrees with a PyTorch convolution
    on the interior of the output, where the reflected boundary
    handling has no effect.
    """
    dtype = torch.double
    intensive = False

    if intensive:
        test_params = [(b, c_in, c_out, dil, size) for b in [1, 3, 5]
                       for c_in in [1, 3, 4] for c_out in [1, 2, 3]
                       for dil in range(1, 10)
                       for size in [dil * 2 + 1, 50, 1023]]
    else:
        test_params = [(b, c_in, c_out, dil, size) for b in [2]
                       for c_in in [3] for c_out in [5] for dil in [1, 3, 10]
                       for size in [dil * 2 + 1, 29, 50]]

    for (B, C_in, C_out, dilation, size) in test_params:
        shape = (size, 2 * size)

        # Execute my own implementation
        x = torch.randn(B, C_in, *shape, dtype=dtype).cuda()
        k = torch.randn(C_out, C_in, 3, 3, dtype=dtype).cuda()
        bias = torch.randn(C_out, dtype=dtype).cuda()
        y = torch.zeros(B, C_out, *shape, dtype=dtype).cuda()
        cc.conv_relu_forward(x, k, bias, y, dilation)

        # Execute the PyTorch reference convolution:
        conv_torch = torch.nn.Conv2d(C_in,
                                     C_out,
                                     3,
                                     padding=dilation,
                                     dilation=dilation).cuda()
        conv_torch.weight.data = k
        conv_torch.bias.data = bias
        y1 = torch.nn.functional.relu(conv_torch(x))

        # Check shapes
        assert y1.shape == y.shape

        # Check the center of the output, where both implementations
        # should agree (only the boundary handling differs).
        d = dilation
        y_ = y[:, :, d:-d, d:-d]
        y1_ = y1[:, :, d:-d, d:-d]
        assert torch_equal(y1_, y_), (
            f"For shape {shape}, dilation {dilation}, and bias {bias}:\n"
            f"Your implementation:\n{y}"
            f"\nPyTorch:\n{y1}")
Example #4
def test_dtype_check():
    """Test that dtype checks are performed correctly

    Mixing a float input/output with double weights/bias must raise a
    RuntimeError.
    """
    d0 = torch.float
    d1 = torch.double
    dilation = 1

    x = torch.zeros(1, 1, 5, 5, dtype=d0).cuda()
    y = torch.zeros(1, 1, 5, 5, dtype=d0).cuda()
    bias = torch.zeros(1, dtype=d1).cuda()
    k = torch.zeros(1, 1, 3, 3, dtype=d1).cuda()

    with pytest.raises(RuntimeError):
        cc.conv_relu_forward(x, k, bias, y, dilation)
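The check itself lives inside the extension; the following is only a minimal Python sketch of the kind of validation it presumably performs (the helper name _check_same_dtype is hypothetical):

def _check_same_dtype(*tensors):
    # Hypothetical helper: all tensors passed to the kernel must share
    # one dtype; mixing float and double raises RuntimeError, as the
    # test above expects.
    dtypes = {t.dtype for t in tensors}
    if len(dtypes) != 1:
        raise RuntimeError(f"expected matching dtypes, got: {dtypes}")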
Example #5
def test_conv():
    """Test convolution

    Check that an all-zero convolution runs without runtime errors and
    overwrites the entire output buffer with zeros.
    """
    dtype = torch.float  # or torch.double
    dilation = 1

    x = torch.zeros(1, 1, 5, 5, dtype=dtype).cuda()
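    # Initialize the output to ones so that the assert below also
    # catches a kernel that fails to write the full output buffer.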
    y = torch.ones(1, 1, 5, 5, dtype=dtype).cuda()
    bias = torch.zeros(1, dtype=dtype).cuda()
    k = torch.zeros(1, 1, 3, 3, dtype=dtype).cuda()

    cc.conv_relu_forward(x, k, bias, y, dilation)
    assert y.sum().item() == approx(0.0)
Example #6
    def forward(ctx, input, dilations, bias, *weights):
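        # Dense forward pass: the result buffer starts with the input
        # channels; each layer convolves everything computed so far and
        # appends its own output channels.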
        depth = len(dilations)
        assert depth == len(weights), "number of weights does not match depth"

        num_out_channels = sum(w.shape[0] for w in weights)
        assert (
            len(bias) == num_out_channels
        ), "number of biases does not match number of output channels from weights"

        ctx.dilations = dilations
        ctx.depth = depth

        result = input.new_empty(input.shape[0],
                                 input.shape[1] + num_out_channels,
                                 *input.shape[2:])

        # Copy input into result buffer
        result[:, :input.shape[1]] = input

        result_start = input.shape[1]
        bias_start = 0

        for i in range(depth):
            # Extract variables
            sub_input = result[:, :result_start]
            sub_weight = weights[i]
            blocksize = sub_weight.shape[0]
            sub_bias = bias[bias_start:bias_start + blocksize]
            sub_result = result[:, result_start:result_start + blocksize]
            dilation = ctx.dilations[i]

            # Compute convolution. conv_relu_forward computes the
            # convolution and relu in one pass and stores the
            # output in sub_result.
            cc.conv_relu_forward(sub_input, sub_weight, sub_bias, sub_result,
                                 dilation)

            # Advance the channel offsets for the next layer
            result_start += blocksize
            bias_start += blocksize

        ctx.save_for_backward(bias, result, *weights)

        return result
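For reference, the same dense forward pass can be sketched in plain PyTorch (a minimal 2D sketch assuming 3x3 kernels; note that F.conv2d zero-pads here, while the fused CUDA kernel reflects at the boundary, so the outputs only agree in the interior):

import torch
import torch.nn.functional as F

def dense_forward_reference(input, dilations, bias, weights):
    result = input
    bias_start = 0
    for weight, dilation in zip(weights, dilations):
        n_out = weight.shape[0]
        b = bias[bias_start:bias_start + n_out]
        # Convolve all channels computed so far, apply ReLU, and append
        # the new channels to the running result.
        out = F.relu(F.conv2d(result, weight, b,
                              padding=dilation, dilation=dilation))
        result = torch.cat([result, out], dim=1)
        bias_start += n_out
    return result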
Example #7
def conv3d_relu_forward(input, weight, bias, output, grad_output,
                        grad_input, grad_weight, dilation,
                        block_size=(8, 8, 8)):
    # Forward pass only; the gradient buffers in the signature are not
    # used by this call.
    cc.conv_relu_forward(input, weight, bias, output, dilation,
                         block_size=block_size)