# Assumed imports: these snippets rely on NumPy, PyTorch, and the `compute`
# reference module from the ai8x-synthesis tooling being importable.
import numpy as np
import torch

import compute


def linear(data, weight, bias, expected):
    """Fully conected (linear) transformation."""

    print('Input:', data)

    t = torch.nn.functional.linear(
        torch.as_tensor(data, dtype=torch.float).flatten().unsqueeze(0),  # Add batch dimension
        torch.as_tensor(weight, dtype=torch.float),
        torch.as_tensor(bias, dtype=torch.float) if bias is not None else None,
    ).int().squeeze().numpy()

    output = compute.linear(
        0,
        data.reshape(-1),
        weight,
        bias,
        in_features=data.size,
        out_features=weight.shape[0],
    )

    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)

    # MLP emulation: a linear layer is equivalent to a 1x1 Conv2d applied to a
    # 1x1 "image" whose channels are the in_features
    emu_output = compute.conv2d(
        data.reshape(-1)[:, np.newaxis, np.newaxis],
        weight[:, :, np.newaxis, np.newaxis],
        bias,
        [data.size, 1, 1],
        [expected.shape[0], 1, 1],
        kernel_size=[1, 1],
        stride=[1, 1],
        pad=[0, 0],
        dilation=[1, 1],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
    ).squeeze()

    print("MLP EMULATION OK" if np.array_equal(emu_output, t) else "*** FAILURE ***")
    assert np.array_equal(emu_output, t)

    print(output)
    assert np.array_equal(output, expected)

    print('Output before division:', output)
    output += 64
    output //= 128
    print('Output:', output)

    expected += 64
    expected //= 128

    print('Expected:', expected)
    print("SUCCESS" if np.array_equal(output, expected) else "*** FAILURE ***")

    assert np.array_equal(output, expected)
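
# Minimal smoke test for linear() above (hypothetical values, following the
# fixed-point convention of the asserts: `expected` holds the raw accumulator
# values before the +64, //128 rescale):
linear(
    np.array([64, -64], dtype=np.int64),
    np.array([[1, 2], [3, 4]], dtype=np.int64),
    None,
    np.array([-64, -64], dtype=np.int64),
)
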
def convolve(dilation, data, weight, expected):
    """Convolve data"""
    print('Input:\n', data)

    t = torch.nn.functional.conv2d(
        torch.as_tensor(data,
                        dtype=torch.float).unsqueeze(0),  # Add batch dimension
        torch.as_tensor(weight, dtype=torch.float),
        bias=None,
        stride=1,
        padding=1,  # Keeps data dimensions (only when dilation == 1)
        groups=1,
        dilation=dilation,
    ).int().squeeze().numpy()

    print(t.shape)

    output = compute.conv2d(
        data,
        weight,
        None,
        data.shape,
        expected.shape,
        kernel_size=[3, 3],
        stride=[1, 1],
        pad=[1, 1],
        dilation=[dilation, dilation],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
    )

    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)

    print('Output before division:\n', output)
    output += 64
    output //= 128
    print('Output:\n', output)

    print('Expected:\n', expected)
    print("SUCCESS" if np.array_equal(output, expected) else "*** FAILURE ***")
    assert np.array_equal(output, expected)
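
# Minimal smoke test for convolve() above (hypothetical values): center-only
# 3x3 taps leave a uniform raw output, so `expected` is simply the rescaled
# (raw + 64) // 128 result.
_w = np.zeros((2, 1, 3, 3), dtype=np.int64)
_w[0, 0, 1, 1] = 1
_w[1, 0, 1, 1] = 2
convolve(
    1,  # dilation
    np.full((1, 3, 3), 128, dtype=np.int64),
    _w,
    np.stack((np.ones((3, 3), dtype=np.int64),
              np.full((3, 3), 2, dtype=np.int64))),
)
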
def convolve(data, weight, expected):
    """Convolve data"""
    print('Input:\n', data)

    t = torch.nn.functional.conv2d(
        torch.as_tensor(data,
                        dtype=torch.float).unsqueeze(0),  # Add batch dimension
        torch.as_tensor(weight, dtype=torch.float),
        bias=None,
        stride=[1, 1],
        padding=0,  # A 1x1 kernel keeps data dimensions without padding
        groups=1,
        dilation=1,
    ).int().squeeze().numpy()

    output = compute.conv2d(
        data,
        weight,
        None,
        data.shape,
        expected.shape,
        kernel_size=[1, 1],
        stride=[1, 1],
        pad=[0, 0],
        dilation=[1, 1],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
    )

    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)

    print('Output before division:\n', output)
    output += 64
    output //= 128
    print('Output:\n', output)

    print('Expected:\n', expected)
    print("SUCCESS" if np.array_equal(output, expected) else "*** FAILURE ***")
    assert np.array_equal(output, expected)

    # Create 3x3 weights from 1x1 weights
    # and emulate using 3x3 kernels
    shape33 = (weight.shape[0], weight.shape[1], 3, 3)
    weight33 = np.zeros(shape33, dtype=np.int64)
    weight33[:, :, 1, 1] = weight[:, :, 0, 0]

    output = compute.conv2d(
        data,
        weight33,
        None,
        data.shape,
        expected.shape,
        kernel_size=[3, 3],
        stride=[1, 1],
        pad=[1, 1],
        dilation=[1, 1],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
    )
    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)
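
# Minimal smoke test for the pointwise convolve() variant above (hypothetical
# values, same fixed-point convention as before):
_w = np.zeros((2, 1, 1, 1), dtype=np.int64)
_w[0, 0, 0, 0] = 1
_w[1, 0, 0, 0] = 2
convolve(
    np.full((1, 2, 2), 128, dtype=np.int64),
    _w,
    np.stack((np.ones((2, 2), dtype=np.int64),
              np.full((2, 2), 2, dtype=np.int64))),
)
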
def convolve1d_via2d(data, weight, pad=1, stride=1, dilation=1, expected=None):
    """Convolve 1D data with dilation in 2D"""
    print('Input:\n', data)
    assert 0 <= pad <= 1  # Must be 0 or 1 so we can use 1-pad in 2D without edge issues
    assert stride == 1  # This is generally true for AI85
    assert 1 <= weight.shape[2] <= 3

    t = torch.nn.functional.conv1d(
        torch.as_tensor(data,
                        dtype=torch.float).unsqueeze(0),  # Add batch dimension
        torch.as_tensor(weight, dtype=torch.float),
        bias=None,
        stride=stride,
        padding=pad,
        groups=1,
        dilation=dilation,
    ).int().squeeze().numpy()

    # print(t.shape)
    # print('PyTorch 1D:\n', (t + 64) // 128)

    # Compute the expected output length so anything extra created by padding can be
    # discarded (this simplified formula is valid because stride == 1)
    expected_length = (data.shape[1] - dilation *
                       (weight.shape[2] - 1) + 2 * pad) // stride
    assert t.shape[1] == expected_length
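    # For example (hypothetical): input length 4, kernel 3, dilation 2, pad 1
    # gives (4 - 2 * (3 - 1) + 2 * 1) // 1 = 2 output samples.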

    # If pad = 1, insert a 0 on left and right. On MAX78000, this would be achieved by manipulating
    # the in_offset.
    if pad == 1:
        d_padded = np.insert(data, [0, data.shape[1]], 0, axis=1)
    else:
        d_padded = data

    # Pad out the input data so it can be folded
    if data.shape[1] % dilation != 0:
        d_padded = np.append(
            d_padded,
            np.zeros(
                (d_padded.shape[0], dilation - d_padded.shape[1] % dilation),
                dtype=np.int64),
            axis=1)
    # Fold the data and create the new weights
    d_folded = d_padded.reshape(d_padded.shape[0],
                                d_padded.shape[1] // dilation, dilation)
    w_folded = np.insert(
        weight.reshape(weight.shape[0], weight.shape[1], weight.shape[2], -1),
        [0, 1],  # Widen each kernel element to 3 taps, original value centered
        0,
        axis=3,
    )
    skip = dilation
    if weight.shape[2] == 2:
        skip = 0
        w_folded = np.insert(w_folded, 0, 0,
                             axis=2)  # Insert at top - throw away the padding
    elif weight.shape[2] == 1:
        skip = 0
        w_folded = np.insert(w_folded, [0, 1], 0, axis=2)  # Use center
    assert w_folded.shape[2] == w_folded.shape[3] == 3
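    # For example (hypothetical): with dilation=2, a 1-channel input of length 6
    # folds to d_folded of shape (1, 3, 2), where column 0 holds the even-indexed
    # samples and column 1 the odd-indexed ones, and a length-3 kernel becomes a
    # 3x3 kernel with its taps in the center column.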

    # Use the standard Conv2d method available in MAX78000 and MAX78002
    output = compute.conv2d(
        d_folded,
        w_folded,
        None,
        d_folded.shape,
        (weight.shape[0], d_folded.shape[1], d_folded.shape[2]),
        kernel_size=[w_folded.shape[2], w_folded.shape[3]],
        stride=[stride, stride],
        pad=[1, 1],
        dilation=[1, 1],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
    )
    # Discard extra data at beginning and end (this can be done by increasing in_offset on the
    # next layer or reducing out_offset on the current layer and by specifying in_dim on the next
    # layer). The amount to skip is 0 for kernel lengths 1 or 2, and 'dilation' for kernel
    # length 3.
    output = output.reshape(output.shape[0], -1)[:,
                                                 skip:skip + expected_length]

    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)

    print('Output before division:\n', output)
    output += 64
    output //= 128
    print('Output:\n', output)

    print('Expected:\n', expected)
    print("SUCCESS" if np.array_equal(output, expected) else "*** FAILURE ***")
    assert np.array_equal(output, expected)
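
# Minimal smoke test for convolve1d_via2d() above (hypothetical values): a
# length-1 kernel exercises the "use center" folding path. `expected` holds
# the rescaled (raw + 64) // 128 values; note the floor division on negatives.
convolve1d_via2d(
    np.array([[128, -128, 0, 64]], dtype=np.int64),
    np.array([[[1]], [[2]]], dtype=np.int64),
    pad=0,
    expected=np.array([[1, -1, 0, 1], [2, -2, 0, 1]], dtype=np.int64),
)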