def linear(data, weight, bias, expected):
    """Fully connected (linear) transformation."""
    print('Input:', data)

    t = torch.nn.functional.linear(
        torch.as_tensor(data, dtype=torch.float).unsqueeze(0),  # Add batch dimension
        torch.as_tensor(weight, dtype=torch.float),
        torch.as_tensor(bias, dtype=torch.float) if bias is not None else None,
    ).int().squeeze().numpy()

    output = compute.linear(
        data,
        weight,
        bias,
        in_features=len(data),
        out_features=weight.shape[0],
        debug=True,
    )
    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)

    # MLP emulation via 1x1 convolution
    emu_output = compute.conv2d(
        data[:, np.newaxis, np.newaxis],
        weight[:, :, np.newaxis, np.newaxis],
        bias,
        [data.shape[0], 1, 1],
        [expected.shape[0], 1, 1],
        kernel_size=[1, 1],
        stride=[1, 1],
        pad=[0, 0],
        dilation=[1, 1],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
        debug=True,
    ).squeeze()
    print("MLP EMULATION OK" if np.array_equal(emu_output, t) else "*** FAILURE ***")
    assert np.array_equal(emu_output, t)

    assert np.array_equal(output, expected)

    print('Output before division:', output)
    output += 64
    output //= 128
    print('Output:', output)

    expected += 64
    expected //= 128
    print('Expected:', expected)

    print("SUCCESS" if np.array_equal(output, expected) else "*** FAILURE ***")
    assert np.array_equal(output, expected)
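

# The following is a hedged usage sketch, not part of the original test suite.
# It shows one way linear() above might be driven. The function name
# test_linear_example and all values are illustrative assumptions; the only
# property relied on is the one linear() itself asserts, namely that
# compute.linear() matches torch.nn.functional.linear() at full resolution.
def test_linear_example():
    """Hypothetical smoke test for linear(); values are illustrative."""
    data = np.array([1, 2], dtype=np.int64)
    weight = np.array([[1, 0],
                       [0, 1],
                       [1, 1]], dtype=np.int64)  # out_features x in_features
    bias = np.array([0, 0, 128], dtype=np.int64)
    # Full-resolution expectation: weight @ data + bias = [1, 2, 131];
    # after the (x + 64) // 128 scaling step this becomes [0, 0, 1].
    expected = weight @ data + bias
    linear(data, weight, bias, expected)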


def convolve(data, weight, expected):
    """Convolve data."""
    print('Input:\n', data)

    t = torch.nn.functional.conv2d(
        torch.as_tensor(data, dtype=torch.float).unsqueeze(0),  # Add batch dimension
        torch.as_tensor(weight, dtype=torch.float),
        bias=None,
        stride=[1, 1],
        padding=0,  # Keep data dimensions
        groups=1,
        dilation=1,
    ).int().squeeze().numpy()

    output = compute.conv2d(
        data,
        weight,
        None,
        data.shape,
        expected.shape,
        kernel_size=[1, 1],
        stride=[1, 1],
        pad=[0, 0],
        dilation=[1, 1],
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=1,
        debug=True,
    )
    print("PYTORCH OK" if np.array_equal(output, t) else "*** FAILURE ***")
    assert np.array_equal(output, t)

    print('Output before division:\n', output)
    output += 64
    output //= 128
    print('Output:\n', output)

    print('Expected:\n', expected)
    print("SUCCESS" if np.array_equal(output, expected) else "*** FAILURE ***")
    assert np.array_equal(output, expected)
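

# The following is a hedged usage sketch, not part of the original test suite.
# A 1x1 kernel with weight 128 scales every input by 128, and the
# (x + 64) // 128 rounding step then reproduces the input exactly, so the
# expected output equals the input. The function name and values are
# illustrative assumptions on top of the behavior convolve() itself asserts.
def test_convolve_example():
    """Hypothetical smoke test for convolve(); values are illustrative."""
    data = np.array([[[10, -20],
                      [30, -40]]], dtype=np.int64)  # CHW: 1x2x2
    weight = np.full((1, 1, 1, 1), 128, dtype=np.int64)  # single 1x1 kernel
    convolve(data, weight, expected=data.copy())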


def conv2d_layer(
        layer,  # pylint: disable=unused-argument
        verbose,
        verbose_data,
        input_size,
        kernel_size,
        output_shift,
        output_channels,
        padding,
        dilation,
        stride,
        activation,
        kernel,
        bias,
        data,
        bits=8,
        output_width=8,
        groups=1,
        device=84,  # pylint: disable=unused-argument
        debug=False,
):
    """
    Perform 2D convolution for one layer.
    """
    if verbose:
        print(f"{kernel_size[0]}x{kernel_size[1]} KERNEL(S)", end='')
        if verbose_data:
            print(":")
            with np.printoptions(formatter={'int': '{0:4}'.format}):
                for i in range(output_channels):
                    print(f'Output channel #{i}')
                    if kernel_size[0] == kernel_size[1] == 1:
                        print(np.squeeze(kernel[i]))
                    else:
                        print(kernel[i])
            print(f"BIAS: {bias}\n")
        elif bias is not None:
            print(f"\nBIAS SIZE: {len(bias)}")
        else:
            print('')

    out_size = [
        output_channels,
        (input_size[1] - dilation[0] * (kernel_size[0] - 1) - 1 + 2 * padding[0]) // stride[0] + 1,
        (input_size[2] - dilation[1] * (kernel_size[1] - 1) - 1 + 2 * padding[1]) // stride[1] + 1,
    ]

    if bias is not None:
        bias = bias * tc.dev.BIAS_DIV

    out_buf = conv2d(
        data=data,
        weight=kernel,
        bias=bias,
        input_size=input_size,
        output_size=out_size,
        kernel_size=kernel_size,
        stride=stride,
        pad=padding,
        dilation=dilation,
        fractional_stride=[1, 1],
        output_pad=[0, 0],
        groups=groups,
        debug=debug,
    )

    if verbose and verbose_data:
        print(f"{out_size[0]}x{out_size[1]}x{out_size[2]} FULL-RES OUTPUT:")
        if out_size[1] == out_size[2] == 1:
            print(np.squeeze(out_buf))
        else:
            print(out_buf)
        print('')

    stats.macc += (input_size[0] // groups) * kernel_size[0] * kernel_size[1] * out_size[0] \
        * out_size[1] * out_size[2]

    if output_width != 32:
        out_buf = np.floor(0.5 + out_buf / (128 / 2.0**output_shift)).astype(np.int64). \
            clip(-(2**(bits-1)), 2**(bits-1)-1)

        if verbose and verbose_data:
            print(
                f"{out_size[0]}x{out_size[1]}x{out_size[2]} OUTPUT "
                f"{'BEFORE ACTIVATION' if activation is not None else '(NO ACTIVATION)'}:"
            )
            if out_size[1] == out_size[2] == 1:
                print(np.squeeze(out_buf))
            else:
                print(out_buf)
            print('')

    if activation is not None:
        if activation == op.ACT_RELU:
            np.clip(out_buf, 0, 2**(bits - 1) - 1, out_buf)
        elif activation == op.ACT_ABS:
            out_buf = np.abs(out_buf).clip(0, 2**(bits - 1) - 1)

        if verbose and verbose_data:
            print(f"{out_size[0]}x{out_size[1]}x{out_size[2]} ACTIVATED OUTPUT"
                  f" ({op.act_string(activation).upper()}):")
            if out_size[1] == out_size[2] == 1:
                print(np.squeeze(out_buf))
            else:
                print(out_buf)
            print('')

        stats.comp += out_size[0] * out_size[1] * out_size[2]

    if verbose and not verbose_data:
        print(f"{out_size[0]}x{out_size[1]}x{out_size[2]} OUTPUT"
              f" ({op.act_string(activation).upper()})\n")

    return out_buf, out_size
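

# The following is a hedged usage sketch, not part of the original module.
# It shows one plausible invocation of conv2d_layer() for a 3x3 'same'
# convolution with ReLU. All argument values are illustrative assumptions; it
# also assumes that the global device record tc.dev and the stats counters
# have been initialized elsewhere, since conv2d_layer() reads tc.dev.BIAS_DIV
# and updates stats.macc/stats.comp.
def conv2d_layer_example():
    """Hypothetical driver for conv2d_layer(); values are illustrative."""
    rng = np.random.default_rng(0)
    input_size = [2, 4, 4]  # CHW
    output_channels = 2
    kernel = rng.integers(-128, 128, size=(output_channels, input_size[0], 3, 3))
    bias = rng.integers(-128, 128, size=output_channels)
    data = rng.integers(-128, 128, size=input_size)
    out_buf, out_size = conv2d_layer(
        layer=0,
        verbose=True,
        verbose_data=True,
        input_size=input_size,
        kernel_size=[3, 3],
        output_shift=0,
        output_channels=output_channels,
        padding=[1, 1],
        dilation=[1, 1],
        stride=[1, 1],
        activation=op.ACT_RELU,
        kernel=kernel,
        bias=bias,
        data=data,
    )
    assert out_size == [2, 4, 4]  # 'same' padding preserves H and W
    return out_buf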