Example #1
def test_serialize():
    """Tests that it correctly [de]serializes.
    """
    in_shape = (32, 64, np.random.randint(1, 128))
    out_channels = np.random.randint(1, 128)
    window_shape = (4, 2)
    strides = (3, 1)
    pad = (1, 3)
    window_data = StridedWindowData(in_shape, window_shape, strides, pad,
                                    out_channels)
    serialized = window_data.serialize()
    assert serialized.in_height == 32
    assert serialized.in_width == 64
    assert serialized.in_channels == in_shape[2]

    assert serialized.window_height == 4
    assert serialized.window_width == 2
    assert serialized.out_channels == out_channels

    assert serialized.stride_height == 3
    assert serialized.stride_width == 1

    assert serialized.pad_height == 1
    assert serialized.pad_width == 3

    assert StridedWindowData.deserialize(serialized).serialize() == serialized
Example #2
def test_compute():
    """Tests that the Conv2D layer correctly computes a Conv2D.
    """
    batch = 101
    width = 32
    height = 32
    channels = 3

    stride = (2, 2)
    pad = (0, 0)
    filter_height = 4
    filter_width = 4
    out_channels = 5

    inputs = np.random.uniform(size=(batch, height * width * channels))

    # TODO(masotoud): use actual numbers for the filters and actually compute
    # true_outputs.
    filters = np.zeros(shape=(filter_height, filter_width, channels,
                              out_channels))
    biases = np.ones(shape=(out_channels,))
    # out height/width = (32 - 4) / 2 + 1 = 15
    true_outputs = np.ones(shape=(batch, 15 * 15 * out_channels))

    window_data = StridedWindowData((height, width, channels),
                                    (filter_height, filter_width), stride, pad,
                                    out_channels)
    conv2d_layer = Conv2DLayer(window_data, filters, biases)
    assert np.allclose(conv2d_layer.compute(inputs), true_outputs)
    assert np.allclose(conv2d_layer.compute(inputs, jacobian=True),
                       np.zeros_like(true_outputs))

    torch_inputs = torch.FloatTensor(inputs)
    torch_outputs = conv2d_layer.compute(torch_inputs).numpy()
    assert np.allclose(torch_outputs, true_outputs)
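
Since the filters are all zeros and the biases all ones, every output entry equals its bias and the Jacobian is identically zero, which is exactly what the two asserts check. Below is a quick sketch of the output-size arithmetic the comment above relies on; this is the standard strided-convolution formula, assumed here rather than taken from the Conv2DLayer source:

# Spatial output size for this test's configuration (assumed formula).
height, filter_height, stride_h, pad_h = 32, 4, 2, 0
out_height = (height + 2 * pad_h - filter_height) // stride_h + 1
assert out_height == 15  # hence the 15 * 15 * out_channels shape above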
Example #3
def test_compute():
    """Tests that the MaxPool layer correctly computes a MaxPool.
    """
    batch = 101
    width = 32
    height = 32
    channels = 3
    inputs = np.random.uniform(size=(batch, height * width * channels))

    true_outputs = inputs.reshape((batch, height, width, channels))
    true_outputs = true_outputs.reshape(
        (batch, height, width // 2, 2, channels))
    true_outputs = np.max(true_outputs, axis=3)
    true_outputs = true_outputs.reshape((batch, height // 2, 2, -1, channels))
    true_outputs = np.max(true_outputs, axis=2).reshape((batch, -1))

    window_data = StridedWindowData((height, width, channels), (2, 2), (2, 2),
                                    (0, 0), channels)
    maxpool_layer = MaxPoolLayer(window_data)
    assert np.allclose(maxpool_layer.compute(inputs), true_outputs)
    output, indices = maxpool_layer.compute(inputs, return_indices=True)
    assert np.allclose(output, true_outputs)
    # TODO: Actually check true_indices itself.
    assert np.allclose(maxpool_layer.from_indices(inputs, indices), output)

    torch_inputs = torch.FloatTensor(inputs)
    torch_outputs = maxpool_layer.compute(torch_inputs).numpy()
    assert np.allclose(torch_outputs, true_outputs)
    torch_outputs, torch_indices = maxpool_layer.compute(torch_inputs,
                                                         return_indices=True)
    assert np.allclose(torch_outputs.numpy(), true_outputs)
    torch_outputs = maxpool_layer.from_indices(torch_inputs, torch_indices)
    assert np.allclose(torch_outputs.numpy(), true_outputs)
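
The true_outputs computation above uses a reshape-and-reduce trick: grouping adjacent width pairs and then adjacent height pairs is equivalent to applying a 2x2 window with stride 2. A minimal standalone sketch of the same trick on a 1x4x4x1 input (sizes made up for clarity):

import numpy as np

# 4x4 single-channel image: row i holds values 4*i .. 4*i + 3.
x = np.arange(16, dtype=float).reshape((1, 4, 4, 1))
pairs_w = x.reshape((1, 4, 2, 2, 1))       # group adjacent width pairs
max_w = np.max(pairs_w, axis=3)            # -> (1, 4, 2, 1)
pairs_h = max_w.reshape((1, 2, 2, -1, 1))  # group adjacent height pairs
pooled = np.max(pairs_h, axis=2)           # -> (1, 2, 2, 1)
assert pooled.flatten().tolist() == [5.0, 7.0, 13.0, 15.0]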
Example #4
@classmethod
def deserialize(cls, serialized):
    """Deserializes the layer from the Protobuf format.
    """
    if serialized.WhichOneof("layer_data") == "maxpool_data":
        window_data = StridedWindowData.deserialize(
            serialized.maxpool_data.window_data)
        return cls(window_data)
    return None
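
For context, WhichOneof is the standard protobuf API for asking which member of a oneof group is set: it returns the set field's name as a string, or None if no member is set. A sketch of the dispatch pattern, assuming a generated Layer message with a layer_data oneof (as the field names in these examples suggest):

serialized = Layer()                   # hypothetical generated message
serialized.maxpool_data.SetInParent()  # mark the maxpool member as set
assert serialized.WhichOneof("layer_data") == "maxpool_data"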
Example #5
def test_out_shape():
    """Tests that the out_* methods work correctly.
    """
    in_shape = (32, 64, np.random.randint(1, 128))
    out_channels = np.random.randint(1, 128)
    window_shape = (4, 2)
    strides = (3, 1)
    pad = (1, 3)
    window_data = StridedWindowData(in_shape, window_shape, strides, pad,
                                    out_channels)
    # After padding, height is 34.
    # [0 - 4), [3 - 7), [6 - 10), ..., [30 - 34)
    assert window_data.out_height() == 11
    # After padding, width is 70.
    # [0 - 2), [1 - 3), [2 - 4), ..., [68 - 70)
    assert window_data.out_width() == 69
    assert window_data.out_shape() == (11, 69, out_channels)
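
The expected values follow from the usual strided-window output-size formula; a sketch, assuming the standard arithmetic rather than quoting the StridedWindowData source:

def out_size(in_size, window, stride, pad):
    # A window of size `window` slides in steps of `stride` over the
    # padded extent of in_size + 2 * pad positions.
    return (in_size + 2 * pad - window) // stride + 1

assert out_size(32, 4, 3, 1) == 11  # height: (32 + 2 - 4) // 3 + 1
assert out_size(64, 2, 1, 3) == 69  # width:  (64 + 6 - 2) // 1 + 1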
Example #6
def test_serialize():
    """Tests that the MaxPool layer correctly [de]serializes itself.
    """
    height, width, channels = np.random.choice([8, 16, 32, 64, 128], size=3)
    window_height, window_width = np.random.choice([2, 4, 8], size=2)
    window_data = StridedWindowData(
        (height, width, channels), (window_height, window_width),
        (window_height, window_width), (0, 0), channels)

    serialized = MaxPoolLayer(window_data).serialize()
    assert serialized.WhichOneof("layer_data") == "maxpool_data"

    serialized_window_data = serialized.maxpool_data.window_data
    assert serialized_window_data == window_data.serialize()

    deserialized = MaxPoolLayer.deserialize(serialized)
    assert deserialized.serialize() == serialized

    serialized.relu_data.SetInParent()
    assert MaxPoolLayer.deserialize(serialized) is None
Example #7
@classmethod
def deserialize(cls, serialized):
    """Deserializes from the Protobuf format.
    """
    if serialized.WhichOneof("layer_data") == "conv2d_data":
        window_data = StridedWindowData.deserialize(
            serialized.conv2d_data.window_data)
        filters = np.array(serialized.conv2d_data.filters)
        filters = filters.reshape(window_data.window_shape +
                                  (window_data.input_shape[2],
                                   window_data.out_channels,))
        biases = np.array(serialized.conv2d_data.biases)
        return cls(window_data, filters, biases)
    return None
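
Note the reshape: the protobuf message stores the filters as a flat repeated field, and they are rebuilt into HWIO layout from the window shape and the channel counts. A small standalone sketch with made-up sizes:

import numpy as np

# Flat filter data as it would arrive from the repeated protobuf field
# (made-up sizes: 2x2 window, 3 input channels, 5 output channels).
flat = np.arange(2 * 2 * 3 * 5, dtype=float)
filters = flat.reshape((2, 2) + (3, 5))  # window_shape + (in_c, out_c)
assert filters.shape == (2, 2, 3, 5)     # HWIO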
Example #8
def test_compute_channels():
    """Tests that the Concat layer correctly computes.

    Uses concat_along = CHANNELS
    """
    batch = 15
    height, width, channels = (32, 32, 3)
    out_channels = 6

    inputs = np.random.uniform(size=(batch, height * width * channels))
    inputs = inputs.astype(np.float32)

    filters = np.random.uniform(size=(2, 2, channels, out_channels))
    filters = filters.astype(np.float32)
    biases = np.random.uniform(size=(out_channels,)).astype(np.float32)

    conv_window_data = StridedWindowData((height, width, channels), (2, 2),
                                         (2, 2), (0, 0), out_channels)
    conv2d_layer = Conv2DLayer(conv_window_data, filters, biases)
    conv2d_outputs = conv2d_layer.compute(inputs)

    pool_window_data = StridedWindowData((height, width, channels), (2, 2),
                                         (2, 2), (0, 0), channels)
    averagepool_layer = AveragePoolLayer(pool_window_data)
    pool_outputs = averagepool_layer.compute(inputs)

    true_outputs = np.concatenate([
        conv2d_outputs.reshape((-1, out_channels)),
        pool_outputs.reshape((-1, channels))
    ], axis=1).reshape((batch, -1))

    concat_layer = ConcatLayer([conv2d_layer, averagepool_layer],
                               ConcatAlong.CHANNELS)
    assert np.allclose(concat_layer.compute(inputs), true_outputs)

    torch_inputs = torch.FloatTensor(inputs)
    torch_outputs = concat_layer.compute(torch_inputs).numpy()
    assert np.allclose(torch_outputs, true_outputs)
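
For reference, a shape trace of the CHANNELS concatenation above: 2x2 windows with stride 2 on 32x32 inputs give 16x16 spatial outputs, so the two branches line up pixel-for-pixel and only their channel counts differ. A sketch with the same sizes:

import numpy as np

conv_out = np.zeros((15, 16 * 16 * 6))  # batch x (H * W * out_channels)
pool_out = np.zeros((15, 16 * 16 * 3))  # batch x (H * W * channels)
merged = np.concatenate([conv_out.reshape((-1, 6)),
                         pool_out.reshape((-1, 3))], axis=1)
assert merged.reshape((15, -1)).shape == (15, 16 * 16 * 9)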
Example #9
def test_serialize():
    """Tests Conv2D.{serialize, deserialize}.py.
    """
    height, width, channels, out_channels = np.random.choice(
            [8, 16, 32, 64, 128], size=4)
    window_height, window_width = np.random.choice([2, 4, 8], size=2)
    pad = (0, 0)
    window_data = StridedWindowData((height, width, channels),
                                    (window_height, window_width),
                                    (window_height, window_width),
                                    pad, out_channels)

    filters = np.random.uniform(size=(window_height, window_width,
                                      channels, out_channels))
    biases = np.random.uniform(size=(out_channels,))
    serialized = Conv2DLayer(window_data, filters, biases).serialize()
    assert serialized.WhichOneof("layer_data") == "conv2d_data"

    serialized_window_data = serialized.conv2d_data.window_data
    assert serialized_window_data.in_height == height
    assert serialized_window_data.in_width == width
    assert serialized_window_data.in_channels == channels
    assert serialized_window_data.window_height == window_height
    assert serialized_window_data.window_width == window_width
    assert serialized_window_data.stride_height == window_height
    assert serialized_window_data.stride_width == window_width
    assert serialized_window_data.pad_height == 0
    assert serialized_window_data.pad_width == 0
    assert serialized_window_data.out_channels == out_channels

    serialized_filters = np.array(serialized.conv2d_data.filters)
    assert np.allclose(serialized_filters.flatten(), filters.flatten())

    serialized_biases = np.array(serialized.conv2d_data.biases)
    assert np.allclose(serialized_biases.flatten(), biases.flatten())

    deserialized = Conv2DLayer.deserialize(serialized)
    assert deserialized.serialize() == serialized

    serialized.relu_data.SetInParent()
    assert Conv2DLayer.deserialize(serialized) is None
Example #10
def test_compute():
    """Tests that the AveragePool layer correctly computes a AveragePool.
    """
    batch = 101
    width = 32
    height = 32
    channels = 3
    inputs = np.random.uniform(size=(batch, height * width * channels))

    true_outputs = inputs.reshape((batch, height, width, channels))
    true_outputs = true_outputs.reshape((batch, height, width // 2, 2, channels))
    true_outputs = np.mean(true_outputs, axis=3)
    true_outputs = true_outputs.reshape((batch, height // 2, 2, -1, channels))
    true_outputs = np.mean(true_outputs, axis=2).reshape((batch, -1))

    window_data = StridedWindowData((height, width, channels),
                                    (2, 2), (2, 2), (0, 0), channels)
    averagepool_layer = AveragePoolLayer(window_data)
    assert np.allclose(averagepool_layer.compute(inputs), true_outputs)

    torch_inputs = torch.FloatTensor(inputs)
    torch_outputs = averagepool_layer.compute(torch_inputs).numpy()
    assert np.allclose(torch_outputs, true_outputs)
Example #11
    def layer_from_onnx(graph, node):
        """Reads a layer from an ONNX node.

        Specs for the ONNX operators are available at:
        https://github.com/onnx/onnx/blob/master/docs/Operators.md
        """
        # First, we get info about inputs to the layer (including previous
        # layer outputs & things like weight matrices).
        inputs = node.input
        deserialized_inputs = []
        deserialized_input_shapes = []
        for input_name in inputs:
            # We need to find the initializers (which I think are basically
            # weight tensors) for the particular input.
            initializers = [init for init in graph.initializer
                            if str(init.name) == str(input_name)]
            if initializers:
                assert len(initializers) == 1
                # Get the weight tensor as a Numpy array and save it.
                deserialized_inputs.append(numpy_helper.to_array(initializers[0]))
            else:
                # This input is the output of another node, so just store the
                # name of that other node (we'll link them up later), e.g.
                # squeezenet0_conv0_fwd.
                deserialized_inputs.append(str(input_name))
            # Get metadata about the input (e.g. its shape).
            infos = [info for info in graph.value_info
                     if info.name == input_name]
            if infos:
                # This is an input with a particular shape.
                assert len(infos) == 1
                input_shape = [d.dim_value
                               for d in infos[0].type.tensor_type.shape.dim]
                deserialized_input_shapes.append(input_shape)
            elif input_name == "data":
                # This is an input to the entire network; it's handled
                # separately.
                net_input_shape = graph.input[0].type.tensor_type.shape
                input_shape = [d.dim_value for d in net_input_shape.dim]
                deserialized_input_shapes.append(input_shape)
            else:
                # No shape information is available for this input.
                deserialized_input_shapes.append(None)

        layer = None

        # Standardize some of the data shared by the strided-window layers.
        if node.op_type in {"Conv", "MaxPool", "AveragePool"}:
            # NCHW -> NHWC
            input_shape = deserialized_input_shapes[0]
            input_shape = [input_shape[2], input_shape[3], input_shape[1]]
            strides = list(Network.onnx_ints_attribute(node, "strides"))
            pads = list(Network.onnx_ints_attribute(node, "pads"))
            # We do not support separate begin/end padding.
            assert pads[0] == pads[2]
            assert pads[1] == pads[3]
            pads = pads[1:3]

        # Now, parse the actual layers.
        if node.op_type == "Conv":
            # We don't support dilations or non-1 groups.
            dilations = list(Network.onnx_ints_attribute(node, "dilations"))
            assert all(dilation == 1 for dilation in dilations)
            group = Network.onnx_ints_attribute(node, "group")
            assert not group or group == 1

            # Biases are technically optional, but I don't *think* anyone uses
            # that feature.
            assert len(deserialized_inputs) == 3
            input_data, filters, biases = deserialized_inputs
            # OIHW -> HWIO
            filters = filters.transpose((2, 3, 1, 0))

            window_data = StridedWindowData(input_shape, filters.shape[:2],
                                            strides, pads, biases.shape[0])
            layer = Conv2DLayer(window_data, filters, biases)
        elif node.op_type == "Relu":
            layer = ReluLayer()
        elif node.op_type == "MaxPool":
            kernel_shape = Network.onnx_ints_attribute(node, "kernel_shape")
            window_data = StridedWindowData(input_shape, list(kernel_shape),
                                            strides, pads, input_shape[2])
            layer = MaxPoolLayer(window_data)
        elif node.op_type == "AveragePool":
            kernel_shape = Network.onnx_ints_attribute(node, "kernel_shape")
            window_data = StridedWindowData(input_shape, list(kernel_shape),
                                            strides, pads, input_shape[2])
            layer = AveragePoolLayer(window_data)
        elif node.op_type == "Gemm":
            input_data, weights, biases = deserialized_inputs

            alpha = Network.onnx_ints_attribute(node, "alpha")
            if alpha:
                weights *= alpha
            beta = Network.onnx_ints_attribute(node, "beta")
            if beta:
                biases *= beta

            trans_A = Network.onnx_ints_attribute(node, "transA")
            trans_B = Network.onnx_ints_attribute(node, "transB")

            # We compute (X . W) [+ C].
            assert not trans_A
            if trans_B:
                weights = weights.transpose()
            layer = FullyConnectedLayer(weights, biases)
        elif node.op_type == "BatchNormalization":
            epsilon = Network.onnx_ints_attribute(node, "epsilon")
            input_data, scale, B, mean, var = deserialized_inputs
            # We don't yet support separate scale/bias parameters, though they
            # can be rolled in to mean/var.
            assert np.allclose(scale, 1.0) and np.allclose(B, 0.0)
            layer = NormalizeLayer(mean, np.sqrt(var + epsilon))
        elif node.op_type == "Concat":
            layer = list(inputs)
        elif node.op_type in {"Dropout", "Reshape", "Flatten"}:
            # These are (more-or-less) handled implicitly, since we pass
            # around flattened activation vectors and only support inference
            # (not training).
            layer = False
        else:
            raise NotImplementedError
        assert len(node.output) == 1
        return (inputs[0], node.output[0], layer)
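
The two transposes in this function convert between layout conventions: ONNX activations are NCHW and its convolution filters OIHW, while this code uses NHWC activations and HWIO filters. A quick sketch of both conversions on dummy data:

import numpy as np

nchw = [1, 3, 32, 32]               # ONNX activation shape (N, C, H, W)
nhwc = [nchw[2], nchw[3], nchw[1]]  # -> (H, W, C); batch handled separately
assert nhwc == [32, 32, 3]

filters_oihw = np.zeros((5, 3, 4, 4))  # ONNX filters (O, I, H, W)
filters_hwio = filters_oihw.transpose((2, 3, 1, 0))
assert filters_hwio.shape == (4, 4, 3, 5)  # (H, W, I, O)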
Example #12
    def from_eran(cls, net_file):
        """Helper method to read an ERAN net_file into a Network.

        Currently only supports a subset of those supported by the original
        read_net_file.py. See an example of the type of network file we're
        reading here:

        https://files.sri.inf.ethz.ch/eran/nets/tensorflow/mnist/mnist_relu_3_100.tf

        This code has been adapted (with heavy modifications) from the ERAN
        source code. Each layer has a header line that describes the type of
        layer, which is then followed by the weights (if applicable). Note that
        some layers that are rolled together in ERAN are represented
        separately here (e.g. "ReLU" in the ERAN format corresponds to
        Affine + ReLU in our representation).
        """
        layers = []
        net_file = open(net_file, "r")
        while True:
            curr_line = net_file.readline()[:-1]
            if curr_line in {"Affine", "ReLU", "HardTanh"}:
                # Parses a fully-connected layer, possibly followed by
                # non-linearity.
                # ERAN files use (out_dims, in_dims), we use the opposite.
                weights = cls.parse_np_array(net_file).transpose()
                biases = cls.parse_np_array(net_file)

                if len(layers) > 1 and isinstance(layers[-2], Conv2DLayer):
                    # When there's an affine after a 2D convolution, ERAN's
                    # files assume the input is CHW when it's actually HWC. We
                    # correct that here by permuting the dimensions.
                    conv_layer = layers[-2]
                    output_size = weights.shape[-1]
                    weights = weights.reshape(
                        (conv_layer.window_data.out_channels,
                         conv_layer.window_data.out_height(),
                         conv_layer.window_data.out_width(),
                         output_size))
                    weights = weights.transpose(1, 2, 0, 3)
                    weights = weights.reshape((-1, output_size))

                # Add the fully-connected layer.
                layers.append(FullyConnectedLayer(weights, biases))

                # Maybe add a non-linearity.
                if curr_line == "ReLU":
                    layers.append(ReluLayer())
                elif curr_line == "HardTanh":
                    layers.append(HardTanhLayer())
            elif curr_line.startswith("Normalize"):
                # Parses a Normalize layer.
                means = curr_line.split("mean=")[1].split("std=")[0].strip()
                means = cls.parse_np_array(means)

                stds = curr_line.split("std=")[1].strip()
                stds = cls.parse_np_array(stds)

                layers.append(NormalizeLayer(means, stds))
            elif curr_line.startswith("Conv2D"):
                # Parses a 2D-Convolution layer. The info line looks like:
                # ReLU, filters=16, kernel_size=[4, 4], \
                # input_shape=[28, 28, 1], stride=[2, 2], padding=0
                # But, we can get filters and kernel_size from the actual
                # filter weights, so no need to parse that here.
                info_line = net_file.readline()[:-1].strip()
                activation = info_line.split(",")[0]

                stride = cls.parse_np_array(
                    info_line.split("stride=")[1].split("],")[0] + "]")

                input_shape = info_line.split("input_shape=")[1].split("],")[0]
                input_shape += "]"
                input_shape = cls.parse_np_array(input_shape)

                pad = (0, 0)
                if "padding=" in info_line:
                    pad = int(info_line.split("padding=")[1])
                    pad = (pad, pad)

                # (f_h, f_w, i_c, o_c)
                filter_weights = cls.parse_np_array(net_file)
                # (o_c,)
                biases = cls.parse_np_array(net_file)

                window_data = StridedWindowData(
                    input_shape, filter_weights.shape[:2],
                    stride, pad, filter_weights.shape[3])
                layers.append(Conv2DLayer(window_data, filter_weights, biases))

                if activation == "ReLU":
                    layers.append(ReluLayer())
                elif activation == "HardTanh":
                    layers.append(HardTanhLayer())
                else:
                    # As far as I know, all Conv2D layers should have an
                    # associated activation function in the ERAN format.
                    raise NotImplementedError
            elif curr_line.strip() == "":
                break
            else:
                raise NotImplementedError
        return cls(layers)
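
The weight permutation in the Conv2D-then-Affine case is the subtle part: ERAN flattens convolution outputs in CHW order, while this representation flattens them in HWC order, so the rows of the affine weight matrix must be reordered to match. A tiny sketch with made-up sizes (2 channels, 2x2 spatial, 3 outputs):

import numpy as np

weights = np.arange(2 * 2 * 2 * 3).reshape((2 * 2 * 2, 3))  # CHW-ordered rows
chw = weights.reshape((2, 2, 2, 3))  # (channels, height, width, outputs)
hwc = chw.transpose((1, 2, 0, 3))    # (height, width, channels, outputs)
weights = hwc.reshape((-1, 3))       # rows now in HWC order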