Example 1
def input(op_name, shape, dtype='float32', **kwargs):
    # type: (str, List[int], str) -> XLayer
    """
    Create an Input parameters layer

    Arguments
    ---------
    op_name: str
        The name of this input layer
    shape: List[int]
        The input shape
    dtype: str (optional, default 'float32')
        The input data type
    """

    shape[0] = -1
    shape = TensorShape(shape)

    attrs = kwargs
    attrs.update({'dtype': dtype})

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Input'],
                   shapes=shape,
                   sizes=shape.get_size(),
                   layer=[op_name],
                   tops=[],
                   bottoms=[],
                   attrs=attrs,
                   targets=[])

    return X
Example 2
def batch_flatten(op_name, input_layer, **kwargs):
    # type: (str, XLayer) -> XLayer
    """
    Create a batch flatten layer

    Arguments
    ---------
    op_name: str
        The name of this batch flatten layer operation
    input_layer: XLayer
        The input layer to this batch flatten layer
    """
    flattened_shape = TensorShape([list(input_layer.shapes)[0]] +
                                  [int(np.prod(list(input_layer.shapes)[1:]))])

    bottoms = [input_layer.name]
    attrs = kwargs

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Flatten'],
        # TODO
        shapes=flattened_shape,
        sizes=flattened_shape.get_size(),
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
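
The flattened shape above keeps the batch dimension and collapses all remaining dimensions into one. A minimal standalone sketch of that shape arithmetic, using plain numpy and lists instead of the pyxir TensorShape class (values are illustrative):

import numpy as np

# Hypothetical input shape with an anonymous batch dimension, as used above
in_shape = [-1, 2, 3, 4]

# Same computation as in batch_flatten: keep dim 0, multiply the rest together
flattened_shape = [in_shape[0]] + [int(np.prod(in_shape[1:]))]

assert flattened_shape == [-1, 24]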
Example 3
def constant(op_name, value, **kwargs):
    # type: (str, numpy.ndarray) -> XLayer
    """
    Create a Constant parameters layer

    Arguments
    ---------
    op_name: str
        The name of this constant layer
    value: numpy.ndarray
        The value of this constant layer
    """
    if not isinstance(value, np.ndarray):
        value = np.array(value)

    dtype = str(value.dtype)

    attrs = kwargs
    attrs.update({'dtype': dtype})

    shape = TensorShape(list(value.shape))

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Constant'],
                   shapes=shape,
                   sizes=shape.get_size(),
                   data=[value],
                   layer=[op_name],
                   tops=[],
                   bottoms=[],
                   attrs=attrs,
                   targets=[])

    return X
Example 4
def pad(op_name,
        input_layer,
        padding,
        pad_value,
        # layout,
        **kwargs):
    # type: (str, XLayer, List[List[int]], float) -> XLayer
    """
    Create a padding layer

    Arguments
    ---------
    op_name: str
        The name of this padding layer operation
    input_layer: XLayer
        The input layer to this padding layer
    padding: List[List[int]]
        The padding width to the edges of each axis
    pad_value: float
        The padding value. Unsupported for now and always zero
    """
    if pad_value != 0:
        raise NotImplementedError("Unsupported padding value: {}, only 0 is"
                                  " supported for now.".format(pad_value))

    if not len(input_layer.shapes) == 4:
        raise NotImplementedError("Padding layer only supported after layer in"
                                  " `NCHW` or `NHWC` format, but found layer"
                                  " with {} dims"
                                  .format(len(input_layer.shapes)))

    # Pad the leading axes, for which no explicit padding is given, with [0, 0]
    unpadded_dims = [[0, 0]] * (len(input_layer.shapes) - len(padding))
    padding = unpadded_dims + [list(pad) for pad in padding]

    shape = TensorShape([s + p[0] + p[1]
                         for s, p in zip(input_layer.shapes, padding)])
    logger.debug("-- Pad shape: {}".format(shape))

    attrs = kwargs
    attrs.update({
        'padding': padding
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Pad'],
        shapes=shape,
        sizes=shape.get_size(),
        # data=padding,
        attrs=attrs,
        layer=[op_name],
        tops=[],
        bottoms=[input_layer.name],
        targets=[]
    )

    return X
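
The padded output shape is the input shape plus the before/after padding of each axis; leading axes without explicit padding get [0, 0]. A standalone sketch of the same shape arithmetic on plain lists (values are illustrative, not taken from the library):

# NCHW input with padding given for the two spatial axes only
in_shape = [1, 16, 32, 32]
padding = [[1, 1], [2, 2]]  # [[h_before, h_after], [w_before, w_after]]

# Prepend [0, 0] for the leading axes that receive no explicit padding
full_padding = [[0, 0]] * (len(in_shape) - len(padding)) + [list(p) for p in padding]

out_shape = [s + p[0] + p[1] for s, p in zip(in_shape, full_padding)]
assert out_shape == [1, 16, 34, 36]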
Example 5
def mean(op_name, input_layer, axes, keepdims, exclude, **kwargs):
    # type: (str, XLayer, List[int], bool, bool) -> XLayer
    """
    Compute the mean of the input layer over some axes

    Arguments
    ---------
    op_name: str
        The name of this mean operation
    axes: List[int]
        The axes over which to compute the mean
    ... TODO
    input_layer: XLayer
        The input layer
    """

    attrs = kwargs

    logger.debug("Attrs: {}".format(attrs))

    bottoms = [input_layer.name]

    in_shape = input_layer.shapes[:]

    if exclude:
        axes = [i for i in range(len(in_shape)) if i not in axes]

    if keepdims:
        newshape = [dim if i not in axes else 1
                    for i, dim in enumerate(in_shape)]
    else:
        newshape = [dim for i, dim in enumerate(in_shape)
                    if i not in axes]

    newshape = TensorShape(newshape)
    logger.debug("Mean axes: {}, in shape: {}, out shape: {}"
                 .format(axes, in_shape, newshape))

    attrs.update({
        'axes': axes,
        'keepdims': keepdims,
        # 'exclude': exclude
        #  TODO: dtype??
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Mean'],
        shapes=newshape,
        sizes=newshape.get_size(),
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[]
    )

    return X
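
The output shape follows the usual reduction semantics: with keepdims the reduced axes collapse to 1, otherwise they disappear, and exclude inverts the axis selection. A quick standalone check of the same shape logic against numpy.mean (example values are hypothetical):

import numpy as np

in_shape = [1, 8, 7, 7]
axes, keepdims, exclude = [2, 3], True, False

if exclude:
    axes = [i for i in range(len(in_shape)) if i not in axes]

if keepdims:
    newshape = [dim if i not in axes else 1 for i, dim in enumerate(in_shape)]
else:
    newshape = [dim for i, dim in enumerate(in_shape) if i not in axes]

# numpy produces the same result shape for the same reduction
ref = np.zeros(in_shape).mean(axis=tuple(axes), keepdims=keepdims)
assert newshape == list(ref.shape) == [1, 8, 1, 1]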
Example 6
def global_pool2d(op_name, input_layer, pool_type, layout, **kwargs):
    # type: (str, XLayer, str, str) -> XLayer
    """
    Create a global pooling 2D parameters layer

    Arguments
    ---------
    op_name: str
        The name of this pooling layer operation
    pool_type: str
        Indicates which pooling operation to use (Max or Avg)
    layout: str
        The layout of the pooling layer input (`NCHW` or `NHWC`)
    input_layer: XLayer
        The input layer to this pooling layer
    """

    if pool_type not in ['Max', 'Avg']:
        raise NotImplementedError("Invalid pooling type: {}, can either be"
                                  " `Max` or `Avg`.".format(pool_type))

    # NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches, channels = input_layer.shapes[0], input_layer.shapes[1]

    strides = [1, 1]
    padding = [0, 0]
    pool_size = insize

    out_h, out_w = 1, 1

    attrs = kwargs
    attrs.update({
        'padding': [[0, 0], [0, 0], [0, 0], [0, 0]],
        'insize': insize,
        'outsize': [out_h, out_w],
        'data_layout': layout,
        'strides': strides,
        'kernel_size': pool_size,
        'pool_type': pool_type,
        # 'channels': [channels, channels]
    })
    out_shape = TensorShape([batches, channels, out_h, out_w] if layout ==
                            'NCHW' else [batches, out_h, out_w, channels])

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Pooling'],
                   shapes=out_shape,
                   sizes=out_shape.get_size(),
                   attrs=attrs,
                   layer=[op_name],
                   tops=[],
                   bottoms=[input_layer.name],
                   targets=[])

    return X
Example 7
def global_pool2d(op_name: str, input_layer: XLayer, pool_type: str,
                  layout: str, **kwargs) -> XLayer:
    """
    Create a global pooling XLayer

    Arguments
    ---------
    op_name: str
        The name of this pooling layer operation
    pool_type: str
        Indicates which pooling operation to use (Max or Avg)
    layout: str
        The layout of the pooling layer input (`NCHW` or `NHWC`)
    input_layer: XLayer
        The input layer to this pooling layer
    """

    if pool_type not in ["Max", "Avg"]:
        raise NotImplementedError("Invalid pooling type: {}, can either be"
                                  " `Max` or `Avg`.".format(pool_type))

    # NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches, channels = input_layer.shapes[0], input_layer.shapes[1]

    strides = [1, 1]
    padding = [0, 0]
    pool_size = insize

    out_h, out_w = 1, 1

    attrs = kwargs
    attrs.update({
        "padding": [[0, 0], [0, 0], [0, 0], [0, 0]],
        "insize": insize,
        "outsize": [out_h, out_w],
        "data_layout": layout,
        "strides": strides,
        "kernel_size": pool_size,
        "pool_type": pool_type,
    })
    out_shape = TensorShape([batches, channels, out_h, out_w] if layout ==
                            "NCHW" else [batches, out_h, out_w, channels])

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=["Pooling"],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        attrs=attrs,
        layer=[op_name],
        tops=[],
        bottoms=[input_layer.name],
        targets=[],
    )
    return X
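
Global pooling always reduces the spatial dimensions to 1x1; only the position of the channel axis in the output depends on the layout. A shape-only sketch of that rule (note that, like the function above, the input is indexed as NCHW regardless of layout):

def global_pool2d_out_shape(in_shape, layout):
    # in_shape is read as NCHW by design, mirroring the function above
    batches, channels = in_shape[0], in_shape[1]
    out_h, out_w = 1, 1
    return ([batches, channels, out_h, out_w] if layout == "NCHW"
            else [batches, out_h, out_w, channels])

assert global_pool2d_out_shape([-1, 64, 7, 7], "NCHW") == [-1, 64, 1, 1]
assert global_pool2d_out_shape([-1, 64, 7, 7], "NHWC") == [-1, 1, 1, 64]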
Example 8
def yolo_reorg(op_name, input_layer, stride, layout, **kwargs):
    # type: (str, XLayer, int, str) -> XLayer
    """
    Shuffle and shape transform input data based on stride

    TODO: example

    Arguments
    ---------
    op_name: str
        The name of this YoloReorg operation
    stride: int
        The stride to be used for reorganization
    input_layer: XLayer
        The input layer
    """

    if layout != 'NCHW':
        raise NotImplementedError("YoloReorg is only supported for NCHW data"
                                  " layout")

    attrs = kwargs
    attrs.update({'stride': stride, 'layout': layout})

    in_shape = input_layer.shapes[:]

    if in_shape[2] % stride != 0:
        raise ValueError("Invalid YoloReorg operation: height dimension size:"
                         " {} should be divisible by: {}".format(
                             in_shape[2], stride))
    if in_shape[3] % stride != 0:
        raise ValueError("Invalid YoloReorg operation: height dimension size:"
                         " {} should be divisible by: {}".format(
                             in_shape[3], stride))

    shape = TensorShape([
        in_shape[0], in_shape[1] * stride * stride,
        int(in_shape[2] / stride),
        int(in_shape[3] / stride)
    ])

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['YoloReorg'],
                   shapes=shape,
                   sizes=shape.get_size(),
                   layer=[op_name],
                   tops=[],
                   bottoms=[input_layer.name],
                   attrs=attrs,
                   targets=[])

    return X
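
With stride s, YoloReorg folds s x s spatial blocks into the channel dimension, so channels grow by a factor of s^2 while height and width shrink by s. A standalone sketch of the shape arithmetic only (the data shuffle itself is not shown):

def yolo_reorg_out_shape(in_shape, stride):
    n, c, h, w = in_shape  # NCHW
    assert h % stride == 0 and w % stride == 0
    return [n, c * stride * stride, h // stride, w // stride]

# Typical YOLOv2 passthrough layer: 26x26x64 -> 13x13x256 (hypothetical sizes)
assert yolo_reorg_out_shape([1, 64, 26, 26], 2) == [1, 256, 13, 13]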
Example 9
def cvx(op_name, input_layer, cvx_key, shape, dtype, **kwargs):
    # type: (str, XLayer, str, List[int], str) -> XLayer
    """
    Create a cvx input XLayer

    Arguments
    ---------
    op_name: str
        The name of this input layer
    input_layer: XLayer
        The input layer to this cvx layer
    cvx_key: str
        The cvx key to be used for preprocessing
    shape: List[int]
        The input shape
    dtype: str
        The input data type
    """

    bottoms = [input_layer.name]

    shape[0] = -1
    shape = TensorShape(shape)

    # if cvx_key.split("__")[-1].split("-")[0] == 'transpose':
    #     axes_str = cvx_key.split("__")[-1].split("-")[1]
    #     t_axes = [0] + [int(axis) + 1 for axis in axes_str.split(",")]
    #     shape = [shape[axis] for axis in t_axes]

    attrs = kwargs
    attrs.update({'dtype': dtype, 'cvx_key': cvx_key})

    X = defaultXLayer()
    X = X._replace(name=op_name,
                   type=['Cvx'],
                   shapes=shape,
                   sizes=shape.get_size(),
                   layer=[op_name],
                   bottoms=bottoms,
                   attrs=attrs)

    return X
Example 10
def squeeze(op_name, input_layer, axis, **kwargs):
    # type: (str, XLayer, List[int]) -> XLayer
    """
    Create a Squeeze XLayer

    Arguments
    ---------
    op_name: str
        The name of this squeeze layer
    axis: List[int]
        The set of axes to squeeze
    input_layer: XLayer
        The input layer to this squeeze layer
    """
    assert (isinstance(axis, list) or axis is None)

    bottoms = [input_layer.name]

    attrs = kwargs
    attrs.update({'axis': axis})
    in_shapes = input_layer.shapes[:]
    if axis is None:
        shape = TensorShape([dim for dim in in_shapes if dim != 1])
    else:
        shape = TensorShape(
            [dim for i, dim in enumerate(in_shapes) if i not in axis])

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Squeeze'],
                   shapes=shape,
                   sizes=shape.get_size(),
                   layer=[op_name],
                   tops=[],
                   bottoms=bottoms,
                   attrs=attrs,
                   targets=[])

    return X
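
With axis=None every size-1 dimension is dropped, otherwise only the listed axes are removed; the resulting shape matches numpy.squeeze. A quick standalone check (values are illustrative):

import numpy as np

in_shape = [1, 8, 1, 7]

# axis=None: drop every dimension of size 1
assert [d for d in in_shape if d != 1] \
    == list(np.zeros(in_shape).squeeze().shape) == [8, 7]

# explicit axis list: drop only those positions
axis = [0]
assert [d for i, d in enumerate(in_shape) if i not in axis] \
    == list(np.zeros(in_shape).squeeze(axis=tuple(axis)).shape) == [8, 1, 7]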
Example 11
def transpose(op_name, input_layer, axes, **kwargs):
    # type: (str, XLayer, List[int]) -> XLayer
    """
    Create a Transpose XLayer

    Arguments
    ---------
    op_name: str
        The name of this transpose layer
    axes: List[int]
        The axes defining how to do the transpose
    input_layer: XLayer
        The input layer to this transpose layer
    """
    if 'Constant' in input_layer.type:
        # precompute
        # data is stored as a list with a single ndarray, so transpose that entry
        X = input_layer._replace(
            data=[np.transpose(input_layer.data[0], tuple(axes))])
    else:
        bottoms = [input_layer.name]

        new_shape = TensorShape([input_layer.shapes[i] for i in axes])

        attrs = kwargs
        attrs.update({'axes': axes})

        X = XLayer()
        X = X._replace(name=op_name,
                       type=['Transpose'],
                       shapes=new_shape,
                       sizes=new_shape.get_size(),
                       layer=[op_name],
                       tops=[],
                       bottoms=bottoms,
                       attrs=attrs,
                       targets=[])

    return X
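
The new shape is the old shape permuted by axes, which is exactly what numpy.transpose does to an array's shape. A quick standalone check (NCHW to NHWC as an illustrative permutation):

import numpy as np

in_shape = [1, 3, 224, 224]
axes = [0, 2, 3, 1]  # NCHW -> NHWC

new_shape = [in_shape[i] for i in axes]
assert new_shape == list(np.zeros(in_shape).transpose(axes).shape) == [1, 224, 224, 3]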
Example 12
def transpose(op_name: str,
              input_layer: XLayer,
              axes: List[int],
              internal=0,
              **kwargs):
    """
    Create a Transpose XLayer

    Arguments
    ---------
    op_name: str
        The name of this transpose layer
    axes: List[int]
        The axes defining how to do the transpose
    input_layer: XLayer
        The input layer to this transpose layer
    """
    bottoms = [input_layer.name]

    new_shape = TensorShape([input_layer.shapes[i] for i in axes])

    attrs = kwargs
    attrs.update({'axes': axes})

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Transpose'],
                   shapes=new_shape,
                   sizes=new_shape.get_size(),
                   layer=[op_name],
                   tops=[],
                   bottoms=bottoms,
                   attrs=attrs,
                   internal=internal,
                   targets=[])
    return X
Example 13
def pool2d(op_name,
           input_layer,
           pool_type,
           pool_size,
           strides,
           padding,
           layout,
           ceil_mode=False,
           count_include_pad=False,
           **kwargs):
    # type: (str, XLayer, str, List[int], List[int], List[int], str, bool,
    #   bool) -> XLayer
    """
    Create a pooling parameters layer

    Arguments
    ---------
    op_name: str
        The name of this pooling layer operation
    pool_type: str
        Indicates which pooling operation to use (Max or Avg)
    pool_size: List[int]
        The size of the pooling window
    strides: List[int]
        The pooling operation strides
    padding: List[int]
        The padding to be added before pooling
    layout: str
        The layout of the pooling layer input (`NCHW` or `NHWC`)
    ceil_mode: bool
        Whether to use ceiling or floor rounding while pooling
    count_include_pad: bool
        Whether to include padding to compute average
        (only for average pooling)
    input_layer: XLayer
        The input layer to this pooling layer
    """
    if layout not in ['NCHW', 'NHWC']:
        raise ValueError("Unsupported layout: {}, supported layouts are"
                         "NCHW and NHWC".format(layout))

    if pool_type not in ['Max', 'Avg']:
        raise NotImplementedError("Invalid pooling type: {}, can either be"
                                  " `Max` or `Avg`.".format(pool_type))

    def valid(x, k, p1, p2, s):
        return math.floor((x + p1 + p2 - k) / s) + 1

    def full(x, k, p1, p2, s):
        return math.ceil((x + p1 + p2 - k) / s) + 1

    # TODO: this is very similar as for NNVM operators -> merge
    if len(padding) == 4:
        # top bottom left right = h_before h_after w_before w_after
        full_paddings = \
            [[0, 0], [0, 0], [padding[0], padding[2]],
             [padding[1], padding[3]]]
    elif len(padding) == 2:
        full_paddings = \
            [[0, 0], [0, 0], [padding[0], padding[0]],
             [padding[1], padding[1]]]
    elif len(padding) == 1:
        full_paddings = [[0, 0], [0, 0], [padding[0], padding[0]],
                         [padding[0], padding[0]]]
    else:
        raise ValueError(
            "Invalid padding size passed by Relay operator, "
            " Sizes of 1, 2 and 4 are supported but not {}".format(
                len(padding)))

    # if full_paddings[2][0] != full_paddings[2][1] \
    #         or full_paddings[3][0] != full_paddings[3][1]:
    #     warnings.warn("[WARNING] Asymmetric padding for layer: {}. "
    #                   "Padding will be symmetrized for running on FPGA."
    #                   .format(op_name))

    padding = [
        min(full_paddings[2][0], full_paddings[2][1]),
        min(full_paddings[3][0], full_paddings[3][1])
    ]

    if layout == 'NCHW':
        insize = [input_layer.shapes[2], input_layer.shapes[3]]
        batches, channels = input_layer.shapes[0], input_layer.shapes[1]
    else:
        # NHWC
        insize = [input_layer.shapes[1], input_layer.shapes[2]]
        batches, channels = input_layer.shapes[0], input_layer.shapes[3]
        full_paddings = [full_paddings[i] for i in [0, 2, 3, 1]]

    outsize = []
    calc_func = full if ceil_mode else valid

    outsize = [
        calc_func(insize[1], pool_size[1], full_paddings[3][0],
                  full_paddings[3][1], strides[1]),
        calc_func(insize[0], pool_size[0], full_paddings[2][0],
                  full_paddings[2][1], strides[0])
    ]

    attrs = kwargs
    attrs.update({
        'type': pool_type,
        'padding': full_paddings,
        'strides': strides,  # HW
        'kernel_size': pool_size,  # HW
        'insize': insize,  # HW
        'outsize': [outsize[1], outsize[0]],  # HW
        'data_layout': layout,
        'pool_type': pool_type,
        # 'channels': [channels, channels]
    })
    if pool_type == 'Avg':
        attrs['count_include_pad'] = count_include_pad

    out_h, out_w = outsize[1], outsize[0]
    out_shape = TensorShape([batches, channels, out_h, out_w] if layout ==
                            'NCHW' else [batches, out_h, out_w, channels])

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Pooling'],
                   shapes=out_shape,
                   sizes=out_shape.get_size(),
                   attrs=attrs,
                   layer=[op_name],
                   tops=[],
                   bottoms=[input_layer.name],
                   targets=[])

    return X
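
The spatial output size follows the standard pooling formula; ceil_mode only switches the rounding between floor and ceiling. A standalone sketch of the same arithmetic for a single spatial dimension (values are illustrative):

import math

def pool_out_size(x, k, p1, p2, s, ceil_mode=False):
    # x: input size, k: kernel size, p1/p2: padding before/after, s: stride
    round_fn = math.ceil if ceil_mode else math.floor
    return round_fn((x + p1 + p2 - k) / s) + 1

# 112 input, 3x3 window, stride 2, no padding
assert pool_out_size(112, 3, 0, 0, 2) == 55                  # floor rounding
assert pool_out_size(112, 3, 0, 0, 2, ceil_mode=True) == 56  # ceiling rounding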
Example 14
    def test_get_size(self):

        ts = TensorShape(IntVector(lpx.IntVector([-1, 2, 3, 4])))

        assert ts.get_size() == [24]
Example 15
def conv2d_transpose(op_name, input_layer, weights_layer, kernel_size, strides,
                     padding_hw, dilation, groups, channels, data_layout,
                     kernel_layout, **kwargs):
    # type: (str, XLayer, XLayer, List[int], List[int], List[int],
    #        List[int], int, int, str, str) -> XLayer
    """
    Create a conv2d parameters layer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW` or `HWIO`)
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    """
    bottoms = [input_layer.name]

    logger.debug("-- Conv2DTranspose Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2DTranspose W shape: {}".format(
        weights_layer.data[0].shape))

    # Convert kernel to 'OIHW' layout
    if kernel_layout == 'OIHW':
        W = weights_layer.data[0]
    elif kernel_layout == 'HWIO':
        W = np.transpose(weights_layer.data[0], (3, 2, 0, 1))
    elif kernel_layout == 'IOHW':
        W = np.transpose(weights_layer.data[0], (1, 0, 2, 3))
    else:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be one of `OIHW`"
                                  ", `HWIO` or `IOHW`.".format(
                                      kernel_layout, op_name))

    assert len(padding_hw) in [2, 4]
    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " but got: {}".format(len(padding_hw)))

    # W is now in OIHW shape
    in_ch, out_ch = W.shape[1], W.shape[0]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Shape
    # Input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("{} {}".format(input_layer.shapes, in_ch))
    assert input_layer.shapes[1] == in_ch

    if padding_hw[0] == (kernel_size[0] - strides[0]) / 2 and\
            padding_hw[1] == (kernel_size[1] - strides[1]) / 2:
        padding_type = 'SAME'
    elif padding_hw[0] == 0 and padding_hw[1] == 0:
        padding_type = 'VALID'
    else:
        raise NotImplementedError(
            "Unsupported padding for Conv2DTranspose"
            " Only Tensorflow padding 'SAME' and 'VALID'"
            " are supported but got: {} which does not"
            " translate to 'SAME' == [{}, {}] or 'VALID'"
            " == [0, 0]".format(padding_hw, (kernel_size[0] - strides[0]) / 2,
                                (kernel_size[1] - strides[1]) / 2))

    if padding_type == 'SAME':
        out_h = insize[0] * strides[0]
        out_w = insize[1] * strides[1]
    elif padding_type == 'VALID':
        out_h = (insize[0] - 1) * strides[0] + kernel_size[0]
        out_w = (insize[1] - 1) * strides[1] + kernel_size[1]

    out_shape = TensorShape([batches, out_ch, out_h, out_w])

    padding = [[0, 0], [0, 0], [pad_ht, pad_hb], [pad_wl, pad_wr]]
    padding = [padding['NCHW'.index(i)] for i in data_layout]

    attrs = kwargs
    attrs.update({
        'padding': padding,
        'data_layout': data_layout,
        'kernel_layout': 'OIHW',
        'shape': out_shape.tolist(),
        'kernel_size': kernel_size,
        'strides': strides,
        'groups': groups,
        'dilation': dilation,
        'channels': [in_ch, out_ch]
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Conv2DTranspose'],
        shapes=out_shape,
        sizes=out_shape.get_size(),  # [int(out_ch * out_h * out_w)],
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
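
For the two supported Tensorflow-style padding modes, the transposed convolution output size depends only on the input size, stride and kernel. A standalone sketch of those two formulas for one spatial dimension (illustrative values):

def conv2d_transpose_out_size(in_size, kernel, stride, padding_type):
    if padding_type == 'SAME':
        return in_size * stride
    if padding_type == 'VALID':
        return (in_size - 1) * stride + kernel
    raise ValueError("Only 'SAME' and 'VALID' are supported")

assert conv2d_transpose_out_size(14, 2, 2, 'SAME') == 28    # upsample by stride
assert conv2d_transpose_out_size(14, 3, 2, 'VALID') == 29   # (14 - 1) * 2 + 3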
Example 16
def conv2d(op_name, input_layer, weights_layer, kernel_size, strides,
           padding_hw, dilation, groups, channels, data_layout, kernel_layout,
           **kwargs):
    # type: (str, XLayer, XLayer, List[int], List[int], List[int], List[int],
    #   int, int, str, str) -> XLayer
    """
    Create a conv2d XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW` or `HWIO`)
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    """

    assert 'Constant' in weights_layer.type

    assert len(kernel_size) == 2
    assert len(dilation) == 2
    assert len(strides) == 2
    assert len(padding_hw) in [2, 4]

    bottoms = [input_layer.name]

    logger.debug("-- Conv2D Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2D W shape: {}".format(weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or \
            sorted(kernel_layout) != ['H', 'I', 'O', 'W']:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`".format(kernel_layout, op_name))
    transpose_axes = tuple([kernel_layout.index(e) for e in 'OIHW'])
    W = np.transpose(weights_layer.data[0], transpose_axes)

    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " but got: {}".format(len(padding_hw)))

    # W is now in OIHW shape
    in_ch, out_ch = W.shape[1], W.shape[0]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert (channels is None or out_ch == channels)

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("-- in shape: {}".format(input_layer.shapes))
    assert (input_layer.shapes[1] == in_ch * groups)

    logger.debug("-- padding (t,b,l,r): {}".format(
        (pad_ht, pad_hb, pad_wl, pad_wr)))

    # TODO dilation
    out_h = \
        int((insize[0] + pad_ht + pad_hb - kernel_size[0]) / strides[0] + 1)
    out_w = \
        int((insize[1] + pad_wl + pad_wr - kernel_size[1]) / strides[1] + 1)

    out_shape = TensorShape([batches, out_ch, out_h, out_w])

    padding_hh = [pad_ht, pad_hb]
    padding_ww = [pad_wl, pad_wr]

    if data_layout == 'NCHW':
        granular_padding = [[0, 0], [0, 0], padding_hh, padding_ww]
    else:
        granular_padding = [[0, 0], padding_hh, padding_ww, [0, 0]]

    logger.debug("-- out shape: {}".format(out_shape))

    attrs = kwargs
    attrs.update({
        'padding': granular_padding,
        'data_layout': data_layout,
        'kernel_layout': 'OIHW',
        'shape': out_shape.tolist(),
        'kernel_size': kernel_size,
        'strides': strides,
        'groups': groups,
        'dilation': dilation,
        'channels': [in_ch, out_ch]
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Convolution'],
        shapes=out_shape,
        sizes=out_shape.get_size(),  # [int(out_ch * out_h * out_w)],
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
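
This version ignores dilation (see the TODO above), so the output size is the plain convolution formula. A standalone sketch for a single spatial dimension (illustrative values):

def conv_out_size(in_size, kernel, pad_before, pad_after, stride):
    return (in_size + pad_before + pad_after - kernel) // stride + 1

# 224 input, 7x7 kernel, stride 2, padding 3 on each side -> 112
assert conv_out_size(224, 7, 3, 3, 2) == 112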
Example 17
def conv2d_transpose(op_name: str,
                     input_layer: XLayer,
                     weights_layer: XLayer,
                     kernel_size: List[int],
                     strides: List[int] = [1, 1],
                     padding_hw: List[int] = [0, 0, 0, 0],
                     dilation: List[int] = [1, 1],
                     groups: int = 1,
                     channels: int = None,
                     data_layout: str = "NCHW",
                     kernel_layout: str = "OIHW",
                     target_kernel_layout: str = "OIHW",
                     **kwargs) -> XLayer:
    """
    Create a Conv2DTranspose XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    target_kernel_layout: str
        The target layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    """
    bottoms = [input_layer.name]

    layout_idx = tuple([data_layout.index(e) for e in "NCHW"])
    layout_idx_transpose = tuple(["NCHW".index(e) for e in data_layout])
    B_idx, C_idx, H_idx, W_idx = layout_idx

    logger.debug("-- Conv2DTranspose Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2DTranspose W shape: {}".format(
        weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or sorted(kernel_layout) != [
            "H", "I", "O", "W"
    ]:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`".format(kernel_layout, op_name))
    transpose_axes = tuple(
        [kernel_layout.index(e) for e in target_kernel_layout])
    W = np.transpose(weights_layer.data[0], transpose_axes)
    kernel_layout_idx = tuple([target_kernel_layout.index(e) for e in "OIHW"])
    kO_idx, kI_idx, kH_idx, kW_idx = kernel_layout_idx

    assert len(padding_hw) in [2, 4]
    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " but got: {}".format(len(padding_hw)))

    in_ch, out_ch = W.shape[kI_idx] * groups, W.shape[kO_idx]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels
    channels = out_ch if channels is None else channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Shape
    # Input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("{} {}".format(input_layer.shapes, in_ch))
    assert input_layer.shapes[C_idx] == in_ch

    if ((pad_ht + pad_hb) == (kernel_size[0] - strides[0])
            and abs(pad_ht - pad_hb) <= 1
            and (pad_wl + pad_wr) == (kernel_size[1] - strides[1])
            and abs(pad_wl - pad_wr) <= 1):
        padding_type = "SAME"
    elif pad_ht == 0 and pad_wl == 0:
        padding_type = "VALID"
    else:
        raise NotImplementedError(
            "Unsupported padding for Conv2DTranspose"
            " Only Tensorflow padding 'SAME' and 'VALID'"
            " are supported but got: {} which does not"
            " translate to 'SAME' == [pad_ht + pad_hb = {}, pad_wl + pad_wr = {}] or 'VALID'"
            " == [0, 0]".format(
                (pad_ht, pad_hb, pad_wl, pad_wr),
                (kernel_size[0] - strides[0]),
                (kernel_size[1] - strides[1]),
            ))

    if padding_type == "SAME":
        out_h = insize[0] * strides[0]
        out_w = insize[1] * strides[1]
    elif padding_type == "VALID":
        out_h = (insize[0] - 1) * strides[0] + kernel_size[0]
        out_w = (insize[1] - 1) * strides[1] + kernel_size[1]

    out_shape = TensorShape([[batches, out_ch, out_h, out_w][i]
                             for i in layout_idx_transpose])

    padding = [[0, 0], [0, 0], [pad_ht, pad_hb], [pad_wl, pad_wr]]
    padding = [padding["NCHW".index(i)] for i in data_layout]

    attrs = kwargs
    attrs.update({
        "padding": padding,
        "data_layout": data_layout,
        "kernel_layout": "OIHW",
        "shape": out_shape.tolist(),
        "kernel_size": kernel_size,
        "strides": strides,
        "groups": groups,
        "dilation": dilation,
        "channels": [in_ch, out_ch],
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=["Conv2DTranspose"],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[],
    )
    return X
Example 18
def conv2d(op_name: str,
           input_layer: XLayer,
           weights_layer: XLayer,
           kernel_size: List[int],
           strides: List[int] = [1, 1],
           padding_hw: List[int] = [0, 0, 0, 0],
           dilation: List[int] = [1, 1],
           groups: int = 1,
           channels: int = None,
           data_layout: str = "NCHW",
           kernel_layout: str = "OIHW",
           target_kernel_layout: str = "OIHW",
           **kwargs) -> XLayer:
    """
    Create a conv2d XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    target_kernel_layout: str
        The target layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    """

    assert "Constant" in weights_layer.type

    assert len(kernel_size) == 2
    assert len(dilation) == 2
    assert len(strides) == 2
    assert len(padding_hw) in [2, 4]

    layout_idx = tuple([data_layout.index(e) for e in "NCHW"])
    layout_idx_transpose = tuple(["NCHW".index(e) for e in data_layout])
    B_idx, C_idx, H_idx, W_idx = layout_idx

    bottoms = [input_layer.name]

    logger.debug("-- Conv2D Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2D W shape: {}".format(weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or sorted(kernel_layout) != [
            "H", "I", "O", "W"
    ]:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`".format(kernel_layout, op_name))
    transpose_axes = tuple(
        [kernel_layout.index(e) for e in target_kernel_layout])
    W = np.transpose(weights_layer.data[0], transpose_axes)
    kernel_layout_idx = tuple([target_kernel_layout.index(e) for e in "OIHW"])
    kO_idx, kI_idx, kH_idx, kW_idx = kernel_layout_idx

    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " but got: {}".format(len(padding_hw)))

    in_ch, out_ch = W.shape[kI_idx] * groups, W.shape[kO_idx]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels
    channels = out_ch if channels is None else channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Spatial input size, indexed according to the data layout
    insize = [input_layer.shapes[H_idx], input_layer.shapes[W_idx]]
    batches = input_layer.shapes[0]
    logger.debug("-- in shape: {}".format(input_layer.shapes))
    logger.debug("-- padding (t,b,l,r): {}".format(
        (pad_ht, pad_hb, pad_wl, pad_wr)))

    out_h = int((insize[0] + pad_ht + pad_hb - dilation[0] *
                 (kernel_size[0] - 1) - 1) / strides[0] + 1)
    out_w = int((insize[1] + pad_wl + pad_wr - dilation[1] *
                 (kernel_size[1] - 1) - 1) / strides[1] + 1)

    out_shape = TensorShape([[batches, out_ch, out_h, out_w][i]
                             for i in layout_idx_transpose])

    padding_hh = [pad_ht, pad_hb]
    padding_ww = [pad_wl, pad_wr]

    if data_layout == "NCHW":
        granular_padding = [[0, 0], [0, 0], padding_hh, padding_ww]
    else:
        granular_padding = [[0, 0], padding_hh, padding_ww, [0, 0]]

    logger.debug("-- out shape: {}".format(out_shape))

    attrs = kwargs
    attrs.update({
        "padding": granular_padding,
        "data_layout": data_layout,
        "kernel_layout": target_kernel_layout,
        "shape": out_shape.tolist(),
        "kernel_size": kernel_size,
        "strides": strides,
        "groups": groups,
        "dilation": dilation,
        "channels": [in_ch, out_ch],
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=["Convolution"],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[],
    )

    return X
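
Unlike Example 16, this version accounts for dilation: the effective kernel extent becomes dilation * (kernel_size - 1) + 1. A standalone sketch of the dilation-aware output size for one spatial dimension (illustrative values):

def conv_out_size(in_size, kernel, pad_before, pad_after, stride, dilation=1):
    effective_kernel = dilation * (kernel - 1) + 1
    return (in_size + pad_before + pad_after - effective_kernel) // stride + 1

# A 3x3 kernel with dilation 2 covers a 5x5 extent, so it needs padding 2
# on each side to preserve the input size at stride 1
assert conv_out_size(28, 3, 2, 2, 1, dilation=2) == 28
assert conv_out_size(28, 3, 1, 1, 1, dilation=1) == 28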