Example no. 1
    def test_to_list(self):

        ts = TensorShape(IntVector(lpx.IntVector([1, 2, 3, 4])))

        assert ts == [1, 2, 3, 4]
        lst = ts.tolist()
        lst2 = list(ts)

        ts[0] = -1
        assert ts == [-1, 2, 3, 4]
        assert lst == [1, 2, 3, 4]
        assert lst2 == [1, 2, 3, 4]
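
The test above hinges on `tolist()` and `list()` returning independent Python copies of the shape, so that a later in-place write to the TensorShape does not show up in them. A minimal sketch of that copy-versus-reference behaviour, using a hypothetical MiniShape stand-in rather than pyxir's real TensorShape/IntVector wrappers:

# Hypothetical MiniShape stand-in, only to illustrate the copy semantics
# exercised by test_to_list; pyxir's real TensorShape wraps a C++ IntVector.
class MiniShape:

    def __init__(self, dims):
        self._dims = list(dims)

    def __getitem__(self, i):
        return self._dims[i]

    def __setitem__(self, i, value):
        self._dims[i] = value

    def __iter__(self):
        return iter(self._dims)

    def __eq__(self, other):
        return list(self._dims) == list(other)

    def tolist(self):
        return list(self._dims)  # returns a copy, not a view


ts = MiniShape([1, 2, 3, 4])
lst = ts.tolist()
lst2 = list(ts)
ts[0] = -1
assert ts == [-1, 2, 3, 4]
assert lst == [1, 2, 3, 4] and lst2 == [1, 2, 3, 4]  # copies unaffected
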
Example no. 2
def relay_op(op_name: str, expr: Expr, in_xlayers: List[XLayer]):
    """Insert generic RelayOp operator"""

    logger.debug("-- op_name: {}".format(op_name))
    logger.debug("-- expr: {}".format(expr.op))

    try:
        ty = expr.checked_type
    except ValueError as e:
        # TODO, this is not correct
        if expr.type_args and len(expr.type_args) > 0:
            ty = expr.type_args[0]
        else:
            raise e
        
    if isinstance(ty, relay.ty.TensorType):
        relay_shape = TensorShape([int(s.value) for s in list(ty.shape)])
        dtype = str(ty.dtype)
    else:
        relay_shape = TupleShape(
            [TensorShape([int(i) for i in list(t_ty.shape)])
             for t_ty in ty.fields])
        dtype = [str(t_ty.dtype) for t_ty in ty.fields]

    # TODO
    # relay_shape.set_value(axis=0, value=-1)

    attrs = {}
    for attr in dir(expr.attrs):
        value = getattr(expr.attrs, attr)
        attrs[attr] = str(value)

    if 'dtype' in attrs:
        dtype = attrs['dtype']
        del attrs['dtype']

    X = xlf.get_xop_factory_func('RelayOp')(op_name, in_xlayers,
                                            relay_shape=relay_shape.tolist(),
                                            dtype=dtype,
                                            relay_id=[hash(expr)],
                                            **attrs)

    return X
Example no. 3
def relay_op(op_name, expr, in_xlayers):
    # type: (str, tvm.relay.expr.Expr, List[XLayer]) -> XLayer
    """ Insert generic relay op operator """

    logger.debug("-- op_name: {}".format(op_name))
    logger.debug("-- expr: {}".format(expr.op))

    ty = expr.checked_type
    if isinstance(ty, relay.ty.TensorType):
        relay_shape = TensorShape([int(i) for i in list(ty.shape)])
        dtype = str(ty.dtype)
    else:
        relay_shape = TupleShape([
            TensorShape([int(i) for i in list(t_ty.shape)])
            for t_ty in ty.fields
        ])
        dtype = [str(t_ty.dtype) for t_ty in ty.fields]

    # TODO
    relay_shape.set_value(axis=0, value=-1)

    attrs = {}
    for attr in dir(expr.attrs):
        value = getattr(expr.attrs, attr)
        attrs[attr] = str(value)

    if 'dtype' in attrs:
        dtype = attrs['dtype']
        del attrs['dtype']

    X = xlf.get_xop_factory_func('RelayOp')(op_name,
                                            in_xlayers,
                                            relay_shape=relay_shape.tolist(),
                                            dtype=dtype,
                                            relay_id=[hash(expr)],
                                            **attrs)

    return X
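
Both relay_op variants above harvest the Relay call attributes by iterating `dir(expr.attrs)` and stringifying every value into the `attrs` dict that is forwarded to the XLayer factory. A rough, self-contained sketch of that harvesting step, using a plain Python object in place of a real tvm.relay attrs node (building one would require TVM) and filtering out the dunder names that `dir()` returns for ordinary objects:

import types

# Hypothetical stand-in for expr.attrs of a Relay call node.
fake_attrs = types.SimpleNamespace(kernel_size=(3, 3), strides=(1, 1), padding=(0, 0))

attrs = {}
for attr in dir(fake_attrs):
    if attr.startswith('_'):  # plain objects expose dunders; TVM attrs nodes do not
        continue
    attrs[attr] = str(getattr(fake_attrs, attr))

print(attrs)  # {'kernel_size': '(3, 3)', 'padding': '(0, 0)', 'strides': '(1, 1)'}
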
Example no. 4
def conv2d_transpose(op_name, input_layer, weights_layer, kernel_size, strides,
                     padding_hw, dilation, groups, channels, data_layout,
                     kernel_layout, **kwargs):
    # type: (str, XLayer, XLayer, List[int], List[int], List[int],
    #        List[int], int, int, str, str) -> XLayer
    """
    Create a Conv2DTranspose XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW` or `HWIO`)
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    """
    bottoms = [input_layer.name]

    logger.debug("-- Conv2DTranspose Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2DTranspose W shape: {}".format(
        weights_layer.data[0].shape))

    # Convert kernel to 'OIHW' layout
    if kernel_layout == 'OIHW':
        W = weights_layer.data[0]
    elif kernel_layout == 'HWIO':
        W = np.transpose(weights_layer.data[0], (3, 2, 0, 1))
    elif kernel_layout == 'IOHW':
        W = np.transpose(weights_layer.data[0], (1, 0, 2, 3))
    else:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be one of `OIHW`"
                                  ", `HWIO` or `IOHW`.".format(
                                      kernel_layout, op_name))

    assert len(padding_hw) in [2, 4]
    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " or 4 but got: {}".format(len(padding_hw)))

    # W is now in OIHW shape
    in_ch, out_ch = W.shape[1], W.shape[0]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Shape
    # Input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("{} {}".format(input_layer.shapes, in_ch))
    assert input_layer.shapes[1] == in_ch

    if padding_hw[0] == (kernel_size[0] - strides[0]) / 2 and\
            padding_hw[1] == (kernel_size[1] - strides[1]) / 2:
        padding_type = 'SAME'
    elif padding_hw[0] == 0 and padding_hw[1] == 0:
        padding_type = 'VALID'
    else:
        raise NotImplementedError(
            "Unsupported padding for Conv2DTranspose"
            " Only Tensorflow padding 'SAME' and 'VALID'"
            " are supported but got: {} which does not"
            " translate to 'SAME' == [{}, {}] or 'VALID'"
            " == [0, 0]".format(padding_hw, (kernel_size[0] - strides[0]) / 2,
                                (kernel_size[1] - strides[1]) / 2))

    if padding_type == 'SAME':
        out_h = insize[0] * strides[0]
        out_w = insize[1] * strides[1]
    elif padding_type == 'VALID':
        out_h = (insize[0] - 1) * strides[0] + kernel_size[0]
        out_w = (insize[1] - 1) * strides[1] + kernel_size[1]

    out_shape = TensorShape([batches, out_ch, out_h, out_w])

    padding = [[0, 0], [0, 0], [pad_ht, pad_hb], [pad_wl, pad_wr]]
    padding = [padding['NCHW'.index(i)] for i in data_layout]

    attrs = kwargs
    attrs.update({
        'padding': padding,
        'data_layout': data_layout,
        'kernel_layout': 'OIHW',
        'shape': out_shape.tolist(),
        'kernel_size': kernel_size,
        'strides': strides,
        'groups': groups,
        'dilation': dilation,
        'channels': [in_ch, out_ch]
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Conv2DTranspose'],
        shapes=out_shape,
        sizes=out_shape.get_size(),  # [int(out_ch * out_h * out_w)],
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
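
The output spatial size above follows the TensorFlow transposed-convolution conventions: with 'SAME' padding the input is simply scaled by the stride, with 'VALID' padding the kernel overhang is added back. A small numeric check of the two formulas used in the function (illustration only, the numbers are arbitrary):

def deconv_out_size(in_size, kernel, stride, padding_type):
    # Same arithmetic as out_h / out_w in conv2d_transpose above (TF convention).
    if padding_type == 'SAME':
        return in_size * stride
    if padding_type == 'VALID':
        return (in_size - 1) * stride + kernel
    raise ValueError(padding_type)


# e.g. a 7x7 feature map upsampled with a 4x4 kernel and stride 2
assert deconv_out_size(7, kernel=4, stride=2, padding_type='SAME') == 14
assert deconv_out_size(7, kernel=4, stride=2, padding_type='VALID') == 16
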
Example no. 5
def conv2d(op_name, input_layer, weights_layer, kernel_size, strides,
           padding_hw, dilation, groups, channels, data_layout, kernel_layout,
           **kwargs):
    # type: (str, XLayer, XLayer, List[int], List[int], List[int],
    #        List[int], int, int, str, str) -> XLayer
    """
    Create a conv2d XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW` or `HWIO`)
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    """

    assert 'Constant' in weights_layer.type

    assert len(kernel_size) == 2
    assert len(dilation) == 2
    assert len(strides) == 2
    assert len(padding_hw) in [2, 4]

    bottoms = [input_layer.name]

    logger.debug("-- Conv2D Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2D W shape: {}".format(weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or \
            sorted(kernel_layout) != ['H', 'I', 'O', 'W']:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`".format(kernel_layout, op_name))
    transpose_axes = tuple([kernel_layout.index(e) for e in 'OIHW'])
    W = np.transpose(weights_layer.data[0], transpose_axes)

    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " or 4 but got: {}".format(len(padding_hw)))

    # W is now in OIHW shape
    in_ch, out_ch = W.shape[1], W.shape[0]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert (channels is None or out_ch == channels)

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("-- in shape: {}".format(input_layer.shapes))
    assert (input_layer.shapes[1] == in_ch * groups)

    logger.debug("-- padding (t,b,l,r): {}".format(
        (pad_ht, pad_hb, pad_wl, pad_wr)))

    # TODO dilation
    out_h = \
        int((insize[0] + pad_ht + pad_hb - kernel_size[0]) / strides[0] + 1)
    out_w = \
        int((insize[1] + pad_wl + pad_wr - kernel_size[1]) / strides[1] + 1)

    out_shape = TensorShape([batches, out_ch, out_h, out_w])

    padding_hh = [pad_ht, pad_hb]
    padding_ww = [pad_wl, pad_wr]

    if data_layout == 'NCHW':
        granular_padding = [[0, 0], [0, 0], padding_hh, padding_ww]
    else:
        granular_padding = [[0, 0], padding_hh, padding_ww, [0, 0]]

    logger.debug("-- out shape: {}".format(out_shape))

    attrs = kwargs
    attrs.update({
        'padding': granular_padding,
        'data_layout': data_layout,
        'kernel_layout': 'OIHW',
        'shape': out_shape.tolist(),
        'kernel_size': kernel_size,
        'strides': strides,
        'groups': groups,
        'dilation': dilation,
        'channels': [in_ch, out_ch]
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=['Convolution'],
        shapes=out_shape,
        sizes=out_shape.get_size(),  # [int(out_ch * out_h * out_w)],
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[])

    return X
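
The output height and width above use the standard convolution size formula without dilation (the `# TODO dilation` marks that omission): out = (in + pad_total - kernel) // stride + 1. A quick numeric check, with arbitrary example numbers:

def conv_out_size(in_size, kernel, stride, pad_before, pad_after):
    # Same arithmetic as out_h / out_w in the conv2d above (dilation ignored).
    return (in_size + pad_before + pad_after - kernel) // stride + 1


# 224x224 input, 7x7 kernel, stride 2, padding 3 on each side -> 112x112
assert conv_out_size(224, kernel=7, stride=2, pad_before=3, pad_after=3) == 112
# 32x32 input, 3x3 kernel, stride 1, padding 1 -> 32x32 ("same"-style output)
assert conv_out_size(32, kernel=3, stride=1, pad_before=1, pad_after=1) == 32
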
Example no. 6
def conv2d_transpose(op_name: str,
                     input_layer: XLayer,
                     weights_layer: XLayer,
                     kernel_size: List[int],
                     strides: List[int] = [1, 1],
                     padding_hw: List[int] = [0, 0, 0, 0],
                     dilation: List[int] = [1, 1],
                     groups: int = 1,
                     channels: int = None,
                     data_layout: str = "NCHW",
                     kernel_layout: str = "OIHW",
                     target_kernel_layout: str = "OIHW",
                     **kwargs) -> XLayer:
    """
    Create a Conv2DTranspose XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    target_kernel_layout: str
        The target layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    """
    bottoms = [input_layer.name]

    layout_idx = tuple([data_layout.index(e) for e in "NCHW"])
    layout_idx_transpose = tuple(["NCHW".index(e) for e in data_layout])
    B_idx, C_idx, H_idx, W_idx = layout_idx

    logger.debug("-- Conv2DTranspose Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2DTranspose W shape: {}".format(
        weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or sorted(kernel_layout) != [
            "H", "I", "O", "W"
    ]:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`".format(kernel_layout, op_name))
    transpose_axes = tuple(
        [kernel_layout.index(e) for e in target_kernel_layout])
    W = np.transpose(weights_layer.data[0], transpose_axes)
    kernel_layout_idx = tuple([target_kernel_layout.index(e) for e in "OIHW"])
    kO_idx, kI_idx, kH_idx, kW_idx = kernel_layout_idx

    assert len(padding_hw) in [2, 4]
    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " or 4 but got: {}".format(len(padding_hw)))

    in_ch, out_ch = W.shape[kI_idx] * groups, W.shape[kO_idx]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels
    channels = out_ch if channels is None else channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # Shape
    # Input layer is always in NCHW by design
    insize = [input_layer.shapes[2], input_layer.shapes[3]]
    batches = input_layer.shapes[0]
    logger.debug("{} {}".format(input_layer.shapes, in_ch))
    assert input_layer.shapes[C_idx] == in_ch

    if ((pad_ht + pad_hb) == (kernel_size[0] - strides[0])
            and abs(pad_ht - pad_hb) <= 1
            and (pad_wl + pad_wr) == (kernel_size[1] - strides[1])
            and abs(pad_wl - pad_wr) <= 1):
        padding_type = "SAME"
    elif pad_ht == 0 and pad_wl == 0:
        padding_type = "VALID"
    else:
        raise NotImplementedError(
            "Unsupported padding for Conv2DTranspose"
            " Only Tensorflow padding 'SAME' and 'VALID'"
            " are supported but got: {} which does not"
            " translate to 'SAME' == [pad_ht + pad_hb = {}, pad_wl + pad_wr = {}] or 'VALID'"
            " == [0, 0]".format(
                (pad_ht, pad_hb, pad_wl, pad_wr),
                (kernel_size[0] - strides[0]),
                (kernel_size[1] - strides[1]),
            ))

    if padding_type == "SAME":
        out_h = insize[0] * strides[0]
        out_w = insize[1] * strides[1]
    elif padding_type == "VALID":
        out_h = (insize[0] - 1) * strides[0] + kernel_size[0]
        out_w = (insize[1] - 1) * strides[1] + kernel_size[1]

    out_shape = TensorShape([[batches, out_ch, out_h, out_w][i]
                             for i in layout_idx_transpose])

    padding = [[0, 0], [0, 0], [pad_ht, pad_hb], [pad_wl, pad_wr]]
    padding = [padding["NCHW".index(i)] for i in data_layout]

    attrs = kwargs
    attrs.update({
        "padding": padding,
        "data_layout": data_layout,
        "kernel_layout": "OIHW",
        "shape": out_shape.tolist(),
        "kernel_size": kernel_size,
        "strides": strides,
        "groups": groups,
        "dilation": dilation,
        "channels": [in_ch, out_ch],
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=["Conv2DTranspose"],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[],
    )
    return X
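
This variant generalises the shape handling to arbitrary data layouts: `layout_idx` records where the logical N, C, H, W axes sit in the given layout, and `layout_idx_transpose` permutes an NCHW-ordered shape list back into that layout before building the TensorShape. A short check of the two index tuples for an assumed NHWC input:

data_layout = "NHWC"  # example layout; NCHW would give identity permutations

# Position of N, C, H, W inside the data layout (as in the function above)
layout_idx = tuple(data_layout.index(e) for e in "NCHW")
# Permutation that reorders an NCHW shape list into the data layout
layout_idx_transpose = tuple("NCHW".index(e) for e in data_layout)

B_idx, C_idx, H_idx, W_idx = layout_idx
assert (B_idx, C_idx, H_idx, W_idx) == (0, 3, 1, 2)

nchw_shape = [1, 64, 28, 28]  # [batches, out_ch, out_h, out_w]
out_shape = [nchw_shape[i] for i in layout_idx_transpose]
assert out_shape == [1, 28, 28, 64]  # NHWC ordering
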
Example no. 7
def conv2d(op_name: str,
           input_layer: XLayer,
           weights_layer: XLayer,
           kernel_size: List[int],
           strides: List[int] = [1, 1],
           padding_hw: List[int] = [0, 0, 0, 0],
           dilation: List[int] = [1, 1],
           groups: int = 1,
           channels: int = None,
           data_layout: str = "NCHW",
           kernel_layout: str = "OIHW",
           target_kernel_layout: str = "OIHW",
           **kwargs) -> XLayer:
    """
    Create a conv2d XLayer

    Arguments
    ---------
    op_name: str
        The name of this conv2d layer operation
    input_layer: XLayer
        The input layer to this conv2d layer
    weights_layer: XLayer
        The weights input layer to this conv2d layer
    kernel_size: List[int]
        The size of the kernel windows
    strides: List[int]
        The convolution operation strides
    padding_hw: List[int]
        The padding to be added before convolution operation, can be length
        2 or 4: [pad_h, pad_w] or [pad_h_top, pad_h_bottom, pad_w_left,
        pad_w_right]
    dilation: List[int]
        The dilation to be used for this convolution operation
    groups: int
        Number of groups for grouped convolution.
    channels: int (or None!)
        Number of output channels for this convolution.
    data_layout: str
        The layout of the conv2d layer input (`NCHW` or `NHWC`)
    kernel_layout: str
        The layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    target_kernel_layout: str
        The target layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`)
    """

    assert "Constant" in weights_layer.type

    assert len(kernel_size) == 2
    assert len(dilation) == 2
    assert len(strides) == 2
    assert len(padding_hw) in [2, 4]

    layout_idx = tuple([data_layout.index(e) for e in "NCHW"])
    layout_idx_transpose = tuple(["NCHW".index(e) for e in data_layout])
    B_idx, C_idx, H_idx, W_idx = layout_idx

    bottoms = [input_layer.name]

    logger.debug("-- Conv2D Kernel layout: {}".format(kernel_layout))
    logger.debug("-- Conv2D W shape: {}".format(weights_layer.data[0].shape))

    if len(kernel_layout) != 4 or sorted(kernel_layout) != [
            "H", "I", "O", "W"
    ]:
        raise NotImplementedError("Unsupported kernel layout: {} for"
                                  " convolution: {}, should be a permutation"
                                  " of `OIHW`".format(kernel_layout, op_name))
    transpose_axes = tuple(
        [kernel_layout.index(e) for e in target_kernel_layout])
    W = np.transpose(weights_layer.data[0], transpose_axes)
    kernel_layout_idx = tuple([target_kernel_layout.index(e) for e in "OIHW"])
    kO_idx, kI_idx, kH_idx, kW_idx = kernel_layout_idx

    if len(padding_hw) == 4:
        pad_ht, pad_hb, pad_wl, pad_wr = padding_hw
    elif len(padding_hw) == 2:
        pad_ht, pad_wl = padding_hw
        pad_hb, pad_wr = padding_hw
    else:
        raise ValueError("'padding_hw' argument should be a list of length 2"
                         " or 4 but got: {}".format(len(padding_hw)))

    in_ch, out_ch = W.shape[kI_idx] * groups, W.shape[kO_idx]
    logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch))
    logger.debug("-- channels: {}".format(channels))

    assert channels is None or out_ch == channels
    channels = out_ch if channels is None else channels

    B = np.zeros([out_ch], dtype=np.float32)
    data = ConvData(W, B)

    # input layer is always in NCHW by design
    insize = [input_layer.shapes[H_idx], input_layer.shapes[W_idx]]
    batches = input_layer.shapes[0]
    logger.debug("-- in shape: {}".format(input_layer.shapes))
    logger.debug("-- padding (t,b,l,r): {}".format(
        (pad_ht, pad_hb, pad_wl, pad_wr)))

    out_h = int((insize[0] + pad_ht + pad_hb - dilation[0] *
                 (kernel_size[0] - 1) - 1) / strides[0] + 1)
    out_w = int((insize[1] + pad_wl + pad_wr - dilation[1] *
                 (kernel_size[1] - 1) - 1) / strides[1] + 1)

    out_shape = TensorShape([[batches, out_ch, out_h, out_w][i]
                             for i in layout_idx_transpose])

    padding_hh = [pad_ht, pad_hb]
    padding_ww = [pad_wl, pad_wr]

    if data_layout == "NCHW":
        granular_padding = [[0, 0], [0, 0], padding_hh, padding_ww]
    else:
        granular_padding = [[0, 0], padding_hh, padding_ww, [0, 0]]

    logger.debug("-- out shape: {}".format(out_shape))

    attrs = kwargs
    attrs.update({
        "padding": granular_padding,
        "data_layout": data_layout,
        "kernel_layout": target_kernel_layout,
        "shape": out_shape.tolist(),
        "kernel_size": kernel_size,
        "strides": strides,
        "groups": groups,
        "dilation": dilation,
        "channels": [in_ch, out_ch],
    })

    X = XLayer()
    X = X._replace(
        name=op_name,
        type=["Convolution"],
        shapes=out_shape,
        sizes=out_shape.get_size(),
        data=data,
        layer=[op_name],
        tops=[],
        bottoms=bottoms,
        attrs=attrs,
        targets=[],
    )

    return X
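
The kernel-layout handling in the two typed variants above reduces to a single permutation: `transpose_axes` lists, for each axis of the target layout, where that axis currently sits in `kernel_layout`, and `np.transpose` applies it. A small numpy check converting an assumed HWIO weight tensor into OIHW:

import numpy as np

kernel_layout = "HWIO"         # layout of the incoming weights (example)
target_kernel_layout = "OIHW"  # layout expected downstream

# For each target axis, find its position in the source layout
transpose_axes = tuple(kernel_layout.index(e) for e in target_kernel_layout)
assert transpose_axes == (3, 2, 0, 1)

W_hwio = np.zeros((3, 3, 16, 32), dtype=np.float32)  # H, W, I, O
W_oihw = np.transpose(W_hwio, transpose_axes)
assert W_oihw.shape == (32, 16, 3, 3)  # O, I, H, W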