Code Example #1
def tuple(op_name, input_layers, **kwargs):
    # type: (str, List[XLayer], **Any) -> XLayer
    """
    Create a Tuple XLayer for grouping a list of input layers

    Arguments
    ---------
    op_name: str
        The name of the new Tuple operation
    input_layers: List[XLayer]
        The input layers to be grouped in a tuple data structure
    """
    bottoms = [input_layer.name for input_layer in input_layers]
    shapes = TupleShape([TensorShape(il.shapes[:]) for il in input_layers])

    X = XLayer()
    X = X._replace(name=op_name,
                   type=['Tuple'],
                   shapes=shapes,
                   sizes=shapes.get_size(),
                   layer=[op_name],
                   tops=[],
                   bottoms=bottoms,
                   attrs=kwargs,
                   targets=[])

    return X
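
A minimal usage sketch for the factory above (hypothetical layer names; the
input XLayer construction mirrors the split examples further down this page,
and the expected shapes follow the Relay Tuple test in Code Example #6):

    in1 = XLayer(type=['Input'], name='in1', shapes=[-1, 4, 2, 2],
                 sizes=[16], bottoms=[], tops=[], targets=[])
    in2 = XLayer(type=['Input'], name='in2', shapes=[-1, 3, 2, 2],
                 sizes=[12], bottoms=[], tops=[], targets=[])

    tX = tuple('tuple1', [in1, in2])  # the factory defined above

    assert tX.type[0] == 'Tuple'
    assert tX.bottoms == ['in1', 'in2']
    assert tX.shapes == TupleShape([TensorShape([-1, 4, 2, 2]),
                                    TensorShape([-1, 3, 2, 2])])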
Code Example #2
    def test_tuple_get_item(self):
        A = np.array([0.1, 0.05], dtype=np.float32)
        B = np.array([0.1, 0.05, 0.1], dtype=np.float32)

        layers = [
            TupleLayer(
                name='tuple',
                xtype='Tuple',
                shape=TupleShape([TensorShape([2]), TensorShape([3])]),
                dtype='float32',
                inputs=['input1', 'input2'],
                input_shapes=[TensorShape([2]), TensorShape([3])],
                data=[],
                attrs={},
                subgraph=None
            ),
            TupleGetItemLayer(
                name='tgi',
                xtype='TupleGetItem',
                shape=TensorShape([3]),
                dtype='float32',
                inputs=['tuple'],
                input_shapes=[TupleShape([TensorShape([2]), TensorShape([3])])],
                subgraph=None,
                data=[],
                attrs={
                    'index': 1
                }
            )
        ]

        tupl = layers[0].forward_exec([A, B])
        outpt = layers[1].forward_exec([tupl])

        np.testing.assert_array_equal(outpt, B)
Code Example #3
    def test_get_item_slice(self):

        ts = TupleShape([TensorShape([1]), TensorShape([2])])

        ts_slice = ts[:]

        assert len(ts_slice) == 2
        assert ts_slice == [[1], [2]]
        assert isinstance(ts_slice[0], TensorShape)
        assert isinstance(ts_slice[1], TensorShape)

        _shape = lpx.IntVector2D([lpx.IntVector([1]),
                                  lpx.IntVector([2])])
        ts = TupleShape(IntVector2D(_shape))

        ts_slice = ts[:]

        assert len(ts_slice) == 2
        assert ts_slice == [[1], [2]]
        assert isinstance(ts_slice[0], TensorShape)
        assert isinstance(ts_slice[1], TensorShape)

        ts[0:2] = [[-1, 2], [-1, 3]]

        assert ts == [[-1, 2], [-1, 3]]
Code Example #4
File: test_xlayer.py Project: eghasemi89/pyxir
    def test_xlayer_shapes(self):

        # TensorShape
        X = XLayer(shapes=[-1, 2, 4, 4])

        assert X.shapes == [-1, 2, 4, 4]
        assert X.shapes == TensorShape([-1, 2, 4, 4])

        X.shapes[1] = 3
        assert X.shapes == [-1, 3, 4, 4]
        assert X.shapes == TensorShape([-1, 3, 4, 4])

        X.shapes = [-1, 3, 5, 5]
        assert X.shapes == [-1, 3, 5, 5]
        assert X.shapes == TensorShape([-1, 3, 5, 5])
        assert X.shapes.get_size() == [75]

        shapes2 = X.shapes._replace(5, 6)
        assert shapes2 == TensorShape([-1, 3, 6, 6])

        # TupleShape
        X = XLayer(shapes=[[-1, 2, 4, 4], [-1, 2, 3, 3]])

        assert X.shapes == [[-1, 2, 4, 4], [-1, 2, 3, 3]]
        assert X.shapes == TupleShape(
            [TensorShape([-1, 2, 4, 4]),
             TensorShape([-1, 2, 3, 3])])
        assert X.shapes.get_size() == [32, 18]

        assert X.shapes[0] == [-1, 2, 4, 4]
        assert X.shapes[1] == [-1, 2, 3, 3]
        assert X.shapes[0] == TensorShape([-1, 2, 4, 4])
        assert X.shapes[1] == TensorShape([-1, 2, 3, 3])

        X.shapes[0] = [-1, 1, 2, 2]
        assert X.shapes == [[-1, 1, 2, 2], [-1, 2, 3, 3]]

        X.shapes[0][1] = 3
        assert X.shapes == [[-1, 3, 2, 2], [-1, 2, 3, 3]]
        assert X.shapes.get_size() == [12, 18]
        assert X.shapes.tolist() == [[-1, 3, 2, 2], [-1, 2, 3, 3]]

        X.shapes[1] = [-1, 3, 4, 4]
        assert X.shapes.get_size() == [12, 48]

        shapes2 = X.shapes._replace(4, 6)
        assert shapes2 == [[-1, 3, 2, 2], [-1, 3, 6, 6]]
        assert shapes2 == TupleShape([[-1, 3, 2, 2], [-1, 3, 6, 6]])
        assert shapes2.get_size() == [12, 108]
        assert X.shapes.get_size() == [12, 48]

        # Tuple one element
        X.shapes = [[1, 2, 3, 3]]
        assert X.shapes == [[1, 2, 3, 3]]
        assert X.shapes == TupleShape([[1, 2, 3, 3]])
Code Example #5
def split(attrs: Dict[str, Any],
          in_xlayers: List[XLayer]) -> Dict[str, Any]:
    """
    Registration of 'split' operator and shape computation.

    Split the input tensor along specified axis by the provided indices

    Attributes:
    -----------
        - axis: int
            The axis along which to do the split
        - indices: int or tuple[int]
            If the indices attribute is an integer, split the input tensor
            into that many equal-sized tensors along the given axis.
            If indices is a tuple of (sorted) integers, the entries specify
            the indices where the tensor should be split along the given axis.

    Returns:
    --------
        - xinfo: dict
            A dictionary containing necessary XOp information (shape)
    """
    # Some operation checks
    assert len(in_xlayers) == 1
    assert 'axis' in attrs
    assert 'indices' in attrs

    indices = attrs['indices']
    axis = attrs['axis']

    inshape = in_xlayers[0].shapes[:]
    assert isinstance(inshape, TensorShape)
    axes_size = inshape[axis]

    new_shape = TupleShape([])

    if isinstance(indices, int):
        if axes_size % indices != 0:
            raise ValueError("Split operation has integer indices attribute"
                             " {} but this is not a divisor of the dimension"
                             " of the input tensor with shape {} along axis:"
                             " {}".format(indices, inshape, axis))
        new_dim_size = axes_size // indices
        for _ in range(0, indices):
            shape = inshape[:]
            shape[axis] = new_dim_size
            new_shape.append(shape)
    else:
        prev = 0
        for i in indices:
            shape = inshape[:]
            shape[axis] = i - prev
            new_shape.append(shape)
            prev = i

        shape = inshape[:]
        shape[axis] = axes_size - prev
        new_shape.append(shape)

    return {'shape': new_shape}
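
To make the two indices branches concrete, this is what the shape computation
yields on the inputs used in the split test cases further down this page (a
worked sketch, not additional registration code):

    # Integer indices: inshape [1, 6, 4, 4], axis=1, indices=3
    #   -> 3 equal parts of size 6 // 3 = 2 along axis 1
    #   -> TupleShape([[1, 2, 4, 4], [1, 2, 4, 4], [1, 2, 4, 4]])
    #
    # Tuple indices: inshape [1, 5, 4, 4], axis=1, indices=(1, 4)
    #   -> slices [0:1], [1:4] and [4:5] along axis 1
    #   -> TupleShape([[1, 1, 4, 4], [1, 3, 4, 4], [1, 1, 4, 4]])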
Code Example #6
    def test_tuple(self):
        var1 = relay.var("var1", relay.TensorType((-1, 4, 2, 2), "int64"))
        var2 = relay.var("var2", relay.TensorType((-1, 3, 2, 2), "int64"))

        t = relay.Tuple([var1, var2])
        net = relay.Function([var1, var2], t)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert layers[0].type[0] == 'Input'
        assert isinstance(layers[0].attrs['dtype'], str)
        assert layers[0].attrs['dtype'] == 'int64'
        assert 'relay_id' in layers[0].attrs

        assert layers[1].type[0] == 'Input'
        assert isinstance(layers[1].attrs['dtype'], str)
        assert layers[1].attrs['dtype'] == 'int64'
        assert 'relay_id' in layers[1].attrs

        assert layers[2].type[0] == 'Tuple'
        assert layers[2].shapes == TupleShape([[-1, 4, 2, 2], [-1, 3, 2, 2]])
Code Example #7
def any_op(attrs: Dict, in_xlayers: List[XLayer]):
    """
    Create an AnyOp. This operation can have any number of inputs and
    attributes, and returns one output. Only the 'any_shape' attribute
    is required to generate the operation's shape.

    Attributes:
    -----------
    op_name: str
        The name of the operation
    in_xlayers: List[XLayer]
        A list of the input_layers
    any_shape: List[int] / List[List[int]]
        The shape of the operation
    """

    shape = attrs['any_shape']
    if len(shape) > 0 and isinstance(shape[0], list):
        shape = TupleShape(attrs['any_shape'][:])
    else:
        shape = TensorShape(attrs['any_shape'][:])

    logger.debug("--anyshape: {}".format(shape))

    return {'shape': shape}
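
Because of the dispatch on the first element of 'any_shape', a nested list is
interpreted as a tuple of tensor shapes and a flat list as a single tensor
shape. A small sketch with hypothetical attrs (the input layers play no role
in the shape computation):

    any_op({'any_shape': [1, 2, 4, 4]}, [])['shape']
    # -> TensorShape([1, 2, 4, 4])

    any_op({'any_shape': [[-1, 2, 3, 3], [1], [1]]}, [])['shape']
    # -> TupleShape([[-1, 2, 3, 3], [1], [1]])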
Code Example #8
    def test_split_tuple(self):
        data = relay.var("data", relay.TensorType((-1, 5, 4, 4), "float32"))

        net = relay.split(data, indices_or_sections=(1, 4), axis=1)\
                   .astuple()

        net = relay.Function([data], net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == 'Input'
        assert layers[1].type[0] == 'Split'
        assert 'relay_id' in layers[1].attrs
        assert layers[1].attrs['axis'] == 1
        assert layers[1].attrs['indices'] == (1, 4)
        assert layers[1].shapes == TupleShape([
            TensorShape([-1, 1, 4, 4]),
            TensorShape([-1, 3, 4, 4]),
            TensorShape([-1, 1, 4, 4])
        ])
Code Example #9
    def test_tuple_get_item(self):
        var1 = relay.var("var1", relay.TensorType((-1, 4, 2, 2), "int64"))
        var2 = relay.var("var2", relay.TensorType((-1, 3, 2, 2), "int64"))

        t = relay.Tuple([var1, var2])
        tgi = relay.TupleGetItem(t, 0)
        net = relay.Function([var1, var2], tgi)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xg = xf_relay.from_relay(mod, {})
        layers = xg.get_layers()

        assert len(layers) == 4

        assert layers[0].type[0] == "Input"
        assert isinstance(layers[0].attrs["dtype"], str)
        assert layers[0].attrs["dtype"] == "int64"
        assert "relay_id" in layers[0].attrs

        assert layers[1].type[0] == "Input"
        assert isinstance(layers[0].attrs["dtype"], str)
        assert layers[0].attrs["dtype"] == "int64"
        assert "relay_id" in layers[0].attrs

        assert layers[2].type[0] == "Tuple"
        assert layers[2].shapes == TupleShape([[-1, 4, 2, 2], [-1, 3, 2, 2]])

        assert layers[3].type[0] == "TupleGetItem"
        assert layers[3].attrs["index"] == 0
        assert layers[3].shapes == TensorShape([-1, 4, 2, 2])
Code Example #10
    def test_tuple_get_item_transpose(self):
        A = np.ones((1, 4, 4, 3), dtype=np.float32)
        B = np.ones((1, 4, 4, 3), dtype=np.float32)

        X = xlayer.XLayer(name='tgi',
                          type=['TupleGetItem'],
                          shapes=[1, 3, 4, 4],
                          sizes=[48],
                          bottoms=['in'],
                          tops=[],
                          attrs={
                              'index': 1,
                              'transpose': True,
                              'axes': [0, 3, 1, 2]
                          },
                          targets=[])

        input_shapes = {
            'in':
            TupleShape([TensorShape([1, 4, 4, 3]),
                        TensorShape([1, 4, 4, 3])])
        }
        params = {}
        layers = X_2_TF['TupleGetItem'](X, input_shapes, params)
        assert len(layers) == 1

        outpt = layers[0].forward_exec([A, B])

        assert outpt.shape == (1, 3, 4, 4)
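
Note on the 'transpose' attribute used here: with 'transpose': True and
'axes': [0, 3, 1, 2], the selected tuple element is transposed after indexing,
which is why the NHWC input of shape (1, 4, 4, 3) comes out as NCHW
(1, 3, 4, 4). The NumPy equivalent of the expected output would be:

    expected = np.transpose(B, (0, 3, 1, 2))  # (1, 4, 4, 3) -> (1, 3, 4, 4)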
Code Example #11
    def test_split_default(self):
        a = np.zeros((1, 6), dtype=np.float32)

        node = onnx.helper.make_node(
            'Split',
            inputs=['a'],
            outputs=['x', 'y', 'z'],
            axis=1
        )

        wrapped_node = NodeWrapper(node)

        aX = xlf.get_xop_factory_func('Input')('a', list(a.shape),
                                               dtype='float32')

        xmap = {'a': aX}
        params = {}

        Xs = ol3.split(wrapped_node, params, xmap)

        assert len(Xs) == 4
        X = Xs[0]

        assert X.name == 'split-x'
        assert 'Split' in X.type
        assert X.attrs['axis'] == 1
        assert X.attrs['indices'] == [2, 4]
        assert X.shapes == TupleShape([TensorShape([-1, 2]),
                                       TensorShape([-1, 2]),
                                       TensorShape([-1, 2])])

        assert Xs[1].name == 'x'
        assert Xs[2].name == 'y'
        assert Xs[3].name == 'z'
Code Example #12
File: l0_other.py Project: volcacius/pyxir
def relay_op(attrs, in_xlayers):
    # type: (Dict[str, Any], List[XLayer]) -> Dict[str, Any]
    """ Return RelayOp registration information (shape) """

    relay_shape = attrs['relay_shape']
    if len(relay_shape) > 0 and isinstance(relay_shape[0], list):
        shape = TupleShape(attrs['relay_shape'][:])
    else:
        shape = TensorShape(attrs['relay_shape'][:])

    return {'shape': shape}
Code Example #13
    def test_tuple_get_item_transpose(self):
        A = np.ones((1, 4, 4, 3), dtype=np.float32)
        B = np.ones((1, 4, 4, 3), dtype=np.float32)

        layers = [
            TupleLayer(
                name='tuple',
                xtype='Tuple',
                shape=TupleShape([TensorShape([1, 4, 4, 3]), TensorShape([1, 4, 4, 3])]),
                dtype='float32',
                inputs=['input1', 'input2'],
                input_shapes=[TensorShape([1, 4, 4, 3]), TensorShape([1, 4, 4, 3])],
                data=[],
                attrs={},
                subgraph=None
            ),
            TupleGetItemLayer(
                name='tgi',
                xtype='TupleGetItem',
                shape=TensorShape([1, 3, 4, 4]),
                dtype='float32',
                inputs=['tuple'],
                input_shapes=[TupleShape([TensorShape([1, 4, 4, 3]),
                                          TensorShape([1, 4, 4, 3])])],
                subgraph=None,
                data=[],
                attrs={
                    'index': 1,
                    'transpose': True,
                    'axes': [0, 3, 1, 2]
                }
            )
        ]

        tupl = layers[0].forward_exec([A, B])
        outpt = layers[1].forward_exec([tupl])

        assert outpt.shape == (1, 3, 4, 4)
Code Example #14
    def shapes(self):
        _shapes = self._xlayer.shapes
        _shapes_t = self._xlayer.shapes_t
        if _shapes_t == 'TensorShape' and len(_shapes) != 1:
            raise ValueError("TensorShape can only be one dimensional"
                             " but got: {}".format(len(_shapes)))

        if _shapes_t == 'TensorShape':
            return TensorShape(IntVector(_shapes[0]))
        elif _shapes_t == 'TupleShape':
            return TupleShape(IntVector2D(_shapes))
        else:
            raise ValueError("Unsupported shapes type: {}, should be"
                             " TensorShape or TupleShape".format(_shapes_t))
Code Example #15
    def test_tuple(self):
        layers = [
            InputLayer(name='in1',
                       shape=TensorShape([1, 1, 4, 4]),
                       dtype='float32',
                       inputs=['in1'],
                       input_shapes=[TensorShape([1, 1, 4, 4])],
                       subgraph=None),
            InputLayer(name='in2',
                       shape=TensorShape([1, 1, 4, 4]),
                       dtype='float32',
                       inputs=['in2'],
                       input_shapes=[TensorShape([1, 1, 4, 4])],
                       subgraph=None),
            TupleLayer(name='tuple',
                       xtype='Tuple',
                       shape=TupleShape([
                           TensorShape([1, 1, 4, 4]),
                           TensorShape([1, 1, 4, 4])
                       ]),
                       dtype='float32',
                       inputs=['in1', 'in2'],
                       input_shapes=[
                           TensorShape([1, 1, 4, 4]),
                           TensorShape([1, 1, 4, 4])
                       ],
                       data=[],
                       subgraph=None,
                       attrs={}),
        ]

        in1 = np.reshape(
            np.array([[1, -1, 0, 4, -5, 1, 0, 8, 3, -5, 1, 0, 1, 9, -3, -4]],
                     dtype=np.float32), (1, 1, 4, 4))
        in2 = np.reshape(
            np.array([[1, 0, 0, 4, 0, 1, 0, 8, 3, 0, 1, 0, 1, 9, 0, 0]],
                     dtype=np.float32), (1, 1, 4, 4))

        inputs = {'in1': in1, 'in2': in2}

        for layer in layers:
            inpts = [inputs[name] for name in layer.inputs]
            outpt = layer.forward_exec(inpts)

            inputs[layer.name] = outpt

        np.testing.assert_array_equal(inputs['tuple'][0], in1)
        np.testing.assert_array_equal(inputs['tuple'][1], in2)
Code Example #16
    def test_get_set_item(self):

        _shape = lpx.IntVector2D([lpx.IntVector([-1, 2]),
                                  lpx.IntVector([-1, 2, 4])])
        ts = TupleShape(IntVector2D(_shape))

        assert len(ts) == 2
        assert isinstance(ts[0], TensorShape)
        assert ts[0] == [-1, 2]
        assert ts[1] == [-1, 2, 4]

        with self.assertRaises(IndexError):
            ts[2]

        ts[0] = [-1, 3]
        assert ts == [[-1, 3], [-1, 2, 4]]
Code Example #17
    def test_set_value(self):

        ts = TupleShape([TensorShape([1, 2]), TensorShape([2, 4])])

        ts.set_value(0, -1)

        assert len(ts) == 2
        assert ts[0] == TensorShape([-1, 2])
        assert ts[1] == TensorShape([-1, 4])

        _shape = lpx.IntVector2D([lpx.IntVector([1, 2]),
                                  lpx.IntVector([2, 4])])
        ts = TupleShape(IntVector2D(_shape))

        ts.set_value(0, -1)

        assert len(ts) == 2
        assert ts[0] == TensorShape([-1, 2])
        assert ts[1] == TensorShape([-1, 4])
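
Note the broadcast semantics: set_value(0, -1) sets index 0 of every member
TensorShape, e.g. [[1, 2], [2, 4]] -> [[-1, 2], [-1, 4]]. This is the
mechanism used to reset batch dimensions to -1, cf. the
relay_shape.set_value(axis=0, value=-1) call in Code Example #24.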
Code Example #18
    def test_to_list(self):

        ts = TupleShape([TensorShape([1, 2]), TensorShape([2, 4])])

        assert ts == [[1, 2], [2, 4]]
        assert ts.tolist() == [[1, 2], [2, 4]]

        _shape = lpx.IntVector2D([lpx.IntVector([1, 2]),
                                  lpx.IntVector([2, 4])])
        ts = TupleShape(IntVector2D(_shape))

        assert ts.tolist() == [[1, 2], [2, 4]]
Code Example #19
def get_mse_quantize_eltwise_layer(X, input_shapes, params, **kwargs):
    # (XLayer, dict, dict, QuantParams) -> List[rt_layer.RtLayer]
    """
    TODO formalize checks
    """
    bitwidth = X.attrs['quant_bitwidth']
    axis = X.attrs['axis']
    dtype = X.attrs['dtype']
    mse_opt_num = X.attrs['mse_opt_num']

    use_relu = 'activation' in X.attrs and X.attrs['activation'] == 'ReLU'

    assert len(X.bottoms) in [5]
    assert bitwidth == 8
    assert dtype == 'float32'
    assert axis in [0, 1, 2, 3]

    layers = [
        MSEQuantizeEltwiseLayer(
            name=X.name,
            shape=X.shapes[:],
            dtype=dtype,
            inputs=X.bottoms,
            input_shapes=[input_shapes[bottom] for bottom in X.bottoms],
            subgraph=X.subgraph,
            axis=axis,  # TODO
            bitwidth=bitwidth,
            relu=use_relu,
            do_rounding=True,
            mse_opt_num=mse_opt_num)
    ]

    if use_relu:
        layers.append(
            ReluLayer(name=X.name,
                      xtype='ReLU',
                      shape=X.shapes,
                      dtype='float32',
                      inputs=[X.name],
                      input_shapes=TupleShape([X.shapes[:]]),
                      subgraph=X.subgraph,
                      attrs={}))

    return layers
Code Example #20
def relay_op(op_name: str, expr: Expr, in_xlayers: List[XLayer]):
    """Insert generic RelayOp operator"""

    logger.debug("-- op_name: {}".format(op_name))
    logger.debug("-- expr: {}".format(expr.op))

    try:
        ty = expr.checked_type
    except ValueError as e:
        # TODO, this is not correct
        if expr.type_args and len(expr.type_args) > 0:
            ty = expr.type_args[0]
        else:
            raise e
        
    if isinstance(ty, relay.ty.TensorType):
        relay_shape = TensorShape([int(s.value) for s in list(ty.shape)])
        dtype = str(ty.dtype)
    else:
        relay_shape = TupleShape(
            [TensorShape([int(i) for i in list(t_ty.shape)])
             for t_ty in ty.fields])
        dtype = [str(t_ty.dtype) for t_ty in ty.fields]

    # TODO
    # relay_shape.set_value(axis=0, value=-1)

    attrs = {}
    for attr in dir(expr.attrs):
        value = getattr(expr.attrs, attr)
        attrs[attr] = str(value)

    if 'dtype' in attrs:
        dtype = attrs['dtype']
        del attrs['dtype']

    X = xlf.get_xop_factory_func('RelayOp')(op_name, in_xlayers,
                                            relay_shape=relay_shape.tolist(),
                                            dtype=dtype,
                                            relay_id=[hash(expr)],
                                            **attrs)

    return X
Code Example #21
    def test_tuple_get_item(self):
        A = np.array([0.1, 0.05], dtype=np.float32)
        B = np.array([0.1, 0.05, 0.1], dtype=np.float32)

        X = xlayer.XLayer(name='tgi',
                          type=['TupleGetItem'],
                          shapes=[3],
                          sizes=[3],
                          bottoms=['in'],
                          tops=[],
                          attrs={'index': 1},
                          targets=[])

        input_shapes = {'in': TupleShape([TensorShape([2]), TensorShape([3])])}
        params = {}
        layers = X_2_TF['TupleGetItem'](X, input_shapes, params)
        assert len(layers) == 1

        outpt = layers[0].forward_exec([A, B])

        np.testing.assert_array_almost_equal(outpt, B)
Code Example #22
    def test_split_layer_int(self):

        iX = XLayer(type=['Input'],
                    name='in1',
                    shapes=[1, 6, 4, 4],
                    sizes=[96],
                    bottoms=[],
                    tops=[],
                    targets=[])

        sX = px.ops.split('split1', [iX], axis=1, indices=3)

        assert sX.type[0] == 'Split'
        assert sX.shapes == TupleShape([
            TensorShape([1, 2, 4, 4]),
            TensorShape([1, 2, 4, 4]),
            TensorShape([1, 2, 4, 4])
        ])
        assert sX.sizes == [32, 32, 32]
        assert sX.attrs['axis'] == 1
        assert sX.attrs['indices'] == 3
        assert sX.bottoms == ['in1']
Code Example #23
    def test_split_layer_tuple(self):

        iX = XLayer(type=['Input'],
                    name='in1',
                    shapes=[1, 5, 4, 4],
                    sizes=[80],
                    bottoms=[],
                    tops=[],
                    targets=[])

        sX = px.ops.split('split1', [iX], axis=1, indices=[1, 4])

        assert sX.type[0] == 'Split'
        assert sX.shapes == TupleShape([
            TensorShape([1, 1, 4, 4]),
            TensorShape([1, 3, 4, 4]),
            TensorShape([1, 1, 4, 4])
        ])
        assert sX.sizes == [16, 48, 16]
        assert sX.attrs['axis'] == 1
        assert sX.attrs['indices'] == (1, 4)
        assert sX.bottoms == ['in1']
Code Example #24
def relay_op(op_name, expr, in_xlayers):
    # type: (str, tvm.relay.expr.Expr, List[XLayer]) -> XLayer
    """ Insert generic relay op operator """

    logger.debug("-- op_name: {}".format(op_name))
    logger.debug("-- expr: {}".format(expr.op))

    ty = expr.checked_type
    if isinstance(ty, relay.ty.TensorType):
        relay_shape = TensorShape([int(i) for i in list(ty.shape)])
        dtype = str(ty.dtype)
    else:
        relay_shape = TupleShape([
            TensorShape([int(i) for i in list(t_ty.shape)])
            for t_ty in ty.fields
        ])
        dtype = [str(t_ty.dtype) for t_ty in ty.fields]

    # TODO
    relay_shape.set_value(axis=0, value=-1)

    attrs = {}
    for attr in dir(expr.attrs):
        value = getattr(expr.attrs, attr)
        attrs[attr] = str(value)

    if 'dtype' in attrs:
        dtype = attrs['dtype']
        del attrs['dtype']

    X = xlf.get_xop_factory_func('RelayOp')(op_name,
                                            in_xlayers,
                                            relay_shape=relay_shape.tolist(),
                                            dtype=dtype,
                                            relay_id=[hash(expr)],
                                            **attrs)

    return X
Code Example #25
    def test_dynamic_quantize_linear(self):
        a = np.zeros((1, 2, 3, 3), dtype=np.float32)

        node = onnx.helper.make_node('DynamicQuantizeLinear',
                                     inputs=['a'],
                                     outputs=['x', 'y', 'z'])

        wrapped_node = NodeWrapper(node)

        aX = xlf.get_xop_factory_func('Input')('a',
                                               list(a.shape),
                                               dtype='float32')

        xmap = {'a': aX}
        params = {}

        Xs = ol11.dynamic_quantize_linear(wrapped_node, params, xmap)

        assert len(Xs) == 4
        X = Xs[0]

        assert X.name == 'dql-x'
        assert 'AnyOp' in X.type
        assert X.shapes == TupleShape(
            [TensorShape([-1, 2, 3, 3]),
             TensorShape([1]),
             TensorShape([1])])

        assert Xs[1].name == 'x'
        assert Xs[1].shapes == TensorShape([-1, 2, 3, 3])
        assert Xs[2].name == 'y'
        assert 'TupleGetItem' in Xs[2].type
        assert Xs[2].shapes == TensorShape([1])
        assert Xs[3].name == 'z'
        assert 'TupleGetItem' in Xs[3].type
        assert Xs[3].shapes == TensorShape([1])
Code Example #26
    def test_split_layer_int(self):

        iX = XLayer(
            type=["Input"],
            name="in1",
            shapes=[1, 6, 4, 4],
            sizes=[96],
            bottoms=[],
            tops=[],
            targets=[],
        )

        sX = px.ops.split("split1", [iX], axis=1, indices=3)

        assert sX.type[0] == "Split"
        assert sX.shapes == TupleShape([
            TensorShape([1, 2, 4, 4]),
            TensorShape([1, 2, 4, 4]),
            TensorShape([1, 2, 4, 4]),
        ])
        assert sX.sizes == [32, 32, 32]
        assert sX.attrs["axis"] == 1
        assert sX.attrs["indices"] == 3
        assert sX.bottoms == ["in1"]
Code Example #27
    def test_split_layer_tuple(self):

        iX = XLayer(
            type=["Input"],
            name="in1",
            shapes=[1, 5, 4, 4],
            sizes=[80],
            bottoms=[],
            tops=[],
            targets=[],
        )

        sX = px.ops.split("split1", [iX], axis=1, indices=[1, 4])

        assert sX.type[0] == "Split"
        assert sX.shapes == TupleShape([
            TensorShape([1, 1, 4, 4]),
            TensorShape([1, 3, 4, 4]),
            TensorShape([1, 1, 4, 4]),
        ])
        assert sX.sizes == [16, 48, 16]
        assert sX.attrs["axis"] == 1
        assert sX.attrs["indices"] == (1, 4)
        assert sX.bottoms == ["in1"]
Code Example #28
    def test_split_int(self):
        data = relay.var("data", relay.TensorType((-1, 6, 4, 4), "float32"))

        net = relay.split(data, indices_or_sections=3, axis=1).astuple()

        net = relay.Function([data], net)

        mod = tvm.IRModule.from_expr(net)
        mod = relay.transform.InferType()(mod)

        xgraph = xf_relay.from_relay(mod, {})

        layers = xgraph.get_layers()

        assert layers[0].type[0] == "Input"
        assert layers[1].type[0] == "Split"
        assert "relay_id" in layers[1].attrs
        assert layers[1].attrs["axis"] == 1
        assert layers[1].attrs["indices"] == 3
        assert layers[1].shapes == TupleShape([
            TensorShape([-1, 2, 4, 4]),
            TensorShape([-1, 2, 4, 4]),
            TensorShape([-1, 2, 4, 4]),
        ])
Code Example #29
    def get_subgraphs(self, xgraph):
        # type: (XGraph) -> List[XGraph]
        """ Return a list of subgraphs for the given xgraph in XGraph format.
        """

        # ALGO:
        # 1. Loop through all the XLayers
        # 2. For every layer, if it's a target layer, add it to the
        #    corresponding partition if one of its bottoms has already been
        #    added to that partition, else start a new partition
        # TODO set bottoms and tops to [] for partition input and output
        #   layers respectively

        in_idx = 0

        visited = {}
        subgraphs = {}

        for X in xgraph.get_layers():

            if X.subgraph is not None:

                X_copy = copy.deepcopy(X)

                if X.subgraph not in subgraphs:
                    new_subgraph = xlayer.defaultXLayer()
                    new_subgraph = new_subgraph._replace(
                        name=X.subgraph,
                        type=['SubGraph'],
                        data=[],
                        shapes=TupleShape([]),
                        sizes=[],
                        internal=1,
                        attrs={
                            'target': X.target,
                            '__bottom_tensors': {},
                            'orig_bottom_tensors': {},
                            '__top_tensors': {},
                            'orig_top_tensors': {},
                        })
                    subgraphs[X.subgraph] = new_subgraph
                    visited[X.subgraph] = set()

                # First check if this layer is a subgraph input layer
                #   by looking at the visited subgraph layers
                for b in X.bottoms:

                    if b not in visited[X.subgraph]:

                        bX = xgraph.get(b)

                        x_in_name = 'xinput' + str(in_idx)

                        def find_original_bottom_layers(rX):
                            if not bool(rX.internal):
                                return [rX.name]

                            bottom_layers = []
                            for r_bottom_name in rX.bottoms:
                                rbX = xgraph.get(r_bottom_name)
                                rec_bottom_layers = \
                                    find_original_bottom_layers(rbX)
                                bottom_layers.extend(rec_bottom_layers)

                            return bottom_layers

                        orig_bottoms = find_original_bottom_layers(bX)

                        if 'input_names' not in subgraphs[X.subgraph].attrs:
                            subgraphs[X.subgraph].attrs['input_names'] =\
                                [x_in_name]
                        else:
                            subgraphs[X.subgraph].attrs['input_names']\
                                .append(x_in_name)

                        # Keep track of input - bottom connections
                        sg_bottoms_ext = \
                            subgraphs[X.subgraph].attrs['__bottom_tensors']
                        if X.name not in sg_bottoms_ext:
                            sg_bottoms_ext.update({x_in_name: [b]})
                        else:
                            new_bottoms_ext = sg_bottoms_ext[x_in_name] + [b]
                            sg_bottoms_ext.update({x_in_name: new_bottoms_ext})

                        # Keep track of input - original (model) bottom
                        #   connections, i.e. exclude internally added
                        #   operations here
                        sg_orig_bottoms_ext = \
                            subgraphs[X.subgraph].attrs['orig_bottom_tensors']
                        if X.name not in sg_orig_bottoms_ext:
                            sg_orig_bottoms_ext.update(
                                {x_in_name: orig_bottoms})
                        else:
                            new_orig_bottoms_ext = \
                                sg_orig_bottoms_ext[x_in_name] + orig_bottoms
                            sg_orig_bottoms_ext.update(
                                {x_in_name: new_orig_bottoms_ext})

                        new_in_X = xlayer.defaultXLayer()
                        new_in_X = new_in_X._replace(
                            name=x_in_name,
                            type=['Input'],
                            shapes=bX.shapes[:],
                            sizes=bX.sizes[:],
                            # Keep track of the first original layer of the
                            #   operation in front of which we are adding an
                            #   input layer
                            layer=[X.layer[0]],
                            tops=[X.name],
                            bottoms=[],
                            internal=1,
                            attrs={},
                            targets=[])
                        in_idx += 1

                        X_copy.bottoms[:] = \
                            [(bc if bc != b else new_in_X.name)
                             for bc in X_copy.bottoms]

                        subgraphs[X.subgraph].subgraph_data =\
                            subgraphs[X.subgraph].subgraph_data + [new_in_X]
                        # subgraphs[X.subgraph].shapes[:] = new_in_X.shapes[:]
                        # subgraphs[X.subgraph].sizes[:] = new_in_X.sizes[:]
                        subgraphs[X.subgraph].bottoms.append(b)

                        visited[X.subgraph].add(new_in_X.name)

                if X.tops == []:
                    sg_tops_ext = \
                        subgraphs[X.subgraph].attrs['__top_tensors']
                    sg_orig_tops_ext = \
                        subgraphs[X.subgraph].attrs['orig_top_tensors']
                    sg_tops_ext.update({X.name: []})
                    sg_orig_tops_ext.update({X.name: []})

                    if 'output_names' not in subgraphs[X.subgraph].attrs:
                        subgraphs[X.subgraph].attrs['output_names'] =\
                            [X.name]
                    else:
                        subgraphs[X.subgraph].attrs['output_names']\
                            .append(X.name)

                for t in X.tops:
                    tX = xgraph.get(t)

                    if tX.subgraph != X.subgraph:

                        def find_original_top_layers(rX):
                            if not bool(rX.internal):
                                return [rX.name]

                            top_layers = []
                            for r_top_name in rX.tops:
                                rtX = xgraph.get(r_top_name)
                                rec_top_layers = find_original_top_layers(rtX)
                                top_layers.extend(rec_top_layers)

                            return top_layers

                        orig_tops = find_original_top_layers(tX)

                        if 'output_names' not in subgraphs[X.subgraph].attrs:
                            subgraphs[X.subgraph].attrs['output_names'] =\
                                [X.name]
                        else:
                            subgraphs[X.subgraph].attrs['output_names']\
                                .append(X.name)

                        # Keep track of output - top connections
                        sg_tops_ext = \
                            subgraphs[X.subgraph].attrs['__top_tensors']
                        if X.name not in sg_tops_ext:
                            sg_tops_ext.update({X.name: [t]})  # X.tops[:]
                        else:
                            new_tops_ext = sg_tops_ext[X.name] + [t]  # X.tops
                            sg_tops_ext.update({X.name: new_tops_ext})

                        # Keep track of output - original (model) top
                        #   connections, i.e. exclude internally added
                        #   operations here
                        sg_orig_tops_ext = \
                            subgraphs[X.subgraph].attrs['orig_top_tensors']
                        if X.name not in sg_orig_tops_ext:
                            sg_orig_tops_ext.update({X.name: orig_tops})
                        else:
                            new_orig_tops_ext = \
                                sg_orig_tops_ext[X.name] + orig_tops
                            sg_orig_tops_ext.update(
                                {X.name: new_orig_tops_ext})

                        X_copy.tops.remove(t)
                        subgraphs[X.subgraph].tops.append(t)
                        subgraphs[X.subgraph].shapes.append(X.shapes[:])
                        subgraphs[X.subgraph].sizes.extend(X.sizes[:])

                # If no tops
                if X.tops == []:
                    subgraphs[X.subgraph].shapes.append(X.shapes[:])
                    subgraphs[X.subgraph].sizes.extend(X.sizes[:])

                subgraphs[X.subgraph].subgraph_data = \
                    subgraphs[X.subgraph].subgraph_data + [X_copy]
                visited[X.subgraph].add(X_copy.name)

        sg_list = []
        for sg, sgX in subgraphs.items():
            # (len(sgX.tops) == len(sgX.shapes))
            # if len(sgX.tops) != 1:
            #    raise ValueError("Subgraphs are only supported for one output"
            #        " but got: {}".format(sgX.tops))

            # TODO Sort xlayers in topological order
            # sub_xgraph = XGraphPartitioner.xgraph_factory.build_from_xlayer(
            #     net=sgX.data,
            #     name=sg
            # )

            sg_list.append(
                sgX._replace(
                    # shapes = sgX.shapes[0],
                    # sizes=[sum([s[0] for s in sgX.sizes])],
                    subgraph_data=sgX.subgraph_data))

        return sg_list
Code Example #30
    def test_get_size(self):

        ts = TupleShape([TensorShape([-1, 2]), TensorShape([-1, 2, 4])])

        assert ts.get_size() == [2, 8]
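
Consistent with the TensorShape.get_size assertion in Code Example #4
([-1, 3, 5, 5] -> [75]), the -1 "unknown batch" dimension is skipped when
computing sizes:

    # [-1, 2]    -> 2
    # [-1, 2, 4] -> 2 * 4 = 8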