Code Example #1
 def test_shape_for_layout_unknown_parameter(self):
     with self.assertRaises(Error):
         shape_for_layout('NHWC',
                          batch=2,
                          features=3,
                          height=4,
                          width=5,
                          unknown_parameter=123)
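Every example in this section calls shape_for_layout. As orientation, here is a minimal sketch of the behavior implied by that call pattern and by the tests in Code Examples #13, #14, #22 and #23 (NHWC and NCHW ordering, an Error for unknown or missing parameters, an optional depth for 5D shapes). The real implementation lives in the OpenVINO Model Optimizer and differs in details (e.g. handling of dynamic dimensions), so treat this as an assumption, not the actual source:

import numpy as np


class Error(Exception):  # stand-in for mo.utils.error.Error
    pass


def shape_for_layout(layout: str, **kwargs):
    known = {'batch', 'features', 'height', 'width', 'depth'}
    unknown = set(kwargs) - known
    if unknown:
        raise Error('Unknown parameter(s): {}'.format(unknown))
    if any(kwargs.get(name) is None for name in ('batch', 'features', 'height', 'width')):
        raise Error('Missing required parameter(s)')
    batch, features = kwargs['batch'], kwargs['features']
    spatial = [kwargs['height'], kwargs['width']]
    if kwargs.get('depth') is not None:  # 5D case, cf. Code Examples #6 and #16
        spatial.insert(0, kwargs['depth'])
    if layout == 'NHWC':                 # [N, (D,) H, W, C]
        return np.array([batch, *spatial, features], dtype=np.int64)
    if layout == 'NCHW':                 # [N, C, (D,) H, W]
        return np.array([batch, features, *spatial], dtype=np.int64)
    raise Error('Unsupported layout: {}'.format(layout))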
Code Example #2
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=int(C / (block_size**2)),
                                     height=int(H * block_size),
                                     width=int(W * block_size))

        assert np.prod(in_shape) == np.prod(out_shape)
        node.out_node().shape = int64_array(out_shape)
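To make the DepthToSpace shape arithmetic above concrete, a small standalone check with hypothetical NHWC values (the numbers are illustrative, not from the snippet):

# block_size ** 2 channels are traded for a block_size-times larger spatial
# grid, so the total element count is preserved (hypothetical values).
N, H, W, C = 2, 4, 6, 8
block_size = 2
out = [N, H * block_size, W * block_size, C // block_size ** 2]
assert out == [2, 8, 12, 2]
assert N * H * W * C == out[0] * out[1] * out[2] * out[3]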
Code Example #3
File: roialign.py Project: zoeysgithub/openvino
    def infer(node):
        layout = node.graph.graph['layout']
        node_name = node.soft_get('name', node.id)

        assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, \
            'The node "{}" must have 3 inputs'.format(node_name)

        assert node.has_valid('pooled_w'), '"pooled_w" attribute is not set for node "{}"'.format(node_name)
        assert node.has_valid('pooled_h'), '"pooled_h" attribute is not set for node "{}"'.format(node_name)
        assert node.has_valid('mode'), '"mode" attribute is not set for node "{}"'.format(node_name)
        assert node.mode in ['avg', 'max'], \
            '"mode" attribute range of values is ["avg", "max"], got {} for node "{}"'.format(node.mode, node_name)

        input_shape = node.in_port(0).data.get_shape()
        rois_shape = node.in_port(1).data.get_shape()
        indices_shape = node.in_port(2).data.get_shape()
        assert input_shape is not None and rois_shape is not None and indices_shape is not None, \
            'The node "{}" input shape is None'.format(node_name)
        assert rois_shape[0] == indices_shape[0], 'The number of batch indices does not correspond to number of ROIs ' \
                                                  'for node "{}"'.format(node_name)
        assert rois_shape[1] == 4, 'The size of ROI element must be 4 for node "{}"'.format(node_name)
        assert len(input_shape) == 4, 'The rank of port 0 input tensor of node "{}" must be 4.'.format(node_name)

        node.out_port(0).data.set_shape(
            shape_for_layout(layout,
                             batch=rois_shape[0],
                             features=input_shape[get_features_dim(layout, 4)],
                             height=node.pooled_h,
                             width=node.pooled_w)
        )
Code Example #4
    def infer(node: Node):
        layout = node.graph.graph['layout']

        assert len(layout) == 4
        assert len(
            [p for p in node.in_ports().values() if not p.disconnected()]) == 2
        assert node.has_valid('mode')
        assert node.has_valid('axes')

        src_shape = node.in_port(0).data.get_shape()
        assert src_shape is not None
        dst_shape = node.in_port(1).data.get_value()
        assert dst_shape is not None

        out_height = dst_shape[0]
        out_width = dst_shape[1]

        node.out_node().shape = shape_for_layout(
            layout,
            batch=src_shape[get_batch_dim(layout, 4)],
            features=src_shape[get_features_dim(layout, 4)],
            height=out_height,
            width=out_width)

        PermuteAttrs.create_permute_attrs(node, attrs=[('axes', 'input:0')])
Code Example #5
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if H % block_size or W % block_size:
            raise Error(
                'Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=int(C * (block_size**2)),
                                     height=int(H / block_size),
                                     width=int(W / block_size))

        assert np.prod(in_shape) == np.prod(out_shape)
        node.out_node().shape = int64_array(out_shape)
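SpaceToDepth performs the inverse trade, folding spatial blocks into channels; the same hypothetical numbers run backwards:

N, H, W, C = 2, 8, 12, 2          # hypothetical input, matching the DepthToSpace output above
block_size = 2
out = [N, H // block_size, W // block_size, C * block_size ** 2]
assert out == [2, 4, 6, 8]        # exactly undoes the DepthToSpace example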
Code Example #6
    def find_and_replace_pattern(self, graph: Graph):
        layout = graph.graph['layout']
        for eltwise_op_node in graph.get_op_nodes(is_eltwise=True):
            out_shape = eltwise_op_node.out_port().data.get_shape()
            if 4 <= len(out_shape) <= 5:
                out_features = out_shape[get_features_dim(
                    layout, len(out_shape))]
                for port, node in eltwise_op_node.in_nodes().items():
                    if len(node.shape) != len(out_shape) and len(
                            node.shape) == 1 and out_features == node.shape[0]:
                        new_shape = shape_for_layout(
                            layout,
                            batch=1,
                            features=out_features,
                            height=1,
                            width=1,
                            depth=1 if len(out_shape) == 5 else None)
                        dim_const = Const(graph, {
                            'value': new_shape,
                            'name': node.id + '/Dim'
                        }).create_node()
                        reshape_op = Reshape(graph,
                                             attrs={
                                                 'dim': new_shape,
                                                 'name': node.id + '/Broadcast'
                                             }).create_node()

                        eltwise_op_node.in_port(port).get_source(
                        ).node.out_port(0).get_connection().set_destination(
                            reshape_op.in_port(0))
                        reshape_op.in_port(1).connect(dim_const.out_port(0))

                        reshape_op.out_port(0).connect(
                            eltwise_op_node.in_port(port))
Code Example #7
def roipooling_infer(node: Node):
    """
    Sets the shape of the output node from the node parameters and input blobs:
    the batch is taken from the second input blob (ROIs), the channels from the
    first one, and the height and width from the 'pooled_h'/'pooled_w' attributes.
    Parameters
    ----------
    node
    """
    shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
    if any(s is None for s in shapes):
        return
    if len(node.in_nodes()) == 4:  # TensorFlow case of CropAndResize operation
        crop_size = node.in_node(3).value
        if crop_size is None:
            log.error('The ROIPooling size is not known for node {}'.format(
                node.soft_get('name')))
            return
        if not isinstance(crop_size, np.ndarray) or len(crop_size) != 2:
            log.error(
                'The ROIPooling size should have 2 elements for node {}'.
                format(node.soft_get('name')))
            return
        node.pooled_h = crop_size[0]
        node.pooled_w = crop_size[1]
        node.graph.remove_edge(node.in_node(3).id, node.id)
        node.graph.remove_edge(node.in_node(2).id, node.id)

    layout = node.graph.graph['layout']
    assert len(layout) == 4

    node.out_node().shape = shape_for_layout(
        layout,
        batch=shapes[1][get_batch_dim(layout, 4)],
        features=shapes[0][get_features_dim(layout, 4)],
        height=node.pooled_h,
        width=node.pooled_w)
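A concrete instance of this shape composition, with hypothetical Faster R-CNN style NCHW shapes (the 300 ROIs and 7x7 pooling are illustrative, not from the snippet):

feature_map = [1, 256, 14, 14]    # shapes[0]: NCHW feature map
rois = [300, 5]                   # shapes[1]: ROI blob; its batch dim (index 0) is the ROI count
pooled_h = pooled_w = 7
out = [rois[0], feature_map[1], pooled_h, pooled_w]
assert out == [300, 256, 7, 7]    # one pooled feature map per ROI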
Code Example #8
    def infer(node: Node):
        assert [port.idx for port in node.in_ports().values() if not port.disconnected()] == [0], \
            'Wrong number of inputs for node {} with type ExtractImagePatches'.format(node.soft_get('name', node.id))
        input_shape = node.in_port(0).data.get_shape()
        name = node.soft_get('name', node.id)
        assert input_shape is not None, 'Input shape is not set for node {} with type ExtractImagePatches'.format(name)

        assert len(input_shape) == 4, 'ExtractImagePatches operation supports only 4D tensors'

        layout = node.graph.graph['layout']
        N = input_shape[get_batch_dim(layout, 4)]
        C = input_shape[get_features_dim(layout, 4)]

        size_spatial = int64_array(node.sizes)[node.spatial_dims]

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.strides[node.spatial_dims]

        size_extent = node.rates[node.spatial_dims] * (size_spatial - 1) + 1

        pad_spatial_shape, output_spatial_shape = tf_window_op_pad_infer(input_spatial_shape,
                                                                         size_extent,
                                                                         stride_spatial_shape,
                                                                         node.auto_pad,
                                                                         False)

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * np.prod(size_spatial),
                                     height=output_spatial_shape[0],
                                     width=output_spatial_shape[1])

        node.out_port(0).data.set_shape(int64_array(out_shape))
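The spatial part of the output comes from tf_window_op_pad_infer. For the 'VALID' auto_pad case it presumably reduces to the standard TensorFlow window formula with the rates-dilated window; the numbers below are hypothetical, and the formula is an assumption about that helper, not its actual source:

import numpy as np

input_spatial = np.array([10, 10])
sizes = np.array([3, 3])
rates = np.array([2, 2])
strides = np.array([1, 1])
size_extent = rates * (sizes - 1) + 1                          # dilated window: [5, 5]
output_spatial = (input_spatial - size_extent) // strides + 1  # 'VALID' case: [6, 6]
features_factor = np.prod(sizes)                               # C grows 9x: one channel per window pixel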
Code Example #9
File: depth_to_space.py Project: yding10/openvino
    def infer(node: Node):
        in_shape = node.in_port(0).data.get_shape()
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C is not dynamic_dimension and C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C // (block_size * block_size),
                                     height=H * block_size,
                                     width=W * block_size)

        if is_fully_defined(in_shape) and is_fully_defined(
                out_shape) and np.prod(in_shape) != np.prod(out_shape):
            raise Error(
                'Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                ''.format(in_shape, out_shape, node.soft_get('name', node.id)))
        node.out_port(0).data.set_shape(out_shape)
Code Example #10
File: upsample.py Project: zkzt/openvino
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            out_height = math.floor(in_height * node.height_scale)
            out_width = math.floor(in_width * node.width_scale)
            node.out_node().shape = shape_for_layout(
                layout,
                batch=input_shape[get_batch_dim(layout, 4)],
                features=input_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
        else:
            assert node.in_node(1).value is not None
            eps = 1e-5  # nudges values that are very close to an integer so they round to it instead of being floored
            # generic output shape calculation to support 5D input shape case
            node.out_node().shape = np.array(
                (input_shape + eps) * node.in_node(1).value).astype(np.int64)
Code Example #11
File: upsample.py Project: srinivasdasu24/dldt
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            out_height = math.floor(in_height * node.height_scale)
            out_width = math.floor(in_width * node.width_scale)
            node.out_node().shape = shape_for_layout(
                layout,
                batch=input_shape[get_batch_dim(layout, 4)],
                features=input_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
        else:
            assert node.in_node(1).value is not None
            # generic output shape calculation to support 5D input shape case
            node.out_node().shape = input_shape * node.in_node(1).value
Code Example #12
File: space_to_depth.py Project: yding10/openvino
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if (H is not dynamic_dimension
                and H % block_size) or (W is not dynamic_dimension
                                        and W % block_size):
            raise Error(
                'Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * (block_size**2),
                                     height=H // block_size,
                                     width=W // block_size)

        node.out_port(0).data.set_shape(out_shape)
Code Example #13
 def test_shape_for_layout_NHWC(self):
     self.assertListEqual([2, 4, 5, 3],
                          list(
                              shape_for_layout('NHWC',
                                               batch=2,
                                               features=3,
                                               height=4,
                                               width=5)))
Code Example #14
 def test_shape_for_layout_NCHW(self):
     self.assertListEqual([2, 3, 4, 5],
                          list(
                              shape_for_layout('NCHW',
                                               batch=2,
                                               features=3,
                                               height=4,
                                               width=5)))
Code Example #15
File: resample.py Project: pc2/CustoNN2
    def resample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        in_height = input_shape[get_height_dim(layout, 4)]
        in_width = input_shape[get_width_dim(layout, 4)]

        if node.has('fw') and node.fw == 'tf':
            dst_shape = node.in_node(1).value
            if dst_shape is None or len(input_shape) != 4 or len(
                    dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(
                        node.name, node.op, input_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            out_height = dst_shape[0]
            out_width = dst_shape[1]
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            if len(node.in_nodes()) == 1:
                if node.has('width') and node.has('height'):
                    out_height = node.height
                    out_width = node.width
                else:
                    out_height = node.factor * in_height
                    out_width = node.factor * in_width
            else:
                out_height = node.in_node(1).shape[get_height_dim(layout, 4)]
                out_width = node.in_node(1).shape[get_width_dim(layout, 4)]

        node.factor = factor_update(
            node.factor,
            [float(out_height) / in_height,
             float(out_width) / in_width], [in_height, in_width],
            [out_height, out_width], node.soft_get('name'))

        node.out_node().shape = shape_for_layout(
            layout,
            batch=input_shape[get_batch_dim(layout, 4)],
            features=input_shape[get_features_dim(layout, 4)],
            height=out_height,
            width=out_width)
Code Example #16
 def find_and_replace_pattern(self, graph: Graph):
     layout = graph.graph['layout']
     for n in list(graph.nodes()):
         if 'type' in graph.node[n] and graph.node[n]['type'] == 'Eltwise' and get_value_id(Node(graph, n)) is None:
             eltwise_op_node = Node(graph, n)
             out_shape = eltwise_op_node.out_node().shape
             if 4 <= len(out_shape) <= 5:
                 out_features = out_shape[get_features_dim(layout, len(out_shape))]
                 for port, node in eltwise_op_node.in_nodes().items():
                     if len(node.shape) != len(out_shape) and len(node.shape) == 1 and out_features == node.shape[0]:
                         in_atts = deepcopy(graph.get_edge_data(node.id, n)[0])
                         graph.remove_edge(node.id, n)
                         new_shape = shape_for_layout(layout, batch=1, features=out_features, height=1, width=1,
                                                      depth=1 if len(out_shape) == 5 else None)
                         reshape_data_op = Reshape(graph, attrs={'dim': new_shape, 'name': node.id + '/Broadcast'})
                         reshape_data_node = reshape_data_op.create_node_with_data([node])
                         graph.add_edge(reshape_data_node.id, eltwise_op_node.id, **in_atts)
Code Example #17
    def upsample_infer(node: Node):
        node_name = node.soft_get('name', node.id)
        layout = node.graph.graph['layout']
        assert len(
            layout
        ) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(
            node_name)

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None, 'Input shape for node "{}" is not set'.format(node_name)

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            if in_height is not dynamic_dimension:
                out_height = math.floor(in_height * node.height_scale)
            else:
                out_height = dynamic_dimension
            if in_width is not dynamic_dimension:
                out_width = math.floor(in_width * node.width_scale)
            else:
                out_width = dynamic_dimension
            node.out_port(0).data.set_shape(
                shape_for_layout(layout,
                                 batch=input_shape[get_batch_dim(layout, 4)],
                                 features=input_shape[get_features_dim(
                                     layout, 4)],
                                 height=out_height,
                                 width=out_width))
        else:
            scales = node.in_port(1).data.get_value()
            assert scales is not None, 'The input with scales for node "{}" is not constant'.format(
                node_name)
            eps = 1e-5  # nudges values that are very close to an integer so they round to it instead of being floored
            # generic output shape calculation to support 5D input shape case
            output_shape = shape_array(
                [dynamic_dimension for _ in range(len(input_shape))])
            for idx in range(len(output_shape)):
                if input_shape[idx] is not dynamic_dimension:
                    output_shape[idx] = int(
                        (input_shape[idx] + eps) * scales[idx])
                else:
                    output_shape[idx] = dynamic_dimension_value
            node.out_port(0).data.set_shape(output_shape)
Code Example #18
File: psroipooling.py Project: projectceladon/dldt
 def psroipooling_infer(node: Node):
     """
     Sets the shape of the output node from the node parameters and input blobs:
     the batch is taken from the second input blob, the channels from the node's
     'output_dim' attribute, and the height and width from its 'group_size'.
     Parameters
     ----------
     node
     """
     shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
     if any(s is None for s in shapes):
         return
     layout = node.graph.graph['layout']
     assert len(layout) == 4
     node.out_node().shape = shape_for_layout(layout,
                                              batch=shapes[1][get_batch_dim(layout, 4)],
                                              features=node.output_dim,
                                              height=node.group_size,
                                              width=node.group_size)
Code Example #19
File: regionyolo.py Project: zhenlusu500/openvino
    def regionyolo_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = get_canonical_axis_index(input_shape, node.end_axis)
        node.axis = axis
        node.end_axis = end_axis
        if node.do_softmax:
            flat_dim = np.prod(input_shape[axis: end_axis + 1])
            node.out_node().shape = np.array([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
        else:
            layout = node.graph.graph['layout']
            assert len(layout) == 4

            node.out_node().shape = shape_for_layout(layout,
                                                     batch=input_shape[get_batch_dim(layout, 4)],
                                                     features=(node.classes + node.coords + 1) * len(node.mask),
                                                     height=input_shape[get_height_dim(layout, 4)],
                                                     width=input_shape[get_width_dim(layout, 4)])
Code Example #20
    def regionyolo_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = get_canonical_axis_index(input_shape, node.end_axis)
        node.axis = axis
        node.end_axis = end_axis
        if node.do_softmax:
            dims_to_flatten = input_shape[axis: end_axis + 1]
            if is_fully_defined(dims_to_flatten):
                flat_dim = np.ma.prod(dims_to_flatten)
            else:
                flat_dim = dynamic_dimension_value
            node.out_port(0).data.set_shape([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
        else:
            layout = node.graph.graph['layout']
            assert len(layout) == 4

            node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                             batch=input_shape[get_batch_dim(layout, 4)],
                                                             features=(node.classes + node.coords + 1) * len(node.mask),
                                                             height=input_shape[get_height_dim(layout, 4)],
                                                             width=input_shape[get_width_dim(layout, 4)]))
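The feature count (classes + coords + 1) * len(mask) is the familiar YOLO head layout: per anchor, the box coordinates plus an objectness score plus the class scores. With typical YOLOv3 COCO numbers (illustrative, not from the snippet):

classes, coords, anchors = 80, 4, 3      # typical YOLOv3 configuration
features = (classes + coords + 1) * anchors
assert features == 255                   # the familiar 255-channel YOLOv3 output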
Code Example #21
    def interp_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4
        if len(node.in_nodes()) == 2:
            src_shape = node.in_node(0).shape
            dst_shape = node.in_node(1).shape

            # in Caffe there can also be 2 inputs; in that case the target size is taken from the shape of the second input
            if node.parse_2nd_input == 'shape':
                dst_shape = [
                    dst_shape[get_height_dim(layout, 4)],
                    dst_shape[get_width_dim(layout, 4)]
                ]
            else:
                # this is the TF case: the second input holds the target size values
                dst_shape = node.in_node(1).value

            if src_shape is None or dst_shape is None or len(
                    src_shape) != 4 or len(dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(
                        node.name, node.op, src_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            in_height = src_shape[get_height_dim(layout, 4)]
            in_width = src_shape[get_width_dim(layout, 4)]
            out_height = dst_shape[0]
            out_width = dst_shape[1]

            node.factor = factor_update(
                node.factor,
                [float(out_height) / in_height,
                 float(out_width) / in_width], [in_height, in_width],
                [out_height, out_width], node.soft_get('name'))

            if node.factor is None:
                node['width'] = out_width
                node['height'] = out_height

            node.out_node().shape = shape_for_layout(
                layout,
                batch=src_shape[get_batch_dim(layout, 4)],
                features=src_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            outn = node.out_node(0)

            in_shape = node.in_node(0)
            num_ = in_shape.shape[get_batch_dim(layout, 4)]
            channels_ = in_shape.shape[get_features_dim(layout, 4)]
            height_in_ = in_shape.shape[get_height_dim(layout, 4)]
            width_in_ = in_shape.shape[get_width_dim(layout, 4)]

            height_out_ = height_in_ + node.pad_beg + node.pad_end
            width_out_ = width_in_ + node.pad_beg + node.pad_end

            if node.shrink_factor != 1 and node.zoom_factor == 1:
                shrink_factor = node.shrink_factor
                if shrink_factor < 1:
                    log.error(
                        'Shrink factor should be positive in node {}'.format(
                            node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
            elif node.shrink_factor == 1 and node.zoom_factor != 1:
                zoom_factor = node.zoom_factor
                if zoom_factor < 1:
                    log.error(
                        'Zoom factor should be positive in node {}'.format(
                            node.id))
                    return None

                node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \
                                        'layer shape inference function in the file (extensions/ops/interp.op at the ' \
                                        'line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100)
                # Reshape methods can be different in some cases
                # Commented out section represents reshape that used in deeplab-caffe
                # Uncomment the following lines, if your model was trained with deeplab-caffe
                # or have the same reshape method
                # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
                # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

                # Comment out the following lines if you use the reshape method from previous section
                height_out_ = height_out_ * zoom_factor
                width_out_ = width_out_ * zoom_factor
            elif node.width != 0 and node.height != 0:
                height_out_ = node.height
                width_out_ = node.width
            elif node.shrink_factor != 1 and node.zoom_factor != 1:
                shrink_factor = node.shrink_factor
                zoom_factor = node.zoom_factor
                if shrink_factor < 1:
                    log.error(
                        'Shrink factor should be positive in node {}'.format(
                            node.id))
                    return None
                if zoom_factor < 1:
                    log.error(
                        'Zoom factor should be positive in node {}'.format(
                            node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
                height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor -
                                                                 1)
                width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

            outn.shape = shape_for_layout(layout,
                                          batch=num_,
                                          features=channels_,
                                          height=height_out_,
                                          width=width_out_)
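The two zoom reshape variants mentioned in the comments above differ by one pixel per zoom step; with a hypothetical height of 33 and zoom_factor of 2:

h, zoom_factor = 33, 2
deeplab_caffe = h + (h - 1) * (zoom_factor - 1)    # 65: interpolation-grid style used by deeplab-caffe
default = h * zoom_factor                          # 66: plain upscale used by the code above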
Code Example #22
 def test_shape_for_layout_missing_features(self):
     with self.assertRaises(Error):
         shape_for_layout('NCHW', batch=2, height=4, width=5)
Code Example #23
 def test_shape_for_layout_missing_width(self):
     with self.assertRaises(Error):
         shape_for_layout('NHWC', batch=2, features=3, height=4)