Code example #1
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        num_classes = 21
        top_k = attrs.int("nms_topk", -1)
        keep_top_k = top_k
        variance_encoded_in_target = 0
        code_type = "caffe.PriorBoxParameter.CENTER_SIZE"
        share_location = 1
        nms_threshold = attrs.float("nms_threshold", 0.5)
        confidence_threshold = attrs.float("threshold", 0.01)
        background_label_id = 0
        clip = 1 if attrs.bool("clip", True) else 0

        node_attrs = {
            'type': 'DetectionOutput',
            'op': __class__.op,
            'num_classes': num_classes,
            'keep_top_k': keep_top_k,
            'variance_encoded_in_target': variance_encoded_in_target,
            'code_type': code_type,
            'share_location': share_location,
            'confidence_threshold': confidence_threshold,
            'background_label_id': background_label_id,
            'nms_threshold': nms_threshold,
            'top_k': top_k,
            'decrease_label_id': 1,
            'clip_before_nms': clip,
            'normalized': 1,
        }

        DetectionOutput.update_node_stat(node, node_attrs)

        return cls.enabled
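For context, an extract() classmethod like the one above normally sits on a small front-extractor class that the Model Optimizer discovers through its op and enabled class attributes. The sketch below only assumes that registration pattern; the class name, the MXNet operator string and the import path are illustrative, not taken from the snippet.

# A minimal sketch of the enclosing extractor class, assuming the usual
# FrontExtractorOp registration pattern; names below are illustrative.
from mo.front.extractor import FrontExtractorOp  # assumed import path

class MultiBoxDetectionOutputExtractor(FrontExtractorOp):  # hypothetical name
    op = '_contrib_MultiBoxDetection'  # MXNet operator this extractor would match (assumed)
    enabled = True

    @classmethod
    def extract(cls, node):
        ...  # build node_attrs and call DetectionOutput.update_node_stat as in example #1
        return cls.enabled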
Code example #2
    def generate_sub_graph(self, graph: Graph, match: SubgraphMatch):
        # IE DetectionOutput layer consumes flattened confidences and locations tensors.
        # That is why we add reshapes before them.
        locs_node = match.single_input_node(0)
        conf_node = match.single_input_node(1)
        prior_boxes_node = match.single_input_node(2)

        locs_out_nodes = locs_node[0].out_nodes()
        assert len(locs_out_nodes) == 1
        locs_out_node = locs_out_nodes[list(locs_out_nodes.keys())[0]]
        assert locs_out_node.op == "OpOutput", locs_out_node.op
        graph.remove_node(locs_out_node.id)

        conf_out_nodes = conf_node[0].out_nodes()
        assert len(conf_out_nodes) == 1
        conf_out_node = conf_out_nodes[list(conf_out_nodes.keys())[0]]
        assert conf_out_node.op == "OpOutput", conf_out_node.op
        graph.remove_node(conf_out_node.id)

        # reshape operation to flatten locations tensor
        reshape_loc_op = Reshape(graph, {'dim': np.array([0, -1])})
        reshape_loc_node = reshape_loc_op.create_node(
            [locs_node], dict(name='DetectionOutput_Reshape_loc_'))

        # reshape operation to flatten confidence tensor
        reshape_conf_op = Reshape(graph, {'dim': np.array([0, -1])})
        reshape_conf_node = reshape_conf_op.create_node(
            [conf_node], dict(name='DetectionOutput_Reshape_conf_'))

        # remove the OpOutput node after the priors node
        assert prior_boxes_node[0].out_node().op == "OpOutput"
        graph.remove_node(prior_boxes_node[0].out_node().id)

        # reshape operation for prior boxes tensor
        reshape_priors_op = Reshape(graph, {'dim': np.array([1, 2, -1])})
        reshape_priors_node = reshape_priors_op.create_node(
            [prior_boxes_node], dict(name='DetectionOutput_Reshape_priors_'))
        # create Detection Output node with three inputs: locations, confidences and prior boxes
        detection_output_op = DetectionOutput(
            graph, match.custom_replacement_desc.custom_attributes)
        detection_output_node = detection_output_op.create_node(
            [reshape_loc_node, reshape_conf_node, reshape_priors_node],
            dict(name=detection_output_op.attrs['type'] + '_'))
        PermuteAttrs.set_permutation(reshape_priors_node,
                                     detection_output_node, None)

        # create Output node to mark DetectionOutput as a graph output operation
        output_op = Output(graph)
        output_op.create_node([detection_output_node], dict(name='sink_'))
        return {}
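The Reshape nodes above use dim values such as [0, -1] and [1, 2, -1]. Assuming the usual convention that a 0 in the target shape means "keep the corresponding input dimension" and -1 means "infer the remaining size", the effect can be emulated in plain NumPy as a rough illustration (all shapes below are made up):

import numpy as np

def reshape_with_special_zero(tensor, dim):
    # 0 -> copy the input dimension at that position, -1 -> infer the remaining size
    out_shape = [tensor.shape[i] if d == 0 else d for i, d in enumerate(dim)]
    return tensor.reshape(out_shape)

confidences = np.zeros((2, 1917, 21))                       # [batch, num_priors, num_classes]
print(reshape_with_special_zero(confidences, [0, -1]).shape)    # (2, 40257)

priors = np.zeros((1, 1917 * 4 * 2))                        # boxes + variances, flattened
print(reshape_with_special_zero(priors, [1, 2, -1]).shape)      # (1, 2, 7668)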
Code example #3
    def extract(cls, node):
        attrs = {
            'variance_encoded_in_target': int(node.module.variance_encoded_in_target),
            'nms_threshold': node.module.nms_threshold,
            'confidence_threshold': node.module.confidence_threshold,
            'top_k': node.module.top_k,
            'keep_top_k': node.module.keep_top_k,
            'code_type': node.module.code_type,
        }
        DetectionOutput.update_node_stat(node, attrs)
        return cls.enabled
Code example #4
    def replace_sub_graph(self, graph: Graph, match: dict):
        box_nms = match['box_nms']
        top_k = box_nms.topk
        nms_threshold = box_nms.overlap_thresh

        ssd_concats = {}
        concat_names = ['ssd_concat1', 'ssd_concat0', 'ssd_concat2']

        for i, concat_match in enumerate(self.concats_pattern):
            for matches in find_pattern_matches(graph, concat_match['nodes'], concat_match['edges'], None, None):
                for match in matches:
                    if graph.has_node(match):
                        n = Node(graph, match)
                        if n.op == 'Concat':
                            ssd_concats.update({concat_names[i]: n})
                            break

        assert concat_names[0] in ssd_concats
        assert concat_names[1] in ssd_concats
        assert concat_names[2] in ssd_concats

        graph.remove_nodes_from(graph.get_nodes_with_attributes(op='Result'))
        detection_output_node = DetectionOutput(graph, dict(name=graph.unique_id() + '/DetectionOutput_',
                                                            top_k=top_k, keep_top_k=top_k, nms_threshold=nms_threshold,
                                                            background_label_id=0, clip=0, decrease_label_id=1,
                                                            code_type="caffe.PriorBoxParameter.CENTER_SIZE",
                                                            confidence_threshold=0.01, share_location=1,
                                                            variance_encoded_in_target=0, normalized=1)).create_node()

        reshape_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]),
                                                        dict(name=graph.unique_id() + '/DetectionOutput_'))

        ssd_softmax_node = ssd_concats['ssd_concat0'].out_node().out_node()
        ssd_softmax_node.out_port(0).disconnect()
        ssd_softmax_node.out_port(0).connect(reshape_node.in_port(0))
        reshape_node.out_port(0).connect(detection_output_node.in_port(1))

        ssd_concats['ssd_concat2'].axis = 2
        self.reshape_priorboxes(ssd_concats['ssd_concat2'])

        ssd_concats['ssd_concat1'].out_port(0).get_connection().set_destination(detection_output_node.in_port(0))
        ssd_concats['ssd_concat2'].out_port(0).get_connection().set_destination(detection_output_node.in_port(2))

        Result(graph, {'name': detection_output_node.id + '/Result'}).create_node([detection_output_node])
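These replacers all wire DetectionOutput the same way: port 0 receives the flattened box regressions, port 1 the flattened class confidences, and port 2 the prior boxes, which are typically laid out as [1, 2, num_priors * 4] with boxes in the first row and variances in the second, as in Caffe SSD. A shape-only sketch with made-up sizes:

import numpy as np

batch, num_priors, num_classes = 1, 1917, 21                    # illustrative sizes only
locations   = np.zeros((batch, num_priors * 4))                 # in_port(0): flattened box regressions
confidences = np.zeros((batch, num_priors * num_classes))       # in_port(1): flattened class scores
priors      = np.zeros((1, 2, num_priors * 4))                  # in_port(2): prior boxes + variances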
Code example #5
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        # We cannot get the num_classes attribute from the operation, so it must be set to None.
        # In this case the num_classes attribute will be defined by the infer function in
        # mo/front/common/partial_infer/multi_box_detection.py
        num_classes = None
        top_k = attrs.int("nms_topk", -1)
        keep_top_k = top_k
        variance_encoded_in_target = 0
        code_type = "caffe.PriorBoxParameter.CENTER_SIZE"
        share_location = 1
        nms_threshold = attrs.float("nms_threshold", 0.5)
        confidence_threshold = attrs.float("threshold", 0.01)
        background_label_id = 0
        clip = 1 if attrs.bool("clip", True) else 0

        node_attrs = {
            'type': 'DetectionOutput',
            'op': __class__.op,
            'num_classes': num_classes,
            'keep_top_k': keep_top_k,
            'variance_encoded_in_target': variance_encoded_in_target,
            'code_type': code_type,
            'share_location': share_location,
            'confidence_threshold': confidence_threshold,
            'background_label_id': background_label_id,
            'nms_threshold': nms_threshold,
            'top_k': top_k,
            'decrease_label_id': 1,
            'clip_before_nms': clip,
            'normalized': 1,
        }

        DetectionOutput.update_node_stat(node, node_attrs)

        return cls.enabled
Code example #6
    def generate_sub_graph(self, graph: Graph, match: SubgraphMatch):
        reshape_classes_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            dict(name='do_reshape_classes'),
            match.single_input_node(1)[0])

        priors_node = match.single_input_node(2)[0]

        placeholder = [
            Node(graph, node_id) for node_id in graph.nodes()
            if Node(graph, node_id).op == 'Parameter'
        ][0]
        im_height = placeholder.shape[1]
        im_width = placeholder.shape[2]

        # scale prior boxes to the [0, 1] interval
        priors_scale_const_node = Const(
            graph, {
                'value':
                np.array(
                    [1 / im_width, 1 / im_height, 1 / im_width, 1 / im_height])
            }).create_node([])
        priors_scale_node = Mul(graph, {
            'name': 'scale_priors'
        }).create_node([priors_node, priors_scale_const_node])

        # calculate prior boxes widths and heights
        split_node = SplitV(graph, {
            'axis': 2,
            'size_splits': [1, 1, 1, 1],
            'out_ports_count': 4
        }).create_node([priors_scale_node])

        priors_width_node = Sub(
            graph, dict(name=split_node.name + '/sub_2-0_')).create_node([
                (split_node, 2), (split_node, 0)
            ])
        priors_height_node = Sub(graph, dict(name=split_node.name +
                                             '/sub_3-1_')).create_node([
                                                 (split_node, 3),
                                                 (split_node, 1)
                                             ])

        # concat widths and heights into a single tensor and multiply with the box coordinates regression values
        concat_width_height_node = Concat(graph, {
            'name': 'concat_priors_width_height',
            'axis': -1,
            'in_ports_count': 4
        }).create_node([
            priors_width_node, priors_height_node, priors_width_node,
            priors_height_node
        ])
        applied_width_height_regressions_node = Mul(graph, {
            'name': 'final_regressions'
        }).create_node(
            [concat_width_height_node,
             match.single_input_node(0)[0]])

        # reshape to 2D tensor as Inference Engine Detection Output layer expects
        reshape_regression_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            dict(name='reshape_regression'),
            applied_width_height_regressions_node)

        detection_output_op = DetectionOutput(
            graph, match.custom_replacement_desc.custom_attributes)
        detection_output_op.attrs['old_infer'] = detection_output_op.attrs[
            'infer']
        detection_output_op.attrs['infer'] = __class__.do_infer
        detection_output_node = detection_output_op.create_node(
            [reshape_regression_node, reshape_classes_node, priors_scale_node],
            dict(name=detection_output_op.attrs['type'],
                 clip=1,
                 normalized=1,
                 variance_encoded_in_target=0))

        return {'detection_output_node': detection_output_node}
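The SplitV/Sub/Concat/Mul chain above just builds a per-prior [width, height, width, height] tensor and multiplies it element-wise with the regression values. Roughly, in NumPy (illustrative values only):

import numpy as np

priors = np.array([[[0.1, 0.2, 0.5, 0.8],
                    [0.0, 0.0, 1.0, 1.0]]])     # (1, num_priors, 4) as (xmin, ymin, xmax, ymax)

xmin, ymin, xmax, ymax = np.split(priors, 4, axis=2)
widths  = xmax - xmin                            # the (split, 2) - (split, 0) Sub
heights = ymax - ymin                            # the (split, 3) - (split, 1) Sub

wh = np.concatenate([widths, heights, widths, heights], axis=-1)  # [w, h, w, h] per prior
regressions = np.full(priors.shape, 0.5)         # stand-in for the locations tensor
scaled = wh * regressions                        # what the final Mul node computes
print(scaled.shape)                              # (1, 2, 4)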
Code example #7
    def generate_sub_graph(self, graph: Graph, match: SubgraphMatch):
        reshape_classes_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            dict(name='do_reshape_classes'),
            match.single_input_node(1)[0])

        initial_priors_node = match.single_input_node(2)[0]
        priors_name = initial_priors_node.soft_get('name',
                                                   initial_priors_node.id)
        # the model calculates identical prior boxes for each image in the batch, so we take the first batch slice
        begin = Const(graph, {
            'value': np.array([0, 0, 0], dtype=np.int32)
        }).create_node()
        end = Const(graph, {
            'value': np.array([1, 0, 0], dtype=np.int32)
        }).create_node()
        stride = Const(graph, {
            'value': np.array([1, 1, 1], dtype=np.int32)
        }).create_node()

        priors_node = StridedSlice(
            graph, {
                'name': priors_name + '/0_batch_slice',
                'begin_mask': np.array([1, 1, 1], dtype=np.int32),
                'end_mask': np.array([1, 0, 0], dtype=np.int32),
                'new_axis_mask': np.array([0], dtype=np.int32),
                'shrink_axis_mask': np.array([0], dtype=np.int32),
                'ellipsis_mask': np.array([0], dtype=np.int32)
            }).create_node()

        initial_priors_node.out_port(0).connect(priors_node.in_port(0))
        begin.out_port(0).connect(priors_node.in_port(1))
        end.out_port(0).connect(priors_node.in_port(2))
        stride.out_port(0).connect(priors_node.in_port(3))

        placeholders = graph.get_op_nodes(type='Parameter')
        assert len(placeholders) == 1, "{} replacer requires model to have one Placeholder, but current model has " \
                                       "{} placeholders".format(self.replacement_id, len(placeholders))
        placeholder = placeholders[0]

        # scale prior boxes to the [0, 1] interval
        node_with_scales_for_prior_boxes = self.placeholder_scales(placeholder)
        priors_scale_node = Mul(graph, {'name': 'scale_priors'}).create_node()

        broadcast = Broadcast(graph, {
            'name': 'scales_broadcast'
        }).create_node()
        shape_of_priors = Shape(graph, {'name': 'priors_shape'}).create_node()
        priors_node.out_port(0).connect(shape_of_priors.in_port(0))
        broadcast.in_port(1).connect(shape_of_priors.out_port(0))
        broadcast.in_port(0).connect(
            node_with_scales_for_prior_boxes.out_port(0))

        priors_scale_node.in_port(0).connect(priors_node.out_port(0))
        priors_scale_node.in_port(1).connect(broadcast.out_port(0))

        try:
            variance = match.custom_replacement_desc.custom_attributes['variance']
        except KeyError:
            raise Error('There is no variance attribute in the {} replacement config file '
                        '`custom_attributes`'.format(self.replacement_id))

        priors = self.append_variances(priors_scale_node, variance)

        # calculate prior boxes widths and heights
        split_node = create_op_with_const_inputs(graph, VariadicSplit, {
            1: int64_array(2),
            2: int64_array([1, 1, 1, 1])
        }, {'out_ports_count': 4}, priors_scale_node)

        priors_width_node = Sub(
            graph, dict(name=split_node.name + '/sub_2-0_')).create_node([
                (split_node, 2), (split_node, 0)
            ])
        priors_height_node = Sub(graph, dict(name=split_node.name +
                                             '/sub_3-1_')).create_node([
                                                 (split_node, 3),
                                                 (split_node, 1)
                                             ])

        # concat widths and heights into a single tensor and multiply with the box coordinates regression values
        # WA with 3 Concats instead of 1 for keeping model reshapable
        # concat_width_height_node = Concat(graph, {'name': 'concat_priors_width_height', 'axis': -1,
        #                                           'in_ports_count': 4}).create_node(
        # [priors_width_node, priors_height_node, priors_width_node, priors_height_node])

        concat_1 = Concat(graph, {
            'name': 'concat_width_height',
            'axis': -1,
            'in_ports_count': 2
        }).create_node([priors_width_node, priors_height_node])
        concat_2 = Concat(graph, {
            'name': 'concat_width_height_width',
            'axis': -1,
            'in_ports_count': 2
        }).create_node([concat_1, priors_width_node])
        concat_width_height_node = Concat(graph, {
            'name': 'concat_priors_width_height',
            'axis': -1,
            'in_ports_count': 2
        }).create_node([concat_2, priors_height_node])

        applied_width_height_regressions_node = Mul(graph, {
            'name': 'final_regressions'
        }).create_node(
            [concat_width_height_node,
             match.single_input_node(0)[0]])

        # reshape to 2D tensor as Inference Engine Detection Output layer expects
        reshape_regression_node = create_op_node_with_second_input(
            graph, Reshape, int64_array([0, -1]),
            dict(name='reshape_regression'),
            applied_width_height_regressions_node)

        detection_output_op = DetectionOutput(
            graph, match.custom_replacement_desc.custom_attributes)
        # get the NMS threshold from the original network
        iou_threshold = None
        nms_nodes = graph.get_op_nodes(op='NonMaxSuppression')
        if len(nms_nodes) > 0:
            # it is highly unlikely that NMS uses different thresholds for different classes;
            # moreover, DetectionOutput accepts only a scalar value for iou_threshold (nms_threshold)
            iou_threshold = nms_nodes[0].in_node(3).value
        if iou_threshold is None:
            raise Error(
                'During {} `iou_threshold` was not retrieved from RetinaNet graph'
                .format(self.replacement_id))

        detection_output_node = detection_output_op.create_node(
            [reshape_regression_node, reshape_classes_node, priors],
            dict(name=detection_output_op.attrs['type'],
                 nms_threshold=iou_threshold,
                 clip_after_nms=1,
                 normalized=1,
                 variance_encoded_in_target=0,
                 background_label_id=1000))

        return {'detection_output_node': detection_output_node}
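As the comment in the snippet says, the prior boxes are identical for every image in the batch, so the StridedSlice with begin=[0, 0, 0] and end=[1, 0, 0] is only there to take the first batch slice; the masks are arranged so that only the batch axis is actually clipped. In plain NumPy terms (shapes are illustrative):

import numpy as np

priors = np.random.rand(8, 1917, 4)   # identical prior boxes replicated over the batch
first_batch = priors[0:1, :, :]       # what the StridedSlice above is meant to produce
print(first_batch.shape)              # (1, 1917, 4)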
Code example #8
    def extract(cls, node):
        nms_threshold = onnx_attr(node, 'nms_threshold', 'f', default=0.0)
        eta = onnx_attr(node, 'eta', 'f', default=0.0)
        top_k = onnx_attr(node, 'top_k', 'i', default=-1)

        code_type_values = {
            b"CORNER": "caffe.PriorBoxParameter.CORNER",
            b"CENTER_SIZE": "caffe.PriorBoxParameter.CENTER_SIZE",
        }

        # the default must be the raw enum key so the mapping below also works when the attribute is absent
        code_type = onnx_attr(node, 'code_type', 's', default=b"CORNER")
        try:
            code_type = code_type_values[code_type]
        except KeyError:
            raise Error(
                "Incorrect value of code_type parameter {}".format(code_type))

        resize_mode_values = {
            b"": "",
            b"WARP": "caffe.ResizeParameter.WARP",
            b"FIT_SMALL_SIZE": "caffe.ResizeParameter.FIT_SMALL_SIZE",
            b"FIT_LARGE_SIZE_AND_PAD": "caffe.ResizeParameter.FIT_LARGE_SIZE_AND_PAD",
        }
        resize_mode = onnx_attr(node, 'resize_mode', 's', default=b"")
        try:
            resize_mode = resize_mode_values[resize_mode]
        except KeyError:
            raise Error("Incorrect value of resize_mode parameter {}".format(
                resize_mode))

        pad_mode_values = {
            b"": "",
            b"CONSTANT": "caffe.ResizeParameter.CONSTANT",
            b"MIRRORED": "caffe.ResizeParameter.MIRRORED",
            b"REPEAT_NEAREST": "caffe.ResizeParameter.REPEAT_NEAREST"
        }
        pad_mode = onnx_attr(node, 'pad_mode', 's', default=b"")
        try:
            pad_mode = pad_mode_values[pad_mode]
        except KeyError:
            raise Error(
                "Incorrect value of pad_mode parameter {}".format(pad_mode))

        interp_mode_values = {
            b"": "",
            b"LINEAR": "caffe.ResizeParameter.LINEAR",
            b"AREA": "caffe.ResizeParameter.AREA",
            b"NEAREST": "caffe.ResizeParameter.NEAREST",
            b"CUBIC": "caffe.ResizeParameter.CUBIC",
            b"LANCZOS4": "caffe.ResizeParameter.LANCZOS4"
        }
        interp_mode = onnx_attr(node, 'interp_mode', 's', default=b"")
        try:
            interp_mode = interp_mode_values[interp_mode]
        except KeyError:
            raise Error("Incorrect value of interp_mode parameter {}".format(
                interp_mode))

        attrs = {
            'num_classes': onnx_attr(node, 'num_classes', 'i', default=0),
            'share_location': onnx_attr(node, 'share_location', 'i', default=0),
            'background_label_id': onnx_attr(node, 'background_label_id', 'i', default=0),
            'code_type': code_type,
            'variance_encoded_in_target': onnx_attr(node, 'variance_encoded_in_target', 'i', default=0),
            'keep_top_k': onnx_attr(node, 'keep_top_k', 'i', default=0),
            'confidence_threshold': onnx_attr(node, 'confidence_threshold', 'f', default=0),
            'visualize_threshold': onnx_attr(node, 'visualize_threshold', 'f', default=0.6),
            # nms_param
            'nms_threshold': nms_threshold,
            'top_k': top_k,
            'eta': eta,
            # save_output_param.resize_param
            'prob': onnx_attr(node, 'prob', 'f', default=0),
            'resize_mode': resize_mode,
            'height': onnx_attr(node, 'height', 'i', default=0),
            'width': onnx_attr(node, 'width', 'i', default=0),
            'height_scale': onnx_attr(node, 'height_scale', 'i', default=0),
            'width_scale': onnx_attr(node, 'width_scale', 'i', default=0),
            'pad_mode': pad_mode,
            'pad_value': onnx_attr(node, 'pad_value', 's', default=""),
            'interp_mode': interp_mode,
            'input_width': onnx_attr(node, 'input_width', 'i', default=1),
            'input_height': onnx_attr(node, 'input_height', 'i', default=1),
            'normalized': onnx_attr(node, 'normalized', 'i', default=1),
        }

        # update the attributes of the node
        DetectionOutput.update_node_stat(node, attrs)
        return cls.enabled
Code example #9
    def transform_graph(self, graph: Graph, replacement_descriptions: dict):
        parameter_node = graph.get_op_nodes(op='Parameter')[0]
        parameter_node['data_type'] = data_type_str_to_np(
            parameter_node.graph.graph['cmd_params'].data_type)
        parameter_node.out_port(0).disconnect()

        # remove existing Result operations to remove unsupported sub-graph
        graph.remove_nodes_from(
            [node.id
             for node in graph.get_op_nodes(op='Result')] + ['detections'])

        # reconnect the graph so that the mean value and scale are applied directly to the Parameter and the
        # scaled result feeds the first convolution of the model; this removes the image pre-processing
        # (padding and resizing) from the model
        preprocessing_input_node_id = replacement_descriptions[
            'preprocessing_input_node']
        assert preprocessing_input_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
                                                           'should be a last node before image normalization and is specified' \
                                                           ' in the json file.'.format(preprocessing_input_node_id)
        preprocessing_input_node = Node(graph, preprocessing_input_node_id)
        consumer_node = preprocessing_input_node.out_port(
            0).get_connection().get_destination().node
        consumer_node.in_port(0).get_connection().set_source(
            parameter_node.out_port(0))

        preprocessing_output_node_id = replacement_descriptions[
            'preprocessing_output_node']
        assert preprocessing_output_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
                                                            'node should provide scaled image output and is specified' \
                                                            ' in the json file.'.format(preprocessing_output_node_id)
        preprocessing_output_node = Node(graph, preprocessing_output_node_id)
        preprocessing_output_node.out_port(0).disconnect()

        convolution_nodes = [
            n for n in graph.pseudo_topological_sort()
            if n.soft_get('type') == 'Convolution'
        ]
        convolution_nodes[0].in_port(0).get_connection().set_source(
            preprocessing_output_node.out_port(0))

        # create prior boxes (anchors) generator
        aspect_ratios = replacement_descriptions['aspect_ratios']
        assert len(aspect_ratios) % 2 == 0
        aspect_ratios = list(zip(aspect_ratios[::2], aspect_ratios[1::2]))
        priors_generator = self.AnchorGenerator(
            min_level=int(replacement_descriptions['min_level']),
            aspect_ratios=aspect_ratios,
            num_scales=int(replacement_descriptions['num_scales']),
            anchor_scale=replacement_descriptions['anchor_scale'])

        prior_boxes = []
        for i in range(100):
            inp_name = 'box_net/box-predict{}/BiasAdd'.format('_%d' %
                                                              i if i else '')
            if inp_name not in graph:
                break
            widths, heights = priors_generator.get(i)
            prior_box_op = PriorBoxClusteredOp(
                graph, {
                    'width': np.array(widths),
                    'height': np.array(heights),
                    'clip': 0,
                    'flip': 0,
                    'variance': replacement_descriptions['variance'],
                    'offset': 0.5
                })
            prior_boxes.append(
                prior_box_op.create_node(
                    [Node(graph, inp_name), parameter_node]))

        # concatenate prior box operations
        concat_prior_boxes = Concat(graph, {'axis': -1}).create_node()
        for idx, node in enumerate(prior_boxes):
            concat_prior_boxes.add_input_port(idx)
            concat_prior_boxes.in_port(idx).connect(node.out_port(0))

        conf = Sigmoid(graph, dict(name='concat/sigmoid')).create_node(
            [Node(graph, 'concat')])
        reshape_size_node = Const(graph, {
            'value': int64_array([0, -1])
        }).create_node([])
        logits = Reshape(graph, dict(name=conf.name + '/Flatten')).create_node(
            [conf, reshape_size_node])
        deltas = Reshape(graph, dict(name='concat_1/Flatten')).create_node(
            [Node(graph, 'concat_1'), reshape_size_node])

        # revert convolution boxes prediction weights from yxYX to xyXY (convolutions share weights and bias)
        weights = Node(graph, 'box_net/box-predict/pointwise_kernel')
        weights.value = weights.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(
            weights.shape)
        bias = Node(graph, 'box_net/box-predict/bias')
        bias.value = bias.value.reshape(-1,
                                        4)[:, [1, 0, 3, 2]].reshape(bias.shape)

        detection_output_node = DetectionOutput(
            graph,
            dict(
                name='detections',
                num_classes=int(replacement_descriptions['num_classes']),
                share_location=1,
                background_label_id=int(
                    replacement_descriptions['num_classes']) + 1,
                nms_threshold=replacement_descriptions['nms_threshold'],
                confidence_threshold=replacement_descriptions[
                    'confidence_threshold'],
                top_k=100,
                keep_top_k=100,
                code_type='caffe.PriorBoxParameter.CENTER_SIZE',
            )).create_node([deltas, logits, concat_prior_boxes])

        output_op = Result(graph, dict(name='output'))
        output_op.create_node([detection_output_node])
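The weight/bias permutation near the end of the snippet swaps the (y, x, Y, X) ordering produced by the box-prediction convolutions into (x, y, X, Y). The same index trick in isolation (values are just a toy example):

import numpy as np

bias = np.arange(8, dtype=np.float32)                          # two boxes: y0 x0 Y0 X0, y1 x1 Y1 X1
reordered = bias.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(bias.shape)
print(reordered)                                               # [1. 0. 3. 2. 5. 4. 7. 6.]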
Code example #10
    def extract(cls, node):
        pl = node.pb
        assert pl, 'Protobuf layer can not be empty'

        param = pl.detection_output_param

        # TODO rewrite params as complex structures
        if hasattr(param, 'nms_param'):
            nms_threshold = param.nms_param.nms_threshold
            eta = param.nms_param.eta
            if param.nms_param.top_k == 0:
                top_k = -1
            else:
                top_k = param.nms_param.top_k

        code_type_values = [
            "", "caffe.PriorBoxParameter.CORNER",
            "caffe.PriorBoxParameter.CENTER_SIZE",
            "caffe.PriorBoxParameter.CORNER_SIZE"
        ]

        code_type = code_type_values[1]
        if hasattr(param, 'code_type'):
            if param.code_type < 1 or param.code_type > 3:
                log.error("Incorrect value of code_type parameter")
                return
            code_type = code_type_values[param.code_type]

        visualize_threshold = param.visualize_threshold if param.visualize_threshold else 0.6

        resize_mode_values = [
            "", "caffe.ResizeParameter.WARP",
            "caffe.ResizeParameter.FIT_SMALL_SIZE",
            "caffe.ResizeParameter.FIT_LARGE_SIZE_AND_PAD"
        ]

        if param.save_output_param.resize_param.resize_mode < 1 or param.save_output_param.resize_param.resize_mode > 3:
            log.error("Incorrect value of resize_mode parameter")
            return
        resize_mode = resize_mode_values[
            param.save_output_param.resize_param.resize_mode]

        pad_mode_values = [
            "", "caffe.ResizeParameter.CONSTANT",
            "caffe.ResizeParameter.MIRRORED",
            "caffe.ResizeParameter.REPEAT_NEAREST"
        ]

        if param.save_output_param.resize_param.pad_mode < 1 or param.save_output_param.resize_param.pad_mode > 3:
            log.error("Incorrect value of pad_mode parameter")
            pad_mode = ""  # fall back to an empty value so the attrs dict below stays valid
        else:
            pad_mode = pad_mode_values[
                param.save_output_param.resize_param.pad_mode]

        interp_mode_values = [
            "", "caffe.ResizeParameter.LINEAR", "caffe.ResizeParameter.AREA",
            "caffe.ResizeParameter.NEAREST", "caffe.ResizeParameter.CUBIC",
            "caffe.ResizeParameter.LANCZOS4"
        ]
        interp_mode = ""
        for x in param.save_output_param.resize_param.interp_mode:
            if x < 1 or x > 5:
                log.error("Incorrect value of interp_mode parameter")
                return
            interp_mode += interp_mode_values[x]

        attrs = {
            'num_classes': param.num_classes,
            'share_location': int(param.share_location),
            'background_label_id': param.background_label_id,
            'code_type': code_type,
            'variance_encoded_in_target': int(param.variance_encoded_in_target),
            'keep_top_k': param.keep_top_k,
            'confidence_threshold': param.confidence_threshold,
            'visualize': param.visualize,
            'visualize_threshold': visualize_threshold,
            'save_file': param.save_file,
            # nms_param
            'nms_threshold': nms_threshold,
            'top_k': top_k,
            'eta': eta,
            # save_output_param
            'output_directory': param.save_output_param.output_directory,
            'output_name_prefix': param.save_output_param.output_name_prefix,
            'output_format': param.save_output_param.output_format,
            'label_map_file': param.save_output_param.label_map_file,
            'name_size_file': param.save_output_param.name_size_file,
            'num_test_image': param.save_output_param.num_test_image,
            # save_output_param.resize_param
            'prob': param.save_output_param.resize_param.prob,
            'resize_mode': resize_mode,
            'height': param.save_output_param.resize_param.height,
            'width': param.save_output_param.resize_param.width,
            'height_scale': param.save_output_param.resize_param.height_scale,
            'width_scale': param.save_output_param.resize_param.width_scale,
            'pad_mode': pad_mode,
            'pad_value': ','.join(str(x) for x in param.save_output_param.resize_param.pad_value),
            'interp_mode': interp_mode,
        }

        # these params can be omitted in caffe.proto and in param as consequence,
        # so check if it is set or set to default
        fields = [field[0].name for field in param.ListFields()]
        if 'input_width' in fields:
            attrs['input_width'] = param.input_width
        if 'input_height' in fields:
            attrs['input_height'] = param.input_height
        if 'normalized' in fields:
            attrs['normalized'] = int(param.normalized)
        if 'objectness_score' in fields:
            attrs['objectness_score'] = param.objectness_score

        mapping_rule = merge_attrs(param, attrs)

        # force setting infer function because it doesn't exist in proto so merge_attrs will not set it
        mapping_rule.update({'infer': multi_box_detection_infer})

        # update the attributes of the node
        DetectionOutput.update_node_stat(node, mapping_rule)
        return cls.enabled