def __init__(self, nodes):
    super(GraphSequence, self).__init__()
    self._output_nodes = []
    for node in nodes:
        if node.identifier in self:
            raise ConverterError('Node with id {} already defined'.format(
                node.identifier))
        self[node.identifier] = node
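A minimal, self-contained sketch of the duplicate-identifier guard above, assuming GraphSequence subclasses dict and each node exposes an `identifier` attribute (the `Node` stub and local `ConverterError` below are illustrative only):

class ConverterError(Exception):
    pass

class Node(object):
    def __init__(self, identifier):
        self.identifier = identifier

class GraphSequence(dict):
    def __init__(self, nodes):
        super(GraphSequence, self).__init__()
        for node in nodes:
            # Reject sequences that reuse a node identifier
            if node.identifier in self:
                raise ConverterError('Node with id {} already defined'.format(node.identifier))
            self[node.identifier] = node

seq = GraphSequence([Node('conv'), Node('bias')])
assert 'conv' in seq  # GraphSequence([Node('conv'), Node('conv')]) would raise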
Example #2
@classmethod
def get_split_positions(cls, input_shape, split_count, split_sizes, split_axis):
    split_points = []
    if len(split_sizes) > 0:
        if sum(split_sizes) != input_shape[split_axis]:
            raise ConverterError(code_to_message.get_error_message('ERROR_TF_SLICE_SIZE_MISMATCH'))
        # Explicit sizes: emit the running (cumulative) offsets between slices
        split_index = split_sizes[0]
        for size in split_sizes[1:]:
            split_points.append(int(split_index))
            split_index += size
    else:
        # Even split: the axis dimension must divide evenly by split_count
        split_axis_dim = input_shape[split_axis]
        split_size = split_axis_dim // split_count
        if split_axis_dim % split_count:
            raise ConverterError(code_to_message.get_error_message('ERROR_TF_SLICE_UNEVEN_SPLIT'))
        for index in range(1, split_count):
            split_points.append(int(index * split_size))
    return split_points
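A short worked example of the two branches above, as a standalone re-implementation of the same logic (function and variable names here are illustrative):

def split_positions(input_shape, split_count, split_sizes, split_axis):
    if split_sizes:
        # Explicit sizes accumulate into cumulative offsets
        points, index = [], split_sizes[0]
        for size in split_sizes[1:]:
            points.append(index)
            index += size
        return points
    # Even split: evenly spaced positions along the axis
    step = input_shape[split_axis] // split_count
    return [i * step for i in range(1, split_count)]

assert split_positions([1, 10], 0, [2, 3, 5], 1) == [2, 5]
assert split_positions([1, 10], 5, [], 1) == [2, 4, 6, 8]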
Example #3
    def build_layer(self, converter_context, descriptor, input_descriptors, output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: StridedSliceLayerResolver.Descriptor
        :rtype: int
        """
        input_name = self.get_input_name(converter_context, descriptor, input_descriptors)
        output_name = descriptor.output_names[0]

        if descriptor.ellipsis_mask != 0 or descriptor.new_axis_mask != 0:
            raise ConverterError(code_to_message.get_error_message('ERROR_TF_STRIDED_SLICE_UNSUPPORTED_MASKS'))

        input_rank = len(descriptor.input_shape)
        strides_rank = descriptor.strides.shape[0]

        # Extend to match input rank
        begin = np.append(descriptor.begin, np.zeros(input_rank - strides_rank, dtype=np.int32)).tolist()
        strides = np.append(descriptor.strides, np.ones(input_rank - strides_rank, dtype=np.int32)).tolist()
        end = np.append(descriptor.end, descriptor.input_shape[strides_rank:]).astype(np.int32).tolist()

        # Apply the binary masks
        for i in range(len(strides)):
            begin_mask_bit = self.get_bit(descriptor.begin_mask, i)
            end_mask_bit = self.get_bit(descriptor.end_mask, i)
            shrink_mask_bit = self.get_bit(descriptor.shrink_axis_mask, i)

            # Convert negative indices
            if begin[i] < 0:
                begin[i] += descriptor.input_shape[i]
            if end[i] < 0:
                end[i] += descriptor.input_shape[i]

            # Apply mask bits
            if strides[i] > 0:
                if begin_mask_bit:
                    begin[i] = 0
                if end_mask_bit:
                    end[i] = descriptor.input_shape[i]
            else:
                if begin_mask_bit:
                    begin[i] = descriptor.input_shape[i] - 1
                if end_mask_bit:
                    end[i] = -1

            # Apply shrink_axis_mask
            if shrink_mask_bit:
                strides[i] = 1
                end[i] = begin[i] + strides[i]

        return converter_context.model.add_strided_slice_layer(name=descriptor.layer_name,
                                                               input_name=input_name,
                                                               output_name=output_name,
                                                               begin=begin,
                                                               end=end,
                                                               strides=strides,
                                                               shrink_axis_mask=descriptor.shrink_axis_mask)
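The loop above normalizes TensorFlow's strided-slice bitfield attributes per axis. A standalone sketch of the positive-stride cases, with a get_bit helper matching what the self.get_bit calls are assumed to do (test bit i of the mask):

def get_bit(mask, index):
    # TF encodes begin/end/shrink masks as bitfields; bit i applies to axis i
    return (mask >> index) & 1

input_shape = [4, 5]
begin, end, strides = [2, 1], [3, 2], [1, 1]
begin_mask, end_mask = 0b01, 0b10   # ignore begin on axis 0, end on axis 1
for i in range(len(strides)):
    if strides[i] > 0:
        if get_bit(begin_mask, i):
            begin[i] = 0                  # masked begin: start of axis
        if get_bit(end_mask, i):
            end[i] = input_shape[i]       # masked end: end of axis
assert (begin, end) == ([0, 1], [3, 5])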
def get_biases(self, graph_helper, conv_op, bias_op):
    _, biases_tensor = GraphHelper.get_op_input_tensors(bias_op, ('?', '?'))
    if biases_tensor.op.type not in ['Identity', 'Const'] and \
            not graph_helper.check_tensor_const_origin(biases_tensor):
        raise ConverterError(
            code_to_message.get_error_message('ERROR_TF_CONV_RESOLVE_BIAS')(conv_op.name))
    biases = graph_helper.evaluate_tensor_output(biases_tensor)
    return biases
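Throughout these examples, code_to_message.get_error_message('CODE') is itself called with arguments, e.g. get_error_message('ERROR_TF_CONV_RESOLVE_BIAS')(conv_op.name), which suggests the helper returns a format callable. A minimal stand-in with the same calling convention (the message table below is hypothetical):

_MESSAGES = {
    'ERROR_TF_CONV_RESOLVE_BIAS': 'Cannot resolve bias tensor for convolution op {}',
}

def get_error_message(code):
    # Returning the bound str.format makes get_error_message(code)(args) work
    return _MESSAGES[code].format

print(get_error_message('ERROR_TF_CONV_RESOLVE_BIAS')('conv1'))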
def get_weights(self, graph_helper, conv_op):
    _, weights_tensor = GraphHelper.get_op_input_tensors(conv_op, ('?', '?'))
    if weights_tensor.op.type not in [
            'Identity', 'Const', 'Split', 'FakeQuantWithMinMaxVars']:
        raise ConverterError(
            code_to_message.get_error_message(
                'ERROR_TF_CONV_RESOLVE_WEIGHTS')(conv_op.name))
    weights = graph_helper.evaluate_tensor_output(weights_tensor)
    return weights
    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.graph_sequence.output_nodes
            ]
            try:
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(
                    batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(weights.shape[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(
                dilation_sizes.outputs[0])
            if np.shape(dilation_sizes) != (2, ):
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))

            d = ConvolutionLayerResolver.Descriptor(
                str(conv_op.name),
                consumed_nodes,
                conv_op,
                bias_op,
                strides,
                padding,
                weights,
                biases,
                output_names=output_op_nodes_names)
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            d.input_ops = [match['space_to_batch']]
            descriptors.append(d)
        return descriptors
Example #7
    def build_layer(self, converter_context, descriptor, input_descriptors, output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: ConcatLayerResolver.Descriptor
        :rtype: int
        """
        if len(input_descriptors) < 2:
            raise ConverterError(code_to_message.get_error_message('ERROR_TF_CONCAT_INPUT'))

        input_names = self.get_input_names(converter_context, descriptor, input_descriptors)
        return converter_context.model.add_concatenation_layer(descriptor.layer_name,
                                                               input_names,
                                                               descriptor.output_names[0],
                                                               descriptor.axis)
Example #8
    def build_layer(self, converter_context, descriptor, input_descriptors,
                    output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: DeConvolutionLayerResolver.Descriptor
        :rtype: int
        """
        input_dims = converter_context.graph_helper.get_op_output_shape(
            descriptor.input_tensor.op)
        if descriptor.bias_op:
            output_dims = converter_context.graph_helper.get_op_output_shape(
                descriptor.bias_op)
        else:
            output_dims = converter_context.graph_helper.get_op_output_shape(
                descriptor.deconv_op)

        # For deconvolution, padding is computed from the perspective of the
        # equivalent forward convolution, so input and output sizes are
        # intentionally swapped here.
        pad_y, pad_x, padding_strategy = ConvolutionLayerBuilder.calculate_padding_size(
            input_size=output_dims[-3:-1],
            output_size=input_dims[-3:-1],
            strides=descriptor.strides[1:3],
            padding=descriptor.padding,
            filter_dims=descriptor.weights.shape,
            dilation=[1, 1])
        if pad_y != pad_x:
            raise ConverterError(
                code_to_message.get_error_message(
                    'ERROR_TF_DECONV_NO_SUPPORT_RECT_PADDING'))

        weights = np.transpose(descriptor.weights, (0, 1, 3, 2)).copy()

        input_names = self.get_input_name(converter_context, descriptor,
                                          input_descriptors)
        return converter_context.model.add_deconvolution_layer(
            name=descriptor.layer_name,
            weights=weights,
            bias=descriptor.biases,
            stride=descriptor.strides[1],
            padding_size_strategy=padding_strategy,
            padding=pad_y,
            input_name=input_names,
            output_name=descriptor.output_names[0],
            output_width=output_dims[-2],
            output_height=output_dims[-3],
            groups=1)
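The np.transpose above swaps the last two filter axes: TensorFlow lays out transposed-convolution filters as (height, width, output channels, input channels), and the swap reorders them to the (height, width, input, output) layout the target layer expects. A quick check of the axis swap:

import numpy as np

w = np.zeros((3, 3, 16, 8), dtype=np.float32)   # H, W, C_out, C_in (TF deconv layout)
assert np.transpose(w, (0, 1, 3, 2)).shape == (3, 3, 8, 16)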
Example #9
    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []

        for match in graph_matcher.match_sequence(self.sequence):
            strided_slice_op = match['root']
            input_op = match['input']

            if input_op.type == "Const":
                continue

            begin_op = match['begin']
            end_op = match['end']
            strides_op = match['strides']

            begin_tensor = graph_helper.evaluate_tensor_output(begin_op.outputs[0])
            end_tensor = graph_helper.evaluate_tensor_output(end_op.outputs[0])
            strides_tensor = graph_helper.evaluate_tensor_output(strides_op.outputs[0])
            input_tensor = graph_helper.evaluate_tensor_output(input_op.outputs[0])

            begin_shape = graph_helper.get_op_output_shape(begin_op)
            end_shape = graph_helper.get_op_output_shape(end_op)
            strides_shape = graph_helper.get_op_output_shape(strides_op)
            input_shape = graph_helper.get_op_output_shape(input_op)

            if begin_shape != end_shape or begin_shape != strides_shape:
                raise ConverterError(code_to_message.get_error_message('ERROR_TF_STRIDED_SLICE_SHAPE_MISMATCH'))

            begin_mask = strided_slice_op.get_attr("begin_mask")
            end_mask = strided_slice_op.get_attr("end_mask")
            ellipsis_mask = strided_slice_op.get_attr("ellipsis_mask")
            new_axis_mask = strided_slice_op.get_attr("new_axis_mask")
            shrink_axis_mask = strided_slice_op.get_attr("shrink_axis_mask")

            consumed_nodes = match.consumed_nodes
            strided_slice_descriptor = StridedSliceLayerResolver.Descriptor(
                str(strided_slice_op.name), consumed_nodes, input_shape,
                begin_tensor, end_tensor, strides_tensor, begin_mask, end_mask, ellipsis_mask,
                new_axis_mask, shrink_axis_mask, output_names=[str(strided_slice_op.outputs[0].name)])
            descriptors.append(strided_slice_descriptor)

        return descriptors
Example #10
    def build_layer(self, converter_context, descriptor, input_descriptors,
                    output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: ConcatLayerResolver.Descriptor
        :rtype: int
        """
        input_names = self.get_input_names(converter_context, descriptor,
                                           input_descriptors)
        if len(input_names) < 2:
            raise ConverterError(
                code_to_message.get_error_message(
                    'ERROR_TF_ADD_N_NUM_OF_INPUTS')(descriptor.layer_name))
        output_name = descriptor.output_names[0]

        # Unroll the N-way sum into a chain of binary elementwise sums:
        # ((x0 + x1) + x2) + ... Intermediate layers are named
        # '<layer>_unroll_<i>'; the final pair is emitted under the layer's
        # own name so it produces the descriptor's output. (Starting the
        # chain from the bare first input also keeps the two-input case
        # correct: it becomes a single sum of inputs 0 and 1.)
        current_output_name = input_names[0]
        for input_index in range(1, len(input_names) - 1):
            current_input_names = [current_output_name, input_names[input_index]]
            current_output_name = descriptor.layer_name + '_unroll_' + str(input_index)
            converter_context.model.add_elementwise_sum_layer(
                current_output_name,
                [1.0 for _ in current_input_names],
                current_input_names,
                current_output_name)

        current_input_names = [current_output_name, input_names[-1]]
        return converter_context.model.add_elementwise_sum_layer(
            descriptor.layer_name, [1.0 for _ in current_input_names],
            current_input_names, output_name)
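A pure-Python sketch of the binary-sum chain the builder emits (layer and input names here are illustrative):

inputs = ['x0', 'x1', 'x2', 'x3']
layer = 'add_n'
chain = []
current = inputs[0]
for i in range(1, len(inputs) - 1):
    step_name = layer + '_unroll_' + str(i)
    chain.append((current, inputs[i], step_name))   # (lhs, rhs, output layer)
    current = step_name
chain.append((current, inputs[-1], layer))
assert chain == [('x0', 'x1', 'add_n_unroll_1'),
                 ('add_n_unroll_1', 'x2', 'add_n_unroll_2'),
                 ('add_n_unroll_2', 'x3', 'add_n')]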
    def build_layer(self, converter_context, descriptor, input_descriptors,
                    output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: NonMaxSuppressionLayerResolver.Descriptor
        :rtype: int
        """

        names = {}
        for input_descriptor in input_descriptors:
            if input_descriptor.is_output_op(descriptor.input_boxes_op):
                names[descriptor.input_boxes_op] = input_descriptor.output_names[0]
            elif input_descriptor.is_output_op(descriptor.input_scores_op):
                names[descriptor.input_scores_op] = input_descriptor.output_names[0]

        if len(names) != 2:
            raise ConverterError("Failed to detect inputs for nms op.")

        input_names = [names[descriptor.input_boxes_op], names[descriptor.input_scores_op]]
        # Append any remaining inputs; note the set difference does not
        # preserve the original input order.
        remaining_names = set(self.get_input_names(converter_context, descriptor,
                                                   input_descriptors)) - set(input_names)
        input_names.extend(remaining_names)

        # input/output ops list
        input_output_ops_pairs = [
            (descriptor.input_boxes_op, descriptor.output_boxes_op),
            (descriptor.input_scores_op, descriptor.output_scores_op)
        ]

        # Add reshape layers on the inputs as needed so they match what the
        # SNPE MultiClassNms layer expects.
        self._build_input_layers(converter_context, descriptor, names)

        input_names[0] = names[descriptor.input_boxes_op]
        input_names[1] = names[descriptor.input_scores_op]
        output_names = descriptor.output_names[:]

        # Suffix the boxes and scores outputs with '_intermediate'; a post
        # reshape (added below) restores the TensorFlow output shape.
        for input_op, output_op in input_output_ops_pairs:
            for i in range(0, len(output_names)):
                if output_names[i] == output_op.outputs[0].name:
                    output_names[i] = output_names[i] + "_intermediate"

        converter_context.model.add_multi_class_nms_layer(
            name=descriptor.layer_name,
            input_names=input_names,
            output_names=output_names,
            scoreThreshold=descriptor.score_threshold,
            iouThreshold=descriptor.iou_threshold,
            maxDetectionPerClass=descriptor.max_output_size,
            maxTotalDetections=descriptor.max_output_size)

        # Post-processing: reshape the intermediate outputs back to the
        # shapes TensorFlow expects.
        for input_op, output_op in input_output_ops_pairs:
            for i in range(0, len(output_names)):
                if output_op.outputs[0].name in output_names[i]:
                    output_name = output_op.outputs[0].name
                    shape = converter_context.graph_helper.get_op_output_shape(
                        output_op)
                    converter_context.model.add_reshape_layer(
                        output_name + '_post_reshape_to_' + str(len(shape)) +
                        'd', shape, output_names[i], output_name)
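A small sketch of the rename-then-restore naming scheme used above (all names and the shape are illustrative):

output_names = ['boxes:0', 'scores:0']
renamed = [name + '_intermediate' for name in output_names]
shape = [1, 100, 4]                      # example post-reshape output shape
post_reshape = output_names[0] + '_post_reshape_to_' + str(len(shape)) + 'd'
assert renamed == ['boxes:0_intermediate', 'scores:0_intermediate']
assert post_reshape == 'boxes:0_post_reshape_to_3d'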
    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):

                # resolve layer for nms operation
                nms_op = match['nms']
                boxes_op = match['boxes']
                scores_ops = [
                    match[k] for k in match.keys() if k.startswith("score")
                ]

                input_boxes_op = match['boxes_input'] if 'boxes_input' in match else boxes_op
                input_scores_op = match['scores_input'] if 'scores_input' in match else match['scores']

                max_output_size = graph_helper.evaluate_tensor_output(
                    match['max_output_size'].outputs[0])
                iou_threshold = graph_helper.evaluate_tensor_output(
                    match['iou_threshold'].outputs[0])
                # Evaluate the op's output tensor (not the op itself) for
                # consistency with the attributes above; default to 0 when no
                # score threshold was matched.
                score_threshold = graph_helper.evaluate_tensor_output(
                    match['score_threshold'].outputs[0]) if 'score_threshold' in match else 0

                consumed_nodes = match.consumed_nodes

                nms_descriptor = NonMaxSuppressionLayerResolver.Descriptor(
                    str(nms_op.name),
                    consumed_nodes,
                    max_output_size,
                    iou_threshold,
                    score_threshold,
                    nms_op,
                    boxes_op,
                    scores_ops,
                    input_boxes_op,
                    input_scores_op,
                    output_names=[str(nms_op.outputs[0].name)])

                descriptors.append(nms_descriptor)

                # TODO: following added for VIVO support of nms + gather in 1.23.0 to support features as inputs
                #       remove for 1.24.0 release
                # resolve layer for gather operation
                self._resolve_for_gather_layer(graph_matcher, graph_helper,
                                               nms_descriptor)

                if input_boxes_op.type == 'Const':
                    boxes_tensor = graph_helper.evaluate_tensor_output(
                        input_boxes_op.outputs[0])
                    boxes_shape = graph_helper.get_op_output_shape(
                        input_boxes_op)
                    if len(boxes_shape) == 2:
                        boxes_shape.insert(0, 1)
                    else:
                        raise ConverterError(
                            code_to_message.get_error_message(
                                'ERROR_TF_NMS_BOXES_SHAPE')(len(boxes_shape)))
                    const_descriptor = ConstantLayerResolver.Descriptor(
                        str(input_boxes_op.name), [input_boxes_op],
                        boxes_tensor, boxes_shape, nms_descriptor)
                    descriptors.append(const_descriptor)

        return descriptors
Example #13
    def load(self, graph_pb_or_meta_path, input_nodes_names,
             input_nodes_shapes, input_nodes_types, out_node_names, session):
        """
        Loads the Tensorflow Graph into the specified Session's Graph and builds a Model instance
        with all the relevant information for a ModelConverter to use during conversion.
        :type graph_pb_or_meta_path: str
        :type input_nodes_names: list[str]
        :type input_nodes_shapes: list[str]
        :type input_nodes_types: list[str]
        :type out_node_names: list[str]
        :type session: tensorflow.Session
        :rtype: Model
        """
        if len(input_nodes_names) != len(input_nodes_shapes):
            raise ConverterError(
                code_to_message.get_error_message(
                    'ERROR_TF_INPUT_NODE_SHAPE_DIMS_MISMATCH'))
        if input_nodes_types is not None:
            if len(input_nodes_names) != len(input_nodes_types):
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_INPUT_TYPES_AND_NAMES_NOT_IN_PAIRS'))
        else:
            # Set all types to default
            input_nodes_types = [Model.Input.INPUT_TYPE_DEFAULT] * len(input_nodes_names)

        graph_def = self.__import_graph(graph_pb_or_meta_path, session,
                                        out_node_names)
        with session.graph.as_default():
            inputs = []
            for name, shape, input_type in zip(input_nodes_names,
                                               input_nodes_shapes,
                                               input_nodes_types):
                self.__assert_node_in_graph(graph_def, name)
                input_tensor = session.graph.get_tensor_by_name(
                    GraphHelper.indexed_tensor_name(name))

                batched_shape = []
                try:
                    # as_list() raises ValueError for tensors of unknown rank
                    # (and int() for malformed dims); either way we fall back
                    # to parsing the raw shape string below.
                    tensor_shape = input_tensor.get_shape().as_list()
                    input_shape = list(map(int, shape.split(',')))
                    if len(input_shape) != len(tensor_shape):
                        raise ConverterError(
                            code_to_message.get_error_message(
                                'ERROR_TF_INPUT_NODE_SHAPE_DIMS_MISMATCH'))
                    batched_shape = [1] * len(tensor_shape)
                    batched_shape[-len(input_shape):] = input_shape
                except ValueError:
                    pass

                if len(batched_shape) == 0:
                    try:
                        batched_shape = list(map(int, shape.split(',')))
                    except ValueError:
                        raise ConverterError(
                            code_to_message.get_error_message(
                                'ERROR_TF_INVALID_INPUT_DIMS')(shape))

                inputs.append(Model.Input(name, batched_shape, input_type))

            visitable_graph = VisitableGraph(
                self.__get_graph_operations(graph_def, session.graph))
            visitable_graph.accept(GraphPrinter())

            return Model(graph_def, session, inputs, out_node_names)
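The try/except in the loader above appears to handle graph tensors of unknown rank: TensorShape.as_list() raises ValueError when the rank is undefined, so the loader falls back to parsing the user-supplied shape string directly. A minimal sketch of the same fallback (parse_input_shape is a hypothetical helper, not part of the converter):

def parse_input_shape(shape_str, tensor_rank=None):
    dims = list(map(int, shape_str.split(',')))
    if tensor_rank is not None and len(dims) != tensor_rank:
        raise ValueError('shape rank does not match graph tensor rank')
    return dims

assert parse_input_shape('1,224,224,3', tensor_rank=4) == [1, 224, 224, 3]
assert parse_input_shape('10,20') == [10, 20]   # unknown-rank fallback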
Example #14
@classmethod
def __assert_node_in_graph(cls, graph_def, node_name):
    if node_name not in [node.name for node in graph_def.node]:
        raise ConverterError(
            code_to_message.get_error_message(
                'ERROR_TF_NODE_NOT_FOUND_IN_GRAPH')(node_name))
Example #15
def _broadcast_tensor(self, tensor, shape):
    raise ConverterError(
        'ElementWise resolver must implement broadcast method.')
Example #16
    def build_layer(self, converter_context, descriptor, input_descriptors, output_descriptors):
        """
        :type input_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type output_descriptors: [converters.tensorflow.common.LayerDescriptor]
        :type converter_context: converters.tensorflow.converter.ConverterContext
        :type descriptor: LstmLayerResolver.UnrolledTimeStepDescriptor
        :rtype: int
        """
        if isinstance(descriptor, LstmLayerResolver.StateDescriptor):
            return

        input_descriptors = [d for d in input_descriptors if not isinstance(d, LstmLayerResolver.StateDescriptor)]
        if len(input_descriptors) not in [1, 3]:
            raise ConverterError('LSTM layer requires 1 or 3 inputs')

        input_shape = converter_context.graph_helper.get_op_output_shape(descriptor.cell_input_concat_op.inputs[0].op)
        state_shape = converter_context.graph_helper.get_op_output_shape(descriptor.cell_input_concat_op.inputs[1].op)

        gates_weights, input_weights = self._resolve_weights(descriptor, converter_context.graph_helper, state_shape)
        gates_biases = self._resolve_biases(descriptor, converter_context.graph_helper)

        def is_cell_input_descriptor(d):
            output_shape = []
            output_ops = [op for op in d.child_ops if d.is_output_op(op)]
            if len(output_ops) > 0:
                output_shape = converter_context.graph_helper.get_op_output_shape(output_ops[0])
            return len(output_shape) == 2 and output_shape[1] == descriptor.time_steps()

        cell_input_descriptors = list(filter(is_cell_input_descriptor, input_descriptors))
        cell_state_descriptors = [d for d in input_descriptors if d not in cell_input_descriptors]

        user_initial_state = len(cell_state_descriptors) == 2

        is_stacked_above_cell = self.is_stacked_cell(input_descriptors)
        if not is_stacked_above_cell:
            if len(cell_input_descriptors) != 1:
                raise ConverterError('Unable to resolve LSTM input layer name.')

            cell_input_name = cell_input_descriptors[0].output_names[0]
            input_layer_name = self._add_reshape_to_restore_time_dimension(
                converter_context, descriptor, cell_input_name, input_shape)
        else:
            input_layer_name = input_descriptors[0].output_names[0]

        is_stacked_below_cell = self.is_stacked_cell(output_descriptors)
        descriptor.set_is_stacked_cell(is_stacked_below_cell)

        output_names = [descriptor.stacked_cell_output_name]
        if user_initial_state or (not is_stacked_below_cell and len(output_descriptors) > 0):
            output_names.append('{}_state'.format(descriptor.output_names[0]))
            output_names.append(descriptor.output_names[0])

        h_0_input_name = cell_state_descriptors[0].output_names[0] if user_initial_state else ''
        c_0_input_name = cell_state_descriptors[1].output_names[0] if user_initial_state else ''
        return converter_context.model.add_lstm_layer(name=descriptor.output_names[0],
                                                      w_xc=input_weights,
                                                      b_c=gates_biases,
                                                      w_hc=gates_weights,
                                                      w_xc_static=None,
                                                      backward=False,
                                                      reset_state_at_time_step_0=not user_initial_state,
                                                      input_name=input_layer_name,
                                                      sequence_continuation_input_name='',
                                                      x_static_input_name='',
                                                      c_0_input_name=c_0_input_name,
                                                      h_0_input_name=h_0_input_name,
                                                      output_names=output_names
                                                      )
    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            weights = np.transpose(weights, [0, 1, 3, 2])
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.graph_sequence.output_nodes
            ]
            try:
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(
                    batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(
                dilation_sizes.outputs[0])
            if np.shape(dilation_sizes) != (2, ):
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))

            space_to_batch_op = match['space_to_batch']
            paddings_op = match['paddings']
            paddings_tensor = graph_helper.evaluate_tensor_output(
                paddings_op.outputs[0])
            input_op = conv_op

            batch_to_space_op = match['batch_to_space']
            crop_op = match['crops']
            crops_tensor = graph_helper.evaluate_tensor_output(
                crop_op.outputs[0])
            output_names = [str(conv_op.outputs[0].name)]

            if paddings_tensor.any() and not np.array_equal(
                    paddings_tensor, crops_tensor):
                paddings_tensor = np.pad(paddings_tensor, ((1, 1), (0, 0)),
                                         'constant')
                pad_descriptor = PadLayerResolver.Descriptor(
                    str(space_to_batch_op.name), [
                        match['space_to_batch'], match['dilation_sizes'],
                        match['paddings']
                    ],
                    paddings_tensor,
                    modeltools.PADDING_CONSTANT,
                    0.0,
                    output_names=[str(space_to_batch_op.outputs[0].name)])
                descriptors.append(pad_descriptor)
            else:
                consumed_nodes.extend(
                    [space_to_batch_op, paddings_op, match['dilation_sizes']])
                input_op = space_to_batch_op

            if crops_tensor.any() and not np.array_equal(
                    paddings_tensor, crops_tensor):
                crops_tensor = np.pad(crops_tensor, ((1, 1), (0, 0)),
                                      'constant')
                offsets = crops_tensor[:, 0]
                size = np.array(graph_helper.get_op_output_shape(
                    match['batch_to_space']),
                                dtype=np.int32)
                crop_descriptor = CropLayerResolver.Descriptor(
                    str(match['batch_to_space'].name), [
                        match['batch_to_space'], match['block_shape_out'],
                        match['crops']
                    ],
                    offsets,
                    size,
                    output_names=[
                        str(match['batch_to_space'].outputs[0].name)
                    ])
                descriptors.append(crop_descriptor)
            else:
                consumed_nodes.extend(
                    [batch_to_space_op, crop_op, match['block_shape_out']])
                output_names = output_op_nodes_names

            d = ConvolutionLayerResolver.Descriptor(str(conv_op.name),
                                                    consumed_nodes,
                                                    conv_op,
                                                    bias_op,
                                                    strides,
                                                    padding,
                                                    weights,
                                                    biases,
                                                    output_names=output_names)

            d.groups = graph_helper.get_op_output_shape(space_to_batch_op)[-1]
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            d.input_ops = [input_op]
            descriptors.append(d)

        return descriptors
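The np.pad calls in the resolver above extend the SpaceToBatchND paddings and crops, which cover only the spatial dimensions, with zero rows for the batch and channel dimensions of an NHWC tensor:

import numpy as np

paddings = np.array([[1, 1], [2, 2]])              # (H, W) x (before, after)
full = np.pad(paddings, ((1, 1), (0, 0)), 'constant')
assert full.tolist() == [[0, 0], [1, 1], [2, 2], [0, 0]]   # N, H, W, C rows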
Example #18
def resolve_layer(self, graph_matcher, graph_helper):
    raise ConverterError(
        code_to_message.get_error_message(
            'ERROR_TF_GENERAL_ABSTRACT_CLASS_MUST_BE_INHERITED'))
Example #19
def resolve_layer(self, graph_matcher, graph_helper):
    raise ConverterError(
        'Constant layers are resolved by other resolvers!')