Example #1
    def convert(self, dlc_output_path, copyright_file, model_version,
                converter_command):
        """
        :type dlc_output_path: str
        :type copyright_file: str
        :type model_version: str
        :type converter_command: str
        :rtype: None
        """
        self._graph_helper = GraphHelper(self._model.session, self._model,
                                         self._ops)
        self._topology_resolver = TopologyResolver()
        self._context = ConverterContext(self._model, modeltools.Model(),
                                         self._graph_helper,
                                         self._topology_resolver, self._logger)
        self._logger.info(
            code_to_message.get_progress_message('INFO_ALL_BUILDING_NETWORK'))
        self._context.model.add_validation_targets(
            self._context.model.get_validation_targets())
        self._convert_input_layers()
        self._convert_layers()
        self._set_model_version(model_version)
        self._context.model.set_converter_command(converter_command)
        self._context.model.set_model_copyright(
            snpe_converter_utils.get_string_from_txtfile(copyright_file))
        self._context.model.save(dlc_output_path)
Example #2
    @classmethod
    def get_split_axis_and_sizes(cls, graph_helper, split_op):
        try:
            _, split_sizes, split_axis = GraphHelper.get_op_input_tensors(split_op, ('?', 'Const', 'Const'))
            split_sizes = list(graph_helper.evaluate_tensor_output(split_sizes))
        except TensorNotFoundError:
            split_axis, _ = GraphHelper.get_op_input_tensors(split_op, ('Const', '?'))
            split_sizes = []

        split_axis = int(graph_helper.evaluate_tensor_output(split_axis))
        return split_axis, split_sizes

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['root']
            bias_op = None
            biases = None
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            consumed_nodes = list(match.consumed_nodes)
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
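            # A trailing BiasAdd (or plain Add) on the convolution output is optional;
            # when neither is found the biases default to zeros below.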
            try:
                conv_output_ops = graph_helper.get_op_outputs(conv_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)

            except OperationNotFoundError:
                pass

            if bias_op is None:
                try:
                    conv_output_ops = graph_helper.get_op_outputs(conv_op)
                    bias_op = GraphHelper.filter_single_op_by_type(
                        conv_output_ops, 'Add')
                    biases = self.get_biases(graph_helper, conv_op, bias_op)
                except OperationNotFoundError:
                    pass

            if bias_op is not None and biases is not None:
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
                consumed_nodes.append(bias_op)
            else:
                biases = np.zeros(weights.shape[-1], dtype=np.float32)

            descriptor = ConvolutionLayerResolver.Descriptor(
                str(conv_op.name),
                consumed_nodes,
                conv_op,
                bias_op,
                strides,
                padding,
                weights,
                biases,
                output_names=output_op_nodes_names)
            descriptors.append(descriptor)
        return descriptors
Example #4
    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_trans_op = match['root']
            _, weights_tensor, input_tensor = GraphHelper.get_op_input_tensors(
                conv_trans_op, ('?', '?', '?'))
            if weights_tensor.op.type not in ['Identity', 'Const']:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_DECONV_CANT_FIND_WEIGHTS_NODE'))
            strides = conv_trans_op.get_attr('strides')
            padding = conv_trans_op.get_attr('padding')
            weights = graph_helper.evaluate_tensor_output(weights_tensor)
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
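            # The bias is optional for the transposed convolution; it defaults to zeros
            # when no BiasAdd consumer is found.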
            bias_op = None
            try:
                output_ops = graph_helper.get_op_outputs(conv_trans_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    output_ops, 'BiasAdd')

                _, biases = GraphHelper.get_op_input_tensors(
                    bias_op, ('?', '?'))
                if biases.op.type not in ['Const', 'Identity']:
                    raise ConverterError(
                        code_to_message.get_error_message(
                            'ERROR_TF_DECONV_CANT_FIND_BIAS_NODE'))
                biases = graph_helper.evaluate_tensor_output(biases)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                biases = np.zeros(np.shape(weights)[-2], dtype=np.float32)

            descriptors.append(
                DeConvolutionOptimizedLayerResolver.Descriptor(
                    str(conv_trans_op.name),
                    consumed_nodes,
                    conv_trans_op,
                    bias_op,
                    weights,
                    strides,
                    padding,
                    biases,
                    input_tensor,
                    output_names=output_op_nodes_names))
        return descriptors
Example #5
    def resolve_layer(self, graph_matcher, graph_helper):
        potential_descriptors = []
        for sequence in self.sequences:
            matches = graph_matcher.match_sequence(sequence)
            for match in matches:
                crop_and_resize = match['crop_and_resize']

                try:
                    _, _, _, crop_size = GraphHelper.get_op_input_tensors(crop_and_resize, ('?', '?', '?', 'Const'))
                except TensorNotFoundError:
                    raise ConverterError(
                        code_to_message.get_message('ERROR_TF_RESOLVE_CROP_AND_RESIZE_SIZE_NOT_CONST'))

                crop_size_value = graph_helper.evaluate_tensor_output(crop_size)
                if crop_size_value.size != 2:
                    raise ConverterError(
                        code_to_message.get_message('ERROR_TF_RESOLVE_CROP_AND_RESIZE_SIZE'))

                consumed_nodes = match.consumed_nodes

                interpolation_method = str(crop_and_resize.get_attr('method'))
                extrapolation_value = float(crop_and_resize.get_attr('extrapolation_value'))

                potential_descriptors.append(
                    CropAndResizeLayerResolver.Descriptor(
                        str(crop_and_resize.name), consumed_nodes,
                        crop_size_value[1], crop_size_value[0],
                        interpolation_method, extrapolation_value))
        return potential_descriptors
Example #6
    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for sequence in self.sequences:
            matches = graph_matcher.match_sequence(sequence)
            for match in matches:
                resize_op = match['root']
                align_corners_bool = resize_op.get_attr(self.TF_ATTRIBUTE_ALIGN_CORNERS)
                input_tensor, _ = GraphHelper.get_op_input_tensors(resize_op, ('?', '?'))
                input_tensor_shape = graph_helper.get_op_output_shape(input_tensor)
                consumed_nodes = match.consumed_nodes
                mul_const = [0, 0]

                if 'mul_const' in match:
                    mul_const_op = match['mul_const']
                    mul_const = graph_helper.evaluate_tensor_output(mul_const_op.outputs[0])
                    if len(mul_const) < 2:
                        mul_const = [0, 0]

                descriptors.append(
                    ResizeBilinearLayerResolver.Descriptor(str(resize_op.name),
                                                           consumed_nodes,
                                                           input_tensor_shape,
                                                           resize_op,
                                                           align_corners_bool,
                                                           mul_const))

        return descriptors
    def get_biases(self, graph_helper, conv_op, bias_op):
        _, biases_tensor = GraphHelper.get_op_input_tensors(
            bias_op, ('?', '?'))
        if biases_tensor.op.type not in ['Identity', 'Const'] and \
                not graph_helper.check_tensor_const_origin(biases_tensor):
            raise ConverterError(
                code_to_message.get_error_message('ERROR_TF_CONV_RESOLVE_BIAS')
                (conv_op.name))
        biases = graph_helper.evaluate_tensor_output(biases_tensor)
        return biases

    def get_weights(self, graph_helper, conv_op):
        _, weights_tensor = GraphHelper.get_op_input_tensors(
            conv_op, ('?', '?'))
        if weights_tensor.op.type not in [
                'Identity', 'Const', 'Split', 'FakeQuantWithMinMaxVars'
        ]:
            raise ConverterError(
                code_to_message.get_error_message(
                    'ERROR_TF_CONV_RESOLVE_WEIGHTS')(conv_op.name))
        weights = graph_helper.evaluate_tensor_output(weights_tensor)
        return weights

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.graph_sequence.output_nodes
            ]
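            # A BiasAdd may follow the BatchToSpaceND op; consume it when present.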
            try:
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(
                    batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(weights.shape[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(
                dilation_sizes.outputs[0])
            if np.shape(dilation_sizes) != (2, ):
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))

            d = ConvolutionLayerResolver.Descriptor(
                str(conv_op.name),
                consumed_nodes,
                conv_op,
                bias_op,
                strides,
                padding,
                weights,
                biases,
                output_names=output_op_nodes_names)
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            d.input_ops = [match['space_to_batch']]
            descriptors.append(d)
        return descriptors

    def _resolve_from_match(self, descriptors, graph_helper, match):
        conv_op = match['conv']
        strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
        padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
        weights = self.get_weights(graph_helper, conv_op)
        weights = np.transpose(weights, [0, 1, 3, 2])
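        # Reorder the weights by swapping their last two axes; the group count is
        # derived from the input depth further below.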

        if 'bias' in match:
            biases = self.get_biases(graph_helper, conv_op, match['bias'])
        else:
            biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)
        consumed_nodes = match.consumed_nodes
        d = ConvolutionLayerResolver.Descriptor(str(conv_op.name),
                                                consumed_nodes, conv_op, None,
                                                strides, padding, weights,
                                                biases)
        input_tensor, _ = GraphHelper.get_op_input_tensors(conv_op, ('?', '?'))
        d.groups = graph_helper.get_op_output_shape(input_tensor)[-1]
        descriptors.append(d)
Example #11
    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            concat_op = match['root']
            consumed_nodes = match.consumed_nodes
            concat_descriptor = ConcatLayerResolver.Descriptor(str(concat_op.name), consumed_nodes,
                                                               None, [concat_op.outputs[0].name])

            non_const_inputs = [tensor for tensor in concat_op.inputs if tensor.op.type != 'Const']
            const_ops = [tensor.op for tensor in concat_op.inputs if tensor.op.type == 'Const']
            axis_tensor = None
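            # Constant inputs other than the axis become ConstantLayer descriptors
            # associated with the concat descriptor.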
            if len(non_const_inputs) < 2 or len(const_ops) > 1:
                for i in range(0, len(const_ops) - 1):
                    const_value = graph_helper.evaluate_tensor_output(const_ops[i].outputs[0])
                    const_shape = graph_helper.get_op_output_shape(const_ops[i].outputs[0])
                    descriptors.append(ConstantLayerResolver.Descriptor(str(const_ops[i]),
                                                                        [const_ops[i]],
                                                                        const_value,
                                                                        const_shape,
                                                                        concat_descriptor))
                # Make the assumption that the axis is always the last constant
                axis_tensor = const_ops[-1]

            max_shape = 0
            for t in non_const_inputs:
                shape = graph_helper.get_op_output_shape(t.op)
                if len(shape) > max_shape:
                    max_shape = len(shape)

            if not axis_tensor:
                axis_tensor = GraphHelper.filter_single_op_by_type([t.op for t in concat_op.inputs], 'Const')
            axis = int(graph_helper.evaluate_tensor_output(axis_tensor.outputs[0]))
            if axis < 0:
                axis += max_shape

            concat_descriptor.axis = axis
            descriptors.append(concat_descriptor)

        return descriptors
Example #12
    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            fill_op = match['root']
            consumed_nodes = match.consumed_nodes
            shape_tensor, scalar_tensor = GraphHelper.get_op_input_tensors(
                fill_op, ('?', 'Const'))
            shape = graph_helper.evaluate_tensor_output(shape_tensor).tolist()
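            # Normalize the fill shape to rank 4: drop leading dimensions or left-pad with 1s.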
            while len(shape) > 4:
                shape = shape[1:]

            while len(shape) < 4:
                shape = [1] + shape
            scalar = graph_helper.evaluate_tensor_output(scalar_tensor)

            d = FillLayerResolver.Descriptor(str(fill_op.name), consumed_nodes,
                                             shape, scalar)
            descriptors.append(d)

        return descriptors
Example #13
class DlcConverter(object):
    def __init__(self, model, strict_node_resolution):
        """
        :type model: converters.tensorflow.loader.Model
        :type strict_node_resolution: bool
        """
        self._logger = logging.getLogger()  # type: logging.Logger
        self._context = None  # type: ConverterContext
        self._model = model
        self._strict_node_resolution = strict_node_resolution
        self._all_ops_consumed = True
        self._ops = self._resolve_graph_operations_from_model(model)
        self._graph_helper = None
        self._input_descriptors = []
        self._topology_resolver = None

    def convert(self, dlc_output_path, copyright_file, model_version,
                converter_command):
        """
        :type dlc_output_path: str
        :type copyright_file: str
        :type model_version: str
        :type converter_command: str
        :rtype: None
        """
        self._graph_helper = GraphHelper(self._model.session, self._model,
                                         self._ops)
        self._topology_resolver = TopologyResolver()
        self._context = ConverterContext(self._model, modeltools.Model(),
                                         self._graph_helper,
                                         self._topology_resolver, self._logger)
        self._logger.info(
            code_to_message.get_progress_message('INFO_ALL_BUILDING_NETWORK'))
        self._context.model.add_validation_targets(
            self._context.model.get_validation_targets())
        self._convert_input_layers()
        self._convert_layers()
        self._set_model_version(model_version)
        self._context.model.set_converter_command(converter_command)
        self._context.model.set_model_copyright(
            snpe_converter_utils.get_string_from_txtfile(copyright_file))
        self._context.model.save(dlc_output_path)

    def _convert_input_layers(self):
        """
        :rtype: None
        """
        for model_input in self._context.inputs:
            input_operation = self._context.graph.get_operation_by_name(
                model_input.name)
            shape = self._graph_helper.get_op_output_shape(input_operation)
            if None in shape:
                message = code_to_message.get_error_message(
                    'ERROR_TF_UNABLE_TO_RESOLVE_GRAPH_INPUT_DIMS')
                raise ConverterError(message(model_input.name))
            if model_input.shape != shape:
                message = code_to_message.get_error_message(
                    'ERROR_TF_UNEXPECTED_INPUT_SHAPE')
                raise ConverterError(message(model_input.shape, shape))

            self._logger.info(
                code_to_message.get_progress_message(
                    'INFO_TF_BUILDING_INPUT_LAYER')(input_operation.name,
                                                    shape))

            layer_name = str(input_operation.outputs[0].name)
            descriptor = InputLayerDescriptor(layer_name, [input_operation])
            self._input_descriptors.append(descriptor)
            self._ops.remove(input_operation)
            self._context.model.add_data_layer(descriptor.output_names[0],
                                               shape, 'rgb', 'rgb',
                                               model_input.type)

    def _convert_layers(self):
        """
        :rtype: None
        """
        graph_ops = list(self._ops)
        descriptors = self._resolve_descriptors_from_nodes(graph_ops)
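        # When several resolvers claim the same op, keep the descriptor that covers more ops.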
        descriptors = self._resolve_hierarchical_resolution_conflicts(
            descriptors)
        original_descriptors = descriptors

        self._topology_resolver.resolve_topology(self._input_descriptors +
                                                 descriptors)
        descriptors = self._topology_resolver.sort_descriptors_in_execution_order(
            descriptors, self._input_descriptors)
        descriptors = self._filter_disconnected_descriptors(descriptors)

        if self._strict_node_resolution:
            self._assert_all_ops_supported(original_descriptors, graph_ops)
            self._assert_all_descriptors_consumed(descriptors,
                                                  original_descriptors)
            if self._all_ops_consumed is False:
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_OPERATION_NOT_MAPPED_TO_LAYER'))

        self._transform_descriptors(descriptors)
        self._topology_resolver.resolve_topology(self._input_descriptors +
                                                 descriptors)
        descriptors = [d for d in descriptors if not d.is_ignored]

        self._create_layers(descriptors)

    def _assert_all_ops_supported(self, descriptors, graph_ops):
        graph_ops = self._filter_unconsumed_ops(descriptors, graph_ops)

        def is_parameter_op(o):
            return o.type in ['Const', 'Identity', 'Variable']

        remaining_ops = [op for op in graph_ops if not is_parameter_op(op)]
        if len(remaining_ops) > 0:
            self._all_ops_consumed = False
            self._logger.warning(
                code_to_message.get_warning_message(
                    'WARNING_UNSUPPORTED_OPS_FOUND'))
        for op in remaining_ops:
            self._logger.warning(
                code_to_message.get_warning_message(
                    'WARNING_TF_OP_NOT_SUPPORTED')(op.name, op.type))

    def _assert_all_descriptors_consumed(self, descriptors,
                                         original_descriptors):
        unconsumed_descriptors = list(
            set(original_descriptors) - set(descriptors))
        unconsumed_descriptors = unconsumed_descriptors + [
            d for d in descriptors if d.is_ignored
        ]
        unconsumed_descriptors = [
            d for d in unconsumed_descriptors
            if d.layer_type not in ['Constant', 'IgnoredLayer']
        ]
        if len(unconsumed_descriptors) > 0:
            self._all_ops_consumed = False
            self._logger.warning(
                code_to_message.get_warning_message(
                    'WARNING_UNCONSUMED_LAYERS'))
        for d in unconsumed_descriptors:
            self._logger.warning(
                code_to_message.get_warning_message(
                    'WARNING_TF_LAYER_NOT_CONSUMED')(d.layer_name,
                                                     d.layer_type))

    def _filter_disconnected_descriptors(self, descriptors):
        output_descriptors = [
            descriptor for op, descriptor in list(
                self._topology_resolver.descriptor_ops_map.items())
            if op.name in self._model.out_nodes_names
        ]
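        # Walk backwards from the output descriptors through their inputs; any descriptor
        # that is never reached is marked as ignored.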
        descriptors_queue = list(set(output_descriptors))
        result = list(output_descriptors)
        while len(descriptors_queue) > 0:
            current_descriptor = descriptors_queue.pop(0)
            inputs = self._topology_resolver.get_input_layers_for(
                current_descriptor)
            for input_descriptor in inputs:
                if input_descriptor in descriptors and input_descriptor not in result:
                    descriptors_queue.append(input_descriptor)
                    result.append(input_descriptor)
        descriptors_to_ignore = set(descriptors) - set(result)
        for descriptor in descriptors:
            if descriptor in descriptors_to_ignore:
                descriptor.set_ignored(True)
        return descriptors

    def _create_layers(self, descriptors):
        for descriptor in descriptors:
            layer_builder = self._create_layer_builder(descriptor)
            self._create_layer(layer_builder, descriptor)

    def _transform_descriptors(self, descriptors):
        for descriptor in descriptors:
            layer_builder = self._create_layer_builder(descriptor)
            inputs = self._topology_resolver.get_input_layers_for(descriptor)
            outputs = self._topology_resolver.get_output_layers_for(descriptor)
            layer_builder.transform_layer(self._context, descriptor, inputs,
                                          outputs)

    def _resolve_hierarchical_resolution_conflicts(self, descriptors):
        """
        :type descriptors: list(LayerDescriptor)
        :rtype: list(LayerDescriptor)
        """
        input_ops = set(
            [o for d in self._input_descriptors for o in d.child_ops])
        op_to_descriptor = OrderedDict()
        for d in descriptors:
            for o in d.child_ops:
                if o in input_ops and len(d.child_ops) == 1:
                    continue

                current_descriptor = op_to_descriptor.get(o, None)
                if current_descriptor:
                    if (len(d.child_ops) > len(current_descriptor.child_ops)) or \
                            (len(d.child_ops) == len(current_descriptor.child_ops) and
                             isinstance(current_descriptor, IgnoredLayersResolver.Descriptor)):
                        op_to_descriptor[o] = d
                        for op, descriptor in list(op_to_descriptor.items()):
                            if descriptor == current_descriptor:
                                op_to_descriptor[op].child_ops.remove(o)
                                op_to_descriptor[op].set_ignored(True)
                                op_to_descriptor[op].layer_name += '_ignored'
                    else:
                        break
                else:
                    op_to_descriptor[o] = d
        return uniques(list(op_to_descriptor.values()))

    @classmethod
    def _filter_unconsumed_ops(cls, descriptors, ops):
        filtered = ops[:]
        for d in descriptors:
            for o in d.child_ops:
                filtered.remove(o)
        return filtered

    @classmethod
    def _remove_descriptors_with_removed_ops(cls, _descriptors, ops):
        descriptors = []
        for descriptor in _descriptors:
            do_filter = False
            for op in descriptor.child_ops:
                if op not in ops:
                    do_filter = True
                    break
            if not do_filter:
                descriptors.append(descriptor)
        return descriptors

    def _resolve_descriptors_from_nodes(self, ops):
        """
        :type ops: list(tf.Operation)
        :rtype: list(LayerDescriptor)
        """
        descriptors = []
        resolvers = self._create_layer_resolvers()

        constructor = TFGraphBuilder(ops)
        constructor.link_nodes()

        graph_matcher = GraphMatcher(constructor.nodes)
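        # Run every layer resolver; resolvers that report a final resolution have their
        # consumed ops removed from the graph before the remaining resolvers run.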
        for resolver in resolvers:
            resolved_descriptors = resolver.resolve_layer(
                graph_matcher, self._graph_helper)
            if len(resolved_descriptors) == 0:
                continue

            resolved_descriptors = self._remove_descriptors_with_removed_ops(
                resolved_descriptors, ops)

            if resolver.is_final_resolution():
                ops_to_remove = [
                    n for d in resolved_descriptors for n in d.child_ops
                ]
                constructor = TFGraphBuilder(
                    [o for o in ops if o not in ops_to_remove])
                constructor.link_nodes()
                graph_matcher = GraphMatcher(constructor.nodes)
            descriptors.extend(resolved_descriptors)
        return descriptors

    @classmethod
    def _create_layer_resolvers(cls):
        return [resolver_class() for resolver_class in layers.layer_resolvers]

    def _create_layer(self, layer_builder, descriptor):
        """
        :type descriptor: converters.tensorflow.common.LayerDescriptor
        :rtype: None
        """
        self._logger.info(
            code_to_message.get_progress_message(
                'INFO_ALL_BUILDING_LAYER_W_NODES')(
                    descriptor.layer_type,
                    [op.name for op in descriptor.child_ops]))

        inputs = self._topology_resolver.get_input_layers_for(descriptor)
        outputs = self._topology_resolver.get_output_layers_for(descriptor)
        layer_builder.build_layer(self._context, descriptor, inputs, outputs)

    @classmethod
    def _create_layer_builder(cls, descriptor):
        builder_class = layers.layer_builders.get(type(descriptor), None)
        if builder_class is None:
            raise ConverterError(
                code_to_message.get_error_message(
                    'ERROR_TF_NO_INPUT_TO_CREATE_LAYER')(type(descriptor)))
        return builder_class()

    def _set_model_version(self, model_version):
        """
        :type model_version:  str
        :rtype: None
        """
        if model_version is not None:
            self._context.model.set_model_version(model_version[:64])

    @classmethod
    def _resolve_graph_operations_from_model(cls, model):
        """
        :type model: converters.tensorflow.loader.Model
        :rtype: list[tensorflow.Operation]
        """
        operations_map = dict()
        for op in model.session.graph.get_operations():
            operations_map[str(op.name)] = op

        input_ops = set()
        for i in model.inputs:
            input_ops.add(operations_map[i.name])

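        # Walk backwards (BFS) from each output node, without expanding past the graph
        # inputs, to collect the ops reachable from the requested outputs.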
        all_ops_in_paths = set()
        for output_op_name in model.out_nodes_names:
            queue = [operations_map[output_op_name]]
            visited = set()
            while len(queue) > 0:
                head = queue.pop(0)
                if head in visited:
                    continue
                visited.add(head)

                if head in input_ops:
                    continue

                for t in head.inputs:
                    queue.append(t.op)

            all_ops_in_paths.update(visited)

        return list(all_ops_in_paths)

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for sequence in self.sequences:
            for match in graph_matcher.match_sequence(sequence):
                embedding_lookup_op = match['gather']
                consumed_nodes = match.consumed_nodes
                output_dim = graph_helper.get_op_output_shape(
                    embedding_lookup_op)

                # get rid of axis op from inputs
                inputs_sanitized = []
                if len(embedding_lookup_op.inputs) == 2:
                    inputs_sanitized.extend(embedding_lookup_op.inputs)
                elif len(embedding_lookup_op.inputs) == 3:
                    for tensor in embedding_lookup_op.inputs:
                        # exclude axis op
                        if tensor.op.type == 'Const' and len(
                                graph_helper.get_op_output_shape(
                                    tensor.op)) == 0:
                            continue
                        else:
                            inputs_sanitized.append(tensor)

                # ids is always a graph input, while params may or may not be; hence the ids tensor type is Placeholder.
                if all(tensor.op.type == 'Placeholder'
                       for tensor in inputs_sanitized):
                    ids_candidate, params_candidate = inputs_sanitized[
                        0], inputs_sanitized[1]
                    ids_candidate_shape = graph_helper.get_op_output_shape(
                        ids_candidate.op)
                    params_candidate_shape = graph_helper.get_op_output_shape(
                        params_candidate.op)
                    # Check shapes to determine which tensor is ids and which is params,
                    # assuming the ids shape comes first in the output dims.
                    # If both have the same shape, the only way left to tell them apart
                    # is by name; if that fails too, raise an error.
                    if ids_candidate_shape == params_candidate_shape:
                        ids_candidate = [
                            tensor for tensor in inputs_sanitized
                            if tensor.name.find("ids") != -1
                        ]
                        params_candidate = [
                            tensor for tensor in inputs_sanitized
                            if tensor.name.find("params") != -1
                        ]
                        if len(ids_candidate) == 0 or len(
                                params_candidate) == 0:
                            raise ConverterError(
                                get_error_message(
                                    'ERROR_TF_EMBEDDING_CANNOT_RESOLVE_PARAMS_AND_IDS'
                                ))
                    else:
                        if output_dim[:len(ids_candidate_shape)] != ids_candidate_shape and \
                                output_dim[:len(params_candidate_shape)] == params_candidate_shape:
                            ids_candidate, params_candidate = params_candidate, ids_candidate
                    descriptors.append(
                        EmbeddingLayerResolver.Descriptor(
                            str(embedding_lookup_op.name), consumed_nodes,
                            output_dim, [
                                str(ids_candidate.name),
                                str(params_candidate.name)
                            ]))
                else:
                    ids = [
                        tensor for tensor in inputs_sanitized
                        if tensor.op.type == 'Placeholder'
                    ][0]
                    params_candidate_op = [
                        tensor.op for tensor in inputs_sanitized
                        if tensor.op.type != 'Placeholder'
                    ][0]

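                    # Follow Identity ops back to the underlying params op so that they are
                    # consumed together with a potential constant layer.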
                    const_consumed_ops = [params_candidate_op]
                    while params_candidate_op.type == 'Identity':
                        params_candidate_op = params_candidate_op.inputs[0].op
                        const_consumed_ops.append(params_candidate_op)

                    embedding_descriptor = EmbeddingLayerResolver.Descriptor(
                        str(embedding_lookup_op.name), consumed_nodes,
                        output_dim, [
                            str(ids.name),
                            GraphHelper.indexed_tensor_name(
                                params_candidate_op.name)
                        ])
                    descriptors.append(embedding_descriptor)
                    if params_candidate_op.type == 'Const':
                        embedding_shape = graph_helper.get_op_output_shape(
                            params_candidate_op)
                        const_tensor = graph_helper.evaluate_tensor_output(
                            params_candidate_op.outputs[0])
                        const_descriptor = ConstantLayerResolver.Descriptor(
                            str(params_candidate_op.name), const_consumed_ops,
                            const_tensor, embedding_shape,
                            embedding_descriptor)
                        descriptors.append(const_descriptor)
        return descriptors
Example #15
    def load(self, graph_pb_or_meta_path, input_nodes_names,
             input_nodes_shapes, input_nodes_types, out_node_names, session):
        """
        Loads the Tensorflow Graph into the specified Session's Graph and builds a Model instance
        with all the relevant information for a ModelConverter to use during conversion.
        :type graph_pb_or_meta_path: str
        :type input_nodes_names: list[str]
        :type input_nodes_shapes: list[str]
        :type input_nodes_types: list[str]
        :type out_node_names: list[str]
        :type session: tensorflow.Session
        :rtype: Model
        """
        if len(input_nodes_names) != len(input_nodes_shapes):
            raise ConverterError(
                code_to_message.get_error_message(
                    'ERROR_TF_INPUT_NODE_SHAPE_DIMS_MISMATCH'))
        if input_nodes_types is not None:
            if len(input_nodes_names) != len(input_nodes_types):
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_INPUT_TYPES_AND_NAMES_NOT_IN_PAIRS'))
        else:
            # Set all types to default
            input_nodes_types = [Model.Input.INPUT_TYPE_DEFAULT
                                 ] * len(input_nodes_names)

        graph_def = self.__import_graph(graph_pb_or_meta_path, session,
                                        out_node_names)
        with session.graph.as_default():
            inputs = []
            for name, shape, input_type in zip(input_nodes_names,
                                               input_nodes_shapes,
                                               input_nodes_types):
                self.__assert_node_in_graph(graph_def, name)
                input_tensor = session.graph.get_tensor_by_name(
                    GraphHelper.indexed_tensor_name(name))

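                # Derive the shape from the graph tensor when possible; on failure
                # (e.g. unknown tensor rank) fall back to the raw user-supplied shape below.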
                batched_shape = []
                try:
                    tensor_shape = input_tensor.get_shape().as_list()
                    input_shape = list(map(int, shape.split(',')))
                    if len(input_shape) != len(tensor_shape):
                        raise ConverterError(
                            code_to_message.get_error_message(
                                'ERROR_TF_INPUT_NODE_SHAPE_DIMS_MISMATCH'))
                    batched_shape = [1] * len(tensor_shape)
                    batched_shape[-len(input_shape):] = input_shape
                except ValueError:
                    pass

                if len(batched_shape) == 0:
                    try:
                        batched_shape = list(map(int, shape.split(',')))
                    except ValueError:
                        raise ConverterError(
                            code_to_message.get_error_message(
                                'ERROR_TF_INVALID_INPUT_DIMS')(shape))

                inputs.append(Model.Input(name, batched_shape, input_type))

            visitable_graph = VisitableGraph(
                self.__get_graph_operations(graph_def, session.graph))
            visitable_graph.accept(GraphPrinter())

            return Model(graph_def, session, inputs, out_node_names)

    def resolve_layer(self, graph_matcher, graph_helper):
        descriptors = []
        for match in graph_matcher.match_sequence(self.sequence):
            conv_op = match['conv_op_1']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = match['weights']
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.sequence.output_nodes
            ]
            try:
                concat_op = match['concat_op']
                concat_op_output_ops = graph_helper.get_op_outputs(concat_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    concat_op_output_ops, 'BiasAdd')
                # the bias input needs to be consumed as well
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(weights.outputs[0].get_shape()[-1],
                                  dtype=np.float32)

            weights = graph_helper.evaluate_tensor_output(weights.outputs[0])
            descriptor = ConvolutionLayerResolver.Descriptor(
                str(conv_op.name),
                consumed_nodes,
                conv_op,
                bias_op,
                strides,
                padding,
                weights,
                biases,
                output_names=output_op_nodes_names)
            descriptor.input_ops = [match['a'], match['b']]
            descriptors.append(descriptor)

        for match in graph_matcher.match_sequence(
                self.sequence_with_strided_slice):
            if not match.consumed_nodes:
                continue
            input_op = match['input']
            concat_op = match['concat']
            axis_op = match['axis']
            conv_ops = self._get_repeatable_op_by_id(match, 'conv')
            weight_ops = self._get_repeatable_op_by_id(match, 'weights')
            bias_ops = self._get_repeatable_op_by_id(match, 'biases')
            bias_add_ops = self._get_repeatable_op_by_id(match, 'bias')
            ss_ops = self._get_repeatable_op_by_id(match, 'ss')

            input_shape = graph_helper.get_op_output_shape(input_op)
            weight_shapes = [
                graph_helper.get_op_output_shape(weight_op)
                for weight_op in weight_ops
            ]

            ss_strides = [
                graph_helper.evaluate_tensor_output(
                    ss_strides_op.outputs[0]).tolist()
                for ss_strides_op in self._get_repeatable_op_by_id(
                    match, 'ss_strides')
            ]
            ss_begins = [
                graph_helper.evaluate_tensor_output(
                    ss_begin_op.outputs[0]).tolist()
                for ss_begin_op in self._get_repeatable_op_by_id(
                    match, 'ss_begin')
            ]
            ss_ends = [
                graph_helper.evaluate_tensor_output(
                    ss_end_op.outputs[0]).tolist()
                for ss_end_op in self._get_repeatable_op_by_id(
                    match, 'ss_end')
            ]

            bias_add_shapes = [
                graph_helper.get_op_output_shape(bias_add_op)
                for bias_add_op in bias_add_ops
            ]

            strides = [
                conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
                for conv_op in conv_ops
            ]
            paddings = [
                conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
                for conv_op in conv_ops
            ]

            ss_shapes = [
                graph_helper.get_op_output_shape(ss_op.outputs[0])
                for ss_op in ss_ops
            ]

            num_groups = len(conv_ops)

            axis = graph_helper.evaluate_tensor_output(axis_op.outputs[0])

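            # Heuristic check for grouped convolution: every branch must agree on shapes,
            # strides and padding, and the strided slices must partition the input channels.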
            is_grouped_convolution = True
            is_grouped_convolution &= self._elements_are_same(bias_add_shapes)
            is_grouped_convolution &= self._elements_are_same(weight_shapes)
            is_grouped_convolution &= self._elements_are_same(strides)
            is_grouped_convolution &= self._elements_are_same(paddings)
            is_grouped_convolution &= self._elements_are_same(ss_shapes)
            is_grouped_convolution &= self._elements_are_same(ss_strides)
            is_grouped_convolution &= not self._elements_are_same(ss_begins)
            is_grouped_convolution &= not self._elements_are_same(ss_ends)
            # the strided slices must evenly split the last input dimension across the groups
            is_grouped_convolution &= ss_shapes[0][
                -1] * num_groups == input_shape[-1]
            # slice strides must be all ones in every dimension
            is_grouped_convolution &= ss_strides[0] == [1] * len(ss_strides[0])
            # concat must be on the last axis in grouped convolution
            is_grouped_convolution &= axis == -1 or axis == (
                len(bias_add_shapes[0]) - 1)

            if not is_grouped_convolution:
                logging.getLogger().warning(
                    code_to_message.get_error_message(
                        'WARNING_TF_GROUP_CONV_RESOLVE'))
                continue

            weight_tensors = [
                graph_helper.evaluate_tensor_output(weight_op.outputs[0])
                for weight_op in weight_ops
            ]
            weights = np.concatenate(weight_tensors, axis=-1)

            bias_tensors = [
                graph_helper.evaluate_tensor_output(bias_op.outputs[0])
                for bias_op in bias_ops
            ]
            biases = np.concatenate(bias_tensors, axis=-1)

            descriptor = ConvolutionLayerResolver.Descriptor(
                str(concat_op.name),
                match.consumed_nodes,
                conv_ops[0],
                None,
                strides[0],
                paddings[0],
                weights,
                biases,
                output_names=[str(concat_op.outputs[0].name)])
            descriptor.input_ops = ss_ops
            descriptor.output_op = concat_op
            descriptors.append(descriptor)

        return descriptors

    def resolve_layer(self, graph_matcher, graph_helper):
        matches = graph_matcher.match_sequence(self.graph_sequence)
        if len(matches) == 0:
            return []
        descriptors = []
        for match in matches:
            conv_op = match['conv_op']
            strides = conv_op.get_attr(self.TF_ATTRIBUTE_STRIDES)
            padding = conv_op.get_attr(self.TF_ATTRIBUTE_PADDING)
            weights = self.get_weights(graph_helper, conv_op)
            weights = np.transpose(weights, [0, 1, 3, 2])
            consumed_nodes = match.consumed_nodes
            output_op_nodes_names = [
                str(match[node.identifier].outputs[0].name)
                for node in self.graph_sequence.output_nodes
            ]
            try:
                batch_to_space_op = match['batch_to_space']
                conv_output_ops = graph_helper.get_op_outputs(
                    batch_to_space_op)
                bias_op = GraphHelper.filter_single_op_by_type(
                    conv_output_ops, 'BiasAdd')
                biases = self.get_biases(graph_helper, conv_op, bias_op)
                consumed_nodes.append(bias_op)
                output_op_nodes_names = [str(bias_op.outputs[0].name)]
            except OperationNotFoundError:
                bias_op = None
                biases = np.zeros(np.shape(weights)[-1], dtype=np.float32)
            dilation_sizes = match['dilation_sizes']
            dilation_sizes = graph_helper.evaluate_tensor_output(
                dilation_sizes.outputs[0])
            if np.shape(dilation_sizes) != (2, ):
                raise ConverterError(
                    code_to_message.get_error_message(
                        'ERROR_TF_CONV_RESOLVE_DILATION')(conv_op.name))

            space_to_batch_op = match['space_to_batch']
            paddings_op = match['paddings']
            paddings_tensor = graph_helper.evaluate_tensor_output(
                paddings_op.outputs[0])
            input_op = conv_op

            batch_to_space_op = match['batch_to_space']
            crop_op = match['crops']
            crops_tensor = graph_helper.evaluate_tensor_output(
                crop_op.outputs[0])
            output_names = [str(conv_op.outputs[0].name)]

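            # If the SpaceToBatchND paddings are not cancelled out by the BatchToSpaceND
            # crops, emit an explicit Pad layer; otherwise fold the op into the convolution.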
            if paddings_tensor.any() and not np.array_equal(
                    paddings_tensor, crops_tensor):
                paddings_tensor = np.pad(paddings_tensor, ((1, 1), (0, 0)),
                                         'constant')
                pad_descriptor = PadLayerResolver.Descriptor(
                    str(space_to_batch_op.name), [
                        match['space_to_batch'], match['dilation_sizes'],
                        match['paddings']
                    ],
                    paddings_tensor,
                    modeltools.PADDING_CONSTANT,
                    0.0,
                    output_names=[str(space_to_batch_op.outputs[0].name)])
                descriptors.append(pad_descriptor)
            else:
                consumed_nodes.extend(
                    [space_to_batch_op, paddings_op, match['dilation_sizes']])
                input_op = space_to_batch_op

            if crops_tensor.any() and not np.array_equal(
                    paddings_tensor, crops_tensor):
                crops_tensor = np.pad(crops_tensor, ((1, 1), (0, 0)),
                                      'constant')
                offsets = crops_tensor[:, 0]
                size = np.array(graph_helper.get_op_output_shape(
                    match['batch_to_space']),
                                dtype=np.int32)
                crop_descriptor = CropLayerResolver.Descriptor(
                    str(match['batch_to_space'].name), [
                        match['batch_to_space'], match['block_shape_out'],
                        match['crops']
                    ],
                    offsets,
                    size,
                    output_names=[
                        str(match['batch_to_space'].outputs[0].name)
                    ])
                descriptors.append(crop_descriptor)
            else:
                consumed_nodes.extend(
                    [batch_to_space_op, crop_op, match['block_shape_out']])
                output_names = output_op_nodes_names

            d = ConvolutionLayerResolver.Descriptor(str(conv_op.name),
                                                    consumed_nodes,
                                                    conv_op,
                                                    bias_op,
                                                    strides,
                                                    padding,
                                                    weights,
                                                    biases,
                                                    output_names=output_names)

            d.groups = graph_helper.get_op_output_shape(space_to_batch_op)[-1]
            d.dilationY = int(dilation_sizes[0])
            d.dilationX = int(dilation_sizes[1])
            d.input_ops = [input_op]
            descriptors.append(d)

        return descriptors