Example #1
def _eliminate_named_tuples(data):
    def transform(data_):
        if isinstance(data_, tuple):
            # namedtuples pass this isinstance check too; tuple(...) rebuilds
            # them as plain tuples, dropping the field names.
            return tuple(data_)
        return data_

    return utils.recursive_transform(data, transform)
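
Every example on this page hands the actual tree walk to utils.recursive_transform, which is not shown here. As a rough sketch only, inferred from the call sites rather than taken from the NNEF-Tools sources, the helper can be pictured as rebuilding plain dict, list and tuple containers and passing every other node, including named tuples, to the transform function; that is why the isinstance check in Example #1 ever receives a tuple. The name recursive_transform_sketch below is hypothetical.

from collections import OrderedDict

def recursive_transform_sketch(data, fun):
    # Hypothetical stand-in for utils.recursive_transform; the real helper
    # may differ in detail.
    if type(data) in (dict, OrderedDict):
        # Rebuild mappings with transformed values; keys are left untouched.
        return type(data)(
            (k, recursive_transform_sketch(v, fun)) for k, v in data.items())
    if type(data) in (list, tuple):
        # Exact type checks: named tuples are subclasses of tuple, so they
        # are not recursed here but fall through to fun(data) below.
        return type(data)(recursive_transform_sketch(v, fun) for v in data)
    return fun(data)

With Example #1's transform, recursive_transform_sketch({'p': Point(1, 2)}, transform) would return {'p': (1, 2)} for a hypothetical namedtuple Point.
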
Example #2
def _transform_inputs_before_print(inputs):
    def transform(input_):
        if isinstance(input_, NNEFTensor):
            if input_.is_constant and input_.rank == 0:
                return input_.data[0]
            else:
                return nnef.Identifier(input_.name)
        return input_

    return utils.recursive_transform(inputs, transform)
Example #3
def convert_default(converter, nnef_op, caffe2_graph):
    # type: (Converter, NNEFOperation, Caffe2Graph)->None

    print("Warning: Converter of {} is not implemented, doing default conversion.".format(nnef_op.name))

    Caffe2Operation(graph=caffe2_graph,
                    name=nnef_op.name,
                    inputs=converter.converted_tensors(nnef_op.inputs),
                    attribs=utils.recursive_transform(nnef_op.attribs, lambda x: x if x is not None else "None"),
                    outputs=converter.converted_tensors(nnef_op.outputs))
Example #4
def _format_args(args):
    parts = []
    for k, v in args.items():
        arg = utils.recursive_transform(v, _transform_arg)
        if k == "dtype":  # TODO less hack
            arg_str = arg if arg is None else "tf.{}".format(arg[1:-1])
        else:
            arg_str = _format_rec(arg)
        parts.append("{}={}".format(k, arg_str))
    return ", ".join(parts)
Example #5
def _result_to_identifiers(result):
    def transform(result_):
        assert isinstance(result_, NNEFTensor), \
            "Results must be NNEF tensors, or lists/tuples of that."
        return nnef.Identifier(result_.name)

    result = utils.recursive_transform(result, transform)
    if isinstance(result, nnef.Identifier) or isinstance(result, list):
        return [result]
    elif isinstance(result, tuple):
        return list(result)
    else:
        assert False, "Unexpected result type: {}".format(type(result))
Example #6
    def _str_dict(self):
        if any(
                isinstance(value, np.ndarray)
                for value in six.itervalues(self.attribs)):

            def arr_to_str(x):
                # Convert numpy arrays to a length-limited string form.
                if isinstance(x, np.ndarray):
                    return _ndarray_to_str(x, MAX_ARRAY_LENGTH_TO_PRINT)
                return x

            attribs = utils.recursive_transform(self.attribs, arr_to_str)
        else:
            attribs = self.attribs
        return OrderedDict([('inputs', self._inputs),
                            ('outputs', self._outputs), ('attribs', attribs)])
Example #7
    def _match_attribs(self, op, settings, attrib_patterns):
        def trafo(arg):
            return arg if isinstance(arg, Pattern) else _Const(arg)

        attrib_patterns = utils.recursive_transform(attrib_patterns, trafo)  # type: typing.Dict[str, Pattern]

        dict_ = {self: op}
        for attrib_name, attrib_pattern in six.iteritems(attrib_patterns):
            attrib_value = op.attribs[attrib_name]
            match_ = attrib_pattern._match(attrib_value,
                                           settings.copy(allow_multi_consumer=settings.allow_multi_consumer_inside,
                                                         dict_so_far=utils.dict_union(settings.dict_so_far, dict_)))
            if not match_:
                return Match()
            dict_.update(match_.dict)

        return Match(did_match=True, root=op, dict_=dict_)
Example #8
def _read(parser_graph, with_weights=True):
    # type: (typing.Any, bool)->NNEFGraph

    tensor_by_name = {}
    g = NNEFGraph(name=parser_graph.name)

    def add_to_tensor_by_name(tensor):
        assert tensor.name not in tensor_by_name, "Tensor {} defined multiple times".format(
            tensor.name)
        tensor_by_name[tensor.name] = tensor

    def transform_input(input_):
        if isinstance(input_, nnef.Identifier):
            assert str(input_) in tensor_by_name, \
                "Tensor {} not defined before use".format(str(input_))
            return tensor_by_name[str(input_)]
        else:
            return NNEFTensor(
                graph=g,
                name=None,
                shape=[],
                dtype=NNEFDTypeByNumpyDType[np.array(input_).dtype.name],
                data=[input_])

    def transform_result(result_):
        if isinstance(result_, nnef.Identifier):
            quantization = parser_graph.tensors[str(result_)].quantization
            if quantization:
                quantization = NNEFQuantization(name=quantization['op-name'],
                                                attribs=quantization)
                del quantization.attribs['op-name']
            else:
                quantization = None

            tensor = NNEFTensor(graph=g,
                                name=str(result_),
                                shape=list(
                                    parser_graph.tensors[str(result_)].shape),
                                dtype=parser_graph.tensors[str(result_)].dtype,
                                quantization=quantization)

            add_to_tensor_by_name(tensor)
            return tensor
        else:
            return result_

    for parser_op in parser_graph.operations:

        inputs = utils.recursive_transform(parser_op.inputs, transform_input)
        if any(isinstance(i, list) for i in six.itervalues(inputs)):
            inputs = utils.recursive_collect(inputs)
        else:
            inputs = tuple(utils.recursive_collect(inputs))

        outputs = utils.recursive_transform(parser_op.outputs,
                                            transform_result)
        if any(isinstance(o, list) for o in six.itervalues(outputs)):
            outputs = utils.recursive_collect(outputs)
        else:
            outputs = tuple(utils.recursive_collect(outputs))

        if parser_op.name == "variable":
            outputs[0].label = parser_op.attribs["label"]
            if with_weights:
                outputs[0].data = parser_graph.tensors[
                    parser_op.outputs["output"]].data
                assert outputs[0].data is not None
            else:
                outputs[0].data = np.array(
                    [], dtype=NumpyDTypeByNNEFDType[parser_op.dtype])
        if parser_op.name == "constant":
            outputs[0].data = parser_op.attribs["value"]

        if parser_op.name not in ["external", "constant", "variable"]:
            NNEFOperation(graph=g,
                          name=parser_op.name,
                          attribs=dict(parser_op.attribs),
                          inputs=inputs,
                          outputs=outputs)

    input_tensors = []

    for input_ in parser_graph.inputs:
        assert str(input_) in tensor_by_name, \
            "Input tensor {} was not declared".format(str(input_))
        input_tensors.append(tensor_by_name[str(input_)])

    output_tensors = []

    for output_ in parser_graph.outputs:
        assert str(output_) in tensor_by_name, \
            "Output tensor {} was not declared".format(str(output_))
        output_tensors.append(tensor_by_name[str(output_)])

    g.inputs = OrderedDict((t.name, t) for t in input_tensors)
    g.outputs = OrderedDict((t.name, t) for t in output_tensors)

    g.generate_missing_names()
    return g
Example #9
def _to_tf_graph(name, invocations, output, op_proto_by_name):
    # type: (str, typing.List[_Invocation], typing.Any, typing.Dict[str, OpProto])->TFGraph
    g = TFGraph(name)
    tensor_by_tf_tensor = {}
    inputs = OrderedDict()
    outputs = OrderedDict()
    const_value_by_tensor = {}  # type: typing.Dict[TFTensor, np.ndarray]
    for invocation in invocations:
        if (invocation.function_name == "tf.get_variable"
                and invocation.result.value() in tensor_by_tf_tensor):
            continue

        def arg_transform(arg):
            if isinstance(arg, tf.Variable):
                arg = arg.value()

            if isinstance(arg, tf.Tensor):
                assert arg in tensor_by_tf_tensor, "Undefined tensor: {}".format(
                    arg)
                return tensor_by_tf_tensor[arg]
            return _normalize_types(arg)

        args = utils.recursive_transform(invocation.args, arg_transform)

        def result_transform(result_):
            if isinstance(result_, tf.Variable):
                result_ = result_.value()

            if isinstance(result_, tf.Tensor):
                if result_ in tensor_by_tf_tensor:
                    print(
                        "Warning: {} was returned multiple times.\nInvocation: {}"
                        .format(tensor_by_tf_tensor[result_], invocation))
                    t = TFTensor(
                        graph=g,
                        name=_normalize_types(result_.name) + ":duplicate",
                        shape=_unify_shape(_normalize_types(result_.shape)),
                        dtype=_normalize_types(result_.dtype))
                else:
                    t = TFTensor(graph=g,
                                 name=_normalize_types(result_.name),
                                 shape=_unify_shape(
                                     _normalize_types(result_.shape)),
                                 dtype=_normalize_types(result_.dtype))
                    tensor_by_tf_tensor[result_] = t
                return t
            return result_

        result = utils.recursive_transform(invocation.result, result_transform)

        if invocation.function_name == "tf.placeholder":
            assert isinstance(result, TFTensor)
            if result not in six.itervalues(inputs):
                input_name = _get_nice_input_name(result.name)
                inputs[input_name] = result
        elif invocation.function_name == "tf.constant":
            assert isinstance(result, TFTensor)
            result.data = np.array(args["value"],
                                   dtype=result.dtype).flatten().tolist()

            tf_py_eval.evaluate_constant(result, const_value_by_tensor)
            tf_py_shape_inference.evaluate_shape_of_constant(
                result, const_value_by_tensor)
        elif invocation.function_name in ["tf.Variable", "tf.get_variable"]:
            assert isinstance(result, TFTensor)
            result.data = np.array([])
            result.label = result.name
        else:
            op_proto = op_proto_by_name[invocation.function_name]
            op_attrs = _get_attrs(invocation.function_name, args, op_proto)

            def eval_tensors(x):
                if isinstance(x, TFTensor):
                    assert x in const_value_by_tensor, "Tensor could not be evaluated: {}".format(
                        x)
                    return _tolist_safe(const_value_by_tensor[x])
                return x

            op_attrs = utils.recursive_transform(op_attrs, eval_tensors)
            op_attrs = _unify_attrs(op_attrs, op_proto)

            op_inputs = _get_inputs(g, invocation.function_name, args,
                                    op_proto)
            for input in op_inputs:  # evaluate newly generated constant tensors
                if input is not None and input not in const_value_by_tensor:
                    if input.is_constant:
                        tf_py_eval.evaluate_constant(input,
                                                     const_value_by_tensor)
                        tf_py_shape_inference.evaluate_shape_of_constant(
                            input, const_value_by_tensor)
                    elif input.producer is not None and input.producer.name == "tf.expand_dims":
                        tf_py_eval.evaluate_expand_dims(
                            input.producer, const_value_by_tensor)
                        tf_py_shape_inference.evaluate_shape_of_expand_dims(
                            input.producer)

            op_outputs = _get_outputs(result)
            op = TFOperation(graph=g,
                             name=invocation.function_name,
                             attribs=op_attrs,
                             inputs=op_inputs,
                             outputs=op_outputs,
                             location=_get_location_summary(invocation.stack))
            tf_py_eval.try_to_evaluate_operation(op, const_value_by_tensor)
            tf_py_shape_inference.evaluate_shape_of_operation(
                op, const_value_by_tensor)

    def visit(output_, path):
        # type: (typing.Any, str)->None

        if isinstance(output_, tf.Variable):
            output_ = output_.value()

        if isinstance(output_, tf.Tensor):
            assert output_ in tensor_by_tf_tensor, "Undefined tensor: {}".format(
                output_)
            output_tensor = tensor_by_tf_tensor[output_]
            path = path[1:]
            if not path:
                path = "output"
            elif path[0].isdigit():
                path = "output" + path
            assert utils.is_identifier(path), \
                "Bad name_override '{}' for tensor {}. " \
                "Please use valid identifiers as keys in the dict(s) " \
                "returned by your network function.".format(path, output_tensor.name)
            outputs[path] = output_tensor

    _recursive_visit_with_path(output, visit)

    g.inputs = inputs
    g.outputs = outputs
    return g