Code example #1
File: onnx_io.py Project: mmmika/NNEF-Tools
def _get_attribute(attribute_proto):
    if attribute_proto.HasField('ref_attr_name'):
        raise ParseException('Unexpected ref_attr_name in main graph')

    name = _fixstr(attribute_proto.name)

    if attribute_proto.HasField('f'):
        value = float(attribute_proto.f)
    elif attribute_proto.HasField('i'):
        value = utils.anyint_to_int(attribute_proto.i)
    elif attribute_proto.HasField('s'):
        value = utils.anystr_to_str(attribute_proto.s)
    elif attribute_proto.HasField('t'):
        value = _get_tensor(attribute_proto.t)
    elif attribute_proto.HasField('g'):
        value = _get_graph(attribute_proto.g)
    elif attribute_proto.floats:
        value = [float(f) for f in attribute_proto.floats]
    elif attribute_proto.ints:
        value = [utils.anyint_to_int(i) for i in attribute_proto.ints]
    elif attribute_proto.strings:
        value = [utils.anystr_to_str(s) for s in attribute_proto.strings]
    elif attribute_proto.tensors:
        value = [_get_tensor(t) for t in attribute_proto.tensors]
    elif attribute_proto.graphs:
        value = [_get_graph(g) for g in attribute_proto.graphs]
    else:
        value = []

    return name, value
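
Every example on this page funnels integer values through utils.anyint_to_int, whose implementation does not appear here. As a rough, clearly hypothetical sketch (not the actual NNEF-Tools helper), such a conversion from a numpy integer or Python int to a plain int could look like this:

import numpy as np

def anyint_to_int(value):
    # Hypothetical sketch, not the actual NNEF-Tools helper:
    # normalize numpy integer scalars to a plain Python int.
    if isinstance(value, np.integer):
        return int(value)
    if isinstance(value, int):
        return value
    raise ValueError('Not an integer: {!r}'.format(value))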
Code example #2
def _get_attribute(attribute_proto):
    if attribute_proto.HasField('ref_attr_name'):
        raise ParseException('Unexpected ref_attr_name in main graph')

    name = _fixstr(attribute_proto.name)

    if attribute_proto.HasField('f'):
        value = float(attribute_proto.f)
    elif attribute_proto.HasField('i'):
        value = utils.anyint_to_int(attribute_proto.i)
    elif attribute_proto.HasField('s'):
        value = utils.anystr_to_str(attribute_proto.s)
    elif attribute_proto.HasField('t'):
        value = _get_tensor(attribute_proto.t)
        # raise ParseException("Attribute '{}' with type TENSOR in unsupported".format(name))
    elif attribute_proto.HasField('g'):
        value = _get_graph(attribute_proto.g)
        # raise ParseException("Attribute '{}' with type GRAPH in unsupported".format(name))
    elif attribute_proto.floats:
        value = [float(f) for f in attribute_proto.floats]
    elif attribute_proto.ints:
        value = [utils.anyint_to_int(i) for i in attribute_proto.ints]
    elif attribute_proto.strings:
        value = [utils.anystr_to_str(s) for s in attribute_proto.strings]
    elif attribute_proto.tensors:
        # raise ParseException("Attribute '{}' with type TENSOR LIST in unsupported".format(name))
        value = [_get_tensor(t) for t in attribute_proto.tensors]
    elif attribute_proto.graphs:
        # raise ParseException("Attribute '{}' with type GRAPH LIST in unsupported".format(name))
        value = [_get_graph(g) for g in attribute_proto.graphs]
    else:
        value = []

    return name, value
Code example #3
def evaluate_shape_tensor_simple(tensor):
    # type:(ONNXTensor)->typing.List[int]
    if tensor.data is not None:
        return [utils.anyint_to_int(i) for i in tensor.data.tolist()]
    elif tensor.producer is not None and tensor.producer.name == 'Shape':
        return list(tensor.producer.input.shape)
    else:
        try:
            return [
                utils.anyint_to_int(dim)
                for dim in evaluate_tensor(tensor).astype(np.int64).tolist()
            ]
        except CantEvaluate as e:
            assert False, "Shape tensors must be constant tensors or results of Shape for now. " \
                          "Some other operations can also be evaluated, but not {}".format(e.args[0])
Code example #4
File: shape_fixer.py Project: stjordanis/NNEF-Tools
def get_shape_for(name):
    if isinstance(source_shapes, dict) and name in source_shapes:
        return source_shapes[name]
    elif isinstance(source_shapes, list):
        return list(source_shapes)
    elif utils.is_anyint(source_shapes):
        return utils.anyint_to_int(source_shapes)
    return None
Code example #5
def evaluate_shape_tensor_simple(tensor):
    # type:(ONNXTensor)->typing.List[int]
    if tensor.data is not None:
        return [utils.anyint_to_int(i) for i in tensor.data.tolist()]
    elif tensor.producer is not None and tensor.producer.name == 'Shape':
        return list(tensor.producer.input.shape)
    else:
        assert False, "Shape tensors must be constant tensors or results of Shape for now."
Code example #6
def evaluate_scalar_int_tensor_simple(tensor):
    # type: (ONNXTensor)->int
    if tensor.data is not None:
        return utils.anyint_to_int(tensor.data.item())
    elif tensor.producer is not None and tensor.producer.name == 'Size':
        return tensor.producer.input.count
    else:
        assert False, "Scalar int tensors must be constant or results of Size to be evaluable for now."
Code example #7
def get_shape_for(name):
    if isinstance(input_shape, dict) and name in input_shape:
        return input_shape[name]
    elif isinstance(input_shape, list):
        return list(input_shape)
    elif utils.is_anyint(input_shape):
        return utils.anyint_to_int(input_shape)
    return None
Code example #8
def propagate_upsample(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]

    if 'scales' not in op.attribs:
        assert len(op.inputs) == 2
        op.attribs['scales'] = evaluate_float_list_tensor_simple(op.inputs[1])

    op.inputs = (op.inputs[0], )

    return [[utils.anyint_to_int(math.floor(i * s)) for i, s in zip(op.inputs[0].shape, op.attribs['scales'])]], \
           [op.inputs[0].dtype]
Code example #9
def _small_variables_to_consts(g):
    # type: (NNEFGraph)->None

    MaxSize = 4
    MaxNonOneDims = 1

    for tensor in g.tensors:
        if tensor.is_variable and tensor.count <= MaxSize and sum(dim > 1 for dim in tensor.shape) <= MaxNonOneDims:
            tensor.data = tensor.data.flatten().tolist()
            if tensor.dtype == 'integer':
                tensor.data = [utils.anyint_to_int(i) for i in tensor.data]
Code example #10
def propagate_upsample(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]

    if 'scales' in op.attribs:
        scales = op.attribs['scales']
    else:
        scales = evaluate_float_list_tensor_simple(op.inputs[1])

    return [[
        utils.anyint_to_int(math.floor(i * s))
        for i, s in zip(op.inputs[0].shape, scales)
    ]], [op.inputs[0].dtype]
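
Both variants compute the output shape as floor(input_dim * scale) along each axis; a small standalone illustration of that arithmetic:

import math

input_shape = [1, 3, 10, 10]
scales = [1.0, 1.0, 2.0, 2.0]   # example per-axis upsampling factors
output_shape = [int(math.floor(i * s)) for i, s in zip(input_shape, scales)]
print(output_shape)             # [1, 3, 20, 20]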
Code example #11
File: onnx_io.py Project: mmmika/NNEF-Tools
def _get_tensor(tensor_proto):
    if tensor_proto.HasField('segment'):
        raise ParseException('TensorProto.segment is not yet supported.',
                             (_fixint(tensor_proto.segment.begin),
                              _fixint(tensor_proto.segment.end)))
    name = _fixstr(tensor_proto.name)
    shape = [utils.anyint_to_int(dim) for dim in tensor_proto.dims]
    dtype = _get_dtype(tensor_proto.data_type)

    if not NumpyDTypeByONNXDType.get(dtype):
        raise ParseException("Unsupported '{}' dtype for '{}'".format(
            dtype, name))

    if tensor_proto.HasField('raw_data'):
        if dtype == 'STRING':
            raise ParseException('Unexpected raw_data when dtype is STRING')

        data = np.frombuffer(tensor_proto.raw_data,
                             NumpyDTypeByONNXDType[dtype])
        if not _is_little_endian_system:
            data = data.byteswap()
    else:
        if dtype == 'FLOAT':
            data = np.array(tensor_proto.float_data,
                            NumpyDTypeByONNXDType[dtype])
        elif dtype == 'DOUBLE':
            data = np.array(tensor_proto.double_data,
                            NumpyDTypeByONNXDType[dtype])
        elif dtype == 'INT64':
            data = np.array(tensor_proto.int64_data,
                            NumpyDTypeByONNXDType[dtype])
        elif dtype == 'STRING':
            data = np.array(_fixstr(tensor_proto.string_data))
        elif dtype == 'FLOAT16':
            data = np.array(tensor_proto.int32_data,
                            np.uint16).view(np.float16)
        elif dtype == 'COMPLEX64':
            data = np.array(tensor_proto.float_data, np.float32)
            data = data[0::2] + data[1::2] * 1j
        elif dtype == 'COMPLEX128':
            data = np.array(tensor_proto.double_data, np.float64)
            data = data[0::2] + data[1::2] * 1j
        elif dtype in ['INT8', 'UINT8', 'INT16', 'UINT16', 'INT32', 'BOOL']:
            data = np.array(tensor_proto.int32_data,
                            NumpyDTypeByONNXDType[dtype])
        elif dtype in ['UINT32', 'UINT64']:
            data = np.array(tensor_proto.uint64_data,
                            NumpyDTypeByONNXDType[dtype])
        else:
            raise ParseException('Unsupported dtype: {}'.format(dtype))
    data = data.reshape(shape)
    doc_string = _fixstr(_get_field(tensor_proto, 'doc_string'))
    return name, shape, dtype, data, doc_string
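
A hedged usage sketch: a TensorProto created with the official onnx.numpy_helper stores its payload in raw_data, which the function above decodes with np.frombuffer. This assumes _get_tensor and its module-level helpers (NumpyDTypeByONNXDType, _get_dtype, _fixstr, ...) are importable from onnx_io.py:

import numpy as np
from onnx import numpy_helper

# Build a 2x3 float32 TensorProto and parse it with the function above.
proto = numpy_helper.from_array(np.arange(6, dtype=np.float32).reshape(2, 3),
                                name='weights')
name, shape, dtype, data, doc_string = _get_tensor(proto)
print(name, shape, dtype)       # expected: weights [2, 3] FLOAT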
Code example #12
def _get_attribute(field, value, graph):
    if field == 'i' or field == 'f' or field == 'b' or field == 'placeholder':
        if utils.is_anyint(value):
            return utils.anyint_to_int(value)
        return value
    elif field == 's':
        return utils.anystr_to_str(value.decode())
    elif field == 'shape':
        return _get_shape(value)
    elif field == 'type':
        return _get_dtype(value)
    elif field == 'tensor':
        return _get_tensor(value, graph)
    elif field == 'func':
        return _get_func(value)
    elif field == 'list':
        field, items = _get_nonempty_items(value, fields=['i', 'f', 'b', 's', 'shape', 'type', 'tensor', 'func'])
        if items is None:
            return []
        return [_get_attribute(field, item, graph) for item in items]

    assert False
Code example #13
def _normalize_types(arg):
    if utils.is_anyint(arg):
        return utils.anyint_to_int(arg)
    elif utils.is_anystr(arg):
        return utils.anystr_to_str(arg)
    elif isinstance(arg, np.ndarray):
        return arg.tolist()
    elif isinstance(arg, tf.TensorShape):
        if arg.dims is None:
            return None
        return [None if dim is None else int(dim) for dim in arg.as_list()]
    elif isinstance(arg, tf.Dimension):
        return arg.value
    elif isinstance(arg, tf.DType):
        return arg.name
    elif isinstance(
            arg,
        (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
         np.uint64, np.float16, np.float32, np.float64, np.bool_)):
        return arg.item()
    else:
        return arg
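
A hedged usage sketch for _normalize_types with plain numpy inputs (the tf.TensorShape / tf.Dimension / tf.DType branches require a TF1-style API and are not exercised here); it assumes the snippet's module, including utils, is importable:

import numpy as np

print(_normalize_types(np.int64(7)))          # 7   (plain Python int)
print(_normalize_types(np.float32(0.5)))      # 0.5
print(_normalize_types(np.array([[1, 2]])))   # [[1, 2]]
print(_normalize_types('already a str'))      # already a str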
Code example #14
File: tf_pb_to_tf_py.py Project: benmbark/NNEF-Tools
def fix_types(list_):
    # type: (typing.Any)->typing.Any
    if isinstance(list_, list) and len(list_) >= 1 and utils.is_anyint(list_[0]):
        list_ = [utils.anyint_to_int(i) for i in list_]
    return list_
Code example #15
def _get_shape(shape_proto):
    return [utils.anyint_to_int(dim.size) if utils.is_anyint(dim.size) else dim.size for dim in shape_proto.dim]
Code example #16
File: onnx_io.py Project: mmmika/NNEF-Tools
def _get_graph(graph_proto):
    graph = ONNXGraph(name=_fixstr(_get_field(graph_proto, 'name')))

    if graph.name and not (graph.name[0].isalpha() or graph.name[0] == '_'):
        graph.name = 'graph_' + graph.name

    tensors_by_name = {}
    for node in graph_proto.node:
        for tensor_name in node.output:
            tensor_name = _fixstr(tensor_name)
            if tensor_name not in tensors_by_name:
                tensors_by_name[tensor_name] = ONNXTensor(graph=graph,
                                                          name=tensor_name)
    for value_info in graph_proto.input:
        tensor_name = _fixstr(value_info.name)
        if tensor_name not in tensors_by_name:
            tensors_by_name[tensor_name] = ONNXTensor(graph=graph,
                                                      name=tensor_name)
    for value_info in graph_proto.output:
        tensor_name = _fixstr(value_info.name)
        if tensor_name not in tensors_by_name:
            tensors_by_name[tensor_name] = ONNXTensor(graph=graph,
                                                      name=tensor_name)
    for value_info in graph_proto.value_info:
        tensor_name = _fixstr(value_info.name)
        if tensor_name not in tensors_by_name:
            tensors_by_name[tensor_name] = ONNXTensor(graph=graph,
                                                      name=tensor_name)
    for tensor_proto in graph_proto.initializer:
        tensor_name = _fixstr(tensor_proto.name)
        if tensor_name not in tensors_by_name:
            tensors_by_name[tensor_name] = ONNXTensor(graph=graph,
                                                      name=tensor_name)

    const_or_var_names = {
        _fixstr(model_proto.name)
        for model_proto in graph_proto.initializer
    }
    input_names = [
        _fixstr(value_info.name) for value_info in graph_proto.input
        if _fixstr(value_info.name) not in const_or_var_names
    ]
    output_names = [
        _fixstr(value_info.name) for value_info in graph_proto.output
    ]
    graph.inputs = [tensors_by_name[name] for name in input_names]
    graph.outputs = [tensors_by_name[name] for name in output_names]

    for value_info in graph_proto.input:
        name, shape, dtype, doc_string = _get_value_info(value_info)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype = shape, dtype
    for value_info in graph_proto.output:
        name, shape, dtype, doc_string = _get_value_info(value_info)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype = shape, dtype
    for value_info in graph_proto.value_info:
        name, shape, dtype, doc_string = _get_value_info(value_info)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype = shape, dtype
    for tensor_proto in graph_proto.initializer:
        name, shape, dtype, data, doc_string = _get_tensor(tensor_proto)
        tensor = tensors_by_name[name]
        tensor.shape, tensor.dtype, tensor.data = shape, dtype, data

    for node in graph_proto.node:
        if _fixstr(node.op_type) == 'Constant':
            inputs, outputs, name, domain, op_type, attributes, doc_string = _get_node(
                node)
            if len(outputs) != 1:
                raise ParseException(
                    'Constant must have one output, we have: {}'.format(
                        len(outputs)))
            tensor = tensors_by_name[outputs[0]]
            _name, tensor.shape, tensor.dtype, tensor.data, _doc_string = attributes[
                'value']
            if not tensor.shape:
                tensor.data = tensor.data.flatten().tolist()
                if utils.is_anyint(tensor.data[0]):
                    tensor.data[0] = utils.anyint_to_int(tensor.data[0])
        else:
            inputs, outputs, name, domain, op_type, attributes, doc_string = _get_node(
                node)
            if op_type == 'ConstantOfShape':
                if 'value' in attributes:
                    _tensor_name, _tensor_shape, tensor_dtype, tensor_data, _tensor_doc_string = attributes[
                        'value']
                    attributes['dtype'] = tensor_dtype
                    attributes['value'] = (utils.anyint_to_int(
                        tensor_data.item()) if 'INT' in tensor_dtype else
                                           tensor_data.item())
                else:
                    attributes['dtype'] = 'FLOAT'
                    attributes['value'] = 0.0
            elif op_type == 'ConstantFill':
                attributes['dtype'] = _get_dtype(attributes['dtype'])
            ONNXOperation(graph=graph,
                          name=op_type,
                          inputs=tuple([
                              tensors_by_name[name]
                              if name else ONNXTensor.create_null(graph)
                              for name in inputs
                          ]),
                          outputs=tuple([
                              tensors_by_name[name]
                              if name else ONNXTensor.create_null(graph)
                              for name in outputs
                          ]),
                          attribs=attributes)
    return graph
Code example #17
File: onnx_io.py Project: mmmika/NNEF-Tools
def _fixint(i):
    return utils.anyint_to_int(i) if i is not None else None