Example 1
def _pool_helper(name, tf_node, inputs, uff_graph, **kwargs):
    func = kwargs["func"]
    window_size = tf2uff.get_tf_int_list(tf_node.attr['ksize'])
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    data_format = tf2uff.convert_tf2uff_data_format(fmt)
    if fmt == 'NCHW':
        window_size = window_size[2:]
        strides = strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        window_size = [window_size[1], window_size[2]]
        strides = [strides[1], strides[2]]
        if padding is not None:
            padding = [padding[1], padding[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    uff_graph.pool(
        inputs[0], func, window_size, strides, padding,
        data_format=data_format, name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
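The func keyword is what lets a single helper back several registered pooling converters. A minimal sketch of that dispatch, using hypothetical functools.partial-style wrappers (the real module registers one converter per TF op such as MaxPool and AvgPool):

import functools

def _pool_helper_stub(name, tf_node, inputs, uff_graph, **kwargs):
    # Stand-in for the helper body above; only the dispatch matters here.
    return kwargs["func"]

# Hypothetical wrappers binding the pooling function.
convert_maxpool = functools.partial(_pool_helper_stub, func="max")
convert_avgpool = functools.partial(_pool_helper_stub, func="avg")

assert convert_maxpool("pool", None, [], None) == "max"
assert convert_avgpool("pool", None, [], None) == "avg"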
Example 2
def _conv2d_transpose_helper(name, tf_node, inputs, uff_graph, **kwargs):
    kwargs.pop("func")  # FIXME support depthwise transpose
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])

    fields = {}
    padding = None
    number_groups = None

    tf_padding = convert_to_str(tf_node.attr['padding'].s)
    if tf_padding == "SAME":
        fields['implicit_padding'] = "same"
    elif tf_padding != "VALID":
        raise ValueError("Padding mode %s not supported" % tf_padding)

    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+KC'

    if fmt == 'NCHW':
        strides = strides[2:]
    elif fmt == 'NHWC':
        strides = [strides[1], strides[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)

    uff_graph.conv_transpose(
        inputs[2], inputs[1], inputs[0],
        strides, padding,
        dilation=None, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)

    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
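Note the reversed argument order in the call above: TensorFlow's Conv2DBackpropInput op takes (input_sizes, filter, out_backprop), while the UFF node expects the data tensor first. A one-line check of that reordering (names are illustrative):

# Inputs as TF delivers them for Conv2DBackpropInput.
tf_inputs = ["input_sizes", "filter", "out_backprop"]
# The call above passes inputs[2], inputs[1], inputs[0].
uff_order = [tf_inputs[2], tf_inputs[1], tf_inputs[0]]
assert uff_order == ["out_backprop", "filter", "input_sizes"]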
Example 3
def convert_fused_batch_norm(name, tf_node, inputs, uff_graph, **kwargs):
    input_node, gamma, beta, mean, variance = inputs
    eps = tf_node.attr['epsilon'].f
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    data_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    uff_graph.batchnorm(input_node, gamma, beta, mean,
                        variance, eps, data_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
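For reference, the fused batch norm the emitted UFF node has to reproduce is the standard per-channel normalization. A minimal NumPy sketch of the formula (not part of the converter):

import numpy as np

def fused_batch_norm_reference(x, gamma, beta, mean, variance, eps):
    # y = gamma * (x - mean) / sqrt(variance + eps) + beta
    return gamma * (x - mean) / np.sqrt(variance + eps) + beta

x = np.array([1.0, 2.0, 3.0])
y = fused_batch_norm_reference(x, 1.0, 0.0, x.mean(), x.var(), 1e-3)
assert abs(y.mean()) < 1e-6  # normalized output is centered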
Example 4
def convert_transpose(name, tf_node, inputs, uff_graph, **kwargs):
    tf_permutation_node = kwargs["tf_nodes"][inputs[1]]
    if tf_permutation_node.op != "Const":
        raise UffException("Transpose permutation has op " + str(tf_permutation_node.op) + ", expected Const. Only constant permuations are supported in UFF.")
    permutation = tf2uff.convert_tf2numpy_const_node(
        tf_permutation_node).tolist()
    inputs = inputs[:1]
    uff_graph.transpose(inputs[0], permutation, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 5
def convert_reshape(name, tf_node, inputs, uff_graph, **kwargs):
    str_name = tf_node.name.split('/')
    if len(str_name) > 1 and str_name[-2].lower().find('flatten') != -1:
        print('DEBUG: convert reshape to flatten node')
        uff_graph.flatten(inputs[0], name=name)  # flatten axis is ignored here
        return [tf2uff.split_node_name_and_output(inputs[0])[0]]  # second input of shape is dropped
    else:
        uff_graph.reshape(inputs[0], inputs[1], name)
        return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
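The flatten detection is purely name-based: a Reshape whose enclosing scope mentions 'flatten' (the scope Keras-style flatten layers typically create) is emitted as a UFF Flatten op instead. A minimal reproduction of the test:

def is_flatten_reshape(node_name):
    parts = node_name.split('/')
    return len(parts) > 1 and 'flatten' in parts[-2].lower()

assert is_flatten_reshape("model/flatten/Reshape")
assert not is_flatten_reshape("model/Reshape")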
Example 6
def convert_argmin(name, tf_node, inputs, uff_graph, **kwargs):
    # Retrieve and remove the axis node.
    tf_axis_input_node = kwargs["tf_nodes"][inputs[-1]]
    if tf_axis_input_node.op != "Const":
        raise UffException("ArgMin Axis node has op " + str(tf_axis_input_node.op) + ", expected Const. The axis must be specified as a Const node.")
    axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_input_node))
    inputs.pop(-1)
    # Add the op.
    uff_graph.argmin(inputs[0], axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
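This trailing-Const pattern recurs in the examples below (ResourceGather, ConcatV2, Softmax): a value that is logically an attribute arrives as the last graph input, is folded into a Python int, and is removed so it does not become a UFF tensor input. A toy model of that flow, with hypothetical stand-ins for the node table and the const reader:

import numpy as np

class FakeConstNode:  # hypothetical stand-in for a TF NodeDef
    op = "Const"
    value = np.array(3)

tf_nodes = {"ArgMin/dimension": FakeConstNode()}
inputs = ["data", "ArgMin/dimension"]
axis = int(tf_nodes[inputs[-1]].value)  # plays the role of convert_tf2numpy_const_node
inputs.pop(-1)
assert axis == 3 and inputs == ["data"]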
Example 7
def convert_resource_gather(name, tf_node, inputs, uff_graph, **kwargs):
    if len(inputs) > 2:
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    else:
        axis = 0
    indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
    params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['dtype'].type)
    uff_graph.gather_v2(inputs, name, axis, indices_dtype, params_dtype)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 8
def convert_concatv2(name, tf_node, inputs, uff_graph, **kwargs):
    if "axis" in tf_node.attr:
        # Handle cases where the axis is not a node, but an attribute instead.
        axis = tf_node.attr["axis"].i
    else:
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        if tf_axis_node.op != "Const":
            raise UffException("Concat Axis node has op " + str(tf_axis_node.op) + ", expected Const. The axis for a Concat op must be specified as either an attribute, or a Const node.")
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    uff_graph.concat(inputs, axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 9
def _reduce_helper(name, tf_node, inputs, uff_graph, **kwargs):
    func = kwargs.pop("func")

    tf_axes_node = kwargs["tf_nodes"][inputs[1]]
    array = tf2uff.convert_tf2numpy_const_node(tf_axes_node)
    axes = array.tolist()
    inputs = inputs[:1]
    keepdims = tf_node.attr['keep_dims'].b

    print("Warning: keepdims is ignored by the UFF Parser and defaults to True")

    uff_graph.reduce(inputs[0], func, axes, keepdims, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
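The warning concerns keepdims semantics: with keepdims=True the reduced axes remain as size-1 dimensions instead of being squeezed away, and the UFF parser behaves as if the flag were True regardless of its value here. A quick NumPy illustration of the difference:

import numpy as np

x = np.ones((2, 3, 4))
assert np.sum(x, axis=(1, 2), keepdims=True).shape == (2, 1, 1)
assert np.sum(x, axis=(1, 2), keepdims=False).shape == (2,)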
Example 10
def convert_softmax(name, tf_node, inputs, uff_graph, **kwargs):
    # Some Softmax ops don't have an axis node.
    if len(inputs) > 1:
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    else:
        axis = 0
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    data_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    uff_graph.softmax(inputs[0], axis, data_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 11
def convert_bias_add(name, tf_node, inputs, uff_graph, **kwargs):
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    biases_name = inputs[1]
    biases_array = tf2uff.convert_tf2numpy_const_node(
        kwargs["tf_nodes"][biases_name])
    inputs = inputs[:1]
    if fmt == 'NCHW':
        ndim = 4
        new_shape = [-1] + [1] * (ndim - 2)
        biases_array = biases_array.reshape(new_shape)
    uff_graph.const(biases_array, biases_name)
    uff_graph.binary(inputs[0], biases_name, 'add', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
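The NCHW branch reshapes the C-length bias so it broadcasts over the spatial dimensions: for ndim = 4, new_shape works out to [-1, 1, 1]. A NumPy check of that broadcast:

import numpy as np

bias = np.arange(3, dtype=np.float32)         # C = 3
x = np.zeros((1, 3, 5, 5), dtype=np.float32)  # NCHW input
y = x + bias.reshape([-1] + [1] * 2)          # same shape rule as above
assert y.shape == (1, 3, 5, 5)
assert np.allclose(y[0, 2], 2.0)              # each channel gets its own bias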
Example 12
def convert_matmul(name, tf_node, inputs, uff_graph, **kwargs):
    lhs, rhs = inputs
    trans_a = tf_node.attr['transpose_a'].b
    trans_b = tf_node.attr['transpose_b'].b
    lhs_fmt = 'CN' if trans_a else 'NC'
    rhs_fmt = 'KC' if trans_b else 'CK'
    uff_graph.fully_connected(
        lhs, rhs, lhs_fmt, rhs_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
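The format strings encode whether each operand must be transposed before the multiply: 'NC'/'CK' is the plain layout and 'CN'/'KC' the transposed one, where N is the batch/row dimension, C the shared inner dimension, and K the output columns. A NumPy reference with the same semantics (illustrative only):

import numpy as np

def matmul_reference(lhs, rhs, trans_a=False, trans_b=False):
    a = lhs.T if trans_a else lhs  # 'CN' -> transpose back to 'NC'
    b = rhs.T if trans_b else rhs  # 'KC' -> transpose back to 'CK'
    return a @ b

a = np.ones((2, 3))  # N=2, C=3
b = np.ones((3, 4))  # C=3, K=4
assert matmul_reference(a, b).shape == (2, 4)
assert matmul_reference(a.T, b, trans_a=True).shape == (2, 4)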
Example 13
def convert_lrn(name, tf_node, inputs, uff_graph, **kwargs):
    lhs = inputs[0]
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NC+"
    window_size = tf_node.attr["depth_radius"].i
    alpha = tf_node.attr["alpha"].f
    beta = tf_node.attr["beta"].f
    bias = tf_node.attr["bias"].f
    uff_graph.lrn(lhs, window_size, alpha, beta, bias, fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 14
def convert_strided_slice(name, tf_node, inputs, uff_graph, **kwargs):
    begin_mask = tf_node.attr['begin_mask'].i
    end_mask = tf_node.attr['end_mask'].i
    shrink_axis_mask = tf_node.attr['shrink_axis_mask'].i

    if tf_node.attr['ellipsis_mask'].i != 0:
        raise ValueError("ellipsis_mask not supported")

    if tf_node.attr['new_axis_mask'].i != 0:
        raise ValueError("new_axis_mask not supported")

    uff_graph.strided_slice(inputs[0], inputs[1], inputs[2], inputs[3],
                            begin_mask, end_mask, shrink_axis_mask, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
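The three masks are per-dimension bit fields: bit i of begin_mask or end_mask means "ignore begin[i]/end[i] and slice from that edge", while bit i of shrink_axis_mask means "index dimension i away". For instance, x[:, 2] on a 2-D tensor yields begin_mask = end_mask = 0b01 and shrink_axis_mask = 0b10. A small decoder:

def mask_bits(mask, ndim):
    # Expand an integer bit mask into one flag per dimension.
    return [bool((mask >> i) & 1) for i in range(ndim)]

assert mask_bits(0b01, 2) == [True, False]  # dim 0 is a full slice
assert mask_bits(0b10, 2) == [False, True]  # dim 1 is shrunk away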
Example 15
def convert_gather(name, tf_node, inputs, uff_graph, **kwargs):
    indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
    params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tparams'].type)
    validate_indices = tf_node.attr['validate_indices'].b
    uff_graph.gather(inputs, name, indices_dtype, params_dtype, validate_indices)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 16
def convert_pad(name, tf_node, inputs, uff_graph, **kwargs):
    pad = inputs[1]
    uff_graph.pad(inputs[0], pad, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 17
def convert_squeeze(name, tf_node, inputs, uff_graph, **kwargs):
    axis = tf2uff.get_tf_int_list(tf_node.attr['squeeze_dims'])
    uff_graph.squeeze(inputs[0], name=name, axis=axis)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 18
def convert_shape(name, tf_node, inputs, uff_graph, **kwargs):
    uff_graph.shape(inputs[0], name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 19
def convert_identity(name, tf_node, inputs, uff_graph, **kwargs):
    uff_graph.identity(inputs[0], name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 20
def convert_div(name, tf_node, inputs, uff_graph, **kwargs):
    uff_graph.binary(inputs[0], inputs[1], 'div', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 21
def convert_const(name, tf_node, inputs, uff_graph, **kwargs):
    array = tf2uff.convert_tf2numpy_const_node(tf_node)
    uff_node = uff_graph.const(array, name)
    uff_node.array = array
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 22
def convert_pack(name, tf_node, inputs, uff_graph, **kwargs):
    axis = tf_node.attr['axis'].i
    uff_graph.stack(inputs, axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 23
def _conv2d_helper(name, tf_node, inputs, uff_graph, **kwargs):
    func = kwargs["func"]
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"

    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+CK'
    if fmt == 'NCHW':
        strides = strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        strides = [strides[1], strides[2]]
        if padding is not None:
            padding = [padding[1], padding[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    if func == "depthwise":
        wt = kwargs["tf_nodes"][inputs[1]]
        number_groups = int(wt.attr['value'].tensor.tensor_shape.dim[2].size)
    else:
        number_groups = None
    # If this node represents a dilated conv, pull in the dilations.
    dilation = None
    if "dilations" in tf_node.attr:
        if fmt == "NCHW":
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[2:]
        else:
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[1:3]

    # FIXME: Need a better way to check for dilated convs. This just checks if the block_shape input is as expected.
    # Ideally we should have a 'get_input_by_name' function. Maybe we can leverage GS here.
    # Another possibility is that GS can add these as attributes to the node rather than maintaining them as
    # separate const nodes.
    tf_block_shape_node = kwargs["tf_nodes"][inputs[1]]
    if "block_shape" in tf_block_shape_node.name.split('/')[-1] and tf_block_shape_node.op == "Const":
        # Get the second input (block_shape) - of the form [1, dilation_value, dilation_value]
        dilation = np.frombuffer(tf_block_shape_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        if len(dilation) > 2:
            dilation = [dilation[1], dilation[2]]
        inputs.pop(1)

    tf_paddings_node = kwargs["tf_nodes"][inputs[1]]
    if "paddings" in tf_paddings_node.name.split('/')[-1] and tf_paddings_node.op == "Const":
        # Get the second input (paddings, since block_shape is already removed)
        paddings_temp = np.frombuffer(tf_paddings_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        inputs.pop(1)

        # Get cropping information, but only if paddings is also present.
        tf_crops_node = kwargs["tf_nodes"][inputs[1]]
        if "crops" in tf_crops_node.name.split('/')[-1] and tf_crops_node.op == "Const":
            # Get the second input (crops, since block_shape is already removed)
            crops = np.frombuffer(tf_crops_node.attr["value"].tensor.tensor_content, dtype=np.int32)
            inputs.pop(1)
            paddings_temp = (np.array(paddings_temp) - crops).tolist()

        # TF paddings are [[top,bottom], [left,right]], so we need to rearrange.
        perm = [0, 2, 1, 3]
        # HACK: Sometimes paddings has [0, 0] at the front.
        if len(paddings_temp) == 6:
            paddings_temp = paddings_temp[2:]
        paddings_temp = [paddings_temp[p] for p in perm]
        # Symmetric padding ("same")
        if paddings_temp[0] == paddings_temp[2] and paddings_temp[1] == paddings_temp[3]:
            paddings_temp = paddings_temp[0:2]
            padding = paddings_temp if not padding else [p + pt for p, pt in zip(padding, paddings_temp)]
        else:
            print("Asymmetric padding for dilated convolutions is currently unsupported in the UFF converter.")

    uff_graph.conv(
        inputs[0], inputs[-1], strides, padding,
        dilation=dilation, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
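The block_shape/paddings/crops probing exists because TensorFlow lowers dilated (atrous) convolutions into SpaceToBatchND -> Conv2D -> BatchToSpaceND, with the dilation carried in a block_shape Const. A minimal check of the dilation parsing above, assuming an int32 tensor_content of the [1, d, d] form noted in the comment:

import numpy as np

raw = np.array([1, 2, 2], dtype=np.int32).tobytes()  # fake tensor_content
dilation = np.frombuffer(raw, dtype=np.int32).tolist()
if len(dilation) > 2:
    dilation = [dilation[1], dilation[2]]
assert dilation == [2, 2]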
Example 24
def convert_log(name, tf_node, inputs, uff_graph, **kwargs):
    uff_graph.unary(inputs[0], 'log', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 25
def convert_softplus(name, tf_node, inputs, uff_graph, **kwargs):
    uff_graph.activation(inputs[0], 'softplus', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example 26
def convert_placeholder(name, tf_node, inputs, uff_graph, **kwargs):
    dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['dtype'].type)
    shape = tf2uff.get_tf_shape_as_int_list(tf_node.attr['shape'])
    uff_graph.input(shape, dtype, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]