Example #1
0
def _pool_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF pooling node into a UFF pool op.

    ``kwargs["func"]`` selects the pooling flavor and ``kwargs["tf_nodes"]``
    supplies the node map used to resolve fused padding. Returns the input
    node names with any output suffix stripped.
    """
    pool_func = kwargs["func"]
    ksize = tf2uff.get_tf_int_list(tf_node.attr['ksize'])
    stride_list = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    fmt = convert_to_str(tf_node.attr['data_format'].s) or "NHWC"
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    data_format = tf2uff.convert_tf2uff_data_format(fmt)
    # Keep only the spatial (H, W) components; their position depends on
    # the layout string.
    if fmt == 'NCHW':
        spatial = slice(2, None)
    elif fmt == 'NHWC':
        spatial = slice(1, 3)
    else:
        raise ValueError("Unsupported data format: " + fmt)
    ksize = ksize[spatial]
    stride_list = stride_list[spatial]
    if padding is not None:
        padding = padding[spatial]
    uff_graph.pool(
        inputs[0], pool_func, ksize, stride_list, padding,
        data_format=data_format, name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #2
0
def _conv2d_transpose_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF transposed-convolution node into a UFF conv_transpose op.

    Returns the input node names with any output suffix stripped.
    """
    kwargs.pop("func")  # FIXME support depthwise transpose
    fmt = convert_to_str(tf_node.attr['data_format'].s) or "NHWC"
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])

    fields = {}
    padding = None
    number_groups = None

    # Only SAME / VALID are representable; SAME is signaled via a field.
    tf_padding = convert_to_str(tf_node.attr['padding'].s)
    if tf_padding == "SAME":
        fields['implicit_padding'] = "same"
    elif tf_padding != "VALID":
        raise ValueError("Padding mode %s not supported" % tf_padding)

    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+KC'

    # Keep only the spatial stride components.
    if fmt == 'NCHW':
        strides = strides[2:]
    elif fmt == 'NHWC':
        strides = strides[1:3]
    else:
        raise ValueError("Unsupported data format: " + fmt)

    uff_graph.conv_transpose(
        inputs[2], inputs[1], inputs[0],
        strides, padding,
        dilation=None, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)

    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #3
0
def convert_squeeze(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF Squeeze node into a UFF squeeze op.

    Returns the input node names with any output suffix stripped.
    """
    squeeze_axes = tf2uff.get_tf_int_list(tf_node.attr['squeeze_dims'])
    uff_graph.squeeze(inputs[0], name=name, axis=squeeze_axes)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #4
0
def _conv2d_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF convolution node into a UFF conv op.

    ``kwargs["func"]`` distinguishes depthwise from regular convolution and
    ``kwargs["tf_nodes"]`` supplies the graph's node map, used both for fused
    padding and for recognizing the const inputs (block_shape / paddings /
    crops) that SpaceToBatch-style dilated convolutions carry.

    Returns the input node names with any output suffix stripped. NOTE: the
    returned list reflects any const inputs popped below.
    """
    func = kwargs["func"]
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"

    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+CK'
    # Keep only the spatial (H, W) components of strides/padding.
    if fmt == 'NCHW':
        strides = strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        strides = [strides[1], strides[2]]
        if padding is not None:
            padding = [padding[1], padding[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    if func == "depthwise":
        # Depthwise: group count equals the input-channel dimension of the
        # weight tensor (dim 2 of the HWCK const weights).
        wt = kwargs["tf_nodes"][inputs[1]]
        number_groups = int(wt.attr['value'].tensor.tensor_shape.dim[2].size)
    else:
        number_groups = None
    # If this node represents a dilated conv, pull in the dilations.
    dilation = None
    if "dilations" in tf_node.attr:
        if fmt == "NCHW":
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[2:]
        else:
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[1:3]

    # FIXME: Need a better way to check for dilated convs. This just checks if the block_shape input is as expected.
    # Ideally we should have a 'get_input_by_name' function. Maybe we can leverage GS here.
    # Another possibility is that GS can add these as attributes to the node rather than maintaining them as
    # separate const nodes.
    # NOTE(review): the three checks below each inspect inputs[1] and pop it
    # when matched, so order matters — block_shape, then paddings, then crops.
    tf_block_shape_node = kwargs["tf_nodes"][inputs[1]]
    if "block_shape" in tf_block_shape_node.name.split('/')[-1] and tf_block_shape_node.op == "Const":
        # Get the second input (block_shape) - of the form [1, dilation_value, dilation_value]
        dilation = np.frombuffer(tf_block_shape_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        if len(dilation) > 2:
            # Drop the leading batch entry, keep the two spatial values.
            dilation = [dilation[1], dilation[2]]
        inputs.pop(1)

    tf_paddings_node = kwargs["tf_nodes"][inputs[1]]
    if "paddings" in tf_paddings_node.name.split('/')[-1] and tf_paddings_node.op == "Const":
        # Get the second input (paddings, since block_shape is already removed)
        paddings_temp = np.frombuffer(tf_paddings_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        inputs.pop(1)

        # Get cropping information, but only if paddings is also present.
        tf_crops_node = kwargs["tf_nodes"][inputs[1]]
        if "crops" in tf_crops_node.name.split('/')[-1] and tf_crops_node.op == "Const":
            # Get the second input (crops, since block_shape is already removed)
            crops = np.frombuffer(tf_crops_node.attr["value"].tensor.tensor_content, dtype=np.int32)
            inputs.pop(1)
            # Net padding is the SpaceToBatch padding minus the BatchToSpace crop.
            paddings_temp = (np.array(paddings_temp) - crops).tolist()

        # TF paddings are [[top,bottom], [left,right]], so we need to rearrange.
        perm = [0, 2, 1, 3]
        # HACK: Sometimes paddings has [0, 0] at the front.
        if len(paddings_temp) == 6:
            paddings_temp = paddings_temp[2:]
        paddings_temp = [paddings_temp[p] for p in perm]
        # Symmetric padding ("same")
        if paddings_temp[0] == paddings_temp[2] and paddings_temp[1] == paddings_temp[3]:
            paddings_temp = paddings_temp[0:2]
            # Merge with any padding already derived from fused-padding handling.
            padding = paddings_temp if not padding else [p + pt for p, pt in zip(padding, paddings_temp)]
        else:
            # Best-effort: asymmetric padding is reported but not applied.
            print("Asymmetric padding for dilated convolutions is currently unsupported in the UFF converter.")

    uff_graph.conv(
        inputs[0], inputs[-1], strides, padding,
        dilation=dilation, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]