Example #1
0
def _conv2d_transpose_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF transposed-convolution node into a UFF conv_transpose op.

    Reads strides, padding mode and data_format from the TF node attributes,
    trims strides down to the two spatial dimensions, and emits the op on
    ``uff_graph``. Returns the input node names with output suffixes stripped.
    """
    kwargs.pop("func")  # FIXME support depthwise transpose
    data_format = convert_to_str(tf_node.attr['data_format'].s) or "NHWC"
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])

    fields = {}
    tf_padding = convert_to_str(tf_node.attr['padding'].s)
    if tf_padding == "SAME":
        fields['implicit_padding'] = "same"
    elif tf_padding != "VALID":
        raise ValueError("Padding mode %s not supported" % tf_padding)

    # Keep only the spatial entries of the strides.
    if data_format == 'NCHW':
        strides = strides[2:]
    elif data_format == 'NHWC':
        strides = strides[1:3]
    else:
        raise ValueError("Unsupported data format: " + data_format)

    uff_graph.conv_transpose(
        inputs[2], inputs[1], inputs[0],
        strides, None,
        dilation=None, number_groups=None,
        left_format=tf2uff.convert_tf2uff_data_format(data_format),
        right_format='+KC',
        name=name, fields=fields)

    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #2
0
def _pool_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF pooling node into a UFF pool op.

    The pooling function (max/avg) arrives via ``kwargs["func"]``. Window
    size, strides and any recovered padding are reduced to their spatial
    components before emission.
    """
    pool_func = kwargs["func"]
    window = tf2uff.get_tf_int_list(tf_node.attr['ksize'])
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    fmt = convert_to_str(tf_node.attr['data_format'].s) or "NHWC"
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    uff_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    # Drop the batch and channel entries, keeping only spatial dims.
    if fmt == 'NCHW':
        window, strides = window[2:], strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        window, strides = window[1:3], strides[1:3]
        if padding is not None:
            padding = padding[1:3]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    uff_graph.pool(
        inputs[0], pool_func, window, strides, padding,
        data_format=uff_fmt, name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #3
0
def convert_fused_batch_norm(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF FusedBatchNorm node into a UFF batchnorm op."""
    x, gamma, beta, mean, variance = inputs
    epsilon = tf_node.attr['epsilon'].f
    data_format = convert_to_str(tf_node.attr['data_format'].s) or "NHWC"
    uff_fmt = tf2uff.convert_tf2uff_data_format(data_format)
    uff_graph.batchnorm(x, gamma, beta, mean, variance, epsilon,
                        uff_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #4
0
def convert_lrn(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF LRN (local response normalization) node to UFF."""
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    if not fmt:
        fmt = "NC+"
    attrs = tf_node.attr
    uff_graph.lrn(inputs[0],
                  attrs["depth_radius"].i,
                  attrs["alpha"].f,
                  attrs["beta"].f,
                  attrs["bias"].f,
                  fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #5
0
def convert_softmax(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF Softmax node to UFF, honoring an optional axis input."""
    if len(inputs) > 1:
        # The trailing input is a const node holding the softmax axis.
        axis_node = kwargs["tf_nodes"][inputs[-1]]
        axis = int(tf2uff.convert_tf2numpy_const_node(axis_node))
        inputs = inputs[:-1]
    else:
        # Some Softmax ops don't have an axis node.
        axis = 0
    fmt = convert_to_str(tf_node.attr['data_format'].s) or "NHWC"
    uff_graph.softmax(inputs[0], axis,
                      tf2uff.convert_tf2uff_data_format(fmt), name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #6
0
def convert_bias_add(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF BiasAdd node into a UFF const + elementwise add pair."""
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    if not fmt:
        fmt = "NHWC"
    biases_name = inputs[1]
    biases = tf2uff.convert_tf2numpy_const_node(
        kwargs["tf_nodes"][biases_name])
    inputs = inputs[:1]
    if fmt == 'NCHW':
        # Reshape biases to (C, 1, 1) so they broadcast over spatial dims.
        biases = biases.reshape([-1, 1, 1])
    uff_graph.const(biases, biases_name)
    uff_graph.binary(inputs[0], biases_name, 'add', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
Example #7
0
 def apply_fused_padding(cls, tf_node, inputs, tf_nodes):
     """Derive padding info for a conv/pool node, folding a preceding Pad op.

     For SAME padding, marks implicit padding. For VALID padding, if the
     producer of input 0 is a Pad op with symmetric before/after amounts,
     absorbs that padding and rewires ``inputs[0]`` past the Pad node.
     Returns ``(inputs, padding, fields)``.
     """
     padding = None
     fields = {}
     mode = convert_to_str(tf_node.attr['padding'].s)
     if mode == 'SAME':
         fields['implicit_padding'] = 'same'
     elif mode == 'VALID':
         fields['implicit_padding'] = None
         lhs_node = tf_nodes[inputs[0]]
         if lhs_node.op == 'Pad':
             pad_vals = cls.convert_tf2numpy_const_node(
                 tf_nodes[lhs_node.input[1]])
             before = pad_vals[:, 0].tolist()
             after = pad_vals[:, 1].tolist()
             # Only symmetric padding can be folded into the op itself.
             if before == after:
                 padding = before
                 inputs[0] = lhs_node.input[0]
     else:
         raise ValueError("Padding mode %s not supported" % mode)
     return inputs, padding, fields
Example #8
0
def _conv2d_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert a TF 2-D convolution node (regular or depthwise) to a UFF conv op.

    Handles fused padding, depthwise grouping, an explicit ``dilations``
    attribute, and the SpaceToBatchND-style dilated-conv pattern where
    block_shape/paddings/crops appear as extra const inputs. Returns the
    input node names with output suffixes stripped.
    """
    func = kwargs["func"]  # "depthwise" selects grouped convolution below
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"  # fall back when data_format is unset

    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+CK'  # UFF layout string for the conv weights
    # Keep only the two spatial entries of strides/padding.
    if fmt == 'NCHW':
        strides = strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        strides = [strides[1], strides[2]]
        if padding is not None:
            padding = [padding[1], padding[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    if func == "depthwise":
        # Depthwise conv: group count taken from dim 2 of the weight tensor.
        wt = kwargs["tf_nodes"][inputs[1]]
        number_groups = int(wt.attr['value'].tensor.tensor_shape.dim[2].size)
    else:
        number_groups = None
    # If this node represents a dilated conv, pull in the dilations.
    dilation = None
    if "dilations" in tf_node.attr:
        if fmt == "NCHW":
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[2:]
        else:
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[1:3]

    # FIXME: Need a better way to check for dilated convs. This just checks if the block_shape input is as expected.
    # Ideally we should have a 'get_input_by_name' function. Maybe we can leverage GS here.
    # Another possibility is that GS can add these as attributes to the node rather than maintaining them as
    # separate const nodes.
    # NOTE: each matched const input is popped, so inputs[1] always refers to
    # the next candidate (block_shape, then paddings, then crops) in order.
    tf_block_shape_node = kwargs["tf_nodes"][inputs[1]]
    if "block_shape" in tf_block_shape_node.name.split('/')[-1] and tf_block_shape_node.op == "Const":
        # Get the second input (block_shape) - of the form [1, dilation_value, dilation_value]
        dilation = np.frombuffer(tf_block_shape_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        if len(dilation) > 2:
            dilation = [dilation[1], dilation[2]]
        inputs.pop(1)

    tf_paddings_node = kwargs["tf_nodes"][inputs[1]]
    if "paddings" in tf_paddings_node.name.split('/')[-1] and tf_paddings_node.op == "Const":
        # Get the second input (paddings, since block_shape is already removed)
        paddings_temp = np.frombuffer(tf_paddings_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        inputs.pop(1)

        # Get cropping information, but only if paddings is also present.
        tf_crops_node = kwargs["tf_nodes"][inputs[1]]
        if "crops" in tf_crops_node.name.split('/')[-1] and tf_crops_node.op == "Const":
            # Get the second input (crops, since block_shape is already removed)
            crops = np.frombuffer(tf_crops_node.attr["value"].tensor.tensor_content, dtype=np.int32)
            inputs.pop(1)
            # Net padding = explicit paddings minus crops.
            paddings_temp = (np.array(paddings_temp) - crops).tolist()

        # TF paddings are [[top,bottom], [left,right]], so we need to rearrange.
        perm = [0, 2, 1, 3]
        # HACK: Sometimes paddings has [0, 0] at the front.
        if len(paddings_temp) == 6:
            paddings_temp = paddings_temp[2:]
        paddings_temp = [paddings_temp[p] for p in perm]
        # Symmetric padding ("same")
        if paddings_temp[0] == paddings_temp[2] and paddings_temp[1] == paddings_temp[3]:
            paddings_temp = paddings_temp[0:2]
            # Merge with any padding already recovered from a fused Pad op.
            padding = paddings_temp if not padding else [p + pt for p, pt in zip(padding, paddings_temp)]
        else:
            print("Asymmetric padding for dilated convolutions is currently unsupported in the UFF converter.")

    uff_graph.conv(
        inputs[0], inputs[-1], strides, padding,
        dilation=dilation, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]