Example #1
def grouped_conv2d_Discriminator(input,
                                 num_outputs,
                                 groups,
                                 is_train,
                                 info=False,
                                 k=3,
                                 s=1,
                                 activation_fn=lrelu,
                                 norm='batch',
                                 name='groupedConv2d'):
    with tf.variable_scope(name):
        _ = grouped_convolution(input, num_outputs, [k, k], groups, stride=s)
        _ = norm_and_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: print_info(name, _.get_shape().as_list(), activation_fn)
    return _
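For orientation, a minimal usage sketch of the wrapper above, assuming an NHWC activation tensor and that the project helpers (grouped_convolution, norm_and_act, lrelu, print_info) are available in the surrounding module; the shapes, placeholder names, and parameter values are illustrative assumptions, not part of the original example.

# Illustrative usage only; shapes and names below are assumptions.
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 64, 64, 32], name='images')
is_train = tf.placeholder_with_default(True, [], name='is_train')

# 64 output channels split across 4 groups, 3x3 kernel, stride 1.
features = grouped_conv2d_Discriminator(images, 64, 4, is_train,
                                        info=True, k=3, s=1,
                                        name='disc_conv1')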
Example #2
def grouped_conv2d_GsoP(input,
                        num_outputs,
                        groups,
                        is_train=True,
                        info=False,
                        a=1,
                        b=1,
                        s=1,
                        name='groupedConv2d'):
    with tf.variable_scope(name):
        _ = grouped_convolution(input,
                                num_outputs, [a, b],
                                groups,
                                stride=s,
                                padding='VALID')
        # _ = norm_and_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: print_info(name, _.get_shape().as_list(), activation_fn=None)
    return _
Example #3
def grouped_conv2d_Discriminator_one(input,
                                     num_outputs,
                                     groups,
                                     is_train,
                                     info=False,
                                     a=1,
                                     b=2,
                                     s=1,
                                     activation_fn=lrelu,
                                     norm='batch',
                                     name='groupedConv2dKdifferent'):
    with tf.variable_scope(name):
        _ = grouped_convolution(input,
                                num_outputs, [a, b],
                                groups,
                                stride=s,
                                padding='VALID')
        _ = norm_and_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: print_info(name, _.get_shape().as_list(), activation_fn)
    return _
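All three wrappers above delegate to a grouped_convolution helper that is not part of this listing. Below is a minimal sketch under the assumption that the helper splits the input channels into groups, convolves each group independently, and concatenates the results; the actual implementation in the source project may differ (for example in weight scoping or bias handling).

# Sketch only: assumed behaviour of the missing helper, not the project's actual code.
import tensorflow as tf
import tensorflow.contrib.slim as slim

def grouped_convolution(inputs, num_outputs, kernel_size, groups,
                        stride=1, padding='SAME'):
    # Split the channel dimension into `groups` equal parts.
    input_groups = tf.split(inputs, groups, axis=-1)
    # Convolve each group separately with its own set of filters.
    output_groups = [
        slim.conv2d(group, num_outputs // groups, kernel_size,
                    stride=stride, padding=padding, activation_fn=None)
        for group in input_groups
    ]
    # Re-join the per-group outputs along the channel axis.
    return tf.concat(output_groups, axis=-1)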
def squeezenext_unit(inputs, filters, stride, height_first_order, groups,
                     seperate_relus):
    """
    Squeezenext unit according to:
    https://arxiv.org/pdf/1803.10615.pdf

    :param inputs:
        Input tensor
    :param filters:
        Number of filters at output of this unit
    :param stride:
        Input stride
    :param height_first_order:
        Whether to perform the separable convolution in the vertical direction first or in the horizontal direction first
    :param groups:
        Number of groups for some of the convolutions (which ones differ from the paper but match:
        https://github.com/amirgholami/SqueezeNext/blob/master/1.0-G-SqNxt-23/train_val.prototxt)
    :param seperate_relus:
        Whether to apply a ReLU to the shortcut and output convolutions in addition to the ReLU after the residual addition
    :return:
        Output tensor, not(height_first_order)
    """
    input_channels = inputs.get_shape().as_list()[-1]
    shortcut = inputs
    out_activation = tf.nn.relu if seperate_relus else None
    # shortcut convolution, only executed if the number of input channels differs from the number
    # of output channels or the stride is greater than 1.
    if input_channels != filters or stride != 1:
        shortcut = slim.conv2d(shortcut,
                               filters, [1, 1],
                               stride=stride,
                               activation_fn=out_activation)

    # input 1x1 reduction convolutions
    block = tfe.grouped_convolution(inputs,
                                    filters // 2, [1, 1],
                                    groups,
                                    stride=stride)
    block = slim.conv2d(block, block.get_shape().as_list()[-1] // 2, [1, 1])

    # separable convolutions
    if height_first_order:
        input_channels_seperated = block.get_shape().as_list()[-1]
        block = tfe.grouped_convolution(block, input_channels_seperated * 2,
                                        [3, 1], groups)
        block = tfe.grouped_convolution(block,
                                        block.get_shape().as_list()[-1],
                                        [1, 3], groups)

    else:
        input_channels_seperated = block.get_shape().as_list()[-1]
        block = tfe.grouped_convolution(block, input_channels_seperated * 2,
                                        [1, 3], groups)
        block = tfe.grouped_convolution(block,
                                        block.get_shape().as_list()[-1],
                                        [3, 1], groups)
    # switch the order for the next unit
    height_first_order = not height_first_order

    # output convolutions
    block = slim.conv2d(block,
                        block.get_shape().as_list()[-1] * 2, [1, 1],
                        activation_fn=out_activation)
    assert block.get_shape().as_list()[-1] == filters, \
        "Block output channels not equal to number of specified filters"

    return tf.nn.relu(block + shortcut), height_first_order
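A hedged usage sketch of squeezenext_unit, stacking two units and feeding the returned height_first_order flag back in so consecutive units alternate the order of the separable convolutions; the input shape, filter counts, and group count are illustrative assumptions rather than values from the source.

# Illustrative stacking only; concrete sizes below are assumptions, not from the source.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 56, 56, 64])
height_first = True

# First unit downsamples (stride 2) and raises the channel count to 128.
net, height_first = squeezenext_unit(inputs, 128, 2, height_first,
                                     groups=2, seperate_relus=False)
# Second unit keeps the spatial size and channel count unchanged.
net, height_first = squeezenext_unit(net, 128, 1, height_first,
                                     groups=2, seperate_relus=False)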