Example #1
def mobilenet_conv_layers(input, batch_size, num_unrolls):
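    """MobileNet-v1-style feature extractor.

    Consumes a stack of (batch_size * num_unrolls * 2) image crops and
    returns features of shape (batch_size * num_unrolls, 2 * F): the two
    crops of each timestep concatenated along the feature axis.
    """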
    input = tf.to_float(input) - IMAGENET_MEAN
    conv1_1 = conv2d('conv_1',
                     input,
                     num_filters=int(round(32 * 1)),  # 32 * width_multiplier
                     kernel_size=(3, 3),
                     padding='SAME',
                     stride=(2, 2),
                     activation=tf.nn.relu6,
                     batchnorm_enabled=False,
                     is_training=True,
                     l2_strength=0.0,
                     bias=0.0)
    #self.__add_to_nodes([conv1_1])
    ############################################################################################
    conv2_1_dw, conv2_1_pw = depthwise_separable_conv2d(
        'conv_ds_2',
        conv1_1,
        width_multiplier=1,
        num_filters=64,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv2_1_dw, conv2_1_pw])
    conv2_2_dw, conv2_2_pw = depthwise_separable_conv2d(
        'conv_ds_3',
        conv2_1_pw,
        width_multiplier=1,
        num_filters=128,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv2_2_dw, conv2_2_pw])
    ############################################################################################
    if 0:
        pool2 = tf.nn.max_pool(conv2_2_pw,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool2')
        lrn2 = tf.nn.local_response_normalization(pool2,
                                                  depth_radius=2,
                                                  alpha=2e-5,
                                                  beta=0.75,
                                                  bias=1.0,
                                                  name='norm2')

        with tf.variable_scope('conv1_skip'):
            prelu_skip = tf_util.get_variable('prelu',
                                              shape=[16],
                                              dtype=tf.float32,
                                              initializer=prelu_initializer)
            conv1_skip = tf_util.prelu(
                tf_util.conv_layer(lrn2, 16, 1, activation=None), prelu_skip)
            conv1_skip = tf.transpose(conv1_skip, perm=[0, 3, 1, 2])
            conv1_skip_flat = tf_util.remove_axis(conv1_skip, [2, 3])

    conv3_1_dw, conv3_1_pw = depthwise_separable_conv2d(
        'conv_ds_4',
        conv2_2_pw,
        width_multiplier=1,
        num_filters=128,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv3_1_dw, conv3_1_pw])

    conv3_2_dw, conv3_2_pw = depthwise_separable_conv2d(
        'conv_ds_5',
        conv3_1_pw,
        width_multiplier=1,
        num_filters=256,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv3_2_dw, conv3_2_pw])
    ############################################################################################

    conv4_1_dw, conv4_1_pw = depthwise_separable_conv2d(
        'conv_ds_6',
        conv3_2_pw,
        width_multiplier=1,
        num_filters=256,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv4_1_dw, conv4_1_pw])

    conv4_2_dw, conv4_2_pw = depthwise_separable_conv2d(
        'conv_ds_7',
        conv4_1_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv4_2_dw, conv4_2_pw])
    ############################################################################################
    if 0:
        pool3 = tf.nn.max_pool(conv4_2_pw,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool3')
        lrn3 = tf.nn.local_response_normalization(pool3,
                                                  depth_radius=2,
                                                  alpha=2e-5,
                                                  beta=0.75,
                                                  bias=1.0,
                                                  name='norm3')

        with tf.variable_scope('conv2_skip'):
            prelu_skip = tf_util.get_variable('prelu',
                                              shape=[16],
                                              dtype=tf.float32,
                                              initializer=prelu_initializer)
            conv2_skip = tf_util.prelu(
                tf_util.conv_layer(lrn3, 16, 1, activation=None), prelu_skip)
            conv2_skip = tf.transpose(conv2_skip, perm=[0, 3, 1, 2])
            conv2_skip_flat = tf_util.remove_axis(conv2_skip, [2, 3])

    conv5_1_dw, conv5_1_pw = depthwise_separable_conv2d(
        'conv_ds_8',
        conv4_2_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_1_dw, conv5_1_pw])

    conv5_2_dw, conv5_2_pw = depthwise_separable_conv2d(
        'conv_ds_9',
        conv5_1_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_2_dw, conv5_2_pw])

    conv5_3_dw, conv5_3_pw = depthwise_separable_conv2d(
        'conv_ds_10',
        conv5_2_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_3_dw, conv5_3_pw])

    conv5_4_dw, conv5_4_pw = depthwise_separable_conv2d(
        'conv_ds_11',
        conv5_3_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_4_dw, conv5_4_pw])

    conv5_5_dw, conv5_5_pw = depthwise_separable_conv2d(
        'conv_ds_12',
        conv5_4_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_5_dw, conv5_5_pw])

    conv5_6_dw, conv5_6_pw = depthwise_separable_conv2d(
        'conv_ds_13',
        conv5_5_pw,
        width_multiplier=1,
        num_filters=1024,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_6_dw, conv5_6_pw])
    ############################################################################################
    conv6_1_dw, conv6_1_pw = depthwise_separable_conv2d(
        'conv_ds_14',
        conv5_6_pw,
        width_multiplier=1,
        num_filters=1024,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv6_1_dw, conv6_1_pw])
    ############################################################################################
    avg_pool = avg_pool_2d(conv6_1_pw, size=(7, 7), stride=(1, 1))
    dropped = dropout(avg_pool, -1, True)
    #print("dropout:shape:")
    #print(dropped.get_shape())
    if 1:
        logits = flatten(
            conv2d('fc',
                   dropped,
                   kernel_size=(1, 1),
                   num_filters=32,
                   l2_strength=0.0,
                   bias=0.0,
                   padding='SAME'))
    else:
        logits = (conv2d('fc',
                         dropped,
                         kernel_size=(1, 1),
                         num_filters=32,
                         l2_strength=0.0,
                         bias=0.0,
                         padding='SAME'))
        logits = tf_util.remove_axis(logits, [2, 3])
    if 1:
        logits_shape = logits.get_shape().as_list()
        pool5_reshape = tf.reshape(
            logits, [batch_size, num_unrolls, 2, logits_shape[-1]])

    else:
        skip_concat = tf.concat([conv1_skip_flat, logits], 1)
        #skip_concat = tf.concat([conv1_skip_flat, conv2_skip_flat, logits], 1)
        #
        #print("logitss:shape:")
        #print(logits_shape)
        #
        skip_concat_shape = skip_concat.get_shape().as_list()
        print("Ship_concat shape")
        print(skip_concat_shape)
        # Split and merge image pairs
        # (BxTx2)xHxWxC
        pool5_reshape = tf.reshape(
            skip_concat, [batch_size, num_unrolls, 2, skip_concat_shape[-1]])
        # (BxT)x(2xHxWxC)
    reshaped = tf_util.remove_axis(pool5_reshape, [1, 3])
    return reshaped
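
For reference, a minimal sketch of the pair-merging reshape that ends the function, written with plain tf.reshape instead of tf_util.remove_axis (assumed here to collapse each listed axis into its left neighbour); batch and feature sizes are illustrative.

import tensorflow as tf

batch_size, num_unrolls, feat = 2, 4, 1024
flat = tf.zeros([batch_size * num_unrolls * 2, feat])              # (B*T*2) x F
paired = tf.reshape(flat, [batch_size, num_unrolls, 2, feat])      # B x T x 2 x F
merged = tf.reshape(paired, [batch_size * num_unrolls, 2 * feat])  # (B*T) x (2*F)
print(merged.shape)  # (8, 2048)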
Example #2
def alexnet_conv_layers(input, batch_size, num_unrolls):
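    """AlexNet-style feature extractor with 1x1 PReLU skip branches.

    Flattened skip features from conv1, conv2 and conv5 are concatenated
    with pool5, and the two crops of each timestep are merged, giving an
    output of shape (batch_size * num_unrolls, 2 * F).
    """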
    input = tf.to_float(input) - IMAGENET_MEAN
    with tf.variable_scope('conv1'):
        conv1 = tf_util.conv_layer(input, 96, 11, 4, padding='VALID')
        pool1 = tf.nn.max_pool(conv1,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool1')
        lrn1 = tf.nn.local_response_normalization(pool1,
                                                  depth_radius=2,
                                                  alpha=2e-5,
                                                  beta=0.75,
                                                  bias=1.0,
                                                  name='norm1')

    with tf.variable_scope('conv1_skip'):
        prelu_skip = tf_util.get_variable('prelu',
                                          shape=[16],
                                          dtype=tf.float32,
                                          initializer=prelu_initializer)

        conv1_skip = tf_util.prelu(
            tf_util.conv_layer(lrn1, 16, 1, activation=None), prelu_skip)
        conv1_skip = tf.transpose(conv1_skip, perm=[0, 3, 1, 2])
        conv1_skip_flat = tf_util.remove_axis(conv1_skip, [2, 3])

    with tf.variable_scope('conv2'):
        conv2 = tf_util.conv_layer(lrn1, 256, 5, num_groups=2, padding='SAME')
        pool2 = tf.nn.max_pool(conv2,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool2')
        lrn2 = tf.nn.local_response_normalization(pool2,
                                                  depth_radius=2,
                                                  alpha=2e-5,
                                                  beta=0.75,
                                                  bias=1.0,
                                                  name='norm2')

    with tf.variable_scope('conv2_skip'):
        prelu_skip = tf_util.get_variable('prelu',
                                          shape=[32],
                                          dtype=tf.float32,
                                          initializer=prelu_initializer)

        conv2_skip = tf_util.prelu(
            tf_util.conv_layer(lrn2, 32, 1, activation=None), prelu_skip)
        conv2_skip = tf.transpose(conv2_skip, perm=[0, 3, 1, 2])
        conv2_skip_flat = tf_util.remove_axis(conv2_skip, [2, 3])

    with tf.variable_scope('conv3'):
        conv3 = tf_util.conv_layer(lrn2, 384, 3, padding='SAME')

    with tf.variable_scope('conv4'):
        conv4 = tf_util.conv_layer(conv3, 384, 3, num_groups=2, padding='SAME')

    with tf.variable_scope('conv5'):
        conv5 = tf_util.conv_layer(conv4, 256, 3, num_groups=2, padding='SAME')
        pool5 = tf.nn.max_pool(conv5,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool5')
        pool5 = tf.transpose(pool5, perm=[0, 3, 1, 2])
        pool5_flat = tf_util.remove_axis(pool5, [2, 3])

    with tf.variable_scope('conv5_skip'):
        prelu_skip = tf_util.get_variable('prelu',
                                          shape=[64],
                                          dtype=tf.float32,
                                          initializer=prelu_initializer)

        conv5_skip = tf_util.prelu(
            tf_util.conv_layer(conv5, 64, 1, activation=None), prelu_skip)
        conv5_skip = tf.transpose(conv5_skip, perm=[0, 3, 1, 2])
        conv5_skip_flat = tf_util.remove_axis(conv5_skip, [2, 3])

    with tf.variable_scope('big_concat'):
        # Concat all skip layers.
        skip_concat = tf.concat(
            [conv1_skip_flat, conv2_skip_flat, conv5_skip_flat, pool5_flat], 1)
        skip_concat_shape = skip_concat.get_shape().as_list()

        # Split and merge image pairs
        # (BxTx2)xHxWxC
        pool5_reshape = tf.reshape(
            skip_concat, [batch_size, num_unrolls, 2, skip_concat_shape[-1]])
        # (BxT)x(2xHxWxC)
        reshaped = tf_util.remove_axis(pool5_reshape, [1, 3])

        return reshaped
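
Every skip branch above follows the same pattern: a 1x1 conv followed by a PReLU with one learned slope per channel. tf_util.prelu is not reproduced in these examples; the sketch below shows the common formulation it is assumed to implement.

import tensorflow as tf

def prelu(x, alpha):
    # Identity for positive inputs, learned per-channel slope for negatives.
    return tf.nn.relu(x) + alpha * tf.minimum(x, 0.0)

x = tf.random.normal([1, 8, 8, 16])       # NHWC activations
alpha = tf.Variable(tf.fill([16], 0.25))  # one slope per channel
y = prelu(x, alpha)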
Example #3
def mobilenet_v2(model_file,
                 input,
                 batch_size,
                 num_unrolls,
                 image_size=128,
                 depth_multiplier=0.5):
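    """Feature extractor built on slim's MobileNet-v2 (depth multiplier 0.5).

    Restores pretrained weights from model_file (remapping EMA shadow names
    back to checkpoint names), taps layer_3 and layer_10 as gradient-stopped
    skip features, and merges the two crops of each timestep as in the other
    extractors.
    """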
    from nets.mobilenet import mobilenet_v2
    if model_file is None:
        model_file = "/home/waechter/repos/tf-models/research/slim/nets/mobilenet/checkpoint/mobilenet_v2_" + str(
            depth_multiplier) + "_" + str(image_size) + ".ckpt"
    with tf.Session() as sess:
        print("input: ", input.shape)

        #image2 = tf.gather(input, [None,1,None,None, None])
        #images = tf.expand_dims(input, 0)
        # image1 = images[:,0,:,:,:] #tf.gather(input, [None,0,None,None, None])
        # image2 = images[:,1,:,:,:] #tf.gather(input, [None,0,None,None, None])
        # print("input: ", input.shape, " image: ", images.shape)
        # print("input: ", input.shape, " image: ", images.shape, "image1: ", image1.shape)
        images = tf.cast(input, tf.float32) / 128. - 1
        print(images.shape)
        images.set_shape((None, None, None, 3))
        print("images shape after: ", input.shape)
        images = tf.image.resize(images, (image_size, image_size))
        with tf.contrib.slim.arg_scope(
                mobilenet_v2.training_scope(is_training=True)):
            # logits, endpoints = mobilenet_v2.mobilenet(images, depth_multiplier=depth_multiplier)
            logits, endpoints = mobilenet_v2.mobilenet_v2_050(images)
        ema = tf.train.ExponentialMovingAverage(0.999)
        vars_to_restore = ema.variables_to_restore()
        changed_vars = {}
        search_string = "MobilenetV2"
        for key in vars_to_restore.keys():
            pos = key.rfind(search_string)
            #print(key, "pos: ", pos, " str1:", key[pos:])
            if pos >= 0:
                changed_vars[key[pos:]] = vars_to_restore[key]
        #print(zip(changed_vars.keys(), vars_to_restore.keys()))
        # print("Vars: ", vars_to_restore)
        saver = tf.train.Saver(changed_vars)
        # saver.restore(sess, "/home/waechter/repos/tf-models/research/slim/nets/mobilenet/checkpoint/mobilenet_v2_1.0_192.ckpt")
        saver.restore(sess, model_file)
        print("Model mobilenet_v2 restored.")

        conv_skip_connection1 = endpoints['layer_3']
        conv_skip_connection2 = endpoints['layer_10']
        print("skip1 shape: ", conv_skip_connection1.shape)
        print("skip2 shape: ", conv_skip_connection2.shape)
        final_conv = endpoints['layer_17']
        print("layer_17 shape: ", final_conv.shape)
        final_conv_flat = tf_util.remove_axis(final_conv, [2, 3])
        print("layer_17_flat shape: ", final_conv_flat.shape)

        with tf.variable_scope('conv_skip1'):
            prelu_skip = tf_util.get_variable('prelu',
                                              shape=[16],
                                              dtype=tf.float32,
                                              initializer=prelu_initializer)

            conv1_skip = tf_util.prelu(
                tf_util.conv_layer(tf.stop_gradient(conv_skip_connection1),
                                   16,
                                   1,
                                   activation=None), prelu_skip)
            conv1_skip = tf.transpose(conv1_skip, perm=[0, 3, 1, 2])
            conv1_skip_flat = tf_util.remove_axis(conv1_skip, [2, 3])
        with tf.variable_scope('conv_skip2'):
            prelu_skip = tf_util.get_variable('prelu',
                                              shape=[16],
                                              dtype=tf.float32,
                                              initializer=prelu_initializer)

            conv2_skip = tf_util.prelu(
                tf_util.conv_layer(tf.stop_gradient(conv_skip_connection2),
                                   16,
                                   1,
                                   activation=None), prelu_skip)
            conv2_skip = tf.transpose(conv2_skip, perm=[0, 3, 1, 2])
            conv2_skip_flat = tf_util.remove_axis(conv2_skip, [2, 3])

    final_conv_flat = tf.stop_gradient(final_conv_flat)

    with tf.variable_scope('big_concat'):
        skip_concat = tf.concat(
            [conv1_skip_flat, conv2_skip_flat, final_conv_flat], 1)
        skip_concat_shape = skip_concat.get_shape().as_list()
        print("skip_concat shape: ", skip_concat.shape)

        # Split and merge image pairs
        # (BxTx2)xHxWxC
        concat_reshape = tf.reshape(
            skip_concat, [batch_size, num_unrolls, 2, skip_concat_shape[-1]])
        # (BxT)x(2xHxWxC)
        reshaped = tf_util.remove_axis(concat_reshape, [1, 3])

        return reshaped
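
The restore logic above hinges on one name-remapping trick: at training time the MobileNet variables live under an extra outer scope, so each in-graph name is cut back to the checkpoint's 'MobilenetV2/...' prefix before being handed to tf.train.Saver. A minimal sketch of that remapping (the outer scope name is illustrative):

def strip_to_scope(names, scope='MobilenetV2'):
    # Map checkpoint-style names (starting at `scope`) to full in-graph names.
    remapped = {}
    for name in names:
        pos = name.rfind(scope)
        if pos >= 0:
            remapped[name[pos:]] = name
    return remapped

print(strip_to_scope(['tracker/MobilenetV2/Conv/weights']))
# {'MobilenetV2/Conv/weights': 'tracker/MobilenetV2/Conv/weights'}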