Example #1
0
def generator_forward(z, reuse=None, name="generator"):
    """Build a DCGAN-style generator graph.

    Args:
        z: 2-D latent tensor; the batch size is read from its dynamic shape.
        reuse: forwarded to ``tf.variable_scope`` for variable sharing.
        name: variable-scope name for the generator.

    Returns:
        A sigmoid-activated single-channel image tensor.
    """
    with tf.variable_scope(name, reuse=reuse):
        z_shape = tf.shape(z)
        out = layers.fully_connected(z,
                                     num_outputs=1568,
                                     activation_fn=leaky_rectify,
                                     normalizer_fn=identity)
        # 1568 == 7 * 7 * 32, so the flat activations reshape cleanly.
        # FIX: tf.pack was renamed tf.stack in TF 1.0 and later removed.
        out = tf.reshape(out, tf.stack([z_shape[0], 7, 7, 32]))
        out = layers.convolution2d_transpose(out,
                                             num_outputs=64,
                                             kernel_size=4,
                                             stride=2,
                                             activation_fn=leaky_rectify,
                                             normalizer_fn=identity)
        out = layers.convolution2d_transpose(out,
                                             num_outputs=32,
                                             kernel_size=4,
                                             stride=2,
                                             activation_fn=leaky_rectify,
                                             normalizer_fn=identity)
        out = layers.convolution2d(out,
                                   num_outputs=1,
                                   kernel_size=1,
                                   stride=1,
                                   activation_fn=tf.nn.sigmoid)
    return out
    def forward(self, input_tensor, is_training):
        """Resize the input to 512x512, encode it with ResNet-101 at output
        stride 16, upsample with two transposed convolutions, and score
        each pixel with a 1x1 convolution (len(self.classes) + 1 outputs)."""
        # inputs has shape [batch, 513, 513, 3]
        resized = tf.image.resize_images(input_tensor, [512, 512])
        with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training)):
            features, _ = resnet_v1.resnet_v1_101(resized,
                                                  None,
                                                  global_pool=False,
                                                  output_stride=16)
            print(features.get_shape())

        # Decoder: a 4x then a 2x transposed convolution, each followed by
        # ReLU and dropout.
        net = features
        for depth, stride in ((64, [4, 4]), (32, [2, 2])):
            net = L.convolution2d_transpose(net, depth, [5, 5], stride,
                                            activation_fn=None)
            net = tf.nn.relu(net)
            net = L.dropout(net, keep_prob=0.5, is_training=is_training)

        print(net)

        logits = L.convolution2d(net,
                                 len(self.classes) + 1, [1, 1], [1, 1],
                                 activation_fn=None)
        print(logits)
        return logits
Example #3
0
    def forward(self, input_tensor, is_training):
        """Take VGG-19 pool4 features, pad them by one pixel per side, then
        upsample 16x through four stride-2 transposed convolutions and
        produce per-pixel class scores with a 1x1 convolution."""
        keep = 0.5

        resized = tf.image.resize_images(input_tensor, [224, 224])

        print("Is training:", is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            _, end_points = vgg.vgg_19(resized, is_training=is_training)

        print(list(end_points.keys()))

        # One pixel of zero padding on each spatial side of pool4.
        net = tf.pad(end_points['vgg_19/pool4'],
                     [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        print(net)

        # Four identical deconv / ReLU / dropout stages.
        for depth in (128, 64, 32, 32):
            net = L.convolution2d_transpose(net, depth, [5, 5], [2, 2],
                                            activation_fn=None)
            net = tf.nn.relu(net)
            net = L.dropout(net, keep_prob=keep, is_training=is_training)

        return L.convolution2d(net, len(self.classes) + 1, [1, 1], [1, 1],
                               activation_fn=None)
Example #4
0
    def _generator_mnist(self, _input, is_training_ph, reuse=False, prefix="GENERATOR"):
        """MNIST generator: fully connected projection to a 7x7x16 map,
        then three transposed convolutions, each followed by leaky ReLU
        and batch normalization; reshaped to the dataset's sample shape."""
        output_shape = self._data.data_shape

        with tf.variable_scope(prefix):
            net = layers.fully_connected(_input, 7 * 7 * 16,
                                         activation_fn=None, scope='g_z')
            net = tf.reshape(net, [-1, 7, 7, 16])
            net = lrelu(net)
            net = batch_norm(net, is_training_ph, reuse, scope='bn_layer1')

            # (depth, stride, conv scope, batch-norm scope) per stage.
            stages = [(8, 2, 'g_conv1', 'bn_layer2'),
                      (4, 2, 'g_conv2', 'bn_layer3'),
                      (1, 1, 'g_conv3', 'bn_layer4')]
            for depth, stride, conv_scope, bn_scope in stages:
                net = layers.convolution2d_transpose(
                    net, depth, 5, stride=stride,
                    activation_fn=None, scope=conv_scope)
                net = lrelu(net)
                net = batch_norm(net, is_training_ph, reuse, scope=bn_scope)

            # Flatten back to the dataset's native sample shape.
            out = tf.reshape(net, [-1] + list(output_shape))

        return out
Example #5
0
def netG(z, y, BATCH_SIZE):
    """Conditional DCGAN generator.

    The attribute vector ``y`` is concatenated onto the latent ``z``
    before the fully connected projection. Returns the tanh-activated
    3-channel output of the last transposed convolution.
    """
    # concat attribute y onto z
    z = tf.concat([z, y], axis=1)
    z = tcl.fully_connected(z, 4 * 4 * 512, activation_fn=tf.identity, scope='g_z')
    z = tf.reshape(z, [BATCH_SIZE, 4, 4, 512])

    conv1 = tcl.convolution2d_transpose(z, 256, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv1')

    conv2 = tcl.convolution2d_transpose(conv1, 128, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv2')

    conv3 = tcl.convolution2d_transpose(conv2, 64, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv3')

    conv4 = tcl.convolution2d_transpose(conv3, 3, 5, 2, activation_fn=tf.nn.tanh, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv4')

    # FIX: Python 2 print statements converted to print() calls so this
    # parses under Python 3, matching the rest of the file.
    print('z:', z)
    print('conv1:', conv1)
    print('conv2:', conv2)
    print('conv3:', conv3)
    print('conv4:', conv4)
    print()
    print('END G')
    print()
    tf.add_to_collection('vars', z)
    tf.add_to_collection('vars', conv1)
    tf.add_to_collection('vars', conv2)
    tf.add_to_collection('vars', conv3)
    tf.add_to_collection('vars', conv4)
    return conv4
def netG(z, batch_size):
    """DCGAN generator: project ``z`` to a 4x4x1024 map, apply batch norm
    and ReLU, then upsample through four stride-2 transposed convolutions
    to a 3-channel tanh image."""
    # FIX: Python 2 print statements converted to print() calls so this
    # parses under Python 3, matching the rest of the file.
    print('GENERATOR')

    z = tcl.fully_connected(z, 4 * 4 * 1024, activation_fn=tf.identity, scope='g_z')
    z = tf.reshape(z, [batch_size, 4, 4, 1024])
    z = tcl.batch_norm(z)
    z = tf.nn.relu(z)

    conv1 = tcl.convolution2d_transpose(z, 512, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv1')

    conv2 = tcl.convolution2d_transpose(conv1, 256, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv2')

    conv3 = tcl.convolution2d_transpose(conv2, 128, 5, 2, normalizer_fn=tcl.batch_norm, activation_fn=tf.nn.relu, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv3')

    conv4 = tcl.convolution2d_transpose(conv3, 3, 5, 2, activation_fn=tf.nn.tanh, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_conv4')

    print('z:', z)
    print('conv1:', conv1)
    print('conv2:', conv2)
    print('conv3:', conv3)
    print('conv4:', conv4)
    print()
    print('END G')
    print()
    tf.add_to_collection('vars', z)
    tf.add_to_collection('vars', conv1)
    tf.add_to_collection('vars', conv2)
    tf.add_to_collection('vars', conv3)
    tf.add_to_collection('vars', conv4)
    return conv4
def celebA_generator(z, n_z=100, DIM=64, reuse=False):
    """DCGAN generator for 64x64 CelebA images.

    Returns:
        A pair ``(out, variables)`` where ``out`` is the generated image
        batch rescaled from tanh range to [0, 1], and ``variables`` maps
        checkpoint-style names ("g_*", plus unprefixed BatchNorm names for
        the first batch norm) to the generator's variables.
    """
    with tf.variable_scope('G', reuse=reuse) as scope:
        net = tcl.fully_connected(z,
                                  4 * 4 * 1024,
                                  activation_fn=tf.identity,
                                  scope='z')
        net = tf.reshape(net, [-1, 4, 4, 1024])
        net = tcl.batch_norm(net)
        net = tf.nn.relu(net)

        # Three batch-normalized ReLU deconvolutions ...
        for idx, depth in enumerate((512, 256, 128), start=1):
            net = tcl.convolution2d_transpose(
                net,
                depth,
                5,
                2,
                normalizer_fn=tcl.batch_norm,
                activation_fn=tf.nn.relu,
                weights_initializer=tf.random_normal_initializer(stddev=0.02),
                scope='conv%d' % idx)

        # ... then a plain tanh deconvolution down to 3 channels.
        conv4 = tcl.convolution2d_transpose(
            net,
            3,
            5,
            2,
            activation_fn=tf.nn.tanh,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='conv4')

    variables = tf.contrib.framework.get_variables(scope)
    variables = {var.op.name.replace("G/", "g_"): var for var in variables}
    # The first batch norm lives directly under the 'G' scope; restore the
    # unprefixed names the checkpoint expects for it.
    for key in ('BatchNorm/beta',
                'BatchNorm/moving_mean',
                'BatchNorm/moving_variance'):
        variables[key] = variables.pop('g_' + key)
    out = tf.reshape(conv4, [-1, 64, 64, 3])
    # Map the tanh output from [-1, 1] to [0, 1].
    out = out * 0.5 + 0.5
    return out, variables
Example #8
0
def conv_generator(x, output_dim, n_filters, scope='Generator'):
    """Two-deconvolution generator that emits a flattened tanh image of
    length output_dim * output_dim."""
    with tf.variable_scope(scope):
        quarter = int(output_dim / 4)
        half = int(output_dim / 2)  # kept for parity with the original
        projected = layers.linear(x, quarter * quarter * n_filters * 2)
        grid = tf.reshape(projected, [-1, quarter, quarter, n_filters * 2])
        up1 = tf.nn.elu(
            layers.convolution2d_transpose(grid, n_filters, [5, 5], stride=2))
        up2 = layers.convolution2d_transpose(up1, 1, [5, 5], stride=2)
        return tf.reshape(tf.nn.tanh(up2), [-1, output_dim * output_dim])
Example #9
0
    def forward(self, input_tensor, is_training):
        """Symmetric conv/deconv segmentation network; every stage is
        followed by batch norm, ReLU and dropout, and the result is scored
        per pixel with a 1x1 convolution."""
        keep = 0.5

        def _bn_relu_drop(net):
            # Shared tail of the non-pooling stages.
            net = L.batch_norm(net)
            net = tf.nn.relu(net)
            return L.dropout(net, keep_prob=keep, is_training=is_training)

        net = input_tensor
        # Downsampling path: three conv + 2x2 max-pool stages.
        for depth in (16, 32, 64):
            net = L.convolution2d(net, depth, [5, 5], activation_fn=None)
            net = L.batch_norm(net)
            net = tf.nn.relu(net)
            net = L.max_pool2d(net, [2, 2], [2, 2])
            net = L.dropout(net, keep_prob=keep, is_training=is_training)

        # Bottleneck convolution without pooling.
        net = L.convolution2d(net, 128, [5, 5], activation_fn=None)
        net = _bn_relu_drop(net)

        # Upsampling path: three stride-2 transposed convolutions ...
        for depth in (128, 64, 32):
            net = L.convolution2d_transpose(net, depth, [5, 5], [2, 2],
                                            activation_fn=None)
            net = _bn_relu_drop(net)

        # ... then two transposed convolutions at the layer's default stride.
        for _ in range(2):
            net = L.convolution2d_transpose(net, 32, [5, 5],
                                            activation_fn=None)
            net = _bn_relu_drop(net)

        return L.convolution2d(net, len(self.classes) + 1, [1, 1], [1, 1],
                               activation_fn=None)
Example #10
0
    def __call__(self, z, reuse=True):
        """Build the conditional generator graph for latent batch ``z``.

        The last 10 columns of ``z`` are treated as a label vector that is
        re-injected at several depths (via tf.concat and conv_cond_concat).
        Returns a flattened sigmoid-activated image batch of shape [bs, -1].
        """
        #with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()
            # Dynamic batch size; used for reshapes below.
            bs = tf.shape(z)[0]
            # Label portion of the input and its broadcastable 1x1 map.
            y = z[:, -10:]
            yb = tf.reshape(y, shape=[bs, 1, 1, 10])
            fc = tcl.fully_connected(z, 1024, activation_fn=tf.identity)
            fc = tc.layers.batch_norm(fc,
                                      decay=0.9,
                                      scale=True,
                                      updates_collections=None,
                                      is_training=self.is_training)
            fc = leaky_relu(fc)
            # Re-append the label vector after the first FC block.
            fc = tf.concat([fc, y], 1)

            # Dataset-specific spatial seed: 7x7 for MNIST, 8x8 for CIFAR-10.
            if self.dataset == 'mnist':
                fc = tcl.fully_connected(fc,
                                         7 * 7 * 128,
                                         activation_fn=tf.identity)
                fc = tf.reshape(fc, tf.stack([bs, 7, 7, 128]))
            elif self.dataset == 'cifar10':
                fc = tcl.fully_connected(fc,
                                         8 * 8 * 128,
                                         activation_fn=tf.identity)
                fc = tf.reshape(fc, tf.stack([bs, 8, 8, 128]))
            fc = tc.layers.batch_norm(fc,
                                      decay=0.9,
                                      scale=True,
                                      updates_collections=None,
                                      is_training=self.is_training)
            fc = leaky_relu(fc)
            # Tile the label map onto every spatial position.
            fc = conv_cond_concat(fc, yb)
            conv = tcl.convolution2d_transpose(fc,
                                               64, [4, 4], [2, 2],
                                               activation_fn=tf.identity)
            #(bs,14,14,64)
            conv = tc.layers.batch_norm(conv,
                                        decay=0.9,
                                        scale=True,
                                        updates_collections=None,
                                        is_training=self.is_training)
            conv = leaky_relu(conv)
            # Final deconv: 1 channel for MNIST, 3 for CIFAR-10; output is
            # flattened to [bs, -1].
            if self.dataset == 'mnist':
                output = tcl.convolution2d_transpose(
                    conv, 1, [4, 4], [2, 2], activation_fn=tf.nn.sigmoid)
                output = tf.reshape(output, [bs, -1])
            elif self.dataset == 'cifar10':
                output = tcl.convolution2d_transpose(
                    conv, 3, [4, 4], [2, 2], activation_fn=tf.nn.sigmoid)
                output = tf.reshape(output, [bs, -1])
            # Output lies in (0, 1) via the sigmoid activation (the original
            # comment said tanh, but the code uses tf.nn.sigmoid).
            return output
    def forward(self, input_tensor, is_training):
        """3-D convolutional encoder over a volumetric input followed by a
        2-D transposed-convolution decoder and a per-pixel 1x1 classifier.

        The input gets a trailing channel axis added; after the encoder the
        depth axis (axis 3) is squeezed away, so it must have collapsed to
        size 1 by then (pooling strides above are chosen for that — TODO
        confirm against the actual input depth).
        """
        dropout_value = 0.5
        # Add a channel dimension: [..., D] -> [..., D, 1].
        input_tensor = tf.expand_dims(input_tensor, -1)

        h = tf.layers.conv3d(input_tensor, 16, [5, 5, 3], padding="same")
        h = tf.layers.batch_normalization(h)
        h = tf.nn.relu(h)
        h = tf.layers.max_pooling3d(h, [2, 2, 2], [2, 2, 2])
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)
        print(h)

        h = tf.layers.conv3d(h, 32, [5, 5, 2], padding="same")
        h = tf.layers.batch_normalization(h)
        h = tf.nn.relu(h)
        # Note: this stage pools only spatially (depth stride 1).
        h = tf.layers.max_pooling3d(h, [2, 2, 1], [2, 2, 1])
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)
        print(h)

        h = tf.layers.conv3d(h, 64, [5, 5, 2], padding="same")
        h = tf.layers.batch_normalization(h)
        h = tf.nn.relu(h)
        h = tf.layers.max_pooling3d(h, [2, 2, 2], [2, 2, 2])
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)
        print(h)

        h = tf.layers.conv3d(h, 128, [5, 5, 1], padding="same")
        h = tf.layers.batch_normalization(h)
        h = tf.nn.relu(h)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        print(h)
        # Drop the (size-1) depth axis to switch to 2-D ops.
        h = tf.squeeze(h, 3)
        print(h)

        # Decoder: three stride-2 transposed convolutions with dropout.
        h = L.convolution2d_transpose(h,
                                      128, [5, 5], [2, 2],
                                      activation_fn=tf.nn.relu)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h,
                                      64, [5, 5], [2, 2],
                                      activation_fn=tf.nn.relu)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h,
                                      32, [5, 5], [2, 2],
                                      activation_fn=tf.nn.relu)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        # Per-pixel class scores (classes plus one background slot).
        h = L.convolution2d(h,
                            len(self.classes) + 1, [1, 1], [1, 1],
                            activation_fn=None)

        return h
def netG(z, batch_size):
    """DCGAN generator: project ``z`` to a 4x4x1024 map (batch-normalized
    ReLU fully connected layer), then upsample through four stride-2
    transposed convolutions to a 3-channel tanh image."""
    # FIX: Python 2 print statements converted to print() calls so this
    # parses under Python 3, matching the rest of the file.
    print('GENERATOR')
    z = layers.fully_connected(z,
                               4 * 4 * 1024,
                               normalizer_fn=layers.batch_norm,
                               activation_fn=tf.nn.relu,
                               scope='g_z')
    z = tf.reshape(z, [batch_size, 4, 4, 1024])

    conv1 = layers.convolution2d_transpose(z,
                                           512,
                                           5,
                                           stride=2,
                                           normalizer_fn=layers.batch_norm,
                                           activation_fn=tf.nn.relu,
                                           scope='g_conv1')
    conv2 = layers.convolution2d_transpose(conv1,
                                           256,
                                           5,
                                           stride=2,
                                           normalizer_fn=layers.batch_norm,
                                           activation_fn=tf.nn.relu,
                                           scope='g_conv2')
    conv3 = layers.convolution2d_transpose(conv2,
                                           128,
                                           5,
                                           stride=2,
                                           normalizer_fn=layers.batch_norm,
                                           activation_fn=tf.nn.relu,
                                           scope='g_conv3')
    conv4 = layers.convolution2d_transpose(conv3,
                                           3,
                                           5,
                                           stride=2,
                                           activation_fn=tf.nn.tanh,
                                           scope='g_conv4')

    print('z:', z)
    print('conv1:', conv1)
    print('conv2:', conv2)
    print('conv3:', conv3)
    print('conv4:', conv4)
    print()
    print('END G')
    print()
    tf.add_to_collection('vars', z)
    tf.add_to_collection('vars', conv1)
    tf.add_to_collection('vars', conv2)
    tf.add_to_collection('vars', conv3)
    tf.add_to_collection('vars', conv4)

    return conv4
Example #13
0
    def forward(self, input_tensor, is_training):
        """VGG-16 encoder with a skip-connected deconvolution decoder.

        Pool4 features are upsampled in four stride-2 stages; each stage
        concatenates the matching VGG pooling output (pool3/pool2/pool1,
        then the raw input), and the second stage additionally appends a
        fixed "seed" mask — a 56x56 single-channel map that is 1 only at
        position (28, 28), presumably marking the image center as a seed
        point (TODO confirm against the caller). Ends with a per-pixel
        1x1 classifier.
        """
        dropout_value = 0.5

        # input_tensor = tf.image.resize_images(input_tensor, [224, 224])
        batch_size = tf.shape(input_tensor)[0]

        print("Is training:", is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            h, end_points = vgg.vgg_16(input_tensor, is_training=is_training)

        print(end_points)
        print(list(end_points.keys()))

        # Start decoding from pool4, not from the final VGG output.
        h = end_points['vgg_16/pool4']

        h = L.convolution2d_transpose(h, 256, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = tf.concat([h, end_points['vgg_16/pool3']], axis=3)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        # Single-pixel seed mask, tiled across the batch.
        np_seed_mask = np.zeros((1, 56, 56, 1))
        np_seed_mask[:, 28:29, 28:29, :] = 1.0
        seed_mask = tf.constant(np_seed_mask, dtype=tf.float32)
        seed_mask = tf.tile(seed_mask, [batch_size, 1, 1, 1])

        h = L.convolution2d_transpose(h, 128, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = tf.concat([h, end_points['vgg_16/pool2'], seed_mask], axis=3)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h, 64, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = tf.concat([h, end_points['vgg_16/pool1']], axis=3)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h, 64, [5, 5], [2, 2], activation_fn=None)
        # NOTE(review): unlike the stages above, here the concat happens
        # BEFORE the ReLU, so the raw input is also rectified — confirm
        # whether that ordering is intentional.
        h = tf.concat([h, input_tensor], axis=3)
        h = tf.nn.relu(h)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        # h = L.convolution2d_transpose(h, 64, [5, 5], [2, 2], activation_fn=None)
        # h = tf.nn.relu(h)
        # h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        # h = L.convolution2d_transpose(h, 64, [5, 5], [2, 2], activation_fn=None)
        # h = tf.nn.relu(h)
        # h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d(h, len(self.classes) + 1, [1, 1], [1, 1], activation_fn=None)

        return h
Example #14
0
    def _decode(self, feature_map, is_training):
        """Upsample ``feature_map`` 16x with four stride-2 transposed
        convolutions and score each pixel with a 1x1 convolution."""
        net = feature_map
        # First three stages are followed by dropout.
        for depth in (128, 64, 32):
            net = L.convolution2d_transpose(net, depth, [5, 5], [2, 2],
                                            activation_fn=tf.nn.relu)
            net = L.dropout(net, keep_prob=0.5, is_training=is_training)

        # Final deconvolution has no dropout after it.
        net = L.convolution2d_transpose(net, 32, [5, 5], [2, 2],
                                        activation_fn=tf.nn.relu)

        return L.convolution2d(net, len(self.classes) + 1, [1, 1], [1, 1],
                               activation_fn=None)
Example #15
0
def netG(z, batch_size):
    """Small DCGAN generator for 28x28 single-channel images.

    The last transposed convolution produces 32x32 maps which are cropped
    to 28x28 before returning.
    """
    # FIX: Python 2 print statements converted to print() calls so this
    # parses under Python 3, matching the rest of the file.
    print('GENERATOR')

    z = tcl.fully_connected(z,
                            4 * 4 * 256,
                            activation_fn=tf.identity,
                            scope='g_z')
    z = tf.reshape(z, [batch_size, 4, 4, 256])
    z = tcl.batch_norm(z)
    z = tf.nn.relu(z)

    conv1 = tcl.convolution2d_transpose(
        z,
        128,
        5,
        2,
        normalizer_fn=tcl.batch_norm,
        activation_fn=tf.nn.relu,
        weights_initializer=tf.random_normal_initializer(stddev=0.02),
        scope='g_conv1')

    conv2 = tcl.convolution2d_transpose(
        conv1,
        64,
        5,
        2,
        normalizer_fn=tcl.batch_norm,
        activation_fn=tf.nn.relu,
        weights_initializer=tf.random_normal_initializer(stddev=0.02),
        scope='g_conv2')

    # NOTE(review): the output layer keeps batch norm + ReLU, so pixels are
    # non-negative and unbounded — confirm the downstream loss expects that.
    conv3 = tcl.convolution2d_transpose(
        conv2,
        1,
        5,
        2,
        normalizer_fn=tcl.batch_norm,
        activation_fn=tf.nn.relu,
        weights_initializer=tf.random_normal_initializer(stddev=0.02),
        scope='g_conv3')

    # Crop the 32x32 output down to 28x28.
    conv3 = conv3[:, :28, :28, :]

    print('z:', z)
    print('conv1:', conv1)
    print('conv2:', conv2)
    print('conv3:', conv3)
    return conv3
    def get_network(self, input_tensor, is_training):
        """Build InceptionResnetV2 with a custom scoring/upsampling head.

        The pretrained backbone's output is rescored with a 1x1 conv
        (self.FEATURES outputs), upsampled by a stride-17 transposed
        convolution, and smoothed with a separable Gaussian filter.

        Returns:
            (net, feature): the smoothed upsampled map and the pre-upsampling
            scoring activations.
        """
        # Load pre-trained inception-resnet model
        with slim.arg_scope(inception_resnet_v2_arg_scope(batch_norm_decay = 0.999, weight_decay = 0.0001)):
            net, end_points = inception_resnet_v2(input_tensor, is_training = is_training)

        # Adding some modification to original InceptionResnetV2 - changing scoring of AUXILIARY TOWER
        weight_decay = 0.0005
        with tf.variable_scope('NewInceptionResnetV2'):
            with tf.variable_scope('AuxiliaryScoring'):
                # L2-regularize and disable activations for both conv layer
                # types created inside this scope.
                with slim.arg_scope([layers.convolution2d, layers.convolution2d_transpose],
                                    weights_regularizer = slim.l2_regularizer(weight_decay),
                                    biases_regularizer = slim.l2_regularizer(weight_decay),
                                    activation_fn = None):
                    tf.summary.histogram('Last_layer/activations', net, [KEY_SUMMARIES])

                    # Scoring
                    net = slim.dropout(net, 0.7, is_training = is_training, scope = 'Dropout')
                    net = layers.convolution2d(net, num_outputs = self.FEATURES, kernel_size = 1, stride = 1,
                                               scope = 'Scoring_layer')
                    # Keep a handle on the raw scores; returned alongside net.
                    feature = net
                    tf.summary.histogram('Scoring_layer/activations', net, [KEY_SUMMARIES])

                    # Upsampling
                    net = layers.convolution2d_transpose(net, num_outputs = 16, kernel_size = 17, stride = 17,
                                                         padding = 'VALID', scope = 'Upsampling_layer')

                    tf.summary.histogram('Upsampling_layer/activations', net, [KEY_SUMMARIES])

            # Smoothing layer - separable gaussian filters
            net = super()._get_gauss_smoothing_net(net, size = self.SMOOTH_SIZE, std = 1.0, kernel_sum = 0.2)

            return net, feature
Example #17
0
    def tconv_ly(self, bottom, in_channels, out_channels, name=None):
        """Reflection-padded 3x3 transposed convolution that preserves the
        spatial size of ``bottom`` while changing its depth to
        ``out_channels``.

        ``in_channels`` is unused but kept for interface compatibility.
        """
        with tf.variable_scope(name):
            shape = self.ten_sh(bottom)
            shape[-1] = out_channels
            # Reflect-pad 2px per side, run a VALID deconv, then slice the
            # result back to the original spatial extent.
            bottom = tf.pad(bottom, [[0, 0], [2, 2], [2, 2], [0, 0]],
                            "REFLECT")
            init = ly.xavier_initializer_conv2d()
            output = ly.convolution2d_transpose(inputs=bottom,
                                                num_outputs=out_channels,
                                                kernel_size=[3, 3],
                                                stride=[1, 1],
                                                padding='VALID',
                                                activation_fn=tf.nn.relu,
                                                weights_initializer=init,
                                                biases_initializer=init,
                                                trainable=True)
            output = tf.slice(output, begin=[0, 2, 2, 0], size=shape)
            # FIX: Python 2 print statement converted to a print() call so
            # this parses under Python 3, matching the rest of the file.
            print(self.ten_sh(output))

            return output
Example #18
0
    def get_network(self, input_tensor, is_training):
        """Run the pre-built human-pose ResNet (with variable reuse) and
        return its final output tensor.

        NOTE(review): the original body contained a scoring / upsampling /
        smoothing head AFTER an unconditional ``return net_end``, so it was
        unreachable dead code; it has been removed here. Behaviour is
        unchanged.
        """
        net_end, end_points = human_pose_resnet(input_tensor,
                                                reuse=True,
                                                training=is_training)
        return net_end
Example #19
0
def conv2d_transpose(inputs, num_outputs, kernel_size, stride, is_training, normalizer_fn, activation_fn, name):
    """Thin wrapper around layers.convolution2d_transpose that builds the
    layer inside an AUTO_REUSE variable scope and threads ``is_training``
    into the normalizer's parameters."""
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        return layers.convolution2d_transpose(
            inputs,
            num_outputs=num_outputs,
            kernel_size=kernel_size,
            stride=stride,
            normalizer_params={"is_training": is_training},
            normalizer_fn=normalizer_fn,
            activation_fn=activation_fn)
Example #20
0
def deconv2d(input_, o_size, k_size, name='deconv2d'):
    """Transposed convolution whose stride is derived from the ratio of the
    requested output shape ``o_size`` to the input's spatial shape.

    Raises:
        AssertionError: if o_size[1:3] is not an exact multiple of the
            input's spatial dimensions.
    """
    # FIX: Python 2 print statements converted to print() calls so this
    # parses under Python 3, matching the rest of the file.
    print(name, 'input', ten_sh(input_))
    print(name, 'output', o_size)
    assert np.sum(np.mod(o_size[1:3], ten_sh(input_)[1:3]) - [0, 0]) == 0
    with tf.variable_scope(name):
        init = ly.xavier_initializer_conv2d()
        # FIX: use integer division — np.divide yields float strides, which
        # the layer does not expect; divisibility is guaranteed by the
        # assert above, so floor division is exact.
        stride = np.floor_divide(o_size[1:3], ten_sh(input_)[1:3])
        output = ly.convolution2d_transpose(input_, num_outputs=o_size[-1],
                                            kernel_size=k_size,
                                            stride=stride,
                                            padding='SAME',
                                            weights_initializer=init)
        return output
Example #21
0
File: wgan.py Project: rmst/chi
 def generator(z):
     """Map a latent batch ``z`` through two batch-normalized fully
     connected layers and two stride-2 transposed convolutions to a
     single-channel sigmoid image (7x7 seed upsampled twice)."""
     # arg_scope set default arguments for certain layers inside that scope
     with arg_scope(
         [layers.fully_connected, layers.convolution2d_transpose],
             weights_initializer=layers.xavier_initializer(),
             weights_regularizer=layers.l2_regularizer(2.5e-5)):
         x = layers.fully_connected(z,
                                    1024,
                                    normalizer_fn=layers.batch_norm)
         x = layers.fully_connected(x,
                                    7 * 7 * 128,
                                    normalizer_fn=layers.batch_norm)
         # Reshape the flat activations into a 7x7 grid with 128 channels.
         x = tf.reshape(x, [-1, 7, 7, 128])
         x = layers.convolution2d_transpose(x,
                                            64, [4, 4], [2, 2],
                                            normalizer_fn=layers.batch_norm)
         # Output layer: sigmoid pixels, no batch norm.
         x = layers.convolution2d_transpose(x,
                                            1, [4, 4], [2, 2],
                                            activation_fn=tf.sigmoid)
         return x
Example #22
0
def netG16_decoder(layers, lab=False):
    """U-Net style decoder without batch norm.

    Eight stride-2 transposed convolutions progressively upsample the
    bottleneck ``enc_conv8``; after each of the first seven, the matching
    encoder activation is concatenated along the channel axis (skip
    connection). The final layer is tanh-activated.

    :param layers: the eight encoder activations [enc_conv1 .. enc_conv8].
    :param lab: if True emit 2 output channels (presumably the ab plane of
        Lab color — confirm with callers), else 3.
    :return: tanh-activated output image tensor.
    """
    enc_conv1, enc_conv2, enc_conv3, enc_conv4, enc_conv5, enc_conv6, enc_conv7, enc_conv8 = layers[0], layers[1], layers[2], layers[3], layers[4], layers[5], layers[6], layers[7]
    # decoder, no batch norm
    dec_conv1 = tcl.convolution2d_transpose(enc_conv8, 512, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv1')
    dec_conv1 = relu(dec_conv1)
    dec_conv1 = tf.concat([dec_conv1, enc_conv7], axis=3)
    print (dec_conv1)
    dec_conv2 = tcl.convolution2d_transpose(dec_conv1, 512, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv2')
    dec_conv2 = relu(dec_conv2)
    dec_conv2 = tf.concat([dec_conv2, enc_conv6], axis=3)
    print (dec_conv2)
    dec_conv3 = tcl.convolution2d_transpose(dec_conv2, 512, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv3')
    dec_conv3 = relu(dec_conv3)
    dec_conv3 = tf.concat([dec_conv3, enc_conv5], axis=3)
    print (dec_conv3)
    dec_conv4 = tcl.convolution2d_transpose(dec_conv3, 512, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv4')
    dec_conv4 = relu(dec_conv4)
    dec_conv4 = tf.concat([dec_conv4, enc_conv4], axis=3)
    print (dec_conv4)
    dec_conv5 = tcl.convolution2d_transpose(dec_conv4, 256, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv5')
    dec_conv5 = relu(dec_conv5)
    dec_conv5 = tf.concat([dec_conv5, enc_conv3], axis=3)
    print (dec_conv5)
    dec_conv6 = tcl.convolution2d_transpose(dec_conv5, 128, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv6')
    dec_conv6 = relu(dec_conv6)
    dec_conv6 = tf.concat([dec_conv6, enc_conv2], axis=3)
    print (dec_conv6)
    dec_conv7 = tcl.convolution2d_transpose(dec_conv6, 64, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv7')
    dec_conv7 = relu(dec_conv7)
    dec_conv7 = tf.concat([dec_conv7, enc_conv1], axis=3)
    print (dec_conv7)
    c = 2 if lab else 3
    dec_conv8 = tcl.convolution2d_transpose(dec_conv7, c, 4, 2, activation_fn=tf.identity, weights_initializer=tf.random_normal_initializer(stddev=0.02), scope='g_dec_conv8')
    dec_conv8 = tanh(dec_conv8)
    # Fix: previously re-printed dec_conv1 here instead of the final layer.
    print (dec_conv8)

    return dec_conv8
Example #23
0
def netG_decoder(layers, reuse=False):
    """U-Net style decoder without batch norm (3-channel output).

    Eight stride-2 transposed convolutions upsample the bottleneck
    ``enc_conv8``; each of the first seven is followed by a skip
    concatenation with the matching encoder activation. The final layer
    is tanh-activated.

    Fix: the original used Python-2-only ``print 'x:', x`` statements
    (SyntaxError under Python 3); they are replaced with output-identical
    ``print(...)`` calls.

    :param layers: the eight encoder activations [enc_conv1 .. enc_conv8].
    :param reuse: reuse variables of the current variable scope.
    :return: tanh-activated output image tensor.
    """
    sc = tf.get_variable_scope()
    with tf.variable_scope(sc, reuse=reuse):

        enc_conv1 = layers[0]
        enc_conv2 = layers[1]
        enc_conv3 = layers[2]
        enc_conv4 = layers[3]
        enc_conv5 = layers[4]
        enc_conv6 = layers[5]
        enc_conv7 = layers[6]
        enc_conv8 = layers[7]

        # decoder, no batch norm
        dec_conv1 = tcl.convolution2d_transpose(
            enc_conv8,
            512,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv1')
        dec_conv1 = relu(dec_conv1)
        dec_conv1 = tf.concat([dec_conv1, enc_conv7], axis=3)
        print('dec_conv1: {}'.format(dec_conv1))

        dec_conv2 = tcl.convolution2d_transpose(
            dec_conv1,
            512,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv2')
        dec_conv2 = relu(dec_conv2)
        dec_conv2 = tf.concat([dec_conv2, enc_conv6], axis=3)
        print('dec_conv2: {}'.format(dec_conv2))

        dec_conv3 = tcl.convolution2d_transpose(
            dec_conv2,
            512,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv3')
        dec_conv3 = relu(dec_conv3)
        dec_conv3 = tf.concat([dec_conv3, enc_conv5], axis=3)
        print('dec_conv3: {}'.format(dec_conv3))

        dec_conv4 = tcl.convolution2d_transpose(
            dec_conv3,
            512,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv4')
        dec_conv4 = relu(dec_conv4)
        dec_conv4 = tf.concat([dec_conv4, enc_conv4], axis=3)
        print('dec_conv4: {}'.format(dec_conv4))

        dec_conv5 = tcl.convolution2d_transpose(
            dec_conv4,
            256,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv5')
        dec_conv5 = relu(dec_conv5)
        dec_conv5 = tf.concat([dec_conv5, enc_conv3], axis=3)
        print('dec_conv5: {}'.format(dec_conv5))

        dec_conv6 = tcl.convolution2d_transpose(
            dec_conv5,
            128,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv6')
        dec_conv6 = relu(dec_conv6)
        dec_conv6 = tf.concat([dec_conv6, enc_conv2], axis=3)
        print('dec_conv6: {}'.format(dec_conv6))

        dec_conv7 = tcl.convolution2d_transpose(
            dec_conv6,
            64,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv7')
        dec_conv7 = relu(dec_conv7)
        dec_conv7 = tf.concat([dec_conv7, enc_conv1], axis=3)
        print('dec_conv7: {}'.format(dec_conv7))

        dec_conv8 = tcl.convolution2d_transpose(
            dec_conv7,
            3,
            4,
            2,
            activation_fn=tf.identity,
            weights_initializer=tf.random_normal_initializer(stddev=0.02),
            scope='g_dec_conv8')
        dec_conv8 = tanh(dec_conv8)
        print('dec_conv8 {}'.format(dec_conv8))

        return dec_conv8
Example #24
0
def Autoencoder(input_tensor):
    """Two-layer convolutional autoencoder.

    Encoder: conv1 (stride 1, 2048 ch) then conv2 (stride 2, 4192 ch).
    Decoder: the mirrored transposed convolutions, built under the same
    variable-scope names.

    :param input_tensor: input image batch (comment suggests [1, 8, 8, 2048]).
    :return: (output_tensor, embedding_tensor)
    """
    with tf.variable_scope('autoencoder'):
        pad = 'SAME'

        # ---------------- encoder ----------------

        #1,8,8,2048
        with tf.variable_scope('conv1'):
            net = convolution2d(inputs=input_tensor, num_outputs=2048,
                                kernel_size=3, stride=1, padding=pad, rate=1,
                                activation_fn=tf.nn.relu,
                                weights_initializer=xavier_initializer())
            print("Dimensions conv1: ", net.get_shape())

        with tf.variable_scope('conv2'):
            embedding_tensor = convolution2d(inputs=net, num_outputs=4192,
                                             kernel_size=3, stride=2,
                                             padding=pad, rate=1,
                                             activation_fn=tf.nn.relu,
                                             weights_initializer=xavier_initializer())
            print("Dimensions conv2: ", embedding_tensor.get_shape())

        # ---------------- decoder ----------------

        with tf.variable_scope('conv2'):
            net = convolution2d_transpose(inputs=embedding_tensor,
                                          num_outputs=4192, kernel_size=3,
                                          stride=2, padding=pad,
                                          activation_fn=tf.nn.relu,
                                          weights_initializer=xavier_initializer())
            print("Dimensions deconv2: ", net.get_shape())

        with tf.variable_scope('conv1'):
            output_tensor = convolution2d_transpose(inputs=net,
                                                    num_outputs=2048,
                                                    kernel_size=3, stride=1,
                                                    padding=pad,
                                                    activation_fn=tf.nn.relu,
                                                    weights_initializer=xavier_initializer())
            print("Dimensions deconv1: ", output_tensor.get_shape())

        return output_tensor, embedding_tensor
Example #25
0
def run_network(inpt,
                string,
                is_training,
                use_batch_norm,
                debug=False,
                strip_batchnorm_from_last_layer=False):
    """Build a network from a comma-separated layer-specification string.

    Each item of ``string`` is one of:
      conv:<k>:<stride>:<outputs>:<nonlin>    2-D convolution
      deconv:<k>:<stride>:<outputs>:<nonlin>  transposed convolution
      fc:<outputs>[:<nonlin>]                 fully-connected (default relu)
      reshape:<d1>:<d2>:...                   reshape to [-1, d1, d2, ...]

    :param inpt: input tensor.
    :param string: architecture description, e.g. "conv:5:2:64:relu,fc:10".
    :param is_training: forwarded to the batch-norm normalizers.
    :param use_batch_norm: enable batch normalization on conv/fc layers.
    :param debug: print a human-readable trace of each layer built.
    :param strip_batchnorm_from_last_layer: drop batch norm on the final layer.
    :return: the output tensor of the last layer.
    :raises ValueError: if a layer description cannot be parsed.
    """
    # Two normalizers are selected when use_batch_norm is set:
    # layers.batch_norm normalizes fully-connected outputs, while
    # conv_batch_norm normalizes convolutional outputs.
    maybe_fc_batch_norm = layers.batch_norm if use_batch_norm else None
    maybe_conv_batch_norm = conv_batch_norm if use_batch_norm else None

    if debug:
        print("%s architecture" % (tf.get_variable_scope().name, ))

    layer_idx = 0

    out = inpt
    layer_strs = string.split(",")
    for i, layer in enumerate(layer_strs):
        # Optionally skip batch norm on the last layer (the normalizers
        # are nulled for the remainder of the loop — last iteration only).
        if i + 1 == len(layer_strs) and strip_batchnorm_from_last_layer:
            maybe_fc_batch_norm = None
            maybe_conv_batch_norm = None

        # Convolution layer.
        if layer.startswith("conv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(
                layer[len("conv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                normalizer_params={"is_training": is_training},
                normalizer_fn=maybe_conv_batch_norm,
                activation_fn=nonlinearity,
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1

            if debug:
                print(
                    "Convolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))
        # Transposed-convolution (deconvolution) layer.
        # For an N1xN1 input, N2xN2 kernel and stride K the output is
        # (N1-1)*K+N2 — see http://blog.csdn.net/fate_fjh/article/details/52882134
        elif layer.startswith("deconv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(
                layer[len("deconv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d_transpose(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                activation_fn=nonlinearity,
                normalizer_fn=maybe_conv_batch_norm,
                normalizer_params={"is_training": is_training},
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print(
                    "Deconvolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))
        # Fully-connected layer. Note: tf.contrib's fully_connected omits
        # the bias when a normalizer_fn is supplied, and relu is its
        # default activation.
        elif layer.startswith("fc:"):
            params = layer[len("fc:"):].split(":")
            nonlinearity_str = 'relu'
            if len(params) == 2:
                params, nonlinearity_str = params[:-1], params[-1]
            num_outputs = parse_math(params[0])
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.fully_connected(out,
                                         num_outputs=num_outputs,
                                         activation_fn=nonlinearity,
                                         normalizer_fn=maybe_fc_batch_norm,
                                         normalizer_params={
                                             "is_training": is_training,
                                             "updates_collections": None
                                         },
                                         scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print("Fully connected with num_outputs=%d followed by %s" %
                      (num_outputs, nonlinearity_str))
        # Reshape layer.
        elif layer.startswith("reshape:"):
            params = layer[len("reshape:"):].split(":")
            dims = [parse_math(dim) for dim in params]
            out = tf.reshape(out, [-1] + dims)
            if debug:
                print("Reshape to %r" % (dims, ))
        else:
            raise ValueError("Could not parse layer description: %r" %
                             (layer, ))
    if debug:
        print("")
    return out
Example #26
0
def human_pose_resnet(net, reuse=False, training=False):
    """ResNet-style pose network: a ResNet-101-like trunk followed by a
    1x1 conv to 16 channels and a 16x transposed-conv upsampling.

    :param net: input image tensor.
    :param reuse: whether to reuse variables in every scope below.
    :param training: forwarded to batch norm's ``is_training``.
    :return: (output tensor, dict of named endpoints).
    """
    def batch_normalization(input_net, act_f=None, scope=None):
        # Batch norm with learned center/scale and an optional fused
        # activation; training mode follows the outer `training` flag.
        return layers.batch_norm(input_net,
                                 center=True,
                                 scale=True,
                                 epsilon=1e-5,
                                 activation_fn=act_f,
                                 is_training=training,
                                 scope=scope)

    def conv_2d(input_net,
                num_outputs,
                kernel_size,
                stride=1,
                padding='SAME',
                scope=None):
        # Plain convolution, no activation (batch norm supplies it).
        # NOTE(review): this parameter shadows the `padding` helper below
        # inside this function's body; harmless here since conv_2d never
        # calls it, but easy to trip over.
        return layers.convolution2d(input_net,
                                    num_outputs=num_outputs,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=padding,
                                    activation_fn=None,
                                    scope=scope)

    def padding(input_net, w, h):
        # Symmetric zero-padding: h rows top/bottom, w columns left/right.
        return tf.pad(input_net, [[0, 0], [h, h], [w, w], [0, 0]], "CONSTANT")

    def bottleneck(input_net, depth, depth_bottleneck, stride, i):
        # Standard ResNet bottleneck (1x1 reduce, 3x3, 1x1 expand).
        # Encoding: stride > 1 -> strided block with projection shortcut;
        # stride < 0 (e.g. -1) -> projection shortcut at stride 1;
        # stride == 1 -> identity shortcut.
        with tf.variable_scope('Bottleneck_%d' % i, reuse=reuse):
            res_conv = stride > 1 or stride < 0
            stride = abs(stride)

            # Res connection
            out_net = conv_2d(input_net,
                              num_outputs=depth_bottleneck,
                              kernel_size=1,
                              stride=1,
                              padding='VALID',
                              scope='Conv_1')

            out_net = batch_normalization(out_net, tf.nn.relu, 'BatchNorm_1')

            # Explicit 1-pixel pad + VALID conv instead of SAME padding.
            out_net = padding(out_net, 1, 1)

            out_net = conv_2d(out_net,
                              num_outputs=depth_bottleneck,
                              kernel_size=3,
                              stride=stride,
                              padding='VALID',
                              scope='Conv_2')

            out_net = batch_normalization(out_net, tf.nn.relu, 'BatchNorm_2')

            out_net = conv_2d(out_net,
                              num_outputs=depth,
                              kernel_size=1,
                              stride=1,
                              padding='VALID',
                              scope='Conv_3')

            out_net = batch_normalization(out_net, scope='BatchNorm_3')

            # Skip connection
            if res_conv:
                input_net = conv_2d(input_net,
                                    num_outputs=depth,
                                    kernel_size=1,
                                    stride=stride,
                                    padding='VALID',
                                    scope='Conv_skip')

                input_net = batch_normalization(input_net,
                                                scope='BatchNorm_skip')

            out_net += input_net
            out_net = tf.nn.relu(out_net)

            return out_net

    def repeat_bottleneck(input_net, all_params):
        # Chain bottleneck blocks; each tuple is (depth, bottleneck, stride).
        for i, (depth, depth_bottleneck, stride) in enumerate(all_params):
            input_net = bottleneck(input_net, depth, depth_bottleneck, stride,
                                   i)

        return input_net

    end_points = {}

    with tf.variable_scope('HumanPoseResnet', reuse=reuse):
        # Stem: 7x7/2 conv + 3x3/2 max pool.
        with tf.variable_scope('Block_0', reuse=reuse):
            net = padding(net, 3, 3)

            net = conv_2d(net,
                          num_outputs=64,
                          kernel_size=7,
                          stride=2,
                          padding='VALID')

            net = batch_normalization(net, tf.nn.relu)

            net = padding(net, 1, 1)

            net = layers.max_pool2d(net, 3, 2, padding='VALID')

        # Block sizes 3/8/36/3 match a ResNet-101-style trunk.
        with tf.variable_scope('Block_1', reuse=reuse):
            net = repeat_bottleneck(net, [(256, 64, -1)] + [(256, 64, 1)] * 2)

        with tf.variable_scope('Block_2', reuse=reuse):
            net = repeat_bottleneck(net, [(512, 128, 2)] + [(512, 128, 1)] * 7)

        with tf.variable_scope('Block_3', reuse=reuse):
            net = repeat_bottleneck(net,
                                    [(1024, 256, 2)] + [(1024, 256, 1)] * 35)

        with tf.variable_scope('Block_4', reuse=reuse):
            net = repeat_bottleneck(net,
                                    [(2048, 512, -1)] + [(2048, 512, 1)] * 2)

        end_points['resnet_end'] = net
        # Head: 1x1 conv to 16 channels (presumably one map per body
        # joint — TODO confirm), then 16x learned upsampling.
        with tf.variable_scope('Block_5', reuse=reuse):
            net = conv_2d(net,
                          num_outputs=16,
                          kernel_size=1,
                          stride=1,
                          padding='VALID')

            net = layers.convolution2d_transpose(net,
                                                 num_outputs=16,
                                                 kernel_size=16,
                                                 stride=16,
                                                 activation_fn=None,
                                                 padding='VALID')

            # net = tf.nn.sigmoid(net)

        return net, end_points
Example #27
0
def ema_ae(input_image):
    """Six-layer convolutional autoencoder.

    Encoder: conv1..conv6 with strides 1,2,1,2,1,2 down to a 32-channel
    embedding. Decoder: the mirrored transposed convolutions, entered
    under the same variable-scope names in reverse order.

    NOTE(review): despite the "shared weights" wording, the decoder's
    transposed-conv layers create their own variables inside the reused
    scope names — confirm whether true weight tying was intended.

    :param input_image: original image batch tensor.
    :return: (output_image, embedding_tensor)
    """
    with tf.variable_scope('autoencoder'):
        # Fix: removed dead `out = input`, which referenced the `input`
        # builtin (not the image argument) and was immediately overwritten
        # by the first conv layer below.
        pad = 'SAME'

        #####################
        ###    ENCODER    ###
        #####################

        with tf.variable_scope('conv1'):
            out = convolution2d(inputs=input_image, num_outputs=1, kernel_size=3, stride=1, padding=pad, rate=1,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv2'):
            out = convolution2d(inputs=out, num_outputs=1, kernel_size=3, stride=2, padding=pad, rate=1,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv3'):
            out = convolution2d(inputs=out, num_outputs=16, kernel_size=3, stride=1, padding=pad, rate=1,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv4'):
            out = convolution2d(inputs=out, num_outputs=16, kernel_size=3, stride=2, padding=pad, rate=1,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv5'):
            out = convolution2d(inputs=out, num_outputs=32, kernel_size=3, stride=1, padding=pad, rate=1,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv6'):
            embedding_tensor = convolution2d(inputs=out, num_outputs=32, kernel_size=3, stride=2, padding=pad, rate=1,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        #####################
        ###    DECODER    ###
        #####################

        with tf.variable_scope('conv6'):
            out = convolution2d_transpose(inputs=embedding_tensor, num_outputs=32, kernel_size=3, stride=2, padding=pad,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv5'):
            out = convolution2d_transpose(inputs=out, num_outputs=32, kernel_size=3, stride=1, padding=pad,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv4'):
            out = convolution2d_transpose(inputs=out, num_outputs=16, kernel_size=3, stride=2, padding=pad,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv3'):
            out = convolution2d_transpose(inputs=out, num_outputs=16, kernel_size=3, stride=1, padding=pad,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv2'):
            out = convolution2d_transpose(inputs=out, num_outputs=1, kernel_size=3, stride=2, padding=pad,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        with tf.variable_scope('conv1'):
            output_image = convolution2d_transpose(inputs=out, num_outputs=1, kernel_size=3, stride=1, padding=pad,
                                activation_fn=tf.nn.relu, weights_initializer=xavier_initializer())

        return output_image, embedding_tensor
Example #28
0
    def __call__(self, z, reuse=True):
        """Generate images from latent codes ``z``.

        z -> fc 1024 -> fc 8*8*128 -> reshape -> four stride-2 transposed
        convolutions (8 -> 16 -> 32 -> 64 -> 128 spatially) -> sigmoid
        3-channel transposed conv, flattened to [batch, -1].
        """
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()
            batch_size = tf.shape(z)[0]
            # Trailing 10 entries of z are the label part of the code;
            # currently unused (the concat/cond-concat lines were disabled).
            y = z[:, -10:]

            def _bn(t):
                # Shared batch-norm configuration used after every hidden
                # layer; activation (relu) is applied by the caller.
                return tc.layers.batch_norm(t,
                                            decay=0.9,
                                            scale=True,
                                            updates_collections=None,
                                            is_training=self.is_training)

            h = tcl.fully_connected(
                z,
                1024,
                weights_initializer=tf.random_normal_initializer(stddev=0.02),
                weights_regularizer=tc.layers.l2_regularizer(2.5e-5),
                activation_fn=tf.identity)
            h = tf.nn.relu(_bn(h))

            h = tcl.fully_connected(
                h,
                8 * 8 * 128,
                weights_initializer=tf.random_normal_initializer(stddev=0.02),
                weights_regularizer=tc.layers.l2_regularizer(2.5e-5),
                activation_fn=tf.identity)
            h = tf.reshape(h, tf.stack([batch_size, 8, 8, 128]))
            h = tf.nn.relu(_bn(h))

            # Four identical upsampling stages: 8 -> 16 -> 32 -> 64 -> 128.
            # (Same layer count and build order as before, so the
            # auto-generated variable-scope names are unchanged.)
            for _ in range(4):
                h = tcl.convolution2d_transpose(
                    h,
                    64, [4, 4], [2, 2],
                    weights_initializer=tf.random_normal_initializer(
                        stddev=0.02),
                    weights_regularizer=tc.layers.l2_regularizer(2.5e-5),
                    activation_fn=tf.identity)
                h = tf.nn.relu(_bn(h))

            out = tcl.convolution2d_transpose(
                h,
                3, [4, 4], [2, 2],
                weights_initializer=tf.random_normal_initializer(stddev=0.02),
                weights_regularizer=tc.layers.l2_regularizer(2.5e-5),
                activation_fn=tf.nn.sigmoid)
            # Sigmoid keeps pixel values in (0, 1); flatten per sample.
            return tf.reshape(out, [batch_size, -1])
def human_pose_resnet(net, reuse=False, training=False):
    """
    Architecture of Part Detector network, as was described in https://arxiv.org/abs/1609.01743
    
    :param net: input tensor
    :param reuse: whether reuse variables or not. Use False if the variables are initialized with init_model_variables
    :param training: if the variables should be trainable. It has no effect if the 'reuse' param is set to True
    :return: output tensor and dictionary of named endpoints
    """
    def batch_normalization(input_net, act_f=None, scope=None):
        # Batch norm with learned center/scale and an optional fused
        # activation; training mode follows the outer `training` flag.
        return layers.batch_norm(input_net,
                                 center=True,
                                 scale=True,
                                 epsilon=1e-5,
                                 activation_fn=act_f,
                                 is_training=training,
                                 scope=scope)

    def conv_2d(input_net,
                num_outputs,
                kernel_size,
                stride=1,
                padding_mod='SAME',
                scope=None):
        # Plain convolution, no activation (batch norm supplies it).
        # Named padding_mod to avoid shadowing the `padding` helper below.
        return layers.convolution2d(input_net,
                                    num_outputs=num_outputs,
                                    kernel_size=kernel_size,
                                    stride=stride,
                                    padding=padding_mod,
                                    activation_fn=None,
                                    scope=scope)

    def padding(input_net, w, h):
        # Symmetric zero-padding: h rows top/bottom, w columns left/right.
        return tf.pad(input_net, [[0, 0], [h, h], [w, w], [0, 0]], "CONSTANT")

    def bottleneck(input_net, depth, depth_bottleneck, stride, i):
        # Standard ResNet bottleneck (1x1 reduce, 3x3, 1x1 expand).
        # Encoding: stride > 1 -> strided block with projection shortcut;
        # stride < 0 (e.g. -1) -> projection shortcut at stride 1;
        # stride == 1 -> identity shortcut.
        with tf.variable_scope('Bottleneck_%d' % i, reuse=reuse):
            res_conv = stride > 1 or stride < 0
            stride = abs(stride)

            # Res connection
            out_net = conv_2d(input_net,
                              num_outputs=depth_bottleneck,
                              kernel_size=1,
                              stride=1,
                              padding_mod='VALID',
                              scope='Conv_1')

            out_net = batch_normalization(out_net, tf.nn.relu, 'BatchNorm_1')

            # Explicit 1-pixel pad + VALID conv instead of SAME padding.
            out_net = padding(out_net, 1, 1)

            out_net = conv_2d(out_net,
                              num_outputs=depth_bottleneck,
                              kernel_size=3,
                              stride=stride,
                              padding_mod='VALID',
                              scope='Conv_2')

            out_net = batch_normalization(out_net, tf.nn.relu, 'BatchNorm_2')

            out_net = conv_2d(out_net,
                              num_outputs=depth,
                              kernel_size=1,
                              stride=1,
                              padding_mod='VALID',
                              scope='Conv_3')

            out_net = batch_normalization(out_net, scope='BatchNorm_3')

            # Skip connection
            if res_conv:
                input_net = conv_2d(input_net,
                                    num_outputs=depth,
                                    kernel_size=1,
                                    stride=stride,
                                    padding_mod='VALID',
                                    scope='Conv_skip')

                input_net = batch_normalization(input_net,
                                                scope='BatchNorm_skip')

            out_net += input_net
            out_net = tf.nn.relu(out_net)

            return out_net

    def repeat_bottleneck(input_net, all_params):
        # Chain bottleneck blocks; each tuple is (depth, bottleneck, stride).
        for i, (depth, depth_bottleneck, stride) in enumerate(all_params):
            input_net = bottleneck(input_net, depth, depth_bottleneck, stride,
                                   i)

        return input_net

    end_points = {}

    with tf.variable_scope('HumanPoseResnet', reuse=reuse):
        # Stem: 7x7/2 conv + 3x3/2 max pool.
        with tf.variable_scope('Block_0', reuse=reuse):
            net = padding(net, 3, 3)

            net = conv_2d(net,
                          num_outputs=64,
                          kernel_size=7,
                          stride=2,
                          padding_mod='VALID')

            net = batch_normalization(net, tf.nn.relu)

            net = padding(net, 1, 1)

            net = layers.max_pool2d(net, 3, 2, padding='VALID')

        # Block sizes 3/8/36/3 match a ResNet-101-style trunk.
        with tf.variable_scope('Block_1', reuse=reuse):
            net = repeat_bottleneck(net, [(256, 64, -1)] + [(256, 64, 1)] * 2)

        with tf.variable_scope('Block_2', reuse=reuse):
            net = repeat_bottleneck(net, [(512, 128, 2)] + [(512, 128, 1)] * 7)

        with tf.variable_scope('Block_3', reuse=reuse):
            net = repeat_bottleneck(net,
                                    [(1024, 256, 2)] + [(1024, 256, 1)] * 35)

        with tf.variable_scope('Block_4', reuse=reuse):
            net = repeat_bottleneck(net,
                                    [(2048, 512, -1)] + [(2048, 512, 1)] * 2)

        end_points['resnet_end'] = net
        # Head: 1x1 conv to 16 channels (presumably one map per body
        # part — TODO confirm against the paper), exposed as 'features',
        # then 16x learned upsampling back to input resolution.
        with tf.variable_scope('Block_5', reuse=reuse):
            net = conv_2d(net,
                          num_outputs=16,
                          kernel_size=1,
                          stride=1,
                          padding_mod='VALID')
            end_points['features'] = net

            net = layers.convolution2d_transpose(net,
                                                 num_outputs=16,
                                                 kernel_size=16,
                                                 stride=16,
                                                 activation_fn=None,
                                                 padding='VALID')

            # net = tf.nn.sigmoid(net)

        return net, end_points
Example #30
0
def run_network(inpt,
                string,
                is_training,
                use_batch_norm,
                debug=False,
                strip_batchnorm_from_last_layer=False):
    """Build a network by interpreting a comma-separated layer-spec string.

    Each element of ``string`` is one of:
      conv:<k>:<stride>:<out>[:<nonlinearity>]    2-D convolution
      deconv:<k>:<stride>:<out>[:<nonlinearity>]  2-D transposed convolution
      fc:<out>[:<nonlinearity>]                   fully connected layer
      reshape:<d1>:<d2>:...                       reshape to [-1, d1, d2, ...]

    Args:
        inpt: input tensor.
        string: comma-separated layer specification.
        is_training: forwarded to the batch-norm normalizer params.
        use_batch_norm: if True, apply batch normalization after each layer.
        debug: if True, print a human-readable description of each layer.
        strip_batchnorm_from_last_layer: if True, disable batch norm on the
            final layer (common for generator/discriminator outputs).

    Returns:
        The output tensor of the last constructed layer.

    Raises:
        ValueError: if a layer description cannot be parsed.
    """
    maybe_fc_batch_norm = layers.batch_norm if use_batch_norm else None
    maybe_conv_batch_norm = conv_batch_norm if use_batch_norm else None

    if debug:
        print('%s architecture' % (tf.get_variable_scope().name, ))

    layer_idx = 0

    out = inpt
    layer_strs = string.split(',')
    for i, layer in enumerate(layer_strs):
        # Drop normalizers for the last layer only (they were set above).
        if i + 1 == len(layer_strs) and strip_batchnorm_from_last_layer:
            maybe_fc_batch_norm = None
            maybe_conv_batch_norm = None

        if layer.startswith('conv:'):
            nkernels, stride, num_outputs, nonlinearity_str = \
                parse_conv_params(layer[len("conv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                normalizer_params={'is_training': is_training},
                normalizer_fn=maybe_conv_batch_norm,
                activation_fn=nonlinearity,
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1

            if debug:
                print(
                    "Convolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))

        elif layer.startswith('deconv:'):
            # BUG FIX: the original indexed a single character,
            # layer[len("deconv:")], instead of slicing off the prefix,
            # so deconv parameters were parsed from one character and the
            # branch could never build a correct transposed convolution.
            nkernels, stride, num_outputs, nonlinearity_str = \
                parse_conv_params(layer[len("deconv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.convolution2d_transpose(
                out,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                activation_fn=nonlinearity,
                normalizer_fn=maybe_conv_batch_norm,
                normalizer_params={'is_training': is_training},
                scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print(
                    "Deconvolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s"
                    % (nkernels, stride, num_outputs, nonlinearity_str))

        elif layer.startswith('fc:'):
            params = layer[len('fc:'):].split(':')
            nonlinearity_str = 'relu'  # default when no nonlinearity given
            if len(params) == 2:
                params, nonlinearity_str = params[:-1], params[-1]
            num_outputs = parse_math(params[0])
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            out = layers.fully_connected(out,
                                         num_outputs=num_outputs,
                                         activation_fn=nonlinearity,
                                         normalizer_fn=maybe_fc_batch_norm,
                                         normalizer_params={
                                             'is_training': is_training,
                                             'updates_collections': None
                                         },
                                         scope='layer_%d' % (layer_idx, ))
            layer_idx += 1
            if debug:
                print("Fully connected with num_outputs=%d followed by %s" %
                      (num_outputs, nonlinearity_str))

        elif layer.startswith('reshape:'):
            params = layer[len('reshape:'):].split(":")
            dims = [parse_math(dim) for dim in params]
            # -1 keeps the batch dimension flexible.
            out = tf.reshape(out, [-1] + dims)
            if debug:
                print('Reshape to %r' % (dims, ))

        else:
            raise ValueError("Could not parse layer description: %r" %
                             (layer, ))

    if debug:
        print('')
    return out
Example #31
0
def run_network(inpt, string, is_training, debug=False, strip_batchnorm_from_last_layer=False):
    """Assemble a network from a comma-separated layer specification.

    Recognized specs: ``conv:...``, ``deconv:...``, ``fc:...`` and
    ``reshape:...``.  Batch normalization follows every layer, except
    (optionally) the final one.  Returns the last layer's output tensor;
    raises ValueError on an unrecognized spec.
    """
    fc_normalizer = layers.batch_norm
    conv_normalizer = conv_batch_norm

    if debug:
        print ("%s architecture" % (tf.get_variable_scope().name,))

    idx = 0
    net = inpt
    specs = string.split(",")
    last_pos = len(specs) - 1

    for pos, spec in enumerate(specs):
        # Optionally run the last layer without any normalizer.
        if strip_batchnorm_from_last_layer and pos == last_pos:
            fc_normalizer = None
            conv_normalizer = None

        if spec.startswith("conv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(spec[len("conv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            net = layers.convolution2d(
                net,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                normalizer_params={"is_training": is_training},
                normalizer_fn=conv_normalizer,
                activation_fn=nonlinearity,
                scope='layer_%d' % (idx,)
            )
            idx += 1

            if debug:
                print ("Convolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s" %
                        (nkernels, stride, num_outputs, nonlinearity_str))

        elif spec.startswith("deconv:"):
            nkernels, stride, num_outputs, nonlinearity_str = parse_conv_params(spec[len("deconv:"):].split(":"))
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            net = layers.convolution2d_transpose(
                net,
                num_outputs=num_outputs,
                kernel_size=nkernels,
                stride=stride,
                activation_fn=nonlinearity,
                normalizer_fn=conv_normalizer,
                normalizer_params={"is_training": is_training},
                scope='layer_%d' % (idx,)
            )
            idx += 1
            if debug:
                print ("Deconvolution with nkernels=%d, stride=%d, num_outputs=%d followed by %s" %
                        (nkernels, stride, num_outputs, nonlinearity_str))
        elif spec.startswith("fc:"):
            pieces = spec[len("fc:"):].split(":")
            nonlinearity_str = 'relu'  # default activation for fc layers
            if len(pieces) == 2:
                pieces, nonlinearity_str = pieces[:-1], pieces[-1]
            num_outputs = parse_math(pieces[0])
            nonlinearity = NONLINEARITY_NAME_TO_F[nonlinearity_str]

            net = layers.fully_connected(
                net,
                num_outputs=num_outputs,
                activation_fn=nonlinearity,
                normalizer_fn=fc_normalizer,
                normalizer_params={"is_training": is_training, "updates_collections": None},
                scope='layer_%d' % (idx,)
            )
            idx += 1
            if debug:
                print ("Fully connected with num_outputs=%d followed by %s" %
                        (num_outputs, nonlinearity_str))
        elif spec.startswith("reshape:"):
            dims = [parse_math(dim) for dim in spec[len("reshape:"):].split(":")]
            net = tf.reshape(net, [-1] + dims)  # keep batch dim flexible
            if debug:
                print("Reshape to %r" % (dims,))
        else:
            raise ValueError("Could not parse layer description: %r" % (spec,))
    if debug:
        print("")
    return net