Example #1
def mlp_net(train_data, is_training, reuse_mode=False, bn_mode=False):
    with tf.variable_scope('mlp', reuse=reuse_mode):
        # b*38*22 -> b*836 -> b*512 -> b*512 -> b*34
        inputs = tf.reshape(train_data, [-1, 836])
        layer1 = tf_util2.fully_connection('l1',
                                           inputs,
                                           512,
                                           is_training,
                                           reuse_mode,
                                           tf.nn.relu,
                                           0.001,
                                           use_bn=bn_mode)
        layer2 = tf_util2.fully_connection('l2',
                                           layer1,
                                           512,
                                           is_training,
                                           reuse_mode,
                                           tf.nn.relu,
                                           0.001,
                                           use_bn=bn_mode)
        layer3 = tf_util2.fully_connection('l3',
                                           layer2,
                                           34,
                                           is_training,
                                           reuse_mode,
                                           None,
                                           0.001,
                                           use_bn=False)
    return layer3
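
tf_util2 itself is not listed on this page. The sketch below is one plausible reconstruction of fully_connection, inferred from the call sites above (positional order: name, inputs, units, is_training, reuse, activation, weight decay, initializer); treat it as an assumption, not the actual library code.

import tensorflow as tf

def fully_connection(name, inputs, units, is_training, reuse,
                     activation_fun=None, weight_decay=0.001,
                     initializer='xavier', use_bn=False, bn_momentum=0.99):
    # Hypothetical reading: dense -> optional batch norm -> activation,
    # so BN sits between the linear map and the non-linearity.
    # The initializer string is accepted but not interpreted in this sketch.
    with tf.variable_scope(name, reuse=reuse):
        out = tf.layers.dense(
            inputs, units,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
        if use_bn:
            out = tf.layers.batch_normalization(
                out, momentum=bn_momentum, training=is_training)
        if activation_fun is not None:
            out = activation_fun(out)
    return out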
Example #2
def discriminator(inputs, is_training, reuse_mode):
    with tf.variable_scope('discriminator', reuse=reuse_mode):
        # b x 28 x 28 x 1 -> b x 14 x 14 x 128
        layer1 = tf_util2.conv_2d('l1', inputs, 128, is_training, reuse_mode,
                                  activation_fun=tf.nn.leaky_relu,
                                  padding='same',
                                  kernel_size=[3, 3],
                                  stride_size=(2, 2),
                                  use_bn=False)

        # b x 14 x 14 x 128 -> b x 7 x 7 x 256
        layer2 = tf_util2.conv_2d('l2', layer1, 256, is_training, reuse_mode,
                                  activation_fun=tf.nn.leaky_relu,
                                  padding='same',
                                  kernel_size=[3, 3],
                                  stride_size=(2, 2),
                                  use_bn=True)

        # b x 7 x 7 x 256 -> b x 4 x 4 x 512
        layer3 = tf_util2.conv_2d('l3', layer2, 512, is_training, reuse_mode,
                                  activation_fun=tf.nn.leaky_relu,
                                  padding='same',
                                  kernel_size=[3, 3],
                                  stride_size=(2, 2),
                                  use_bn=True)

        # b x 4 x 4 x 512 -> b x 4*4*512
        flatten = tf.reshape(layer3, (-1, 4 * 4 * 512))
        # b x 4*4*512 -> b x 1
        logits = tf_util2.fully_connection('flatten', flatten, 1, is_training,
                                           reuse_mode, use_bn=False)
        outputs = tf.sigmoid(logits)
    return logits, outputs
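
Returning both the raw logits and the sigmoid outputs lets the loss be built from the logits, which is numerically stabler than taking a log of probabilities. A minimal wiring sketch, where fake_images stands for the generator output from Example #3 and the placeholder names are illustrative:

import tensorflow as tf

is_training = tf.placeholder(tf.bool, [])
real_images = tf.placeholder(tf.float32, [None, 28, 28, 1])

# The first call creates the variables; the second call reuses them on fakes.
d_logits_real, d_out_real = discriminator(real_images, is_training, reuse_mode=False)
d_logits_fake, d_out_fake = discriminator(fake_images, is_training, reuse_mode=True)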
    
Example #3
def generator(inputs, is_training, reuse_mode):
    with tf.variable_scope('generator', reuse=reuse_mode):
        # b x 100 -> b x 4 x 4 x 512
        layer1 = tf_util2.fully_connection('l1', inputs, 4 * 4 * 512,
                                           is_training, reuse_mode, use_bn=False)
        layer1 = tf.reshape(layer1, [-1, 4, 4, 512])
        with tf.variable_scope('l1', reuse=reuse_mode):
            layer1 = tf.layers.batch_normalization(layer1, training=is_training)
        layer1 = tf.nn.leaky_relu(layer1)

        # b x 4 x 4 x 512 -> b x 7 x 7 x 256
        layer2 = tf_util2.conv_2d_trans('l2', layer1, 256, is_training, reuse_mode,
                                        activation_fun=tf.nn.leaky_relu,
                                        padding='valid',
                                        kernel_size=[4, 4],
                                        stride_size=(1, 1),
                                        use_bn=True)

        # b x 7 x 7 x 256 -> b x 14 x 14 x 128
        layer3 = tf_util2.conv_2d_trans('l3', layer2, 128, is_training, reuse_mode,
                                        activation_fun=tf.nn.leaky_relu,
                                        padding='same',
                                        kernel_size=[3, 3],
                                        stride_size=(2, 2),
                                        use_bn=True)

        # b x 14 x 14 x 128 -> b x 28 x 28 x 1
        logits = tf_util2.conv_2d_trans('l4', layer3, 1, is_training, reuse_mode,
                                        padding='same',
                                        kernel_size=[3, 3],
                                        stride_size=(2, 2),
                                        use_bn=True)
        outputs = tf.tanh(logits)
    return outputs
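
Because of the final tanh, the generator emits images in [-1, 1], so real inputs fed to the discriminator should be scaled to the same range. A sketch of the standard sigmoid-cross-entropy GAN losses over the logits from Example #2 (the batch size and tensor names are illustrative):

z = tf.random_normal([64, 100])  # batch of 100-d noise vectors
fake_images = generator(z, is_training, reuse_mode=False)

d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_logits_real), logits=d_logits_real) +
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(d_logits_fake), logits=d_logits_fake))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_logits_fake), logits=d_logits_fake))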
Example #4
def pc_decoder(logits, fully_con, is_training=True, reuse_mode=False):
    layer = tf.reshape(logits, [-1, np.shape(logits)[1]])
    with tf.variable_scope(fully_con['name'], reuse=reuse_mode):
        for i in range(len(fully_con['layers'])):
            layer = tf_util2.fully_connection(
                fully_con['layers'][i],
                layer,
                fully_con['outs'][i],
                is_training,
                reuse_mode,
                activation[fully_con['activation'][i]],
                0.001,
                'xaiver',
                use_bn=fully_con['bn'][i],
                bn_momentum=0.9)
    # integer division: each output point is an (x, y, z) triple
    layer = tf.reshape(layer, [-1, np.shape(layer)[1] // 3, 3])
    return layer
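
pc_decoder is driven entirely by the fully_con dict (each key indexed per layer) and by a module-level activation dict mapping names to TF functions. A configuration shaped like the keys read above could look as follows; all values are illustrative, and the last width must be divisible by 3 because of the final reshape into (x, y, z) points:

activation = {'relu': tf.nn.relu, 'none': None}  # assumed module-level lookup

fully_con = {
    'name': 'pc_decoder',
    'layers': ['fc1', 'fc2', 'fc3'],         # variable-scope name per layer
    'outs': [512, 1024, 2048 * 3],           # output width per layer
    'activation': ['relu', 'relu', 'none'],  # keys into the activation dict
    'bn': [True, True, False],               # batch norm per layer
}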
Example #5
def img_encoder(image,
                conv_net,
                fully_con,
                is_training=True,
                reuse_mode=False):
    layer = tf.reshape(image, [-1, np.shape(image)[1], np.shape(image)[2], 1])
    with tf.variable_scope(conv_net['name'], reuse=reuse_mode):
        for i in range(len(conv_net['layers'])):
            layer = tf_util2.conv_2d(conv_net['layers'][i],
                                     layer,
                                     conv_net['outs'][i],
                                     is_training,
                                     reuse_mode,
                                     activation[conv_net['activation'][i]],
                                     conv_net['padding'][i],
                                     conv_net['kenel'][i],
                                     conv_net['stride'][i],
                                     0.001,
                                     'xaiver',
                                     use_bn=conv_net['bn'][i],
                                     bn_momentum=0.9)
        layer = tf.layers.Flatten()(layer)
    with tf.variable_scope(fully_con['name'], reuse=reuse_mode):
        for i in range(len(fully_con['layers'])):
            layer = tf_util2.fully_connection(
                fully_con['layers'][i],
                layer,
                fully_con['outs'][i],
                is_training,
                reuse_mode,
                activation[fully_con['activation'][i]],
                0.001,
                'xaiver',
                use_bn=fully_con['bn'][i],
                bn_momentum=0.9)
    return layer
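
img_encoder follows the same config-driven convention, with an additional conv_net dict. Note the kernel key is spelled 'kenel' in the code above, so any config must use that spelling. An illustrative configuration:

conv_net = {
    'name': 'img_conv',
    'layers': ['c1', 'c2', 'c3'],
    'outs': [32, 64, 128],                    # filters per layer
    'activation': ['relu', 'relu', 'relu'],
    'padding': ['same', 'same', 'same'],
    'kenel': [[3, 3], [3, 3], [3, 3]],        # spelled as in the code above
    'stride': [(2, 2), (2, 2), (2, 2)],
    'bn': [True, True, True],
}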
Example #6
def pc_vae_encoder(pc_in, is_training, reuse_mode=False, bn_mode=True):
    with tf.variable_scope('vae_encoder', reuse=reuse_mode):
        # b*2048*3*1 -> b*2048*1*64 -> b*2048*1*128 -> b*2048*1*128 -> b*2048*1*256 -> b*2048*1*256
        inputs = tf.reshape(pc_in, [-1, 2048, 3, 1])
        layer1 = tf_util2.conv_2d('e1',
                                  inputs,
                                  64,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 3], (1, 1),
                                  use_bn=bn_mode)
        layer2 = tf_util2.conv_2d('e2',
                                  layer1,
                                  128,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        layer3 = tf_util2.conv_2d('e3',
                                  layer2,
                                  128,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        layer4 = tf_util2.conv_2d('e4',
                                  layer3,
                                  256,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        layer5 = tf_util2.conv_2d('e5',
                                  layer4,
                                  256,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        # b*2048*1*256 -> b*1*1*256 -> b*256
        layer6 = tf.layers.max_pooling2d(layer5, [2048, 1], [1, 1],
                                         padding='valid')
        layer7 = tf.squeeze(layer6, axis=[1, 2])  # keep the batch axis even when b == 1
        # b*256 -> (z_sigma, z_mu)
        z_sigma = tf_util2.fully_connection('es',
                                            layer7,
                                            256,
                                            is_training,
                                            reuse_mode,
                                            None,
                                            0.001,
                                            use_bn=False)
        z_mu = tf_util2.fully_connection('em',
                                         layer7,
                                         256,
                                         is_training,
                                         reuse_mode,
                                         None,
                                         0.001,
                                         use_bn=False)
        epsilon = tf.random_normal(shape=tf.shape(z_sigma),
                                   mean=0,
                                   stddev=1,
                                   dtype=tf.float32)
        latent = z_mu + tf.sqrt(tf.exp(z_sigma)) * epsilon
    return latent, z_sigma, z_mu
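
The reparameterization latent = z_mu + sqrt(exp(z_sigma)) * epsilon treats z_sigma as a log-variance. Under that reading, the matching KL term of a standard Gaussian VAE would be the following (a sketch; the actual training loss is not shown on this page):

kl_loss = tf.reduce_mean(
    -0.5 * tf.reduce_sum(
        1.0 + z_sigma - tf.square(z_mu) - tf.exp(z_sigma), axis=1))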
Example #7
def pc_gan_discriminator(pc_in, is_training, reuse_mode=False, bn_mode=False):
    with tf.variable_scope('gan_discriminator', reuse=reuse_mode):
        # b*2048*3*1 -> b*2048*1*64 -> b*2048*1*128 -> b*2048*1*256 -> b*2048*1*1024
        inputs = tf.reshape(pc_in, [-1, 2048, 3, 1])
        layer1 = tf_util2.conv_2d('d1',
                                  inputs,
                                  64,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 3], (1, 1),
                                  use_bn=bn_mode)
        layer2 = tf_util2.conv_2d('d2',
                                  layer1,
                                  128,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        layer3 = tf_util2.conv_2d('d3',
                                  layer2,
                                  256,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        layer4 = tf_util2.conv_2d('d4',
                                  layer3,
                                  1024,
                                  is_training,
                                  reuse_mode,
                                  tf.nn.relu,
                                  'valid', [1, 1], (1, 1),
                                  use_bn=bn_mode)
        # b*2048*1*1024 -> b*1*1*1024 -> b*1024
        layer5 = tf.layers.max_pooling2d(layer4, [2048, 1], [1, 1],
                                         padding='valid')
        layer6 = tf.squeeze(layer5, axis=[1, 2])  # keep the batch axis even when b == 1
        # b*1024 -> b*256 -> b*256 -> b*1
        layer7 = tf_util2.fully_connection('d7',
                                           layer6,
                                           256,
                                           is_training,
                                           reuse_mode,
                                           tf.nn.relu,
                                           0.001,
                                           use_bn=False)
        layer8 = tf_util2.fully_connection('d8',
                                           layer7,
                                           256,
                                           is_training,
                                           reuse_mode,
                                           tf.nn.relu,
                                           0.001,
                                           use_bn=False)
        logits = tf_util2.fully_connection('logits',
                                           layer8,
                                           1,
                                           is_training,
                                           reuse_mode,
                                           None,
                                           0.001,
                                           use_bn=False)
        outputs = tf.sigmoid(logits)
    return outputs, logits
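
Note that this discriminator returns (outputs, logits), the reverse of Example #2's (logits, outputs). A minimal wiring sketch on 2048-point clouds, where fake_pc stands for a generated batch and the names are illustrative:

real_pc = tf.placeholder(tf.float32, [None, 2048, 3])
d_out_real, d_logits_real = pc_gan_discriminator(real_pc, is_training, reuse_mode=False)
d_out_fake, d_logits_fake = pc_gan_discriminator(fake_pc, is_training, reuse_mode=True)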
Example #8
def cnn_net(train_data, is_training, reuse_mode=False, bn_mode=False):
    with tf.variable_scope('cnn', reuse=reuse_mode):
        # b*38*22*1 -> b*38*22*32 -> b*19*11*64 -> b*10*6*64 -> b*5*3*64
        inputs = tf.reshape(train_data, [-1, 38, 22, 1])  # channel dim needed by conv_2d
        conv1 = tf_util2.conv_2d('conv1',
                                 inputs,
                                 32,
                                 is_training,
                                 reuse_mode,
                                 tf.nn.relu,
                                 'same', [2, 2], (1, 1),
                                 use_bn=bn_mode)
        conv2 = tf_util2.conv_2d('conv2',
                                 conv1,
                                 64,
                                 is_training,
                                 reuse_mode,
                                 tf.nn.relu,
                                 'same', [2, 2], (2, 2),
                                 use_bn=bn_mode)
        conv3 = tf_util2.conv_2d('conv3',
                                 conv2,
                                 64,
                                 is_training,
                                 reuse_mode,
                                 tf.nn.relu,
                                 'same', [2, 2], (2, 2),
                                 use_bn=bn_mode)
        conv4 = tf_util2.conv_2d('conv4',
                                 conv3,
                                 64,
                                 is_training,
                                 reuse_mode,
                                 tf.nn.relu,
                                 'same', [2, 2], (2, 2),
                                 use_bn=bn_mode)
        flatten = tf.reshape(conv4, [-1, 5 * 3 * 64])
        # b*960 -> b*512 -> b*512 -> b*34
        layer1 = tf_util2.fully_connection('l1',
                                           flatten,
                                           512,
                                           is_training,
                                           reuse_mode,
                                           tf.nn.relu,
                                           0.001,
                                           use_bn=bn_mode)
        layer2 = tf_util2.fully_connection('l2',
                                           layer1,
                                           512,
                                           is_training,
                                           reuse_mode,
                                           tf.nn.relu,
                                           0.001,
                                           use_bn=bn_mode)
        layer3 = tf_util2.fully_connection('l3',
                                           layer2,
                                           34,
                                           is_training,
                                           reuse_mode,
                                           None,
                                           0.001,
                                           use_bn=bn_mode)
    return layer3
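
A typical graph builds the training branch first and then a weight-sharing evaluation branch via reuse_mode. Because these nets use tf.layers.batch_normalization, the moving-average update ops must be run alongside the train step. A sketch, assuming a 34-way classification target (placeholder names are illustrative):

train_x = tf.placeholder(tf.float32, [None, 38, 22])
labels = tf.placeholder(tf.float32, [None, 34])

logits_train = cnn_net(train_x, is_training=True, reuse_mode=False, bn_mode=True)
logits_eval = cnn_net(train_x, is_training=False, reuse_mode=True, bn_mode=True)

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits_train))

# BN moving averages live in tf.GraphKeys.UPDATE_OPS and are not run
# automatically; attach them to the train op.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)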