Example #1
    def __denseBlock(self,
                     x,
                     growth_rate=16,
                     num_layers=8,
                     kernel_size=[3, 3],
                     layer=0):
        dense_block_output = x
        for i in range(num_layers):
            '''
            In the paper "Densely Connected Convolutional Networks",
            each composite function consists of three consecutive operations:
            batch normalization (BN), followed by a rectified linear unit (ReLU)
            and a 3x3 convolution (Conv).
            '''
            if self.is_bn:
                x = tl.BatchNormLayer(x, name='denseblock%d/BN%d' % (layer, i))
            x = ReluLayer(x, name='denseblock%d/relu%d' % (layer, i))
            x = tl.Conv2d(x,
                          growth_rate,
                          kernel_size,
                          name='denseblock%d/conv%d' % (layer, i))
            # concatenate this layer's output with all preceding feature maps (dense connectivity)
            dense_block_output = tl.ConcatLayer([dense_block_output, x],
                                                concat_dim=3,
                                                name='denseblock%d/concat%d' %
                                                (layer, i))
            x = dense_block_output

        return dense_block_output
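
ReluLayer is not a stock TensorLayer 1.x class, so the snippet assumes a project-local helper. A minimal sketch of one, built on tl.LambdaLayer (the name and signature here are assumptions):

import tensorflow as tf
import tensorlayer.layers as tl

def ReluLayer(prev_layer, name='relu'):
    # hypothetical helper: wraps tf.nn.relu as a TensorLayer layer
    return tl.LambdaLayer(prev_layer, fn=tf.nn.relu, name=name)

Since every iteration concatenates growth_rate new channels, the block output carries C_in + num_layers * growth_rate channels (e.g. 64 + 8 * 16 = 192).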
Example #2
    def buildModel(self):
        print("Building SRResNet...")

        # input layer
        x = tl.InputLayer(self.input, name='inputlayer')

        x = tl.Conv2d(x, self.feature_size, [3, 3], name='c1')
        conv_1 = x

        # B residual blocks
        for i in range(self.num_layers):
            x = self.__resBlock(x, self.feature_size, layer=i)
        x = tl.Conv2d(x, self.feature_size, [3, 3], name='c2')
        x = tl.BatchNormLayer(x, name='bn2')
        # global residual connection back to the first convolution
        x = tl.ElementwiseLayer([x, conv_1],
                                tf.add,
                                name='global_res_add')
        # B residual blocks end

        x = utils.SubpixelUpsample(x, self.feature_size, self.scale)

        # One final convolution on the upsampling output
        output = tl.Conv2d(x,
                           self.output_channels, [1, 1],
                           act=tf.nn.tanh,
                           name='lastLayer')

        self.cacuLoss(output)

        # Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
        """
Example #3
    def __resBlock(self, x, channels=64, kernel_size=[3, 3], scale=1, layer=0):
        nn = tl.Conv2d(x,
                       channels,
                       kernel_size,
                       act=None,
                       name='res%d/c1' % (layer))
        nn = tl.BatchNormLayer(nn, act=tf.nn.relu, name='res%d/bn1' % (layer))
        nn = tl.Conv2d(nn,
                       channels,
                       kernel_size,
                       act=None,
                       name='res%d/c2' % (layer))
        nn = tl.BatchNormLayer(nn, act=tf.nn.relu, name='res%d/bn2' % (layer))
        nn = ScaleLayer(nn, scale, name='res%d/scale' % (layer))
        n = tl.ElementwiseLayer([x, nn],
                                tf.add,
                                name='res%d/res_add' % (layer))
        return n
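
ScaleLayer is likewise not a stock TensorLayer class. It appears to implement the residual scaling from EDSR (multiplying the residual branch by a constant before the addition); a minimal sketch under that assumption:

import tensorlayer.layers as tl

def ScaleLayer(prev_layer, scale, name='scale'):
    # hypothetical helper: multiply the branch output by a constant factor
    return tl.LambdaLayer(prev_layer, fn=lambda t: t * scale, name=name)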
Example #4
    def dense_block(self,
                    x,
                    growth_rate=16,
                    n_conv=8,
                    kernel_size=(3, 3),
                    layer=0):
        dense_block_output = x
        for i in range(n_conv):
            x = tl.BatchNormLayer(x, name='dense_%d/bn_%d' % (layer, i))
            x = ReluLayer(x, name='dense_%d/relu_%d' % (layer, i))
            x = tl.Conv2d(x,
                          growth_rate,
                          kernel_size,
                          name='dense_%d/conv_%d' % (layer, i))
            # concatenate this layer's output with all preceding feature maps (dense connectivity)
            dense_block_output = tl.ConcatLayer([dense_block_output, x],
                                                concat_dim=3,
                                                name='dense_%d/concat_%d' %
                                                (layer, i))
            x = dense_block_output

        return x
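
The channel bookkeeping mirrors Example #1; a quick check with the defaults above and an assumed 32-channel input:

# each of the n_conv iterations concatenates growth_rate new channels
c_in, growth_rate, n_conv = 32, 16, 8   # c_in is assumed for illustration
c_out = c_in + n_conv * growth_rate
print(c_out)  # 160 channels leave the block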
Example #5
    def behaviour_net(self):
        init = tf.truncated_normal_initializer(stddev=0.1)
        zero_init = tf.zeros_initializer()
        conv_depth = 64

        # screen branch: ten 3x3 conv + BN stages, then global average pooling
        screen_input = layers.InputLayer(self.screen_in, name="screen_inputs")
        x = layers.BatchNormLayer(screen_input, name="screen_bn")
        for i in range(1, 11):
            x = layers.Conv2d(x, conv_depth, (3, 3), act=tf.nn.relu, padding="SAME",
                              W_init=init, b_init=init, name="conv%d_scr" % i)
            x = layers.BatchNormLayer(x, name="screen_bn_%d" % i)
        scr_GAP = layers.PoolLayer(x, ksize=[1, 84, 84, 1], padding="VALID",
                                   pool=tf.nn.avg_pool, name="scr_GAP")
        scr_info = layers.FlattenLayer(scr_GAP, name="scr_flattened")

        # minimap branch: same structure (the first conv uses 32 filters)
        minimap_input = layers.InputLayer(self.minimap_in, name="mini_in")
        x = layers.BatchNormLayer(minimap_input, name="minimap_bn")
        for i in range(1, 11):
            n_filter = 32 if i == 1 else conv_depth
            x = layers.Conv2d(x, n_filter, (3, 3), act=tf.nn.relu, padding="SAME",
                              W_init=init, b_init=init, name="conv%d_mini" % i)
            x = layers.BatchNormLayer(x, name="mini_bn_%d" % i)
        mini_GAP = layers.PoolLayer(x, ksize=[1, 64, 64, 1], padding="VALID",
                                    pool=tf.nn.avg_pool, name="mini_GAP")
        mini_info = layers.FlattenLayer(mini_GAP, name="mini_flattened")

        # selection branch
        multi_select_in = layers.InputLayer(self.multi_select_in, name="multi_select")
        select_info = layers.FlattenLayer(multi_select_in, name="select_flattened")
        select_info_bn = layers.BatchNormLayer(select_info, name="select_bn")

        # fuse the three branches and run six dense + BN stages
        x = layers.ConcatLayer([scr_info, mini_info, select_info_bn], name="info_all")
        for i in range(1, 7):
            x = layers.DenseLayer(x, n_units=1000, act=tf.nn.relu,
                                  W_init=init, b_init=init, name="Dense%d" % i)
            x = layers.BatchNormLayer(x, name="dense%d_bn" % i)
        # zero-initialized linear Q-value head
        self.q = layers.DenseLayer(x, n_units=7057, W_init=zero_init,
                                   b_init=zero_init, name="q")
        self.Q_behave = self.q.outputs
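
behaviour_net and the target_net of Example #6 form the usual DQN pair, so the target copy has to be refreshed from the behaviour parameters periodically. A minimal hard-update sketch, assuming both networks have been built and a session is stored on self.sess:

    def sync_target(self):
        # copy every behaviour-net parameter into the matching target-net slot
        copy_ops = [tf.assign(t, b) for t, b in
                    zip(self.q_target.all_params, self.q.all_params)]
        self.sess.run(copy_ops)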
Example #6
    def target_net(self):
        init = tf.truncated_normal_initializer(stddev=0.1)
        zero_init = tf.zeros_initializer()
        conv_depth = 64

        # same architecture as behaviour_net, with "_t"-suffixed layer names
        screen_input = layers.InputLayer(self.screen_in, name="screen_inputs_t")
        x = layers.BatchNormLayer(screen_input, name="screen_bn_t")
        for i in range(1, 11):
            x = layers.Conv2d(x, conv_depth, (3, 3), act=tf.nn.relu, padding="SAME",
                              W_init=init, b_init=init, name="conv%d_scr_t" % i)
            x = layers.BatchNormLayer(x, name="screen_bn_%d_t" % i)
        scr_GAP = layers.PoolLayer(x, ksize=[1, 84, 84, 1], padding="VALID",
                                   pool=tf.nn.avg_pool, name="scr_GAP_t")
        scr_info = layers.FlattenLayer(scr_GAP, name="scr_flattened_t")

        minimap_input = layers.InputLayer(self.minimap_in, name="mini_in_t")
        x = layers.BatchNormLayer(minimap_input, name="minimap_bn_t")
        for i in range(1, 11):
            n_filter = 32 if i == 1 else conv_depth
            x = layers.Conv2d(x, n_filter, (3, 3), act=tf.nn.relu, padding="SAME",
                              W_init=init, b_init=init, name="conv%d_mini_t" % i)
            x = layers.BatchNormLayer(x, name="mini_bn_%d_t" % i)
        mini_GAP = layers.PoolLayer(x, ksize=[1, 64, 64, 1], padding="VALID",
                                    pool=tf.nn.avg_pool, name="mini_GAP_t")
        mini_info = layers.FlattenLayer(mini_GAP, name="mini_flattened_t")

        multi_select_in = layers.InputLayer(self.multi_select_in, name="multi_select_t")
        select_info = layers.FlattenLayer(multi_select_in, name="select_flattened_t")
        select_info_bn = layers.BatchNormLayer(select_info, name="select_bn_t")

        info_combined = layers.ConcatLayer([scr_info, mini_info, select_info_bn], name="info_all_t")
        x = info_combined
        for i in range(1, 7):
            x = layers.DenseLayer(x, n_units=1000, act=tf.nn.relu,
                                  W_init=init, b_init=init, name="Dense%d_t" % i)
            x = layers.BatchNormLayer(x, name="dense%d_bn_t" % i)
        self.q_target = layers.DenseLayer(x, n_units=7057, W_init=zero_init,
                                          b_init=zero_init, name="q_t")
        self.target_params = self.q_target.all_params
        self.Q_target = self.q_target.outputs
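
With Q_target in hand, the one-step TD target follows directly. A sketch as it might appear in the loss construction, where gamma, self.reward and self.done (0/1 episode-end flags) are assumptions not shown in the snippet:

        gamma = 0.99  # assumed discount factor
        q_next = tf.reduce_max(self.Q_target, axis=1)
        td_target = self.reward + gamma * (1.0 - self.done) * q_next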
Example #7
    def encoder(self, input_imgs, is_train=True, reuse=False):
        '''
        input_imgs: the input images to be encoded into a latent vector.
        Shape here is [b_size, 64, 64, 3].
        '''
        z_dim = self.z_dim  # latent dimension, e.g. 512
        ef_dim = 64  # encoder filter number

        w_init = tf.random_normal_initializer(stddev=0.02)
        gamma_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("encoder", reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_in = l.InputLayer(input_imgs, name='en/in')  # (b_size,64,64,3)
            net_h0 = l.Conv2d(net_in,
                              ef_dim, (5, 5), (2, 2),
                              act=None,
                              padding='SAME',
                              W_init=w_init,
                              name='en/h0/conv2d')
            net_h0 = l.BatchNormLayer(net_h0,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='en/h0/batch_norm')
            # net_h0.outputs._shape = (b_size,32,32,64)

            net_h1 = l.Conv2d(net_h0,
                              ef_dim * 2, (5, 5), (2, 2),
                              act=None,
                              padding='SAME',
                              W_init=w_init,
                              name='en/h1/conv2d')
            net_h1 = l.BatchNormLayer(net_h1,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='en/h1/batch_norm')
            # net_h1.outputs._shape = (b_size,16,16,64*2)

            net_h2 = l.Conv2d(net_h1,
                              ef_dim * 4, (5, 5), (2, 2),
                              act=None,
                              padding='SAME',
                              W_init=w_init,
                              name='en/h2/conv2d')
            net_h2 = l.BatchNormLayer(net_h2,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='en/h2/batch_norm')
            # net_h2.outputs._shape = (b_size,8,8,64*4)

            net_h3 = l.Conv2d(net_h2,
                              ef_dim * 8, (5, 5), (2, 2),
                              act=None,
                              padding='SAME',
                              W_init=w_init,
                              name='en/h3/conv2d')
            net_h3 = l.BatchNormLayer(net_h3,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='en/h3/batch_norm')
            # net_h3.outputs._shape = (b_size,4,4,64*8)

            # mean of z
            net_h4 = l.FlattenLayer(net_h3, name='en/h4/flatten')
            # net_h4.outputs._shape = (b_size,4*4*64*8)
            net_out1 = l.DenseLayer(net_h4,
                                    n_units=z_dim,
                                    act=tf.identity,
                                    W_init=w_init,
                                    name='en/h3/lin_sigmoid')
            net_out1 = l.BatchNormLayer(net_out1,
                                        act=tf.identity,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='en/out1/batch_norm')

            # net_out1 = DenseLayer(net_h4, n_units=z_dim, act=tf.nn.relu,
            #         W_init = w_init, name='en/h4/lin_sigmoid')
            z_mean = net_out1.outputs  # (b_size,512)

            # log variance of z (the covariance matrix is diagonal)
            net_h5 = l.FlattenLayer(net_h3, name='en/h5/flatten')
            net_out2 = l.DenseLayer(net_h5,
                                    n_units=z_dim,
                                    act=tf.identity,
                                    W_init=w_init,
                                    name='en/h4/lin_sigmoid')
            net_out2 = l.BatchNormLayer(net_out2,
                                        act=tf.nn.softplus,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='en/out2/batch_norm')
            # net_out2 = DenseLayer(net_h5, n_units=z_dim, act=tf.nn.relu,
            #         W_init = w_init, name='en/h5/lin_sigmoid')
            z_log_sigma_sq = net_out2.outputs + 1e-6  # (b_size,512)

        return net_out1, net_out2, z_mean, z_log_sigma_sq
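
The two encoder heads feed the standard reparameterization trick; a minimal sampling sketch using the returned tensors:

# z = mean + sigma * eps, with eps ~ N(0, I); the second head returns the
# log variance, so sigma = sqrt(exp(z_log_sigma_sq))
eps = tf.random_normal(tf.shape(z_mean))
z = z_mean + tf.sqrt(tf.exp(z_log_sigma_sq)) * eps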
Example #8
    def generator(self, inputs, is_train=True, reuse=False):
        '''
        Generator of the GAN, which can also be seen as the decoder of the VAE.
        inputs: latent representation from the encoder, shape [b_size, z_dim].
        '''
        image_size = self.output_size  # 64, the output size of the generator
        s2, s4, s8, _ = (image_size // 2, image_size // 4,
                         image_size // 8, image_size // 16)  # 32, 16, 8, 4
        gf_dim = 64
        c_dim = self.c_dim  # n_color 3
        batch_size = self.batch_size  # 64

        w_init = tf.random_normal_initializer(stddev=0.02)
        gamma_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("generator", reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_in = l.InputLayer(inputs, name='g/in')
            net_h0 = l.DenseLayer(net_in,
                                  n_units=gf_dim * 4 * s8 * s8,
                                  W_init=w_init,
                                  act=tf.identity,
                                  name='g/h0/lin')
            # net_h0.outputs._shape = (b_size,256*8*8)
            net_h0 = l.ReshapeLayer(net_h0,
                                    shape=[-1, s8, s8, gf_dim * 4],
                                    name='g/h0/reshape')
            # net_h0.outputs._shape = (b_size,8,8,256)
            net_h0 = l.BatchNormLayer(net_h0,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='g/h0/batch_norm')

            # upsampling
            net_h1 = l.DeConv2d(net_h0,
                                gf_dim * 4, (5, 5),
                                out_size=(s4, s4),
                                strides=(2, 2),
                                padding='SAME',
                                batch_size=batch_size,
                                act=None,
                                W_init=w_init,
                                name='g/h1/decon2d')
            net_h1 = l.BatchNormLayer(net_h1,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='g/h1/batch_norm')
            # net_h1.outputs._shape = (b_size,16,16,256)

            net_h2 = l.DeConv2d(net_h1,
                                gf_dim * 2, (5, 5),
                                out_size=(s2, s2),
                                strides=(2, 2),
                                padding='SAME',
                                batch_size=batch_size,
                                act=None,
                                W_init=w_init,
                                name='g/h2/decon2d')
            net_h2 = l.BatchNormLayer(net_h2,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='g/h2/batch_norm')
            # net_h2.outputs._shape = (b_size,32,32,128)

            net_h3 = l.DeConv2d(net_h2,
                                gf_dim // 2, (5, 5),
                                out_size=(image_size, image_size),
                                strides=(2, 2),
                                padding='SAME',
                                batch_size=batch_size,
                                act=None,
                                W_init=w_init,
                                name='g/h3/decon2d')
            net_h3 = l.BatchNormLayer(net_h3,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='g/h3/batch_norm')
            # net_h3.outputs._shape = (b_size,64,64,32)

            # no BN on last deconv
            net_h4 = l.DeConv2d(net_h3,
                                c_dim, (5, 5),
                                out_size=(image_size, image_size),
                                strides=(1, 1),
                                padding='SAME',
                                batch_size=batch_size,
                                act=None,
                                W_init=w_init,
                                name='g/h4/decon2d')
            # net_h4.outputs._shape = (b_size,64,64,3)
            # net_h4 = Conv2d(net_h3, c_dim, (5,5),(1,1), padding='SAME', W_init=w_init, name='g/h4/conv2d')
            logits = net_h4.outputs
            net_h4.outputs = tf.nn.tanh(net_h4.outputs)
        return net_h4, logits
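
A minimal sketch of wiring Examples #7 and #8 together into a VAE forward pass (model and the images placeholder are assumptions, with the batch size of 64 noted in the comments above):

images = tf.placeholder(tf.float32, [64, 64, 64, 3], name='images')
_, _, z_mean, z_log_sigma_sq = model.encoder(images, is_train=True)
eps = tf.random_normal(tf.shape(z_mean))
z = z_mean + tf.sqrt(tf.exp(z_log_sigma_sq)) * eps
net_g, logits = model.generator(z, is_train=True)
reconstruction = net_g.outputs  # tanh output in [-1, 1]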