Example #1
    def __denseBlock(self,
                     x,
                     growth_rate=16,
                     num_layers=8,
                     kernel_size=[3, 3],
                     layer=0):
        dense_block_output = x
        for i in range(num_layers):
            '''
            In the paper <Densely Connected Convolutional Networks>,
            each composite function consists of three consecutive operations:
            batch normalization (BN), followed by a rectified linear unit (ReLU) and a 3*3 convolution (Conv).
            '''
            if self.is_bn:
                x = tl.BatchNormLayer(x, name='denseblock%d/BN%d' % (layer, i))
            x = ReluLayer(x, name='denseblock%d/relu%d' % (layer, i))
            x = tl.Conv2d(x,
                          growth_rate,
                          kernel_size,
                          name='denseblock%d/conv%d' % (layer, i))
            # concatenate this layer's output with the block's running feature maps
            dense_block_output = tl.ConcatLayer([dense_block_output, x],
                                                concat_dim=3,
                                                name='denseblock%d/concat%d' %
                                                (layer, i))
            x = dense_block_output

        return dense_block_output
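
A minimal self-contained sketch of the same dense block, assuming TensorFlow 1.x and TensorLayer 1.x (the tl and ReluLayer aliases above come from the surrounding module, which is not shown; here ReLU is folded into the BN layer's act argument). After num_layers layers at growth rate k, the block adds num_layers * k channels:

import tensorflow as tf
import tensorlayer as tl

def dense_block(net, growth_rate=16, num_layers=8, name='db0'):
    # DenseNet composite function: BN -> ReLU -> 3x3 Conv, concatenating
    # every layer's output along the channel axis.
    block_out = net
    for i in range(num_layers):
        net = tl.layers.BatchNormLayer(net, act=tf.nn.relu, name='%s/bn%d' % (name, i))
        net = tl.layers.Conv2d(net, growth_rate, (3, 3), name='%s/conv%d' % (name, i))
        block_out = tl.layers.ConcatLayer([block_out, net], concat_dim=3,
                                          name='%s/concat%d' % (name, i))
        net = block_out
    return block_out

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
net = dense_block(tl.layers.InputLayer(x, name='in'))
print(net.outputs.shape)  # (?, 32, 32, 192): 64 input channels + 8 layers * 16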
Example #2
    def build_model(self, n_dense_blocks=8, scale=8, subpixel=False):
        print("Building DenseNet...")

        norm_input = utils.normalize_color_tf(self.input)
        norm_target = utils.normalize_color_tf(self.target)
        x = tl.InputLayer(norm_input, name='input_layer')
        '''
        Extract low-level features.
        In the paper <Densely Connected Convolutional Networks>, the filter size here is 7*7,
        followed by a max-pooling layer:
        upscale_input = tl.Conv2d(x, self.feature_size, [7, 7], act=None, name='conv0')
        upscale_input = tl.MaxPool2d(upscale_input, [3, 3], [2, 2], name='maxpool0')
        '''
        with tf.variable_scope("low_level_features"):
            x = tl.Conv2d(x, 128, [3, 3], act=None, name='conv0')

        conv1 = x
        with tf.variable_scope("dense_blocks"):
            for i in range(n_dense_blocks):
                x = self.dense_block(x, 16, 8, (3, 3), layer=i)
                x = tl.ConcatLayer([conv1, x],
                                   concat_dim=3,
                                   name='dense%d/concat_output' % i)

        with tf.variable_scope("bottleneck_layer"):
            '''
            Bottleneck layer.
            In the paper <Image Super-Resolution Using Dense Skip Connections>,
            the channel count here is 256.
            '''
            x = tl.Conv2d(x, 256, (1, 1), act=None, name='bottleneck')

        with tf.variable_scope("upscale_module"):
            '''
            The paper <Densely Connected Convolutional Networks> uses deconv layers to upscale the output;
            we provide two methods here: deconv and subpixel.
            '''
            if subpixel:
                x = utils.subpixel_upsample(x, 128, scale)
            else:
                x = utils.deconv_upsample(x, 128, (3, 3), scale)

        with tf.variable_scope("reconstruction_layer"):
            output = tl.Conv2d(x,
                               self.n_channels, (3, 3),
                               act=tf.nn.relu,
                               name='reconstruction')

        self.output = tf.clip_by_value(output.outputs, 0.0, 1.0, name="output")
        self.calculate_loss(norm_target, self.output)
        conf = tf.ConfigProto(allow_soft_placement=True,
                              log_device_placement=False)
        self.sess = tf.Session(config=conf)
        self.saver = tf.train.Saver()
        print("Done building!")
Example #3
    def dense_block(self,
                    x,
                    growth_rate=16,
                    n_conv=8,
                    kernel_size=(3, 3),
                    layer=0):
        dense_block_output = x
        for i in range(n_conv):
            x = tl.BatchNormLayer(x, name='dense_%d/bn_%d' % (layer, i))
            x = ReluLayer(x, name='dense_%d/relu_%d' % (layer, i))
            x = tl.Conv2d(x,
                          growth_rate,
                          kernel_size,
                          name='dense_%d/conv_%d' % (layer, i))
            # concatenate this layer's output with the block's running feature maps
            dense_block_output = tl.ConcatLayer([dense_block_output, x],
                                                concat_dim=3,
                                                name='dense_%d/concat_%d' %
                                                (layer, i))
            x = dense_block_output

        return x
Example #4
    def buildModel(self):
        print("Building Recurrent WaveletNet...")

        # input layers
        x = []
        for i in range(self.num_layers):
            x.append(tl.InputLayer(self.input, name='inputlayer%d' % (i + 1)))
        '''
        Extract low-level features.
        In the paper <Densely Connected Convolutional Networks>, the filter size here is 7*7,
        followed by a max-pooling layer:
        upscale_input = tl.Conv2d(x, self.feature_size, [7, 7], act=None, name='conv0')
        upscale_input = tl.MaxPool2d(upscale_input, [3, 3], [2, 2], name='maxpool0')
        '''
        # NOTE: x is a list of input layers at this point; the initial conv is
        # applied to the first one (the original code passed the whole list).
        upscale_input = tl.Conv2d(x[0],
                                  self.feature_size, [3, 3],
                                  act=None,
                                  name='conv0')

        # dense-net
        '''
        Using the SRDenseNet_All model:
        all levels of features (the outputs of the dense blocks) are combined
        via skip connections as the input for reconstructing the HR images.
        x
        |\
        | \
        |  dense block layer
        | /
        |/
        x1
        |
        [x, x1] (concat)
        '''
        x = upscale_input
        for i in range(self.dense_block):
            # the output of the dense block
            x = self.__denseBlock(x,
                                  self.growth_rate,
                                  self.num_layers, [3, 3],
                                  layer=i)
            # concat
            upscale_input = tl.ConcatLayer([upscale_input, x],
                                           concat_dim=3,
                                           name='denseblock%d/concat_output' %
                                           (i))
        '''
        Bottleneck layer.
        In the paper <Image Super-Resolution Using Dense Skip Connections>,
        the channel count here is 256.
        '''
        upscale_input = tl.Conv2d(upscale_input,
                                  self.bottleneck_size, [1, 1],
                                  act=None,
                                  name='bottleneck')
        '''
        The paper <Densely Connected Convolutional Networks> uses deconv layers to upscale the output;
        here we provide two methods: deconv and subpixel.
        '''
        # subpixel to upscale
        if self.is_subpixel:
            upscale_output = tl.Conv2d(upscale_input,
                                       self.bottleneck_size, [3, 3],
                                       act=None,
                                       name='s1/1')
            upscale_output = tl.SubpixelConv2d(upscale_output,
                                               scale=2,
                                               act=tf.nn.relu,
                                               name='pixelshufferx2/1')

            upscale_output = tl.Conv2d(upscale_output,
                                       self.bottleneck_size, [3, 3],
                                       act=None,
                                       name='s1/2')
            upscale_output = tl.SubpixelConv2d(upscale_output,
                                               scale=2,
                                               act=tf.nn.relu,
                                               name='pixelshufferx2/2')

            if self.scale == 8:
                upscale_output = tl.Conv2d(upscale_output,
                                           self.bottleneck_size, [3, 3],
                                           act=None,
                                           name='s1/3')
                upscale_output = tl.SubpixelConv2d(upscale_output,
                                                   scale=2,
                                                   act=tf.nn.relu,
                                                   name='pixelshufferx2/3')
        # deconv to upscale
        else:
            # if scale is 8, use 3 deconv layers;
            # if scale is 4, use 2 deconv layers
            width, height = int(upscale_input.outputs.shape[1]), int(
                upscale_input.outputs.shape[2])
            upscale_output, feature_size, width, height = self.__deconv(
                upscale_input,
                self.bottleneck_size,
                width,
                height,
                name='deconv0')
            upscale_output, feature_size, width, height = self.__deconv(
                upscale_output, feature_size, width, height, name='deconv1')
            if self.scale == 8:
                upscale_output, feature_size, width, height = self.__deconv(
                    upscale_output,
                    feature_size,
                    width,
                    height,
                    name='deconv2')

        # reconstruction layer
        output = tl.Conv2d(upscale_output,
                           self.output_channels, [3, 3],
                           act=tf.nn.relu,
                           name='lastLayer')

        self.output = output.outputs

        self.cacuLoss(output)

        # Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #5
    def behaviour_net(self):
        init = tf.truncated_normal_initializer(stddev=0.1)
        zero_init = tf.zeros_initializer()
        screen_input = layers.InputLayer(self.screen_in, name="screen_inputs")
        screen_input_bn = layers.BatchNormLayer(screen_input, name="screen_bn")
        conv_depth = 64
        # Ten (Conv -> BN) stages over the screen input; layer names match the
        # unrolled original ("conv%d_scr", "screen_bn_%d").
        net = screen_input_bn
        for i in range(1, 11):
            net = layers.Conv2d(net, conv_depth, (3, 3), act=tf.nn.relu, padding="SAME",
                                W_init=init, b_init=init, name="conv%d_scr" % i)
            net = layers.BatchNormLayer(net, name="screen_bn_%d" % i)
        # Global average pooling: the window covers the whole 84x84 feature map.
        scr_GAP = layers.PoolLayer(net, ksize=[1, 84, 84, 1], padding="VALID", pool=tf.nn.avg_pool,
                                   name="scr_GAP")
        scr_info = layers.FlattenLayer(scr_GAP, name="scr_flattened")

        minimap_input = layers.InputLayer(self.minimap_in, name="mini_in")
        minimap_input_bn = layers.BatchNormLayer(minimap_input, name="minimap_bn")
        # Ten (Conv -> BN) stages over the minimap; the first conv uses 32 filters,
        # the rest use conv_depth, matching the unrolled original.
        net = minimap_input_bn
        for i in range(1, 11):
            n_filter = 32 if i == 1 else conv_depth
            net = layers.Conv2d(net, n_filter, (3, 3), act=tf.nn.relu, padding="SAME",
                                W_init=init, b_init=init, name="conv%d_mini" % i)
            net = layers.BatchNormLayer(net, name="mini_bn_%d" % i)
        # Global average pooling: the window covers the whole 64x64 feature map.
        mini_GAP = layers.PoolLayer(net, ksize=[1, 64, 64, 1], padding="VALID", pool=tf.nn.avg_pool,
                                    name="mini_GAP")
        mini_info = layers.FlattenLayer(mini_GAP, name="mini_flattened")
        multi_select_in = layers.InputLayer(self.multi_select_in, name="multi_select")
        select_info = layers.FlattenLayer(multi_select_in, name="select_flattened")
        select_info_bn = layers.BatchNormLayer(select_info, name="select_bn")

        info_combined = layers.ConcatLayer([scr_info, mini_info, select_info_bn], name="info_all")
        # Six (Dense -> BN) stages, then a zero-initialized Q-value head.
        net = info_combined
        for i in range(1, 7):
            net = layers.DenseLayer(net, n_units=1000, act=tf.nn.relu, W_init=init, b_init=init,
                                    name="Dense%d" % i)
            net = layers.BatchNormLayer(net, name="dense%d_bn" % i)
        self.q = layers.DenseLayer(net, n_units=7057, W_init=zero_init, b_init=zero_init,
                                   name="q")
        self.Q_behave = self.q.outputs
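
The scr_GAP and mini_GAP layers implement global average pooling by making the pooling window cover the entire feature map. A quick check of that equivalence with plain TensorFlow 1.x ops (the 84x84x64 shape is taken from the ksize and conv_depth above):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 84, 84, 64])
gap_pool = tf.nn.avg_pool(x, ksize=[1, 84, 84, 1], strides=[1, 1, 1, 1], padding="VALID")
gap_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
print(gap_pool.shape, gap_mean.shape)  # both (?, 1, 1, 64), identical values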
Example #6
    def target_net(self):
        init = tf.truncated_normal_initializer(stddev=0.1)
        zero_init = tf.zeros_initializer()
        screen_input = layers.InputLayer(self.screen_in, name="screen_inputs_t")
        screen_input_bn = layers.BatchNormLayer(screen_input, name="screen_bn_t")
        conv_depth = 64
        # Ten (Conv -> BN) stages over the screen input; names mirror the
        # behaviour net with a "_t" suffix.
        net = screen_input_bn
        for i in range(1, 11):
            net = layers.Conv2d(net, conv_depth, (3, 3), act=tf.nn.relu, padding="SAME",
                                W_init=init, b_init=init, name="conv%d_scr_t" % i)
            net = layers.BatchNormLayer(net, name="screen_bn_%d_t" % i)
        # Global average pooling: the window covers the whole 84x84 feature map.
        scr_GAP = layers.PoolLayer(net, ksize=[1, 84, 84, 1], padding="VALID", pool=tf.nn.avg_pool,
                                   name="scr_GAP_t")
        scr_info = layers.FlattenLayer(scr_GAP, name="scr_flattened_t")

        minimap_input = layers.InputLayer(self.minimap_in, name="mini_in_t")
        minimap_input_bn = layers.BatchNormLayer(minimap_input, name="minimap_bn_t")
        # Ten (Conv -> BN) stages over the minimap; the first conv uses 32 filters,
        # the rest use conv_depth, matching the unrolled original.
        net = minimap_input_bn
        for i in range(1, 11):
            n_filter = 32 if i == 1 else conv_depth
            net = layers.Conv2d(net, n_filter, (3, 3), act=tf.nn.relu, padding="SAME",
                                W_init=init, b_init=init, name="conv%d_mini_t" % i)
            net = layers.BatchNormLayer(net, name="mini_bn_%d_t" % i)
        # Global average pooling: the window covers the whole 64x64 feature map.
        mini_GAP = layers.PoolLayer(net, ksize=[1, 64, 64, 1], padding="VALID", pool=tf.nn.avg_pool,
                                    name="mini_GAP_t")
        mini_info = layers.FlattenLayer(mini_GAP, name="mini_flattened_t")
        multi_select_in = layers.InputLayer(self.multi_select_in, name="multi_select_t")
        select_info = layers.FlattenLayer(multi_select_in, name="select_flattened_t")
        select_info_bn = layers.BatchNormLayer(select_info, name="select_bn_t")

        info_combined = layers.ConcatLayer([scr_info, mini_info, select_info_bn], name="info_all_t")
        # Six (Dense -> BN) stages, then a zero-initialized Q-value head.
        net = info_combined
        for i in range(1, 7):
            net = layers.DenseLayer(net, n_units=1000, act=tf.nn.relu, W_init=init, b_init=init,
                                    name="Dense%d_t" % i)
            net = layers.BatchNormLayer(net, name="dense%d_bn_t" % i)
        self.q_target = layers.DenseLayer(net, n_units=7057, W_init=zero_init, b_init=zero_init,
                                          name="q_t")
        self.target_params = self.q_target.all_params
        self.Q_target = self.q_target.outputs
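
target_params is collected here, but the step that synchronizes the target net with the behaviour net lies outside this snippet. In the usual DQN pattern the behaviour weights are copied into the target net every N training steps; a hypothetical sketch, with stand-in variables for the two parameter lists:

import tensorflow as tf

# Stand-ins for illustration; in the real model these would be
# self.q.all_params and self.q_target.all_params, in matching order.
behaviour_params = [tf.Variable(tf.truncated_normal([4, 4]), name="b_w")]
target_params = [tf.Variable(tf.zeros([4, 4]), name="t_w")]

sync_ops = [tf.assign(t, b) for t, b in zip(target_params, behaviour_params)]
sync_target = tf.group(*sync_ops)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(sync_target)  # the target net now holds the behaviour weights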