Example #1
def net():
    # conv1: 5x5 VALID convolution, 1 input channel -> 6 feature maps
    conv1_weight = tf.get_variable('conv1_w', [5, 5, 1, 6])
    conv1_bias = tf.get_variable('conv1_b', [6])
    conv1 = tf.nn.bias_add(
        tf.nn.conv2d(X, conv1_weight, strides=[1, 1, 1, 1], padding='VALID'),
        conv1_bias)

    print(conv1)

    conv1_pooling = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1], padding='VALID')

    print(conv1_pooling)

    # 28, 28, 6

    # conv2: each output map sees 3 consecutive input channels (wrapping
    # around), as in the LeNet-5 sparse connection table
    conv2_weight_1 = tf.get_variable('conv2_w', [5, 5, 3, 1])
    conv2_bias_1 = tf.get_variable('conv2_b', [1])
    conv2_maps = []
    # doubling the channels makes the wrap-around groups simple slices
    conv1_doubled = tf.concat([conv1_pooling, conv1_pooling], axis=3)

    for i in range(6):
        # channel groups: 0-2, 1-3, 2-4, 3-5, 4-5-0, 5-0-1
        group = conv1_doubled[:, :, :, i:i + 3]
        conv2 = tf.nn.bias_add(
            tf.nn.conv2d(group, conv2_weight_1, strides=[1, 1, 1, 1],
                         padding='VALID'),
            conv2_bias_1)
        conv2_maps.append(conv2)
    conv2_tensor = tf.concat(conv2_maps, axis=3)

    # .....
    conv2_pooling = tf.nn.max_pool(conv2_tensor, ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1], padding='VALID')
    print(conv2_pooling)
    return conv2_pooling
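A minimal driver for the network above, assuming TF 1.x graph mode and a 32x32 grayscale input (the placeholder X and the random batch are assumptions; the snippet itself leaves X undefined):

import numpy as np
import tensorflow as tf

X = tf.placeholder(tf.float32, [None, 32, 32, 1])  # assumed LeNet-style input
features = net()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(features, feed_dict={X: np.random.rand(4, 32, 32, 1)})
    print(out.shape)  # (4, 5, 5, 6): two VALID 5x5 convs and two 2x2 pools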
Example #2
 def call(self, inputs, **kwargs):
     # K is the Keras backend: from tensorflow.keras import backend as K
     main_input, embedding_matrix = inputs
     input_shape_tensor = tf.shape(main_input)
     last_input_dim = K.int_shape(main_input)[-1]
     emb_input_dim, emb_output_dim = K.int_shape(embedding_matrix)
     projected = K.dot(tf.reshape(main_input, (-1, last_input_dim)),
                       self.projection)
     if self.add_biases:
         projected = K.bias_add(projected,
                                self.biases,
                                data_format='channels_last')
     if 0 < self.projection_dropout < 1:
         projected = K.in_train_phase(
             lambda: K.dropout(projected, self.projection_dropout),
             projected,
             training=kwargs.get('training'))
     attention = K.dot(projected, tf.transpose(embedding_matrix))
     if self.scaled_attention:
         # scaled dot-product attention, described in
         # "Attention Is All You Need" (https://arxiv.org/abs/1706.03762)
         sqrt_d = tf.constant(math.sqrt(emb_output_dim), dtype=K.floatx())
         attention = attention / sqrt_d
     result = tf.reshape(
         self.activation(attention),
         (input_shape_tensor[0], input_shape_tensor[1], emb_input_dim))
     return result
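The layer ties its output projection to an embedding matrix: the input is projected into the embedding space, then scored against every embedding row via a dot product with the transposed matrix. A standalone numpy sketch of the same computation (all names and sizes here are illustrative, not part of the layer above):

import numpy as np

batch, seq_len, hidden, emb_dim, vocab = 2, 7, 64, 32, 100
x = np.random.rand(batch * seq_len, hidden)        # flattened inputs
projection = np.random.rand(hidden, emb_dim)       # plays self.projection
embedding_matrix = np.random.rand(vocab, emb_dim)  # the tied embedding

projected = x @ projection                  # project into embedding space
attention = projected @ embedding_matrix.T  # score against each vocab row
result = attention.reshape(batch, seq_len, vocab)
print(result.shape)  # (2, 7, 100)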
Example #3
def conv2d_op(x, name, shape, strides, pams):
    with tf.name_scope(name) as scope:
        # Xavier-initialized kernel; the scope prefix keeps variable names unique
        W_conv = tf.get_variable(scope + 'w', shape=shape, dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        b_conv = tf.Variable(tf.constant(0.0, shape=[shape[-1]], dtype=tf.float32),
                             trainable=True, name='b')
        conv = tf.nn.conv2d(x, W_conv, strides=strides, padding='SAME')
        h_conv = tf.nn.relu(tf.nn.bias_add(conv, b_conv), name=scope)
        pams += [W_conv, b_conv]
        return h_conv
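A hypothetical call site for conv2d_op, assuming TF 1.x (the placeholder and the shapes are illustrative):

params = []
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
h1 = conv2d_op(x, 'conv1', shape=[3, 3, 3, 64],
               strides=[1, 1, 1, 1], pams=params)
h2 = conv2d_op(h1, 'conv2', shape=[3, 3, 64, 64],
               strides=[1, 2, 2, 1], pams=params)
# params now holds the [W_conv, b_conv] pairs, e.g. for weight decay or a saver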
Example #4
    def call(self, x):

        # Perform convolution operation
        # TODO: figure out how to do this yourself in C++... might be an
        #       interesting side project
        feature_map = tf.nn.conv2d(x,
                                   self.kernel,
                                   strides=self.strides,
                                   padding='SAME')
        # Add bias
        feature_map_biased = tf.nn.bias_add(feature_map, self.bias)

        # Return activated feature map
        return self.activation_fn(feature_map_biased)
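This call() presumes that self.kernel, self.bias, self.strides, and self.activation_fn were created elsewhere in the layer. A minimal companion build() that would make it work (an assumption sketched in tf.keras style, not the original class; self.kernel_size and self.filters are hypothetical constructor arguments):

def build(self, input_shape):
    in_channels = input_shape[-1]
    # One kernel per output filter, matching tf.nn.conv2d's HWIO layout
    self.kernel = self.add_weight(
        'kernel',
        shape=(self.kernel_size, self.kernel_size, in_channels, self.filters))
    self.bias = self.add_weight(
        'bias', shape=(self.filters,), initializer='zeros')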
Example #5
    def attention(self,
                  pre_q,
                  pre_v,
                  pre_k,
                  out_seq_len: int,
                  d_model: int,
                  training=None):
        """
        Calculates the output of the attention once the affine transformations
        of the inputs are done. Here are the shapes of the arguments:
        :param pre_q: (batch_size, q_seq_len, num_heads, d_model // num_heads)
        :param pre_v: (batch_size, v_seq_len, num_heads, d_model // num_heads)
        :param pre_k: (batch_size, k_seq_len, num_heads, d_model // num_heads)
        :param out_seq_len: the length of the output sequence
        :param d_model: dimensionality of the model (as in the paper)
        :param training: Passed by Keras. Should not be defined manually.
          Optional scalar tensor indicating if we're in training
          or inference phase.
        """
        # shaping Q and V into (batch_size, num_heads, seq_len, d_model//heads)
        q = tf.transpose(pre_q, [0, 2, 1, 3])
        v = tf.transpose(pre_v, [0, 2, 1, 3])

        if self.compression_window_size is None:
            k_transposed = tf.transpose(pre_k, [0, 2, 3, 1])
        else:
            # Memory-compressed attention described in paper
            # "Generating Wikipedia by Summarizing Long Sequences"
            # (https://arxiv.org/pdf/1801.10198.pdf)
            # It compresses keys and values using 1D convolution, which
            # reduces the size of Q * K_transposed from roughly seq_len^2
            # to compressed_seq_len^2. If we use strided convolution with
            # window size = 3 and stride = 3, the memory requirements of
            # such memory-compressed attention will be 9 times smaller
            # than that of the original version.
            if self.use_masking:
                raise NotImplementedError(
                    "Masked memory-compressed attention has not "
                    "been implemented yet")
            k = tf.transpose(pre_k, [0, 2, 1, 3])
            k, v = [
                tf.reshape(
                    # Step 3: Return the result to its original dimensions
                    # (batch_size, num_heads, seq_len, d_model//heads)
                    K.bias_add(
                        # Step 3: ... and add bias
                        K.conv1d(
                            # Step 2: we "compress" K and V using strided conv
                            tf.reshape(
                                # Step 1: we reshape K and V to
                                # (batch + num_heads,  seq_len, d_model//heads)
                                item,
                                (-1, tf.shape(item)[-2],
                                 d_model // self.num_heads)),
                            kernel,
                            strides=self.compression_window_size,
                            padding='valid',
                            data_format='channels_last'),
                        bias,
                        data_format='channels_last'),
                    # new shape
                    K.concatenate(
                        [tf.shape(item)[:2], [-1, d_model // self.num_heads]]))
                for item, kernel, bias in ((k, self.k_conv_kernel,
                                            self.k_conv_bias),
                                           (v, self.v_conv_kernel,
                                            self.v_conv_bias))
            ]
            k_transposed = tf.transpose(k, [0, 1, 3, 2])
        # shaping K into (batch_size, num_heads, d_model//heads, seq_len)
        # for further matrix multiplication
        a = tf.cast(d_model // self.num_heads, dtype=tf.float32)
        sqrt_d = tf.math.sqrt(a)
        q_shape = tf.shape(q)
        k_t_shape = tf.shape(k_transposed)
        v_shape = tf.shape(v)
        # before performing batch_dot all tensors are being converted to 3D
        # shape (batch_size * num_heads, rows, cols) to make sure batch_dot
        # performs identically on all backends
        new_q_shape = tf.concat([[-1], q_shape[-2:]], axis=0)
        new_k_shape = tf.concat([[-1], k_t_shape[-2:]], axis=0)
        new_v_shape = tf.concat([[-1], v_shape[-2:]], axis=0)
        factor1 = tf.reshape(q, new_q_shape)
        factor2 = tf.reshape(k_transposed, new_k_shape)
        factor3 = tf.reshape(v, new_v_shape)
        batch_dot_raw = K.batch_dot(factor1, factor2)
        attention_heads = tf.reshape(
            K.batch_dot(
                self.apply_dropout_if_needed(tf.nn.softmax(
                    self.mask_attention_if_needed(batch_dot_raw / sqrt_d)),
                                             training=training), factor3),
            (-1, self.num_heads, q_shape[-2], v_shape[-1]))
        attention_heads_merged = tf.reshape(
            tf.transpose(attention_heads, [0, 2, 1, 3]), (-1, d_model))
        attention_out = tf.reshape(
            tf.tensordot(attention_heads_merged, self.output_weights, axes=1),
            (-1, out_seq_len, d_model))
        return attention_out
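The arithmetic behind the memory saving: with window size and stride both 3, the compressed keys/values have length seq_len / 3, so the seq_len x seq_len attention map shrinks by a factor of 9. A small numpy check of the compression step (illustrative only; the real layer learns the convolution kernel rather than averaging):

import numpy as np

seq_len, d_head, window = 12, 8, 3
k = np.random.rand(seq_len, d_head)

# Window == stride, so the strided conv yields one output per window;
# plain averaging stands in for the learned kernel here.
k_compressed = k.reshape(seq_len // window, window, d_head).mean(axis=1)
print(k_compressed.shape)                        # (4, 8)
print(seq_len ** 2 / k_compressed.shape[0] ** 2) # 9.0x fewer attention scores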
Example #6
 def call(self, inputs):
     return tf.nn.bias_add(inputs, self.bias)
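On its own, this call() relies on a self.bias created elsewhere. A minimal complete layer along the same lines (the build() shown is an assumption, not part of the original snippet):

import tensorflow as tf

class BiasLayer(tf.keras.layers.Layer):
    """Adds a learned per-channel bias to its input."""

    def build(self, input_shape):
        self.bias = self.add_weight(
            'bias', shape=(input_shape[-1],), initializer='zeros')

    def call(self, inputs):
        return tf.nn.bias_add(inputs, self.bias)

print(BiasLayer()(tf.zeros([2, 4])).shape)  # (2, 4)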
Example #7
image_height = 10
image_width = 10
image_channels = 3

filter_height = 5
filter_width = 5
filter_channels = 64

inputs = tf.placeholder(
    tf.float32, shape=[None, image_height, image_width, image_channels])
# Weights for each of the 64 filter channels
weights = tf.Variable(
    tf.truncated_normal(
        [filter_height, filter_width, image_channels, filter_channels]))
bias = tf.Variable(tf.zeros(filter_channels))

# Stride of 2 for moving along the X and Y axes
# SAME padding pads the image so the output size is ceil(input / stride)
conv_layer = tf.nn.conv2d(inputs,
                          weights,
                          strides=[1, 2, 2, 1],
                          padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)

# Pools values together by taking the max value in the neighboring cells
conv_layer = tf.nn.max_pool(conv_layer,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='SAME')
Example #8
 def call(self, inputs, state, scope=None):
     with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
         with vs.variable_scope("Gates"):  # Reset gate and update gate.
             # We start with bias of 1.0.
             w_z = self._conv(inputs,
                              self._nb_filter,
                              self._ih_filter_length,
                              self._ih_strides,
                              self._ih_pandding,
                              scope="WzConv")
             u_z = self._conv(state,
                              self._nb_filter,
                              self._hh_filter_length, [1, 1, 1, 1],
                              "SAME",
                              scope="UzConv")
             z_bias = tf.get_variable(name="z_biases",
                                      initializer=tf.constant(
                                          1,
                                          shape=[self._nb_filter],
                                          dtype=tf.float32))
             z_gate = math_ops.sigmoid(
                 tf.nn.bias_add(w_z + u_z,
                                z_bias,
                                data_format=self._data_format))
             w_r = self._conv(inputs,
                              self._nb_filter,
                              self._ih_filter_length,
                              self._ih_strides,
                              self._ih_pandding,
                              scope="WrConv")
             u_r = self._conv(state,
                              self._nb_filter,
                              self._hh_filter_length, [1, 1, 1, 1],
                              "SAMW",
                              scope="UrConv")
             r_bias = tf.get_variable(name="r_biases",
                                      initializer=tf.constant(
                                          1,
                                          shape=[self._nb_filter],
                                          dtype=tf.float32))
             r_gate = math_ops.sigmoid(
                 tf.nn.bias_add(w_r + u_r,
                                r_bias,
                                data_format=self._data_format))
         with vs.variable_scope("Candidate"):
             w = self._conv(inputs,
                            self._nb_filter,
                            self._ih_filter_length,
                            self._ih_strides,
                            self._ih_pandding,
                            scope="WConv")
             u = self._conv(r_gate * state,
                            self._nb_filter,
                            self._hh_filter_length, [1, 1, 1, 1],
                            "SAME",
                            scope="UConv")
             c_bias = tf.get_variable(name="c_biases",
                                      initializer=tf.constant(
                                          1,
                                          shape=[self._nb_filter],
                                          dtype=tf.float32))
             c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))
         new_h = z_gate * state + (1 - z_gate) * c
     return new_h, new_h
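The cell follows the GRU recurrence, with convolutions in place of matrix products. A plain-numpy reference of that recurrence (fully connected rather than convolutional, purely illustrative):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, Wz, Uz, bz, Wr, Ur, br, W, U, bc):
    z = sigmoid(x @ Wz + h @ Uz + bz)      # update gate
    r = sigmoid(x @ Wr + h @ Ur + br)      # reset gate
    c = np.tanh(x @ W + (r * h) @ U + bc)  # candidate state
    return z * h + (1 - z) * c             # same blend as new_h above

rng = np.random.default_rng(0)
x, h = rng.standard_normal((1, 3)), np.zeros((1, 4))
Wz, Wr, W = (rng.standard_normal((3, 4)) for _ in range(3))
Uz, Ur, U = (rng.standard_normal((4, 4)) for _ in range(3))
bz = br = bc = np.ones(4)  # biases start at 1.0, as in the cell above
print(gru_step(x, h, Wz, Uz, bz, Wr, Ur, br, W, U, bc).shape)  # (1, 4)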
Example #9
    def forward_pass(self, x, step):
        """
        :param x: input image batch (either from real dataset or generator)

                  shape: (batch_size, image_width, image_height, image_channels)

        :return truth_score: scalar value detailing confidence the discriminator
                             places on the image being real, where 1 is utmost
                             confidence that the image is real, whereas 0 is utmost
                             confidence that the image is generated

                             shape: (batch_size,)

        :return tags_score: vector detailing confidence that the discriminator places
                            on a certain tag detailing an image
        """
        # initial convolution
        with tf.name_scope('initial_conv') as layer_scope:

            initial_kernel = WeightVariable(
                shape=[4, 4, 3, 32],
                name='Filter_initial',
                #model_scope=self.model_scope,
                layer_scope=layer_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))(step)

            initial_bias = BiasVariable(
                shape=(32, ),
                name='bias_initial',
                #model_scope=self.model_scope,
                layer_scope=layer_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))(step)

            feature_map = tf.nn.conv2d(x,
                                       initial_kernel,
                                       strides=[1, 2, 2, 1],
                                       padding='SAME')

            bias_feature_map = tf.nn.bias_add(feature_map, initial_bias)

            residual_input = act_fm = tf.nn.leaky_relu(bias_feature_map)

        # First series of ResBlocks
        for i in range(2):
            with tf.name_scope('k3n32s1_ResBlock1_pass{}'.format(i)):

                # First convolutional layer in ResBlock 1
                res_kernel1 = WeightVariable(
                    shape=[3, 3, 32, 32],
                    name='res1_filter1',
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                res_bias1 = BiasVariable(
                    shape=(32, ),
                    name='res1_bias1',
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                res_fm1 = tf.nn.conv2d(residual_input,
                                       res_kernel1,
                                       strides=[1, 1, 1, 1],
                                       padding='SAME')

                bias_res_fm1 = tf.nn.bias_add(res_fm1, res_bias1)

                act_res1_fm1 = tf.nn.leaky_relu(bias_res_fm1)

                # Second convolutional layer in ResBlock
                res_kernel2 = WeightVariable(
                    shape=[3, 3, 32, 32],
                    name='res1_filter2',
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                res_bias2 = BiasVariable(
                    shape=(32, ),
                    name='res1_bias2',
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                res_fm2 = tf.nn.conv2d(act_res1_fm1,
                                       res_kernel2,
                                       strides=[1, 1, 1, 1],
                                       padding='SAME')

                bias_res_fm2 = tf.nn.bias_add(res_fm2, res_bias2)

                # Elementwise sum with residual input
                residual_sum = tf.add(bias_res_fm2, residual_input)

                # Final ResBlock activation
                residual_input = residual_output = tf.nn.leaky_relu(
                    residual_sum)

        # Convolutional layer 'bridging gap' between first and second set of ResBlocks. This
        # conv layer has the effect of blowing up the number of channels x2 as well
        with tf.name_scope('bridge_conv_layer1'):
            bridge1_kernel = WeightVariable(
                shape=[4, 4, 32, 64],
                name='bridge1_kernel',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge1_bias = BiasVariable(
                shape=(64, ),
                name='bridge_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge1_fm = tf.nn.conv2d(residual_output,
                                      bridge1_kernel,
                                      strides=[1, 2, 2, 1],
                                      padding='SAME')

            bias_bridge1_fm = tf.nn.bias_add(bridge1_fm, bridge1_bias)

            residual_input = bridge1_output = tf.nn.leaky_relu(bias_bridge1_fm)

        # Second ResBlock (filter: (3x3), stride: (1, 1, 1, 1), num_filters: 64)
        for i in range(2):
            with tf.name_scope('k3n64s1_ResBlock2_pass{}'.format(i)):
                # 1st conv layer in ResBlock
                resblock2_kernel1 = WeightVariable(
                    shape=[3, 3, 64, 64],
                    name='resblock2_kernel1_pass{}'.format(i),
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                resblock2_bias1 = BiasVariable(
                    shape=(64, ),
                    name='resblock2_bias1_pass{}'.format(i),
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                resblock2_fm1 = tf.nn.conv2d(residual_input,
                                             resblock2_kernel1,
                                             strides=[1, 1, 1, 1],
                                             padding='SAME')

                bias_resblock2_fm1 = tf.nn.bias_add(resblock2_fm1,
                                                    resblock2_bias1)

                act_resblock2_fm1 = tf.nn.leaky_relu(bias_resblock2_fm1)

                # 2nd conv layer in ResBlock
                resblock2_kernel2 = WeightVariable(
                    shape=[3, 3, 64, 64],
                    name='resblock2_kernel2_pass{}'.format(i),
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                resblock2_bias2 = BiasVariable(
                    shape=(64, ),
                    name='resblock2_bias2_pass{}'.format(i),
                    model_scope=self.model_scope,
                    initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                                stddev=0.02))

                resblock2_fm2 = tf.nn.conv2d(act_resblock2_fm1,
                                             resblock2_kernel2,
                                             strides=[1, 1, 1, 1],
                                             padding='SAME')

                bias_resblock2_fm2 = tf.nn.bias_add(resblock2_fm2,
                                                    resblock2_bias2)

                # Elementwise sum with the residual input (the feature map fed
                # into the ResBlock)
                elementwise_sum_resblock2 = tf.add(bias_resblock2_fm2,
                                                   residual_input)

                # Final activation before feeding into next ResBlock/ bridge conv layer
                residual_output = residual_input = tf.nn.leaky_relu(
                    elementwise_sum_resblock2)

        # Second bridge conv layer between two ResBlocks
        with tf.name_scope('bridge_conv_layer2'):
            bridge2_kernel = WeightVariable(
                shape=[4, 4, 64, 128],
                name='bridge2_kernel',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge2_bias = BiasVariable(
                shape=(128, ),
                name='bridge2_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge2_fm = tf.nn.conv2d(residual_output,
                                      bridge2_kernel,
                                      strides=[1, 2, 2, 1],
                                      padding='SAME')

            bias_bridge2_fm = tf.nn.bias_add(bridge2_fm, bridge2_bias)

            residual_input = bridge2_output = tf.nn.leaky_relu(bias_bridge2_fm)

        # Initialize 3rd ResBlock
        with tf.name_scope('k3n128s1_ResBlock3'):
            for i in range(1, 3):
                with tf.name_scope('k3n128s1_ResBlock3_pass{}'.format(i)):

                    # First conv layer
                    resblock3_kernel1 = WeightVariable(
                        shape=[3, 3, 128, 128],
                        name='resblock3_kernel1_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock3_bias1 = BiasVariable(
                        shape=(128, ),
                        name='resblock3_bias1_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock3_fm1 = tf.nn.conv2d(residual_input,
                                                 resblock3_kernel1,
                                                 strides=[1, 1, 1, 1],
                                                 padding='SAME')

                    bias_resblock3_fm1 = tf.nn.bias_add(resblock3_fm1,
                                                        resblock3_bias1)

                    act_resblock3_fm1 = tf.nn.leaky_relu(bias_resblock3_fm1)

                    # Second conv layer
                    resblock3_kernel2 = WeightVariable(
                        shape=[3, 3, 128, 128],
                        name='resblock3_kernel2_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock3_bias2 = BiasVariable(
                        shape=(128, ),
                        name='resblock3_bias2_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock3_fm2 = tf.nn.conv2d(act_resblock3_fm1,
                                                 resblock3_kernel2,
                                                 strides=[1, 1, 1, 1],
                                                 padding='SAME')

                    bias_resblock3_fm2 = tf.nn.bias_add(resblock3_fm2,
                                                        resblock3_bias2)

                    # Element-wise summation of input to ResBlock and the final feature map
                    # produced by the second conv layer in the ResBlock
                    elementwise_sum_resblock3 = tf.add(bias_resblock3_fm2,
                                                       residual_input)

                    # Final ResBlock activation
                    residual_output = residual_input = tf.nn.leaky_relu(
                        elementwise_sum_resblock3)

        # Third bridge convolutional layer
        with tf.name_scope('bridge_conv_layer3'):
            bridge3_kernel = WeightVariable(
                shape=[3, 3, 128, 256],
                name='bridge3_kernel',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge3_bias = BiasVariable(
                shape=(256, ),
                name='bridge3_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge3_fm = tf.nn.conv2d(residual_output,
                                      bridge3_kernel,
                                      strides=[1, 2, 2, 1],
                                      padding='SAME')

            bias_bridge3_fm = tf.nn.bias_add(bridge3_fm, bridge3_bias)

            residual_input = act_bridge3_fm = tf.nn.leaky_relu(bias_bridge3_fm)

        # Initialize 4th ResBlock
        with tf.name_scope('k3n256s1_ResBlock4'):
            for i in range(1, 3):
                with tf.name_scope('ResBlock4_pass{}'.format(i)):

                    # 1st Conv Layer
                    resblock4_kernel1 = WeightVariable(
                        shape=[3, 3, 256, 256],
                        name='resblock4_kernel1_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock4_bias1 = BiasVariable(
                        shape=(256, ),
                        name='resblock4_bias1_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock4_fm1 = tf.nn.conv2d(residual_input,
                                                 resblock4_kernel1,
                                                 strides=[1, 1, 1, 1],
                                                 padding='SAME')

                    bias_resblock4_fm1 = tf.nn.bias_add(resblock4_fm1,
                                                        resblock4_bias1)

                    act_resblock4_fm1 = tf.nn.leaky_relu(bias_resblock4_fm1)

                    # 2nd Conv layer
                    resblock4_kernel2 = WeightVariable(
                        shape=[3, 3, 256, 256],
                        name='resblock4_kernel2_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock4_bias2 = BiasVariable(
                        shape=(256, ),
                        name='resblock4_bias2_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock4_fm2 = tf.nn.conv2d(act_resblock4_fm1,
                                                 resblock4_kernel2,
                                                 strides=[1, 1, 1, 1],
                                                 padding='SAME')

                    bias_resblock4_fm2 = tf.nn.bias_add(resblock4_fm2,
                                                        resblock4_bias2)

                    # Perform elementwise sum with the input to the ResBlock
                    # (i.e., the output of the bridge conv layer or the
                    # previous ResBlock)
                    elementwise_sum_resblock4 = tf.add(bias_resblock4_fm2,
                                                       residual_input)

                    # Final activation in ResBlock
                    residual_output = residual_input = tf.nn.leaky_relu(
                        elementwise_sum_resblock4)

        # ..."Hey, look at that... 4th bridge layer
        with tf.name_scope('bridge_conv_layer4'):
            bridge4_kernel = WeightVariable(
                shape=[3, 3, 256, 512],
                name='bridge_conv_layer4_kernel',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge4_bias = BiasVariable(
                shape=(512, ),
                name='bridge_conv_layer4_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            bridge4_fm = tf.nn.conv2d(residual_output,
                                      bridge4_kernel,
                                      strides=[1, 2, 2, 1],
                                      padding='SAME')

            bias_bridge4_fm = tf.nn.bias_add(bridge4_fm, bridge4_bias)

            act_bridge4_fm = residual_input = tf.nn.leaky_relu(bias_bridge4_fm)

        # And a wild 5th ResBlock (the last one, I promise) appears
        with tf.name_scope('k3n512s1_ResBlock5'):
            for i in range(1, 3):
                with tf.name_scope('k3n512s1_ResBlock5_pass{}'.format(i)):
                    # 1st Conv layer
                    resblock5_kernel1 = WeightVariable(
                        shape=[3, 3, 512, 512],
                        name='resblock5_kernel1_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock5_bias1 = BiasVariable(
                        shape=(512, ),
                        name='resblock5_bias1_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock5_fm1 = tf.nn.conv2d(residual_input,
                                                 resblock5_kernel1,
                                                 strides=[1, 1, 1, 1],
                                                 padding='SAME')

                    resblock5_bias_fm1 = tf.nn.bias_add(resblock5_fm1,
                                                        resblock5_bias1)

                    resblock5_act_fm1 = tf.nn.leaky_relu(resblock5_bias_fm1)

                    # 2nd Conv layer
                    resblock5_kernel2 = WeightVariable(
                        shape=[3, 3, 512, 512],
                        name='resblock5_kernel2_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock5_bias2 = BiasVariable(
                        shape=(512, ),
                        name='resblock5_bias2_pass{}'.format(i),
                        model_scope=self.model_scope,
                        initializer=tf.initializers.TruncatedNormal(
                            mean=0.0, stddev=0.02))

                    resblock5_fm2 = tf.nn.conv2d(resblock5_act_fm1,
                                                 resblock5_kernel2,
                                                 strides=[1, 1, 1, 1],
                                                 padding='SAME')

                    resblock5_bias_fm2 = tf.nn.bias_add(resblock5_fm2,
                                                        resblock5_bias2)

                    # Elementwise summation with the input to the residual block
                    resblock5_elementwise_sum = tf.add(resblock5_bias_fm2,
                                                       residual_input)

                    # Final activation
                    residual5_output = residual_input = tf.nn.leaky_relu(
                        resblock5_elementwise_sum)

        # Initialize final conv layer
        with tf.name_scope('k3n1024s2_final_conv_layer'):
            final_kernel = WeightVariable(
                shape=[3, 3, 512, 1024],
                name='final_conv_layer_filter',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            final_bias = BiasVariable(
                shape=(1024, ),
                name='final_conv_layer_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            final_fm = tf.nn.conv2d(residual5_output,
                                    final_kernel,
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')

            final_bias_fm = tf.nn.bias_add(final_fm, final_bias)

            final_act_fm = tf.nn.leaky_relu(final_bias_fm)

        # Flatten the feature map for the final fully-connected layers. Six
        # stride-2 SAME convolutions shrink each spatial dimension by 64x.
        flattened_shape = ((self.image_width // 64) *
                           (self.image_height // 64) * 1024)
        flattened_final_fm = tf.reshape(final_act_fm, [-1, flattened_shape])

        # Final output layer for truth_score
        with tf.name_scope('forgery_score_output_layer'):
            forgery_score_weights = WeightVariable(
                shape=[flattened_shape, 1],
                name='forgery_score_weights',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            forgery_score_bias = BiasVariable(
                shape=(1, ),
                name='forgery_score_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            unactivated_forgery_score = tf.nn.bias_add(
                tf.matmul(flattened_final_fm, forgery_score_weights),
                forgery_score_bias)

            # output shape: (batch_size, 1)
            forgery_score = tf.nn.sigmoid(unactivated_forgery_score)

        # Final output layer for tags to assign to input image
        with tf.name_scope('tag_confidence_output_layer'):
            tag_confidence_weights = WeightVariable(
                shape=[flattened_shape, self.num_tags],
                name='tag_confidence_weights',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            tag_confidence_bias = BiasVariable(
                shape=(self.num_tags, ),
                name='tag_confidence_bias',
                model_scope=self.model_scope,
                initializer=tf.initializers.TruncatedNormal(mean=0.0,
                                                            stddev=0.02))

            unactivated_tag_confidences = tf.nn.bias_add(
                tf.matmul(flattened_final_fm, tag_confidence_weights),
                tag_confidence_bias)

            # output shape: (batch_size, num_tags)
            tag_confidences = tf.nn.sigmoid(unactivated_tag_confidences)

        # Outputs
        # forgery_score: (batch_size, 1)
        # tag_confidences: (batch_size, num_tags)
        return forgery_score, tag_confidences
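Since the discriminator applies six stride-2 SAME convolutions (the initial conv, the four bridge layers, and the final conv), the map entering the dense heads is ceil(H/64) x ceil(W/64) x 1024. A quick check of that arithmetic (illustrative):

import math

def out_size(dim, n_stride2=6):
    for _ in range(n_stride2):
        dim = math.ceil(dim / 2)  # SAME padding, stride 2
    return dim

print(out_size(128))  # 2, so a 128x128 input flattens to 2 * 2 * 1024 values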