Example #1
        def _form_groups(net, start_layer, end_layer):
            for layer_id in range(start_layer, end_layer):
                # residual blocks
                W_res_name = "W_g{}_res".format(layer_id)
                b_res_name = "b_g{}_res".format(layer_id)
                net = self.res_blocks(net,
                                      W_res_name,
                                      b_res_name,
                                      scope="RES_{}".format(layer_id))
                # transposed-convolution ("decv") weights; W_name and b_name
                # are captured from the enclosing scope
                filter_name = "{}{}".format(W_name, layer_id)
                bias_name = "{}{}".format(b_name, layer_id)
                curr_filter = self.decv_filters[filter_name]
                curr_bias = self.decv_biases[bias_name]

                # transposed convolution
                net = ne.conv2d_transpose(net,
                                          filters=curr_filter,
                                          biases=curr_bias,
                                          strides=self.decv_strides[layer_id],
                                          padding=self.decv_padding[layer_id])
                # normalization (dispatch on self.use_norm)
                if self.use_norm == "BATCH":
                    net = ne.batch_norm(net, self.is_training)
                elif self.use_norm == "LAYER":
                    net = ne.layer_norm(net, self.is_training)
                elif self.use_norm == "INSTA":
                    net = ne.instance_norm(net, self.is_training)

                if layer_id != end_layer - 1:
                    net = ne.leaky_relu(net, self.decv_leaky_ratio[layer_id])
                    net = ne.drop_out(net, self.decv_drop_rate[layer_id],
                                      self.is_training)

                if layer_id == self.num_decv - 2:
                    # optional label mask; FLAGS and mask_states are
                    # captured from the enclosing scope
                    if FLAGS.USE_LABEL_MASK:
                        h = net.get_shape().as_list()[1]  # NHWC: dim 1 is height
                        w = net.get_shape().as_list()[2]
                        c = net.get_shape().as_list()[3]
                        net = tf.reshape(net, [-1, h * w, c])
                        net = tf.matmul(net, mask_states)
                        net = tf.reshape(net, [-1, h, w, c])
                        if self.use_norm == "BATCH":
                            net = ne.batch_norm(net, self.is_training)
                        elif self.use_norm == "LAYER":
                            net = ne.layer_norm(net, self.is_training)
                        elif self.use_norm == "INSTA":
                            net = ne.instance_norm(net, self.is_training)
            return net
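
The label-mask step above flattens the feature map from (N, H, W, C) to
(N, H*W, C), multiplies by mask_states, and reshapes back, which mixes the
channels at every spatial position. A minimal standalone sketch of that trick,
assuming mask_states is a (C, C) mixing matrix (its actual shape is not shown
here); tf.einsum sidesteps the rank rules of batched tf.matmul:

    import tensorflow as tf  # TF 1.x API, as in the examples

    def apply_label_mask(net, mask_states):
        # net: (N, H, W, C) feature map
        # mask_states: assumed (C, C) channel-mixing matrix
        h, w, c = net.get_shape().as_list()[1:4]
        flat = tf.reshape(net, [-1, h * w, c])               # flatten spatial dims
        mixed = tf.einsum('npc,cd->npd', flat, mask_states)  # mix channels per position
        return tf.reshape(mixed, [-1, h, w, c])              # restore spatial layout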
Example #2
    def res_blocks(self, inputs, W_name, b_name, scope):
        with tf.variable_scope(scope):
            net = inputs
            for res_id in range(self.num_res_block):
                res_net = net
                for layer_id in range(self.res_block_size):
                    filter_name = "{}{}_{}".format(W_name, res_id, layer_id)
                    bias_name = "{}{}_{}".format(b_name, res_id, layer_id)
                    curr_filter = self.res_filters[filter_name]
                    curr_bias = self.res_biases[bias_name]
                    # transposed convolution
                    net = ne.conv2d_transpose(
                        net,
                        filters=curr_filter,
                        biases=curr_bias,
                        strides=self.res_strides[layer_id],
                        padding=self.res_padding[layer_id])

                    if self.use_norm == "BATCH":
                        net = ne.batch_norm(net, self.is_training)
                    elif self.use_norm == "LAYER":
                        net = ne.layer_norm(net, self.is_training)
                    elif self.use_norm == "INSTA":
                        net = ne.instance_norm(net, self.is_training)
                    net = ne.leaky_relu(net, self.res_leaky_ratio[layer_id])
                    net = ne.drop_out(net, self.res_drop_rate[layer_id],
                                      self.is_training)

                net += res_net  # identity skip connection around the block
            net = tf.identity(net, name='res_output')
            return net
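
Note that net += res_net is only valid when every pass through the inner loop
preserves the tensor's shape, so self.res_strides and self.res_padding must be
chosen so each transposed convolution keeps both the spatial resolution and the
channel count unchanged (typically stride 1 with SAME padding).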
Example #3
 def _form_groups(net, start_layer, end_layer):
     for layer_id in range(start_layer, end_layer):
         # W_name, b_name (and self) are captured from the enclosing method
         filter_name = "{}{}".format(W_name, layer_id)
         bias_name = "{}{}".format(b_name, layer_id)
         curr_filter = self.conv_filters[filter_name]
         curr_bias = self.conv_biases[bias_name]
         # convolution
         net = ne.conv2d(net, filters=curr_filter, biases=curr_bias,
                         strides=self.conv_strides[layer_id],
                         padding=self.conv_padding[layer_id])
         conv_net = net  # saved for the optional residual connection below
         # normalization (dispatch on self.use_norm)
         if self.use_norm == "BATCH":
             net = ne.batch_norm(net, self.is_training)
         elif self.use_norm == "LAYER":
             net = ne.layer_norm(net, self.is_training)
         elif self.use_norm == "INSTA":
             net = ne.instance_norm(net, self.is_training)
         net = ne.leaky_relu(net, self.conv_leaky_ratio[layer_id])
         net = ne.drop_out(net, self.conv_drop_rate[layer_id], self.is_training)
         # optional residual around normalization/activation/dropout;
         # conv_residual is captured from the enclosing scope
         if conv_residual:
             net += conv_net
         # res blocks
         if self.num_res_block != 0:
             W_res_name = "W_g{}_res".format(layer_id)
             b_res_name = "b_g{}_res".format(layer_id)
             net = self.res_blocks(net, W_res_name, b_res_name, scope="RES_{}".format(layer_id))
     return net
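
The same three-way normalization dispatch appears verbatim in every example
above. A small helper, sketched here as a free function and assuming ne is the
same layer module the examples use, would collapse each chain to a single call:

    def apply_norm(net, use_norm, is_training):
        # Mirrors the if/elif chains above; any other value of use_norm
        # leaves the tensor unchanged.
        if use_norm == "BATCH":
            net = ne.batch_norm(net, is_training)
        elif use_norm == "LAYER":
            net = ne.layer_norm(net, is_training)
        elif use_norm == "INSTA":
            net = ne.instance_norm(net, is_training)
        return net

Each chain then becomes net = apply_norm(net, self.use_norm, self.is_training).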
Example #4
 def in_layer(self, inputs, W_name="W_in_", b_name="b_in_"):
     layer_id = 0
     net = inputs
     filter_name = "{}{}".format(W_name, layer_id)
     bias_name = "{}{}".format(b_name, layer_id)
     curr_filter = self.in_filter[filter_name]
     curr_bias = self.in_bias[bias_name]
     # normalization (dispatch on self.use_norm)
     if self.use_norm == "BATCH":
         net = ne.batch_norm(net, self.is_training)
     elif self.use_norm == "LAYER":
         net = ne.layer_norm(net, self.is_training)
     elif self.use_norm == "INSTA":
         net = ne.instance_norm(net, self.is_training)
     net = ne.leaky_relu(net, self.in_leaky_ratio)
     # transposed convolution
     net = ne.conv2d_transpose(net,
                               filters=curr_filter,
                               biases=curr_bias,
                               strides=self.in_stride,
                               padding=self.in_padding)
     net = tf.identity(net, name='in_output')
     return net
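
Unlike the earlier examples, this input layer applies normalization and the
leaky ReLU before the transposed convolution rather than after it, so the
nonlinearity acts on the (normalized) raw inputs; the trailing tf.identity only
names the output tensor ('in_output') so downstream code can fetch it by name.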
Example #5
 def random_noise_layer(self, inputs, random_mask):
     net = inputs
     # additive Gaussian noise, optionally gated by random_mask
     random_noise = tf.random_normal(tf.shape(net), mean=self.mean, stddev=self.stddev)
     if random_mask is not None:
         random_noise = tf.multiply(random_mask, random_noise)
     net += random_noise
     if self.use_norm == "BATCH":
         net = ne.batch_norm(net, self.is_training)
     elif self.use_norm == "LAYER":
         net = ne.layer_norm(net, self.is_training)
     elif self.use_norm == "INSTA":
         net = ne.instance_norm(net, self.is_training)
     net = tf.identity(net, name='rand_output')
     return net
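
The masked-noise pattern can be exercised on its own: because tf.random_normal
is re-sampled on every session run, the layer injects fresh noise each forward
pass, and the mask confines the perturbation to chosen positions or channels.
A minimal sketch with illustrative shapes and noise parameters:

    import tensorflow as tf  # TF 1.x API

    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    # illustrative mask: perturb only the first channel
    mask = tf.concat([tf.ones([1, 32, 32, 1]), tf.zeros([1, 32, 32, 2])], axis=3)
    noise = tf.random_normal(tf.shape(x), mean=0.0, stddev=0.1)
    noisy = x + mask * noise  # broadcasting applies the mask across the batch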