def res_blocks(self, inputs, W_name, b_name, scope):
    with tf.variable_scope(scope):
        net = inputs
        for res_id in range(self.num_res_block):
            res_net = net
            for layer_id in range(self.res_block_size):
                filter_name = "{}{}_{}".format(W_name, res_id, layer_id)
                bias_name = "{}{}_{}".format(b_name, res_id, layer_id)
                curr_filter = self.res_filters[filter_name]
                curr_bias = self.res_biases[bias_name]
                # transposed convolution
                net = ne.conv2d_transpose(
                    net, filters=curr_filter, biases=curr_bias,
                    strides=self.res_strides[layer_id],
                    padding=self.res_padding[layer_id])
                # normalization
                if self.use_norm == "BATCH":
                    net = ne.batch_norm(net, self.is_training)
                elif self.use_norm == "LAYER":
                    net = ne.layer_norm(net, self.is_training)
                elif self.use_norm == "INSTA":
                    net = ne.instance_norm(net, self.is_training)
                # nonlinear activation
                net = ne.leaky_relu(net, self.res_leaky_ratio[layer_id])
                net = ne.drop_out(net, self.res_drop_rate[layer_id], self.is_training)
            # identity skip connection across the whole block
            net += res_net
        net = tf.identity(net, name='res_output')
        return net
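# Usage sketch for res_blocks (illustrative; the placeholder shape and names
# below are assumptions, not taken from this file). The identity skip
# `net += res_net` requires every inner layer to preserve the input shape,
# i.e. res_strides[layer_id] == 1 with "SAME" padding:
#
#   features = tf.placeholder(tf.float32, [None, 16, 16, 64])
#   out = self.res_blocks(features, "W_g0_res", "b_g0_res", scope="RES_0")
#   # out has the same shape as features; tensor name "RES_0/res_output"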
def _form_groups(net, start_layer, end_layer):
    # W_name and b_name are captured from the enclosing method's scope
    for layer_id in range(start_layer, end_layer):
        # res blocks
        W_res_name = "W_g{}_res".format(layer_id)
        b_res_name = "b_g{}_res".format(layer_id)
        net = self.res_blocks(net, W_res_name, b_res_name,
                              scope="RES_{}".format(layer_id))
        # deconv filters
        filter_name = "{}{}".format(W_name, layer_id)
        bias_name = "{}{}".format(b_name, layer_id)
        curr_filter = self.decv_filters[filter_name]
        curr_bias = self.decv_biases[bias_name]
        # transposed convolution
        net = ne.conv2d_transpose(net, filters=curr_filter, biases=curr_bias,
                                  strides=self.decv_strides[layer_id],
                                  padding=self.decv_padding[layer_id])
        # normalization
        if self.use_norm == "BATCH":
            net = ne.batch_norm(net, self.is_training)
        elif self.use_norm == "LAYER":
            net = ne.layer_norm(net, self.is_training)
        # nonlinear activation
        net = ne.leaky_relu(net, self.decv_leaky_ratio[layer_id])
        net = ne.drop_out(net, self.decv_drop_rate[layer_id], self.is_training)
    return net
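# Sketch of how the enclosing decoder method might drive this helper (the
# layer bounds here are an assumption; `num_decv` is used elsewhere in this
# file for the deconv layer count):
#
#   net = _form_groups(net, 0, self.num_decv)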
def _form_groups(net, start_layer, end_layer):
    # W_name, b_name, and conv_residual are captured from the enclosing scope
    for layer_id in range(start_layer, end_layer):
        filter_name = "{}{}".format(W_name, layer_id)
        bias_name = "{}{}".format(b_name, layer_id)
        curr_filter = self.conv_filters[filter_name]
        curr_bias = self.conv_biases[bias_name]
        # convolution
        net = ne.conv2d(net, filters=curr_filter, biases=curr_bias,
                        strides=self.conv_strides[layer_id],
                        padding=self.conv_padding[layer_id])
        conv_net = net
        # normalization
        if self.use_norm == "BATCH":
            net = ne.batch_norm(net, self.is_training)
        elif self.use_norm == "LAYER":
            net = ne.layer_norm(net, self.is_training)
        elif self.use_norm == "INSTA":
            net = ne.instance_norm(net, self.is_training)
        # nonlinear activation
        net = ne.leaky_relu(net, self.conv_leaky_ratio[layer_id])
        net = ne.drop_out(net, self.conv_drop_rate[layer_id], self.is_training)
        # residual around the norm/act/dropout stack, taken from the raw
        # convolution output
        if conv_residual:
            net += conv_net
        # res blocks
        if self.num_res_block != 0:
            W_res_name = "W_g{}_res".format(layer_id)
            b_res_name = "b_g{}_res".format(layer_id)
            net = self.res_blocks(net, W_res_name, b_res_name,
                                  scope="RES_{}".format(layer_id))
    return net
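# With conv_residual enabled, each group above computes (shapes match because
# norm, activation, and dropout all preserve the conv output's shape):
#
#   net = dropout(act(norm(conv(net)))) + conv(net)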
def _form_groups(net, start_layer, end_layer):
    for layer_id in range(start_layer, end_layer):
        # res blocks
        W_res_name = "W_g{}_res".format(layer_id)
        b_res_name = "b_g{}_res".format(layer_id)
        net = self.res_blocks(net, W_res_name, b_res_name,
                              scope="RES_{}".format(layer_id))
        # deconv filters
        filter_name = "{}{}".format(W_name, layer_id)
        bias_name = "{}{}".format(b_name, layer_id)
        curr_filter = self.decv_filters[filter_name]
        curr_bias = self.decv_biases[bias_name]
        # transposed convolution
        net = ne.conv2d_transpose(net, filters=curr_filter, biases=curr_bias,
                                  strides=self.decv_strides[layer_id],
                                  padding=self.decv_padding[layer_id])
        # normalization
        if self.use_norm == "BATCH":
            net = ne.batch_norm(net, self.is_training)
        elif self.use_norm == "LAYER":
            net = ne.layer_norm(net, self.is_training)
        elif self.use_norm == "INSTA":
            net = ne.instance_norm(net, self.is_training)
        # skip activation and dropout on the final layer
        if layer_id != end_layer - 1:
            net = ne.leaky_relu(net, self.decv_leaky_ratio[layer_id])
            net = ne.drop_out(net, self.decv_drop_rate[layer_id], self.is_training)
        # label mask on the second-to-last deconv layer
        if layer_id == self.num_decv - 2:
            if FLAGS.USE_LABEL_MASK:
                w = net.get_shape().as_list()[1]
                h = net.get_shape().as_list()[2]
                c = net.get_shape().as_list()[3]
                net = tf.reshape(net, [-1, w * h, c])
                net = tf.matmul(net, mask_states)
                net = tf.reshape(net, [-1, w, h, c])
                if self.use_norm == "BATCH":
                    net = ne.batch_norm(net, self.is_training)
                elif self.use_norm == "LAYER":
                    net = ne.layer_norm(net, self.is_training)
                elif self.use_norm == "INSTA":
                    net = ne.instance_norm(net, self.is_training)
    return net
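# Shape sketch for the label-mask branch above (hypothetical; `mask_states`
# is not defined in this file and is assumed to be a per-example
# channel-mixing matrix of shape [batch, c, c]):
#
#   net: [batch, w, h, c] --reshape--> [batch, w*h, c]
#   tf.matmul([batch, w*h, c], mask_states[batch, c, c]) -> [batch, w*h, c]
#   --reshape--> [batch, w, h, c]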
def _fc_layers(self, inputs, weights_dict, biases_dict, fc_leaky_ratio,
               fc_drop_rate, num_fc, W_name, b_name):
    net = inputs
    for layer_id in range(num_fc):
        weight_name = "{}{}".format(W_name, layer_id)
        bias_name = "{}{}".format(b_name, layer_id)
        curr_weight = weights_dict[weight_name]
        curr_bias = biases_dict[bias_name]
        net = ne.fully_conn(net, weights=curr_weight, biases=curr_bias)
        # batch normalization over the feature axis
        if self.use_norm == "BATCH":
            net = ne.batch_norm(net, self.is_training, axis=-1)
        # nonlinear activation
        net = ne.leaky_relu(net, fc_leaky_ratio[layer_id])
        net = ne.drop_out(net, fc_drop_rate[layer_id], self.is_training)
    net = tf.identity(net, name='output')
    return net
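# Usage sketch: enfc_layers and defc_layers below repeat this pattern and
# could be routed through _fc_layers (a refactoring suggestion, not current
# behavior; note their norm flags and axes differ slightly):
#
#   net = self._fc_layers(net, self.enfc_weights, self.enfc_biases,
#                         self.enfc_leaky_ratio, self.enfc_drop_rate,
#                         self.num_enfc, "W_enfc", "b_enfc")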
def defc_layers(self, inputs, W_name="W_defc", b_name="b_defc"): net = inputs for layer_id in range(self.num_enfc): weight_name = "{}{}".format(W_name, layer_id) bias_name = "{}{}".format(b_name, layer_id) curr_weight = self.defc_weights[weight_name] curr_bias = self.defc_biases[bias_name] net = ne.fully_conn(net, weights=curr_weight, biases=curr_bias) # batch normalization if self.use_batch_norm: net = ne.batch_norm(net, self.is_training, axis=1) #net = ne.leaky_brelu(net, self.defc_leaky_ratio[layer_id], self.layer_low_bound, self.layer_up_bound) # Nonlinear act net = ne.leaky_relu(net, self.defc_leaky_ratio[layer_id]) net = ne.drop_out(net, self.defc_drop_rate[layer_id], self.is_training) #net = ne.elu(net) net = tf.identity(net, name='output') net = tf.reshape(net, [-1] + self.decv_in_shape) return net
def enfc_layers(self, inputs, W_name="W_enfc", b_name="b_enfc"): net = tf.reshape(inputs, [ -1, self.conv_out_shape[0] * self.conv_out_shape[1] * self.conv_out_shape[2] ]) for layer_id in range(self.num_enfc): weight_name = "{}{}".format(W_name, layer_id) bias_name = "{}{}".format(b_name, layer_id) curr_weight = self.enfc_weights[weight_name] curr_bias = self.enfc_biases[bias_name] net = ne.fully_conn(net, weights=curr_weight, biases=curr_bias) # batch normalization if self.use_norm == "BATCH": net = ne.batch_norm(net, self.is_training, axis=1) elif self.use_norm == "LAYER": net = ne.layer_norm(net, self.is_training) #net = ne.leaky_brelu(net, self.enfc_leaky_ratio[layer_id], self.enfc_low_bound[layer_id], self.enfc_up_bound[layer_id]) # Nonlinear act net = ne.leaky_relu(net, self.enfc_leaky_ratio[layer_id]) net = ne.drop_out(net, self.enfc_drop_rate[layer_id], self.is_training) #net = ne.elu(net) net = tf.identity(net, name='output') return net