def batch_norm(x, name, is_training=True):
    """Batch-normalize `x`, maintaining moving averages of the statistics.

    Args:
        x: Tensor of rank 2 (batch, dim) or rank 4 (batch, h, w, chan).
        name: Identifier used to build unique variable/op names.
        is_training: If True, normalize with the current batch statistics
            and update the stored moving averages; if False, normalize with
            the stored moving averages instead.

    Returns:
        A tensor shaped like `x`, normalized then scaled by the learnable
        `gamma` and shifted by the learnable `beta`.

    Raises:
        ValueError: If `x` is neither rank 2 nor rank 4.
    """
    decay_rate = 0.99
    shape = x.get_shape().as_list()
    dim = shape[-1]
    # Batch statistics are taken over every axis except the channel axis.
    if len(shape) == 2:
        mean, var = tf.nn.moments(x, [0], name='moments_bn_{}'.format(name))
    elif len(shape) == 4:
        mean, var = tf.nn.moments(x, [0, 1, 2],
                                  name='moments_bn_{}'.format(name))
    else:
        # Previously an unsupported rank fell through and raised a confusing
        # NameError on `mean`; fail fast with a clear message instead.
        raise ValueError(
            'batch_norm expects a rank-2 or rank-4 tensor, got rank '
            '{}'.format(len(shape)))

    # Moving averages are not trainable; beta/gamma are the learnable
    # shift and scale.
    avg_mean = get_biases('avg_mean_bn_{}'.format(name), [1, dim], 0.0, False)
    avg_var = get_biases('avg_var_bn_{}'.format(name), [1, dim], 1.0, False)
    beta = get_biases('beta_bn_{}'.format(name), [1, dim], 0.0)
    gamma = get_biases('gamma_bn_{}'.format(name), [1, dim], 1.0)

    if is_training:
        # Update the moving averages, then normalize with the *batch*
        # statistics (the control dependency forces the updates to run).
        avg_mean_assign_op = tf.assign(
            avg_mean, decay_rate * avg_mean + (1 - decay_rate) * mean)
        avg_var_assign_op = tf.assign(
            avg_var, decay_rate * avg_var + (1 - decay_rate) * var)
        with tf.control_dependencies([avg_mean_assign_op,
                                      avg_var_assign_op]):
            ret = gamma * (x - mean) / tf.sqrt(1e-6 + var) + beta
    else:
        # Inference: use the accumulated population statistics.
        ret = gamma * (x - avg_mean) / tf.sqrt(1e-6 + avg_var) + beta
    return ret
def set_model(self, figs, is_training, reuse=False):
    """Build the conv + FC network on `figs`.

    Returns:
        (logits, feature_image): raw logits (no sigmoid applied) and the
        activation map after the final convolutional layer.
    """
    h = figs
    # Convolutional stack: layer 0 is stride 1 without batch norm;
    # every later layer is stride 2 followed by batch norm.
    with tf.variable_scope(self.name_scope_conv, reuse=reuse):
        chan_pairs = zip(self.layer_chanels, self.layer_chanels[1:])
        for layer_id, (_, n_out) in enumerate(chan_pairs):
            if layer_id == 0:
                h = tf.nn.relu(conv_layer(inputs=h, out_num=n_out,
                                          filter_width=5, filter_hight=5,
                                          stride=1, l_id=layer_id))
            else:
                conv_out = conv_layer(inputs=h, out_num=n_out,
                                      filter_width=5, filter_hight=5,
                                      stride=2, l_id=layer_id)
                h = tf.nn.relu(batch_norm(conv_out, layer_id, is_training))

    feature_image = h

    # Fully connected head on the flattened feature map.
    flat_dim = get_dim(h)
    h = tf.reshape(h, [-1, flat_dim])
    with tf.variable_scope(self.name_scope_fc, reuse=reuse):
        w = get_weights('fc', [flat_dim, self.fc_dim], 0.02)
        b = get_biases('fc', [self.fc_dim], 0.0)
        h = tf.nn.relu(batch_norm(tf.matmul(h, w) + b, 'fc', is_training))

        w = get_weights('fc2', [self.fc_dim, 1], 0.02)
        b = get_biases('fc2', [1], 0.0)
        h = tf.matmul(h, w) + b
    return h, feature_image
def set_model(self, z, batch_size, is_training, reuse=False):
    """Map latent vectors `z` to images: FC projection, then a stack of
    stride-2 deconvolutions, finishing with tanh on the last layer."""
    # Project z to the first feature map and reshape it to 4-D.
    with tf.variable_scope(self.name_scope_reshape, reuse=reuse):
        first_chan = self.layer_chanels[0]
        flat = self.in_dim * self.in_dim * first_chan
        w_r = get_weights('_r', [self.z_dim, flat], 0.02)
        b_r = get_biases('_r', [flat], 0.0)
        h = batch_norm(tf.matmul(z, w_r) + b_r, 'reshape', is_training)
        h = lrelu(h)
        h = tf.reshape(h, [-1, self.in_dim, self.in_dim, first_chan])

    # Deconvolution stack; each layer doubles the spatial side length.
    last = len(self.layer_chanels) - 2  # index of the final deconv layer
    with tf.variable_scope(self.name_scope_deconv, reuse=reuse):
        for i, (_, out_chan) in enumerate(zip(self.layer_chanels,
                                              self.layer_chanels[1:])):
            side = self.in_dim * 2 ** (i + 1)
            deconved = deconv_layer(inputs=h,
                                    out_shape=[batch_size, side, side,
                                               out_chan],
                                    filter_width=5, filter_hight=5,
                                    stride=2, l_id=i)
            if i == last:
                # Output layer: tanh, no batch norm.
                h = tf.nn.tanh(deconved)
            else:
                h = lrelu(batch_norm(deconved, i, is_training))
    return h
def set_model(self, figs, is_training, reuse=False):
    u'''
    Encode `figs` into the parameters of a latent Gaussian.

    Returns (mu, log_sigma) as raw linear outputs — no sigmoid applied.
    '''
    h = figs
    # Convolutional stack: every layer is stride 2; batch norm is skipped
    # on layer 0 only.
    with tf.variable_scope(self.name_scope_conv, reuse=reuse):
        for idx, (_, n_out) in enumerate(zip(self.layer_chanels,
                                             self.layer_chanels[1:])):
            conv_out = conv_layer(inputs=h, out_num=n_out,
                                  filter_width=5, filter_hight=5,
                                  stride=2, l_id=idx)
            if idx == 0:
                h = tf.nn.relu(conv_out)
            else:
                h = tf.nn.relu(batch_norm(conv_out, idx, is_training))

    # Fully connected head producing the two Gaussian parameter vectors.
    flat_dim = get_dim(h)
    h = tf.reshape(h, [-1, flat_dim])
    with tf.variable_scope(self.name_scope_fc, reuse=reuse):
        w = get_weights('fc', [flat_dim, self.fc_dim], 0.02)
        b = get_biases('fc', [self.fc_dim], 0.0)
        h = tf.nn.relu(batch_norm(tf.matmul(h, w) + b, 'en_fc_bn',
                                  is_training))

        w_mu = get_weights('mu', [self.fc_dim, self.z_dim], 0.02)
        b_mu = get_biases('mu', [self.z_dim], 0.0)
        mu = tf.matmul(h, w_mu) + b_mu

        w_sig = get_weights('sigma', [self.fc_dim, self.z_dim], 0.02)
        b_sig = get_biases('sigma', [self.z_dim], 0.0)
        log_sigma = tf.matmul(h, w_sig) + b_sig
    return mu, log_sigma