def minibatch_stddev_layer(x, group_size=4):
    """Append a cross-minibatch standard-deviation feature map (ProGAN-style).

    Splits the batch into groups of `group_size` samples, measures the
    per-group standard deviation of every feature, averages it down to a
    single scalar per group, and concatenates that scalar as one extra
    channel. Assumes NHWC layout — TODO confirm against callers.

    Args:
        x: 4-D tensor `[batch, height, width, channels]`.
        group_size: samples per statistics group (clipped to the batch size).

    Returns:
        Tensor with the same dtype as `x` and one additional channel.
    """
    with tf.compat.v1.variable_scope('MinibatchStd'):
        # Never ask for a group larger than the actual batch.
        group_size = tf.minimum(group_size, tf.shape(x)[0])
        height = tf.shape(x)[1]
        width = tf.shape(x)[2]
        channels = tf.shape(x)[3]
        # [group_size, num_groups, H, W, C]
        grouped = tf.reshape(x, [group_size, -1, height, width, channels])
        grouped = tf.cast(grouped, tf.float32)
        # Stddev over the group axis; epsilon keeps sqrt differentiable at 0.
        centered = grouped - tf.reduce_mean(grouped, axis=0, keepdims=True)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(centered), axis=0) + 1e-8)
        # Collapse spatial dims and channels -> one scalar per group.
        stat = tf.reduce_mean(stddev, axis=[1, 2, 3], keepdims=True)
        stat = tf.cast(stat, x.dtype)
        # Broadcast the scalar back over the group and the spatial grid.
        stat = tf.tile(stat, [group_size, height, width, 1])
        return tf.concat([x, stat], axis=3)
def conv2d_downscale2d(x, fmaps, kernel, name):
    """Fused 2x-downscale convolution (ProGAN trick).

    Instead of average-pooling the activations and then convolving,
    blur the weights with a 2x2 box filter and convolve with stride 2 —
    mathematically equivalent but a single op.

    Args:
        x: NHWC input tensor.
        fmaps: number of output feature maps.
        kernel: spatial kernel size; must be odd and >= 1.
        name: variable scope name.
    """
    assert kernel >= 1 and kernel % 2 == 1
    with tf.compat.v1.variable_scope(name):
        weights = get_weight([kernel, kernel, x.shape[3].value, fmaps])
        padded = tf.pad(weights, [[1, 1], [1, 1], [0, 0], [0, 0]], mode="CONSTANT")
        # Average of the four shifted copies == convolution with a
        # normalized 2x2 box filter applied to the weights.
        shifts = [padded[1:, 1:], padded[:-1, 1:], padded[1:, :-1], padded[:-1, :-1]]
        weights = 0.25 * tf.add_n(shifts)
        weights = tf.cast(weights, x.dtype)
        return tf.nn.conv2d(x, weights, strides=[1, 2, 2, 1], padding="SAME")
def apply_bias(x):
    """Add a learned per-channel bias to `x`.

    Creates (or reuses) a 'bias' variable of shape `[channels]` in the
    current variable scope, zero-initialized. 2-D inputs get the bias
    added directly; higher-rank (NHWC) inputs get it broadcast over the
    batch and spatial dimensions.
    """
    bias = tf.compat.v1.get_variable(
        'bias', shape=[x.shape[-1]], initializer=tf.keras.initializers.zeros())
    bias = tf.cast(bias, x.dtype)
    if len(x.shape) == 2:
        return x + bias
    # Reshape to [1, 1, 1, C] so the add broadcasts over N, H, W.
    return x + tf.reshape(bias, [1, 1, 1, -1])
def conv_base(x, fmaps, kernel_size, stride, name, gain=np.sqrt(2), activation_fn=None, normalizer_fn=None, transpose=False, padding='SAME'):
    """Convolutional layer base: (transposed) conv + bias, optionally
    followed by an activation function and a normalizer.

    Args:
        x: NHWC input tensor.
        fmaps: number of output feature maps.
        kernel_size: spatial kernel size.
        stride: spatial stride (also the upscale factor when `transpose`).
        name: variable scope name (must be a str).
        gain: weight-init gain forwarded to `get_weight`.
        activation_fn: optional callable applied after the bias.
        normalizer_fn: optional callable applied after the activation.
        transpose: use `conv2d_transpose` (upsampling) instead of `conv2d`.
        padding: padding mode for the convolution.

    Returns:
        The layer output tensor.
    """
    assert isinstance(name, str)
    with tf.compat.v1.variable_scope(name):
        strides = [1, stride, stride, 1]
        if transpose:
            batch = tf.shape(x)[0]
            in_h = tf.shape(x)[1]
            in_w = tf.shape(x)[2]
            # conv2d_transpose expects weights as [kh, kw, out_ch, in_ch].
            kernel = get_weight([kernel_size, kernel_size, fmaps, x.shape[3].value], gain=gain)
            kernel = tf.cast(kernel, x.dtype)
            out = tf.nn.conv2d_transpose(
                x, filter=kernel,
                output_shape=[batch, stride * in_h, stride * in_w, fmaps],
                strides=strides, padding=padding)
        else:
            kernel = get_weight([kernel_size, kernel_size, x.shape[3].value, fmaps], gain=gain)
            kernel = tf.cast(kernel, x.dtype)
            out = tf.nn.conv2d(x, filter=kernel, strides=strides, padding=padding)
        out = apply_bias(out)
        if activation_fn is not None:
            out = activation_fn(out)
        if normalizer_fn is not None:
            out = normalizer_fn(out)
        return out
def nice_preview(x):
    """Turn a float image batch into uint8 RGB previews.

    Keeps only the first three channels, linearly rescales so that
    mean ± 2*std (computed per channel over batch + spatial dims) maps
    onto [0, 1], clips, and scales to [0, 255].
    """
    rgb = x[:, :, :, :3]
    reduce_axes = [0, 1, 2]
    mean = tf.math.reduce_mean(rgb, axis=reduce_axes, keepdims=True)
    std = tf.math.reduce_std(rgb, axis=reduce_axes, keepdims=True)
    lo = mean - 2 * std
    hi = mean + 2 * std
    # Values outside the ±2-sigma window saturate to black/white.
    scaled = tf.clip_by_value(tf.divide(rgb - lo, hi - lo), 0, 1)
    return tf.cast(255 * scaled, tf.uint8)
def upscale2d_conv2d(x, fmaps, kernel, name):
    """Fused 2x-upscale transposed convolution (ProGAN trick).

    Instead of nearest-neighbour upsampling followed by a convolution,
    blur the transposed-convolution weights with a 2x2 box filter and
    deconvolve with stride 2.

    Args:
        x: NHWC input tensor.
        fmaps: number of output feature maps.
        kernel: spatial kernel size; must be odd and >= 1.
        name: variable scope name.
    """
    assert kernel >= 1 and kernel % 2 == 1
    with tf.compat.v1.variable_scope(name):
        weights = get_weight([kernel, kernel, x.shape[3].value, fmaps])
        # conv2d_transpose expects weights as [kh, kw, out_ch, in_ch].
        weights = tf.transpose(weights, [0, 1, 3, 2])
        padded = tf.pad(weights, [[1, 1], [1, 1], [0, 0], [0, 0]], mode="CONSTANT")
        # Sum (not average) of the four shifted copies: the upscale spreads
        # each input over 4 output pixels, so no 0.25 normalization here.
        weights = tf.add_n([padded[1:, 1:], padded[:-1, 1:],
                            padded[1:, :-1], padded[:-1, :-1]])
        weights = tf.cast(weights, x.dtype)
        batch = tf.shape(x)[0]
        in_h = tf.shape(x)[1]
        in_w = tf.shape(x)[2]
        return tf.nn.conv2d_transpose(
            x, weights,
            output_shape=[batch, 2 * in_h, 2 * in_w, fmaps],
            strides=[1, 2, 2, 1], padding="SAME")