Code example #1
import numpy as np
import tensorflow as tf  # TF1.x graph mode (uses tf.get_variable)
from dnnlib.tflib.ops.fused_bias_act import fused_bias_act  # StyleGAN2 fused op (assumed source)


def inv_bias_act(x,
                 act='linear',
                 alpha=0.2,
                 gain=None,
                 lrmul=1,
                 bias_var='bias',
                 reverse=False):
    assert act in ['linear', 'lrelu']
    # Move channels to axis 1 (NHWC -> NCHW); fused_bias_act adds the bias along axis 1.
    x = tf.transpose(x, [0, 3, 1, 2])
    b = tf.get_variable(bias_var,
                        shape=[x.shape[1]],
                        initializer=tf.initializers.zeros()) * lrmul
    if reverse:
        # Invert the forward pass: undo the gain, then the leaky ReLU, then the bias.
        if act == 'lrelu':
            if gain is None:
                gain = np.sqrt(2)
            x = x / gain
            # Negative outputs came from inputs scaled by alpha, so scale them back by 1/alpha.
            mask = tf.cast(x < 0, x.dtype) * (1.0 / alpha - 1.0) + 1.0
            x = x * mask
        x = fused_bias_act(x, b=tf.cast(-b, x.dtype))  # subtract the bias (cast to match x's dtype)
        return tf.transpose(x, [0, 2, 3, 1])  # back to NHWC
    # Forward pass: bias, then activation, then gain; transpose back to NHWC.
    return tf.transpose(
        fused_bias_act(x,
                       b=tf.cast(b, x.dtype),
                       act=act,
                       alpha=alpha,
                       gain=gain), [0, 2, 3, 1])
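To make the inversion concrete, here is a minimal NumPy sketch of the forward computation that the reverse=True branch undoes, assuming fused_bias_act follows the StyleGAN2 convention (add a per-channel bias on axis 1, apply the activation, then scale by gain). The ref_* names are hypothetical, for illustration only:

import numpy as np

def ref_bias_lrelu(x, b, alpha=0.2, gain=np.sqrt(2)):
    # Hypothetical forward reference: bias + leaky ReLU + gain, NCHW layout.
    y = x + b.reshape(1, -1, 1, 1)
    y = np.where(y < 0, y * alpha, y)
    return y * gain

def ref_inv_bias_lrelu(y, b, alpha=0.2, gain=np.sqrt(2)):
    # Mirrors the reverse=True branch: undo the gain, then the slope, then the bias.
    x = y / gain
    x = np.where(x < 0, x / alpha, x)
    return x - b.reshape(1, -1, 1, 1)

# Round trip recovers the input up to floating-point error.
x = np.random.randn(2, 3, 8, 8).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
assert np.allclose(ref_inv_bias_lrelu(ref_bias_lrelu(x, b), b), x, atol=1e-5)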
Code example #2
def apply_bias_act(x, act="linear", alpha=None, gain=None, lrmul=1, bias_var="bias"):
    # Per-channel bias (length taken from axis 1 of x), scaled by lrmul at runtime.
    b = (
        tf.get_variable(
            bias_var, shape=[x.shape[1]], initializer=tf.initializers.zeros()
        )
        * lrmul
    )
    return fused_bias_act(x, b=tf.cast(b, x.dtype), act=act, alpha=alpha, gain=gain)
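Multiplying the stored variable by lrmul at runtime is the equalized learning-rate trick from ProGAN/StyleGAN: the bias is initialized at unit scale, and lrmul rescales its effective step size under the optimizer. A hedged usage sketch, assuming TF1.x graph mode with fused_bias_act from the StyleGAN2 codebase in scope:

with tf.variable_scope('Dense0'):
    x = tf.placeholder(tf.float32, [None, 512])    # bias length is taken from x.shape[1]
    y = apply_bias_act(x, act='lrelu', lrmul=0.01) # leaky ReLU with a reduced bias step size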
Code example #3
File: networks.py Project: Redmancometh/nsgan
def apply_bias_act(x,
                   act='linear',
                   gain=None,
                   lrmul=1,
                   clamp=None,
                   bias_var='bias',
                   trainable=True):
    # trainable=False freezes the bias; clamp (if not None) bounds the activated output.
    b = tf.get_variable(bias_var,
                        shape=[x.shape[1]],
                        initializer=tf.initializers.zeros(),
                        trainable=trainable) * lrmul
    return fused_bias_act(x,
                          b=tf.cast(b, x.dtype),
                          act=act,
                          gain=gain,
                          clamp=clamp)
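The two extra arguments track StyleGAN2-ADA's version of fused_bias_act: trainable=False is useful for freezing the bias of a pre-trained layer, and clamp, when not None, bounds the activated output, which helps keep float16 activations in range. A one-function NumPy sketch of the assumed clamp semantics:

import numpy as np

def ref_clamp(y, clamp=None):
    # Assumed semantics: clamp=None disables clamping, else clip to [-clamp, +clamp].
    return y if clamp is None else np.clip(y, -clamp, clamp)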
Code example #4
def apply_bias_act(x,
                   act='linear',
                   alpha=None,
                   gain=None,
                   lrmul=1,
                   bias_var='bias',
                   impl='cuda'):
    # impl selects the fused-op backend: 'cuda' (compiled custom op) or 'ref' (plain TensorFlow).
    b = tf.get_variable(bias_var,
                        shape=[x.shape[1]],
                        initializer=tf.initializers.zeros()) * lrmul
    return fused_bias_act(x,
                          b=tf.cast(b, x.dtype),
                          act=act,
                          alpha=alpha,
                          gain=gain,
                          impl=impl)
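impl selects between the compiled CUDA op ('cuda') and a plain-TensorFlow fallback ('ref'); both are meant to compute the same result. When alpha or gain is None, fused_bias_act substitutes per-activation defaults; the table below is a sketch of those defaults from the StyleGAN2 codebase (verify against dnnlib/tflib/ops/fused_bias_act.py before relying on it):

import numpy as np

# Assumed per-activation defaults (StyleGAN2's activation_funcs table):
ACT_DEFAULTS = {
    'linear': dict(alpha=0.0, gain=1.0),
    'relu':   dict(alpha=0.0, gain=np.sqrt(2)),
    'lrelu':  dict(alpha=0.2, gain=np.sqrt(2)),
}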