Example #1
def gate_phase_hirose(z, scope='', reuse=None):
    '''
    Hirose-inspired gate activation that filters according to the
    phase angle of the complex input.
    '''
    with tf.variable_scope('phase_hirose_' + scope, reuse=reuse):
        m = tf.get_variable('m', [], tf.float32,
                            initializer=urnd_init(0.9, 1.1))
        a = tf.get_variable('a', [], tf.float32,
                            initializer=urnd_init(1.9, 2.1))
        b = tf.get_variable('b', [], tf.float32,
                            initializer=urnd_init(3.9, 4.1))
        modulus = tf.sqrt(tf.real(z)**2 + tf.imag(z)**2)
        phase = tf.atan2(tf.imag(z), tf.real(z))
        gate = tf.tanh(modulus/(m*m)) * tf.nn.sigmoid(a*phase + b)
        return tf.complex(gate, tf.zeros_like(gate))
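
The snippets on this page assume the TensorFlow 1.x API and a urnd_init helper that is not shown here. A minimal sketch, assuming urnd_init is simply a thin wrapper around tf.random_uniform_initializer:

import tensorflow as tf  # TensorFlow 1.x API

def urnd_init(minval, maxval):
    # Assumed helper: uniform random initializer in [minval, maxval].
    return tf.random_uniform_initializer(minval, maxval)

z = tf.complex(tf.random_normal([4, 8]), tf.random_normal([4, 8]))
g = gate_phase_hirose(z, scope='demo')  # complex-typed gate with zero imaginary part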
Example #2
def mod_relu(z, scope='', reuse=None):
    """
        Implementation of the modRelu from Arjovski et al.
        f(z) = relu(|z| + b)(z / |z|) or
        f(r,theta) = relu(r + b)e^(i*theta)
        b is initialized to zero, this leads to a network, which
        is linear during early optimization.
    Input:
        z: complex input.
        b: 'dead' zone radius.
    Returns:
        z_out: complex output.

    Reference:
         Arjovsky et al. Unitary Evolution Recurrent Neural Networks
         https://arxiv.org/abs/1511.06464
    """
    with tf.variable_scope('mod_relu' + scope, reuse=reuse):
        b = tf.get_variable('b', [], dtype=tf.float32,
                            initializer=urnd_init(-0.01, 0.01))
        modulus = tf.sqrt(tf.real(z)**2 + tf.imag(z)**2)
        rescale = tf.nn.relu(modulus + b) / (modulus + 1e-6)
        # Equivalent formulation:
        # return tf.complex(rescale * tf.real(z), rescale * tf.imag(z))
        rescale = tf.complex(rescale, tf.zeros_like(rescale))
        return tf.multiply(rescale, z)
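
A minimal usage sketch, assuming the urnd_init helper above and a TF1 session:

z = tf.complex(tf.constant([[0.01, -1.0]]), tf.constant([[0.02, 2.0]]))
out = mod_relu(z, scope='demo')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out))  # with b near zero, f(z) stays close to z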
Example #3
def hirose(z, scope='', reuse=None):
    """
    Compute the non-linearity proposed by Hirose.
    """
    with tf.variable_scope('hirose' + scope, reuse=reuse):
        m = tf.get_variable('m', [], tf.float32,
                            initializer=urnd_init(0.9, 1.1))
        modulus = tf.sqrt(tf.real(z)**2 + tf.imag(z)**2)
        # use m*m to enforce positive m.
        # add a small epsilon to avoid division by zero at z = 0.
        rescale = tf.complex(tf.nn.tanh(modulus/(m*m))/(modulus + 1e-6),
                             tf.zeros_like(modulus))
        return tf.multiply(rescale, z)
Example #4
def hirose(z, scope='', reuse=None):
    """
    Compute the non-linearity proposed by Hirose.
    See for example:
    Complex Valued nonlinear Adaptive Filters
    Mandic and Su Lee Goh
    Chapter 4.3.1 (Amplitude-Phase split complex approach)
    """
    with tf.variable_scope('hirose' + scope, reuse=reuse):
        m = tf.get_variable('m', [],
                            tf.float32,
                            initializer=urnd_init(0.9, 1.1))
        modulus = tf.sqrt(tf.real(z)**2 + tf.imag(z)**2)
        # use m*m to enforce positive m.
        # add a small epsilon to avoid division by zero at z = 0.
        rescale = tf.complex(
            tf.nn.tanh(modulus / (m * m)) / (modulus + 1e-6),
            tf.zeros_like(modulus))
        return tf.multiply(rescale, z)
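
The Hirose activation squashes only the magnitude with tanh and leaves the phase of z untouched, since the complex rescale factor has zero imaginary part. A minimal sketch of that property, assuming the urnd_init helper above:

z = tf.complex(tf.random_normal([4, 8]), tf.random_normal([4, 8]))
out = hirose(z, scope='demo')
# phase is preserved: phase_out agrees with phase_in up to numerics.
phase_in = tf.atan2(tf.imag(z), tf.real(z))
phase_out = tf.atan2(tf.imag(out), tf.real(out))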
Example #5
def diag_mul(h, state_size, no, reuse):
    """
    Multiplication with a diagonal matrix.
    Input:
        h: hidden state_vector.
        state_size: The RNN state size.
        reuse: True if graph variables should be reused.
    Returns:
        R*h
    """
    with tf.variable_scope("diag_phis_" + str(no), reuse=reuse):
        omega = tf.get_variable('vr', shape=[state_size], dtype=tf.float32,
                                initializer=urnd_init(-np.pi, np.pi))
        dr = tf.cos(omega)
        di = tf.sin(omega)

    with tf.variable_scope("diag_mul_" + str(no)):
        D = tf.diag(tf.complex(dr, di))
        return tf.matmul(h, D)
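
A minimal usage sketch, assuming import numpy as np alongside the imports above; since every diagonal entry e^(i*omega) has unit modulus, the multiplication preserves the norm of h:

import numpy as np

h = tf.complex(tf.random_normal([2, 16]), tf.random_normal([2, 16]))
rotated = diag_mul(h, state_size=16, no=0, reuse=False)

Because D is diagonal, the matmul is equivalent to the cheaper elementwise product h * tf.complex(dr, di); the matrix form is kept here for clarity.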