def __call__(self, s_signals, s_dropout_keep=1.):
    """Build the feed-forward embedding network.

    Two linear layers with a leaky ReLU in between, then the flat
    output is reshaped into per-feature embedding vectors.

    Args:
        s_signals: input tensor fed to the first linear layer.
        s_dropout_keep: accepted for interface parity with the other
            model variants; not used by this architecture.

    Returns:
        Tensor of shape [BATCH_SIZE, -1, FEATURE_SIZE, EMBED_SIZE].
    """
    with tf.variable_scope(self.name):
        s_hidden = ops.lyr_linear(
            'linear0', s_signals, hparams.FFT_SIZE * 2, axis=-1)
        s_hidden = ops.relu(s_hidden, hparams.RELU_LEAKAGE)
        s_flat = ops.lyr_linear(
            'linear1', s_hidden,
            hparams.FEATURE_SIZE * hparams.EMBED_SIZE, axis=-1)
        # unflatten the last axis into (feature, embedding) pairs
        return tf.reshape(
            s_flat,
            [hparams.BATCH_SIZE, -1,
             hparams.FEATURE_SIZE, hparams.EMBED_SIZE])
def __call__(self, s_signals, s_dropout_keep=1.):
    """Build the stacked bidirectional-LSTM embedding network.

    The input is zero-centered, passed through four BiLSTM layers with
    shared weight/bias initializers, zero-centered again, and projected
    by a bias-free linear layer into per-feature embedding vectors.

    Args:
        s_signals: input tensor; centered over axes (1, 2) before use.
        s_dropout_keep: dropout keep probability threaded through each
            BiLSTM layer.

    Returns:
        Tensor of shape [BATCH_SIZE, -1, FEATURE_SIZE, EMBED_SIZE].
    """
    with tf.variable_scope(self.name):
        # remove the mean over time/feature axes before the recurrence
        s_input = s_signals - tf.reduce_mean(
            s_signals, axis=(1, 2), keep_dims=True)

        hdim = 300
        w_range = .75 / sqrt(hdim)
        w_initer = tf.random_uniform_initializer(
            -w_range, w_range, dtype=hparams.FLOATX)

        # Per-gate bias initialization; the gate labels below follow the
        # original author's comments — NOTE(review): confirm they match
        # the gate ordering used inside _lyr_bilstm.
        b_init_value = np.zeros([hdim * 4], dtype=hparams.FLOATX)
        b_init_value[hdim * 1:hdim * 2] = 1.5   # input gate
        b_init_value[hdim * 2:hdim * 3] = -1.   # forget gate
        b_init_value[hdim * 3:hdim * 4] = 1.    # output gate
        b_initer = tf.constant_initializer(
            b_init_value, dtype=hparams.FLOATX)

        # four identical BiLSTM layers chained back to back
        s_cur = s_input
        for lyr_name in ('lstm0', 'lstm1', 'lstm2', 'lstm3'):
            s_cur = _lyr_bilstm(
                lyr_name, self.model, s_cur, hdim, -2, -1,
                w_initer, b_initer, s_dropout_keep)

        # re-center before the output projection
        s_centered = s_cur - tf.reduce_mean(
            s_cur, axis=(1, 2), keep_dims=True)

        out_range = 1.85
        s_flat = ops.lyr_linear(
            'output', s_centered,
            hparams.FEATURE_SIZE * hparams.EMBED_SIZE,
            w_init=tf.random_uniform_initializer(
                -out_range, out_range, dtype=hparams.FLOATX),
            bias=None)
        # unflatten the last axis into (feature, embedding) pairs
        return tf.reshape(
            s_flat,
            [hparams.BATCH_SIZE, -1,
             hparams.FEATURE_SIZE, hparams.EMBED_SIZE])