Code Example #1
File: tf_wpe.py Project: uncledickHe/nara_wpe
def get_correlations_narrow(Y, inverse_power, K, delay):
    """

    :param Y: [D, T] `Tensor`
    :param inverse_power: [T] `Tensor`
    :param K: Number of taps
    :param delay: delay
    :return:
    """
    dyn_shape = tf.shape(Y)
    T = dyn_shape[-1]
    D = dyn_shape[0]

    # TODO: Large gains also expected when precalculating Psi.
    # TODO: Small gains expected, when views are pre-calculated in main.
    # TODO: Larger gains expected with scipy.signal.signaltools.fftconvolve().
    # Code without fft will be easier to port to Chainer.
    # Psi shape after framing and slicing: (D, T - delay - K + 1, K)
    Psi = tf_signal.frame(Y, K, 1, axis=-1)[:, :T - delay - K + 1, ::-1]
    Psi_conj_norm = (
        tf.cast(inverse_power[None, delay + K - 1:, None], Psi.dtype)
        * tf.conj(Psi)
    )

    correlation_matrix = tf.einsum('dtk,etl->kdle', Psi_conj_norm, Psi)
    correlation_vector = tf.einsum(
        'dtk,et->ked', Psi_conj_norm, Y[:, delay + K - 1:]
    )

    correlation_matrix = tf.reshape(correlation_matrix, (K * D, K * D))
    return correlation_matrix, correlation_vector
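
A minimal shape check, as a hedged sketch (random values stand in for one frequency bin of a complex STFT; TF 1.x graph mode is assumed):

import numpy as np
import tensorflow as tf

D, T, K, delay = 2, 100, 5, 2
Y = tf.constant(np.random.randn(D, T) + 1j * np.random.randn(D, T),
                dtype=tf.complex64)
inverse_power = tf.ones([T])
R, r = get_correlations_narrow(Y, inverse_power, K, delay)
with tf.Session() as sess:
    R_np, r_np = sess.run([R, r])
    print(R_np.shape, r_np.shape)  # (10, 10) (5, 2, 2)
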
Code Example #2
File: tf_wpe.py Project: suwoncjh/nara_wpe
def get_correlations(Y, inverse_power, taps, delay):
    """Calculates weighted correlations of a window of length taps

    Args:
        Y (tf.Tensor): Complex-valued STFT signal with shape (F, D, T)
        inverse_power (tf.Tensor): Weighting factor with shape (F, T)
        taps (int): Length of the correlation window
        delay (int): Delay for the weighting factor

    Returns:
        tf.Tensor: Correlation matrix of shape (F, taps*D, taps*D)
        tf.Tensor: Correlation vector of shape (F, taps, D, D)
    """
    dyn_shape = tf.shape(Y)
    F = dyn_shape[0]
    D = dyn_shape[1]
    T = dyn_shape[2]

    Psi = tf_signal.frame(Y, taps, 1, axis=-1)[..., :T - delay - taps + 1, ::-1]
    Psi_conj_norm = (
        tf.cast(inverse_power[:, None, delay + taps - 1:, None], Psi.dtype)
        * tf.conj(Psi)
    )

    correlation_matrix = tf.einsum('fdtk,fetl->fkdle', Psi_conj_norm, Psi)
    correlation_vector = tf.einsum(
        'fdtk,fet->fked', Psi_conj_norm, Y[..., delay + taps - 1:]
    )

    correlation_matrix = tf.reshape(correlation_matrix, (F, taps * D, taps * D))
    return correlation_matrix, correlation_vector
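
The same hedged shape check for this batched variant, where the leading F axis indexes frequency bins:

import numpy as np
import tensorflow as tf

F, D, T = 3, 2, 100
Y = tf.constant(np.random.randn(F, D, T) + 1j * np.random.randn(F, D, T),
                dtype=tf.complex64)
inverse_power = tf.ones([F, T])
R, r = get_correlations(Y, inverse_power, taps=5, delay=2)
with tf.Session() as sess:
    R_np, r_np = sess.run([R, r])
    print(R_np.shape, r_np.shape)  # (3, 10, 10) (3, 5, 2, 2)
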
Code Example #3
def main(_):
  with tf.Graph().as_default():
    n_mel_bins = 128
    lower_edge_hertz = 0.0
    upper_edge_hertz = 11025.0
    sample_rate = 22050.0
  
    sfr_length = tf.constant(1024, name="sfr_length")
    sfr_step = tf.constant(512, name="sfr_step")
  
    file_pattern = tf.placeholder(tf.string, shape=(), name="file_pattern")
    fr_length = tf.placeholder(tf.int32, shape=(), name="fr_length")
    fr_step = tf.placeholder(tf.int32, shape=(), name="fr_step")
    attenuation = tf.placeholder(tf.float32, shape=(), name="attn")
    n_mfcc = tf.placeholder(tf.int32, shape=(), name="n_mfcc")
    calculate_mean = tf.placeholder(tf.bool, shape=(), name="cal_mean")
    calculate_variance = tf.placeholder(
      tf.bool, shape=(), name="cal_variance")
    p_deviation = tf.placeholder(tf.float32, shape=(), name="p_deviation")
    
    initializer, fn, dc = aim.read_audio(file_pattern)
    gt = aim.read_ground_truth_labels(fn, fr_length, sample_rate)
    frames = contrib_signal.frame(dc, fr_length, fr_step, name="frame_audio")
    nf = aim.normalize_audio(frames, attenuation)
    features, ids = aim.extract_features(nf,
                                         sample_rate,
                                         lower_edge_hertz,
                                         upper_edge_hertz,
                                         sfr_length,
                                         sfr_step,
                                         n_mel_bins,
                                         n_mfcc,
                                         p_deviation)
    rf, rids = aim.reduce_features(
      features, ids, calculate_mean, calculate_variance)
    nf = aim.normalize_features(rf)
    with tf.Session() as sess:
      file_writer = tf.summary.FileWriter('log', sess.graph)
      parameters = {
        file_pattern: "*.wav",
        fr_length: 11025,
        fr_step: 11025,
        attenuation: 24.0,
        n_mfcc: 20,
        calculate_mean: True,
        calculate_variance: True,
        p_deviation: 2.0
      }
      sess.run(initializer, parameters)
      while True:
        try:
          rid, f, n, g = sess.run([rids, fn, nf, gt], parameters)
          print(f.decode())
          mi = fs.mutual_info_classif(n, g)
          mi_with_ids = np.transpose(np.concatenate(([rid], [mi]), axis=0))
          print(mi_with_ids)
        except tf.errors.OutOfRangeError:
          break
Code Example #4
def frame_op(decoded_audio):
    fr_length = tf.placeholder(tf.int32, name='fr_length')
    fr_step = tf.placeholder(tf.int32, name='fr_step')
    with tf.name_scope('frame_audio') as scope:
        frames = contrib_signal.frame(decoded_audio.audio,
                                      fr_length,
                                      fr_step,
                                      axis=0)
        frames_flat = tf.layers.flatten(frames)
    return (fr_length, fr_step, frames_flat)
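
A hedged usage sketch (TF 1.x graph mode; `decoded_audio` with an `audio` tensor is assumed to come from an audio-decoding op elsewhere in the project, and the concrete lengths are illustrative):

# decoded_audio.audio is assumed to be a [samples, channels] float tensor.
fr_length, fr_step, frames_flat = frame_op(decoded_audio)
with tf.Session() as sess:
    flat = sess.run(frames_flat, feed_dict={fr_length: 11025, fr_step: 11025})
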
Code Example #5
File: tf_wpe.py Project: uncledickHe/nara_wpe
def get_correlations(
        Y, inverse_power, K, delay, mask_logits=None,
        mask_type=MASK_TYPES.DIRECT
):
    """

    :param Y: [F, D, T] `Tensor`
    :param inverse_power: [F, T] `Tensor`
    :param K: Number of taps
    :param delay: delay
    :return:
    """
    dyn_shape = tf.shape(Y)
    F = dyn_shape[0]
    D = dyn_shape[1]
    T = dyn_shape[2]

    # TODO: Large gains also expected when precalculating Psi.
    # TODO: Small gains expected, when views are pre-calculated in main.
    # TODO: Larger gains expected with scipy.signal.signaltools.fftconvolve().
    # Code without fft will be easier to port to Chainer.
    # Psi shape after framing and slicing: (F, D, T - delay - K + 1, K)
    Y = Y / tf.norm(Y, axis=(-2, -1), keep_dims=True)
    Psi = tf_signal.frame(Y, K, 1, axis=-1)[..., :T - delay - K + 1, ::-1]
    Psi_conj_norm = (
        tf.cast(inverse_power[:, None, delay + K - 1:, None], Psi.dtype)
        * tf.conj(Psi)
    )

    if mask_logits is not None:
        # Using logits instead of a 'normal' mask is numerically more stable.
        # There are a few ways to apply the mask:
        # DIRECT: Mask is limited to values between 0 and 1
        # RATIO: Mask values are positive and unlimited
        # *_INV: Use 1-Mask to mask only the reverberation (may be easier
        # to interpret)
        logits = tf.cast(mask_logits[:, None, delay + K - 1:, None], Y.dtype)
        if mask_type == MASK_TYPES.DIRECT or mask_type == MASK_TYPES.DIRECT_INV:
            scale = -1. if mask_type == MASK_TYPES.DIRECT else 1.
            Psi_conj_norm += Psi_conj_norm * tf.exp(scale * logits)
        elif mask_type == MASK_TYPES.RATIO or mask_type == MASK_TYPES.RATIO_INV:
            scale = -1. if mask_type == MASK_TYPES.RATIO else 1.
            Psi_conj_norm *= tf.exp(scale * logits)

    correlation_matrix = tf.einsum('fdtk,fetl->fkdle', Psi_conj_norm, Psi)
    correlation_vector = tf.einsum(
        'fdtk,fet->fked', Psi_conj_norm, Y[..., delay + K - 1:]
    )

    correlation_matrix = tf.reshape(correlation_matrix, (F, K * D, K * D))
    return correlation_matrix, correlation_vector
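
A hedged sketch of the mask path (MASK_TYPES is defined elsewhere in tf_wpe.py; zero logits are used here as a neutral stand-in, since sigmoid(0) = 0.5):

import numpy as np
import tensorflow as tf

F, D, T = 3, 2, 100
Y = tf.constant(np.random.randn(F, D, T) + 1j * np.random.randn(F, D, T),
                dtype=tf.complex64)
inverse_power = tf.ones([F, T])
mask_logits = tf.zeros([F, T])  # neutral mask
R, r = get_correlations(Y, inverse_power, K=5, delay=2,
                        mask_logits=mask_logits,
                        mask_type=MASK_TYPES.DIRECT)
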
Code Example #6
File: aim_ops.py Project: simplelife2010/aim_python
def zero_crossing_rate(frames, sfr_length=1024, sfr_step=512, name=None):
    """Calculate the zero crossing rate for a batch of audio signals.
  
  Args:
    frames: A `Tensor` of shape `[frames, samples]`.
    sfr_length: An integer scalar `Tensor`. The subframe length in samples.
    sfr_step: An integer scalar `Tensor`. The number of samples to step.
    name: `string`, name of the operation.
    
  Returns:
    A `Tensor` with shape `[frames, 1]` containing the zero crossing rate.
    A `Tensor` with shape `[1]` containing the feature id.
  """
    with tf.name_scope(name, "zero_crossing_rate"):
        subframes = contrib_signal.frame(frames, sfr_length, sfr_step)
        sign = tf.sign(subframes)
        diff = tf.divide(tf.abs(tf.subtract(sign[:, :, 1:], sign[:, :, :-1])),
                         2.0)
        n_zc = tf.reduce_sum(diff, axis=-1, keepdims=True)
        zcr = tf.divide(n_zc, tf.to_float(sfr_length))
        return ([tf.constant("zero_crossing_rate")], zcr)
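
A hedged sketch of computing the rate for synthetic audio (TF 1.x; `contrib_signal` is `tf.contrib.signal` as in the source module):

import tensorflow as tf
from tensorflow.contrib import signal as contrib_signal

audio = tf.random_normal([16000])                                # fake mono audio
frames = contrib_signal.frame(audio, 11025, 11025, pad_end=True)  # [frames, samples]
ids, zcr = zero_crossing_rate(frames)
with tf.Session() as sess:
    print(sess.run(zcr).shape)  # (frames, subframes, 1)
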
Code Example #7
File: VAD_LSTM_2.py Project: zsk843/vad_modified
    def inference(self, inputs, keep_prob, is_training=True, reuse=None):
        # initialization
        # c1_out = affine_transform(inputs, num_hidden_1, name="hidden_1")
        # inputs_shape = inputs.get_shape().as_list()
        with tf.variable_scope(scope_name):
            # print(inputs.get_shape().as_list())
            # in_rnn = rnn_in(inputs, seq_size, target_delay)
            in_rnn = signal.frame(inputs, seq_size + 2 * target_delay, seq_size, axis=0)

            # in_rnn = tf.reshape(inputs,[-1, seq_size+target_delay, num_features])
            stacked_rnn = []
            for iiLyr in range(num_layers):
                stacked_rnn.append(tf.nn.rnn_cell.LSTMCell(num_units=lstm_cell_size, state_is_tuple=True))
            MultiLyr_cell = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn, state_is_tuple=True)

            outputs, _state = tf.nn.dynamic_rnn(MultiLyr_cell, in_rnn, time_major=False, dtype=tf.float32)
            outputs = tf.reshape(outputs[:, target_delay-1:seq_size+target_delay-1, :], [-1, lstm_cell_size])

            outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)

            logits = affine_transform(outputs, bdnn_outputsize, name="output1")
            logits = tf.reshape(logits, [-1, int(bdnn_outputsize)])

        return logits
Code Example #8
File: tf_wpe.py Project: suwoncjh/nara_wpe
def block_wpe_step(
        Y, inverse_power, taps=10, delay=3, mode='inv',
        block_length_in_seconds=2., forgetting_factor=0.7,
        fft_shift=256, sampling_rate=16000):
    """Applies wpe in a block-wise fashion.

    Args:
        Y (tf.Tensor): Complex valued STFT signal with shape (F, D, T)
        inverse_power (tf.Tensor): Power signal with shape (F, T)
        taps (int, optional): Defaults to 10.
        delay (int, optional): Defaults to 3.
        mode (str, optional): Specifies how R^-1@r is calculated:
            "inv" calculates the inverse of R directly and then uses matmul
            "solve" solves Rx=r for x
        block_length_in_seconds (float, optional): Length of each block in
            seconds
        forgetting_factor (float, optional): Forgetting factor for the signal
            statistics between the blocks
        fft_shift (int, optional): Shift used for the STFT.
        sampling_rate (int, optional): Sampling rate of the observed signal.

    Returns:
        tf.Tensor: Enhanced signal with shape (F, D, T)
    """
    frames_per_block = block_length_in_seconds * sampling_rate // fft_shift
    frames_per_block = tf.cast(frames_per_block, tf.int32)
    framed_Y = tf_signal.frame(
        Y, frames_per_block, frames_per_block, pad_end=True)
    framed_inverse_power = tf_signal.frame(
        inverse_power, frames_per_block, frames_per_block, pad_end=True)
    num_blocks = tf.shape(framed_Y)[-2]

    enhanced_arr = tf.TensorArray(
        framed_Y.dtype, size=num_blocks, clear_after_read=True)
    start_block = tf.constant(0)
    correlation_matrix, correlation_vector = get_correlations(
        framed_Y[..., start_block, :], framed_inverse_power[..., start_block, :],
        taps, delay
    )
    num_bins = Y.shape[0]
    num_channels = Y.shape[1].value
    if num_channels is None:
        num_channels = tf.shape(Y)[1]
    num_frames = tf.shape(Y)[-1]

    def cond(k, *_):
        return k < num_blocks

    with tf.name_scope('block_WPE'):
        def block_step(
                k, enhanced, correlation_matrix_tm1, correlation_vector_tm1):

            def _init_step():
                return correlation_matrix_tm1, correlation_vector_tm1

            def _update_step():
                correlation_matrix, correlation_vector = get_correlations(
                    framed_Y[..., k, :], framed_inverse_power[..., k, :],
                    taps, delay
                )
                return (
                    (1. - forgetting_factor) * correlation_matrix_tm1
                    + forgetting_factor * correlation_matrix,
                    (1. - forgetting_factor) * correlation_vector_tm1
                    + forgetting_factor * correlation_vector
                )

            correlation_matrix, correlation_vector = tf.case(
                ((tf.equal(k, 0), _init_step),), default=_update_step
            )

            def step(inp):
                (Y_f, inverse_power_f,
                    correlation_matrix_f, correlation_vector_f) = inp
                with tf.name_scope('filter_matrix'):
                    filter_matrix_conj = get_filter_matrix_conj(
                        Y_f,
                        correlation_matrix_f, correlation_vector_f,
                        taps, delay, mode=mode
                    )
                with tf.name_scope('apply_filter'):
                    enhanced_f = perform_filter_operation(
                        Y_f, filter_matrix_conj, taps, delay)
                return enhanced_f

            enhanced_block = tf.map_fn(
                step,
                (framed_Y[..., k, :], framed_inverse_power[..., k, :],
                 correlation_matrix, correlation_vector),
                dtype=framed_Y.dtype,
                parallel_iterations=100
            )

            enhanced = enhanced.write(k, enhanced_block)
            return k + 1, enhanced, correlation_matrix, correlation_vector

        _, enhanced_arr, _, _ = tf.while_loop(
            cond, block_step,
            (start_block, enhanced_arr, correlation_matrix, correlation_vector)
        )

        enhanced = enhanced_arr.stack()
        enhanced = tf.transpose(enhanced, (1, 2, 0, 3))
        enhanced = tf.reshape(enhanced, (num_bins, num_channels, -1))

        return enhanced[..., :num_frames]
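
A hedged driver sketch, assuming the rest of tf_wpe.py (get_correlations, get_filter_matrix_conj, perform_filter_operation) is in scope; the simple mean-power inverse weighting below is an assumption, not the project's exact recipe:

import numpy as np
import tensorflow as tf

F, D, T = 257, 4, 500
Y = tf.constant(np.random.randn(F, D, T) + 1j * np.random.randn(F, D, T),
                dtype=tf.complex64)
power = tf.reduce_mean(tf.real(Y * tf.conj(Y)), axis=1)   # (F, T), assumed weighting
inverse_power = 1. / tf.maximum(power, 1e-10)
enhanced = block_wpe_step(Y, inverse_power, taps=10, delay=3)
with tf.Session() as sess:
    print(sess.run(enhanced).shape)  # (257, 4, 500)
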
Code Example #9
File: eager_STFT.py Project: v0lta/Spectral-RNN
def stft(data,
         window,
         nperseg,
         noverlap,
         nfft=None,
         sides=None,
         padded=True,
         scaling='spectrum',
         boundary='zeros',
         debug=False):
    # Following:
    # https://github.com/scipy/scipy/blob/v1.1.0/scipy/signal/spectral.py#L847-L991
    # Args:
    #   data: The time domain data to be transformed [expects batch, dim, time].
    #   window: Tensorflow array of size [window_size]
    #   nperseg: The number of samples per window segment.
    #   noverlap: The number of samples overlapping.
    with tf.variable_scope("stft"):
        boundary_funcs = {'zeros': zero_ext, None: None}

        if boundary not in boundary_funcs:
            raise ValueError(
                "Unknown boundary option '{0}', must be one of: {1}".format(
                    boundary, list(boundary_funcs.keys())))

        if boundary is not None:
            ext_func = boundary_funcs[boundary]
            data = ext_func(data, nperseg // 2, axis=-1)

        nstep = nperseg - noverlap
        # do what scipy's spectral_helper does.
        if padded:
            # Pad to integer number of windowed segments
            # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
            dim = len(data.shape)
            nadd = (-(data.shape[-1].value - nperseg) % nstep) % nperseg

            if debug:
                zeros_shape = list(data.shape[:-1]) + [nadd]
            # zeros_shape = list(data.shape[:-1]) + [nadd]
            # data = tf.concat([data, tf.zeros(zeros_shape)], axis=-1)
            zeros = np.zeros([dim, 2], dtype=np.int32)
            zeros[-1, 1] = nadd
            data = tf.pad(data, tf.constant(zeros, dtype=tf.int32))

        # do what numpy's _fft_helper does.
        if nperseg == 1 and noverlap == 0:
            result = tf.expand_dims(data, -1)
        else:
            data_shape = data.shape.as_list()
            step = nperseg - noverlap
            # do the framing
            result = tfsignal.frame(data, nperseg, step)
            # shape that numpy framing would produce; sanity-check the result.
            shape = data_shape[:-1] + [(data_shape[-1] - noverlap) // step,
                                       nperseg]
            assert result.shape.as_list() == shape

        # Apply window by multiplication
        result = window * result
        result = tf.spectral.rfft(result)

        if scaling == 'spectrum':
            scale = 1.0 / tf.reduce_sum(window)**2
        else:
            raise ValueError('Unknown scaling: %r' % scaling)
        scale = tf.sqrt(scale)
        result *= tf.complex(scale, tf.zeros_like(scale))
    if debug:
        zeros_shape = list(data.shape[:-1]) + [nadd]
        data_np = np.concatenate((data.numpy(), np.zeros(zeros_shape)),
                                 axis=-1)
        strides = data_np.strides[:-1] + (step * data_np.strides[-1],
                                          data_np.strides[-1])
        result_np = np.lib.stride_tricks.as_strided(data_np,
                                                    shape=shape,
                                                    strides=strides)
        result_np = window.numpy() * result_np
        result_np = np.fft.rfft(result_np)
        result_np *= scale.numpy()
        return result, result_np.astype(np.complex64)
    else:
        return result
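
A hedged eager-mode sketch (TF 1.x with eager execution; boundary=None is passed to sidestep the module's zero_ext helper, and the Hann window is an illustrative choice):

import numpy as np
import tensorflow as tf
tf.enable_eager_execution()

nperseg, noverlap = 256, 128
window = tf.constant(np.hanning(nperseg), dtype=tf.float32)
data = tf.constant(np.random.randn(1, 1, 4096), dtype=tf.float32)  # [batch, dim, time]
result = stft(data, window, nperseg, noverlap, boundary=None)
print(result.shape)  # (1, 1, frames, nperseg // 2 + 1)
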
Code Example #10
def generate_mel_filter_banks(signal,
                              sample_rate_hz,
                              frame_size_s=FRAME_SIZE_S,
                              frame_stride_s=FRAME_STRIDE_S,
                              window_fn=functools.partial(
                                  tf_signal.hamming_window, periodic=True),
                              fft_num_points=STFT_NUM_POINTS,
                              lower_freq_hz=0.0,
                              num_mel_bins=NUM_TRIANGULAR_FILTERS,
                              log_offset=1e-6,
                              should_log_weight=False):

    # Convert the signal to a tf tensor in case it is in an np array.
    signal = tf.convert_to_tensor(signal, dtype=tf.float32)

    # Compute the remaining parameters for this calculation.
    frame_length = int(sample_rate_hz * frame_size_s)
    frame_step = int(sample_rate_hz * frame_stride_s)

    # The upper frequency is bounded by half the sample rate (the Nyquist limit).
    upper_freq_hz = sample_rate_hz / 2.0

    # Package the signal into equally-sized, overlapping subsequences (padded with 0s if necessary).
    frames = tf_signal.frame(signal,
                             frame_length=frame_length,
                             frame_step=frame_step,
                             pad_end=True,
                             pad_value=0)

    # Apply a Short-Term Fourier Transform (STFT) to convert into the frequency domain (assuming each window has a
    # constant frequency snapshot).
    stfts = tf_signal.stft(frames,
                           frame_length=frame_length,
                           frame_step=frame_step,
                           fft_length=fft_num_points,
                           window_fn=window_fn)

    # Compute the magnitude and power of the frequencies (the magnitude spectrogram).
    magnitude_spectrograms = tf.abs(stfts)
    power_spectrograms = tf.real(stfts * tf.conj(stfts))

    # Warp the linear-scale spectrograms into the mel-scale.
    num_spectrogram_bins = 1 + int(fft_num_points / 2)

    # Compute the conversion matrix to mel-frequency space.
    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
        num_mel_bins=num_mel_bins,
        num_spectrogram_bins=num_spectrogram_bins,
        sample_rate=sample_rate_hz,
        lower_edge_hertz=lower_freq_hz,
        upper_edge_hertz=upper_freq_hz,
        dtype=tf.float32)

    # Apply the conversion to complete the calculation of the filter-bank
    mel_spectrograms = tf.tensordot(magnitude_spectrograms,
                                    linear_to_mel_weight_matrix, 1)
    mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
        linear_to_mel_weight_matrix.shape[-1:]))

    if should_log_weight:
        return tf.log(mel_spectrograms + log_offset)
    else:
        return mel_spectrograms
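
A hedged call sketch; explicit values are passed so the module-level constants (FRAME_SIZE_S, FRAME_STRIDE_S, STFT_NUM_POINTS, NUM_TRIANGULAR_FILTERS) are not needed, and the numbers are illustrative assumptions:

import numpy as np
import tensorflow as tf

audio = np.random.randn(22050).astype(np.float32)  # 1 s of synthetic audio
mel = generate_mel_filter_banks(audio, sample_rate_hz=22050.0,
                                frame_size_s=0.025, frame_stride_s=0.010,
                                fft_num_points=512, num_mel_bins=40)
with tf.Session() as sess:
    print(sess.run(mel).shape)  # trailing axis is num_mel_bins (40)
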
Code Example #11
def MonotonicChunkwiseAttentionInitializer(dataInput,
                                           scopeName,
                                           hiddenNoduleNumber,
                                           attentionScope=None,
                                           blstmFlag=True):
    def MovingMax(tensor, windowLen, namescope):
        with tensorflow.name_scope(namescope):
            networkParameter['%s_Pad' % namescope] = tensorflow.pad(
                tensor,
                paddings=[[0, 0], [windowLen - 1, 0]],
                name='%s_Pad' % namescope)
            networkParameter['%s_Reshape' % namescope] = tensorflow.reshape(
                tensor=networkParameter['%s_Pad' % namescope],
                shape=[
                    tensorflow.shape(networkParameter['%s_Pad' %
                                                      namescope])[0], 1,
                    tensorflow.shape(networkParameter['%s_Pad' %
                                                      namescope])[1], 1
                ],
                name='%s_Reshape' % namescope)
            networkParameter['%s_MaxPool' %
                             namescope] = tensorflow.nn.max_pool(
                                 networkParameter['%s_Reshape' % namescope],
                                 ksize=[1, 1, windowLen, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID',
                                 name='%s_MaxPool' % namescope)[:, 0, :, 0]

    with tensorflow.name_scope(scopeName):
        networkParameter = {}

        if blstmFlag:
            networkParameter['DataInput'] = tensorflow.concat(
                [dataInput[0], dataInput[1]], axis=2, name='DataInput')
        else:
            networkParameter['DataInput'] = dataInput

        networkParameter['BatchSize'], networkParameter[
            'TimeStep'], networkParameter[
                'HiddenNoduleNumber'] = tensorflow.unstack(
                    tensorflow.shape(networkParameter['DataInput'],
                                     name='Shape'))

        networkParameter['DataLogits'] = tensorflow.layers.dense(
            inputs=networkParameter['DataInput'],
            units=1,
            activation=tensorflow.nn.tanh,
            name='DataLogits_%s' % scopeName)[..., 0]
        MovingMax(tensor=networkParameter['DataLogits'],
                  windowLen=attentionScope,
                  namescope='Logits_Max')

        #########################################################################

        networkParameter['DataLogits_Pad'] = tensorflow.pad(
            networkParameter['DataLogits'],
            paddings=[[0, 0], [attentionScope - 1, 0]],
            constant_values=0,
            name='DataLogits_Pad')
        networkParameter['DataLogits_Frame'] = signal.frame(
            signal=networkParameter['DataLogits_Pad'],
            frame_length=attentionScope,
            frame_step=1,
            name='DataLogits_Frame')
        networkParameter['DataLogits_Frame_Reduce'] = tensorflow.subtract(
            x=networkParameter['DataLogits_Frame'],
            y=networkParameter['Logits_Max_MaxPool'][:, :, tensorflow.newaxis],
            name='DataLogits_Frame_Reduce')
        networkParameter['Denominator_Softmax'] = tensorflow.reduce_sum(
            input_tensor=tensorflow.exp(
                x=networkParameter['DataLogits_Frame_Reduce']),
            axis=2,
            name='Denominator_Softmax')
        networkParameter['Denominator_Frame'] = signal.frame(
            signal=networkParameter['Denominator_Softmax'],
            frame_length=attentionScope,
            frame_step=1,
            pad_end=True,
            pad_value=0,
            name='Denominator_Frame')

        ########################################################################

        networkParameter['FrameMax'] = signal.frame(
            signal=networkParameter['Logits_Max_MaxPool'],
            frame_length=attentionScope,
            frame_step=1,
            pad_end=True,
            pad_value=0,
            name='FrameMax')
        networkParameter['DataLogits_Numerators'] = tensorflow.subtract(
            x=networkParameter['DataLogits'][:, :, tensorflow.newaxis],
            y=networkParameter['FrameMax'],
            name='DataLogits_Numerators')
        networkParameter['Numerators_Softmax'] = tensorflow.exp(
            x=networkParameter['DataLogits_Numerators'],
            name='Numerators_Softmax')
        networkParameter['AttentionProbability_Raw'] = tensorflow.divide(
            x=networkParameter['Numerators_Softmax'],
            y=tensorflow.maximum(networkParameter['Denominator_Frame'], 1E-5),
            name='AttentionProbability_Raw')
        networkParameter['AttentionProbability'] = tensorflow.reduce_sum(
            input_tensor=networkParameter['AttentionProbability_Raw'],
            axis=2,
            name='AttentionProbability')

        networkParameter['AttentionProbability_Supplement'] = tensorflow.tile(
            input=networkParameter['AttentionProbability'][:, :,
                                                           tensorflow.newaxis],
            multiples=[1, 1, hiddenNoduleNumber],
            name='AttentionProbability_Supplement')
        networkParameter['FinalResult_Media'] = tensorflow.multiply(
            x=networkParameter['AttentionProbability_Supplement'],
            y=networkParameter['DataInput'],
            name='FinalResult_Media')
        networkParameter['FinalResult'] = tensorflow.reduce_sum(
            input_tensor=networkParameter['FinalResult_Media'],
            axis=1,
            name='FinalResult')

        return networkParameter
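
A hedged wiring sketch; the shapes are assumptions (forward and backward RNN outputs of [batch, time, 128], so the concatenated hidden size matches hiddenNoduleNumber=256), and the scope name is hypothetical:

import tensorflow

forward = tensorflow.placeholder(tensorflow.float32, [None, None, 128])
backward = tensorflow.placeholder(tensorflow.float32, [None, None, 128])
params = MonotonicChunkwiseAttentionInitializer(
    dataInput=[forward, backward], scopeName='MoChA_Test',
    hiddenNoduleNumber=256, attentionScope=8, blstmFlag=True)
attention = params['AttentionProbability']  # [batch, time]
pooled = params['FinalResult']              # [batch, hiddenNoduleNumber]
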