Example #1
  def transform_wav_data(wav_data):
    """Transforms with sox."""
    if jitter_amount_sec:
      wav_data = audio_io.jitter_wav_data(wav_data, hparams.sample_rate,
                                          jitter_amount_sec)
    wav_data = audio_transform.transform_wav_audio(wav_data, hparams)

    return [wav_data]
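Note that this snippet is a closure: `jitter_amount_sec`, `hparams`, `audio_io`, and `audio_transform` are not defined here but are captured from the enclosing function in Magenta's data pipeline. The sketch below is a hedged assumption about how such a one-argument closure is typically handed to a TF1-style graph via `tf.py_func`; the helper name `build_transform_op` and the identity stub are illustrative, not Magenta's actual code.

  # Hedged sketch: wiring a one-argument transform closure into a TF1-style
  # graph with tf.py_func. The identity transform stands in for the
  # audio_io / audio_transform calls, which are not reproduced here.
  import tensorflow.compat.v1 as tf

  def build_transform_op(wav_data_tensor):
    """Returns a string tensor holding the transformed wav bytes."""

    def transform_wav_data(wav_data):
      # Placeholder for the jitter / sox-transform steps shown above.
      return [wav_data]

    return tf.py_func(
        transform_wav_data, [wav_data_tensor], [tf.string],
        name='transform_wav_data_op')[0]

With the real closure substituted for the stub, the resulting string tensor carries the transformed wav bytes, which the rest of the pipeline then decodes into model features.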
Example #2
  def transform_wav_data(wav_data):
    """Transforms wav data."""
    # Only do audio transformations during training.
    if is_training:
      wav_data = audio_io.jitter_wav_data(wav_data, hparams.sample_rate,
                                          jitter_amount_sec)

    # Normalize.
    if hparams.normalize_audio:
      wav_data = audio_io.normalize_wav_data(wav_data, hparams.sample_rate)

    return [wav_data]
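This variant additionally closes over `is_training` and reads `hparams.normalize_audio`. Below is a minimal sketch of how those free variables might be bound before the closure is created; only `sample_rate` and `normalize_audio` appear in the snippet itself, while the `jitter_amount_ms` field, the concrete values, and the once-per-example random draw are assumptions for illustration.

  # Hypothetical enclosing scope for the closure above. Only sample_rate and
  # normalize_audio come from the snippet; the other names are assumed.
  import collections
  import random

  Hparams = collections.namedtuple(
      'Hparams', ['sample_rate', 'normalize_audio', 'jitter_amount_ms'])

  hparams = Hparams(sample_rate=16000, normalize_audio=True, jitter_amount_ms=25)
  is_training = True
  # Draw the jitter once; the closure then reuses the captured value.
  jitter_amount_sec = random.uniform(0, hparams.jitter_amount_ms) / 1000.0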
Example #3
  def transform_wav_data(wav_data, sequence_tensor):
    """Transforms with sox."""
    sequence, cropped_beginning_seconds = preprocess_sequence(
        sequence_tensor, hparams)

    # Only do audio transformations during training.
    if is_training:
      wav_data = audio_io.jitter_wav_data(wav_data, hparams.sample_rate,
                                          jitter_amount_sec)
      wav_data = audio_transform.transform_wav_audio(wav_data, hparams)

    # If requested, crop wav.
    if hparams.crop_training_sequence_to_notes:
      wav_data = audio_io.crop_wav_data(wav_data, hparams.sample_rate,
                                        cropped_beginning_seconds,
                                        sequence.total_time)

    # Normalize.
    if hparams.normalize_audio:
      wav_data = audio_io.normalize_wav_data(wav_data, hparams.sample_rate)

    return [wav_data]
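In this two-argument variant, `preprocess_sequence` returns both the processed NoteSequence and the number of seconds trimmed from its start, so the same window can be cut out of the audio. Below is a small worked example of the crop arithmetic with assumed numbers; the sample slice shown is what `audio_io.crop_wav_data` is expected to produce, not a copy of its implementation.

  # Worked example with assumed numbers: keep the wav window that matches the
  # cropped label sequence so audio and labels stay aligned.
  cropped_beginning_seconds = 1.5  # seconds trimmed from the start of the labels
  total_time = 30.0                # duration of the cropped sequence, in seconds
  sample_rate = 16000              # hparams.sample_rate (16 kHz is typical here)

  start_sample = int(cropped_beginning_seconds * sample_rate)  # 24000
  num_samples = int(total_time * sample_rate)                  # 480000
  # Expected slice: samples[start_sample:start_sample + num_samples]
  print(start_sample, num_samples)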