Example #1
  def DetectHardClipping(cls, signal, threshold=2):
    """Detects hard clipping.

    Hard clipping is simply detected by counting samples that touch either the
    lower or upper bound too many times in a row (according to |threshold|).
    The presence of a single sequence of samples with this property is enough
    to label the signal as hard clipped.

    Args:
      signal: AudioSegment instance.
      threshold: minimum number of samples at full-scale in a row.

    Returns:
      True if hard clipping is detected, False otherwise.
    """
    if signal.channels != 1:
      raise NotImplementedError('multiple-channel clipping not implemented')
    if signal.sample_width != 2:  # Note that signal.sample_width is in bytes.
      raise exceptions.SignalProcessingException(
          'hard-clipping detection only supported for 16 bit samples')

    # Get raw samples, check type, cast.
    samples = signal.get_array_of_samples()
    if samples.typecode != 'h':
      raise exceptions.SignalProcessingException(
          'hard-clipping detection only supported for 16 bit samples')
    samples = np.array(samples, np.int16)

    # Detect adjacent clipped samples.
    samples_type_info = np.iinfo(samples.dtype)
    mask_min = samples == samples_type_info.min
    mask_max = samples == samples_type_info.max

    def HasLongSequence(vector, min_length=threshold):
      """Returns True if there are one or more long sequences of True flags."""
      seq_length = 0
      for b in vector:
        seq_length = seq_length + 1 if b else 0
        if seq_length >= min_length:
          return True
      return False

    return HasLongSequence(mask_min) or HasLongSequence(mask_max)
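
A minimal usage sketch for the detector above, assuming the classmethod lives on a utility class (called SignalProcessingUtils here purely for illustration) and that pydub is installed:

from pydub.generators import Sine

# A clean tone below full scale should not be flagged as clipped.
tone = Sine(440, sample_rate=48000, bit_depth=16).to_audio_segment(
    duration=500, volume=-6.0)
assert not SignalProcessingUtils.DetectHardClipping(tone)

# Boosting the gain far beyond full scale saturates runs of consecutive
# samples at the int16 bounds, which the detector reports as hard clipping.
clipped = tone.apply_gain(40.0)
assert SignalProcessingUtils.DetectHardClipping(clipped)
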
Example #2
    def count_samples(cls, signal):
        """Number of samples per channel.

        Args:
          signal: AudioSegment instance.

        Returns:
          An integer.
        """
        number_of_samples = len(signal.get_array_of_samples())
        assert signal.channels > 0
        assert number_of_samples % signal.channels == 0
        return number_of_samples // signal.channels
Example #3
  def CountSamples(cls, signal):
    """Number of samples per channel.

    Args:
      signal: AudioSegment instance.

    Returns:
      An integer.
    """
    number_of_samples = len(signal.get_array_of_samples())
    assert signal.channels > 0
    assert number_of_samples % signal.channels == 0
    return number_of_samples // signal.channels
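
A short usage sketch, again assuming the classmethod belongs to a utility class named SignalProcessingUtils (the name is an assumption):

import pydub

# A 1000 ms mono segment at 8 kHz contains 8000 samples per channel.
silence = pydub.AudioSegment.silent(duration=1000, frame_rate=8000)
assert silence.channels == 1
assert SignalProcessingUtils.CountSamples(silence) == 8000
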
Example #4
    def ApplyImpulseResponse(cls, signal, impulse_response):
        """Applies an impulse response to a signal.

        Args:
          signal: AudioSegment instance.
          impulse_response: list or numpy vector of float values.

        Returns:
          AudioSegment instance.
        """
        # Get samples.
        assert signal.channels == 1, (
            'multiple-channel recordings not supported')
        samples = signal.get_array_of_samples()

        # Convolve.
        logging.info(
            'applying %d order impulse response to a signal lasting %d ms',
            len(impulse_response), len(signal))
        convolved_samples = scipy.signal.fftconvolve(
            in1=samples, in2=impulse_response, mode='full').astype(np.int16)
        logging.info('convolution computed')

        # Cast.
        convolved_samples = array.array(signal.array_type, convolved_samples)

        # Verify.
        logging.debug('signal length: %d samples', len(samples))
        logging.debug('convolved signal length: %d samples',
                      len(convolved_samples))
        assert len(convolved_samples) > len(samples)

        # Generate convolved signal AudioSegment instance.
        convolved_signal = pydub.AudioSegment(
            data=convolved_samples,
            metadata={
                'sample_width': signal.sample_width,
                'frame_rate': signal.frame_rate,
                'frame_width': signal.frame_width,
                'channels': signal.channels,
            })
        assert len(convolved_signal) > len(signal)

        return convolved_signal
Example #5
    def Copy(cls, signal):
        """Makes a copy os a signal.

    Args:
      signal: AudioSegment instance.

    Returns:
      An AudioSegment instance.
    """
        return pydub.AudioSegment(data=signal.get_array_of_samples(),
                                  metadata={
                                      'sample_width': signal.sample_width,
                                      'frame_rate': signal.frame_rate,
                                      'frame_width': signal.frame_width,
                                      'channels': signal.channels,
                                  })
Example #6
  def Copy(cls, signal):
    """Makes a copy os a signal.

    Args:
      signal: AudioSegment instance.

    Returns:
      An AudioSegment instance.
    """
    return pydub.AudioSegment(
        data=signal.get_array_of_samples(),
        metadata={
            'sample_width': signal.sample_width,
            'frame_rate': signal.frame_rate,
            'frame_width': signal.frame_width,
            'channels': signal.channels,
        })
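
A brief usage sketch of Copy, under the same assumption about the enclosing class name (SignalProcessingUtils is illustrative):

import pydub

original = pydub.AudioSegment.silent(duration=250, frame_rate=16000)
duplicate = SignalProcessingUtils.Copy(original)

# The copy is a distinct object carrying the same audio data and metadata.
assert duplicate is not original
assert duplicate.raw_data == original.raw_data
assert duplicate.frame_rate == original.frame_rate
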
Example #7
  def ApplyImpulseResponse(cls, signal, impulse_response):
    """Applies an impulse response to a signal.

    Args:
      signal: AudioSegment instance.
      impulse_response: list or numpy vector of float values.

    Returns:
      AudioSegment instance.
    """
    # Get samples.
    assert signal.channels == 1, (
        'multiple-channel recordings not supported')
    samples = signal.get_array_of_samples()

    # Convolve.
    logging.info('applying %d order impulse response to a signal lasting %d ms',
                 len(impulse_response), len(signal))
    convolved_samples = scipy.signal.fftconvolve(
        in1=samples,
        in2=impulse_response,
        mode='full').astype(np.int16)
    logging.info('convolution computed')

    # Cast.
    convolved_samples = array.array(signal.array_type, convolved_samples)

    # Verify.
    logging.debug('signal length: %d samples', len(samples))
    logging.debug('convolved signal length: %d samples', len(convolved_samples))
    assert len(convolved_samples) > len(samples)

    # Generate convolved signal AudioSegment instance.
    convolved_signal = pydub.AudioSegment(
        data=convolved_samples,
        metadata={
            'sample_width': signal.sample_width,
            'frame_rate': signal.frame_rate,
            'frame_width': signal.frame_width,
            'channels': signal.channels,
        })
    assert len(convolved_signal) > len(signal)

    return convolved_signal
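
A usage sketch for ApplyImpulseResponse that convolves a short mono tone with a simple two-tap echo; SignalProcessingUtils is again an assumed class name:

import numpy as np
from pydub.generators import Sine

dry = Sine(440, sample_rate=48000, bit_depth=16).to_audio_segment(
    duration=200, volume=-12.0)

# Unit impulse plus an attenuated echo 20 ms (960 samples at 48 kHz) later.
impulse_response = np.zeros(961)
impulse_response[0] = 1.0
impulse_response[960] = 0.5

wet = SignalProcessingUtils.ApplyImpulseResponse(dry, impulse_response)

# Full convolution appends len(impulse_response) - 1 samples, so the output
# is 20 ms longer than the input.
assert len(wet) > len(dry)
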
Example #8
  def AudioSegmentToRawData(cls, signal):
    """Returns the 16 bit samples of an AudioSegment as a numpy int16 array."""
    samples = signal.get_array_of_samples()
    if samples.typecode != 'h':
      raise exceptions.SignalProcessingException('Unsupported samples type')
    return np.array(samples, np.int16)
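
Finally, a usage sketch for AudioSegmentToRawData, with the same assumed class name:

import numpy as np
import pydub

segment = pydub.AudioSegment.silent(duration=100, frame_rate=16000)
raw = SignalProcessingUtils.AudioSegmentToRawData(segment)

# 100 ms of mono 16 kHz audio yields a vector of 1600 int16 samples.
assert raw.dtype == np.int16
assert raw.shape == (1600,)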