示例#1
0
def extract_polyphonic_sequences(
        quantized_sequence, start_step=0, min_steps_discard=None,
        max_steps_discard=None):
    """Extracts a polyphonic track from the given quantized NoteSequence.

    Currently, this extracts only one polyphonic sequence from a given track.

    Args:
      quantized_sequence: A quantized NoteSequence.
      start_step: Start extracting a sequence at this time step. Assumed
          to be the beginning of a bar.
      min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
          discarded.
      max_steps_discard: Maximum length of tracks in steps. Longer tracks are
          discarded.

    Returns:
      poly_seqs: A python list of PolyphonicSequence instances.
      stats: A python list of `statistics.Statistic` objects.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    # One counter per discard reason; every counter is returned even when it
    # was never incremented.
    stats = dict((stat_name, statistics.Counter(stat_name)) for stat_name in
                 ['polyphonic_tracks_discarded_too_short',
                  'polyphonic_tracks_discarded_too_long',
                  'polyphonic_tracks_discarded_more_than_1_program'])

    steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)

    # Create a histogram measuring lengths (in bars not steps).
    stats['polyphonic_track_lengths_in_bars'] = statistics.Histogram(
        'polyphonic_track_lengths_in_bars',
        [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

    # Allow only 1 program.
    programs = set()
    for note in quantized_sequence.notes:
        programs.add(note.program)
    if len(programs) > 1:
        stats['polyphonic_tracks_discarded_more_than_1_program'].increment()
        # Wrap in list() so callers get a concrete list rather than a Python 3
        # dict view, matching extract_pianoroll_sequences.
        return [], list(stats.values())

    # Translate the quantized sequence into a PolyphonicSequence.
    poly_seq = PolyphonicSequence(quantized_sequence,
                                  start_step=start_step)

    poly_seqs = []
    num_steps = poly_seq.num_steps

    if min_steps_discard is not None and num_steps < min_steps_discard:
        stats['polyphonic_tracks_discarded_too_short'].increment()
    elif max_steps_discard is not None and num_steps > max_steps_discard:
        stats['polyphonic_tracks_discarded_too_long'].increment()
    else:
        poly_seqs.append(poly_seq)
        stats['polyphonic_track_lengths_in_bars'].increment(
            num_steps // steps_per_bar)

    return poly_seqs, list(stats.values())
示例#2
0
def extract_polyphonic_sequences(
    quantized_sequence, start_step=0, min_steps_discard=None,
    max_steps_discard=None):
  """Extracts a polyphonic track from the given quantized NoteSequence.

  Currently, this extracts only one polyphonic sequence from a given track.

  Args:
    quantized_sequence: A quantized NoteSequence.
    start_step: Start extracting a sequence at this time step. Assumed
        to be the beginning of a bar.
    min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
        discarded.
    max_steps_discard: Maximum length of tracks in steps. Longer tracks are
        discarded.

  Returns:
    poly_seqs: A python list of PolyphonicSequence instances.
    stats: A python list of `statistics.Statistic` objects.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  # One counter per discard reason, built with a dict comprehension rather
  # than dict() over a list comprehension.
  stats = {
      stat_name: statistics.Counter(stat_name)
      for stat_name in ['polyphonic_tracks_discarded_too_short',
                        'polyphonic_tracks_discarded_too_long',
                        'polyphonic_tracks_discarded_more_than_1_program']}

  steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
      quantized_sequence)

  # Create a histogram measuring lengths (in bars not steps).
  stats['polyphonic_track_lengths_in_bars'] = statistics.Histogram(
      'polyphonic_track_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

  # Allow only 1 program.
  programs = set()
  for note in quantized_sequence.notes:
    programs.add(note.program)
  if len(programs) > 1:
    stats['polyphonic_tracks_discarded_more_than_1_program'].increment()
    # list() so a concrete list (not a Python 3 dict view) is returned.
    return [], list(stats.values())

  # Translate the quantized sequence into a PolyphonicSequence.
  poly_seq = PolyphonicSequence(quantized_sequence,
                                start_step=start_step)

  poly_seqs = []
  num_steps = poly_seq.num_steps

  if min_steps_discard is not None and num_steps < min_steps_discard:
    stats['polyphonic_tracks_discarded_too_short'].increment()
  elif max_steps_discard is not None and num_steps > max_steps_discard:
    stats['polyphonic_tracks_discarded_too_long'].increment()
  else:
    poly_seqs.append(poly_seq)
    stats['polyphonic_track_lengths_in_bars'].increment(
        num_steps // steps_per_bar)

  return poly_seqs, list(stats.values())
示例#3
0
  def __init__(self, quantized_sequence=None, steps_per_quarter=None,
               start_step=0):
    """Construct a PolyphonicSequence.

    Either quantized_sequence or steps_per_quarter should be supplied.

    Args:
      quantized_sequence: a quantized NoteSequence proto.
      steps_per_quarter: how many steps a quarter note represents.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.

    Raises:
      ValueError: If both or neither of `quantized_sequence` or
          `steps_per_quarter` is specified.
    """
    # Validate with an explicit exception rather than `assert`, which is
    # stripped when Python runs with -O (matches MetricPerformance).
    if (quantized_sequence, steps_per_quarter).count(None) != 1:
      raise ValueError(
          'Must specify exactly one of quantized_sequence or steps_per_quarter')

    if quantized_sequence:
      sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
      self._events = self._from_quantized_sequence(quantized_sequence,
                                                   start_step)
      self._steps_per_quarter = (
          quantized_sequence.quantization_info.steps_per_quarter)
    else:
      # Start with a lone START event; further events are appended later.
      self._events = [
          PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]
      self._steps_per_quarter = steps_per_quarter

    self._start_step = start_step
示例#4
0
def event_list_chords(quantized_sequence, event_lists):
  """Look up the chord underlying each event of several EventSequences.

  Args:
    quantized_sequence: The underlying quantized NoteSequence from which to
        extract the chords. It is assumed that the step numbering in this
        sequence matches the step numbering in each EventSequence in
        `event_lists`.
    event_lists: A list of EventSequence objects.

  Returns:
    A nested list of chords the same length as `event_lists`, where each inner
    list is the same length as the corresponding EventSequence (in events, not
    steps).
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  chords = ChordProgression()
  total_steps = quantized_sequence.total_quantized_steps
  if total_steps > 0:
    chords.from_quantized_sequence(quantized_sequence, 0, total_steps)

  # Events past the end of the extracted progression map to its final chord,
  # or to NO_CHORD when the progression is empty.
  if chords:
    pad_chord = chords[-1]
  else:
    pad_chord = NO_CHORD

  def chord_at(step):
    return chords[step] if step < len(chords) else pad_chord

  return [[chord_at(step) for step in event_list.steps]
          for event_list in event_lists]
示例#5
0
def event_list_chords(quantized_sequence, event_lists):
  """Extract, for every event in each EventSequence, the concurrent chord.

  Args:
    quantized_sequence: The underlying quantized NoteSequence from which to
        extract the chords. It is assumed that the step numbering in this
        sequence matches the step numbering in each EventSequence in
        `event_lists`.
    event_lists: A list of EventSequence objects.

  Returns:
    A nested list of chords the same length as `event_lists`; each inner list
    has one chord per event (not per step) of the matching EventSequence.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  chords = ChordProgression()
  num_steps = quantized_sequence.total_quantized_steps
  if num_steps > 0:
    chords.from_quantized_sequence(quantized_sequence, 0, num_steps)

  # Steps beyond the progression are padded with its final chord (NO_CHORD
  # when the progression is empty).
  pad_chord = chords[-1] if chords else NO_CHORD

  chord_lists = []
  for events in event_lists:
    chord_lists.append([chords[s] if s < len(chords) else pad_chord
                        for s in events.steps])
  return chord_lists
示例#6
0
    def __init__(self,
                 quantized_sequence=None,
                 steps_per_quarter=None,
                 start_step=0):
        """Construct a PolyphonicSequence.

        Either quantized_sequence or steps_per_quarter should be supplied.

        Args:
          quantized_sequence: a quantized NoteSequence proto.
          steps_per_quarter: how many steps a quarter note represents.
          start_step: The offset of this sequence relative to the
              beginning of the source sequence. If a quantized sequence is used
              as input, only notes starting after this step will be considered.

        Raises:
          ValueError: If both or neither of `quantized_sequence` or
              `steps_per_quarter` is specified.
        """
        # Use an explicit exception instead of `assert`, which disappears
        # under `python -O`.
        if (quantized_sequence, steps_per_quarter).count(None) != 1:
            raise ValueError(
                'Must specify exactly one of quantized_sequence or '
                'steps_per_quarter')

        if quantized_sequence:
            sequences_lib.assert_is_relative_quantized_sequence(
                quantized_sequence)
            self._events = self._from_quantized_sequence(
                quantized_sequence, start_step)
            self._steps_per_quarter = (
                quantized_sequence.quantization_info.steps_per_quarter)
        else:
            # Start from a single START event; callers append further events.
            self._events = [
                PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)
            ]
            self._steps_per_quarter = steps_per_quarter

        self._start_step = start_step
示例#7
0
  def __init__(self, quantized_sequence=None, steps_per_quarter=None,
               start_step=0, num_velocity_bins=0,
               max_shift_quarters=DEFAULT_MAX_SHIFT_QUARTERS, instrument=None,
               program=None, is_drum=None):
    """Construct a MetricPerformance.

    Either quantized_sequence or steps_per_quarter should be supplied.

    Args:
      quantized_sequence: A quantized NoteSequence proto.
      steps_per_quarter: Number of quantized time steps per quarter note, if
          using metric quantization.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
      num_velocity_bins: Number of velocity bins to use. If 0, velocity events
          will not be included at all.
      max_shift_quarters: Maximum number of quarter notes for a single time-
          shift event.
      instrument: If not None, extract only the specified instrument from
          `quantized_sequence`. Otherwise, extract all instruments.
      program: MIDI program used for this performance, or None if not specified.
          Ignored if `quantized_sequence` is provided.
      is_drum: Whether or not this performance consists of drums, or None if not
          specified. Ignored if `quantized_sequence` is provided.

    Raises:
      ValueError: If both or neither of `quantized_sequence` or
          `steps_per_quarter` is specified.
    """
    if (quantized_sequence, steps_per_quarter).count(None) != 1:
      raise ValueError(
          'Must specify exactly one of quantized_sequence or steps_per_quarter')

    if quantized_sequence:
      sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
      # _steps_per_quarter must be assigned first: it feeds the
      # max_shift_steps computation used by _from_quantized_sequence below.
      self._steps_per_quarter = (
          quantized_sequence.quantization_info.steps_per_quarter)
      self._events = self._from_quantized_sequence(
          quantized_sequence, start_step, num_velocity_bins,
          max_shift_steps=self._steps_per_quarter * max_shift_quarters,
          instrument=instrument)
      # The sequence itself determines program/is_drum here, overriding the
      # `program` and `is_drum` arguments (documented above as ignored).
      program, is_drum = _program_and_is_drum_from_sequence(
          quantized_sequence, instrument)

    else:
      self._steps_per_quarter = steps_per_quarter
      self._events = []

    super(MetricPerformance, self).__init__(
        start_step=start_step,
        num_velocity_bins=num_velocity_bins,
        max_shift_steps=self._steps_per_quarter * max_shift_quarters,
        program=program,
        is_drum=is_drum)
    def __init__(self,
                 quantized_sequence=None,
                 events_list=None,
                 steps_per_quarter=None,
                 start_step=0,
                 min_pitch=MIN_MIDI_PITCH,
                 max_pitch=MAX_MIDI_PITCH,
                 split_repeats=True,
                 shift_range=False):
        """Construct a PianorollSequence.

        Exactly one of `quantized_sequence` or `steps_per_quarter` must be
        supplied. At most one of `quantized_sequence` and `events_list` may be
        supplied.

        Args:
          quantized_sequence: an optional quantized NoteSequence proto to base
              PianorollSequence on.
          events_list: an optional list of Pianoroll events to base
              PianorollSequence on.
          steps_per_quarter: how many steps a quarter note represents. Must be
              provided if `quantized_sequence` not given.
          start_step: The offset of this sequence relative to the
              beginning of the source sequence. If a quantized sequence is used
              as input, only notes starting after this step will be considered.
          min_pitch: The minimum valid pitch value, inclusive.
          max_pitch: The maximum valid pitch value, inclusive.
          split_repeats: Whether to force repeated notes to have a 0-state step
              between them when initializing from a quantized NoteSequence.
          shift_range: If True, assume that the given events_list is in the
              full MIDI pitch range and needs to be shifted and filtered based
              on `min_pitch` and `max_pitch`.

        Raises:
          ValueError: If both or neither of `quantized_sequence` and
              `steps_per_quarter` is specified, or if both
              `quantized_sequence` and `events_list` are specified.
        """
        # Explicit exceptions instead of `assert`, which `python -O` strips.
        if (quantized_sequence, steps_per_quarter).count(None) != 1:
            raise ValueError(
                'Must specify exactly one of quantized_sequence or '
                'steps_per_quarter')
        if (quantized_sequence, events_list).count(None) < 1:
            raise ValueError(
                'Cannot specify both quantized_sequence and events_list')

        self._min_pitch = min_pitch
        self._max_pitch = max_pitch

        if quantized_sequence:
            sequences_lib.assert_is_relative_quantized_sequence(
                quantized_sequence)
            self._events = self._from_quantized_sequence(
                quantized_sequence, start_step, min_pitch, max_pitch,
                split_repeats)
            self._steps_per_quarter = (
                quantized_sequence.quantization_info.steps_per_quarter)
        else:
            self._events = []
            self._steps_per_quarter = steps_per_quarter
            if events_list:
                # Append through the public API so shift_range-based shifting
                # and filtering are applied to each event.
                for e in events_list:
                    self.append(e, shift_range)
        self._start_step = start_step
示例#9
0
  def __init__(self, quantized_sequence=None, events_list=None,
               steps_per_quarter=None, start_step=0, min_pitch=MIN_MIDI_PITCH,
               max_pitch=MAX_MIDI_PITCH, split_repeats=True, shift_range=False):
    """Construct a PianorollSequence.

    Exactly one of `quantized_sequence` or `steps_per_quarter` must be supplied.
    At most one of `quantized_sequence` and `events_list` may be supplied.

    Args:
      quantized_sequence: an optional quantized NoteSequence proto to base
          PianorollSequence on.
      events_list: an optional list of Pianoroll events to base
          PianorollSequence on.
      steps_per_quarter: how many steps a quarter note represents. Must be
          provided if `quantized_sequence` not given.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
      min_pitch: The minimum valid pitch value, inclusive.
      max_pitch: The maximum valid pitch value, inclusive.
      split_repeats: Whether to force repeated notes to have a 0-state step
          between them when initializing from a quantized NoteSequence.
      shift_range: If True, assume that the given events_list is in the full
         MIDI pitch range and needs to be shifted and filtered based on
         `min_pitch` and `max_pitch`.

    Raises:
      ValueError: If both or neither of `quantized_sequence` and
          `steps_per_quarter` is specified, or if both `quantized_sequence`
          and `events_list` are specified.
    """
    # Explicit exceptions instead of `assert`, which `python -O` strips.
    if (quantized_sequence, steps_per_quarter).count(None) != 1:
      raise ValueError(
          'Must specify exactly one of quantized_sequence or steps_per_quarter')
    if (quantized_sequence, events_list).count(None) < 1:
      raise ValueError(
          'Cannot specify both quantized_sequence and events_list')

    self._min_pitch = min_pitch
    self._max_pitch = max_pitch

    if quantized_sequence:
      sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
      self._events = self._from_quantized_sequence(quantized_sequence,
                                                   start_step, min_pitch,
                                                   max_pitch, split_repeats)
      self._steps_per_quarter = (
          quantized_sequence.quantization_info.steps_per_quarter)
    else:
      self._events = []
      self._steps_per_quarter = steps_per_quarter
      if events_list:
        # Append through the public API so shift_range-based shifting and
        # filtering are applied to each event.
        for e in events_list:
          self.append(e, shift_range)
    self._start_step = start_step
示例#10
0
def extract_chords(quantized_sequence,
                   max_steps=None,
                   all_transpositions=False):
    """Extracts a single chord progression from a quantized NoteSequence.

    This function will extract the underlying chord progression (encoded as
    text annotations) from `quantized_sequence`.

    Args:
      quantized_sequence: A quantized NoteSequence.
      max_steps: An integer, maximum length of a chord progression. Chord
          progressions will be trimmed to this length. If None, chord
          progressions will not be trimmed.
      all_transpositions: If True, also transpose the chord progression into
          all 12 keys.

    Returns:
      chord_progressions: If `all_transpositions` is False, a python list
          containing a single ChordProgression instance. If
          `all_transpositions` is True, a python list containing 12
          ChordProgression instances, one for each transposition.
      stats: A python list of `statistics.Statistic` objects.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    # Dict literal instead of dict() over a one-element list comprehension.
    stats = {'chords_truncated': statistics.Counter('chords_truncated')}
    chords = ChordProgression()
    chords.from_quantized_sequence(quantized_sequence, 0,
                                   quantized_sequence.total_quantized_steps)
    # Merge the nested conditionals; trim only when a limit is set and needed.
    if max_steps is not None and len(chords) > max_steps:
        chords.set_length(max_steps)
        stats['chords_truncated'].increment()
    if all_transpositions:
        # Transpose by every amount from a tritone down to a tritone up.
        chord_progressions = []
        for amount in range(-6, 6):
            transposed_chords = copy.deepcopy(chords)
            transposed_chords.transpose(amount)
            chord_progressions.append(transposed_chords)
        return chord_progressions, list(stats.values())
    else:
        return [chords], list(stats.values())
示例#11
0
def extract_chords(quantized_sequence, max_steps=None,
                   all_transpositions=False):
  """Extracts a single chord progression from a quantized NoteSequence.

  This function will extract the underlying chord progression (encoded as text
  annotations) from `quantized_sequence`.

  Args:
    quantized_sequence: A quantized NoteSequence.
    max_steps: An integer, maximum length of a chord progression. Chord
        progressions will be trimmed to this length. If None, chord
        progressions will not be trimmed.
    all_transpositions: If True, also transpose the chord progression into all
        12 keys.

  Returns:
    chord_progressions: If `all_transpositions` is False, a python list
        containing a single ChordProgression instance. If `all_transpositions`
        is True, a python list containing 12 ChordProgression instances, one
        for each transposition.
    stats: A python list of `statistics.Statistic` objects.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  # Dict literal instead of dict() over a one-element list comprehension.
  stats = {'chords_truncated': statistics.Counter('chords_truncated')}
  chords = ChordProgression()
  chords.from_quantized_sequence(
      quantized_sequence, 0, quantized_sequence.total_quantized_steps)
  # Merge the nested conditionals; trim only when a limit is set and needed.
  if max_steps is not None and len(chords) > max_steps:
    chords.set_length(max_steps)
    stats['chords_truncated'].increment()
  if all_transpositions:
    # Transpose by every amount from a tritone down to a tritone up.
    chord_progressions = []
    for amount in range(-6, 6):
      transposed_chords = copy.deepcopy(chords)
      transposed_chords.transpose(amount)
      chord_progressions.append(transposed_chords)
    return chord_progressions, list(stats.values())
  else:
    return [chords], list(stats.values())
示例#12
0
  def testAssertIsRelativeQuantizedNoteSequence(self):
    # (pitch, velocity, start_time, end_time) tuples for the test track.
    notes = [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)]
    testing_lib.add_track_to_sequence(self.note_sequence, 0, notes)

    relative_quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=self.steps_per_quarter)
    absolute_quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=4)

    # Only the relatively-quantized sequence passes the assertion; absolute
    # quantization and the raw unquantized sequence must both raise.
    sequences_lib.assert_is_relative_quantized_sequence(
        relative_quantized_sequence)
    for bad_sequence in (absolute_quantized_sequence, self.note_sequence):
      with self.assertRaises(sequences_lib.QuantizationStatusException):
        sequences_lib.assert_is_relative_quantized_sequence(bad_sequence)
示例#13
0
  def testAssertIsRelativeQuantizedNoteSequence(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])

    rel_seq = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=self.steps_per_quarter)
    abs_seq = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=4)

    # A relatively-quantized sequence passes; an absolutely-quantized one and
    # an unquantized one both raise QuantizationStatusException.
    sequences_lib.assert_is_relative_quantized_sequence(rel_seq)
    with self.assertRaises(sequences_lib.QuantizationStatusException):
      sequences_lib.assert_is_relative_quantized_sequence(abs_seq)
    with self.assertRaises(sequences_lib.QuantizationStatusException):
      sequences_lib.assert_is_relative_quantized_sequence(self.note_sequence)
示例#14
0
def extract_pianoroll_sequences(quantized_sequence,
                                start_step=0,
                                min_steps_discard=None,
                                max_steps_discard=None,
                                max_steps_truncate=None):
    """Extracts a polyphonic track from the given quantized NoteSequence.

    Currently, this extracts only one pianoroll from a given track.

    Args:
      quantized_sequence: A quantized NoteSequence.
      start_step: Start extracting a sequence at this time step. Assumed
          to be the beginning of a bar.
      min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
          discarded.
      max_steps_discard: Maximum length of tracks in steps. Longer tracks are
          discarded. Mutually exclusive with `max_steps_truncate`.
      max_steps_truncate: Maximum length of tracks in steps. Longer tracks are
          truncated. Mutually exclusive with `max_steps_discard`.

    Returns:
      pianoroll_seqs: A python list of PianorollSequence instances.
      stats: A python list of `statistics.Statistic` objects.

    Raises:
      ValueError: If both `max_steps_discard` and `max_steps_truncate` are
          specified.
    """
    if (max_steps_discard, max_steps_truncate).count(None) == 0:
        raise ValueError(
            'Only one of `max_steps_discard` and `max_steps_truncate` can be '
            'specified.')
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    # A dict comprehension avoids the generator-in-dict() form that required
    # the g-complex-comprehension pylint suppression.
    stats = {
        stat_name: statistics.Counter(stat_name)
        for stat_name in ['pianoroll_tracks_truncated_too_long',
                          'pianoroll_tracks_discarded_too_short',
                          'pianoroll_tracks_discarded_too_long',
                          'pianoroll_tracks_discarded_more_than_1_program']}

    steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)

    # Create a histogram measuring lengths (in bars not steps).
    stats['pianoroll_track_lengths_in_bars'] = statistics.Histogram(
        'pianoroll_track_lengths_in_bars',
        [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

    # Allow only 1 program.
    programs = set()
    for note in quantized_sequence.notes:
        programs.add(note.program)
    if len(programs) > 1:
        stats['pianoroll_tracks_discarded_more_than_1_program'].increment()
        return [], list(stats.values())

    # Translate the quantized sequence into a PianorollSequence.
    pianoroll_seq = PianorollSequence(quantized_sequence=quantized_sequence,
                                      start_step=start_step)

    pianoroll_seqs = []
    num_steps = pianoroll_seq.num_steps

    if min_steps_discard is not None and num_steps < min_steps_discard:
        stats['pianoroll_tracks_discarded_too_short'].increment()
    elif max_steps_discard is not None and num_steps > max_steps_discard:
        stats['pianoroll_tracks_discarded_too_long'].increment()
    else:
        if max_steps_truncate is not None and num_steps > max_steps_truncate:
            stats['pianoroll_tracks_truncated_too_long'].increment()
            pianoroll_seq.set_length(max_steps_truncate)
        pianoroll_seqs.append(pianoroll_seq)
        # NOTE(review): the histogram records the pre-truncation length
        # (`num_steps`), not the truncated length — confirm this is intended.
        stats['pianoroll_track_lengths_in_bars'].increment(num_steps //
                                                           steps_per_bar)
    return pianoroll_seqs, list(stats.values())
示例#15
0
    def from_quantized_sequence(self,
                                quantized_sequence,
                                search_start_step=0,
                                gap_bars=1,
                                pad_end=False,
                                ignore_is_drum=False):
        """Populate self with drums from the given quantized NoteSequence object.

    A drum track is extracted from the given quantized sequence starting at time
    step `start_step`. `start_step` can be used to drive extraction of multiple
    drum tracks from the same quantized sequence. The end step of the extracted
    drum track will be stored in `self._end_step`.

    0 velocity notes are ignored. The drum extraction is ended when there are
    no drums for a time stretch of `gap_bars` in bars (measures) of music. The
    number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Each drum event is a Python frozenset of simultaneous (after quantization)
    drum "pitches", or an empty frozenset to indicate no drums are played.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      search_start_step: Start searching for drums at this time step. Assumed to
          be the beginning of a bar.
      gap_bars: If this many bars or more follow a non-empty drum event, the
          drum track is ended.
      pad_end: If True, the end of the drums will be padded with empty events so
          that it will end at a bar boundary.
      ignore_is_drum: Whether accept notes where `is_drum` is False.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
    """
        sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
        # Discard any previously-extracted state before repopulating.
        self._reset()

        steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
            quantized_sequence)
        if steps_per_bar_float % 1 != 0:
            raise events_lib.NonIntegerStepsPerBarException(
                'There are %f timesteps per bar. Time signature: %d/%d' %
                (steps_per_bar_float,
                 quantized_sequence.time_signatures[0].numerator,
                 quantized_sequence.time_signatures[0].denominator))
        self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
        self._steps_per_quarter = (
            quantized_sequence.quantization_info.steps_per_quarter)

        # Group all drum notes that start at the same step.
        all_notes = [
            note for note in quantized_sequence.notes
            if ((note.is_drum or ignore_is_drum)  # drums only
                and note.velocity  # no zero-velocity notes
                # after start_step only
                and note.quantized_start_step >= search_start_step)
        ]
        grouped_notes = collections.defaultdict(list)
        for note in all_notes:
            grouped_notes[note.quantized_start_step].append(note)

        # Sort by note start times.
        notes = sorted(grouped_notes.items(), key=operator.itemgetter(0))

        if not notes:
            return

        gap_start_index = 0

        # Snap the track start down to the bar boundary at or before the first
        # note (bars counted relative to search_start_step).
        track_start_step = (notes[0][0] -
                            (notes[0][0] - search_start_step) % steps_per_bar)
        for start, group in notes:

            start_index = start - track_start_step
            pitches = frozenset(note.pitch for note in group)

            # If a gap of `gap` or more steps is found, end the drum track.
            # len(self) is only nonzero once at least one event was added, so
            # a leading gap never terminates extraction.
            note_distance = start_index - gap_start_index
            if len(self) and note_distance >= gap_bars * steps_per_bar:
                break

            # Add a drum event, a set of drum "pitches". set_length first
            # fills any intermediate steps with empty events.
            self.set_length(start_index + 1)
            self._events[start_index] = pitches

            gap_start_index = start_index + 1

        if not self._events:
            # If no drum events were added, don't set `_start_step` and `_end_step`.
            return

        self._start_step = track_start_step

        length = len(self)
        # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
        if pad_end:
            length += -len(self) % steps_per_bar
        self.set_length(length)
示例#16
0
def extract_lead_sheet_fragments(quantized_sequence,
                                 search_start_step=0,
                                 min_bars=7,
                                 max_steps_truncate=None,
                                 max_steps_discard=None,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 pad_end=False,
                                 filter_drums=True,
                                 require_chords=False,
                                 all_transpositions=False):
  """Extracts lead sheet fragments from a quantized NoteSequence.

  Melodies are first extracted with `melodies_lib.extract_melodies`; the
  chords underlying each extracted melody are then obtained via
  `chords_lib.extract_chords_for_melodies`, and each (melody, chords) pair
  is combined into a LeadSheet.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    search_start_step: Time step at which to start searching for a melody.
        Assumed to be the first step of a bar.
    min_bars: Melodies shorter than this many bars are discarded.
    max_steps_truncate: If set, melodies longer than this many steps are
        truncated to this threshold. When `pad_end` is also True, truncation
        stops at the end of the last full bar below the threshold.
    max_steps_discard: If set, melodies longer than this many steps are
        discarded.
    gap_bars: A melody ends once this many bars (measures) of silence are
        encountered.
    min_unique_pitches: Melodies with fewer unique pitches (with octave
        equivalence) than this are discarded.
    ignore_polyphonic_notes: If True, melodies are extracted even from tracks
        containing polyphony (simultaneous note starts). If False, polyphonic
        tracks are skipped.
    pad_end: If True, each melody is padded with NO_EVENTs so it ends on a
        bar boundary.
    filter_drums: If True, notes with `is_drum` set are ignored.
    require_chords: If True, lead sheets whose chord progression is entirely
        NO_CHORD are dropped; otherwise melody-only lead sheets are kept.
    all_transpositions: If True, each lead sheet fragment is additionally
        transposed into all 12 keys.

  Returns:
    A python list of LeadSheet instances, plus a list of
    `statistics.Statistic` objects.

  Raises:
    NonIntegerStepsPerBarError: If the bar length of `quantized_sequence`
        (derived from its time signature) is not an integer number of time
        steps.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
  stats = {'empty_chord_progressions':
           statistics.Counter('empty_chord_progressions')}
  melodies, melody_stats = melodies_lib.extract_melodies(
      quantized_sequence, search_start_step=search_start_step,
      min_bars=min_bars, max_steps_truncate=max_steps_truncate,
      max_steps_discard=max_steps_discard, gap_bars=gap_bars,
      min_unique_pitches=min_unique_pitches,
      ignore_polyphonic_notes=ignore_polyphonic_notes, pad_end=pad_end,
      filter_drums=filter_drums)
  chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
      quantized_sequence, melodies)
  lead_sheets = []
  for melody, progression in zip(melodies, chord_progressions):
    if progression is None:
      # No chord progression could be extracted for this melody.
      continue
    if require_chords and all(figure == chords_lib.NO_CHORD
                              for figure in progression):
      # Progression is all NO_CHORD and chords are required; drop it.
      stats['empty_chord_progressions'].increment()
      continue
    lead_sheet = LeadSheet(melody, progression)
    if all_transpositions:
      # Emit one deep copy per transposition amount, covering all 12 keys.
      for amount in range(-6, 6):
        shifted = copy.deepcopy(lead_sheet)
        shifted.transpose(amount)
        lead_sheets.append(shifted)
    else:
      lead_sheets.append(lead_sheet)
  return lead_sheets, list(stats.values()) + melody_stats + chord_stats
示例#17
0
def infer_chords_for_sequence(quantized_sequence,
                              chords_per_bar=None,
                              key_change_prob=0.001,
                              chord_change_prob=0.5,
                              chord_pitch_out_of_key_prob=0.01,
                              chord_note_concentration=100.0):
  """Infer chords for a quantized NoteSequence using the Viterbi algorithm.

  This uses some heuristics to infer chords for a quantized NoteSequence. At
  each chord position a key and chord will be inferred, and the chords will be
  added (as text annotations) to the sequence.

  Args:
    quantized_sequence: The quantized NoteSequence for which to infer chords.
        This NoteSequence will be modified in place.
    chords_per_bar: The number of chords per bar to infer. If None, use a
        default number of chords based on the time signature of
        `quantized_sequence`.
    key_change_prob: Probability of a key change between two adjacent frames.
    chord_change_prob: Probability of a chord change between two adjacent
        frames.
    chord_pitch_out_of_key_prob: Probability of a pitch in a chord not belonging
        to the current key.
    chord_note_concentration: Concentration parameter for the distribution of
        observed pitches played over a chord. At zero, all pitches are equally
        likely. As concentration increases, observed pitches must match the
        chord pitches more closely.

  Raises:
    SequenceAlreadyHasChordsException: If `quantized_sequence` already has
        chords.
    UncommonTimeSignatureException: If `chords_per_bar` is not specified and
        `quantized_sequence` has an uncommon time signature.
    NonIntegerStepsPerChordException: If the number of quantized steps per chord
        is not an integer.
    EmptySequenceException: If `quantized_sequence` is empty.
    SequenceTooLongException: If the number of chords to be inferred is too
        large.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
  # Inference would duplicate any pre-existing chord annotations, so refuse
  # to run on a sequence that already contains CHORD_SYMBOL annotations.
  for ta in quantized_sequence.text_annotations:
    if ta.annotation_type == music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL:
      raise SequenceAlreadyHasChordsException(
          'NoteSequence already has chord(s): %s' % ta.text)

  if chords_per_bar is None:
    # Choose a default chord count per bar keyed by the (numerator,
    # denominator) of the sequence's first time signature.
    time_signature = (quantized_sequence.time_signatures[0].numerator,
                      quantized_sequence.time_signatures[0].denominator)
    if time_signature not in _DEFAULT_TIME_SIGNATURE_CHORDS_PER_BAR:
      raise UncommonTimeSignatureException(
          'No default chords per bar for time signature: (%d, %d)' %
          time_signature)
    chords_per_bar = _DEFAULT_TIME_SIGNATURE_CHORDS_PER_BAR[time_signature]

  # Determine the number of seconds (and steps) each chord is held.
  steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
      quantized_sequence)
  steps_per_chord_float = steps_per_bar_float / chords_per_bar
  if steps_per_chord_float != round(steps_per_chord_float):
    raise NonIntegerStepsPerChordException(
        'Non-integer number of steps per chord: %f' % steps_per_chord_float)
  steps_per_chord = int(steps_per_chord_float)
  steps_per_second = sequences_lib.steps_per_quarter_to_steps_per_second(
      quantized_sequence.quantization_info.steps_per_quarter,
      quantized_sequence.tempos[0].qpm)
  seconds_per_chord = steps_per_chord / steps_per_second

  # One chord "frame" per `seconds_per_chord` span of the whole sequence.
  num_chords = int(math.ceil(quantized_sequence.total_time / seconds_per_chord))
  if num_chords == 0:
    raise EmptySequenceException('NoteSequence is empty.')
  if num_chords > _MAX_NUM_CHORDS:
    raise SequenceTooLongException(
        'NoteSequence too long for chord inference: %d frames' % num_chords)

  # Compute pitch vectors for each chord frame, then compute log-likelihood of
  # observing those pitch vectors under each possible chord.
  note_pitch_vectors = sequence_note_pitch_vectors(
      quantized_sequence, seconds_per_frame=seconds_per_chord)
  chord_frame_loglik = _chord_frame_log_likelihood(
      note_pitch_vectors, chord_note_concentration)

  # Compute distribution over chords for each key, and transition distribution
  # between key-chord pairs.
  key_chord_distribution = _key_chord_distribution(
      chord_pitch_out_of_key_prob=chord_pitch_out_of_key_prob)
  key_chord_transition_distribution = _key_chord_transition_distribution(
      key_chord_distribution,
      key_change_prob=key_change_prob,
      chord_change_prob=chord_change_prob)
  key_chord_loglik = np.log(key_chord_distribution)
  key_chord_transition_loglik = np.log(key_chord_transition_distribution)

  # Viterbi decoding yields one (key, chord) pair per frame.
  key_chords = _key_chord_viterbi(
      chord_frame_loglik, key_chord_loglik, key_chord_transition_loglik)

  # Add the inferred chord changes to the sequence, logging any key changes.
  current_key_name = None
  current_chord_name = None
  for frame, (key, chord) in enumerate(key_chords):
    if _PITCH_CLASS_NAMES[key] != current_key_name:
      if current_key_name is not None:
        tf.logging.info('Sequence has key change from %s to %s at %f seconds.',
                        current_key_name, _PITCH_CLASS_NAMES[key],
                        frame * seconds_per_chord)
      current_key_name = _PITCH_CLASS_NAMES[key]

    if chord == constants.NO_CHORD:
      figure = constants.NO_CHORD
    else:
      root, kind = chord
      figure = '%s%s' % (_PITCH_CLASS_NAMES[root], kind)

    # Only annotate chord *changes*, not every frame.
    if figure != current_chord_name:
      ta = quantized_sequence.text_annotations.add()
      ta.time = frame * seconds_per_chord
      ta.quantized_step = frame * steps_per_chord
      ta.text = figure
      ta.annotation_type = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
      current_chord_name = figure
示例#18
0
  def from_quantized_sequence(self,
                              quantized_sequence,
                              search_start_step=0,
                              instrument=0,
                              gap_bars=1,
                              ignore_polyphonic_notes=False,
                              pad_end=False,
                              filter_drums=True):
    """Populate self with a melody from the given quantized NoteSequence.

    A monophonic melody is extracted from the given `instrument` starting at
    `search_start_step`. `instrument` and `search_start_step` can be used to
    drive extraction of multiple melodies from the same quantized sequence. The
    end step of the extracted melody will be stored in `self._end_step`.

    0 velocity notes are ignored. The melody extraction is ended when there are
    no held notes for a time stretch of `gap_bars` in bars (measures) of music.
    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    `ignore_polyphonic_notes` determines what happens when polyphonic (multiple
    notes start at the same time) data is encountered. If
    `ignore_polyphonic_notes` is true, the highest pitch is used in the melody
    when multiple notes start at the same time. If false, an exception is
    raised.

    Args:
      quantized_sequence: A NoteSequence quantized with
          sequences_lib.quantize_note_sequence.
      search_start_step: Start searching for a melody at this time step. Assumed
          to be the first step of a bar.
      instrument: Search for a melody in this instrument number.
      gap_bars: If this many bars or more follow a NOTE_OFF event, the melody
          is ended.
      ignore_polyphonic_notes: If True, the highest pitch is used in the melody
          when multiple notes start at the same time. If False,
          PolyphonicMelodyException will be raised if multiple notes start at
          the same time.
      pad_end: If True, the end of the melody will be padded with NO_EVENTs so
          that it will end at a bar boundary.
      filter_drums: If True, notes for which `is_drum` is True will be ignored.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      PolyphonicMelodyException: If any of the notes start on the same step
          and `ignore_polyphonic_notes` is False.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    # Clear any previously-extracted events before repopulating.
    self._reset()

    # Melody extraction requires an integral number of steps per bar.
    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      raise events_lib.NonIntegerStepsPerBarException(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Sort track by note start times, and secondarily by pitch descending.
    notes = sorted([n for n in quantized_sequence.notes
                    if n.instrument == instrument and
                    n.quantized_start_step >= search_start_step],
                   key=lambda note: (note.quantized_start_step, -note.pitch))

    if not notes:
      return

    # The first step in the melody, beginning at the first step of a bar.
    melody_start_step = (
        notes[0].quantized_start_step -
        (notes[0].quantized_start_step - search_start_step) % steps_per_bar)
    for note in notes:
      if filter_drums and note.is_drum:
        continue

      # Ignore 0 velocity notes.
      if not note.velocity:
        continue

      # Event indices are relative to the bar-aligned melody start.
      start_index = note.quantized_start_step - melody_start_step
      end_index = note.quantized_end_step - melody_start_step

      if not self._events:
        # If there are no events, we don't need to check for polyphony.
        self._add_note(note.pitch, start_index, end_index)
        continue

      # If `start_index` comes before or lands on an already added note's start
      # step, we cannot add it. In that case either discard the melody or keep
      # the highest pitch.
      last_on, last_off = self._get_last_on_off_events()
      on_distance = start_index - last_on
      off_distance = start_index - last_off
      if on_distance == 0:
        if ignore_polyphonic_notes:
          # Keep highest note.
          # Notes are sorted by pitch descending, so if a note is already at
          # this position its the highest pitch.
          continue
        else:
          self._reset()
          raise PolyphonicMelodyException()
      elif on_distance < 0:
        # Should not happen: notes were sorted by start step above.
        raise PolyphonicMelodyException(
            'Unexpected note. Not in ascending order.')

      # If a gap of `gap` or more steps is found, end the melody.
      if len(self) and off_distance >= gap_bars * steps_per_bar:
        break

      # Add the note-on and off events to the melody.
      self._add_note(note.pitch, start_index, end_index)

    if not self._events:
      # If no notes were added, don't set `_start_step` and `_end_step`.
      return

    self._start_step = melody_start_step

    # Strip final MELODY_NOTE_OFF event.
    if self._events[-1] == MELODY_NOTE_OFF:
      del self._events[-1]

    length = len(self)
    # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
    if pad_end:
      length += -len(self) % steps_per_bar
    self.set_length(length)
示例#19
0
  def from_quantized_sequence(self, quantized_sequence, start_step, end_step):
    """Populate self with the chords from the given quantized NoteSequence.

    A chord progression is extracted from the given sequence starting at time
    step `start_step` and ending at time step `end_step`.

    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      start_step: Start populating chords at this time step.
      end_step: Stop populating chords at this time step.

    Raises:
      NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      CoincidentChordsError: If any of the chords start on the same step.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      # NoteSequence's time signature field is the repeated `time_signatures`;
      # index the first entry (matching the convention used by the melody and
      # drum extractors) rather than a nonexistent `time_signature` attribute.
      raise events_lib.NonIntegerStepsPerBarError(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float,
           quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Sort track by chord times.
    chords = sorted([a for a in quantized_sequence.text_annotations
                     if a.annotation_type == CHORD_SYMBOL],
                    key=lambda chord: chord.quantized_step)

    # `prev_step`/`prev_figure` track the most recent chord seen so far; a
    # chord is only written once the step where it ends becomes known.
    prev_step = None
    prev_figure = NO_CHORD

    for chord in chords:
      if chord.quantized_step >= end_step:
        # No more chords within range.
        break

      elif chord.quantized_step < start_step:
        # Chord is before start of range; remember it as the active chord.
        prev_step = chord.quantized_step
        prev_figure = chord.text
        continue

      if chord.quantized_step == prev_step:
        if chord.text == prev_figure:
          # Identical coincident chords, just skip.
          continue
        else:
          # Two different chords start at the same time step.
          self._reset()
          raise CoincidentChordsError(
              'chords %s and %s are coincident' % (prev_figure, chord.text))

      if chord.quantized_step > start_step:
        # Add the previous chord, which was active until this one began.
        if prev_step is None:
          start_index = 0
        else:
          start_index = max(prev_step, start_step) - start_step
        end_index = chord.quantized_step - start_step
        self._add_chord(prev_figure, start_index, end_index)

      prev_step = chord.quantized_step
      prev_figure = chord.text

    if prev_step is None or prev_step < end_step:
      # Add the last chord active before end_step.
      if prev_step is None:
        start_index = 0
      else:
        start_index = max(prev_step, start_step) - start_step
      end_index = end_step - start_step
      self._add_chord(prev_figure, start_index, end_index)

    self._start_step = start_step
    self._end_step = end_step
示例#20
0
  def from_quantized_sequence(self,
                              quantized_sequence,
                              search_start_step=0,
                              gap_bars=1,
                              pad_end=False,
                              ignore_is_drum=False):
    """Populate self with drums extracted from a quantized NoteSequence.

    Extraction begins at `search_start_step`, which can be varied to pull
    multiple drum tracks out of the same sequence; the end step of the
    extracted track is stored in `self._end_step`.

    Zero-velocity notes are skipped. The track ends once `gap_bars` bars
    (measures) pass with no drum onset; bar length in steps is derived from
    the time signature of `quantized_sequence`.

    Each drum event is a Python frozenset of the drum "pitches" that sound
    simultaneously (after quantization); an empty frozenset means silence.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      search_start_step: Time step at which to start looking for drums.
          Assumed to be the beginning of a bar.
      gap_bars: The track ends after this many bars (or more) of silence
          following a non-empty drum event.
      pad_end: If True, the track is padded with empty events so that it ends
          on a bar boundary.
      ignore_is_drum: If True, also accept notes whose `is_drum` is False.

    Raises:
      NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      raise events_lib.NonIntegerStepsPerBarError(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    steps_per_bar = int(steps_per_bar_float)
    self._steps_per_bar = steps_per_bar
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Bucket qualifying notes by their quantized start step.
    notes_by_step = collections.defaultdict(list)
    for note in quantized_sequence.notes:
      if not (note.is_drum or ignore_is_drum):
        continue  # drums only
      if not note.velocity:
        continue  # no zero-velocity notes
      if note.quantized_start_step < search_start_step:
        continue  # after start_step only
      notes_by_step[note.quantized_start_step].append(note)

    # Process onsets in chronological order.
    onsets = sorted(notes_by_step.items(), key=operator.itemgetter(0))
    if not onsets:
      return

    # Align the track start to the bar boundary at or before the first onset.
    first_step = onsets[0][0]
    track_start_step = (
        first_step - (first_step - search_start_step) % steps_per_bar)

    gap_start_index = 0
    for step, group in onsets:
      event_index = step - track_start_step

      # End the track once at least `gap_bars` bars of silence have passed.
      gap_length = event_index - gap_start_index
      if len(self) and gap_length >= gap_bars * steps_per_bar:  # pylint:disable=len-as-condition
        break

      # Record the simultaneous drum "pitches" as one frozenset event.
      self.set_length(event_index + 1)
      self._events[event_index] = frozenset(n.pitch for n in group)

      gap_start_index = event_index + 1

    if not self._events:
      # No drum events were added; leave `_start_step`/`_end_step` unset.
      return

    self._start_step = track_start_step

    length = len(self)
    if pad_end:
      # Round `_end_step` up to a multiple of `steps_per_bar`.
      length += -len(self) % steps_per_bar
    self.set_length(length)
示例#21
0
def extract_melodies(quantized_sequence,
                     search_start_step=0,
                     min_bars=7,
                     max_steps_truncate=None,
                     max_steps_discard=None,
                     gap_bars=1.0,
                     min_unique_pitches=5,
                     ignore_polyphonic_notes=True,
                     pad_end=False,
                     filter_drums=True):
  """Extracts a list of melodies from the given quantized NoteSequence.

  This function will search through `quantized_sequence` for monophonic
  melodies in every track at every time step.

  Once a note-on event in a track is encountered, a melody begins.
  Gaps of silence in each track will be splitting points that divide the
  track into separate melodies. The minimum size of these gaps are given
  in `gap_bars`. The size of a bar (measure) of music in time steps is
  computed from the time signature stored in `quantized_sequence`.

  The melody is then checked for validity. The melody is only used if it is
  at least `min_bars` bars long, and has at least `min_unique_pitches` unique
  notes (preventing melodies that only repeat a few notes, such as those found
  in some accompaniment tracks, from being used).

  After scanning each instrument track in the quantized sequence, a list of all
  extracted Melody objects is returned.

  Args:
    quantized_sequence: A quantized NoteSequence.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at
        the same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.

  Returns:
    melodies: A python list of Melody instances.
    stats: A python list of `statistics.Statistic` objects.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  # TODO(danabo): Convert `ignore_polyphonic_notes` into a float which controls
  # the degree of polyphony that is acceptable.
  melodies = []
  stats = {stat_name: statistics.Counter(stat_name) for stat_name in
           ['polyphonic_tracks_discarded',
            'melodies_discarded_too_short',
            'melodies_discarded_too_few_pitches',
            'melodies_discarded_too_long',
            'melodies_truncated']}
  # Create a histogram measuring melody lengths (in bars not steps).
  # Capture melodies that are very small, in the range of the filter lower
  # bound `min_bars`, and large. The bucket intervals grow approximately
  # exponentially.
  stats['melody_lengths_in_bars'] = statistics.Histogram(
      'melody_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, min_bars // 2, min_bars,
       min_bars + 1, min_bars - 1])
  instruments = {n.instrument for n in quantized_sequence.notes}
  steps_per_bar = int(
      sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence))
  for instrument in instruments:
    instrument_search_start_step = search_start_step
    # Quantize the track into a Melody object.
    # If any notes start at the same time, only one is kept.
    while True:
      melody = Melody()
      try:
        melody.from_quantized_sequence(
            quantized_sequence,
            instrument=instrument,
            search_start_step=instrument_search_start_step,
            gap_bars=gap_bars,
            ignore_polyphonic_notes=ignore_polyphonic_notes,
            pad_end=pad_end,
            filter_drums=filter_drums)
      except PolyphonicMelodyException:
        stats['polyphonic_tracks_discarded'].increment()
        break  # Look for monophonic melodies in other tracks.
      # Note: NonIntegerStepsPerBarException is deliberately not caught; it
      # propagates to the caller (see Raises above).
      # Start search for next melody on next bar boundary (inclusive).
      instrument_search_start_step = (
          melody.end_step +
          (search_start_step - melody.end_step) % steps_per_bar)
      if not melody:
        break

      # Require a certain melody length.
      if len(melody) - 1 < melody.steps_per_bar * min_bars:
        stats['melodies_discarded_too_short'].increment()
        continue

      # Discard melodies that are too long.
      if max_steps_discard is not None and len(melody) > max_steps_discard:
        stats['melodies_discarded_too_long'].increment()
        continue

      # Truncate melodies that are too long.
      if max_steps_truncate is not None and len(melody) > max_steps_truncate:
        truncated_length = max_steps_truncate
        if pad_end:
          # Truncate to the end of the last full bar below the threshold.
          truncated_length -= max_steps_truncate % melody.steps_per_bar
        melody.set_length(truncated_length)
        stats['melodies_truncated'].increment()

      # Require a certain number of unique pitches.
      note_histogram = melody.get_note_histogram()
      unique_pitches = np.count_nonzero(note_histogram)
      if unique_pitches < min_unique_pitches:
        stats['melodies_discarded_too_few_pitches'].increment()
        continue

      # TODO(danabo)
      # Add filter for rhythmic diversity.

      stats['melody_lengths_in_bars'].increment(
          len(melody) // melody.steps_per_bar)

      melodies.append(melody)

  # Return a list (not a Python 3 dict view) so callers can concatenate these
  # stats with other stat lists, as extract_lead_sheet_fragments does.
  return melodies, list(stats.values())
示例#22
0
def infer_chords_for_sequence(quantized_sequence,
                              chords_per_bar,
                              key_change_prob=0.001,
                              chord_change_prob=0.5,
                              chord_pitch_out_of_key_prob=0.01,
                              chord_note_concentration=100.0):
    """Infer chords for a quantized NoteSequence using the Viterbi algorithm.

  This uses some heuristics to infer chords for a quantized NoteSequence. At
  each chord position a key and chord will be inferred, and the chords will be
  added (as text annotations) to the sequence.

  Args:
    quantized_sequence: The quantized NoteSequence for which to infer chords.
        This NoteSequence will be modified in place.
    chords_per_bar: The number of chords per bar to infer.
    key_change_prob: Probability of a key change between two adjacent frames.
    chord_change_prob: Probability of a chord change between two adjacent
        frames.
    chord_pitch_out_of_key_prob: Probability of a pitch in a chord not belonging
        to the current key.
    chord_note_concentration: Concentration parameter for the distribution of
        observed pitches played over a chord. At zero, all pitches are equally
        likely. As concentration increases, observed pitches must match the
        chord pitches more closely.

  Raises:
    SequenceAlreadyHasChordsException: If `quantized_sequence` already has
        chords.
    NonIntegerStepsPerChordException: If the number of quantized steps per chord
        is not an integer.
    EmptySequenceException: If `quantized_sequence` is empty.
    SequenceTooLongException: If the number of chords to be inferred is too
        large.
  """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    # Refuse to infer chords on top of existing chord annotations; the caller
    # must strip any prior CHORD_SYMBOL annotations first.
    for ta in quantized_sequence.text_annotations:
        if ta.annotation_type == music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL:
            raise SequenceAlreadyHasChordsException(
                'NoteSequence already has chord(s): %s' % ta.text)

    # Determine the number of seconds (and steps) each chord is held.
    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    steps_per_chord_float = steps_per_bar_float / chords_per_bar
    # Chords must align exactly with quantized steps; a fractional step count
    # would make the frame boundaries ambiguous.
    if steps_per_chord_float != round(steps_per_chord_float):
        raise NonIntegerStepsPerChordException(
            'Non-integer number of steps per chord: %f' %
            steps_per_chord_float)
    steps_per_chord = int(steps_per_chord_float)
    # NOTE(review): uses only tempos[0] — presumably the sequence has a single
    # constant tempo; confirm upstream quantization guarantees this.
    steps_per_second = sequences_lib.steps_per_quarter_to_steps_per_second(
        quantized_sequence.quantization_info.steps_per_quarter,
        quantized_sequence.tempos[0].qpm)
    seconds_per_chord = steps_per_chord / steps_per_second

    # One chord frame per `seconds_per_chord` window, rounding up so the tail
    # of the sequence is covered.
    num_chords = int(
        math.ceil(quantized_sequence.total_time / seconds_per_chord))
    if num_chords == 0:
        raise EmptySequenceException('NoteSequence is empty.')
    if num_chords > _MAX_NUM_CHORDS:
        raise SequenceTooLongException(
            'NoteSequence too long for chord inference: %d frames' %
            num_chords)

    # Compute pitch vectors for each chord frame, then compute log-likelihood of
    # observing those pitch vectors under each possible chord.
    note_pitch_vectors = sequence_note_pitch_vectors(
        quantized_sequence, seconds_per_frame=seconds_per_chord)
    chord_frame_loglik = _chord_frame_log_likelihood(note_pitch_vectors,
                                                     chord_note_concentration)

    # Compute distribution over chords for each key, and transition distribution
    # between key-chord pairs.
    key_chord_distribution = _key_chord_distribution(
        chord_pitch_out_of_key_prob=chord_pitch_out_of_key_prob)
    key_chord_transition_distribution = _key_chord_transition_distribution(
        key_chord_distribution,
        key_change_prob=key_change_prob,
        chord_change_prob=chord_change_prob)
    key_chord_loglik = np.log(key_chord_distribution)
    key_chord_transition_loglik = np.log(key_chord_transition_distribution)

    # Viterbi decode: the most likely (key, chord) pair for every frame.
    key_chords = _key_chord_viterbi(chord_frame_loglik, key_chord_loglik,
                                    key_chord_transition_loglik)

    # Add the inferred chord changes to the sequence, logging any key changes.
    current_key_name = None
    current_chord_name = None
    for frame, (key, chord) in enumerate(key_chords):
        if _PITCH_CLASS_NAMES[key] != current_key_name:
            # Key changes are only logged, not annotated on the sequence.
            if current_key_name is not None:
                tf.logging.info(
                    'Sequence has key change from %s to %s at %f seconds.',
                    current_key_name, _PITCH_CLASS_NAMES[key],
                    frame * seconds_per_chord)
            current_key_name = _PITCH_CLASS_NAMES[key]

        if chord == constants.NO_CHORD:
            figure = constants.NO_CHORD
        else:
            root, kind = chord
            figure = '%s%s' % (_PITCH_CLASS_NAMES[root], kind)

        # Only annotate when the chord actually changes, so consecutive
        # identical frames collapse into one annotation.
        if figure != current_chord_name:
            ta = quantized_sequence.text_annotations.add()
            ta.time = frame * seconds_per_chord
            ta.quantized_step = frame * steps_per_chord
            ta.text = figure
            ta.annotation_type = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
            current_chord_name = figure
示例#23
0
  def from_quantized_sequence(self, quantized_sequence, start_step, end_step):
    """Populate self with the chords from the given quantized NoteSequence.

    A chord progression is extracted from the given sequence starting at time
    step `start_step` and ending at time step `end_step`.

    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      start_step: Start populating chords at this time step.
      end_step: Stop populating chords at this time step.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      CoincidentChordsException: If any of the chords start on the same step.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      # `time_signatures` is a repeated proto field; the previous singular
      # `time_signature` access raised AttributeError instead of the intended
      # NonIntegerStepsPerBarException.
      raise events_lib.NonIntegerStepsPerBarException(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float,
           quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Sort track by chord times.
    chords = sorted([a for a in quantized_sequence.text_annotations
                     if a.annotation_type == CHORD_SYMBOL],
                    key=lambda chord: chord.quantized_step)

    prev_step = None
    prev_figure = NO_CHORD

    for chord in chords:
      if chord.quantized_step >= end_step:
        # No more chords within range.
        break

      elif chord.quantized_step < start_step:
        # Chord is before start of range; remember it as the active chord at
        # the start of the range.
        prev_step = chord.quantized_step
        prev_figure = chord.text
        continue

      if chord.quantized_step == prev_step:
        if chord.text == prev_figure:
          # Identical coincident chords, just skip.
          continue
        else:
          # Two different chords start at the same time step.
          self._reset()
          raise CoincidentChordsException('chords %s and %s are coincident' %
                                          (prev_figure, chord.text))

      if chord.quantized_step > start_step:
        # Add the previous chord, covering the span up to this chord's onset.
        if prev_step is None:
          start_index = 0
        else:
          start_index = max(prev_step, start_step) - start_step
        end_index = chord.quantized_step - start_step
        self._add_chord(prev_figure, start_index, end_index)

      prev_step = chord.quantized_step
      prev_figure = chord.text

    if prev_step is None or prev_step < end_step:
      # Add the last chord active before end_step.
      if prev_step is None:
        start_index = 0
      else:
        start_index = max(prev_step, start_step) - start_step
      end_index = end_step - start_step
      self._add_chord(prev_figure, start_index, end_index)

    self._start_step = start_step
    self._end_step = end_step
示例#24
0
def extract_pianoroll_sequences(
    quantized_sequence, start_step=0, min_steps_discard=None,
    max_steps_discard=None, max_steps_truncate=None):
  """Extracts a pianoroll track from the given quantized NoteSequence.

  Currently, this extracts only one pianoroll from a given track.

  Args:
    quantized_sequence: A quantized NoteSequence.
    start_step: Start extracting a sequence at this time step. Assumed
        to be the beginning of a bar.
    min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
        discarded.
    max_steps_discard: Maximum length of tracks in steps. Longer tracks are
        discarded. Mutually exclusive with `max_steps_truncate`.
    max_steps_truncate: Maximum length of tracks in steps. Longer tracks are
        truncated. Mutually exclusive with `max_steps_discard`.

  Returns:
    pianoroll_seqs: A python list of PianorollSequence instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.

  Raises:
    ValueError: If both `max_steps_discard` and `max_steps_truncate` are
        specified.
  """

  if (max_steps_discard, max_steps_truncate).count(None) == 0:
    raise ValueError(
        'Only one of `max_steps_discard` and `max_steps_truncate` can be '
        'specified.')
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  stats = {stat_name: statistics.Counter(stat_name) for stat_name in
           ['pianoroll_tracks_truncated_too_long',
            'pianoroll_tracks_discarded_too_short',
            'pianoroll_tracks_discarded_too_long',
            'pianoroll_tracks_discarded_more_than_1_program']}

  steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
      quantized_sequence)

  # Create a histogram measuring lengths (in bars not steps).
  stats['pianoroll_track_lengths_in_bars'] = statistics.Histogram(
      'pianoroll_track_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

  # Allow only 1 program.
  programs = set()
  for note in quantized_sequence.notes:
    programs.add(note.program)
  if len(programs) > 1:
    stats['pianoroll_tracks_discarded_more_than_1_program'].increment()
    # Return a list (not a dict view) for consistency with the other
    # extraction functions in this file.
    return [], list(stats.values())

  # Translate the quantized sequence into a PianorollSequence.
  pianoroll_seq = PianorollSequence(quantized_sequence=quantized_sequence,
                                    start_step=start_step)

  pianoroll_seqs = []
  num_steps = pianoroll_seq.num_steps

  if min_steps_discard is not None and num_steps < min_steps_discard:
    stats['pianoroll_tracks_discarded_too_short'].increment()
  elif max_steps_discard is not None and num_steps > max_steps_discard:
    stats['pianoroll_tracks_discarded_too_long'].increment()
  else:
    if max_steps_truncate is not None and num_steps > max_steps_truncate:
      stats['pianoroll_tracks_truncated_too_long'].increment()
      pianoroll_seq.set_length(max_steps_truncate)
    pianoroll_seqs.append(pianoroll_seq)
    stats['pianoroll_track_lengths_in_bars'].increment(
        num_steps // steps_per_bar)
  return pianoroll_seqs, list(stats.values())
示例#25
0
    def from_quantized_sequence(self,
                                quantized_sequence,
                                search_start_step=0,
                                instrument=0,
                                gap_bars=1,
                                ignore_polyphonic_notes=False,
                                pad_end=False,
                                filter_drums=True):
        """Populate self with a melody from the given quantized NoteSequence.

    A monophonic melody is extracted from the given `instrument` starting at
    `search_start_step`. `instrument` and `search_start_step` can be used to
    drive extraction of multiple melodies from the same quantized sequence. The
    end step of the extracted melody will be stored in `self._end_step`.

    0 velocity notes are ignored. The melody extraction is ended when there are
    no held notes for a time stretch of `gap_bars` in bars (measures) of music.
    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    `ignore_polyphonic_notes` determines what happens when polyphonic (multiple
    notes start at the same time) data is encountered. If
    `ignore_polyphonic_notes` is true, the highest pitch is used in the melody
    when multiple notes start at the same time. If false, an exception is
    raised.

    Args:
      quantized_sequence: A NoteSequence quantized with
          sequences_lib.quantize_note_sequence.
      search_start_step: Start searching for a melody at this time step. Assumed
          to be the first step of a bar.
      instrument: Search for a melody in this instrument number.
      gap_bars: If this many bars or more follow a NOTE_OFF event, the melody
          is ended.
      ignore_polyphonic_notes: If True, the highest pitch is used in the melody
          when multiple notes start at the same time. If False,
          PolyphonicMelodyException will be raised if multiple notes start at
          the same time.
      pad_end: If True, the end of the melody will be padded with NO_EVENTs so
          that it will end at a bar boundary.
      filter_drums: If True, notes for which `is_drum` is True will be ignored.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      PolyphonicMelodyException: If any of the notes start on the same step
          and `ignore_polyphonic_notes` is False.
    """
        sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
        self._reset()

        steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
            quantized_sequence)
        if steps_per_bar_float % 1 != 0:
            raise events_lib.NonIntegerStepsPerBarException(
                'There are %f timesteps per bar. Time signature: %d/%d' %
                (steps_per_bar_float,
                 quantized_sequence.time_signatures[0].numerator,
                 quantized_sequence.time_signatures[0].denominator))
        self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
        self._steps_per_quarter = (
            quantized_sequence.quantization_info.steps_per_quarter)

        # Sort track by note start times, and secondarily by pitch descending.
        # Pitch-descending order means that for coincident notes, the highest
        # pitch is encountered first (used below for polyphony handling).
        notes = sorted([
            n for n in quantized_sequence.notes if n.instrument == instrument
            and n.quantized_start_step >= search_start_step
        ],
                       key=lambda note:
                       (note.quantized_start_step, -note.pitch))

        if not notes:
            return

        # The first step in the melody, beginning at the first step of a bar.
        # Rounds the first note's start down to the bar boundary at or before
        # it (relative to `search_start_step`).
        melody_start_step = (
            notes[0].quantized_start_step -
            (notes[0].quantized_start_step - search_start_step) %
            steps_per_bar)
        for note in notes:
            if filter_drums and note.is_drum:
                continue

            # Ignore 0 velocity notes.
            if not note.velocity:
                continue

            # Event indices are relative to the melody's start, not the
            # sequence's step 0.
            start_index = note.quantized_start_step - melody_start_step
            end_index = note.quantized_end_step - melody_start_step

            if not self._events:
                # If there are no events, we don't need to check for polyphony.
                self._add_note(note.pitch, start_index, end_index)
                continue

            # If `start_index` comes before or lands on an already added note's start
            # step, we cannot add it. In that case either discard the melody or keep
            # the highest pitch.
            last_on, last_off = self._get_last_on_off_events()
            on_distance = start_index - last_on
            off_distance = start_index - last_off
            if on_distance == 0:
                if ignore_polyphonic_notes:
                    # Keep highest note.
                    # Notes are sorted by pitch descending, so if a note is already at
                    # this position its the highest pitch.
                    continue
                else:
                    self._reset()
                    raise PolyphonicMelodyException()
            elif on_distance < 0:
                raise PolyphonicMelodyException(
                    'Unexpected note. Not in ascending order.')

            # If a gap of `gap` or more steps is found, end the melody.
            if len(self) and off_distance >= gap_bars * steps_per_bar:
                break

            # Add the note-on and off events to the melody.
            self._add_note(note.pitch, start_index, end_index)

        if not self._events:
            # If no notes were added, don't set `_start_step` and `_end_step`.
            return

        self._start_step = melody_start_step

        # Strip final MELODY_NOTE_OFF event.
        if self._events[-1] == MELODY_NOTE_OFF:
            del self._events[-1]

        length = len(self)
        # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
        if pad_end:
            length += -len(self) % steps_per_bar
        self.set_length(length)
示例#26
0
def extract_melodies(quantized_sequence,
                     search_start_step=0,
                     min_bars=7,
                     max_steps_truncate=None,
                     max_steps_discard=None,
                     gap_bars=1.0,
                     min_unique_pitches=5,
                     ignore_polyphonic_notes=True,
                     pad_end=False,
                     filter_drums=True):
    """Extracts a list of melodies from the given quantized NoteSequence.

  This function will search through `quantized_sequence` for monophonic
  melodies in every track at every time step.

  Once a note-on event in a track is encountered, a melody begins.
  Gaps of silence in each track will be splitting points that divide the
  track into separate melodies. The minimum size of these gaps are given
  in `gap_bars`. The size of a bar (measure) of music in time steps is
  computed from the time signature stored in `quantized_sequence`.

  The melody is then checked for validity. The melody is only used if it is
  at least `min_bars` bars long, and has at least `min_unique_pitches` unique
  notes (preventing melodies that only repeat a few notes, such as those found
  in some accompaniment tracks, from being used).

  After scanning each instrument track in the quantized sequence, a list of all
  extracted Melody objects is returned.

  Args:
    quantized_sequence: A quantized NoteSequence.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at
        the same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.

  Returns:
    melodies: A python list of Melody instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    # TODO(danabo): Convert `ignore_polyphonic_notes` into a float which controls
    # the degree of polyphony that is acceptable.
    melodies = []
    stats = {stat_name: statistics.Counter(stat_name) for stat_name in [
        'polyphonic_tracks_discarded', 'melodies_discarded_too_short',
        'melodies_discarded_too_few_pitches', 'melodies_discarded_too_long',
        'melodies_truncated'
    ]}
    # Create a histogram measuring melody lengths (in bars not steps).
    # Capture melodies that are very small, in the range of the filter lower
    # bound `min_bars`, and large. The bucket intervals grow approximately
    # exponentially.
    stats['melody_lengths_in_bars'] = statistics.Histogram(
        'melody_lengths_in_bars', [
            0, 1, 10, 20, 30, 40, 50, 100, 200, 500, min_bars // 2, min_bars,
            min_bars + 1, min_bars - 1
        ])
    instruments = {n.instrument for n in quantized_sequence.notes}
    steps_per_bar = int(
        sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence))
    for instrument in instruments:
        instrument_search_start_step = search_start_step
        # Quantize the track into a Melody object.
        # If any notes start at the same time, only one is kept.
        while True:
            melody = Melody()
            try:
                melody.from_quantized_sequence(
                    quantized_sequence,
                    instrument=instrument,
                    search_start_step=instrument_search_start_step,
                    gap_bars=gap_bars,
                    ignore_polyphonic_notes=ignore_polyphonic_notes,
                    pad_end=pad_end,
                    filter_drums=filter_drums)
            except PolyphonicMelodyException:
                stats['polyphonic_tracks_discarded'].increment()
                break  # Look for monophonic melodies in other tracks.
            except events_lib.NonIntegerStepsPerBarException:
                raise
            # Start search for next melody on next bar boundary (inclusive).
            instrument_search_start_step = (
                melody.end_step +
                (search_start_step - melody.end_step) % steps_per_bar)
            if not melody:
                break

            # Require a certain melody length.
            if len(melody) < melody.steps_per_bar * min_bars:
                stats['melodies_discarded_too_short'].increment()
                continue

            # Discard melodies that are too long.
            if max_steps_discard is not None and len(melody) > max_steps_discard:
                stats['melodies_discarded_too_long'].increment()
                continue

            # Truncate melodies that are too long.
            if max_steps_truncate is not None and len(melody) > max_steps_truncate:
                truncated_length = max_steps_truncate
                if pad_end:
                    # Truncate to the last full bar below the threshold.
                    truncated_length -= max_steps_truncate % melody.steps_per_bar
                melody.set_length(truncated_length)
                stats['melodies_truncated'].increment()

            # Require a certain number of unique pitches.
            note_histogram = melody.get_note_histogram()
            unique_pitches = np.count_nonzero(note_histogram)
            if unique_pitches < min_unique_pitches:
                stats['melodies_discarded_too_few_pitches'].increment()
                continue

            # TODO(danabo)
            # Add filter for rhythmic diversity.

            stats['melody_lengths_in_bars'].increment(
                len(melody) // melody.steps_per_bar)

            melodies.append(melody)

    return melodies, list(stats.values())
示例#27
0
def extract_lead_sheet_fragments(quantized_sequence,
                                 search_start_step=0,
                                 min_bars=7,
                                 max_steps_truncate=None,
                                 max_steps_discard=None,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 pad_end=False,
                                 filter_drums=True,
                                 require_chords=False,
                                 all_transpositions=False):
    """Extracts a list of lead sheet fragments from a quantized NoteSequence.

    This function first extracts melodies using melodies_lib.extract_melodies,
    then extracts the chords underlying each melody using
    chords_lib.extract_chords_for_melodies.

    Args:
      quantized_sequence: A quantized NoteSequence object.
      search_start_step: Start searching for a melody at this time step. Assumed
          to be the first step of a bar.
      min_bars: Minimum length of melodies in number of bars. Shorter melodies are
          discarded.
      max_steps_truncate: Maximum number of steps in extracted melodies. If
          defined, longer melodies are truncated to this threshold. If pad_end is
          also True, melodies will be truncated to the end of the last bar below
          this threshold.
      max_steps_discard: Maximum number of steps in extracted melodies. If
          defined, longer melodies are discarded.
      gap_bars: A melody comes to an end when this number of bars (measures) of
          silence is encountered.
      min_unique_pitches: Minimum number of unique notes with octave equivalence.
          Melodies with too few unique notes are discarded.
      ignore_polyphonic_notes: If True, melodies will be extracted from
          `quantized_sequence` tracks that contain polyphony (notes start at the
          same time). If False, tracks with polyphony will be ignored.
      pad_end: If True, the end of the melody will be padded with NO_EVENTs so
          that it will end at a bar boundary.
      filter_drums: If True, notes for which `is_drum` is True will be ignored.
      require_chords: If True, only return lead sheets that have at least one
          chord other than NO_CHORD. If False, lead sheets with only melody will
          also be returned.
      all_transpositions: If True, also transpose each lead sheet fragment into
          all 12 keys.

    Returns:
      A python list of LeadSheet instances.

    Raises:
      NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
    stats = {'empty_chord_progressions':
             statistics.Counter('empty_chord_progressions')}
    melodies, melody_stats = melody_pipelines.extract_melodies(
        quantized_sequence,
        search_start_step=search_start_step,
        min_bars=min_bars,
        max_steps_truncate=max_steps_truncate,
        max_steps_discard=max_steps_discard,
        gap_bars=gap_bars,
        min_unique_pitches=min_unique_pitches,
        ignore_polyphonic_notes=ignore_polyphonic_notes,
        pad_end=pad_end,
        filter_drums=filter_drums)
    chord_progressions, chord_stats = chord_pipelines.extract_chords_for_melodies(
        quantized_sequence, melodies)
    lead_sheets = []
    for melody, chords in zip(melodies, chord_progressions):
        # If `chords` is None, it's because a chord progression could not be
        # extracted for this particular melody.
        if chords is not None:
            if require_chords and all(chord == chords_lib.NO_CHORD
                                      for chord in chords):
                stats['empty_chord_progressions'].increment()
            else:
                lead_sheet = LeadSheet(melody, chords)
                if all_transpositions:
                    # Transpose into all 12 keys (-6 to +5 semitones).
                    for amount in range(-6, 6):
                        transposed_lead_sheet = copy.deepcopy(lead_sheet)
                        transposed_lead_sheet.transpose(amount)
                        lead_sheets.append(transposed_lead_sheet)
                else:
                    lead_sheets.append(lead_sheet)
    return lead_sheets, list(stats.values()) + melody_stats + chord_stats