Example #1
def extract_polyphonic_sequences(
    quantized_sequence, start_step=0, min_steps_discard=None,
    max_steps_discard=None):
  """Extracts a polyphonic track from the given quantized NoteSequence.

  Currently, this extracts only one polyphonic sequence from a given track.

  Args:
    quantized_sequence: A quantized NoteSequence.
    start_step: Start extracting a sequence at this time step. Assumed
        to be the beginning of a bar.
    min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
        discarded.
    max_steps_discard: Maximum length of tracks in steps. Longer tracks are
        discarded.

  Returns:
    poly_seqs: A python list of PolyphonicSequence instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)

  stats = dict([(stat_name, statistics.Counter(stat_name)) for stat_name in
                ['polyphonic_tracks_discarded_too_short',
                 'polyphonic_tracks_discarded_too_long',
                 'polyphonic_tracks_discarded_more_than_1_program']])

  steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
      quantized_sequence)

  # Create a histogram measuring lengths (in bars not steps).
  stats['polyphonic_track_lengths_in_bars'] = statistics.Histogram(
      'polyphonic_track_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

  # Allow only 1 program.
  programs = set()
  for note in quantized_sequence.notes:
    programs.add(note.program)
  if len(programs) > 1:
    stats['polyphonic_tracks_discarded_more_than_1_program'].increment()
    return [], stats.values()

  # Translate the quantized sequence into a PolyphonicSequence.
  poly_seq = PolyphonicSequence(quantized_sequence,
                                start_step=start_step)

  poly_seqs = []
  num_steps = poly_seq.num_steps

  if min_steps_discard is not None and num_steps < min_steps_discard:
    stats['polyphonic_tracks_discarded_too_short'].increment()
  elif max_steps_discard is not None and num_steps > max_steps_discard:
    stats['polyphonic_tracks_discarded_too_long'].increment()
  else:
    poly_seqs.append(poly_seq)
    stats['polyphonic_track_lengths_in_bars'].increment(
        num_steps // steps_per_bar)

  return poly_seqs, stats.values()
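
A minimal usage sketch for the function above, assuming a NoteSequence
`note_sequence` is already loaded and `sequences_lib` is importable as in the
snippet; the threshold values are arbitrary.

# Hypothetical usage of extract_polyphonic_sequences.
quantized = sequences_lib.quantize_note_sequence(
    note_sequence, steps_per_quarter=4)
poly_seqs, stats = extract_polyphonic_sequences(
    quantized, min_steps_discard=16, max_steps_discard=512)
for seq in poly_seqs:
  print(seq.num_steps)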
Example #2
  def __init__(self, quantized_sequence=None, steps_per_quarter=None,
               start_step=0):
    """Construct a PolyphonicSequence.

    Either quantized_sequence or steps_per_quarter should be supplied.

    Args:
      quantized_sequence: a quantized NoteSequence proto.
      steps_per_quarter: how many steps a quarter note represents.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
    """
    assert (quantized_sequence, steps_per_quarter).count(None) == 1

    if quantized_sequence:
      sequences_lib.assert_is_quantized_sequence(quantized_sequence)
      self._events = self._from_quantized_sequence(quantized_sequence,
                                                   start_step)
      self._steps_per_quarter = (
          quantized_sequence.quantization_info.steps_per_quarter)
    else:
      self._events = [
          PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]
      self._steps_per_quarter = steps_per_quarter

    self._start_step = start_step
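
A quick sketch of the constructor's contract above: exactly one of
`quantized_sequence` and `steps_per_quarter` may be None, which the assert
enforces (`qs` below is a hypothetical quantized NoteSequence).

empty_seq = PolyphonicSequence(steps_per_quarter=4)  # starts with a START event
# seq = PolyphonicSequence(quantized_sequence=qs)    # `qs` must be quantized
# PolyphonicSequence()                               # AssertionError: both None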
Example #3
    def __init__(self,
                 quantized_sequence=None,
                 steps_per_quarter=None,
                 start_step=0):
        """Construct a PolyphonicSequence.

    Either quantized_sequence or steps_per_quarter should be supplied.

    Args:
      quantized_sequence: a quantized NoteSequence proto.
      steps_per_quarter: how many steps a quarter note represents.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
    """
        assert (quantized_sequence, steps_per_quarter).count(None) == 1

        if quantized_sequence:
            sequences_lib.assert_is_quantized_sequence(quantized_sequence)
            self._events = self._from_quantized_sequence(
                quantized_sequence, start_step)
            self._steps_per_quarter = (
                quantized_sequence.quantization_info.steps_per_quarter)
        else:
            self._events = [
                PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)
            ]
            self._steps_per_quarter = steps_per_quarter

        self._start_step = start_step
Example #4
def extract_lead_sheet_fragments(quantized_sequence,
                                 min_bars=7,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 require_chords=False):
    """Extracts a list of lead sheet fragments from a quantized NoteSequence.

  This function first extracts melodies using melodies_lib.extract_melodies,
  then extracts the chords underlying each melody using
  chords_lib.extract_chords_for_melodies.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.

  Returns:
    A python list of LeadSheet instances.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    stats = dict([('empty_chord_progressions',
                   statistics.Counter('empty_chord_progressions'))])
    melodies, melody_stats = melodies_lib.extract_melodies(
        quantized_sequence,
        min_bars=min_bars,
        gap_bars=gap_bars,
        min_unique_pitches=min_unique_pitches,
        ignore_polyphonic_notes=ignore_polyphonic_notes)
    chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
        quantized_sequence, melodies)
    lead_sheets = []
    for melody, chords in zip(melodies, chord_progressions):
        if chords is not None:
            if require_chords and all(chord == chords_lib.NO_CHORD
                                      for chord in chords):
                stats['empty_chord_progressions'].increment()
            else:
                lead_sheet = LeadSheet(melody, chords)
                lead_sheets.append(lead_sheet)
    # Use list() so the concatenation also works on Python 3, where
    # dict.values() returns a view.
    return lead_sheets, (list(stats.values()) + list(melody_stats) +
                         list(chord_stats))
Example #5
def extract_lead_sheet_fragments(quantized_sequence,
                                 min_bars=7,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 require_chords=False):
  """Extracts a list of lead sheet fragments from a quantized NoteSequence.

  This function first extracts melodies using melodies_lib.extract_melodies,
  then extracts the chords underlying each melody using
  chords_lib.extract_chords_for_melodies.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.

  Returns:
    A python list of LeadSheet instances.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)
  stats = dict([('empty_chord_progressions',
                 statistics.Counter('empty_chord_progressions'))])
  melodies, melody_stats = melodies_lib.extract_melodies(
      quantized_sequence, min_bars=min_bars, gap_bars=gap_bars,
      min_unique_pitches=min_unique_pitches,
      ignore_polyphonic_notes=ignore_polyphonic_notes)
  chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
      quantized_sequence, melodies)
  lead_sheets = []
  for melody, chords in zip(melodies, chord_progressions):
    if chords is not None:
      if require_chords and all(chord == chords_lib.NO_CHORD
                                for chord in chords):
        stats['empty_chord_progressions'].increment()
      else:
        lead_sheet = LeadSheet(melody, chords)
        lead_sheets.append(lead_sheet)
  # Use list() so the concatenation also works on Python 3, where
  # dict.values() returns a view.
  return lead_sheets, (list(stats.values()) + list(melody_stats) +
                       list(chord_stats))
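
A hedged usage sketch for the function above, assuming `quantized_sequence`
carries chord symbols as text annotations alongside its notes.

# Hypothetical call; each fragment pairs a melody with its chords.
lead_sheets, stats = extract_lead_sheet_fragments(
    quantized_sequence, min_bars=4, require_chords=True)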
Example #6
  def testAssertIsQuantizedNoteSequence(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])

    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=self.steps_per_quarter)

    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    with self.assertRaises(sequences_lib.QuantizationStatusException):
      sequences_lib.assert_is_quantized_sequence(self.note_sequence)
Example #7
    def __init__(self,
                 quantized_sequence=None,
                 events_list=None,
                 steps_per_quarter=None,
                 start_step=0,
                 min_pitch=MIN_MIDI_PITCH,
                 max_pitch=MAX_MIDI_PITCH,
                 split_repeats=True,
                 shift_range=False):
        """Construct a PianorollSequence.

    Exactly one of `quantized_sequence` or `steps_per_quarter` must be supplied.
    At most one of `quantized_sequence` and `events_list` may be supplied.

    Args:
      quantized_sequence: an optional quantized NoteSequence proto to base
          PianorollSequence on.
      events_list: an optional list of Pianoroll events to base
          PianorollSequence on.
      steps_per_quarter: how many steps a quarter note represents. Must be
          provided if `quantized_sequence` is not given.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
      min_pitch: The minimum valid pitch value, inclusive.
      max_pitch: The maximum valid pitch value, inclusive.
      split_repeats: Whether to force repeated notes to have a 0-state step
          between them when initializing from a quantized NoteSequence.
      shift_range: If True, assume that the given events_list is in the full
         MIDI pitch range and needs to be shifted and filtered based on
         `min_pitch` and `max_pitch`.
    """
        assert (quantized_sequence, steps_per_quarter).count(None) == 1
        assert (quantized_sequence, events_list).count(None) >= 1

        self._min_pitch = min_pitch
        self._max_pitch = max_pitch

        if quantized_sequence:
            sequences_lib.assert_is_quantized_sequence(quantized_sequence)
            self._events = self._from_quantized_sequence(
                quantized_sequence, start_step, min_pitch, max_pitch,
                split_repeats)
            self._steps_per_quarter = (
                quantized_sequence.quantization_info.steps_per_quarter)
        else:
            self._events = []
            self._steps_per_quarter = steps_per_quarter
            if events_list:
                for e in events_list:
                    self.append(e, shift_range)
        self._start_step = start_step
Example #8
    def testAssertIsQuantizedNoteSequence(self):
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])

        quantized_sequence = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=self.steps_per_quarter)

        sequences_lib.assert_is_quantized_sequence(quantized_sequence)
        with self.assertRaises(sequences_lib.QuantizationStatusException):
            sequences_lib.assert_is_quantized_sequence(self.note_sequence)
Example #9
  def __init__(self, quantized_sequence=None, events_list=None,
               steps_per_quarter=None, start_step=0, min_pitch=MIN_MIDI_PITCH,
               max_pitch=MAX_MIDI_PITCH, split_repeats=True, shift_range=False):
    """Construct a PianorollSequence.

    Exactly one of `quantized_sequence` or `steps_per_quarter` must be supplied.
    At most one of `quantized_sequence` and `events_list` may be supplied.

    Args:
      quantized_sequence: an optional quantized NoteSequence proto to base
          PianorollSequence on.
      events_list: an optional list of Pianoroll events to base
          PianorollSequence on.
      steps_per_quarter: how many steps a quarter note represents. Must be
          provided if `quantized_sequence` is not given.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
      min_pitch: The minimum valid pitch value, inclusive.
      max_pitch: The maximum valid pitch value, inclusive.
      split_repeats: Whether to force repeated notes to have a 0-state step
          between them when initializing from a quantized NoteSequence.
      shift_range: If True, assume that the given events_list is in the full
         MIDI pitch range and needs to be shifted and filtered based on
         `min_pitch` and `max_pitch`.
    """
    assert (quantized_sequence, steps_per_quarter).count(None) == 1
    assert (quantized_sequence, events_list).count(None) >= 1

    self._min_pitch = min_pitch
    self._max_pitch = max_pitch

    if quantized_sequence:
      sequences_lib.assert_is_quantized_sequence(quantized_sequence)
      self._events = self._from_quantized_sequence(quantized_sequence,
                                                   start_step, min_pitch,
                                                   max_pitch, split_repeats)
      self._steps_per_quarter = (
          quantized_sequence.quantization_info.steps_per_quarter)
    else:
      self._events = []
      self._steps_per_quarter = steps_per_quarter
      if events_list:
        for e in events_list:
          self.append(e, shift_range)
    self._start_step = start_step
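
A small sketch of the events-list construction path above. It assumes each
pianoroll event is a tuple of simultaneously active MIDI pitches, one tuple
per step; the values below are made up.

# Hypothetical per-step events: C4, then C4+E4, a silent step, then G4.
events = [(60,), (60, 64), (), (67,)]
pianoroll = PianorollSequence(events_list=events, steps_per_quarter=4)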
Example #10
def extract_chords(quantized_sequence,
                   max_steps=None,
                   all_transpositions=False):
    """Extracts a single chord progression from a quantized NoteSequence.

  This function will extract the underlying chord progression (encoded as text
  annotations) from `quantized_sequence`.

  Args:
    quantized_sequence: A quantized NoteSequence.
    max_steps: An integer, maximum length of a chord progression. Chord
        progressions will be trimmed to this length. If None, chord
        progressions will not be trimmed.
    all_transpositions: If True, also transpose the chord progression into all
        12 keys.

  Returns:
    chord_progressions: If `all_transpositions` is False, a python list
        containing a single ChordProgression instance. If `all_transpositions`
        is True, a python list containing 12 ChordProgression instances, one
        for each transposition.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.
  """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)

    stats = dict(
        [('chords_truncated', statistics.Counter('chords_truncated'))])
    chords = ChordProgression()
    chords.from_quantized_sequence(quantized_sequence, 0,
                                   quantized_sequence.total_quantized_steps)
    if max_steps is not None:
        if len(chords) > max_steps:
            chords.set_length(max_steps)
            stats['chords_truncated'].increment()
    if all_transpositions:
        chord_progressions = []
        for amount in range(-6, 6):
            transposed_chords = copy.deepcopy(chords)
            transposed_chords.transpose(amount)
            chord_progressions.append(transposed_chords)
        return chord_progressions, stats.values()
    else:
        return [chords], stats.values()
Example #11
def extract_chords(quantized_sequence, max_steps=None,
                   all_transpositions=False):
  """Extracts a single chord progression from a quantized NoteSequence.

  This function will extract the underlying chord progression (encoded as text
  annotations) from `quantized_sequence`.

  Args:
    quantized_sequence: A quantized NoteSequence.
    max_steps: An integer, maximum length of a chord progression. Chord
        progressions will be trimmed to this length. If None, chord
        progressions will not be trimmed.
    all_transpositions: If True, also transpose the chord progression into all
        12 keys.

  Returns:
    chord_progressions: If `all_transpositions` is False, a python list
        containing a single ChordProgression instance. If `all_transpositions`
        is True, a python list containing 12 ChordProgression instances, one
        for each transposition.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)

  stats = dict([('chords_truncated', statistics.Counter('chords_truncated'))])
  chords = ChordProgression()
  chords.from_quantized_sequence(
      quantized_sequence, 0, quantized_sequence.total_quantized_steps)
  if max_steps is not None:
    if len(chords) > max_steps:
      chords.set_length(max_steps)
      stats['chords_truncated'].increment()
  if all_transpositions:
    chord_progressions = []
    for amount in range(-6, 6):
      transposed_chords = copy.deepcopy(chords)
      transposed_chords.transpose(amount)
      chord_progressions.append(transposed_chords)
    return chord_progressions, stats.values()
  else:
    return [chords], stats.values()
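
A hypothetical call illustrating the transposition branch above: the single
extracted progression is deep-copied and transposed by every amount in
range(-6, 6), so twelve progressions come back.

progressions, stats = extract_chords(
    quantized_sequence, max_steps=256, all_transpositions=True)
assert len(progressions) == 12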
Example #12
    def __init__(self,
                 quantized_sequence=None,
                 steps_per_quarter=None,
                 start_step=0,
                 min_pitch=MIN_MIDI_PITCH,
                 max_pitch=MAX_MIDI_PITCH):
        """Construct a PianorollSequence.

    Either quantized_sequence or steps_per_quarter should be supplied.

    Args:
      quantized_sequence: a quantized NoteSequence proto.
      steps_per_quarter: how many steps a quarter note represents. Must be
          provided if `quantized_sequence` is not given.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
      min_pitch: The minimum valid pitch value, inclusive.
      max_pitch: The maximum valid pitch value, inclusive.
    """
        assert (quantized_sequence, steps_per_quarter).count(None) == 1

        self._min_pitch = min_pitch
        self._max_pitch = max_pitch

        if quantized_sequence:
            sequences_lib.assert_is_quantized_sequence(quantized_sequence)
            self._events = self._from_quantized_sequence(
                quantized_sequence, start_step, min_pitch, max_pitch)
            self._steps_per_quarter = (
                quantized_sequence.quantization_info.steps_per_quarter)
        else:
            self._events = []
            self._steps_per_quarter = steps_per_quarter

        self._start_step = start_step
Example #13
  def from_quantized_sequence(self,
                              quantized_sequence,
                              search_start_step=0,
                              gap_bars=1,
                              pad_end=False):
    """Populate self with drums from the given quantized NoteSequence object.

    A drum track is extracted from the given quantized sequence starting at time
    step `search_start_step`, which can be used to drive extraction of multiple
    drum tracks from the same quantized sequence. The end step of the extracted
    drum track will be stored in `self._end_step`.

    0 velocity notes are ignored. The drum extraction is ended when there are
    no drums for a time stretch of `gap_bars` in bars (measures) of music. The
    number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Each drum event is a Python frozenset of simultaneous (after quantization)
    drum "pitches", or an empty frozenset to indicate no drums are played.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      search_start_step: Start searching for drums at this time step. Assumed to
          be the beginning of a bar.
      gap_bars: If this many bars or more follow a non-empty drum event, the
          drum track is ended.
      pad_end: If True, the end of the drums will be padded with empty events so
          that it will end at a bar boundary.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
    """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      raise events_lib.NonIntegerStepsPerBarException(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Group all drum notes that start at the same step.
    all_notes = [note for note in quantized_sequence.notes
                 if note.is_drum                 # drums only
                 and note.velocity               # no zero-velocity notes
                 # after start_step only
                 and note.quantized_start_step >= search_start_step]
    grouped_notes = collections.defaultdict(list)
    for note in all_notes:
      grouped_notes[note.quantized_start_step].append(note)

    # Sort by note start times.
    notes = sorted(grouped_notes.items(), key=operator.itemgetter(0))

    if not notes:
      return

    gap_start_index = 0

    track_start_step = (
        notes[0][0] - (notes[0][0] - search_start_step) % steps_per_bar)
    for start, group in notes:

      start_index = start - track_start_step
      pitches = frozenset(note.pitch for note in group)

      # If a gap of `gap_bars` bars or more is found, end the drum track.
      note_distance = start_index - gap_start_index
      if len(self) and note_distance >= gap_bars * steps_per_bar:
        break

      # Add a drum event, a set of drum "pitches".
      self.set_length(start_index + 1)
      self._events[start_index] = pitches

      gap_start_index = start_index + 1

    if not self._events:
      # If no drum events were added, don't set `_start_step` and `_end_step`.
      return

    self._start_step = track_start_step

    length = len(self)
    # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
    if pad_end:
      length += -len(self) % steps_per_bar
    self.set_length(length)
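
The two pieces of modular arithmetic above are easy to misread; here is a
worked example with arbitrary values.

steps_per_bar = 16
search_start_step = 0
first_onset = 21  # step of the first drum note
# Snap the track start back to the bar boundary containing the first onset:
track_start_step = first_onset - (first_onset - search_start_step) % steps_per_bar
assert track_start_step == 16
# With pad_end, the length is rounded up to the next bar boundary:
length = 21
length += -length % steps_per_bar
assert length == 32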
Example #14
def extract_performances(quantized_sequence,
                         start_step=0,
                         min_events_discard=None,
                         max_events_truncate=None,
                         num_velocity_bins=0):
    """Extracts a performance from the given quantized NoteSequence.

  Currently, this extracts only one performance from a given track.

  Args:
    quantized_sequence: A quantized NoteSequence.
    start_step: Start extracting a sequence at this time step.
    min_events_discard: Minimum length of tracks in events. Shorter tracks are
        discarded.
    max_events_truncate: Maximum length of tracks in events. Longer tracks are
        truncated.
    num_velocity_bins: Number of velocity bins to use. If 0, velocity events
        will not be included at all.

  Returns:
    performances: A python list of Performance instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.
  """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)

    stats = dict([(stat_name, statistics.Counter(stat_name)) for stat_name in [
        'performances_discarded_too_short', 'performances_truncated',
        'performances_discarded_more_than_1_program'
    ]])

    if sequences_lib.is_absolute_quantized_sequence(quantized_sequence):
        steps_per_second = quantized_sequence.quantization_info.steps_per_second
        # Create a histogram measuring lengths in seconds.
        stats['performance_lengths_in_seconds'] = statistics.Histogram(
            'performance_lengths_in_seconds', [5, 10, 20, 30, 40, 60, 120])
    else:
        steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
            quantized_sequence)
        # Create a histogram measuring lengths in bars.
        stats['performance_lengths_in_bars'] = statistics.Histogram(
            'performance_lengths_in_bars',
            [1, 10, 20, 30, 40, 50, 100, 200, 500])

    # Allow only 1 program.
    programs = set()
    for note in quantized_sequence.notes:
        programs.add(note.program)
    if len(programs) > 1:
        stats['performances_discarded_more_than_1_program'].increment()
        return [], stats.values()

    performances = []

    # Translate the quantized sequence into a Performance.
    if sequences_lib.is_absolute_quantized_sequence(quantized_sequence):
        performance = Performance(quantized_sequence,
                                  start_step=start_step,
                                  num_velocity_bins=num_velocity_bins)
    else:
        performance = MetricPerformance(quantized_sequence,
                                        start_step=start_step,
                                        num_velocity_bins=num_velocity_bins)

    if (max_events_truncate is not None
            and len(performance) > max_events_truncate):
        performance.truncate(max_events_truncate)
        stats['performances_truncated'].increment()

    if min_events_discard is not None and len(
            performance) < min_events_discard:
        stats['performances_discarded_too_short'].increment()
    else:
        performances.append(performance)
        if sequences_lib.is_absolute_quantized_sequence(quantized_sequence):
            stats['performance_lengths_in_seconds'].increment(
                performance.num_steps // steps_per_second)
        else:
            stats['performance_lengths_in_bars'].increment(
                performance.num_steps // steps_per_bar)

    return performances, stats.values()
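
A hedged usage sketch, assuming an absolute-time quantization produced with
sequences_lib.quantize_note_sequence_absolute; the parameter values are
arbitrary.

quantized = sequences_lib.quantize_note_sequence_absolute(
    note_sequence, steps_per_second=100)
performances, stats = extract_performances(
    quantized, max_events_truncate=512, num_velocity_bins=32)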
Example #15
def extract_melodies(quantized_sequence,
                     search_start_step=0,
                     min_bars=7,
                     max_steps_truncate=None,
                     max_steps_discard=None,
                     gap_bars=1.0,
                     min_unique_pitches=5,
                     ignore_polyphonic_notes=True,
                     pad_end=False,
                     filter_drums=True):
  """Extracts a list of melodies from the given quantized NoteSequence.

  This function will search through `quantized_sequence` for monophonic
  melodies in every track at every time step.

  Once a note-on event in a track is encountered, a melody begins.
  Gaps of silence in each track act as splitting points that divide the
  track into separate melodies. The minimum size of these gaps is given by
  `gap_bars`. The size of a bar (measure) of music in time steps is
  computed from the time signature stored in `quantized_sequence`.

  The melody is then checked for validity. The melody is only used if it is
  at least `min_bars` bars long, and has at least `min_unique_pitches` unique
  notes (preventing melodies that only repeat a few notes, such as those found
  in some accompaniment tracks, from being used).

  After scanning each instrument track in the quantized sequence, a list of all
  extracted Melody objects is returned.

  Args:
    quantized_sequence: A quantized NoteSequence.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at
        the same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.

  Returns:
    melodies: A python list of Melody instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)

  # TODO(danabo): Convert `ignore_polyphonic_notes` into a float which controls
  # the degree of polyphony that is acceptable.
  melodies = []
  stats = dict([(stat_name, statistics.Counter(stat_name)) for stat_name in
                ['polyphonic_tracks_discarded',
                 'melodies_discarded_too_short',
                 'melodies_discarded_too_few_pitches',
                 'melodies_discarded_too_long',
                 'melodies_truncated']])
  # Create a histogram measuring melody lengths (in bars not steps).
  # Capture melodies that are very small, in the range of the filter lower
  # bound `min_bars`, and large. The bucket intervals grow approximately
  # exponentially.
  stats['melody_lengths_in_bars'] = statistics.Histogram(
      'melody_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, min_bars // 2, min_bars,
       min_bars + 1, min_bars - 1])
  instruments = set([n.instrument for n in quantized_sequence.notes])
  steps_per_bar = int(
      sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence))
  for instrument in instruments:
    instrument_search_start_step = search_start_step
    # Quantize the track into a Melody object.
    # If any notes start at the same time, only one is kept.
    while 1:
      melody = Melody()
      try:
        melody.from_quantized_sequence(
            quantized_sequence,
            instrument=instrument,
            search_start_step=instrument_search_start_step,
            gap_bars=gap_bars,
            ignore_polyphonic_notes=ignore_polyphonic_notes,
            pad_end=pad_end,
            filter_drums=filter_drums)
      except PolyphonicMelodyException:
        stats['polyphonic_tracks_discarded'].increment()
        break  # Look for monophonic melodies in other tracks.
      except events_lib.NonIntegerStepsPerBarException:
        raise
      # Start search for next melody on next bar boundary (inclusive).
      instrument_search_start_step = (
          melody.end_step +
          (search_start_step - melody.end_step) % steps_per_bar)
      if not melody:
        break

      # Require a certain melody length.
      if len(melody) - 1 < melody.steps_per_bar * min_bars:
        stats['melodies_discarded_too_short'].increment()
        continue

      # Discard melodies that are too long.
      if max_steps_discard is not None and len(melody) > max_steps_discard:
        stats['melodies_discarded_too_long'].increment()
        continue

      # Truncate melodies that are too long.
      if max_steps_truncate is not None and len(melody) > max_steps_truncate:
        truncated_length = max_steps_truncate
        if pad_end:
          truncated_length -= max_steps_truncate % melody.steps_per_bar
        melody.set_length(truncated_length)
        stats['melodies_truncated'].increment()

      # Require a certain number of unique pitches.
      note_histogram = melody.get_note_histogram()
      unique_pitches = np.count_nonzero(note_histogram)
      if unique_pitches < min_unique_pitches:
        stats['melodies_discarded_too_few_pitches'].increment()
        continue

      # TODO(danabo)
      # Add filter for rhythmic diversity.

      stats['melody_lengths_in_bars'].increment(
          len(melody) // melody.steps_per_bar)

      melodies.append(melody)

  return melodies, stats.values()
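
A minimal usage sketch mirroring the defaults above; `quantized_sequence` is
assumed to come from sequences_lib.quantize_note_sequence, as in the earlier
test example.

melodies, stats = extract_melodies(
    quantized_sequence, min_bars=4, gap_bars=1.0,
    ignore_polyphonic_notes=True)
for melody in melodies:
  print(len(melody) // melody.steps_per_bar)  # length in whole bars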
Example #16
  def from_quantized_sequence(self,
                              quantized_sequence,
                              search_start_step=0,
                              instrument=0,
                              gap_bars=1,
                              ignore_polyphonic_notes=False,
                              pad_end=False,
                              filter_drums=True):
    """Populate self with a melody from the given quantized NoteSequence.

    A monophonic melody is extracted from the given `instrument` starting at
    `search_start_step`. `instrument` and `search_start_step` can be used to
    drive extraction of multiple melodies from the same quantized sequence. The
    end step of the extracted melody will be stored in `self._end_step`.

    0 velocity notes are ignored. The melody extraction is ended when there are
    no held notes for a time stretch of `gap_bars` in bars (measures) of music.
    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    `ignore_polyphonic_notes` determines what happens when polyphonic (multiple
    notes start at the same time) data is encountered. If
    `ignore_polyphonic_notes` is true, the highest pitch is used in the melody
    when multiple notes start at the same time. If false, an exception is
    raised.

    Args:
      quantized_sequence: A NoteSequence quantized with
          sequences_lib.quantize_note_sequence.
      search_start_step: Start searching for a melody at this time step. Assumed
          to be the first step of a bar.
      instrument: Search for a melody in this instrument number.
      gap_bars: If this many bars or more follow a NOTE_OFF event, the melody
          is ended.
      ignore_polyphonic_notes: If True, the highest pitch is used in the melody
          when multiple notes start at the same time. If False,
          PolyphonicMelodyException will be raised if multiple notes start at
          the same time.
      pad_end: If True, the end of the melody will be padded with NO_EVENTs so
          that it will end at a bar boundary.
      filter_drums: If True, notes for which `is_drum` is True will be ignored.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      PolyphonicMelodyException: If any of the notes start on the same step
          and `ignore_polyphonic_notes` is False.
    """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      raise events_lib.NonIntegerStepsPerBarException(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Sort track by note start times, and secondarily by pitch descending.
    notes = sorted([n for n in quantized_sequence.notes
                    if n.instrument == instrument and
                    n.quantized_start_step >= search_start_step],
                   key=lambda note: (note.quantized_start_step, -note.pitch))

    if not notes:
      return

    # The first step in the melody, beginning at the first step of a bar.
    melody_start_step = (
        notes[0].quantized_start_step -
        (notes[0].quantized_start_step - search_start_step) % steps_per_bar)
    for note in notes:
      if filter_drums and note.is_drum:
        continue

      # Ignore 0 velocity notes.
      if not note.velocity:
        continue

      start_index = note.quantized_start_step - melody_start_step
      end_index = note.quantized_end_step - melody_start_step

      if not self._events:
        # If there are no events, we don't need to check for polyphony.
        self._add_note(note.pitch, start_index, end_index)
        continue

      # If `start_index` comes before or lands on an already added note's start
      # step, we cannot add it. In that case either discard the melody or keep
      # the highest pitch.
      last_on, last_off = self._get_last_on_off_events()
      on_distance = start_index - last_on
      off_distance = start_index - last_off
      if on_distance == 0:
        if ignore_polyphonic_notes:
          # Keep highest note.
          # Notes are sorted by pitch descending, so if a note is already at
          # this position it's the highest pitch.
          continue
        else:
          self._reset()
          raise PolyphonicMelodyException()
      elif on_distance < 0:
        raise PolyphonicMelodyException(
            'Unexpected note. Not in ascending order.')

      # If a gap of `gap_bars` bars or more is found, end the melody.
      if len(self) and off_distance >= gap_bars * steps_per_bar:
        break

      # Add the note-on and off events to the melody.
      self._add_note(note.pitch, start_index, end_index)

    if not self._events:
      # If no notes were added, don't set `_start_step` and `_end_step`.
      return

    self._start_step = melody_start_step

    # Strip final MELODY_NOTE_OFF event.
    if self._events[-1] == MELODY_NOTE_OFF:
      del self._events[-1]

    length = len(self)
    # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
    if pad_end:
      length += -len(self) % steps_per_bar
    self.set_length(length)
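
A compact numeric trace of the gap test above, with hypothetical values.

steps_per_bar = 16
gap_bars = 1
last_off = 12      # step of the last note-off already in the melody
start_index = 30   # onset step of the candidate note
off_distance = start_index - last_off
# 18 >= 16, so the melody ends before this note is added:
assert off_distance >= gap_bars * steps_per_bar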
Example #17
def extract_lead_sheet_fragments(quantized_sequence,
                                 search_start_step=0,
                                 min_bars=7,
                                 max_steps_truncate=None,
                                 max_steps_discard=None,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 pad_end=False,
                                 filter_drums=True,
                                 require_chords=False,
                                 all_transpositions=False):
    """Extracts a list of lead sheet fragments from a quantized NoteSequence.

  This function first extracts melodies using melodies_lib.extract_melodies,
  then extracts the chords underlying each melody using
  chords_lib.extract_chords_for_melodies.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.
    all_transpositions: If True, also transpose each lead sheet fragment into
        all 12 keys.

  Returns:
    A python list of LeadSheet instances.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    stats = dict([('empty_chord_progressions',
                   statistics.Counter('empty_chord_progressions'))])
    melodies, melody_stats = melodies_lib.extract_melodies(
        quantized_sequence,
        search_start_step=search_start_step,
        min_bars=min_bars,
        max_steps_truncate=max_steps_truncate,
        max_steps_discard=max_steps_discard,
        gap_bars=gap_bars,
        min_unique_pitches=min_unique_pitches,
        ignore_polyphonic_notes=ignore_polyphonic_notes,
        pad_end=pad_end,
        filter_drums=filter_drums)
    chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
        quantized_sequence, melodies)
    lead_sheets = []
    for melody, chords in zip(melodies, chord_progressions):
        # If `chords` is None, it's because a chord progression could not be
        # extracted for this particular melody.
        if chords is not None:
            if require_chords and all(chord == chords_lib.NO_CHORD
                                      for chord in chords):
                stats['empty_chord_progressions'].increment()
            else:
                lead_sheet = LeadSheet(melody, chords)
                if all_transpositions:
                    for amount in range(-6, 6):
                        transposed_lead_sheet = copy.deepcopy(lead_sheet)
                        transposed_lead_sheet.transpose(amount)
                        lead_sheets.append(transposed_lead_sheet)
                else:
                    lead_sheets.append(lead_sheet)
    # Use list() so the concatenation also works on Python 3, where
    # dict.values() returns a view.
    return lead_sheets, (list(stats.values()) + list(melody_stats) +
                         list(chord_stats))
Example #18
    def from_quantized_sequence(self, quantized_sequence, start_step,
                                end_step):
        """Populate self with the chords from the given quantized NoteSequence.

    A chord progression is extracted from the given sequence starting at time
    step `start_step` and ending at time step `end_step`.

    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      start_step: Start populating chords at this time step.
      end_step: Stop populating chords at this time step.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      CoincidentChordsException: If any of the chords start on the same step.
    """
        sequences_lib.assert_is_quantized_sequence(quantized_sequence)
        self._reset()

        steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
            quantized_sequence)
        if steps_per_bar_float % 1 != 0:
            raise events_lib.NonIntegerStepsPerBarException(
                'There are %f timesteps per bar. Time signature: %d/%d' %
                (steps_per_bar_float,
                 quantized_sequence.time_signatures[0].numerator,
                 quantized_sequence.time_signatures[0].denominator))
        self._steps_per_bar = int(steps_per_bar_float)
        self._steps_per_quarter = (
            quantized_sequence.quantization_info.steps_per_quarter)

        # Sort track by chord times.
        chords = sorted(
            [a for a in quantized_sequence.text_annotations
             if a.annotation_type == CHORD_SYMBOL],
            key=lambda chord: chord.quantized_step)

        prev_step = None
        prev_figure = NO_CHORD

        for chord in chords:
            if chord.quantized_step >= end_step:
                # No more chords within range.
                break

            elif chord.quantized_step < start_step:
                # Chord is before start of range.
                prev_step = chord.quantized_step
                prev_figure = chord.text
                continue

            if chord.quantized_step == prev_step:
                if chord.text == prev_figure:
                    # Identical coincident chords, just skip.
                    continue
                else:
                    # Two different chords start at the same time step.
                    self._reset()
                    raise CoincidentChordsException(
                        'chords %s and %s are coincident' %
                        (prev_figure, chord.text))

            if chord.quantized_step > start_step:
                # Add the previous chord.
                start_index = max(prev_step, start_step) - start_step
                end_index = chord.quantized_step - start_step
                self._add_chord(prev_figure, start_index, end_index)

            prev_step = chord.quantized_step
            prev_figure = chord.text

        if prev_step is None or prev_step < end_step:
            # Add the last chord active before end_step.
            start_index = max(prev_step, start_step) - start_step
            end_index = end_step - start_step
            self._add_chord(prev_figure, start_index, end_index)

        self._start_step = start_step
        self._end_step = end_step
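
A hypothetical usage of the method above, assuming `quantized_sequence` has
CHORD_SYMBOL text annotations and at least four 16-step bars.

chords = ChordProgression()
chords.from_quantized_sequence(quantized_sequence, 0, 64)
# `chords` now holds one chord figure per step (NO_CHORD where none sounds).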
Example #19
    def from_quantized_sequence(self,
                                quantized_sequence,
                                search_start_step=0,
                                gap_bars=1,
                                pad_end=False):
        """Populate self with drums from the given quantized NoteSequence object.

    A drum track is extracted from the given quantized sequence starting at time
    step `search_start_step`, which can be used to drive extraction of multiple
    drum tracks from the same quantized sequence. The end step of the extracted
    drum track will be stored in `self._end_step`.

    0 velocity notes are ignored. The drum extraction is ended when there are
    no drums for a time stretch of `gap_bars` in bars (measures) of music. The
    number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Each drum event is a Python frozenset of simultaneous (after quantization)
    drum "pitches", or an empty frozenset to indicate no drums are played.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      search_start_step: Start searching for drums at this time step. Assumed to
          be the beginning of a bar.
      gap_bars: If this many bars or more follow a non-empty drum event, the
          drum track is ended.
      pad_end: If True, the end of the drums will be padded with empty events so
          that it will end at a bar boundary.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
    """
        sequences_lib.assert_is_quantized_sequence(quantized_sequence)
        self._reset()

        steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
            quantized_sequence)
        if steps_per_bar_float % 1 != 0:
            raise events_lib.NonIntegerStepsPerBarException(
                'There are %f timesteps per bar. Time signature: %d/%d' %
                (steps_per_bar_float,
                 quantized_sequence.time_signatures[0].numerator,
                 quantized_sequence.time_signatures[0].denominator))
        self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
        self._steps_per_quarter = (
            quantized_sequence.quantization_info.steps_per_quarter)

        # Group all drum notes that start at the same step.
        all_notes = [
            note for note in quantized_sequence.notes
            if note.is_drum  # drums only
            and note.velocity  # no zero-velocity notes
            # after start_step only
            and note.quantized_start_step >= search_start_step
        ]
        grouped_notes = collections.defaultdict(list)
        for note in all_notes:
            grouped_notes[note.quantized_start_step].append(note)

        # Sort by note start times.
        notes = sorted(grouped_notes.items(), key=operator.itemgetter(0))

        if not notes:
            return

        gap_start_index = 0

        track_start_step = (notes[0][0] -
                            (notes[0][0] - search_start_step) % steps_per_bar)
        for start, group in notes:

            start_index = start - track_start_step
            pitches = frozenset(note.pitch for note in group)

            # If a gap of `gap_bars` bars or more is found, end the drum track.
            note_distance = start_index - gap_start_index
            if len(self) and note_distance >= gap_bars * steps_per_bar:
                break

            # Add a drum event, a set of drum "pitches".
            self.set_length(start_index + 1)
            self._events[start_index] = pitches

            gap_start_index = start_index + 1

        if not self._events:
            # If no drum events were added, don't set `_start_step` and `_end_step`.
            return

        self._start_step = track_start_step

        length = len(self)
        # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
        if pad_end:
            length += -len(self) % steps_per_bar
        self.set_length(length)
Example #20
def extract_melodies(quantized_sequence,
                     search_start_step=0,
                     min_bars=7,
                     max_steps_truncate=None,
                     max_steps_discard=None,
                     gap_bars=1.0,
                     min_unique_pitches=5,
                     ignore_polyphonic_notes=True,
                     pad_end=False,
                     filter_drums=True):
    """Extracts a list of melodies from the given quantized NoteSequence.

  This function will search through `quantized_sequence` for monophonic
  melodies in every track at every time step.

  Once a note-on event in a track is encountered, a melody begins.
  Gaps of silence in each track act as splitting points that divide the
  track into separate melodies. The minimum size of these gaps is given by
  `gap_bars`. The size of a bar (measure) of music in time steps is
  computed from the time signature stored in `quantized_sequence`.

  The melody is then checked for validity. The melody is only used if it is
  at least `min_bars` bars long, and has at least `min_unique_pitches` unique
  notes (preventing melodies that only repeat a few notes, such as those found
  in some accompaniment tracks, from being used).

  After scanning each instrument track in the quantized sequence, a list of all
  extracted Melody objects is returned.

  Args:
    quantized_sequence: A quantized NoteSequence.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at
        the same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.

  Returns:
    melodies: A python list of Melody instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)

  # TODO(danabo): Convert `ignore_polyphonic_notes` into a float which controls
  # the degree of polyphony that is acceptable.
  melodies = []
  stats = dict([(stat_name, statistics.Counter(stat_name)) for stat_name in
                ['polyphonic_tracks_discarded',
                 'melodies_discarded_too_short',
                 'melodies_discarded_too_few_pitches',
                 'melodies_discarded_too_long',
                 'melodies_truncated']])
  # Create a histogram measuring melody lengths (in bars not steps).
  # Capture melodies that are very small, in the range of the filter lower
  # bound `min_bars`, and large. The bucket intervals grow approximately
  # exponentially.
  stats['melody_lengths_in_bars'] = statistics.Histogram(
      'melody_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, min_bars // 2, min_bars,
       min_bars + 1, min_bars - 1])
  instruments = set([n.instrument for n in quantized_sequence.notes])
  steps_per_bar = int(
      sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence))
  for instrument in instruments:
    instrument_search_start_step = search_start_step
    # Quantize the track into a Melody object.
    # If any notes start at the same time, only one is kept.
    while True:
      melody = Melody()
      try:
        melody.from_quantized_sequence(
            quantized_sequence,
            instrument=instrument,
            search_start_step=instrument_search_start_step,
            gap_bars=gap_bars,
            ignore_polyphonic_notes=ignore_polyphonic_notes,
            pad_end=pad_end,
            filter_drums=filter_drums)
      except PolyphonicMelodyException:
        stats['polyphonic_tracks_discarded'].increment()
        break  # Look for monophonic melodies in other tracks.
      except events_lib.NonIntegerStepsPerBarException:
        raise
      # Start search for next melody on next bar boundary (inclusive).
      instrument_search_start_step = (
          melody.end_step +
          (search_start_step - melody.end_step) % steps_per_bar)
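      # For example, with steps_per_bar=16, search_start_step=0 and
      # melody.end_step=37: (0 - 37) % 16 == 11, so the next search starts at
      # 37 + 11 == 48, the first bar boundary at or after the melody's end.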
      if not melody:
        break

      # Require a certain melody length.
      if len(melody) < melody.steps_per_bar * min_bars:
        stats['melodies_discarded_too_short'].increment()
        continue

      # Discard melodies that are too long.
      if max_steps_discard is not None and len(melody) > max_steps_discard:
        stats['melodies_discarded_too_long'].increment()
        continue

      # Truncate melodies that are too long.
      if max_steps_truncate is not None and len(melody) > max_steps_truncate:
        truncated_length = max_steps_truncate
        if pad_end:
          truncated_length -= max_steps_truncate % melody.steps_per_bar
        melody.set_length(truncated_length)
        stats['melodies_truncated'].increment()

      # Require a certain number of unique pitches.
      note_histogram = melody.get_note_histogram()
      unique_pitches = np.count_nonzero(note_histogram)
      if unique_pitches < min_unique_pitches:
        stats['melodies_discarded_too_few_pitches'].increment()
        continue

      # TODO(danabo)
      # Add filter for rhythmic diversity.

      stats['melody_lengths_in_bars'].increment(
          len(melody) // melody.steps_per_bar)

      melodies.append(melody)

  return melodies, list(stats.values())
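
# A minimal usage sketch, assuming `sequences_lib.quantize_note_sequence` is
# available (as in Magenta) and `note_sequence` is a hypothetical NoteSequence
# proto obtained elsewhere:
#
#   quantized = sequences_lib.quantize_note_sequence(
#       note_sequence, steps_per_quarter=4)
#   melodies, stats = extract_melodies(
#       quantized, min_bars=7, gap_bars=1.0, min_unique_pitches=5)
#   for stat in stats:
#     print(str(stat))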
  def from_quantized_sequence(self,
                              quantized_sequence,
                              search_start_step=0,
                              instrument=0,
                              gap_bars=1,
                              ignore_polyphonic_notes=False,
                              pad_end=False,
                              filter_drums=True):
    """Populate self with a melody from the given quantized NoteSequence.

    A monophonic melody is extracted from the given `instrument` starting at
    `search_start_step`. `instrument` and `search_start_step` can be used to
    drive extraction of multiple melodies from the same quantized sequence. The
    end step of the extracted melody will be stored in `self._end_step`.

    Notes with zero velocity are ignored. The melody extraction is ended when
    there are no held notes for a time stretch of `gap_bars` in bars (measures)
    of music. The number of time steps per bar is computed from the time
    signature in `quantized_sequence`.

    `ignore_polyphonic_notes` determines what happens when polyphonic (multiple
    notes start at the same time) data is encountered. If
    `ignore_polyphonic_notes` is true, the highest pitch is used in the melody
    when multiple notes start at the same time. If false, an exception is
    raised.

    Args:
      quantized_sequence: A NoteSequence quantized with
          sequences_lib.quantize_note_sequence.
      search_start_step: Start searching for a melody at this time step. Assumed
          to be the first step of a bar.
      instrument: Search for a melody in this instrument number.
      gap_bars: If this many bars or more follow a NOTE_OFF event, the melody
          is ended.
      ignore_polyphonic_notes: If True, the highest pitch is used in the melody
          when multiple notes start at the same time. If False,
          PolyphonicMelodyException will be raised if multiple notes start at
          the same time.
      pad_end: If True, the end of the melody will be padded with NO_EVENTs so
          that it will end at a bar boundary.
      filter_drums: If True, notes for which `is_drum` is True will be ignored.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      PolyphonicMelodyException: If any of the notes start on the same step
          and `ignore_polyphonic_notes` is False.
    """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      raise events_lib.NonIntegerStepsPerBarException(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float,
           quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Sort track by note start times, and secondarily by pitch descending.
    notes = sorted(
        [n for n in quantized_sequence.notes
         if n.instrument == instrument and
         n.quantized_start_step >= search_start_step],
        key=lambda note: (note.quantized_start_step, -note.pitch))

    if not notes:
      return

    # The first step in the melody, beginning at the first step of a bar.
    melody_start_step = (
        notes[0].quantized_start_step -
        (notes[0].quantized_start_step - search_start_step) % steps_per_bar)
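    # For example, with search_start_step=0, steps_per_bar=16 and a first note
    # at step 37: 37 - ((37 - 0) % 16) == 37 - 5 == 32, the first step of the
    # bar containing that note.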
    for note in notes:
      if filter_drums and note.is_drum:
        continue

      # Ignore notes with zero velocity.
      if not note.velocity:
        continue

      start_index = note.quantized_start_step - melody_start_step
      end_index = note.quantized_end_step - melody_start_step

      if not self._events:
        # If there are no events, we don't need to check for polyphony.
        self._add_note(note.pitch, start_index, end_index)
        continue

      # If `start_index` comes before or lands on an already added note's start
      # step, we cannot add it. In that case either discard the melody or keep
      # the highest pitch.
      last_on, last_off = self._get_last_on_off_events()
      on_distance = start_index - last_on
      off_distance = start_index - last_off
      if on_distance == 0:
        if ignore_polyphonic_notes:
          # Keep the highest note.
          # Notes are sorted by pitch descending, so if a note is already at
          # this position it's the highest pitch.
          continue
        else:
          self._reset()
          raise PolyphonicMelodyException()
      elif on_distance < 0:
        raise PolyphonicMelodyException(
            'Unexpected note. Not in ascending order.')

      # If a gap of `gap_bars` bars or more is found, end the melody.
      if len(self) and off_distance >= gap_bars * steps_per_bar:
        break

      # Add the note-on and off events to the melody.
      self._add_note(note.pitch, start_index, end_index)

    if not self._events:
      # If no notes were added, don't set `_start_step` and `_end_step`.
      return

    self._start_step = melody_start_step

    # Strip final MELODY_NOTE_OFF event.
    if self._events[-1] == MELODY_NOTE_OFF:
      del self._events[-1]

    length = len(self)
    # Optionally round up `_end_step` to a multiple of `steps_per_bar`.
    if pad_end:
      length += -len(self) % steps_per_bar
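      # For example, with len(self) == 37 and steps_per_bar == 16:
      # -37 % 16 == 11, so the length is rounded up to 48 (three full bars).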
    self.set_length(length)
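
# A minimal usage sketch, assuming `quantized` is a hypothetical quantized
# NoteSequence (e.g. one built as in the helper above):
#
#   melody = Melody()
#   melody.from_quantized_sequence(
#       quantized, instrument=0, gap_bars=1,
#       ignore_polyphonic_notes=True, pad_end=True)
#   # `melody` now starts on a bar boundary and, with pad_end=True, is
#   # padded with NO_EVENTs so it also ends on one.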
Exemple #22
0
  def from_quantized_sequence(self, quantized_sequence, start_step, end_step):
    """Populate self with the chords from the given quantized NoteSequence.

    A chord progression is extracted from the given sequence starting at time
    step `start_step` and ending at time step `end_step`.

    The number of time steps per bar is computed from the time signature in
    `quantized_sequence`.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      start_step: Start populating chords at this time step.
      end_step: Stop populating chords at this time step.

    Raises:
      NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
          (derived from its time signature) is not an integer number of time
          steps.
      CoincidentChordsException: If any of the chords start on the same step.
    """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    self._reset()

    steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    if steps_per_bar_float % 1 != 0:
      raise events_lib.NonIntegerStepsPerBarException(
          'There are %f timesteps per bar. Time signature: %d/%d' %
          (steps_per_bar_float,
           quantized_sequence.time_signatures[0].numerator,
           quantized_sequence.time_signatures[0].denominator))
    self._steps_per_bar = int(steps_per_bar_float)
    self._steps_per_quarter = (
        quantized_sequence.quantization_info.steps_per_quarter)

    # Sort track by chord times.
    chords = sorted([a for a in quantized_sequence.text_annotations
                     if a.annotation_type == CHORD_SYMBOL],
                    key=lambda chord: chord.quantized_step)

    prev_step = None
    prev_figure = NO_CHORD
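
    # Worked example (illustrative): with chords C at step 0 and G at step 16,
    # start_step=0 and end_step=32, reaching G in the loop below adds C over
    # steps [0, 16); the block after the loop then adds G over steps [16, 32).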

    for chord in chords:
      if chord.quantized_step >= end_step:
        # No more chords within range.
        break

      elif chord.quantized_step < start_step:
        # Chord is before start of range.
        prev_step = chord.quantized_step
        prev_figure = chord.text
        continue

      if chord.quantized_step == prev_step:
        if chord.text == prev_figure:
          # Identical coincident chords, just skip.
          continue
        else:
          # Two different chords start at the same time step.
          self._reset()
          raise CoincidentChordsException('chords %s and %s are coincident' %
                                          (prev_figure, chord.text))

      if chord.quantized_step > start_step:
        # Add the previous chord.
        start_index = max(prev_step, start_step) - start_step
        end_index = chord.quantized_step - start_step
        self._add_chord(prev_figure, start_index, end_index)

      prev_step = chord.quantized_step
      prev_figure = chord.text

    if prev_step is None or prev_step < end_step:
      # Add the last chord active before end_step (NO_CHORD if no chord
      # annotation was seen). Guard against prev_step being None, which
      # `<` and `max` would not accept in Python 3.
      start_index = (max(prev_step, start_step) - start_step
                     if prev_step is not None else 0)
      end_index = end_step - start_step
      self._add_chord(prev_figure, start_index, end_index)

    self._start_step = start_step
    self._end_step = end_step
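
# A minimal usage sketch, assuming `quantized` is a hypothetical quantized
# NoteSequence carrying CHORD_SYMBOL text annotations and that this method
# belongs to a ChordProgression-style events container:
#
#   chords = ChordProgression()
#   chords.from_quantized_sequence(quantized, start_step=0, end_step=64)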
Exemple #23
0
def extract_performances(
    quantized_sequence, start_step=0, min_events_discard=None,
    max_events_truncate=None, max_steps_truncate=None, num_velocity_bins=0,
    split_instruments=False, note_performance=False):
  """Extracts one or more performances from the given quantized NoteSequence.

  Args:
    quantized_sequence: A quantized NoteSequence.
    start_step: Start extracting a sequence at this time step.
    min_events_discard: Minimum length of tracks in events. Shorter tracks are
        discarded.
    max_events_truncate: Maximum length of tracks in events. Longer tracks are
        truncated.
    max_steps_truncate: Maximum length of tracks in quantized time steps. Longer
        tracks are truncated.
    num_velocity_bins: Number of velocity bins to use. If 0, velocity events
        will not be included at all.
    split_instruments: If True, will extract a performance for each instrument.
        Otherwise, will extract a single performance.
    note_performance: If True, will create a NotePerformance object. If
        False, will create either a MetricPerformance or Performance based on
        how the sequence was quantized.

  Returns:
    performances: A python list of Performance or MetricPerformance (if
        `quantized_sequence` is quantized relative to meter) instances.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)

  stats = dict([(stat_name, statistics.Counter(stat_name)) for stat_name in
                ['performances_discarded_too_short',
                 'performances_truncated', 'performances_truncated_timewise',
                 'performances_discarded_more_than_1_program',
                 'performance_discarded_too_many_time_shift_steps',
                 'performance_discarded_too_many_duration_steps']])

  if sequences_lib.is_absolute_quantized_sequence(quantized_sequence):
    steps_per_second = quantized_sequence.quantization_info.steps_per_second
    # Create a histogram measuring lengths in seconds.
    stats['performance_lengths_in_seconds'] = statistics.Histogram(
        'performance_lengths_in_seconds',
        [5, 10, 20, 30, 40, 60, 120])
  else:
    steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)
    # Create a histogram measuring lengths in bars.
    stats['performance_lengths_in_bars'] = statistics.Histogram(
        'performance_lengths_in_bars',
        [1, 10, 20, 30, 40, 50, 100, 200, 500])

  if split_instruments:
    instruments = set(note.instrument for note in quantized_sequence.notes)
  else:
    instruments = set([None])
    # Allow only 1 program.
    programs = set()
    for note in quantized_sequence.notes:
      programs.add(note.program)
    if len(programs) > 1:
      stats['performances_discarded_more_than_1_program'].increment()
      return [], list(stats.values())

  performances = []

  for instrument in instruments:
    # Translate the quantized sequence into a Performance.
    if note_performance:
      try:
        performance = NotePerformance(
            quantized_sequence, start_step=start_step,
            num_velocity_bins=num_velocity_bins, instrument=instrument)
      except NotePerformanceTooManyTimeShiftSteps:
        stats['performance_discarded_too_many_time_shift_steps'].increment()
        continue
      except NotePerformanceTooManyDurationSteps:
        stats['performance_discarded_too_many_duration_steps'].increment()
        continue
    elif sequences_lib.is_absolute_quantized_sequence(quantized_sequence):
      performance = Performance(quantized_sequence, start_step=start_step,
                                num_velocity_bins=num_velocity_bins,
                                instrument=instrument)
    else:
      performance = MetricPerformance(quantized_sequence, start_step=start_step,
                                      num_velocity_bins=num_velocity_bins,
                                      instrument=instrument)

    if (max_steps_truncate is not None and
        performance.num_steps > max_steps_truncate):
      performance.set_length(max_steps_truncate)
      stats['performances_truncated_timewise'].increment()

    if (max_events_truncate is not None and
        len(performance) > max_events_truncate):
      performance.truncate(max_events_truncate)
      stats['performances_truncated'].increment()

    if min_events_discard is not None and len(performance) < min_events_discard:
      stats['performances_discarded_too_short'].increment()
    else:
      performances.append(performance)
      if sequences_lib.is_absolute_quantized_sequence(quantized_sequence):
        stats['performance_lengths_in_seconds'].increment(
            performance.num_steps // steps_per_second)
      else:
        stats['performance_lengths_in_bars'].increment(
            performance.num_steps // steps_per_bar)

  return performances, list(stats.values())
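
# A minimal usage sketch, assuming `quantized` is a hypothetical NoteSequence
# quantized with absolute timing (e.g. via Magenta's
# sequences_lib.quantize_note_sequence_absolute):
#
#   performances, stats = extract_performances(
#       quantized, num_velocity_bins=32, split_instruments=True)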
Exemple #24
0
def extract_lead_sheet_fragments(quantized_sequence,
                                 search_start_step=0,
                                 min_bars=7,
                                 max_steps_truncate=None,
                                 max_steps_discard=None,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 pad_end=False,
                                 filter_drums=True,
                                 require_chords=False,
                                 all_transpositions=False):
  """Extracts a list of lead sheet fragments from a quantized NoteSequence.

  This function first extracts melodies using melodies_lib.extract_melodies,
  then extracts the chords underlying each melody using
  chords_lib.extract_chords_for_melodies.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.
    all_transpositions: If True, also transpose each lead sheet fragment into
        all 12 keys.

  Returns:
    A python list of LeadSheet instances.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
  sequences_lib.assert_is_quantized_sequence(quantized_sequence)
  stats = dict([('empty_chord_progressions',
                 statistics.Counter('empty_chord_progressions'))])
  melodies, melody_stats = melodies_lib.extract_melodies(
      quantized_sequence, search_start_step=search_start_step,
      min_bars=min_bars, max_steps_truncate=max_steps_truncate,
      max_steps_discard=max_steps_discard, gap_bars=gap_bars,
      min_unique_pitches=min_unique_pitches,
      ignore_polyphonic_notes=ignore_polyphonic_notes, pad_end=pad_end,
      filter_drums=filter_drums)
  chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
      quantized_sequence, melodies)
  lead_sheets = []
  for melody, chords in zip(melodies, chord_progressions):
    # If `chords` is None, it's because a chord progression could not be
    # extracted for this particular melody.
    if chords is not None:
      if require_chords and all(chord == chords_lib.NO_CHORD
                                for chord in chords):
        stats['empty_chord_progressions'].increment()
      else:
        lead_sheet = LeadSheet(melody, chords)
        if all_transpositions:
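          # range(-6, 6) covers the 12 transpositions from -6 to +5 semitones.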
          for amount in range(-6, 6):
            transposed_lead_sheet = copy.deepcopy(lead_sheet)
            transposed_lead_sheet.transpose(amount)
            lead_sheets.append(transposed_lead_sheet)
        else:
          lead_sheets.append(lead_sheet)
  return lead_sheets, list(stats.values()) + melody_stats + chord_stats
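
# A minimal usage sketch, assuming `quantized` is a hypothetical quantized
# NoteSequence containing both melody notes and chord annotations:
#
#   lead_sheets, stats = extract_lead_sheet_fragments(
#       quantized, min_bars=7, require_chords=True, all_transpositions=True)
#   print(len(lead_sheets), 'lead sheet fragments extracted')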