def transform(self, note_sequence):
     try:
         if self._steps_per_quarter is not None:
             quantized_sequence = sequences_lib.quantize_note_sequence(
                 note_sequence, self._steps_per_quarter)
         else:
             quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
                 note_sequence, self._steps_per_second)
         return [quantized_sequence]
     except sequences_lib.MultipleTimeSignatureError as e:
         tf.logging.warning(
             'Multiple time signatures in NoteSequence %s: %s',
             note_sequence.filename, e)
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_multiple_time_signatures', 1)
         ])
         return []
     except sequences_lib.MultipleTempoError as e:
         tf.logging.warning('Multiple tempos found in NoteSequence %s: %s',
                            note_sequence.filename, e)
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_multiple_tempos', 1)
         ])
         return []
     except sequences_lib.BadTimeSignatureError as e:
         tf.logging.warning('Bad time signature in NoteSequence %s: %s',
                            note_sequence.filename, e)
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_bad_time_signature', 1)
         ])
         return []
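The pipeline above picks between magenta's two quantization modes. A minimal usage sketch of those two sequences_lib calls, assuming magenta is installed and `note_sequence` is a populated NoteSequence proto:

from magenta.music import sequences_lib

# Relative quantization: the grid is defined in steps per quarter note, so it
# follows the sequence's tempo. This is the call that raises the errors
# handled above (multiple time signatures, multiple tempos, bad signature).
relative = sequences_lib.quantize_note_sequence(note_sequence, steps_per_quarter=4)

# Absolute quantization: the grid is defined in steps per second and ignores
# tempo, which is what performance-style models expect.
absolute = sequences_lib.quantize_note_sequence_absolute(note_sequence, steps_per_second=100)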
Example #2
def extract_polyphonic_sequences(
        quantized_sequence, start_step=0, min_steps_discard=None,
        max_steps_discard=None):
    """Extracts a polyphonic track from the given quantized NoteSequence.

    Currently, this extracts only one polyphonic sequence from a given track.

    Args:
      quantized_sequence: A quantized NoteSequence.
      start_step: Start extracting a sequence at this time step. Assumed
          to be the beginning of a bar.
      min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
          discarded.
      max_steps_discard: Maximum length of tracks in steps. Longer tracks are
          discarded.

    Returns:
      poly_seqs: A python list of PolyphonicSequence instances.
      stats: A list of `statistics.Statistic` objects.
    """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    stats = dict((stat_name, statistics.Counter(stat_name)) for stat_name in
                 ['polyphonic_tracks_discarded_too_short',
                  'polyphonic_tracks_discarded_too_long',
                  'polyphonic_tracks_discarded_more_than_1_program'])

    steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)

    # Create a histogram measuring lengths (in bars not steps).
    stats['polyphonic_track_lengths_in_bars'] = statistics.Histogram(
        'polyphonic_track_lengths_in_bars',
        [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

    # Allow only 1 program.
    programs = set()
    for note in quantized_sequence.notes:
        programs.add(note.program)
    if len(programs) > 1:
        stats['polyphonic_tracks_discarded_more_than_1_program'].increment()
        return [], stats.values()

    # Translate the quantized sequence into a PolyphonicSequence.
    poly_seq = PolyphonicSequence(quantized_sequence,
                                  start_step=start_step)

    poly_seqs = []
    num_steps = poly_seq.num_steps

    if min_steps_discard is not None and num_steps < min_steps_discard:
        stats['polyphonic_tracks_discarded_too_short'].increment()
    elif max_steps_discard is not None and num_steps > max_steps_discard:
        stats['polyphonic_tracks_discarded_too_long'].increment()
    else:
        poly_seqs.append(poly_seq)
        stats['polyphonic_track_lengths_in_bars'].increment(
            num_steps // steps_per_bar)

    return poly_seqs, stats.values()
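A hedged usage sketch for the extractor above; `quantized` stands in for a relative-quantized NoteSequence and the step thresholds are illustrative:

# `quantized` must already be relative-quantized, or the assertion at the top
# of extract_polyphonic_sequences fails.
poly_seqs, stats = extract_polyphonic_sequences(
    quantized, start_step=0, min_steps_discard=16, max_steps_discard=512)
for stat in stats:
    print(str(stat))  # Counters print as 'name: count', per the test below.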
Example #3
    def transform(self, sequence):
        stats = dict([(state_name, statistics.Counter(state_name))
                      for state_name in [
                          'skipped_due_to_range_exceeded',
                          'notes_dropped_due_to_range_exceeded',
                          'transpositions_generated'
                      ]])

        for text_annotation in sequence.text_annotations:
            if text_annotation.annotation_type == CHORD_SYMBOL:
                tf.logging.warn(
                    'Chord symbols ignored by TranspositionPipeline.')
                break

        transposed = []
        for amount in self._transposition_range:
            # Note that transpose is called even with a transpose amount of zero, to
            # ensure that out-of-range pitches are handled correctly.
            ts = self._transpose(sequence, amount, stats)
            if ts is not None:
                transposed.append(ts)

        stats['transpositions_generated'].increment(len(transposed))
        self._set_stats(stats.values())
        return transposed
Example #4
    def transform(self, input_object):
        sequence = input_object
        stats = dict(
            (state_name, statistics.Counter(state_name)) for state_name in
            ['skipped_due_to_range_exceeded', 'transpositions_generated'])

        if sequence.key_signatures:
            tf.logging.warn('Key signatures ignored by TranspositionPipeline.')
        if any(note.pitch_name for note in sequence.notes):
            tf.logging.warn('Pitch names ignored by TranspositionPipeline.')
        if any(ta.annotation_type == CHORD_SYMBOL
               for ta in sequence.text_annotations):
            tf.logging.warn('Chord symbols ignored by TranspositionPipeline.')

        transposed = []
        for amount in self._transposition_range:
            # Note that transpose is called even with a transpose amount of zero, to
            # ensure that out-of-range pitches are handled correctly.
            ts = self._transpose(sequence, amount, stats)
            if ts is not None:
                transposed.append(ts)

        stats['transpositions_generated'].increment(len(transposed))
        self._set_stats(stats.values())
        return transposed
Example #5
    def __init__(self, name="Transform model output into midi file format"):
        super(ModelOutPutIntoMidiFile, self).__init__(input_type=Iterable,
                                                      output_type=Iterable,
                                                      name=name)

        self.stat1 = statistics.Counter('how_many_notes')
        self.stats = [self.stat1]
Example #6
 def transform(self, input_object):
     self._set_stats(
         [statistics.Counter('output_count', input_object.z)])
     return [
         Type1(x=input_object.x + i, y=input_object.y + i)
         for i in range(input_object.z)
     ]
Example #7
def extract_chords_for_melodies(quantized_sequence, melodies):
  """Extracts a chord progression from the quantized NoteSequence for melodies.

  This function will extract the underlying chord progression (encoded as text
  annotations) from `quantized_sequence` for each monophonic melody in
  `melodies`.  Each chord progression will be the same length as its
  corresponding melody.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    melodies: A python list of Melody instances.

  Returns:
    chord_progressions: A python list of ChordProgression instances, the same
        length as `melodies`. If a progression fails to be extracted for a
        melody, the corresponding list entry will be None.
    stats: A list of `statistics.Statistic` objects.
  """
  chord_progressions = []
  stats = dict([('coincident_chords', statistics.Counter('coincident_chords'))])
  for melody in melodies:
    try:
      chords = ChordProgression()
      chords.from_quantized_sequence(
          quantized_sequence, melody.start_step, melody.end_step)
    except CoincidentChordsError:
      stats['coincident_chords'].increment()
      chords = None
    chord_progressions.append(chords)

  return chord_progressions, list(stats.values())
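Each entry returned above is either a progression the same length as its melody or None, so callers typically filter before pairing. A short sketch under that assumption:

# Pair each melody with its chord progression, dropping melodies whose
# chords overlapped (CoincidentChordsError) and came back as None.
melodies, melody_stats = melodies_lib.extract_melodies(quantized_sequence)
chord_progressions, chord_stats = extract_chords_for_melodies(
    quantized_sequence, melodies)
pairs = [(melody, chords)
         for melody, chords in zip(melodies, chord_progressions)
         if chords is not None]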
Example #8
    def transform(self, sequence):
        stats = dict([(state_name, statistics.Counter(state_name))
                      for state_name in [
                          'skipped_due_to_range_exceeded',
                          'notes_dropped_due_to_range_exceeded',
                          'transpositions_generated'
                      ]])

        for text_annotation in sequence.text_annotations:
            if text_annotation.annotation_type == CHORD_SYMBOL:
                tf.logging.warn(
                    'Chord symbols ignored by TranspositionPipeline.')
                break

        transposed = []
        for amount in self._transposition_range:
            if amount == 0:
                transposed.append(sequence)
            else:
                ts = self._transpose(sequence, amount, stats)
                if ts is not None:
                    transposed.append(ts)

        stats['transpositions_generated'].increment(len(transposed))
        self._set_stats(stats.values())
        return transposed
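Several of these TranspositionPipeline variants call a private _transpose helper that the snippets do not show. Below is a minimal sketch of what such a helper can look like, assuming MIDI pitch bounds and the stats dict built above; magenta's real implementation differs in detail (e.g. it also transposes chord symbols):

import copy

MIN_PITCH = 0    # assumed MIDI lower bound
MAX_PITCH = 127  # assumed MIDI upper bound

def _transpose(ns, amount, stats):
    """Returns a copy of `ns` transposed by `amount` semitones, or None if
    any non-drum note would leave the MIDI pitch range."""
    ts = copy.deepcopy(ns)
    for note in ts.notes:
        if not note.is_drum:
            note.pitch += amount
            if note.pitch < MIN_PITCH or note.pitch > MAX_PITCH:
                stats['skipped_due_to_range_exceeded'].increment()
                return None
    return ts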
Example #9
    def __init__(self, name="Transformat NotesAndTimesSTep format into Midi"):
        super(NotesAndTimesStepIntoMidi, self).__init__(input_type=Iterable,
                                                        output_type=Iterable,
                                                        name=name)

        self.stat1 = statistics.Counter('how_many_notes')
        self.stats = [self.stat1]
Example #10
 def transform(self, note_sequence):
   quantized_sequence = sequences_lib.QuantizedSequence()
   try:
     quantized_sequence.from_note_sequence(note_sequence,
                                           self._steps_per_quarter)
     return [quantized_sequence]
   except sequences_lib.MultipleTimeSignatureException as e:
     tf.logging.debug('Multiple time signatures found in NoteSequence %s: %s',
                      note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_multiple_time_signatures', 1)])
     return []
   except sequences_lib.MultipleTempoException as e:
     tf.logging.debug('Multiple tempos found in NoteSequence %s: %s',
                      note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_multiple_tempos', 1)])
     return []
Example #11
def extract_lead_sheet_fragments(quantized_sequence,
                                 min_bars=7,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 require_chords=False):
    """Extracts a list of lead sheet fragments from a quantized NoteSequence.

  This function first extracts melodies using melodies_lib.extract_melodies,
  then extracts the chords underlying each melody using
  chords_lib.extract_chords_for_melodies.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies are
        discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.

  Returns:
    A python list of LeadSheet instances.

  Raises:
    NonIntegerStepsPerBarException: If `quantized_sequence`'s bar length
        (derived from its time signature) is not an integer number of time
        steps.
  """
    sequences_lib.assert_is_quantized_sequence(quantized_sequence)
    stats = dict([('empty_chord_progressions',
                   statistics.Counter('empty_chord_progressions'))])
    melodies, melody_stats = melodies_lib.extract_melodies(
        quantized_sequence,
        min_bars=min_bars,
        gap_bars=gap_bars,
        min_unique_pitches=min_unique_pitches,
        ignore_polyphonic_notes=ignore_polyphonic_notes)
    chord_progressions, chord_stats = chords_lib.extract_chords_for_melodies(
        quantized_sequence, melodies)
    lead_sheets = []
    for melody, chords in zip(melodies, chord_progressions):
        if chords is not None:
            if require_chords and all(chord == chords_lib.NO_CHORD
                                      for chord in chords):
                stats['empty_chord_progressions'].increment()
            else:
                lead_sheet = LeadSheet(melody, chords)
                lead_sheets.append(lead_sheet)
    return lead_sheets, list(stats.values()) + melody_stats + chord_stats
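Because three stat lists are concatenated here, the combined list can contain several Statistic objects with the same name. A hedged sketch of collapsing them, assuming statistics.merge_statistics from magenta.pipelines.statistics:

from magenta.pipelines import statistics

lead_sheets, all_stats = extract_lead_sheet_fragments(quantized_sequence)
merged = statistics.merge_statistics(all_stats)  # one Statistic per unique name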
Example #12
 def transform(self, quantized_sequence):
   try:
     chord_progressions, stats = chords_lib.extract_chords(
         quantized_sequence, max_steps=self._max_steps,
         all_transpositions=self._all_transpositions)
   except events_lib.NonIntegerStepsPerBarError as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     chord_progressions = []
     stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
   except chords_lib.CoincidentChordsError as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     chord_progressions = []
     stats = [statistics.Counter('coincident_chords', 1)]
   except chord_symbols_lib.ChordSymbolError as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     chord_progressions = []
     stats = [statistics.Counter('chord_symbol_exception', 1)]
   self._set_stats(stats)
   return chord_progressions
Example #13
 def transform(self, lead_sheet):
     lead_sheet.squash(self._min_note, self._max_note,
                       self._transpose_to_key)
     try:
         encoded = [
             self._conditional_encoder_decoder.encode(
                 lead_sheet.chords, lead_sheet.melody)
         ]
         stats = []
     except magenta.music.ChordEncodingError as e:
         tf.logging.warning('Skipped lead sheet: %s', e)
         encoded = []
         stats = [statistics.Counter('chord_encoding_exception', 1)]
     except magenta.music.ChordSymbolError as e:
         tf.logging.warning('Skipped lead sheet: %s', e)
         encoded = []
         stats = [statistics.Counter('chord_symbol_exception', 1)]
     self._set_stats(stats)
     return encoded
Example #14
 def transform(self, quantized_sequence):
   try:
     melodies, stats = melodies_lib.extract_melodies(
         quantized_sequence,
         min_bars=self._min_bars,
         max_steps_truncate=self._max_steps,
         min_unique_pitches=self._min_unique_pitches,
         gap_bars=self._gap_bars,
         ignore_polyphonic_notes=self._ignore_polyphonic_notes,
         filter_drums=self._filter_drums)
   except events_lib.NonIntegerStepsPerBarException as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     melodies = []
     stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
    except ZeroDivisionError as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     melodies = []
     stats = [statistics.Counter('zero_division_error', 1)]
   self._set_stats(stats)
   return melodies
Example #15
def extract_polyphonic_sequences(quantized_sequence,
                                 start_step=0,
                                 min_steps_discard=None,
                                 max_steps_discard=None,
                                 mod_writer=None):

    mw = mod_writer

    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    stats = dict([(stat_name, statistics.Counter(stat_name)) for stat_name in [
        'polyphonic_tracks_discarded_too_short',
        'polyphonic_tracks_discarded_too_long',
        'polyphonic_tracks_discarded_more_than_1_program'
    ]])

    steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
        quantized_sequence)

    # Create a histogram measuring lengths (in bars not steps).
    stats['polyphonic_track_lengths_in_bars'] = statistics.Histogram(
        'polyphonic_track_lengths_in_bars',
        [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

    # Allow only 1 program.
    programs = set()
    for note in quantized_sequence.notes:
        programs.add(note.program)
    if len(programs) > 1:
        stats['polyphonic_tracks_discarded_more_than_1_program'].increment()
        return [], stats.values()

    # Guard the optional writer: mod_writer defaults to None, and calling
    # write() on it unguarded would raise an AttributeError.
    filename = 'quantized_sequence'
    if mw is not None:
        mw.write(mw.model_dir, filename, quantized_sequence)
    poly_seq = PolyphonicSequence(quantized_sequence,
                                  start_step=start_step,
                                  mod_writer=mw)
    quantized_poly_ns = poly_seq.to_sequence()
    quantized_poly_ns.filename = quantized_sequence.filename
    if mw is not None:
        mw.write(mw.model_dir, 'quantized_poly_ns', quantized_poly_ns)

    poly_seqs = []
    num_steps = poly_seq.num_steps

    if min_steps_discard is not None and num_steps < min_steps_discard:
        stats['polyphonic_tracks_discarded_too_short'].increment()
    elif max_steps_discard is not None and num_steps > max_steps_discard:
        stats['polyphonic_tracks_discarded_too_long'].increment()
    else:
        poly_seqs.append(poly_seq)
        stats['polyphonic_track_lengths_in_bars'].increment(num_steps //
                                                            steps_per_bar)
    return poly_seqs, stats.values()
Example #16
 def transform(self, lead_sheet):
     lead_sheet.squash(self._min_note, self._max_note,
                       self._transpose_to_key)
     try:
         encoded = [
             self._conditional_encoder_decoder.encode(
                 lead_sheet.chords, lead_sheet.melody)
         ]
         stats = []
     except note_seq.ChordEncodingError as e:
         tf.logging.warning('Skipped lead sheet: %s', e)
         encoded = []
         stats = [statistics.Counter('chord_encoding_exception', 1)]
     except note_seq.ChordSymbolError as e:
         tf.logging.warning('Skipped lead sheet: %s', e)
         encoded = []
         stats = [statistics.Counter('chord_symbol_exception', 1)]
     self._set_stats(stats)
     return [
         pipelines_common.make_sequence_example(*enc) for enc in encoded
     ]
Example #17
    def __init__(self, name="Transform piano rolls into note and time steps"):
        """Transform pianoRoll into NoteAndTimeStep format

        :param: (string) name: Pipeline name.
        """
        super(PianoRollsToIntoNoteAndTimeSteps,
              self).__init__(input_type=Iterable,
                             output_type=Iterable,
                             name=name)

        self.stat1 = statistics.Counter('how_many_notes')
        self.stats = [self.stat1]
Example #18
 def transform(self, quantized_sequence):
   try:
     drum_tracks, stats = drums_lib.extract_drum_tracks(
         quantized_sequence,
         min_bars=self._min_bars,
         max_steps_truncate=self._max_steps,
         gap_bars=self._gap_bars)
   except events_lib.NonIntegerStepsPerBarError as detail:
     tf.logging.warning('Skipped sequence: %s', detail)
     drum_tracks = []
     stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
   self._set_stats(stats)
   return drum_tracks
Example #19
    def __init__(self, name="Read midis to python variable"):
        """Class used for loading midi files into ppython variable

        :param: (string) name: Pipeline name

        """
        super(MidiToPythonVariablePipeline, self).__init__(
            input_type=Iterable,
            output_type=str,
            name=name)

        self.stat1 = statistics.Counter('how_many_mid_files')
        self.stats = [self.stat1]
Example #20
 def transform(self, quantized_sequence):
     try:
         lead_sheets, stats = extract_lead_sheet_fragments(
             quantized_sequence,
             min_bars=self._min_bars,
             max_steps_truncate=self._max_steps,
             min_unique_pitches=self._min_unique_pitches,
             gap_bars=self._gap_bars,
             ignore_polyphonic_notes=self._ignore_polyphonic_notes,
             filter_drums=self._filter_drums,
             require_chords=self._require_chords,
             all_transpositions=self._all_transpositions)
     except events_lib.NonIntegerStepsPerBarError as detail:
         tf.logging.warning('Skipped sequence: %s', detail)
         lead_sheets = []
         stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
     except chord_symbols_lib.ChordSymbolError as detail:
         tf.logging.warning('Skipped sequence: %s', detail)
         lead_sheets = []
         stats = [statistics.Counter('chord_symbol_exception', 1)]
     self._set_stats(stats)
     return lead_sheets
Example #21
    def _pitch_freq_counter(self, sequence):
        """Counts the number of notes by pitch."""

        # One counter per pitch; the range is made inclusive of
        # max_valid_pitch (assumed to itself be a valid pitch) so a note at
        # the boundary does not raise KeyError below.
        stats = dict(
            (str(pitch), statistics.Counter(str(pitch))) for pitch in
            range(self.min_valid_pitch, self.max_valid_pitch + 1))

        for note in sequence.notes:
            stats[str(note.pitch)].increment()

        self._set_stats(stats.values())
Example #22
 def transform(self, quantized_sequence):
     try:
         melodies, stats = melodies_lib.extract_melodies(
             quantized_sequence,
             min_bars=self.min_bars,
             min_unique_pitches=self.min_unique_pitches,
             gap_bars=self.gap_bars,
             ignore_polyphonic_notes=self.ignore_polyphonic_notes)
     except events_lib.NonIntegerStepsPerBarException as detail:
         tf.logging.warning('Skipped sequence: %s', detail)
         melodies = []
         stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
     self._set_stats(stats)
     return melodies
Example #23
    def __init__(
        self,
        name="Transform format of notes and times step into deep learning input"
    ):
        """Transform format of notes and times step into deep learning input"

        :param: (string) name: Pipeline name.
        """
        super(GenMusic, self).__init__(input_type=Iterable,
                                       output_type=Iterable,
                                       name=name)

        self.stat1 = statistics.Counter('how_many_notes')
        self.stats = [self.stat1]
Example #24
 def transform(self, note_sequence):
   try:
     quantized_sequence = sequences_lib.quantize_note_sequence(
         note_sequence, self._steps_per_quarter)
     return [quantized_sequence]
   except sequences_lib.MultipleTimeSignatureException as e:
     tf.logging.warning('Multiple time signatures in NoteSequence %s: %s',
                        note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_multiple_time_signatures', 1)])
     return []
   except sequences_lib.MultipleTempoException as e:
     tf.logging.warning('Multiple tempos found in NoteSequence %s: %s',
                        note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_multiple_tempos', 1)])
     return []
   except sequences_lib.BadTimeSignatureException as e:
     tf.logging.warning('Denominator not power of 2 in NoteSequence %s: %s',
                        note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_bad_time_signature', 1)])
     return []
Example #25
    def __init__(
        self,
        name="Transform format of notes and times step into deep learning input"
    ):
        """Transform format of notes and times step into deep learning input"

        :param: (string) name: Pipeline name.
        """
        super(NotesAndTimesStepIntoDeepLearningInput,
              self).__init__(input_type=Iterable,
                             output_type=Iterable,
                             name=name)

        self.stat1 = statistics.Counter('how_many_deep_learning_inputs')
        self.stats = [self.stat1]
Example #26
    def testCounter(self):
        counter = statistics.Counter('name_123')
        self.assertEqual(counter.count, 0)
        counter.increment()
        self.assertEqual(counter.count, 1)
        counter.increment(10)
        self.assertEqual(counter.count, 11)

        counter_2 = statistics.Counter('name_123', 5)
        self.assertEqual(counter_2.count, 5)
        counter.merge_from(counter_2)
        self.assertEqual(counter.count, 16)

        class ABC(object):
            pass

        with self.assertRaises(statistics.MergeStatisticsError):
            counter.merge_from(ABC())

        self.assertEqual(str(counter), 'name_123: 16')

        counter_copy = counter.copy()
        self.assertEqual(counter_copy.count, 16)
        self.assertEqual(counter_copy.name, 'name_123')
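statistics.Histogram, which the polyphony extractors above use for track lengths, behaves analogously; a short sketch assuming the same magenta statistics module:

histo = statistics.Histogram('track_lengths', [0, 1, 10, 100])
histo.increment(5)      # lands in the [1, 10) bucket
histo.increment(20, 3)  # add three observations to the [10, 100) bucket
print(str(histo))       # prints the per-bucket counts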
Example #27
 def transform(self, quantized_sequence):
     try:
         # pylint has a false positive error on this method call for some reason.
         # pylint:disable=redundant-keyword-arg
         drum_tracks, stats = extract_drum_tracks(
             quantized_sequence,
             min_bars=self._min_bars,
             max_steps_truncate=self._max_steps,
             gap_bars=self._gap_bars)
         # pylint:enable=redundant-keyword-arg
     except events_lib.NonIntegerStepsPerBarError as detail:
         tf.logging.warning('Skipped sequence: %s', detail)
         drum_tracks = []
         stats = [statistics.Counter('non_integer_steps_per_bar', 1)]
     self._set_stats(stats)
     return drum_tracks
Example #28
    def transform(self, sequence):
        stats = dict([
            (state_name, statistics.Counter(state_name)) for state_name in
            ['skipped_due_to_range_exceeded', 'transpositions_generated']
        ])

        transposed = []
        for amount in self._transposition_range:
            # Note that transpose is called even with a transpose amount of zero, to
            # ensure that out-of-range pitches are handled correctly.
            ts = self._transpose(sequence, amount, stats)
            if ts is not None:
                transposed.append(ts)

        stats['transpositions_generated'].increment(len(transposed))
        self._set_stats(stats.values())
        return transposed
Example #29
  def transform(self, sequence):
    stats = dict([(state_name, statistics.Counter(state_name)) for state_name in
                  ['skipped_due_to_range_exceeded',
                   'transpositions_generated']])

    transposed = []
    # Transpose up to a major third in either direction.
    for amount in self._transposition_range:
      if amount == 0:
        transposed.append(sequence)
      else:
        ts = self._transpose(sequence, amount, stats)
        if ts is not None:
          transposed.append(ts)

    stats['transpositions_generated'].increment(len(transposed))
    self._set_stats(stats.values())
    return transposed
Example #30
def extract_chords(quantized_sequence,
                   max_steps=None,
                   all_transpositions=False):
    """Extracts a single chord progression from a quantized NoteSequence.

  This function will extract the underlying chord progression (encoded as text
  annotations) from `quantized_sequence`.

  Args:
    quantized_sequence: A quantized NoteSequence.
    max_steps: An integer, maximum length of a chord progression. Chord
        progressions will be trimmed to this length. If None, chord
        progressions will not be trimmed.
    all_transpositions: If True, also transpose the chord progression into all
        12 keys.

  Returns:
    chord_progressions: If `all_transpositions` is False, a python list
        containing a single ChordProgression instance. If `all_transpositions`
        is True, a python list containing 12 ChordProgression instances, one
        for each transposition.
    stats: A dictionary mapping string names to `statistics.Statistic` objects.
  """
    sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

    stats = dict(
        [('chords_truncated', statistics.Counter('chords_truncated'))])
    chords = ChordProgression()
    chords.from_quantized_sequence(quantized_sequence, 0,
                                   quantized_sequence.total_quantized_steps)
    if max_steps is not None:
        if len(chords) > max_steps:
            chords.set_length(max_steps)
            stats['chords_truncated'].increment()
    if all_transpositions:
        chord_progressions = []
        for amount in range(-6, 6):
            transposed_chords = copy.deepcopy(chords)
            transposed_chords.transpose(amount)
            chord_progressions.append(transposed_chords)
        return chord_progressions, stats.values()
    else:
        return [chords], stats.values()
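A closing usage sketch for extract_chords; the max_steps value is illustrative:

# With all_transpositions=True the progression is returned in all 12 keys,
# one per transposition amount in range(-6, 6).
progressions, stats = extract_chords(
    quantized_sequence, max_steps=512, all_transpositions=True)
assert len(progressions) == 12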