def testExtractPianorollSequences(self):
    """Pianoroll extraction obeys the min/max step-count discard limits."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    # With no thresholds the single track is kept.
    extracted, _ = pianoroll_lib.extract_pianoroll_sequences(quantized)
    self.assertEqual(1, len(extracted))

    # The 4-step track falls inside [2, 5], so it survives.
    extracted, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized, min_steps_discard=2, max_steps_discard=5)
    self.assertEqual(1, len(extracted))

    # Shrink the note to a single step: below min_steps_discard=3 -> dropped.
    self.note_sequence.notes[0].end_time = 1.0
    self.note_sequence.total_time = 1.0
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    extracted, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(extracted))

    # Stretch the note to ten steps: above max_steps_discard=5 -> dropped.
    self.note_sequence.notes[0].end_time = 10.0
    self.note_sequence.total_time = 10.0
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    extracted, _ = pianoroll_lib.extract_pianoroll_sequences(
        quantized, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(extracted))
Example #2
0
  def testExtractDrumTracksTooShort(self):
    """Drum tracks spanning fewer than min_bars are discarded."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 3, 4), (14, 50, 6, 7)],
        is_drum=True)
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    tracks, _ = drums_lib.extract_drum_tracks(
        quantized, min_bars=2, gap_bars=1)
    self.assertEqual([], [list(t) for t in tracks])

    # Move the last hit one step later so the track reaches a second bar.
    del self.note_sequence.notes[:]
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 3, 4), (14, 50, 7, 8)],
        is_drum=True)
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    tracks, _ = drums_lib.extract_drum_tracks(
        quantized, min_bars=2, gap_bars=1)
    self.assertEqual(
        [[NO_DRUMS, NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
          DRUMS(14)]],
        [list(t) for t in tracks])
    def testStepsPerBar(self):
        """steps_per_bar follows the sequence's time signature."""
        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, self.steps_per_quarter)
        self.assertEqual(16, sequences_lib.steps_per_bar_in_quantized_sequence(quantized))

        # Switch the meter to 6/8 and re-quantize.
        signature = self.note_sequence.time_signatures[0]
        signature.numerator = 6
        signature.denominator = 8
        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, self.steps_per_quarter)
        self.assertEqual(12.0, sequences_lib.steps_per_bar_in_quantized_sequence(quantized))
Example #4
0
  def testFromNoteSequence_TempoChange(self):
    """Quantization only rejects sequences whose tempo actually changes."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.tempos[:]

    def quantize():
      return sequences_lib.quantize_note_sequence(
          self.note_sequence, self.steps_per_quarter)

    # An empty tempo list quantizes fine.
    quantize()

    # So does exactly one tempo mark...
    self.note_sequence.tempos.add(qpm=60, time=0)
    quantize()

    # ...and multiple marks that agree on qpm.
    self.note_sequence.tempos.add(qpm=60, time=1)
    quantize()

    # A genuine tempo change raises.
    self.note_sequence.tempos.add(qpm=120, time=2)
    with self.assertRaises(sequences_lib.MultipleTempoException):
      quantize()
    def testQuantizeNoteSequence_TimeSignatureChange(self):
        """Quantization only rejects sequences whose meter actually changes."""
        testing_lib.add_track_to_sequence(
            self.note_sequence,
            0,
            [
                (12, 100, 0.01, 10.0),
                (11, 55, 0.22, 0.50),
                (40, 45, 2.50, 3.50),
                (55, 120, 4.0, 4.01),
                (52, 99, 4.75, 5.0),
            ],
        )
        del self.note_sequence.time_signatures[:]

        def quantize():
            return sequences_lib.quantize_note_sequence(
                self.note_sequence, self.steps_per_quarter)

        # No time signature at all is acceptable.
        quantize()

        # As is a single 4/4 mark...
        self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=0)
        quantize()

        # ...or several marks that all agree.
        self.note_sequence.time_signatures.add(numerator=4, denominator=4, time=1)
        quantize()

        # An actual change of meter raises.
        self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
        with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
            quantize()
Example #6
0
  def testFromNoteSequence_NoImplicitTempoChangeOutOfOrder(self):
    """Equal-qpm tempo marks added out of time order are not a change."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.tempos[:]

    # Quantization succeeds with no tempo marks at all.
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    # Two identical-qpm marks added in reverse time order also succeed.
    self.note_sequence.tempos.add(qpm=60, time=2)
    self.note_sequence.tempos.add(qpm=60, time=0)
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)
Example #7
0
  def testQuantizeNoteSequence(self):
    """Quantizing attaches step fields to notes, chords, and controls."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    testing_lib.add_chords_to_sequence(
        self.note_sequence,
        [('B7', 0.22), ('Em9', 4.0)])
    testing_lib.add_control_changes_to_sequence(
        self.note_sequence, 0,
        [(2.0, 64, 127), (4.0, 64, 0)])

    # Build the expected proto from a copy of the unquantized input.
    expected = copy.deepcopy(self.note_sequence)
    expected.quantization_info.steps_per_quarter = self.steps_per_quarter
    testing_lib.add_quantized_steps_to_sequence(
        expected, [(0, 40), (1, 2), (10, 14), (16, 17), (19, 20)])
    testing_lib.add_quantized_chord_steps_to_sequence(expected, [1, 16])
    testing_lib.add_quantized_control_steps_to_sequence(expected, [8, 16])

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=self.steps_per_quarter)

    self.assertProtoEquals(expected, quantized)
 def testExtractLeadSheetFragmentsCoincidentChords(self):
   """Lead sheets overlapping coincident chord labels are rejected."""
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 11)])
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8),
        (50, 100, 33, 37), (52, 100, 34, 37)])
   # 'Cmaj7' and 'F' share time 33, making them coincident.
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('C', 2), ('G7', 6), ('Cmaj7', 33), ('F', 33)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   fragments, _ = lead_sheets_lib.extract_lead_sheet_fragments(
       quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True, require_chords=True)
   melodies, _ = melodies_lib.extract_melodies(
       quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   progressions, _ = chords_lib.extract_chords_for_melodies(
       quantized, melodies)
   # Only the first two melodies/progressions survive as lead sheets.
   self.assertEqual(list(melodies[:2]), [ls.melody for ls in fragments])
   self.assertEqual(list(progressions[:2]), [ls.chords for ls in fragments])
Example #9
0
  def testExtractPerformancesRelativeQuantized(self):
    """Performance extraction honors discard and truncate limits."""
    self.note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=100)

    # Unconstrained extraction keeps the single performance.
    performances, _ = performance_lib.extract_performances(quantized)
    self.assertEqual(1, len(performances))

    # Event count within [1, 10]: kept.
    performances, _ = performance_lib.extract_performances(
        quantized, min_events_discard=1, max_events_truncate=10)
    self.assertEqual(1, len(performances))

    # Too few events for min_events_discard=8: dropped.
    performances, _ = performance_lib.extract_performances(
        quantized, min_events_discard=8, max_events_truncate=10)
    self.assertEqual(0, len(performances))

    # Event truncation clips the performance down to 3 events.
    performances, _ = performance_lib.extract_performances(
        quantized, min_events_discard=1, max_events_truncate=3)
    self.assertEqual(1, len(performances))
    self.assertEqual(3, len(performances[0]))

    # Step-based truncation caps num_steps at 100.
    performances, _ = performance_lib.extract_performances(
        quantized, max_steps_truncate=100)
    self.assertEqual(1, len(performances))
    self.assertEqual(100, performances[0].num_steps)
Example #10
0
  def testFromRelativeQuantizedNoteSequence(self):
    """MetricPerformance yields the expected on/off/shift event stream."""
    self.note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=100)
    performance = performance_lib.MetricPerformance(quantized)

    self.assertEqual(100, performance.steps_per_quarter)

    event = performance_lib.PerformanceEvent
    expected = [event(kind, value) for kind, value in [
        (event.NOTE_ON, 60),
        (event.NOTE_ON, 64),
        (event.TIME_SHIFT, 100),
        (event.NOTE_ON, 67),
        (event.TIME_SHIFT, 100),
        (event.NOTE_OFF, 67),
        (event.TIME_SHIFT, 100),
        (event.NOTE_OFF, 64),
        (event.TIME_SHIFT, 100),
        (event.NOTE_OFF, 60),
    ]]
    self.assertEqual(expected, list(performance))
 def testExtractLeadSheetFragmentsNoChords(self):
   """A fragment covering only NO_CHORD is rejected and counted in stats."""
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 11)])
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8),
        (50, 100, 33, 37), (52, 100, 34, 37)])
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('C', 2), ('G7', 6), (NO_CHORD, 10)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   fragments, stats = lead_sheets_lib.extract_lead_sheet_fragments(
       quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True, require_chords=True)
   melodies, _ = melodies_lib.extract_melodies(
       quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   progressions, _ = chords_lib.extract_chords_for_melodies(
       quantized, melodies)
   stats_by_name = {stat.name: stat for stat in stats}
   # The chordless fragment is dropped; the first two survive.
   self.assertEqual(list(melodies[:2]), [ls.melody for ls in fragments])
   self.assertEqual(list(progressions[:2]), [ls.chords for ls in fragments])
   self.assertEqual(stats_by_name['empty_chord_progressions'].count, 1)
 def transform(self, note_sequence):
   """Quantizes note_sequence, discarding it (with a stat) on known errors.

   Returns a single-element list holding the quantized sequence, or an
   empty list when quantization fails for one of the tracked reasons.
   """
   try:
     if self._steps_per_quarter is not None:
       quantized = sequences_lib.quantize_note_sequence(
           note_sequence, self._steps_per_quarter)
     else:
       quantized = sequences_lib.quantize_note_sequence_absolute(
           note_sequence, self._steps_per_second)
   except sequences_lib.MultipleTimeSignatureError as e:
     tf.logging.warning('Multiple time signatures in NoteSequence %s: %s',
                        note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_multiple_time_signatures', 1)])
     return []
   except sequences_lib.MultipleTempoError as e:
     tf.logging.warning('Multiple tempos found in NoteSequence %s: %s',
                        note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_multiple_tempos', 1)])
     return []
   except sequences_lib.BadTimeSignatureError as e:
     tf.logging.warning('Bad time signature in NoteSequence %s: %s',
                        note_sequence.filename, e)
     self._set_stats([statistics.Counter(
         'sequences_discarded_because_bad_time_signature', 1)])
     return []
   return [quantized]
Example #13
0
  def testExtractMelodiesPadEnd(self):
    """pad_end extends each melody to the end of its final bar."""
    tracks = [
        (0, [(12, 127, 2, 4), (14, 50, 6, 7)]),
        (1, [(12, 127, 2, 4), (14, 50, 6, 8)]),
        (2, [(12, 127, 2, 4), (14, 50, 6, 9)]),
    ]
    for instrument, notes in tracks:
      testing_lib.add_track_to_sequence(self.note_sequence, instrument, notes)

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                 NOTE_OFF],
                [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                 NO_EVENT],
                [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                 NO_EVENT, NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT]]
    melodies, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=True, pad_end=True)
    self.assertEqual(expected, [list(melody) for melody in melodies])
Example #14
0
  def testFromQuantizedNoteSequence(self):
    """PolyphonicSequence walks notes per step, higher pitches first."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    actual_events = list(polyphony_lib.PolyphonicSequence(quantized))

    event = polyphony_lib.PolyphonicEvent
    expected_events = [
        event(event.START, None),
        # step 0: the two longer notes begin.
        event(event.NEW_NOTE, 64),
        event(event.NEW_NOTE, 60),
        event(event.STEP_END, None),
        # step 1: the short middle note joins.
        event(event.NEW_NOTE, 67),
        event(event.CONTINUED_NOTE, 64),
        event(event.CONTINUED_NOTE, 60),
        event(event.STEP_END, None),
        # step 2: note 67 has ended.
        event(event.CONTINUED_NOTE, 64),
        event(event.CONTINUED_NOTE, 60),
        event(event.STEP_END, None),
        # step 3: only the longest note remains.
        event(event.CONTINUED_NOTE, 60),
        event(event.STEP_END, None),
        event(event.END, None),
    ]
    self.assertEqual(expected_events, actual_events)
Example #15
0
  def testExtractMelodiesStatistics(self):
    """Extraction reports discard reasons and a length histogram."""
    tracks = [
        (0, [(12, 100, 2, 4), (11, 1, 6, 7), (10, 100, 8, 10), (9, 100, 11, 14),
             (8, 100, 16, 40), (7, 100, 41, 42)]),
        (1, [(12, 127, 2, 4), (14, 50, 2, 8)]),
        (2, [(12, 127, 0, 1)]),
        (3, [(12, 127, 2, 4), (12, 50, 6, 8)]),
    ]
    for instrument, notes in tracks:
      testing_lib.add_track_to_sequence(self.note_sequence, instrument, notes)

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    _, stats = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=False)

    stats_by_name = {stat.name: stat for stat in stats}
    self.assertEqual(stats_by_name['polyphonic_tracks_discarded'].count, 1)
    self.assertEqual(stats_by_name['melodies_discarded_too_short'].count, 1)
    self.assertEqual(
        stats_by_name['melodies_discarded_too_few_pitches'].count, 1)
    self.assertEqual(
        stats_by_name['melody_lengths_in_bars'].counters,
        {float('-inf'): 0, 0: 0, 1: 0, 2: 0, 10: 1, 20: 0, 30: 0, 40: 0, 50: 0,
         100: 0, 200: 0, 500: 0})
Example #16
0
def midi_file_to_melody(midi_file, steps_per_quarter=4, qpm=None,
                        ignore_polyphonic_notes=True):
  """Loads a melody from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes.
    qpm: Tempo in quarters per a minute. If not set, tries to use the first
        tempo of the midi track and defaults to
        magenta.music.DEFAULT_QUARTERS_PER_MINUTE if fails.
    ignore_polyphonic_notes: Only use the highest simultaneous note if True.

  Returns:
    A Melody object extracted from the MIDI file.
  """
  sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  # Resolve an effective qpm from the sequence's first tempo mark when the
  # caller did not supply one.
  # NOTE(review): the resolved qpm is never used below -- quantization here
  # depends only on steps_per_quarter. Either pass qpm onward or drop this
  # block; confirm the original intent before changing behavior.
  if qpm is None:
    if sequence.tempos:
      qpm = sequence.tempos[0].qpm
    else:
      qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
  quantized_sequence = sequences_lib.quantize_note_sequence(
      sequence, steps_per_quarter=steps_per_quarter)
  melody = Melody()
  # Populate the melody from the quantized sequence, optionally keeping only
  # the highest of any simultaneous notes.
  melody.from_quantized_sequence(
      quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes)
  return melody
Example #17
0
  def testExtractMelodiesSimple(self):
    """Melodies come from non-drum tracks; the drum track yields none."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 7)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 9)])
    # The drum track must not produce a melody.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 9,
        [(13, 100, 2, 4), (15, 25, 6, 8)],
        is_drum=True)

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11],
                [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
                 NO_EVENT, NO_EVENT]]
    melodies, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=True)

    self.assertEqual(2, len(melodies))
    for melody in melodies:
      self.assertTrue(isinstance(melody, melodies_lib.Melody))

    self.assertEqual(expected, sorted(list(melody) for melody in melodies))
Example #18
0
  def testExtractChordsForMelodiesCoincidentChords(self):
    """Coincident chord labels yield None for the affected melody."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 11)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8),
         (50, 100, 33, 37), (52, 100, 34, 37)])
    # 'E13' and 'Cmaj7' are both annotated at time 8, i.e. coincident.
    testing_lib.add_chords_to_sequence(
        self.note_sequence,
        [('C', 2), ('G7', 6), ('E13', 8), ('Cmaj7', 8)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    melodies, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    progressions, stats = chords_lib.extract_chords_for_melodies(
        quantized, melodies)
    expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
                ['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
    stats_by_name = {stat.name: stat for stat in stats}
    self.assertIsNone(progressions[0])
    self.assertEqual(expected, [list(chords) for chords in progressions[1:]])
    self.assertEqual(stats_by_name['coincident_chords'].count, 1)
 def testMelodyExtractor(self):
   """The MelodyExtractor pipeline unit emits the expected Melody objects."""
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
   testing_lib.add_track_to_sequence(
       note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 7)])
   testing_lib.add_track_to_sequence(
       note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8)])
   quantized = sequences_lib.quantize_note_sequence(
       note_sequence, steps_per_quarter=1)
   expected_events = [
       [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11],
       [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT]]
   # Wrap each expected event list in a Melody with matching quantization.
   expected_melodies = [
       melodies_lib.Melody(events, steps_per_quarter=1, steps_per_bar=4)
       for events in expected_events]
   unit = melody_pipelines.MelodyExtractor(
       min_bars=1, min_unique_pitches=1, gap_bars=1)
   self._unit_transform_test(unit, quantized, expected_melodies)
Example #20
0
 def testDrumsExtractor(self):
   """The DrumsExtractor pipeline unit emits the expected DrumTrack."""
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
   testing_lib.add_track_to_sequence(
       note_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 7), (12, 1, 6, 8)],
       is_drum=True)
   # The non-drum track must be ignored by drum extraction.
   testing_lib.add_track_to_sequence(
       note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8)])
   quantized = sequences_lib.quantize_note_sequence(
       note_sequence, steps_per_quarter=1)
   expected_events = [
       [NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
        DRUMS(11, 12)]]
   # Wrap each expected event list in a DrumTrack with matching quantization.
   expected_drum_tracks = [
       drums_lib.DrumTrack(events, steps_per_quarter=1, steps_per_bar=4)
       for events in expected_events]
   unit = drum_pipelines.DrumsExtractor(min_bars=1, gap_bars=1)
   self._unit_transform_test(unit, quantized, expected_drum_tracks)
 def testExtractLeadSheetFragments(self):
   """Each lead sheet pairs an extracted melody with its chord progression."""
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, .5, 1), (11, 1, 1.5, 2.75)])
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, .5, 1), (14, 50, 1.5, 2),
        (50, 100, 8.25, 9.25), (52, 100, 8.5, 9.25)])
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('C', .5), ('G7', 1.5), ('Cmaj7', 8.25)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   fragments, _ = lead_sheets_lib.extract_lead_sheet_fragments(
       quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True, require_chords=True)
   melodies, _ = melodies_lib.extract_melodies(
       quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   progressions, _ = chords_lib.extract_chords_for_melodies(
       quantized, melodies)
   # The lead sheets should line up 1:1 with the separately extracted parts.
   self.assertEqual(list(melodies), [ls.melody for ls in fragments])
   self.assertEqual(list(progressions), [ls.chords for ls in fragments])
Example #22
0
 def testFromQuantizedNoteSequenceWithNoChords(self):
   """With no chord annotations every step reads as NO_CHORD."""
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   progression = chords_lib.ChordProgression()
   progression.from_quantized_sequence(
       quantized, start_step=0, end_step=16)
   self.assertEqual([NO_CHORD] * 16, list(progression))
Example #23
0
  def testQuantizeNoteSequence_NoImplicitTimeSignatureChangeOutOfOrder(self):
    """Equal time signatures added out of order are not a meter change."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.time_signatures[:]

    # Quantization succeeds with no time signature at all...
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    # ...and with two identical 2/4 marks added in reverse time order.
    self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
    self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=0)
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)
Example #24
0
 def testExtractChords(self):
   """extract_chords expands the annotations to one label per step."""
   testing_lib.add_chords_to_sequence(
       self.note_sequence, [('C', 2), ('G7', 6), ('F', 8)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   quantized.total_quantized_steps = 10
   progressions, _ = chords_lib.extract_chords(quantized)
   expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'F', 'F']]
   self.assertEqual(expected, [list(chords) for chords in progressions])
Example #25
0
  def testFromNotesStepsPerBar(self):
    """A 7/8 meter at 12 steps per quarter yields 42 steps per bar."""
    signature = self.note_sequence.time_signatures[0]
    signature.numerator = 7
    signature.denominator = 8

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=12)
    track = drums_lib.DrumTrack()
    track.from_quantized_sequence(quantized, search_start_step=0)
    self.assertEqual(42, track.steps_per_bar)
Example #26
0
 def testFromQuantizedNoteSequenceWithCoincidentChords(self):
   """Two chords annotated at the same time raise CoincidentChordsError."""
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('Am', 4), ('D7', 8), ('G13', 12), ('Csus', 12)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   progression = chords_lib.ChordProgression()
   with self.assertRaises(chords_lib.CoincidentChordsError):
     progression.from_quantized_sequence(
         quantized, start_step=0, end_step=16)
Example #27
0
  def testExtractPianorollMultiProgram(self):
    """A track mixing programs produces no pianoroll sequences."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    # Give the first note a program different from its siblings.
    self.note_sequence.notes[0].program = 2
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    extracted, _ = pianoroll_lib.extract_pianoroll_sequences(quantized)
    self.assertEqual(0, len(extracted))
Example #28
0
 def testFromQuantizedNoteSequenceWithinSingleChord(self):
   """A sub-range lying inside one chord span repeats that chord."""
   testing_lib.add_chords_to_sequence(
       self.note_sequence, [('F', 0), ('Gm', 8)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   progression = chords_lib.ChordProgression()
   progression.from_quantized_sequence(
       quantized, start_step=4, end_step=6)
   self.assertEqual(['F'] * 2, list(progression))
Example #29
0
 def testExtractChordsAllTranspositions(self):
   """all_transpositions yields the progression in each of the 12 keys."""
   testing_lib.add_chords_to_sequence(
       self.note_sequence, [('C', 1)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   quantized.total_quantized_steps = 2
   progressions, _ = chords_lib.extract_chords(
       quantized, all_transpositions=True)
   roots = ['Gb', 'G', 'Ab', 'A', 'Bb', 'B', 'C', 'Db', 'D', 'Eb', 'E', 'F']
   expected = list(zip([NO_CHORD] * 12, roots))
   self.assertEqual(expected, [tuple(chords) for chords in progressions])
Example #30
0
  def testFromNotesStepsPerBar(self):
    """A 7/8 meter at 12 steps per quarter yields 42 steps per bar."""
    signature = self.note_sequence.time_signatures[0]
    signature.numerator = 7
    signature.denominator = 8
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=12)

    melody = melodies_lib.Melody()
    melody.from_quantized_sequence(quantized,
                                   search_start_step=0, instrument=0,
                                   ignore_polyphonic_notes=False)
    self.assertEqual(42, melody.steps_per_bar)
Example #31
0
    def cleanDataset(self):
        """Deletes Wikifonia files that fail MusicXML parsing or extraction.

        Walks every file in the Wikifonia directory, attempts to parse it
        and run chord/melody extraction on the quantized result, and removes
        any file that raises along the way.

        Returns:
            The number of files removed.
        """
        # BUG FIX: the old pattern "\Wikifonia" contained no wildcard, so
        # glob.glob matched at most that single literal path and the loop
        # never visited the dataset files.
        path = os.path.join("Wikifonia", "*")
        count = 0
        for file in glob.glob(path):
            # Catch Exception instead of a bare except so that
            # KeyboardInterrupt/SystemExit still propagate.
            try:
                mxlObject = musicxml_parser.MusicXMLDocument(file)
                mxlSequence = musicxml_reader.musicxml_to_sequence_proto(
                    mxlObject)
                quantizedNoteSequence = sequences_lib.quantize_note_sequence(
                    mxlSequence, 1)
                # Results are discarded -- these calls exist only to see
                # whether extraction raises on this file.
                chord_pipelines.extract_chords(quantizedNoteSequence)
                melodies, _ = melody_pipelines.extract_melodies(
                    quantizedNoteSequence)
                chord_pipelines.extract_chords_for_melodies(
                    quantizedNoteSequence, melodies)
            except Exception:
                os.remove(file)
                print(file)
                count = count + 1
        return count
    def testQuantizer(self):
        """The Quantizer unit matches direct quantize_note_sequence output."""
        steps_per_quarter = 4
        note_sequence = common_testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        testing_lib.add_track_to_sequence(
            note_sequence, 0,
            [(12, 100, 0.01, 10.0),
             (11, 55, 0.22, 0.50),
             (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01),
             (52, 99, 4.75, 5.0)])
        expected = sequences_lib.quantize_note_sequence(
            note_sequence, steps_per_quarter)

        unit = note_sequence_pipelines.Quantizer(steps_per_quarter)
        self._unit_transform_test(unit, note_sequence, [expected])
Example #33
0
  def testExtractMelodiesMelodyTooLong(self):
    """Melodies longer than max_steps_truncate are clipped, not dropped."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 2, 4), (14, 50, 6, 15)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 18)])

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    # Both melodies end up truncated to the same 14-step event list.
    truncated = ([NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14] +
                 [NO_EVENT] * 7)
    expected = [truncated, truncated]
    melodies, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, max_steps_truncate=14,
        max_steps_discard=18, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    self.assertEqual(expected, [list(melody) for melody in melodies])
Example #34
0
  def testExtractDrumTracksSimple(self):
    """Both drum instruments merge into a single extracted drum track."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 2, 4), (11, 1, 6, 7)],
        is_drum=True)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 9)],
        is_drum=True)
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    expected = [[NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
                 DRUMS(11, 14)]]
    tracks, _ = drums_lib.extract_drum_tracks(
        quantized, min_bars=1, gap_bars=1)

    self.assertEqual(1, len(tracks))
    self.assertTrue(isinstance(tracks[0], drums_lib.DrumTrack))

    self.assertEqual(expected, sorted(list(t) for t in tracks))
    def testAssertIsRelativeQuantizedNoteSequence(self):
        """Only tempo-relative quantization passes the relative assertion."""
        testing_lib.add_track_to_sequence(
            self.note_sequence, 0,
            [(12, 100, 0.01, 10.0),
             (11, 55, 0.22, 0.50),
             (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01),
             (52, 99, 4.75, 5.0)])

        relative = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=self.steps_per_quarter)
        absolute = sequences_lib.quantize_note_sequence_absolute(
            self.note_sequence, steps_per_second=4)

        # The relative sequence passes; absolute and unquantized both raise.
        sequences_lib.assert_is_relative_quantized_sequence(relative)
        with self.assertRaises(sequences_lib.QuantizationStatusException):
            sequences_lib.assert_is_relative_quantized_sequence(absolute)
        with self.assertRaises(sequences_lib.QuantizationStatusException):
            sequences_lib.assert_is_relative_quantized_sequence(
                self.note_sequence)
Example #36
0
  def testInferChordsForSequenceAddKeySignatures(self):
    """Chord inference can append the detected key signatures."""
    sequence = music_pb2.NoteSequence()
    # Eight one-second triads: C, Dm, F, G (C major), then F#, G#m, B, C#
    # (C# major) to force a key change halfway through.
    triads = [
        (60, 64, 67), (62, 65, 69), (60, 65, 69), (59, 62, 67),
        (66, 70, 73), (68, 71, 75), (66, 71, 75), (65, 68, 73)]
    notes = []
    for bar, pitches in enumerate(triads):
      notes.extend(
          (pitch, 100, float(bar), float(bar + 1)) for pitch in pitches)
    testing_lib.add_track_to_sequence(sequence, 0, notes)
    quantized = sequences_lib.quantize_note_sequence(
        sequence, steps_per_quarter=4)
    chord_inference.infer_chords_for_sequence(
        quantized, chords_per_bar=2, add_key_signatures=True)

    self.assertEqual(
        [(0, 0.0), (6, 4.0)],
        [(ks.key, ks.time) for ks in quantized.key_signatures])
Example #37
0
    def testMultiTrack(self):
        """Quantization assigns steps across several instrument tracks."""
        tracks = [
            (0, [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)]),
            (3, [(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)]),
            (7, [(12, 100, 1.0, 5.0), (19, 100, 2.0, 4.0),
                 (24, 100, 3.0, 3.5)]),
        ]
        for instrument, notes in tracks:
            testing_lib.add_track_to_sequence(
                self.note_sequence, instrument, notes)

        expected = copy.deepcopy(self.note_sequence)
        expected.quantization_info.steps_per_quarter = self.steps_per_quarter
        testing_lib.add_quantized_steps_to_sequence(
            expected, [(4, 16), (4, 12), (4, 16), (8, 20), (4, 20), (8, 16),
                       (12, 14)])

        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, self.steps_per_quarter)
        self.assertProtoEquals(expected, quantized)
Example #38
0
    def testExtractMelodiesLateStart(self):
        """Melodies starting late in the sequence are still extracted."""
        music_testing_lib.add_track_to_sequence(
            self.note_sequence, 0, [(12, 100, 102, 103), (13, 100, 104, 106)])
        music_testing_lib.add_track_to_sequence(
            self.note_sequence, 1, [(12, 100, 100, 101), (13, 100, 102, 105)])

        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=1)

        want = [[NO_EVENT, NO_EVENT, 12, NOTE_OFF, 13, NO_EVENT],
                [12, NOTE_OFF, 13, NO_EVENT, NO_EVENT]]
        extracted, _ = melody_pipelines.extract_melodies(
            quantized,
            min_bars=1,
            gap_bars=1,
            min_unique_pitches=2,
            ignore_polyphonic_notes=True)
        self.assertEqual(want, sorted(list(m) for m in extracted))
Example #39
0
  def testExtractMelodiesTooFewPitches(self):
    """Melodies with too few octave-distinct pitches are discarded."""
    # Track 0 collapses to 3 distinct pitch classes (12 == 24, 13 == 25 by
    # octave); track 1 keeps 4, meeting min_unique_pitches=4.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
         (24, 100, 3, 4), (25, 100, 4, 5)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
         (25, 100, 3, 4), (26, 100, 4, 5)])

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    extracted, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=1, min_unique_pitches=4,
        ignore_polyphonic_notes=True)
    self.assertEqual([[12, 13, 18, 25, 26]], [list(m) for m in extracted])
 def transform(self, note_sequence):
     """Quantize `note_sequence`, returning it in a single-element list.

     Sequences that cannot be quantized because they contain multiple time
     signatures or multiple tempos are logged, counted in the pipeline
     statistics, and dropped (an empty list is returned).
     """
     try:
         quantized_sequence = sequences_lib.quantize_note_sequence(
             note_sequence, self._steps_per_quarter)
         return [quantized_sequence]
     except sequences_lib.MultipleTimeSignatureException as e:
         tf.logging.warning(
             'Multiple time signatures in NoteSequence %s: %s',
             note_sequence.filename, e)
         # Surface the drop reason in pipeline stats instead of failing.
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_multiple_time_signatures', 1)
         ])
         return []
     except sequences_lib.MultipleTempoException as e:
         tf.logging.warning('Multiple tempos found in NoteSequence %s: %s',
                            note_sequence.filename, e)
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_multiple_tempos', 1)
         ])
         return []
Example #41
0
  def testExtractPerformancesRelativeQuantized(self):
    """Performance extraction honors min/max event thresholds."""
    self.note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=100)

    # Each case is (extract kwargs, expected number of performances).
    cases = [
        ({}, 1),
        (dict(min_events_discard=1, max_events_truncate=10), 1),
        (dict(min_events_discard=8, max_events_truncate=10), 0),
        (dict(min_events_discard=1, max_events_truncate=3), 1),
    ]
    for kwargs, num_expected in cases:
      perfs, _ = performance_lib.extract_performances(quantized, **kwargs)
      self.assertEqual(num_expected, len(perfs))
    # The final case also truncates the performance to 3 events.
    self.assertEqual(3, len(perfs[0]))
Example #42
0
    def testFromNotesStartAndEndStep(self):
        """search_start_step skips earlier notes and sets start/end steps."""
        testing_lib.add_track_to_sequence(
            self.note_sequence, 0,
            [(12, 100, 1, 2), (11, 100, 2.25, 2.5), (13, 100, 3.25, 3.75),
             (14, 100, 8.75, 9), (15, 100, 9.25, 10.75)])
        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, self.steps_per_quarter)

        melody = melodies_lib.Melody()
        melody.from_quantized_sequence(
            quantized, search_start_step=18, instrument=0,
            ignore_polyphonic_notes=False)
        want = [NO_EVENT, 14, NOTE_OFF, 15] + [NO_EVENT] * 5
        self.assertEqual(want, list(melody))
        self.assertEqual(34, melody.start_step)
        self.assertEqual(43, melody.end_step)
Example #43
0
 def testFromQuantizedNoteSequenceMultipleTracks(self):
   """Drum events merge across drum tracks; non-drum tracks are ignored."""
   drum_tracks = [
       (0, [(12, 100, 0, 10), (40, 45, 2.5, 3.5), (60, 100, 4, 5.5)]),
       (1, [(11, 55, .25, .5), (55, 120, 4, 4.25), (52, 99, 4.75, 5)]),
   ]
   for instrument, notes in drum_tracks:
     testing_lib.add_track_to_sequence(
         self.note_sequence, instrument, notes, is_drum=True)
   # Instrument 2 is not a drum track, so none of its notes should appear.
   testing_lib.add_track_to_sequence(
       self.note_sequence, 2,
       [(13, 100, 0, 10), (14, 45, 2.5, 3.5), (15, 100, 4, 5.5)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, self.steps_per_quarter)
   drums = drums_lib.DrumTrack()
   drums.from_quantized_sequence(quantized, search_start_step=0)
   want = ([DRUMS(12), DRUMS(11)] + [NO_DRUMS] * 8 + [DRUMS(40)] +
           [NO_DRUMS] * 5 + [DRUMS(55, 60), NO_DRUMS, NO_DRUMS, DRUMS(52)])
   self.assertEqual(want, list(drums))
   self.assertEqual(16, drums.steps_per_bar)
Example #44
0
    def testFromNotesStartAndEndStep(self):
        """start_step positions the extracted drum track in the sequence."""
        testing_lib.add_track_to_sequence(
            self.note_sequence, 0,
            [(12, 100, 1, 2), (11, 100, 2.25, 2.5), (13, 100, 3.25, 3.75),
             (14, 100, 4.75, 5), (15, 100, 5.25, 6.75)],
            is_drum=True)

        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, self.steps_per_quarter)

        drums = drums_lib.DrumTrack()
        drums.from_quantized_sequence(quantized, start_step=18)
        want = [NO_DRUMS] * 3 + [DRUMS(14), NO_DRUMS, DRUMS(15)]
        self.assertEqual(want, list(drums))
        self.assertEqual(16, drums.start_step)
        self.assertEqual(22, drums.end_step)
Example #45
0
  def testExtractMultipleMelodiesFromSameTrack(self):
    """A single track split by a wide gap yields multiple melodies."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(12, 100, 2, 4), (11, 1, 6, 11)])
    # Track 1 holds two fragments separated by a gap wider than gap_bars.
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8),
         (50, 100, 33, 37), (52, 100, 34, 37)])

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    want = [
        [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11,
         NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT],
        [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT],
        [NO_EVENT, 50, 52, NO_EVENT, NO_EVENT],
    ]
    extracted, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    self.assertEqual(want, sorted(list(m) for m in extracted))
Example #46
0
    def testSlice(self):
        """Slicing a Melody yields the events for the sliced step range."""
        testing_lib.add_track_to_sequence(
            self.note_sequence, 0,
            [(12, 100, 1, 3), (11, 100, 5, 7), (13, 100, 9, 10)])
        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=1)

        melody = melodies_lib.Melody()
        melody.from_quantized_sequence(
            quantized, search_start_step=0, instrument=0,
            ignore_polyphonic_notes=False)
        self.assertEqual(
            [NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11, NO_EVENT,
             NOTE_OFF, NO_EVENT, 13],
            list(melody))

        # Slicing produces a new event list for steps [2, len - 1).
        self.assertEqual(
            [NO_EVENT, NO_EVENT, NO_EVENT, 11, NO_EVENT, NOTE_OFF, NO_EVENT],
            list(melody[2:-1]))
Example #47
0
 def testExtractDrumTracksPadEnd(self):
   """With pad_end=True the drum track is padded out to a bar boundary."""
   for instrument, notes in [
       (0, [(12, 127, 2, 4), (14, 50, 6, 7)]),
       (1, [(12, 127, 2, 4), (15, 50, 6, 8)]),
       (2, [(12, 127, 2, 4), (16, 50, 8, 9)])]:
     testing_lib.add_track_to_sequence(
         self.note_sequence, instrument, notes, is_drum=True)
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   want = [[NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
            DRUMS(14, 15), NO_DRUMS, DRUMS(16), NO_DRUMS, NO_DRUMS,
            NO_DRUMS]]
   extracted, _ = drums_lib.extract_drum_tracks(
       quantized, min_bars=1, gap_bars=1, pad_end=True)
   self.assertEqual(want, [list(d) for d in extracted])
Example #48
0
  def testQuantizeNoteSequence_TimeSignatureChange(self):
    """Quantization only rejects an actual change of time signature."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.time_signatures[:]

    # No time signature at all: quantization succeeds.
    sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    # One 4/4 signature, then a duplicate 4/4 later: still fine.
    for time in (0, 1):
      self.note_sequence.time_signatures.add(
          numerator=4, denominator=4, time=time)
      sequences_lib.quantize_note_sequence(
          self.note_sequence, self.steps_per_quarter)

    # An actual change to 2/4 must raise.
    self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
    with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
      sequences_lib.quantize_note_sequence(
          self.note_sequence, self.steps_per_quarter)
 def testLeadSheetExtractor(self):
   """LeadSheetExtractor pairs each melody with its chord progression."""
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence,
       """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
   music_testing_lib.add_track_to_sequence(
       note_sequence, 0, [(12, 100, 2, 4), (11, 1, 6, 7)])
   music_testing_lib.add_track_to_sequence(
       note_sequence, 1, [(12, 127, 2, 4), (14, 50, 6, 8)])
   music_testing_lib.add_chords_to_sequence(
       note_sequence, [('Cm7', 2), ('F9', 4), ('G7b9', 6)])
   quantized = sequences_lib.quantize_note_sequence(
       note_sequence, steps_per_quarter=1)

   melody_events = [
       [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11],
       [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT]]
   chord_events = [
       [NO_CHORD, NO_CHORD, 'Cm7', 'Cm7', 'F9', 'F9', 'G7b9'],
       [NO_CHORD, NO_CHORD, 'Cm7', 'Cm7', 'F9', 'F9', 'G7b9', 'G7b9']]
   want = [
       lead_sheets_lib.LeadSheet(
           melodies_lib.Melody(m, steps_per_quarter=1, steps_per_bar=4),
           chords_lib.ChordProgression(c, steps_per_quarter=1,
                                       steps_per_bar=4))
       for m, c in zip(melody_events, chord_events)]
   unit = lead_sheet_pipelines.LeadSheetExtractor(
       min_bars=1, min_unique_pitches=1, gap_bars=1, all_transpositions=False)
   self._unit_transform_test(unit, quantized, want)
Example #50
0
 def testExtractMultipleDrumTracks(self):
   """A wide gap in a drum track splits it into separate DrumTracks."""
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0, [(12, 100, 2, 4), (11, 1, 6, 11)],
       is_drum=True)
   # Track 1's second fragment starts after a gap wider than gap_bars.
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8), (50, 100, 33, 37),
        (52, 100, 37, 38)],
       is_drum=True)
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   want = [
       [NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
        DRUMS(11, 14)],
       [NO_DRUMS, DRUMS(50), NO_DRUMS, NO_DRUMS, NO_DRUMS, DRUMS(52)],
   ]
   extracted, _ = drums_lib.extract_drum_tracks(
       quantized, min_bars=1, gap_bars=2)
   self.assertEqual(want, sorted(list(d) for d in extracted))
Example #51
0
    def testQuantizeNoteSequence(self):
        """Notes and chords receive the expected quantized step values."""
        testing_lib.add_track_to_sequence(
            self.note_sequence, 0,
            [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
        testing_lib.add_chords_to_sequence(
            self.note_sequence, [('B7', 0.22), ('Em9', 4.0)])

        want = copy.deepcopy(self.note_sequence)
        want.quantization_info.steps_per_quarter = self.steps_per_quarter
        testing_lib.add_quantized_steps_to_sequence(
            want, [(0, 40), (1, 2), (10, 14), (16, 17), (19, 20)])
        testing_lib.add_quantized_chord_steps_to_sequence(want, [1, 16])

        quantized = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=self.steps_per_quarter)

        self.assertProtoEquals(want, quantized)
Example #52
0
  def testExtractMelodiesMelodyTooShort(self):
    """Melodies shorter than min_bars are dropped."""
    # Track 0 ends at step 7 (under 2 bars) and is discarded; tracks 1 and 2
    # reach steps 8 and 9 and survive.
    for instrument, end in ((0, 7), (1, 8), (2, 9)):
      testing_lib.add_track_to_sequence(
          self.note_sequence, instrument,
          [(12, 127, 2, 4), (14, 50, 6, end)])

    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    want = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
             NO_EVENT],
            [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
             NO_EVENT, NO_EVENT]]
    extracted, _ = melodies_lib.extract_melodies(
        quantized, min_bars=2, gap_bars=1, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    self.assertEqual(want, [list(m) for m in extracted])
Example #53
0
 def testChordsExtractor(self):
   """ChordsExtractor yields one chord event per quantized step."""
   note_sequence = common_testing_lib.parse_test_proto(
       music_pb2.NoteSequence, """
      time_signatures: {
        numerator: 4
        denominator: 4}
      tempos: {
        qpm: 60}""")
   testing_lib.add_chords_to_sequence(
       note_sequence, [('C', 2), ('Am', 4), ('F', 5)])
   quantized = sequences_lib.quantize_note_sequence(
       note_sequence, steps_per_quarter=1)
   # Extend the sequence so the final chord is held through step 7.
   quantized.total_quantized_steps = 8
   want = [chords_lib.ChordProgression(
       [NO_CHORD, NO_CHORD, 'C', 'C', 'Am', 'F', 'F', 'F'],
       steps_per_quarter=1, steps_per_bar=4)]
   unit = chord_pipelines.ChordsExtractor(all_transpositions=False)
   self._unit_transform_test(unit, quantized, want)
Example #54
0
 def testExtractLeadSheetFragmentsNoChords(self):
   """Fragments with an empty chord progression are rejected."""
   music_testing_lib.add_track_to_sequence(
       self.note_sequence, 0, [(12, 100, 2, 4), (11, 1, 6, 11)])
   music_testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8), (50, 100, 33, 37),
        (52, 100, 34, 37)])
   # The final melody falls entirely under NO_CHORD, so with
   # require_chords=True it must be dropped.
   music_testing_lib.add_chords_to_sequence(
       self.note_sequence, [('C', 2), ('G7', 6), (NO_CHORD, 10)])
   quantized = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)

   extract_kwargs = dict(
       min_bars=1, gap_bars=2, min_unique_pitches=2,
       ignore_polyphonic_notes=True)
   lead_sheets, stats = lead_sheet_pipelines.extract_lead_sheet_fragments(
       quantized, require_chords=True, **extract_kwargs)
   melodies, _ = melody_pipelines.extract_melodies(
       quantized, **extract_kwargs)
   chord_progressions, _ = chord_pipelines.extract_chords_for_melodies(
       quantized, melodies)

   # Last lead sheet should be rejected for having no chords.
   self.assertEqual(list(melodies[:2]),
                    [sheet.melody for sheet in lead_sheets])
   self.assertEqual(list(chord_progressions[:2]),
                    [sheet.chords for sheet in lead_sheets])
   stats_by_name = {stat.name: stat for stat in stats}
   self.assertEqual(stats_by_name['empty_chord_progressions'].count, 1)
Example #55
0
  def testExtractChordsForMelodies(self):
    """Each extracted melody gets the chords spanning its step range."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(12, 100, 2, 4), (11, 1, 6, 11)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 1,
        [(12, 127, 2, 4), (14, 50, 6, 8),
         (50, 100, 33, 37), (52, 100, 34, 37)])
    testing_lib.add_chords_to_sequence(
        self.note_sequence, [('C', 2), ('G7', 6), ('Cmaj7', 33)])
    quantized = sequences_lib.quantize_note_sequence(
        self.note_sequence, self.steps_per_quarter)

    melodies, _ = melodies_lib.extract_melodies(
        quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
        ignore_polyphonic_notes=True)
    progressions, _ = chords_lib.extract_chords_for_melodies(
        quantized, melodies)
    want = [
        [NO_CHORD, NO_CHORD] + ['C'] * 4 + ['G7'] * 5,
        [NO_CHORD, NO_CHORD] + ['C'] * 4 + ['G7'] * 2,
        ['G7'] + ['Cmaj7'] * 4,
    ]
    self.assertEqual(want, [list(chords) for chords in progressions])
Example #56
0
  def testEventListChordsWithMelodies(self):
    """event_list_chords aligns chords with each melody's step range."""
    note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    note_sequence.tempos.add(qpm=60.0)
    testing_lib.add_chords_to_sequence(
        note_sequence, [('N.C.', 0), ('C', 2), ('G7', 6)])
    note_sequence.total_time = 8.0

    # Two four-step melodies, starting at steps 0 and 4 respectively.
    melodies = [
        melodies_lib.Melody(
            [60, -2, -2, -1],
            start_step=0, steps_per_quarter=1, steps_per_bar=4),
        melodies_lib.Melody(
            [62, -2, -2, -1],
            start_step=4, steps_per_quarter=1, steps_per_bar=4),
    ]

    quantized = sequences_lib.quantize_note_sequence(
        note_sequence, steps_per_quarter=1)
    self.assertEqual(
        [[NO_CHORD, NO_CHORD, 'C', 'C'], ['C', 'C', 'G7', 'G7']],
        chords_lib.event_list_chords(quantized, melodies))
Example #57
0
    def observationModelTrainer(self):
        """Accumulate chord-transition counts over a MusicXML corpus.

        For each file matched by `path`, extracts the chord progression,
        normalizes each chord name to an entry of ALL_CHORD_LIST, and counts
        transitions between consecutive distinct chords in `self.ct_matrix`,
        which is finally L1-normalized per row.
        """
        ALL_CHORD_LIST = [
            'N.C', 'C', 'Cm', 'C#', 'C#m', 'D', 'Dm', 'Eb', 'Ebm', 'E', 'Em',
            'F', 'Fm', 'F#', 'F#m', 'G', 'Gm', 'G#', 'G#m', 'A', 'Am', 'A#',
            'A#m', 'B', 'Bm'
        ]
        # Enharmonic spellings collapsed to their canonical names above.
        Same_Chord = {
            'Db': 'C#',
            'Dbm': 'C#m',
            'D#': 'Eb',
            'D#m': 'Ebm',
            'Gb': 'F#',
            'Gbm': 'F#m',
            'Ab': 'G#',
            'Abm': 'G#m',
            'Bb': 'A#',
            'Bbm': 'A#m'
        }
        # NOTE(review): glob.glob("Wikifonia") matches only the literal path
        # itself, not files inside the directory -- "Wikifonia/*" is probably
        # intended; confirm.
        path = "Wikifonia"
        for file in glob.glob(path):
            mxlObject = musicxml_parser.MusicXMLDocument(file)
            mxlSequence = musicxml_reader.musicxml_to_sequence_proto(mxlObject)
            quantizedNoteSequence = sequences_lib.quantize_note_sequence(
                mxlSequence, 1)

            # Only the first extracted chord progression per file is used.
            chord_prog, stats = chord_pipelines.extract_chords(
                quantizedNoteSequence)
            previous = None
            for chord in list(chord_prog[0]):
                if previous is None:
                    previous = chord
                    continue

                # Strip digits (extensions like '7', '9'), then truncate to
                # at most three characters, e.g. 'C#m7' -> 'C#m'.
                curChord = re.sub(r'\d+', '', chord)
                prevChord = re.sub(r'\d+', '', previous)
                curChord = curChord[:3]
                prevChord = prevChord[:3]

                # Drop trailing quality letters for non-minor names (e.g.
                # 'Cma' from 'Cmaj7' -> 'C'), keeping '#'/'b' accidentals.
                if curChord != 'N.C':
                    if len(curChord) == 3 and curChord[2] != 'm':
                        curChord = curChord[:2]
                        if curChord[1] not in ['#', 'b']:
                            curChord = curChord[:1]

                if prevChord != 'N.C':
                    if len(prevChord) == 3 and prevChord[2] != 'm':
                        prevChord = prevChord[:2]
                        if prevChord[1] not in ['#', 'b']:
                            prevChord = prevChord[:1]

                    # NOTE(review): everything below -- including the update
                    # of `previous` -- runs only when prevChord != 'N.C', so
                    # once a no-chord is reached `previous` is never advanced
                    # again; confirm this is intended.
                    if curChord in Same_Chord:
                        curChord = Same_Chord[curChord]
                    if prevChord in Same_Chord:
                        prevChord = Same_Chord[prevChord]

                    if curChord == 'Cb':
                        curChord = 'B'
                    if prevChord == 'Cb':
                        prevChord = 'B'
                    if curChord == 'Fb':
                        curChord = 'E'
                    if prevChord == 'Fb':
                        prevChord = 'E'
                    # NOTE(review): 'Cbm' is mapped to 'D' here; enharmonically
                    # Cb minor is B minor -- verify these two mappings.
                    if curChord == 'Cbm':
                        curChord = 'D'
                    if prevChord == 'Cbm':
                        prevChord = 'D'
                    if curChord == 'Fbm':
                        curChord = 'Em'
                    if prevChord == 'Fbm':
                        prevChord = 'Em'

                    # Count only transitions between distinct chords.
                    if prevChord != curChord:
                        a = ALL_CHORD_LIST.index(prevChord)
                        b = ALL_CHORD_LIST.index(curChord)
                        self.ct_matrix[a][b] = self.ct_matrix[a][b] + 1
                    previous = curChord

        # Row-normalize so each row forms a probability distribution.
        normed_ct_matrix = normalize(self.ct_matrix, axis=1, norm='l1')
        self.ct_matrix = normed_ct_matrix
Example #58
0
def generate_drums():
    """Generate a new drum groove by querying the model.

    Reads module globals `drums_bundle`, `seed_drum_sequence`, `num_steps`,
    `qpm` and `temperature`; rewrites the 'drums' entries of `playable_notes`
    and updates `total_seconds`.
    """
    global drums_bundle
    # NOTE(review): `generated_drums` is declared global but never assigned
    # in this function.
    global generated_drums
    global playable_notes
    global seed_drum_sequence
    global num_steps
    global qpm
    global total_seconds
    global temperature
    drums_config_id = drums_bundle.generator_details.id
    drums_config = drums_rnn_model.default_configs[drums_config_id]
    generator = drums_rnn_sequence_generator.DrumsRnnSequenceGenerator(
        model=drums_rnn_model.DrumsRnnModel(drums_config),
        details=drums_config.details,
        steps_per_quarter=drums_config.steps_per_quarter,
        # NOTE(review): checkpoint comes from melody_rnn_generate's flags;
        # confirm this is intended for the drums model.
        checkpoint=melody_rnn_generate.get_checkpoint(),
        bundle=drums_bundle)
    generator_options = generator_pb2.GeneratorOptions()
    # Sample at the configured temperature; beam/branch of 1 keeps
    # generation stochastic rather than performing a beam search.
    generator_options.args['temperature'].float_value = temperature
    generator_options.args['beam_size'].int_value = 1
    generator_options.args['branch_factor'].int_value = 1
    generator_options.args['steps_per_iteration'].int_value = 1
    if seed_drum_sequence is None:
        # No seed: prime with a single drum hit (MIDI pitch 36).
        primer_drums = magenta.music.DrumTrack([frozenset([36])])
        primer_sequence = primer_drums.to_sequence(qpm=qpm)
        local_num_steps = num_steps
    else:
        # With a seed, generate twice as many steps; the seed's portion is
        # stripped back out after generation below.
        primer_sequence = seed_drum_sequence
        local_num_steps = num_steps * 2
        tempo = primer_sequence.tempos.add()
        tempo.qpm = qpm
    # Seconds per step at 4 steps per quarter (matches the quantization
    # resolution used below) -- TODO confirm against the model's
    # steps_per_quarter.
    step_length = 60. / qpm / 4.0
    total_seconds = local_num_steps * step_length
    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(
        n.end_time
        for n in primer_sequence.notes) if primer_sequence.notes else 0)
    generator_options.generate_sections.add(start_time=last_end_time +
                                            step_length,
                                            end_time=total_seconds)
    generated_sequence = generator.generate(primer_sequence, generator_options)
    generated_sequence = sequences_lib.quantize_note_sequence(
        generated_sequence, 4)
    if seed_drum_sequence is not None:
        # Drop the primer's steps and shift the continuation back to step 0.
        i = 0
        while i < len(generated_sequence.notes):
            if generated_sequence.notes[i].quantized_start_step < num_steps:
                del generated_sequence.notes[i]
            else:
                generated_sequence.notes[i].quantized_start_step -= num_steps
                generated_sequence.notes[i].quantized_end_step -= num_steps
                i += 1
    drum_pattern = [(n.pitch, n.quantized_start_step, n.quantized_end_step)
                    for n in generated_sequence.notes]
    # First clear the last drum pattern.
    if len(playable_notes) > 0:
        playable_notes = SortedList(
            [x for x in playable_notes if x.type != 'drums'],
            key=lambda x: x.onset)
    for p, s, e in drum_pattern:
        playable_notes.add(
            PlayableNote(type='drums',
                         note=[],
                         instrument=DRUM_MAPPING[p],
                         onset=s))
Example #59
0
def parse_midi_file(midi_file,
                    max_notes=float('Inf'),
                    max_time_signatures=1,
                    max_tempos=1,
                    ignore_polyphonic_notes=True,
                    convert_to_drums=False,
                    steps_per_quarter=16):
    """Parses a MIDI file into a monophonic melody NoteSequence.

    The file is read into a NoteSequence proto, truncated to the given
    limits, optionally marked as drums, reduced to a single voice,
    quantized, and re-rendered through a Melody at the file's tempo.

    Parameters
    ----------
    midi_file : str
        Path to the MIDI file to parse.
    max_notes : float, optional
        Maximum number of notes to keep; excess notes are dropped from the
        end. Defaults to unlimited (``float('Inf')``).
    max_time_signatures : int, optional
        Maximum number of time signatures to keep.
    max_tempos : int, optional
        Maximum number of tempos to keep.
    ignore_polyphonic_notes : bool, optional
        If True, collapse the sequence to a single voice before extracting
        the melody.
    convert_to_drums : bool, optional
        If True, set every note's program to 10.
    steps_per_quarter : int, optional
        Quantization resolution, in steps per quarter note.

    Returns
    -------
    tuple
        ``(sequence, qpm)``: the melody rendered back to a NoteSequence and
        the tempo in quarter notes per minute (120 if the file has none).
    """
    seq = midi_io.midi_file_to_sequence_proto(midi_file)

    # Truncate from the end. Pop-based loops also handle the float('Inf')
    # default, which a slice-delete would not.
    while len(seq.notes) > max_notes:
        seq.notes.pop()

    while len(seq.time_signatures) > max_time_signatures:
        seq.time_signatures.pop()

    while len(seq.tempos) > max_tempos:
        seq.tempos.pop()

    if convert_to_drums:
        # NOTE(review): marks drums via program 10 (the GM percussion
        # channel number); NoteSequence also has an `is_drum` flag —
        # confirm downstream consumers expect this convention.
        for note in seq.notes:
            note.program = 10

    if ignore_polyphonic_notes:
        convert_to_monophonic(seq)

    seq = sequences_lib.quantize_note_sequence(
        seq, steps_per_quarter=steps_per_quarter)

    # Fall back to 120 qpm when the file carries no tempo information.
    qpm = seq.tempos[0].qpm if seq.tempos else 120

    melody = Melody()
    melody.from_quantized_sequence(
        seq, ignore_polyphonic_notes=ignore_polyphonic_notes)
    seq = melody.to_sequence(qpm=qpm)

    return seq, qpm
# Example #60
def split_performance(performance,
                      steps_per_segment,
                      new_performance_fn,
                      clip_tied_notes=False):
    """Splits a performance into multiple fixed-length segments.

    Args:
      performance: A Performance (or MetricPerformance) object to split.
      steps_per_segment: The number of quantized steps per segment.
      new_performance_fn: A function to create new Performance (or
          MetricPerformance objects). Takes `quantized_sequence` and
          `start_step` arguments.
      clip_tied_notes: If True, clip tied notes across segments by converting
          each segment to NoteSequence and back.

    Returns:
      A list of performance segments.
    """
    time_shift = performance_lib.PerformanceEvent.TIME_SHIFT
    note_off = performance_lib.PerformanceEvent.NOTE_OFF

    segments = []
    segment = new_performance_fn(quantized_sequence=None, start_step=0)
    step = 0  # quantized steps consumed within the current segment

    def _begin_segment():
        # Close the current segment and open an empty one whose start_step
        # continues where the completed segments leave off.
        nonlocal segment, step
        segments.append(segment)
        segment = new_performance_fn(
            quantized_sequence=None,
            start_step=len(segments) * steps_per_segment)
        step = 0

    for event in performance:
        if event.event_type == time_shift:
            if step + event.event_value <= steps_per_segment:
                # The shift fits entirely inside the current segment.
                segment.append(event)
                step += event.event_value
            else:
                # The shift crosses a boundary: split it into (up to) two
                # shifts, one finishing this segment, one starting the next.
                remaining = steps_per_segment - step
                if remaining > 0:
                    segment.append(
                        performance_lib.PerformanceEvent(
                            event_type=time_shift,
                            event_value=remaining))
                _begin_segment()
                overflow = event.event_value - remaining
                if overflow > 0:
                    segment.append(
                        performance_lib.PerformanceEvent(
                            event_type=time_shift,
                            event_value=overflow))
                    step += overflow
        else:
            # Non-time-shift event. At a segment boundary, note-offs belong
            # to the segment that is ending; anything else opens a new one.
            if step == steps_per_segment and event.event_type != note_off:
                _begin_segment()
            segment.append(event)

    segments.append(segment)

    # Drop a trailing zero-duration segment, if any.
    if segments and segments[-1].num_steps == 0:
        segments.pop()

    if clip_tied_notes:
        # Round-trip each segment through NoteSequence to remove notes that
        # are held across segment boundaries.
        for i, seg in enumerate(segments):
            sequence = seg.to_sequence()
            if isinstance(seg, performance_lib.MetricPerformance):
                # Performance is quantized relative to meter.
                quantized_sequence = sequences_lib.quantize_note_sequence(
                    sequence, steps_per_quarter=seg.steps_per_quarter)
            else:
                # Performance is quantized with absolute timing.
                quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
                    sequence, steps_per_second=seg.steps_per_second)
            segments[i] = new_performance_fn(
                quantized_sequence=quantized_sequence,
                start_step=seg.start_step)
            segments[i].set_length(steps_per_segment)

    return segments