def testExtractMelodiesMelodyTooLongWithPad(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 127, 2, 4), (14, 50, 6, 15)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 18)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
               NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT]]
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, max_steps_truncate=14,
      max_steps_discard=18, gap_bars=1, min_unique_pitches=2,
      ignore_polyphonic_notes=True, pad_end=True)
  melodies = [list(melody) for melody in melodies]
  self.assertEqual(expected, melodies)

def testExtractMelodiesTooFewPitches(self):
  # Test that extract_melodies discards melodies with too few pitches where
  # pitches are equivalent by octave.
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
       (24, 100, 3, 4), (25, 100, 4, 5)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 100, 0, 1), (13, 100, 1, 2), (18, 100, 2, 3),
       (25, 100, 3, 4), (26, 100, 4, 5)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  expected = [[12, 13, 18, 25, 26]]
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=4,
      ignore_polyphonic_notes=True)
  melodies = [list(melody) for melody in melodies]
  self.assertEqual(expected, melodies)

def testExtractMultipleMelodiesFromSameTrack(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 11)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 8),
       (50, 100, 33, 37), (52, 100, 34, 37)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11,
               NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT],
              [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
               NO_EVENT],
              [NO_EVENT, 50, 52, NO_EVENT, NO_EVENT]]
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  melodies = sorted([list(melody) for melody in melodies])
  self.assertEqual(expected, melodies)

def testExtractMelodiesMelodyTooShort(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 127, 2, 4), (14, 50, 6, 7)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 8)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 2,
      [(12, 127, 2, 4), (14, 50, 6, 9)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
               NO_EVENT],
              [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
               NO_EVENT, NO_EVENT]]
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=2, gap_bars=1, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  melodies = [list(melody) for melody in melodies]
  self.assertEqual(expected, melodies)

def testExtractChordsForMelodiesCoincidentChords(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 11)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 8),
       (50, 100, 33, 37), (52, 100, 34, 37)])
  music_testing_lib.add_chords_to_sequence(
      self.note_sequence,
      [('C', 2), ('G7', 6), ('E13', 8), ('Cmaj7', 8)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, self.steps_per_quarter)
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  chord_progressions, stats = chord_pipelines.extract_chords_for_melodies(
      quantized_sequence, melodies)
  expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
              ['Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
  stats_dict = dict((stat.name, stat) for stat in stats)
  self.assertIsNone(chord_progressions[0])
  self.assertEqual(expected,
                   [list(chords) for chords in chord_progressions[1:]])
  self.assertEqual(stats_dict['coincident_chords'].count, 1)

def testExtractLeadSheetFragmentsCoincidentChords(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 11)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 8),
       (50, 100, 33, 37), (52, 100, 34, 37)])
  music_testing_lib.add_chords_to_sequence(
      self.note_sequence,
      [('C', 2), ('G7', 6), ('Cmaj7', 33), ('F', 33)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  lead_sheets, _ = lead_sheet_pipelines.extract_lead_sheet_fragments(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True, require_chords=True)
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  chord_progressions, _ = chord_pipelines.extract_chords_for_melodies(
      quantized_sequence, melodies)
  # Last lead sheet should be rejected for coincident chords.
  self.assertEqual(list(melodies[:2]),
                   list(lead_sheet.melody for lead_sheet in lead_sheets))
  self.assertEqual(list(chord_progressions[:2]),
                   list(lead_sheet.chords for lead_sheet in lead_sheets))

def testExtractChordsForMelodies(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 11)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 8),
       (50, 100, 33, 37), (52, 100, 34, 37)])
  music_testing_lib.add_chords_to_sequence(
      self.note_sequence,
      [('C', 2), ('G7', 6), ('Cmaj7', 33)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, self.steps_per_quarter)
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  chord_progressions, _ = chord_pipelines.extract_chords_for_melodies(
      quantized_sequence, melodies)
  expected = [[NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7', 'G7',
               'G7', 'G7'],
              [NO_CHORD, NO_CHORD, 'C', 'C', 'C', 'C', 'G7', 'G7'],
              ['G7', 'Cmaj7', 'Cmaj7', 'Cmaj7', 'Cmaj7']]
  self.assertEqual(expected,
                   [list(chords) for chords in chord_progressions])

def testExtractLeadSheetFragments(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, .5, 1), (11, 1, 1.5, 2.75)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, .5, 1), (14, 50, 1.5, 2),
       (50, 100, 8.25, 9.25), (52, 100, 8.5, 9.25)])
  music_testing_lib.add_chords_to_sequence(
      self.note_sequence,
      [('C', .5), ('G7', 1.5), ('Cmaj7', 8.25)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, self.steps_per_quarter)
  lead_sheets, _ = lead_sheet_pipelines.extract_lead_sheet_fragments(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True, require_chords=True)
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=2, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  chord_progressions, _ = chord_pipelines.extract_chords_for_melodies(
      quantized_sequence, melodies)
  self.assertEqual(list(melodies),
                   list(lead_sheet.melody for lead_sheet in lead_sheets))
  self.assertEqual(list(chord_progressions),
                   list(lead_sheet.chords for lead_sheet in lead_sheets))

def testExtractMelodiesStatistics(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 7), (10, 100, 8, 10), (9, 100, 11, 14),
       (8, 100, 16, 40), (7, 100, 41, 42)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 2, 8)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 2,
      [(12, 127, 0, 1)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 3,
      [(12, 127, 2, 4), (12, 50, 6, 8)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  _, stats = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
      ignore_polyphonic_notes=False)
  stats_dict = dict((stat.name, stat) for stat in stats)
  self.assertEqual(stats_dict['polyphonic_tracks_discarded'].count, 1)
  self.assertEqual(stats_dict['melodies_discarded_too_short'].count, 1)
  self.assertEqual(stats_dict['melodies_discarded_too_few_pitches'].count, 1)
  self.assertEqual(
      stats_dict['melody_lengths_in_bars'].counters,
      {float('-inf'): 0, 0: 0, 1: 0, 2: 0, 10: 1, 20: 0, 30: 0, 40: 0,
       50: 0, 100: 0, 200: 0, 500: 0})

def testExtractMelodiesSimple(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 7)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 127, 2, 4), (14, 50, 6, 9)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 9,
      [(13, 100, 2, 4), (15, 25, 6, 8)],
      is_drum=True)
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  expected = [[NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11],
              [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14,
               NO_EVENT, NO_EVENT]]
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  self.assertEqual(2, len(melodies))
  self.assertIsInstance(melodies[0], melodies_lib.Melody)
  self.assertIsInstance(melodies[1], melodies_lib.Melody)
  melodies = sorted([list(melody) for melody in melodies])
  self.assertEqual(expected, melodies)

def emissionModelTrainer(self):
  # Note: this method relies on glob, os, re, sklearn.preprocessing.normalize,
  # and the magenta/note_seq MusicXML and pipeline modules being imported.
  ALL_CHORD_LIST = ['N.C', 'C', 'Cm', 'C#', 'C#m', 'D', 'Dm', 'Eb', 'Ebm',
                    'E', 'Em', 'F', 'Fm', 'F#', 'F#m', 'G', 'Gm', 'G#',
                    'G#m', 'A', 'Am', 'A#', 'A#m', 'B', 'Bm']
  Same_Chord = {'Db': 'C#', 'Dbm': 'C#m', 'D#': 'Eb', 'D#m': 'Ebm',
                'Gb': 'F#', 'Gbm': 'F#m', 'Ab': 'G#', 'Abm': 'G#m',
                'Bb': 'A#', 'Bbm': 'A#m'}
  ALL_NOTE_LIST = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'G#', 'A',
                   'A#', 'B']
  # Use a raw string so the backslashes are not treated as escapes; glob needs
  # a wildcard pattern to match the MusicXML files (extension assumed here).
  path = r"D:\FAI\Wikifonia"
  for file in glob.glob(os.path.join(path, '*.mxl')):
    mxlObject = musicxml_parser.MusicXMLDocument(file)
    mxlSequence = musicxml_reader.musicxml_to_sequence_proto(mxlObject)
    quantizedNoteSequence = sequences_lib.quantize_note_sequence(mxlSequence, 1)
    melodies, stats = melody_pipelines.extract_melodies(quantizedNoteSequence)
    chord_prog, stats = chord_pipelines.extract_chords_for_melodies(
        quantizedNoteSequence, melodies)
    if not chord_prog:
      continue
    for i in range(len(list(chord_prog[0]))):
      curChord = list(chord_prog[0])[i]
      curMel = list(melodies[0])[i]
      # Fold the melody pitch into the octave 60-71.
      while curMel > 71:
        curMel = curMel - 12
      while curMel < 60:
        curMel = curMel + 12
      # Reduce the chord symbol to a plain major/minor root.
      curChord = re.sub(r'\d+', '', curChord)
      curChord = curChord[:3]
      if curChord != 'N.C':
        if len(curChord) == 3 and curChord[2] != 'm':
          curChord = curChord[:2]
          if curChord[1] not in ['#', 'b']:
            curChord = curChord[:1]
        if curChord in Same_Chord:
          curChord = Same_Chord[curChord]
        # Enharmonic respellings of Cb/Fb roots.
        if curChord == 'Cb':
          curChord = 'B'
        if curChord == 'Fb':
          curChord = 'E'
        if curChord == 'Cbm':
          curChord = 'Bm'
        if curChord == 'Fbm':
          curChord = 'Em'
      a = ALL_CHORD_LIST.index(re.sub(r'\d+', '', curChord))
      b = curMel
      self.mo_matrix[a][b - 60] = self.mo_matrix[a][b - 60] + 1
  normed_mo_matrix = normalize(self.mo_matrix, axis=1, norm='l1')
  self.mo_matrix = normed_mo_matrix

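# A minimal sketch (not from the original code) of how the trained emission
# matrix above might be queried. It assumes self.mo_matrix is the 25x12
# row-normalized array built in emissionModelTrainer, with rows ordered like
# ALL_CHORD_LIST and columns indexed by pitch folded into the octave 60-71.
def emissionProbability(self, chord_name, midi_pitch,
                        chord_list=('N.C', 'C', 'Cm', 'C#', 'C#m', 'D', 'Dm',
                                    'Eb', 'Ebm', 'E', 'Em', 'F', 'Fm', 'F#',
                                    'F#m', 'G', 'Gm', 'G#', 'G#m', 'A', 'Am',
                                    'A#', 'A#m', 'B', 'Bm')):
  """Returns P(pitch | chord) from the normalized emission matrix."""
  chord_index = chord_list.index(chord_name)
  # Folding into 60-71 and subtracting 60 is equivalent to taking pitch mod 12.
  pitch_index = midi_pitch % 12
  return self.mo_matrix[chord_index][pitch_index]
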
def extractPitch(self, song):
  seq = midi_io.midi_file_to_note_sequence(song)
  qseq = sequences_lib.quantize_note_sequence(seq, 1)
  melodies, stats = melody_pipelines.extract_melodies(qseq)
  rounded_pitches = []
  for mel in list(melodies[0]):
    # Fold each melody event into the octave 60-71. Note that the special
    # NO_EVENT/NOTE_OFF values (which are negative) are folded as well.
    while mel > 71:
      mel = mel - 12
    while mel < 60:
      mel = mel + 12
    rounded_pitches.append(mel)
  return rounded_pitches

def cleanDataset(self):
  # Removes any file in the dataset that cannot be parsed and quantized.
  # Note: glob needs a wildcard pattern to match files (extension assumed
  # here), and the raw string avoids backslash-escape issues in the path.
  path = r"D:\FAI\Wikifonia"
  count = 0
  for file in glob.glob(os.path.join(path, '*.mxl')):
    try:
      mxlObject = musicxml_parser.MusicXMLDocument(file)
      mxlSequence = musicxml_reader.musicxml_to_sequence_proto(mxlObject)
      quantizedNoteSequence = sequences_lib.quantize_note_sequence(mxlSequence, 1)
      chord_prog, stats = chord_pipelines.extract_chords(quantizedNoteSequence)
      melodies, stats = melody_pipelines.extract_melodies(quantizedNoteSequence)
      ac, stats = chord_pipelines.extract_chords_for_melodies(
          quantizedNoteSequence, melodies)
    except Exception:
      os.remove(file)
      print(file)
      count = count + 1

def load_primer(self):
  """Loads default MIDI primer file.

  Also assigns the steps per bar of this file to be the model's defaults.
  """
  if not os.path.exists(self.midi_primer):
    tf.logging.warn('ERROR! No such primer file exists! %s', self.midi_primer)
    return

  self.primer_sequence = midi_io.midi_file_to_sequence_proto(self.midi_primer)
  quantized_seq = sequences_lib.quantize_note_sequence(
      self.primer_sequence, steps_per_quarter=4)
  extracted_melodies, _ = melody_pipelines.extract_melodies(
      quantized_seq, min_bars=0, min_unique_pitches=1)
  self.primer = extracted_melodies[0]
  self.steps_per_bar = self.primer.steps_per_bar

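# Illustrative follow-up (assumed, not part of the original method): after
# load_primer runs, the extracted primer is a note_seq Melody, so it can be
# rendered back to a NoteSequence and written to MIDI to sanity-check the
# extraction. `loader` and the output path are placeholders.
primer_check = loader.primer.to_sequence(qpm=120.0)
midi_io.sequence_proto_to_midi_file(primer_check, '/tmp/primer_check.mid')
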
def testExtractMelodiesLateStart(self):
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(12, 100, 102, 103), (13, 100, 104, 106)])
  music_testing_lib.add_track_to_sequence(
      self.note_sequence, 1,
      [(12, 100, 100, 101), (13, 100, 102, 105)])
  quantized_sequence = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  expected = [[NO_EVENT, NO_EVENT, 12, NOTE_OFF, 13, NO_EVENT],
              [12, NOTE_OFF, 13, NO_EVENT, NO_EVENT]]
  melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, min_bars=1, gap_bars=1, min_unique_pitches=2,
      ignore_polyphonic_notes=True)
  melodies = sorted([list(melody) for melody in melodies])
  self.assertEqual(expected, melodies)

def extract_lead_sheet_fragments(quantized_sequence,
                                 search_start_step=0,
                                 min_bars=7,
                                 max_steps_truncate=None,
                                 max_steps_discard=None,
                                 gap_bars=1.0,
                                 min_unique_pitches=5,
                                 ignore_polyphonic_notes=True,
                                 pad_end=False,
                                 filter_drums=True,
                                 require_chords=False,
                                 all_transpositions=False):
  """Extracts a list of lead sheet fragments from a quantized NoteSequence.

  This function first extracts melodies using
  melody_pipelines.extract_melodies, then extracts the chords underlying each
  melody using chord_pipelines.extract_chords_for_melodies.

  Args:
    quantized_sequence: A quantized NoteSequence object.
    search_start_step: Start searching for a melody at this time step. Assumed
        to be the first step of a bar.
    min_bars: Minimum length of melodies in number of bars. Shorter melodies
        are discarded.
    max_steps_truncate: Maximum number of steps in extracted melodies. If
        defined, longer melodies are truncated to this threshold. If pad_end is
        also True, melodies will be truncated to the end of the last bar below
        this threshold.
    max_steps_discard: Maximum number of steps in extracted melodies. If
        defined, longer melodies are discarded.
    gap_bars: A melody comes to an end when this number of bars (measures) of
        silence is encountered.
    min_unique_pitches: Minimum number of unique notes with octave equivalence.
        Melodies with too few unique notes are discarded.
    ignore_polyphonic_notes: If True, melodies will be extracted from
        `quantized_sequence` tracks that contain polyphony (notes start at the
        same time). If False, tracks with polyphony will be ignored.
    pad_end: If True, the end of the melody will be padded with NO_EVENTs so
        that it will end at a bar boundary.
    filter_drums: If True, notes for which `is_drum` is True will be ignored.
    require_chords: If True, only return lead sheets that have at least one
        chord other than NO_CHORD. If False, lead sheets with only melody will
        also be returned.
    all_transpositions: If True, also transpose each lead sheet fragment into
        all 12 keys.

  Returns:
    A (lead_sheets, stats) tuple, where `lead_sheets` is a python list of
    LeadSheet instances and `stats` is a list of extraction statistics.

  Raises:
    NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length (derived
        from its time signature) is not an integer number of time steps.
  """
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
  stats = dict([('empty_chord_progressions',
                 statistics.Counter('empty_chord_progressions'))])
  melodies, melody_stats = melody_pipelines.extract_melodies(
      quantized_sequence, search_start_step=search_start_step,
      min_bars=min_bars, max_steps_truncate=max_steps_truncate,
      max_steps_discard=max_steps_discard, gap_bars=gap_bars,
      min_unique_pitches=min_unique_pitches,
      ignore_polyphonic_notes=ignore_polyphonic_notes, pad_end=pad_end,
      filter_drums=filter_drums)
  chord_progressions, chord_stats = chord_pipelines.extract_chords_for_melodies(
      quantized_sequence, melodies)
  lead_sheets = []
  for melody, chords in zip(melodies, chord_progressions):
    # If `chords` is None, it's because a chord progression could not be
    # extracted for this particular melody.
    if chords is not None:
      if require_chords and all(chord == chords_lib.NO_CHORD
                                for chord in chords):
        stats['empty_chord_progressions'].increment()
      else:
        lead_sheet = LeadSheet(melody, chords)
        if all_transpositions:
          for amount in range(-6, 6):
            transposed_lead_sheet = copy.deepcopy(lead_sheet)
            transposed_lead_sheet.transpose(amount)
            lead_sheets.append(transposed_lead_sheet)
        else:
          lead_sheets.append(lead_sheet)
  return lead_sheets, list(stats.values()) + melody_stats + chord_stats

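# A minimal usage sketch for extract_lead_sheet_fragments (assumed, not from
# the original code): `note_sequence` and the parameter values are
# illustrative. Each returned LeadSheet pairs a melody with a chord
# progression aligned to it step for step.
quantized = sequences_lib.quantize_note_sequence(note_sequence, steps_per_quarter=4)
lead_sheets, stats = extract_lead_sheet_fragments(
    quantized, min_bars=1, gap_bars=2, min_unique_pitches=2,
    ignore_polyphonic_notes=True, require_chords=True)
for lead_sheet in lead_sheets:
  print(list(lead_sheet.melody), list(lead_sheet.chords))
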
def _generate(self, input_sequence, generator_options):
  if len(generator_options.input_sections) > 1:
    raise sequence_generator.SequenceGeneratorError(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise sequence_generator.SequenceGeneratorError(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  if input_sequence and input_sequence.tempos:
    qpm = input_sequence.tempos[0].qpm
  else:
    qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
  steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
      self.steps_per_quarter, qpm)

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    input_section = generator_options.input_sections[0]
    primer_sequence = note_seq.trim_note_sequence(
        input_sequence, input_section.start_time, input_section.end_time)
    input_start_step = note_seq.quantize_to_step(
        input_section.start_time, steps_per_second, quantize_cutoff=0)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  if primer_sequence.notes:
    last_end_time = max(n.end_time for n in primer_sequence.notes)
  else:
    last_end_time = 0
  if last_end_time > generate_section.start_time:
    raise sequence_generator.SequenceGeneratorError(
        'Got GenerateSection request for section that is before the end of '
        'the NoteSequence. This model can only extend sequences. Requested '
        'start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_sequence = note_seq.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)
  # Setting gap_bars to infinite ensures that the entire input will be used.
  extracted_melodies, _ = melody_pipelines.extract_melodies(
      quantized_sequence, search_start_step=input_start_step, min_bars=0,
      min_unique_pitches=1, gap_bars=float('inf'),
      ignore_polyphonic_notes=True)
  assert len(extracted_melodies) <= 1

  start_step = note_seq.quantize_to_step(
      generate_section.start_time, steps_per_second, quantize_cutoff=0)
  # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
  # always rounds down. This avoids generating a sequence that ends at 5.0
  # seconds when the requested end time is 4.99.
  end_step = note_seq.quantize_to_step(
      generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

  if extracted_melodies and extracted_melodies[0]:
    melody = extracted_melodies[0]
  else:
    # If no melody could be extracted, create an empty melody that starts 1
    # step before the request start_step. This will result in 1 step of
    # silence when the melody is extended below.
    steps_per_bar = int(
        note_seq.steps_per_bar_in_quantized_sequence(quantized_sequence))
    melody = note_seq.Melody([],
                             start_step=max(0, start_step - 1),
                             steps_per_bar=steps_per_bar,
                             steps_per_quarter=self.steps_per_quarter)

  # Ensure that the melody extends up to the step we want to start generating.
  melody.set_length(start_step - melody.start_step)

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  generated_melody = self._model.generate_melody(
      end_step - melody.start_step, melody, **args)
  generated_sequence = generated_melody.to_sequence(qpm=qpm)
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence

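# A minimal sketch (assumed, not from the original code) of how a caller might
# populate the GeneratorOptions consumed by _generate above: the args map
# carries the float_value/int_value fields read via `arg_types`, and
# generate_sections gives the time range to generate. `generator` stands in
# for a constructed melody sequence generator instance and `primer_sequence`
# for a primer NoteSequence.
from note_seq.protobuf import generator_pb2

generator_options = generator_pb2.GeneratorOptions()
generator_options.args['temperature'].float_value = 1.0
generator_options.args['beam_size'].int_value = 1
generator_options.generate_sections.add(start_time=4.0, end_time=8.0)
generated_sequence = generator.generate(primer_sequence, generator_options)
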