def main(unused_argv):
  """Fine-aligns every MIDI file in FLAGS.input_dir to its matching WAV.

  For each `<name>.wav` in the input directory, loads the audio and the
  note sequence from the sibling `<name>.mid`, runs the C++ fine alignment,
  and writes the aligned sequence to FLAGS.output_dir under the same
  `.mid` filename.
  """
  logging.set_verbosity(FLAGS.log)

  if not os.path.exists(FLAGS.output_dir):
    os.makedirs(FLAGS.output_dir)

  for candidate in sorted(os.listdir(FLAGS.input_dir)):
    # Only .wav files drive the loop; the MIDI name is derived from the WAV.
    if not candidate.endswith('.wav'):
      continue
    wav_name = candidate
    midi_name = candidate.replace('.wav', '.mid')
    logging.info('Aligning %s to %s', midi_name, wav_name)

    audio_samples = audio_io.load_audio(
        os.path.join(FLAGS.input_dir, wav_name), align_fine_lib.SAMPLE_RATE)
    note_sequence = midi_io.midi_file_to_sequence_proto(
        os.path.join(FLAGS.input_dir, midi_name))

    aligned_sequence, unused_stats = align_fine_lib.align_cpp(
        audio_samples,
        align_fine_lib.SAMPLE_RATE,
        note_sequence,
        align_fine_lib.CQT_HOP_LENGTH_FINE,
        sf2_path=FLAGS.sf2_path,
        penalty_mul=FLAGS.penalty_mul)
    midi_io.sequence_proto_to_midi_file(
        aligned_sequence, os.path.join(FLAGS.output_dir, midi_name))

  logging.info('Done')
def testIsDrumDetection(self):
  """Verify that is_drum instruments are properly tracked.

  self.midi_is_drum_filename is a MIDI file containing two tracks set to
  channel 9 (is_drum == True). Each contains one NoteOn. This test is
  designed to catch a bug where the second track would lose is_drum,
  remapping the drum track to an instrument track.
  """
  source_sequence = midi_io.midi_file_to_sequence_proto(
      self.midi_is_drum_filename)
  with tempfile.NamedTemporaryFile(prefix='MidiDrumTest') as temp_file:
    midi_io.sequence_proto_to_midi_file(source_sequence, temp_file.name)
    original_midi = mido.MidiFile(filename=self.midi_is_drum_filename)
    # Use the file object when writing to the tempfile
    # to avoid permission error.
    round_trip_midi = mido.MidiFile(file=temp_file)

    # Count number of channel 9 Note Ons in both the original file and the
    # round-tripped copy; each must contain exactly two.
    channel_counts = [0, 0]
    for index, midi_data in enumerate([original_midi, round_trip_midi]):
      for event in midi_data:
        if (event.type == 'note_on' and event.velocity > 0 and
            event.channel == 9):
          channel_counts[index] += 1
    self.assertEqual(channel_counts, [2, 2])
def midi_file_to_melody(midi_file, steps_per_quarter=4, qpm=None,
                        ignore_polyphonic_notes=True):
  """Loads a melody from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes.
    qpm: Unused. Retained for backward compatibility only; the previous
        implementation resolved a tempo here but never passed it to
        quantization or melody extraction, so it never affected the result.
    ignore_polyphonic_notes: Only use the highest simultaneous note if True.

  Returns:
    A Melody object extracted from the MIDI file.
  """
  # Dead parameter: the original code resolved qpm from the sequence's first
  # tempo (or a default) but never used the value anywhere, so the resolution
  # has been removed rather than silently implying it influences the output.
  del qpm

  sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  quantized_sequence = sequences_lib.quantize_note_sequence(
      sequence, steps_per_quarter=steps_per_quarter)
  melody = Melody()
  melody.from_quantized_sequence(
      quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes)
  return melody
def midi_file_to_drum_track(midi_file, steps_per_quarter=4):
  """Loads a drum track from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of DrumTrack. For example, 4 = 16th notes.

  Returns:
    A DrumTrack object extracted from the MIDI file.
  """
  note_sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  quantized = sequences_lib.quantize_note_sequence(
      note_sequence, steps_per_quarter=steps_per_quarter)
  track = DrumTrack()
  track.from_quantized_sequence(quantized)
  return track
def compare_directory(self, directory):
  """Parses every .abc tunebook in `directory` and compares each tune to
  its abc2midi-generated reference MIDI (`<name><ref_num>.mid`).

  Args:
    directory: Path containing `.abc` files and their reference MIDIs.
  """
  self.maxDiff = None  # pylint: disable=invalid-name

  files_in_dir = tf.gfile.ListDirectory(directory)
  files_parsed = 0
  for file_in_dir in files_in_dir:
    if not file_in_dir.endswith('.abc'):
      continue
    abc = os.path.join(directory, file_in_dir)

    # Collect reference MIDIs keyed by tune reference number, stopping at
    # the first missing file: <name>1.mid, <name>2.mid, ...
    midis = {}
    ref_num = 1
    while True:
      midi = re.sub(r'\.abc$', str(ref_num) + '.mid',
                    os.path.join(directory, file_in_dir))
      if not tf.gfile.Exists(midi):
        break
      midis[ref_num] = midi
      ref_num += 1

    print('parsing {}: {}'.format(files_parsed, abc))
    tunes, exceptions = abc_parser.parse_abc_tunebook_file(abc)
    files_parsed += 1
    # Every tune either parsed successfully or raised; both together must
    # account for all reference MIDIs.
    self.assertEqual(len(tunes), len(midis) - len(exceptions))

    for tune in tunes.values():
      expanded_tune = sequences_lib.expand_section_groups(tune)
      midi_ns = midi_io.midi_file_to_sequence_proto(
          midis[tune.reference_number])

      # abc2midi adds a 1-tick delay to the start of every note, but we
      # don't; compensate before comparing.
      tick_length = ((1 / (midi_ns.tempos[0].qpm / 60)) /
                     midi_ns.ticks_per_quarter)
      for note in midi_ns.notes:
        note.start_time -= tick_length
        # For now, don't compare velocities.
        note.velocity = 90

      # assertProtoEquals produces a full diff on mismatch (including the
      # note list), so the former pdb.set_trace() debugging hooks and the
      # redundant per-note try/except loop — which swallowed assertion
      # failures — have been removed.
      self.assertProtoEquals(midi_ns, expanded_tune)
      self.assertEqual(midi_ns.total_time, expanded_tune.total_time)
def load_primer(self):
  """Loads default MIDI primer file.

  Also assigns the steps per bar of this file to be the model's defaults.
  """
  # Best-effort: if the primer file is missing, log and leave state untouched.
  if not os.path.exists(self.midi_primer):
    tf.logging.warn('ERROR! No such primer file exists! %s', self.midi_primer)
    return

  self.primer_sequence = midi_io.midi_file_to_sequence_proto(self.midi_primer)
  quantized = sequences_lib.quantize_note_sequence(
      self.primer_sequence, steps_per_quarter=4)
  melodies, _ = melody_pipelines.extract_melodies(
      quantized, min_bars=0, min_unique_pitches=1)
  # Take the first extracted melody as the primer and adopt its bar length.
  self.primer = melodies[0]
  self.steps_per_bar = self.primer.steps_per_bar
def compare_to_abc2midi_and_metadata(
    self, midi_path, expected_metadata, expected_expanded_metadata, test):
  """Compare parsing results to the abc2midi "reference" implementation."""
  # Compare section annotations and groups before expanding.
  self.compare_proto_list(expected_metadata.section_annotations,
                          test.section_annotations)
  self.compare_proto_list(expected_metadata.section_groups,
                          test.section_groups)

  expanded_test = sequences_lib.expand_section_groups(test)

  abc2midi = midi_io.midi_file_to_sequence_proto(
      os.path.join(testing_lib.get_testdata_dir(), midi_path))

  # abc2midi adds a 1-tick delay to the start of every note, but we don't.
  seconds_per_quarter = 1 / (abc2midi.tempos[0].qpm / 60)
  tick_length = seconds_per_quarter / abc2midi.ticks_per_quarter
  for note in abc2midi.notes:
    # For now, don't compare velocities.
    note.velocity = 90
    note.start_time -= tick_length

  self.compare_proto_list(abc2midi.notes, expanded_test.notes)
  self.assertEqual(abc2midi.total_time, expanded_test.total_time)
  self.compare_proto_list(abc2midi.time_signatures,
                          expanded_test.time_signatures)

  # We've checked the notes and time signatures, now compare the rest of the
  # proto to the expected proto.
  scrubbed = copy.deepcopy(expanded_test)
  del scrubbed.notes[:]
  scrubbed.ClearField('total_time')
  del scrubbed.time_signatures[:]
  self.assertProtoEquals(expected_expanded_metadata, scrubbed)