def to_midi(self, tempo=120, instrument=PIANO, tonic=None, lib=None):
        if not self.progression:
            Logging.error("Progression not assigned!")
            return None
        if not tonic:
            tonic = self.meta['tonic']
        midi = PrettyMIDI()
        unit_length = 30 / tempo
        ins = Instrument(instrument)
        if not self.saved_in_source_base:
            current_pos = 0
            for i in self.get_chord_progression():
                memo = -1
                length = 0
                for j in i:
                    if j == memo:
                        length += unit_length
                    else:
                        if memo != -1:
                            for pitch in memo.to_midi_pitch(
                                    tonic=self.__key_changer(
                                        self.meta['tonic'], memo.root, tonic)):
                                note = Note(pitch=pitch,
                                            velocity=80,
                                            start=current_pos,
                                            end=current_pos + length)
                                ins.notes.append(note)
                        current_pos += length
                        length = unit_length
                        memo = j
                for pitch in memo.to_midi_pitch(tonic=self.__key_changer(
                        self.meta['tonic'], memo.root, tonic)):
                    note = Note(pitch=pitch,
                                velocity=80,
                                start=current_pos,
                                end=current_pos + length)
                    ins.notes.append(note)
                current_pos += length

        else:
            if lib is None:
                lib = pickle_read('lib')
            try:
                all_notes = lib[self.meta['source']]
            except KeyError:
                Logging.error(
                    'Progression with source name {n} '
                    'cannot be found in the library! '
                    'Call set_in_lib(in_lib=False) to generate MIDI '
                    'from the progression list itself'.format(n=self.meta['source']))
                return False
            for note in all_notes:
                ins.notes.append(
                    Note(start=note[0] * unit_length,
                         end=note[1] * unit_length,
                         pitch=note[2] + str_to_root[tonic],
                         velocity=note[3]))

        midi.instruments.append(ins)
        return midi
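
A minimal usage sketch for the method above, assuming a hypothetical `progression` object that exposes this `to_midi` method along with the `PIANO` program constant used in its signature:

# `progression` is a stand-in name for whatever object defines to_midi()
midi = progression.to_midi(tempo=90, instrument=PIANO, tonic='C')
if midi is not None:
    midi.write('progression.mid')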
Example #2
    def melo_to_midi(melo, tonic='C', unit=0.125):
        ins = Instrument(program=0)
        current_pitch = MIDILoader.__melo_number_to_pitch(melo[0])
        start = 0
        for i in range(len(melo)):
            if i == len(melo) - 1:
                note = Note(pitch=current_pitch, velocity=80, start=start * unit, end=(i + 1) * unit)
                ins.notes.append(note)
                break
            if melo[i + 1] != melo[i]:
                if current_pitch != 0:
                    note = Note(pitch=current_pitch, velocity=80, start=start * unit, end=(i + 1) * unit)
                    ins.notes.append(note)
                current_pitch = MIDILoader.__melo_number_to_pitch(melo[i + 1])
                start = i + 1

        return ins
Example #3
def array_to_pm(array,
                fs=DEFAULT_FS,
                velocity=DEFAULT_VELOCITY,
                pitch_range=DEFAULT_PITCH_RANGE):

    pm = PrettyMIDI()
    inst = Instrument(1)
    pm.instruments.append(inst)

    last_notes = {}
    last_state = np.zeros(array.shape[1]) > 0.5

    for i, step in enumerate(array):
        now = i / fs
        step = step > 0.5
        changed = step != last_state
        for pitch in np.where(changed)[0]:
            if step[pitch]:
                last_notes[pitch] = Note(velocity, pitch + pitch_range.start,
                                         now, None)
                inst.notes.append(last_notes[pitch])
            else:
                last_notes[pitch].end = now
                del last_notes[pitch]
        last_state = step

    now = (i + 1) / fs
    for note in last_notes.values():
        note.end = now

    return pm
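
A short usage sketch for `array_to_pm`, assuming a piano-roll array of shape (time steps, pitches) and the module-level defaults (`DEFAULT_FS`, `DEFAULT_VELOCITY`, `DEFAULT_PITCH_RANGE`) defined elsewhere in that file:

# Hypothetical example: 32 steps at fs=16 (2 seconds) with one sustained pitch
import numpy as np

roll = np.zeros((32, 88))      # (time steps, pitch bins)
roll[4:20, 39] = 1.0           # one note held for 16 steps
pm = array_to_pm(roll, fs=16)  # velocity and pitch_range fall back to the defaults
pm.write('roll.mid')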
Example #4
def listen_pitches(midi_pitch: list, time, instrument=0):
    midi = PrettyMIDI()
    ins = Instrument(instrument)
    for i in midi_pitch:
        ins.notes.append(Note(pitch=i, start=0, end=time, velocity=80))
    midi.instruments.append(ins)
    listen(midi)
Example #5
def devectorize(note_array):
    """
    Converts a vectorized note sequence into a list of pretty_midi Note
    objects
    """
    return [Note(start = a[0], end = a[1], pitch=a[2],
        velocity=a[3]) for a in note_array.tolist()]
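
A tiny sketch of the expected input for `devectorize`; the [start, end, pitch, velocity] column order is taken from the comprehension above:

import numpy as np

# two notes as rows of [start, end, pitch, velocity]
vec = np.array([[0.0, 0.5, 60, 80],
                [0.5, 1.0, 64, 80]])
notes = devectorize(vec)  # -> list of pretty_midi.Note objects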
def midi_to_source_base(midi: PrettyMIDI, tonic):
    notes = midi.instruments[0].notes

    # ensure that the first note starts at time 0
    start_time = 1000
    ori_start = 1000
    for note in notes:
        if note.start < start_time:
            start_time = note.start
            ori_start = note_time_to_pos(note.start)
    new_notes = []
    for note in notes:
        new_notes.append(Note(start=note.start - start_time,
                              end=note.end - start_time,
                              velocity=note.velocity,
                              pitch=note.pitch))
    notes = new_notes

    # change note format
    all_formatted_notes = []
    max_end = 0
    for note in notes:
        start = note_time_to_pos(note.start)
        end = note_time_to_pos(note.end)
        if end > max_end:
            max_end = end
        pitch = change_key_to_C(note.pitch, tonic)
        formatted_notes = [start, end, pitch, note.velocity]
        all_formatted_notes.append(formatted_notes)
    return all_formatted_notes, ori_start, max_end
Example #7
    def next_note(self, notes):
        """


        Parameters
        ----------
        notes

        Returns
        -------
        prettymidi.Note
        """
        notes = normalize_song(notes)
        fnotes = self.notebased_featurer.feature_notes(notes)
        for fnote in fnotes:
            fnote.calculate_features()
        sequence = self.notebased_featurer.extract_sequence_for_prediction(
            fnotes)
        pitch_array = self.pitch_model.predict(sequence)
        pitch_array = (pitch_array == np.max(pitch_array)).astype('float32')
        # TODO: Allow other strategies
        wait = self.wait_model.predict([sequence, pitch_array])[0][0]

        # Convert back to note
        pitch = self.notebased_featurer.unmap_pitch(np.argmax(pitch_array))
        start = notes[-1].start + wait

        # 127 is the maximum MIDI velocity
        return Note(pitch=pitch, start=start, end=start + 0.5, velocity=127)
Example #8
 def __load_pop909_melo(self):
     pop909_loader = MIDILoader(files='POP909')
     pitch_list = pop909_loader.get_full_midi_ins_from_pop909(index=self.midi_path, change_key_to='C')
     ins = Instrument(program=0)
     current_pitch = pitch_list[0]
     start = 0
     for i in range(len(pitch_list)):
         if i == len(pitch_list) - 1:
             note = Note(pitch=current_pitch, velocity=80, start=start * 0.125, end=(i + 1) * 0.125)
             ins.notes.append(note)
             break
         if pitch_list[i + 1] != pitch_list[i]:
             if current_pitch != 0:
                 note = Note(pitch=current_pitch, velocity=80, start=start * 0.125, end=(i + 1) * 0.125)
                 ins.notes.append(note)
             current_pitch = pitch_list[i + 1]
             start = i + 1
     return ins
 def transpose_to_key(self, key_to_transpose, notes):
     pitches = self.get_pitches
     current_key = self.get_major_key_histogram(pitches).argmax()
     transpose_amount = key_to_transpose - current_key
     new_notes = [(Note(velocity=x.velocity,
                        pitch=x.pitch + transpose_amount,
                        start=x.start,
                        end=x.end)) for x in notes]
     return new_notes
Example #10
def nmat2ins(nmat, program=0, tempo=120, sixteenth_notes_in_bar=16) -> Instrument:
    ins = Instrument(program=program)
    snib = sixteenth_notes_in_bar
    unit_length = (60 / tempo) / 4
    for note in nmat:
        midi_note = Note(pitch=note[0], velocity=note[1],
                         start=(note[2][0] - 1) * unit_length * snib + note[2][1] * unit_length,
                         end=(note[3][0] - 1) * unit_length * snib + note[3][1] * unit_length + 0.05)
        ins.notes.append(midi_note)
    return ins
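
A brief usage sketch for `nmat2ins`; the note-matrix row layout [pitch, velocity, (bar, step) start, (bar, step) end] with 1-based bars is inferred from the arithmetic above:

# hypothetical two-note matrix at 120 BPM with 16 sixteenth notes per bar
nmat = [
    [60, 90, (1, 0), (1, 4)],
    [64, 90, (1, 4), (1, 8)],
]
ins = nmat2ins(nmat, program=0, tempo=120)
midi = PrettyMIDI()
midi.instruments.append(ins)
midi.write('nmat.mid')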
Example #11
def pitch_lists_to_midi_file(pitch_lists, midi_path):
    midi = PrettyMIDI()
    ins = Instrument(0)
    cursor = 0
    unit_length = 0.125
    for pitch_list in pitch_lists:
        for pitch in pitch_list:
            if pitch != 0:
                ins.notes.append(Note(start=cursor, end=cursor + unit_length, pitch=pitch, velocity=60))
            cursor += unit_length
    midi.instruments.append(ins)
    midi.write(midi_path)
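
Usage is direct; a sketch assuming eighth-note steps where pitch 0 marks a rest:

# one bar of a C major arpeggio followed by a rest, as MIDI pitch numbers
pitch_lists_to_midi_file([[60, 64, 67, 72, 0, 67, 64, 60]], 'arpeggio.mid')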
Example #12
    def to_note_seq(self):
        time = 0
        notes = []

        velocity = DEFAULT_VELOCITY
        velocity_bins = EventSeq.get_velocity_bins()

        last_notes = {}

        for event in self.events:
            if event.type == 'note_on':
                pitch = event.value + EventSeq.pitch_range.start
                note = Note(velocity, pitch, time, None)
                notes.append(note)
                last_notes[pitch] = note

            elif event.type == 'note_off':
                pitch = event.value + EventSeq.pitch_range.start

                if pitch in last_notes:
                    note = last_notes[pitch]
                    note.end = max(time, note.start + MIN_NOTE_LENGTH)
                    del last_notes[pitch]

            elif event.type == 'velocity':
                index = min(event.value, velocity_bins.size - 1)
                velocity = velocity_bins[index]
                # velocity = velocity_bins[24] #100

            elif event.type == 'time_shift':
                time += EventSeq.time_shift_bins[event.value]

        for note in notes:
            if note.end is None:
                note.end = note.start + DEFAULT_NOTE_LENGTH

            note.velocity = int(note.velocity)

        return NoteSeq(notes)
Example #13
File: train_f.py Project: nzinci/Sonia
    def conv2note_seq(self):
        time = 0
        notes = []

        velocity = STATE_VELOCITY
        velocity_bins = Event_Seqce.getting_veloc_basket()

        last_notes = {}

        for event in self.events:
            if event.type == 'note_on':
                pitch = event.value + Event_Seqce.pitch_range.start
                note = Note(velocity, pitch, time, None)
                notes.append(note)
                last_notes[pitch] = note

            elif event.type == 'note_off':
                pitch = event.value + Event_Seqce.pitch_range.start

                if pitch in last_notes:
                    note = last_notes[pitch]
                    note.end = max(time, note.start + MIN_NOTE_LENGTH)
                    del last_notes[pitch]

            elif event.type == 'velocity':
                index = min(event.value, velocity_bins.size - 1)
                velocity = velocity_bins[index]

            elif event.type == 'time_shift':
                time += Event_Seqce.time_shift_bins[event.value]

        for note in notes:
            if note.end is None:
                note.end = note.start + STATE_NOTE_LENGTH

            note.velocity = int(note.velocity)

        return Note_Seqce(notes)
Example #14
def read(midis, n_velocity_events=32, n_time_shift_events=125):

    note_sequences = []

    for m in midis:
        if m.instruments[0].program == 0:
            piano_data = m.instruments[0]
        else:
            raise PreprocessingError("Non-piano midi detected")
        # fold sustain-pedal events into note durations, then sort by time and pitch
        note_sequence = apply_sustain(piano_data)
        note_sequence = sorted(note_sequence, key=lambda x: (x.start, x.pitch))
        # quantize timing/velocity and turn each sequence into vectors
        note_sequence = quantize(note_sequence, n_velocity_events,
                                 n_time_shift_events)
        note_sequences.append(vectorize(note_sequence))

    return note_sequences
Example #15
    def decode(cls, string_pattern: List[str], bpm: int, beats: int,
               steps: int):
        """
        Decode the pattern of a string list and create a sequencer from it.
        """
        ppqn = steps
        end_tick = beats * steps
        # Allocate the array
        pattern = np.empty((128, len(string_pattern)), dtype=object)
        pattern.fill(None)
        for start_tick, string in enumerate(string_pattern):
            for pitch in _string_to_pitch(string):
                if start_tick <= end_tick:
                    start_seconds = tick2second(start_tick, ppqn,
                                                bpm2tempo(bpm))
                    pattern[pitch,
                            start_tick] = Note(_VELOCITY, pitch, start_seconds,
                                               start_seconds + _NOTE_DURATION)

        return cls(pattern, bpm, beats, steps)
Example #16
 def tokens2midi(self, tokens):
     """
     Игнорирование токенов педали
     :param tokens:
     :return:
     """
     midi = PrettyMIDI()
     program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')
     piano = Instrument(program=program)
     velocity = 0
     t = 0
     pitch2start = {}
     pitch2end = {}
     pitch2velocity = {}
     for token in tokens:
         if token.startswith("PEDAL"):
             continue
         value = int(token.split("_")[-1])
         if token.startswith("SET_VELOCITY"):
             velocity = value
         if token.startswith("TIME_SHIFT"):
             t += value
             pitch2end = {k: v + value for k, v in pitch2end.items()}
         if token.startswith("NOTE_ON"):
             pitch2start[value] = t
             pitch2end[value] = t
             pitch2velocity[value] = velocity
         if token.startswith("NOTE_OFF"):
             if value in pitch2start:
                 start = pitch2start.pop(value)
                 end = pitch2end.pop(value)
                 if end > start:
                     note = Note(
                         velocity=self._bin2velocity(pitch2velocity.pop(value)),
                         pitch=value,
                         start=start / 1000,
                         end=end / 1000
                     )
                     piano.notes.append(note)
     midi.instruments.append(piano)
     return midi
Example #17
def dump_midi(data, note_sets, path):
    midi_file = PrettyMIDI(resolution=220, initial_tempo=120)
    track = Instrument(0)
    time = 0

    # Shift first timing to 0
    #time -= note_sets['timing'][data[0][0]] * 30

    for note in data:
        # <padding> == 0
        if note[0] == 0:
            continue
        time += note_sets['timing'][note[0]] * 15 / 120
        track.notes.append(
            Note(velocity=100,
                 start=time,
                 end=time + note_sets['duration'][note[1]] * 15 / 120,
                 pitch=note_sets['pitch'][note[2]]))
        #print(track.notes[-1])
    midi_file.instruments.append(track)
    midi_file.write(path)
def create_midi(title,
                data,
                instrument_name='Acoustic Grand Piano',
                threshold=0.6):
    # Create a PrettyMIDI object
    song = PrettyMIDI()
    # Create an Instrument instance for the chosen instrument
    instrument_program = instrument_name_to_program(instrument_name)
    instrument = Instrument(program=instrument_program)
    # Iterate over all note probabilities
    for sec in range(len(data)):
        # Iterate over all notes
        for note_number in range(NOTE_RANGE):
            if data[sec][note_number] > threshold:
                # Create a Note instance for this note, starting at 0s and ending at .5s
                note = Note(velocity=100, pitch=note_number, start=0, end=.5)
                # Add it to our instrument
                instrument.notes.append(note)

    # Add the instrument to the PrettyMIDI object
    song.instruments.append(instrument)
    # Write out the MIDI data
    song.write('{}.mid'.format(title))
Example #19
File: core.py Project: jin0g/soundset
 def to_wave(self,
             instrument,
             font=None,
             stereo=False,
             rate=44100,
             mono_dim2=False,
             clip=True):
     # find default soundfont if needed
     if font is None: font = default_path('TimGM6mb.sf2')
     assert 0 <= instrument < 128
     # 1.create midi file
     from pretty_midi import PrettyMIDI, Instrument, Note
     midi = PrettyMIDI(resolution=960, initial_tempo=self.tempo)
     inst = Instrument(instrument)
     reso = 60 / self.tempo * 4 / self.beat
     for i, ns in enumerate(self.notes):
         for n in ns:
             inst.notes.append(
                 Note(velocity=100,
                      pitch=n,
                      start=i * reso,
                      end=i * reso + reso))
     midi.instruments.append(inst)
     midi.write('temp.mid')
     # 2.create wave file
     from midi2audio import FluidSynth
     fs = FluidSynth(font, sample_rate=rate)
     fs.midi_to_audio('temp.mid', 'temp.wav')
     # 3.import wav file
     from scipy.io import wavfile
     _, wave = wavfile.read('temp.wav')
     # clip
     if clip:
         le = len(self.notes)
         wave = wave[:int(rate * reso * le)]
     wave = wave.astype(float) / abs(wave).max() * 0.9
     return wave
Example #20
 def tokens2midi(self, tokens: List[str]) -> PrettyMIDI:
     midi = PrettyMIDI()
     program = instrument_name_to_program('Acoustic Grand Piano')
     piano = Instrument(program=program)
     velocity = 0
     t = 0
     pitch2start = {}
     pitch2end = {}
     pitch2velocity = {}
     n_tokens = len(tokens)
     for i in range(n_tokens):
         tok_i = tokens[i]
         value = int(tok_i.split("_")[-1])
         if tok_i.startswith("SET_VELOCITY"):
             velocity = value
         elif tok_i.startswith("TIME_SHIFT"):
             t += value
             pitch2end = {k: v + value for k, v in pitch2end.items()}
         elif tok_i.startswith("NOTE_ON"):
             pitch2start[value] = t
             pitch2end[value] = t
             pitch2velocity[value] = velocity
         elif tok_i.startswith("NOTE_OFF"):
             if value in pitch2start:
                 start = pitch2start.pop(value)
                 end = pitch2end.pop(value)
                 if end > start:
                     note = Note(
                         velocity=self._bin2velocity(pitch2velocity.pop(value)),
                         pitch=value,
                         start=start / 1000,
                         end=end / 1000
                     )
                     piano.notes.append(note)
     midi.instruments.append(piano)
     return midi
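
A hedged usage sketch for the decoder above; the token vocabulary and the `_bin2velocity` mapping belong to the source project, so the values here are purely illustrative (TIME_SHIFT appears to be in milliseconds, given the division by 1000):

tokens = [
    "SET_VELOCITY_20",
    "NOTE_ON_60",
    "TIME_SHIFT_500",  # hold middle C for roughly 0.5 s
    "NOTE_OFF_60",
]
midi = tokenizer.tokens2midi(tokens)  # `tokenizer` is whatever object defines the method
midi.write('decoded.mid')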
Example #21
def _tuples_to_notes(tuples):
    return [Note(start=s, end=e, pitch=p, velocity=v) for s, e, p, v in tuples]
Example #22
from pretty_midi import Note
import sys, pdb
sys.path.append("..")
from preprocess import SequenceEncoder
from helpers import vectorize

sample_note_sequence0 = [[
    Note(start=0.928000, end=1.720000, pitch=54, velocity=25),
    Note(start=0.952000, end=1.744000, pitch=42, velocity=25),
    Note(start=0.952000, end=1.720000, pitch=47, velocity=29),
    Note(start=1.384000, end=1.944000, pitch=62, velocity=41),
    Note(start=1.384000, end=1.968000, pitch=59, velocity=29),
    Note(start=1.368000, end=1.952000, pitch=35, velocity=33),
    Note(start=1.688000, end=2.184000, pitch=50, velocity=37),
    Note(start=1.720000, end=2.184000, pitch=54, velocity=37),
    Note(start=1.744000, end=2.208000, pitch=42, velocity=29),
    Note(start=1.720000, end=2.216000, pitch=47, velocity=21),
    Note(start=1.944000, end=2.384000, pitch=62, velocity=41),
    Note(start=1.968000, end=2.376000, pitch=59, velocity=9),
    Note(start=1.952000, end=2.392000, pitch=35, velocity=29),
    Note(start=2.184000, end=2.664000, pitch=50, velocity=29),
    Note(start=2.216000, end=2.664000, pitch=47, velocity=17),
    Note(start=2.208000, end=2.664000, pitch=42, velocity=33),
    Note(start=2.184000, end=2.656000, pitch=54, velocity=37),
    Note(start=2.384000, end=2.872000, pitch=62, velocity=33),
    Note(start=2.376000, end=3.344000, pitch=59, velocity=29),
    Note(start=2.392000, end=2.856000, pitch=35, velocity=33),
    #need to experiment with longer pauses between notes
    Note(start=4.8, end=5.8, pitch=40, velocity=37)
]]
Example #23
def _add_note(pm_inst: Instrument, pitch: int, vel: int, note_start_s: float,
              note_end_s: float) -> None:
    note = Note(vel, pitch, note_start_s, note_end_s)
    pm_inst.notes.append(note)
Example #24
def test_dataset_hook_slakh(benchmark_audio):
    # make a fake slakh directory.
    band = {"guitar": [30, 31], "drums": [127]}
    only_guitar = {"guitar": [30, 31]}
    empty = {}
    bad = {"guitar": [30], "guitar_2": [30]}
    with tempfile.TemporaryDirectory() as tmpdir:
        track_dir = os.path.join(tmpdir, "Track")
        os.mkdir(track_dir)
        # Create Metadata file
        metadata = "audio_dir: stems"
        metadata += "\nmidi_dir: MIDI"
        metadata += "\nstems:"
        metadata += "\n  S00:"
        metadata += "\n    program_num: 30"
        metadata += "\n  S01:"
        metadata += "\n    program_num: 127"
        metadata += "\n  S02:"
        metadata += "\n    program_num: 30"
        metadata += "\n  S03:"
        metadata += "\n    program_num: 30"
        metadata_path = os.path.join(track_dir, "metadata.yaml")
        with open(metadata_path, "w") as metadata_file:
            metadata_file.write(metadata)

        stems_dir = os.path.join(track_dir, "stems")
        midi_dir = os.path.join(track_dir, "MIDI")
        os.mkdir(stems_dir)
        os.mkdir(midi_dir)

        # Note: These aren't actually guitar and drums
        guitar_path1 = os.path.join(stems_dir, "S00.wav")
        guitar_path2 = os.path.join(stems_dir, "S02.wav")
        guitar_path3 = os.path.join(stems_dir, "S03.wav")
        drums_path = os.path.join(stems_dir, "S01.wav")
        mix_path = os.path.join(track_dir, "mix.wav")

        # making midi objects
        midi_0 = PrettyMIDI()
        midi_1 = PrettyMIDI()
        guitar = Instrument(30, name="guitar")
        guitar.notes = [Note(70, 59, 0, 1)]
        drum = Instrument(127, is_drum=True, name="drum")
        drum.notes = [Note(40, 30, 0, 1)]
        midi_0.instruments.append(guitar)
        midi_1.instruments.append(drum)
        midi_0.write(os.path.join(midi_dir, "S00.mid"))
        midi_1.write(os.path.join(midi_dir, "S01.mid"))
        midi_0.write(os.path.join(midi_dir, "S02.mid"))
        midi_0.write(os.path.join(midi_dir, "S03.mid"))

        midi_mix = PrettyMIDI()
        midi_mix.instruments += [guitar, drum]
        midi_mix.write(os.path.join(track_dir, "all_src.mid"))

        # Move them within directory
        shutil.copy(benchmark_audio['K0140.wav'], guitar_path1)
        shutil.copy(benchmark_audio['K0149.wav'], drums_path)
        # Make a mix from them.
        guitar_signal = nussl.AudioSignal(path_to_input_file=guitar_path1)
        drums_signal = nussl.AudioSignal(path_to_input_file=drums_path)
        guitar_signal.truncate_seconds(2)
        drums_signal.truncate_seconds(2)
        mix_signal = guitar_signal + drums_signal

        mix_signal.write_audio_to_file(mix_path)
        drums_signal.write_audio_to_file(drums_path)
        guitar_signal.write_audio_to_file(guitar_path1)
        guitar_signal.write_audio_to_file(guitar_path3)
        guitar_signal.write_audio_to_file(guitar_path2)

        # now that our fake slakh has been created, lets try some mixing
        band_slakh = nussl.datasets.Slakh(tmpdir,
                                          band,
                                          midi=True,
                                          make_submix=True,
                                          max_tracks_per_src=1)
        assert len(band_slakh) == 1
        data = band_slakh[0]
        _mix_signal, _sources = data["mix"], data["sources"]
        assert np.allclose(mix_signal.audio_data, _mix_signal.audio_data)
        assert len(_sources) == 2
        assert np.allclose(_sources["drums"].audio_data,
                           drums_signal.audio_data)
        assert np.allclose(_sources["guitar"].audio_data,
                           guitar_signal.audio_data)
        _midi_mix, _midi_sources = data["midi_mix"], data["midi_sources"]
        assert len(_midi_mix.instruments) == 2
        assert len(_midi_sources) == 2
        assert _midi_sources["guitar"][0].instruments[0].program == 30
        assert _midi_sources["drums"][0].instruments[0].program == 127

        band_slakh = nussl.datasets.Slakh(tmpdir,
                                          band,
                                          midi=True,
                                          make_submix=False)
        data = band_slakh[0]
        _mix_signal, _sources = data["mix"], data["sources"]
        assert isinstance(_sources["guitar"], list)
        assert len(_sources) == 2
        assert len(_sources["guitar"]) == 3
        assert len(_sources["drums"]) == 1
        assert np.allclose(
            sum(_sources["guitar"]).audio_data, 3 * guitar_signal.audio_data)

        with pytest.raises(DataSetException):
            not_enough_instruments = nussl.datasets.Slakh(
                tmpdir,
                band,
                midi=True,
                make_submix=True,
                min_acceptable_sources=3)

        guitar_slakh = nussl.datasets.Slakh(tmpdir,
                                            only_guitar,
                                            make_submix=True,
                                            min_acceptable_sources=1,
                                            max_tracks_per_src=1)
        data = guitar_slakh[0]
        _guitar_signal, _sources = data["mix"], data["sources"]
        assert len(_sources) == 1
        assert np.allclose(_sources["guitar"].audio_data,
                           guitar_signal.audio_data)
        assert np.allclose(_guitar_signal.audio_data, guitar_signal.audio_data)

        with pytest.raises(DataSetException):
            empty_slakh = nussl.datasets.Slakh(tmpdir,
                                               empty,
                                               min_acceptable_sources=1)

        with pytest.raises(ValueError):
            nussl.datasets.Slakh(tmpdir, band, min_acceptable_sources=0)
        with pytest.raises(ValueError):
            nussl.datasets.Slakh(tmpdir, band, max_tracks_per_src=0)
        with pytest.raises(ValueError):
            bad_slakh = nussl.datasets.Slakh(tmpdir, bad)
Example #25
def read(n_velocity_events=32, n_time_shift_events=125):
    midiin = rtmidi.MidiIn()
    available_ports = midiin.get_ports()

    if available_ports:
        print("Connecting to midi-in port!")
        midiin.open_port(0)
    else:
        raise MidiInputError("Midi ports not available...")

    msg_sequence = []

    while True:
        proceed = input(
            "Play something on the keyboard and enter 'c' to continue or 'q' to quit.\n"
        )
        if proceed == "c":
            midiin.close_port()
            break
        elif proceed == "q":
            return
        else:
            print("Command not recognized")
            continue

    while True:
        msg = midiin.get_message()
        if msg is None:
            break
        else:
            msg_sequence.append(msg)

    if len(msg_sequence) == 0:
        raise MidiInputError("No messages detected")

    note_sequence = []
    i = 0
    # notes that haven't ended yet
    live_notes = {}
    while i < len(msg_sequence):
        info, time_delta = msg_sequence[i]
        if i == 0:
            # start time tracking from zero
            time = 0
        else:
            # shift forward
            time = time + time_delta
        pitch = info[1]
        velocity = info[2]
        if velocity > 0:
            # (pitch on, velocity, start_time relative)
            live_notes.update({pitch: (velocity, time)})
        else:
            note_info = live_notes.get(pitch)
            if note_info is None:
                raise MidiInputError("what?")
            note_sequence.append(
                Note(pitch=pitch,
                     velocity=note_info[0],
                     start=note_info[1],
                     end=time))
            live_notes.pop(pitch)

        i += 1

    note_sequence = quantize(note_sequence, n_velocity_events,
                             n_time_shift_events)

    note_sequence = vectorize(note_sequence)
    return note_sequence
Example #26
    def decode_sequence(self, encoded_sequence, stuck_note_duration=None, keep_ghosts=False, verbose=False):
        """
        Takes in an encoded event sequence (sparse numerical representation) and transforms it back into a pretty_midi Note sequence. Randomly-generated encoded sequences, such as produced by the generation script, can have some unusual traits such as notes without a provided end time. Contains logic to handle these pathological notes.

        Args:
            encoded_sequence (list): List of events encoded as integers
            stuck_note_duration (int or None): if defined, for recovered notes missing an endtime, give them a fixed duration (as number of seconds held)
            keep_ghosts (bool): if true, when the decoding algorithm recovers notes with an end time preceding their start time, keep them by swapping start and end. If false, discard the "ghost" notes
            verbose (bool): If true, print results on how many stuck notes and ghost notes are detected.
        """
        events = []
        for num in encoded_sequence:
            events.append(self.number_to_event(num))
        #list of pseudonotes = {'start':x, 'pitch':something, 'velocity':something}
        notes = []
        #on the second pass, add in end time
        note_ons = []
        note_offs = []
        global_time = 0
        current_velocity = 0
        for event, value in events:
            #check event type
            if event == "TIME_SHIFT":
                global_time += 0.008 * value
                global_time = round(global_time, 5)

            elif event == "VELOCITY":
                current_velocity = value
            
            elif event == "NOTE_OFF":
                #eventually we'll sort this by timestamp and work thru
                note_offs.append({"pitch": value, "end": global_time})
            
            elif event == "NOTE_ON":
                #it's a NOTE_ON!
                #value is pitch 
                note_ons.append({"start": global_time, 
                    "pitch": value, "velocity": current_velocity})
            else:
                raise SequenceEncoderError("you fool!")

        #keep a count of notes that are missing an end time (stuck notes)
        #----default behavior is to ignore them. 
        stuck_notes = 0
        
        #keep a count of notes assigned end times *before* their start times (ghost notes)
        #----default behavior is to ignore them
        ghost_notes = 0


        #Zip up notes with corresponding note-off events
        while len(note_ons) > 0:
            note_on = note_ons[0]
            pitch = note_on['pitch']
            #this assumes everything is sorted nicely!
            note_off = next((n for n in note_offs if n['pitch'] == pitch), None)
            if note_off is None:
                stuck_notes += 1
                if stuck_note_duration is None:
                    note_ons.remove(note_on)
                    continue
                else:
                    note_off = {"pitch": pitch, "end": note_on['start'] + stuck_note_duration}
            else:
                note_offs.remove(note_off)

            if note_off['end'] < note_on['start']:
                ghost_notes += 1
                if keep_ghosts:
                    #reverse start and end (and see what happens...!)
                    new_end = note_on['start']
                    new_start = note_off['end']
                    note_on['start'] = new_start
                    note_off['end'] = new_end
                else:
                    note_ons.remove(note_on)
                    continue

            note = Note(start = note_on['start'], end = note_off['end'],
                    pitch = pitch, velocity = note_on['velocity'])
            notes.append(note)
            note_ons.remove(note_on)

        if verbose:
            print(f"{stuck_notes} notes missing an end-time...")
            print(f"{ghost_notes} had an end-time precede their start-time")

        return notes
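
Finally, a hedged sketch of calling `decode_sequence`; the integer event numbering comes from the encoder's `number_to_event`, so the literal values below are placeholders rather than a real encoding:

# `encoder` is an instance of the class that owns decode_sequence;
# the numbers are placeholder event ids, not a meaningful sequence.
encoded = [312, 45, 130, 78]
notes = encoder.decode_sequence(encoded,
                                stuck_note_duration=1,
                                keep_ghosts=False,
                                verbose=True)
for note in notes:
    print(note)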