def record_message(self, message):
    """Append *message* to the recording buffer, stamped with the delta
    time (in MIDI ticks) since the previously recorded message.

    No-op unless recording is active; resets the idle timer on every
    recorded message.
    """
    if not self.recording:
        return

    self.idle_timer.reset()

    now = time.time()

    # The first message of a recording defines the time origin, so it
    # ends up with a zero delta time.
    if not self._buffer:
        self.last_message_time = now

    elapsed = now - self.last_message_time
    self.last_message_time = now

    # Convert elapsed wall-clock seconds to ticks at the current bpm.
    message.time = second2tick(elapsed, TICKS_PER_BEAT, bpm2tempo(self.bpm))

    self._buffer.append(message)
    print(message)
Пример #2
0
def make_string_track(track_data,
                      ticks_per_beat,
                      bpm=120,
                      velocity=80,
                      program=1):
    """Build a mido string track on MIDI channel 2.

    track_data[0] holds note numbers and track_data[2] beat durations
    (track_data[1] is unused here).  Beats are grouped in threes: a
    note_on fires on the first beat of each group and the matching
    note_off on the third, so each note sounds for three beat entries.
    A final note_off closes whatever note is still sounding.

    NOTE(review): tick conversion uses a fixed 120-bpm tempo even though
    the beat length is scaled by *bpm* -- confirm this is intended.
    """
    track = mido.MidiTrack()
    track.append(mido.Message("program_change", program=program, channel=2))
    mask_gate = 0  # position within the current 3-beat group (0, 1, 2)
    acc_ticks = 0  # ticks accumulated since the last note_off
    playing_note = 0  # note currently sounding, closed on the third beat
    for note1, beat in zip(track_data[0], track_data[2]):
        ticks = int(
            mido.second2tick((60 / bpm) * beat, ticks_per_beat,
                             mido.bpm2tempo(120)))
        acc_ticks += ticks
        if mask_gate == 0:
            # First beat of the group: start the note immediately.
            msg11 = mido.Message('note_on',
                                 note=note1,
                                 velocity=velocity,
                                 time=0,
                                 channel=2)
            track.append(msg11)
            playing_note = note1
            mask_gate += 1
        elif mask_gate == 2:
            # Third beat: end the note after the accumulated ticks.
            msg21 = mido.Message('note_off',
                                 note=playing_note,
                                 velocity=velocity,
                                 time=acc_ticks,
                                 channel=2)
            track.append(msg21)
            mask_gate = 0
            acc_ticks = 0
        else:
            mask_gate += 1
    # Close the note still sounding when the input data runs out.
    track.append(
        mido.Message('note_off',
                     note=playing_note,
                     velocity=velocity,
                     time=acc_ticks,
                     channel=2))
    return track
Пример #3
0
    def __call__(self, event, data=None):
        """rtmidi-style input callback: record the incoming event on the track.

        *event* is a (message-bytes, deltatime-seconds) pair.  Active-sense
        messages (status 254) are not recorded, but their delta times are
        accumulated so the next real message carries the full elapsed time.
        """
        message, deltatime = event
        # if message[0] == 254:  #compensate for active sense delta times
        self.__activesense += deltatime
        # else:
        #     self.__activesense = deltatime
        if message[0] != 254:  #ignore active sense
            # Accumulated seconds since the last recorded message -> ticks.
            miditime = int(
                round(
                    mido.second2tick(self.__activesense,
                                     self.__mid.ticks_per_beat,
                                     mido.bpm2tempo(self.tempo))))

            if self.debug:
                print('deltatime: ', deltatime, 'msg: ', message,
                      'activecomp: ', self.__activesense)
            else:  #only print note on
                if message[0] == 144: print(message[1])

            if message[0] == self.on_id:
                self.__track.append(
                    Message('note_on',
                            note=message[1],
                            velocity=message[2],
                            time=miditime))
                self.__activesense = 0
            elif message[0] == 176:
                # Control change.  NOTE(review): channel is hard-coded to 1
                # and the accumulated delta is NOT reset here -- confirm.
                self.__track.append(
                    Message('control_change',
                            channel=1,
                            control=message[1],
                            value=message[2],
                            time=miditime))
            else:
                # Everything else is treated as a note_off.
                # print("note off!")
                self.__track.append(
                    Message('note_off',
                            note=message[1],
                            velocity=message[2],
                            time=miditime))
                self.__activesense = 0
Пример #4
0
    def __next__(self):
        """Decode the next encoded event into MIDI track messages.

        Event value ranges (offsets are module-level constants):
          >= VEL_OFFSET      -> quantized velocity change (stored, not emitted)
          >= TIME_OFFSET     -> time shift, accumulated into self.delta_time
          >= NOTE_ON_OFFSET  -> note on (or note off when last velocity == 0)
        """
        evt = next(self.event_seq)

        # Interpret event data
        if evt >= VEL_OFFSET:
            # A velocity change
            self.last_velocity = (evt - VEL_OFFSET) * (MIDI_VELOCITY //
                                                       VEL_QUANTIZATION)
        elif evt >= TIME_OFFSET:
            # Shifting forward in time
            tick_bin = evt - TIME_OFFSET
            assert tick_bin >= 0 and tick_bin < TIME_QUANTIZATION
            seconds = TICK_BINS[tick_bin] / TICKS_PER_SEC
            # Accumulate rather than assign: several shifts may precede a note.
            self.delta_time += int(
                mido.second2tick(seconds, self.midi_file.ticks_per_beat,
                                 self.tempo))
        elif evt >= NOTE_ON_OFFSET:
            # Turning a note on (or off if velocity = 0)
            note = evt - NOTE_ON_OFFSET
            # We can turn a note on twice, indicating a replay
            if self.last_velocity == 0:
                # Note off
                if note in self.on_notes:
                    # We cannot turn a note off when it was never on
                    self.track.append(
                        mido.Message('note_off',
                                     note=note,
                                     time=self.delta_time))
                    self.on_notes.remove(note)
                    self.delta_time = 0
            else:
                self.track.append(
                    mido.Message('note_on',
                                 note=note,
                                 time=self.delta_time,
                                 velocity=self.last_velocity))
                self.on_notes.add(note)
                self.delta_time = 0
Пример #5
0
def write_midi_multi(multi_notes, midi_file_name):
    """Write a piano-roll-like boolean note sequence to a MIDI file.

    *multi_notes* is an iterable of per-step arrays of note states sampled
    every 5 ms; a note_on/note_off message is emitted whenever a note's
    state changes relative to the previous step.  Relies on module-level
    ``default_tempo`` and ``bottom_note``.
    """
    midi_file = MidiFile(ticks_per_beat=10000)

    track = MidiTrack()
    midi_file.tracks.append(track)

    track.append(Message('program_change', program=12, time=0))

    cur_time = 0  # seconds elapsed since the last emitted message
    prev_notes = np.full(89, False)
    for i, cur_notes in enumerate(multi_notes):
        cur_time += 0.005  # each step represents 5 ms
        cur_ticks = second2tick(cur_time, midi_file.ticks_per_beat,
                                default_tempo)
        for note_num in range(88):
            if cur_notes[note_num] == prev_notes[note_num]:
                continue
            else:
                if cur_notes[note_num]:
                    track.append(
                        Message('note_on',
                                note=int(bottom_note + note_num),
                                velocity=127,
                                time=int(cur_ticks)))
                    # Reset the delta clock so further changes in this same
                    # step carry a zero delta time.
                    cur_time = 0
                    cur_ticks = 0
                else:
                    track.append(
                        Message('note_off',
                                note=int(bottom_note + note_num),
                                velocity=127,
                                time=int(cur_ticks)))
                    cur_time = 0
                    cur_ticks = 0
        prev_notes = cur_notes

    midi_file.save(midi_file_name)
Пример #6
0
def to_vector(mid):
    """
    Produce vector format from midi, removing percussion

    Returns a list with one tuple of up to three note numbers per tick,
    using (NO_NOTE,) for silent ticks.  When the result is longer than
    MIDI_CHUNK, a random MIDI_CHUNK-long window of it is returned.
    """
    # Keep only non-percussion note_on messages (channel 9 is percussion).
    new_mid = MidiFile()
    for track in mid.tracks:
        new_track = MidiTrack()
        for msg in track:
            if msg.type == 'note_on' and msg.channel != 9:
                new_track.append(msg)
        new_mid.tracks.append(new_track[:])

    # Bucket notes by tick: advance the cursor dt empty ticks, then record.
    v = [[]]
    j = 0
    for msg in new_mid:
        if msg.type != 'note_on':
            continue

        dt = int(second2tick(msg.time, new_mid.ticks_per_beat, 500000))
        for __ in range(dt):
            v.append([])
            j += 1
        v[j].append(msg.note)

    # Cap each tick at three notes (randomly sampled) and mark empty ticks.
    result = []
    for x in v:
        if len(x) == 0:
            result.append((NO_NOTE, ))
        elif len(x) > 3:
            result.append(tuple(random.sample(x, 3)))
        else:
            result.append(tuple(x))
    # return [tuple(x) if len(x) > 0 else (-1,) for x in v]
    if len(result) > MIDI_CHUNK:
        x = random.randint(0, len(result) - MIDI_CHUNK)
        return result[x:(x + MIDI_CHUNK)]
    else:
        return result
Пример #7
0
def make_piano_track(track_data,
                     ticks_per_beat,
                     bpm=120,
                     velocity=80,
                     program=1):
    """Build a two-voice piano track from parallel note/beat sequences.

    track_data[0] and track_data[1] are the two note-number sequences,
    track_data[2] the beat durations.  Both notes of each pair start
    together; the first note's note_off carries the pair's duration in
    ticks (converted at a fixed 120-bpm tempo).
    """
    track = mido.MidiTrack()
    track.append(mido.Message("program_change", program=program))
    voice_a, voice_b, beats = track_data[0], track_data[1], track_data[2]
    for hi, lo, beat in zip(voice_a, voice_b, beats):
        duration = int(
            mido.second2tick((60 / bpm) * beat, ticks_per_beat,
                             mido.bpm2tempo(120)))
        track.append(
            mido.Message('note_on', note=hi, velocity=velocity, time=0))
        track.append(
            mido.Message('note_on', note=lo, velocity=velocity, time=0))
        track.append(
            mido.Message('note_off', note=hi, velocity=velocity,
                         time=duration))
        track.append(
            mido.Message('note_off', note=lo, velocity=velocity, time=0))
    return track
Пример #8
0
def save_song(msgs, filename):
    """Save mido messages (absolute times in seconds) to *filename*,
    converting each message's time to a delta in ticks.

    NOTE(review): *msgs* is mutated in place (msg.time is overwritten).
    """
    mid = mido.MidiFile()
    mid.ticks_per_beat = TICKS_PER_BEAT

    # Absolute time of the first note_on; anything earlier gets delta 0.
    first_time = None
    for msg in msgs:
        if msg.type == 'note_on':
            first_time = msg.time
            break

    last_time = first_time
    for msg in msgs:
        if msg.time < first_time:
            t = 0
        else:
            t = msg.time - last_time
        ticks = int(mido.second2tick(t, TICKS_PER_BEAT, 500000))
        last_time = msg.time
        msg.time = ticks
    track = mido.MidiTrack(msgs)

    mid.tracks.append(track)
    mid.save(filename)
Пример #9
0
	def save(self, output_name):
		"""Filter spurious short notes out of self.results, render the rest
		into a single-track MidiFile and save it to *output_name*.

		Raises Exception when self.results is None.  When *output_name* is
		None, results are still processed but nothing is rendered or saved.
		"""
		if self.results is None:
			raise Exception('Result is None')

		# Drop a pending note_on when the following event arrives less than
		# 25 ms after it (treat it as a spurious, too-short note).
		results = []
		for ev in self.results:
			if ev.msg.type != 'note_on':
				target = None
				for i in reversed(range(len(results))):
					if results[i].msg.type == 'note_on':
						target = i
						break
				if target is not None:
					duration = ev.msg.time - results[target].msg.time
					if duration < 0.025:
						del results[target]
						continue
			results.append(ev)

		tracks = None if output_name is None else []
		for i in range(len(results) - 1):
			self.__save(tracks, results[i], results[i+1])

		if tracks is not None:
			track = mido.MidiTrack()
			prevtick = 0
			for xmsg in tracks:
				# Recompute ticks from seconds: reusing the original ticks
				# would break alignment between tracks.
				tick = mido.second2tick(xmsg.msg.time, self.tpb, 500000)
				xmsg.msg.time = int(tick - prevtick)
				xmsg.msg.channel = 0
				prevtick = tick
				track.append(xmsg.msg)
			output = mido.MidiFile()
			output.tracks.append(track)
			output.ticks_per_beat = self.tpb
			output.save(output_name)
Пример #10
0
def ms_to_tick_gap_and_note_off(notes: List[Tuple[float, int]],
                                offset_ticks: int) -> List[Tuple[int, int]]:
    """Convert (milliseconds, note) events into (tick-gap, note) pairs.

    Every note gets a matching 'note_off' event -- encoded as the negated
    note number -- *offset_ticks* ticks later.  The combined stream is
    sorted by absolute tick (stable, so note_ons precede coinciding
    note_offs) and then re-expressed as gaps between consecutive events.
    """
    # Absolute tick position of each note_on.
    on_events = []
    for ms, note in notes:
        tick = int(second2tick(ms / 1000, TICKS_PER_BEAT, bpm2tempo(BPM)))
        on_events.append((tick, note))

    # Matching note_off events, tagged with a negated note number.
    events = on_events + [(tick + offset_ticks, -note)
                          for tick, note in on_events]
    events.sort(key=lambda pair: pair[0])

    # Re-express absolute ticks as gaps from the previous event.
    gaps: List[Tuple[int, int]] = []
    previous = 0
    for tick, note in events:
        gaps.append((tick - previous, note))
        previous = tick

    return gaps
Пример #11
0
def save_inferences_to_midi(inferences, filename='Contrapunctus_XIV.mid'):
    """Render per-voice inference lists into a multi-track MIDI file.

    Each element of *inferences* is one voice; each inference has a .note
    and a .duration (interpreted as tenths of a second).  note == 0 acts
    as a rest: its duration goes on the note_on instead of the note_off,
    so nothing audible is sustained.  Returns *filename*.
    """
    print('Producing Midi file...')
    outfile = MidiFile()
    temp = bpm2tempo(48)  # or 76?
    # print('ticks_per_beat:', outfile.ticks_per_beat)
    outfile.ticks_per_beat = 2496

    for voice in range(len(inferences)):
        track = MidiTrack()
        outfile.tracks.append(track)
        track.append(MetaMessage('set_tempo', tempo=temp))
        track.append(Message('program_change', program=1))

        for inf in inferences[voice]:
            # Duration in ticks at the tempo chosen above.
            t = int(second2tick(inf.duration / 10.0, outfile.ticks_per_beat,
                                temp))
            track.append(Message('note_on', velocity=64, note=inf.note,
                                 time=t if inf.note == 0 else 0))
            track.append(Message('note_off', velocity=64, note=inf.note,
                                 time=0 if inf.note == 0 else t))

    outfile.save(filename)
    print('MidiFile saved...')
    return filename
Пример #12
0
    def exploreMIDI(self):
        """Scan the source MIDI file for tempo and delta-time statistics.

        Sets self.minLength (GCD of the positive delta times, a candidate
        grid period), self.maxNote (total time divided by that period) and
        self.totalLength (file length converted to ticks).

        NOTE(review): timeSet.pop(0) assumes at least one positive delta
        time exists, and gcd(*timeSet) requires a gcd that accepts
        multiple arguments (math.gcd does only on Python 3.9+) -- confirm.
        """
        timeSet = []  # store possible periods
        totalTime = 0
        tempo = 500000  # Default value

        for i, track in enumerate(self.OG_Mido.tracks):
            for j, event in enumerate(track):
                print(event)
                if event.type == "set_tempo":
                    tempo = event.tempo
                if event.time > 0:
                    timeSet.append(event.time)
                    totalTime += event.time

                    # self.lowest = event.note if event.note < self.lowest else self.lowest
                    # self.highest = event.note if event.note > self.highest else self.highest

        # drop first event's delta time
        timeSet.pop(0)
        self.minLength = gcd(*timeSet)
        self.maxNote = totalTime // self.minLength
        self.totalLength = second2tick(self.OG_Mido.length,
                                       self.OG_Mido.ticks_per_beat, tempo)
Пример #13
0
def one_hots_to_pretty_midi(one_hots, tempo, tpb, fs=18, program=1, bpm=120):
    """Convert a list of 128-wide one-hot frames into a MIDI file.

    Each frame represents 1/16 s; consecutive active frames for the same
    pitch are merged into one sustained note.  *tpb*, *fs*, *program* and
    *bpm* are accepted for interface compatibility but are unused.

    NOTE: despite the name, this returns a mido.MidiFile, not a
    pretty_midi object.
    """
    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)

    track.append(mido.MetaMessage('set_tempo', tempo=tempo))

    # Transpose the input into a (128 pitches, n_frames) piano-roll matrix.
    one_hot = np.zeros((128, len(one_hots)))
    for i in range(len(one_hots)):
        for j in range(len(one_hots[i])):
            if one_hots[i][j] == 1:
                one_hot[j][i] = one_hots[i][j]

    frames = one_hot.shape[1]
    frame_t = one_hot.T

    # Collect (note, time-in-seconds, kind) events.  A run of consecutive
    # active frames for a pitch becomes a single note; the later frames of
    # the run are zeroed so they are not re-detected as new notes.
    toGenerate = []
    for frame in range(frames):
        for j in range(len(frame_t[frame])):
            if frame_t[frame][j] == 1:
                end_frame = frame + 1
                for k in range(frame + 1, frames):
                    if frame_t[k][j] == 1:
                        end_frame += 1
                        frame_t[k][j] = 0
                    else:
                        break
                toGenerate.append((j, frame * 1 / 16, 'begin'))
                toGenerate.append((j, end_frame * 1 / 16, 'end'))

    # Emit messages in chronological order with delta times in ticks.
    last_msg = 0
    for note in sorted(toGenerate, key=lambda x: x[1]):
        msg_type = 'note_off' if note[2] == 'end' else 'note_on'
        track.append(
            mido.Message(msg_type,
                         note=note[0],
                         time=int(
                             mido.second2tick(note[1] - last_msg,
                                              mid.ticks_per_beat,
                                              int(tempo * 1.5)))))
        last_msg = note[1]

    track.append(mido.MetaMessage('end_of_track'))

    return mid
Пример #14
0
 def seconds2ticks(self, seconds):
     """Convert *seconds* into MIDI ticks at this object's resolution and tempo."""
     return second2tick(seconds, self.ticks_per_beat, self.tempo)
Пример #15
0
ylabel('v')
tight_layout()
show()

#%% Kuramoto Sonification

# Group spike times by neuron index: track_times_dict maps each neuron id
# from the spike monitor to the list of its spike times.
track_times_dict = {}
j = 0

for i in spikemon.i:
    track_times_dict[i] = []
    
for i in spikemon.i:
    track_times_dict[i].append(spikemon.t[j])
    j+=1

mid = MidiFile()

# One MIDI track per neuron; each spike becomes a note whose delta time is
# the gap (in ticks at the default 500000 us/beat tempo) since that
# neuron's previous spike.  Pitch rises with the neuron's track index.
program_counter = 0
for i in track_times_dict:
    track = MidiTrack()
    track.append(Message('program_change', program=program_counter, time=0))
    temp = 0
    for time in track_times_dict[i]:
        track.append(Message('note_on', note=64+(program_counter*2), velocity=64, time=(int(mido.second2tick(time - temp, mid.ticks_per_beat, 500000)))))
        track.append(Message('note_off', note=64+(program_counter*2), velocity=127, time=64*1))
        temp = time 
    mid.tracks.append(track)
    program_counter+=1

mid.save('new_song.mid')
Пример #16
0
def frames2tick(frames, samplerate=samplerate):
    """Translate an audio frame count into an integer MIDI tick count.

    Relies on the module-level ``ticks_per_beat`` and ``tempo`` values.
    """
    seconds = frames / float(samplerate)
    return int(second2tick(seconds, ticks_per_beat, tempo))
Пример #17
0
def midi_to_piano_roll_augmented(midi_file, timestamp):
    # From a midi file, build an "augmented" piano roll
    #
    # Row 127 carries the sustain pedal state; a simple energy-decay model
    # keeps pedal-sustained notes sounding after key release.  Returns rows
    # 0..126 for a window starting at *timestamp*.

    #timestamp: used to extract only 30 seconds counted from the halfway point of the performance
    # NOTE(review): the extracted window actually spans timestamp to
    # timestamp+20 seconds (not 30) -- see start_time/stop_time below.
    ticks_in_track = np.ceil(mido.second2tick(midi_file.length, tempo=500000, ticks_per_beat=midi_file.ticks_per_beat))
    decay_per_tick = 28 / mido.second2tick(1, tempo=500000, ticks_per_beat=midi_file.ticks_per_beat)
    
    piano_roll = np.zeros([128, int(ticks_in_track)])   # piano roll with x axis labeled by ticks and y axis labeled by midi note number
    # roll 127 is the sustain pedal

    time = 0
    pedal_flag = 0           # 1 if sustain pedal is pressed, 0 otherwise

    # Maps note ons and note offs (note offs are noted in the file as note on with velocity==0)
    for msg in midi_file.tracks[1]:
        time += msg.time
        if msg.type == 'note_on':
            if msg.velocity > 0:
                piano_roll[msg.note][time] = msg.velocity
            else: # note-off
                piano_roll[msg.note][time] = -1
        elif msg.type == 'control_change':
            if pedal_flag == 0 and msg.value>0: # pedal sends several messages during one pressing motion
                piano_roll[127][time] = 1
                pedal_flag = 1
            elif pedal_flag == 1 and msg.value == 0:
                piano_roll[127][time] = 1
                pedal_flag = 0
                
    # compose a sustain pedal ''timeline'' (processes note on/offs, creating a piano roll line for the pedal)
    pedal_flag = 0
    for j in range(piano_roll.shape[1]):
        if piano_roll[127,j] == 1:
            pedal_flag = int(not pedal_flag)
        piano_roll[127,j] = pedal_flag
    
    keypress_flag = 0  # indicated if key is pressed
    note_vel = 0    
    noteon_flag = 0    # indicates if note is sounding (key could be depressed but sustain pedal carries it on)
    pedal_flag = 0     # indicates if pedal is pressed
    
    for i in range(127):
        if np.sum(piano_roll[i, :] != 0):   # if there is any note in this roll
            note_vel = 0
            noteon_flag = 0
            pedal_flag = 0
            keypress_flag = 0
            for j in range(piano_roll.shape[1]):
                if piano_roll[i,j] > 0:    # key was pressed
                    keypress_flag = 1
                    note_vel = piano_roll[i,j]
                elif piano_roll[i,j] == -1:   # note-off was found
                    keypress_flag = 0

                pedal_flag = piano_roll[127,j]
                noteon_flag = keypress_flag or (pedal_flag and noteon_flag) # note can be heard when key is pressed 
                                                                            # or when it was already sounding and the pedal is pressed
                if noteon_flag:
                    piano_roll[i,j] = note_vel
                
                note_vel -= decay_per_tick # simple energy decay model
                if note_vel < 0:
                    noteon_flag = 0
                    note_vel = 0
                
    # Return only 30 seconds
    start_time = int(mido.second2tick(timestamp, tempo=500000, ticks_per_beat=midi_file.ticks_per_beat))
    stop_time  = int(mido.second2tick(timestamp+20, tempo=500000, ticks_per_beat=midi_file.ticks_per_beat))
    return piano_roll[:127,start_time:stop_time]
Пример #18
0
    def draw_roll(self):
        """Render the piano roll from self.get_roll() with matplotlib.

        One overlaid image per MIDI channel (16 total), each with its own
        hue-based colormap; the x axis is relabelled from ticks to seconds.
        """

        roll = self.get_roll()

        # build and set fig obj
        plt.ioff()
        fig = plt.figure(figsize=(4, 3))
        a1 = fig.add_subplot(111)
        a1.axis("equal")
        a1.set_facecolor("black")

        # change unit of time axis from tick to second
        tick = self.get_total_ticks()
        second = mido.tick2second(tick, self.ticks_per_beat, self.get_tempo())
        #print(second)
        if second > 10:
            x_label_period_sec = second // 10
        else:
            x_label_period_sec = second / 10  # ms
        #print(x_label_period_sec)
        x_label_interval = mido.second2tick(x_label_period_sec, self.ticks_per_beat, self.get_tempo()) / self.sr
        #print(x_label_interval)
        plt.xticks([int(x * x_label_interval) for x in range(20)], [round(x * x_label_period_sec, 2) for x in range(20)])

        # change scale and label of y axis
        plt.yticks([y*16 for y in range(8)], [y*16 for y in range(8)])

        # build colors
        channel_nb = 16
        transparent = colorConverter.to_rgba('black')
        colors = [mpl.colors.to_rgba(mpl.colors.hsv_to_rgb((i / channel_nb, 1, 1)), alpha=1) for i in range(channel_nb)]
        cmaps = [mpl.colors.LinearSegmentedColormap.from_list('my_cmap', [transparent, colors[i]], 128) for i in
                 range(channel_nb)]

        # build color maps
        # NOTE(review): _init and _lut are private matplotlib colormap
        # internals; this ramps each colormap's alpha from 0 to 1.
        for i in range(channel_nb):
            cmaps[i]._init()
            # create your alpha array and fill the colormap with them.
            alphas = np.linspace(0, 1, cmaps[i].N + 3)
            # create the _lut array, with rgba values
            cmaps[i]._lut[:, -1] = alphas


        # draw piano roll and stack image on a1
        for i in range(channel_nb):
            try:
                a1.imshow(roll[i], origin="lower", interpolation='nearest', cmap=cmaps[i], aspect='auto')
            except IndexError:
                # The roll holds fewer channels than the 16 we try to draw.
                pass

        # draw color bar

        colors = [mpl.colors.hsv_to_rgb((i / channel_nb, 1, 1)) for i in range(channel_nb)]
        cmap = mpl.colors.LinearSegmentedColormap.from_list('my_cmap', colors, 16)
        a2 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
        cbar = mpl.colorbar.ColorbarBase(a2, cmap=cmap,
                                        orientation='horizontal',
                                        ticks=list(range(16)))

        # show piano roll
        plt.draw()
        plt.ion()
        plt.show(block=True)
Пример #19
0
 def convert_sec2tick(self,second):
   """Convert *second* to MIDI ticks using this object's PPQ and tempo."""
   return mido.second2tick(second, self.PPQ, self.tempo)
Пример #20
0
    def CreateMIDIFromJSON(self, event):
        """UI handler: convert a Beat-Saber-style JSON level into a MIDI file.

        Reads notes, obstacles and lighting events from the JSON, writes
        them to separate tracks (times converted from beats to ticks at the
        song's tempo), merges the note/obstacle tracks into one, and saves
        the result as 'new_song.mid' under s.path.
        """
        with open(
                s.path +
                "OST\\SongLevelData_Breezer_Expert-resources.assets-231-MonoBehaviour.json"
        ) as f:
            data = json.load(f)
            bpm = data["_beatsPerMinute"]
            tempo = mido.bpm2tempo(bpm)
            mid = mido.MidiFile(type=1, ticks_per_beat=480)
            midi_track = mido.MidiTrack()
            mid.tracks.append(midi_track)
            notes_track = mido.MidiTrack()
            mid.tracks.append(notes_track)
            obstacles_track = mido.MidiTrack()
            mid.tracks.append(obstacles_track)
            midi_track.append(
                mido.MetaMessage('time_signature',
                                 numerator=4,
                                 denominator=4,
                                 clocks_per_click=24,
                                 notated_32nd_notes_per_beat=8,
                                 time=0))
            midi_track.append(mido.MetaMessage('set_tempo', tempo=tempo))
            currenttick = 0
            notes = []
            obstacles = []
            events = []
            bps = bpm / 60  # beats per second; JSON "_time" is in beats
            for note in data["_notes"]:
                notes.append(note)
                #print(note)
            for obstacle in data["_obstacles"]:
                obstacles.append(obstacle)
            for event in data["_events"]:
                events.append(event)

            prevtime = 0
            noteofftime = 0
            ticks_per_beat = 480
            #currenttick += msg.time
            last_note = 0
            last_obstacle = 0
            last_channel = 0
            multiple_notes = False

            # Each note_on is followed by a zero-length note_off for the
            # PREVIOUS note, so consecutive notes do not overlap.
            for note in notes:
                #print(note["_cutDirection"])
                notetimeinticks = int(
                    mido.second2tick(note["_time"] / bps, 480, tempo))
                note_number, channel = self.JSONNoteToSaberSlash(note)
                notes_track.append(
                    mido.Message('note_on',
                                 note=note_number,
                                 channel=channel,
                                 velocity=64,
                                 time=notetimeinticks - prevtime))
                notes_track.append(
                    mido.Message('note_off',
                                 note=last_note,
                                 channel=last_channel,
                                 velocity=0,
                                 time=0))
                last_note = note_number
                last_channel = channel
                prevtime = notetimeinticks

            prevtime = 0
            duration = 0
            # Obstacles carry their duration on the note_off message.
            for obstacle in obstacles:
                notetimeinticks = int(
                    mido.second2tick(obstacle["_time"] / bps, 480, tempo))
                obstacle_note_number = self.JSONNoteToObstacle(obstacle)
                obstacles_track.append(
                    mido.Message('note_on',
                                 note=obstacle_note_number,
                                 velocity=64,
                                 time=notetimeinticks - prevtime - duration))
                duration = int(
                    mido.second2tick(obstacle["_duration"] / bps, 480, tempo))
                obstacles_track.append(
                    mido.Message('note_off',
                                 note=obstacle_note_number,
                                 velocity=0,
                                 time=duration))
                last_obstacle = obstacle_note_number
                prevtime = notetimeinticks

            # Merge the tracks built so far into one combined track, then
            # add the lighting events on their own track.
            mid.tracks.append(mido.merge_tracks(mid.tracks))
            events_track = mido.MidiTrack()
            mid.tracks.append(events_track)

            last_event = 0
            prevtime = 0
            # NOTE(review): last_channel carries over from the notes loop
            # into the first event's note_off -- confirm this is intended.
            for event in events:
                eventtimeinticks = int(
                    mido.second2tick(event["_time"] / bps, 480, tempo))
                event_number, channel = self.JSONNoteToEvent(event)
                events_track.append(
                    mido.Message('note_on',
                                 note=event_number,
                                 channel=channel,
                                 velocity=64,
                                 time=eventtimeinticks - prevtime))
                events_track.append(
                    mido.Message('note_off',
                                 note=last_event,
                                 channel=last_channel,
                                 velocity=0,
                                 time=0))
                last_event = event_number
                last_channel = channel
                prevtime = eventtimeinticks

            # Drop the separate source tracks now that they are merged.
            mid.tracks.remove(notes_track)
            mid.tracks.remove(obstacles_track)
            mid.save(s.path + 'new_song.mid')
    def write_buffers(self, song):
        """Render *song* to a single in-memory MIDI file.

        Returns a one-element list holding a BytesIO with the MIDI bytes,
        or None when the mido module is unavailable.  Invalid music keys
        or tempos fall back to the Resources defaults with a printed error.
        """
        global no_mido_module

        if no_mido_module:
            print(
                "\n***WARNING: MIDI was not created because mido module was not found."
            )
            return None

        try:
            self.midi_key = re.sub(
                r'#', '#m',
                song.get_music_key())  # For mido sharped keys are minor
        except TypeError:
            self.midi_key = Resources.DEFAULT_KEY
            print(
                f"\n***ERROR: Invalid music key passed to the MIDI renderer: using {self.midi_key} instead."
            )

        try:
            tempo = mido.bpm2tempo(self.midi_bpm)
        except ValueError:
            print(
                f"\n***ERROR: invalid tempo passed to MIDI renderer. Using {Resources.DEFAULT_BPM} bpm instead."
            )
            tempo = mido.bpm2tempo(Resources.DEFAULT_BPM)

        mid = mido.MidiFile(type=0)
        track = mido.MidiTrack()
        mid.tracks.append(track)

        sec = mido.second2tick(1,
                               ticks_per_beat=mid.ticks_per_beat,
                               tempo=tempo)  # 1 second in ticks
        # Scale the configured note duration by the bpm ratio so audible
        # note length stays constant regardless of the chosen tempo.
        note_ticks = self.midi_note_duration * sec * Resources.DEFAULT_BPM / self.midi_bpm  # note duration in ticks

        self.write_header(mid, track, tempo)

        instrument_renderer = MidiInstrumentRenderer(
            self.locale, note_ticks=note_ticks, music_key=song.get_music_key())
        song_lines = song.get_lines()
        # Render every non-voice instrument line, honouring repeat counts.
        for line in song_lines:
            if len(line) > 0:
                if line[0].get_type().lower().strip() != 'voice':
                    instrument_index = 0
                    for instrument in line:
                        instrument.set_index(instrument_index)
                        #instrument_render = instrument.render_in_midi(note_duration=note_ticks,
                        #                                              music_key=song.get_music_key())
                        instrument_render = instrument_renderer.render(
                            instrument)
                        for i in range(0, instrument.get_repeat()):
                            for note_render in instrument_render:
                                track.append(note_render)
                            instrument_index += 1

        midi_buffer = io.BytesIO()
        mid.save(file=midi_buffer)

        midi_buffer.seek(0)

        return [midi_buffer]
Пример #22
0
def duration_txt_to_midi(duration_txt, ticks_per_beat, bpm, tempo):
    """Convert a duration expressed in beats at *bpm* into an integer
    MIDI tick count at the given *tempo*."""
    beat_length_seconds = duration_txt * 60 / bpm
    ticks = mido.second2tick(beat_length_seconds, ticks_per_beat, tempo)
    return int(ticks)
Пример #23
0
 def off_tick(self):
     """Absolute tick of this note's release time, rounded to the nearest integer."""
     tempo = self.tempo()
     absolute_tick = mido.second2tick(second=self.off, ticks_per_beat=PigNote.TICKS_PER_BEAT, tempo=tempo)
     return round(absolute_tick)
def analyze_file(input_midi_data, input_filename, criteria, output_midi_data):
    """Analyze one MIDI file against filter criteria and build an output track.

    Scans the file's events while checking: parseability, MIDI file type,
    tempo / ticks-per-beat validity, overall length vs 'min_length',
    simultaneous-note count vs 'max_sim_notes', and note range vs
    ['lowest_note'..'highest_note'] both as-is and allowing transposition
    (track range width vs 'num_notes'). Playable notes are appended as
    'note_on' messages to a new track added to output_midi_data, and a
    color-coded single-line status summary is printed for the file.

    :param input_midi_data: parsed MIDI data object (mido-style) to analyze
    :param input_filename: name of the input file, used for logging only
    :param criteria: dict with keys 'min_length', 'max_sim_notes',
        'lowest_note', 'highest_note', 'num_notes' and 'do_transpose'
    :param output_midi_data: MIDI container receiving the generated track
    :return: tuple (is_global_status_ok, msg_count, transpose_offset);
        when criteria['do_transpose'] is false this is the no-transpose
        message count with offset 0, otherwise the with-transpose message
        count and the computed transpose offset.
    """

    log(INFO, 'Analyzing input: {}'.format(input_filename))

    # Running totals and note extremes accumulated during the event scan
    summed_length = 0
    last_written_notes_time = 0
    max_length_no_transpose = 0
    max_length_with_transpose = 0
    lowest_note = -1
    highest_note = -1
    transpose_offset = 0
    transpose_range = 0
    msg_count = 0
    msg_count_no_transpose = 0
    msg_count_with_transpose = 0

    # Status flags for the individual checks
    is_format_ok = True
    is_file_too_short = False
    is_max_sim_notes_passed = False
    is_no_transpose_scan_done = False
    is_with_transpose_scan_done = False
    is_expected_length_check_ok = True
    is_global_status_ok = True

    # Get detailed necessary information from input MIDI data
    try:

        file_length = int(input_midi_data.length)
        tempo_in_bpm = midi.get_midi_file_tempo(input_midi_data)
        events = midi.get_midi_file_events(input_midi_data)
        ticks_per_beat = midi.get_midi_file_ticks_per_beat(input_midi_data)

    except Exception as error:

        is_format_ok = False
        file_length = 0

        log(WARNING,
            'Could not parse {}; file might be corrupt'.format(input_filename))
        log(WARNING, error)

    # Prepare filtering based on length; a 'min_length' of 0 means
    # "accept the file's own length" (i.e. no minimum-length filtering)
    if criteria['min_length'] == 0:
        expected_min_length = file_length
    else:
        expected_min_length = criteria['min_length']

    file_length_string = turn_seconds_int_to_minutes_and_seconds_string(
        file_length)
    expected_min_length_string = turn_seconds_int_to_minutes_and_seconds_string(
        expected_min_length)

    # In case of a corrupt file, let's just stop with that file
    if is_format_ok:

        # Check general file format
        if input_midi_data.type == 2:

            log(WARNING, 'Unsupported MIDI file type 2')
            is_format_ok = False

        if tempo_in_bpm == 0:

            log(WARNING, 'No valid tempo was found in file')
            is_format_ok = False

        if len(events) == 0:

            log(WARNING, 'No events were found in file!')
            is_format_ok = False

        if ticks_per_beat == 0:

            log(WARNING, 'No valid ticks per beat data was found in file')
            is_format_ok = False

    # In case of a bad/unsupported format, let's just stop with that file
    if is_format_ok:

        tempo_in_midi = bpm2tempo(tempo_in_bpm)

        # Initialize output MIDI data
        output_midi_data.ticks_per_beat = ticks_per_beat
        output_track = MidiTrack()
        output_track.append(
            Message('program_change',
                    program=OUTPUT_MIDI_FILE_INSTRUMENT,
                    time=0))
        output_track.append(
            MetaMessage('set_tempo', tempo=tempo_in_midi, time=0))
        # Account for the program_change + set_tempo messages just added
        msg_count += 2

        log(
            DEBUG, 'Tempo: {} / Length: {} / #Events: {}'.format(
                tempo_in_bpm, file_length_string, len(events)))

        # Check file overall length against expected minimum length
        if file_length < expected_min_length:

            log(
                WARNING,
                'Overall file length, {}, is lower than expected minimum length, {}'
                .format(file_length_string, expected_min_length_string))
            is_file_too_short = True

        # Start browsing file events
        for event in events:

            if event['type'] == IS_PAUSE:

                pause = event['value']

                summed_length += pause

                if not is_no_transpose_scan_done:

                    max_length_no_transpose += pause

                if not is_with_transpose_scan_done:

                    max_length_with_transpose += pause

            else:

                notes = event['value']

                if len(notes) > criteria['max_sim_notes']:

                    log(
                        WARNING,
                        'Maximum simultaneous notes count passed: {}'.format(
                            len(notes)))
                    is_max_sim_notes_passed = True

                is_first_note_in_notes = True

                for note in notes:

                    # Memorize highest and lowest notes ever found from the start of that file; this is used to transpose
                    if lowest_note == -1:
                        lowest_note = note
                        highest_note = note
                    elif note < lowest_note:
                        lowest_note = note
                    elif note > highest_note:
                        highest_note = note

                    if is_first_note_in_notes:

                        # Add delay prior to playing those notes
                        notes_delta_time_in_ticks = int(
                            second2tick(
                                summed_length - last_written_notes_time,
                                ticks_per_beat, tempo_in_midi))

                        is_first_note_in_notes = False

                    else:

                        # Other notes shall play at the same time as the first note
                        notes_delta_time_in_ticks = 0

                    if not (criteria['lowest_note'] <= note <=
                            criteria['highest_note']):

                        log(
                            DEBUG, 'Note {} out of range [{}-{}]'.format(
                                note, criteria['lowest_note'],
                                criteria['highest_note']))

                        if not is_no_transpose_scan_done:

                            is_no_transpose_scan_done = True
                            msg_count_no_transpose = msg_count

                            log(
                                INFO,
                                'Done with scan with no transpose - Got partial content'
                            )

                    if not (highest_note - lowest_note + 1 <=
                            criteria['num_notes']):

                        log(
                            DEBUG,
                            'Note {} out of range, even with transpose'.format(
                                note))

                        if not is_with_transpose_scan_done:

                            is_with_transpose_scan_done = True
                            msg_count_with_transpose = msg_count

                            log(
                                INFO,
                                'Done with scan with    transpose - Got partial content'
                            )

                            # No need to analyze file any further
                            # NOTE(review): this 'break' exits only the inner
                            # notes loop; the outer event loop keeps scanning
                            # and appending notes — confirm this is intended.
                            break

                    else:

                        # Compute output transpose offset and transpose_range, as long as note track range fits within available number of notes
                        transpose_offset = criteria['lowest_note'] - lowest_note
                        transpose_range = highest_note - lowest_note + 1

                    output_track.append(
                        Message('note_on',
                                note=note,
                                velocity=OUTPUT_MIDI_FILE_VELOCITY,
                                time=notes_delta_time_in_ticks))
                    msg_count += 1

                # The next event's delay is measured from the time at which
                # these notes were written out
                last_written_notes_time = summed_length

        # End of file reached & all file fitted in with no transpose
        if not is_no_transpose_scan_done:

            msg_count_no_transpose = msg_count

            log(INFO, 'Done with scan with no transpose - Got full    content')

        # End of file reached & all file fitted in with transpose
        if not is_with_transpose_scan_done:

            msg_count_with_transpose = msg_count

            log(INFO, 'Done with scan with    transpose - Got full    content')

        log(DEBUG,
            'Scanned notes range = [{}-{}]'.format(lowest_note, highest_note))

        # Finalize output MIDI data
        output_midi_data.tracks.append(output_track)

    # Check expected minimum length criteria
    if not criteria[
            'do_transpose'] and max_length_no_transpose < expected_min_length:

        is_expected_length_check_ok = False

    elif criteria[
            'do_transpose'] and max_length_with_transpose < expected_min_length:

        is_expected_length_check_ok = False

    # Compute final/global file status
    if not is_format_ok or is_file_too_short or is_max_sim_notes_passed or not is_expected_length_check_ok:

        is_global_status_ok = False

    # Print out a synthetic line per file, showing up analysis detailed status
    if is_global_status_ok:
        print(COLOR_GREEN, end='', flush=True)
    else:
        print(COLOR_RED, end='', flush=True)

    print('{}'.format(
        truncate_and_format_string(input_filename, STATUS_FILENAME_LENGTH)) +
          COLOR_END + ' - ',
          end='',
          flush=True)

    if is_format_ok:
        print('Format: OK', end='', flush=True)
    else:
        print(COLOR_RED + 'Format: KO' + COLOR_END, end='', flush=True)

    print(' - File length: {} - '.format(file_length_string),
          end='',
          flush=True)

    if not is_file_too_short:
        print('Short file check: OK', end='', flush=True)
    else:
        print(COLOR_RED + 'Short file check: KO' + COLOR_END,
              end='',
              flush=True)

    print(' - ', end='', flush=True)

    if not is_max_sim_notes_passed:
        print('Max sim notes check: OK', end='', flush=True)
    else:
        print(COLOR_RED + 'Max sim notes check: KO' + COLOR_END,
              end='',
              flush=True)

    max_length_no_transpose_string = turn_seconds_int_to_minutes_and_seconds_string(
        max_length_no_transpose)
    max_length_with_transpose_string = turn_seconds_int_to_minutes_and_seconds_string(
        max_length_with_transpose)

    print(' - Max length no transpose: {}'.format(
        max_length_no_transpose_string),
          end='',
          flush=True)
    print(' - Max length with transpose: {} - '.format(
        max_length_with_transpose_string),
          end='',
          flush=True)

    if is_expected_length_check_ok:
        print('Expected length check: OK', end='', flush=True)
    else:
        print(COLOR_RED + 'Expected length check: KO' + COLOR_END,
              end='',
              flush=True)

    print('')

    # Try some post-processings on transposition, for the sake of a good sound
    if is_global_status_ok and criteria['do_transpose'] and (transpose_offset
                                                             != 0):

        log(INFO, 'Transpose range : {}'.format(transpose_range))
        log(INFO, 'Transpose offset: {}'.format(transpose_offset))

        align_lowest_note_offset = 12 - (criteria['lowest_note'] %
                                         12) + (lowest_note % 12)

        # Try and transpose by having the lowest note identical to the original, but on another
        # octave, so that the transposition will actually sound much closer to the original.
        if align_lowest_note_offset + transpose_range <= criteria['num_notes']:

            transpose_offset += align_lowest_note_offset
            log(
                INFO,
                "Could apply octave    correction on transpose offset, now: {}"
                .format(transpose_offset))

        # If transpose offset is odd (i.e. a half-tone) and we got room to make it even (i.e. full tone),
        # let's go for an even offset, by adding 1 half-tone: full tone transposition should sound better.
        elif (transpose_offset % 2 != 0) and (transpose_range <=
                                              criteria['num_notes'] - 1):

            transpose_offset += 1
            log(
                INFO,
                "Could apply full tone correction on transpose offset, now: {}"
                .format(transpose_offset))

    if not criteria['do_transpose']:

        return is_global_status_ok, msg_count_no_transpose, 0

    else:

        return is_global_status_ok, msg_count_with_transpose, transpose_offset
Пример #25
0
    def to_midi(self, settings: MidiConversionSettings, **kwargs):
        """
        Saves the chord progression to a MIDI file.

        :param settings: :class:`MidiConversionSettings` carrying the tempo,
            beats per chord, instrument, velocity, repeat mode ("replay" or
            "hold"), an optional effect and the output filename.
        :raises ValueError: if individual keyword arguments are passed
            instead of a single MidiConversionSettings object.

        .. note::
            This feature requires ``mido``, which you can get with ``pip install mido``.
        """
        if not isinstance(settings, MidiConversionSettings) or kwargs:
            raise ValueError(
                "to_midi now takes a MidiConversionSettings object, not individual arguments; see README.md"
            )

        # "replay" restrikes a repeated chord; "hold" sustains the previous one.
        repeat_options = {"replay", "hold"}
        assert (settings.repeat in repeat_options
                ), f"repeat argument must be one of: {repeat_options}"

        import mido

        mid = mido.MidiFile()
        track = mido.MidiTrack()
        mid.tracks.append(track)

        # Ensure beats_per_chord is a list (a single int applies to all chords)
        if isinstance(settings.beats_per_chord, int):
            settings.beats_per_chord = [
                settings.beats_per_chord for _ in range(len(self.progression))
            ]
        assert len(settings.beats_per_chord) == len(
            self.progression
        ), "len(settings.beats_per_chord) is {}, which is not equal to the number of chords in the progression ({})".format(
            len(settings.beats_per_chord), len(self.progression))

        # Convert each chord's duration: beats -> seconds -> MIDI ticks
        seconds_per_chord = [(60 / settings.tempo) * bpc
                             for bpc in settings.beats_per_chord]
        ticks_per_chord = [
            int(
                mido.second2tick(spc, mid.ticks_per_beat,
                                 mido.bpm2tempo(settings.tempo)))
            for spc in seconds_per_chord
        ]
        track.append(
            mido.MetaMessage("set_tempo",
                             tempo=mido.bpm2tempo(settings.tempo)))
        track.append(
            mido.Message("program_change", program=settings.instrument))

        played_chords = []
        prev_chord = None
        time = 0

        for chord, tpc in zip(self.midi(), ticks_per_chord):
            if chord == prev_chord and settings.repeat == "hold":
                # "hold": extend the previous chord's notes rather than
                # emitting a new strike of the same chord
                played_chords[-1] = [
                    pnote._replace(duration=pnote.duration + tpc)
                    for pnote in played_chords[-1]
                ]
            else:
                played_chords.append([
                    MidiNote(
                        note=note,
                        velocity=settings.velocity,
                        time=time,
                        duration=tpc,
                    ) for note in chord
                ])
            prev_chord = chord
            time += tpc

        # Expose conversion state to any configured effect
        settings.set(progression=self)
        settings.set(played_chords=played_chords)
        settings.set(midi_track=track)

        if settings.effect:
            settings.effect.set_settings(settings)
            played_chords = [
                settings.effect.apply(chord) for chord in played_chords
            ]

        # Flatten the chords into timed messages and write the file out
        played_notes = [note for chord in played_chords for note in chord]
        for message in notes_to_messages(played_notes,
                                         velocity=settings.velocity):
            track.append(message)
        mid.save(settings.filename)
Пример #26
0
print("Enter filename for output: ")
name = input()
PATH = name + ".mid"

# Generate an event-token sequence from a random seed token.
song = evaluate([random.randint(1, 400)], 1000, 0.6)

mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)

# Decoder state: last velocity, last note number, and last note event type.
vel = 0
note = 0
m = ""
for t in song:
    tmp = evDictBack[t]
    mode, c = tmp.split()
    if mode == "note_on" or mode == "note_off":
        note = int(c)
        m = mode
    elif mode == "set_velocity":
        # Velocities are stored quantized to 32 levels; rescale to 0-127.
        vel = int(c)*4
        if vel == 128:
            vel = 127
    elif mode == "time_shift" and (m == "note_on" or m == "note_off"):
        # BUG FIX: the original condition lacked parentheses
        # ("... and m == 'note_on' or m == 'note_off'"), so because 'and'
        # binds tighter than 'or', ANY mode following a note_off event
        # would emit a message. The parentheses restrict emission to
        # time_shift tokens that follow a note event, as intended.
        tm = int(mido.second2tick(int(c)*8*4/1000, 384, 512820))
        track.append(mido.Message(m, note=note, velocity=vel, time=tm))
mid.save(PATH)

# Render the generated MIDI file to WAV using a FluidSynth soundfont.
from midi2audio import FluidSynth
OUTPUT_WAV = "./" + name + ".wav"
fs = FluidSynth('FluidR3Mono_GM.sf3')
fs.midi_to_audio("./" + PATH, OUTPUT_WAV)
Пример #27
0
def frames2tick(frames, samplerate=samplerate):
    """Convert a number of audio frames into an integer MIDI tick count.

    The frame count is turned into seconds using the sample rate, then
    converted with second2tick using the module-level ticks_per_beat and
    tempo values.
    """
    elapsed_seconds = frames / float(samplerate)
    return int(second2tick(elapsed_seconds, ticks_per_beat, tempo))
Пример #28
0
def steps2ticks(steps, bpm):
    """Convert a step count into MIDI ticks at the given BPM."""
    # Each step spans 60 / (sampling_freq * bpm) seconds.
    seconds = steps * 60 / (settings.sampling_freq * bpm)
    step_tempo = mido.bpm2tempo(bpm)
    return int(mido.second2tick(seconds, settings.ticks_per_beat, step_tempo))
Пример #29
0
def generate_messages_from_song(song):
    """Flatten a MIDI song into (delay_seconds, [messages]) pairs.

    Messages from all tracks are merged onto an absolute-tick timeline.
    Note-off events that fall closer than TAP_RATE to the following
    note-on of the same pitch are moved earlier on the timeline, so a
    physical tapper has time to release and re-strike the note.

    :param song: a mido MidiFile-like object with tracks, ticks_per_beat
        and a file type attribute
    :return: list of (delta_seconds, messages_at_that_tick) tuples in
        playback order; deltas honor any 'set_tempo' messages encountered
    :raises TypeError: for type-2 (asynchronous) files, whose tracks
        cannot be meaningfully merged, or for note messages that are
        neither 'note_on' nor 'note_off'

    NOTE: the original implementation contained a large block of
    unreachable code after the final return (it referenced undefined
    names such as 'times' and 'controls'); that dead code, and the
    'note_off_times'/'borrowed_time' variables only it used, have been
    removed.
    """
    cummulative_ticks = 0
    all_messages = defaultdict(list)

    tempo = DEFAULT_TEMPO
    # Minimum gap (in ticks) required between a note-off and the next
    # note-on of the same pitch.
    tap_rate_in_ticks = round(second2tick(TAP_RATE, song.ticks_per_beat,
                                          tempo))

    # The tracks of type 2 files are not in sync, so they can
    # not be played back like this.
    if song.type == 2:
        raise TypeError("can't merge tracks in type 2 (asynchronous) file")

    # Index every note message by pitch so off/on pairs can be inspected.
    by_note = defaultdict(list)

    for message in merge_tracks(song.tracks):
        cummulative_ticks += message.time
        all_messages[cummulative_ticks].append(message)
        with suppress(AttributeError):  # meta messages have no .note
            index = len(all_messages[cummulative_ticks]
                        ) - 1  # Silly hack - there's a better way.
            by_note[message.note].append((index, cummulative_ticks, message))

    last_tick = list(all_messages.keys())[-1]
    messages_with_seconds = []
    # Walk each pitch's events newest-first; when an off lands too close
    # to the on that follows it, move the off earlier on the timeline.
    for _note, messages in by_note.items():
        off_at = on_at = last_tick
        for _delete_me_index, tick, message in reversed(messages):
            if message.type == "note_on":
                on_at = tick
            elif message.type == "note_off":
                off_at = tick
                too_soon_by = on_at - off_at - tap_rate_in_ticks
                if too_soon_by < 0:
                    print(f"Too soon by {too_soon_by}")
                    adjusted_tick = tick - abs(too_soon_by)
                    try:
                        all_messages[tick].remove(message)
                    except Exception:
                        pprint.pprint(all_messages[tick])
                        raise
                    all_messages[adjusted_tick].append(message)
                    # TODO: In the future, probably keep adjusting backwards in the event that the note on before this was too close.
            else:
                raise TypeError("What kind of note is this?!")

    # Now turn these into notes with seconds.
    cummulative_ticks = 0
    for ticks, messages in sorted(all_messages.items()):
        for message in messages:
            if message.type == 'set_tempo':
                tempo = message.tempo
        delta_ticks = ticks - cummulative_ticks
        if delta_ticks > 0:
            delta = tick2second(delta_ticks, song.ticks_per_beat, tempo)
        else:
            delta = 0
        messages_with_seconds.append((delta, messages))
        cummulative_ticks = ticks
    return messages_with_seconds
 def convertTimesFromSecondsToTicks(self, notes):
     """Convert each note's time from seconds to integer ticks, in place.

     Uses a fixed tempo of 500000 microseconds per beat (120 BPM).
     """
     fixed_tempo = 500000
     for current_note in notes:
         in_ticks = second2tick(current_note.time, self.ticks_per_beat,
                                fixed_tempo)
         current_note.time = int(in_ticks)
Пример #31
0
 def second2tick(self, sec):
     """Convert seconds to ticks using this file's resolution and tempo."""
     midi_file = self.midi_file
     return mido.second2tick(sec, midi_file.ticks_per_beat, midi_file.tempo)