Example #1
def comptineN2():
    """Generate the midi file of the comptine d'un autre été"""
    mid = MidiFile()
    trackl = MidiTrack()
    trackl.name = "Left hand"
    for i in range(8):
        trackl = comp_lh1(trackl)
        trackl = comp_lh1(trackl)
        trackl = comp_lh2(trackl)
        trackl = comp_lh2(trackl)
    trackl.append(Message('note_on', note=52))
    trackl.append(Message('note_off', note=52, time=200))
    mid.tracks.append(trackl)

    trackr = MidiTrack()
    trackr.name = 'Right hand'
    trackr.append(Message('note_on', note=67, velocity=0, time=3200))
    trackr = comp_rh1(trackr)
    trackr = comp_rh2(trackr)
    trackr = comp_rh2(trackr)
    trackr = comp_rh3(trackr)
    trackr = comp_rh3(trackr, end=True)
    trackr = comp_rh4(trackr)
    trackr.append(Message('note_on', note=71))
    trackr.append(Message('note_off', note=71, time=200))
    mid.tracks.append(trackr)

    mid.ticks_per_beat = 100
    vols = generate_vol()
    mid = volume(mid, vols)
    return mid
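Every example below relies on mido's delta-time convention: a note starts with a note_on and ends with a later note_off whose time field gives the gap in ticks since the previous message. A minimal, self-contained sketch of that pattern (the output filename is illustrative):

from mido import Message, MidiFile, MidiTrack

mid = MidiFile(ticks_per_beat=100)  # matches the 100 ticks per beat used above
trk = MidiTrack()
trk.name = "Demo"
trk.append(Message('note_on', note=52))             # time defaults to 0, velocity to 64
trk.append(Message('note_off', note=52, time=200))  # the note lasts 200 ticks = 2 beats
mid.tracks.append(trk)
mid.save('demo.mid')                                # hypothetical output path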
Example #2
File: lib.py Project: rcaze/Quantic
def tempo(beats, trk=None, bt=480):
    """Create a track setting a tempo at each new beat"""
    if not trk:
        trk = MidiTrack()
        trk.name = "Tempo variation"
    trk.append(MetaMessage("set_tempo", tempo=beats[0], time=0))

    for beat in beats:
        trk.append(MetaMessage("set_tempo", time=bt, tempo=beat))

    return trk
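One possible way to call this helper, assuming it is importable from the module above; bpm2tempo converts beats per minute to the microseconds-per-beat values that set_tempo expects, and the tempo ramp and filename are illustrative:

from mido import MidiFile, bpm2tempo

beats = [bpm2tempo(bpm) for bpm in range(120, 89, -1)]  # one set_tempo per beat: slow from 120 to 90 bpm

mid = MidiFile(ticks_per_beat=480)  # matches the helper's default bt=480
mid.tracks.append(tempo(beats))     # tempo() defined above
mid.save('tempo_demo.mid')          # hypothetical output path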
Example #3
File: lib.py Project: rcaze/Quantic
def volume(mid, vols):
    """Change the volume of a midi"""
    bt = mid.ticks_per_beat
    trk = MidiTrack()
    trk.name = "Volume variation"
    trk.append(Message("control_change", control=7, time=0, value=vols[0]))

    for vol in vols:
        trk.append(Message("control_change", control=7, time=bt, value=vol))

    mid.tracks.append(trk)
    return mid
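A sketch of how volume might be used, assuming the function above is in scope; controller 7 is channel volume, so a decreasing list of values fades the channel out. The note, values, and filename are illustrative:

from mido import Message, MidiFile, MidiTrack

mid = MidiFile(ticks_per_beat=480)
notes = MidiTrack()
notes.append(Message('note_on', note=60))
notes.append(Message('note_off', note=60, time=480 * 16))  # hold middle C for 16 beats
mid.tracks.append(notes)

vols = [max(0, 100 - 6 * i) for i in range(16)]  # fade from 100 towards 0, one value per beat
mid = volume(mid, vols)                          # volume() defined above
mid.save('volume_demo.mid')                      # hypothetical output path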
Example #4
def GenerateMidiFile(source, name):

  timestep = 3 # there are three ticks per beat in general

  mid = MidiFile()
  mid.type = 2
  track = MidiTrack()
  track.name = "actions"
  track_panning = MidiTrack()
  track_panning.name = "panning"
  track_depth = MidiTrack()
  track_depth.name = "depth"
  mid.tracks.append(track)
  mid.tracks.append(track_panning)
  mid.tracks.append(track_depth)

  track.append(Message('program_change', program=12, time=timestep))
  track_panning.append(Message('program_change', program=12, time=timestep))
  track_depth.append(Message('program_change', program=12, time=timestep))

  for event in source:

    velocity = np.round(event.reward * 10).astype(int) # max is ~ .6, so we have 6 levels of velocity to play with

    # two simultaneous notes
    # if event.directionChange:
    #   track.append(Message('note_on', note=64, velocity=64, time=timestep))
    #   track.append(Message('note_off', note=64, velocity=64, time=timestep))

    track.append(Message('note_on', note=65 + int(event.actionKind), velocity=64 + velocity, time=timestep))
    track.append(Message('note_off', note=65 + int(event.actionKind), velocity=64 + velocity, time=timestep))

    track_panning.append(Message('note_on', note=0 , velocity=0 + event.xyzNorm[0],  time=timestep))
    track_panning.append(Message('note_off', note=0 , velocity=0 + event.xyzNorm[0],  time=timestep))

    track_depth.append(Message('note_on', note=0, velocity= 0 + event.xyzNorm[2],  time=timestep))
    track_depth.append(Message('note_off', note=0, velocity= 0 + event.xyzNorm[2],  time=timestep))

  mid.save(f'{name}s.mid')
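GenerateMidiFile reads reward, actionKind and xyzNorm from each event in source; those field names are only inferred from the attribute accesses above, so the stand-in event type below is an assumption used to show one plausible call:

from collections import namedtuple

# Hypothetical event type inferred from the attributes GenerateMidiFile reads.
Event = namedtuple('Event', ['reward', 'actionKind', 'xyzNorm'])

source = [
    Event(reward=0.3, actionKind=2, xyzNorm=[40, 0, 70]),  # xyzNorm values are used as 0-127 velocities
    Event(reward=0.6, actionKind=5, xyzNorm=[80, 0, 30]),
]
GenerateMidiFile(source, 'demo')  # writes 'demos.mid' per the f-string above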
Example #5
    def _add_notes(self, inst_voice, channel):
        voice_note_map = inst_voice.get_all_notes()

        for voice, notes in voice_note_map.items():
            track = MidiTrack()
            track.name = inst_voice.instrument.name
            self.mid.tracks.append(track)
            # For each note
            #    build a note on and off message, compute the ticks of the message
            #    append both messages to out list msgs
            velocity_msgs = self._gen_velocity_msgs(voice, channel)
            msgs = []
            for n in notes:
                # We do not need to set a velocity other than the default.
                # Crescendo and decrescendo are taken care of by channel change messages only,
                #       which modify the constant velocity set per note.
                # If the velocity were set here, the channel change would distort that setting.
                # Otherwise, the per-note velocity would be acquired here.
                ticks = self._wnt_to_ticks(n.get_absolute_position())
                msg = NoteMessage('note_on', channel,
                                  n.diatonic_pitch.chromatic_distance + 12,
                                  ticks, ScoreToMidiConverter.DEFAULT_VELOCITY)
                msgs.append(msg)
                end_ticks = self._wnt_to_ticks(n.get_absolute_position() +
                                               n.duration)
                msg = NoteMessage('note_off', channel,
                                  n.diatonic_pitch.chromatic_distance + 12,
                                  end_ticks)
                msgs.append(msg)

            # Sort the msgs list by tick time, placing note_off before note_on at equal ticks.
            msgs.extend(velocity_msgs)

            from functools import cmp_to_key
            msgs = sorted(
                msgs,
                key=cmp_to_key(
                    lambda x, y: ScoreToMidiConverter.compare_note_msgs(x, y)))

            prior_tick = 0
            for m in msgs:
                logging.info('{0}'.format(m))
                ticks_value = int(m.abs_tick_time - prior_tick)
                # Append the MIDI message to the track, with ticks expressed as deltas between successive messages.
                # We default to channel 1 for all tracks.
                track.append(m.to_midi_message(ticks_value))
                prior_tick = m.abs_tick_time
                if self.__trace:
                    print('{0}/{1}'.format(ticks_value, m))
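NoteMessage, compare_note_msgs and _wnt_to_ticks belong to the surrounding project and are not shown. The core idea, collecting messages with absolute tick times, sorting them so note_off precedes note_on at equal ticks, and then emitting delta times, can be sketched independently with plain mido messages; everything below is an illustrative stand-in rather than the project's code:

from mido import Message, MidiFile, MidiTrack

# (absolute_tick, message) pairs; the note_off at tick 480 must precede the note_on at tick 480.
events = [
    (0, Message('note_on', note=60, velocity=64)),
    (480, Message('note_off', note=60)),
    (480, Message('note_on', note=62, velocity=64)),
    (960, Message('note_off', note=62)),
]

# Sort by tick, with note_off before note_on at equal ticks.
events.sort(key=lambda e: (e[0], e[1].type != 'note_off'))

track = MidiTrack()
prior_tick = 0
for tick, msg in events:
    track.append(msg.copy(time=tick - prior_tick))  # convert absolute ticks to deltas
    prior_tick = tick

mid = MidiFile()
mid.tracks.append(track)
mid.save('sorted_demo.mid')  # hypothetical output path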
Example #6
File: lib.py Project: rcaze/Quantic
def pedal(nbt, nmeas, trk=None, bt=480):
    """Play the pedal every n beat starting with the pedal on nbt is the numbr
    of beat per measure and meas is the number of measure"""
    if not trk:
        trk = MidiTrack()
        trk.name = "Pedal"
    p_on = rd.randint(64, 127)
    p_off = rd.randint(64, 127)
    trk.append(Message("control_change", control=64, value=p_on, time=0))
    for i in range(nmeas):
        p_off = rd.randint(64, 127)
        trk.append(
            Message("control_change", control=64, value=p_off, time=bt * nbt))
        p_on = rd.randint(64, 127)
        trk.append(Message("control_change", control=64, value=p_on))
    return trk
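A possible call, assuming rd is Python's random module (as the rd.randint calls suggest); the time signature, measure count, and filename are illustrative:

import random as rd  # assumed alias used by pedal()
from mido import MidiFile

mid = MidiFile(ticks_per_beat=480)        # matches the helper's default bt=480
mid.tracks.append(pedal(nbt=4, nmeas=8))  # pedal released and re-pressed once per 4/4 measure, for 8 measures
mid.save('pedal_demo.mid')                # hypothetical output path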
Example #7
File: lib.py Project: rcaze/Quantic
def tempo_r(mid, beats, rs):
    """Create a new track with randomly varying tempo"""
    bt = mid.ticks_per_beat
    trk = MidiTrack()
    trk.name = "Tempo variation"
    trk.append(MetaMessage("set_tempo", tempo=beats[0], time=0))

    for i, beat in enumerate(beats):
        r = rs[i]
        if r == 0:  # For the deterministic case
            tempo_r = beat
        else:
            tempo_r = rd.randint(beat - int(beat * r),
                                 beat + int(beat * r)) + 1
        trk.append(MetaMessage("set_tempo", time=bt, tempo=tempo_r))

    mid.tracks.append(trk)
    return mid
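A sketch of calling tempo_r, again assuming rd is the random module; beats holds one microseconds-per-beat value per beat and rs the relative jitter applied to each (0 keeps a beat deterministic). All values are illustrative:

import random as rd  # assumed alias used by tempo_r()
from mido import MidiFile, bpm2tempo

mid = MidiFile(ticks_per_beat=480)
beats = [bpm2tempo(120)] * 32  # 32 beats at a nominal 120 bpm
rs = [0] * 8 + [0.1] * 24      # keep the first 8 beats strict, then allow 10% jitter
mid = tempo_r(mid, beats, rs)
mid.save('tempo_r_demo.mid')   # hypothetical output path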
Example #8
    def export_midi(song, file_name):
        """Creates a midi file representing the current song and saves the result to a file."""
        midi_file = MidiFile(ticks_per_beat=song.ticks_per_beat)

        # TODO: Output key signature changes to track 0.

        for track_idx, track in enumerate(song):
            midi_track = MidiTrack()
            midi_track.name = track.name
            midi_file.tracks.append(midi_track)
            channel = track.channel
            if channel > 15:
                print('Warning: too many channels for song %s. Skipping extra channel #%d.'
                      % (song.name, channel))
                continue
            midi_track.append(Message('program_change', channel=channel, program=track.program,
                                      time=0))
            measure_number = 0
            prev_tick = 0
            tick = 0
            prev_sounding_notes = set()  # contains MIDI numbers of previously-sounding notes
            previous_time_signature = None

            for measure in track:
                tick = measure.start_tick  # Jump to the start of this measure.

                # Write a time signature message if it has changed.
                # Only do this for track 0.
                if track_idx == 0 and measure.time_signature != previous_time_signature:
                    previous_time_signature = measure.time_signature
                    delta = tick - prev_tick
                    prev_tick = tick
                    midi_track.append(MetaMessage(
                        'time_signature',
                        numerator=measure.time_signature.numerator,
                        denominator=measure.time_signature.denominator,
                        time=delta))

                for event in measure:
                    sounding_notes = set()  # Notes that are sounding at this moment.

                    # First send a note off for all previously-sounding notes EXCEPT those
                    # which show up in the current event with the "tie_from_previous" flag set.
                    tied_from_previous = set(n.midi_pitch for n in event if n.tie_from_previous)
                    prev_tick = SongMidiConverter._turn_off_notes(midi_track, channel, tick,
                                                                  prev_tick, prev_sounding_notes,
                                                                  tied_from_previous)

                    for note in event:
                        pitch = note.midi_pitch
                        velocity = note.velocity
                        tie_from_previous = note.tie_from_previous
                        assert pitch >= 0 and pitch < 128
                        assert tie_from_previous or (velocity >= 0 and velocity < 128)

                        if velocity > 0 and not tie_from_previous:
                            # Handle note-on events.
                            delta = tick - prev_tick
                            prev_tick = tick
                            midi_track.append(Message('note_on', channel=channel, note=pitch,
                                                      velocity=note.velocity, time=delta))
                            sounding_notes.add(pitch)
                        elif not tie_from_previous:
                            # Handle explicit note-off events (note with velocity 0)
                            delta = tick - prev_tick
                            prev_tick = tick
                            midi_track.append(Message('note_off', channel=channel, note=pitch,
                                                      velocity=0, time=delta))
                        else:
                            # Handle tie_from_previous.
                            sounding_notes.add(pitch)

                    prev_sounding_notes = sounding_notes
                    tick += event.duration

                # We processed all events in the measure.  If not at the end of the measure yet,
                # turn off all notes.
                if tick < measure.start_tick + measure.get_duration_ticks():
                    prev_tick = SongMidiConverter._turn_off_notes(midi_track, channel, tick,
                                                                  prev_tick, prev_sounding_notes)
                    prev_sounding_notes = set()

                measure_number += 1

            # Turn off all notes at end of track.
            SongMidiConverter._turn_off_notes(midi_track, channel, tick, prev_tick,
                                              prev_sounding_notes)

            # End of track message.
            midi_track.append(MetaMessage('end_of_track'))

        midi_file.save(file_name)
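SongMidiConverter._turn_off_notes is not shown here. Judging from its call sites it receives the track, channel, the current and previous absolute ticks, the set of sounding pitches, and an optional set of pitches to leave ringing, and it returns the updated previous tick. The sketch below is one plausible shape for such a helper, written only to make the example self-contained; it is not the project's implementation:

from mido import Message

def _turn_off_notes(midi_track, channel, tick, prev_tick, sounding_notes, keep=()):
    """Illustrative stand-in: emit note_off for every sounding pitch not in keep."""
    for pitch in sorted(sounding_notes):
        if pitch in keep:
            continue                  # tied notes keep sounding
        delta = tick - prev_tick      # non-zero only for the first note_off emitted here
        prev_tick = tick
        midi_track.append(Message('note_off', channel=channel, note=pitch,
                                  velocity=0, time=delta))
    return prev_tick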
Example #9
def get_midi_from_numbers(source, version, velocities=None):
    """
  It is beneficial to train the system with all files in the same key so that they create harmonies rather than discordance
  take dataframe (source), and returns midi file. called for creating training data, and after inference.
  
  music21 method adapted from http://nickkellyresearch.com/python-script-transpose-midi-files-c-minor/
  """

    timestep = 1  # this is the time delta per note
    mid = MidiFile()
    mid.type = 1
    voices = []

    timedelta = 120

    # normalize velocities
    if velocities is not None:
        velocities = np.minimum(
            np.floor((velocities / np.max(velocities)) * 128).astype(np.int32),
            127)

    # for each voice make a midi track
    for voice in range(4):
        track = MidiTrack()
        track.name = f"voice{voice}_{version}"
        track.append(Message('program_change', program=12, time=timestep))
        # append to voices list
        voices.append(track)
        # append to full midi
        mid.tracks.append(track)

    # go through numbers
    for voice in range(4):
        actionKindPrevious = -1
        notelength = 0
        for i, event in enumerate(source[:, voice]):
            if (i != 0 and event == -1):
                print("-1 event")
            if (event != actionKindPrevious):
                # if not first note, end previous note
                if (i != 0):
                    voices[voice].append(
                        Message('note_off',
                                note=int(actionKindPrevious),
                                velocity=80,
                                time=notelength * timedelta))
                # start new note
                if velocities is None:
                    velocity = 80
                else:
                    velocity = velocities[voice, i]
                voices[voice].append(
                    Message('note_on',
                            note=int(event),
                            velocity=velocity,
                            time=0))
                notelength = 0
            # else:
            notelength += 1
            actionKindPrevious = event

            if (i == len(source) - 2 and notelength > 0):
                voices[voice].append(
                    Message('note_off',
                            note=int(event),
                            velocity=80,
                            time=(notelength + 1) * timedelta))

            actionKindPrevious = event
            # notelength+=1

    return mid
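source is indexed as source[:, voice], so it is assumed to be a NumPy array of shape (timesteps, 4) holding MIDI note numbers; the fragment below is illustrative:

import numpy as np

# Four voices (columns), eight timesteps (rows) of MIDI note numbers -- illustrative data.
source = np.array([
    [72, 67, 64, 48],
    [72, 67, 64, 48],
    [74, 69, 65, 50],
    [74, 69, 65, 50],
    [76, 71, 67, 52],
    [76, 71, 67, 52],
    [72, 67, 64, 48],
    [72, 67, 64, 48],
])

mid = get_midi_from_numbers(source, version='demo')
mid.save('numbers_demo.mid')  # hypothetical output path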
Example #10
def parse(input_file, output_file, new_velocity, align_margin, collated,
          normalized_tempo, create_channels, index_patches):

    # TODO: Extract parts of parse function into other functions
    # TODO: Options for merging tracks
    # TODO: Align notes in different tracks
    # TODO: Use channels to split tracks

    # =====================
    #    Initialization
    # =====================

    # Check if the input file wasn't given
    if (input_file == ""):
        # If the input file wasn't given, return an exception
        return Exception("Input file not specified")

    # Check if the output file wasn't given
    if (output_file == ""):
        # If the output file wasn't given, return an exception
        return Exception("Output file not specified")

    # If index_patches is selected but not create_channels
    if (index_patches and not create_channels):
        return Exception("Patches cannot be indexed without creating channels")

    # Create lists for storing notes and meta messages
    track_notes = []
    track_meta = []

    # Create a list for storing final tracks
    output_tracks = []

    # Create a list to store which output tracks are only meta
    meta_track_indices = []

    # Create a dictionary to store the patch of each track
    patch_dictionary = {}

    # Create a list to store which indices tracks are split into
    split_indices = []

    # Try to load the input file
    try:
        # Load the input MIDI file
        input_song = mido.MidiFile(input_file)
    except Exception as err:
        # Print the error
        traceback.print_tb(err.__traceback__)
        # If it couldn't be loaded, return an error
        return Exception(
            "Input file could not be loaded, try checking the input file path")

    # Create a new MIDI file for the final song
    output_song = mido.MidiFile(ticks_per_beat=input_song.ticks_per_beat)

    # Check if we should override the note velocity
    try:
        # See if the user has input an integer
        new_velocity = int(new_velocity)
    except:
        # If the user has not input an integer, ignore it
        new_velocity = -1
        pass

    # If it's out of range, set it to -1
    if (new_velocity < 1 or new_velocity > 127):
        new_velocity = -1

    # Check if we should override the tempo
    try:
        # See if the user has input an integer
        normalized_tempo = int(normalized_tempo)
        # Convert the tempo from bpm to the correct units
        normalized_tempo = mido.bpm2tempo(normalized_tempo)
    except:
        # If the user has not inputted an integer, ignore it
        normalized_tempo = -1
        pass

    # If it's out of range, set it to -1
    if (normalized_tempo < 1):
        normalized_tempo = -1

    # Cast the parameter to a boolean
    create_channels = bool(create_channels)

    # Cast the parameter to a boolean
    index_patches = bool(index_patches)

    # Create a dictionary to serve as a look up table for tempo
    tempo_dict = {}

    # Create an alignment margin variable
    alignment_margin = 0

    try:
        # See if the user has input a number
        alignment_margin = float(align_margin)
    except:
        pass

    # ============================
    #    Extract Tempo Messages
    # ============================

    # Loop through all tracks
    for i, track in enumerate(input_song.tracks):

        # Create a time variable for storing absolute time and set it to 0
        tick_time = 0

        # Loop through all notes in the track
        for j, msg in enumerate(track):

            # If there is a change in time then add that to the absolute time
            tick_time += msg.time

            # If we found a set_tempo message
            if (msg.is_meta and msg.type == "set_tempo"):
                # Add it to the look up table
                tempo_dict[tick_time] = msg.tempo

    # If there are no tempo messages
    if (len(tempo_dict) == 0):
        # Set the tempo to the default of 120bpm
        tempo_dict[0] = mido.bpm2tempo(120)

    # ==========================
    #     Loop Through Tracks
    # ==========================

    for i, track in enumerate(input_song.tracks):

        # ===================================
        #    Program/Patch Change Messages
        # ===================================

        # Create a time variable for storing absolute time and set it to 0
        tick_time = 0

        # Create a list of channels
        channel_list = []

        # Loop through all messages
        for msg in track:

            # If this message is not a meta message
            if not msg.is_meta:
                try:
                    # If this message's channel isn't in the channel list yet
                    if not msg.channel in channel_list:
                        # Add it
                        channel_list.append(msg.channel)
                except Exception:
                    pass

            # If there is a change in time then add that to the absolute time
            tick_time += msg.time

            # If this message is a patch change message
            if (msg.type == "program_change"):
                # If this channel doesn't have a dictionary entry
                if not msg.channel in patch_dictionary:
                    # Make an entry
                    patch_dictionary[msg.channel] = {}
                # Add a patch entry for the current channel at the current time
                patch_dictionary[msg.channel][tick_time] = msg.program

        # Loop through all channels
        for channel in channel_list:
            # If this channel doesn't have a dictionary entry
            if not channel in patch_dictionary:
                # Make an entry
                patch_dictionary[channel] = {}

            # If this channel doesn't have a patch at the beginning
            if not 0 in patch_dictionary[channel]:
                # Add the default patch at the beginning
                patch_dictionary[channel][0] = 0

        # ==============================
        #     Raw Message Extraction
        # ==============================

        # Store all meta messages in a list
        meta_messages = [msg for msg in track if msg.is_meta]

        # Reset tick_time to zero
        tick_time = 0

        # Create a new empty list inside the main list for all tracks to append notes to
        track_meta.append([])

        # Create a new empty list inside the main list for all tracks to append meta messages to
        track_notes.append([])

        # Loop through all meta messages
        for msg in meta_messages:
            # Append the message to the meta list for this track
            track_meta[i].append(msg)

        # ======================
        #    Meta-only tracks
        # ======================

        # If this is just a meta track
        if (len(meta_messages) == len(track)):
            # We'll first assume the split index is 0
            split_index = 0
            # If this is not the first track
            if (i != 0):
                # Get the indices the previous track was split into
                previous_track = split_indices[len(split_indices) - 1]
                # Set the split index as the previous maximum index plus one
                split_index = previous_track[len(previous_track) - 1] + 1
            # Append this index to the list
            split_indices.append([split_index])

            # Add this track index to the list recording which tracks are only meta
            meta_track_indices.append(len(output_tracks))
            # Add sub-list where this track will be stored
            output_tracks.append([])
            # Create a new track
            finished_track = MidiTrack()
            # Append this track to the sub-list
            output_tracks[len(output_tracks) - 1].append(finished_track)
            # Create a new variable to keep track of skipped delta time
            skipped_ticks = 0
            # Loop through all messages
            for msg in track_meta[i]:
                # If this is a tempo message
                if (msg.type == "set_tempo"):
                    # Increase the skipped time by the amount of this message
                    skipped_ticks += msg.time
                    # Don't append it
                    continue
                # Increase this message's time by the amount of skipped ticks
                msg.time += skipped_ticks
                # Reset the amount of skipped time
                skipped_ticks = 0
                # Append message to the finished track
                finished_track.append(msg)
            # Skip everything below
            continue

        # ===========================
        #     Convert Note Format
        # ===========================

        # Create a variable to store the state of the sustain controller
        sustain = False
        # Create a variable to store the state of the sustain controller last loop
        sustain_last = False
        # Create a variable to store the state of the sostenuto controller
        sostenuto = False
        # Create a variable to store the state of the sostenuto controller last loop
        sostenuto_last = False
        # Create a variable to store all notes being sustained by the sostenuto controller
        sostenuto_notes = []
        # Create a variable to turn all notes off
        off = False

        # Loop through all notes in the track
        for j, msg in enumerate(track):

            # If this is the last message
            if (j == len(track) - 1):
                # Turn all notes off
                off = True

            # If there is a change in time then add that to the absolute time
            tick_time += msg.time

            # If this is a note on message
            if (msg.type == "note_on"):
                # If the note has a nonzero velocity
                if (msg.velocity != 0):
                    # Create a new entry with the format of [TIME ON, TIME OFF, NOTE, VELOCITY, ALIGNED, TRACK INDEX, CHANNEL, PATCH]
                    track_notes[i].append([
                        tick_time, None, msg.note, msg.velocity, 0, None,
                        msg.channel,
                        get_patch(patch_dictionary, msg.channel, tick_time)
                    ])
                    # If this channel doesn't already have a patch
                    if (patch_dictionary[msg.channel] == None):
                        # Assume that it has a default patch of 1
                        patch_dictionary[msg.channel] = 1
                # If the note has a zero velocity
                else:
                    # Loop through the notes list backwards
                    for j in reversed(range(len(track_notes[i]))):
                        if (msg.note in sostenuto_notes):
                            break
                        # Check if there is a note that is the same note and doesn't end yet
                        if (track_notes[i][j][2] == msg.note
                                and track_notes[i][j][1] == None):
                            # Set the current time as its end time
                            track_notes[i][j][1] = tick_time
            # If this is a note off message and sustain is not active
            if (msg.type == "note_off" and not sustain):
                # If this note is being sustained by sostenuto
                if (msg.note in sostenuto_notes):
                    # Don't stop it
                    break
                # Loop through the notes list backwards
                for j in reversed(range(len(track_notes[i]))):
                    # Check if there is an active note that doesn't end yet
                    if (track_notes[i][j][2] == msg.note
                            and track_notes[i][j][1] == None):
                        # Add the current time as its end time
                        track_notes[i][j][1] = tick_time
            # If this message is a controller change
            if (msg.type == "control_change"):
                # If it is a sustain message
                if (msg.control == 64):
                    # Set it on/off
                    sustain = not msg.value < 64
                # If it is a sostenuto message
                if (msg.control == 66):
                    # Set it on/off
                    sostenuto = not msg.value < 64
                # If this is a controller message to turn all notes off
                if (msg.control == 120):
                    # Turn on the flag
                    off = True
                # If this is a reset controllers message
                if (msg.control == 121):
                    # Turn off sustain and sostenuto
                    sustain = False
                    sostenuto = False

            # If sostenuto just turned on
            if (sostenuto and not sostenuto_last):
                # Remove the notes it was holding down
                sostenuto_notes.clear()
                # Check which notes are being pressed
                notes_on = list(filter(lambda e: e[1] == None, track_notes[i]))
                # Loop through the notes
                for note in notes_on:
                    # Add their pitch to the list
                    sostenuto_notes.append(note[2])

            # If sustain has changed to off
            if (sustain_last and not sustain):
                # Check which notes are on and aren't being sustained by sostenuto
                notes_on = list(
                    filter(
                        lambda e: e[1] == None and e[2] not in sostenuto_notes,
                        track_notes[i]))
                # Loop through them
                for note in notes_on:
                    # Set their end time to now
                    note[1] = tick_time

            # If sostenuto has been released
            if (sostenuto_last and not sostenuto):
                # Loop through all notes
                for note in track_notes[i]:
                    # Loop through all notes being sustained by sostenuto
                    for sostenuto_note in sostenuto_notes:
                        # If they have the same pitch and the note has no end time
                        if (note[2] == sostenuto_note and note[1] == None):
                            # Set its end time to now
                            note[1] = tick_time

            # If there is an all notes off message
            if (off):
                # Loop through all notes
                for note in track_notes[i]:
                    # If the note doesn't have an end time
                    if (note[1] == None):
                        # Set its end time to now
                        note[1] = tick_time
                # Clear all sostenuto notes
                sostenuto_notes.clear()
                # Turn off sostenuto
                sostenuto = False
                sostenuto_last = False
                # Turn the all notes off flag off
                off = False

            # Set the current value of sustain to the last variable for the next loop
            sustain_last = sustain
            # Set the current value of sostenuto to the last variable for the next loop
            sostenuto_last = sostenuto

        # =====================
        #    Note Processing
        # =====================

        # If there are any notes that have the same end and start time(0 duration), delete them
        track_notes[i] = [
            note for note in track_notes[i] if note[0] != note[1]
        ]

        # Remove duplicate notes
        track_notes[i] = [
            list(new_note) for new_note in set(
                tuple(note) for note in track_notes[i])
        ]

        # Convert the note time to second
        track_notes[i] = notes2second(track_notes[i], tempo_dict,
                                      input_song.ticks_per_beat)

        # Sort the notes by their end time
        track_notes[i].sort(key=lambda e: e[1])

        # Loop through notes
        for note in track_notes[i]:
            # Find the minimum time the note must begin/end at
            min_time = note[1] - Decimal(alignment_margin)
            # Find the maximum time the note must begin/end at
            max_time = note[1] + Decimal(alignment_margin)
            # Create a variable to store the mean time of all overlapping notes
            mean_time = Decimal(0)
            # Create a variable to store how many notes are within the margin
            overlap_notes = 0
            # If the note's end has already been aligned
            if (bool(note[4] & 0b01)):
                # Skip the rest of the loop
                continue
            # Loop through all notes
            for overlap in track_notes[i]:
                # If the note starts within the margin and its start has not been aligned
                if (overlap[0] >= min_time and overlap[0] <= max_time
                        and not bool(overlap[4] & 0b10)):
                    # Add the note's time to the mean time
                    mean_time += overlap[0]
                    # Add one to the note count
                    overlap_notes += 1
                # If the note ends within the margin and its end has not been aligned
                if (overlap[1] >= min_time and overlap[1] <= max_time
                        and not bool(overlap[4] & 0b01)):
                    # Add the note's time to the mean time
                    mean_time += overlap[1]
                    # Add one to the note count
                    overlap_notes += 1
            # If there is no other note
            if (overlap_notes < 2):
                # Skip the rest of the loop
                continue
            # Average the mean time
            mean_time /= overlap_notes
            # Loop through all notes
            for overlap in track_notes[i]:
                # If the note starts within the margin and its start has not been aligned
                if (overlap[0] >= min_time and overlap[0] <= max_time
                        and not bool(overlap[4] & 0b10)):
                    # Set the note's time to the mean
                    overlap[0] = mean_time
                    # Turn the bit that signifies the start has been aligned on
                    overlap[4] = overlap[4] | 0b10
                # If the note ends within the margin and its end has not been aligned
                if (overlap[1] >= min_time and overlap[1] <= max_time
                        and not bool(overlap[4] & 0b01)):
                    # Set the note's time to the mean
                    overlap[1] = mean_time
                    # Turn the bit that signifies the end has been aligned on
                    overlap[4] = overlap[4] | 0b01

        # If we should not normalize the tempo
        if (normalized_tempo < 0):
            # Convert the note time back to ticks with the original tempos
            track_notes[i] = notes2tick(track_notes[i], tempo_dict,
                                        input_song.ticks_per_beat)
        # If we should normalize the tempo
        else:
            # Convert the note time back to ticks with a single tempo
            track_notes[i] = notes2tick(track_notes[i], {0: normalized_tempo},
                                        input_song.ticks_per_beat)

        # Only keep notes that do not have the same start and end time(nonzero duration)
        track_notes[i] = [
            note for note in track_notes[i] if note[0] != note[1]
        ]

        # Sort the notes by their start time
        track_notes[i].sort(key=lambda e: e[0])

        # ======================
        #    Track Splitting
        # ======================

        # Create a new list for new(split) tracks
        new_tracks = []

        # Iterate through all notes in the current track
        for note in track_notes[i]:
            # Calculate the track index of the note
            track_index = get_track_index(note, track_notes[i])

            # If we need more tracks, add them
            for j in range(1 + track_index - len(new_tracks)):
                new_tracks.append([])

            # Add the note to its track
            new_tracks[track_index].append(note)

        # ==================
        #      Indexing
        # ==================

        # Create a new variable to store the starting index
        initial_index = 0

        # Loop through all split tracks in the output_tracks list
        for split_tracks in output_tracks:
            # Increase the starting index by the amount of split tracks
            initial_index += len(split_tracks)

        # If we are indexing patches
        if (index_patches):
            # For every new track
            for j, new_track in enumerate(new_tracks):
                # Loop through the notes
                for note in new_track:
                    # Set the patch to a clamped index value between 0 and 127
                    note[7] = min(max(0, initial_index + j), 127)

        # ======================
        #      Track Output
        # ======================

        # Add parent list where split tracks from this track will be stored
        output_tracks.append([])

        for j, new_track in enumerate(new_tracks):

            # Create a new track to append to the MIDI file that will be exported
            finished_track = MidiTrack()

            # Use tick_time to represent absolute time and set it to 0
            tick_time = 0

            # Set the finished track name to the old track name concatenated with an index starting with 1
            finished_track.name = track.name + " " + str(j + 1)

            # Loop through all of the notes in the new track
            for note in new_track:
                # Add a note_on message for the note
                finished_track.append(
                    Message("note_on",
                            note=note[2],
                            velocity=new_velocity
                            if new_velocity >= 0 else note[3],
                            time=(note[0] - tick_time),
                            channel=note[6]))
                # Add a note_off message for the note
                finished_track.append(
                    Message("note_off",
                            note=note[2],
                            velocity=0,
                            time=(note[1] - note[0]),
                            channel=note[6]))
                # Set the absolute time counter to the last message added(the note_off message)
                tick_time = note[1]

            # Use tick_time to represent absolute time and set it to 0
            tick_time = 0

            # Create a variable to store the previous patch
            last_patch = None

            # If we are creating channels and not indexing patches
            if (create_channels and not index_patches):
                # We can now reasonably make the assumption that each channel has a one-to-one correspondence with each track

                # Loop through all messages
                for k, msg in enumerate(finished_track):
                    # Update tick_time
                    tick_time += msg.time
                    # If this is a meta message
                    if (msg.is_meta):
                        # Skip this loop iteration
                        continue
                    # If the patch has changed
                    if (not get_patch(patch_dictionary, msg.channel, tick_time)
                            == last_patch):
                        # Insert a patch change message
                        finished_track.insert(
                            k,
                            Message(
                                "program_change",
                                channel=msg.channel,
                                program=get_patch(patch_dictionary,
                                                  msg.channel, tick_time),
                                time=(tick_time -
                                      get_patch_time(patch_dictionary,
                                                     msg.channel, tick_time))))
                        # Update the preceding note's time
                        msg.time -= (tick_time - get_patch_time(
                            patch_dictionary, msg.channel, tick_time))

                    # Update last_patch
                    last_patch = get_patch(patch_dictionary, msg.channel,
                                           tick_time)

            # Append the finished track to the list of tracks to be output
            output_tracks[i].append(finished_track)

        # We'll first assume the starting index is 0
        starting_index = 0
        # If this is not the first track
        if (i != 0):
            # Get the indices the previous track was split into
            previous_track = split_indices[len(split_indices) - 1]

            # Set the starting index as the previous maximum index plus one
            starting_index = previous_track[len(previous_track) - 1] + 1
        # Append a sub-list to store the indices this track is split into
        split_indices.append([])
        # Loop through the number of tracks this track will be split into
        for j in range(len(output_tracks[i])):
            # Add the indices to the sub-list
            split_indices[i].append(starting_index + j)

    # =======================
    #     Song Processing
    # =======================

    # If we are collating the output
    if (collated):
        # Create a variable to store the maximum number of times a track was split
        max_split = 0
        # Loop through all tracks
        for split_track in output_tracks:
            # If this track was split more times, make it the new maximum
            max_split = max(max_split, len(split_track))
        # Loop through the sub-lists
        for i in range(max_split):
            # Loop through the initial(outer) lists
            for split_track in output_tracks:
                # If we have already appended all split tracks on this track
                if (i >= len(split_track)):
                    # Skip this loop iteration
                    continue
                # Add track to output file
                output_song.tracks.append(split_track[i])
    # If the tracks should not be collated
    else:
        # Flatten the list of output tracks
        output_tracks = [
            output_track for split_track in output_tracks
            for output_track in split_track
        ]
        # Loop through tracks
        for track in output_tracks:
            # Add track to output file
            output_song.tracks.append(track)

    # If we should normalize the tempo
    if (normalized_tempo > 0):
        # Set the tempo at the beginning of the song
        output_song.tracks[0].insert(
            0, MetaMessage("set_tempo", tempo=normalized_tempo))
    # If we are not normalizing the tempo
    else:

        # If there are no meta only tracks
        if (len(meta_track_indices) == 0):
            # Create a new track
            output_song.tracks.insert(0, MidiTrack())
            # Add its index to the list of meta only tracks
            meta_track_indices.append(0)

        # Create a list to store tempos in
        tempos = []

        # Convert tempo_dict to a 2d list
        for time in tempo_dict:
            tempos.append([time, tempo_dict[time]])

        # Re-use tick_time to store absolute time
        tick_time = 0

        # Create a variable to store the index of the last tempo inserted
        tempo_index = 0

        # Create a variable to store the total length of the track
        total_time = 0

        # Get the length of the track
        for msg in output_song.tracks[meta_track_indices[0]]:
            total_time += msg.time

        # Create a new variable to store if the first meta track is empty
        first_meta_empty = len(output_song.tracks[meta_track_indices[0]]) == 0

        # Loop through the first meta track to which we will add tempo messages(which will be the original length + # of tempo messages when done)
        for i in range(
                len(output_song.tracks[meta_track_indices[0]]) + len(tempos)):
            # If we added all the tempos
            if (tempo_index == len(tempos)):
                # Stop adding them
                break
            # If the meta track is/was empty
            if (first_meta_empty):
                # Insert the message
                output_song.tracks[meta_track_indices[0]].append(
                    MetaMessage("set_tempo",
                                tempo=tempos[tempo_index][1],
                                time=tempos[tempo_index][0] - tick_time))
                # Increment tick_time
                tick_time += output_song.tracks[meta_track_indices[0]][i].time
                # Increment the tempo index
                tempo_index += 1
                # Skip everything below
                continue
            # Increment tick_time
            tick_time += output_song.tracks[meta_track_indices[0]][i].time
            # If we're at the end of the list
            if (i == len(output_song.tracks[meta_track_indices[0]]) - 1):
                # Insert the message
                output_song.tracks[meta_track_indices[0]].append(
                    MetaMessage("set_tempo",
                                tempo=tempos[tempo_index][1],
                                time=tempos[tempo_index][0] - tick_time))
                # Increment the tempo index
                tempo_index += 1
                # Skip everything below
                continue
            # If the tempo is between these messages
            if (tick_time <= tempos[tempo_index][0]
                    and tempos[tempo_index][0] <= tick_time +
                    output_song.tracks[meta_track_indices[0]][i + 1].time):
                # Decrease the delta of the message after it
                output_song.tracks[meta_track_indices[0]][
                    i + 1].time -= tempos[tempo_index][0] - tick_time
                # Insert the message
                output_song.tracks[meta_track_indices[0]].insert(
                    i + 1,
                    MetaMessage("set_tempo",
                                tempo=tempos[tempo_index][1],
                                time=tempos[tempo_index][0] - tick_time))
                # Increment the tempo index
                tempo_index += 1
        # If we didn't add all the tempos
        if (tempo_index < len(tempos) - 1):
            # Loop through the remaining tempos
            for i in range(tempo_index, len(tempos)):
                # Append the remaining messages
                output_song.tracks[meta_track_indices[0]].append(
                    MetaMessage("set_tempo",
                                tempo=tempos[i][1],
                                time=tempos[i][0] - tick_time))
                # Set tick_time
                tick_time = tempos[i][0]

    # If we are assigning patches on the output tracks
    if (index_patches):
        # Loop through all the tracks
        for i, track in enumerate(output_song.tracks):
            # Start off with a patch index of the current track index
            patch_index = i
            # For every meta only track before it, subtract one
            for index in meta_track_indices:
                if index < i:
                    patch_index -= 1
            # Set the patch of this track
            output_song.tracks[i] = set_track_patch(track, patch_index,
                                                    patch_index)

    # If we are creating output channels
    if (create_channels):
        # Loop through all the tracks
        for i, track in enumerate(output_song.tracks):
            # Start off with a channel index of the current track index
            channel_index = i
            # For every meta only track before it, subtract one
            for index in meta_track_indices:
                if index < i:
                    channel_index -= 1
            # Set the channel of each of the tracks
            output_song.tracks[i] = set_channel(track, channel_index)

    # ====================
    #     Song Output
    # ====================

    # Try to save the song
    try:
        # Save the song
        output_song.save(output_file)
        # If it saves, return true
        return True
    except Exception as err:
        # Print the error
        traceback.print_tb(err.__traceback__)
        # If it fails to save, return an exception
        return Exception(
            "Could not save file, try checking the output file path")
Example #11
def build_track(drum_bass_pair: DrumMelodyPair,
                repeats: int = 4,
                tempo: int = None):
    from mido import MidiFile, MidiTrack, Message
    # use a stretch coefficient to adjust the tempo
    k = 1
    # we stretch the default duration of a sixteenth note at a tempo of 120 bpm
    time_quant = MIDO_DEFAULT_QUARTER_LENGTH / 4

    # not the best way to adjust the tempo
    # TODO: adjust the tempo via a set_tempo meta event
    def adjust_tempo():
        if not tempo:
            bpm = drum_bass_pair.tempo
        else:
            bpm = tempo
        bpm_default = 120
        nonlocal k, time_quant
        k = bpm_default / bpm
        time_quant = int((MIDO_DEFAULT_QUARTER_LENGTH / 4) * k)

    # adjust the tempo
    adjust_tempo()

    # stage 1 -- write the drum part to the MIDI file
    midi_file = MidiFile(type=1)  # create the MIDI file
    track = MidiTrack()  # create the drum track in the MIDI file
    track.append(Message('program_change', program=9, time=0))
    track.name = "drum track"
    midi_file.tracks.append(track)

    time = 0

    repeat = repeats
    while repeat > 0:
        repeat -= 1
        for i in drum_bass_pair.drum_pattern:
            if not i:
                time += time_quant
                continue

            for j in i:
                track.append(
                    Message('note_on',
                            note=int(j),
                            velocity=127,
                            time=int(time),
                            channel=9))
                time = 0
            time = time_quant

    # stage 2 -- write the melody to the MIDI file
    track2 = MidiTrack()
    track2.name = "bass track"
    midi_file.tracks.append(track2)
    # change the instrument voice with a dedicated program_change MIDI message
    track2.append(
        Message('program_change',
                program=drum_bass_pair.instrument,
                time=0,
                channel=2))

    time = 0
    repeat = repeats
    last_notes = []
    while repeat > 0:
        repeat -= 1
        for i in (drum_bass_pair.melody):
            if not i:
                # all note durations are shortened by a factor of denominator
                time += time_quant / drum_bass_pair.denominator
                continue

            for j in last_notes:
                track2.append(
                    Message('note_off',
                            note=int(j),
                            velocity=127,
                            time=int(time),
                            channel=2))
                time = 0
            for j in i:
                track2.append(
                    Message('note_on',
                            note=int(j),
                            velocity=127,
                            time=int(time),
                            channel=2))
                time = 0
            # all note durations are shortened by a factor of denominator
            time = time_quant / drum_bass_pair.denominator
            last_notes = i

    for j in last_notes:
        track2.append(
            Message('note_off',
                    note=int(j),
                    velocity=127,
                    time=int(time),
                    channel=2))
        time = 0

    return midi_file
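DrumMelodyPair and MIDO_DEFAULT_QUARTER_LENGTH come from the surrounding project and are not shown. The stand-in below only mirrors the attributes build_track reads (tempo, instrument, denominator, drum_pattern, melody) and assumes the usual 480-tick quarter note, so it is an illustration rather than the project's data model:

from dataclasses import dataclass, field

MIDO_DEFAULT_QUARTER_LENGTH = 480  # assumed value; must be visible in build_track's module


@dataclass
class FakeDrumMelodyPair:  # hypothetical stand-in for DrumMelodyPair
    tempo: int = 100       # bpm
    instrument: int = 33   # General MIDI fingered bass (illustrative)
    denominator: int = 2   # melody steps are half the drum step length
    # each step is a list of note numbers to strike, or an empty list for a rest
    drum_pattern: list = field(default_factory=lambda: [[36], [], [42], [], [38], [], [42], []])
    melody: list = field(default_factory=lambda: [[40], [], [43], [], [45], [], [43], []])


midi_file = build_track(FakeDrumMelodyPair(), repeats=2, tempo=110)
midi_file.save('drum_bass_demo.mid')  # hypothetical output path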