Example no. 1
			current_chord.append(SCALES[scale]['Notes'][note_in_chord] + NOTES.index(base_note) + BASE_MIDI_NUMBER)

		# Bring notes to order
		for i, _ in enumerate(current_chord[1:]):
			note = current_chord[i+1]
			while note < current_chord[i]:
				note += 12
				current_chord[i+1] = note

		# Use inversion to decrease spread of notes
		if auto_inversions_enabled:
			for i, note in enumerate(current_chord):
				while note > BASE_MIDI_NUMBER + NOTES.index(base_note) + 12:
					note -= 12
					current_chord[i] = note

		chord_progression.append({
				'chord': current_chord,
				'duration': duration
			})

	# Add notes to MIDI file
	current_time = time
	for i, chord in enumerate(chord_progression):	
		for pitch in chord['chord']:
			MyMIDI.addNote(track, channel, pitch, current_time, chord['duration'], volume)
		current_time += chord['duration']

	# Write MIDI file
	with open("{}_{}.mid".format(base_note, scale), "wb") as output_file:
	    MyMIDI.writeFile(output_file)
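
This fragment begins mid-way through a chord-building loop; SCALES, NOTES, BASE_MIDI_NUMBER, MyMIDI and the surrounding loop are defined earlier in the original script. A minimal, self-contained sketch of the same idea (stacking scale degrees into triads and writing them with midiutil), using illustrative constants rather than the original's, could look like this:

# Minimal sketch with assumed constants; not the original script's definitions.
from midiutil import MIDIFile

NOTES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
SCALES = {'major': {'Notes': [0, 2, 4, 5, 7, 9, 11]}}  # semitone offsets
BASE_MIDI_NUMBER = 48  # C3

base_note, scale = 'C', 'major'
progression = [(0, 1), (3, 1), (4, 1), (0, 1)]  # (scale degree, duration in beats)

MyMIDI = MIDIFile(1)
MyMIDI.addTempo(0, 0, 120)

time = 0
for degree, duration in progression:
	# Stack root, third and fifth of the scale on the given degree.
	chord = [BASE_MIDI_NUMBER + NOTES.index(base_note)
	         + SCALES[scale]['Notes'][(degree + step) % 7]
	         for step in (0, 2, 4)]
	# Keep the notes in ascending order, as the fragment above does.
	for i in range(1, len(chord)):
		while chord[i] < chord[i - 1]:
			chord[i] += 12
	for pitch in chord:
		MyMIDI.addNote(0, 0, pitch, time, duration, 100)
	time += duration

with open("{}_{}.mid".format(base_note, scale), "wb") as output_file:
	MyMIDI.writeFile(output_file)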
Example no. 2
import random

from midiutil import MIDIFile

degrees  = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
degrees1  = [61, 63, 66, 68, 70, 73, 75, 78, 80, 82]  # MIDI note number

track    = 0
channel  = 0
time     = 0    # In beats
duration = 1    # In beats
tempo    = 120   # In BPM
volume   = 127  # 0-127, as per the MIDI standard

note = []
music = []

MyMIDI = MIDIFile(1)  # One track, defaults to format 1 (tempo track is created
                      # automatically)
MyMIDI.addTempo(track, time, tempo)

for i, pitch in enumerate(degrees1):
    note = [track, channel, pitch, time + i, random.choice(degrees1), time + i, duration, volume]
    music.append(note)
   
    #MyMIDI.addNote(track, channel, pitch, time + i, duration, volume)
    MyMIDI.addNote(track, channel, random.choice(degrees1), time + i, duration, volume)


print(music)

with open("C:/Users/magarami/Desktop/major-scale1.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
Example no. 3
tracknum = 0

for track in vpr["tracks"]:
    if (track["type"] == 1):
        offset += 1
        continue
    time = 0
    mf.addTrackName(tracknum, time, track["name"])
    for tempo in TEMPO:
        mf.addTempo(tracknum, tempo["pos"] / 480, tempo["value"] / 100)
    try:
        for part in track["parts"]:
            try:
                for note in part["notes"]:
                    mf.addNote(tracknum, 0, note["number"],
                               (note["pos"] + part["pos"]) / 480,
                               note["duration"] / 480, note["velocity"])
            except KeyError:
                print(
                    "[WARN] part {}, in track {}, does not have \"notes\" key".
                    format(part["name"], track["name"]))
    except KeyError:
        print("[WARN] track {} does not have \"parts\" key".format(
            track["name"]))
    tracknum += 1

with open(path + ".mid", 'wb') as outf:
    mf.writeFile(outf)

ZIP.close()
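
The fragment above assumes vpr, TEMPO, mf, offset, path and ZIP were prepared earlier. A plausible setup, on the assumption that the .vpr project is a ZIP archive whose sequence data lives at Project/sequence.json and that tempo events sit under the master track, would be:

# Hypothetical setup for the fragment above; archive path and JSON keys are assumptions.
import json
import sys
import zipfile

from midiutil import MIDIFile

path = sys.argv[1]
ZIP = zipfile.ZipFile(path)
vpr = json.loads(ZIP.read("Project/sequence.json"))

# Tempo events: a list of {"pos": ..., "value": ...} dicts, with value in BPM * 100.
TEMPO = vpr["masterTrack"]["tempo"]["events"]

mf = MIDIFile(len(vpr["tracks"]), removeDuplicates=False)
offset = 0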
Example no. 4
from midiutil import MIDIFile

degrees = range(40, 73)  # MIDI note number
track = 0
channel = 0
time = 0  # In beats
duration = 4  # In beats
tempo = 120  # In BPM
# volume   = 19  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(1)  # One track, defaults to format 1 (tempo track is created
# automatically)
MyMIDI.addTempo(track, time, tempo)

for j, volume in enumerate(range(20, 21)):
    for i, pitch in enumerate(degrees):
        MyMIDI.addNote(track, channel, pitch,
                       time + 8 * i + j * len(degrees) * 8, duration, volume)

with open("MIDIproba.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
Example no. 5
                length = 0
                hit_time = 0
                isOn = 0
        else:
            if i in frame:
                volume = min(40 + 4 * volumes_per_frame[j][i], 127)
                hit_time = j
                length += 1
                isOn = 1

    whites_in_time.append(temp)

#Create the music!!!
track = 0
channel = 0
time = 0  # In beats
tempo = 1500  # In BPM; tested by hand
volume = 100  # 0-127, as per the MIDI standard

myMIDI = MIDIFile(1)
myMIDI.addTempo(track, time, tempo)

for i in range(52):
    notes = whites_in_time[i]
    for note in notes:
        myMIDI.addNote(track, channel, white_bin_to_degree[i], note[0],
                       note[1], note[2])

with open("dynamic_vol.mid", "wb") as output_file:
    myMIDI.writeFile(output_file)
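
The analysis code that fills whites_in_time is cut off above. The writing loop only needs one list of (start_beat, length_in_beats, volume) tuples per white piano key, plus the white_bin_to_degree lookup; a stand-in, assuming 52 white keys starting at A0 (MIDI 21), would be:

# Stand-in data for the writing loop; the real values come from the audio analysis above.
WHITE_STEPS = [2, 1, 2, 2, 1, 2, 2]  # semitone gaps between consecutive white keys from A
white_bin_to_degree = [21]           # A0
for k in range(51):
    white_bin_to_degree.append(white_bin_to_degree[-1] + WHITE_STEPS[k % 7])

# One list of (start, length, volume) tuples per white key; here a single middle-C hit.
whites_in_time = [[] for _ in range(52)]
whites_in_time[white_bin_to_degree.index(60)].append((0, 4, 100))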
Example no. 6
from midiutil import MIDIFile


def convert_to_midi(input_dict_list, bpm, filename):
    print("\nConverting predictions to midi...")
    
    midi_mapping = {
        "bass": 36,                     # C2
        "stick": 37,                    # C#2
        "snare": 38,                    # D2
        "floor": 41,                    # F2
        "hihat": 42,                    # F#2
        "hihatp": 44, # pedal           # G#2
        "hihat-open": 46,               # A#2
        "tom": 47,                      # B2
        "crash": 49,                    # C#3
        "ride": 51,                     # D#3
        "bell": 53                      # F3
    }

    # volume_mapping = {
    #     "bass": 120,
    #     "stick": 115,
    #     "snare": 105,
    #     "floor": 110,
    #     "tom": 110,
    #     "hihat": 100,
    #     "hihat-open": 100,
    #     "hihatp": 105, # pedal
    #     "crash": 100,
    #     "ride": 100,
    #     "bell": 100,
    #     "rest": 0,
    # }

    volume_mapping = {
        "bass": 127,
        "stick": 127,
        "snare": 127,
        "floor": 127,
        "tom": 127,
        "hihat": 127,
        "hihat-open": 127,
        "hihatp": 127, # pedal
        "crash": 127,
        "ride": 127,
        "bell": 127,
        "rest": 0,
    }


    # degrees  = [60, 62, 64, 65, 67, 69, 71, 72] # MIDI note number
    track    = 0
    channel  = 9   # GM percussion is channel 10 one-indexed, i.e. 9 in midiutil's zero-indexed numbering
    time     = 0   # In beats
    # duration = 1   # In beats
    duration = 1/4   # In beats
    tempo    = bpm  # In BPM
    volume   = 115 # 0-127, as per the MIDI standard

    MyMIDI = MIDIFile(1) # One track, defaults to format 1 (tempo track
                         # automatically created)
    if bpm==0:
        tempo = 1 # prevent division by zero
    
    MyMIDI.addTempo(track, time, tempo)

    # for pitch in degrees:
    #     MyMIDI.addNote(track, channel, pitch, time, duration, volume)
    #     time = time + 1


    """
        Try to add multiple notes at a time: Loop addNote
        while instruments identified by the name are not exhausted
        
        To use foot hihat, if there are already four instruments, use "hihatp"
    """

    # input_dict_list is a list of dictionaries
    # The dictionaries have keys "1", "e", "&", "a"
    # where the values are class names, eg. "bass tom hihat ride"

    rhythm_start_time_dict = {
        "1": 0,
        "2": 0,
        "3": 0,
        "4": 0,
        # "5": 0,
        "e": 1/16,
        "&": 1/8,
        "a": 3/16
    }

    print("\nInside midi_ops...")
    for quarter_dict in input_dict_list:
        for key, val in quarter_dict.items():
            print("{}: {}".format(key, val))

    # halt()

    for quarter_dict in input_dict_list:
        prev_key = "0 1" # initialization of previous rhythm

        for key, val in quarter_dict.items():

            # Advance the time depending on the rhythm, identified by the key
            # prev_rhy = rhythm_start_time_dict[prev_key]
            # curr_rhy = rhythm_start_time_dict[key]
            # time = time + curr_rhy - prev_rhy

            split_key = key.split()
            # split_prev_key = prev_key.split()

            # curr_time = int(split_key[0]) + rhythm_start_time_dict[split_key[1]]
            # prev_time = int(split_prev_key[0]) \
                # + rhythm_start_time_dict[split_prev_key[1]]
            # time += curr_time - prev_time

            quarter_id = split_key[0] # e.g. "0"
            sixteenth_id = split_key[1] # e.g. "a"

            time = (int(quarter_id) * 0.25 \
                + rhythm_start_time_dict[sixteenth_id]) * 4

            prev_key = key # update prev_key

            has_hihat = False # add the hihat last
            has_hihat_open = False
            has_bass = False
            four_hits_at_a_time = False
            hit_count = 0

            for instrument in val.split():
                volume = volume_mapping[instrument]

                if instrument == "hihat":
                    has_hihat = True
                elif instrument == "hihat-open":
                    has_hihat_open = True
                elif instrument == "rest":
                    hit_count += 1
                else:
                    if instrument == "bass":
                        has_bass = True

                    pitch = midi_mapping[instrument]
                    MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                    hit_count += 1
            # End for every instrument hit per rhythm

            # Add hihat last if there's hihat
            if has_hihat_open or has_hihat:
                if hit_count == 3: # If there's a limb available
                    pitch = midi_mapping["hihatp"] # No other hihat option
                    MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                elif hit_count == 2: # If two limbs were occupied
                    if has_bass: # but one of that limb is a bass
                        # A hand is available, so option if open is available
                        if has_hihat_open:
                            pitch = midi_mapping["hihat-open"]
                            MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                        else: # Closed hihat
                            pitch = midi_mapping["hihat"]
                            MyMIDI.addNote(track, channel, pitch, 
                                    time, duration, volume)
                    else: # If two instruments were hit, and not one of them
                        # is a bass. It means that both hands were already
                        # used. Therefore, we can only use the left foot
                        # to play the hihat.
                        pitch = midi_mapping["hihatp"] # No other hihat option
                        MyMIDI.addNote(track, channel, pitch, 
                                                    time, duration, volume)
                else: # At most one limb will be occupied.
                    # Therefore, at least one limb (required) will be able
                    # to play the hihat.
                    if has_hihat_open:
                        pitch = midi_mapping["hihat-open"]
                        MyMIDI.addNote(track, channel, pitch, 
                                time, duration, volume)
                    else: # Closed hihat
                        pitch = midi_mapping["hihat"]
                        MyMIDI.addNote(track, channel, pitch, 
                                time, duration, volume)

            # End adding hihat
        # End for each rhythm
    # End for all the hits on each rhythm

    with open(filename + ".mid", "wb") as output_file:
        MyMIDI.writeFile(output_file)
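
A short usage example, based on how convert_to_midi parses its input: each dictionary maps "quarter sixteenth" keys such as "0 1" or "2 &" to a space-separated list of drum names from midi_mapping. The values here are illustrative only:

# Illustrative input; real dictionaries come from the prediction pipeline.
one_bar = {
    "0 1": "bass hihat",
    "0 &": "hihat",
    "1 1": "snare hihat",
    "1 &": "hihat",
    "2 1": "bass hihat",
    "2 &": "bass hihat",
    "3 1": "snare hihat",
    "3 &": "hihat-open",
}
convert_to_midi([one_bar], bpm=100, filename="drum_demo")  # writes drum_demo.mid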
Example no. 7
from copy import deepcopy as dcp  # assumed alias for the dcp() call below

from midiutil import MIDIFile


class MidiHelper:
    __channel = 9
    __drum_dict = {'kick': 36, 'snare': 38, 'hi_close': 42, 'hi_open': 46}
    __len_dict = {'full': 4, 'half': 2, 'quarter': 1, 'eighth': 0.5, 'sixteenth': 0.25}
    __vel_dict = {'ppp': 16, 'pp': 33, 'p': 49, 'mp': 64, 'mf': 80, 'f': 96, 'ff': 112, 'fff': 127}
    __current_time = 0
    __duration_last_note = 0

    def __init__(self):
        self.__midi = MIDIFile(numTracks=1, file_format=1)
        self.__midi.addProgramChange(tracknum=0, channel=self.__channel, program=115, time=0)
        pass

    def load_sample(self):
        pass

    def get_current_time(self):
        return self.__current_time

    def get_duration_last_note(self):
        return self.__duration_last_note

    def get_dict_duration(self):
        return self.__len_dict

    def add_cymbals(self, cymbals_bit, actents):
        if cymbals_bit != 0:
            full_len = dcp(self.__current_time)
            self.__current_time = 0
            full_len = full_len * cymbals_bit / 4
            if cymbals_bit == 4:
                key = 'quarter'
            elif cymbals_bit == 8:
                key = 'eighth'
            elif cymbals_bit == 16:
                key = 'sixteenth'
            elif cymbals_bit == 2:
                key = 'half'
            for a, note in enumerate(range(int(full_len))):
                if a % cymbals_bit == 0 and actents:
                    self.put_note(name_note='hi_open', rest_time=0, length=key, volume="ff")
                else:
                    self.put_note(name_note='hi_close', rest_time=0, length=key, volume='pp')
        else:
            pass

    def set_tempo(self, tempo):
        print(int(tempo))
        self.__midi.addTempo(time=0, track=0, tempo=int(tempo))

    def put_note(self, name_note, rest_time, length, volume='mf'):
        self.__duration_last_note = self.__len_dict[length]
        self.__midi.addNote(track=0,
                            channel=self.__channel,
                            pitch=self.__drum_dict[name_note],
                            time=self.__current_time + rest_time,
                            duration=self.__len_dict[length],
                            volume=self.__vel_dict[volume])
        self.__current_time = self.__current_time + self.__duration_last_note + rest_time

    def save_midi(self, name):
        with open(name + ".mid", 'wb') as output_file:
            print('save in ' + name)
            self.__midi.writeFile(output_file)
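
A brief usage sketch for MidiHelper (with the deepcopy and midiutil imports added above):

helper = MidiHelper()
helper.set_tempo(120)

# A bar of kick/snare on quarter notes...
for name in ('kick', 'snare', 'kick', 'snare'):
    helper.put_note(name_note=name, rest_time=0, length='quarter', volume='f')

# ...then eighth-note hi-hats over the same span, accenting the first hit.
helper.add_cymbals(cymbals_bit=8, actents=True)

helper.save_midi('drum_sketch')  # writes drum_sketch.mid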
Example no. 8
duration = 1    # In beats
tempo    = 60   # In BPM
volume   = 100  # 0-127, as per the MIDI standard

myMIDI = MIDIFile(6)
myMIDI.addTempo(CHORD_TRACK, time, tempo)

# chords
print("Generating chords")
myMIDI.addProgramChange(CHORD_TRACK, CHORD_CHANNEL, 0, CHORD_INSTR)
for i, chord in enumerate(cycle(chords)):
    time = i * 2
    if time == END:
        break
    for note in chord:
        myMIDI.addNote(CHORD_TRACK, CHORD_CHANNEL, note, time, 2, volume - 20)

# drums
print("Generating drums")
for i in range(KICK_START, END):
    myMIDI.addNote(DRUM_TRACK, DRUM_CHANNEL, KICK_INSTR, i, 1, volume - 20)

for i in range(SNARE_START, END):
    myMIDI.addNote(DRUM_TRACK, DRUM_CHANNEL, SNARE_INSTR, i+0.5, 1, volume - 20)


# bass
print("Generating bass")
myMIDI.addProgramChange(BASS_TRACK, BASS_CHANNEL, 0, BASS_INSTR)
for i, note in enumerate(cycle(bass)):
    time = BASS_START + i * 2
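
The constants and sequences used in this fragment (CHORD_TRACK, KICK_INSTR, chords, bass, END and so on) are defined elsewhere, and the bass loop is cut off. A self-contained sketch of the same cycle-until-END pattern, with assumed values, is:

# Assumed constants for illustration; the original script defines its own.
from itertools import cycle
from midiutil import MIDIFile

CHORD_TRACK, DRUM_TRACK, BASS_TRACK = 0, 1, 2
CHORD_CHANNEL, DRUM_CHANNEL, BASS_CHANNEL = 0, 9, 1   # channel 9 = GM percussion
CHORD_INSTR, BASS_INSTR = 0, 33                        # acoustic grand, fingered bass
KICK_INSTR, SNARE_INSTR = 36, 38                       # GM drum note numbers
KICK_START = SNARE_START = BASS_START = 0
END = 16                                               # length of the piece in beats

chords = [[60, 64, 67], [57, 60, 64]]  # C major / A minor triads
bass = [36, 33]
volume = 100
time = 0

myMIDI = MIDIFile(6)
myMIDI.addTempo(CHORD_TRACK, time, 60)

myMIDI.addProgramChange(CHORD_TRACK, CHORD_CHANNEL, 0, CHORD_INSTR)
for i, chord in enumerate(cycle(chords)):
    time = i * 2
    if time >= END:
        break
    for note in chord:
        myMIDI.addNote(CHORD_TRACK, CHORD_CHANNEL, note, time, 2, volume - 20)

for i in range(KICK_START, END):
    myMIDI.addNote(DRUM_TRACK, DRUM_CHANNEL, KICK_INSTR, i, 1, volume - 20)
for i in range(SNARE_START, END):
    myMIDI.addNote(DRUM_TRACK, DRUM_CHANNEL, SNARE_INSTR, i + 0.5, 1, volume - 20)

myMIDI.addProgramChange(BASS_TRACK, BASS_CHANNEL, 0, BASS_INSTR)
for i, note in enumerate(cycle(bass)):
    time = BASS_START + i * 2
    if time >= END:
        break
    myMIDI.addNote(BASS_TRACK, BASS_CHANNEL, note, time, 2, volume)

with open("loop_sketch.mid", "wb") as output_file:
    myMIDI.writeFile(output_file)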
Example no. 9
import json

from midiutil import MIDIFile

degrees = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
track = 0
channel = 0
time = 0  # 1 = 1/4
duration = 0.25  # 1 = 1/4
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard

midiFile = MIDIFile()

midiFile.addTempo(track, 0, 60)  #track, time, tempo

# for i, pitch in enumerate(degrees):
#     midiFile.addNote(track, channel, pitch, time + i/4, duration, volume)

# with open("C:\\Users\\yves\\Google Drive\\AIT\\X. Other\\Frusciantifier\\Frusciantifier\\major-scale.mid", "wb") as output_file:
#     midiFile.writeFile(output_file)

with open(
        "C:\\Users\\yves\\Google Drive\\AIT\\X. Other\\Frusciantifier\\Frusciantifier\\Song.json",
        "r") as Song:
    notes = json.load(Song)
    for note in notes:
        midiFile.addNote(track, channel, note['Degree'], note['Time'],
                         note['Duration'], volume)

with open(
        "C:\\Users\\yves\\Google Drive\\AIT\\X. Other\\Frusciantifier\\Frusciantifier\\finalmidi.mid",
        "wb") as output_file:
    midiFile.writeFile(output_file)
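
The script reads Song.json as a list of objects with 'Degree', 'Time' and 'Duration' fields; a minimal file of that shape can be produced like this (illustrative values):

import json

notes = [
    {"Degree": 60, "Time": 0.0, "Duration": 0.5},  # C4
    {"Degree": 64, "Time": 0.5, "Duration": 0.5},  # E4
    {"Degree": 67, "Time": 1.0, "Duration": 1.0},  # G4
]
with open("Song.json", "w") as song_file:
    json.dump(notes, song_file, indent=2)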
Example no. 10
    def handle(self, argv=None):
        """
        Main function.

        Parses command, load settings and dispatches accordingly.

        """
        help_message = "Please supply a chord progression. See --help for more options."
        parser = argparse.ArgumentParser(
            description=
            'chords2midi - Create MIDI files from written chord progressions.\n'
        )
        parser.add_argument('progression',
                            metavar='U',
                            type=str,
                            nargs='*',
                            help=help_message)
        parser.add_argument('-b',
                            '--bpm',
                            type=int,
                            default=160,
                            help='Set the BPM (default 160)')
        parser.add_argument('-t',
                            '--octave',
                            type=int,
                            default=4,
                            help='Set the octave (default 4)')
        parser.add_argument('-i',
                            '--input',
                            type=str,
                            default=None,
                            help='Read from an input file.')
        parser.add_argument('-k',
                            '--key',
                            type=str,
                            default='C',
                            help='Set the key (default C)')
        parser.add_argument('-n',
                            '--notes',
                            type=int,
                            default=99,
                            help='Notes in each chord (default all)')
        parser.add_argument('-d',
                            '--duration',
                            type=float,
                            default=1.0,
                            help='Set the chord duration (default 1)')
        parser.add_argument(
            '-H',
            '--humanize',
            type=float,
            default=0.0,
            help=
            'Set the amount to "humanize" (strum) a chord, in ticks - try .11 (default 0.0)'
        )
        parser.add_argument(
            '-o',
            '--output',
            type=str,
            help=
            'Set the output file path. Default is the current key and progression in the current location.'
        )
        parser.add_argument(
            '-O',
            '--offset',
            type=float,
            default=0.0,
            help='Set the amount to offset each chord, in ticks. (default 0.0)'
        )
        parser.add_argument('-v',
                            '--version',
                            action='store_true',
                            default=False,
                            help='Display the current version of chords2midi')

        args = parser.parse_args(argv)
        self.vargs = vars(args)

        if self.vargs['version']:
            version = pkg_resources.require("chords2midi")[0].version
            print(version)
            return

        # Support `c2m I III V` and `c2m I,III,V` formats.
        if not self.vargs['input']:
            if len(self.vargs['progression']) < 1:
                print("You need to supply a progression! (ex I V vi IV)")
                return
            if len(self.vargs['progression']) < 2:
                progression = self.vargs['progression'][0].split(',')
            else:
                progression = self.vargs['progression']
        else:
            with open(self.vargs['input']) as fn:
                content = ''.join(fn.readlines()).strip()
                content = content.replace('\n', ' ').replace(',', '  ')
                progression = content.split(' ')

        track = 0
        channel = 0
        ttime = 0
        duration = self.vargs['duration']  # In beats
        tempo = self.vargs['bpm']  # In BPM
        volume = 100  # 0-127, as per the MIDI standard
        bar = 0
        offset = self.vargs['offset']

        midi = MIDIFile(1)
        midi.addTempo(track, ttime, tempo)

        ##
        # Main generator
        ##

        # We do this to allow blank spaces
        progression_chords = []
        for chord in progression:
            progression_chord = to_chords(chord, self.vargs['key'])
            if progression_chord == []:
                progression_chord = [None]
            progression_chords.append(progression_chord[0])

        for chord in progression_chords:
            if chord is not None:
                humanize_amount = self.vargs['humanize']
                for i, note in enumerate(chord):
                    pitch = pychord.utils.note_to_val(note) + (
                        self.vargs['octave'] * 12)
                    midi.addNote(track=track,
                                 channel=channel,
                                 pitch=pitch,
                                 time=offset + bar + humanize_amount,
                                 duration=duration,
                                 volume=volume)

                    humanize_amount = humanize_amount + self.vargs['humanize']
                    if i + 1 >= self.vargs['notes']:
                        break
            bar = bar + 1

        ##
        # Output
        ##

        if self.vargs['output']:
            filename = self.vargs['output']
        elif self.vargs['input']:
            filename = self.vargs['input'].replace('.txt', '.mid')
        else:
            filename = self.vargs['key'] + '-' + '-'.join(
                progression) + '-' + str(self.vargs['bpm']) + '.mid'
            if os.path.exists(filename):
                filename = self.vargs['key'] + '-' + '-'.join(
                    progression) + '-' + str(self.vargs['bpm']) + '-' + str(
                        int(time.time())) + '.mid'

        with open(filename, "wb") as output_file:
            midi.writeFile(output_file)
Example no. 11
import sys
import xml.dom.minidom
from pathlib import PurePath

from midiutil import MIDIFile


def getNoteData(note, key):
    return int(note.getElementsByTagName(key)[0].firstChild.data)


path = PurePath(sys.argv[1])

vsqx = xml.dom.minidom.parse(str(path))

TEMPO = int(
    vsqx.getElementsByTagName('tempo')[0].childNodes[1].firstChild.data[:-2])

mf = MIDIFile(len(vsqx.getElementsByTagName('vsTrack')),
              removeDuplicates=False)

time = 0

for trackNo, track in enumerate(vsqx.getElementsByTagName('vsTrack')):
    mf.addTrackName(trackNo, time, "Track {}".format(str(trackNo)))
    for note in track.getElementsByTagName('note'):
        mf.addNote(trackNo, 0, getNoteData(note, 'n'),
                   getNoteData(note, 't') / 480,
                   getNoteData(note, 'dur') / 480, getNoteData(note, 'v'))
    mf.addTempo(trackNo, time, TEMPO)

with open(str(path.parents[0]) + '\\' + path.stem + ".mid", 'wb') as outf:
    mf.writeFile(outf)
Example no. 12
tempo = 100
volume = 100
bVerify = "no"
sVerify = "no"
aVerify = "no"
tVerify = "no"

MyMIDI = MIDIFile(2)
MyMIDI.addTempo(1, time, tempo)

for pitch in bList:
    if time != 0 and bVerify == "no":
        time = 0
    MyMIDI.addNote(1, channel, pitch, time, duration, volume)
    time = time + 1
    bVerify = "go"
for pitch in sList:
    if time != 0 and sVerify == "no":
        time = 0
    MyMIDI.addNote(0, channel, pitch, time, duration, volume)
    time = time + 1
    sVerify = "go"
for pitch in aList:
    if time != 0 and aVerify == "no":
        time = 0
    MyMIDI.addNote(0, channel, pitch, time, duration, volume)
    time = time + 1
    aVerify = "go"
for pitch in tList:
Example no. 13
import random

from midiutil import MIDIFile

track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard

volume_choices = [40, 60, 80, 100]

MyMIDI = MIDIFile(1)  # One track, defaults to format 1 (tempo track
# automatically created)
MyMIDI.addTempo(track, time, tempo)

for note in degrees:
    pitch = note.get("pitch")
    for i in range(1):
        volume = random.choice(volume_choices)
        MyMIDI.addNote(
            track=track,
            channel=channel,
            pitch=pitch + i,
            time=time,
            duration=duration,
            volume=volume,
        )
    time = time + 1

with open("major-scale.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
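
The `degrees` sequence is not defined in this snippet; it is consumed as mappings with a "pitch" key, so a minimal stand-in (illustrative values only) is:

# Stand-in for the missing `degrees` definition.
degrees = [{"pitch": p} for p in (60, 62, 64, 65, 67, 69, 71, 72)]  # C major scale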
Example no. 14
from midiutil import MIDIFile

track = 0
channel = 9
time = 0
duration = 1
bpm = 200
velocity = 100

MyMIDI = MIDIFile(2)
MyMIDI.addTempo(track, time, bpm)

MyMIDI.addTimeSignature(track, 0, 7, 2, 24)

MyMIDI.addNote(track, channel, 35, 0, duration, velocity)
MyMIDI.addNote(track, channel, 38, 3 * duration, duration, velocity)
MyMIDI.addNote(track, channel, 38, 5 * duration, duration, velocity)
for i in range(7):
    MyMIDI.addNote(track, channel, 42, (time + i) * duration, duration,
                   velocity)

with open("beat.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
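
midiutil's addTimeSignature expects the denominator as a power of two, so the (7, 2, 24) arguments above declare 7/4 time with a metronome click every quarter note (24 MIDI clocks). For comparison, a 6/8 signature with a dotted-quarter click would be:

from midiutil import MIDIFile

demo = MIDIFile(1)
demo.addTempo(0, 0, 120)
# Denominator 8 is 2**3; 36 MIDI clocks per metronome click gives a dotted-quarter pulse.
demo.addTimeSignature(0, 0, 6, 3, 36)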
Example no. 15
class Pat2Midi:
    """
    class to convert Pattern to Midi
    """
    def __init__(self,
                 num_tracks: int = 1,
                 remove_duplicates: bool = True,
                 deinterleave: bool = True,
                 file_format: int = 1):
        """

        :param num_tracks: number of tracks (default: 1)
        :param remove_duplicates: remove notes that start at the same time on the same
               channel with the same pitch (default: True)
        :param deinterleave: clean up two note-ons with no note-off in between (default: True)
        :param file_format: 1 or 2 (default: 1)
        """
        self.midiFile = MIDIFile(numTracks=num_tracks,
                                 removeDuplicates=remove_duplicates,
                                 deinterleave=deinterleave,
                                 adjust_origin=False,
                                 file_format=file_format)
        self.last_set_tempo = [Defaults.tempo for _ in range(16)
                               ]  # set every track to default tempo
        self.set_tempo(Defaults.tempo, 0)
        self.last_set_cc = [[None for _ in range(NO_OF_CONTROLLERS)]
                            for _ in range(NO_OF_TRACKS)]
        self.note2midi = Note2Midi()

    def set_tempo(self, tempo=100, time=0):
        """

        :param tempo: bpm (default: 100)
        :param time: time at which the tempo change should be inserted in the midi stream (default: 0)
        """
        self.midiFile.addTempo(track=0, time=time, tempo=tempo)
        self.last_set_tempo[0] = tempo
        self.last_set_cc = [[None for _ in range(NO_OF_CONTROLLERS)]
                            for _ in range(NO_OF_TRACKS)]

    def add_phrase(self, phrase: Phrase, track=0, channel=0, start_time=0):
        """

        :param phrase: a Phrase containing patterns and animations
        :param track: default: 0
        :param channel: default: 0
        :param start_time: time at which the phrase should be inserted default: 0
        :return: total duration of the inserted phrase
        """
        for event in phrase:
            # set tempo events only if they changed since last time
            # handle note events
            if PP.NOTE in event:
                if event[PP.TEMPO] != self.last_set_tempo[track]:
                    self.midiFile.addTempo(
                        track, start_time + phrase.generated_duration(),
                        event[PP.TEMPO])
                    self.last_set_tempo[track] = event[PP.TEMPO]
                # set notes always
                if isinstance(event[PP.NOTE], Pchord):
                    for n in event[PP.NOTE].notes:
                        try:
                            intnote = int(n)
                        except ValueError:
                            intnote = self.note2midi.lookup(n)
                            if intnote == REST:
                                continue

                        self.midiFile.addNote(
                            track=track,
                            channel=channel,
                            pitch=intnote,
                            time=start_time + phrase.generated_duration() +
                            event[PP.LAG],
                            duration=event[PP.DUR] * event[PP.PLAYEDDUR],
                            volume=int(event[PP.VOL]),
                            annotation=None)

                else:

                    try:
                        intnote = int(event[PP.NOTE])
                    except ValueError:
                        intnote = self.note2midi.lookup(event[PP.NOTE])
                        if intnote == REST:
                            continue

                    self.midiFile.addNote(
                        track=track,
                        channel=channel,
                        pitch=intnote,
                        time=start_time + phrase.generated_duration() +
                        event[PP.LAG],
                        duration=event[PP.DUR] * event[PP.PLAYEDDUR],
                        volume=int(event[PP.VOL]),
                        annotation=None)

                self.handle_control_changes(channel, event, phrase, start_time,
                                            track)

            # handle controller events (only if they changed since last time)
            else:
                self.handle_control_changes(channel, event, phrase, start_time,
                                            track)

        return phrase.generated_duration()

    def handle_control_changes(self, channel, event, phrase, start_time,
                               track):
        """
        iterate over all control changes in the phrase and add them to the midi file
        :param channel: midi channel
        :param event: python dict containing phrase properties
        :param phrase:
        :param start_time: time offset
        :param track: midi track id
        :return:
        """
        for cc in range(NO_OF_OFFICIAL_CONTROLLERS):
            if PP.ctrl_dur_key(cc) in event:
                time = start_time + phrase.generated_ctrl_duration(cc)
                value = event[PP.ctrl_val_key(cc)]
                if value is not None:
                    self.midiFile.addControllerEvent(track=track,
                                                     channel=channel,
                                                     time=time,
                                                     controller_number=cc,
                                                     parameter=value)
        for cc in [MidiControlChanges.PitchWheel]:
            if PP.ctrl_dur_key(cc) in event:
                time = start_time + phrase.generated_ctrl_duration(cc)
                pwvalue = event[PP.ctrl_val_key(cc)]
                if pwvalue is not None:
                    self.midiFile.addPitchWheelEvent(track=track,
                                                     channel=channel,
                                                     time=time,
                                                     pitchWheelValue=pwvalue)

    def add_phrases(self, list_of_phrase, track=0, channel=0, start_time=0):
        """

        :param list_of_phrase: a list of Phrase
        :param track: default: 0
        :param channel: midi channel, default: 0
        :param start_time: default: 0
        :return: total duration of piece from begin until end of list of phrases
        """
        time_delta = 0
        for phrase in list_of_phrase:
            duration = self.add_phrase(phrase, track, channel,
                                       start_time + time_delta)
            time_delta += duration
        return start_time + time_delta

    def write(self, filename):
        """
        write to midi file
        :param filename: filename
        """
        try:
            with open(filename, "wb") as f:
                self.midiFile.writeFile(fileHandle=f)
        except Exception as e:
            print("we hit a SNAFU while writing to {0}: {1}".format(
                filename, e))
Example no. 16
def make_song(treble: list, bass: list, npm: int, song_name: str):
    """
    Main function for writing the song to a quantum computer.

    treble: A list of all treble notes. Each note is the length of the shortest note
    (eighth note in the case of Carol of the Bells)

    bass: A list of all bass notes. Each note is the length of the shortest note
    (eighth note in the case of Carol of the Bells)

    npm: Number of notes per minute (similar to bpm)

    song_name: A string of what the song is called. Used to name the file amongst other
    things.

    Returns:
        Nothing
    """
    # Setup for the MIDI file creation
    track = 0
    channel = 0
    duration = 1
    MyMIDI = MIDIFile(1)
    tempo = npm
    time = 0
    MyMIDI.addTempo(track, time, tempo)

    # Setup for the Video creation
    images = []
    initial_image = cv2.imread("carolbells_prob_0.png")
    height, width, layers = initial_image.shape
    video_name = song_name + "_quantum.avi"
    video = cv2.VideoWriter(video_name, 0, npm / 60, (width, height))

    # Setup for the song itself
    # full_song ends up being a list of concurrent notes (ex. ["AD", "BA", ...])
    full_song = [i + j for i, j in zip(treble, bass)]
    full_song = [x.upper() for x in full_song]
    full_song = [''.join(ch for ch, _ in itertools.groupby(i)) for i in full_song]
    transition("A", full_song[0]) # sets the song up for the beginning.

    # Makes all the files
    for count, i in enumerate(full_song):
        if count != len(full_song) - 1:
            # writes the actual circuit
            transition(full_song[count], full_song[count + 1])
            circuit.measure(qreg_q[0], creg_c[0])
            circuit.measure(qreg_q[2], creg_c[2])
            circuit.measure(qreg_q[1], creg_c[1])

            # runs the job on a QC simulator and stores data
            job = simulator.run(circuit, shots=1000)
            result = job.result()
            counts = result.get_counts(circuit)

            # Ensures that all qubits are represented (even if they have no amplitude)
            for x in qubit_list:
                if x not in counts:
                    counts[x] = 0
            # Keeps the qubits in the correct order (|000>, |001>, etc.)
            ordered_counts = collections.OrderedDict(sorted(counts.items()))

            # Plots the probabilities found from the simulated job
            plt.figure(figsize=(5, 2))
            plt.grid(axis='y', linestyle='--')
            plt.bar(ordered_counts.keys(), ordered_counts.values(), width=.25)
            plt.ylim(top=1100)
            plt.title(song_name)
            plt.savefig(song_name + "_prob_" + str(count))
            plt.clf()

            #Plots the circuit itself
            drawing = circuit.draw(output="mpl")
            drawing.savefig(song_name + "_circuit_" + str(count))
            plt.clf()
            plt.close('all')

            # Gets rid of the measurements for the next loop
            circuit.data.pop()
            circuit.data.pop()
            circuit.data.pop()
            # Makes SURE that there are no measurements hanging around
            for item in circuit.data:
                for thing in item:
                    if type(thing)==qiskit.circuit.measure.Measure:
                        circuit.data.remove(item)

            # More setup for the video compilation
            frame = cv2.imread(song_name + "_prob_" + str(count) + ".png")
            images.append(frame)

            # Writes individual notes to the MIDI file
            for i in counts:
                if int(counts[i])!=0:
                    volume = 100
                else:
                    volume = 0
                pitch = midi_dict[note_dict[i]]
                MyMIDI.addNote(track, channel, pitch, time, duration, volume)
            time+=1

    # Finishes writing the MIDI file
    with open(song_name+".mid", "wb") as output_file:
        MyMIDI.writeFile(output_file)

    # Finishes writing the Video
    for image in images:
        video.write(image)
    cv2.destroyAllWindows()
    video.release()
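
note_dict and midi_dict are defined outside this function: counts is keyed by three-qubit bitstrings, so the lookup midi_dict[note_dict[i]] needs one note name per basis state and one MIDI pitch per note name. A hypothetical pair of mappings of that shape:

# Hypothetical mappings; the real ones live elsewhere in the original script.
note_dict = {'000': 'C', '001': 'D', '010': 'E', '011': 'F',
             '100': 'G', '101': 'A', '110': 'B', '111': 'C2'}
midi_dict = {'C': 60, 'D': 62, 'E': 64, 'F': 65,
             'G': 67, 'A': 69, 'B': 71, 'C2': 72}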
Example no. 17
                                        recordNumber) + "_" + str(
                                            recordMidiID) + "_" + noteName
                                    midiFileData.addTrackName(
                                        trackToUse, 0, trackName)
                                    trackCounter += 1

                                    if (trackCounter >= trackLimit):
                                        debugPrint("Resetting track counter")
                                        trackCounter = 0

                                    debugPrint("trackToUse {} {}".format(
                                        trackToUse, trackName))

                            midiFileData.addNote(trackToUse, midiChl,
                                                 midiEvent.note,
                                                 midiEvent.time - baseTime,
                                                 midiEvent.duration,
                                                 midiEvent.velocity)
                            debugPrint(midiEvent.__dict__)
                            midiEvent.trackUsed = trackToUse
                            lastMIDIEvent = midiEvent
                            midiSection.bHasMIDI = True

                        if (extendedBytes > 0):
                            debugPrint('Found extended bytes {} '.format(
                                hex(extendedBytes)))

                    else:  # Did not find expected 0x8x before note duration data
                        quitWithError('ERROR: Unknown command {} ({})'.format(
                            midiCmd, hex(midiCmd)))
                elif ((midiCmd >= 0x00 and midiCmd <= 0x0A) or midiCmd
Example no. 18
#!/usr/bin/env python

from midiutil import MIDIFile

# degrees  = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note number
degrees = [65] * 3600
track = 0
channel = 0
time = 0  # In beats
duration = 1  # In beats
tempo = 60  # In BPM
volume = 100  # 0-127, as per the MIDI standard

MyMIDI = MIDIFile(
    1,
    file_format=0)  # One track; file_format=0 writes a single-track (format 0) file
                    # instead of the default format 1
MyMIDI.addTempo(track, time, tempo)

for i, pitch in enumerate(degrees):
    MyMIDI.addNote(track, channel, pitch, time + i, duration, volume)

with open("test.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
Example no. 19

def otv_str(x):
    return {
        0: '',
        1: '1',
        2: '2',
        3: '3',
        4: '4',
        5: '5',
        6: '6',
        7: '7',
        8: '8',
        9: '9',
        10: '10'
    }.get(x // 12, '11')  # default (kept as a string so switch_pth can concatenate)


def switch_pth(x):
    return pth_str(x) + otv_str(x)


for i in range(Song_len):
    #print(mySong_drt[i])
    MyMIDI.addNote(track, channel, mySong_pth[i] + pitch_min, time,
                   drt_dic[mySong_drt[i]] * 0.004, vel_dic[mySong_vel[i]])
    time = time + drt_dic[mySong_drt[i]] * 0.004
#    print(drt_dic[mySong_drt[i]])
with open("BrunoMars_11.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)
Example no. 20
def makemidi(img, midi, lo=74, hi=110):
    # Clamp the velocity bounds to the valid MIDI range.
    lo = max(int(lo), 0)
    hi = min(int(hi), 127)

    filename = img
    if filename.endswith('.png'):
        a= cv2.imread(filename, cv2.IMREAD_UNCHANGED)

        trans=a[:,:,3]==0
        a[trans]= [255,255,255,255]

        a=cv2.cvtColor(a, cv2.COLOR_BGRA2BGR)
    else:
        a= cv2.imread(filename, cv2.IMREAD_UNCHANGED)


    h, w = a.shape[:2]

    scale=w/h

    w=int(127*scale)

    dim= (w, 127)
    res= cv2.resize( a, dim)

    row=[]
    col=[]
    vel=[]
    for i in range(res.shape[1]):
        for j in range(res.shape[0]):
            r=res[j][i]
            
            row.append(int(pick(r)))
        col.append(int(valmap(row[random.randrange(len(row))], 0, 255, 36, 72)))
        vel.append(int(valmap(row[random.randrange(len(row))], 0, 255, lo, hi)))
    notes = []

    music_scale=get_octave(major, 0, 127)
    for x in col:
        n=snap(music_scale, x)
        notes.append(n)

    from midiutil import  MIDIFile

    MyMIDI = MIDIFile(1)

    t=random.randint(120, 180)
    MyMIDI.addTempo(0, 0, t*scale)


    for i, (note, velo) in enumerate(zip(notes, vel)):
        try:
            MyMIDI.addNote(0, 0, note, i/2, random.randint(1,2), velo)
        except :
            MyMIDI.addNote(0, 0, note, i/2, 1, velo)


    with open(midi, "wb") as output_file:
        MyMIDI.writeFile(output_file)
    exit("write {} successful".format(midi))
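
pick, valmap, snap, get_octave and major are helpers defined elsewhere in the original module. Plausible stand-ins, assuming valmap is a linear range mapping and snap chooses the closest pitch in the scale, are:

# Plausible stand-ins for the undefined helpers; assumptions, not the original code.
major = [0, 2, 4, 5, 7, 9, 11]  # semitone offsets of a major scale

def pick(rgb):
    """Reduce a BGR pixel to a single 0-255 brightness value."""
    return sum(int(c) for c in rgb[:3]) / 3

def valmap(value, in_lo, in_hi, out_lo, out_hi):
    """Linearly map value from [in_lo, in_hi] onto [out_lo, out_hi]."""
    return out_lo + (out_hi - out_lo) * (value - in_lo) / (in_hi - in_lo)

def get_octave(scale, lo, hi):
    """Expand scale offsets into every octave between lo and hi (inclusive)."""
    return [n for n in range(lo, hi + 1) if n % 12 in scale]

def snap(scale_notes, pitch):
    """Snap a pitch to the nearest note of the scale."""
    return min(scale_notes, key=lambda n: abs(n - pitch))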
Example no. 21
  duration = int(duration[1:])

  if note == 0:
    if duration > 2000:
      duration = 2000
    currTime += duration
    continue
  
  duration += 150
  
  #print(duration)

  vsqxJson['stream'].append({u'velocity': 64, u'tick': 1 , u'sub_type': u'noteOn', u'channel': 1, u'note_num': note})
  vsqxJson['stream'].append({u'velocity': 0, u'tick': duration+1, u'sub_type': u'noteOff', u'channel': 1, u'note_num': note, u'lyrics': 'み'})

  mf.addNote(trackNo, 0, note, currTime / 480, duration / 480, 64)
  
  currTime += duration


with open(outputDir + "out.mid", 'wb') as outf:
  mf.writeFile(outf)

# We write the vsqx file
vsqxData = json2vsqx.json2vsqx(vsqxJson)
with open(outputDir + 'output.vsqx', 'wb') as f:
  f.write(vsqxData.toprettyxml('', '', 'utf-8'))

"""From here, we can generate the lyrics to our song"""
Example no. 22
def jpg_to_midi(img_name, grid_name, result_name):

    # First convert everything from Pure Data symbols to plain strings
    img_name = str(img_name)
    grid_name = str(grid_name)
    result_name = str(result_name)

    img = Image.open(img_name)
    grid_width, grid_height = get_grid_dimensions(grid_name)
    unit_width, unit_height = get_unit_dimensions(img.width, img.height, grid_width, grid_height)

    pixels = img.load()

    result_MIDI = MIDIFile(numTracks=grid_height, adjust_origin=False)
    result_MIDI.addTempo(0, 0, 60)

    # Now need to work along rows of grid and average region of image appropriately
    with open(grid_name) as grid:
        colours = []

        # For each row in the grid...
        for y_index, line in enumerate(grid):

            # Get starting and ending y coords
            starting_y = y_index * unit_height
            ending_y = (y_index + 1) * unit_height

            x_count = 0
            row_colours = []
            regions = line.split(",")

            # r = each number in a row in the grid
            for r in regions:
                starting_beat = x_count

                starting_x = x_count * unit_width
                x_count += int(r)                
                ending_x = x_count * unit_width
                # row_colours.append(get_average_colour(pixels, (float(starting_x), float(starting_y)), (float(ending_x), float(ending_y))))

                average_colour = get_average_colour(pixels, (float(starting_x), float(starting_y)), (float(ending_x), float(ending_y)))
                pov = hsv2pov(rgb2hsv(average_colour))

                #print pov
                #print get_midi_pitch(pov)
                #print "Beat start " + str(starting_beat)
                #print "Length " + str(r)

                # y_index = line number, which corresponds to track number of note
                # notes all placed on channel 0
                # midi_pitch worked out by passing average colour (pov tuple) to function
                # starting beat is the x_count at the start of this inner loop iteration; this is the sum of all previous numbers on the line
                # int(r) is the duration of the note in beats; given by the length of the cell in the grid
                # volume of note is given by velocity from pov tuple
                result_MIDI.addNote(y_index, 0, get_midi_pitch(pov), starting_beat, int(r), pov["velocity"])

    #povs = []

    #for c in colours:
        #hs = map(rgb2hsv, c)
        #ps = map(hsv2pov, hs)
        #povs.append(ps)

    with open(result_name, "wb") as output_file:
        result_MIDI.writeFile(output_file)
Example no. 23
pitch = 20

for col in np.transpose(matY):
    startTime = 0.0
    endTime = 0.0
    pitch += 1
    status = False
    for idx, i in enumerate(col):
        if i > 0.5 and not status:
            startTime = idx * frame_width / sample_frequence
            status = True
        if i < 0.1 and status:
            endTime = idx * frame_width / sample_frequence
            status = False
            if endTime - startTime > 0.02:
                MyMIDI.addNote(track, channel, pitch, startTime,
                               endTime - startTime, volume)
                file.write("%f, %f, %d\n" % (startTime, endTime, pitch))
    if status:
        if (len(col) * frame_width -
                0.5) / sample_frequence - startTime > 0.02:
            MyMIDI.addNote(track, channel, pitch, startTime,
                           (len(col) * frame_width - 0.5) / sample_frequence -
                           startTime, volume)
            file.write(
                "%f, %f, %d\n" %
                (startTime,
                 (len(col) * frame_width - 0.5) / sample_frequence, pitch))
file.close()
with open(output_dir + '.mid', 'wb') as output_file:
    # with open("sample.mid", "wb") as output_file:
    MyMIDI.writeFile(output_file)