Ejemplo n.º 1
0
def testIsEmptyTrack():
    """Check isEmptyTrack() against three known fixture files."""
    # (track_index, fixture_path, expected_emptiness)
    cases = [
        (15, 'tests/trk15empty.mid', True),
        (0, 'tests/empty.mid', True),
        (1, 'tests/notEmpty.mid', False),
    ]
    for track_index, fixture, expected in cases:
        loaded = midi.read_midifile(fixture)
        assert isEmptyTrack(track_index, loaded) == expected
Ejemplo n.º 2
0
def id_pattern_itter(db):
    """Yield (id, midi.Pattern) pairs for every record in *db* and for each
    record's bars, tracks, and bar/track intersections.

    Each "data" field is raw MIDI bytes parsed via midi.read_midifile.
    """
    for key in db["records"]:
        record = db["records"][key]
        yield (record["id"], midi.read_midifile(StringIO.StringIO(record["data"])))
        for bar in record["bars"]:
            yield (bar["id"], midi.read_midifile(StringIO.StringIO(bar["data"])))
        # BUG FIX: this loop previously iterated record["bars"] a second time
        # (copy-paste error) even though it names its variable `track`;
        # tracks must come from record["tracks"].
        for track in record["tracks"]:
            yield (track["id"], midi.read_midifile(StringIO.StringIO(track["data"])))
        for bar_track in record["bars_tracks"]:
            yield (bar_track["id"], midi.read_midifile(StringIO.StringIO(bar_track["data"])))
Ejemplo n.º 3
0
 def test_mary(self):
     """Round-trip mary.mid through write/read and verify events survive."""
     midi.write_midifile("mary.mid", mary_test.MARY_MIDI)
     pattern1 = midi.read_midifile("mary.mid")
     midi.write_midifile("mary.mid", pattern1)
     pattern2 = midi.read_midifile("mary.mid")
     self.assertEqual(len(pattern1), len(pattern2))
     for track_idx in range(len(pattern1)):
         self.assertEqual(len(pattern1[track_idx]), len(pattern2[track_idx]))
         for event_idx in range(len(pattern1[track_idx])):
             event1 = pattern1[track_idx][event_idx]
             # BUG FIX: event2 was previously read from pattern1, so the
             # test compared every event with itself and could never fail.
             event2 = pattern2[track_idx][event_idx]
             self.assertEqual(event1.tick, event2.tick)
             self.assertEqual(event1.data, event2.data)
Ejemplo n.º 4
0
def main():
    usage = "%prog <seq-id> <midi-file> [<midi-file> ...]"
    parser = OptionParser(usage=usage)
    parser.add_option("--names", dest="names", action="store", help="read in a NAMES file from the midi auto-collection, intead of reading midi files from the command line")
    options, arguments = parser.parse_args()
    
    if options.names is not None:
        print "Reading names from %s" % options.names
        csv = UnicodeCsvReader(open(options.names))
        filenames = []
        dirname = os.path.dirname(options.names)
        
        csv.next()
        for row in csv:
            filename = os.path.join(dirname,row[0])
            if not os.path.exists(filename):
                continue
            seq = ChordSequence.objects.get(id=int(row[2]))
            filenames.append((seq,filename))
    else:
        if len(arguments) < 2:
            print >>sys.stderr, "Specify a sequence id and one or more midi files"
            sys.exit(1)
        
        seq_id = int(arguments[0])
        seq = ChordSequence.objects.get(id=seq_id)
        filenames = [(seq,fn) for fn in arguments[1:]]
        
    if len(filename) == 0:
        print sys.stderr, "No input files"
        sys.exit()
    
    files = []
    for seq,filename in filenames:
        print "Reading %s" % filename
        f = open(filename, 'r')
        data = f.read()
        # Try reading in the midi data to check it's ok
        read_midifile(StringIO(data))
        files.append((seq, os.path.basename(filename), ContentFile(data)))
    
    for seq, filename,f in files:
        print "Storing %s" % filename
        # Create a new midi data record in the database
        midi = MidiData()
        midi.sequence = seq
        midi.save()
        # Use the original filename
        midi.midi_file.save(filename, f)
        midi.save()
Ejemplo n.º 5
0
def main():
    #pattern1 = midi.read_midifile("midi/5RAP_04.MID")
    bars = []
    pattern = midi.read_midifile("midi/5RAP_04.MID")
    #pattern = midi.read_midifile("midi/decoy.mid")
    #pattern = midi.read_midifile("midi/drum_patterns.mid")

    #print_events(pattern, [])
    pattern = sanitize(pattern)

    midi.write_midifile("test.mid", pattern)
    pattern = midi.read_midifile("test.mid")
    print pattern
    return
Ejemplo n.º 6
0
	def testcreateTune(self):
		"""Exercise Tune construction and its setters/getters.

		NOTE(review): several assertions below look mis-parenthesised and
		the names `samerest`/`samerestNote` are undefined in this snippet;
		each is flagged inline rather than changed.
		"""
		# --- tests if MIDI files are successfully converted to a Tune object ---
		# import midi file: C major scale with all quarter notes (refer to TestComputePitches)
		# Use Python MIDI library https://github.com/vishnubob/python-midi
		# MIDI files are an array of integers with a header
		TuneMIDI = midi.read_midifile("../tests/MIDITestFiles/c-major-scale-treble.mid")
		# ---- Fail Tune Parameter Constraints ---
		# NOTE(review): closing paren looks misplaced -- clef/title args are
		# passed to assertFalse, not to Tune. Confirm intended call shape.
		self.assertFalse(Tune("wrongFileType.txt"), Clef.TREBLE, "", [""])
		#  timeSignature has to be (int, int) where int > 0
		# NOTE(review): same misplacement -- (-1, 0) etc. are assertFalse
		# args here; compare the correctly-formed call on the next line.
		self.assertFalse(Tune(TuneMIDI), (-1, 0), Clef.BASS, "Title", ["Contributor"])
		self.assertFalse(Tune(TuneMIDI, (2.5, 3), Clef.BASS, "Title", ["Contributor"]))
		tune = Tune(TuneMIDI, (3,4), Clef.TREBLE, "Title", ["Contributor"])
		# --- test Tune setters and getters ---
		# If bad input, leave field unchanged
		tune.setTimeSignature((4,4))
		self.assertEqual(tune.getTimeSignature(), (4,4))
		tune.setTimeSignature((-1, 0))
		self.assertEqual(tune.getTimeSignature(), (4,4))
		tune.setTitle("new title")
		self.assertEqual(tune.getTitle(), "new title")
		tune.setTitle("this is toooooooooooooooooooooooooooooooooooooooooooo long title")
		self.assertEqual(tune.getTitle(), "new title")
		tune.setContributors(["person1, person2, person3"])
		self.assertEqual(tune.getContributors(), ["person1, person2, person3"])
		tune.setContributors(["this is tooooooooooooooooooooooooooooooooo long contributor name"])
		self.assertEqual(tune.getContributors(), ["person1, person2, person3"])

		frequencies = [261.63, 293.66, 329.63]
		# check frequencies and onsets calculated correctly from generateTune
		for i in xrange(0, 3):
			self.assertEqual(tune[i].getFrequency(), frequencies[i])
			self.assertEqual(tune[i].getOnset(), i)
			# NOTE(review): `samerest` and `samerestNote` are not defined in
			# this snippet -- likely lost in extraction; confirm upstream.
			self.assertTrue(samerest.noteEqual(samerestNote))
Ejemplo n.º 7
0
    def load_midi_file(self, file_location):
        """Load a MIDI file, derive its tempo, and buffer its tracks.

        Sets self.bpm, self.ms_per_tick and self.song_buffer.
        """
        midi_file = midi.read_midifile(file_location)
        # Reverse track order so pop() below removes what was originally
        # the FIRST track (the tempo/metadata track).
        midi_file.reverse()
        midi_file.make_ticks_abs()

        resolution = midi_file.resolution
        song_info_track = midi_file.pop()

        has_tempo_event = False
        mpqn = 0  # microseconds per quarter note (SetTempo payload)

        for event in song_info_track:

            if type(event) is midi.SetTempoEvent:
                has_tempo_event = True
                # SetTempo carries a 24-bit big-endian value.
                mpqn = (event.data[0] << 16) | (event.data[1] << 8) | event.data[2]

        # Guard against division by zero below if a tempo event carried 0.
        if mpqn == 0:
            mpqn = 1
        current_tempo_in_beats_per_minute = 120

        if has_tempo_event:
            # 60,000,000 microseconds per minute / microseconds per beat.
            current_tempo_in_beats_per_minute = 60000000.0/mpqn

        self.bpm = current_tempo_in_beats_per_minute

        # resolution * (bpm / 60) = ticks per second; invert -> ms per tick.
        self.ms_per_tick = resolution * (current_tempo_in_beats_per_minute / 60.0)
        self.ms_per_tick = 1000.0/self.ms_per_tick
        self.song_buffer = SongBuffer(time_per_tick=self.ms_per_tick)

        for track in midi_file:
            self.song_buffer.add_track(track)
Ejemplo n.º 8
0
def get_tracknames():
    filenames = []
    count = 0
    for root, subFolders, files in os.walk('.'):
        count +=1
        if count == 9:
            break
        files = [os.path.join(root, filename) for filename in files if filename.lower().endswith(".mid")]
        filenames += files

    filenames = filenames[:100]
    tracknames = []
    for filename in filenames:
        print "processing", filename
        try:
            pattern = midi.read_midifile(filename)
            current_tracknames = midiutil.get_track_names(pattern)
            current_tracknames =  [trackname.rstrip().lstrip().lower().encode('ascii', 'ignore') for trackname in current_tracknames]
            #current_tracknames = [trackname for trackname in tracknames if trackname != ""]
            tracknames += current_tracknames 
        except:
            print "    bad file", filename
    tracknames = [' '.join(ch for ch in t if ch.isalnum()) for t in tracknames if t != '']
    print tracknames
    return tracknames
Ejemplo n.º 9
0
def Main():
	"""Scan downloaded MIDI files, keep those within pitch range, and pickle
	the valid filenames while writing features to features.csv.

	NOTE(review): this snippet is truncated -- the ``try`` below has no
	visible ``except``/``finally`` clause, so the function as shown is
	syntactically incomplete. Code left byte-identical.
	"""
	#these values should match what is in SongData
	#and are here just for reference
	#REST = 128
	#HOLD = 129
	#NULL = 130
	#END = 131
	#BEATS_PER_BAR = 32
	#extract features
	absolute_pos = 0
	folder = os.path.join('downloads','midiworld')
	pitches = set()
	songs = []
	observedstates = set()
	featurevectors = []	
	import csv	
	with open('features.csv','wb') as out:
		try:
			print('finding valid songs')
			csv_out=csv.writer(out)
			valid_filenames = []		
			for filename in os.listdir(folder):
				out_of_range = False
				song_path = os.path.join(folder, filename)
				pattern = midi.read_midifile(song_path)
				song = SongData(pattern, filename)
				# Reject songs with any pitch outside MIDI 48..84, or whose
				# start state carries the NULL marker (131) -- see constants
				# listed above.
				for action in song.eventset:
					if (action > 84 and action < 128) or action < 48:
						out_of_range = True
					if song.startstate.pitch == 131:
						out_of_range = True
				if not out_of_range:
					valid_filenames.append(song.filename)
			print(len(valid_filenames))
			pdump(valid_filenames, 'valid_filenames.pkl',noPrefix=True)
Ejemplo n.º 10
0
def process_file(midi_file):
    """
    Read a midi file, creates data structures suitable writing, and finally
    writes the TXT output.

    Relies on module-level OUTPUT_DIR, FILE_FORMAT_MAP and
    USE_TIME_IN_SECONDS. Non-.mid/.midi files are skipped silently
    (apart from a console message).
    """
    inpath, infullname = os.path.split(midi_file)
    inname, inext = os.path.splitext(infullname)
    outputfile = os.path.join(OUTPUT_DIR, inname + ".txt")

    if not inext.lower() in (".mid", ".midi"):
        print "Skipping " + midi_file
        return

    print "Processing " + midi_file

    mf = midi.read_midifile(midi_file)
    ppq = mf.resolution  # ticks per quarter note

    # process the tracks according to the file format
    # (FILE_FORMAT_MAP maps the MIDI format number to a handler function)
    file_data = FILE_FORMAT_MAP[mf.format](mf)

    # get the tick period
    # FIXME: this assumes fixed tempo throughout the file.
    seconds_per_tick = 60.0 / file_data['params'].bpm / ppq

    # Optionally convert event timestamps from ticks to seconds, in place.
    if USE_TIME_IN_SECONDS:
        for event in file_data['on_events']:
            event[0] = seconds_per_tick * event[0]
        for event in file_data['off_events']:
            event[0] = seconds_per_tick * event[0]

    write_output_file(outputfile, file_data)
Ejemplo n.º 11
0
 def load(self, filename):
     '''Load MIDI file.

     Flattens all tracks into one event list sorted by absolute tick,
     collects cue points from TextMetaEvents whose text starts with
     "cue", resets playback state, and immediately dispatches any events
     occurring before tick 2.
     '''
     pattern = midi.read_midifile(filename)
     self.resolution = pattern.resolution
     pattern.make_ticks_abs()
     events = []
     for track in pattern:
         for event in track:
             events.append(event)
     # After make_ticks_abs, sorting merges all tracks into tick order.
     events.sort()
     cues = []
     for e in events:
         if type(e) == midi.events.TextMetaEvent:
             if e.text.startswith('cue'):
                 cues.append(e.tick)
     self.cues = cues
     self.events = events
     self.eventnum = 0
     self.tempo = 120.0
     self.time = 0
     self._playing = False
     self.softReset()
     # Dispatch setup events (program changes etc.) at tick 0/1 right away.
     while self.eventnum < len(self.events) and self.events[self.eventnum].tick < 2:
             event = self.events[self.eventnum]
             self.eventnum += 1
             self.do_event(event)
Ejemplo n.º 12
0
def openOptions(): #Options window UI
    """Open the Options dialog (chain length, instrument, song length,
    tempo, input track) and wire each widget into the shared state ``c.d``."""
    top=Toplevel()
    Label(top, text="Chain Length: ").grid(row=0)
    Label(top, text="Instrument: ").grid(row=1)
    Label(top, text="Song Length: ").grid(row=2)
    Label(top, text="Tempo: ").grid(row=3)
    Label(top, text="Input Track: ").grid(row=4)
    lengths=range(2,10) #possible chain lengths
    # apply() is the legacy spelling of OptionMenu(top, var, *lengths).
    c.d.chainLengthPicker=apply(OptionMenu,(top,c.d.chainLength)+tuple(lengths))
    c.d.chainLengthPicker.grid(row=0,column=1)
    instruments=c.d.instruments.keys()
    c.d.instrumentPicker=OptionMenu(top,c.d.instrumentName,*instruments,\
    command=setInstrument)
    c.d.instrumentPicker.grid(row=1,column=1)
    c.d.songLengthPicker=Entry(top,textvariable=c.d.songLength,\
    validatecommand=validateLength)
    c.d.songLengthPicker.grid(row=2,column=1)
    c.d.tempoPicker=Entry(top,textvariable=c.d.tempo,\
    validatecommand=validateTempo)
    c.d.tempoPicker.grid(row=3,column=1)
    tracks=[1] #default track
    # If an input file was chosen, offer one menu entry per track in it.
    if(c.d.inputFile.get() !=''):
        pat=midi.read_midifile(c.d.inputFile.get())
        tracks=range(len(pat))
    c.d.trackPicker=apply(OptionMenu,(top,c.d.track)+tuple(tracks))
    c.d.trackPicker.grid(row=4,column=1)
Ejemplo n.º 13
0
def parseMidi(filename):
	"""Interactively pick a track from a MIDI file and extract its notes.

	Returns (noteList, songName, trackName, tempo) where noteList holds
	"Note On" events; Note Off events are converted in place to Note On
	with velocity 0.

	NOTE(review): uses stream.get_track_by_number / stream.tracknames /
	stream.get_tempo, which are not part of the stock python-midi Pattern
	API -- confirm the local midi fork provides them. Also, line
	``filename.name.split(...)`` implies a file object is passed in, not a
	path string -- verify against callers.
	"""
	#Reads midi events from file
	stream = midi.read_midifile(filename)

	songName=""
	trackName=""
	tempo=75  # default tempo used when no tempo metadata is found
	#Get info for track 0 (Song name and master tempo)
	for event in stream.get_track_by_number(0):
		if (event.name=="Track Name"):
			songName=event.data
			tempo=stream.get_tempo().tempo
			print "Song Name: "+songName+", tempo: "+str(tempo)

	#Get info for other tracks, sorted by track number
	tracklist=sorted(stream.tracknames.items(), key=lambda trk: trk[1])

	num=0
	if not(len(tracklist)==0):
		#If there are more than one track in the file, you have to choose one
		for track in tracklist:
			if not(track[1]==0): print str(track[1])+") "+track[0]

		#Select a track to analize: keep prompting until a valid number
		ok=0
		while not(ok):
			select=raw_input("Please insert track number you want to analyze [1.."+str(tracklist[-1][1])+"]: ")
			try:
				num=int(select)
				if (num>=1 and num<=tracklist[-1][1]):
					ok=1
				else:
					print "Number out of range"
			except ValueError:
				print "Invalid input"

	try:
		trackName=tracklist[num][0]
	except IndexError:
		print "Failed to get track name"

	#Fall back to the file name / song name when metadata was missing
	if (songName==""): songName=filename.name.split(".")[0]
	if (trackName==""): trackName=songName

	print "scanning track "+trackName+"..."
	noteList=[]
	for event in stream.get_track_by_number(num):
		if (event.name=="Note On"):
			noteList.append(event)
			if (VERBOSE):
				print event
		if (event.name=="Note Off"):
			#Convert note off event in note on with velocity=0
			event.name="Note On"
			event.velocity=0
			noteList.append(event)
			if (VERBOSE):
				print event

	return (noteList, songName, trackName, tempo)
def get_notes():
    """Load muss_1.mid and return (piano_roll, min_pitch).

    piano_roll is a (time_steps, pitch_range) 0/1 numpy array built from
    the NoteOn events of track 1, quantised to 120-tick steps.
    """
    # downloaded from http://www.piano-midi.de/muss.htm
    filename = 'muss_1.mid'
    m = midi.read_midifile(filename)
    m.make_ticks_abs()
    tick = 120.0
    notes = np.array([(n.pitch, int(round(n.tick / tick)))
                      for n in m[1]
                      if type(n) == midi.events.NoteOnEvent
                      and n.velocity > 0
                      and n.pitch > 0])

    note_map = collections.defaultdict(list)
    max_pitch = 0
    min_pitch = 127
    for pitch, t in notes:
        note_map[t].append(pitch)
        # BUG FIX: these were `if`/`elif`, so a pitch that was both the new
        # max and the new min (e.g. the first note, or all-equal pitches)
        # only updated max_pitch, leaving min_pitch stuck at 127.
        if pitch > max_pitch:
            max_pitch = pitch
        if pitch < min_pitch:
            min_pitch = pitch

    max_time = max(note_map.keys())

    output_units = max_pitch - min_pitch + 1
    # BUG FIX (off-by-one): allocate max_time + 1 rows so the notes that
    # sound at the final time step are not silently dropped.
    output = np.zeros((max_time + 1, output_units))
    for t in range(max_time + 1):
        if t in note_map:
            for i in note_map[t]:
                output[t, i - min_pitch] = 1

    return output, min_pitch
Ejemplo n.º 15
0
def to_int_array(file_location):
    """Return the pitch of every sounding NoteOn event in the file."""
    pattern = midi.read_midifile(file_location)
    pitches = []
    for track in pattern:
        for event in track:
            # A NoteOn with velocity 0 is really a note-off; skip it.
            if isinstance(event, midi.NoteOnEvent) and event.data[1] > 0:
                pitches.append(event.data[0])
    return pitches
Ejemplo n.º 16
0
def create_record(filename, include_data = False):
    print "Creating record", filename
    record = {}
    record["id"] = generate_id(open(filename, "rb"))
    record["filename"] = filename

    try:
        pattern = midi.read_midifile(filename)
    except:
        print "Error"
        return None
    
    if include_data:
        record["data"] = midiutil.midi_to_data(pattern)
    record["time_signature"] = midiutil.get_time_signature(pattern)
    record["tempo"] = midiutil.get_tempo(pattern)
    record["track_names"] = midiutil.get_track_names(pattern)
    record["resolution"] = pattern.resolution
    record["note_distribution"] = midiutil.get_note_distribution(pattern,[9])
    record["key"] = midiutil.guess_key(pattern, record["note_distribution"])
    record["scale"] = midiutil.guess_scale(record["key"], record["note_distribution"])
    

    #for track in pattern:
    #    print midiutil.get_track_name(track)
    #    dist = midiutil.get_note_distribution(track, [], False)
    #    plt.bar(range(len(dist)), dist.values(), align="center")
    #    plt.xticks(range(len(dist)), dist.keys())
    #    plt.show()

    # dist = record["note_distribution"]
    

    return record
Ejemplo n.º 17
0
def main():
    """Print note statistics for a MIDI file; with -k/--key-profile, also
    emit a CSV + gnuplot script of the pitch-class profile and run gnuplot."""
    usage = "%prog [options] <in-file>"
    description = "Print out stats about the notes in a MIDI file"
    parser = OptionParser(usage=usage, description=description)
    parser.add_option('-k', '--key-profile', dest="key_profile", action="store", type="int", help="output a graph of the key profile for the given key as a gnuplot script")
    options, arguments = parse_args_with_config(parser)
    
    if len(arguments) == 0:
        print "No input MIDI file given"
        sys.exit(1)
    filename = arguments[0]
    
    # Load the midi file
    midi = read_midifile(filename)
    print "Midi file type %d" % midi.format
    print "Resolution: %d" % midi.resolution
    print "%d notes" % len(note_ons(midi))
    # Get rid of drums
    midi = simplify(midi, remove_drums=True)
    notes = note_ons(midi)
    print "%d non-drum notes" % len(notes)
    # Analyse the note content: count notes per pitch class (0-11)
    pcs = dict([(i,0) for i in range(12)])
    for note in notes:
        pcs[note.pitch % 12] += 1
    
    note_names = dict([
        (0, "C"), (1, "C#"), (2, "D"), (3, "D#"), (4, "E"), (5, "F"), 
        (6, "F#"), (7, "G"), (8, "G#"), (9, "A"), (10, "A#"), (11, "B") ])
    # Print the notes, most frequent pitch class first
    for pc, count in reversed(sorted(pcs.items(), key=lambda x:x[1])):
        print "%s: %d" % (note_names[pc], count)
    
    if options.key_profile is not None:
        kp_output_file = "key_profile"
        pc_names = ["1", "#1/b2", "2", "#2/b3", "3", "4", "#4/b5", "5", 
            "#5/b6", "6", "#6/b7", "7"]
        # Output the pitch counts
        key = options.key_profile
        # Get the pc frequencies, rotated so the requested key is degree 1
        pc_freq = [float(pcs[(key+p)%12])/sum(pcs.values()) for p in range(12)]
        # Output them to a CSV (index, degree name, frequency)
        data = "\n".join("%d\t%s\t%f" % (i,name,freq) for (name,freq,i) in zip(pc_names,pc_freq,range(12)))
        with open("%s.csv" % kp_output_file, 'w') as f:
            f.write(data)
        # Output the Gnuplot script
        gnuplot = """\
set style data lines
set nokey
set xrange [-1:13]
set terminal pdf monochrome
set output "key_profile.pdf"
set xlabel "Pitch class"
plot "key_profile.csv" using 1:3:xticlabel(2)
"""
        with open("%s.p" % kp_output_file, 'w') as f:
            f.write(gnuplot)
        # Run Gnuplot
        call(["gnuplot", "%s.p" % kp_output_file])
        print "Gnuplot plot output to %s.p and %s.pdf" % (kp_output_file,kp_output_file)
Ejemplo n.º 18
0
def readSongDoubleNote():
    """Read the MIDI file named in sys.argv[2] into a note table.

    Each entry is [octave, note_name, tick]; a NoteOn whose effective tick
    (its own tick plus the preceding NoteOff's tick) is 0 is treated as
    simultaneous with the previous note and appended INSIDE the previous
    entry, forming a chord.
    """
    pattern = midi.read_midifile(sys.argv[2])
    notes = ["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"]
    tab = []
    i=1  # track 0 is skipped (metadata track)
    end = len(pattern)
    off = 0  # tick carried over from the previous NoteOff event
    while i<end:
        j=0
        fin = len(pattern[i])
        while j < fin:
            if type(pattern[i][j]) == midi.events.NoteOnEvent:
                num = pattern[i][j].data[0]
                # MIDI note number -> octave and pitch-class name
                octave = int(floor(num/12))
                note = notes[num - octave*12]
                if pattern[i][j].tick+off != 0 or len(tab) == 0:
                    tab.append([octave,note,pattern[i][j].tick+off])
                else :
                    # simultaneous with the previous note: group as a chord
                    tab[-1].append([octave,note,0])
            off = 0
            if type(pattern[i][j]) == midi.events.NoteOffEvent:
                off = pattern[i][j].tick
            j = j+1
        i = i+1
    return tab
Ejemplo n.º 19
0
def read_midi_file(midi_path, channel=9, quantisation_level=1.0 / 16):
    """Quantise the drum notes of a MIDI file into a beat grid.

    Returns a tuple of frozensets, one per quantised beat; each holds the
    pitches struck on that beat for the requested channel. Returns an
    empty tuple when no matching notes are found.

    BUG FIX: the default quantisation_level was ``1/16``, which is integer
    division (== 0) under Python 2 and made tpb zero (division by zero
    below).
    """
    midi_file = midi.read_midifile(midi_path)

    # Find the tracks that play on the requested channel.
    # BUG FIX: this scan previously hard-coded channel 9 even though the
    # caller can pass a different channel; use the parameter consistently.
    drum_tracks = set()
    for index, track in midi_file.tracklist.iteritems():
        for event in track:
            if isinstance(event, midi.NoteEvent) and event.channel == channel:
                drum_tracks.add(index)
                break

    # tick beat^-1 = (tick quart^-1 * quart note^-1) / note beat^-1
    tpb = 4 * midi_file.resolution * quantisation_level
    beats = {}
    for track_index in drum_tracks:
        midi_track = midi_file.tracklist[track_index]
        for event in midi_track:
            if isinstance(event, midi.NoteOnEvent) and event.channel == channel:
                beat_index = int(round(event.tick / tpb))
                if beat_index not in beats:
                    beats[beat_index] = set()
                beats[beat_index].add(event.pitch)
    # BUG FIX: `track` was undefined (NameError) when no beats were found;
    # default to an empty sequence.
    track = []
    if beats:
        track = [frozenset() for _ in xrange(max(beats) + 1)]
        for beat_index, beat in beats.iteritems():
            track[beat_index] = frozenset(beat)
    return tuple(track)
Ejemplo n.º 20
0
def computeOnset(myfile):
	"""Build a list of Note objects with onset and duration from a MIDI file.

	NOTE(review): note-offs are matched to note-ons purely by arrival
	order (``index``), which assumes notes end in the same order they
	begin -- confirm this holds for the input files (it fails for
	overlapping notes released out of order).
	"""
	pattern = midi.read_midifile(myfile)
	NotesList = []
	index = 0  # index of the next note still awaiting its note-off
	bpm = 0
	resolution = pattern.resolution
	duration = 0;

	pattern.make_ticks_abs() # changes ticks to cumulative values
	print pattern

	for track in pattern:
		for event in track:
			if isinstance(event, midi.SetTempoEvent):
				bpm = event.bpm
			if isinstance(event, midi.NoteEvent):
				# A NoteOn with velocity 0 counts as a note-off.
				if (isinstance(event, midi.NoteOffEvent) or (isinstance(event, midi.NoteOnEvent) and event.data[1] == 0)):
					endset = ticksToTime(event.tick, bpm, resolution)
					currNote = NotesList[index]
					duration = endset - currNote.onset
					currNote.duration = secondsToDuration(duration)
					index += 1
				else:
					onset = ticksToTime(event.tick, bpm, resolution)
					newNote = Note(onset = onset)
					NotesList.append(newNote)
	
	return NotesList		
def time_series_list_builder(filename_io_mid):
	'''
	INPUT: STR : The name of MIDI file 'bwv733_io.mid' extracted out of Ableton into typically 3 separate
	 			MIDI files 'bwv733_t1.mid', 'bwv733_t2.mid', 'bwv733_t3.mid' 
				and merged with merger_t1s('bwv733.mid') 
	
	OUTPUT: time_series LIST [(time INT, pitch INT, duration INT), ...]: time-sorted
	'''
	tracks = midi.read_midifile(filename_io_mid)
	
	# Per-track note-on / note-off event lists, parallel to `tracks`
	# (placeholder 0 until extract_melody fills each slot).
	note_on = [0 for i in tracks]
	note_off = [0 for i in tracks]
	time_series_list = []
	
	for i, track in enumerate(tracks):
		note_on[i], note_off[i] = extract_melody(track)
		# A length mismatch means some note never got a matching off event.
		if len(note_on[i]) != len(note_off[i]):
			print '''len(note_on)={} and len(note_off)={} @track={}'''\
			.format(len(note_on[i]), len(note_off[i]), i)
		
		# Only keep tracks that produced at least one time-series entry.
		out = time_series_builder(note_on[i], note_off[i])
		if len (out) > 0:
			time_series_list.append(out)
	
	return time_series_list
Ejemplo n.º 22
0
def notes_from_file(filename: str) -> List[Note]:
    """Parse the last track of a MIDI file into Note objects sorted by onset.

    NoteOn events with non-zero velocity open a note; NoteOff events (or
    NoteOn with velocity 0) close the most recently opened note of that
    pitch and record its duration in absolute ticks.
    """
    midifile_rel = midi.read_midifile(filename)
    midifile_abs = copy.deepcopy(midifile_rel)
    midifile_abs.make_ticks_abs()

    finished = []
    active_notes = {}  # pitch -> set of currently sounding Note objects

    for ev_rel, ev_abs in zip(midifile_rel[-1], midifile_abs[-1]):
        note_on = isinstance(ev_rel, midi.NoteOnEvent)
        if note_on and ev_rel.data[1]:
            note = Note()
            note.resolution = midifile_rel.resolution
            note.tick_abs = ev_abs.tick
            note.pitch = ev_rel.data[0]
            note.velocity = ev_rel.data[1]
            active_notes.setdefault(note.pitch, set()).add(note)
        elif isinstance(ev_rel, midi.NoteOffEvent) or (note_on and ev_rel.data[1] == 0):
            note = active_notes[ev_rel.data[0]].pop()
            note.duration = ev_abs.tick - note.tick_abs
            finished.append(note)
    assert not any(active_notes.values()), "Some notes were not released"
    return sorted(finished, key=lambda note: note.tick_abs)
Ejemplo n.º 23
0
	def parseMidiFile(self, filename):
		"""Parse a MIDI file, generate audio per note event, and combine tracks.

		Returns self.output with the element-wise sum of all tracks in
		slot 0.

		NOTE(review): the comment on the pop() below says the first track
		is metadata, but list.pop() removes the LAST appended track --
		confirm which one is meant to be dropped.
		"""
		pattern = midi.read_midifile(filename)
		self.ppq = pattern.resolution  # pulses (ticks) per quarter note
		for track in pattern:
			self.tracks.append([]) #add a new empty track
			for event in track:
				if type(event) is midi.events.SetTempoEvent:
					self.bpm = event.get_bpm() #change tempo

				if type(event) is midi.events.NoteOnEvent:
					self.generateAudio(event.tick, event.channel)
					note = event.pitch
					self.noteActive[note] = True
				elif type(event) is midi.events.NoteOffEvent:
					self.generateAudio(event.tick, event.channel)
					note = event.pitch
					self.noteActive[note] = False
		self.tracks.pop() #removes the extra added track (the first track is just metadata)

		# Trim every track to the shortest length so they can be summed.
		track_len = min([len(track) for track in self.tracks])
		self.tracks = [track[0:track_len] for track in self.tracks] #trim all tracks to the exact same size
		self.output[0] = numpy.sum(self.tracks, axis=0) #combine tracks
		return self.output
Ejemplo n.º 24
0
def midi_matrix(filename, min_pitch=36, max_pitch=120):
    """Render a MIDI file as a piano-roll matrix.

    Returns a numpy array of shape (max_pitch - min_pitch + 1, total_ticks)
    where cell [pitch - min_pitch][tick] is 1 while that pitch is sounding.
    """
    pattern = midi.read_midifile(filename)

    resolution = pattern.resolution
    tempo = 120 # arbitrary default bpm (tracked but not used below)

    # Total duration in ticks = the longest track's summed relative ticks.
    max_tick = max([sum(map(lambda e: e.tick, track)) for track in pattern])

    ## max_pitch should be there as last one ==> +1
    matrix = np.zeros((1 + max_pitch - min_pitch, max_tick))
    for track in pattern:
        last_event_when = 0 # in ticks
        currently_played_notes = set()
        for event in track:
            if isinstance(event, midi.SetTempoEvent):
                tempo = event.get_bpm()
                continue

            if isinstance(event, midi.NoteOnEvent) or isinstance(event, midi.NoteOffEvent):
                # what happened in-between: mark every held note for the
                # interval since the previous note event.
                for note in currently_played_notes:
                    for tick in range(last_event_when, last_event_when + event.tick):
                        matrix[note - min_pitch][tick] = 1

                last_event_when += event.tick

                # the case of a noteOffEvent (NoteOn with velocity 0 counts)
                if event.get_velocity() == 0:
                    currently_played_notes.discard(event.get_pitch())
                else:
                    # the case of a noteOnEvent
                    currently_played_notes.add(event.get_pitch())

    return matrix
Ejemplo n.º 25
0
def test(ifile):
    """Extract key, chord progression, and melody measures from a MIDI file.

    Expects at least 4 tracks: 0 = metadata, 1 = melody, 2 = piano,
    3 = bassline. Exits if no time or key signature is present.
    """
    ticks_so_far = 0
    pattern = midi.read_midifile(ifile)
    metadata = pattern[0]
    melody = pattern[1]
    piano = pattern[2]
    bassline = pattern[3]
    key_code = -1
    beats_per_measure = -1
    ticks_per_measure = -1

    # extract time and key signature from the metadata track
    for el in metadata:
        if type(el) is midi.KeySignatureEvent:
            key_code = el.data[0]
        elif type(el) is midi.TimeSignatureEvent:
            beats_per_measure = el.data[0]
            ticks_per_measure = pattern.resolution * beats_per_measure
    if key_code == -1 or beats_per_measure == -1:
        # BUG FIX: this previously referenced the undefined name
        # `input_file`, raising NameError instead of exiting cleanly.
        sys.exit("No time or key signature: " + ifile)

    key = get_key(key_code)

    chord_progression = get_chord_progression(bassline, piano, ticks_per_measure, key)

    measures = get_measures(melody, ticks_per_measure)
Ejemplo n.º 26
0
def midiToNoteStateMatrix(midifile, lowerbound=21, upperbound=109):
    # code from: https://github.com/hexahedria/biaxial-rnn-music-composition/
    """Convert a MIDI file into a time x pitch state matrix.

    Each state is a list of [played, articulated] pairs, one per pitch in
    [lowerbound, upperbound); a new state is appended every 16th note
    (resolution / 4 ticks). Returns the partial matrix early when a time
    signature with numerator other than 2 or 4 is encountered.
    """
    pattern = midi.read_midifile(midifile)

    # Ticks remaining until each track's next event (None = exhausted).
    timeleft = [track[0].tick for track in pattern]

    # Current event index within each track.
    posns = [0 for track in pattern]

    statematrix = []
    span = upperbound - lowerbound
    time = 0

    state = [[0,0] for x in range(span)]
    statematrix.append(state)
    while True:
        if time % (pattern.resolution / 4) == (pattern.resolution / 8):
            # Crossed a note boundary. Create a new state, defaulting to holding notes
            oldstate = state
            state = [[oldstate[x][0],0] for x in range(span)]
            statematrix.append(state)

        for i in range(len(timeleft)):
            # Consume every event that fires on the current tick of track i.
            while timeleft[i] == 0:
                track = pattern[i]
                pos = posns[i]

                evt = track[pos]
                if isinstance(evt, midi.NoteEvent):
                    if (evt.pitch < lowerbound) or (evt.pitch >= upperbound):
                        pass
                        # print "Note {} at time {} out of bounds (ignoring)".format(evt.pitch, time)
                    else:
                        # NoteOff, or NoteOn with velocity 0, releases the note.
                        if isinstance(evt, midi.NoteOffEvent) or evt.velocity == 0:
                            state[evt.pitch - lowerbound] = [0, 0]
                        else:
                            state[evt.pitch - lowerbound] = [1, 1]
                elif isinstance(evt, midi.TimeSignatureEvent):
                    if evt.numerator not in (2, 4):
                        # We don't want to worry about non-4 time signatures. Bail early!
                        return statematrix


                try:
                    timeleft[i] = track[pos + 1].tick
                    posns[i] += 1
                except IndexError:
                    # Track exhausted; mark it finished.
                    timeleft[i] = None

            if timeleft[i] is not None:
                timeleft[i] -= 1

        # Stop once every track has been consumed.
        if all(t is None for t in timeleft):
            break

        time += 1

    return statematrix
Ejemplo n.º 27
0
def note_lists_for_file(filename, absolute=False):
    """Convert each track of a MIDI file into a note list, dropping empties."""
    pattern = midi.read_midifile(filename)
    converted = (midi_to_note_list(track, absolute) for track in pattern)
    return [note_list for note_list in converted if len(note_list)]
Ejemplo n.º 28
0
 def __init__(self, file_path):
     """Load a MIDI file and cache (pitch, tick) pairs from track 1."""
     self.pattern = midi.read_midifile(file_path)
     # store pitch and tick into tuple in vector
     # (every event of track 1 except the last -- presumably the
     # end-of-track marker; TODO confirm)
     self.data_vector = [(self.pattern[1][i].pitch, self.pattern[1][i].tick)
                          for i in range(len(self.pattern[1])-1)]
     # BUG FIX: previously `self.backup_vector = self.data_vector`, which
     # aliased the same list object -- mutating data_vector silently
     # corrupted the "backup". Take a real copy.
     self.backup_vector = list(self.data_vector)
     self.is_notes_repr = False
     self.is_midi_repr = True
Ejemplo n.º 29
0
def generate(chainLength,length,filename,outputPath,tracknum,instrument,tempo):
    markovSource = 'notes.txt'
    pattern = midi.read_midifile(filename)
    track = pattern[tracknum]
    outputNotes(track,markovSource)
    noteList = generateMusic(markovSource,length,chainLength).split()
    generateMidi(noteList,outputPath,instrument,tempo)
    print 'Song Generated'
def tranverse_all_folders(folder_trans):
    j = 0
    for path in os.listdir(folder_trans):
        pattern = midi.read_midifile(folder_trans + slash + path)
        print folder_trans + slash + path
        # Instantiate a MIDI Track (contains a list of MIDI events)
        track = midi.Track()
        # Append the track to the pattern
        pat.append(track)
        # Goes through extracted song and reconstruct them (pattern[1])
        '''
        tr = 1
        start_val = 1
        i = 1
        '''
        # World is MIne sample window
        
        tr = 0
        start_val = 14
        i = 14
        #print pattern
        '''
        # Suteki Da Ne sample window
        tr = 1
        start_val = 1
        i = 1
        '''
        while True:
            #print i
            #if i > len(pattern[tr]) - 2:
            if i > len(pattern[tr]) - 2:
                break
            #print pattern[tr][i]
            tick = pattern[tr][i].tick
            pitch = pattern[tr][i].data[0]

            # Because some pattern[][].data does not have a second array element
            if len(pattern[tr][i].data) == 2:
                velocity = pattern[tr][i].data[1]
            else:
                velocity = 0
            # Place all of tick, pitch, and velocity values in indiviudal vectors
            tick = np.array([tick])
            pitch = np.array([pitch])
            velocity = np.array([velocity])
            if i == start_val:
                tick_ar = tick
                pitch_ar = pitch
                velocity_ar = velocity
            else:
                tick_ar = np.concatenate((tick_ar, tick))
                pitch_ar = np.concatenate((pitch_ar, pitch))
                velocity_ar = np.concatenate((velocity_ar, tick))
            # To reconstruct the entire song in its (piano-like) original form
            #track.append(midi.NoteOnEvent(tick= tick, channel=1, data=[pitch, velocity]))
            i = i + 1
        j = j + 1
    return pattern, tick_ar, velocity_ar, pitch_ar
def midi2notes(midifile, scale):
    """Walk a MIDI file tick by tick and build per-track note lists.

    For each track that declares an instrument (ProgramChangeEvent):
      - ``tracks[i]``  accumulates finished ``Note`` objects (and
        ``Meter_Change.TRUE`` markers on time-signature changes),
      - ``note_ons[i]`` holds the currently sounding notes/rests as dicts
        ``{"pitch", "rest", "len"}`` whose ``len`` is counted in sixteenths.

    NOTE(review): the original returned None implicitly; that contract is
    kept — the results are only built up in local dicts here.
    """
    note_ons = dict()
    tracks = dict()
    instruments = dict()
    meter = []

    pattern = midi.read_midifile(midifile)

    # Delta ticks until each track's next event (None == track exhausted).
    # BUGFIX: original read `.ticks` (no such attribute) instead of `.tick`.
    remaining_time = [track[0].tick for track in pattern]
    position_in_track = [0 for track in pattern]

    # BUGFIX: original referenced an undefined `currTime` and never advanced
    # time nor decremented remaining_time, so the loop could not run.
    curr_time = 0

    while not all(t is None for t in remaining_time):

        # New sixteenth step (offset by a thirty-second of a beat).
        if curr_time % (pattern.resolution / 4) == (pattern.resolution / 8):
            for trk in note_ons:
                # Add a rest if no held notes are found.
                if len(note_ons[trk]) == 0:
                    note_ons[trk].append({"pitch": -1, "rest": True, "len": 1})
                # Prolong everything by one sixteenth.
                for held in note_ons[trk]:
                    held["len"] += 1

        for i in range(len(remaining_time)):

            # Consume every event of track i that falls on this tick.
            while remaining_time[i] == 0:

                track = pattern[i]
                pos = position_in_track[i]
                event = track[pos]

                if isinstance(event, midi.ProgramChangeEvent):
                    if i not in tracks:
                        tracks[i] = []
                        note_ons[i] = []
                        instruments[i] = instrument_classes.get_instrument_class(event.data[0])

                if isinstance(event, midi.TimeSignatureEvent):
                    # BUGFIX: original used `nominator` (no such attribute)
                    # and concatenated ints to a str.
                    meter.append(str(event.numerator) + "/" + str(event.denominator))
                    # BUGFIX: original reused loop variable `i` here,
                    # clobbering the outer track index.
                    for t in tracks:
                        tracks[t].append(Meter_Change.TRUE)

                # Guard: tracks without a ProgramChangeEvent have no state.
                if isinstance(event, midi.NoteEvent) and i in note_ons:

                    if isinstance(event, midi.NoteOffEvent) or event.velocity == 0:
                        # Iterate a copy: we remove from note_ons[i] below.
                        for note in list(note_ons[i]):
                            if note["pitch"] == event.pitch:
                                new_note = Note(pitch=note["pitch"], rest=False, duration=note["len"], scale=scale)
                                tracks[i].append(new_note)
                                note_ons[i].remove(note)
                    else:
                        # TODO: detect ongoing notes for new polyphonies

                        # Close any pending rests before starting the note.
                        for rest in list(note_ons[i]):
                            if rest["rest"]:
                                new_note = Note(pitch=-1, rest=True, duration=rest["len"], scale=scale)
                                tracks[i].append(new_note)
                                note_ons[i].remove(rest)

                        note_ons[i].append({"pitch": event.pitch, "rest": False, "len": 0})

                try:
                    # BUGFIX: original used camelCase names (remainingTime /
                    # positionInTrack) that were never defined.
                    remaining_time[i] = track[pos + 1].tick
                    position_in_track[i] += 1
                # A bit of a bad practice, but not the main time sink here.
                except IndexError:
                    remaining_time[i] = None

            # Advance this track by one tick.
            if remaining_time[i] is not None:
                remaining_time[i] -= 1

        curr_time += 1
Ejemplo n.º 32
0
    def __init__(self, cfg, ui):
        """Build the 7x7 grid segmentations, load the MIDI file at *cfg*,
        and open an ALSA sequencer output port.

        ``ui`` is accepted but not used in this constructor.
        """
        # self.segmentation[n] partitions the 7x7 grid into n regions
        # labelled 1..n; index 0 stays all-zero and index 17 is unused.
        # This bit is quite tedious.  I apologise.
        self.segmentation = []
        for _ in range(18):
            self.segmentation.append(np.zeros((7, 7), dtype=np.uint8))

        self.segmentation[1][:, :] = 1

        self.segmentation[2][:, :] = 2
        self.segmentation[2][0:3, :] = 1
        self.segmentation[2][3, 0:3] = 1

        self.segmentation[3][:, :] = 1
        self.segmentation[3][2, 2:] = 2
        self.segmentation[3][3:5, :] = 2
        self.segmentation[3][4, 5:] = 3
        self.segmentation[3][5:, :] = 3

        self.segmentation[4][:, :] = 1
        self.segmentation[4][:4, 4:] = 2
        self.segmentation[4][4:, 3:] = 3
        self.segmentation[4][3:, :3] = 4

        self.segmentation[5][:, :] = 5
        self.segmentation[5][:3, :3] = 1
        self.segmentation[5][:3, 4:] = 3
        self.segmentation[5][4:, :3] = 2
        self.segmentation[5][4:, 4:] = 4

        self.segmentation[6][:, :] = 1
        self.segmentation[6][:4, 2:4] = 2
        self.segmentation[6][:4, 4:6] = 3
        self.segmentation[6][4:, :3] = 4
        self.segmentation[6][4:, 3:6] = 5

        self.segmentation[7][0, :] = 1
        self.segmentation[7][1, :] = 2
        self.segmentation[7][2, :] = 3
        self.segmentation[7][3, :] = 4
        self.segmentation[7][4, :] = 5
        self.segmentation[7][5, :] = 6
        self.segmentation[7][6, :] = 7

        self.segmentation[8][:, :] = 7
        self.segmentation[8][:, 6] = 8
        self.segmentation[8][:3, :2] = 1
        self.segmentation[8][:3, 2:4] = 2
        self.segmentation[8][:3, 4:6] = 3
        self.segmentation[8][3:6, :2] = 4
        self.segmentation[8][3:6, 2:4] = 5
        self.segmentation[8][3:6, 4:6] = 6

        self.segmentation[9][:, :] = 9
        self.segmentation[9][:2, :2] = 1
        self.segmentation[9][:2, 2:4] = 2
        self.segmentation[9][:2, 4:] = 3
        self.segmentation[9][2:4, :2] = 4
        self.segmentation[9][2:4, 2:4] = 5
        self.segmentation[9][2:4, 4:] = 6
        self.segmentation[9][4:6, :3] = 7
        self.segmentation[9][4:6, 3:] = 8

        # Higher segmentations build on the previous ones.
        self.segmentation[10][:, :] = self.segmentation[9]
        self.segmentation[10][6, 3:] = 10

        self.segmentation[11][:, :] = self.segmentation[9]
        self.segmentation[11][4:6, 5:] = 9
        self.segmentation[11][6, :3] = 10
        self.segmentation[11][6, 3:] = 11

        self.segmentation[12][:, :] = self.segmentation[11]
        self.segmentation[12][:4, 6] = 12

        self.segmentation[13][:, :] = 13
        self.segmentation[13][:3, 0] = 1
        self.segmentation[13][:3, 1] = 2
        self.segmentation[13][:3, 2] = 3
        self.segmentation[13][:3, 3] = 4
        self.segmentation[13][:3, 4] = 5
        self.segmentation[13][:3, 5] = 6
        self.segmentation[13][3:6, 0] = 7
        self.segmentation[13][3:6, 1] = 8
        self.segmentation[13][3:6, 2] = 9
        self.segmentation[13][3:6, 3] = 10
        self.segmentation[13][3:6, 4] = 11
        self.segmentation[13][3:6, 5] = 12

        self.segmentation[14][:, :] = self.segmentation[13]
        self.segmentation[14][6, :] = 14

        self.segmentation[15][:, :] = self.segmentation[14]
        self.segmentation[15][6, 4:] = 15
        self.segmentation[15][4:, 6] = 15

        self.segmentation[16][:, :] = self.segmentation[15]
        self.segmentation[16][6, 3] = 15
        self.segmentation[16][4:, 6] = 16

        # Urgh, that was awful.


        logger.info("Starting Gridi")
        self.gen = self.generator()
        self.midi_p = midi.read_midifile(cfg)
        self.midi_res = self.midi_p.resolution
        self.midi_p.make_ticks_abs()
        # Merge every track's events into one list sorted by absolute tick.
        # BUGFIX: removed a dead assignment that selected the longest track
        # and was immediately overwritten by the merge below.
        self.events = []
        for t in self.midi_p:
            for event in t:
                self.events.append(event)
        self.events.sort(key=lambda x: x.tick)
        # Seconds per tick at a fixed 120 BPM.
        # BUGFIX: `60 / 120` is integer division (0) under Python 2, which
        # made t_delay always zero.
        self.t_delay = 60.0 / 120 / self.midi_res
        self.state = np.zeros((7, 7, 3), dtype=np.uint8)

        self.midi_client = 128
        self.midi_port = 0
        self.hw = midi.sequencer.SequencerHardware()
        self.seq = midi.sequencer.SequencerWrite(
                sequencer_resolution=self.midi_res)
        self.seq.subscribe_port(self.midi_client, self.midi_port)
        self.seq.start_sequencer()

        # Collect the sorted set of MIDI channels that actually play notes.
        self.channels = []
        for event in self.events:
            if isinstance(event, midi.NoteOnEvent):
                if event.channel not in self.channels:
                    self.channels.append(event.channel)
        self.channels.sort()

        # Now we know how many channels we have.  We choose the appropriate
        # self.segmentations, then for each channel make a mask array.
        # This part is a little messy because the channels may not be
        # sequential, or all exist. e.g. we could just have 1 and 12.
        self.pole_assignments = []
        for idx in range(len(self.channels)):
            self.pole_assignments.append(
                self.segmentation[len(self.channels)] == idx + 1)
Ejemplo n.º 33
0
def midiToNoteStateMatrix(midifile, squash=True, span=span):
    """Convert a MIDI file into a piano-roll style state matrix.

    Advances all tracks one tick at a time; every sixteenth-note boundary
    (offset by a thirty-second of a beat) appends a new state row of
    ``span`` [played, articulated] pairs covering pitches in
    [lowerBound, upperBound).  Processing stops early — returning whatever
    was built so far — when a time signature with a numerator other than
    2 or 4 is encountered.

    Returns a nested list whose rows horizontally stack the play plane and
    the articulation plane (2*span entries per row).

    NOTE(review): ``squash`` is unused; kept for interface compatibility.
    """
    pattern = midi.read_midifile(midifile)

    # Ticks until each track's next event (None == track exhausted).
    timeleft = [track[0].tick for track in pattern]

    posns = [0 for track in pattern]

    statematrix = []
    time = 0

    state = [[0, 0] for x in range(span)]
    statematrix.append(state)
    condition = True
    while condition:
        if time % (pattern.resolution / 4) == (pattern.resolution / 8):
            # Crossed a note boundary. Create a new state, defaulting to
            # holding the sounding notes with the articulation bit cleared.
            oldstate = state
            state = [[oldstate[x][0], 0] for x in range(span)]
            statematrix.append(state)
        for i in range(len(timeleft)):  # for each track
            if not condition:
                break
            while timeleft[i] == 0:
                track = pattern[i]
                pos = posns[i]

                evt = track[pos]
                if isinstance(evt, midi.NoteEvent):
                    if (evt.pitch < lowerBound) or (evt.pitch >= upperBound):
                        # Note out of the modelled pitch range: ignore it.
                        pass
                    else:
                        if isinstance(evt, midi.NoteOffEvent) or evt.velocity == 0:
                            state[evt.pitch - lowerBound] = [0, 0]
                        else:
                            state[evt.pitch - lowerBound] = [1, 1]
                elif isinstance(evt, midi.TimeSignatureEvent):
                    if evt.numerator not in (2, 4):
                        # We don't want to worry about non-4 time
                        # signatures. Bail early!
                        # (BUGFIX: removed a dead `out = statematrix`
                        # assignment that was never read.)
                        condition = False
                        break
                try:
                    timeleft[i] = track[pos + 1].tick
                    posns[i] += 1
                except IndexError:
                    timeleft[i] = None

            if timeleft[i] is not None:
                timeleft[i] -= 1

        if all(t is None for t in timeleft):
            break

        time += 1

    # Stack play bits and articulation bits side by side per time step.
    S = np.array(statematrix)
    statematrix = np.hstack((S[:, :, 0], S[:, :, 1]))
    statematrix = np.asarray(statematrix).tolist()
    return statematrix
Ejemplo n.º 34
0
def generate_events(midifile):
    """Return a tuple ``(resolution, events)`` for *midifile*: the MIDI
    resolution (ticks per quarter note) and the raw event list of the
    first track."""
    parsed = midi.read_midifile(midifile)
    resolution = parsed.resolution
    first_track = parsed[0]
    return resolution, first_track
Ejemplo n.º 35
0
def generate_embeddings_from_midi(midifile):
    """Return a list of embedding vectors for a format 0 MIDI file.

    Each embedding (produced by ``embed_note``) is a one-hot encoded
    vector of the currently sounding notes whose last element is the
    duration in milliseconds.  Other MIDI formats are not supported.

    Raises:
        RuntimeError: if the file is not format 0 (single track).
    """
    midi_dump = midi.read_midifile(midifile)

    # Only format 0 (single-track) files are supported.
    if len(midi_dump) != 1:
        raise RuntimeError("Only format 0 midi files are supported.")

    midi_track = midi_dump[0]
    ppqn = midi_dump.resolution
    tempo = 500000  # MIDI default tempo: 120 BPM in microseconds per beat
    embeddings = []

    # Bookkeeping for which note pitches are being played currently.
    note_state = []
    for curr_event in midi_track:
        delta_ticks = curr_event.tick
        # BUGFIX: compare event names with `==`, not `is` — identity
        # comparison of strings only worked by CPython interning accident.
        event_name = curr_event.name
        event_data = curr_event.data

        if event_name == midi.EndOfTrackEvent.name:
            break
        if event_name == midi.TimeSignatureEvent.name:
            # Let's not worry about non-4 time signatures.
            if curr_event.numerator not in (2, 4):
                break
            continue
        if event_name not in (midi.NoteOnEvent.name,
                              midi.NoteOffEvent.name,
                              midi.SetTempoEvent.name):
            continue

        # A nonzero delta means the previous chord sounded for that long:
        # flush it as one embedding before applying this event.
        if delta_ticks != 0:
            embeddings.append(embed_note(note_state, delta_ticks, tempo, ppqn))

        if event_name == midi.NoteOnEvent.name:
            note_state.append(event_data[0])
        elif event_name == midi.NoteOffEvent.name:
            if event_data[0] in note_state:
                note_state.remove(event_data[0])
        else:
            # Mid-song tempo change: tempo is a 24-bit big-endian value in
            # microseconds per beat (tt tt tt).
            tempo = (event_data[0] << 16) | (event_data[1] << 8) | event_data[2]
    return embeddings