def is_direct_fifth_or_octave(voice1, voice2, k):
    """Return True when the two voices arrive at index ``k`` on a direct
    (hidden) fifth or octave: the interval at ``k`` is a perfect fifth
    (7 semitones mod 12) or a unison/octave (0) and both voices moved
    into it in the same, non-static direction.

    #. `voice1`, `voice2`: indexable sequences of music21 notes/pitches
    #. `k`: index of the arrival event (``k - 1`` is the previous event)
    """
    # _interval (sibling helper) reduces the chromatic distance mod 12.
    i = _interval(voice1[k], voice2[k])
    if i == 7 or i == 0:
        d1 = notesToChromatic(voice1[k - 1], voice1[k]).direction
        d2 = notesToChromatic(voice2[k - 1], voice2[k]).direction
        # both voices must move, and in the same direction
        return (d1 == d2) and (d1 != 0)
    # BUG FIX: the original fell off the end here and returned None;
    # return an explicit False so callers always get a boolean.
    return False
def makeSegment(segment_form):
    """Return a Segment object from a dictionary with specific data about
    the segment, including metadata.

    Expected keys in `segment_form`: 'initial', 'final', 'songObj',
    'save', 'typeof', 'number', 'part_number', 'period_number',
    'segment_number'.
    """
    seg = Segment()
    initial = segment_form['initial']
    final = segment_form['final']
    songObj = segment_form['songObj']
    # excerpt of the song between the initial and final events
    score = songObj.getExcerpt(initial, final)
    seg.filename = songObj.filename
    # metadata
    seg.collection = songObj.collection
    seg.title = songObj.title
    seg.composers = songObj.composers
    seg.composersStr = songObj.composersStr
    # music
    # NOTE(review): the score object itself is only attached when the
    # segment is NOT being saved — presumably to keep persisted segments
    # lightweight; confirm against callers.
    if not segment_form['save']:
        seg.score = score
    seg.time_signature = songObj.time_signature
    seg.meter = songObj.meter
    seg.pickup = score.pickup
    seg.measuresNumber = len(score.getElementsByClass(music21.stream.Measure))
    # total duration of the excerpt, in quarter lengths
    seg.totalLength = sum([n.duration.quarterLength for n in score.flat.notesAndRests])
    # analysis
    contourObj = Contour(score)
    seg.ambitus = score.analyze("ambitus").chromatic.directed
    seg.contour = contourObj
    # FIXME: the reduction_morris method in wrong in Music21. Using local Sampaio prime form
    seg.contour_prime = contour.sampaio(contourObj.reduction_morris()[0])
    seg.contour_size = len(contourObj)
    notes = note.song_notes(score)
    seg.notes = [note.make_note(n) for n in notes]
    _size = len(notes)
    # interval sequences in several representations (project `note` module)
    seg.intervals = note.intervals_without_direction(notes)
    seg.intervals_with_direction = note.intervals_with_direction(notes)
    seg.intervals_with_direction_semitones = note.intervals_with_direction_semitones(notes)
    # first/last melodic steps; assumes the segment has at least two notes
    seg.first_interval = notesToChromatic(notes[0], notes[1]).directed
    seg.last_interval = notesToChromatic(notes[_size - 2], notes[_size - 1]).directed
    seg.typeof = segment_form['typeof']
    seg.number = segment_form['number']
    seg.initial_event = initial
    seg.final_event = final
    seg.part_number = segment_form['part_number']
    seg.period_number = segment_form['period_number']
    seg.segment_number = segment_form['segment_number']
    return seg
def relative_sequences(data_path):
    """Yield lists of relative melodic events from the guitar parts of
    every piece under `data_path`.

    Each yielded list holds 3-tuples carrying the previous two chromatic
    steps plus the current step in semitones; the first note of a run is
    encoded as (0, 0, 0).  A run is broken by any non-note event other
    than a rest shorter than four quarter lengths.
    """
    for (number, filename) in enumerate(os.listdir(data_path), start=1):
        LOG.debug("Working on piece {number}".format(**locals()))
        piece = parse(os.path.join(data_path, filename))
        LOG.debug("Finding relative sequences in piece {number}: (unknown).".format(**locals()))
        instrument = lambda x: x.getInstrument().instrumentName.lower()
        # keep only guitar parts, excluding bass guitar
        guitar_parts = (e for e in piece if isinstance(e, Part) and
                        ('guitar' in instrument(e) or 'gtr' in instrument(e)) and
                        'bass' not in instrument(e))
        # BUG FIX: this loop used to rebind `number`, shadowing the piece
        # counter used by the log messages above.
        for (part_number, part) in enumerate(guitar_parts):
            LOG.debug('Part number {part_number}'.format(**locals()))
            current_sequence = None
            last_note = None
            for measure in (elem for elem in part if isinstance(elem, Measure)):
                for element in measure:  # can be note, rest, chord, timesig, ...
                    if isinstance(element, Note):
                        if not last_note:
                            current_sequence = [(0, 0, 0)]
                        else:
                            # renamed from `interval` to avoid shadowing
                            # the music21 `interval` module name
                            step = notesToChromatic(last_note, element)
                            entry = current_sequence[-1][1:3] + (step.semitones,)
                            current_sequence.append(entry)
                        last_note = element
                    elif isinstance(element, Rest) and element.quarterLength < 4:
                        pass  # ignore short rests
                    elif current_sequence:
                        yield current_sequence
                        current_sequence = None
                        last_note = None
            # BUG FIX: a run that lasted to the end of the part was
            # silently dropped; flush the final sequence here.
            if current_sequence:
                yield current_sequence
def mixed_sequences(data_path, output_path):
    """Yield lists of melody events with key/chord context from the
    guitar parts of every single-key piece under `data_path`.

    Each event is a 5-tuple: (previous step, step, note distance from
    the tonic, mode, chord root distance from the tonic), chromatic
    distances in semitones.  Pieces whose Temperley key analysis finds
    more than one key are skipped; `output_path` holds a temporary MIDI
    rendering used by the Temperley analyzers.
    """
    for (number, filename) in enumerate(os.listdir(data_path), start=1):
        try:
            full_path = os.path.join(data_path, filename)
            LOG.debug("Finding mixed sequences in piece {number}: (unknown).".format(**locals()))
            piece = parse(full_path)
            # Temperley's analyzers work on a MIDI rendering of the piece
            temp_abspath = os.path.join(output_path, TEMP_MIDI_NAME)
            piece.write('midi', temp_abspath)
            chord_per_measure = temperley.chord_per_measure(piece, temp_abspath)
            keys = temperley.key_sequence(temp_abspath)
            if len(set(keys)) > 1:
                continue  # more than one key in piece is complicated...
            key_string = convertKeyStringToMusic21KeyString(keys[0].replace('b', '-'))
            # BUG FIX: Key() was constructed without the analyzed key
            # string, so every piece was treated as C major and the
            # computed key_string was dead.
            key = Key(key_string)
            key_pitch = key.getPitches()[0]
            instrument = lambda x: x.getInstrument().instrumentName.lower()
            # keep only guitar parts, excluding bass guitar
            guitar_parts = (e for e in piece if isinstance(e, Part) and
                            ('guitar' in instrument(e) or 'gtr' in instrument(e)) and
                            'bass' not in instrument(e))
            for part in guitar_parts:
                current_sequence = None
                last_note = None
                measures = [elem for elem in part if isinstance(elem, Measure)]
                if len(chord_per_measure) != len(measures):
                    continue  # chord analysis out of sync with the score
                for chord, measure in zip(chord_per_measure, measures):
                    chord_pitch = music21.pitch.Pitch(convertKeyStringToMusic21KeyString(chord))
                    # invariant for the whole measure; compute once
                    chord_steps = notesToChromatic(key_pitch, chord_pitch).semitones
                    for element in measure:
                        if isinstance(element, Note):
                            if not last_note:
                                current_sequence = [
                                    (0, 0,
                                     notesToChromatic(key_pitch, element).semitones,
                                     key.mode, chord_steps)]
                            else:
                                step = notesToChromatic(last_note, element)
                                entry = (current_sequence[-1][1], step.semitones,
                                         notesToChromatic(key_pitch, element).semitones,
                                         key.mode, chord_steps)
                                current_sequence.append(entry)
                            last_note = element
                        elif isinstance(element, Rest) and element.quarterLength < 4:
                            pass  # ignore short rests
                        elif current_sequence:
                            yield current_sequence
                            current_sequence = None
                            last_note = None
                # BUG FIX: a run lasting to the end of the part was dropped
                if current_sequence:
                    yield current_sequence
        except Exception:
            # best-effort over a whole corpus: log with traceback, move on
            LOG.exception('Encountered exception in (unknown)'.format(**locals()))
def make_song(score, filename):
    """Return a Song object built from a music21 score and its filename.

    Expands repeats when possible, then fills in file/book metadata,
    per-note and per-measure note lists, key/mode/ambitus analysis, and
    melodic interval sequences.

    NOTE(review): the `.decode('utf-8')` calls imply Python 2 byte
    strings for paths and titles — confirm before porting to Python 3.
    """
    def make_note(n):
        # project Note wrapper: name, base-40 pitch number, duration
        return Note(n.name, pitchToBase40(n), n.duration.quarterLength)
    try:
        score = score.expandRepeats()
    except music21.repeat.ExpanderException:
        # scores with malformed repeat marks are used as-is
        pass
    notes = song_notes(score)
    measures = score.parts[0].getElementsByClass("Measure")
    song = Song()
    song.filename = os.path.basename(filename).decode('utf-8')
    song.full_filename = filename
    # book name is the immediate parent directory of the file
    song.bookname = os.path.split(os.path.dirname(filename))[-1].decode('utf-8')
    try:
        song.title = score.metadata.title.decode('utf-8')
    except AttributeError:
        # no metadata or no title on the score
        song.title = None
    song.notes = [make_note(n) for n in notes]
    song.measures = [[make_note(n) for n in song_notes(measure)] for measure in measures]
    song.size = len(measures)
    # NOTE(review): reads song.durations, which is never assigned here —
    # presumably a property on Song derived from song.notes; confirm.
    song.durations_uniq = sorted(set(song.durations), reverse=True)
    # music21 convention: a measure numbered 0 is a pickup (anacrusis)
    song.has_pickup = True if measures[0].number == 0 else False
    song.time_signature = str(measures[0].timeSignature)
    song.ambitus = score.analyze("ambitus").chromatic.directed
    _key = score.analyze("key")
    song.tonic = str(_key.tonic)
    song.mode = _key.mode
    _size = len(notes)
    # first/last melodic steps; assumes at least two notes in the song
    song.first_interval = notesToChromatic(notes[0], notes[1]).directed
    song.last_interval = notesToChromatic(notes[_size - 2], notes[_size - 1]).directed
    song.intervals = intervals_without_direction(notes)
    song.intervals_with_direction = intervals_with_direction(notes)
    song.contour = Contour(notes)
    return song
def _measure_melodies(measure):
    r"""
    Return the melody events of a measure, wrapped in sentinel strings.

    #. `measure`: a music21 `Stream` directly containing notes/rests/...

    Each event is a 3-tuple of chromatic steps in semitones, carrying
    the two previous steps plus the current one; the first note of the
    measure is encoded as (0, 0, 0).  Non-note elements are skipped.
    """
    events = []
    previous = None
    for item in measure:
        if not isinstance(item, Note):
            continue
        if previous is None:
            events.append((0, 0, 0))  # first note: no preceding interval
        else:
            _, a, b = events[-1]
            events.append((a, b, notesToChromatic(previous, item).semitones))
        previous = item
    return ["BEGINNING_OF_SEQUENCE"] + events + ["END_OF_SEQUENCE"]
def _measure_melodies_mixed(measure, key, chord):
    r"""
    Return the melody events of a measure in a key/chord context,
    wrapped in sentinel strings.

    #. `measure`: a music21 `Stream` directly containing notes/rests/...
    #. `key`: "major" or "minor"
    #. `chord`: chord root name, optionally with a trailing "m" (minor)

    Each event is a 5-tuple: (previous step, step, note distance from
    the tonic, key, chord root distance from the tonic), chromatic
    distances in semitones.  Raises ValueError for an unknown `key`.
    """
    if key == "minor":
        key_pitch = music21.pitch.Pitch("a")
    elif key == "major":
        key_pitch = music21.pitch.Pitch("c")
    else:
        # BUG FIX: an unrecognized key used to crash later with a
        # NameError on key_pitch; fail fast with a clear message.
        raise ValueError("key must be 'major' or 'minor': %r" % (key,))
    # strip a minor-chord suffix; only the root pitch is used
    if chord[-1] == "m":
        chord = chord[:-1]
    chord_pitch = music21.pitch.Pitch(chord)
    # invariant for the whole measure; compute once (the original
    # recomputed it per note, and also built an unused Key object)
    chord_steps = notesToChromatic(key_pitch, chord_pitch).semitones
    entries = []
    last_note = None
    for element in measure:
        if isinstance(element, Note):
            if entries:
                prev_entry = entries[-1]
                # reuse the single notesToChromatic result (the original
                # computed it twice and discarded the first)
                step = notesToChromatic(last_note, element).semitones
                entries.append((
                    prev_entry[1],
                    step,
                    notesToChromatic(key_pitch, element).semitones,
                    key,
                    chord_steps,
                ))
            else:
                # first note of the measure: no preceding interval
                entries.append((
                    0,
                    0,
                    notesToChromatic(key_pitch, element).semitones,
                    key,
                    chord_steps,
                ))
            last_note = element
    return ["BEGINNING_OF_SEQUENCE"] + entries + ["END_OF_SEQUENCE"]
def intervalTooBig(self, midiRef, omrRef, setint=5):
    """Return True when the chromatic interval class between the two
    notes exceeds `setint`.

    #. `midiRef`, `omrRef`: music21 notes/pitches to compare
    #. `setint`: threshold interval class (default 5)
    """
    # idiom: return the comparison directly instead of
    # `if cond: return True / return False`
    return interval.notesToChromatic(midiRef, omrRef).intervalClass > setint
def mixed_sequences(data_path, output_path):
    """Yield lists of melody events with key/chord context from the
    guitar parts of every single-key piece under `data_path`.

    Each event is a 5-tuple: (previous step, step, note distance from
    the tonic, mode, chord root distance from the tonic), chromatic
    distances in semitones.  Pieces whose Temperley key analysis finds
    more than one key are skipped; `output_path` holds a temporary MIDI
    rendering used by the Temperley analyzers.
    """
    for (number, filename) in enumerate(os.listdir(data_path), start=1):
        try:
            full_path = os.path.join(data_path, filename)
            LOG.debug("Finding mixed sequences in piece {number}: (unknown).".
                      format(**locals()))
            piece = parse(full_path)
            # Temperley's analyzers work on a MIDI rendering of the piece
            temp_abspath = os.path.join(output_path, TEMP_MIDI_NAME)
            piece.write('midi', temp_abspath)
            chord_per_measure = temperley.chord_per_measure(piece, temp_abspath)
            keys = temperley.key_sequence(temp_abspath)
            if len(set(keys)) > 1:
                continue  # more than one key in piece is complicated...
            key_string = convertKeyStringToMusic21KeyString(keys[0].replace('b', '-'))
            # BUG FIX: Key() was constructed without the analyzed key
            # string, so every piece was treated as C major and the
            # computed key_string was dead.
            key = Key(key_string)
            key_pitch = key.getPitches()[0]
            instrument = lambda x: x.getInstrument().instrumentName.lower()
            # keep only guitar parts, excluding bass guitar
            guitar_parts = (e for e in piece if isinstance(e, Part) and
                            ('guitar' in instrument(e) or 'gtr' in instrument(e)) and
                            'bass' not in instrument(e))
            for part in guitar_parts:
                current_sequence = None
                last_note = None
                measures = [elem for elem in part if isinstance(elem, Measure)]
                if len(chord_per_measure) != len(measures):
                    continue  # chord analysis out of sync with the score
                for chord, measure in zip(chord_per_measure, measures):
                    chord_pitch = music21.pitch.Pitch(
                        convertKeyStringToMusic21KeyString(chord))
                    # invariant for the whole measure; compute once
                    chord_steps = notesToChromatic(key_pitch, chord_pitch).semitones
                    for element in measure:
                        if isinstance(element, Note):
                            if not last_note:
                                current_sequence = [
                                    (0, 0,
                                     notesToChromatic(key_pitch, element).semitones,
                                     key.mode, chord_steps)]
                            else:
                                step = notesToChromatic(last_note, element)
                                entry = (current_sequence[-1][1], step.semitones,
                                         notesToChromatic(key_pitch, element).semitones,
                                         key.mode, chord_steps)
                                current_sequence.append(entry)
                            last_note = element
                        elif isinstance(element, Rest) and element.quarterLength < 4:
                            pass  # ignore short rests
                        elif current_sequence:
                            yield current_sequence
                            current_sequence = None
                            last_note = None
                # BUG FIX: a run lasting to the end of the part was dropped
                if current_sequence:
                    yield current_sequence
        except Exception:
            # best-effort over a whole corpus: log with traceback, move on
            LOG.exception('Encountered exception in (unknown)'.format(**locals()))
def get_interval_between_parts_at_offset(self, part_number_1, part_number_2, offset):
    """Return the chromatic interval between the notes sounding in the
    two given parts at `offset`."""
    sounding = [self.get_note_at_offset(part, offset)
                for part in (part_number_1, part_number_2)]
    return notesToChromatic(*sounding)
def _interval(a, b):
    """Return the undirected chromatic interval between `a` and `b`,
    reduced to within a single octave (0-11 semitones)."""
    semitones = notesToChromatic(a, b).semitones
    return abs(semitones) % 12
def intervals_midi(notes):
    """Return the directed chromatic intervals, in semitones, between
    each pair of consecutive notes."""
    return [notesToChromatic(prev, curr).semitones
            for prev, curr in zip(notes, notes[1:])]
from music21 import key
from music21 import interval

# Iterate over Bach chorales (Riemenschneider numbering), restricted to
# the listed chorale numbers, returned as music21 streams.
# NOTE(review): `corpus` and `roman` are not imported in this chunk —
# presumably imported earlier in the file; Python 2 print statements below.
bci = corpus.chorales.Iterator(2, 371, numberingSystem = 'riemenschneider',
                               numberList = [1,2,3,4,6,190,371],
                               returnType = 'stream')
chordProgressions = []
intervalSets = []
for chorale in bci:
    # collapse all voices into a single chordal reduction
    reduction = chorale.chordify()
    analyzedKey = chorale.analyze('key')
    chords = []
    intervals = []
    for c in reduction.flat.getElementsByClass('Chord'):
        c.closedPosition(forceOctave=4, inPlace = True)
        chordPitches = c.pitches
        chordSize = len(chordPitches)
        # span between the lowest and highest pitch of the closed chord
        mainInterval = interval.notesToChromatic(chordPitches[0], chordPitches[chordSize-1])
        # Roman numeral of the chord relative to the analyzed key
        rn = roman.romanNumeralFromChord(c, analyzedKey)
        chords.append(rn)
        intervals.append(mainInterval)
    chordProgressions.append(chords)
    intervalSets.append(intervals)
# dump results, one chorale per line, separated by "//" markers
for ch in chordProgressions:
    print ch
    print "//"
for i in intervalSets:
    print i
    print "//"