# --- Example 1 ---
# Script fragment: render a predicted sequence of [note, duration, offset]
# triples into a music21 Stream.
# NOTE(review): this fragment appears truncated — the final `elif` branch
# (single-pitch case) never appends `new_note` to `midi`, and `save_folder`
# is computed but never used; presumably the MIDI write happens in code that
# was cut off below. Confirm against the original source.
print(f"Generated {len(prediction_output)} notes")

save_folder = os.path.join(os.getcwd(), 'generated music')

midi = music21.stream.Stream()

offset = 0
# Durations and offsets are integer-encoded, scaled by 12 (see the comment
# inside the loop) — presumably matching the encoder's quantization; verify.
for notes_result, duration_result, offset_result in prediction_output:
    offset += int(offset_result)
    if ('.' in notes_result):
        # Dot-separated pitch names denote a chord token (e.g. "C4.E4.G4").
        notes = notes_result.split('.')
        chord = []
        for single_note in notes:
            #dividing by 12 to go back to original duration and offset
            new_note = note.Note(single_note)
            new_note.duration = duration.Duration(int(duration_result) / 12)
            new_note.offset = float(offset / 12)
            new_note.storedInstrument = instrument.Piano()
            chord.append(new_note)
        midi.append(music21.chord.Chord(chord))
    elif notes_result == 'rest':
        # Explicit rest token.
        new_note = note.Rest()
        new_note.duration = duration.Duration(int(duration_result) / 12)
        new_note.offset = float(offset / 12)
        new_note.storedInstrument = instrument.Piano()
        midi.append(new_note)
    elif notes_result != 'START' and notes_result != 'UNKNOWN':
        # Single pitch token; the 'START'/'UNKNOWN' sentinels are skipped.
        new_note = note.Note(notes_result)
        new_note.duration = duration.Duration(int(duration_result) / 12)
        new_note.offset = float(offset / 12)
def _musedataRecordListToNoteOrChord(records, previousElement=None):
    '''Given a list of MuseDataRecord objects, return a configured
    :class:`~music21.note.Note` or :class:`~music21.chord.Chord`.

    Optionally pass a previous element, which may be a music21 Note, Chord,
    or Rest; this is used to determine tie status.

    Returns a two-tuple of (note-or-chord, list of dynamic objects); the
    dynamics are returned separately because they belong in the Stream,
    not on the Note.
    '''
    from music21 import note
    from music21 import chord
    from music21 import tie

    if len(records) == 1:
        post = note.Note()
        # directly assign the pitch object; it will already have an accidental
        post.pitch = records[0].getPitchObject()
    else:
        # a list of Pitch objects can be supplied at creation
        post = chord.Chord([r.getPitchObject() for r in records])

    # if a chord, we assume that all durations are the same
    post.quarterLength = records[0].getQuarterLength()

    # see if there are any lyrics; not sure what to do if lyrics are defined
    # for multiple chord tones
    lyricList = records[0].getLyrics()
    if lyricList is not None:
        # calling addLyric repeatedly auto-increments the lyric number
        for lyric in lyricList:
            post.addLyric(lyric)

    # see if there are any beams; again, taken from the first record only
    beamsChars = records[0].getBeams()
    if beamsChars is not None:
        post.beams = _musedataBeamToBeams(beamsChars)

    # get accents, expressions, and dynamics; all assumed to be on the first
    # record; each getter returns an empty list or None
    dynamicObjs = []  # stored in the Stream, not on the Note

    for a in records[0].getArticulationObjects():
        post.articulations.append(a)
    for e in records[0].getExpressionObjects():
        post.expressions.append(e)

    for d in records[0].getDynamicObjects():
        dynamicObjs.append(d)

    # presently this sets a single tie for a chord; there may be other cases
    if records[0].isTied():
        post.tie = tie.Tie('start')  # can be start or continue
        # PEP 8: compare to None with `is`/`is not`, not ==/!=
        if previousElement is not None and previousElement.tie is not None:
            # if the previous tie is a start or a continue, this has to be a
            # continue, as musedata does not mark the end of a tie
            if previousElement.tie.type in ['start', 'continue']:
                post.tie = tie.Tie('continue')
    else:  # no tie indication in the musedata record
        if previousElement is not None and previousElement.tie is not None:
            if previousElement.tie.type in ['start', 'continue']:
                post.tie = tie.Tie('stop')  # close the open tie
    return post, dynamicObjs
# --- Example 3 ---
    def __init__(self,
                 bassNote='C3',
                 notationString=None,
                 fbScale=None,
                 fbRules=None,
                 numParts=4,
                 maxPitch='B5',
                 listOfPitches=None):
        '''
        A Segment corresponds to a 1:1 realization of a bassNote and notationString
        of a :class:`~music21.figuredBass.realizer.FiguredBassLine`.
        It is created by passing six arguments: a
        :class:`~music21.figuredBass.realizerScale.FiguredBassScale`, a bassNote, a notationString,
        a :class:`~music21.figuredBass.rules.Rules` object, a number of parts and a maximum pitch.
        Realizations of a Segment are represented
        as possibility tuples (see :mod:`~music21.figuredBass.possibility` for more details).

        Methods in Python's `itertools <http://docs.python.org/library/itertools.html>`_
        module are used extensively. Methods
        which generate possibilities or possibility progressions return iterators,
        which are turned into lists in the examples
        for display purposes only.

        if fbScale is None, a realizerScale.FiguredBassScale() is created

        if fbRules is None, a rules.Rules() instance is created.  Each Segment gets
        its own deepcopy of the one given.


        Here, a Segment is created using the default values: a FiguredBassScale in C,
        a bassNote of C3, an empty notationString, and a default
        Rules object.

        >>> from music21.figuredBass import segment
        >>> s1 = segment.Segment()
        >>> s1.bassNote
        <music21.note.Note C>
        >>> s1.numParts
        4
        >>> s1.pitchNamesInChord
        ['C', 'E', 'G']
        >>> [str(p) for p in s1.allPitchesAboveBass]
        ['C3', 'E3', 'G3', 'C4', 'E4', 'G4', 'C5', 'E5', 'G5']
        >>> s1.segmentChord
        <music21.chord.Chord C3 E3 G3 C4 E4 G4 C5 E5 G5>
        '''
        # Accept strings or music21 objects for the bass note and ceiling pitch.
        if common.isStr(bassNote):
            bassNote = note.Note(bassNote)
        if common.isStr(maxPitch):
            maxPitch = pitch.Pitch(maxPitch)

        if fbScale is None:
            # Lazily build and cache a single shared default scale; building
            # a FiguredBassScale is relatively expensive.
            if _defaultRealizerScale['scale'] is None:
                _defaultRealizerScale[
                    'scale'] = realizerScale.FiguredBassScale()
            fbScale = _defaultRealizerScale['scale']  # save making it

        if fbRules is None:
            self.fbRules = rules.Rules()
        else:
            # each Segment gets its own deepcopy of the rules it was given
            self.fbRules = copy.deepcopy(fbRules)

        # caches for the rule-checking machinery, populated lazily elsewhere
        self._specialResolutionRuleChecking = None
        self._singlePossibilityRuleChecking = None
        self._consecutivePossibilityRuleChecking = None

        self.bassNote = bassNote
        self.numParts = numParts
        self._maxPitch = maxPitch
        # PEP 8: compare to None with `is`/`is not` rather than ==/!=.
        #!------ Added to accommodate harmony.ChordSymbol and roman.RomanNumeral objects ------!
        if notationString is None and listOfPitches is not None:
            # must be a chord symbol or roman numeral
            self.pitchNamesInChord = listOfPitches
        else:
            self.pitchNamesInChord = fbScale.getPitchNames(
                self.bassNote.pitch, notationString)

        self.allPitchesAboveBass = getPitches(self.pitchNamesInChord,
                                              self.bassNote.pitch,
                                              self._maxPitch)
        self.segmentChord = chord.Chord(self.allPitchesAboveBass,
                                        quarterLength=bassNote.quarterLength)
        self._environRules = environment.Environment(_MOD)
# Script fragment: integer-encode a seed phrase of (note, duration) pairs and
# prepare inputs for a prediction loop.
# NOTE(review): the fragment is truncated — the `for note_index ...` loop body
# continues beyond what is visible here; `prediction_output`, `note_to_int`,
# `duration_to_int`, `notes`, `durations`, `max_extra_notes` and
# `sequence_length` are defined elsewhere.
notes_input_sequence = []
durations_input_sequence = []

# one-hot (128-dim, MIDI pitch space) vectors of the seed notes
overall_preds = []

for n, d in zip(notes, durations):
    note_int = note_to_int[n]
    duration_int = duration_to_int[d]

    notes_input_sequence.append(note_int)
    durations_input_sequence.append(duration_int)

    prediction_output.append([n, d])

    if n != 'START':
        # Convert the token to a MIDI pitch number via music21 and one-hot it.
        midi_note = note.Note(n)

        new_note = np.zeros(128)
        new_note[midi_note.pitch.midi] = 1
        overall_preds.append(new_note)

# attention matrix: one row per timestep (seed + generated), one column per
# generated note — TODO confirm intended orientation
att_matrix = np.zeros(shape=(max_extra_notes + sequence_length,
                             max_extra_notes))

for note_index in range(max_extra_notes):

    # model expects a batch of one: [notes, durations] sequences
    prediction_input = [
        np.array([notes_input_sequence]),
        np.array([durations_input_sequence])
    ]
# --- Example 5 ---
def get():
    """Build the score of 'Alle meine Entchen' as a single-part piece.

    Returns a two-tuple of (Score, list of the Note objects in part 1).
    """
    melody = [
        ('C4', 'quarter'), ('D4', 'quarter'), ('E4', 'quarter'),
        ('F4', 'quarter'), ('G4', 'half'), ('G4', 'half'),
        ('A4', 'quarter'), ('A4', 'quarter'), ('A4', 'quarter'),
        ('A4', 'quarter'), ('G4', 'half'),
        ('A4', 'quarter'), ('A4', 'quarter'), ('A4', 'quarter'),
        ('A4', 'quarter'), ('G4', 'half'),
        ('F4', 'quarter'), ('F4', 'quarter'), ('F4', 'quarter'),
        ('F4', 'quarter'), ('E4', 'half'), ('E4', 'half'),
        ('D4', 'quarter'), ('D4', 'quarter'), ('D4', 'quarter'),
        ('D4', 'quarter'), ('C4', 'half'),
    ]
    notes = [note.Note(pitch_name, type=note_type)
             for pitch_name, note_type in melody]

    part = stream.Part()
    part.id = 'part1'
    part.append(notes)

    piece = stream.Score()
    piece.insert(0, metadata.Metadata())
    piece.metadata.title = 'Alle meine Entchen'
    piece.insert(0, part)
    return piece, notes
# --- Example 6 ---
# Script fragment: weighted random walk over a Markov graph of pitches.
# NOTE(review): truncated — the second `for` loop's body continues beyond the
# visible lines; `notes`, `data_graph`, `markov_out`, `s`, `OUTPUT_LENGTH`
# are defined elsewhere.
print('Markov graph created.')

print('Predicting sequence...')

# Choose a random integer within the note's total count.
# NOTE(review): this uses len(notes), not a node's count, for the FIRST pick —
# presumably intentional (uniform-ish seed), but verify against the data model.
random_int = random.choice(range(1, len(notes)))

counter = 0
# Iterate through neighbour array until the neighbour occurrence is greater than the random integer.
for node in data_graph:
    counter += node['count']
    if counter >= random_int:
        # Append neighbour to data out.
        markov_out.append(node['pitch'])
        s.append(note.Note(node['pitch']))
        break

# Generate range of notes equal to defined sequence length.
for generated_note in range(OUTPUT_LENGTH - 1):

    # Get node's neighbour array and total occurrence.
    # NOTE(review): next(..., -1) falls back to -1 (the LAST node) when the
    # pitch is missing — confirm that is the intended fallback.
    pitch_index = next((i for i, item in enumerate(data_graph)
                        if item['pitch'] == markov_out[generated_note]), -1)
    nbs_array = data_graph[pitch_index]['nbs']
    total_count = data_graph[pitch_index]['count']

    # Choose a random integer within the note's total count.
    random_int = random.choice(range(1, total_count))

    counter = 0
# --- Example 7 ---
    def parse_symbols(self):
        """Go through notes in order and check for errors, later this will apply modifiers"""
        # Load every symbol-template image (grayscale) except macOS junk files.
        template_names = sorted(os.listdir('./resources/templates/'))
        templates = []
        for name in template_names:
            if name != '.DS_Store':
                tem = cv2.imread('./resources/templates/' + name, 0)
                templates.append((tem, name))
        bar = 1
        # running quarterLength total of the current bar; 4 == a full bar
        # (assumes 4/4 time — TODO confirm)
        bar_length = 0
        i = 0
        for i in range(len(self.symbols)):
            sym = self.symbols[i]
            print(i)
            if sym.is_bar():
                print('bar {} is {} long'.format(bar, bar_length))
                if bar_length < 4:
                    saw_eight_alone = False
                    saw_eight_alone_at = 0
                    # check if we can fix anything in this bar or add rest at end
                    # walk backwards from the barline over this bar's symbols
                    for ii in range(-1, -i, -1):
                        ss = self.symbols[i + ii]
                        if self.symbols[i + ii].is_bar():
                            break
                        # NOTE(review): ss, before_ss and after_ss are assigned
                        # but never used.
                        before_ss = self.symbols[i + ii - 1]
                        after_ss = self.symbols[i + ii + 1]

                        # remember an eighth with no eighth neighbours — a
                        # candidate to lengthen to a quarter below
                        if self.symbols[i + ii].type == SymbolType.EIGHTH:
                            if self.symbols[i + ii - 1].type != SymbolType.EIGHTH and \
                                    self.symbols[i + ii + 1].type != SymbolType.EIGHTH:
                                saw_eight_alone = True
                                saw_eight_alone_at = ii
                        # a symbol flagged markHalf may really be a half note:
                        # rebuild the stream tail with that note stretched to 2
                        if self.symbols[i + ii].markHalf:
                            increase_by = 4 - bar_length  # NOTE(review): unused
                            steam_tmp = self.stream.elements[ii:]
                            self.stream = self.stream[:ii]
                            s = steam_tmp[0]
                            old_len = s.quarterLength
                            new_len = max(old_len, 2)
                            d = duration.Duration()
                            d.quarterLength = new_len
                            n = note.Note(pitch=steam_tmp[0].pitch, duration=d)
                            self.stream.append(n)
                            if len(steam_tmp) != 1:
                                for iii in range(1, len(steam_tmp)):
                                    self.stream.append(steam_tmp[iii])
                            bar_length += (new_len - old_len)
                            if bar_length == 4:
                                print('corrected with shortening')
                                break
                    if bar_length != 4:
                        need = 4 - bar_length
                        if need == 0.5 and saw_eight_alone:
                            # lengthen the isolated eighth to a quarter note
                            # NOTE(review): format string has one placeholder
                            # but two arguments; `need` is never printed.
                            print('bar {} is too short changing eighth'.format(
                                bar, need))
                            ii = saw_eight_alone_at
                            steam_tmp = self.stream.elements[ii:]
                            self.stream = self.stream[:ii]
                            s = steam_tmp[0]
                            d = duration.Duration()
                            d.quarterLength = 1
                            n = note.Note(pitch=steam_tmp[0].pitch, duration=d)
                            self.stream.append(n)
                            if len(steam_tmp) != 1:
                                for iii in range(1, len(steam_tmp)):
                                    self.stream.append(steam_tmp[iii])
                        else:
                            # pad the bar with a rest of the missing length
                            d = duration.Duration()
                            d.quarterLength = need
                            print('bar {} is too short adding {}'.format(
                                bar, need))
                            rest = note.Rest(duration=d)
                            # need == 4 would mean an entirely empty bar; skip
                            if need != 4:
                                self.stream.append(rest)
                if bar_length > 4:
                    # check if we can fix anything in this bar or add rest at end
                    for ii in range(-1, -i, -1):
                        need = 4 - (bar_length - 4)  # NOTE(review): unused here
                        if self.symbols[i + ii].is_bar():
                            break
                        # a symbol flagged markFull may be shorter than read:
                        # shrink it (not below an eighth, 0.5) to fit the bar
                        if self.symbols[i + ii].markFull:
                            reduce_by = bar_length - 4
                            steam_tmp = self.stream.elements[ii:]
                            self.stream = self.stream[:ii]
                            s = steam_tmp[0]
                            old_len = s.quarterLength
                            new_len = max(old_len - reduce_by, 0.5)
                            d = duration.Duration()
                            d.quarterLength = new_len
                            n = note.Note(pitch=steam_tmp[0].pitch, duration=d)
                            self.stream.append(n)
                            if len(steam_tmp) != 1:
                                for iii in range(1, len(steam_tmp)):
                                    self.stream.append(steam_tmp[iii])
                            bar_length -= (old_len - new_len)
                            if bar_length == 4:
                                # ('legnthening' typo is in the original output)
                                print('corrected with legnthening')
                                break
                    if bar_length != 4:
                        need = 4 - (bar_length - 4)
                        d = duration.Duration()
                        d.quarterLength = need
                        print('bar {} is too long short {}'.format(bar, need))
                        rest = note.Rest(duration=d)
                        self.stream.append(rest)
                bar_length = 0
                bar += 1
            if sym.is_part_of_key_sig():
                # clef and time-signature symbols recognized but not yet handled
                if sym.get_type() == SymbolType.CLEF:
                    pass
                if sym.get_type() == SymbolType.TIMESIG:
                    pass
            if sym.is_note():
                if sym.get_type() == SymbolType.TIED_EIGHTH:
                    # beamed eighths: determine each pitch against the 'full'
                    # notehead template only
                    beam_notes = sym.determine_beamed_pitch([
                        t for t in filter(lambda t: t[1] == 'full.png',
                                          templates)
                    ])
                    for n_p in beam_notes:
                        pitch_str = to_pitchStr_treble(n_p)
                        n = note.Note(
                            pitchName=pitch_str,
                            duration=get_duration_from_name('eighth'))
                        bar_length += 0.5
                        self.stream.append(n)
                        # Music21 will decided a nice beaming scheme for us
                else:
                    pitch_str = to_pitchStr_treble(
                        sym.determine_pitch(templates))
                    d = get_duration_from_name(sym.get_name())
                    bar_length += d.quarterLength
                    n = note.Note(pitchName=pitch_str, duration=d)
                    self.stream.append(n)
            if sym.is_rest():
                # rests carry their own duration (in quarterLengths)
                d = sym.get_rest_duration()
                bar_length += d
                dur = duration.Duration()
                dur.quarterLength = d
                n = note.Rest(duration=dur)
                self.stream.append(n)
# --- Example 8 ---
        def createDoubleInvertedTurnMeasure():
            '''
            Returns a dictionary with the following keys

            returnDict = {
                'name': string,
                'midi': measure stream,
                'omr': measure stream,
                'expected': measure stream,
            }
            '''
            # OMR reading: two plain B-flats around a G, no ornaments marked.
            omrNote1 = note.Note('B-')
            middleNote = note.Note('G')
            omrNote2 = note.Note('B-')  # enharmonic to trill
            omrMeasure = stream.Measure()
            omrMeasure.append([omrNote1, middleNote, omrNote2])

            # Expected fix: the same notes with inverted turns on both B-flats.
            expectOmrNote1 = deepcopy(omrNote1)
            expectOmrNote1.expressions.append(expressions.InvertedTurn())
            expectOmrNote2 = deepcopy(omrNote2)
            expectOmrNote2.expressions.append(expressions.InvertedTurn())
            expectedFixedOmrMeasure = stream.Stream()
            expectedFixedOmrMeasure.append(
                [expectOmrNote1,
                 deepcopy(middleNote), expectOmrNote2])

            # MIDI realization: each turn played as four sixteenth notes.
            turn1 = [note.Note(p) for p in ('A', 'B-', 'C5', 'B-')]
            turn2 = [note.Note(p) for p in ('G#', 'A#', 'B', 'A#')]
            for turn_note in turn1 + turn2:
                turn_note.duration = duration.Duration(.25)
            midiMeasure = stream.Measure()
            midiMeasure.append([*turn1, deepcopy(middleNote), *turn2])

            return {
                'name':
                'Inverted turns with accidentals separated By non-ornament Note',
                'midi': midiMeasure,
                'omr': omrMeasure,
                'expected': expectedFixedOmrMeasure,
            }
# --- Example 9 ---
                                        soprano.flat.notes[-1],
                                        current_bass,
                                        down_beat,
                                        species,
                                        prob_factor=2,
                                        debug=True))
    s.insert(0, soprano)
    s.insert(0, bass)

    if show:
        s.show()
    return s


if __name__ == "__main__":
    # Demo: realize a second-species counterpoint against a short cantus
    # firmus in E-flat and write the result to MusicXML and MIDI.
    from music21 import note, pitch, corpus, stream, key, clef, meter, interval, duration

    cantus_pitches = ["Eb3", "D3", "Eb3", "F3", "Ab3", "G3", "F3", "Eb3"]
    cf = stream.Stream([key.Key('Eb'), meter.TimeSignature('2/4')])
    cf.append([note.Note(p, type='half') for p in cantus_pitches])

    result = write_two_part(cf=cf, cf_type='bass', species=2, show=False)
    result.write('musicxml', './output/result.xml')
    result.write('midi', './output/result.midi')
# --- Example 10 ---
    def testGetNotesWithinDuration(self):
        """Exercise getNotesWithinDuration: starting from a note, collect the
        notes from the note's context that fit inside a given duration.

        The later sections deliberately re-append n1 to other containers,
        which moves its music21 activeSite; the referenceStream argument is
        then used to override that — TODO confirm this matches the
        getNotesWithinDuration contract.
        """
        n1 = note.Note('C')
        n1.duration = duration.Duration('quarter')
        m1 = stream.Stream()
        m1.append(n1)

        result = getNotesWithinDuration(n1, duration.Duration('quarter'))
        self.assertIsInstance(result, stream.Stream)
        self.assertListEqual([n1], list(result.notes),
                             'starting note occupies full duration')

        result = getNotesWithinDuration(n1, duration.Duration('half'))
        self.assertListEqual([n1], list(result.notes),
                             'starting note occupies partial duration')

        result = getNotesWithinDuration(n1, duration.Duration('eighth'))
        self.assertListEqual([], list(result.notes), 'starting note too long')

        # appending n1 here moves its activeSite from m1 to m2
        m2 = stream.Measure()
        n2 = note.Note('D')
        n2.duration = duration.Duration('eighth')
        n3 = note.Note('E')
        n3.duration = duration.Duration('eighth')
        m2.append([n1, n2, n3])

        result = getNotesWithinDuration(n1, duration.Duration('quarter'))
        self.assertListEqual([n1], list(result.notes),
                             'starting note occupies full duration')

        result = getNotesWithinDuration(n1, duration.Duration('half'))
        self.assertListEqual([n1, n2, n3], list(result.notes),
                             'all notes fill up full duration')

        result = getNotesWithinDuration(n1, duration.Duration('whole'))
        self.assertListEqual([n1, n2, n3], list(result.notes),
                             'all notes fill up partial duration')

        result = getNotesWithinDuration(n1, duration.Duration(1.5))
        self.assertListEqual([n1, n2], list(result.notes),
                             'some notes fill up full duration')

        result = getNotesWithinDuration(n1, duration.Duration(1.75))
        self.assertListEqual([n1, n2], list(result.notes),
                             'some notes fill up partial duration')

        # set active site from m2 to m1 (which runs out of notes to fill up)
        result = getNotesWithinDuration(n1,
                                        duration.Duration('half'),
                                        referenceStream=m1)
        self.assertListEqual([n1], list(result.notes),
                             'partial fill up from reference stream m1')

        m3 = stream.Measure()
        m3.id = 'm3'
        r1 = note.Rest()
        r1.duration = duration.Duration('quarter')
        m3.append([n1, r1])  # n1 active site now with m2
        result = getNotesWithinDuration(n1, duration.Duration('half'))
        msg = 'note and rest fill up full duration'
        self.assertListEqual([n1, r1], list(result.notesAndRests), msg)

        # set active site from m3 to m2
        result = getNotesWithinDuration(n1,
                                        duration.Duration('half'),
                                        referenceStream=m2)
        self.assertListEqual([n1, n2, n3], list(result.notes),
                             'fill up from reference stream m2')
# --- Example 11 ---
        def createDoubleTrillMeasure():
            '''
            Returns a dictionary with the following keys

            returnDict = {
                'name': string,
                'midi': measure stream,
                'omr': measure stream,
                'expected': measure stream,
            }

            The OMR measure contains two plain quarter notes (G, B3); the
            MIDI measure realizes each as a trill (G-A-G-A in sixteenths,
            C-B-C-B... in sixty-fourths); the expected measure is the OMR
            measure with the corresponding Trill expressions attached.
            '''
            noteDuration = duration.Duration('quarter')

            # GAGA Trill
            trill1NoteDuration = duration.Duration(.25)
            n0 = note.Note('G')  # the note as the OMR read it
            n0.duration = noteDuration
            n1 = note.Note('G')
            n1.duration = trill1NoteDuration
            n2 = note.Note('A')
            n2.duration = trill1NoteDuration
            trill1 = [n1, n2, deepcopy(n1), deepcopy(n2)]  # GAGA

            # CBCB Trill
            trill2NoteDuration = duration.Duration(.0625)
            n3 = note.Note('B3')  # omr
            n3.duration = noteDuration
            n4 = note.Note('B3')
            n4.duration = trill2NoteDuration
            n5 = note.Note('C')
            n5.duration = trill2NoteDuration
            trill2 = [
                n5, n4,
                deepcopy(n5),
                deepcopy(n4),
                deepcopy(n5),
                deepcopy(n4),
                deepcopy(n5),
                deepcopy(n4)
            ]

            midiMeasure = stream.Measure()
            midiMeasure.append(trill1)
            midiMeasure.append(trill2)

            omrMeasure = stream.Measure()
            omrMeasure.append([n0, n3])

            expectedFixedOmrMeasure = stream.Measure()
            n0WithTrill = deepcopy(n0)
            n0Trill = expressions.Trill()
            n0Trill.size = interval.Interval('m-2')
            n0Trill.quarterLength = trill1NoteDuration.quarterLength
            n0WithTrill.expressions.append(n0Trill)
            n1WithTrill = deepcopy(n3)
            n1Trill = expressions.Trill()
            n1Trill.size = interval.Interval('M2')
            n1Trill.quarterLength = trill2NoteDuration.quarterLength
            # BUG FIX: attach the second trill (n1Trill) to the second note;
            # the original appended n0Trill again and left n1Trill unused.
            n1WithTrill.expressions.append(n1Trill)
            expectedFixedOmrMeasure.append([n0WithTrill, n1WithTrill])

            returnDict = {
                'name': 'Double Trill Measure',
                'midi': midiMeasure,
                'omr': omrMeasure,
                'expected': expectedFixedOmrMeasure,
            }
            return returnDict
# --- Example 12 ---
def PIG2Stream(fname, beam=0, time_unit=.5, fixtempo=0):
    """
    Convert a PIG text file to a music21 Stream object.

    fname: path to the PIG text file (whitespace-separated columns:
        id, onset, offset, name, ?, ?, channel, finger).
    beam: 0 for the right hand, 1 for the left hand (matched against the
        channel column).
    time_unit: must be a multiple of 2; scales durations to quarterLengths.
    fixtempo: if non-zero, every note/chord/rest gets this fixed
        quarterLength instead of one derived from onset/offset times.

    Returns a music21 stream.Part whose id is the beam number.
    """
    from music21 import stream, note, chord
    from music21.articulations import Fingering
    import numpy as np

    # `with` guarantees the file is closed (the original leaked it on error)
    with open(fname, "r") as f:
        lines = f.readlines()

    # work out note type from distribution of durations;
    # triplets are squashed to the closest figure
    durations = []
    firstonset = 0
    blines = []
    for l in lines:
        if l.startswith('//'):
            continue  # comment line
        _, onset, offset, name, _, _, channel, _ = l.split()
        onset, offset = float(onset), float(offset)
        if beam != int(channel):
            continue  # other hand
        if not firstonset:
            firstonset = onset
        if offset - onset < 0.0001:
            continue  # skip zero-length notes
        durations.append(offset - onset)
        blines.append(l)
    durations = np.array(durations)
    logdurs = -np.log2(durations)
    mindur = np.min(logdurs)
    expos = (logdurs - mindur).astype(int)
    if np.max(expos) > 3:
        # duration range too wide: shift the reference one step faster
        mindur = mindur + 1

    sf = stream.Part()
    sf.id = beam

    # initial rest before the first onset
    if not fixtempo and firstonset:
        r = note.Rest()
        logdur = -np.log2(firstonset)
        r.duration.quarterLength = 1.0/time_unit/pow(2, int(logdur-mindur))
        sf.append(r)

    n = len(blines)
    for i in range(n):
        _, onset, offset, name, _, _, _, finger = blines[i].split()
        onset, offset = float(onset), float(offset)
        name = name.replace('b', '-')  # PIG flat spelling -> music21

        # gather simultaneous notes (same onset, listed on the next lines,
        # up to 4 extra tones) into a chord
        chordnotes = [name]
        for j in range(1, 5):
            if i + j < n:
                _, onset1, _, name1, _, _, _, _ = blines[i + j].split()
                if float(onset1) == onset:
                    chordnotes.append(name1.replace('b', '-'))

        if len(chordnotes) > 1:
            an = chord.Chord(chordnotes)
        else:
            an = note.Note(name)
            # BUG FIX: only attach a Fingering when one is defined. The
            # original appended `x` unconditionally, which raised NameError
            # (or reused a stale fingering) when the finger field was '_'.
            if '_' not in finger:
                x = Fingering(abs(int(finger)))
                x.style.absoluteY = 20
                an.articulations.append(x)

        if fixtempo:
            an.duration.quarterLength = fixtempo
        else:
            logdur = -np.log2(offset - onset)
            an.duration.quarterLength = 1.0/time_unit/pow(2, int(logdur-mindur))

        sf.append(an)

        # insert a rest up to the next onset, if there is a gap
        if i + 1 < n:
            _, onset1, _, _, _, _, _, _ = blines[i + 1].split()
            onset1 = float(onset1)
            if onset1 - offset > 0:
                r = note.Rest()
                if fixtempo:
                    # BUG FIX: honor fixtempo for rests. The original set the
                    # fixed length, then unconditionally overwrote it with the
                    # derived one and silently dropped long rests.
                    r.duration.quarterLength = fixtempo
                    sf.append(r)
                else:
                    logdur = -np.log2(onset1 - offset)
                    d = int(logdur - mindur)
                    if d < 4:  # ignore implausibly long gaps
                        r.duration.quarterLength = 1.0/time_unit/pow(2, d)
                        sf.append(r)

    return sf
# --- Example 13 ---
    def run(self, runWithEnviron=True):
        '''
        Main code runner for testing. To set a new test, update the self.callTest attribute in __init__().

        Note that the default of runWithEnviron imports music21.environment.  That might
        skew results

        Profiles the configured test (self.callTest) with pycallgraph,
        writes the call-graph image to a temporary file, and attempts to
        open it with the environment's viewer.
        '''
        from music21 import environment

        suffix = '.png'  # '.svg'
        outputFormat = suffix[1:]
        _MOD = "test.timeGraphs"

        if runWithEnviron:
            # let music21's Environment pick the temp-file location
            environLocal = environment.Environment(_MOD)
            fp = environLocal.getTempFile(suffix)
        # manually get a temporary file
        else:
            import tempfile
            import os
            import sys
            if os.name in ['nt'] or sys.platform.startswith('win'):
                platform = 'win'
            else:
                platform = 'other'

            # NOTE(review): assumes this directory already exists; mkstemp /
            # NamedTemporaryFile will fail otherwise — confirm it is created
            # elsewhere.
            tempdir = os.path.join(tempfile.gettempdir(), 'music21')
            if platform != 'win':
                fd, fp = tempfile.mkstemp(dir=tempdir, suffix=suffix)
                if isinstance(fd, int):
                    # on MacOS, fd returns an int, like 3, when this is called
                    # in some context (specifically, programmatically in a
                    # TestExternal class. the fp is still valid and works
                    # TODO: this did not work on MacOS 10.6.8 w/ py 2.7
                    pass
                else:
                    fd.close()
            else:
                # on Windows, keep only the generated name and close the file
                tf = tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix)
                fp = tf.name
                tf.close()

        # restrict which call-graph nodes are recorded
        if self.includeList is not None:
            gf = pycallgraph.GlobbingFilter(include=self.includeList,
                                            exclude=self.excludeList)
        else:
            gf = pycallgraph.GlobbingFilter(exclude=self.excludeList)
        # create instance; will call setup routines
        ct = self.callTest()

        # start timer
        print('%s starting test' % _MOD)
        t = Timer()
        t.start()

        graphviz = pycallgraph.output.GraphvizOutput(output_file=fp)
        # NOTE(review): hard-coded dot path; breaks where graphviz is
        # installed elsewhere.
        graphviz.tool = '/usr/local/bin/dot'

        config = pycallgraph.Config()
        config.trace_filter = gf

        from music21 import meter
        from music21 import note
        #from music21 import converter
        #from music21 import common
        #beeth = common.getCorpusFilePath() + '/beethoven/opus133.mxl'
        #s = converter.parse(beeth, forceSource=True)
        #beeth = common.getCorpusFilePath() + '/bach/bwv66.6.mxl'
        #s = converter.parse(beeth, forceSource=True)

        # warm up note/meter creation inside the traced region, then run
        # the actual focus routine under the profiler
        with pycallgraph.PyCallGraph(output=graphviz, config=config):
            note.Note()
            meter.TimeSignature('4/4')
            ct.testFocus()  # run routine
            pass
        print('elapsed time: %s' % t)
        # open the completed file
        print('file path: ' + fp)
        try:
            environLocal = environment.Environment(_MOD)
            environLocal.launch(outputFormat, fp)
        except NameError:
            pass
Beispiel #14
0
    def generate_notes(self, notes, durations):
        """Takes the tuple (notes, durations) of the starting phrase as arguments.
        Returns predicted sequence list of [note, duration].

        The starting phrase is integer-encoded via the lookup tables held by
        ``main.load_files_window``, then the model is sampled repeatedly
        (with temperature) to extend the phrase until either
        ``max_extra_notes`` predictions have been made or a 'START' token is
        predicted.  Returns None (after showing a message box) when a note or
        duration is missing from the lookup tables, or when the starting
        phrase contains a chord.
        """
        prediction_output = []         # accumulated [note, duration] pairs (seed + predicted)
        notes_input_sequence = []      # integer-encoded notes fed to the model
        durations_input_sequence = []  # integer-encoded durations fed to the model

        max_extra_notes = self.spinbox_max_extra_notes.value()
        sequence_length = len(notes)
        # Encode the starting phrase; abort with a message box on any token
        # missing from the lookup tables.
        for note_element, duration_element in zip(notes, durations):
            try:
                note_int = main.load_files_window.notes_lookups[0][
                    note_element]
            except KeyError:
                message_box = QtWidgets.QMessageBox(self)
                message_box.setText(
                    f'The note "{note_element}" is not in the lookup table')
                message_box.show()
                return
            try:
                duration_int = main.load_files_window.durations_lookups[0][
                    duration_element]
            except KeyError:
                message_box = QtWidgets.QMessageBox(self)
                message_box.setText(
                    f'The duration "{duration_element}" is not in the lookup table'
                )
                message_box.show()
                return

            notes_input_sequence.append(note_int)
            durations_input_sequence.append(duration_int)
            prediction_output.append([note_element, duration_element])

            if note_element != 'START':
                # AccidentalException here signals a chord token ('.'-joined
                # pitches), which note.Note cannot parse.
                try:
                    midi_note = note.Note(note_element)
                except music21.pitch.AccidentalException:
                    message_box = QtWidgets.QMessageBox(self)
                    message_box.setText(
                        f'Chords are not supported in the starting phrase unfortunately'
                    )
                    message_box.show()
                    return

                # NOTE(review): this one-hot vector is never read before being
                # overwritten inside the prediction loop below — dead store?
                new_note = np.zeros(128)
                new_note[midi_note.pitch.midi] = 1
        # Attention weights per generated note (rows: input positions,
        # columns: generated notes).  NOTE(review): computed but never
        # returned or stored on self — confirm whether it is meant to be used.
        att_matrix = np.zeros(shape=(max_extra_notes + sequence_length,
                                     max_extra_notes))

        for note_index in range(max_extra_notes):
            prediction_input = [
                np.array([notes_input_sequence]),
                np.array([durations_input_sequence])
            ]

            notes_prediction, durations_prediction = main.load_files_window.model.predict(
                prediction_input, verbose=0)
            # Only models that expose attention fill the attention matrix.
            if main.load_files_window.model_parameters:
                att_prediction = main.load_files_window.model_with_att.predict(
                    prediction_input, verbose=0)[0]
                att_matrix[(note_index - len(att_prediction) +
                            sequence_length):(note_index + sequence_length),
                           note_index] = att_prediction

            new_note = np.zeros(128)

            # Project the note probability distribution onto MIDI pitch
            # numbers; tokens that are not single pitches (chords, 'START',
            # unknown entries) are silently skipped.
            for idx, n_i in enumerate(notes_prediction[0]):
                try:
                    note_name = main.load_files_window.notes_lookups[1][idx]
                    midi_note = note.Note(note_name)
                    new_note[midi_note.pitch.midi] = n_i

                except music21.pitch.AccidentalException:
                    pass
                except music21.pitch.PitchException:
                    pass
                except KeyError:
                    pass

            # Temperature sampling: higher temperature => more random choice.
            i1 = ComposeWindow.sample_with_temp(
                notes_prediction[0], self.spinbox_notes_temp.value())
            i2 = ComposeWindow.sample_with_temp(
                durations_prediction[0], self.spinbox_durations_temp.value())

            note_result = main.load_files_window.notes_lookups[1][i1]
            duration_result = main.load_files_window.durations_lookups[1][i2]

            prediction_output.append([note_result, duration_result])

            notes_input_sequence.append(i1)
            durations_input_sequence.append(i2)

            # Sliding window: cap the model input at max_seq_length tokens.
            if len(notes_input_sequence) > self.spinbox_max_seq_length.value():
                notes_input_sequence = notes_input_sequence[1:]
                durations_input_sequence = durations_input_sequence[1:]

            # A predicted 'START' token marks the end of the piece.
            if note_result == 'START':
                break

        return prediction_output
Beispiel #15
0
def create_t_voice(
    m_voice: stream.Stream,
    t_chord: Union[Sequence[int], Sequence[str], chord.Chord],
    position: int = 1,
    direction: Direction = Direction.UP,
    t_mode: TMode = TMode.DIATONIC,
) -> stream.Stream:
    """Derive a tintinnabuli t-voice stream from an m-voice melody stream.

    Args:
        m_voice: The melody stream the tintinnabuli voice is built from.
        t_chord: The pitch classes forming the t-voice, given as letter names,
          numeric pitch classes, or a music21 Chord object.
        position: Optional; how many t-chord tones away from the m-note to
          place the t-note. Default is 1.
        direction: Optional; whether the t-voice sits above or below the
          melody (optionally alternating per note). Default is Direction.UP.
        t_mode: Optional; controls how a t-note sharing the m-note's letter
          name is counted. With TMode.DIATONIC, a candidate with the same
          step and octave as the m-note (e.g. E against an Eb) is skipped, so
          the next tone (e.g. G above) is chosen instead; TMode.CHROMATIC
          counts it. Default is TMode.DIATONIC.

    Returns:
        A new stream containing the generated t-voice.
    """
    t_voice = stream.Stream()

    # Normalize the t-chord input into parallel pitch-class / step lists.
    source_pitches = (t_chord.pitches
                      if isinstance(t_chord, chord.Chord) else t_chord)
    t_pitch_classes = []
    t_steps = []
    for entry in source_pitches:
        # Letter names and integer pitch classes both become Pitch objects;
        # actual Pitch objects (from a Chord) pass through untouched.
        resolved = (pitch.Pitch(entry)
                    if isinstance(entry, (str, int)) else entry)
        t_pitch_classes.append(resolved.pitchClass)
        t_steps.append(resolved.step)

    # Initial search direction: -1 steps downward, +1 steps upward.
    if direction in (Direction.DOWN, Direction.DOWN_ALTERNATE):
        pitch_delta = -1
    elif direction in (Direction.UP, Direction.UP_ALTERNATE):
        pitch_delta = 1

    probe = pitch.Pitch()

    for m_note in m_voice.flat.notes:
        probe.ps = m_note.pitch.ps
        hits = 0
        # Walk semitone by semitone until `position` t-chord tones are found.
        while hits < position:
            probe.ps = probe.ps + pitch_delta
            if probe.pitchClass not in t_pitch_classes:
                continue
            if t_mode is TMode.DIATONIC and (
                    probe.octave == m_note.pitch.octave
                    and probe.step == m_note.pitch.step):
                # Same letter name and octave as the m-note: not counted
                # as a distinct diatonic tone.
                continue
            hits += 1
        t_note = note.Note()
        t_note.pitch.ps = probe.ps
        t_note.duration = m_note.duration
        t_voice.insert(m_note.offset, t_note)
        # Alternating directions flip after every generated note.
        if direction in (Direction.UP_ALTERNATE, Direction.DOWN_ALTERNATE):
            pitch_delta = -pitch_delta

    return t_voice
Beispiel #16
0
def write_two_part(cf, cf_type='bass', species=1, show=False):
    """
    Write two-part counterpoint against a given cantus firmus.

    Parameters
    ----------
    cf: stream
        cantus firmus in a stream.
    cf_type: str
        'soprano' or 'bass' — which voice the cantus firmus occupies.
    species: int
        how many species to be generated, between 1 and 4.
    show: bool
        show the sheet output.

    Returns
    -------
    s: stream
        the generated two part counterpoint.

    Raises
    ------
    ValueError
        if cf_type is neither 'bass' nor 'soprano'.
    """
    BASS_RANGE = ('D1', 'C3')  # voice range of bass
    SOPRANO_RANGE = ('C4', 'G5')  # voice range of soprano

    s = stream.Stream([cf.keySignature, cf.timeSignature])
    if cf_type == 'bass':
        soprano = stream.Part([cf.keySignature, cf.timeSignature],
                              id='soprano')
        bass = stream.Part(cf.flat.elements, id='bass')
    elif cf_type == 'soprano':
        bass = stream.Part([cf.keySignature, cf.timeSignature], id='bass')
        soprano = stream.Part(cf.flat.elements, id='soprano')
    else:
        # Previously an invalid cf_type crashed later with UnboundLocalError;
        # fail fast with a clear message instead.
        raise ValueError(f"cf_type must be 'bass' or 'soprano', got {cf_type!r}")

    # decide the duration of each generated note by species
    species_length = bass.notes[0].quarterLength / species
    # according to key, decide the possible notes
    s.scale = s.keySignature.getScale()
    bass_pitches = s.scale.getPitches(BASS_RANGE[0], BASS_RANGE[1])
    soprano_pitches = s.scale.getPitches(SOPRANO_RANGE[0], SOPRANO_RANGE[1])
    soprano_tonic_pitches = s.scale.pitchesFromScaleDegrees([1],
                                                            SOPRANO_RANGE[0],
                                                            SOPRANO_RANGE[1])
    bass_tonic_pitches = s.scale.pitchesFromScaleDegrees([1], BASS_RANGE[0],
                                                         BASS_RANGE[1])

    # iterate for each cf note
    # NOTE(review): only the cf_type == 'bass' case generates a counterpoint
    # melody; with cf_type == 'soprano' the bass part stays empty — confirm
    # whether the symmetric branch is still to be written.
    if cf_type == 'bass':
        for current_bass in bass.flat.notes:
            if not isinstance(current_bass.previous(), note.Note):
                # The first note must be tonic (preceded by a rest for pickup)
                soprano.append(note.Rest(quarterLength=species_length))
                soprano.append(
                    note.Note(pitch=np.random.choice(soprano_tonic_pitches),
                              quarterLength=species_length))
            elif not current_bass.next():
                # The last note must be tonic, held for the full measure
                soprano.append(
                    note.Note(pitch=np.random.choice(soprano_tonic_pitches),
                              quarterLength=species_length * species))
            else:
                # for every middle cf note, generate as many notes as the
                # species count, using random_nextnote()
                for i in range(species):
                    down_beat = i == 0  # only the first subdivision is a downbeat
                    soprano.append(
                        random_nextnote(soprano_pitches,
                                        soprano.flat.notes[-1],
                                        current_bass,
                                        down_beat,
                                        species,
                                        prob_factor=2,
                                        debug=True))
    s.insert(0, soprano)
    s.insert(0, bass)

    if show:
        s.show()
    return s
Beispiel #17
0
def joinConsecutiveIdenticalPitches(detectedPitchObjects):
    # noinspection PyShadowingNames
    '''
    takes a list of equally-spaced :class:`~music21.pitch.Pitch` objects
    and returns a tuple of two lists, the first a list of
    :class:`~music21.note.Note`
    or :class:`~music21.note.Rest` objects (each of quarterLength 1.0)
    and a list of how many were joined together to make that object.

    N.B. the returned list is NOT a :class:`~music21.stream.Stream`.

    >>> readPath = common.getSourceFilePath() / 'audioSearch' / 'test_audio.wav'
    >>> freqFromAQList = audioSearch.getFrequenciesFromAudioFile(waveFilename=readPath)
    >>> chrome = scale.ChromaticScale('C4')
    >>> detectedPitchesFreq = audioSearch.detectPitchFrequencies(freqFromAQList, useScale=chrome)
    >>> detectedPitchesFreq = audioSearch.smoothFrequencies(detectedPitchesFreq)
    >>> (detectedPitches, listPlot) = audioSearch.pitchFrequenciesToObjects(
    ...        detectedPitchesFreq, useScale=chrome)
    >>> len(detectedPitches)
    861
    >>> notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(detectedPitches)
    >>> len(notesList)
    24
    >>> print(notesList)
    [<music21.note.Rest quarter>, <music21.note.Note C>, <music21.note.Note C>,
     <music21.note.Note D>, <music21.note.Note E>, <music21.note.Note F>,
     <music21.note.Note G>, <music21.note.Note A>, <music21.note.Note B>,
     <music21.note.Note C>, ...]
    >>> print(durationList)
    [71, 6, 14, 23, 34, 40, 27, 36, 35, 15, 17, 15, 6, 33, 22, 13, 16, 39, 35, 38, 27, 27, 26, 8]
    '''
    # initialization
    REST_FREQUENCY = 10
    # NOTE(review): mutates the input — the first sample is forced to the
    # rest-sentinel frequency, presumably so the scan starts at a rest
    # boundary; confirm callers don't reuse the original first pitch.
    detectedPitchObjects[0].frequency = REST_FREQUENCY

    # detecting the length of each note
    j = 0          # index of the current sample
    good = 0       # run length of consecutive identical frequencies
    bad = 0        # accumulated samples that never formed a stable run
    valid_note = False  # True once a run is long enough to count as a note

    total_notes = 0
    total_rests = 0
    notesList = []
    durationList = []

    while j < len(detectedPitchObjects):
        fr = detectedPitchObjects[j].frequency

        # detect consecutive instances of the same frequency
        while j < len(detectedPitchObjects
                      ) and fr == detectedPitchObjects[j].frequency:
            good = good + 1

            # if more than 6 consecutive identical samples, it might be a note
            if good >= 6:
                valid_note = True

                # if we've gone 15 or more samples without getting something constant,
                # assume it's a rest
                if bad >= 15:
                    durationList.append(bad)
                    total_rests = total_rests + 1
                    notesList.append(note.Rest())
                # the unstable stretch is consumed either way
                bad = 0
            j = j + 1
        if valid_note:
            # a stable run ended: record its length and emit one Note
            durationList.append(good)
            total_notes = total_notes + 1
            # doesn't this unnecessarily create a note that it doesn't need?
            # notesList.append(detectedPitchObjects[j - 1].frequency) should work
            n = note.Note()
            # j - 1 is the last sample of the run; its Pitch becomes the note's pitch
            n.pitch = detectedPitchObjects[j - 1]
            notesList.append(n)
        else:
            # run too short to be a note: fold it into the unstable counter
            bad = bad + good
        good = 0
        valid_note = False
        j = j + 1
    return notesList, durationList
Beispiel #18
0
def random_nextnote(pitch_list,
                    last_note,
                    current_cf,
                    down_beat,
                    species,
                    prob_factor=2,
                    debug=False):
    """
    Randomly generate the next note according to the previous notes.

    Parameters
    ----------
    pitch_list: list of pitch.Pitch
        possible pitches
    last_note: note.Note
        the previous note
    current_cf: note.Note
        the current cantus firmus note
    down_beat: bool
        whether the current note is a down beat; affects
        the choice of interval.
    species: int
        the species of counterpoint
    prob_factor: int
        affects the probability factor of choosing pitches
    debug: bool
        if debug=True, print some process.
    Returns
    -------
    current_note: note.Note
        one current note that follows all the rules.
        * if there is no possible note, return None.
    """

    # list of pitch names in string
    pitch_name_list = [p.nameWithOctave for p in pitch_list]

    # pitches to avoid:
    pitch_name_avoid = []
    # avoid repeating the previous note
    pitch_name_avoid.append(last_note.pitch.nameWithOctave)
    # avoid pitches dissonant against the cantus firmus
    # NOTE(review): music21 usually spells a diminished fifth 'd5';
    # confirm 'D5' parses as the intended interval.
    dissonant_intervals = ['m2', 'M2', 'P4', 'A4', 'D5', 'm7', 'M7']
    # renamed the loop variable: it previously shadowed the `interval` module
    dissonant_pitches = [
        current_cf.transpose(interval_name).pitch.name
        for interval_name in dissonant_intervals
    ]
    for p in pitch_list:
        if p.name in dissonant_pitches:
            pitch_name_avoid.append(p.nameWithOctave)

    # rules for down beats
    if down_beat:
        last_cf = current_cf.previous()
        # avoid parallel perfect intervals from the upbeat to this downbeat
        inte = interval.Interval(last_cf, last_note)
        if inte.semiSimpleName == 'P5':
            pitch_name_avoid.append(current_cf.transpose(inte).nameWithOctave)
        elif inte.semiSimpleName == 'P8':
            pitch_name_avoid.append(current_cf.transpose(inte).nameWithOctave)

        # avoid parallel perfect intervals from the previous downbeat:
        # walk back (species - 1) notes to find it
        last_db = last_note
        for _ in range(species - 1):
            if isinstance(last_db.previous(), note.Note):
                last_db = last_db.previous()

        inte = interval.Interval(last_db, last_note)
        if inte.name == 'P5':
            pitch_name_avoid.append(current_cf.transpose('P5').nameWithOctave)
        elif inte.name == 'P8':
            pitch_name_avoid.append(current_cf.transpose('P8').nameWithOctave)
    # rules for up beats
    else:
        # an upbeat's previous cf note is the current cf note
        last_cf = current_cf
        # avoid parallel perfect intervals from upbeat to downbeat
        # (interval computed once instead of twice as before)
        inte = interval.Interval(last_cf, last_note)
        if inte.name == 'P5':
            pitch_name_avoid.append(current_cf.transpose('P5').nameWithOctave)
        elif inte.name == 'P8':
            pitch_name_avoid.append(current_cf.transpose('P8').nameWithOctave)

    # keep every pitch name that is not on the avoid list
    # (renamed the loop variable: it previously shadowed the `pitch` module)
    pitch_name_valid = [
        name for name in pitch_name_list if name not in pitch_name_avoid
    ]
    if not pitch_name_valid:
        # if no valid pitch, return None
        print('No valid pitch! Stucked!')
        return None
    # absolute distance in semitones from the previous note to each candidate
    interval_to_rf = np.array([
        np.abs(interval.Interval(last_note.pitch, pitch.Pitch(p)).semitones)
        for p in pitch_name_valid
    ])
    if debug:
        print('intervals: {}'.format(np.around(interval_to_rf, decimals=3)))
    # probability inversely proportional to interval size: smaller leaps are
    # preferred. NOTE(review): a zero-semitone (enharmonic) candidate would
    # divide by zero here — assumed impossible because the previous pitch
    # name is excluded above; confirm the scale cannot contain enharmonics.
    interval_p = 1 / interval_to_rf
    interval_p = interval_p**prob_factor
    interval_p = interval_p / np.sum(interval_p)
    if debug:
        print('probabilities: {}'.format(np.around(interval_p, decimals=3)))
    current_note = note.Note(
        pitch=np.random.choice(pitch_name_valid, p=interval_p))
    current_note.quarterLength = (current_cf.quarterLength / species)
    return current_note
 def convert_names_to_notes(self, pitch_names):
     """Convert a list of pitch-name strings, e.g. ['A4'],
        into a list of note.Note objects, e.g. [<music21.note.Note A>].
     """
     return list(map(note.Note, pitch_names))
Beispiel #20
0
def generate(start):
    """Sample the trained model and write the result to 'generated_output.mid'.

    Parameters
    ----------
    start: sequence of int
        integer-encoded seed pattern; each value indexes into the sorted
        vocabulary built from the pickled "notes" file.

    Returns
    -------
    None — the MIDI stream is written to disk as a side effect.
    """
    model = load_model('model.hdf5')
    with open("notes", 'rb') as f:
        notes = pickle.load(f)

    # vocabulary: sorted unique elements; n_vocab normalises model input
    pitchnames = sorted(set(notes))
    n_vocab = len(pitchnames)  # same as len(set(notes)), computed once

    # integer -> element mapping used to decode predictions
    # (the unused reverse mapping and network_input construction that used
    # to live here were dead code and have been removed)
    int_to_ele = dict(enumerate(pitchnames))

    pattern = []
    prediction_output = []

    for i in start:
        pattern.append(i)
        prediction_output.append(int_to_ele[i])

    for _ in range(200):
        # reshape to (batch, timesteps, features) and normalise to [0, 1]
        prediction_input = np.reshape(pattern, (1, len(pattern), 1))
        prediction_input = prediction_input / float(n_vocab)

        prediction = model.predict(prediction_input, verbose=0)

        idx = np.argmax(prediction)
        result = int_to_ele[idx]
        prediction_output.append(result)

        # Remove the first value, and append the recent value..
        # This way input is moving forward step-by-step with time..
        pattern.append(idx)
        pattern = pattern[1:]

    offset = 0  # running time offset, in quarter lengths
    output_notes = []

    for patterns in prediction_output:

        # a chord is encoded as '+'-joined MIDI numbers (or a bare digit string)
        if ('+' in patterns) or patterns.isdigit():
            notes_in_chord = patterns.split('+')
            temp_notes = []
            for current_note in notes_in_chord:
                # create a Note object for each note in the chord
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                temp_notes.append(new_note)

            # build the Chord from the list of notes
            new_chord = chord.Chord(temp_notes)
            new_chord.offset = offset
            output_notes.append(new_chord)

        else:
            # otherwise the pattern is a single note name
            new_note = note.Note(patterns)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        offset += 0.5  # fixed half-beat spacing between events
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='generated_output.mid')
    return
Beispiel #21
0
    def addMarkerPartFromExisting(self, existingPartName, newPartName, newPartTitle="",
                                  direction="below", rhythmType="copy", color=None):
        """Create a marker part (x-noteheads on c4) derived from an existing
        part and insert it into self.modifiedExercise.

        Parameters
        ----------
        existingPartName : str
            key into self.pn identifying the part to derive from.
        newPartName : str
            key under which the new part is registered in self.pn.
        newPartTitle : str
            instrument name shown for the new part.
        direction : str
            'above' or 'below' — where to insert relative to the existing part.
        rhythmType : str
            'copy' (mirror the existing rhythm), 'chordify' (derive from the
            chordified exercise), or 'quarterNotes' (one marker per quarter).
        color : str or None
            optional color applied to every marker note.

        Returns
        -------
        the new marker part (the measures stream that was inserted).
        """
        partNum = self.pn[existingPartName]

        self._partOffsetsToPartIndecies()

        existingPart = self.modifiedExercise.parts[partNum]
        existingPartOffset = existingPart.offset
        if rhythmType == "chordify":
            existingPart = self.originalExercise.chordify()
        newPart = copy.deepcopy(existingPart.getElementsByClass('Measure'))
        firstNote = True
        inst = instrument.Instrument()

        for m in newPart:
            previousNotRestContents = copy.deepcopy(m.getElementsByClass('NotRest'))
            measureDuration = m.duration.quarterLength

            # Strip everything musical from the measure; markers are added
            # below.  (A duplicated removeByClass(['Dynamic']) call was
            # removed here.)
            m.removeByClass(['GeneralNote'])  # includes rests
            m.removeByClass(['Dynamic'])
            m.removeByClass(['Stream'])  # voices or sub-streams
            m.removeByClass(['Expression'])
            m.removeByClass(['KeySignature'])

            if rhythmType == "quarterNotes":
                # one marker note per quarter of the measure
                for i in range(int(measureDuration)):
                    markerNote = note.Note('c4')
                    markerNote.notehead = 'x'
                    markerNote.quarterLength = 1
                    if color is not None:
                        markerNote.color = color
                    if firstNote:
                        # mark the very first note of the part with an accent lyric
                        markerNote.lyric = '>'
                        firstNote = False
                    m.append(markerNote)
            else:
                # mirror the rhythm of the original notes/chords
                for oldNotRest in previousNotRestContents:
                    markerNote = note.Note('c4')
                    markerNote.offset = oldNotRest.offset
                    markerNote.notehead = 'x'
                    markerNote.quarterLength = oldNotRest.quarterLength
                    if color is not None:
                        markerNote.color = color
                    if firstNote:
                        markerNote.lyric = '>'
                        firstNote = False
                    m.insert(oldNotRest.offset, markerNote)
        inst.instrumentName = newPartTitle
        newPart.insert(0, inst)
        # neutralise key signature and use an alto clef for the marker part
        for ks in newPart.flat.getElementsByClass('KeySignature'):
            ks.sharps = 0
        for c in newPart.flat.getElementsByClass('Clef'):
            c.sign = "C"
            c.line = 3
        self._updatepn(partNum, direction=direction)
        if direction == "above":
            insertLoc = existingPartOffset - 0.5
            self.pn[newPartName] = partNum
        elif direction == "below" or direction is None:
            insertLoc = existingPartOffset + 0.5
            self.pn[newPartName] = partNum + 1
        self.modifiedExercise.insert(insertLoc, newPart)
        #self.modifiedExercise.show('text')
        # HACK: somehow needed to trigger sorting of the modified exercise
        self.modifiedExercise._reprText()
        self._partOffsetsToZero()
        return newPart
Beispiel #22
0
 def _getPitch(self, matchObj, octave):
     """Build a Note whose step comes from the regex match's first group
     (upper-cased) and whose octave is supplied by the caller.
     """
     builtNote = note.Note()
     builtNote.step = matchObj.group(1).upper()
     builtNote.octave = octave
     return builtNote