Example #1
def create_midi2(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = [instrument.Guitar()]

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:

        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(instrument.Guitar())
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            output_notes.append(instrument.Bass())
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    
    print('Saving Output file as midi....')

    midi_stream.write('midi', fp='test_output6.mid')
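The example above assumes each element of prediction_output is either a note name (for example 'E4') or a string of '.'-separated pitch integers for a chord. A minimal usage sketch, with imports and sample data that are assumptions and not part of the original project:

from music21 import note, chord, instrument, stream

sample_prediction = ['E4', 'G4', '0.4.7', 'B4', '2.5.9']
create_midi2(sample_prediction)  # writes test_output6.mid to the working directory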
Example #2
def predict():
    gen_seq = []
    start = np.random.randint(len(network_input) - 1)
    pattern = network_input[start]
    for i in range(100):
        ans = resultModel(pattern, model, notes)
        gen_seq.append(label_to_element[ans])
        pattern.append(ans)
        pattern = pattern[1:]
    offset = 0
    output_notes = []
    for pattern in gen_seq:
        if ('+' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('+')
            temp_notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                temp_notes.append(new_note)

            new_chord = chord.Chord(temp_notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        if offset < 7:
            offset += 1.0
        elif offset > 47:
            offset += 1.0
        else:
            offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi',
                      fp=os.path.join(BASE_DIR, "media", "music",
                                      "output.mid"))
Example #3
def create_music(prediction):
    """
    用神经网络'预测'的音乐数据来生成 MIDI 文件,再转成 MP3 文件
    """
    offset = 0   # 偏移
    output_notes = []

    # create Note or Chord objects
    for data in prediction:
        # a Chord, e.g. in the form: 4.15.7
        if ('.' in data) or data.isdigit():
            notes_in_chord = data.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()  # use the piano as the instrument
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # a Note
        else:
            new_note = note.Note(data)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase the offset on each iteration so the notes do not overlap
        offset += 0.5

    # create the music Stream
    midi_stream = stream.Stream(output_notes)

    # write the MIDI file
    midi_stream.write('midi', fp='output.mid')

    # convert the generated MIDI file to MP3
    convert_midi_to_mp3()
Example #4
def create_midi(prediction_output, i):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if (',' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split(',')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
            duration = 0.5
        # pattern is a note
        else:
            #print(pattern)
            note2 = pattern.split(' ')[0]
            #print(note2)
            duration = convert_to_float(pattern.split(' ')[1])
            #print(duration)
            new_note = note.Note(note2)
            new_note.offset = offset
            new_note.quarterLength = duration
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += duration

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp="test_output_FF_Normal" + str(i) + ".mid")
Example #5
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []
    n = ["C","C#","D","E-","E","F","F#","G","G#","A","B-","B"]
    pitchnames = [note.Note(no + str(octave)).nameWithOctave for octave in range(8) for no in n ]
    pitchnames.append("C8")
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if numNote(pattern) > 1:
            notes = []
            for pp in pattern:
                d = max(pp)
                if d == 0:
                    break
                new_note = note.Note(pitchnames[pp.index(d)])
                new_note.storedInstrument = instrument.Piano()
                #new_note.duration.quarterLength = d
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
            #offset += d
        else:
            d = max(pattern[0])
            new_note = note.Note(pitchnames[pattern[0].index(d)])
            new_note.offset = offset
            new_note.duration.quarterLength = d
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

            #offset += d
        offset += 0.5
    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp='test_output.mid')
Example #6
def create_midi(pitches, durations, filename):
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pitch, duration in zip(pitches, durations):
        # pattern is a chord
        if ('.' in pitch):
            notes_in_chord = pitch.split('.')
            notes = []
            for name in notes_in_chord:
                new_note = note.Note(name)
                # new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.duration.type = duration
            output_notes.append(new_chord)
            offset += new_chord.duration.quarterLength
        # pattern is a rest
        elif ('rest' in pitch):
            new_rest = note.Rest()
            # new_rest.storedInstrument = instrument.Piano()
            new_rest.offset = offset
            new_rest.duration.type = duration
            output_notes.append(new_rest)
            offset += new_rest.duration.quarterLength
        # pattern is a note
        else:
            new_note = note.Note(pitch)
            # new_note.storedInstrument = instrument.Piano()
            new_note.offset = offset
            new_note.duration.type = duration
            output_notes.append(new_note)
            offset += new_note.duration.quarterLength

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=filename)
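Because the duration strings are assigned to duration.type, they must be valid music21 type names such as 'whole', 'half', 'quarter' or 'eighth'. A hypothetical call (the sample data and filename are assumptions):

from music21 import note, chord, stream

create_midi(pitches=['C4', 'E4.G4.C5', 'rest', 'F4'],
            durations=['quarter', 'half', 'quarter', 'whole'],
            filename='duration_demo.mid')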
Example #7
def create_midi(prediction_output, filename):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for item in prediction_output:
        # pattern = item[0]
        pattern = item

        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    # midi_stream.show('text')
    # uncomment to open midi in MUSESCORE
    # midi_stream.show()
    midi_stream.write('midi', fp='{}.mid'.format(filename))
    print("Midi file saved.")
Example #8
def gen_output_notes(prediction_output,off_set):
    offset=0 #Time 
    output_notes=[]
    for pattern in prediction_output:
        # if pattern is a chord 
        if('+' in pattern or pattern.isdigit()):
            notes_in_chord=pattern.split("+")
            temp_notes=[]
            for current_note in notes_in_chord:
                new_note=note.Note(int(current_note)) # create Note Object 
                new_note.storedInstrument=instrument.Piano()
                temp_notes.append(new_note)
            new_chord=chord.Chord(temp_notes) # create a chord object from list of notes 
            new_chord.offset=offset 
            output_notes.append(new_chord)
        #if pattern is a note 
        else:
            new_note=note.Note(pattern)
            new_note.offset=offset 
            new_note.storedInstrument=instrument.Piano()
            output_notes.append(new_note)
        offset+=off_set
    return output_notes
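Unlike most of the other examples, this helper only returns the list of music21 objects; the caller still has to wrap them in a stream and write the file, roughly as in this sketch (sample input assumed):

from music21 import note, chord, instrument, stream

notes = gen_output_notes(['C4', '0+4+7', 'E4', '2+5+9'], off_set=0.5)
stream.Stream(notes).write('midi', fp='gen_output.mid')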
Example #9
    def listNormalOrders(self):
        '''
        List the normal orders for all 12 transpositions

        >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        >>> tc = analysis.transposition.TranspositionChecker(pList)
        >>> tc.listNormalOrders()
        [[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11],
         [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11],
         [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]]
        '''

        if self.allTranspositions is None:
            self.getTranspositions()
        allTranspositions = self.allTranspositions
        allNormalOrders = []
        for thisTransposition in allTranspositions:
            # pass
            c = chord.Chord(thisTransposition)
            thisNormalOrder = c.normalOrder
            allNormalOrders.append(thisNormalOrder)
        self.allNormalOrders = allNormalOrders
        return allNormalOrders
Example #10
def drop2_test():
    tonic = 'C'
    sc = scale.MajorScale(tonic)
    root = sc.pitchFromDegree(1)
    tonic_triad = chord.Chord([root, root.transpose(7), root.transpose(16)])

    cycle = generate_cycle_pair(sc,
                                tonic_triad,
                                "3/6",
                                voicing_type=Voicing.Drop2_A_form)
    cycle.show()

    cycle = generate_cycle_pair(sc,
                                tonic_triad,
                                "4/5",
                                voicing_type=Voicing.Drop2_A_form)
    cycle.show()

    cycle = generate_cycle_pair(sc,
                                tonic_triad,
                                "2/7",
                                voicing_type=Voicing.Drop2_A_form)
    cycle.show()
Example #11
    def testOrnamentA(self):
        from music21 import expressions
        from music21 import chord
        s = stream.Stream()
        s.repeatAppend(note.Note(), 4)
        s.repeatAppend(chord.Chord(['c4', 'g5']), 4)

        #s.insert(4, expressions.Trill())
        s.notes[3].expressions.append(expressions.Trill())
        s.notes[2].expressions.append(expressions.Mordent())
        s.notes[1].expressions.append(expressions.InvertedMordent())

        s.notes[6].expressions.append(expressions.Trill())
        s.notes[7].expressions.append(expressions.Mordent())
        s.notes[5].expressions.append(expressions.InvertedMordent())

        raw = fromMusic21Object(s)
        #s.show()

        self.assertEqual(raw.count('<trill-mark'), 2)
        self.assertEqual(raw.count('<ornaments>'), 6)
        self.assertEqual(raw.count('<inverted-mordent/>'), 2)
        self.assertEqual(raw.count('<mordent/>'), 2)
Example #12
    def testNoteColorA(self):
        from music21 import chord
        n1 = note.Note()
        n2 = note.Note()
        n2.color = '#ff1111'
        n3 = note.Note()
        n3.color = '#1111ff'
        r1 = note.Rest()
        r1.color = '#11ff11'

        c1 = chord.Chord(['c2', 'd3', 'e4'])
        c1.color = '#ff0000'
        s = stream.Stream()
        s.append([n1, n2, n3, r1, c1])
        #s.show()

        raw = fromMusic21Object(s)
        # three color indications
        self.assertEqual(
            raw.count("color="),
            8)  #exports to notehead AND note, so increased from 6 to 8
        # color set at note level only for rest, so only 1
        self.assertEqual(raw.count('note color="#11ff11"'), 1)
Example #13
def ex1_revised(show=True, *arguments, **keywords):
    if 'op133' in keywords.keys():
        beethovenScore = keywords['op133']
    else:
        beethovenScore = corpus.parse('opus133.xml') # load a MusicXML file

    violin2 = beethovenScore[1]      # most programming languages start counting from 0, 
    #  so part 0 = violin 1, part 1 = violin 2, etc.
    display = stream.Stream() # an empty container for filling with found notes
    for thisMeasure in violin2.getElementsByClass('Measure'):
        notes = thisMeasure.findConsecutiveNotes(skipUnisons=True, 
                                                 skipChords=True,
                                                 skipOctaves=True, 
                                                 skipRests=True, 
                                                 noNone=True )
        pitches = [n.pitch for n in notes]
        for i in range(len(pitches) - 3):
            testChord = chord.Chord(pitches[i:i+4])
            testChord.duration.type = "whole"
            if testChord.isDominantSeventh() is True:
                # since a chord was found in this measure, 
                # append the found pitches in closed position
                testChord.lyric = "m. " + str(thisMeasure.number)
                emptyMeasure = stream.Measure()
                emptyMeasure.append(testChord.closedPosition())
                display.append(emptyMeasure)
    
                # append the whole measure as well, tagging the first note of the measure with an
                # ordered list of all the pitch classes used in the measure.
                pcGroup = [p.pitchClass for p in thisMeasure.pitches]
                firstNote = thisMeasure.getElementsByClass(note.Note)[0]
                firstNote.lyric = str(sorted(set(pcGroup)))
                thisMeasure.setRightBarline("double")
                display.append(thisMeasure)
    
    if show:
        display.show('musicxml')
Example #14
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp=homeDir + 'test_output.mid')

    # create sheetmusic
    sheetmusic = stream.Stream(output_notes)
    sheetmusic.show()
    print("20_generate.py execute succesfully")
Example #15
def create_midi(prediction_output):
    offset = 0
    output_notes = []

    print(prediction_output)

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        elif pattern == 'rest':
            new_note = note.Rest()
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.25

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp='test_output_random.mid')
Example #16
def create_music_objects(model_output):
    song_offset = 0
    music_objects = []  # a list of music objects; rest, chord or note

    # split the output of the model on spaces and remove spaces
    music_list = model_output.split(' ')
    music_list = [a for a in music_list if a != '' and a != ' ']

    # create note and chord objects based on the values generated by the model
    for music_element in music_list:
        # music element is a rest
        if 'rest' in music_element:
            new_rest = note.Rest()
            new_rest.offset = song_offset
            music_objects.append(new_rest)
        # music element is a chord
        elif '|' in music_element:
            notes_in_chord = music_element.split('|')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(current_note)
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = song_offset
            music_objects.append(new_chord)
        # music element is a note
        else:
            new_note = note.Note(music_element)
            new_note.offset = song_offset
            new_note.storedInstrument = instrument.Piano()
            music_objects.append(new_note)

        # increase offset each iteration so that notes do not stack
        song_offset += 0.5

    return music_objects
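create_music_objects takes a single space-separated string rather than a list: chord members are joined with '|' and rests are spelled 'rest'. A usage sketch with an assumed sample string:

from music21 import note, chord, instrument, stream

objects = create_music_objects('C4 rest E4|G4|C5 D4 rest F4')
stream.Stream(objects).write('midi', fp='objects_demo.mid')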
Example #17
def create_midipart_with_durations(prediction_output,
                                   target_instrument=instrument.Piano()):
    offset = 0
    output_notes = []

    for i in range(len(prediction_output)):
        pattern = prediction_output[i]
        splitpattern = pattern.split(";")
        pattern = splitpattern[0]

        duration = get_number_from_duration(splitpattern[1])
        if ('chord' in pattern):
            notes = []
            pattern = get_notes_from_chord(pattern)
            patternpitches = pattern.split(',')
            for current_note in patternpitches:
                new_note = note.Note(current_note)
                new_note.storedInstrument = target_instrument
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        elif ('rest' in pattern):
            new_rest = note.Rest(pattern)
            new_rest.offset = offset
            new_rest.storedInstrument = target_instrument
            output_notes.append(new_rest)
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = target_instrument
            output_notes.append(new_note)
        offset += convert_to_float(duration)

    midipart = stream.Part(output_notes)

    return midipart
Example #18
    def test_Ch6_basic_II_A(self, *arguments, **keywords):
        '''p. 55
        Write the specified melodic interval above the given note.
        '''
        # combines 1 and 2

        pitches1 = ['e-4', 'f#4', 'a-4', 'd4', 'f4']
        intervals1 = ['M6', 'p5', 'M3', 'M7', 'P4']

        pitches2 = ['c#4', 'f3', 'a3', 'b-3', 'e-3']
        intervals2 = ['-M6', '-p5', '-M7', '-M2', '-P4']

        pitches = pitches1 + pitches2
        intervals = intervals1 + intervals2

        s = stream.Stream()
        for i, p in enumerate(pitches):
            if i == 0:
                s.append(clef.TrebleClef())
            if i == len(pitches1):
                s.append(clef.BassClef())

            p = pitch.Pitch(p)  # convert string to obj
            iObj = interval.Interval(intervals[i])
            c = chord.Chord([p, p.transpose(iObj)], type='whole')
            s.append(c)

        if 'show' in keywords.keys() and keywords['show']:
            s.show()

        match = []
        for c in s.getElementsByClass('Chord'):
            # append second pitch as a string
            match.append(str(c.pitches[1]))
        self.assertEqual(match, [
            'C5', 'C#5', 'C5', 'C#5', 'B-4', 'E3', 'B-2', 'B-2', 'A-3', 'B-2'
        ])
Example #19
    def testTrillExtensionA(self):
        '''Test basic wave line creation and output, as well as passing
        objects through make measure calls. 
        '''
        from music21 import stream, note, chord, expressions
        from music21.musicxml import m21ToXml
        s = stream.Stream()
        s.repeatAppend(note.Note(), 12)
        n1 = s.notes[0]
        n2 = s.notes[-1]
        sp1 = expressions.TrillExtension(n1, n2)
        s.append(sp1)
        raw = m21ToXml.GeneralObjectExporter().parse(s)
        self.assertEqual(raw.count(b'wavy-line'), 2)

        s = stream.Stream()
        s.repeatAppend(chord.Chord(['c-3', 'g4']), 12)
        n1 = s.notes[0]
        n2 = s.notes[-1]
        sp1 = expressions.TrillExtension(n1, n2)
        s.append(sp1)
        raw = m21ToXml.GeneralObjectExporter().parse(s)
        #s.show()
        self.assertEqual(raw.count(b'wavy-line'), 2)
Example #20
def getElement(dictionary):
    el = dictionary['element']
    # a 'complex' duration (e.g. 2.25 quarter lengths) must be split before use
    complexDuration = duration.Duration()
    complexDuration.quarterLength = 2.25
    if isinstance(el, chord.Chord):
        # keep only the highest note of the chord
        ordered_chord = el.sortAscending()
        el = ordered_chord[-1]
        if el.duration.type == complexDuration.type:
            print("\n---split-chord---\n")
            return list(el.splitAtDurations())
        return [el]
    if isinstance(el, note.Note):
        if el.duration.type == complexDuration.type:
            print("\n---split-note---\n")
            return list(el.splitAtDurations())
        return [el]
    if isinstance(el, note.Rest):
        if el.duration.type == complexDuration.type:
            print("\n---split-rest---\n")
            return list(el.splitAtDurations())
        return [el]
Example #21
File: predict.py  Project: jsr66/DeepRiff
def create_pdf(prediction_output, pdf_filepath):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    print("IN CREATE_PDF")
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    try:
        midi_stream.write('lily.pdf', fp=pdf_filepath)
    except:
        print("Lilypond failed to print pdf.")
Example #22
def create_midi_with_durations(prediction_output,
                               output_durations,
                               target_instrument=instrument.Piano(),
                               filename='test_output.mid'):
    offset = 0
    output_notes = []

    for i in range(len(prediction_output)):
        pattern = prediction_output[i]
        duration = get_number_from_duration(output_durations[i])
        if ('chord' in pattern):
            notes = []
            pattern = get_notes_from_chord(pattern)
            patternpitches = pattern.split(',')
            for current_note in patternpitches:
                new_note = note.Note(current_note)
                new_note.storedInstrument = target_instrument
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        elif ('rest' in pattern):
            new_rest = note.Rest(pattern)
            new_rest.offset = offset
            new_rest.storedInstrument = target_instrument
            output_notes.append(new_rest)
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = target_instrument
            output_notes.append(new_note)
        offset += convert_to_float(duration)

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp=filename)
Example #23
def create_midi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern[0]) or pattern[0].isdigit():
            notes_in_chord = pattern[0].split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.duration.quarterLength = pattern[1]
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            pitch = pattern[0]
            duration = pattern[1]
            new_note = note.Note(pitch)
            new_note.duration.quarterLength = float(duration)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp='test_output.mid')
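This variant expects each element of prediction_output to be a (pitch, duration) pair, where the pitch is either a note name or a '.'-separated chord string and the duration is a quarter-length value; note that the offset still advances by a fixed 0.5 regardless of the duration, as written. A hypothetical call (sample data assumed):

from music21 import note, chord, instrument, stream

create_midi([('C4', 1.0), ('0.4.7', 2.0), ('G4', 0.5), ('2.5.9', 1.0)])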
Example #24
def create_midi(prediction_output):
    print('Convert the output from the prediction to a MIDI file',
          sep=' ',
          end='\n',
          file=sys.stdout,
          flush=False)
    offset = 0
    output_notes = []
    #create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        print(pattern, sep=' ', end='\n\n', file=sys.stdout, flush=False)
        patt = str(pattern)
        if ('.' in patt) or patt.isdigit():
            notes_in_chord = patt.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)

            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        #pattern is a note
        else:
            new_note = note.Note(pitchName=pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        #increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='test_output.mid')
Example #25
def createMidi(prediction_output):
    """ convert the output from the prediction to notes and create a midi file
        from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:

        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)

        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)
    song_name = str(time.time()) + '.mid'
    midi_stream.write('midi', fp='currentSongs/' + song_name)

    return 'currentSongs/' + song_name
Example #26
def create_midi(prediction_output):
    offset = 0
    output_notes = []
    for pattern in prediction_output:
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp='test_output.mid')
Example #27
def create_midi(note_list, path):
    """ convert the output from the prediction to notes and create a midi file from the notes """
    offset = 0
    output_notes = []

    # create note and chord objects based on the values generated by the model
    for pattern in note_list:
        # pattern is a Rest
        if pattern == 'Rest':
            new_note = note.Rest()
            new_note.offset = offset
            output_notes.append(new_note)
        # pattern is a chord
        elif ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset each iteration so that notes do not stack
        offset += 0.5

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp=path)
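This version also accepts the literal token 'Rest'. A usage sketch with assumed sample input and output path:

from music21 import note, chord, instrument, stream

create_midi(['C4', 'Rest', '0.4.7', 'E4'], 'rest_demo.mid')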
Example #28
def convert_notes_to_midi(prediction_output, filename):
    """
    convert the output from the prediction to notes and create a midi file
    from the notes
    """
    offset = 0
    output_notes = list()

    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ("." in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split(".")
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        elif "rest" in pattern:
            new_rest = note.Rest(pattern)
            new_rest.offset = offset
            new_rest.storedInstrument = instrument.Piano()
            output_notes.append(new_rest)
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)

        # increase offset so notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write("midi", filename)
Example #29
        def create_midi(prediction_output):
            """ convert the output from the prediction to notes and create a midi file
                from the notes """
            offset = 0
            output_notes = []

            print('Generating midi...')
            # create note and chord objects based on the values generated by the model
            for pattern in prediction_output:
                # pattern is a chord
                if ('.' in pattern) or pattern.isdigit():
                    notes_in_chord = pattern.split('.')
                    notes = []
                    for current_note in notes_in_chord:
                        new_note = note.Note(int(current_note))
                        new_note.storedInstrument = instrument.Piano()
                        notes.append(new_note)
                    new_chord = chord.Chord(notes)
                    new_chord.offset = offset
                    output_notes.append(new_chord)
                # pattern is a note
                else:
                    new_note = note.Note(pattern)
                    new_note.offset = offset
                    new_note.storedInstrument = instrument.Piano()
                    output_notes.append(new_note)

                # increase offset each iteration so that notes do not stack
                offset += 0.5

            print(output_notes)
            midi_stream = stream.Stream(output_notes)

            print('Writing midi file to disk...')
            midi_stream.write('midi', fp='assets/test_output.mid')
            print('Done.')
Example #30
    def get_harmonic_reduction(self):
        reduction = []

        temp_midi = stream.Score()
        temp_midi.insert(0, self.chord_progression)
        max_notes_per_chord = 4

        for measure in temp_midi.measures(0, None):  # None = get all measures.
            if type(measure) != stream.Measure:
                continue

            # total up the length of every note in this measure
            count_dict = note_count(measure)
            sorted_items = sorted(count_dict.items(), key=lambda x: x[1])
            sorted_notes = [
                item[0] for item in sorted_items[-max_notes_per_chord:]
            ]
            measure_chord = chord.Chord(sorted_notes)

            reduction.append(measure_chord)

        self.harmony = reduction

        return reduction
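get_harmonic_reduction depends on a note_count helper that is not shown. A hypothetical stand-in, assuming the intent is to tally how long each pitch sounds within a measure (this is a guess at the missing helper, not the project's actual code):

from collections import defaultdict

def note_count(measure):
    counts = defaultdict(float)
    for n in measure.recurse().notes:
        for p in n.pitches:  # a Note exposes a one-element .pitches tuple; a Chord exposes all of them
            counts[p.nameWithOctave] += n.duration.quarterLength
    return dict(counts)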