Example #1
    def testExportMetronomeMarksA(self):
        from music21 import tempo
        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 8)
        # default quarter assumed
        p.insert(0, tempo.MetronomeMark(number=121.6))

        raw = fromMusic21Object(p)
        match1 = '<beat-unit>quarter</beat-unit>'
        match2 = '<per-minute>121.6</per-minute>'
        self.assertEqual(raw.find(match1) > 0, True)
        self.assertEqual(raw.find(match2) > 0, True)
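A standalone version of the same check, assuming a current music21 where the MusicXML string comes from GeneralObjectExporter (the class Example #6 below uses) rather than the older fromMusic21Object helper:

from music21 import note, stream, tempo
from music21.musicxml.m21ToXml import GeneralObjectExporter

p = stream.Part()
p.repeatAppend(note.Note('g#3'), 8)
p.insert(0, tempo.MetronomeMark(number=121.6))

raw = GeneralObjectExporter().parse(p).decode('utf-8')  # bytes -> str
assert '<beat-unit>quarter</beat-unit>' in raw
assert '<per-minute>121.6</per-minute>' in raw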
Example #2
File: examples.py Project: qnoboe/music21
def happyBirthday():
    '''
    fully copyright free!
    '''
    hb = cp("tinynotation: 3/4 d8. d16 e4 d g f#2 d8. d16 e4 d a g2 d8. " +
            "d16 d'4 b g8. g16 f#4 e c'8. c'16 b4 g a g2")
    hb.insert(0, key.KeySignature(1))
    hb.insert(0, tempo.TempoText("Brightly"))
    hb.insert(
        0, tempo.MetronomeMark(number=120, referent=note.Note(type='quarter')))
    hb.makeNotation(inPlace=True, cautionaryNotImmediateRepeat=False)
    return hb
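The cp helper is not defined in this snippet; it is presumably a local alias for converter.parse, so a minimal standalone equivalent (first phrase only) would be:

from music21 import converter, key, note, tempo

hb = converter.parse('tinynotation: 3/4 d8. d16 e4 d g f#2')
hb.insert(0, key.KeySignature(1))
hb.insert(0, tempo.MetronomeMark(number=120, referent=note.Note(type='quarter')))
hb.show('text')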
Example #3
File: mus21.py Project: rgxb2807/pygliss
def comp_stream(comp, bpm=60, length=0.25, all=False):
    """Render a composition's chords as a chord stream at the given tempo."""
    # one Part per voice in the first chord, plus a tempo mark for the set
    parts = [stream.Part() for i in range(len(comp.chords[0].notes))]
    parts.append(tempo.MetronomeMark(number=bpm))

    if all:
        chords = comp.all_chords
    else:
        chords = comp.chords

    s = chord_stream(chords, bpm=bpm, length=length)
    return s
Example #4
    def testExportMetronomeMarksD(self):
        from music21 import tempo
        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 8)
        # default quarter assumed
        p.insert(0, tempo.MetronomeMark('super fast', number=222.2))

        # TODO: order of attributes is not assured; allow for any order.
        match1 = '<words default-y="45.0" font-weight="bold" justify="left">super fast</words>'
        match2 = '<per-minute>222.2</per-minute>'
        raw = fromMusic21Object(p)
        self.assertEqual(raw.find(match1) > 0, True)
        self.assertEqual(raw.find(match2) > 0, True)

        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 8)
        # text does not show when implicit
        p.insert(0, tempo.MetronomeMark(number=132))
        # TODO: order of attributes is not assured; allow for any order.
        match1 = '<words default-y="45.0" font-weight="bold" justify="left">fast</words>'
        match2 = '<per-minute>132</per-minute>'
        raw = fromMusic21Object(p)
        self.assertEqual(raw.find(match1) > 0, False)
        self.assertEqual(raw.find(match2) > 0, True)

        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 8)
        mm = tempo.MetronomeMark('very slowly')
        self.assertEqual(mm.number, None)
        p.insert(0, mm)
        # text but no number
        # TODO: order of attributes is not assured; allow for any order.
        match1 = '<words default-y="45.0" font-weight="bold" justify="left">very slowly</words>'
        match2 = '<per-minute>'

        raw = fromMusic21Object(p)
        self.assertEqual(raw.find(match1) > 0, True)
        self.assertEqual(raw.find(match2) > 0, False)
Example #5
def addPart(minLength=80, maxProbability=0.7, instrument=None):
    s1 = rhythmLine(minLength=minLength, maxProbability=maxProbability)
    ts1 = meter.TimeSignature("4/4")
    s1.insert(0, ts1)
    s1.insert(0, tempo.MetronomeMark(number=180, text="very fast"))
    if instrument is not None:
        s1.insert(0, instrument)
    s1.makeAccidentals()
    s1.makeMeasures(inPlace=True)
    for n in s1.flat.notesAndRests:
        # crude conversion of tied continuations into rests by reassigning
        # the class; Example #10 does the same thing more safely with
        # Stream.replace()
        if n.tie is not None and n.tie.type != 'start':
            n.__class__ = note.Rest

    return s1
Example #6
    def testOutOfBoundsExpressionDoesNotCreateForward(self):
        '''
        A metronome mark at an offset exceeding the bar duration was causing
        <forward> tags, i.e. hidden rests. Prefer <offset> instead.
        '''
        m = stream.Measure()
        m.append(meter.TimeSignature('1/4'))
        m.append(note.Rest())
        m.insert(2, tempo.MetronomeMark('slow', 40))

        gex = GeneralObjectExporter()
        tree = self.getET(gex.fromGeneralObject(m))
        self.assertFalse(tree.findall('.//forward'))
        self.assertEqual(int(tree.findall('.//direction/offset')[0].text),
                         defaults.divisionsPerQuarter)
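getET is a helper on the test class; outside the suite, the same assertion can be made with the standard library, assuming GeneralObjectExporter.parse() for serialization:

import xml.etree.ElementTree as ET
from music21 import meter, note, stream, tempo
from music21.musicxml.m21ToXml import GeneralObjectExporter

m = stream.Measure()
m.append(meter.TimeSignature('1/4'))
m.append(note.Rest())
m.insert(2, tempo.MetronomeMark('slow', 40))

root = ET.fromstring(GeneralObjectExporter().parse(m))
assert not root.findall('.//forward')  # the out-of-range mark adds no hidden rest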
Example #7
File: MuseGAN.py Project: jienn/MuseGan
    def notes_to_midi(self, run_folder, output, filename=None):

        for score_num in range(len(output)):

            max_pitches = self.binarise_output(output)

            midi_note_score = max_pitches[score_num].reshape(
                [self.n_bars * self.n_steps_per_bar, self.n_tracks])
            parts = stream.Score()
            parts.append(tempo.MetronomeMark(number=66))
            # self.notes_to_score_0(midi_note_score, parts, max_pitches, 0)
            # self.notes_to_score_1(midi_note_score, parts, max_pitches)

            for i in range(self.n_tracks):
                last_x = int(midi_note_score[:, i][0])
                s = stream.Part()
                dur = 0

                for idx, x in enumerate(midi_note_score[:, i]):
                    x = int(x)

                    if (x != last_x or idx % 4 == 0) and idx > 0:
                        n = note.Note(last_x)
                        n.duration = duration.Duration(dur)
                        s.append(instrument.Trumpet())
                        s.append(n)
                        dur = 0

                    last_x = x
                    dur = dur + 0.25

                n = note.Note(last_x)
                n.duration = duration.Duration(dur)
                s.append(instrument.Trumpet())
                s.append(n)

                parts.append(s)

            if filename is None:
                parts.write('midi',
                            fp=os.path.join(
                                run_folder, "samples/sample_{}_{}.midi".format(
                                    self.epoch, score_num)))
            else:
                parts.write(
                    'midi',
                    fp=os.path.join(run_folder,
                                    "samples/{}.midi".format(filename)))
Example #8
def convert_to_midi(sequence, bpm=60, output_file='./midi_output/music.mid'):
    """Save sequence as a midi file (with path = output_file). sequence
    can be from the original dataset or a new sequence generated by a
    trained model"""
    offset = 0  # the distance in quarter-notes of the note/chord/rest
    # being written from the beginning
    output_notes = [instrument.Piano(), tempo.MetronomeMark(number=bpm)]

    bps = bpm / 60  # beats per second
    # create note, chord, and rest objects
    for vector in sequence:
        # convert from seconds to beats, with a fresh Duration per element:
        # music21 assigns Duration objects by reference, so reusing a single
        # one would retime every element appended earlier
        converted_duration = duration.Duration()
        converted_duration.quarterLength = vector[-1] * bps

        if np.sum(vector[:-1]) > 1:  # chord
            n_active = int(np.sum(vector[:-1]))
            indices_in_chord = np.argsort(vector[:-1])[-n_active:]
            notes_in_chord = [piano_idx_to_note(i) for i in indices_in_chord]
            notes = []
            for cur_note in notes_in_chord:
                new_note = note.Note(cur_note)
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            new_chord.duration = converted_duration
            output_notes.append(new_chord)

        elif (np.sum(vector[:-1]) == 1):  # note
            index = np.argmax(vector[:-1])
            new_note = piano_idx_to_note(index)
            new_note = note.Note(new_note)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            new_note.duration = converted_duration
            output_notes.append(new_note)

        elif (np.sum(vector[:-1]) == 0):  # rest
            new_rest = note.Rest()
            new_rest.offset = offset
            new_rest.duration = converted_duration
            output_notes.append(new_rest)

        offset += vector[-1]

    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp=output_file)
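The loop above creates a fresh Duration per element because music21 assigns Duration objects by reference rather than copying them; a small sketch of the pitfall:

from music21 import duration, note

shared = duration.Duration(1.0)
n1, n2 = note.Note('C4'), note.Note('E4')
n1.duration = shared
n2.duration = shared              # same object, not a copy
shared.quarterLength = 2.0        # silently retimes both notes
assert n1.duration.quarterLength == n2.duration.quarterLength == 2.0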
Example #9
    def testExportMetronomeMarksB(self):
        from music21 import tempo
        from music21 import duration
        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 8)
        # default quarter assumed
        p.insert(0, tempo.MetronomeMark(number=222.2,
                referent=duration.Duration(quarterLength=0.75)))
        # p.show()
        raw = fromMusic21Object(p)
        match1 = '<beat-unit>eighth</beat-unit>'
        match2 = '<beat-unit-dot/>'
        match3 = '<per-minute>222.2</per-minute>'
        self.assertEqual(raw.find(match1) > 0, True)
        self.assertEqual(raw.find(match2) > 0, True)
        self.assertEqual(raw.find(match3) > 0, True)
Example #10
def addPart(minLength=80, maxProbability=0.7, instrument=None):
    s1 = rhythmLine(minLength=minLength, maxProbability=maxProbability)
    ts1 = meter.TimeSignature("4/4")
    s1.insert(0, ts1)
    s1.insert(0, tempo.MetronomeMark(number=180, text="very fast"))
    if instrument is not None:
        s1.insert(0, instrument)
    s1.makeAccidentals()
    s1.makeMeasures(inPlace=True)
    sf = s1.flat.notesAndRests
    for n in sf:
        if n.tie is not None and n.tie.type != 'start':
            r = note.Rest()
            r.quarterLength = n.quarterLength
            s1.replace(n, r, allDerived=True)

    return s1
Example #11
    def generate(self, number_of_notes_per_sample, midi_file_path,
                 wav_file_path, use_polyphonic):
        """
        Generates as much MIDI-Files as given by numberOfSamples. From these Files WAV-Files are synthesized and
        the Paths to both MIDI and WAV-Files are stored in a CSV File which is also saved into destinationFolder.
        
        
        Args:
            number_of_notes_per_sample: int - Number of Notes generated for every Sample
            midi_file_path: str - Path where to save MIDI-Files into
            wav_file_path: str - Path where to save WAV-Files into
            use_polyphonic: bool - Switch for Polyphonic Samples or Monophonic
        """
        # Initializing Stream
        s = stream.Measure()
        s.append(tempo.MetronomeMark(number=self.tempo))

        for i in range(0, number_of_notes_per_sample):
            if random.uniform(0, 1) > self.pauseRatio:
                if use_polyphonic and random.uniform(0, 1) > self.chordRatio:
                    # Add Chord
                    notes = []
                    # Creates a chord with 2, 3 or 4 notes; pick the target
                    # size once rather than re-rolling it on every pass
                    chord_size = random.randrange(2, 5)
                    while len(notes) < chord_size:
                        note_string, note_length = self.__get_note_params()
                        new_note = note.Note(note_string, type=note_length)
                        if new_note not in notes:
                            notes.append(new_note)
                    s.append(chord.Chord(notes))
                else:
                    # Add single Note
                    note_string, note_length = self.__get_note_params()
                    s.append(note.Note(note_string, type=note_length))
            else:
                # Add a rest; rests carry no pitch, only a duration
                _, note_length = self.__get_note_params()
                s.append(note.Rest(type=note_length))

        # Write a MIDI-File from Stream
        if self.debug:
            print("MIDI: " + midi_file_path)
            print("WAV: " + wav_file_path)
        s.write('midi', fp=midi_file_path)

        # Synthesize WAV-File from MIDI-File
        self.wav_generator.midi_to_wav(wav_file_path, midi_file_path)
Example #12
    def testExportMetronomeMarksE(self):
        '''
        Test writing of sound tags
        '''
        from music21 import meter
        from music21 import tempo
        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 8)
        # default quarter assumed
        p.insert(0, tempo.MetronomeMark('super slow', number=30.2))

        raw = fromMusic21Object(p)

        match1 = '<sound tempo="30.2"/>'
        self.assertEqual(raw.find(match1) > 0, True)
        #p.show()


        p = stream.Part()
        p.repeatAppend(note.Note('g#3'), 14)
        # default quarter assumed
        p.insert(meter.TimeSignature('2/4'))
        p.insert(0, tempo.MetronomeMark(number=30))
        p.insert(2, tempo.MetronomeMark(number=60))
        p.insert(4, tempo.MetronomeMark(number=120))
        p.insert(6, tempo.MetronomeMark(number=240))
        p.insert(8, tempo.MetronomeMark(number=240, referent=0.75))
        p.insert(10, tempo.MetronomeMark(number=240, referent=0.5))
        p.insert(12, tempo.MetronomeMark(number=240, referent=0.25))
        #p.show()

        raw = fromMusic21Object(p)
        match1 = '<sound tempo="30.0"/>'
        self.assertEqual(raw.find(match1) > 0, True)
        match2 = '<sound tempo="60.0"/>'
        self.assertEqual(raw.find(match2) > 0, True)
        match3 = '<sound tempo="120.0"/>'
        self.assertEqual(raw.find(match3) > 0, True)
        match4 = '<sound tempo="240.0"/>'
        self.assertEqual(raw.find(match4) > 0, True)
        # from the dotted value
        match5 = '<sound tempo="180.0"/>'
        self.assertEqual(raw.find(match5) > 0, True)
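The 180 in the final match comes from normalizing the dotted-eighth mark (referent 0.75 at 240 per minute) to quarter notes; MetronomeMark can do that conversion directly:

from music21 import tempo

mm = tempo.MetronomeMark(number=240, referent=0.75)  # dotted eighth = 240
print(mm.getQuarterBPM())  # 180.0, matching <sound tempo="180.0"/>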
Example #13
    def testTextExpressionOffset(self):
        '''Transfer element offset after calling getTextExpression().'''
        # https://github.com/cuthbertLab/music21/issues/624
        s = converter.parse('tinynotation: 4/4 c1')
        c = repeat.Coda()
        c.useSymbol = False
        f = repeat.Fine()
        mm = tempo.MetronomeMark(text='Langsam')
        mm.placement = 'above'
        s.measure(1).storeAtEnd([c, f, mm])

        tree = self.getET(s)
        for direction in tree.findall('.//direction'):
            self.assertIsNone(direction.find('offset'))

        # Also check position
        mxDirection = tree.find('part/measure/direction')
        self.assertEqual(mxDirection.get('placement'), 'above')
Example #14
def create_midi(notes, tempo_time, file_name):
    """Given an array of notes, a tempo to use, and a file name, create a midi music track"""
    output_notes = []
    output_notes.append(tempo.MetronomeMark(number=tempo_time))
    int_to_note = get_note_dic(True)
    offset = 0.0
    for note_id in notes:
        if int_to_note.get(note_id) != "|":
            length, is_rest = int_to_note.get(note_id)
            new_note = create_note(float(length), is_rest)
            new_note.offset = offset
            output_notes.append(new_note)

            # increase offset each iteration so that notes do not stack
            offset += new_note.duration.quarterLength

    #print(offset)
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='{}.mid'.format(file_name))
Example #15
def getMeasuresList(scores_json):
    scores = scores_json
    allMeasures = []
    lastMetronomeMark = tempo.MetronomeMark()
    lastKeysignature = key.KeySignature()
    lastTimeSignature = meter.TimeSignature()
    lastClef = clef.Clef()
    for strm in scores:
        path = strm['path']
        strmScore = strm['score']
        for p in strmScore.getElementsByClass(stream.Part):
            for m in p.getElementsByClass(stream.Measure):
                isActive = False
                for r in m.recurse():
                    if type(r) == clef.TrebleClef:
                        lastClef = r
                    if type(r) == clef.BassClef:
                        lastClef = r
                    if type(r) == note.Note:
                        isActive = True
                    if type(r) == tempo.MetronomeMark:
                        lastMetronomeMark = r
                    if type(r) == key.KeySignature:
                        lastKeysignature = r
                    if type(r) == meter.TimeSignature:
                        lastTimeSignature = r
                try:
                    m.insert(0, lastTimeSignature)
                    m.insert(0, lastMetronomeMark)
                    m.insert(0, lastKeysignature)
                    m.insert(0, lastClef)
                except Exception:
                    # music21 may refuse to insert the same object twice
                    pass
                if isActive:
                    obj = {
                        "path": path,
                        "part": p.partName,
                        "number": m.number,
                        "measure": m
                    }
                    allMeasures.append(obj)
    return allMeasures
Example #16
def reMIDIfy(minisong, output):
    # empty stream
    s1 = stream.Stream()
    # assign the tempo based on what was read
    t = tempo.MetronomeMark('fast', song_tempo, note.Note(type='quarter'))
    s1.append(t)
    channels = minisong.shape[2]
    # fill in the stream with notes according to the minisong values
    # by channel
    for curr_channel in range(channels):
        inst = instrument.fromString(instrument_list[curr_channel])
        new_part = stream.Part()
        new_part.insert(0, inst)
        # by beat
        for beat in range(BEATS_PER_MINISONG):
            notes = []
            # by pitch in a beat
            for curr_pitch in range(NOTE_RANGE):
                #if this pitch is produced with at least 10% likelihood then count it
                if minisong[beat][curr_pitch][curr_channel] > VOLUME_CUTOFF:
                    p = pitch.Pitch()
                    p.midi = curr_pitch + LOWEST_PITCH
                    n = note.Note(pitch=p)
                    n.volume.velocity = minisong[beat][curr_pitch][
                        curr_channel] * MAX_VOL
                    n.quarterLength = LENGTH_PER_BEAT
                    notes.append(n)
            if notes:
                my_chord = chord.Chord(notes)
            else:
                my_chord = note.Rest()
                my_chord.quarterLength = LENGTH_PER_BEAT

            new_part.append(my_chord)
        s1.insert(curr_channel, new_part)

    mf = midi.translate.streamToMidiFile(s1)
    mf.open(output + ".mid", 'wb')
    mf.write()
    mf.close()
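The open/write/close sequence around streamToMidiFile is the long way around; Stream.write handles the file lifecycle itself, as a minimal sketch shows:

from music21 import note, stream, tempo

s = stream.Stream()
s.append(tempo.MetronomeMark('fast', 120, note.Note(type='quarter')))
s.append(note.Note('C4'))
s.write('midi', fp='output.mid')  # opens, writes and closes the file itself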
Example #17
	def export(self): 
		"""Export the transcribed music."""
		if ( self.saveFileStr.get() not in self.saveDefault ):
			# Set the tempo
			tempoObject = tempo.MetronomeMark( None, 
												int(self.tempo.get()), 
												note.Note(type='quarter') )
			self.transcribedPart.insert(tempoObject)
			
			# Write to disk
			success = self.transcribedPart.write(fp=self.saveFile)
			if ( success ):
				saveMsg = "Your file has been saved to %s." % success
				tkMessageBox.showinfo("File saved!", saveMsg )
		elif ( self.saveFileStr.get() == "" ):
			self.saveFileStr.set(self.saveDefault)
		else:
			# Don't have a save location... should get that
			self.getSavePath()
			self.export()
Example #18
def notes_to_midi(n_bars, n_steps_per_bar, n_tracks, epoch, output_folder,
                  output):

    for score_num in range(len(output)):

        max_pitches = argmax_output(output)

        midi_note_score = max_pitches[score_num].reshape(
            [n_bars * n_steps_per_bar, n_tracks])
        parts = stream.Score()
        parts.append(tempo.MetronomeMark(number=66))

        for i in range(n_tracks):
            last_x = int(midi_note_score[:, i][0])
            s = stream.Part()
            dur = 0

            for idx, x in enumerate(midi_note_score[:, i]):
                x = int(x)

                if (x != last_x or idx % 4 == 0) and idx > 0:
                    n = note.Note(last_x)
                    n.duration = duration.Duration(dur)
                    s.append(n)
                    dur = 0

                last_x = x
                dur = dur + 0.25

            n = note.Note(last_x)
            n.duration = duration.Duration(dur)
            s.append(n)

            parts.append(s)

        parts.write('midi',
                    fp=os.path.join(
                        output_folder,
                        "sample_{}_{}.midi".format(epoch, score_num)))
Example #19
    def testMetricModulationA(self):
        from music21 import tempo
        s = stream.Stream()
        m1 = stream.Measure()
        m1.repeatAppend(note.Note(quarterLength=1), 4)
        mm1 = tempo.MetronomeMark(number=60.0)
        m1.insert(0, mm1)

        m2 = stream.Measure()
        m2.repeatAppend(note.Note(quarterLength=1), 4)
        # tempo.MetronomeMark(number=120.0)
        mmod1 = tempo.MetricModulation()
        # assign an equivalent restatement of the tempo in eighths
        mmod1.oldMetronome = mm1.getEquivalentByReferent(0.5)
        # set the other side of the equation from the desired referent
        mmod1.setOtherByReferent(referent='quarter')
        m2.insert(0, mmod1)

        m3 = stream.Measure()
        m3.repeatAppend(note.Note(quarterLength=1), 4)
        mmod2 = tempo.MetricModulation()
        mmod2.oldMetronome = mmod1.newMetronome.getEquivalentByReferent(1.5)
        # set the other side of the equation from the desired referent
        mmod2.setOtherByReferent(referent=1)
        m3.insert(0, mmod2)

        s.append([m1, m2, m3])
        raw = fromMusic21Object(s)

        match = '<sound tempo="60.0"/>'
        self.assertEqual(raw.find(match) > 0, True)
        match = '<per-minute>60.0</per-minute>'
        self.assertEqual(raw.find(match) > 0, True)
        match = '<sound tempo="120.0"/>'
        self.assertEqual(raw.find(match) > 0, True)
        match = '<sound tempo="80.0"/>'
        self.assertEqual(raw.find(match) > 0, True)
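The asserted numbers follow from the equivalence arithmetic: restating quarter = 60 with an eighth-note referent doubles the count, and making the new quarter equal the old eighth carries the 120 over. A sketch of just that step:

from music21 import tempo

mm1 = tempo.MetronomeMark(number=60.0)       # quarter = 60
eq = mm1.getEquivalentByReferent(0.5)        # same speed stated in eighths
print(eq.number)                             # 120.0

mmod = tempo.MetricModulation()
mmod.oldMetronome = eq
mmod.setOtherByReferent(referent='quarter')  # new quarter = old eighth
print(mmod.newMetronome.number)              # 120.0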
Example #20
    def notes_to_midi(self, run_folder, score, filename):

        for score_num in range(len(score)):

            max_pitches = np.argmax(score, axis=3)

            midi_note_score = max_pitches[score_num].reshape(
                [self.n_bars * self.n_steps_per_bar, self.n_tracks])
            parts = stream.Score()
            parts.append(tempo.MetronomeMark(number=66))

            for i in range(self.n_tracks):
                last_x = int(midi_note_score[:, i][0])
                s = stream.Part()
                dur = 0

                for idx, x in enumerate(midi_note_score[:, i]):
                    x = int(x)

                    if (x != last_x or idx % 4 == 0) and idx > 0:
                        n = note.Note(last_x)
                        n.duration = duration.Duration(dur)
                        s.append(n)
                        dur = 0

                    last_x = x
                    dur = dur + 0.25

                n = note.Note(last_x)
                n.duration = duration.Duration(dur)
                s.append(n)

                parts.append(s)

            # write once per score, after all tracks have been appended
            parts.write(
                'midi',
                fp=os.path.join(run_folder,
                                "samples/{}.midi".format(filename)))
Example #21
def save_song(song, name):
    """ Convert list of ints to notes and save as a midi file """
    offset = 0
    prev_offset = 0
    output_notes = [tempo.MetronomeMark('allegro')]
    # bar length in quarter notes; the first element of a song should be a
    # time-signature token, so this default should never take effect
    quarters = 3
    # create note and chord objects based on the values generated by the model
    for e_pitch, e_offset, e_length in song:
        if e_offset < prev_offset:
            offset += quarters
        if e_pitch[:2] == 'TS':
            quarters = float(e_pitch[2:])
            # append the time signature so it is written into the MIDI stream
            new_ts = meter.TimeSignature(str(int(quarters)) + '/4')
            new_ts.offset = offset
            output_notes.append(new_ts)
        elif float(e_length) != 0.0:
            new_note = note.Note(e_pitch)
            new_note.offset = offset + e_offset
            new_note.quarterLength = e_length
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        prev_offset = e_offset

    midi_stream = stream.Stream(output_notes)

    midi_stream.write('midi', fp='%s.mid' % name)
Example #22
def parse_data(note_stream, value_id):
    composition_file = open('../user_data/composition' + value_id + '.csv',
                            mode='r')
    composition_reader = csv.DictReader(composition_file)

    for row in composition_reader:
        composition_meter = row['beats_per_bar'] + '/' + row['base_beat']
        switch_enharmonic = row['enharmonic']
        tempo_value = row['tempo']
    note_stream.timeSignature = meter.TimeSignature(composition_meter)
    metronome_mark = tempo.MetronomeMark(number=int(tempo_value))
    use_clef = clef.TrebleClef()
    note_stream.append([metronome_mark, use_clef])

    composition_file.close()

    note_file = open('../user_data/notes' + value_id + '.csv', mode='r')
    note_reader = csv.DictReader(note_file)

    for row in note_reader:
        note_pitch_octave = row['pitch']
        note_duration = row['duration']
        if note_pitch_octave == 'rest':
            note_entry = note.Rest(quarterLength=float(note_duration))
        else:
            pitch_value = pitch.Pitch(note_pitch_octave)
            if switch_enharmonic == 'flat':
                if "#" in pitch_value.name:
                    pitch_value = pitch_value.getHigherEnharmonic()
            note_entry = note.Note(pitch_value,
                                   quarterLength=float(note_duration))
        note_stream.append(note_entry)

    note_file.close()

    return note_stream
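The enharmonic branch leans on Pitch.getHigherEnharmonic, which respells a sharp as the equivalent flat one letter name up:

from music21 import pitch

p = pitch.Pitch('C#4')
print(p.getHigherEnharmonic())  # D-4 (music21 writes flats with a minus sign)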
Example #23
    def notes_to_stream(self, score):
        scoreCompressed = self.TrimScore(score, 8)
        scoreCompressed = scoreCompressed[:, :, 0:95:6, :, :]  # (batch, 4, 16, 84, 1)
        scoreCompressed = scoreCompressed > 0.5  # binarize at 0.5

        # split the pitch axis into two ranges
        track1 = scoreCompressed[:, :, :, 37:83, :]  # upper range (treble clef track)
        track2 = scoreCompressed[:, :, :, 0:37, :]   # lower range (bass clef track)
        # merge the bar and timestep axes of each track
        track1 = track1.reshape(track1.shape[0] * track1.shape[1] *
                                track1.shape[2], track1.shape[3])  # (batch*4*16, 46)
        track2 = track2.reshape(track2.shape[0] * track2.shape[1] *
                                track2.shape[2], track2.shape[3])  # (batch*4*16, 37)

        scoreObject = note.Note()
        upPitch = 60
        dur = 0
        # build the Stream
        scoreStream = stream.Score()
        scoreStream.append(tempo.MetronomeMark(number=120))
        # build track 1
        scoreTrack1 = stream.Part()

        # 1. iterate over the timesteps
        lastIndexes = np.array(-1)
        lengthOfTimestep = len(track1)
        for i in range(lengthOfTimestep):
            # 1.1. take the pitches at timestep i
            pitches = track1[i]
            # 1.2. indices of the active (nonzero) pitches
            indexes = (np.where(pitches > 0))[0]
            isEqual = np.array_equal(lastIndexes, indexes)

            # 1.3. flush when the active pitches change or every 16 timesteps
            if (not isEqual or i % 16 == 0) and i > 0:
                lengthOfIndexs = len(lastIndexes)  # 1.3.1 count the previous indices
                if lengthOfIndexs == 0:  # 1.3.2 none active: emit a rest
                    scoreObject = note.Rest()
                elif lengthOfIndexs == 1:  # 1.3.3 one active: emit a note
                    scoreObject = note.Note(lastIndexes[0] + upPitch)
                else:  # 1.3.4 two or more active: emit a chord
                    scoreObject = chord.Chord()
                    for j in range(lengthOfIndexs):
                        scoreObject.add(note.Note(lastIndexes[j] + upPitch))
                scoreObject.duration = duration.Duration(dur)
                scoreTrack1.append(scoreObject)  # 1.3.5 add the object to the track
                dur = 0

            lastIndexes = indexes
            dur += 0.25  # 1.4. accumulate the duration

        # flush the final pending object of track 1
        lengthOfIndexs = len(lastIndexes)
        if lengthOfIndexs == 0:
            scoreObject = note.Rest()
        elif lengthOfIndexs == 1:
            scoreObject = note.Note(lastIndexes[0] + upPitch)
        else:
            scoreObject = chord.Chord()
            for j in range(lengthOfIndexs):
                scoreObject.add(note.Note(lastIndexes[j] + upPitch))
        scoreObject.duration = duration.Duration(dur)
        scoreTrack1.append(scoreObject)

        scoreStream.append(scoreTrack1)

        upPitch = 23
        dur = 0
        # build track 2
        scoreTrack2 = stream.Part()
        scoreTrack2.clef = clef.BassClef()

        # 2. iterate over the timesteps
        lastIndexes = np.array(-1)
        lengthOfTimestep = len(track2)
        for i in range(lengthOfTimestep):
            # 2.1. take the pitches at timestep i
            pitches = track2[i]
            # 2.2. indices of the active (nonzero) pitches
            indexes = (np.where(pitches > 0))[0]
            isEqual = np.array_equal(lastIndexes, indexes)

            # 2.3. flush when the active pitches change or every 16 timesteps
            if (not isEqual or i % 16 == 0) and i > 0:
                lengthOfIndexs = len(lastIndexes)
                if lengthOfIndexs == 0:
                    scoreObject = note.Rest()
                elif lengthOfIndexs == 1:
                    scoreObject = note.Note(lastIndexes[0] + upPitch)
                else:
                    scoreObject = chord.Chord()
                    for j in range(lengthOfIndexs):
                        scoreObject.add(note.Note(lastIndexes[j] + upPitch))
                scoreObject.duration = duration.Duration(dur)
                scoreTrack2.append(scoreObject)
                dur = 0

            lastIndexes = indexes
            dur += 0.25  # 2.4. accumulate the duration

        # flush the final pending object of track 2
        lengthOfIndexs = len(lastIndexes)
        if lengthOfIndexs == 0:
            scoreObject = note.Rest()
        elif lengthOfIndexs == 1:
            scoreObject = note.Note(lastIndexes[0] + upPitch)
        else:
            scoreObject = chord.Chord()
            for j in range(lengthOfIndexs):
                scoreObject.add(note.Note(lastIndexes[j] + upPitch))
        scoreObject.duration = duration.Duration(dur)
        scoreTrack2.append(scoreObject)
        scoreStream.append(scoreTrack2)

        return scoreStream
Example #24
def weave_data_frame_to_midi(data_frame,
                             midi_file_directory=os.getcwd(),
                             save_midi_file=True):
    if isinstance(data_frame, pd.DataFrame):

        # one Score per distinct value of the first column (the file name)
        score_dict = {}
        unique_scores = data_frame.iloc[:, 0:1].drop_duplicates()
        for idx in range(0, len(unique_scores)):
            score_dict[unique_scores.iloc[idx, 0]] = stream.Score()

        # one Part per distinct (score, part) pair
        part_dict = {}
        unique_parts = data_frame.iloc[:, 0:2].drop_duplicates()
        for idx in range(0, len(unique_parts)):
            if not math.isnan(unique_parts.iloc[idx, 1]):
                part = stream.Part()
                part_dict[unique_parts.iloc[idx, 1]] = part
                score_dict[unique_parts.iloc[idx, 0]].append(part)

        # instruments: columns 2-3 hold the instrument name and its offset
        unique_instruments = data_frame.iloc[:, 0:4].drop_duplicates()
        for idx in range(0, len(unique_instruments)):
            if not math.isnan(unique_instruments.iloc[idx, 3]):
                if unique_instruments.iloc[idx, 2] == 'StringInstrument':
                    instrument_element = instrument.StringInstrument()
                else:
                    instrument_element = instrument.fromString(
                        unique_instruments.iloc[idx, 2])
                part_dict[unique_instruments.iloc[idx, 1]].append(
                    instrument_element)
                instrument_element.offset = unique_instruments.iloc[idx, 3]

        # metronome marks: columns 4-6 hold text, number and offset
        unique_tempi = data_frame.iloc[:, [0, 1, 4, 5, 6]].drop_duplicates()
        for idx in range(0, len(unique_tempi)):
            if not math.isnan(unique_tempi.iloc[idx, 3]):
                metronome_element = tempo.MetronomeMark(
                    unique_tempi.iloc[idx, 2], unique_tempi.iloc[idx, 3])
                part_dict[unique_tempi.iloc[idx, 1]].append(metronome_element)
                metronome_element.offset = unique_tempi.iloc[idx, 4]

        # keys: columns 7-9 hold tonic, mode and offset
        unique_keys = data_frame.iloc[:, [0, 1, 7, 8, 9]].drop_duplicates()
        for idx in range(0, len(unique_keys)):
            if not math.isnan(unique_keys.iloc[idx, 4]):
                key_element = key.Key(unique_keys.iloc[idx, 2],
                                      unique_keys.iloc[idx, 3])
                part_dict[unique_keys.iloc[idx, 1]].append(key_element)
                key_element.offset = unique_keys.iloc[idx, 4]

        # time signatures: columns 10-11 hold the ratio string and offset
        unique_meters = data_frame.iloc[:, [0, 1, 10, 11]].drop_duplicates()
        for idx in range(0, len(unique_meters)):
            if not math.isnan(unique_meters.iloc[idx, 3]):
                time_signature_element = meter.TimeSignature(
                    unique_meters.iloc[idx, 2])
                part_dict[unique_meters.iloc[idx, 1]].append(
                    time_signature_element)
                time_signature_element.offset = unique_meters.iloc[idx, 3]

        # voices: columns 12-13 hold the voice id and offset
        voice_dict = {}
        unique_voices = data_frame.iloc[:, [0, 1, 12, 13]].drop_duplicates()
        for idx in range(0, len(unique_voices)):
            if not math.isnan(unique_voices.iloc[idx, 2]):
                voice = stream.Voice()
                voice_dict[unique_voices.iloc[idx, 2]] = voice
                part_dict[unique_voices.iloc[idx, 1]].append(voice)
                voice.offset = unique_voices.iloc[idx, 3]

        # notes, rests and chords: score, part, voice, type, pitch, _,
        # octave, velocity, quarterLength, offset
        events = data_frame.iloc[:, [0, 1, 12, 14, 15, 16, 17, 18, 19, 20]]
        for idx in range(0, len(events)):
            try:
                if not math.isnan(events.iloc[idx, 9]):
                    if events.iloc[idx, 3] == "Note":
                        note_element = note.Note()
                        voice_dict[events.iloc[idx, 2]].append(note_element)
                        note_element.pitch.name = events.iloc[idx, 4]
                        note_element.pitch.octave = events.iloc[idx, 6]
                        note_element.volume.velocity = events.iloc[idx, 7]
                        note_element.duration.quarterLength = events.iloc[idx, 8]
                        note_element.offset = events.iloc[idx, 9]

                    elif events.iloc[idx, 3] == "Rest":
                        rest_element = note.Rest()
                        voice_dict[events.iloc[idx, 2]].append(rest_element)
                        rest_element.duration.quarterLength = events.iloc[idx, 8]
                        rest_element.offset = events.iloc[idx, 9]

                    elif events.iloc[idx, 3] == "Chord":
                        # start a new chord unless this row continues the
                        # chord begun at the same offset by the previous row
                        if events.iloc[idx - 1, 9] != events.iloc[idx, 9]:
                            chord_element = chord.Chord()
                            voice_dict[events.iloc[idx, 2]].append(
                                chord_element)

                        if len(events.iloc[idx, 4]) > 2:
                            print("Chords within a single row are still under development.")
                            return False
                        else:
                            pitch_element = note.Note()
                            pitch_element.pitch.name = events.iloc[idx, 4]
                            pitch_element.pitch.octave = events.iloc[idx, 6]
                            pitch_element.volume.velocity = events.iloc[idx, 7]
                            pitch_element.duration.quarterLength = events.iloc[idx, 8]
                            chord_element.add(pitch_element)
                            chord_element.offset = events.iloc[idx, 9]
                    else:
                        print(str(idx) + "th row cannot be converted to midi")
            except KeyError:
                pass

            print_progress_bar_weaving(idx, data_frame)

        if score_dict:
            for _midi_file_name, score in score_dict.items():
                if score:
                    midi_file = midi.translate.streamToMidiFile(score)

                    if save_midi_file and midi_file:
                        midi_file.open(
                            midi_file_directory + '/' + _midi_file_name +
                            '_encoded.mid', 'wb')
                        midi_file.write()
                        midi_file.close()
                        print(midi_file_directory + '/' + _midi_file_name +
                              '_encoded.mid is saved')

    else:
        print("The inputted data isn't data frame")
        return False

    return score_dict
Example #25
    def get_tempo_mark(self, tempo_description=None, tempo_bpm=None, tempo_note=None):
        # note: the keyword arguments are currently ignored; the instance attributes are used
        return tempo.MetronomeMark(self.tempo_description, self.tempo_bpm, self.tempo_beat_note)
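Several examples in this collection pass the constructor arguments positionally, which works because MetronomeMark's positional order is text, number, referent:

from music21 import duration, tempo

mm = tempo.MetronomeMark('maestoso', 72, duration.Duration('quarter'))
print(mm.text, mm.number, mm.referent.quarterLength)  # maestoso 72 1.0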
Example #26
import logging

from music21 import defaults, metadata, stream, instrument, midi, duration, meter, tempo, clef, chord, articulations
from music21.note import Note, Rest
from music21.chord import Chord

from drumsnotes import Snare, HiHat, OpenHiHat, PedalHiHat, Kick, Crash, Ride, HighTom, MiddleTom, LowTom
from drumsnotes import drumsPart, showDrums, StreamPlayer2

#
#
#
logging.basicConfig(level=logging.INFO)
logging.warning('Watch out!')

aPart0 = drumsPart(time=meter.TimeSignature('3/4'),
                   metronome=tempo.MetronomeMark(number=100))
aPart0.metadata = metadata.Metadata()
aPart0.metadata.title = 'Tracas de Trois by music21.drumsnotes'
aPart0.metadata.composer = 'Nicolas BERTRAND'
# add a dummy Instrument to avoid a MuseScore warning
aInstrument = instrument.Instrument()
# aInstrument.midiChannel = 9, 10, whatever
aPart0.insert(aInstrument)
#

# A
aMeasureA0 = stream.Measure()
aMeasureA0.append(Chord([Kick(duration=duration.Duration(1.0))]))
aMeasureA0.append(Snare(duration=duration.Duration(0.5)))
aMeasureA0.append(Snare(duration=duration.Duration(0.5)))
aMeasureA0.append(Snare(duration=duration.Duration(0.5)))
Example #27
def pendulumMusic(show = True, 
                  loopLength = 160.0, 
                  totalLoops = 1, 
                  maxNotesPerLoop = 40,
                  totalParts = 16,
                  scaleStepSize = 3,
                  scaleType = scale.OctatonicScale,
                  startingPitch = 'C1'
                  ):    
    totalLoops = totalLoops * 1.01
    jMax = loopLength * totalLoops
    
    
    p = pitch.Pitch(startingPitch)
    if isinstance(scaleType, scale.Scale):
        octo = scaleType
    else:
        octo = scaleType(p)
    s = stream.Score()
    s.metadata = metadata.Metadata()
    s.metadata.title = 'Pendulum Waves'
    s.metadata.composer = 'inspired by http://www.youtube.com/watch?v=yVkdfJ9PkRQ'
    parts = [stream.Part(), stream.Part(), stream.Part(), stream.Part()]
    parts[0].insert(0, clef.Treble8vaClef())
    parts[1].insert(0, clef.TrebleClef())
    parts[2].insert(0, clef.BassClef())
    parts[3].insert(0, clef.Bass8vbClef())
    for i in range(totalParts):
        j = 1.0
        while j < (jMax+1.0):
            ps = p.ps
            if ps > 84:
                active = 0
            elif ps >= 60:
                active = 1
            elif ps >= 36:
                active = 2
            elif ps < 36:
                active = 3
            
            jQuant = round(j*8)/8.0

            establishedChords = parts[active].getElementsByOffset(jQuant)
            if len(establishedChords) == 0:
                c = chord.Chord([p])
                c.duration.type = '32nd'
                parts[active].insert(jQuant, c)
            else:
                c = establishedChords[0]
                # Chord.pitches is a tuple in recent music21; add the new
                # pitch through the chord API instead of mutating a copy
                c.add(p)
            j += loopLength/(maxNotesPerLoop - totalParts + i)
            #j += (8+(8-i))/8.0
        p = octo.next(p, stepSize = scaleStepSize)
            

    parts[0].insert(0, tempo.MetronomeMark(number = 120, referent = duration.Duration(2.0)))
    for i in range(4):
        parts[i].insert(int((jMax + 4.0)/4)*4, note.Rest(quarterLength=4.0))
        parts[i].makeRests(fillGaps=True, inPlace=True)
        parts[i] = parts[i].makeNotation()
        s.insert(0, parts[i])
    
    if show == True:
        #s.show('text')
        s.show('midi')
        s.show()
Example #28
def gen_song(pitch_algorithm, durations_algorithm, dynamics_algorithm, alignment, instruments, k_shingles,
             piece_length=5000):
    ####### ALIGNMENT HANDLING ##############
    assert (alignment is not None), 'No MSA provided'

    assert isinstance(alignment, MultipleSeqAlignment) or \
           (isinstance(alignment, str) and os.path.isfile(alignment)) or \
           (isinstance(alignment, np.ndarray) and len(alignment.shape) == 2)

    if isinstance(alignment, str):
        print 'Reading alignment...'
        aln_file = AlignIO.read(open(alignment, 'rU'), 'clustal')
        aln_file = msa_to_phylip(aln_file)

        print 'Opening phylip file...'
        alignment = AlignIO.read(open(aln_file.split('.')[0] + ".phy"), 'phylip-relaxed')

    assert isinstance(piece_length, int) or isinstance(piece_length, float)  # and piece_length > 60

    # piece_length for now is only referring to number of musical elements
    # n_pieces = len(alignment[0]) / (step * piece_length)
    scores = []
    if not isinstance(alignment, np.ndarray):
        alignment = np.array([[y for y in x] for x in alignment])

    # k = np.random.choice(np.arange(3, 7), 1)[0] # random number between 3 and 6; used for k-shingling
    print 'K =', k_shingles

    from core.music.similarity import SimHandler

    split_len = int(alignment.shape[1] / piece_length) \
        if alignment.shape[1] % piece_length == 0 \
        else int(alignment.shape[1] / piece_length) + 1

    split_alignment = np.array_split(alignment, split_len, axis=1)

    sim = SimHandler(split_alignment, k=k_shingles)
    clusters = sim.cluster_by_similarites()

    print('Clusters', clusters)
    tempos = np.arange(45, 160, (160 - 45) / len(clusters))
    tempos_vector = sim.assign_tempos_by_clusters(clusters, tempos)

    assert len(tempos_vector) == len(clusters) == len(split_alignment)

    piece_idx = 0

    for p in range(0, alignment.shape[1], piece_length):

        if alignment.shape[1] - p < piece_length:
            piece_length = alignment.shape[1] - p

        score = stream.Score()

        score_tempo = tempo.MetronomeMark('tempo', tempos_vector[piece_idx])

        # print 'SCORE TEMPO', score_tempo._number
        score.insert(0, score_tempo)

        print 'Generating pitches and durations...'

        subsequence = alignment[:, p: p + piece_length]

        regions_file_path = 'regions_' + str(piece_idx) + '.txt'

        if not os.path.isdir(GLOBALS['REGIONS_DIR']):
            os.mkdir(GLOBALS['REGIONS_DIR'])

        if 'DIR' not in os.environ:
            os.environ['DIR'] = 'default'
        if not os.path.isdir(GLOBALS['REGIONS_DIR'] + '/' + os.environ['DIR']):
            os.mkdir(GLOBALS['REGIONS_DIR'] + '/' + os.environ['DIR'])

        regions_file_path = GLOBALS['REGIONS_DIR'] + '/' + os.environ['DIR'] + '/' + regions_file_path
        regions_file = open(regions_file_path, 'w')  # 'wr' is not a valid mode

        for i in range(0, alignment.shape[0]):
            regions_file.write(''.join(subsequence[i]) + '\n')
            gen_stream(score, subsequence[i], pitch_algorithm, durations_algorithm, instruments[i])

        print 'Checking if parts have the same total duration...'

        # aligning part durations (score or midi cannot be produced with unequal duration parts)
        for part in score.parts:

            # obtaining highest duration from all parts
            # and aligning with it
            diff = score.highestTime - part.highestTime

            if diff > 0:

                while round(diff, 5) > 0:

                    # minimum = duration.Duration('2048th')
                    n = note.Rest()

                    if diff >= float(0.5):
                        n.duration = duration.Duration(0.5)
                    else:
                        if diff >= MIN_TEMPO:
                            n.duration = duration.Duration(diff)
                        else:
                            n.duration = duration.Duration(MIN_TEMPO)

                    assert n.duration.quarterLength >= MIN_TEMPO

                    part.append(n)
                    diff = score.highestTime - part.highestTime

        dynamics_vector = gen_dynamics_vector(subsequence, dynamics_algorithm)

        volumes = dynamics_vector['vol']
        print 'VOLUMES', dynamics_vector
        window_size = dynamics_algorithm['window_size']

        score = add_dynamics_to_score(volumes, score, window_size, instruments)

        print 'Dynamics to score'
        """for part in new_score:
            elems = ''
            for y in range(2, len(part)):
                elems += str(part[y].volume) + ' '
            print elems
        sys.exit(1)"""""
        scores.append(score)

        regions_file.write('\n\nTempo: ' + str(tempos_vector[piece_idx]))
        regions_file.close()

        piece_idx += 1

    """print similarities_df

    classes = np.arange(40, 145, (145 - 40) / n_classes, dtype=np.uint8)
    print classes

    jaccard_indices = np.array_split(similarities_df['jaccard'], n_classes)
    class_size = len(jaccard_indices[0])

    j = 0
    for i in range(0, len(classes)):
        similarities_df['tempo'][j:j + class_size] = classes[i]
        print 'L', len(similarities_df['tempo'][j:j + class_size])
        j += class_size

    similarities_df = similarities_df.sort_values('idx')"""

    # statistics and file output are handled by @FileWriter
    # return the scores, use dynamics_algorithm, add volumes to the score and analyze it
    return scores
Example #29
def generate_music(inference_model,
                   corpus=corpus,
                   abstract_grammars=abstract_grammars,
                   tones=tones,
                   tones_indices=tones_indices,
                   indices_tones=indices_tones,
                   T_y=10,
                   max_tries=1000,
                   diversity=0.5):
    """
    使用训练的模型生成音乐
    Arguments:
    model -- 训练的模型
    corpus -- 音乐语料库, 193个音调作为字符串的列表(ex: 'C,0.333,<P1,d-5>')
    abstract_grammars -- grammars列表: 'S,0.250,<m2,P-4> C,0.250,<P4,m-2> A,0.250,<P4,m-2>'
    tones -- set of unique tones, ex: 'A,0.250,<M2,d-4>' is one element of the set.
    tones_indices -- a python dictionary mapping unique tone (ex: A,0.250,< m2,P-4 >) into their corresponding indices (0-77)
    indices_tones -- a python dictionary mapping indices (0-77) into their corresponding unique tone (ex: A,0.250,< m2,P-4 >)
    Tx -- integer, number of time-steps used at training time
    temperature -- scalar value, defines how conservative/creative the model is when generating music
    Returns:
    predicted_tones -- python list containing predicted tones
    """

    # set up audio stream
    out_stream = stream.Stream()

    # Initialize chord variables
    curr_offset = 0.0  # variable used to write sounds to the Stream.
    num_chords = int(len(chords) / 3)  # number of different set of chords

    print("Predicting new values for different set of chords.")
    # Loop over all 18 set of chords. At each iteration generate a sequence of tones
    # and use the current chords to convert it into actual sounds
    for i in range(1, num_chords):

        # Retrieve current chord from stream
        curr_chords = stream.Voice()

        # Loop over the chords of the current set of chords
        for j in chords[i]:
            # Add chord to the current chords with the adequate offset, no need to understand this
            curr_chords.insert((j.offset % 4), j)

        # Generate a sequence of tones using the model
        _, indices = predict_and_sample(inference_model, x_initializer,
                                        a_initializer, c_initializer)
        indices = list(indices.squeeze())
        pred = [indices_tones[p] for p in indices]

        predicted_tones = 'C,0.25 '
        for k in range(len(pred) - 1):
            predicted_tones += pred[k] + ' '

        predicted_tones += pred[-1]

        #### POST PROCESSING OF THE PREDICTED TONES ####
        # We will consider "A" and "X" as "C" tones. It is a common choice.
        predicted_tones = predicted_tones.replace(' A',
                                                  ' C').replace(' X', ' C')

        # Pruning #1: smoothing measure
        predicted_tones = qa.prune_grammar(predicted_tones)

        # Use predicted tones and current chords to generate sounds
        sounds = qa.unparse_grammar(predicted_tones, curr_chords)

        # Pruning #2: removing repeated and too close together sounds
        sounds = qa.prune_notes(sounds)

        # Quality assurance: clean up sounds
        sounds = qa.clean_up_notes(sounds)

        # Print number of tones/notes in sounds
        print(
            'Generated %s sounds using the predicted values for the set of chords ("%s") and after pruning'
            % (len([k for k in sounds if isinstance(k, note.Note)]), i))

        # Insert sounds into the output stream
        for m in sounds:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0

    # Initialize the tempo of the output stream to 130 beats per minute
    out_stream.insert(0.0, tempo.MetronomeMark(number=130))

    # Save the audio stream to file
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open("output/my_music.midi", 'wb')
    mf.write()
    print("Your generated music is saved in output/my_music.midi")
    mf.close()

    return out_stream
Example #30
def generate(data_fn, out_fn, N_epochs):
    # model settings
    max_len = 20
    max_tries = 1000
    diversity = 0.5

    # musical settings
    bpm = 130

    # get data
    chords, abstract_grammars = get_musical_data(data_fn)
    corpus, values, val_indices, indices_val = get_corpus_data(
        abstract_grammars)
    print("corpus length:", len(corpus))
    print("total # of values:", len(values))

    # build model
    model = lstm.build_model(corpus=corpus,
                             val_indices=val_indices,
                             max_len=max_len,
                             N_epochs=N_epochs)

    # set up audio stream
    out_stream = stream.Stream()

    # generation loop
    curr_offset = 0.0
    loopEnd = len(chords)
    for loopIndex in range(1, loopEnd):
        # get chords from file
        curr_chords = stream.Voice()
        for j in chords[loopIndex]:
            curr_chords.insert((j.offset % 4), j)

        # generate grammar
        curr_grammar = __generate_grammar(
            model=model,
            corpus=corpus,
            abstract_grammars=abstract_grammars,
            values=values,
            val_indices=val_indices,
            indices_val=indices_val,
            max_len=max_len,
            max_tries=max_tries,
            diversity=diversity,
        )

        curr_grammar = curr_grammar.replace(" A", " C").replace(" X", " C")

        # Pruning #1: smoothing measure
        curr_grammar = prune_grammar(curr_grammar)

        # Get notes from grammar and chords
        curr_notes = unparse_grammar(curr_grammar, curr_chords)

        # Pruning #2: removing repeated and too close together notes
        curr_notes = prune_notes(curr_notes)

        # quality assurance: clean up notes
        curr_notes = clean_up_notes(curr_notes)

        # print # of notes in curr_notes
        print("After pruning: %s notes" %
              (len([i for i in curr_notes if isinstance(i, note.Note)])))

        # insert into the output stream
        for m in curr_notes:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0

    out_stream.insert(0.0, tempo.MetronomeMark(number=bpm))

    # Play the final stream through output (see 'play' lambda function above)
    # play = lambda x: midi.realtime.StreamPlayer(x).play()
    # play(out_stream)

    # save stream
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open(out_fn, "wb")
    mf.write()
    mf.close()