Example #1
def split_voices(lead, current):
    """
    Given two sequential notes, if the lead note contains more voices than current, then duplicate
    the notes in current to match lead, producing continuous voice lines
        :param lead: Leading note
        :param current: Current note
    """
    num_lead = get_number_of_voices(lead)
    num_current = get_number_of_voices(current)
    if current.isNote:
        return [
            Note(current.pitch, quarterLength=current.duration.quarterLength)
            for i in range(num_lead)
        ]
    if num_lead == num_current:
        return [
            Note(pitch, quarterLength=current.duration.quarterLength)
            for pitch in current.pitches
        ]
    if current.isChord and len(current.pitches) == 0:
        raise RuntimeError(
            "The system was able to parse the file, but detected an illegal construct: empty chord"
        )
    middle = map(itemgetter(0), [
        sorted([(c, abs(Interval(e, c).cents)) for c in current.pitches],
               key=itemgetter(1))[0] for e in lead.pitches[1:-1]
    ])
    return [
        Note(pitch, quarterLength=current.duration.quarterLength)
        for pitch in chain(current.pitches[0:1], middle, current.pitches[-1:])
    ]
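
A minimal usage sketch for split_voices, assuming the imports the snippet relies on (Note, Chord, Interval from music21, chain from itertools, itemgetter from operator) and a hypothetical get_number_of_voices helper that returns 1 for a plain Note and the pitch count for a Chord:

from itertools import chain
from operator import itemgetter

from music21.chord import Chord
from music21.interval import Interval
from music21.note import Note

def get_number_of_voices(obj):
    # Assumed helper: one voice per pitch in a chord, a single voice for a plain note.
    return len(obj.pitches) if obj.isChord else 1

lead = Chord(['C4', 'E4', 'G4', 'C5'])   # four voices
current = Chord(['D4', 'A4', 'D5'])      # three voices
current.quarterLength = 1.0

# The middle lead voices (E4, G4) are matched to the nearest current pitches by
# interval size, so the result has four notes: D4, D4, A4, D5.
for n in split_voices(lead, current):
    print(n.nameWithOctave, n.duration.quarterLength)
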
def test_2():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.25))
    top.append(Rest(quarterLength=0.25))
    bot.append(Note('G3', quarterLength=0.25))
    bot.append(Rest(quarterLength=0.25))
    return Score([top, bot])
Example #3
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    prev_chord = dict() # midi->(note instance from previous chord), used to determine tie type (start, continue, stop)
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0: # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]: # current note is tied
                    note.tie = Tie('stop')
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = { note.pitch.midi : note for note in notes }
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
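
A minimal usage sketch for to_musicxml, assuming its music21 imports (Duration, Stream, Note, Rest, Chord, Tie, expressions) are in scope, a FRAMES_PER_CROTCHET constant (4 here, i.e. one frame per sixteenth), and the music21 version the snippet targets, in which assigning Note.midi sets the pitch. Each frame is (has_fermata, [(midi_pitch, is_tied_to_previous), ...]):

FRAMES_PER_CROTCHET = 4  # assumed value

sc_enc = [
    (False, [(60, False), (64, False), (67, False)]),  # C major triad attacked
    (False, [(60, True), (64, True), (67, True)]),     # same triad held (tied)
    (False, []),                                       # empty frame becomes a Rest
    (True, [(59, False), (62, False), (67, False)]),   # closing chord with fermata
]
score = to_musicxml(sc_enc)
score.show('text')
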
Example #4
    def generate_note(self, f0_info, n_duration, round_to_sixteenth=True):
        f0 = f0_info[0]
        a = remap(f0_info[1], self.cqt.min(), self.cqt.max(), 0, 1)
        duration = librosa.frames_to_time(n_duration, sr=self.sr, hop_length=self.hop_length)
        note_duration = 0.02 * np.around(duration / 0.02)  # Round to the nearest 0.02 s for music21 compatibility
        midi_duration = second_to_quarter(duration, self.tempo)
        midi_velocity = int(round(remap(f0_info[1], self.cqt.min(), self.cqt.max(), 80, 120)))
        if round_to_sixteenth:
            midi_duration = round(midi_duration * 16) / 16
        try:
            if f0 is None:
                midi_note = None
                note_info = Rest(type=self.mm.secondsToDuration(note_duration).type)
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note), type=self.mm.secondsToDuration(note_duration).type)
                note.volume.velocity = midi_velocity
                note_info = [note]
        except DurationException:
            if f0 is None:
                midi_note = None
                note_info = Rest(type='32nd')
                f0 = 0
            else:
                midi_note = round(librosa.hz_to_midi(f0))
                note = Note(librosa.midi_to_note(midi_note),
                            type='eighth')
                note.volume.velocity = midi_velocity
                note_info = [note]

        midi_info = [midi_note, midi_duration, midi_velocity]
        n = np.arange(librosa.frames_to_samples(n_duration, hop_length=self.hop_length))
        sine_wave = a * np.sin(2 * np.pi * f0 * n / float(self.sr))
        return [sine_wave, midi_info, note_info]
Example #5
 def playNote(self, note):
     p = Note(note)
     p.quarterLength = 1.5
     stream_obj = stream.Stream()
     stream_obj.append(p)
     sp = midi.realtime.StreamPlayer(stream_obj)
     sp.play()
Example #6
def to_musicxml(sc_enc):
    "Converts Chord tuples (see chorales.prepare_poly) to musicXML"
    timestep = Duration(1. / FRAMES_PER_CROTCHET)
    musicxml_score = Stream()
    prev_chord = dict()  # midi->(note instance from previous chord), used to determine tie type (start, continue, stop)
    for has_fermata, chord_notes in sc_enc:
        notes = []
        if len(chord_notes) == 0:  # no notes => rest for this frame
            r = Rest()
            r.duration = timestep
            musicxml_score.append(r)
        else:
            for note_tuple in chord_notes:
                note = Note()
                if has_fermata:
                    note.expressions.append(expressions.Fermata())
                note.midi = note_tuple[0]
                if note_tuple[1]:  # current note is tied
                    note.tie = Tie('stop')
                    if prev_chord and note.pitch.midi in prev_chord:
                        prev_note = prev_chord[note.pitch.midi]
                        if prev_note.tie is None:
                            prev_note.tie = Tie('start')
                        else:
                            prev_note.tie = Tie('continue')
                notes.append(note)
            prev_chord = {note.pitch.midi: note for note in notes}
            chord = Chord(notes=notes, duration=timestep)
            if has_fermata:
                chord.expressions.append(expressions.Fermata())
            musicxml_score.append(chord)
    return musicxml_score
def test_5():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('F4', quarterLength=0.5))
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Note('A3', quarterLength=0.5))
    return Score([top, bot])
Example #8
def main():
    parser = get_cmd_line_parser(description=__doc__)
    ParserArguments.filename(parser)
    ParserArguments.tempo(parser)
    ParserArguments.framerate(parser)
    ParserArguments.set_defaults(parser)
    ParserArguments.best(parser)
    args = parser.parse_args()
    defaults.framerate = args.framerate

    song = Stream()

    roots = 'ABCDEFG'
    scales = [scale.MajorScale, scale.MinorScale,
              scale.WholeToneScale, scale.ChromaticScale]

    print('Choosing a random scale from Major, Minor, Whole Tone, Chromatic.')
    rscale = random.choice(scales)(Pitch(random.choice(roots)))
    print('Using: %s' % rscale.name)

    print('Generating a score...')
    random_note_count = 50
    random_note_speeds = [0.5, 1]
    print('%d random 1/8th and 1/4th notes in rapid succession...' % random_note_count)
    for i in range(random_note_count):
        note = Note(random.choice(rscale.pitches))
        note.duration.quarterLength = random.choice(random_note_speeds)
        song.append(note)

    scale_practice_count = 4
    print('Do the scale up and down a few times... maybe %s' %
          scale_practice_count)
    rev = rscale.pitches[:]
    rev.reverse()
    updown_scale = rscale.pitches[:]
    updown_scale.extend(rev[1:-1])
    print('updown scale: %s' % updown_scale)
    for count, pitch in enumerate(cycle(updown_scale)):
        print(' note %s, %s' % (count, pitch))
        song.append(Note(pitch))
        if count >= scale_practice_count * len(updown_scale):
            break

    print('Composition finished:')
    song.show('txt')

    if args.best:
        print('Audifying the song to file "{}"...'.format(args.filename))
        wave = audify_to_file(song, args.tempo, args.filename, verbose=True)
    else:
        wave = audify_basic(song, args.tempo, verbose=True)
        print('Writing Song to file "{}"...'.format(args.filename))
        with wav_file_context(args.filename) as fout:
            fout.write_frames(wave.frames)

    return 0
Example #9
 def test_write_arpeggio(self):
     f = Note("F4")
     a = Note("A4")
     c = Note("C5")
     f.duration.quarterLength = 4
     a.duration.quarterLength = 4
     c.duration.quarterLength = 4
     arpeggio = stream.Stream()
     for note in [f, a, c]:
         arpeggio.append(note)
     arpeggio.show()
def test_19():
    """
    NB: This test is designed specifically to ensure that the _event_finder()
    finds Rest objects when they happen at the same time as Note objects, when
    only Rest objects are requested to be found.
    """
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('G5', quarterLength=0.5))
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Rest(quarterLength=0.5))
    return Score([top, bot])
def write_notation_cell(music, path, event_index):
    score = Score()

    metadata = Metadata()
    metadata.title = ''
    metadata.composer = ''
    score.insert(0, metadata)

    layout = ScoreLayout()
    layout.scalingMillimeters = 1.25
    layout.scalingTenths = 40
    score.insert(0, layout)

    for musician in music:
        instrument_name = musician['instrument']
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        if instrument.instrumentName == 'Violoncello':
            instrument.partName = 'Cello'
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        parts = []
        part = Part()
        parts.append(part)
        part.insert(0, instrument)

        score.insert(0, part)
        # score.insert(0, StaffGroup(parts))

        for event in musician['music']:
            pitches = event['pitches']
            dur = event['duration']
            # if not pitches or pitches == 'stop':
            #     note = Rest()
            if len(pitches) == 1:
                pitch = Pitch(pitches[0] + 60)
                note = Note(pitch)
            else:
                note = Chord(notes=[Pitch(p + 60) for p in pitches])

            duration = Duration()
            duration.fill([dur])
            note.duration = duration

            part.append(note)

    file_path = os.path.join(path, str(event_index).zfill(2))
    musicxml_file_path = file_path + '.xml'
    png_output_file_path = file_path + '.png'

    score.write('musicxml', musicxml_file_path)

    write_png_with_musescore(musicxml_file_path, png_output_file_path, dpi=600)
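
A minimal usage sketch for write_notation_cell, assuming its music21 imports and the project helpers it calls (get_instrument, write_png_with_musescore) are available, plus a MuseScore installation for the PNG step. Pitches are given as offsets from middle C, since the code adds 60 before building Pitch objects:

import os

os.makedirs('/tmp/cells', exist_ok=True)
music = [
    {
        'instrument': 'Violin',
        'music': [
            {'pitches': [0], 'duration': 1.0},        # middle C
            {'pitches': [0, 4, 7], 'duration': 2.0},  # C major triad
        ],
    },
]
write_notation_cell(music, '/tmp/cells', event_index=3)  # writes 03.xml and 03.png
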
def test_18():
    """
    NB: This test is designed specifically to ensure that the _event_finder()
    doesn't stop processing when it doesn't find an element of the expected types
    at an offset. You should ask it to look for Rest objects only.
    """
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Rest(quarterLength=0.5))
    bot.append(TimeSignature('4/4'))
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Rest(quarterLength=0.5))
    return Score([top, bot])
    def sheperd_tone(self,
                     notes_i,
                     length=0.5,
                     instrument_to_play=instrument.Piano()):

        # Highest octave, volume gets lower
        shepard_tone_u = stream.Part()
        shepard_tone_u.insert(0, instrument_to_play)
        c_major = [
            'C#5', 'D#5', 'E#5', 'F#5', 'G#5', 'A#5', 'B#5', 'C#6', 'D#6',
            'E#6', 'F#6', 'G#6', 'A#6', 'B#6'
        ]
        c_major = [c_major[notes_i % len(c_major)]]

        for i in c_major:
            n = Note(i, quarterLength=length)
            n.volume.velocityScalar = 0.7 - self.volume_increment_u
            shepard_tone_u.append(n)
            self.volume_increment_u = self.volume_increment_u + 0.05

        # Middle octave, volume constant
        shepard_tone_m = stream.Part()
        shepard_tone_m.insert(0, instrument_to_play)
        c_major = [
            'C#3', 'D#3', 'E#3', 'F#3', 'G#3', 'A#3', 'B#3', 'C#4', 'D#4',
            'E#4', 'F#4', 'G#4', 'A#4', 'B#4'
        ]
        c_major = [c_major[notes_i % len(c_major)]]

        for i in c_major:
            n = Note(i, quarterLength=length)
            shepard_tone_m.append(n)

        # Lowest octave, volume gets higher
        shepard_tone_l = stream.Part()
        shepard_tone_l.insert(0, instrument_to_play)
        c_major = [
            'C#1', 'D#1', 'E#1', 'F#1', 'G#1', 'A#1', 'B#1', 'C#2', 'D#2',
            'E#2', 'F#2', 'G#2', 'A#2', 'B#2'
        ]
        c_major = [c_major[notes_i % len(c_major)]]

        for i in c_major:
            n = Note(i, quarterLength=length)
            n.volume.velocityScalar = 0.05 + self.volume_increment_d
            shepard_tone_l.append(n)
            self.volume_increment_d = self.volume_increment_d + 0.05

        return stream.Stream([shepard_tone_u, shepard_tone_m, shepard_tone_l])
Example #14
def add_bass_closing(roman, duration, bass):
   '''Generate a closing riff for the bassline, given chord and
      duration in eighths'''
   filled = 0
   length_weight = 2    # Longer notes later in the bar
   root = roman.root()  # Root pitch of the chord (NOT a note object)
   while filled < duration:
      note = Note(deepcopy(root))
      length = min(random.randint(1,length_weight),duration-filled) # cap at time left
      note.quarterLength = length/2.0

      note.octave -= 2
      bass.append(note)
      filled += length
      length_weight += length # Longer notes later in the bar
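
A minimal usage sketch for add_bass_closing, assuming the snippet's imports (Note from music21, deepcopy from copy, random) and that roman is any chord-like music21 object with a root() method, e.g. a RomanNumeral:

import random
from copy import deepcopy

from music21.note import Note
from music21.roman import RomanNumeral
from music21.stream import Part

bass = Part()
add_bass_closing(RomanNumeral('V', 'C'), 8, bass)  # fill one 4/4 bar (eight eighths)
for n in bass.notes:
    print(n.nameWithOctave, n.quarterLength)
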
Example #15
    def _matrix_to_part(self, submatrix):
        '''
        Takes a submatrix of size (T, D) and turn it into a music21.stream.Part
        object, where T is the number of time slices, and dim is the note
        vector.
        '''
        part = Part()
        pitches = submatrix[:, 0]
        articulations = submatrix[:, 1]

        current_note = None
        for current_tick in range(len(submatrix)):
            if articulations[current_tick]:  # if articulate
                # append the old note
                if current_note is not None:  # for the first note
                    part.append(current_note)

                # create a new note
                if pitches[current_tick] < self.rest:
                    current_note = Note()
                    # assign pitch, inverse of self._midi_to_input()
                    current_note.pitch.midi = pitches[current_tick]
                else:
                    current_note = Rest()
                # resets the duration to the smallest amount
                current_note.duration.quarterLength = self.unit_length
            else:
                current_note.duration.quarterLength += self.unit_length

        return part
Example #16
def rhythmLine(baseDuration = QuarterNote(), minLength = 8.0, maxProbability = 0.5):
    newStream = stream.Stream()
    while newStream.duration.quarterLength < minLength:
        currentProbability = (newStream.duration.quarterLength / minLength) * maxProbability
        newNote = Note()
        newNote.duration =  baseDuration.clone()

        x = random.random()
        while x < currentProbability:
            print(x, currentProbability)
            newNote.duration = alterRhythm(newNote.duration)
            x = random.random()
        newStream.append(newNote)
        #newStream.getNoteTimeInfo()
        
    return newStream
def notate_score(musician_names, instrument_names, music):
    score = Score()

    for musician_name, instrument_name in zip(musician_names,
                                              instrument_names):
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        parts = []
        part = Part()
        parts.append(part)
        part.insert(0, instrument)

        score.insert(0, part)
        score.insert(0, StaffGroup(parts))

        notes = music[musician_name]

        for pitches in notes:
            if not pitches or pitches == 'stop':
                note = Rest()
            elif len(pitches) == 1:
                pitch = Pitch(pitches[0] + 60)
                note = Note(pitch)
            else:
                note = Chord(notes=[Pitch(p + 60) for p in pitches])

            duration = Duration()
            duration.fill([4.0])
            note.duration = duration

            part.append(note)

    score.show('musicxml', '/Applications/Sibelius 7.5.app')
Example #18
def decorateScore(romantext, progression):
    """Decorate an annotated chorale into piano form.

    Receives a romantext stream that has been properly voiced by the
    dynamic programming algorithm, replacing the 1-part layout with a
    2-part grand staff piano layout in SA-TB form.
    """
    romanNumerals = romantext.recurse().getElementsByClass("RomanNumeral")
    score = romantext.template(fillWithRests=False)
    trebleStaff = score.parts[0]
    bassStaff = copy.deepcopy(trebleStaff)
    trebleStaff[0].insert(0, TrebleClef())
    bassStaff[0].insert(0, BassClef())
    for rn, pitches in zip(romanNumerals, progression):
        b, t, a, s = [Note(p, quarterLength=rn.quarterLength) for p in pitches]
        b.lyric = rn.lyric
        trebleStaff.measure(rn.measureNumber).insert(rn.offset, s)
        trebleStaff.measure(rn.measureNumber).insert(rn.offset, a)
        bassStaff.measure(rn.measureNumber).insert(rn.offset, t)
        bassStaff.measure(rn.measureNumber).insert(rn.offset, b)
    staffGroup = StaffGroup(
        [trebleStaff, bassStaff], name="Harmonic reduction", symbol="brace"
    )
    score.insert(0, bassStaff)
    score.insert(0, staffGroup)
    for measure in score.recurse().getElementsByClass("Measure"):
        measure.makeVoices(inPlace=True)
    return score
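
A minimal usage sketch for decorateScore, assuming the snippet's imports (Note, StaffGroup, TrebleClef, BassClef from music21, copy) and that progression carries one (bass, tenor, alto, soprano) pitch tuple per RomanNumeral in the RomanText source:

from music21 import converter

rntxt = '''Composer: Example
Time Signature: 4/4
m1 C: I b3 V
m2 I
'''
romantext = converter.parse(rntxt, format='romanText')
progression = [
    ('C3', 'G3', 'E4', 'C5'),  # I
    ('G2', 'G3', 'D4', 'B4'),  # V
    ('C3', 'C4', 'E4', 'G4'),  # I
]
piano_score = decorateScore(romantext, progression)
piano_score.show('text')
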
Example #19
def add_step_information(notes, keySignatures):
  """
  This function populates step information into Mupix note objects. It is
  required because music21 does not keep key signature information in measures
  other than the one the key is defined in when reading musicXML. The
  maintainers of music21 don't believe this is an issue and won't fix it, so
  workarounds like this one must exist.

  :param [notes]: A list of Mupix NoteObjects.
  :type [notes]: List

  :param [keySignatures]: A list of Mupix KeySignatureObjects.
  :type [keySignatures]: List

  :return [List]: The original list of Mupix NoteObjects (in order) with step information included.
  :rtype: List
  """
  for key in keySignatures:
    key_name = key.step.upper() if key.mode == "major" else key.step.lower()

    for note in notes:
      if note.part == key.part and note.measure == key.measure:
        note.step = Interval(noteStart=Note(Key(key_name).asKey().tonic), noteEnd=note._music21_object).semitones % 12

  return notes
def convert_string_to_notes(string, scale):
    print('converting: %s' % string)
    return [
        Note(scale.pitchFromDegree(char),
             quarterLength=get_random_note_length())
        for char in bytearray(string, 'UTF-8')
    ]
Example #21
def generateScore(chords, lengths=None, ts="4/4"):
    """Generates a four-part score from a sequence of chords.

    Soprano and alto parts are displayed on the top (treble) clef, while tenor
    and bass parts are displayed on the bottom (bass) clef, with correct stem
    directions.
    """
    if lengths is None:
        lengths = [1 for _ in chords]
    voices = [Voice([Piano()]) for _ in range(4)]
    for chord, length in zip(chords, lengths):
        bass, tenor, alto, soprano = [
            Note(p, quarterLength=length) for p in chord.pitches
        ]
        bass.addLyric(chord.lyric)
        bass.stemDirection = alto.stemDirection = "down"
        tenor.stemDirection = soprano.stemDirection = "up"
        voices[0].append(soprano)
        voices[1].append(alto)
        voices[2].append(tenor)
        voices[3].append(bass)

    female = Part([TrebleClef(), TimeSignature(ts), voices[0], voices[1]])
    male = Part([BassClef(), TimeSignature(ts), voices[2], voices[3]])
    score = Score([female, male])
    return score
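
A minimal usage sketch for generateScore, assuming the snippet's imports (Voice, Part, Score, Note, Piano, TrebleClef, BassClef, TimeSignature from music21). Each chord must contain exactly four pitches ordered bass to soprano; its lyric is copied onto the bass note:

from music21.chord import Chord

chords = []
for pitches, figure in [(['C3', 'G3', 'E4', 'C5'], 'I'),
                        (['G2', 'D4', 'G4', 'B4'], 'V'),
                        (['C3', 'G3', 'E4', 'C5'], 'I')]:
    ch = Chord(pitches)
    ch.lyric = figure
    chords.append(ch)

score = generateScore(chords, lengths=[1, 1, 2], ts='4/4')
score.show('text')  # or score.write('musicxml', 'cadence.xml')
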
def generate_sine_midi_note(f0_info, sr, n_duration):
    f0 = f0_info[0]
    A = remap(f0_info[1], CdB.min(), CdB.max(), 0, 1)
    duration = librosa.frames_to_time(n_duration, sr=fs, hop_length=hop_length)
    # Generate music21 note
    # Round to the nearest 0.02 s for music21 compatibility
    note_duration = 0.02 * np.around(duration / 2 / 0.02)
    midi_velocity = int(round(remap(f0_info[1], CdB.min(), CdB.max(), 0, 127)))
    if f0 is None:
        try:
            note_info = Rest(type=mm.secondsToDuration(note_duration).type)
        except DurationException:
            note_info = None
        f0 = 0
    else:
        midi_note = round(librosa.hz_to_midi(f0))
        try:
            note = Note(midi_note,
                        type=mm.secondsToDuration(note_duration).type)
            note.volume.velocity = midi_velocity
            note_info = [note]
        except DurationException:
            note_info = None

    if note_info is None:
        return None

    # Generate Sinewave
    n = np.arange(librosa.frames_to_samples(n_duration, hop_length=hop_length))
    sine_wave = A * np.sin(2 * np.pi * f0 * n / float(sr))
    return [sine_wave, note_info]
Example #23
def notate_note(note):
    if note['pitch'] == 'rest':
        n = Rest()
    else:
        if isinstance(note['pitch'], list):
            pitches = []
            for pitch_number in note['pitch']:
                p = Pitch(pitch_number)
                # Force all flats
                if p.accidental is not None and p.accidental.name == 'sharp':
                    p = p.getEnharmonic()
                pitches.append(p)
            n = Chord(notes=pitches)

        else:
            p = Pitch(note['pitch'])
            # Force all flats
            if p.accidental is not None and p.accidental.name == 'sharp':
                p = p.getEnharmonic()
            n = Note(p)

    d = Duration()
    if note['duration'] == 0:
        d.quarterLength = .125
        d = d.getGraceDuration()
    else:
        # music21 docs say `fill` is for testing. I can't remember why I chose
        # to use it originally. It works. But not for tuplets. Maybe this blog
        # post contains a better solution:
        # http://music21-mit.blogspot.com/2015/09/durations-and-durationtuples.html
        d.fill(note['durations'])
    n.duration = d
    return n
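
A minimal usage sketch for notate_note, assuming the snippet's music21 imports (Note, Rest, Chord, Pitch, Duration) and the music21 version it targets (with Duration.fill). Each note dict needs the 'pitch', 'duration', and 'durations' keys the code reads; pitches are MIDI numbers and sharps are respelled as flats:

examples = [
    {'pitch': 'rest', 'duration': 1.0, 'durations': [1.0]},   # quarter rest
    {'pitch': 61, 'duration': 1.0, 'durations': [1.0]},       # C#4 respelled as D-flat
    {'pitch': [60, 63, 67], 'duration': 0, 'durations': []},  # grace chord
]
for spec in examples:
    print(notate_note(spec))
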
    def generate(self, seq_len, a_par=0):
        pattern = self.model_inp[self.start]
        prediction_output = []
        for note_index in range(seq_len):
            prediction_input = pattern.reshape(1, seq_len, 2,
                                               len(self.sorted_notes))
            prediction_input = prediction_input / float(len(self.sorted_notes))
            predictions = self.model.predict(prediction_input, verbose=0)[0]
            for prediction in predictions:
                index = np.argmax(prediction[0])
                duration_i = np.argmax(prediction[1])

                for name, value in self.sorted_notes.items():
                    if value == index:
                        result = name
                        break
                    else:
                        result = None

                for name, value in self.sorted_durations.items():
                    if value == duration_i:
                        duration = name
                        break
                    else:
                        duration = None

                prediction_output.append((result, Duration(duration)))
                result = np.zeros_like(prediction)
                result[0][index] = 1
                result[1][duration_i] = 1
                pattern = np.concatenate([pattern, [result]])
            pattern = pattern[len(pattern) - seq_len:len(pattern)]

        offset = 0
        output_notes = []
        for pattern, duration in prediction_output:
            if pattern.isdigit() or ('.' in pattern):
                notes_in_chord = pattern.split('.')
                notes = []
                for current_note in notes_in_chord:
                    new_note = Note(int(current_note))
                    new_note.duration = duration
                    new_note.storedInstrument = instrument.PanFlute()
                    notes.append(new_note)
                new_chord = Chord(notes)
                new_chord.offset = offset
                output_notes.append(new_chord)
            else:
                new_note = Note(pattern)
                new_note.offset = offset
                new_note.storedInstrument = instrument.Flute()
                output_notes.append(new_note)
            offset += 0.6

        midi_stream = stream.Stream(output_notes)
        midi_stream.write('midi',
                          fp=f'my_music/{self.model.name}_{self.start}.mid')
Example #25
        def conditionalAdd(ts, n: note.Note) -> None:
            '''
            Add an element only if it is not already in the chord.

            If it has more tie information than the previously
            added note, then remove the previously added note and add it
            '''
            from music21 import stream

            nonlocal pitchBust  # love Py3!!!
            p = n.pitch
            pitchKey = p.nameWithOctave

            pitchGroup = None
            if addPartIdAsGroup:
                partContext = n.getContextByClass(stream.Part)
                if partContext is not None:
                    pidStr = str(partContext.id)
                    pitchGroup = pidStr.replace(
                        ' ', '_')  # spaces are not allowed as group names
                    n.pitch.groups.append(pitchGroup)
                    n.groups.append(pitchGroup)

            if pitchKey not in seenPitches:
                seenPitches.add(pitchKey)
                notesToAdd[pitchKey] = newNote(ts, n)
                return
            elif not removeRedundantPitches:
                notesToAdd[pitchKey + str(pitchBust)] = newNote(ts, n)
                pitchBust += 1
                return
            elif addPartIdAsGroup and pitchGroup is not None:
                notesToAdd[pitchKey].groups.append(pitchGroup)
                notesToAdd[pitchKey].pitch.groups.append(pitchGroup)

            if not addTies:
                return

            # else add derivation once multiple derivations are allowed.
            oldNoteTie = notesToAdd[pitchKey].tie
            if oldNoteTie is not None and oldNoteTie.type == 'continue':
                return  # previous note was as good or better

            possibleNewNote = newNote(ts, n)
            possibleNewNote.groups = notesToAdd[pitchKey].groups

            if possibleNewNote.tie is None:
                return  # do nothing
            elif oldNoteTie is None:
                notesToAdd[pitchKey] = possibleNewNote  # a better note to add
            elif {oldNoteTie.type, possibleNewNote.tie.type} == startStopSet:
                notesToAdd[pitchKey].tie.type = 'continue'
            elif possibleNewNote.tie.type == 'continue':
                notesToAdd[pitchKey] = possibleNewNote  # a better note to add
            elif possibleNewNote.tie.type == oldNoteTie.type:
                return
            else:
                raise VerticalityException('Did I miss one? ',
                                           possibleNewNote.tie, oldNoteTie)
def test_17():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('A4', quarterLength=0.75))  # 0.5
    top.append(Note('F4', quarterLength=0.75))  # 1.25
    top.append(Note('E4', quarterLength=0.5))  # 2.0
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Note('A3', quarterLength=0.25))  # 0.5
    bot.append(Note('F3', quarterLength=0.375))  # 0.75
    bot.append(Rest(quarterLength=0.25))  # 1.125
    bot.append(Note('G3', quarterLength=0.625))  # 1.375
    bot.append(Note('G3', quarterLength=0.5))  # 2.0
    return Score([top, bot])
Example #27
    def PlayPiano(self, n):

        if (n == 1):
            p = Note("C----", type='whole')
        elif (n == 2):
            p = Note("D--", type='whole')
        elif (n == 3):
            p = Note("F--", type='whole')
        elif (n == 4):
            p = Note("G--", type='whole')
        elif (n == 5):
            p = Note("A--", type='whole')
        elif (n == 6):
            p = Note("B#4", type='whole')
        elif (n == 7):
            p = Note("B#6", type='whole')

        PianoPart = stream.Part()
        PianoPart.insert(0, instrument.Piano())

        pianoMeasure = stream.Measure()
        pianoMeasure.append(p)
        PianoPart.append(pianoMeasure)

        sp = midi.realtime.StreamPlayer(PianoPart)
        sp.play()
 def add_to_melody_sequence(new_melody_sequence, elem, bar_length):
     if type(elem) not in [Note, Rest]:
         pass
     elif bar_length + elem.quarterLength >= time_signature:
         extra = bar_length + elem.quarterLength - time_signature
         elem.quarterLength = time_signature - bar_length
         if elem.quarterLength > 0.0:
             new_melody_sequence += [elem]
         bar_length = extra
         # The possible extra note
         elem = Note(elem.nameWithOctave) if type(elem) is Note else Rest()
         elem.quarterLength = extra
         if elem.quarterLength > 0.0:
             new_melody_sequence += [elem]
     else:
         new_melody_sequence += [elem]
         bar_length += elem.quarterLength
     return (new_melody_sequence, elem, bar_length)
Example #29
 def create_c_major_scale(self):
     song = []
     song.append(Note('C4', quarterLength=1.0))
     song.append(Note('D4', quarterLength=1.0))
     song.append(Note('E4', quarterLength=1.0))
     song.append(Note('F4', quarterLength=1.0))
     song.append(Note('G4', quarterLength=1.0))
     song.append(Note('A4', quarterLength=1.0))
     song.append(Note('B4', quarterLength=1.0))
     song.append(Note('C5', quarterLength=1.0))
     return song
def note_rhythm_zip(melody,
                    note_sequence,
                    rhythm_sequence,
                    time_signature,
                    interval=0.25):
    melody_sequence = mimic_melody(note_sequence, melody)
    melody_sequence = [
        Note(elem.nameWithOctave, quarterLength=interval)
        if type(elem) is Note else Rest(quarterLength=interval)
        for elem in melody_sequence
        for i in range(0, int(elem.quarterLength / interval))
    ]

    new_melody_sequence = []
    elem = None
    bar_length = 0.0

    # Handle notes in the melody due to bars and time signature
    def add_to_melody_sequence(new_melody_sequence, elem, bar_length):
        if type(elem) not in [Note, Rest]:
            pass
        elif bar_length + elem.quarterLength >= time_signature:
            extra = bar_length + elem.quarterLength - time_signature
            elem.quarterLength = time_signature - bar_length
            if elem.quarterLength > 0.0:
                new_melody_sequence += [elem]
            bar_length = extra
            # The possible extra note
            elem = Note(elem.nameWithOctave) if type(elem) is Note else Rest()
            elem.quarterLength = extra
            if elem.quarterLength > 0.0:
                new_melody_sequence += [elem]
        else:
            new_melody_sequence += [elem]
            bar_length += elem.quarterLength
        return (new_melody_sequence, elem, bar_length)

    for index, rhythm in enumerate(rhythm_sequence):
        if rhythm == 'Hold' and type(elem) is Note:
            elem.quarterLength += interval
        elif rhythm == 'Note':
            new_melody_sequence, elem, bar_length = add_to_melody_sequence(
                new_melody_sequence, elem, bar_length)
            elem = melody_sequence[index]
            elem.quarterLength = interval
        elif rhythm == 'Rest' and type(elem) is Rest:
            elem.quarterLength += interval
        elif rhythm == 'Rest' or rhythm == 'Hold':
            new_melody_sequence, elem, bar_length = add_to_melody_sequence(
                new_melody_sequence, elem, bar_length)
            elem = Rest()
            elem.quarterLength = interval
    new_melody_sequence, elem, bar_length = add_to_melody_sequence(
        new_melody_sequence, elem, bar_length)
    return new_melody_sequence
def searchForIntervals(notesStr):
    '''notesStr is the same as above.  Now however we check to see
    if the generic intervals are the same, rather than the note names.
    Useful if the clef is missing.
    '''
    notesArr = notesStr.split()
    noteObjArr = []
    for tN in notesArr:
        tNObj = Note()
        tNObj.name = tN[0]
        tNObj.octave = int(tN[1])
        noteObjArr.append(tNObj)
    
    interObjArr = []
    for i in range(len(noteObjArr) - 1):
        int1 = interval.notesToInterval(noteObjArr[i], noteObjArr[i+1])
        interObjArr.append(int1)
    #print interObjArr

    searcher1 = IntervalSearcher(interObjArr) 
    ballataObj  = cadencebook.BallataSheet()
    streamLily = ""

    for thisWork in ballataObj:
        for thisCadence in thisWork.snippets:
            if (thisCadence is None):
                continue
            for i in range(len(thisCadence.parts)):
                if searcher1.compareToStream(thisCadence.parts[i].flat) is True:
                    notesList = ""
                    for thisNote in thisCadence.parts[i].flat.notes:
                        notesList += thisNote.name + " "
                        #thisNote.editorial.color = "blue"
                    streamLily += "\\score {" + \
                            "<< \\time " + str(thisCadence.timeSig) + \
                            "\n \\new Staff {" + str(thisCadence.parts[i].lily) + "} >>" + \
                            str(thisCadence.header()) + "\n}\n"
                    print("In piece %r found in stream %d: %s" % (thisWork.title, i, notesList))

    if streamLily:
        print(streamLily)
        lily.lilyString.LilyString(streamLily).showPDF()
def searchForNotes(notesStr):
    '''the notesStr is a string of notes in the following form:
    "C4 D4 E4 B3 C4"
    that's it: name, octave. With no accidentals.  If octave is 0 then
    it means do not bother checking for octaves.
    
    Currently octave is ignored anyhow.
    '''
    notesArr = notesStr.split()
    noteObjArr = []
    for tN in notesArr:
        tNName = tN[0]
        if tNName.lower() != "r":
            tNObj = Note()
            tNObj.name = tN[0]
            tNObj.octave = int(tN[1])
        else:
            tNObj = Rest()
        noteObjArr.append(tNObj)
    ballataObj  = cadencebook.BallataSheet()
    searcher1 = NoteSearcher(noteObjArr) 
    streamLily = ""

    for thisWork in ballataObj:
        for thisCadence in thisWork.snippets:
            if thisCadence is None:
                continue
            for i in range(len(thisCadence.parts)):
                if searcher1.compareToStream(thisCadence.parts[i].flat) is True:
                    notesList = ""
                    for thisNote in thisCadence.parts[i].flat.notesAndRests:
                        #thisNote.editorial.color = "blue"
                        if hasattr(thisNote.lily, "value"):
                            notesList += thisNote.lily.value + " "
                    streamLily += "\\score {" + \
                            "<< \\time " + str(thisCadence.timeSig) + \
                            "\n \\new Staff {" + str(thisCadence.parts[i].lily) + "} >>" + \
                            thisCadence.header() + "\n}\n"
                    print("In piece %r found in stream %d: %s" % (thisWork.title, i, notesList))
    if streamLily:
        lS = lily.lilyString.LilyString(streamLily)
        lS.showPNG()
Example #33
def generate_notes_in_batch(note_params_df,
                            output_dir,
                            audio_format='flac',
                            sample_rate=44100):
    """
    Generates a batch of single note samples from the given table of parameters.

    `note_params_df` - a Pandas Dataframe with columns:
    `midi_number, midi_instrument, volume, duration, tempo`. Their meaning is the same as in generate_single_note.
    `output_dir` - output directory for the MIDI files

    Each sample goes to a single MIDI file named by the numeric index. Also each synthesized audio sample goes to a separate audio file in the given format.
    """
    os.makedirs(output_dir, exist_ok=True)

    fs = FluidSynth(sample_rate=sample_rate)

    stream = Stream()

    for i, row in note_params_df.iterrows():
        stream.append(MetronomeMark(number=row['tempo']))
        stream.append(make_instrument(int(row['midi_instrument'])))
        duration = row['duration']
        stream.append(
            chord_with_volume(
                Chord([
                    Note(midi=int(row['midi_number']),
                         duration=Duration(duration))
                ]), row['volume']))
        stream.append(Rest(duration=Duration(2 * duration)))

    midi_file = '{0}/all_samples.midi'.format(output_dir)
    audio_file_stereo = '{0}/all_samples_stereo.{1}'.format(
        output_dir, audio_format)
    audio_file = '{0}/all_samples.{1}'.format(output_dir, audio_format)
    audio_index_file = '{0}/all_samples_index.csv'.format(output_dir)

    # TODO: We currently assume some fixed duration and tempo (1.0, 120)!!!
    # The parts should be split according to an index.
    audio_index = make_audio_index(note_params_df, 3.0, 0.5, sample_rate)
    audio_index.to_csv(audio_index_file)

    write_midi(stream, midi_file)

    fs.midi_to_audio(midi_file, audio_file_stereo)

    convert_to_mono(audio_file_stereo, audio_file)
    os.remove(audio_file_stereo)

    x, sample_rate = sf.read(audio_file)

    parts = split_audio_to_parts(x, sample_rate, audio_index)
    store_parts_to_files(parts, sample_rate, output_dir, audio_format)
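
A minimal usage sketch for generate_notes_in_batch, assuming pandas is installed, the project helpers it calls (make_instrument, chord_with_volume, make_audio_index, write_midi, convert_to_mono, split_audio_to_parts, store_parts_to_files) are importable, and a working FluidSynth setup with a soundfont:

import pandas as pd

note_params_df = pd.DataFrame({
    'midi_number': [60, 64, 67],    # C4, E4, G4
    'midi_instrument': [0, 0, 40],  # acoustic grand piano, then violin
    'volume': [0.8, 0.6, 1.0],
    'duration': [1.0, 1.0, 1.0],
    'tempo': [120, 120, 120],
})
generate_notes_in_batch(note_params_df, 'output/notes', audio_format='flac')
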
 def next_note(self, notes):
     """
     notes: Music21 Stream or list of ints/floats
     returns: Music21 Note
     """
     note_numbers = notes if isinstance(notes[0], (int, float)) else list(
         map(lambda note: note.pitch.ps,
             notes.flat.getElementsByClass('Note')))
     pattern = self.find_best_pattern(note_numbers)
     print("Best pattern found is:", pattern)
     note_number = self.next_note_from_pattern(note_numbers, pattern)
     return Note(note_number)
Example #35
 def unifiedTest(self):
     C4 = Note(); C4.name = "C"
     D4 = Note(); D4.name = "D"
     E4 = Note(); E4.name = "E"
     F4 = Note(); F4.name = "F"
     G4 = Note(); G4.name = "G"
     A4 = Note(); A4.name = "A"
     B4 = Note(); B4.name = "B"
     C5 = Note(); C5.name = "C"; C5.octave = 5
     D5 = Note(); D5.name = "D"; D5.octave = 5
     
     a = VoiceLeadingQuartet(C4, D4, G4, A4)
     assert a.similarMotion() == True
     assert a.parallelMotion() == True
     assert a.antiParallelMotion() == False
     assert a.obliqueMotion() == False
     assert a.parallelInterval(interval.stringToInterval("P5")) == True
     assert a.parallelInterval(interval.stringToInterval("M3")) == False
 
     b = VoiceLeadingQuartet(C4, C4, G4, G4)
     assert b.noMotion() == True
     assert b.parallelMotion() == False
     assert b.antiParallelMotion() == False
     assert b.obliqueMotion() == False
         
     c = VoiceLeadingQuartet(C4, G4, C5, G4)
     assert c.antiParallelMotion() == True
     assert c.hiddenInterval(interval.stringToInterval("P5")) == False
 
     d = VoiceLeadingQuartet(C4, D4, E4, A4)
     assert d.hiddenInterval(Interval("P5")) == True
     assert d.hiddenInterval(Interval("A4")) == False
     assert d.hiddenInterval(Interval("AA4")) == False
Example #36
 def testGenerateFirstSpecies(self):
     '''
     A First Species Counterpoint Generator by Jackie Rogoff (MIT 2010) written as part of 
     a UROP (Undergraduate Research Opportunities Program) project at M.I.T. 2007.
     '''
     
     n101 = Note()
     n101.duration.type = "quarter"
     n101.name = "A"
     aMinor = scale.ConcreteMinorScale(n101)
     n101b = Note()
     n101b.duration.type = "quarter"
     n101b.name = "D"
     dMinor = scale.ConcreteMinorScale(n101b)
     
     counterpoint1 = ModalCounterpoint()
     (n110, n111, n112, n113) = (Note(), Note(), Note(), Note())
     (n114, n115, n116, n117, n118) = (Note(), Note(), Note(), Note(), Note())
     (n119, n120, n121, n122, n123) = (Note(), Note(), Note(), Note(), Note())
     (n124, n125, n126, n127, n128) = (Note(), Note(), Note(), Note(), Note())
 
     n110.duration.type = "quarter"
     n111.duration.type = "quarter"
     n112.duration.type = "quarter"
     n113.duration.type = "quarter"
     n114.duration.type = "quarter"
     n115.duration.type = "quarter"
     n116.duration.type = "quarter"
     n117.duration.type = "quarter"
     n118.duration.type = "quarter"
 
     n110.name = "A"
     n110.octave = 3
     n111.name = "C"
     n111.octave = 4
     n112.name = "B"
     n112.octave = 3
     n113.name = "C"
     n113.octave = 4
     n114.name = "D"
     n115.name = "E"
     n116.name = "C"
     n116.octave = 4
     n117.name = "B"
     n117.octave = 3
     n118.name = "A"
     n118.octave = 3
     n119.name = "F"
     n120.name = "E"
     n121.name = "D"
     n122.name = "G"
     n123.name = "F"
     n124.name = "A"
     n125.name = "G"
     n126.name = "F"
     n127.name = "E"
     n128.name = "D"
 
     cantusFirmus1 = Stream([n110, n111, n112, n113, n114, n115, n116, n117, n118])
     cantusFirmus2 = Stream([n110, n115, n114, n119, n120, n113, n121, n116, n117, n118])
     cantusFirmus3 = Stream([n114, n119, n115, n121, n122, n123, n124, n125, n126, n127, n128])
     
     choices = [cantusFirmus1, cantusFirmus2, cantusFirmus3, cantusFirmus3, cantusFirmus3, cantusFirmus3]
     cantusFirmus = random.choice(choices)
 
     thisScale = aMinor
     if cantusFirmus is cantusFirmus3:
         thisScale = dMinor
         
     goodHarmony = False
     goodMelody = False
 
     while (goodHarmony == False or goodMelody == False):
         try:
             hopeThisWorks = counterpoint1.generateFirstSpecies(cantusFirmus, thisScale)
             print([note1.name + str(note1.octave) for note1 in hopeThisWorks.notes])
 
             hopeThisWorks2 = counterpoint1.raiseLeadingTone(hopeThisWorks, thisScale)
             print([note1.name + str(note1.octave) for note1 in hopeThisWorks2.notes])
def read_vmf_string(vmf_string):
    """
    Reads VMF data from a string to a Score Stream.

    :param vmf_string: The contents of the VMF file as a string.
    :return: A music21 score instance containing the music in the VMF file.
    """

    parts_converted = {}

    vmf = json.loads(vmf_string)

    # create a score
    score = Score()

    # Get the initial data
    number_of_parts = vmf['header']['number_of_parts']
    number_of_voices = vmf['header']['number_of_voices']
    smallest_note = float(Fraction(vmf['header']['tick_value']))

    # create the parts and first measure.
    for voice_number in range(number_of_parts):
        part = Part()
        voice = Voice()

        part.append(voice)

        score.append(part)

    # get the body of the vmf
    body = vmf['body']

    part_number = 0

    # We do this because we want to do each part at a time.
    for voice_number in range(number_of_voices):
        # Get all ticks for a given part.
        part = [tick[voice_number] for tick in body]

        current_element = None
        current_voice = None

        # iterate over each tick
        for tick in part:

            if current_voice is None:
                # Get the parent part if it exists.
                try:
                    current_part = parts_converted[tick[-1]]

                    # add a new voice and write to it.
                    voice = Voice()

                    initial_key_signature = KeySignature(vmf['header']['key_signature']['0.0'])
                    initial_time_signature = TimeSignature(vmf['header']['time_signature']['0.0'])

                    voice.append(initial_key_signature)
                    voice.append(initial_time_signature)

                    current_part.append(voice)

                except KeyError:
                    # Add it to our dictionary otherwise.
                    current_part = score.parts[part_number]
                    part_number += 1

                    parts_converted[tick[-1]] = current_part

                # Get the last voice.
                current_voice = current_part.voices[-1]

            if tick[0] == 1:
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # Find how many notes to write. This will always be an int.
                number_of_notes = int(find_number_of_notes_in_tick(tick))

                if number_of_notes == 1:
                    # create a new note
                    current_element = Note(Pitch(pitchClass=tick[3], octave=tick[4]))
                else:
                    pitches = []

                    # create the pitches.
                    # From the beginning to the end of the pitch section of the tick.
                    for i in range(FIRST_PITCH_INDEX, FIRST_PITCH_INDEX + 2 * number_of_notes, 2):
                        pitch = Pitch(pitchClass=tick[i], octave=tick[i + 1])
                        pitches.append(pitch)

                    # create a new chord with these pitches.
                    current_element = Chord(pitches)


                # set the velocity of the note.
                current_element.volume.velocity = DynamicConverter.vmf_to_velocity(tick[DYNAMIC_BIT])
                # set the articulation
                if tick[ARTICULATION_BIT] != 0:
                    current_element.articulations.append(
                        ArticulationConverter.vmf_to_articulation(tick[ARTICULATION_BIT]))

                # set the value for this tick.
                current_element.quarterLength = smallest_note
            elif tick[0] == 2:
                # extend previous note
                current_element.quarterLength += smallest_note

            elif tick[0] == 0 and (isinstance(current_element, note.Note) or current_element is None):
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # create new rest
                current_element = Rest()

                # Set the value for this tick.
                current_element.quarterLength = smallest_note

            elif tick[0] == 0 and isinstance(current_element, note.Rest):
                # extend previous rest.
                current_element.quarterLength += smallest_note

        # Append the last element in progress.
        if current_element is not None:
            # check for precision and adjust
            rounded = round(current_element.quarterLength)
            if abs(current_element.quarterLength - rounded) < PRECISION:
                current_element.quarterLength = rounded

            # append to the part
            current_voice.append(current_element)

    # create the stream for time signature changes
    time_signature_stream = Stream()

    for offset, time_signature_str in sorted(vmf['header']['time_signature'].items()):
        time_signature = TimeSignature(time_signature_str)
        time_signature_stream.append(time_signature)
        time_signature_stream[-1].offset = float(offset)

    # finish up the file.
    for part in score.parts:
        for voice in part.voices:
            voice.makeMeasures(inPlace=True, meterStream=time_signature_stream)

        for offset, t in sorted(vmf['header']['tempo'].items()):
            mm = tempo.MetronomeMark(number=t, referent=note.Note(type='quarter'))
            voice.insert(offset, mm)

        for offset, ks in sorted(vmf['header']['key_signature'].items()):
            voice.insert(offset, KeySignature(ks))

    return score