Example No. 1
    def m21(self):

        score = Score()
        part = Part()
        bars = len(self) // self.bar_size

        measure = Measure()
        measure.insert(0.0, self.meter.m21)

        for bar in range(bars):

            start = bar * self.bar_size
            end = (bar + 1) * self.bar_size
            positions = [
                position % self.bar_size
                for position in self.iter_onset_positions(start=start, end=end)
            ]
            # Append an extra position to make sure that the last ioi is
            # between the last note and the end of the bar
            positions.append(self.bar_size)
            offsets = [self.get_offset(p) for p in positions]
            iois = [b - a for a, b in zip(offsets[:-1], offsets[1:])]

            for offset, ioi in zip(offsets[:-1], iois):

                note = Note('a5')
                note.duration.quarterLength = ioi * 4.0
                measure.insert(offset * 4.0, note)

            part.append(measure)
            measure = Measure()

        score.append(part)
        return score.makeMeasures()
Example No. 2
def setup_score(title, composer):
    timestamp = datetime.datetime.utcnow()
    metadata = Metadata()
    metadata.title = title
    metadata.composer = composer
    metadata.date = timestamp.strftime('%Y/%m/%d')

    score = Score()
    score.insert(0, metadata)

    return score
def write_notation_cell(music, path, event_index):
    score = Score()

    metadata = Metadata()
    metadata.title = ''
    metadata.composer = ''
    score.insert(0, metadata)

    layout = ScoreLayout()
    layout.scalingMillimeters = 1.25
    layout.scalingTenths = 40
    score.insert(0, layout)

    for musician in music:
        instrument_name = musician['instrument']
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        if instrument.instrumentName == 'Violoncello':
            instrument.partName = 'Cello'
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        parts = []
        part = Part()
        parts.append(part)
        part.insert(0, instrument)

        score.insert(0, part)
        # score.insert(0, StaffGroup(parts))

        for event in musician['music']:
            pitches = event['pitches']
            dur = event['duration']
            # if not pitches or pitches == 'stop':
            #     note = Rest()
            if len(pitches) == 1:
                pitch = Pitch(pitches[0] + 60)
                note = Note(pitch)
            else:
                note = Chord(notes=[Pitch(p + 60) for p in pitches])

            duration = Duration()
            duration.fill([dur])
            note.duration = duration

            part.append(note)

    file_path = os.path.join(path, str(event_index).zfill(2))
    musicxml_file_path = file_path + '.xml'
    png_output_file_path = file_path + '.png'

    score.write('musicxml', musicxml_file_path)

    write_png_with_musescore(musicxml_file_path, png_output_file_path, dpi=600)
Example No. 4
def notate_score(musician_names, instrument_names, music):
    score = Score()

    for musician_name, instrument_name in zip(musician_names,
                                              instrument_names):
        instrument = get_instrument(instrument_name)
        instrument.partName = instrument.instrumentName
        instrument.partAbbreviation = instrument.instrumentAbbreviation

        parts = []
        part = Part()
        parts.append(part)
        part.insert(0, instrument)

        score.insert(0, part)
        score.insert(0, StaffGroup(parts))

        notes = music[musician_name]

        for pitches in notes:
            if not pitches or pitches == 'stop':
                note = Rest()
            elif len(pitches) == 1:
                pitch = Pitch(pitches[0] + 60)
                note = Note(pitch)
            else:
                note = Chord(notes=[Pitch(p + 60) for p in pitches])

            duration = Duration()
            duration.fill([4.0])
            note.duration = duration

            part.append(note)

    score.show('musicxml', '/Applications/Sibelius 7.5.app')
def test_5():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('F4', quarterLength=0.5))
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Note('A3', quarterLength=0.5))
    return Score([top, bot])
def test_2():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.25))
    top.append(Rest(quarterLength=0.25))
    bot.append(Note('G3', quarterLength=0.25))
    bot.append(Rest(quarterLength=0.25))
    return Score([top, bot])
Example No. 7
def generateScore(chords, lengths=None, ts="4/4"):
    """Generates a four-part score from a sequence of chords.

    Soprano and alto parts are displayed on the top (treble) clef, while tenor
    and bass parts are displayed on the bottom (bass) clef, with correct stem
    directions.
    """
    if lengths is None:
        lengths = [1 for _ in chords]
    voices = [Voice([Piano()]) for _ in range(4)]
    for chord, length in zip(chords, lengths):
        bass, tenor, alto, soprano = [
            Note(p, quarterLength=length) for p in chord.pitches
        ]
        bass.addLyric(chord.lyric)
        bass.stemDirection = alto.stemDirection = "down"
        tenor.stemDirection = soprano.stemDirection = "up"
        voices[0].append(soprano)
        voices[1].append(alto)
        voices[2].append(tenor)
        voices[3].append(bass)

    female = Part([TrebleClef(), TimeSignature(ts), voices[0], voices[1]])
    male = Part([BassClef(), TimeSignature(ts), voices[2], voices[3]])
    score = Score([female, male])
    return score
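
# Usage sketch (hypothetical data): generateScore expects each Chord to list its
# pitches in bass-tenor-alto-soprano order, matching the unpacking above, and it
# copies chord.lyric onto the bass note, so set .lyric before calling.
from music21.chord import Chord

cadence = [
    Chord(['C3', 'G3', 'E4', 'C5']),
    Chord(['G2', 'B3', 'D4', 'G4']),
    Chord(['C3', 'E3', 'G4', 'C5']),
]
for symbol, ch in zip(['I', 'V', 'I'], cadence):
    ch.lyric = symbol

four_part_score = generateScore(cadence, lengths=[1, 1, 2], ts='4/4')
four_part_score.show()  # or four_part_score.write('musicxml', fp='cadence.xml')
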
def test_16():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('A4', quarterLength=0.75))  # 0.5
    top.append(Note('G4', quarterLength=0.25))  # 1.25
    top.append(Note('B4', quarterLength=0.5))  # 1.5
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Rest(quarterLength=0.25))  # 0.5
    bot.append(Note('F3', quarterLength=0.75))  # 0.75
    bot.append(Note('E3', quarterLength=0.5))  # 1.5
    return Score([top, bot])
Example No. 9
def generate_counterpoint_attempt():
    # Import the melody, and process it
    filename = sys.argv[1]
    song = transform.import_mid(filename)
    time_signature_symbol = util.time_signature_symbol(song)
    transform.populate_measures(song)

    # Compute a counterpoint melody
    original_melody, generate_melody = counterpoint.algorithm(
        song, time_signature_symbol)

    # Build a new score
    score = Score()
    score.insert(0, original_melody)
    score.insert(0, generate_melody)

    # Export the score
    transform.export_mid(score, filename + '_gcp')
    transform.export_ly(score, filename + '_gcp')
    transform.export_pdf(score, filename + '_gcp')
Example No. 10
def test_19():
    """
    NB: This test is designed specifically to ensure that the _event_finder()
    finds Rest objects when they happen at the same time as Note objects, when
    only Rest objects are requested to be found.
    """
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('G5', quarterLength=0.5))
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Rest(quarterLength=0.5))
    return Score([top, bot])
Example No. 11
def test_18():
    """
    NB: This test is designed specifically to ensure that the _event_finder()
    doesn't stop processing when it doesn't find an element of the expected types
    at an offset. You should ask it to look for Rest objects only.
    """
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Rest(quarterLength=0.5))
    bot.append(TimeSignature('4/4'))
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Rest(quarterLength=0.5))
    return Score([top, bot])
Example No. 12
def test_17():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.5))
    top.append(Note('A4', quarterLength=0.75))  # 0.5
    top.append(Note('F4', quarterLength=0.75))  # 1.25
    top.append(Note('E4', quarterLength=0.5))  # 2.0
    bot.append(Note('G3', quarterLength=0.5))
    bot.append(Note('A3', quarterLength=0.25))  # 0.5
    bot.append(Note('F3', quarterLength=0.375))  # 0.75
    bot.append(Rest(quarterLength=0.25))  # 1.125
    bot.append(Note('G3', quarterLength=0.625))  # 1.375
    bot.append(Note('G3', quarterLength=0.5))  # 2.0
    return Score([top, bot])
Example No. 13
def make_music21_score(
    part_names=('violin', 'flute', 'oboe', 'clarinet', 'alto_saxophone',
                'trumpet', 'bass', 'percussion'),
    title='Title',
    composer='Jonathan Marmor',
    time_signature=None,
    starting_tempo_bpm=60,
    starting_tempo_quarter_duration=1.0,
    timestamp=None,
):
    if not timestamp:
        timestamp = datetime.datetime.utcnow()
    metadata = Metadata()
    metadata.title = title
    metadata.composer = composer
    metadata.date = timestamp.strftime('%Y/%m/%d')

    score = Score()
    score.insert(0, metadata)

    for part_name in part_names:

        instrument_name, instrument_number = parse_part_name(part_name)

        instrument = instrument_data[instrument_name]

        part = Part()

        metronome_mark = MetronomeMark(
            number=starting_tempo_bpm,
            referent=Duration(starting_tempo_quarter_duration))
        part.append(metronome_mark)

        if time_signature:
            # Should be a string like '12/8'
            music21_time_signature = TimeSignature(time_signature)
            part.append(music21_time_signature)

        m21_instrument = instrument['class']()
        m21_instrument.partName = instrument['name']
        m21_instrument.partAbbreviation = instrument['abbreviation']

        if instrument_number > 1:
            m21_instrument.partName = '{} {}'.format(instrument['name'],
                                                     instrument_number)
            m21_instrument.partAbbreviation = '{} {}'.format(
                instrument['abbreviation'], instrument_number)

        part.insert(0, m21_instrument)

        clef = instrument.get('clef')
        if clef:
            part.append(clef())

        score.insert(0, part)

    return score
Example No. 14
    def matrix_to_score(self, matrix, verbose=False):
        '''
        Takes a matrix of shape (P, T, 2) and turns it into a
        music21.stream.Score object, where P is the number of parts, T is the
        number of time slices, and the last dimension is the note vector.
        '''
        # (4 parts, # ticks, 2)
        assert len(matrix.shape) == 3, \
            "Input matrix needs to have 3-dimensions."

        num_parts, num_ticks, num_dim = matrix.shape
        assert num_parts == 4, "Input matrix needs to have 4 parts."
        assert num_ticks > 0, "No time slices in this matrix."
        assert num_dim == 2, "Note vector size mismatch."

        # need to make sure all pieces start with an articulated note, even if
        # it's a rest.
        matrix[:, 0, 1] = [1, 1, 1, 1]

        score = Score()
        parts = list(map(self._matrix_to_part, matrix))

        parts[0].insert(0, instrument.Violin())
        parts[0].partName = "Violin I"
        parts[0].clef = clef.TrebleClef()

        parts[1].insert(0, instrument.Violin())
        parts[1].partName = "Violin II"
        parts[1].clef = clef.TrebleClef()

        parts[2].insert(0, instrument.Viola())
        parts[2].clef = clef.AltoClef()

        parts[3].insert(0, instrument.Violoncello())
        parts[3].clef = clef.BassClef()
        _ = list(map(lambda part: score.append(part), parts))

        return score
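
# Usage sketch (hypothetical values): the two entries of the note vector are
# assumed here to be (MIDI pitch, articulation flag), which is what the
# matrix[:, 0, 1] assignment above suggests; their real meaning is defined by
# _matrix_to_part, which is not shown.
import numpy as np

matrix = np.zeros((4, 16, 2), dtype=int)
matrix[:, :, 0] = np.array([[72], [67], [64], [48]])  # SATB-style chord held for 16 ticks
matrix[:, ::4, 1] = 1                                 # re-articulate on every fourth tick

score = encoder.matrix_to_score(matrix)  # `encoder` = hypothetical instance of the class above
score.show('text')
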
Example No. 15
def analyze_parts(sample):
    try:
        partitions = instrument.partitionByInstrument(sample)
        if partitions is None:
            partitions = sample

        if 1 < len(partitions) <= 3:
            parted = []
            for i in range(len(partitions)):
                comb = combinations(partitions, i + 1)
                parted.extend([Score(c) for c in comb])
            return parted
        else:
            return partitions
    except Exception:
        print('bad file.')
Example No. 16
def test_14():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=0.0625))
    top.append(Note('G4', quarterLength=0.0625))  # 0.0625
    top.append(Note('G4', quarterLength=0.0625))  # 0.125
    top.append(Note('G4', quarterLength=0.0625))  # 0.1875
    top.append(Note('G4', quarterLength=0.0625))  # 0.25
    top.append(Note('G4', quarterLength=0.0625))  # 0.3125
    top.append(Note('G4', quarterLength=0.0625))  # 0.375
    top.append(Note('G4', quarterLength=0.0625))  # 0.4375
    top.append(Note('G4', quarterLength=0.5))  # 0.5
    bot.append(Note('G3', quarterLength=0.125))
    bot.append(Rest(quarterLength=0.125))  # 0.125
    bot.append(Note('A3', quarterLength=0.125))  # 0.25
    bot.append(Rest(quarterLength=0.0625))  # 0.375
    bot.append(Rest(quarterLength=0.0625))  # 0.4375
    bot.append(Note('G3', quarterLength=0.5))  # 0.5
    return Score([top, bot])
Example No. 17
def to_music21(music: "Music") -> Score:
    """Return a Music object as a music21 Score object.

    Parameters
    ----------
    music : :class:`muspy.Music`
        Music object to convert.

    Returns
    -------
    `music21.stream.Score`
        Converted music21 Score object.

    """
    # Create a new score
    score = Score()

    # Metadata
    if music.metadata:
        score.append(to_music21_metadata(music.metadata))

    # Tracks
    for track in music.tracks:
        # Create a new part
        part = Part()
        part.partName = track.name

        # Add tempos
        for tempo in music.tempos:
            part.append(to_music21_metronome(tempo))

        # Add time signatures
        for time_signature in music.time_signatures:
            part.append(to_music21_time_signature(time_signature))

        # Add key signatures
        for key_signature in music.key_signatures:
            part.append(to_music21_key(key_signature))

        # Add notes to part
        for note in track.notes:
            m21_note = M21Note(_get_pitch_name(note.pitch))
            m21_note.offset = note.time / music.resolution
            m21_note.quarterLength = note.duration / music.resolution
            part.append(m21_note)

        # Append the part to score
        score.append(part)

    return score
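
# Usage sketch: a tiny hand-built muspy.Music object (hypothetical values)
# converted to a music21 Score. Times and durations are in ticks, so to_music21
# divides them by `resolution` to get quarter lengths.
import muspy

music = muspy.Music(
    resolution=4,
    tempos=[muspy.Tempo(time=0, qpm=120)],
    time_signatures=[muspy.TimeSignature(time=0, numerator=4, denominator=4)],
    tracks=[
        muspy.Track(
            name="Melody",
            notes=[
                muspy.Note(time=0, pitch=60, duration=4, velocity=64),
                muspy.Note(time=4, pitch=64, duration=4, velocity=64),
                muspy.Note(time=8, pitch=67, duration=8, velocity=64),
            ],
        )
    ],
)

score = to_music21(music)
score.show('text')
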
Example No. 18
def generate_song(chord_length=15, show_symbols=False):
   '''Generate a random chord progression with a piano/bass comp and
      return as a Music21 score. Default length of 15 chord changes
      (plus the fixed ones at start/end) makes a score about one A4 page.'''
   # Start with a blank score
   score = Score()
   # TODO: Add swing rhythm indicator without having to do it manually
   # in musescore (how to with music21?)

   # Add tracks/instruments - names etc. will be set automatically
   piano = Part()
   piano.insert(0, Piano())
   score.insert(0,piano)

   bass = Part()
   bass.insert(0, AcousticBass())
   score.insert(0,bass)

   #hihat = Part()   TODO drum kit

   # Get a random progression
   prog = ProgressionGenerator()
   prog.generate(chord_length)

   # Go through the progression, adding a comp for each chord
   for chord_choice in prog.chords:
      roman = RomanNumeral(chord_choice)   # Convert string into a generic Roman I/IV/etc chord

      # Duration = eights until the next chord change.
      # at least 1 bar on "important" chords (I,IV,V)
      if chord_choice in ('Imaj7', 'IVmaj7', 'V7'):
         duration = random.choice((8,8,8,8,10,10,12,12,14,16))
      else: # 1 bar or less on "minor" (pun intended) chords
         duration = random.choice((2,4,4,4,6,6,8,8,8,8))

      add_piano_riff(roman, duration, piano, show_symbols)
      add_bass_walk(roman, duration, bass)
      # TODO drum part

   # ending riff on last bar or two
   duration = random.choice((8,8,16))
   add_piano_closing(RomanNumeral('Imaj7'), duration, piano, show_symbols)
   add_bass_closing(RomanNumeral('Imaj7'), duration, bass)
   return score
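
# Driver sketch: generate_song() relies on helpers defined elsewhere in this
# module (ProgressionGenerator, add_piano_riff, add_bass_walk, ...), so this is
# only illustrative. File names are arbitrary.
if __name__ == '__main__':
    song = generate_song(chord_length=12, show_symbols=True)
    song.write('musicxml', fp='generated_comp.xml')  # open in MuseScore to add the swing marking
    song.write('midi', fp='generated_comp.mid')
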
Example No. 19
def scoreToText(score: Score, force_quantum=None) -> List[str]:
    """Converts a score into a list of string tokens

    Args:
        score (Score): music21.Score object
        force_quantum (int): Disabled by default. If an int is given, the
        method will squeeze the notes and rests onto the closest quantum.
        The int is the number of quanta per beat; the default depends on the MIDI file.

    Returns:
        List[str]: List of tokens
    """
    tokens: List[str] = []

    if force_quantum is not None:
        raise NotImplementedError

    # "chordify" the score: all voices and parallel parts turn into one
    chords = score.chordify()

    # find the quantum duration
    durations: List[Fraction] = []
    offsets: List[Fraction] = []
    for chord in chords.notesAndRests:
        durations.append(Fraction(chord.quarterLength).limit_denominator(100))
        offsets.append(Fraction(chord.offset).limit_denominator(100))

    # usually it is 12
    quantum_duration = np.lcm(max((d.denominator for d in durations)),
                              max((o.denominator for o in offsets)))

    # take the derivative of offsets, to get the rate of change in quanta/note
    offset_rates: List[Fraction] = np.diff(offsets)

    assert len(offset_rates) == len(durations) - 1

    # lead token stream with metadata
    tokens.append("quantum")
    tokens.append(str(quantum_duration))

    # score starts here
    for chord, duration, offset in zip_longest(chords.notesAndRests, durations,
                                               offset_rates):

        # get note, chord, rest info
        if type(chord) is Chord:
            tokens.extend(chordToTokens(chord))
        elif type(chord) is Note:
            tokens.extend(noteToTokens(chord))
        elif type(chord) is Rest:
            tokens.append("Rest")
            tokens.append(str(duration))
        else:
            raise TypeError(
                "Unknown type to turn into tokens: {t}".format(t=type(chord)))

        # xxsep tokens
        if offset is not None:
            tokens.append("xxsep")
            tokens.append(str(offset.numerator))

    return tokens
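
# Usage sketch (hypothetical file path): parse any score music21 understands,
# then tokenize it. chordToTokens/noteToTokens are assumed to live in the same
# module as scoreToText.
from music21 import converter

parsed = converter.parse('examples/prelude.mid')
tokens = scoreToText(parsed)
print(tokens[:16])  # starts with the metadata pair, e.g. ['quantum', '12', ...]
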
Example No. 20
def build_midi(harmony, melody):
    chords_dict = get_chord_dicts()[1]

    song = []
    for i, eighth in enumerate(melody):
        # eighth = multi_hot_to_pianoroll(piano_roll[:midi_range]) # now make_music returns pianorolls already
        # chord = one_hot_to_index(piano_roll[-chord_classes:]) # TODO add chord to midi
        # print(f'EIGHTH: {eighth}') # DEBUG

        song_notes = []
        for note_ in eighth:
            note_name = NOTES[note_%12]
            note_octave = start_octave + note_//12 # starting from C2
            song_notes.append(note_name + str(note_octave))

        song_chords = []
        full_chord = chords_dict[harmony[i]]
        if full_chord != '<unk>':
            for chord_ in full_chord:
                chord_name = NOTES[chord_%12]
                song_chords.append(chord_name + str(start_octave-1))

        song.append(("REST" if len(song_notes) == 0 else song_notes, "REST" if len(song_chords) == 0 else song_chords))

    notes_score = Score()
    notes_score.append(instrument.Piano())
    chords_score = Score()
    chords_score.append(instrument.KeyboardInstrument())
    bass_score = Score()
    bass_score.append(instrument.ElectricBass())

    current_note_length = 0
    current_chord_length = 0

    for i, _ in enumerate(song):

        current_note_length += 0.5
        current_chord_length += 0.5

        # print(f'NOTE: {song[i][0]}\t\t\t- CHORD: {song[i][1]}')

        if i < len(song)-1:
            # note
            if song[i][0] != song[i+1][0]:
                if song[i][0] == "REST":
                    notes_score.append(note.Rest(duration=Duration(current_note_length)))
                else:
                    notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))
                current_note_length = 0

            # chord
            if song[i][1] != song[i+1][1] or current_chord_length == 4:
                if song[i][1] == "REST":
                    chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
                else:
                    chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))
                current_chord_length = 0
        else:
            # note
            if song[i][0] == "REST":
                notes_score.append(note.Rest(duration=Duration(current_note_length)))
            else:
                notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))

            # chord
            if song[i][1] == "REST":
                chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
            else:
                chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))

    song_stream = Stream()
    song_stream.insert(0, notes_score)
    song_stream.insert(0, chords_score)
    song_stream.insert(0, bass_score)

    if not os.path.exists('melodies'):
        os.makedirs('melodies')
    dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    song_stream.write('midi', fp=f'melodies/generated_{dt}.mid')
Example No. 21
        #     new[0] += normal(dur[0], dur[1])
        #     new[1] += normal(freq[0], freq[1])
        notes.append(new)

        total_dur += new[0]

    return MIDIReader.list_to_stream(notes)

if __name__ == '__main__':

    snn = SonataNeuralNetwork()
    for midi in listdir(music_dir):
        print(midi)
        snn.read(path.join(music_dir, midi))
    print('training')
    t_net, b_net = snn.train_network()

    # snn.append_errors()
    # td, tf, bd, bf = snn.get_error_vals()

    treble = stream_from_notes([[1, log(261.6)], [1, log(329.6)], [1, log(392)], [1, log(329.6)], [1, log(392)], [1, log(523.3)]], t_net)
    treble.insert(0, clef.TrebleClef())
    bass = stream_from_notes([[0.5, log(130.8)], [0.5, log(164.8)], [0.5, log(196)], [1, log(261.6)], [0.5, log(196)], [1, log(164.8)]], b_net)
    bass.insert(0, clef.BassClef())
    s = Score()
    s.append(treble)
    s.append(bass)
    bass.offset = 0

    s.show()
Example No. 22
def tensors_to_stream(outputs, config, metadata=None):
    cur_measure_number = 0
    parts = {}
    for part_name in outputs.keys():
        if part_name == 'extra':
            continue
        part = Part(id=part_name)
        parts[part_name] = part

    last_time_signature = None
    cur_time_signature = '4/4'
    for step in range(outputs['soprano'].shape[0]):
        extra = outputs['extra'][step]
        if extra[indices_extra['has_time_signature_3/4']].item() == 1:
            cur_time_signature = '3/4'
        elif extra[indices_extra['has_time_signature_4/4']].item() == 1:
            cur_time_signature = '4/4'
        elif extra[indices_extra['has_time_signature_3/2']].item() == 1:
            cur_time_signature = '3/2'
        cur_time_pos = extra[indices_extra['time_pos']].item()
        has_fermata = extra[indices_extra['has_fermata']].item() == 1

        if cur_time_pos == 1.0 or cur_measure_number == 0:
            for part_name, part in parts.items():
                part.append(Measure(number=cur_measure_number))
                if cur_measure_number == 0:
                    if part_name in ['soprano', 'alto']:
                        part[-1].append(clef.TrebleClef())
                    else:
                        part[-1].append(clef.BassClef())
                    key = int(
                        torch.argmax(
                            outputs['extra'][0, indices_extra['has_sharps_0']:
                                             indices_extra['has_sharps_11'] +
                                             1],
                            dim=0).item())
                    if key >= 6:
                        key -= 12
                    part[-1].append(KeySignature(key))
                    part[-1].append(MetronomeMark(number=90))
            cur_measure_number += 1

        if last_time_signature is None or cur_time_signature != last_time_signature:
            for part in parts.values():
                part[-1].append(TimeSignature(cur_time_signature))
            last_time_signature = cur_time_signature

        for part_name, part in parts.items():
            idx = torch.argmax(outputs[part_name][step]).item()
            if idx == indices_parts['is_continued']:
                try:
                    last_element = part[-1].flat.notesAndRests[-1]
                    cur_element = deepcopy(last_element)
                    if last_element.tie is not None and last_element.tie.type == 'stop':
                        last_element.tie = Tie('continue')
                    else:
                        last_element.tie = Tie('start')
                    cur_element.tie = Tie('stop')
                except IndexError:
                    logging.debug(
                        'Warning: "is_continued" on first beat. Replaced by rest.'
                    )
                    cur_element = Rest(quarterLength=config.time_grid)
                part[-1].append(cur_element)
            elif idx == indices_parts['is_rest']:
                part[-1].append(Rest(quarterLength=config.time_grid))
            else:
                pitch = Pitch()
                part[-1].append(Note(pitch, quarterLength=config.time_grid))
                # Set pitch value AFTER appending to measure in order to avoid unnecessary accidentals
                pitch.midi = idx + min_pitches[part_name] - len(indices_parts)

        if has_fermata:
            for part in parts.values():
                fermata = Fermata()
                fermata.type = 'upright'
                part[-1][-1].expressions.append(fermata)

    score = Score()
    if metadata is not None:
        score.append(Metadata())
        score.metadata.title = f"{metadata.title} ({metadata.number})"
        score.metadata.composer = f"Melody: {metadata.composer}\nArrangement: BachNet ({datetime.now().year})"
    for part in parts.values():
        part[-1].rightBarline = 'light-heavy'

    score.append(parts['soprano'])
    if 'alto' in parts:
        score.append(parts['alto'])
        score.append(parts['tenor'])
    score.append(parts['bass'])

    score.stripTies(inPlace=True, retainContainers=True)

    return score
Example No. 23
    def predict(self, data, count, temp, length=500):

        songs = list(set([i.song for i in data]))

        bug = True
        while bug:
            try:
                condition = True
                while condition:
                    try:
                        random_song = random.choice(songs)
                        slice_by_instrument = dict(zip(self.target_instruments_str, [[] for i in self.target_instruments_str]))
                        for j in self.target_instruments_str:
                            for i in data:
                                if i.song == random_song and i.instrument == j:
                                    slice_by_instrument[j].append(i)

                        slice_by_instrument_without_rests = dict(zip(self.target_instruments_str, [[] for i in self.target_instruments_str]))

                        for i in slice_by_instrument.keys():
                            for song in slice_by_instrument[i]:
                                if not isinstance(song.chords[0], note.Rest):
                                    slice_by_instrument_without_rests[i].append(song)
                            if len(slice_by_instrument_without_rests[i]) != 0:
                                slice_by_instrument[i] = random.choice(slice_by_instrument_without_rests[i])
                            else:
                                slice_by_instrument[i] = random.choice(slice_by_instrument[i])

                        condition = False
                    except IndexError:
                        continue

                guitar_chords = slice_by_instrument['Electric Guitar'].chords
                guitar_durations = slice_by_instrument['Electric Guitar'].durations
                bass_chords = slice_by_instrument['Electric Bass'].chords
                drum_chords = slice_by_instrument['Piano'].chords


                starting_slice_notes = (np.asarray(encode_using_mapper(guitar_chords, self.guitar_mapper)) / len(self.guitar_mapper))[:20]
                starting_slice_durations = (np.asarray(encode_using_mapper(guitar_durations, self.guitar_durations_mapper)) / len(
                    self.guitar_durations_mapper))[:20]
                starting_slice_bass = (np.asarray(encode_using_mapper(bass_chords, self.bass_mapper)) / len(self.bass_mapper))[:20]
                starting_slice_drum = (np.asarray(encode_using_mapper(drum_chords, self.drum_mapper)) / len(self.drum_mapper))[:20]

                songs_in_db_cnt = len(get_songs_by_author(self.db_name))
                to_generate = count

                for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):

                    generated_output = generate_multi_instrument_notes(self.model, starting_slice_notes, starting_slice_durations,
                                                                       starting_slice_bass, starting_slice_drum, self.guitar_mapper,
                                                                       self.guitar_durations_mapper, self.bass_mapper,
                                                                       self.drum_mapper, self.guitar_mapper_list, self.durations_mapper_list, temp=temp, length = length)

                    (guitar_output, bass_output, drum_output) = generated_output


                    guitar_part = create_midipart_with_durations(guitar_output, target_instrument=self.target_instruments[0])
                    bass_part = create_midipart_with_durations(bass_output, target_instrument=self.target_instruments[1])
                    #drum_part = create_drum_part_with_durations(drum_output)

                    # TODO fix drum sounds

                    guitar_part.insert(0, self.target_instruments[0])
                    bass_part.insert(0, self.target_instruments[1])
                    #drum_part.insert(0, self.target_instruments[2])

                    full_midi = Score()
                    full_midi.insert(0, guitar_part)
                    full_midi.insert(0, bass_part)
                    #full_midi.insert(0, drum_part)

                    midi_path = f'LSTM_{self.instrument_name}_{j}.mid'

                    full_midi.write('midi', fp=midi_path)
                    midi_to_wav(midi_path, f'static/songs/LSTM_{self.instrument_name}_{j}.wav')

                    self.save_song_to_db(f'LSTM_{self.instrument_name}_{j}.wav')
                bug = False
            except ValueError:
                continue
Example No. 25
    key_name = key.step.upper() if key.mode == "major" else key.step.lower()

    for note in notes:
      if note.part == key.part and note.measure == key.measure:
        note.step = Interval(noteStart=Note(Key(key_name).asKey().tonic), noteEnd=note._music21_object).semitones % 12

  return notes


if __name__ == "__main__":
  """
  How to create Mupix Objects.
  """
  from music21.stream import Score, Part, Measure
  from music21.key import KeySignature
  from music21.note import Note  # noqa

  s = Score()
  p1 = Part(id="part1")
  m1 = Measure(number=1)
  m1.append(KeySignature(3))
  m1.append(Note("C4", type="eighth"))
  m2 = Measure(number=2)
  m2.append(KeySignature(0))
  m2.append(Note("G4", type="eighth"))
  p1.append([m1, m2])
  s.append([p1])

  notes = [NoteObject(item, 1) for item in s.recurse().notes if not item.isChord]
  print(notes)
Example No. 26
    def __init__(self, ranges=False):
        score = self.score = Score()
        self.instruments = self.i = Instruments()
        self.parts = Parts(self.i)

        # Make Metadata
        timestamp = datetime.datetime.utcnow()
        metadata = Metadata()
        metadata.title = 'Early Montreal'
        metadata.composer = 'Jonathan Marmor'
        metadata.date = timestamp.strftime('%Y/%m/%d')
        score.insert(0, metadata)

        [score.insert(0, part) for part in self.parts.l]
        score.insert(0, StaffGroup(self.parts.l))

        if ranges:
            # Don't make a piece, just show the instrument ranges
            for inst, part in zip(self.instruments.l, self.parts.l):
                measure = Measure()
                measure.timeSignature = TimeSignature('4/4')
                low = Note(inst.lowest_note)
                measure.append(low)
                high = Note(inst.highest_note)
                measure.append(high)
                part.append(measure)
            return

        # 18 to 21 minutes
        piece_duration_minutes = scale(random.random(), 0, 1, 18, 21)

        # Make the "songs"
        songs = []
        total_minutes = 0
        n = 1
        while total_minutes < piece_duration_minutes:
            print('Song {}'.format(n))
            n += 1
            song = Song(self)
            songs.append(song)
            total_minutes += song.duration_minutes

        # Make notation
        previous_duration = None
        for song in songs:
            for bar in song.bars:
                for part in bar.parts:
                    measure = Measure()
                    if bar.tempo:
                        measure.insert(
                            0,
                            MetronomeMark(number=bar.tempo,
                                          referent=Duration(1)))
                        measure.leftBarline = 'double'
                    if bar.duration != previous_duration:
                        ts = TimeSignature('{}/4'.format(bar.duration))
                        measure.timeSignature = ts

                    # Fix Durations
                    durations = [note['duration'] for note in part['notes']]

                    components_list = split_at_beats(durations)
                    components_list = [
                        join_quarters(note_components)
                        for note_components in components_list
                    ]
                    for note, components in zip(part['notes'],
                                                components_list):
                        note['durations'] = components

                    for note in part['notes']:
                        if note['pitch'] == 'rest':
                            n = Rest()
                        elif isinstance(note['pitch'], list):
                            pitches = []
                            for pitch_number in note['pitch']:
                                p = Pitch(pitch_number)
                                # Force all flats
                                if p.accidental and p.accidental.name == 'sharp':
                                    p = p.getEnharmonic()
                                pitches.append(p)
                            n = Chord(notes=pitches)

                            # TODO add slurs
                            # TODO add glissandos
                            # TODO add -50 cent marks

                        else:
                            p = Pitch(note['pitch'])
                            # Force all flats
                            if p.accidental and p.accidental.name == 'sharp':
                                p = p.getEnharmonic()
                            n = Note(p)

                            # TODO add slurs
                            # TODO add glissandos
                            # TODO add -50 cent marks

                        d = Duration()
                        if note['duration'] == 0:
                            d.quarterLength = .5
                            d = d.getGraceDuration()
                        else:
                            d.fill(note['durations'])
                        n.duration = d

                        measure.append(n)

                    self.parts.d[part['instrument_name']].append(measure)
                previous_duration = bar.duration
Example No. 27
def test_10():
    top, bot = _setup_parts()
    top.append(Note('G4', quarterLength=1.0))
    bot.append(Note('G3', quarterLength=0.25))
    bot.append(Note('A3', quarterLength=0.75))
    return Score([top, bot])
Example No. 28
import datetime

from music21.note import Note
from music21.pitch import Pitch
from music21.chord import Chord
from music21.stream import Part, Score
from music21.metadata import Metadata
from music21.duration import Duration
from music21.layout import StaffGroup
from music21.instrument import fromString as get_instrument
from music21.clef import BassClef

timestamp = datetime.datetime.utcnow()
metadata = Metadata()
metadata.title = 'The Title'
metadata.composer = 'Jonathan Marmor'
metadata.date = timestamp.strftime('%Y/%m/%d')

score = Score()
score.insert(0, metadata)

part = Part()
parts = [part]

oboe = get_instrument('oboe')
part.insert(0, oboe)
score.insert(0, part)
score.insert(0, StaffGroup(parts))

for dur in [[1, .5], [.25], [.25, 2]]:
    pitch = Pitch(60)
    note = Note(pitch)
    duration = Duration()
    duration.fill(dur)
Example No. 29
    def predict(self, data, count, temp, length=500):

        songs = list(set([i.song for i in data]))

        bug = True
        while bug:
            try:
                condition = True
                while condition:
                    try:
                        random_song = random.choice(songs)
                        slice_by_instrument = dict(
                            zip(self.target_instruments_str,
                                [[] for i in self.target_instruments_str]))
                        for j in self.target_instruments_str:
                            for i in data:
                                if i.song == random_song and i.instrument == j:
                                    slice_by_instrument[j].append(i)

                        slice_by_instrument_without_rests = dict(
                            zip(self.target_instruments_str,
                                [[] for i in self.target_instruments_str]))

                        for i in slice_by_instrument.keys():
                            for song in slice_by_instrument[i]:
                                if not isinstance(song.chords[0], note.Rest):
                                    slice_by_instrument_without_rests[
                                        i].append(song)
                            if len(slice_by_instrument_without_rests[i]) != 0:
                                slice_by_instrument[i] = random.choice(
                                    slice_by_instrument_without_rests[i])
                            else:
                                slice_by_instrument[i] = random.choice(
                                    slice_by_instrument[i])

                        condition = False
                    except IndexError:
                        continue

                guitar_chords = slice_by_instrument['Electric Guitar'].chords
                guitar_durations = slice_by_instrument[
                    'Electric Guitar'].durations
                bass_chords = slice_by_instrument['Electric Bass'].chords
                bass_durations = slice_by_instrument['Electric Bass'].durations

                combined_guitar = combine_chords_with_durations(
                    guitar_chords, guitar_durations)
                combined_bass = combine_chords_with_durations(
                    bass_chords, bass_durations)

                starting_slice_notes = (np.asarray(
                    encode_using_mapper(combined_guitar,
                                        self.guitar_mapper)))[:20]
                starting_slice_bass = (np.asarray(
                    encode_using_mapper(combined_bass, self.bass_mapper)))[:20]

                songs_in_db_cnt = len(get_songs_by_author(self.db_name))
                to_generate = count

                for j in range(songs_in_db_cnt, songs_in_db_cnt + to_generate):

                    generated_guitar = generate_notes(
                        self.guitar_model,
                        starting_slice_notes,
                        self.guitar_mapper,
                        mapperlist=self.guitar_mapper_list,
                        temp=temp,
                        length=length,
                        normalize=False,
                        random_start=False)
                    generated_bass = generate_notes(
                        self.bass_model,
                        starting_slice_bass,
                        self.bass_mapper,
                        mapperlist=self.bass_mapper_list,
                        temp=temp,
                        length=length,
                        normalize=False,
                        random_start=False)

                    guitar_part = create_midipart_with_durations(
                        generated_guitar,
                        target_instrument=self.target_instruments[0])
                    bass_part = create_midipart_with_durations(
                        generated_bass,
                        target_instrument=self.target_instruments[1])

                    guitar_part.insert(0, self.target_instruments[0])
                    bass_part.insert(0, self.target_instruments[1])

                    full_midi = Score()
                    full_midi.insert(0, guitar_part)
                    full_midi.insert(0, bass_part)

                    midi_path = f'Transformer_{self.instrument_name}_{j}.mid'

                    full_midi.write('midi', fp=midi_path)
                    midi_to_wav(
                        midi_path,
                        f'static/songs/Transformer_{self.instrument_name}_{j}.wav'
                    )

                    self.save_song_to_db(
                        f'Transformer_{self.instrument_name}_{j}.wav')
                bug = False
            except Exception:
                continue
def read_vmf_string(vmf_string):
    """
    Reads VMF data from a string to a Score Stream.

    :param vmf_string: The contents of the VMF file as a string.
    :return: A music21 score instance containing the music in the VMF file.
    """

    parts_converted = {}

    vmf = json.loads(vmf_string)

    # create a score
    score = Score()

    # Get the initial data
    number_of_parts = vmf['header']['number_of_parts']
    number_of_voices = vmf['header']['number_of_voices']
    smallest_note = float(Fraction(vmf['header']['tick_value']))

    # create the parts and first measure.
    for voice_number in range(number_of_parts):
        part = Part()
        voice = Voice()

        part.append(voice)

        score.append(part)

    # get the body of the vmf
    body = vmf['body']

    part_number = 0

    # We do this because we want to do each part at a time.
    for voice_number in range(number_of_voices):
        # Get all ticks for a given part.
        part = [tick[voice_number] for tick in body]

        current_element = None
        current_voice = None

        # iterate over each tick
        for tick in part:

            if current_voice is None:
                # Get the parent part if it exists.
                try:
                    current_part = parts_converted[tick[-1]]

                    # add a new voice and write to it.
                    voice = Voice()

                    initial_key_signature = KeySignature(vmf['header']['key_signature']['0.0'])
                    initial_time_signature = TimeSignature(vmf['header']['time_signature']['0.0'])

                    voice.append(initial_key_signature)
                    voice.append(initial_time_signature)

                    current_part.append(voice)

                except KeyError:
                    # Add it to our dictionary otherwise.
                    current_part = score.parts[part_number]
                    part_number += 1

                    parts_converted[tick[-1]] = current_part

                # Get the last voice.
                current_voice = current_part.voices[-1]

            if tick[0] == 1:
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # Find how many notes to write. This will always be an int.
                number_of_notes = int(find_number_of_notes_in_tick(tick))

                if number_of_notes == 1:
                    # create a new note
                    current_element = Note(Pitch(pitchClass=tick[3], octave=tick[4]))
                else:
                    pitches = []

                    # create the pitches.
                    # From the beginning to the end of the pitch section of the tick.
                    for i in range(FIRST_PITCH_INDEX, FIRST_PITCH_INDEX + 2 * number_of_notes, 2):
                        pitch = Pitch(pitchClass=tick[i], octave=tick[i + 1])
                        pitches.append(pitch)

                    # create a new chord with these pitches.
                    current_element = Chord(pitches)


                # set the velocity of the note.
                current_element.volume.velocity = DynamicConverter.vmf_to_velocity(tick[DYNAMIC_BIT])
                # set the articulation
                if tick[ARTICULATION_BIT] != 0:
                    current_element.articulations.append(
                        ArticulationConverter.vmf_to_articulation(tick[ARTICULATION_BIT]))

                # set the value for this tick.
                current_element.quarterLength = smallest_note
            elif tick[0] == 2:
                # extend previous note
                current_element.quarterLength += smallest_note

            elif tick[0] == 0 and (isinstance(current_element, note.Note) or current_element is None):
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # create new rest
                current_element = Rest()

                # Set the value for this tick.
                current_element.quarterLength = smallest_note

            elif tick[0] == 0 and isinstance(current_element, note.Rest):
                # extend previous rest.
                current_element.quarterLength += smallest_note

        # Append the last element in progress.
        if current_element is not None:
            # check for precision and adjust
            rounded = round(current_element.quarterLength)
            if abs(current_element.quarterLength - rounded) < PRECISION:
                current_element.quarterLength = rounded

            # append to the part
            current_voice.append(current_element)

    # create the stream for time signature changes
    time_signature_stream = Stream()

    for offset, time_signature_str in sorted(vmf['header']['time_signature'].items()):
        time_signature = TimeSignature(time_signature_str)
        time_signature_stream.append(time_signature)
        time_signature_stream[-1].offset = float(offset)

    # finish up the file.
    for part in score.parts:
        for voice in part.voices:
            voice.makeMeasures(inPlace=True, meterStream=time_signature_stream)

        for offset, t in sorted(vmf['header']['tempo'].items()):
            mm = tempo.MetronomeMark(number=t, referent=note.Note(type='quarter'))
            voice.insert(offset, mm)

        for offset, ks in sorted(vmf['header']['key_signature'].items()):
            voice.insert(offset, KeySignature(ks))

    return score
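
# Wrapper sketch (hypothetical paths): read a VMF file from disk, convert it
# with read_vmf_string, and export the resulting score.
def read_vmf_file(vmf_path):
    with open(vmf_path) as vmf_file:
        return read_vmf_string(vmf_file.read())

if __name__ == '__main__':
    converted = read_vmf_file('examples/chorale.vmf')
    converted.write('musicxml', fp='chorale.xml')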