Example 1
    def m21(self):
        # Convert this object to a music21 Score. Assumes music21 imports such as:
        #   from music21.stream import Score, Part, Measure
        #   from music21.note import Note

        score = Score()
        part = Part()
        bars = len(self) // self.bar_size

        measure = Measure()
        measure.insert(0.0, self.meter.m21)

        for bar in range(bars):

            start = bar * self.bar_size
            end = (bar + 1) * self.bar_size
            positions = [position % self.bar_size 
                for position in self.iter_onset_positions(start=start, end=end)
            ]
            # Append a sentinel position so the last inter-onset interval (IOI)
            # runs from the last note to the end of the bar
            positions.append(self.bar_size)
            offsets = [self.get_offset(p) for p in positions]
            iois = [b - a for a, b in zip(offsets[:-1], offsets[1:])]

            for offset, ioi in zip(offsets[:-1], iois):

                note = Note('a5')
                note.duration.quarterLength = ioi * 4.0
                measure.insert(offset * 4.0, note)

            part.append(measure)
            measure = Measure()

        score.append(part)
        return score.makeMeasures()
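A minimal usage sketch; `rhythm` stands for a hypothetical instance of the enclosing class:

score = rhythm.m21()     # `rhythm`: hypothetical instance of the enclosing class
score.show('text')       # quick text rendering; score.write('midi', 'out.mid') also works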
Example 2
def to_music21(music: "Music") -> Score:
    """Return a Music object as a music21 Score object.

    Parameters
    ----------
    music : :class:`muspy.Music`
        Music object to convert.

    Returns
    -------
    `music21.stream.Score`
        Converted music21 Score object.

    """
    # Create a new score
    score = Score()

    # Metadata
    if music.metadata:
        score.append(to_music21_metadata(music.metadata))

    # Tracks
    for track in music.tracks:
        # Create a new part
        part = Part()
        part.partName = track.name

        # Add tempos
        for tempo in music.tempos:
            part.append(to_music21_metronome(tempo))

        # Add time signatures
        for time_signature in music.time_signatures:
            part.append(to_music21_time_signature(time_signature))

        # Add key signatures
        for key_signature in music.key_signatures:
            part.append(to_music21_key(key_signature))

        # Add notes to part. insert() preserves each note's offset; append()
        # would override a manually set .offset with the end of the stream.
        for note in track.notes:
            m21_note = M21Note(_get_pitch_name(note.pitch))
            m21_note.quarterLength = note.duration / music.resolution
            part.insert(note.time / music.resolution, m21_note)

        # Append the part to score
        score.append(part)

    return score
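A usage sketch, assuming a MIDI file at the hypothetical path song.mid; muspy.read_midi loads it into the muspy.Music object that to_music21 expects:

import muspy

music = muspy.read_midi("song.mid")  # hypothetical input file
score = to_music21(music)
score.show("text")                   # or score.write("musicxml", "song.musicxml")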
Example 3
    def matrix_to_score(self, matrix, verbose=False):
        '''
        Takes a matrix of shape (P, T, 2) and turns it into a music21.stream.Score
        object, where P is the number of parts, T is the number of time slices,
        and 2 is the size of the per-note vector.
        '''
        # (4 parts, # ticks, 2)
        assert len(matrix.shape) == 3, \
            "Input matrix needs to have 3-dimensions."

        num_parts, num_ticks, num_dim = matrix.shape
        assert num_parts == 4, "Input matrix needs to have 4 parts."
        assert num_ticks > 0, "No time slices in this matrix."
        assert num_dim == 2, "Note vector size mismatch."

        # need to make sure all pieces start with an articulated note, even if
        # it's a rest.
        matrix[:, 0, 1] = [1, 1, 1, 1]

        score = Score()
        parts = list(map(self._matrix_to_part, matrix))

        parts[0].insert(0, instrument.Violin())
        parts[0].partName = "Violin I"
        parts[0].clef = clef.TrebleClef()

        parts[1].insert(0, instrument.Violin())
        parts[1].partName = "Violin II"
        parts[1].clef = clef.TrebleClef()

        parts[2].insert(0, instrument.Viola())
        parts[2].clef = clef.AltoClef()

        parts[3].insert(0, instrument.Violoncello())
        parts[3].clef = clef.BassClef()
        for part in parts:
            score.append(part)

        return score
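A call sketch with a dummy matrix, assuming `converter` is an instance of the enclosing class (the exact meaning of the two note-vector entries depends on its `_matrix_to_part`):

import numpy as np

matrix = np.zeros((4, 16, 2), dtype=int)   # 4 parts, 16 time slices, 2-element note vectors (placeholder)
matrix[:, :, 0] = 60                       # placeholder pitch-like value for every slice
score = converter.matrix_to_score(matrix)  # `converter`: hypothetical instance of the enclosing class
score.show('text')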
Example 4
def build_midi(harmony, melody):
    # Assumes music21 imports (note, chord, instrument, Duration, Score, Stream)
    # plus module-level NOTES, start_octave and get_chord_dicts().
    chords_dict = get_chord_dicts()[1]

    song = []
    for i, eighth in enumerate(melody):
        # eighth = multi_hot_to_pianoroll(piano_roll[:midi_range]) # now make_music returns pianorolls already
        # chord = one_hot_to_index(piano_roll[-chord_classes:]) # TODO add chord to midi
        # print(f'EIGHTH: {eighth}') # DEBUG

        song_notes = []
        for note_ in eighth:
            note_name = NOTES[note_ % 12]
            note_octave = start_octave + note_ // 12  # starting from C2
            song_notes.append(note_name + str(note_octave))

        song_chords = []
        full_chord = chords_dict[harmony[i]]
        if full_chord != '<unk>':
            for chord_ in full_chord:
                chord_name = NOTES[chord_ % 12]
                song_chords.append(chord_name + str(start_octave - 1))

        song.append(("REST" if len(song_notes) == 0 else song_notes, "REST" if len(song_chords) == 0 else song_chords))

    notes_score = Score()
    notes_score.append(instrument.Piano())
    chords_score = Score()
    chords_score.append(instrument.KeyboardInstrument())
    bass_score = Score()
    bass_score.append(instrument.ElectricBass())

    current_note_length = 0
    current_chord_length = 0

    for i, _ in enumerate(song):

        current_note_length += 0.5
        current_chord_length += 0.5

        # print(f'NOTE: {song[i][0]}\t\t\t- CHORD: {song[i][1]}')

        if i < len(song)-1:
            # note
            if song[i][0] != song[i+1][0]:
                if song[i][0] == "REST":
                    notes_score.append(note.Rest(duration=Duration(current_note_length)))
                else:
                    notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))
                current_note_length = 0

            # chord
            if song[i][1] != song[i+1][1] or current_chord_length == 4:
                if song[i][1] == "REST":
                    chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                    bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
                else:
                    chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                    bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))
                current_chord_length = 0
        else:
            # note
            if song[i][0] == "REST":
                notes_score.append(note.Rest(duration=Duration(current_note_length)))
            else:
                notes_score.append(chord.Chord([note.Note(nameWithOctave=note_name) for note_name in song[i][0]], duration=Duration(current_note_length)))

            # chord
            if song[i][1] == "REST":
                chords_score.append(note.Rest(duration=Duration(current_chord_length)))

                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/4)))
                bass_score.append(note.Rest(duration=Duration(current_chord_length/2)))
            else:
                chords_score.append(chord.Chord([note.Note(nameWithOctave=chord_name) for chord_name in song[i][1]], duration=Duration(current_chord_length)))

                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/4)))
                bass_score.append(chord.Chord([note.Note(nameWithOctave=chord_name[:-1]+str(int(chord_name[-1])+1)) for chord_name in song[i][1]], duration=Duration(current_chord_length/2)))

    song_stream = Stream()
    song_stream.insert(0, notes_score)
    song_stream.insert(0, chords_score)
    song_stream.insert(0, bass_score)

    if not os.path.exists('melodies'):
        os.makedirs('melodies')
    dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    song_stream.write('midi', fp=f'melodies/generated_{dt}.mid')
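A call sketch with placeholder inputs; the concrete index values depend on the module-level NOTES list and on the chord dictionary returned by get_chord_dicts():

melody = [[0], [4], [7], [4]]   # placeholder: one pitch index per eighth; [] would become a REST
harmony = [0, 0, 0, 0]          # placeholder: one chord-dictionary key per eighth
build_midi(harmony, melody)     # writes melodies/generated_<timestamp>.mid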
Example 5
        #     new[0] += normal(dur[0], dur[1])
        #     new[1] += normal(freq[0], freq[1])
        notes.append(new)

        total_dur += new[0]

    return MIDIReader.list_to_stream(notes)

if __name__ == '__main__':

    snn = SonataNeuralNetwork()
    for midi in listdir(music_dir):
        print(midi)
        snn.read(path.join(music_dir, midi))
    print('training')
    t_net, b_net = snn.train_network()

    # snn.append_errors()
    # td, tf, bd, bf = snn.get_error_vals()

    treble = stream_from_notes([[1, log(261.6)], [1, log(329.6)], [1, log(392)], [1, log(329.6)], [1, log(392)], [1, log(523.3)]], t_net)
    treble.insert(0, clef.TrebleClef())
    bass = stream_from_notes([[0.5, log(130.8)], [0.5, log(164.8)], [0.5, log(196)], [1, log(261.6)], [0.5, log(196)], [1, log(164.8)]], b_net)
    bass.insert(0, clef.BassClef())
    s = Score()
    s.append(treble)
    s.append(bass)
    bass.offset = 0  # align bass with treble; append() had placed it after the treble part

    s.show()
Example 6
def tensors_to_stream(outputs, config, metadata=None):
    cur_measure_number = 0
    parts = {}
    for part_name in outputs.keys():
        if part_name == 'extra':
            continue
        part = Part(id=part_name)
        parts[part_name] = part

    last_time_signature = None
    cur_time_signature = '4/4'
    for step in range(outputs['soprano'].shape[0]):
        extra = outputs['extra'][step]
        if extra[indices_extra['has_time_signature_3/4']].item() == 1:
            cur_time_signature = '3/4'
        elif extra[indices_extra['has_time_signature_4/4']].item() == 1:
            cur_time_signature = '4/4'
        elif extra[indices_extra['has_time_signature_3/2']].item() == 1:
            cur_time_signature = '3/2'
        cur_time_pos = extra[indices_extra['time_pos']].item()
        has_fermata = extra[indices_extra['has_fermata']].item() == 1

        if cur_time_pos == 1.0 or cur_measure_number == 0:
            for part_name, part in parts.items():
                part.append(Measure(number=cur_measure_number))
                if cur_measure_number == 0:
                    if part_name in ['soprano', 'alto']:
                        part[-1].append(clef.TrebleClef())
                    else:
                        part[-1].append(clef.BassClef())
                    key = int(
                        torch.argmax(
                            outputs['extra'][0, indices_extra['has_sharps_0']:
                                             indices_extra['has_sharps_11'] +
                                             1],
                            dim=0).item())
                    if key >= 6:
                        key -= 12
                    part[-1].append(KeySignature(key))
                    part[-1].append(MetronomeMark(number=90))
            cur_measure_number += 1

        if last_time_signature is None or cur_time_signature != last_time_signature:
            for part in parts.values():
                part[-1].append(TimeSignature(cur_time_signature))
            last_time_signature = cur_time_signature

        for part_name, part in parts.items():
            idx = torch.argmax(outputs[part_name][step]).item()
            if idx == indices_parts['is_continued']:
                try:
                    last_element = part[-1].flat.notesAndRests[-1]
                    cur_element = deepcopy(last_element)
                    if last_element.tie is not None and last_element.tie.type == 'stop':
                        last_element.tie = Tie('continue')
                    else:
                        last_element.tie = Tie('start')
                    cur_element.tie = Tie('stop')
                except IndexError:
                    logging.debug(
                        'Warning: "is_continued" on first beat. Replaced by rest.'
                    )
                    cur_element = Rest(quarterLength=config.time_grid)
                part[-1].append(cur_element)
            elif idx == indices_parts['is_rest']:
                part[-1].append(Rest(quarterLength=config.time_grid))
            else:
                pitch = Pitch()
                part[-1].append(Note(pitch, quarterLength=config.time_grid))
                # Set pitch value AFTER appending to measure in order to avoid unnecessary accidentals
                pitch.midi = idx + min_pitches[part_name] - len(indices_parts)

        if has_fermata:
            for part in parts.values():
                fermata = Fermata()
                fermata.type = 'upright'
                part[-1][-1].expressions.append(fermata)

    score = Score()
    if metadata is not None:
        score.append(Metadata())
        score.metadata.title = f"{metadata.title} ({metadata.number})"
        score.metadata.composer = f"Melody: {metadata.composer}\nArrangement: BachNet ({datetime.now().year})"
    for part in parts.values():
        part[-1].rightBarline = 'light-heavy'

    score.append(parts['soprano'])
    if 'alto' in parts:
        score.append(parts['alto'])
        score.append(parts['tenor'])
    score.append(parts['bass'])

    score.stripTies(inPlace=True, retainContainers=True)

    return score
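A minimal sketch of consuming the result, assuming `outputs`, `config`, and `metadata` come from the surrounding BachNet inference code:

score = tensors_to_stream(outputs, config, metadata)  # `outputs`/`config`/`metadata`: assumed to exist
score.write('musicxml', fp='chorale.musicxml')        # or score.show() to open in a notation editor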
Example 7
    key_name = key.step.upper() if key.mode == "major" else key.step.lower()

    for note in notes:
      if note.part == key.part and note.measure == key.measure:
        note.step = Interval(
          noteStart=Note(Key(key_name).asKey().tonic),
          noteEnd=note._music21_object
        ).semitones % 12

  return notes


if __name__ == "__main__":
  """
  How to create Mupix Objects.
  """
  from music21.stream import Score, Part, Measure
  from music21.key import KeySignature
  from music21.note import Note  # noqa

  s = Score()
  p1 = Part(id="part1")
  m1 = Measure(number=1)
  m1.append(KeySignature(3))
  m1.append(Note("C4", type="eighth"))
  m2 = Measure(number=2)
  m2.append(KeySignature(0))
  m2.append(Note("G4", type="eighth"))
  p1.append([m1, m2])
  s.append([p1])

  notes = [NoteObject(item, 1) for item in s.recurse().notes if not item.isChord]
  print(notes)
Example 8
def read_vmf_string(vmf_string):
    """
    Reads VMF data from a string to a Score Stream.

    :param vmf_string: The contents of the VMF file as a string.
    :return: A music21 score instance containing the music in the VMF file.
    """

    parts_converted = {}

    vmf = json.loads(vmf_string)

    # create a score
    score = Score()

    # Get the initial data
    number_of_parts = vmf['header']['number_of_parts']
    number_of_voices = vmf['header']['number_of_voices']
    smallest_note = float(Fraction(vmf['header']['tick_value']))

    # create the parts and first measure.
    for _ in range(number_of_parts):
        part = Part()
        voice = Voice()

        part.append(voice)

        score.append(part)

    # get the body of the vmf
    body = vmf['body']

    part_number = 0

    # We do this because we want to do each part at a time.
    for voice_number in range(number_of_voices):
        # Get all ticks for a given part.
        part = [tick[voice_number] for tick in body]

        current_element = None
        current_voice = None

        # iterate over each tick
        for tick in part:

            if current_voice is None:
                # Get the parent part if it exists.
                try:
                    current_part = parts_converted[tick[-1]]

                    # add a new voice and write to it.
                    voice = Voice()

                    initial_key_signature = KeySignature(vmf['header']['key_signature']['0.0'])
                    initial_time_signature = TimeSignature(vmf['header']['time_signature']['0.0'])

                    voice.append(initial_key_signature)
                    voice.append(initial_time_signature)

                    current_part.append(voice)

                except KeyError:
                    # Add it to our dictionary otherwise.
                    current_part = score.parts[part_number]
                    part_number += 1

                    parts_converted[tick[-1]] = current_part

                # Get the last voice.
                current_voice = current_part.voices[-1]

            if tick[0] == 1:
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # Find how many notes to write. This will always be an int.
                number_of_notes = int(find_number_of_notes_in_tick(tick))

                if number_of_notes == 1:
                    # create a new note
                    current_element = Note(Pitch(pitchClass=tick[3], octave=tick[4]))
                else:
                    pitches = []

                    # create the pitches.
                    # From the beginning to the end of the pitch section of the tick.
                    for i in range(FIRST_PITCH_INDEX, FIRST_PITCH_INDEX + 2 * number_of_notes, 2):
                        pitch = Pitch(pitchClass=tick[i], octave=tick[i + 1])
                        pitches.append(pitch)

                    # create a new chord with these pitches.
                    current_element = Chord(pitches)


                # set the velocity of the note.
                current_element.volume.velocity = DynamicConverter.vmf_to_velocity(tick[DYNAMIC_BIT])
                # set the articulation
                if tick[ARTICULATION_BIT] != 0:
                    current_element.articulations.append(
                        ArticulationConverter.vmf_to_articulation(tick[ARTICULATION_BIT]))

                # set the value for this tick.
                current_element.quarterLength = smallest_note
            elif tick[0] == 2:
                # extend previous note
                current_element.quarterLength += smallest_note

            elif tick[0] == 0 and (isinstance(current_element, note.Note) or current_element is None):
                if current_element is not None:
                    # check for precision and adjust
                    rounded = round(current_element.quarterLength)
                    if abs(current_element.quarterLength - rounded) < PRECISION:
                        current_element.quarterLength = rounded

                    # append to the part
                    current_voice.append(current_element)

                # create new rest
                current_element = Rest()

                # Set the value for this tick.
                current_element.quarterLength = smallest_note

            elif tick[0] == 0 and isinstance(current_element, note.Rest):
                # extend previous rest.
                current_element.quarterLength += smallest_note

        # Append the last element in progress.
        if current_element is not None:
            # check for precision and adjust
            rounded = round(current_element.quarterLength)
            if abs(current_element.quarterLength - rounded) < PRECISION:
                current_element.quarterLength = rounded

            # append to the part
            current_voice.append(current_element)

    # create the stream for time signature changes
    time_signature_stream = Stream()

    for offset, time_signature_str in sorted(vmf['header']['time_signature'].items()):
        time_signature = TimeSignature(time_signature_str)
        time_signature_stream.append(time_signature)
        time_signature_stream[-1].offset = float(offset)

    # finish up the file.
    for part in score.parts:
        for voice in part.voices:
            voice.makeMeasures(inPlace=True, meterStream=time_signature_stream)

        # NOTE: `voice` here is the last voice from the loop above, so tempo and
        # key-signature changes land in that voice only. Header offsets are
        # stored as strings, so convert them to floats before inserting.
        for offset, t in sorted(vmf['header']['tempo'].items()):
            mm = tempo.MetronomeMark(number=t, referent=note.Note(type='quarter'))
            voice.insert(float(offset), mm)

        for offset, ks in sorted(vmf['header']['key_signature'].items()):
            voice.insert(float(offset), KeySignature(ks))

    return score
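A usage sketch, assuming a VMF file at the hypothetical path piece.vmf:

with open('piece.vmf') as vmf_file:   # hypothetical input file
    score = read_vmf_string(vmf_file.read())
score.show('text')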