# Example 1
0
def add_part(part: music21.stream.Part, score: music21.stream.Score, id='key_center') -> music21.stream.Score:
    """Attach *part* to *score* under the given id and return the score.

    The part is inserted at an offset equal to the current number of
    elements in the score (the original author's convention; note that
    music21 offsets are normally quarter-length positions, not counts).
    """
    part.id = id
    insertion_offset = len(score.elements)
    score.insert(insertion_offset, part)
    return score
# Example 2
0
def transpose_to_all_keys_gen(score: music21.stream.Score, keys: "list[str]" = KEYS) \
        -> Generator[music21.stream.Score, None, None]:
    """Yield *score* transposed into every key named in *keys*.

    The score's own key is detected once with ``score.analyze('key')``
    (hoisted out of the loop -- it is loop-invariant, and analyze() is
    expensive).  When the score is already in the requested key it is
    yielded unchanged; otherwise a transposed copy is yielded.

    NOTE(review): the original parameter annotation ``[str]`` was a
    runtime list literal, not a type; replaced with a string annotation
    so it is valid on any Python version without extra imports.
    """
    score_key = score.analyze('key')
    for key in keys:
        if score_key.tonic.fullName == key:
            yield score
        else:
            interval = music21.interval.Interval(score_key.tonic,
                                                 music21.pitch.Pitch(key))
            yield score.transpose(interval)
# Example 3
0
    def transpose(song: m21.stream.Score) -> m21.stream.Score:
        """Transpose *song* to C major or A minor, according to its mode.

        The key is read from the fifth element of the first measure of the
        first part (where the corpus conventionally stores it -- TODO
        confirm for this dataset); if that element is not a Key, the key
        is estimated with ``song.analyze("key")``.

        Raises:
            ValueError: if the detected key is neither major nor minor.
                (The original code left ``interval`` unbound here and
                crashed with an UnboundLocalError instead.)
        """
        key = song.getElementsByClass(m21.stream.Part)[0].getElementsByClass(
            m21.stream.Measure)[0][4]

        if not isinstance(key, m21.key.Key):
            key = song.analyze("key")

        if key.mode == "major":
            interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("C"))
        elif key.mode == "minor":
            interval = m21.interval.Interval(key.tonic, m21.pitch.Pitch("A"))
        else:
            raise ValueError(f"Unsupported key mode: {key.mode!r}")

        return song.transpose(interval)
# Example 4
0
def set_tempo(score: music21.stream.Score, bpm: int = 60) -> music21.stream.Score:
    """Replace every metronome mark in *score* with a single new one at *bpm*.

    All existing MetronomeMark objects are removed first (the flattened
    view is materialised into a list so we never remove while iterating),
    then one new mark is placed at offset 0 of the first measure of the
    first part.  Returns the same, mutated score for chaining.
    """
    # remove current marks; list() snapshots the flattened iterator
    marks_to_remove = list(
        score.flat.getElementsByClass(music21.tempo.MetronomeMark))
    for mark in marks_to_remove:
        score.remove(mark, recurse=True)

    # add new mark at the very start of the piece
    for measure in score.parts[0].getElementsByClass(music21.stream.Measure):
        if measure.offset == 0.0:
            tempo = music21.tempo.MetronomeMark(number=bpm)
            tempo.offset = 0.0
            # BUG FIX: append() places the element at the END of the measure
            # and overwrites its offset, defeating the `tempo.offset = 0.0`
            # assignment above; insert(0.0, ...) keeps it at the start.
            measure.insert(0.0, tempo)

    return score
# Example 5
0
 def transposed_score_and_metadata_tensors(
         self, score: music21.stream.Score,
         interval: music21.interval.Interval):
     """Return *score* transposed by *interval*.

     Raises:
         LeadsheetParsingException: if music21 rejects the transposition
             with a ValueError (chained as the cause).
     """
     try:
         leadsheet_transposed = score.transpose(interval)
     except ValueError as e:
         # BUG FIX: the original referenced an undefined name `leadsheet`
         # in this message, turning every parse failure into a NameError.
         raise LeadsheetParsingException(
             f'Leadsheet {score.metadata.title} '
             f'not properly formatted') from e
     return leadsheet_transposed
# Example 6
0
def score_to_tensor(score: music21.stream.Score) -> "tuple[np.ndarray, np.ndarray]":
    """Encode *score* as a (pitch, metadata) tensor pair at eighth-note
    resolution.

    Returns:
        ``(score_tensor, meta_tensor)`` where ``score_tensor[i, j]`` holds
        the MIDI pitch sounding in voice *i* at eighth-note pulse *j*
        (0 = silence) and ``meta_tensor`` carries per-pulse beat, slur and
        rest annotations.  Returns ``None`` if the score cannot be encoded.
    """
    n_voices, n_eighths = get_score_shape(score)
    score_tensor = np.zeros((n_voices, n_eighths))
    meta_tensor = np.zeros((n_voices, n_eighths, n_meta_features))
    # NOTE(review): the original had a stray bare `max_beats_per_measure`
    # expression statement here -- a no-op at best, a NameError at worst --
    # removed.
    try:
        for i, part in enumerate(score.getElementsByClass(
                music21.stream.Part)):
            for measure in part.getElementsByClass(music21.stream.Measure):
                # Durations are doubled because the eighth note is the
                # shortest value in the corpus.
                beats_in_measure = measure.duration.quarterLength * 2
                # Offset of the measure from the beginning of the piece.
                measure_offset = int(measure.offset)
                for b in range(int(beats_in_measure)):
                    # Annotate each eighth-note pulse in the metadata track
                    meta_tensor[i][measure_offset * 2 + b][idx_beat + b] = 1
                for note in measure.getElementsByClass(music21.note.Note):
                    offset = int(note.offset + measure_offset) * 2
                    for j in range(
                            int(offset),
                            int(offset + note.duration.quarterLength * 2)):
                        # Mark the note with its MIDI pitch throughout its
                        # duration.  BUG FIX: music21 exposes the MIDI number
                        # as note.pitch.midi; `note.midi` raised an
                        # AttributeError that the bare except silently ate.
                        score_tensor[i, j] = float(note.pitch.midi)
                        if j > offset:
                            # Add a 'slur' annotation for any held note
                            meta_tensor[i, j, idx_slur] = 1
                for rest in measure.getElementsByClass(music21.note.Rest):
                    # Mark all rests in the metadata track
                    offset = int(rest.offset + measure_offset) * 2
                    for j in range(
                            int(offset),
                            int(offset + rest.duration.quarterLength * 2)):
                        meta_tensor[i, j, idx_rest] = 1
        return score_tensor, meta_tensor
    except Exception:
        # Malformed scores are skipped rather than aborting the pipeline.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return None
# Example 7
0
def get_score_shape(score: music21.stream.Score) -> (int, int):
    """Return (number of voices, length in eighth notes) for *score*."""
    parts = score.getElementsByClass(music21.stream.Part)
    eighth_notes = score.duration.quarterLength * 2
    return len(parts), int(eighth_notes)
# Example 8
0
def get_key(score: music21.stream.Score) -> str:
    """Return the full name of the tonic of *score*'s detected key."""
    detected_key = score.analyze('key')
    return detected_key.tonic.fullName
# Example 9
0
def n_voices(score: music21.stream.Score) -> int:
    """Return how many Part streams *score* contains."""
    parts = score.getElementsByClass(music21.stream.Part)
    return len(parts)
# Example 10
0
def extract_part(score: music21.stream.Score, name_part):
    """Look up and return the element of *score* whose id is *name_part*."""
    matching_part = score.getElementById(name_part)
    return matching_part
def getM21ObjectById(theID: int,
                     score: m21.stream.Score) -> m21.base.Music21Object:
    """Recursively search *score* and return the element with id *theID*."""
    return score.recurse().getElementById(theID)