Code Example #1
File: importmidi.py  Project: kittyshi/partitura
def load_score_midi(fn, part_voice_assign_mode=0, ensure_list=False,
                    quantization_unit=None, estimate_voice_info=True,
                    estimate_key=False, assign_note_ids=True):
    """Load a musical score from a MIDI file and return it as a Part
    instance.

    This function interprets MIDI information as describing a score.
    Pitch names are estimated using Meredith's PS13 algorithm [1]_.
    Assignment of notes to voices can either be done using Chew and
    Wu's voice separation algorithm [2]_, or by choosing one of the
    part/voice assignment modes that assign voices based on
    track/channel information. Furthermore, the key signature can be
    estimated based on Krumhansl's 1990 key profiles [3]_.

    This function expects times to be metrical/quantized. Optionally a
    quantization unit may be specified. If you wish to access the
    non-quantized times of MIDI events, use the
    `load_performance_midi` function instead.

    Parameters
    ----------
    fn : str
        Path to MIDI file
    part_voice_assign_mode : {0, 1, 2, 3, 4, 5}, optional
        This keyword controls how part and voice information is
        associated to track and channel information in the MIDI file.
        The semantics of the modes is as follows:

        0
            Return one Part per track, with voices assigned by channel
        1
            Return one PartGroup per track, with Parts assigned by channel
            (no voices)
        2
            Return single Part with voices assigned by track (tracks are
            combined, channel info is ignored)
        3
            Return one Part per track, without voices (channel info is
            ignored)
        4
            Return single Part without voices (channel and track info is
            ignored)
        5
            Return one Part per <track, channel> combination, without
            voices.

        Defaults to 0.
    ensure_list : bool, optional
        When True, always return a list, regardless of how many part or
        partgroup elements were created from the MIDI file. By default, when the
        return value of `load_score_midi` produces a single 
        :class:`partitura.score.Part` or :class:`partitura.score.PartGroup`
        element, the element itself is returned instead of a list
        containing the element. Defaults to False.
    quantization_unit : integer or None, optional
        Quantize MIDI times to multiples of this unit. If None, the
        quantization unit is chosen automatically as the smallest
        division of the parts per quarter (MIDI "ticks") that can be
        represented as a symbolic duration. Defaults to None.
    estimate_voice_info : bool, optional
        When True use Chew and Wu's voice separation algorithm [2]_ to
        estimate voice information. This option is ignored for
        part/voice assignment modes that infer voice information from
        the track/channel info (i.e. `part_voice_assign_mode` equals
        1, 3, 4, or 5). Defaults to True.
    estimate_key : bool, optional
        When True use Krumhansl's 1990 key profiles [3]_ to determine
        the most likely global key, discarding any key information in
        the MIDI file. Defaults to False.
    assign_note_ids : bool, optional
        When True, assign note ids of the form "n0", "n1", ... to the
        notes in order of appearance; otherwise note ids are left
        unset. Defaults to True.

    Returns
    -------
    :class:`partitura.score.Part`, :class:`partitura.score.PartGroup`, or a list of these
        One or more part or partgroup objects

    References
    ----------
    .. [1] Meredith, D. (2006). "The ps13 Pitch Spelling Algorithm". Journal 
           of New Music Research, 35(2):121.
    .. [2] Chew, E. and Wu, X. (2004) "Separating Voices in
           Polyphonic Music: A Contig Mapping Approach". In Uffe Kock Wiil,
           editor, Computer Music Modeling and Retrieval (CMMR), pp. 1–20,
           Springer Berlin Heidelberg.
    .. [3] Krumhansl, Carol L. (1990) "Cognitive foundations of musical pitch",
           Oxford University Press, New York.

    """
    mid = mido.MidiFile(fn)
    divs = mid.ticks_per_beat

    # these lists will contain information from dedicated tracks for meta
    # information (i.e. without notes)
    global_time_sigs = []
    global_key_sigs = []
    global_tempos = []

    # these dictionaries will contain meta information indexed by track (only
    # for tracks that contain notes)
    time_sigs_by_track = {}
    key_sigs_by_track = {}
    tempos_by_track = {}
    track_names_by_track = {}
    # notes are indexed by (track, channel) tuples
    notes_by_track_ch = {}
    relevant = {'time_signature',
                'key_signature',
                'set_tempo',
                'note_on',
                'note_off'}
    for track_nr, track in enumerate(mid.tracks):
        time_sigs = []
        key_sigs = []
        # tempos = []
        notes = defaultdict(list)
        # dictionary for storing the last onset time and velocity for each
        # individual note (i.e. same pitch and channel)
        sounding_notes = {}
        # current time (will be updated by delta times in messages)
        t_raw = 0

        for msg in track:

            t_raw = t_raw + msg.time

            if msg.type not in relevant:
                continue

            if quantization_unit:
                t = quantize(t_raw, quantization_unit)
            else:
                t = t_raw

            if msg.type == 'time_signature':
                time_sigs.append((t, msg.numerator, msg.denominator))
            elif msg.type == 'key_signature':
                key_sigs.append((t, msg.key))
            elif msg.type == 'set_tempo':
                global_tempos.append((t, 60*10**6/msg.tempo))
            else:
                note_on = msg.type == 'note_on'
                note_off = msg.type == 'note_off'

                if not (note_on or note_off):
                    continue

                # hash sounding note
                note = note_hash(msg.channel, msg.note)

                # start note if it's a 'note on' event with velocity > 0
                if note_on and msg.velocity > 0:

                    # save the onset time and velocity
                    sounding_notes[note] = (t, msg.velocity)

                # end note if it's a 'note off' event or 'note on' with velocity 0
                elif note_off or (note_on and msg.velocity == 0):

                    if note not in sounding_notes:
                        warnings.warn('ignoring MIDI message %s' % msg)
                        continue

                    # append (onset, pitch, duration) to the list associated
                    # with the channel (the stored velocity is not used)
                    notes[msg.channel].append((sounding_notes[note][0], msg.note,
                                               t - sounding_notes[note][0]))
                    # remove hash from dict
                    del sounding_notes[note]

        # if a track has no notes, we assume it may contain global time/key sigs
        if not notes:
            global_time_sigs.extend(time_sigs)
            global_key_sigs.extend(key_sigs)
        else:
            # if there are notes, we store the info under the track number
            time_sigs_by_track[track_nr] = time_sigs
            key_sigs_by_track[track_nr] = key_sigs
            track_names_by_track[track_nr] = track.name

        for ch, ch_notes in notes.items():
            # if there are any notes, store the notes along with key sig / time
            # sig / tempo information under the key (track_nr, ch_nr)
            if len(ch_notes) > 0:
                notes_by_track_ch[(track_nr, ch)] = ch_notes

    tr_ch_keys = sorted(notes_by_track_ch.keys())
    group_part_voice_keys, part_names, group_names = assign_group_part_voice(
        part_voice_assign_mode,
        tr_ch_keys,
        track_names_by_track)
    
    # for key and time sigs:
    track_to_part_mapping = make_track_to_part_mapping(
        tr_ch_keys,
        group_part_voice_keys)

    # pairs of (part, voice) for each note
    part_voice_list = [[part, voice] for tr_ch, (_, part, voice)
                       in zip(tr_ch_keys, group_part_voice_keys)
                       for i in range(len(notes_by_track_ch[tr_ch]))]

    # pitch spelling, voice estimation and key estimation are done on a
    # structured array (onset, pitch, duration) of all notes in the piece
    # jointly, so we concatenate all notes
    # note_list = sorted(note for notes in (notes_by_track_ch[key] for key in tr_ch_keys) for note in notes)
    note_list = [note for notes in (notes_by_track_ch[key]
                                    for key in tr_ch_keys)
                 for note in notes]
    # np.int was removed in recent NumPy versions; use the builtin int instead
    note_array = np.array(note_list, dtype=[('onset', int),
                                            ('pitch', int),
                                            ('duration', int)])

    LOGGER.debug('pitch spelling')
    spelling_global = analysis.estimate_spelling(note_array)

    if estimate_voice_info:
        LOGGER.debug('voice estimation')
        # TODO: deal with zero duration notes in note_array. Zero duration notes are currently deleted
        estimated_voices = analysis.estimate_voices(note_array)
        assert len(part_voice_list) == len(estimated_voices) 
        for part_voice, voice_est in zip(part_voice_list, estimated_voices):
            if part_voice[1] is None:
                part_voice[1] = voice_est

    if estimate_key:
        LOGGER.debug('key estimation')
        _, mode, fifths = analysis.estimate_key(note_array)
        key_sigs_by_track = {}
        global_key_sigs = [(0, fifths_mode_to_key_name(fifths, mode))]

    if assign_note_ids:
        note_ids = ['n{}'.format(i) for i in range(len(note_array))]
    else:
        note_ids = [None for i in range(len(note_array))]

    time_sigs_by_part = defaultdict(set)
    for tr, ts_list in time_sigs_by_track.items():
        for ts in ts_list:
            for part in track_to_part_mapping[tr]:
                time_sigs_by_part[part].add(ts)
    for ts in global_time_sigs:
        for part in set(part for _, part, _ in group_part_voice_keys):
            time_sigs_by_part[part].add(ts)

    key_sigs_by_part = defaultdict(set)
    for tr, ks_list in key_sigs_by_track.items():
        for ks in ks_list:
            for part in track_to_part_mapping[tr]:
                key_sigs_by_part[part].add(ks)
    for ks in global_key_sigs:
        for part in set(part for _, part, _ in group_part_voice_keys):
            key_sigs_by_part[part].add(ks)

    # names_by_part = defaultdict(set)
    # for tr_ch, pg_p_v in zip(tr_ch_keys, group_part_voice_keys):
    #     print(tr_ch, pg_p_v)
    # for tr, name in track_names_by_track.items():
    #     print(tr, track_to_part_mapping, name)
    #     for part in track_to_part_mapping[tr]:
    #         names_by_part[part] = name

    notes_by_part = defaultdict(list)
    for (part, voice), note, spelling, note_id in zip(part_voice_list,
                                                      note_list,
                                                      spelling_global,
                                                      note_ids):
        notes_by_part[part].append((note, voice, spelling, note_id))

    partlist = []
    part_to_part_group = dict((p, pg) for pg, p, _ in group_part_voice_keys)
    part_groups = {} 
    for part_nr, note_info in notes_by_part.items():
        notes, voices, spellings, note_ids = zip(*note_info)
        part = create_part(divs, notes, spellings, voices, note_ids,
                           sorted(time_sigs_by_part[part_nr]),
                           sorted(key_sigs_by_part[part_nr]),
                           part_id='P{}'.format(part_nr+1),
                           part_name=part_names.get(part_nr, None))

        # print(part.pretty())
        # if this part has an associated part_group number we create a PartGroup
        # if necessary, and add the part to that. The newly created PartGroup is
        # then added to the partlist.
        pg_nr = part_to_part_group[part_nr]
        if pg_nr is None:
            partlist.append(part)
        else:
            if pg_nr not in part_groups:
                part_groups[pg_nr] = score.PartGroup(group_name=group_names.get(pg_nr, None))
                partlist.append(part_groups[pg_nr])
            part_groups[pg_nr].children.append(part)

    # add tempos to first part
    part = next(score.iter_parts(partlist))
    for t, qpm in global_tempos:
        part.add(score.Tempo(qpm, unit='q'), t)

    if not ensure_list and len(partlist) == 1:
        return partlist[0]
    else:
        return partlist
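
A minimal usage sketch (not part of the original example), assuming load_score_midi is importable from the top-level partitura package; the MIDI file name is hypothetical:

from partitura import load_score_midi

# default mode 0: one Part per track, with voices assigned by channel
part = load_score_midi('example.mid')

# mode 5: one Part per <track, channel> pair, always returned as a list
parts = load_score_midi('example.mid', part_voice_assign_mode=5,
                        ensure_list=True)
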
Code Example #2
def test_part(self):
    key = estimate_key(self.score)
    self.assertTrue(key == "Am", "Incorrect key")
Code Example #3
def test_note_array(self):
    key = estimate_key(self.score.note_array)
    self.assertTrue(key == "Am", "Incorrect key")
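
A possible fixture for the two tests above (a sketch; the actual fixture is not shown in these snippets). It assumes partitura.load_musicxml and partitura.musicanalysis.estimate_key are available, and the A-minor score file is hypothetical:

import unittest

import partitura
from partitura.musicanalysis import estimate_key


class TestKeyEstimation(unittest.TestCase):
    def setUp(self):
        # hypothetical test score known to be in A minor
        self.score = partitura.load_musicxml('a_minor_example.musicxml')
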
Code Example #4
File: importmatch.py  Project: kittyshi/partitura
def part_from_matchfile(mf):
    part = score.Part('P1', mf.info('piece'))
    # snotes = sorted(mf.snotes, key=attrgetter('OnsetInBeats'))
    snotes = sort_snotes(mf.snotes)
    divs = np.lcm.reduce(np.unique([note.Offset.denominator * (note.Offset.tuple_div or 1)
                                    for note in snotes]))
    part.set_quarter_duration(0, divs)
    min_time = snotes[0].OnsetInBeats  # sorted by OnsetInBeats
    max_time = max(n.OffsetInBeats for n in snotes)

    ts = mf.time_signatures

    beats_map, beat_type_map, min_time_q, max_time_q = make_timesig_maps(ts, max_time)

    bars = np.unique([n.Bar for n in snotes])
    t = min_time
    t = t * 4 / beat_type_map(min_time_q)
    offset = t
    # bar map: bar_number-> start in quarters
    bar_times = {}
    for b0, b1 in iter_current_next(bars, start=0):

        bar_times.setdefault(b1, t)
        if t < 0:
            t = 0
        else:
            # multiply by diff between consecutive bar numbers
            n_bars = b1 - b0
            if t <= max_time_q:
                t += (n_bars * 4 * beats_map(t)) / beat_type_map(t)

    for note in snotes:
        # start of bar in quarter units
        bar_start = bar_times[note.Bar]

        # offset within bar in quarter units
        bar_offset = (note.Beat - 1) * 4 / beat_type_map(bar_start)
        # offset within beat in quarter units
        beat_offset = (4 * note.Offset.numerator
                       / (note.Offset.denominator * (note.Offset.tuple_div or 1)))

        # anacrusis
        if bar_start < 0:
            # in case of anacrusis we set the bar_start to -bar_duration (in
            # quarters) so that the below calculation is correct
            bar_start = - beats_map(bar_start) * 4 / beat_type_map(bar_start)

        # note onset in divs
        onset_divs = int(divs * (bar_start + bar_offset + beat_offset - offset))
        # print(note.Anchor, onset_divs, bar_start, bar_offset, beat_offset, offset)
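        # Illustrative arithmetic (not from the original source): with
        # divs = 12 and offset = 0, a note in a 4/4 bar whose bar_start is
        # 4 quarters, on Beat 3 (bar_offset = (3-1)*4/4 = 2 quarters), with
        # Offset 1/8 (beat_offset = 4*1/8 = 0.5 quarters) gets
        # onset_divs = int(12 * (4 + 2 + 0.5 - 0)) = 78.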

        articulations = set()
        if 'staccato' in note.ScoreAttributesList:
            articulations.add('staccato')
        if 'accent' in note.ScoreAttributesList:
            articulations.add('accent')

        # dictionary with keyword args with which the Note (or GraceNote) will be instantiated
        note_attributes = dict(step=note.NoteName,
                               octave=note.Octave,
                               alter=note.Modifier,
                               id=note.Anchor,
                               articulations=articulations)

        staff_nr = next((a[-1] for a in note.ScoreAttributesList if a.startswith('staff')), None)
        try:
            note_attributes['staff'] = int(staff_nr)
        except (TypeError, ValueError):
            # no staff attribute, or staff attribute does not end with a number
            note_attributes['staff'] = None

        note_attributes['voice'] = next((int(a) for a in note.ScoreAttributesList
                                         if NUMBER_PAT.match(a)), None)

        # get rid of this if-statement as soon as we have a way to iterate
        # over the duration components. For now we have to treat simple and
        # compound durations as separate cases.

        if note.Duration.add_components:
            prev_part_note = None

            for i, (num, den, tuple_div) in enumerate(note.Duration.add_components):

                # when we add multiple notes that are tied, the first note
                # gets the original note id, and subsequent notes get a
                # derived note id (by appending 'a', 'b', 'c', ...)
                if i > 0:
                    # tnote_id = 'n{}_{}'.format(note.Anchor, i)
                    note_attributes['id'] = score.make_tied_note_id(note_attributes['id'])

                part_note = score.Note(**note_attributes)

                duration_divs = int(divs * 4 * num / (den * (tuple_div or 1)))

                assert duration_divs > 0

                offset_divs = onset_divs + duration_divs

                part.add(part_note, onset_divs, offset_divs)

                if prev_part_note:
                    prev_part_note.tie_next = part_note
                    part_note.tie_prev = prev_part_note
                prev_part_note = part_note
                onset_divs = offset_divs

        else:

            num = note.Duration.numerator
            den = note.Duration.denominator
            tuple_div = note.Duration.tuple_div
            duration_divs = int(divs * 4 * num / (den * (tuple_div or 1)))
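            # Illustrative arithmetic (not from the original source): with
            # divs = 12, a quarter note (num=1, den=4) spans 12*4*1/4 = 12
            # divs, and one note of an eighth-note triplet (num=1, den=8,
            # tuple_div=3) spans 12*4*1/(8*3) = 2 divs.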

            offset_divs = onset_divs + duration_divs

            # notes with duration 0 are also treated as grace notes, even if
            # they do not have a 'grace' score attribute
            if ('grace' in note.ScoreAttributesList or
                    note.Duration.numerator == 0):

                part_note = score.GraceNote('appoggiatura', **note_attributes)

            else:

                part_note = score.Note(**note_attributes)

            part.add(part_note, onset_divs, offset_divs)

    # add time signatures
    for (ts_beat_time, ts_bar, (ts_beats, ts_beat_type)) in ts:

        # bar start in divs (bar_times and offset are in quarters)
        bar_start_divs = int(divs * (bar_times[ts_bar] - offset))
        part.add(score.TimeSignature(ts_beats, ts_beat_type), bar_start_divs)

    # add key signatures
    for (ks_beat_time, ks_bar, keys) in mf.key_signatures:

        if len(keys) > 1:
            # there are multiple equivalent keys, so we check which one is
            # most likely according to the key estimator
            est_keys = estimate_key(notes_to_notearray(part.notes_tied), return_sorted_keys=True)
            idx = [est_keys.index(key) if key in est_keys else np.inf
                   for key in keys]
            key_name = keys[np.argmin(idx)]

        else:

            key_name = keys[0]

        fifths, mode = key_name_to_fifths_mode(key_name)
        part.add(score.KeySignature(fifths, mode), 0)

    add_staffs(part)
    # add_clefs(part)

    # add incomplete measure if necessary

    if offset < 0:

        part.add(score.Measure(number=1), 0, int(-offset * divs))

    # add the rest of the measures automatically
    score.add_measures(part)
    # print(part.pretty())
    score.tie_notes(part)
    score.find_tuplets(part)

    if not all([n.voice for n in part.notes_tied]):
        # print('notes without voice detected')
        add_voices(part)

    return part
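
A hedged usage sketch (not part of the original example). It assumes the module layout shown in the header (partitura/importmatch.py) and a MatchFile parser class in that module, consistent with the mf.snotes / mf.time_signatures accesses above; the match file name is hypothetical:

from partitura.importmatch import MatchFile, part_from_matchfile

mf = MatchFile('performance.match')  # hypothetical match file
part = part_from_matchfile(mf)
print(part.pretty())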