Пример #1
0
def assign_note_ids(parts):
    """Assign fresh, globally unique ids ('n0', 'n1', ...) to every note.

    Any pre-existing note ids are discarded; numbering is continuous
    across all parts.
    """
    counter = 0
    for part in score.iter_parts(parts):
        for note in part.notes:
            note.id = 'n{}'.format(counter)
            counter += 1
Пример #2
0
def assign_note_ids(parts, keep=False):
    """Assign ids to all notes and rests in `parts`.

    When `keep` is True, existing ids are preserved and only missing ids
    are filled in with per-part identifiers ('p{part}n{idx}' /
    'p{part}r{idx}'). Otherwise all ids are overwritten with globally
    unique ones ('n{idx}' for notes, 'r{idx}' for rests).
    """
    if keep:
        # Fill in ids only where absent, numbering within each part.
        for p_idx, part in enumerate(score.iter_parts(parts)):
            elements = part.iter_all(score.GenericNote,
                                     include_subclasses=True)
            for e_idx, element in enumerate(elements):
                if element.id is not None:
                    continue
                template = ("p{0}r{1}" if isinstance(element, score.Rest)
                            else "p{0}n{1}")
                element.id = template.format(p_idx, e_idx)
    else:
        # assign note ids to ensure uniqueness across all parts, discarding any
        # existing note ids
        note_counter = 0
        rest_counter = 0
        for part in score.iter_parts(parts):
            for element in part.iter_all(score.GenericNote,
                                         include_subclasses=True):
                if isinstance(element, score.Rest):
                    element.id = "r{}".format(rest_counter)
                    rest_counter += 1
                else:
                    element.id = "n{}".format(note_counter)
                    note_counter += 1
Пример #3
0
    def test_midi_export_mode_4(self):
        """Mode 4 must produce a single track with a single channel
        containing the notes of all parts."""
        m = self._export_and_read(mode=4)
        msg = ('Number of tracks {} does not equal 1 while '
               'testing part_voice_assign_mode=4 in save_score_midi'.format(
                   len(m.tracks)))
        self.assertEqual(1, len(m.tracks), msg)

        note_ch = note_channels(m.tracks[0])
        n_ch = len(set(note_ch))
        msg = ('Track should have 1 channel, '
               'but has {} channels'.format(n_ch))
        self.assertEqual(1, n_ch, msg)
        n_notes_trg = sum(
            len(part.notes_tied) for part in score.iter_parts(self.parts))
        n_notes = len(note_ch)
        msg = ('Track should have {} notes, '
               'but has {} notes'.format(n_notes_trg, n_notes))
        # BUG FIX: the message was constructed but never used in an
        # assertion, so the note-count check silently never ran.
        self.assertEqual(n_notes_trg, n_notes, msg)
Пример #4
0
def get_ppq(parts):
    """Return a common parts-per-quarter value for all parts.

    Computed as the least common multiple of every part's quarter
    divisions, so each part's times are representable exactly.
    """
    divisions = [part.quarter_durations()[:, 1]
                 for part in score.iter_parts(parts)]
    return np.lcm.reduce(np.concatenate(divisions))
Пример #5
0
def save_score_midi(parts,
                    out,
                    part_voice_assign_mode=0,
                    velocity=64,
                    anacrusis_behavior="shift"):
    """Write data from Part objects to a MIDI file

    Parameters
    ----------
    parts : Part, PartGroup or list of these
        The musical score to be saved.
    out : str or file-like object
        Either a filename or a file-like object to write the MIDI data
        to.
    part_voice_assign_mode : {0, 1, 2, 3, 4, 5}, optional
        This keyword controls how part and voice information is
        associated to track and channel information in the MIDI file.
        The semantics of the modes is as follows:

        0
            Write one track for each Part, with channels assigned by
            voices
        1
            Write one track for each PartGroup, with channels assigned by
            Parts (voice info is lost) (There can be multiple levels of
            partgroups, I suggest using the highest level of
            partgroup/part) [note: this will e.g. lead to all strings into
            the same track] Each part not in a PartGroup will be assigned
            its own track
        2
            Write a single track with channels assigned by Part (voice
            info is lost)
        3
            Write one track per Part, and a single channel for all voices
            (voice info is lost)
        4
            Write a single track with a single channel (Part and voice
            info is lost)
        5
            Return one track per <Part, voice> combination, each track
            having a single channel.

        The default mode is 0.
    velocity : int, optional
        Default velocity for all MIDI notes. Defaults to 64.
    anacrusis_behavior : {"shift", "pad_bar"}, optional
        Strategy to deal with anacrusis. If "shift", all
        time points are shifted by the anacrusis (i.e., the first
        note starts at 0). If "pad_bar", the "incomplete" bar  of
        the anacrusis is padded with silence. Defaults to 'shift'.
    """
    # NOTE(review): `velocity` is never forwarded to the note_on messages
    # created below, so mido's default velocity is used — confirm intended.

    # Common ticks-per-quarter for the whole file (LCM over all parts).
    ppq = get_ppq(parts)

    # events[key][abs_tick] -> list of Messages. Keys start out as
    # (partgroup, part, voice) tuples and are replaced by track numbers
    # once the track/channel mapping is known.
    events = defaultdict(lambda: defaultdict(list))
    meta_events = defaultdict(lambda: defaultdict(list))

    # ordered so the track/channel assignment is deterministic
    event_keys = OrderedDict()
    tempos = {}

    quarter_maps = [part.quarter_map for part in score.iter_parts(parts)]

    first_time_point = min(qm(0) for qm in quarter_maps)

    # ftp is the (quarter) time offset subtracted from every event time
    ftp = 0
    # Deal with anacrusis
    if first_time_point < 0:
        if anacrusis_behavior == "shift":
            # shift everything so the earliest event lands at tick 0
            ftp = first_time_point
        elif anacrusis_behavior == "pad_bar":
            time_signatures = []
            for qm, part in zip(quarter_maps, score.iter_parts(parts)):
                ts_beats, ts_beat_type = part.time_signature_map(0)
                time_signatures.append((ts_beats, ts_beat_type, qm(0)))
            # sort ts according to time
            time_signatures.sort(key=lambda x: x[2])
            # pad with one full bar of the earliest time signature
            # (bar length in quarters = beats / (beat_type / 4))
            ftp = -time_signatures[0][0] / (time_signatures[0][1] / 4)
        else:
            raise Exception(
                'Invalid anacrusis_behavior value, must be one of ("shift", "pad_bar")'
            )

    for qm, part in zip(quarter_maps, score.iter_parts(parts)):

        pg = get_partgroup(part)

        notes = part.notes_tied

        def to_ppq(t):
            # convert div times to new ppq
            return int(ppq * (qm(t) - ftp))

        for tp in part.iter_all(score.Tempo):
            tempos[to_ppq(tp.start.t)] = MetaMessage(
                "set_tempo", tempo=tp.microseconds_per_quarter)

        for ts in part.iter_all(score.TimeSignature):
            meta_events[part][to_ppq(ts.start.t)].append(
                MetaMessage("time_signature",
                            numerator=ts.beats,
                            denominator=ts.beat_type))

        for ks in part.iter_all(score.KeySignature):
            meta_events[part][to_ppq(ks.start.t)].append(
                MetaMessage("key_signature", key=ks.name))

        for note in notes:

            # key is a tuple (part_group, part, voice) that will be
            # converted into a (track, channel) pair.
            key = (pg, part, note.voice)
            events[key][to_ppq(note.start.t)].append(
                Message("note_on", note=note.midi_pitch))
            events[key][to_ppq(note.start.t + note.duration_tied)].append(
                Message("note_off", note=note.midi_pitch))
            event_keys[key] = True

    tr_ch_map = map_to_track_channel(list(event_keys.keys()),
                                     part_voice_assign_mode)

    # replace original event keys (partgroup, part, voice) by (track, ch) keys:
    for key in list(events.keys()):
        evs_by_time = events[key]
        del events[key]
        tr, ch = tr_ch_map[key]
        for t, evs in evs_by_time.items():
            events[tr][t].extend((ev.copy(channel=ch) for ev in evs))

    # figure out in which tracks to replicate the time/key signatures of each part
    part_track_map = partition(lambda x: x[0][1], tr_ch_map.items())
    for part, rest in part_track_map.items():
        part_track_map[part] = set(x[1][0] for x in rest)

    # add the time/key sigs to their corresponding tracks
    for part, m_events in meta_events.items():
        tracks = part_track_map[part]
        for tr in tracks:
            for t, me in m_events.items():
                # prepend so meta messages precede notes at the same tick
                events[tr][t] = me + events[tr][t]

    n_tracks = max(tr for tr, _ in tr_ch_map.values()) + 1
    tracks = [MidiTrack() for _ in range(n_tracks)]

    # tempo events are handled differently from key/time sigs because the have a
    # global effect. Instead of adding to each relevant track, like the key/time
    # sig events, we add them only to the first track
    for t, tp in tempos.items():
        events[0][t].insert(0, tp)

    # serialize: convert absolute ticks to per-track delta times
    for tr, events_by_time in events.items():
        t_prev = 0
        for t in sorted(events_by_time.keys()):
            evs = events_by_time[t]
            delta = t - t_prev
            for ev in evs:
                tracks[tr].append(ev.copy(time=delta))
                # only the first event at a given tick carries the delta
                delta = 0
            t_prev = t

    # MIDI type 0 for a single track, type 1 for multiple synchronous tracks
    midi_type = 0 if n_tracks == 1 else 1

    mf = MidiFile(type=midi_type, ticks_per_beat=ppq)

    for track in tracks:
        mf.tracks.append(track)

    if out:
        if hasattr(out, "write"):
            mf.save(file=out)
        else:
            mf.save(out)
Пример #6
0
def musicxml_to_notearray(fn, flatten_parts=True, sort_onsets=True,
                     expand_grace_notes=True, validate=False,
                     beat_times=True):
    """Return pitch, onset, and duration information for notes from a
    MusicXML file as a structured array.

    By default a single array is returned by combining the note
    information of all parts in the MusicXML file.

    Parameters
    ----------
    fn : str
        Path to a MusicXML file
    flatten_parts : bool
        If `True`, returns a single array containing all notes.
        Otherwise, returns a list of arrays for each part.
    sort_onsets : bool
        If `True`, sort the notes of each part (and of the flattened
        result) by onset time.
    expand_grace_notes : bool or 'delete'
        When True, grace note onset and durations will be adjusted to
        have a non-zero duration. When 'omit'/'delete'/'d', grace notes
        are removed from the result instead.
    validate : bool
        When True, validate the MusicXML file against the schema before
        parsing.
    beat_times : bool
        When True (default) return onset and duration in beats.
        Otherwise, return the onset and duration in divisions.

    Returns
    -------
    score : structured array or list of structured arrays
        Structured array containing the score. The fields are 'pitch',
        'onset' and 'duration'.

    Raises
    ------
    ValueError
        If `expand_grace_notes` is neither a boolean nor "delete".

    """

    if not isinstance(expand_grace_notes, (bool, str)):
        raise ValueError('`expand_grace_notes` must be a boolean or '
                         '"delete"')
    delete_grace_notes = False
    if isinstance(expand_grace_notes, str):

        if expand_grace_notes in ('omit', 'delete', 'd'):
            expand_grace_notes = False
            delete_grace_notes = True
        else:
            raise ValueError('`expand_grace_notes` must be a boolean or '
                             '"delete"')

    # Parse MusicXML
    parts = load_musicxml(fn, ensure_list=True, validate=validate)
    scr = []
    for part in score.iter_parts(parts):
        # Unfold any repetitions in part
        part = score.unfold_part_maximal(part)
        if expand_grace_notes:
            LOGGER.debug('Expanding grace notes...')
            score.expand_grace_notes(part)

        if beat_times:
            # get beat map
            bm = part.beat_map
            # Build score from beat map
            _score = np.array(
                [(n.midi_pitch, bm(n.start.t), bm(n.end_tied.t) - bm(n.start.t))
                 for n in part.notes_tied],
                dtype=[('pitch', 'i4'), ('onset', 'f4'), ('duration', 'f4')])
        else:
            _score = np.array(
                [(n.midi_pitch, n.start.t, n.end_tied.t - n.start.t)
                 for n in part.notes_tied],
                dtype=[('pitch', 'i4'), ('onset', 'i4'), ('duration', 'i4')])

        # Sort notes according to onset
        if sort_onsets:
            _score = _score[_score['onset'].argsort()]

        if delete_grace_notes:
            # zero-duration notes after (non-)expansion are grace notes
            LOGGER.debug('Deleting grace notes...')
            _score = _score[_score['duration'] != 0]
        scr.append(_score)

    # Return a structured array if the score has only one part
    if len(scr) == 1:
        return scr[0]
    elif len(scr) > 1 and flatten_parts:
        scr = np.vstack(scr)
        if sort_onsets:
            scr = scr[scr['onset'].argsort()]
        # BUG FIX: previously the function fell through (implicitly
        # returning None) when flattening with sort_onsets=False.
        return scr
    else:
        return scr
Пример #7
0
def n_items_per_part_voice(pg, cls):
    """Return, for each (part, voice) pair in `pg`, the number of `cls`
    items in that voice's part.

    Each part's item count is replicated once per distinct voice among
    the part's tied notes.
    """
    counts = []
    for part in score.iter_parts(pg):
        item_count = sum(1 for _ in part.iter_all(cls))
        voices = {note.voice for note in part.notes_tied}
        counts.extend([item_count] * len(voices))
    return counts
Пример #8
0
def get_part_voice_numbers(parts):
    """Count tied notes per (part index, voice) pair.

    Returns a Counter keyed by (part_index, voice).
    """
    counts = Counter()
    for part_idx, part in enumerate(score.iter_parts(parts)):
        counts.update((part_idx, note.voice) for note in part.notes_tied)
    return counts
Пример #9
0
 def setUp(self):
     # Build the shared example score used by the assignment-mode tests,
     # and keep a flat list of its parts for per-part assertions.
     self.parts = make_assignment_mode_example()
     # self.targets = get_partgroup_part_voice_numbers(self.parts)
     self.parts_list = list(score.iter_parts(self.parts))
Пример #10
0
def n_notes(pg):
    """Return the total number of tied notes across all parts of `pg`."""
    total = 0
    for part in score.iter_parts(pg):
        total += len(part.notes_tied)
    return total
Пример #11
0
def save_musicxml(parts, out=None):
    """Save a one or more Part or PartGroup instances in MusicXML format.

    Parameters
    ----------
    parts : list, Part, or PartGroup
        A :class:`partitura.score.Part` object,
        :class:`partitura.score.PartGroup` or a list of these
    out: str, file-like object, or None, optional
        Output file

    Returns
    -------
    None or str
        If no output file is specified using `out` the function returns the
        MusicXML data as a string. Otherwise the function returns None.

    """

    root = etree.Element('score-partwise')

    partlist_e = etree.SubElement(root, 'part-list')
    # mutable serialization state shared across measures (id/range counters)
    state = {
        'note_id_counter': {},
        'range_counter': {},
    }

    # stack of currently-open PartGroups, mirroring <part-group type="start">
    # elements that have not yet been closed with type="stop"
    group_stack = []

    def close_group_stack():
        # Emit a <part-group type="stop"> for every still-open group.
        while group_stack:
            # close group
            etree.SubElement(partlist_e, 'part-group',
                             number='{}'.format(group_stack[-1].number),
                             type='stop')
            # remove from stack
            group_stack.pop()

    def handle_parents(part):
        # Open/close <part-group> elements so that the group_stack matches
        # this part's chain of PartGroup ancestors.
        # 1. get deepest parent that is in group_stack (keep track of parents to
        # add)
        pg = part.parent
        to_add = []
        while pg:
            if pg in group_stack:
                break
            to_add.append(pg)
            pg = pg.parent


        # close groups while not equal to pg
        while group_stack:
            if pg == group_stack[-1]:
                break
            else:
                # close group
                etree.SubElement(partlist_e, 'part-group',
                                 number='{}'.format(group_stack[-1].number),
                                 type='stop')
                # remove from stack
                group_stack.pop()

        # start all parents in to_add
        # (reversed: outermost ancestor must be opened first)
        for pg in reversed(to_add):
            # start group
            pg_e = etree.SubElement(partlist_e, 'part-group',
                                    number='{}'.format(pg.number),
                                    type='start')
            if pg.group_symbol is not None:
                symb_e = etree.SubElement(pg_e, 'group-symbol')
                symb_e.text = pg.group_symbol
            if pg.group_name is not None:
                name_e = etree.SubElement(pg_e, 'group-name')
                name_e.text = pg.group_name

            group_stack.append(pg)


    for part in score.iter_parts(parts):

        handle_parents(part)

        # handle part list entry
        scorepart_e = etree.SubElement(partlist_e, 'score-part', id=part.id)

        partname_e = etree.SubElement(scorepart_e, 'part-name')
        if part.part_name:
            partname_e.text = filter_string(part.part_name)

        if part.part_abbreviation:
            partabbrev_e = etree.SubElement(scorepart_e, 'part-abbreviation')
            partabbrev_e.text = filter_string(part.part_abbreviation)


        # write the part itself

        part_e = etree.SubElement(root, 'part', id=part.id)
        # store quarter_map in a variable to avoid re-creating it for each call
        quarter_map = part.quarter_map
        beat_map = part.beat_map
        # ts = part.get_all(score.TimeSignature)

        for measure in part.iter_all(score.Measure):

            part_e.append(etree.Comment(MEASURE_SEP_COMMENT))
            attrib = {}

            if measure.number is not None:

                attrib['number'] = str(measure.number)

            measure_e = etree.SubElement(part_e, 'measure', **attrib)
            contents = linearize_measure_contents(part,
                                                  measure.start,
                                                  measure.end,
                                                  state)
            measure_e.extend(contents)

    # close any groups left open after the last part
    close_group_stack()

    if out:

        if hasattr(out, 'write'):

            out.write(etree.tostring(root.getroottree(), encoding='UTF-8',
                                     xml_declaration=True,
                                     pretty_print=True, doctype=DOCTYPE))

        else:

            with open(out, 'wb') as f:

                f.write(etree.tostring(root.getroottree(), encoding='UTF-8',
                                       xml_declaration=True,
                                       pretty_print=True, doctype=DOCTYPE))

    else:

        return etree.tostring(root.getroottree(), encoding='UTF-8',
                              xml_declaration=True,
                              pretty_print=True, doctype=DOCTYPE)
Пример #12
0
def load_score_midi(fn, part_voice_assign_mode=0, ensure_list=False,
                    quantization_unit=None, estimate_voice_info=True,
                    estimate_key=False, assign_note_ids=True):
    """Load a musical score from a MIDI file and return it as a Part
    instance.

    This function interprets MIDI information as describing a score.
    Pitch names are estimated using Meredith's PS13 algorithm [1]_.
    Assignment of notes to voices can either be done using Chew and
    Wu's voice separation algorithm [2]_, or by choosing one of the
    part/voice assignment modes that assign voices based on
    track/channel information. Furthermore, the key signature can be
    estimated based on Krumhansl's 1990 key profiles [3]_.

    This function expects times to be metrical/quantized. Optionally a
    quantization unit may be specified. If you wish to access the non-
    quantized time of MIDI events you may wish to used the
    `load_performance_midi` function instead.

    Parameters
    ----------
    fn : str
        Path to MIDI file
    part_voice_assign_mode : {0, 1, 2, 3, 4, 5}, optional
        This keyword controls how part and voice information is
        associated to track and channel information in the MIDI file.
        The semantics of the modes is as follows:

        0
            Return one Part per track, with voices assigned by channel
        1
            Return one PartGroup per track, with Parts assigned by channel
            (no voices)
        2
            Return single Part with voices assigned by track (tracks are
            combined, channel info is ignored)
        3
            Return one Part per track, without voices (channel info is
            ignored)
        4
            Return single Part without voices (channel and track info is
            ignored)
        5
            Return one Part per <track, channel> combination, without
            voices  Defaults to 0.
    ensure_list : bool, optional
        When True, return a list independent of how many part or partgroup
        elements were created from the MIDI file. By default, when the
        return value of `load_score_midi` produces a single 
        :class:`partitura.score.Part` or :class:`partitura.score.PartGroup`
        element, the element itself is returned instead of a list
        containing the element. Defaults to False.
    quantization_unit : integer or None, optional
        Quantize MIDI times to multiples of this unit. If None, the
        quantization unit is chosen automatically as the smallest
        division of the parts per quarter (MIDI "ticks") that can be
        represented as a symbolic duration. Defaults to None.
    estimate_key : bool, optional
        When True use Krumhansl's 1990 key profiles [3]_ to determine
        the most likely global key, discarding any key information in
        the MIDI file.
    estimate_voice_info : bool, optional
        When True use Chew and Wu's voice separation algorithm [2]_ to
        estimate voice information. This option is ignored for
        part/voice assignment modes that infer voice information from
        the track/channel info (i.e. `part_voice_assign_mode` equals
        1, 3, 4, or 5). Defaults to True.
    assign_note_ids : bool, optional
        When True, assign ids ('n0', 'n1', ...) to the notes;
        otherwise note ids are left as None. Defaults to True.

    Returns
    -------
    :class:`partitura.score.Part`, :class:`partitura.score.PartGroup`, or a list of these
        One or more part or partgroup objects

    References
    ----------
    .. [1] Meredith, D. (2006). "The ps13 Pitch Spelling Algorithm". Journal 
           of New Music Research, 35(2):121.
    .. [2] Chew, E. and Wu, Xiaodan (2004) "Separating Voices in
           Polyphonic Music: A Contig Mapping Approach". In Uffe Kock, 
           editor, Computer Music Modeling and Retrieval (CMMR), pp. 1–20, 
           Springer Berlin Heidelberg.
    .. [3] Krumhansl, Carol L. (1990) "Cognitive foundations of musical pitch",
           Oxford University Press, New York.

    """
    mid = mido.MidiFile(fn)
    divs = mid.ticks_per_beat

    # these lists will contain information from dedicated tracks for meta
    # information (i.e. without notes)
    global_time_sigs = []
    global_key_sigs = []
    global_tempos = []

    # these dictionaries will contain meta information indexed by track (only
    # for tracks that contain notes)
    time_sigs_by_track = {}
    key_sigs_by_track = {}
    tempos_by_track = {}
    track_names_by_track = {}
    # notes are indexed by (track, channel) tuples
    notes_by_track_ch = {}
    relevant = {'time_signature',
                'key_signature',
                'set_tempo',
                'note_on',
                'note_off'}
    for track_nr, track in enumerate(mid.tracks):
        time_sigs = []
        key_sigs = []
        # tempos = []
        notes = defaultdict(list)
        # dictionary for storing the last onset time and velocity for each
        # individual note (i.e. same pitch and channel)
        sounding_notes = {}
        # current time (will be updated by delta times in messages)
        t_raw = 0

        for msg in track:

            t_raw = t_raw + msg.time

            if msg.type not in relevant:
                continue

            if quantization_unit:
                t = quantize(t_raw, quantization_unit)
            else:
                t = t_raw

            if msg.type == 'time_signature':
                time_sigs.append((t, msg.numerator, msg.denominator))
            if msg.type == 'key_signature':
                key_sigs.append((t, msg.key))
            if msg.type == 'set_tempo':
                # tempo is global regardless of which track it appears in;
                # convert microseconds-per-quarter to quarters-per-minute
                global_tempos.append((t, 60*10**6/msg.tempo))
            else:
                # NOTE: time/key signature messages also reach this branch,
                # but they are filtered out by the note_on/note_off check.
                note_on = msg.type == 'note_on'
                note_off = msg.type == 'note_off'

                if not (note_on or note_off):
                    continue

                # hash sounding note
                note = note_hash(msg.channel, msg.note)

                # start note if it's a 'note on' event with velocity > 0
                if note_on and msg.velocity > 0:

                    # save the onset time and velocity
                    sounding_notes[note] = (t, msg.velocity)

                # end note if it's a 'note off' event or 'note on' with velocity 0
                elif note_off or (note_on and msg.velocity == 0):

                    if note not in sounding_notes:
                        # note-off without a matching note-on
                        warnings.warn('ignoring MIDI message %s' % msg)
                        continue

                    # append the note to the list associated with the channel
                    notes[msg.channel].append((sounding_notes[note][0], msg.note, t-sounding_notes[note][0]))
                                              # sounding_notes[note][1]])
                    # remove hash from dict
                    del sounding_notes[note]

        # if a track has no notes, we assume it may contain global time/key sigs
        if not notes:
            global_time_sigs.extend(time_sigs)
            global_key_sigs.extend(key_sigs)
        else:
            # if there are note, we store the info under the track number
            time_sigs_by_track[track_nr] = time_sigs
            key_sigs_by_track[track_nr] = key_sigs
            track_names_by_track[track_nr] = track.name

        for ch, ch_notes in notes.items():
            # if there are any notes, store the notes along with key sig / time
            # sig / tempo information under the key (track_nr, ch_nr)
            if len(ch_notes) > 0:
                notes_by_track_ch[(track_nr, ch)] = ch_notes

    tr_ch_keys = sorted(notes_by_track_ch.keys())
    group_part_voice_keys, part_names, group_names = assign_group_part_voice(
        part_voice_assign_mode,
        tr_ch_keys,
        track_names_by_track)

    # for key and time sigs:
    track_to_part_mapping = make_track_to_part_mapping(
        tr_ch_keys,
        group_part_voice_keys)

    # pairs of (part, voice) for each note
    part_voice_list = [[part, voice] for tr_ch, (_, part, voice)
                       in zip(tr_ch_keys, group_part_voice_keys)
                       for i in range(len(notes_by_track_ch[tr_ch]))]

    # pitch spelling, voice estimation and key estimation are done on a
    # structured array (onset, pitch, duration) of all notes in the piece
    # jointly, so we concatenate all notes
    # note_list = sorted(note for notes in (notes_by_track_ch[key] for key in tr_ch_keys) for note in notes)
    note_list = [note for notes in (notes_by_track_ch[key]
                                    for key in tr_ch_keys)
                 for note in notes]
    # BUG FIX: use the builtin `int` instead of `np.int`; the `np.int`
    # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24.
    note_array = np.array(note_list, dtype=[('onset', int),
                                            ('pitch', int),
                                            ('duration', int)])

    LOGGER.debug('pitch spelling')
    spelling_global = analysis.estimate_spelling(note_array)

    if estimate_voice_info:
        LOGGER.debug('voice estimation')
        # TODO: deal with zero duration notes in note_array. Zero duration notes are currently deleted
        estimated_voices = analysis.estimate_voices(note_array)
        assert len(part_voice_list) == len(estimated_voices)
        # only fill in voices that were not fixed by the assignment mode
        for part_voice, voice_est in zip(part_voice_list, estimated_voices):
            if part_voice[1] is None:
                part_voice[1] = voice_est

    if estimate_key:
        LOGGER.debug('key estimation')
        _, mode, fifths = analysis.estimate_key(note_array)
        # discard any key information from the file in favor of the estimate
        key_sigs_by_track = {}
        global_key_sigs = [(0, fifths_mode_to_key_name(fifths, mode))]

    if assign_note_ids:
        note_ids = ['n{}'.format(i) for i in range(len(note_array))]
    else:
        note_ids = [None for i in range(len(note_array))]

    # replicate per-track time signatures into every part built from the track
    time_sigs_by_part = defaultdict(set)
    for tr, ts_list in time_sigs_by_track.items():
        for ts in ts_list:
            for part in track_to_part_mapping[tr]:
                time_sigs_by_part[part].add(ts)
    for ts in global_time_sigs:
        for part in set(part for _, part, _ in group_part_voice_keys):
            time_sigs_by_part[part].add(ts)

    # same replication for key signatures
    key_sigs_by_part = defaultdict(set)
    for tr, ks_list in key_sigs_by_track.items():
        for ks in ks_list:
            for part in track_to_part_mapping[tr]:
                key_sigs_by_part[part].add(ks)
    for ks in global_key_sigs:
        for part in set(part for _, part, _ in group_part_voice_keys):
            key_sigs_by_part[part].add(ks)

    # names_by_part = defaultdict(set)
    # for tr_ch, pg_p_v in zip(tr_ch_keys, group_part_voice_keys):
    #     print(tr_ch, pg_p_v)
    # for tr, name in track_names_by_track.items():
    #     print(tr, track_to_part_mapping, name)
    #     for part in track_to_part_mapping[tr]:
    #         names_by_part[part] = name

    # group notes (with voice, spelling and id) by the part they belong to
    notes_by_part = defaultdict(list)
    for (part, voice), note, spelling, note_id in zip(part_voice_list,
                                                      note_list,
                                                      spelling_global,
                                                      note_ids):
        notes_by_part[part].append((note, voice, spelling, note_id))

    partlist = []
    part_to_part_group = dict((p, pg) for pg, p, _ in group_part_voice_keys)
    part_groups = {}
    for part_nr, note_info in notes_by_part.items():
        notes, voices, spellings, note_ids = zip(*note_info)
        part = create_part(divs, notes, spellings, voices, note_ids,
                           sorted(time_sigs_by_part[part_nr]),
                           sorted(key_sigs_by_part[part_nr]),
                           part_id='P{}'.format(part_nr+1),
                           part_name=part_names.get(part_nr, None))

        # print(part.pretty())
        # if this part has an associated part_group number we create a PartGroup
        # if necessary, and add the part to that. The newly created PartGroup is
        # then added to the partlist.
        pg_nr = part_to_part_group[part_nr]
        if pg_nr is None:
            partlist.append(part)
        else:
            if pg_nr not in part_groups:
                part_groups[pg_nr] = score.PartGroup(group_name=group_names.get(pg_nr, None))
                partlist.append(part_groups[pg_nr])
            part_groups[pg_nr].children.append(part)

    # add tempos to first part
    part = next(score.iter_parts(partlist))
    for t, qpm in global_tempos:
        part.add(score.Tempo(qpm, unit='q'), t)

    if not ensure_list and len(partlist) == 1:
        return partlist[0]
    else:
        return partlist
Пример #13
0
def save_score_midi(parts, out, part_voice_assign_mode=0, velocity=64):
    """Write data from Part objects to a MIDI file

    Parameters
    ----------
    parts : Part, PartGroup or list of these
        The musical score to be saved.
    out : str or file-like object
        Either a filename or a file-like object to write the MIDI data
        to.
    part_voice_assign_mode : {0, 1, 2, 3, 4, 5}, optional
        This keyword controls how part and voice information is
        associated to track and channel information in the MIDI file.
        The semantics of the modes is as follows:

        0
            Write one track for each Part, with channels assigned by
            voices
        1
            Write one track for each PartGroup, with channels assigned by
            Parts (voice info is lost) (There can be multiple levels of
            partgroups, I suggest using the highest level of
            partgroup/part) [note: this will e.g. lead to all strings into
            the same track] Each part not in a PartGroup will be assigned
            its own track
        2
            Write a single track with channels assigned by Part (voice
            info is lost)
        3
            Write one track per Part, and a single channel for all voices
            (voice info is lost)
        4
            Write a single track with a single channel (Part and voice
            info is lost)
        5
            Return one track per <Part, voice> combination, each track
            having a single channel.

    velocity : int, optional
        Default velocity for all MIDI notes.

    """

    # single PPQ (ticks per quarter note) shared by all parts
    ppq = get_ppq(parts)

    # events[key][tick] -> list of mido Messages; `key` starts as a
    # (partgroup, part, voice) tuple and is later rewritten to a track number
    events = defaultdict(lambda: defaultdict(list))
    # per-part time/key signature MetaMessages, replicated per track later
    meta_events = defaultdict(lambda: defaultdict(list))

    # OrderedDict used as an ordered set of (partgroup, part, voice) keys,
    # preserving first-encounter order for track/channel assignment
    event_keys = OrderedDict()
    tempos = {}

    for i, part in enumerate(score.iter_parts(parts)):

        pg = get_partgroup(part)

        notes = part.notes_tied
        qm = part.quarter_map
        q_offset = qm(part.first_point.t)

        def to_ppq(t):
            # convert div times to new ppq
            return int(ppq * qm(t))

        # tempos are collected globally (not per part): they affect all tracks
        for tp in part.iter_all(score.Tempo):
            tempos[to_ppq(tp.start.t)] = MetaMessage(
                'set_tempo', tempo=tp.microseconds_per_quarter)

        for ts in part.iter_all(score.TimeSignature):
            meta_events[part][to_ppq(ts.start.t)].append(
                MetaMessage('time_signature',
                            numerator=ts.beats,
                            denominator=ts.beat_type))

        for ks in part.iter_all(score.KeySignature):
            meta_events[part][to_ppq(ks.start.t)].append(
                MetaMessage('key_signature', key=ks.name))

        for note in notes:

            # key is a tuple (part_group, part, voice) that will be converted into a (track, channel) pair.
            key = (pg, part, note.voice)
            events[key][to_ppq(note.start.t)].append(
                Message('note_on', note=note.midi_pitch))
            events[key][to_ppq(note.end_tied.t)].append(
                Message('note_off', note=note.midi_pitch))

            event_keys[key] = True

    tr_ch_map = map_to_track_channel(list(event_keys.keys()),
                                     part_voice_assign_mode)

    # replace original event keys (partgroup, part, voice) by (track, ch) keys:
    for key in list(events.keys()):
        evs_by_time = events[key]
        del events[key]
        tr, ch = tr_ch_map[key]
        for t, evs in evs_by_time.items():
            # stamp the channel onto each message as it is re-filed by track
            events[tr][t].extend((ev.copy(channel=ch) for ev in evs))

    # figure out in which tracks to replicate the time/key signatures of each part
    part_track_map = partition(lambda x: x[0][1], tr_ch_map.items())
    for part, rest in part_track_map.items():
        part_track_map[part] = set(x[1][0] for x in rest)

    # add the time/key sigs to their corresponding tracks; meta messages are
    # prepended so they precede note events at the same tick
    for part, m_events in meta_events.items():
        tracks = part_track_map[part]
        for tr in tracks:
            for t, me in m_events.items():
                events[tr][t] = me + events[tr][t]

    n_tracks = max(tr for tr, _ in tr_ch_map.values()) + 1
    tracks = [MidiTrack() for _ in range(n_tracks)]

    # tempo events are handled differently from key/time sigs because they have
    # a global effect. Instead of adding to each relevant track, like the
    # key/time sig events, we add them only to the first track
    for t, tp in tempos.items():
        events[0][t].insert(0, tp)

    # flatten each track's events into mido's delta-time representation
    for tr, events_by_time in events.items():
        t_prev = 0
        for t in sorted(events_by_time.keys()):
            evs = events_by_time[t]
            delta = t - t_prev
            for ev in evs:
                # only the first event at a tick carries the delta; the rest
                # are simultaneous (delta 0)
                tracks[tr].append(ev.copy(time=delta))
                delta = 0
            t_prev = t

    # MIDI type 0 requires a single track; use type 1 otherwise
    midi_type = 0 if n_tracks == 1 else 1

    mf = MidiFile(type=midi_type, ticks_per_beat=ppq)

    for track in tracks:
        mf.tracks.append(track)

    if out:
        if hasattr(out, 'write'):
            mf.save(file=out)
        else:
            mf.save(out)
Пример #14
0
def musicxml_to_notearray(
    fn,
    flatten_parts=True,
    include_pitch_spelling=False,
    include_key_signature=False,
    include_time_signature=False,
):
    """Return pitch, onset, and duration information for notes from a
    MusicXML file as a structured array.

    By default a single array is returned by combining the note
    information of all parts in the MusicXML file.

    Parameters
    ----------
    fn : str
        Path to a MusicXML file
    flatten_parts : bool
        If `True`, returns a single array containing all notes.
        Otherwise, returns a list of arrays for each part.
    include_pitch_spelling : bool (optional)
        If `True`, includes pitch spelling information for each
        note. Default is False
    include_key_signature : bool (optional)
        If `True`, includes key signature information, i.e.,
        the key signature at the onset time of each note (all
        notes starting at the same time have the same key signature).
        Default is False
    include_time_signature : bool (optional)
        If `True`,  includes time signature information, i.e.,
        the time signature at the onset time of each note (all
        notes starting at the same time have the same time signature).
        Default is False

    Returns
    -------
    score : structured array or list of structured arrays
        Structured array or list of structured arrays containing
        score information.
    """

    loaded_parts = load_musicxml(fn, ensure_list=True, force_note_ids="keep")

    # One note array per part, computed on the maximally-unfolded version of
    # each part (i.e. with all repetitions written out).
    note_arrays = [
        ensure_notearray(
            notearray_or_part=score.unfold_part_maximal(part),
            include_pitch_spelling=include_pitch_spelling,
            include_key_signature=include_key_signature,
            include_time_signature=include_time_signature,
        )
        for part in score.iter_parts(loaded_parts)
    ]

    # A single part is always returned unwrapped; multiple parts are either
    # stacked into one array or returned as a list, depending on flatten_parts.
    if len(note_arrays) == 1:
        return note_arrays[0]
    if flatten_parts and len(note_arrays) > 1:
        return np.hstack(note_arrays)
    return note_arrays