Code example #1
import midi  # python-midi: MIDI event/pattern classes used throughout
import numpy as np

# Project-local helpers; this import path is assumed from the MuGen.utils.MidiObj
# reference in the docstrings below -- adjust to your package layout.
from MuGen import maps, temporal, utils


def p0():
    typs_2_keep = (midi.NoteOnEvent, midi.NoteOffEvent, midi.SetTempoEvent)
    off_mode = True  # whether to encode NoteOffEvents or just NoteOnEvents with velocity 0
    q = 8  # quantization factor of velocity
    q_map = maps.create_q_map(128, q, encode=True, decode=True)
    t = 8  # the smallest timestep in milliseconds
    n_vel = utils.dynamic_order(128, q)  # number of distinct velocity levels after quantization
    n_time = 48  # number of one-hot slots reserved for time slips
    n_pitch = 128 * 2 if off_mode else 128  # Available pitches
    n_pulse = 0  # Number of added pulses
    n_vocab = n_time + n_pitch + n_vel + n_pulse  # Available choices
    time_encoder = temporal.base_digits_encoder  # temporal.timeslips_encoder
    time_decoder = temporal.base_digits_decoder  # temporal.timeslips_decoder
    ekwa = dict(b=n_time)  # dict(t=t, n_time=n_time)
    dkwa = dict(b=n_time)
    return locals()
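
The `return locals()` idiom above packages every name defined in `p0()` into a single
config dict. A minimal usage sketch (hypothetical; the printed total assumes
`utils.dynamic_order(128, 8)` returns 16):

cfg = p0()
print(cfg["n_vocab"])  # 48 time + 256 pitch + 16 velocity + 0 pulse = 320, under the assumption above
print(cfg["time_encoder"] is temporal.base_digits_encoder)  # True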
Code example #2
def categorize_input(x, q, n_time, off_mode, time_encoder, ekwa, **kwargs):
    """
    This function is the inverse of decategorize_output.
    It encodes a midi sequence, given as a midi.Track, a midi.Pattern (with one track),
    or a MuGen.utils.MidiObj, into an encoded/serialized sequence of integers.
    A midi.events object, often referred to in this code as an 'evnt', is encoded as an
    integer, an 'avnt'. The evnt/avnt terminology imitates the efferent/afferent neuron
    terminology of neuroscience.

    Args:
        x             - list/midi.Track/midi.Pattern, iterable containing midi.Events
        q             - int, quantization factor of velocity
        n_time        - int, number of bits in one-hot encoding that are used for time slips
        off_mode      - bool, True if NoteOffEvents are included in input sequence
        time_encoder  - callable, a function to encode milliseconds
        ekwa          - dict, keyword arguments for the encoder
        
        n_vel         - int, number of bits in one-hot encoding that are used for velocity, 
                        this value is dependent on q
        q_map         - dict, maps quantized velocities to encoding
        p_map         - dict, maps added pulses/note divisions to encoding
        asarray       - bool, default=False, whether to return output as a numpy array
        dtype         - str, default='int', data type of the returned array
        bpm           - int, default=120, tempo in Beats Per Minute, initial tempo of the input
                        sequence, updated dynamically as the tempo changes in the input sequence
        sort_velocity - bool, default=False, whether velocity values should be in sorted order
        sort_pitch    - bool, default=False, whether pitch values should be in sorted order
        sort_pulse    - bool, default=False, whether pulse values should be in sorted order

    Vars:
        n_pitch       - int, number of bits in one-hot encoding that are used for pitch,
                        i.e. NoteOnEvents; if off_mode is True an additional 128 bits are
                        used for NoteOffEvents, otherwise x should only contain NoteOnEvents
                        with velocity=0 to signify NoteOffEvents
        n_t_p         - int, n_time + n_pitch
        n_t_p_v       - int, n_time + n_pitch + n_vel
        n_vocab       - int, n_time + n_pitch + n_vel + n_pulse, total number of bits used 
                        for one-hot encoding
        mspt          - float, default=None, milliseconds per tick, updated dynamically as we iterate 
                        through the input sequence (x)
        tick          - int, holds the current tick as we iterate through the input sequence (x)
        timestep      - list, as we iterate through the input sequence all events that occur 
                        simultaneously, in a single 'timestep', will be collected here before being
                        processed/encoded/serialized

    Returns:
        ret           - list[int]/np.ndarray[int] - encoded and serialized midi sequence
    """

    if isinstance(x, utils.MidiObj):
        x = x.ptrn

    n_vel = kwargs.get("n_vel", utils.dynamic_order(128, q))
    q_map = kwargs.get("q_map", maps.create_q_map(128, q))
    p_map = kwargs.get("p_map", None)
    asarray = kwargs.get("asarray", False)
    dtype = kwargs.get("dtype", "int")
    sort_velocity = kwargs.get("sort_velocity", False)
    sort_pitch = kwargs.get("sort_pitch", False)
    sort_pulse = kwargs.get("sort_pulse", False)
    bpm = kwargs.get("bpm", 120)  # beats per minute

    n_pitch = 128 * 2 if off_mode else 128
    n_pulse = len(p_map.keys()) if p_map is not None else 0
    n_t_p = n_time + n_pitch
    n_t_p_v = n_time + n_pitch + n_vel
    n_vocab = n_time + n_pitch + n_vel + n_pulse  # n_t_p_v_p

    mspt = None  # milliseconds per tick
    tick = 0
    timestep = []  # timestep/time bubble
    ret = []  # output sequence

    t_encoder = lambda ms: time_encoder(ms, **ekwa)  # bind the encoder kwargs once
    args = [ret, timestep, tick, bpm, mspt]
    static_args = (
        q,
        n_time,
        n_t_p,
        n_t_p_v,
        q_map,
        p_map,
        t_encoder,
        sort_velocity,
        sort_pitch,
        sort_pulse,
    )
    _processor = lambda state: process_timestep(*state, *static_args)

    for evnt in utils.evnt_gen(x):
        if evnt.tick != 0:
            # a nonzero tick closes the current timestep: flush it and start a new one
            args = _processor(args)
            args[2] = tick = evnt.tick
        args[1].append(evnt)  # collect simultaneous events into the open timestep
    ret = _processor(args)[0]  # flush the final timestep
    if asarray:
        ret = np.array(ret, dtype=dtype)
    return ret
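
A minimal encoding sketch (hypothetical; "song.mid" is a placeholder file, and the
config comes from p0() above):

cfg = p0()
ptrn = midi.read_midifile("song.mid")  # python-midi reader
seq = categorize_input(
    ptrn,
    cfg["q"], cfg["n_time"], cfg["off_mode"],
    cfg["time_encoder"], cfg["ekwa"],
    q_map=cfg["q_map"], asarray=True,
)
print(len(seq), seq.min(), seq.max())  # every avnt should fall in [0, n_vocab)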
Code example #3
def decategorize_output_with_drums(x, q, n_time, off_mode, drm_mode,
                                   time_decoder, dkwa, **kwargs):
    """
    This function is the inverse of categorize_input.
    It decodes an iterable of avnts into a midi.Pattern containing midi.Events,
    'evnts'.

    Args:
        x             - list[int, ..., int], containing encoded/serialized midi sequence
        q             - int, quantization factor of velocity
        n_time        - int, number of bits used to encode time slips
        off_mode      - bool, True if NoteOffEvents are included in input sequence
        drm_mode      - bool, True if drum notes (MIDI channel 9, notes 35-81) are included
                        in the vocabulary
        time_decoder  - callable, a function to decode to milliseconds
        dkwa          - dict, keyword arguments for the decoder

        n_vel         - int, number of bits in one-hot encoding that are used for velocity, 
                        this value is dependent on q
        q_map         - dict, maps quantized velocities to encoding
        p_map         - dict, maps added pulses/note divisions to encoding
        dtype         - str, default='int', data type of the returned array
        bpm           - int, default=120, tempo in Beats Per Minute, used to derive the
                        milliseconds-per-tick conversion for the output sequence
        include_end   - bool, default=False, whether to append a midi.EndOfTrackEvent to the track

    Vars:
        n_pitch       - int, number of bits in one-hot encoding that are used for pitch,
                        i.e. NoteOnEvents; if off_mode is True the note range is doubled to
                        make room for NoteOffEvents, otherwise x should only contain
                        NoteOnEvents with velocity=0 to signify NoteOffEvents
        n_drum        - int, 47 if drm_mode else 0, number of drum-note slots (MIDI notes 35-81)
        n_note        - int, 2 * (n_pitch + n_drum) if off_mode else n_pitch + n_drum
        n_t_n         - int, n_time + n_note
        n_t_n_v       - int, n_time + n_note + n_vel
        n_vocab       - int, n_time + n_note + n_vel + n_pulse, total number of bits used 
                        for one-hot encoding
        mspt          - float, default=None, milliseconds per tick, updated dynamically as we iterate 
                        through the input sequence (x)
        tick          - int, holds the current tick as we iterate through the input sequence (x)
    
    Returns:
        ptrn          - midi.Pattern, a playable midi sequence
    """
    n_vel = kwargs.get("n_vel", utils.dynamic_order(128, q))
    q_map = kwargs.get("q_map",
                       maps.create_q_map(128, q, encode=False, decode=True))
    p_map = kwargs.get("p_map", None)
    dtype = kwargs.get("dtype", "int")
    bpm = kwargs.get("bpm", 120)
    include_end = kwargs.get("include_end", False)

    n_pitch = 128
    n_drum = 47 if drm_mode else 0
    n_note = 2 * (n_pitch + n_drum) if off_mode else n_pitch + n_drum
    n_pulse = len(p_map.keys()) if p_map is not None else 0
    n_t_p = n_time + n_pitch
    n_t_p_d = n_time + n_pitch + n_drum
    n_t_p_d_p = n_t_p_d + n_pitch

    n_t_n = n_time + n_note
    n_t_n_v = n_time + n_note + n_vel
    n_vocab = n_time + n_note + n_vel + n_pulse  # n_t_n_v_p

    istime = lambda avnt: avnt < n_time                  # time-slip region of the vocabulary
    isvelocity = lambda avnt: n_t_n <= avnt < n_t_n_v    # velocity region
    isnote = lambda avnt: n_time <= avnt < n_t_n         # any note: on or off, pitch or drum
    ison = lambda avnt: n_time <= avnt < n_t_p_d         # NoteOn region

    if off_mode:
        ispitch = lambda avnt: n_time <= avnt < n_t_p or n_t_p_d <= avnt < n_t_p_d_p
    else:
        ispitch = lambda avnt: n_time <= avnt < n_t_p

    tick = 0
    velocity = 0
    tick_flag = False
    mspt = utils.bpm_to_mspt(bpm)
    cache = []
    ptrn = midi.Pattern(format=1, tick_relative=True, resolution=480)
    trck = midi.Track(tick_relative=True)
    ptrn.append(trck)

    for avnt in x:
        if isvelocity(avnt):
            velocity = q_map["decode"][avnt - n_t_n]
            velocity_flag = True
        elif isnote(avnt):
            if tick_flag:
                tick = int(round(time_decoder(cache, **dkwa) / mspt))
                cache = []
                tick_flag = False
            else:
                tick = 0
            if ispitch(avnt):
                channel = 0  # melodic channel
                adj_on = n_time
                adj_off = n_t_p_d
            else:
                channel = 9  # general MIDI percussion channel
                adj_on = n_t_p - 35  # drum vocabulary starts at MIDI note 35
                adj_off = n_t_p_d_p - 35
            if ison(avnt):
                evnt = midi.NoteOnEvent(tick=tick,
                                        data=[avnt - adj_on, velocity],
                                        channel=channel)
            else:
                evnt = midi.NoteOffEvent(tick=tick,
                                         data=[avnt - adj_off, velocity],
                                         channel=channel)
            trck.append(evnt)
            velocity_flag = False

        elif istime(avnt):
            cache.append(avnt)
            tick_flag = True
        else:
            pass  # potential to add percussion or 'pulse' events
    if include_end:
        trck.append(midi.EndOfTrackEvent(tick=200))
    return ptrn
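
A matching decode sketch (hypothetical; continues from the encoding example above, with
drm_mode=False since p0() reserves no drum slots):

out = decategorize_output_with_drums(
    seq,
    cfg["q"], cfg["n_time"], cfg["off_mode"], False,  # drm_mode=False
    cfg["time_decoder"], cfg["dkwa"],
    q_map=cfg["q_map"],  # built with decode=True in p0(), so the "decode" table exists
    include_end=True,
)
midi.write_midifile("roundtrip.mid", out)  # python-midi writer; output path is a placeholder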