Example #1
def pianoroll_to_midi(snippet, filename="Sampled/sample.midi"):
    snippet = np.asarray(snippet, dtype=np.uint8)
    snippet = snippet * 127  # scale binary notes (0/1) to velocity 127, the MIDI maximum

    if snippet.shape[1] == 89:
        snippet = one_hot_pianoroll_to_small_pianoroll(snippet)
        snippet = small_to_full_pianoroll(snippet)
    elif snippet.shape[1] == 88:
        snippet = small_to_full_pianoroll(snippet)
    elif snippet.shape[1] != 128:
        raise ValueError(
            "input does not have 128 pitches (88- and 89-pitch rolls are converted automatically) and cannot be converted to MIDI!"
        )

    snippet = ppr.Track(pianoroll=snippet)
    snippet = ppr.Multitrack(tracks=[snippet], tempo=120, beat_resolution=4)
    ppr.write(snippet, path_to_root + filename)
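A minimal smoke test for pianoroll_to_midi might look like the following; it assumes path_to_root and the two pitch-range helpers are defined in the surrounding module, and uses a 128-pitch input so no conversion path is taken (the file name is illustrative):

import numpy as np

# random binary snippet: 64 timesteps over the full 128-pitch range
snippet = np.random.randint(0, 2, size=(64, 128), dtype=np.uint8)
pianoroll_to_midi(snippet, filename="Sampled/smoke_test.midi")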
Example #2
def get_metrics_from_midi(path):
    metrics = {}

    try:
        track = pypianoroll.Multitrack(path)
        proll = track.get_merged_pianoroll()

        metrics["pitches"] = [pypianoroll.metrics.n_pitches_used(proll)]
        metrics["pitch_classes"] = [pypianoroll.metrics.n_pitch_classes_used(proll)]
        metrics["empty_beats"] = [pypianoroll.metrics.empty_beat_rate(proll, track.beat_resolution)]
        metrics["polyphony_1"] = [pypianoroll.metrics.polyphonic_rate(proll, threshold=1)]
        metrics["polyphony_2"] = [pypianoroll.metrics.polyphonic_rate(proll, threshold=2)]
        metrics["polyphony_3"] = [pypianoroll.metrics.polyphonic_rate(proll, threshold=3)]
        metrics["polyphony_4"] = [pypianoroll.metrics.polyphonic_rate(proll, threshold=4)]
    except Exception:
        # unreadable or corrupt MIDI: return an empty metrics dict
        pass
    
    return metrics
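Because each metric value is wrapped in a single-element list, the per-file dicts convert directly into one-row DataFrames; a sketch of corpus-level aggregation, assuming a hypothetical midi/ directory of input files:

import glob
import pandas as pd

frames = []
for path in glob.glob("midi/**/*.mid", recursive=True):
    metrics = get_metrics_from_midi(path)
    if metrics:  # skip files that failed to parse
        frames.append(pd.DataFrame(metrics))
summary = pd.concat(frames, ignore_index=True)
print(summary.describe())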
def read_encode_pad_sequence_pianoroll(filepath, min_note, max_note,
                                       input_seq_len):
    multitrack = pypianoroll.Multitrack(filepath, beat_resolution=4)
    sequence_full = multitrack.tracks[0]
    sequence_full.binarize()
    sequence_full = sequence_full.pianoroll
    seed_sequence = sequence_full[:, min_note:max_note + 1]
    print(seed_sequence.shape)

    print("padding...")
    if len(seed_sequence) > input_seq_len:
        seed_sequence = np.array(seed_sequence[:input_seq_len])
    else:
        zero_padded_seq = np.zeros((input_seq_len, seed_sequence.shape[1]),
                                   dtype=seed_sequence.dtype)
        zero_padded_seq[input_seq_len - len(seed_sequence):] = seed_sequence
        seed_sequence = zero_padded_seq
    print("size after padding: ", seed_sequence.shape)
    return seed_sequence
Example #4
def check_validity(path, beat_res):

    test_files = list(glob.iglob(path + "**/*.mid*", recursive=True))
    nmbr_files = len(test_files)
    faulty = []

    for file in tqdm(test_files, total=nmbr_files):
        try:
            ppr.Multitrack(file, beat_resolution=beat_res).get_stacked_pianoroll()
        except Exception:
            faulty.append(file)

    print('Done.')
    if not faulty:
        print("All your MIDI files are valid.")
    else:
        print("These MIDI files are faulty: ")
        print(*faulty, sep="\n")
def main(files, dst_dir, nr_bars, max_seq_len, bar_len, min_note, max_note):
    print('encoding into pianoroll and saving files...')
    nr_seqs_available = 0
    failed = 0

    for file in tqdm(files):
        try:
            multitrack = pypianoroll.Multitrack(file, beat_resolution=4)
            sequence_full = multitrack.tracks[0]
            sequence_full.binarize()
            sequence_full = sequence_full.pianoroll
            sequence = sequence_full[:, min_note:max_note + 1]

            # HOW TO RECONSTRUCT
            # sequence_zeros = np.zeros((sequence_full.shape))
            # sequence_zeros[:,min_note:max_note+1] = sequence
            # reconstructed = pypianoroll.Multitrack(tracks=[pypianoroll.Track(sequence_zeros)],beat_resolution=4)

            file_id = file.split(os.path.sep)[-1]

            if len(sequence) > max_seq_len:
                nr_windows = compute_sequences_available(
                    len(sequence), max_seq_len, bar_len)
                nr_seqs_available += nr_windows

                for window_index in range(nr_windows - 1):  # drop the last
                    seq_start_index = window_index * bar_len
                    seq_end_index = (window_index + nr_bars) * bar_len
                    seq_window = sequence[seq_start_index:seq_end_index]
                    dst_file = os.path.join(
                        dst_dir, "%s_%s.npy" % (file_id, window_index))
                    np.save(dst_file, seq_window)
            else:
                nr_seqs_available += 1
                new_file_path_magenta = os.path.join(dst_dir, file_id + ".npy")
                sequence_zeros = np.zeros((max_seq_len, sequence.shape[1]))
                sequence_zeros[max_seq_len - len(sequence):] = sequence
                np.save(new_file_path_magenta, sequence_zeros)

        except Exception:
            failed += 1

    print('failed = %s' % failed)
    return None
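compute_sequences_available is not shown in this example; a minimal sketch consistent with the windowing above (hop of one bar, window span of nr_bars * bar_len, which is assumed to equal max_seq_len) would be:

def compute_sequences_available(seq_len, max_seq_len, bar_len):
    # number of bar-aligned windows of length max_seq_len that fit in
    # seq_len when hopping one bar (bar_len timesteps) at a time
    return (seq_len - max_seq_len) // bar_len + 1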
Example #6
def getSlicedPianorollMatrixList(pathToFile,
                                 binarize=True,
                                 beat_resolution=24):

    seqLength = 96

    track = ppr.Multitrack(pathToFile, beat_resolution=beat_resolution)
    track = track.get_stacked_pianoroll()

    # binarize velocities to {0, 1}
    if binarize:
        track[track > 0] = 1

    # trim trailing timesteps so the length is a multiple of seqLength
    # (simply cuts off the end of the track; a better solution may exist)
    lengthTemp = track.shape[0]
    if lengthTemp % seqLength != 0:
        track = track[:-(lengthTemp % seqLength), :]
    length = track.shape[0]

    # single-track MIDI file: return a list of (seqLength, 128) slices
    if track.shape[2] == 1:
        track = np.squeeze(track, 2)
        track = np.split(track, int(length / seqLength), axis=0)

        return track

    # multitrack MIDI file: slice every track and flatten into one list
    else:
        endTrack = []
        for i in range(track.shape[2]):
            track1 = track[:, :, i]
            temp = np.split(track1, int(length / seqLength), axis=0)

            for temp2 in temp:
                endTrack.append(temp2)

    return endTrack
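With the default beat_resolution of 24, each returned slice covers 96 timesteps, i.e. one bar of 4/4; a quick sanity check (the MIDI path is a placeholder):

slices = getSlicedPianorollMatrixList("example.mid")
print(len(slices), slices[0].shape)  # e.g. N slices of shape (96, 128)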
Example #7
def array_to_pypianoroll(array, tempo=60):
    # Order: Piano, Guitar, Strings, Bass, Drums
    programs = [
        1,  # Acoustic Piano
        29,  # Electric muted guitar
        49,  # Orchestral Strings
        34,  # Electric Bass Finger
        118,  # DrumSet
    ]
    is_drum = [False, False, False, False, True]
    tracks = []
    for track in range(array.shape[0]):
        tracks.append(
            pypianoroll.Track(pianoroll=array[track, :, :],
                              program=programs[track],
                              is_drum=is_drum[track]))
    return pypianoroll.Multitrack(tracks=tracks,
                                  tempo=tempo,
                                  beat_resolution=96 // 4)
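A round-trip sketch with an empty five-track tensor, written out via the pre-1.0 Multitrack.write API this function targets (shapes and file name are assumptions):

import numpy as np

# 5 tracks, 4 bars of 4/4 at beat_resolution 24 (96 // 4), 128 pitches
array = np.zeros((5, 24 * 16, 128), dtype=np.uint8)
mt = array_to_pypianoroll(array, tempo=120)
mt.write("empty_demo.mid")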
Example #8
def midi_feature(midi_filename, sampling_fac=2):

    pnrl = pypianoroll.Multitrack(filepath=midi_filename)
    beat_resolution = 24 * sampling_fac

    # one-hot chroma per timestep from the strongest pitch
    # (assumes a roughly monophonic track; silent steps map to pitch class 0)
    m_chroma = np.zeros((pnrl.tracks[0].pianoroll.shape[0], 12))
    for i in range(0, pnrl.tracks[0].pianoroll.shape[0]):
        m_chroma[i, int(np.argmax(pnrl.tracks[0].pianoroll[i, :]) % 12)] = True

    # average the chroma over blocks of beat_resolution timesteps
    melody_embedding = []

    for i_mm in range(0,
                      int(pnrl.tracks[0].pianoroll.shape[0] /
                          beat_resolution)):
        embedding = np.mean(m_chroma[i_mm * beat_resolution:(i_mm + 1) *
                                     beat_resolution],
                            axis=0)
        melody_embedding.append(np.reshape(embedding, (1, 12)))
    melody_embedding = np.vstack(melody_embedding)
    return melody_embedding
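Usage is straightforward: the result has one 12-dimensional chroma row per block of beat_resolution timesteps (the file name is a placeholder):

embedding = midi_feature("melody.mid", sampling_fac=2)
print(embedding.shape)  # (n_blocks, 12)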
Example #9
def load_data(folder):
    data = []
    for dirpath, dirnames, filenames in os.walk(folder):
        for filename in [f for f in filenames if f.endswith(".npz")]:
            pianoroll = piano.Multitrack(os.path.join(dirpath, filename))
            duration = max(roll.pianoroll.shape[0]
                           for roll in pianoroll.tracks)
            values = max(roll.pianoroll.shape[1] for roll in pianoroll.tracks)
            multitrack_bar = []
            for track in sorted(pianoroll.tracks, key=lambda x: x.name):
                #print(track.pianoroll.shape)
                phrases = divide_into_bars(track.pianoroll[:, 0:84],
                                           pianoroll.beat_resolution, duration,
                                           values)
                multitrack_bar.append(phrases)
            multitrack_bar = np.asarray(multitrack_bar)
            multitrack_bar = multitrack_bar.transpose((1, 0, 2, 3, 4))
            data.append(multitrack_bar)
    data = np.vstack(data)
    return data
def convert_to_npz(generated_phrase, songs_directory, song_name):
    padded_phrase = np.pad(
        generated_phrase,
        [(0, 0), (0, 0),
         (LOWEST_NOTE, TOTAL_PIANOROLL_NOTES - LOWEST_NOTE - NUM_NOTES),
         (0, 0)],
        'constant',
        constant_values=False)  #repad notes from 84 to 128
    padded_reshaped_phrase = np.reshape(
        padded_phrase,
        (NUM_TRACKS, NUM_BARS, BEATS_PER_BAR,
         TOTAL_PIANOROLL_NOTES))  #reshape to be in pypianoroll format

    program_list = [0, 0, 24, 32, 48]  #list of instruments
    is_Drum_list = [True, False, False, False, False]
    name_list = ["Drums", "Piano", "Guitar", "Bass", "Strings"]

    pianoroll_list = []

    for track in range(0, NUM_TRACKS):
        track_data = padded_reshaped_phrase[track]

        concated_bars = np.empty((0, TOTAL_PIANOROLL_NOTES), dtype=bool)
        for bar in range(0, NUM_BARS):
            concated_bars = np.concatenate((concated_bars, track_data[bar]),
                                           axis=0)

        pianoroll_list.append(
            pypianoroll.Track(pianoroll=concated_bars,
                              program=program_list[track],
                              is_drum=is_Drum_list[track],
                              name=name_list[track]))

    multitrack = pypianoroll.Multitrack(
        tracks=pianoroll_list,
        tempo=120.0,
        beat_resolution=24,
        downbeat=np.asarray([True] + [False] *
                            (NUM_BARS * BEATS_PER_BAR - 1)))

    pypianoroll.save(join(songs_directory, (song_name + ".npz")), multitrack)
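The module-level constants are not shown; one set of values consistent with the padding arithmetic above (84 kept pitches re-padded into the full 128-pitch range, MuseGAN-style) would be, for instance:

LOWEST_NOTE = 24             # first kept pitch
NUM_NOTES = 84               # kept pitch range: 24 + 84 + 20 = 128
TOTAL_PIANOROLL_NOTES = 128
NUM_TRACKS = 5               # Drums, Piano, Guitar, Bass, Strings
NUM_BARS = 4
BEATS_PER_BAR = 96           # timesteps per bar at beat_resolution 24 in 4/4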
Example #11
def plot_interpolation_pianorolls(bars=16):
    interpolations = []
    for i in range(0, 6, 2):
        path = "Sampled/" + str(bars) + "bar_interpolation/interpolate_" + str(
            i) + ".midi"

        midi = ppr.parse(path, beat_resolution=4)  # get Multitrack object
        midi = midi.tracks[0]  # get first/only track
        midi.name = ""
        if i == 0:
            midi.name = "start sequence"
        if i == 4:
            midi.name = "end sequence"
        pr = midi.pianoroll

        # padding to full length in case MIDI file ends earlier
        if pr.shape[0] != bars * 16:
            padding = np.zeros((bars * 16 - pr.shape[0], pr.shape[1]))
            pr = np.concatenate((pr, padding), axis=0)
            midi.pianoroll = pr
        interpolations.append(midi)

    mt = ppr.Multitrack(tracks=interpolations, beat_resolution=4)

    if bars == 16:
        p, _ = ppr.plot(mt,
                        yticklabel="number",
                        xtick='beat',
                        xticklabel=False,
                        grid="off")
        # possible ppr bug: despite xticklabel=False, the plot still labels every x-axis value
    else:
        p, _ = ppr.plot(mt,
                        yticklabel="number",
                        xtick='beat',
                        xticklabel=True,
                        grid="both")
    p.set_size_inches((8, 8), forward=True)

    filename = str(bars) + "bar_interpolation.png"
    p.savefig(filename)
Example #12
def to_multitrack(mt, n=None):
    """
    Create a multitrack output out of a model tensor
    Input is [n_bars, n_timesteps, n_pitches, n_tracks] tensor.
    If n is given, it's a list of length n_tracks, detailing the LPD-5 number
    for each track.
    TODO: Support custom programs/names just like to_track.
    """
    n_tracks = len(mt)
    if n is not None and len(n) != n_tracks:
        raise ValueError("len(n) must equal the number of tracks")
    tracks = []
    for i in range(n_tracks):
        if n is None:
            this_n = i
        else:
            this_n = n[i]
        tracks.append(to_track(mt[i], n=this_n))
    return ppr.Multitrack(tracks=tracks,
                          beat_resolution=12,
                          downbeat=DOWNBEATS_ONEHOT)
Example #13
    def convert_midi_to_tensor(self, input_midi_path):
        """
        Converts a midi to pianoroll tensor

        Parameters
        ----------
        input_midi_path : string
            Full file path to the input midi

        Returns
        -------
        4d numpy array
            Pianoroll tensor of shape (1, number_of_timesteps, 128, 1)
        """

        multi_track = pypianoroll.Multitrack(
            beat_resolution=Constants.beat_resolution)
        try:
            multi_track.parse_midi(input_midi_path,
                                   algorithm='custom',
                                   first_beat_time=0)
        except Exception as e:
            logger.error("Failed to parse the MIDI file.")

        if len(multi_track.tracks) > 1:
            logger.error("Input MIDI file has more than 1 track.")

        multi_track.pad_to_multiple(self.number_of_timesteps)
        multi_track.binarize()
        pianoroll = multi_track.tracks[0].pianoroll

        if pianoroll.shape[0] > self.number_of_timesteps:
            logger.error("Input MIDI file is longer than 8 bars.")

        # truncate to the fixed number of timesteps
        tensor = pianoroll[:self.number_of_timesteps]
        tensor = np.expand_dims(tensor, axis=0)
        tensor = np.expand_dims(tensor, axis=3)

        return tensor
Example #14
def pianorollMatrixToTempMidi(matrix,
                              path='../utils/midi_files/temp.mid',
                              prediction=True,
                              show=False,
                              showPlayer=False,
                              autoplay=False):
    # matrix must have shape (length, pitches), e.g. (96 or more, 128)
    if prediction:
        matrix[-3:, :] = 0  # silence the last three timesteps

    tempTrack = ppr.Track(matrix)
    newTrack = ppr.Multitrack()
    newTrack.append_track(tempTrack)
    newTrack.write(path)

    score = music21.converter.parse(path)
    if show:
        score.show()
    if showPlayer:
        score.show('midi')
    if autoplay:
        music21.midi.realtime.StreamPlayer(score).play()
Example #15
    def __getitem__(self, idx):
        try:
            # load song from midi files and parse to numpy
            track = ppr.Multitrack(self.all_files[idx],
                                   beat_resolution=self.beat_res)
            track = track.get_stacked_pianoroll()

            # if: 1 track midifile
            # else: quick fix for multitrack, melody in almost every song on midi[0]
            if track.shape[2] == 1:
                track = np.squeeze(track, 2)
            else:
                track = track[:, :, 0]

            # if length differs from seq_length, cut it to seq_length
            if track.shape[0] > self.seq_length:
                track = track[:4 * self.beat_res * self.bars]
            elif track.shape[0] < self.seq_length:
                pad_with = self.seq_length - track.shape[0]
                temp = np.zeros((pad_with, 128), dtype=np.uint8)
                track = np.concatenate((track, temp))

            # binarize
            if self.binarize:
                track[track > 0] = 1

            # transpose notes out of range of the 5 chosen octaves
            sequence = transposeNotesHigherLower(track)
            # cut octaves to get input shape [96,60]
            sequence = cutOctaves(sequence)
            # unsqueeze first dimension for input
            sequence = np.expand_dims(sequence, axis=0)
        except Exception:
            if self.verbose:
                print(
                    "MIDI file warning: skipped a file that could not be parsed properly."
                )
            sequence = np.zeros((1, self.seq_length, 60), dtype=np.uint8)
        return torch.from_numpy(sequence)
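Because __getitem__ always returns a fixed-shape tensor (a zero tensor for unreadable files), the dataset plugs straight into a DataLoader; a sketch, with MidiDataset standing in for whatever the enclosing class is actually called:

from torch.utils.data import DataLoader

# hypothetical constructor; the class above defines __getitem__
dataset = MidiDataset(midi_dir="data/midi")
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for batch in loader:
    print(batch.shape)  # (32, 1, seq_length, 60)
    break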
Example #16
def numpy_to_pianoroll(folder):
    # Bass, Drums, Guitar, Piano, Strings
    programs = [34, 0, 30, 1, 51]
    names = ['Bass', 'Drums', 'Guitar', 'Piano', 'Strings']
    tempo = np.full((96), 105)
    for filename in os.listdir(folder):
        multisample = np.load(os.path.join(folder, filename))

        for i, sample in enumerate(multisample):
            tracks = []
            classes = []
            for instrument,program,name in zip(sample,programs,names):

                track = np.vstack(instrument)
                # threshold to binary, then scale to velocity 100
                track[track >= 0.5] = 100
                track[track < 0.5] = 0
                # pad from 84 up to 128 pitches
                track = np.pad(track.astype(int), ((0, 0), (0, 44)), mode='constant')
                if name != 'Guitar':
                    print(ppr.metrics.qualified_note_rate(track, 2))
                print(ppr.metrics.n_pitches_used(track))
                classes.append(ppr.metrics.n_pitches_used(track))
                isdrum = program == 0  # program 0 marks the drum track here
                ppr_track = ppr.Track(track,program,isdrum,name)
                tracks.append(ppr_track)
            ppr_song = ppr.Multitrack(tracks=tracks, tempo=tempo, beat_resolution=24)
            for instrument, clasnum in zip(names, classes):
                print(instrument + ': ' + str(clasnum))

            plot = ppr.plot_multitrack(ppr_song, mode='separate', ytick='off')
            plt.savefig('gen_samples/' + filename + str(i) + ".png", dpi=400)
            ppr.write(ppr_song, 'gen_samples/' + filename + "song")
Example #17
def save_pianoroll(filename, pianoroll, programs, is_drums, tempo,
                   beat_resolution, lowest_pitch):
    """Saves a batched pianoroll array to a npz file."""
    if not np.issubdtype(pianoroll.dtype, np.bool_):
        raise TypeError("Input pianoroll array must have a boolean dtype.")
    if pianoroll.ndim != 5:
        raise ValueError("Input pianoroll array must have 5 dimensions.")
    if pianoroll.shape[-1] != len(programs):
        raise ValueError("Length of `programs` does not match the number of "
                         "tracks for the input array.")
    if pianoroll.shape[-1] != len(is_drums):
        raise ValueError("Length of `is_drums` does not match the number of "
                         "tracks for the input array.")

    reshaped = pianoroll.reshape(
        -1, pianoroll.shape[1] * pianoroll.shape[2], pianoroll.shape[3],
        pianoroll.shape[4])

    # Pad to the correct pitch range and add silence between phrases
    to_pad_pitch_high = 128 - lowest_pitch - pianoroll.shape[3]
    padded = np.pad(
        reshaped, ((0, 0), (0, pianoroll.shape[2]),
                   (lowest_pitch, to_pad_pitch_high), (0, 0)), 'constant')


    # Reshape the batched pianoroll array to a single pianoroll array
    pianoroll_ = padded.reshape(-1, padded.shape[2], padded.shape[3])
    # Create the tracks
    tracks = []
    for idx in range(pianoroll_.shape[2]):
        tracks.append(pypianoroll.Track(
            pianoroll_[..., idx], programs[idx], is_drums[idx]))

    # Create and save the multitrack
    multitrack = pypianoroll.Multitrack(
        tracks=tracks, tempo=tempo, beat_resolution=beat_resolution)
    multitrack.save(filename)
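A smoke test with random data; the five input axes are (n_phrases, n_bars, timesteps_per_bar, n_pitches, n_tracks), and all parameter values here are illustrative:

import numpy as np

phrases = np.random.rand(2, 4, 48, 84, 2) > 0.9  # sparse boolean rolls
save_pianoroll("demo.npz", phrases, programs=[0, 24], is_drums=[False, False],
               tempo=120.0, beat_resolution=12, lowest_pitch=24)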
Example #18
def get_pianoroll(filepath, min_pitch=0, max_pitch=127):
    """
    Given a path to a pypianoroll .npz file from LPD5-clean dataset, 
    extract the Piano track into a 0-1 normalized pianoroll matrix of
    shape (NUM_PITCHES, ?) where NUM_PITCHES = max_pitch - min_pitch + 1
    """
    # Load pianoroll file as a multitrack object
    multi = pypianoroll.Multitrack(filepath)
    for track in multi.tracks:
        # Non-empty piano pianoroll
        if track.name == "Piano":
            if track.pianoroll.shape[0] > 0:
                proll = track.pianoroll.T
                # Clip and normalize velocities between 0 and 1
                proll = proll.clip(0, 127) / 127.
                # Crop pitch range of pianoroll
                proll = crop_pianoroll(proll, min_pitch, max_pitch)
                return proll
            else:
                return np.array([])  # No pianoroll
    # Error
    print("Unexpected condition: No Piano track in file", filepath)
    return np.array([])  # No pianoroll
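crop_pianoroll is not defined in this snippet; since proll has been transposed to (pitch, time), a plausible minimal implementation is:

def crop_pianoroll(proll, min_pitch, max_pitch):
    # keep rows min_pitch..max_pitch inclusive ->
    # (max_pitch - min_pitch + 1, num_timesteps)
    return proll[min_pitch:max_pitch + 1]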
Example #19
def write_midi(sequence, output_path):
    """
       Transform the given sequence into MIDI format and store it in the given path
          sequence (List(num_bars): torch.Tensor(1 x 1 x 128 x 96)): sequence of bar encodings
          output_path (str): path to store the transformed MIDI
    """
    # get number of bars
    num_bars = len(sequence)

    # squeeze into List(num_bars): numpy.ndarray(128 x 96)
    melody = [sequence[i][0][0].detach().cpu().numpy() for i in range(num_bars)]

    # transform into MIDI track format (num_bars*RESOLUTION x 128)
    melody = np.concatenate(melody, axis=1).transpose()

    # pack into binary track
    melody_track = pypianoroll.BinaryTrack(pianoroll=melody > 0)

    # pack into multi-track
    multi_track = pypianoroll.Multitrack(resolution=RESOLUTION, tracks=[melody_track])

    # write to output path
    pypianoroll.write(output_path, multi_track)
def process_midi(midi_file, beat_resolution):
    '''Takes the path to an input MIDI file and parses it to a pianoroll.
    :param midi_file: path to the MIDI file
    :param beat_resolution: timesteps per beat
    :return: parsed pianoroll
    '''
    multi_track = pypianoroll.Multitrack(beat_resolution=beat_resolution)
    try:
        multi_track.parse_midi(midi_file,
                               algorithm='custom',
                               first_beat_time=0)
    except Exception:
        print("midi file: {} is invalid. Ignoring during preprocessing".format(
            midi_file))
        return None  # nothing to process if parsing failed
    # Convert the PianoRoll to binary ignoring the values of velocities
    multi_track.binarize()
    track_indices = list(np.arange(len(
        multi_track.tracks)))  # Merge multiple tracks into a single track
    multi_track.merge_tracks(track_indices=track_indices,
                             mode='any',
                             remove_merged=True)
    pianoroll = multi_track.tracks[0].pianoroll
    return pianoroll
Example #21
    def pooled_process_file(self, args):
        def check_four_fourth(time_sign):
            return time_sign.numerator == 4 and time_sign.denominator == 4

        idx, filepath = args
        # maps pianoroll id -> corresponding metadata entry (artist, song)
        fetch_meta = {}

        store_meta = False
        pbc = 1
        yeah = 0
        max_bar_silence = 0

        processed_folder = os.path.join(self.data_path, "pianorolls/")
        path, file = os.path.split(filepath)

        artist = path.split(path_sep)[-1]
        filename = file.split(".")[0]

        # test 0: check that the time signature is always 4/4.
        try:
            pm_song = pm.PrettyMIDI(filepath)
        except Exception:
            # print(f'{idx} Not Pretty MIDI  {filepath}')
            return pbc, yeah, fetch_meta

        if not all(
            [check_four_fourth(tmp)
             for tmp in pm_song.time_signature_changes]):
            return pbc, yeah, fetch_meta

        del pm_song  # the PrettyMIDI object is no longer needed; pianorolls from here on

        try:
            base_song = pproll.parse(filepath, beat_resolution=4)
        except Exception:
            return pbc, yeah, fetch_meta

        # find a guitar, a bass and a drum instrument
        guitar_tracks, bass_tracks, drums_tracks, string_tracks = self.get_guitar_bass_drums(
            base_song)

        if not string_tracks:
            return pbc, yeah, fetch_meta

        base_song.merge_tracks(string_tracks,
                               mode="max",
                               program=48,
                               name="Strings",
                               remove_merged=True)

        # merging changes the track order, so re-find the indices of the trio tracks
        guitar_tracks, bass_tracks, drums_tracks, string_tracks = self.get_guitar_bass_drums(
            base_song)

        # take all possible combination of guitar, bass and drums
        for guitar_track in guitar_tracks:
            for bass_track in bass_tracks:
                for drums_track in drums_tracks:
                    # select only trio tracks (and strings)
                    current_tracks = [
                        drums_track, bass_track, guitar_track, -1
                    ]
                    names = ["Drums", "Bass", "Guitar", "Strings"]

                    # create a temporary song with only those tracks
                    song = pproll.Multitrack()

                    for i, current_track in enumerate(current_tracks):
                        song.append_track(
                            pianoroll=base_song.tracks[current_track].
                            pianoroll,
                            program=base_song.tracks[current_track].program,
                            is_drum=base_song.tracks[current_track].is_drum,
                            name=names[i])

                    song.beat_resolution = base_song.beat_resolution
                    song.tempo = base_song.tempo

                    song.binarize()
                    song.assign_constant(1)

                    # Test 1: check whether a track is silent during all the song
                    if song.get_empty_tracks():
                        continue

                    pianoroll = song.get_stacked_pianoroll()

                    i = 0
                    while i + self.phrase_size <= pianoroll.shape[0]:
                        window = pianoroll[i:i + self.phrase_size, :, :]
                        # print("window from", i, "to", i+self.phrase_size)

                        # keep only the phrases that have at most one bar of consecutive silence
                        # for each track
                        bar_of_silences = np.array([0] * self.n_tracks)
                        for track in range(self.n_tracks):
                            j = 0
                            while j + self.bar_size <= window.shape[0]:
                                if window[j:j + self.bar_size, :,
                                          track].sum() == 0:
                                    bar_of_silences[track] += 1

                                j += 1  # note: advances one timestep at a time, not by self.bar_size

                        # if the phrase is good, let's store it
                        if not any(bar_of_silences > max_bar_silence):
                            # data augmentation, random transpose bar
                            for shift in np.random.choice(
                                [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6],
                                    1,
                                    replace=False):
                                tmp = pproll.Multitrack()
                                for track in range(self.n_tracks):
                                    tmp.append_track(
                                        pianoroll=window[:, :, track],
                                        program=song.tracks[track].program,
                                        name=config.instrument_names[
                                            song.tracks[track].program],
                                        is_drum=song.tracks[track].is_drum)

                                tmp.beat_resolution = 4
                                tmp.tempo = song.tempo
                                # tmp.name = str(yeah)
                                tmp.name = f"{idx}_{yeah}"

                                tmp.transpose(shift)
                                tmp.check_validity()
                                # print(os.path.join(processed_folder, f"{idx}_{yeah}" + ".npz"))
                                # tmp.save(os.path.join(processed_folder, f"{idx}_{yeah}" + ".npz"))
                                del tmp
                                store_meta = True
                                # adding link to corresponding metadata file
                                fetch_meta[f"{idx}_{yeah}"] = {
                                    "artist": artist,
                                    "song": filename
                                }
                                yeah += 1

                        i += self.bar_size
                    del song

        del base_song
        return pbc, yeah, fetch_meta
Example #22
def main():
    cur_top = (0, top_p[1])
    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [1, None])
        output = model.model(hparams=hparams, X=context)
        vars = [v for v in tf.trainable_variables() if 'model' in v.name]

        saver = tf.train.Saver(var_list=vars)
        ckpt = tf.train.latest_checkpoint(args.model)
        saver.restore(sess, ckpt)

        pianoroll = np.zeros((trc_len, args.num_bars * 16, 128))

        pospchord = 16 // args.chordbeat
        pre = [end_note]
        if len(chords) > 0:
            pre.extend(chord2tokens(chords.pop(0)))
            cur_top = (0, top_p[0])
        seq = get_sequence(sess, context, pre, cur_top)
        pos = 0
        firstnote = False
        print('Generating Melody...')
        progress = tqdm(total=pianoroll.shape[1])
        while pos < pianoroll.shape[1]:
            for note in seq:
                if (not firstnote) and note >= time_note:
                    continue
                else:
                    firstnote = True
                pre.append(note)
                if note == time_note:
                    pos += 1
                    progress.update(1)
                    if pos % pospchord == 0 and len(chords) > 0:
                        c = chords.pop(0)
                        pre.extend(chord2tokens(c))
                        if c != 'auto':
                            cur_top = (0, top_p[0])
                            break
                        elif cur_top != (0, top_p[1]):
                            cur_top = (0, top_p[1])
                            break
                    if pos >= pianoroll.shape[1]:
                        break
                elif note < time_note:
                    trc = trc_idx.index(note // note_size)
                    mid = note % note_size + note_offset
                    if mid < 128:
                        pianoroll[trc, pos, mid] = 100
            seq = get_sequence(sess, context, pre[-512:], cur_top)

        pr = []
        for i, (t, p) in enumerate(zip(tracks, programs)):
            pr.append(
                pypianoroll.Track(pianoroll=pianoroll[i],
                                  program=p,
                                  is_drum=(t == 'Drums')))
        mt = pypianoroll.Multitrack(tracks=pr,
                                    tempo=args.tempo,
                                    beat_resolution=4)
        mt.write(args.output)
Example #23
def main():
    pre_melody = pypianoroll.load(args.input)
    pre = []
    step = pre_melody.beat_resolution // 4  # quantize to a sixteenth-note grid
    pianoroll = np.zeros((pre_melody.get_max_length(), 128, len(programs)))
    for track in pre_melody.tracks:
        if track.is_drum:
            dst_index = 0
        else:
            dst_index = 1
            for i in range(1, len(programs)):
                if track.program >= programs[i] and (len(programs) == i + 1 or track.program < programs[i + 1]):
                    dst_index = i
                    break
        pianoroll[0:track.pianoroll.shape[0], :, dst_index] += track.pianoroll
    pianoroll = pianoroll[:, note_offset:note_offset + note_size, trc_idx]
    p = np.where(pianoroll != 0)
    current_seq = []
    def _current(cur_seq):
        cur = []
        for c in sorted(cur_seq):
            if not (note_size <= c < note_size * 2):
                cur.append(c)
        for c in sorted(cur_seq):
            if note_size <= c < note_size * 2:
                cur.append(c)
        return cur  # Bass, Piano, etc..., Drums
    pos = 0
    for i in np.argsort(p[0]):
        if p[0][i] % step != 0:
            continue
        if pos < p[0][i]:
            for _ in range(pos, p[0][i], step):
                pre.extend(_current(current_seq))
                pre.append(time_note)
                current_seq = []
        pos = p[0][i]
        j = p[1][i]
        t = p[2][i]
        note = t * note_size + j
        current_seq.append(note)
    pre.extend(_current(current_seq))
    if len(pre) == 0 or pre[-1] != time_note:
        pre.append(time_note)
    if len(pre) > 512:
        pre = pre[-512:]

    cur_top = (0, top_p)
    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [1, None])
        output = model.model(hparams=hparams, X=context)
        vars = [v for v in tf.trainable_variables() if 'model' in v.name]

        saver = tf.train.Saver(var_list=vars)
        ckpt = tf.train.latest_checkpoint(args.model)
        saver.restore(sess, ckpt)

        pianoroll = np.zeros((trc_len, args.num_bars*16, 128))

        seq = get_sequence(sess, context, pre, cur_top)
        pos = 0
        firstnote = False
        print('Generating Melody...')
        progress = tqdm(total=pianoroll.shape[1])
        while pos < pianoroll.shape[1]:
            for note in seq:
                if (not firstnote) and note >= time_note:
                    continue
                else:
                    firstnote = True
                pre.append(note)
                if note == time_note:
                    pos += 1
                    progress.update(1)
                    if pos >= pianoroll.shape[1]:
                        break
                elif note < time_note:
                    trc = trc_idx.index(note // note_size)
                    mid = note % note_size + note_offset
                    if mid < 128:
                        pianoroll[trc, pos, mid] = 100
            seq = get_sequence(sess, context, pre[-512:], cur_top)

        pr = []
        for i, (t, p) in enumerate(zip(tracks, programs)):
            pr.append(pypianoroll.Track(pianoroll=pianoroll[i], program=p, is_drum=(t == 'Drums')))
        mt = pypianoroll.Multitrack(tracks=pr, tempo=args.tempo, beat_resolution=4)
        mt.write(args.output)
Example #24
import glob
import sys
import numpy as np
sys.path.append("D:\\data\\magenta-1.0.2\\")
import pypianoroll

from my_encoder import encoder

if __name__ == "__main__":
    fnames = glob.glob("D:\\data\\folkdataset\\*.mid")[:2]
    for fname in fnames:
        seq = encoder.encode(fname)

        pianoroll_original = pypianoroll.Multitrack(fname).tracks[0].pianoroll
        mid = encoder.decode(seq)
        # print(mid)
        pianoroll_encoded = pypianoroll.Multitrack(mid).tracks[0].pianoroll

        # elementwise round-trip equality check between original and re-decoded rolls
        equality = pianoroll_encoded == pianoroll_original
        print(fname)
        print(mid)

    # test padding effect
    for i, fname in enumerate(fnames):
        pianoroll_original = pypianoroll.Multitrack(fname).tracks[0].pianoroll

        seq = encoder.encode(fname)
        for _ in range(50):
            seq.append(0)

        mid = encoder.decode(seq, strip_extraneous=True)
Example #25
    for i in range(len(mul.tracks)):
        if not isinstance(mul.tracks[i], mullib.StandardTrack) and not isinstance(mul.tracks[i], mullib.BinaryTrack):
            mul.tracks[i] = mullib.StandardTrack(
                pianoroll=mul.tracks[i].pianoroll,
                name=mul.tracks[i].name,
                program=mul.tracks[i].program,
                is_drum=mul.tracks[i].is_drum
            )

    return mul


multitrack_repr = {"load": multitrack_load,
                   "save": multitrack_save,
                   "instance": mullib.Multitrack(),
                   "lib": mullib}


# ______________MidiArray__________________
def midiarray_save(src, path):
    src.save(path)
    return path


def midiarray_load(path, res):
    midi_data = arrlib.MidiArray()
    midi_data.load(path, res)
    return midi_data

def get_score_spontaneous_music(SORN,
                                readout_layer,
                                steps_spont,
                                seen=None,
                                steps_recovery=0,
                                display=True,
                                stdp_off=True,
                                storage_manager=None,
                                same_timestep_without_feedback_loop=False,
                                create_MIDI=False):
    #exc_neuron_tag, output_recorder_tag, input_recorder_tag
    #'main_exc_group', 'exc_out_rec', 'inp_rec'

    if display:
        print('\nGenerate spontaneous output...')
    source = SORN['music_act', 0]

    if stdp_off:
        SORN.deactivate_mechanisms('STDP')

    for ng in SORN['prediction_source']:
        SORN.add_behaviours_to_neuron_group(
            {100: NeuronRecorder(['n.output'], tag='prediction_rec')}, ng)
    for ng in SORN['text_input_group']:
        SORN.add_behaviours_to_neuron_group(
            {101: NeuronRecorder(['n.pattern_index'], tag='index_rec')}, ng)

    SORN.clear_recorder()
    SORN.recording_on()

    if same_timestep_without_feedback_loop:
        SORN['music_act', 0].active = False
        if steps_recovery > 0:
            SORN.simulate_iterations(steps_recovery,
                                     100,
                                     measure_block_time=display,
                                     disable_recording=True)
        spont_output, pianoroll = get_simu_music_sequence(
            SORN,
            SORN['prediction_source'],
            'n.output',
            readout_classifyer=readout_layer,
            seq_length=steps_spont,
            source=SORN['music_act', 0])  #output generation
    else:
        spont_output, pianoroll = predict_music_sequence(
            readout_layer,
            SORN['prediction_source'],
            'n.output',
            steps_spont,
            SORN,
            SORN['music_act', 0],
            lag=1)

    SORN['music_act', 0].active = True

    if create_MIDI:
        track = piano.Track(pianoroll)
        track.program = source.instrument
        track.binarize()
        track.beat_resolution = source.beat_resolution
        track = piano.Multitrack(tracks=[track],
                                 beat_resolution=source.beat_resolution)
        if storage_manager is not None:
            path = storage_manager.absolute_path
            track.write(path + 'sample.mid')
        else:
            track.write('sample.mid')
            print(
                'warning: no results path defined via the storage manager; MIDI will be saved in the code repo'
            )

    #if display:
    print(spont_output)
    SORN.recording_on()

    if stdp_off:
        SORN.activate_mechanisms('STDP')

    score_dict = SORN['music_act', 0].get_music_score(spont_output, pianoroll,
                                                      seen)
    #print(score_dict)
    if storage_manager is not None:
        storage_manager.save_param_dict(score_dict)

    SORN.clear_recorder(['prediction_rec', 'index_rec'])
    SORN.deactivate_mechanisms(['prediction_rec', 'index_rec'])

    return score_dict
Example #27
def get_score_spontaneous_music(SORN,
                                source,
                                readout_layer,
                                steps_spont,
                                split_tracks=False,
                                seen=None,
                                steps_recovery=0,
                                display=True,
                                stdp_off=True,
                                storage_manager=None,
                                same_timestep_without_feedback_loop=False,
                                create_MIDI=False):
    #exc_neuron_tag, output_recorder_tag, input_recorder_tag
    #'main_exc_group', 'exc_out_rec', 'inp_rec'

    if display:
        print('\nGenerate spontaneous output...')

    if stdp_off:
        SORN.deactivate_mechanisms('STDP')

    for ng in SORN['prediction_source']:
        SORN.add_behaviours_to_neuron_group(
            {100: Recorder(['n.output'], tag='prediction_rec')}, ng)
    for ng in SORN['text_input_group']:
        SORN.add_behaviours_to_neuron_group(
            {101: Recorder(['n.pattern_index'], tag='index_rec')}, ng)

    SORN.clear_recorder()
    SORN.recording_on()

    if same_timestep_without_feedback_loop:
        source.behaviour_enabled = False
        if steps_recovery > 0:
            SORN.simulate_iterations(steps_recovery,
                                     100,
                                     measure_block_time=display,
                                     disable_recording=True)
        spont_output, pianoroll = get_simu_music_sequence(
            SORN,
            SORN['prediction_source'],
            'n.output',
            readout_classifyer=readout_layer,
            seq_length=steps_spont,
            source=source)  #output generation
    else:
        spont_output, pianoroll = predict_music_sequence(
            readout_layer,
            SORN['prediction_source'],
            'n.output',
            steps_spont,
            SORN,
            source,
            lag=1)

    source.behaviour_enabled = True

    if create_MIDI and source.is_drum:  # create a percussion track!
        # here pianoroll is a sequence of vectors of length `alphabet`;
        # each letter of the alphabet stands for one instrument

        if not split_tracks and not source.offtoken:  # create one long track
            instruments_non_zero = np.nonzero(pianoroll)
            instruments_non_zero = list(set(
                instruments_non_zero[1]))  # for them we have to create tracks

            tracks = []

            for i in range(len(instruments_non_zero)):
                track = np.zeros((len(pianoroll), 128))
                track[:, source.alphabet[instruments_non_zero[
                    i]]] = pianoroll[:, instruments_non_zero[i]]
                track = piano.Track(track)
                #track.program = source.alphabet[instruments_non_zero[i]]
                track.binarize()
                track.beat_resolution = 4
                track.is_drum = True
                tracks.append(track)

            multitrack = piano.Multitrack(tracks=tracks, beat_resolution=4)

            if storage_manager is not None:
                path = storage_manager.absolute_path
                multitrack.write(path + 'sample.mid')
            else:
                multitrack.write('sample.mid')
                print(
                    'warning: no results path defined via the storage manager; MIDI will be saved in the code repo'
                )

        else:  # create n tracks matching the length of the input tracks (or varying lengths if stop tokens are active)

            if source.offtoken and not source.ontoken:
                stop_tokens = np.nonzero(
                    pianoroll[:, -1])[0]  # time steps when track is finished
                if stop_tokens.size == 0:  # if it never predicted a stop token
                    start_tokens = [len(pianoroll)
                                    ]  # we just generate one long track
                else:
                    start_tokens = stop_tokens + 1  # so that indexing works

                n_tracks = int(len(start_tokens)) - 1
                #start_tokens = np.insert(start_tokens, 0, 0) # add first start token
                # note that we do not generate a track from the time steps potentially generated after the last stop token and before first stop token

            elif source.ontoken and not source.offtoken:
                start_tokens = np.nonzero(
                    pianoroll[:, -1])[0]  # time steps when track starts
                n_tracks = int(len(start_tokens)) - 1

            elif source.ontoken and source.offtoken:
                n_tracks = 0
                start_tokens = []
                stop_tokens = []
                start = False
                stop = False
                for i in range(len(pianoroll)):
                    if pianoroll[i, -1]:  # we have a start token
                        start = i  # we also overwrite start token if two appear without a stop token in between
                        stop = False
                    if pianoroll[i, -2]:  # we have a stop token
                        stop = i

                    if stop and not start:
                        stop = False

                    if start and stop and stop > start:
                        start_tokens.append(start)
                        stop_tokens.append(stop)
                        n_tracks += 1
                        start = False
                        stop = False

                    if start and stop and stop <= start:
                        start = False
                        stop = False
                #print(start_tokens)
                #print(stop_tokens)
                # we ignore parts when two stop tokens or two start tokens occur after another

            else:  # else we split the generated output after 32 time steps each (length of one track in the corpus)
                len_track = len(source.corpus_blocks[0])
                n_tracks = int(len(pianoroll) / len_track)

            for j in range(n_tracks):
                if source.offtoken and source.ontoken:
                    curr_pianoroll = pianoroll[
                        start_tokens[j]:stop_tokens[j], :int(
                            source.A -
                            2)]  # ignore last two tokens in alphabet
                elif source.offtoken or source.ontoken:
                    curr_pianoroll = pianoroll[start_tokens[j]:start_tokens[
                        j + 1], :int(
                            source.A -
                            1)]  # ignore last token in alphabet (stop token)
                else:
                    curr_pianoroll = pianoroll[j * len_track:(j * len_track) +
                                               len_track, :]
                if np.any(curr_pianoroll
                          ):  # only proceed if it would not be all silence
                    instruments_non_zero = np.nonzero(curr_pianoroll)
                    instruments_non_zero = list(
                        set(instruments_non_zero[1]
                            ))  # for them we have to create tracks

                    tracks = []
                    for i in range(len(instruments_non_zero)):
                        track = np.zeros((len(curr_pianoroll), 128))
                        track[:, source.alphabet[
                            instruments_non_zero[i]]] = curr_pianoroll[:, i]
                        track = piano.Track(track)
                        track.program = source.alphabet[
                            instruments_non_zero[i]]
                        track.binarize()
                        track.beat_resolution = 4
                        track.is_drum = True
                        tracks.append(track)

                    multitrack = piano.Multitrack(tracks=tracks,
                                                  beat_resolution=4)
                    if storage_manager is not None:
                        path = storage_manager.absolute_path
                        multitrack.write(path + 'sample{}.mid'.format(j + 1))
                    else:
                        multitrack.write('sample{}.mid'.format(j + 1))
                        print(
                            'warning: no results path defined via the storage manager; MIDI will be saved in the code repo'
                        )

    elif create_MIDI:  # we create just one MIDI track of one instrument (if we have a MusicActivator)
        track = piano.Track(pianoroll)
        track.program = source.instrument
        track.binarize()
        track.beat_resolution = source.beat_resolution
        track = piano.Multitrack(tracks=[track],
                                 beat_resolution=source.beat_resolution)
        if storage_manager is not None:
            path = storage_manager.absolute_path
            track.write(path + 'sample.mid')
        else:
            track.write('sample.mid')
            print(
                'warning: no results path defined via the storage manager; MIDI will be saved in the code repo'
            )

    #if display:
    print(spont_output)
    SORN.recording_on()

    if stdp_off:
        SORN.activate_mechanisms('STDP')

    score_dict = source.get_music_score(spont_output, pianoroll)
    #print(score_dict)
    if storage_manager is not None:
        storage_manager.save_param_dict(score_dict)

    SORN.clear_recorder(['prediction_rec', 'index_rec'])
    SORN.deactivate_mechanisms(['prediction_rec', 'index_rec'])

    return score_dict
Example #28
        print(j)
        xo = np.expand_dims(x_con[j], axis=1)
        yo = np.expand_dims(y_con[j], axis=1)
        for k in range(len(x_con[j])):
            print(model.train_on_batch(xo[k], xo[k]), end=' ')
        model.reset_states()
        print('\n')
    print('\n')
    print('Testing')
    print('\n')
    kolo = []
    for j in range(5):
        print(j)
        bolo = []
        xo = np.expand_dims(x_ton[j], axis=1)
        for k in range(len(x_ton[j])):
            bolo.extend(model.predict_on_batch(xo[k]))
        model.reset_states()
        bolo = np.concatenate(np.array(bolo))
        kolo.append(bolo)
    jojo = np.array(kolo)
    for j in range(5):
        r2 = pn.Track(pianoroll=jojo[j] * 300,
                      program=0,
                      is_drum=False,
                      name='my awesome piano')
        multitrack2 = pn.Multitrack(tracks=[r2])
        os.makedirs('./results/' + str(i), exist_ok=True)
        pn.write(multitrack2, './results/' + str(i) + '/' + str(j) + '.mid')
    model.save('stacked.h5')
import tqdm
import numpy as np
from glob import glob
import os
from matplotlib import pyplot as plt
from my_encoder import encoder
import pypianoroll

DATASET_LOC = os.path.abspath("D:\\data\\folkdataset\\")
DATASET_GLOB = os.path.join(DATASET_LOC, "*.mid")
DATASET_FILES = glob(DATASET_GLOB)
NEW_LOCATION = os.path.abspath("D:\\data\\folkmagenta\\")

files = 200
DATASET_FILES = DATASET_FILES[:files]

lens = []

for fname in tqdm.tqdm(DATASET_FILES):
    one_bar = pypianoroll.Multitrack(fname).tracks[0].pianoroll[:96]
    midi_one_bar = pypianoroll.Multitrack(tracks=[pypianoroll.Track(one_bar)])
    midi_one_bar.write('test.mid')
    seq = encoder.encode('test.mid')
    lens.append(len(seq))

print(np.mean(lens), np.std(lens))
print(np.quantile(lens, 0.75))  # 24
plt.hist(lens)
plt.show()
Example #30
def extract_merge(midi_path, instrument_numbers):
    pretty_midi_data, pypiano_data = remove_drum_empty_track(midi_path, drop_drum=False)
    # pretty_midi_data = pretty_midi.PrettyMIDI(midi_path)
    music_tracks = pypianoroll.from_pretty_midi(pretty_midi_data)
    # print(music_tracks)
    collection = defaultdict(list)
    program_id = defaultdict(list)
    program = {}  # json.load()
    program['melody'] = program['bass'] = program['drum'] = -1
    # dict.fromkeys(['piano', 'bass','guitar','drum','string'], [])
    # print(collection)

    for idx, track in enumerate(music_tracks.tracks):
        # print(track)
        # print(pretty_midi_data.instruments[idx].name)

        if track.program == program['melody'] or \
                check_name(track.name, pretty_midi_data.instruments[idx].name, 'melody'):
            collection['melody'].append(track)
            program_id['melody'].append(idx)
        elif track.program in instrument_numbers['drum'] or track.program == program['drum'] or \
                check_name(track.name, pretty_midi_data.instruments[idx].name, 'drum') :
            collection['drum'].append(track)
            program_id['drum'].append(idx)
        elif track.program in instrument_numbers['piano'] or \
                check_name(track.name, pretty_midi_data.instruments[idx].name, 'piano'):
            collection['piano'].append(track)
            program_id['piano'].append(idx)
        elif track.program in instrument_numbers['bass'] or track.program == program['bass'] or \
                check_name(track.name, pretty_midi_data.instruments[idx].name, 'bass'):
            collection['bass'].append(track)
            program_id['bass'].append(idx)
        elif track.program in instrument_numbers['guitar'] or \
                check_name(track.name, pretty_midi_data.instruments[idx].name, 'guitar'):
            collection['guitar'].append(track)
            program_id['guitar'].append(idx)
        else:
            collection['string'].append(track)
            program_id['string'].append(idx)
        # print(collection)

    cnt = count_tracks(collection, program_id)
    if cnt < 3 or (cnt == 2 and len(program_id['melody'])==0 ):
        return None
    # for key in tracks_name:
    #     collection[key] = music_tracks[collection[key]].get_merged_pianoroll()
    merged_pianoroll = get_merged(collection)
    #merged_tracks = pianoroll_to_tracks(merged_pianoroll)
    pypiano_mult = pypianoroll.Multitrack(name=music_tracks.name, resolution=music_tracks.resolution,
                                          tempo=music_tracks.tempo, downbeat=music_tracks.downbeat)
    for key in tracks_name:
        if len(merged_pianoroll[key]) != 0:
            IS_DRUM = (key == 'drum')
            if key in ('melody', 'string'):
                Pro = program_id[key][0]
            else:
                Pro = instrument_numbers[key][0]
            track = pypianoroll.StandardTrack(name=key, program=Pro,
                                              is_drum=IS_DRUM,
                                              pianoroll=merged_pianoroll[key])
            pypiano_mult.append(track)

    return pypiano_mult