Example #1
0
def main():
    """Convert every .npz pianoroll under --input_dir to a MIDI file under
    --output_dir, mirroring the input directory layout.

    Relies on file-level imports: argparse, os, Path, glob, and pp
    (presumably pypianoroll — confirm the alias at the top of the file).
    """
    parser = argparse.ArgumentParser(
        description="Converts the LPD 5 dataset to midi files")
    # required=True replaces the former bare `assert`, which `python -O`
    # would strip, and yields a proper usage error instead of a traceback.
    parser.add_argument("--input_dir", required=True, help="dataset directory")
    parser.add_argument("--output_dir", required=True, help="output directory")
    args = parser.parse_args()

    input_dir = Path(args.input_dir)
    mask = str(input_dir / "**/*.npz")
    output_dir = Path(args.output_dir)  # fixed `ouput_dir` typo

    for filename in glob(mask, recursive=True):
        # BUG FIX: the f-string previously contained no placeholder and
        # printed a literal "(unknown)" for every file.
        print(f"Converting {filename}...")

        filename = Path(filename)
        # Mirror the file's position relative to input_dir under output_dir.
        file_output_dir = output_dir / Path(os.path.relpath(
            filename, input_dir)).parent
        # exist_ok avoids the check-then-create race of the original.
        file_output_dir.mkdir(parents=True, exist_ok=True)

        sample = pp.load(str(filename))
        pp.write(sample, str(file_output_dir / (filename.stem + ".mid")))
Example #2
0
def create_batch(self, i, source, meta_link, pianorolls_path, batch_path):
    """Build batch *i* from the pianoroll files named in *source* and save
    the preprocessed X, Y and label arrays as .npy files under *batch_path*.
    """
    rolls = []
    collected_labels = []
    for name in source:
        # Stack the multitrack into one pianoroll tensor per sample.
        multitrack = pproll.load(os.path.join(pianorolls_path, name))
        rolls.append(multitrack.get_stacked_pianoroll())

        # Map the sample id (filename stem) to its song, then load the
        # song's label array from the dataset's labels directory.
        song_id = meta_link[name.split(".")[0]]
        collected_labels.append(np.load(
            os.path.join(self.dataset_path, "labels",
                         str(song_id) + ".npy")))

    labels = np.array(collected_labels)
    # preprocess batch, get X and Y
    X, Y = self.preprocess(np.array(rolls))

    # Persist everything for this batch index.
    np.save(os.path.join(batch_path, "X", str(i) + ".npy"), X)
    np.save(os.path.join(batch_path, "Y", str(i) + ".npy"), Y)
    np.save(os.path.join(batch_path, "labels", str(i) + ".npy"), labels)
    print("batch - end: " + str(i))
Example #3
0
def main():
    """Plot one pypianoroll .npz file to SVG, export it to MIDI and render
    that MIDI to WAV with FluidSynth.

    Uses args from the file-level parser(): input_file, savepath, output.
    """
    args = parser()
    pypianoroll_file = abspath(args.input_file)
    loaded = pypianoroll.load(pypianoroll_file)

    if args.savepath is None:
        savepath = os.getcwd()
    else:
        savepath = args.savepath
        os.makedirs(savepath, exist_ok=True)

    # Output basename: either the input file's stem or the explicit name.
    if args.output is None:
        output_filename = os.path.basename(pypianoroll_file).split(".")[0]
    else:
        # BUG FIX: was `ags.output`, a NameError at runtime.
        output_filename = args.output
    output_filename = join(savepath, output_filename)

    print(output_filename)

    # Save to plot
    plot_multitrack(loaded, filename=output_filename + ".svg", preset="frame")

    # Save to MIDI, then synthesize WAV from it
    pypianoroll.write(loaded, (output_filename + ".mid"))

    fs = FluidSynth('FluidR3_GM.sf2')
    fs.midi_to_audio((output_filename + ".mid"), (output_filename + ".wav"))
Example #4
0
    def generate_batches(self, path, filenames, batch_size):
        """Yield batches of stacked pianorolls loaded from *path*.

        Shuffles *filenames* in place, drops the trailing files that do not
        fill a complete batch, and yields one (batch_size, ...) array per
        batch.
        """
        print("Generating batches from data...")
        dataset_len = len(filenames)
        # shuffle samples
        random.shuffle(filenames)

        # BUG FIX: the original used `filenames[:-remainder]`, which for a
        # remainder of 0 is `filenames[:-0]` == [] and silently discarded
        # the entire dataset. Slice by explicit end index instead.
        remainder = dataset_len % batch_size
        dataset = np.array(filenames[:dataset_len - remainder])
        dataset_len = dataset.shape[0]

        assert (dataset_len % batch_size == 0)
        dataset = dataset.reshape((-1, batch_size))
        n_of_batches = dataset.shape[0]

        for i in range(n_of_batches):
            source = dataset[i, :]
            dest = []
            for sample in source:
                # pproll is assumed to be pypianoroll (file-level alias).
                multitrack = pproll.load(os.path.join(path, sample))
                proll = multitrack.get_stacked_pianoroll()
                dest.append(proll)

            dest = np.array(dest)
            yield dest
Example #5
0
    def pooled_tensor_batches(self, args):
        """Pool-worker entry point: build one tensor batch and pickle it.

        *args* is an (idx, source) pair; the result is written to
        data_path/batches/tensors-<idx>.pkl as (X tensor, labels).
        """
        idx, source = args

        batch_path = os.path.join(self.data_path, "batches/")
        pianorolls_path = os.path.join(self.data_path, "pianorolls/")
        metadata_path = os.path.join(self.data_path, "metadata/")

        rolls = []
        sample_ids = []
        # Collect each sample's stacked pianoroll; the sample id (filename
        # stem) doubles as its label here.
        for name in source:
            multitrack = pproll.load(os.path.join(pianorolls_path, name))
            rolls.append(multitrack.get_stacked_pianoroll())
            sample_ids.append(name.split(".")[0])

        labels = np.array(sample_ids)
        # preprocess batch; only X is kept
        X, _ = self.preprocess(np.array(rolls))

        # Persist the torch tensor together with its labels.
        with open(os.path.join(batch_path, f'tensors-{idx}.pkl'), 'wb') as f:
            pickle.dump((torch.from_numpy(X), labels), f,
                        pickle.HIGHEST_PROTOCOL)
Example #6
0
def load_data():
    """Collect fixed-length pianoroll segments from the songs in `id_list`
    and return a shuffled, batched DataLoader over them.

    Depends on module-level configuration: id_list, dataset_root, beat,
    lowest_pitch, number_of_pitches, measure_resolution, number_of_measures,
    samples_per_song and batch_size.
    """
    data = []
    # Iterate over all the songs in the ID list
    for msd_id in tqdm(id_list):
        # Load the multitrack as a Multitrack
        song_dir = dataset_root / msd_id_to_dirs(msd_id)
        multitrack = pypianoroll.load(song_dir / os.listdir(song_dir)[0])
        multitrack.binarize()
        multitrack.set_resolution(beat)
        # (n_tracks, n_timesteps, n_pitches) boolean tensor, pitch-cropped
        pianoroll = (multitrack.stack() > 0)
        pianoroll = pianoroll[:, :, lowest_pitch:lowest_pitch + number_of_pitches]
        n_total_measures = multitrack.get_max_length() // measure_resolution
        candidate = n_total_measures - number_of_measures
        target_number_of_samples = min(
            n_total_measures // number_of_measures, samples_per_song)
        # NOTE(review): assumes candidate > 0 — np.random.choice raises for
        # songs shorter than number_of_measures; confirm inputs are filtered.
        for selected in np.random.choice(candidate, target_number_of_samples, False):
            start = selected * measure_resolution
            end = (selected + number_of_measures) * measure_resolution
            segment = pianoroll[:, start:end]
            # BUG FIX: the sparsity check previously summed the WHOLE song —
            # a loop-invariant condition — so either all of a song's segments
            # were kept or all were skipped. Check the selected segment.
            if (segment.sum(axis=(1, 2)) < 10).any():
                continue
            data.append(segment)

    random.shuffle(data)
    data = np.stack(data)
    print(f"Successfully collect {len(data)} samples from {len(id_list)} songs")
    print(f"Data shape : {data.shape}")

    data = torch.as_tensor(data, dtype=torch.float32)
    dataset = torch.utils.data.TensorDataset(data)
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, drop_last=True, shuffle=True)
Example #7
0
def convert_midi_to_state(
    f_name,
    scale_num=None,
    scale_type="maj",
    twelve_tone_normalize=True,
    save_midi=False,
    save_json=False,
):
    """Convert a MIDI file into a list of deduplicated binary note states.

    Loads *f_name* with pypianoroll, binarizes it, optionally squashes each
    frame to a 12-tone chromatic representation and/or a scale, removes
    silent frames and consecutive duplicate frames, and returns the list.
    When *save_json* is set, also writes the states to
    "<f_name>.training_states.json". Exits the process for non-MIDI input.

    NOTE(review): *save_midi* is accepted but never used in this block.
    """
    is_midi = f_name.endswith((".mid", ".midi"))
    if not is_midi:
        # Same terminal behavior as before: report and exit non-zero.
        print("Not midi file: {}".format(f_name))
        exit(1)

    sc_num = scale_num

    # NOTE(review): `scale` is computed but unused below — the scale squash
    # uses CHROMATIC_SCALE instead. The call is retained in case get_scale()
    # validates its arguments; confirm whether it can be removed.
    scale = get_scale(scale_num, scale_type)

    mt = load(f_name)

    # convert to binary representation
    mt.binarize()

    # ensure that the vector is 0,1 only
    track = mt.get_merged_pianoroll(mode="any").astype(int)

    # One state per time step (frame) of the merged pianoroll.
    states = list(track)
    if twelve_tone_normalize:
        states = squash_piano_roll_to_chromatic_frames(states)

    if sc_num is not None:  # was `!= None`; identity test is the idiom
        # Squash to scale
        states = [
            squash_state_to_scale(s, CHROMATIC_SCALE[0:12]) for s in states
        ]

    # Filter out silence, then drop consecutive duplicate frames. (The old
    # dedup loop re-checked `sum > 0`, which is redundant after the filter.)
    states = [s for s in states if np.sum(s) > 0]
    deduped_states = []
    for i, state in enumerate(states):
        if i == 0 or not np.all(np.equal(state, states[i - 1])):
            deduped_states.append(state)
    states = deduped_states

    json_states_file = "{f_name}.{ext}".format(f_name=(f_name +
                                                       ".training_states"),
                                               ext="json")

    if save_json:
        write_states_to_file(states, f_name=json_states_file)

    return states
def piano_track(data_file):
    """Return the "Piano" track of *data_file* downsampled in time.

    Every window of 4 time steps is collapsed per pitch via sign(sum),
    yielding a 0/1 activation matrix of shape (~T/4, 128).
    """
    data = pp.load(data_file)
    index = [track.name for track in data.tracks].index("Piano")
    raw_track = data.tracks[index].pianoroll
    step = 4

    frames = []
    for i in range(0, raw_track.shape[0] - 1, step):
        window = raw_track[i:i + step, :]
        # sign(sum) == 1 iff any velocity in the window is nonzero
        frames.append([np.sign(np.sum(window[:, j])) for j in range(128)])

    return np.array(frames)
Example #9
0
    def __load_data_into_memory__(self):
        """Load every .npz pianoroll under self.data_dir into memory.

        Populates self.names, self.samples (each sample kept sparsely as a
        (shape, nonzero-indices, values) triple), self.cum_n_sequences and
        self.total_n_sequences.
        """
        # check if path exists
        path = os.path.join(self.data_dir)
        assert(os.path.exists(path))

        print('Loading data into memory...')

        # get all npz files, and check if there is at least one
        files = [x for x in glob.glob(path + '/**/*.npz', recursive=True)]
        assert(len(files) > 0)

        print('{} files found.'.format(len(files)))
        
        # Ratio between the dataset's native resolution and ours; used below
        # to downsample each multitrack to self.beat_resolution.
        downsample_factor = self.__class__.DEFAULT_BEAT_RESOLUTION // self.beat_resolution

        n_sequences = []
        
        # load files
        for f in tqdm(files):
            multitrack_roll = pp.load(str(f))

            # not sure what role downbeats play, but all samples in the lpd5 cleansed dataset
            # contain only one downbeat at the beginning, so I check for that, just in case.
            assert(np.all(multitrack_roll.get_downbeat_steps() == [0]))

            # all samples from the lpd5 cleansed dataset should have a beat resolution of 24
            assert(multitrack_roll.beat_resolution == self.__class__.DEFAULT_BEAT_RESOLUTION)

            multitrack_roll.downsample(downsample_factor)

            assert(multitrack_roll.beat_resolution == self.beat_resolution)

            # transpose to get (timesteps, n_instruments, pitch_range)
            stacked = multitrack_roll.get_stacked_pianoroll().transpose(0, 2, 1)

            # all samples from the lpd5 cleansed dataset should have 5 instruments
            assert(stacked.shape[1] == self.__class__.N_INSTRUMENTS)

            # keep only the configured instruments and the pitch window
            stacked = stacked[:, [inst.value for inst in self.instruments], self.lowest_pitch:self.lowest_pitch + self.n_pitches]

            # only add samples if not empty
            if not np.all(stacked == 0):
                # store sparsely: shape + nonzero indices + scaled values
                i = np.where(stacked > 0)
                # map values from [0, 127] to [0, 1]
                v = stacked[i] / 127
                        
                self.names.append(multitrack_roll.name)
                self.samples.append((stacked.shape, i, v))

                sample_length = multitrack_roll.get_max_length()
                n_sequences.append(self.calc_num_sequences(sample_length))

        # Cumulative counts let a flat sequence index be mapped back to the
        # sample it belongs to.
        self.cum_n_sequences = np.cumsum(n_sequences)
        self.total_n_sequences = self.cum_n_sequences[-1]

        print('Loaded {} samples with a total of {} sequences!'
            .format(len(self.samples), self.total_n_sequences))
Example #10
0
def midi2pianoroll(filename):
    """Parse <filename>.mid, store it as <filename>.npz, and dump the
    stacked pianoroll as a boolean array to <filename>.npy."""
    # Create a `pypianoroll.Multitrack` instance from the MIDI file.
    multitrack = Multitrack(filepath=filename + '.mid',
                            tempo=120.0,
                            beat_resolution=12)
    # Round-trip through the compressed .npz representation on disk.
    pypianoroll.save(filename + '.npz', multitrack, compressed=True)
    reloaded = pypianoroll.load(filename + '.npz')
    stacked = reloaded.get_stacked_pianorolls()
    np.save(filename + '.npy', stacked.astype(bool))
Example #11
0
def load_data_from_MIDI(opt):
    """Load the pianoroll phrase selected by *opt* and return it shaped as
    (1, nbar, ntime, npitch, ntrack)."""
    data = pypianoroll.load('training_data/%s/%s' % (opt.input_dir, opt.input_phrase))
    # Gather the raw pianoroll of each of the first `ntrack` tracks.
    tracks = [data.tracks[i].pianoroll for i in range(opt.ntrack)]
    # list -> ndarray, then split into (bar, time, pitch, track) blocks
    Data = np.array(tracks).reshape(
        -1, opt.nbar, opt.ntime, opt.npitch, opt.ntrack)
    # Keep only the first phrase.
    return Data[0:1, :, :, :, :]
Example #12
0
    def select_song(self, idx, metadata=True):
        """Return (metadata, multitrack) for song *idx*.

        The metadata is None unless *metadata* is True.
        """
        # NOTE(review): dataset_path is string-concatenated with "songs/",
        # so it is assumed to end with a path separator — confirm.
        multitrack = pproll.load(
            os.path.join(self.dataset_path + "songs/" + str(idx) + ".npz"))

        song_meta = None
        if metadata:
            song_meta = self.retrieve_metadata(
                os.path.join(self.dataset_path, "metadata/",
                             str(idx) + ".json"))

        return song_meta, multitrack
Example #13
0
    def __init__(self, filename=None):
        """Initialize from a .npz pianoroll or a .mid file.

        For .npz input, converts the pypianoroll tracks into MIDI event
        tracks; for .mid input, delegates to mido.MidiFile and strips
        unknown meta messages. With no filename, creates an empty MIDI file.
        """
        # Debug output channel: /dev/null unless self.debug is flipped on.
        self.debug = False

        if not self.debug:
            # off debug msg
            self.chan = open(os.devnull, 'w')
        else:
            self.chan = sys.stderr

        if filename is not None:
            # Crude extension sniffing: last dot-separated token of the name.
            ext = os.path.split(filename)[-1].split('.')[-1]

            if ext == 'npz':
                import pypianoroll
                # Initialize an EMPTY mido file, then fill tracks ourselves.
                mido.MidiFile.__init__(self)
                data = pypianoroll.load(filename)
                track_len = len(data.tracks)

                # Convert each pianoroll (transposed to pitch-major) into a
                # list of MIDI events via the project helper.
                self.tracks = [
                    self._get_events_from_roll(
                        data.tracks[i].get_pianoroll_copy().transpose(), i)
                    for i in range(track_len)
                ]

                self.instrument = [track.program for track in data.tracks]
                self.ticks_per_beat = data.beat_resolution

                # assume we follow only 1 bpm in a song
                bpm = data.tempo[0]
                self.set_tempo_bpm(bpm)

            elif ext == 'mid':
                mido.MidiFile.__init__(self, filename)

                self.meta = {}
                # assume only 0 or 1 program change event in each channel
                # default instrument is Piano in ch.0

                for idx, track in enumerate(self.tracks):
                    # remove mido.UnknownMetaMessage in track (which would cause error)
                    self.tracks[idx] = [
                        msg for msg in track
                        if not isinstance(msg, mido.UnknownMetaMessage)
                    ]
                # One instrument slot per MIDI channel; -1 means "unset".
                self.instrument = [-1 for _ in range(16)]
                self.get_instrument()

        else:
            # No filename: plain empty MIDI file with default instruments.
            # NOTE(review): other extensions also fall through to here only
            # implicitly — a filename with an unknown extension leaves the
            # object partially initialized (no meta/instrument); confirm.
            mido.MidiFile.__init__(self, filename)

            self.meta = {}
            self.instrument = [-1 for _ in range(16)]
Example #14
0
def load_phrase_from_single_npz(opt):
    """Load and return the training data from a npz file (sparse format)."""
    data = pypianoroll.load('training_data/%s/%s' % (opt.input_dir, opt.input_phrase))

    # Collect the raw pianoroll of each of the first `ntrack` tracks.
    tracks = [data.tracks[i].pianoroll for i in range(opt.ntrack)]

    # list -> ndarray, reshaped to (phrase, bar, time, pitch, track);
    # only the first phrase is kept and handed to the torch converter.
    Data = np.array(tracks).reshape(
        -1, opt.nbar, opt.ntime, opt.npitch, opt.ntrack)
    return np2torch(Data[0:1, :, :, :, :])
    def _convert_to_tfrecords(self, mode, filename_list):
        """Write the pianorolls named in *filename_list* into
        <DATA_PATH>/<mode>.tfrecords as flattened int64 segments.

        Skips entirely if the target file already exists. MIDI files are
        parsed, .npz files are loaded; per file, valid tracks are merged
        into one binary roll which is then cut into MAX_LEN-step segments.
        """
        filename = self.DATA_PATH + "/" + mode + ".tfrecords"
        if check_path_exists(filename):
            return

        logging.info("Writing {}".format(filename))
        with tf.python_io.TFRecordWriter(filename) as writer:
            # NOTE(review): the loop variable shadows the outer `filename`
            # (the tfrecords path) — works, but worth renaming.
            for filename in tqdm(filename_list):
                if filename.endswith(".mid") or filename.endswith(".midi"):
                    multi_track = ppr.parse(filename)
                else:
                    multi_track = ppr.load(filename)

                # 1e8 is the helper's sentinel for "no usable track".
                TOTAL_STEPS = self._choose_total_steps(multi_track)
                if TOTAL_STEPS == 1e8:
                    continue
                RANGE = self.INPUT_SIZE
                # Downsample by 24 time steps per final step.
                FINAL_STEPS = math.ceil(TOTAL_STEPS / 24)
                multi_data = np.zeros((FINAL_STEPS, RANGE))

                # Merge all valid tracks into a single binary roll.
                for track in multi_track.tracks:
                    if not self._is_valid_track(track):
                        continue
                    data = track.pianoroll.astype(int)
                    data = self._sampling(data)
                    multi_data = np.add(multi_data, data)
                multi_data = np.clip(multi_data, 0, 1).astype(int)

                # NOTE(review): RANGE is reused here with a new meaning —
                # now the list of segment start offsets.
                RANGE = self._split_into_segments(multi_data, 1)
                length = self.MAX_LEN

                for start in RANGE:
                    end = start + length
                    if end >= FINAL_STEPS:
                        break
                    # Each record is one (length+1)-step window, flattened.
                    example = tf.train.Example(
                        features=tf.train.Features(
                            feature={
                                "pianoroll":
                                self._int64_list_feature(
                                    multi_data[start:end + 1].flatten())
                            }))
                    writer.write(example.SerializeToString())
def parse_data(genres_directory, parsed_directory):
    """Slice every song of the whitelisted genres into 4-bar chunks and save
    each chunk as a compressed .npz under a config-derived directory name.

    Relies on module-level constants: NUM_TRACKS, NUM_BARS, BEATS_PER_BAR,
    NUM_NOTES, BEATS_PER_SET, GENRE_LIST, LOWEST_NOTE.
    """
    # Encode the parsing configuration into the output directory name.
    save_directory_name = "NT-" + str(NUM_TRACKS) + "-NB-" + str(NUM_BARS) + "-BPB-" + str(BEATS_PER_BAR) + "-NN-" + str(NUM_NOTES)
    save_directory_path = join(parsed_directory, save_directory_name)
    os.makedirs(save_directory_path, exist_ok=True)

    for genre in os.listdir(genres_directory):
        if genre in GENRE_LIST:
            print("\n\n\n\n")
            print(genre)
            print("\n\n\n\n")
            genre_directory = join(genres_directory, genre)
            for song in os.listdir(genre_directory):
                print(song)
                song_multitrack = pypianoroll.load(join(genre_directory, song))
                # Equalize track lengths, then pad so the song divides into
                # whole BEATS_PER_SET chunks.
                song_multitrack.pad_to_same()
                song_multitrack.pad_to_multiple(BEATS_PER_SET)
                # pianoroll.size / 128 == number of time steps of track 0
                song_divisions = int(((song_multitrack.tracks[0].pianoroll.size)/128)/(BEATS_PER_SET))

                for division in range(0, song_divisions):
                    track_list = []

                    for track in song_multitrack.tracks:
                        current_beat = division*(BEATS_PER_SET)
                        bar_list = []

                        # Assumes 4 bars per set (matches the reshape below).
                        for bar in range(0, 4):
                            beat_list = []

                            # Crop each time step to the NUM_NOTES pitch window.
                            for beat in range(current_beat, current_beat+BEATS_PER_BAR):
                                beat_list.append(np.asarray(track.pianoroll[beat][LOWEST_NOTE:LOWEST_NOTE+NUM_NOTES]))

                            bar_list.append(np.asarray(beat_list))
                            current_beat += BEATS_PER_BAR

                        track_list.append(np.asarray(bar_list))

                    filename = genre + "-" + song.split(".")[0] + "-" + str(division)
                    filepath = join(save_directory_path, filename)

                    # NOTE(review): the hard-coded (4, 96, 84, 5) shape ties
                    # this to BEATS_PER_BAR=96, NUM_NOTES=84, NUM_TRACKS=5;
                    # pairing the array with the genre string yields an
                    # object-dtype array — confirm downstream expects that.
                    reshaped_track_list = np.reshape(np.asarray(track_list), (4, 96, 84, 5))
                    np.savez_compressed(filepath, data=np.asarray([reshaped_track_list, genre]))
Example #17
0
    def _compute_all_features(self):
        """Computes all the features (beatsync, framesync) from the audio."""
        # The "audio" here is a pypianoroll multitrack, not a waveform.
        self._audio = pypianoroll.load(self.file_struct.audio_file)

        # One pianoroll time step lasts 60/tempo seconds.
        num_timestep = self._audio.tracks[0].pianoroll.shape[0]
        self.tempo = self._audio.tempo[0]
        seconds_per_step = 60. / self.tempo
        self.dur = seconds_per_step * num_timestep

        # Time stamps for every frame, and one per beat (every
        # beat_resolution frames).
        self._framesync_times = seconds_per_step * np.arange(0, num_timestep)
        self._ann_beats_times = seconds_per_step * np.arange(
            0, num_timestep, self._audio.beat_resolution)
        # Annotated and estimated beats coincide for synthetic pianoroll data.
        self._ann_beatsync_times = self._ann_beats_times
        self._est_beats_times = self._ann_beats_times
        self._est_beatsync_times = self._ann_beats_times

        # Compute features
        self._framesync_features = self.compute_features()
        self.compute_beat_sync_features()
Example #18
0
def gbs_piano_roll(filepath):
    """Build a (3*128, T) binary roll from the Guitar/Bass/String tracks of
    a pypianoroll .npz file; return None if any of them has no notes."""
    pm = pypianoroll.load(filepath).to_pretty_midi()
    # g = Guitar; b = Bass; s = String.
    gbs = []
    for instrument in pm.instruments[2:]:
        if not instrument.notes:
            return None
        gbs.append(instrument.notes)

    # Quantize the timeline: ticks scaled by 4/resolution.
    piano_roll_length = pm.time_to_tick(pm.get_end_time()) * 4 // pm.resolution
    piano_roll = np.zeros((128 * 3, piano_roll_length), dtype=np.int8)

    for i, notes in enumerate(gbs):
        for note in notes:
            start = pm.time_to_tick(note.start) * 4 // pm.resolution
            end = pm.time_to_tick(note.end) * 4 // pm.resolution
            # Each instrument occupies its own 128-pitch band of rows.
            piano_roll[(i * 128) + note.pitch, start:end] = 1
    return piano_roll
Example #19
0
def load_npz(filepath):
    """Load a pypianoroll .npz, resample every track to 640 time steps, and
    return the tracks concatenated along the pitch axis: shape (640, 128*n).
    """
    pm = pypianoroll.load(filepath).to_pretty_midi()
    resolution = pm.resolution
    time = pm.get_end_time()
    tick = pm.time_to_tick(time * 8 / resolution)

    multi_tracks = []
    for track in pm.instruments:
        piano_roll = track.get_piano_roll(fs=tick / time)
        # Right-pad the time axis to max(640, tick) columns (the two former
        # branches unified), transpose to time-major, then crop to 640.
        pad = max(640, tick) - len(piano_roll[0])
        padded_pr = np.pad(piano_roll, ((0, 0), (0, pad)), 'constant').T
        multi_tracks.append(padded_pr[:640])

    # PERF: one concatenate instead of the former repeated pairwise
    # concatenation (which copied the accumulator on every iteration).
    return np.concatenate(multi_tracks, axis=1)
Example #20
0
def main():
    """Convert every .npz pianoroll in --input_directory to an SVG plot, a
    MIDI file and a FluidSynth-rendered WAV.

    Uses args from the file-level parser(): input_directory, savepath, output.
    """
    args = parser()
    pypianoroll_directory = abspath(args.input_directory)

    if args.savepath is None:
        savepath = os.getcwd()
    else:
        savepath = args.savepath
        os.makedirs(savepath, exist_ok=True)

    for pypianoroll_file in os.listdir(pypianoroll_directory):
        # NOTE(review): split(".")[1] raises IndexError for extensionless
        # names and misses "a.b.npz"-style names — consider endswith(".npz").
        if os.path.basename(pypianoroll_file).split(".")[1] == "npz":
            loaded = pypianoroll.load(
                join(pypianoroll_directory, pypianoroll_file))

            # Output basename: file stem, optionally prefixed by --output.
            if args.output is None:
                output_filename = os.path.basename(pypianoroll_file).split(
                    ".")[0]
            else:
                # BUG FIX: was `ags.output`, a NameError at runtime.
                output_filename = args.output + os.path.basename(
                    pypianoroll_file).split(".")[0]
            output_filename = join(savepath, output_filename)

            # Save plot (close the figure to avoid leaking across the loop)
            fig, axs = plot_multitrack(loaded,
                                       filename=output_filename + ".svg",
                                       preset="frame")
            plt.close(fig)

            # Save MIDI, then synthesize WAV from it
            pypianoroll.write(loaded, (output_filename + ".mid"))

            fs = FluidSynth('FluidR3_GM.sf2')
            fs.midi_to_audio((output_filename + ".mid"),
                             (output_filename + ".wav"))
Example #21
0
def load_npz(path):
    """Best-effort load of a pypianoroll .npz file.

    Returns the loaded Multitrack, or None when the file is missing or
    unreadable (deliberate best-effort: callers treat None as "skip").
    """
    try:
        return pypianoroll.load(path)
    except Exception:  # dropped the unused `as e` binding
        return None
Example #22
0
import pypianoroll
import os

from tqdm import tqdm


def get_all_file_from_directory(path):
    """Recursively walk *path* and return a list of (basename, full_path)
    pairs, one per file found."""
    collected = []
    for dirpath, _subdirs, filenames in os.walk(path):
        collected.extend(
            (name, os.path.join(dirpath, name)) for name in filenames)
    return collected


# Collect every file under the LPD dump and convert each pianoroll to a
# MIDI file in out_dir_name.
all_files = get_all_file_from_directory("../data/lpd/")
out_dir_name = "../data/lakhdataset_midi/"

for sign, each in tqdm(all_files):
    # NOTE(review): `sign` is the full basename (extension included), so the
    # output name is "<name>.npz.mid" — confirm this is intended.
    filep = pypianoroll.load(filepath=each)
    filep.write("{path}{sign}.mid".format(path=out_dir_name, sign=sign))
Example #23
0
    def create_batches(self, batch_size=128):
        """Group the pianoroll files into batches of *batch_size* and save
        per-batch X, Y and labels arrays under data_path/batches/.

        Files that do not fill a complete final batch are discarded.
        """
        print("Building batches from data...")

        batch_path = os.path.join(self.data_path, "batches/")
        if not os.path.exists(batch_path):
            os.makedirs(os.path.join(batch_path, "X"))
            os.makedirs(os.path.join(batch_path, "Y"))
            os.makedirs(os.path.join(batch_path, "labels"))

        pianorolls_path = os.path.join(self.dataset_path, "pianorolls/")
        metadata_path = os.path.join(self.dataset_path, "metadata/")

        _, _, files = next(os.walk(pianorolls_path))

        dataset_len = len(files)

        random.shuffle(files)
        # BUG FIX: the original used `files[:-remainder]`, which for a
        # remainder of 0 is `files[:-0]` == [] and silently discarded the
        # entire dataset. Slice by explicit end index instead.
        remainder = dataset_len % batch_size
        dataset = np.array(files[:dataset_len - remainder])
        dataset_len = dataset.shape[0]

        print("dataset_length:", dataset_len)
        print("batch_size:", batch_size)
        print("number of batches:", dataset_len // batch_size)
        print("remainder:", remainder)

        assert (dataset_len % batch_size == 0)
        dataset = dataset.reshape((-1, batch_size))
        n_of_batches = dataset.shape[0]

        # sample-id -> song-id mapping used to look up the label files
        meta_link = json.load(
            open(os.path.join(self.dataset_path, "meta_link.json")))
        for i in track(range(n_of_batches), description="Batching data..."):
            source = dataset[i, :]
            dest = []
            labels = []
            # for each pianoroll, store it and the corresponding labels
            for sample in source:
                multitrack = pproll.load(os.path.join(pianorolls_path, sample))
                proll = multitrack.get_stacked_pianoroll()
                dest.append(proll)

                # retrieve the corresponding label via the song id
                sample_id = sample.split(".")[0]
                song_id = meta_link[sample_id]
                label = np.load(
                    os.path.join(self.dataset_path, "labels",
                                 str(song_id) + ".npy"))
                labels.append(label)

            dest = np.array(dest)
            labels = np.array(labels)
            # preprocess batch, get X and Y
            X, Y = self.preprocess(dest)
            # store everything
            np.save(os.path.join(batch_path, "X", str(i) + ".npy"), X)
            np.save(os.path.join(batch_path, "Y", str(i) + ".npy"), Y)
            np.save(os.path.join(batch_path, "labels",
                                 str(i) + ".npy"), labels)
                print(os.path.join(root, file))
                file_list.append(os.path.join(root, file))
    return file_list

###########################################
# From lpd_5_cleansed to lpd_4dbar_12_npy #
###########################################
root_dir = './lpd_5_cleansed/'
file_list = traverse_dir(root_dir)

tensor_file = np.zeros((0,768,128,5))
i = 0
for file in file_list:
    i += 1
    print(i)
    multitrack = pr.load(file)
    track_list = [3,0,2,1,4]
    file_len = max(multitrack.tracks[0].pianoroll.shape[0],
                   multitrack.tracks[1].pianoroll.shape[0],
                   multitrack.tracks[2].pianoroll.shape[0],
                   multitrack.tracks[3].pianoroll.shape[0],
                   multitrack.tracks[4].pianoroll.shape[0])
    dbar_len = int(np.floor(file_len/768.0))
    if dbar_len > 0:
        tensor_new = []
        
        for track_idx in track_list:
            track = multitrack.tracks[track_idx].pianoroll
            if (track.shape[0]==0):
                track_re = np.zeros((dbar_len,768,128))
                tensor_new.append(track_re)
Example #25
0
 def select_pianoroll(self, idx):
     """Load and return pianoroll *idx* from the dataset's pianorolls dir."""
     # NOTE(review): dataset_path is string-concatenated with "pianorolls/",
     # so it is assumed to end with a path separator — confirm.
     roll_file = os.path.join(self.dataset_path + "pianorolls/" + str(idx) +
                              ".npz")
     return pproll.load(roll_file)
Example #26
0
import glob
import pypianoroll


# Convert every flattened .npz pianoroll into a MIDI file (pypianoroll's
# Multitrack.write emits MIDI regardless of the given extension).
for file in glob.glob("flattened/*.npz"):
    print(file)
    new_obj = pypianoroll.load(file) #loads npz into a pypianoroll multitrack matrix
    new_obj.write(file) #converts to a midi 
Example #27
0
def main():
    """Continue a melody with a GPT-style model.

    Encodes the input pianoroll (args.input) as a token sequence (one token
    per active (track, pitch) cell plus a time-advance token), feeds the
    last 512 tokens to the TF model as context, and decodes the sampled
    tokens back into a multitrack pianoroll written to args.output.

    Relies on module-level names: args, programs, tracks, note_offset,
    note_size, trc_idx, trc_len, time_note, top_p, hparams, model,
    get_sequence.
    """
    pre_melody = pypianoroll.load(args.input)
    pre = []
    step = pre_melody.beat_resolution // 4  # 16 beat minimum
    # Accumulate all tracks into one (time, pitch, program-group) tensor.
    pianoroll = np.zeros((pre_melody.get_max_length(),128,len(programs)))
    for track in pre_melody.tracks:
        if track.is_drum:
            dst_index = 0
        else:
            # Map the track's program number to its program-group index.
            dst_index = 1
            for i in range(1,len(programs),1):
                if track.program >= programs[i] and (len(programs) == i+1 or track.program < programs[i+1]):
                    dst_index = i
                    break
        pianoroll[0:track.pianoroll.shape[0],:,dst_index] += track.pianoroll
    # Crop to the modelled pitch window and track subset.
    pianoroll = pianoroll[:,note_offset:note_offset+note_size,trc_idx]
    p = np.where(pianoroll != 0)
    current_seq = []
    def _current(cur_seq):
        # Order one time step's tokens: non-drum groups first, drums last
        # (tokens in [note_size, 2*note_size) are the drum band).
        cur = []
        for c in sorted(cur_seq):
            if not (c >= note_size and c < note_size*2):
                cur.append(c)
        for c in sorted(cur_seq):
            if (c >= note_size and c < note_size*2):
                cur.append(c)
        return cur # Bass, Piano, etc..., Drums
    pos = 0
    # Walk nonzero cells in time order; emit a time_note token per `step`
    # time steps and one note token per active (track, pitch) cell.
    for i in np.argsort(p[0]):
        if p[0][i] % step != 0:
            continue
        if pos < p[0][i]:
            for _ in range(pos,p[0][i],step):
                pre.extend(_current(current_seq))
                pre.append(time_note)
                current_seq = []
        pos = p[0][i]
        j = p[1][i]
        t = p[2][i]
        note = t*note_size + j
        current_seq.append(note)
    pre.extend(_current(current_seq))
    if len(pre) == 0 or pre[-1] != time_note:
        pre.append(time_note)
    # The model context window is 512 tokens; keep only the tail.
    if len(pre) > 512:
        pre = pre[-512:]

    cur_top = (0,top_p)
    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [1, None])
        output = model.model(hparams=hparams, X=context)
        vars = [v for v in tf.trainable_variables() if 'model' in v.name]

        # Restore the latest checkpoint of the model variables.
        saver = tf.train.Saver(var_list=vars)
        ckpt = tf.train.latest_checkpoint(args.model)
        saver.restore(sess, ckpt)

        pianoroll = np.zeros((trc_len, args.num_bars*16, 128))

        seq = get_sequence(sess, context, pre, cur_top)
        pos = 0
        firstnote = False
        print('Generating Melody...')
        progress = tqdm(total=pianoroll.shape[1])
        # Decode sampled tokens into the output pianoroll until the
        # requested number of 16th-note steps is filled.
        while pos < pianoroll.shape[1]:
            for note in seq:
                # Skip leading non-note tokens until the first real note.
                if (not firstnote) and note >= time_note:
                    continue
                else:
                    firstnote = True
                pre.append(note)
                if note == time_note:
                    pos += 1
                    progress.update(1)
                    if pos >= pianoroll.shape[1]:
                        break
                elif note < time_note:
                    # Split the token back into (track, pitch).
                    trc = trc_idx.index(note // note_size)
                    mid = note % note_size + note_offset
                    if mid < 128:
                        pianoroll[trc,pos,mid] = 100
            # Re-sample with the updated (tail-truncated) context.
            seq = get_sequence(sess, context, pre[-512:], cur_top)

        pr = []
        for i,(t,p) in enumerate(zip(tracks,programs)):
            pr.append(pypianoroll.Track(pianoroll=pianoroll[i], program=p, is_drum=(t=='Drums')))
        mt = pypianoroll.Multitrack(tracks=pr, tempo=args.tempo, beat_resolution=4)
        mt.write(args.output)
Example #28
0
    def plot_loss(self):
        """Plot the discriminator and generator loss histories and save the
        figure to disk."""
        # Draw both curves first so the legend picks them up in order.
        plt.plot(self.disc_loss, c='red')
        plt.plot(self.gen_loss, c='blue')

        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title("GAN Loss per Epoch")
        plt.legend(['Discriminator', 'Generator'])

        # Persist with a transparent background, then release the figure.
        plt.savefig('GAN_Loss_per_Epoch_final_1000.png', transparent=True)
        plt.close()


if __name__ == '__main__':
    # Root folder of the LPD-5 cleansed dataset.
    path = "C:\\Users\\10413\\Desktop\\deep_learning\\project\\midi-lstm-gan-master\\lpd_5\\lpd_5_cleansed"
    listOfFiles = getListOfFiles(path)
    # Fixed-size training buffer: one 4*96-step, 128-pitch roll per file.
    data_train = np.empty([len(listOfFiles), 4 * 96, 128])
    i = 0
    for files in listOfFiles:
        piano_roll = pypianoroll.load(files)
        # Track index 1 is used as the training track.
        data = piano_roll.tracks[1].pianoroll
        data2 = piano_roll.tracks[1]
        pypianoroll.plot(data2)  # plot the track
        if data.shape[0] > (4 * 96 - 1):
            # NOTE(review): the 0:4*96-1 / 0:127 slices drop the last time
            # step and the last pitch, leaving them uninitialized in the
            # np.empty buffer — confirm this off-by-one is intended.
            data_train[i, 0:4 * 96 - 1, 0:127] = data[0:4 * 96 - 1, 0:127]
            i = i + 1
        # Only the first ~11 usable files are collected.
        if i > 10:
            break
    gan = GAN(rows=4 * 96)
    gan.train(epochs=2,
              batch_size=32,
              train_data=data_train,
              sample_interval=1)
import copy

from madmom.audio.chroma import DeepChromaProcessor
from madmom.features.chords import DeepChromaChordRecognitionProcessor

##################################
#     .mid to .npy .             #
##################################
# Create a `pypianoroll.Multitrack` instance from the test MIDI file.
print('----------------Start data preprocessing !!-------------------')
multitrack = Multitrack(filepath='./data/preprocessing/mysong_mid_C/test.mid', tempo=120.0, beat_resolution=12)

# Round-trip through compressed .npz, then dump the stacked pianoroll as a
# boolean array (pr is assumed to be pypianoroll — confirm the alias).
pr.save('./data/preprocessing/mysong_npy_C/test.npz', multitrack, compressed=True)

data = pr.load('./data/preprocessing/mysong_npy_C/test.npz')
data_tracks = data.get_stacked_pianorolls()
data_bool = data_tracks.astype(bool)

np.save('./data/preprocessing/mysong_npy_C/test.npy',data_bool)
# Reload immediately as a sanity check of the written file.
data_npy_C = np.load('./data/preprocessing/mysong_npy_C/test.npy')

# print(np.shape(data_npy_C)) #(3072, 128, 2)

##################################
#     .mid to .wav .             #
##################################
# from music21 import *
# from midi2audio import FluidSynth
# from IPython.display import display, Image, Audio
# Soundfont path as installed on Arch Linux.
fs = FluidSynth('/usr/share/sounds/sf2/FluidR3_GM.sf2') # arch
Example #30
0
def load(npz):
    """Thin wrapper: load the pianoroll .npz at *npz* via pypianoroll.

    NOTE(review): the name `load` shadows pypianoroll.load if that function
    is ever imported directly into this module's namespace.
    """
    multitrack = pypianoroll.load(npz)
    return multitrack