def __getitem__(self, index):
    """Load the MIDI file at *index* and return its first track's piano
    roll as a float tensor.

    Values are clamped into [0, 1] and the time axis is downsampled by
    keeping every 6th step.
    """
    path = self.full_filenames[index]
    track_set = Multitrack(path)
    # Clamp all piano-roll values into the [0, 1] range in place.
    track_set.clip(0, 1)
    # Keep one of every six time steps (downsample the time axis).
    roll = track_set.tracks[0].pianoroll[::6, :]
    return torch.FloatTensor(roll.astype(float))
def converter(file):
    """Convert one MIDI file into phrase-sized binarized piano-roll arrays.

    The file is parsed, its tracks merged into a single piano roll, which
    is then zero-padded to a whole number of phrases, scaled to [0, 1],
    binarized at a 0.35 threshold, split into phrases, and pickled to
    NP_FILE_PATH together with a phrase-number list.

    Args:
        file: File name (relative to MIDI_FILE_PATH) of the MIDI file.

    Returns:
        (midi_md5, midi_info) on success, or None if any step fails —
        errors are printed rather than raised (best-effort batch use).
    """
    phrase_size = BAR_SIZE * PHRASE_SIZE
    try:
        midi_md5 = os.path.splitext(
            os.path.basename(os.path.join(MIDI_FILE_PATH, file)))[0]
        multitrack = Multitrack(beat_resolution=BEAT_RESOLUTION,
                                name=midi_md5)
        pm = pretty_midi.PrettyMIDI(os.path.join(MIDI_FILE_PATH, file))
        multitrack.parse_pretty_midi(pm)
        midi_info = get_midi_info(pm)

        length = multitrack.get_max_length()
        # Pad so the total length is an exact multiple of phrase_size.
        padding_size = phrase_size - (
            length % phrase_size) if length % phrase_size else 0
        num_phrases = length // phrase_size + (
            1 if length % phrase_size else 0)

        # Clamp velocities into the valid MIDI range before merging.
        multitrack.clip(0, 127)
        data = multitrack.get_merged_pianoroll(mode='max')
        if padding_size:
            # np.zeros replaces the original hand-rolled nested list.
            data = np.concatenate(
                (data, np.zeros((padding_size, 128), dtype=data.dtype)),
                axis=0)

        # BUG FIX: the original called data.astype(np.float64) and
        # discarded the result (astype is not in-place); assign it so the
        # intended conversion actually takes effect.
        data = data.astype(np.float64)
        data = data / 127.
        # Binarize: anything at or above the 0.35 threshold becomes 1.
        data[data < 0.35] = 0.
        data[data >= 0.35] = 1.

        # Split into consecutive phrase_size-step chunks.
        data_by_phrase = [data[i:i + phrase_size]
                          for i in range(0, len(data), phrase_size)]
        # First entry is the constant 330, then a countdown over the
        # remaining phrases. NOTE(review): the meaning of 330 is not
        # visible from this file — confirm against the consumer.
        phrase_number = [330] + [i for i in range(num_phrases - 2, -1, -1)]

        with open(
                os.path.join(NP_FILE_PATH,
                             '{}_{}.pkl'.format(num_phrases, midi_md5)),
                'wb') as fp:
            pickle.dump([np.array(data_by_phrase), phrase_number], fp)
        return (midi_md5, midi_info)
    except Exception as e:
        # Best-effort: report and skip files that fail to convert.
        print(e)
        return None