def transposition(events, controls, offset=0):
    """Transpose note-on/note-off events by `offset` semitones.

    Args:
        events: int array-like, [steps, batch_size] of event indices
            (assumed — TODO confirm against caller; the original comment
            says [steps, batch_size, event_dim]).
        controls: float array-like of control features, or None.
        offset: signed number of semitones to transpose by.

    Returns:
        (events, controls) when controls is not None, else events alone.
    """
    events = np.array(events, dtype=np.int64)
    # Bug fix: the original converted unconditionally, which raised
    # TypeError for controls=None even though the None case is explicitly
    # supported below (the `if controls is not None` branch).
    controls = (np.array(controls, dtype=np.float32)
                if controls is not None else None)

    event_feat_ranges = EventSeq.feat_ranges()
    on = event_feat_ranges['note_on']
    off = event_feat_ranges['note_off']

    if offset > 0:
        # Events that can shift up without leaving their feature range.
        indices0 = (((on.start <= events) & (events < on.stop - offset)) |
                    ((off.start <= events) & (events < off.stop - offset)))
        # Events at the top of the range: wrap down an octave instead.
        indices1 = (((on.stop - offset <= events) & (events < on.stop)) |
                    ((off.stop - offset <= events) & (events < off.stop)))
        events[indices0] += offset
        events[indices1] += offset - 12
    elif offset < 0:
        # Events that can shift down without leaving their feature range.
        indices0 = (((on.start - offset <= events) & (events < on.stop)) |
                    ((off.start - offset <= events) & (events < off.stop)))
        # Events at the bottom of the range: wrap up an octave instead.
        indices1 = (((on.start <= events) & (events < on.start - offset)) |
                    ((off.start <= events) & (events < off.start - offset)))
        events[indices0] += offset
        events[indices1] += offset + 12

    # Sanity check: every event index must still be a valid event.
    assert ((0 <= events) & (events < EventSeq.dim())).all()

    if controls is not None:
        # Keep the pitch-histogram control consistent with the transposition
        # by rotating it along the pitch-class axis.
        histr = ControlSeq.feat_ranges()['pitch_histogram']
        controls[:, :, histr.start:histr.stop] = np.roll(
            controls[:, :, histr.start:histr.stop], offset, -1)
        return events, controls

    return events
def event_indeces_to_midi_file(event_indeces, midi_file_name, velocity_scale=0.8):
    """Decode an array of event indices into a MIDI file on disk.

    Velocities are compressed toward the midpoint 64 by `velocity_scale`
    before writing. Returns the number of notes written.
    """
    note_seq = EventSeq.from_array(event_indeces).to_note_seq()
    for note in note_seq.notes:
        scaled = (note.velocity - 64) * velocity_scale + 64
        note.velocity = int(scaled)
    note_seq.to_midi_file(midi_file_name)
    return len(note_seq.notes)
# --- Unpack parsed CLI options into module-level training settings ---
sess_path = options.sess_path
data_path = options.data_path
saving_interval = options.saving_interval
learning_rate = options.learning_rate
batch_size = options.batch_size
window_size = options.window_size
stride_size = options.stride_size
use_transposition = options.use_transposition
control_ratio = options.control_ratio
teacher_forcing_ratio = options.teacher_forcing_ratio
reset_optimizer = options.reset_optimizer
enable_logging = options.enable_logging

# Dimensions are derived from the sequence encodings, not configurable.
event_dim = EventSeq.dim()
control_dim = ControlSeq.dim()

# Start from the config-file model hyperparameters, then let any
# CLI-provided overrides (a param string parsed to a dict) win.
model_config = config.model
model_params = utils.params2dict(options.model_params)
model_config.update(model_params)
device = config.device

# Echo the effective configuration so each run's settings are logged.
print('-' * 70)

print('Session path:', sess_path)
print('Dataset path:', data_path)
print('Saving interval:', saving_interval)
print('-' * 70)

print('Hyperparameters:', utils.dict2params(model_config))
print('Learning rate:', learning_rate)
def preprocess_midi_event(path):
    """Read the MIDI file at `path` and return its event-index array.

    The note sequence is shifted so the first note starts at time 0
    before encoding.
    """
    notes = NoteSeq.from_midi_file(path)
    notes.adjust_time(-notes.notes[0].start)
    return EventSeq.from_note_seq(notes).to_array()
import torch

from utils.sequence import EventSeq, ControlSeq

# Prefer the GPU whenever CUDA is available.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Model hyperparameters; event/control dims come from the encodings.
model = dict(
    init_dim=32,
    event_dim=EventSeq.dim(),
    control_dim=ControlSeq.dim(),
    hidden_dim=512,
    gru_layers=3,
    gru_dropout=0.3,
)

# Default training hyperparameters (overridable from the CLI).
train = dict(
    learning_rate=0.001,
    batch_size=64,
    window_size=200,
    stride_size=10,
    use_transposition=False,
    control_ratio=1.0,
    teacher_forcing_ratio=1.0,
)
return except: print(' Error') continue for path, future in Bar('Processing').iter(results): print(' ', end='[{}]'.format(path), flush=True) name = os.path.basename(path) code = hashlib.md5(path.encode()).hexdigest() save_path = os.path.join(save_dir, out_fmt.format(name, code)) torch.save(future.result(), save_path) print('Done') """ if __name__ == '__main__': """ preprocess_midi_files_under( midi_root=sys.argv[1], save_dir=sys.argv[2], num_workers=int(sys.argv[3], type='event')) """ path = "../../../egs/dataset/tmp_res/test_seq_bef.midi" save_path = "../../../egs/dataset/tmp_res/test_seq_aft.midi" event_seq_array = preprocess_midi_event(path) event_seq = EventSeq.from_array(event_seq_array) note_seq = event_seq.to_note_seq() note_seq.to_midi_file(save_path)