import numpy as np

from utils.sequence import EventSeq, ControlSeq


def transposition(events, controls, offset=0):
    # events [steps, batch_size, event_dim]
    # returns (events, controls) transposed by `offset` semitones
    events = np.array(events, dtype=np.int64)
    if controls is not None:
        controls = np.array(controls, dtype=np.float32)
    event_feat_ranges = EventSeq.feat_ranges()

    on = event_feat_ranges['note_on']
    off = event_feat_ranges['note_off']

    if offset > 0:
        # note indices that can be shifted up without leaving the note range
        indices0 = (((on.start <= events) & (events < on.stop - offset)) |
                    ((off.start <= events) & (events < off.stop - offset)))
        # note indices that would overflow the range; wrap them down an octave
        indices1 = (((on.stop - offset <= events) & (events < on.stop)) |
                    ((off.stop - offset <= events) & (events < off.stop)))
        events[indices0] += offset
        events[indices1] += offset - 12
    elif offset < 0:
        # note indices that can be shifted down without leaving the note range
        indices0 = (((on.start - offset <= events) & (events < on.stop)) |
                    ((off.start - offset <= events) & (events < off.stop)))
        # note indices that would underflow the range; wrap them up an octave
        indices1 = (((on.start <= events) & (events < on.start - offset)) |
                    ((off.start <= events) & (events < off.start - offset)))
        events[indices0] += offset
        events[indices1] += offset + 12

    assert ((0 <= events) & (events < EventSeq.dim())).all()

    if controls is not None:
        # rotate the pitch histogram so it matches the transposed notes
        histr = ControlSeq.feat_ranges()['pitch_histogram']
        controls[:, :, histr.start:histr.stop] = np.roll(
            controls[:, :, histr.start:histr.stop], offset, -1)

    return events, controls
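# Usage sketch (illustrative only, not part of the original script). It assumes
# events are integer event indices of shape [steps, batch_size] and controls are
# float features of shape [steps, batch_size, ControlSeq.dim()]; the function
# operates elementwise on events, so other leading shapes would work as well.
def _transposition_demo():
    steps, batch_size = 200, 8
    demo_events = np.random.randint(0, EventSeq.dim(), size=(steps, batch_size))
    demo_controls = np.random.rand(steps, batch_size, ControlSeq.dim()).astype(np.float32)
    transposed_events, transposed_controls = transposition(demo_events, demo_controls, offset=3)
    assert transposed_events.shape == demo_events.shape
    assert transposed_controls.shape == demo_controls.shape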
sess_path = options.sess_path
data_path = options.data_path
saving_interval = options.saving_interval

learning_rate = options.learning_rate
batch_size = options.batch_size
window_size = options.window_size
stride_size = options.stride_size
use_transposition = options.use_transposition
control_ratio = options.control_ratio
teacher_forcing_ratio = options.teacher_forcing_ratio
reset_optimizer = options.reset_optimizer
enable_logging = options.enable_logging

event_dim = EventSeq.dim()
control_dim = ControlSeq.dim()
model_config = config.model
model_params = utils.params2dict(options.model_params)
model_config.update(model_params)
device = config.device

print('-' * 70)

print('Session path:', sess_path)
print('Dataset path:', data_path)
print('Saving interval:', saving_interval)
print('-' * 70)

print('Hyperparameters:', utils.dict2params(model_config))
print('Learning rate:', learning_rate)
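# Sketch of the params2dict / dict2params helpers used above. The actual utils
# module is not shown here, so this is one plausible implementation: it
# round-trips a "key=value,key=value" string, parsing values with
# ast.literal_eval where possible so numeric overrides keep their types.
import ast


def params2dict(params_str):
    # e.g. "hidden_dim=256,gru_layers=2" -> {'hidden_dim': 256, 'gru_layers': 2}
    result = {}
    if not params_str:
        return result
    for item in params_str.split(','):
        key, value = item.split('=', 1)
        try:
            result[key.strip()] = ast.literal_eval(value.strip())
        except (ValueError, SyntaxError):
            result[key.strip()] = value.strip()
    return result


def dict2params(d, sep=','):
    # inverse of params2dict, handy for printing the effective hyperparameters
    return sep.join(f'{k}={v}' for k, v in d.items())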
import torch

from utils.sequence import EventSeq, ControlSeq

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = {
    'init_dim': 32,
    'event_dim': EventSeq.dim(),
    'control_dim': ControlSeq.dim(),
    'hidden_dim': 512,
    'gru_layers': 3,
    'gru_dropout': 0.3,
}

train = {
    'learning_rate': 0.001,
    'batch_size': 64,
    'window_size': 200,
    'stride_size': 10,
    'use_transposition': False,
    'control_ratio': 1.0,
    'teacher_forcing_ratio': 1.0,
}
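# Minimal sketch (an assumption for illustration, not the project's actual model
# class) of how the `model` hyperparameters above could map onto a GRU-based
# event model: a random init vector seeds the hidden state, each event index is
# embedded, concatenated with the control vector, run through a stacked GRU,
# and projected back to event logits.
import torch
import torch.nn as nn


class SketchModel(nn.Module):
    def __init__(self, init_dim, event_dim, control_dim,
                 hidden_dim, gru_layers, gru_dropout):
        super().__init__()
        self.init_fc = nn.Linear(init_dim, gru_layers * hidden_dim)
        self.event_embedding = nn.Embedding(event_dim, hidden_dim)
        self.gru = nn.GRU(hidden_dim + control_dim, hidden_dim,
                          num_layers=gru_layers, dropout=gru_dropout)
        self.output = nn.Linear(hidden_dim, event_dim)

    def init_hidden(self, init):
        # init: [batch_size, init_dim] random vector used to seed the GRU state
        batch_size = init.shape[0]
        hidden = torch.tanh(self.init_fc(init))
        return hidden.view(batch_size, self.gru.num_layers, -1).transpose(0, 1).contiguous()

    def forward(self, event, control, hidden=None):
        # event: [steps, batch_size] int64, control: [steps, batch_size, control_dim]
        x = torch.cat([self.event_embedding(event), control], dim=-1)
        x, hidden = self.gru(x, hidden)
        return self.output(x), hidden


# e.g. SketchModel(**model).to(device)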