def train_audio():
    # compute receptive field width
    learnable_steps = 1
    batch_size = 1
    num_layers = len(params.residual_conv_channels)
    receptive_steps_per_unit = params.residual_conv_filter_width ** num_layers
    receptive_steps = (receptive_steps_per_unit - 1) * params.residual_num_blocks + 1
    target_width = learnable_steps
    input_width = receptive_steps
    # to compute all learnable targets
    input_width += learnable_steps - 1
    ## padding for causal conv block
    input_width += len(params.causal_conv_channels)

    quantized_signal = np.mod(np.arange(1, input_width * 10), params.quantization_steps)
    print quantized_signal

    for rep in xrange(300):
        sum_loss = 0
        for train in xrange(50):
            # create batch
            input_batch, target_batch = create_batch(quantized_signal, batch_size, input_width, target_width)

            # convert to 1xW image whose #channels is equal to the quantization steps of audio
            # input_batch.shape = (BATCHSIZE, CHANNELS(=quantization_steps), HEIGHT(=1), WIDTH(=input_width))
            input_batch = data.onehot_pixel_image(input_batch, quantization_steps=params.quantization_steps)

            # training
            ## causal block
            output = wavenet.forward_causal_block(input_batch)
            ## remove causal padding
            output = wavenet.slice_1d(output, len(params.causal_conv_channels))
            ## residual dilated conv block
            output, sum_skip_connections = wavenet.forward_residual_block(output)
            ## remove unnecessary elements
            sum_skip_connections = wavenet.slice_1d(sum_skip_connections, sum_skip_connections.data.shape[3] - target_width)
            ## softmax block
            ## Note: do not apply F.softmax
            output = wavenet.forward_softmax_block(sum_skip_connections, softmax=False)
            ## compute cross entropy
            loss = wavenet.cross_entropy(output, target_batch)
            ## update weights
            wavenet.backprop(loss)
            sum_loss += float(loss.data)

        print sum_loss / 50.0
        wavenet.save(args.model_dir)
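# A minimal worked example of the receptive-field arithmetic used above,
# assuming illustrative values (filter width 2, 10 dilated layers per block,
# 2 residual blocks); the real values come from `params`, which is defined
# elsewhere in the repository.
def receptive_field_width(filter_width=2, num_layers=10, num_blocks=2):
    # a stack of dilated convolutions with dilations 1, 2, 4, ..., 2^(L-1)
    # covers filter_width ** num_layers input steps
    receptive_steps_per_unit = filter_width ** num_layers  # 1024 for these defaults
    # stacking num_blocks such units grows the field linearly
    return (receptive_steps_per_unit - 1) * num_blocks + 1  # 2047 for these defaults

assert receptive_field_width() == 2047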
def train_audio():
    target_width = 4
    padded_input_width = 8 + 3 + 1
    batch_size = 8
    quantized_signal = np.mod(np.arange(1, padded_input_width * batch_size * 4), 6)
    # pad the head with ones
    quantized_signal = np.insert(quantized_signal, 0, np.ones((padded_input_width,), dtype=np.int32), axis=0)
    print quantized_signal

    for rep in xrange(50):
        for step in xrange(10):
            padded_signal_batch, target_batch = create_batch(quantized_signal, batch_size, padded_input_width, target_width)
            padded_onehot_batch = data.onehot_pixel_image(padded_signal_batch, quantized_channels=params.quantization_steps)
            # print padded_signal_batch[0, -1]
            # print padded_onehot_batch[0, :, 0, -1]
            # print target_batch[0, -1]
            output = wavenet.forward_causal_block(padded_onehot_batch)
            output = wavenet.slice_1d(output, 1)
            output, sum_skip_connections = wavenet.forward_residual_block(output)
            sum_skip_connections = wavenet.slice_1d(sum_skip_connections, output.data.shape[3] - target_width)
            output = wavenet.forward_softmax_block(sum_skip_connections, softmax=False)
            loss = wavenet.cross_entropy(output, target_batch)
            wavenet.backprop(loss)
            loss = float(loss.data)
            print loss

    wavenet.save(args.model_dir)
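# A sketch of what `data.onehot_pixel_image` is assumed to do: turn an integer
# batch of shape (batch_size, width) into a one-hot "image" of shape
# (batch_size, quantization_steps, 1, width), so that the 1-D audio convolutions
# can be run as 2-D convolutions over a height-1 image. The real implementation
# lives in the repository's `data` module and may differ in detail.
import numpy as np

def onehot_pixel_image_sketch(int_batch, quantization_steps=256):
    batch_size, width = int_batch.shape
    onehot = np.zeros((batch_size, quantization_steps, 1, width), dtype=np.float32)
    for b in range(batch_size):
        # set a 1 in the channel matching each quantized sample value
        onehot[b, int_batch[b], 0, np.arange(width)] = 1.0
    return onehot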
def train_audio():
    # compute required input width
    num_layers = len(params.residual_conv_channels)
    receptive_width_per_unit = params.residual_conv_filter_width ** num_layers
    receptive_width = (receptive_width_per_unit - 1) * params.residual_num_blocks + 1

    # padding for causal conv block
    causal_padding = len(params.causal_conv_channels)

    # quantized_signal = np.mod(np.arange(1, 100), 6)
    quantized_signal = np.repeat(np.arange(0, 10), 100, axis=0)
    # quantized_signal = np.random.randint(0, params.quantization_steps, 1000)
    original_signal_width = quantized_signal.size

    # pad the head with zeros
    quantized_signal = np.insert(quantized_signal, 0, np.full((receptive_width + causal_padding,), 0, dtype=np.int32), axis=0)

    target_width = original_signal_width // 20
    batch_size = 2

    for epoch in xrange(100):
        sum_loss = 0
        for step in xrange(500):
            input_batch, target_batch = create_batch(quantized_signal, batch_size, receptive_width + causal_padding, target_width)

            # convert to 1xW image whose #channels is equal to the quantization steps of audio
            # input_batch.shape = (BATCHSIZE, CHANNELS(=quantization_steps), HEIGHT(=1), WIDTH(=input_width))
            input_batch = data.onehot_pixel_image(input_batch, quantization_steps=params.quantization_steps)

            # training
            ## causal block
            output = wavenet.forward_causal_block(input_batch)
            ## remove causal padding
            # output = wavenet.slice_1d(output, len(params.causal_conv_channels))
            ## residual dilated conv block
            output, sum_skip_connections = wavenet.forward_residual_block(output)
            ## remove unnecessary elements
            sum_skip_connections = wavenet.slice_1d(sum_skip_connections, sum_skip_connections.data.shape[3] - target_width)
            ## softmax block
            ## Note: do not apply F.softmax
            output = wavenet.forward_softmax_block(sum_skip_connections, apply_softmax=False)
            ## compute cross entropy
            loss = wavenet.cross_entropy(output, target_batch)
            ## update weights
            wavenet.backprop(loss)
            sum_loss += float(loss.data)

        print epoch, sum_loss
        wavenet.save(args.model_dir)
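# A hypothetical sketch of `create_batch`, which the repository defines elsewhere.
# It is assumed to cut `batch_size` random windows of `input_width` steps out of
# the (already padded) signal and pair each window with the `target_width`
# samples shifted one step ahead, for next-sample prediction; the exact
# alignment in the real helper may differ.
import numpy as np

def create_batch_sketch(signal, batch_size, input_width, target_width):
    inputs = np.empty((batch_size, input_width), dtype=np.int32)
    targets = np.empty((batch_size, target_width), dtype=np.int32)
    # the last valid window must leave one extra step for the shifted targets
    max_start = signal.size - input_width - 1
    for b in range(batch_size):
        start = np.random.randint(0, max_start + 1)
        inputs[b] = signal[start:start + input_width]
        # targets are the samples following each of the last target_width inputs
        targets[b] = signal[start + input_width - target_width + 1:start + input_width + 1]
    return inputs, targets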
def train_audio(
        filename,
        batch_size=16,
        learnable_steps=16,
        save_per_update=500,
        train_steps_ratio=0.05,
):
    # load audio data
    path_to_file = args.wav_dir + "/" + filename
    quantized_signal, sampling_rate = data.load_audio_file(path_to_file, quantization_steps=params.quantization_steps)

    # compute receptive field width
    num_layers = len(params.residual_conv_channels)
    receptive_steps_per_unit = params.residual_conv_filter_width ** num_layers
    receptive_steps = (receptive_steps_per_unit - 1) * params.residual_num_blocks + 1
    receptive_msec = int(receptive_steps * 1000.0 / sampling_rate)

    target_width = learnable_steps
    input_width = receptive_steps
    # to compute all learnable targets
    input_width += learnable_steps - 1
    ## padding for causal conv block
    input_width += len(params.causal_conv_channels)

    # for logging
    num_updates = 0
    total_updates = 0
    sum_loss_epoch = 0
    sum_loss = 0
    start_time = time.time()
    prev_average_loss = None

    max_batches = max(int((quantized_signal.size - input_width) / float(batch_size) * train_steps_ratio), 1)

    # print "training", filename
    # print " sampling rate:", sampling_rate, "[Hz]"
    # print " length:", quantized_signal.size, "[step]"
    # print " batch_size:", batch_size
    # print " learnable_steps:", learnable_steps

    # pad with zero
    quantized_signal = np.insert(quantized_signal, 0, np.zeros((input_width,), dtype=np.int32), axis=0)

    for batch_index in xrange(1, max_batches + 1):
        # create batch
        input_batch, target_batch = create_batch(quantized_signal, batch_size, input_width, target_width)

        # convert to 1xW image whose #channels is equal to the quantization steps of audio
        # input_batch.shape = (BATCHSIZE, CHANNELS(=quantization_steps), HEIGHT(=1), WIDTH(=input_width))
        input_batch = data.onehot_pixel_image(input_batch, quantization_steps=params.quantization_steps)

        # training
        ## causal block
        output = wavenet.forward_causal_block(input_batch)
        ## remove causal padding
        output = wavenet.slice_1d(output, len(params.causal_conv_channels))
        ## residual dilated conv block
        output, sum_skip_connections = wavenet.forward_residual_block(output)
        ## remove unnecessary elements
        sum_skip_connections = wavenet.slice_1d(sum_skip_connections, sum_skip_connections.data.shape[3] - target_width)
        ## softmax block
        ## Note: do not apply F.softmax
        output = wavenet.forward_softmax_block(sum_skip_connections, softmax=False)
        ## compute cross entropy
        loss = wavenet.cross_entropy(output, target_batch)
        ## update weights
        wavenet.backprop(loss)

        # logging
        loss = float(loss.data)
        sum_loss_epoch += loss
        sum_loss += loss
        total_updates += 1

        # save the model
        if total_updates % save_per_update == 0:
            wavenet.save(dir=args.model_dir)

    wavenet.save(dir=args.model_dir)

    average_loss = sum_loss / float(max_batches)
    sys.stdout.flush()
    return average_loss
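# The "do not apply F.softmax" note above matches how Chainer's
# F.softmax_cross_entropy works: it takes raw logits and applies log-softmax
# internally, so normalizing first would double-apply the softmax. A minimal
# sketch of the assumed loss computation (the repository's
# `wavenet.cross_entropy` is not shown here and may differ):
import chainer.functions as F

def cross_entropy_sketch(logits, target_batch):
    # logits: (batch, quantization_steps, 1, target_width) raw scores
    # target_batch: (batch, target_width) int32 class ids
    batch, _, _, width = logits.shape
    # the class axis is axis 1; targets need the height-1 axis inserted
    return F.softmax_cross_entropy(logits, target_batch.reshape(batch, 1, width))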
def train_audio(
        filename,
        batch_size=16,
        train_width=16,
        repeat=1000,
):
    # load audio data
    path_to_file = args.wav_dir + "/" + filename
    signals, sampling_rate = data.load_audio_file(path_to_file, quantization_steps=params.quantization_steps)

    # receptive width
    num_layers = len(params.residual_conv_channels)
    receptive_width_per_unit = params.residual_conv_filter_width ** num_layers
    receptive_width = (receptive_width_per_unit - 1) * params.residual_num_blocks + 1
    receptive_msec = int(receptive_width * 1000.0 / sampling_rate)

    # receptive field width
    input_width = receptive_width
    # padding for causal conv block
    input_width += len(params.causal_conv_channels)

    # for logging
    num_updates = 0
    total_updates = 0
    sum_loss = 0
    prev_average_loss = None

    # pad the head with silence (the mid-level quantization value)
    signals = np.insert(signals, 0, np.full((input_width,), 127, dtype=np.int32), axis=0)

    with chainer.using_config("train", True):
        for batch_index in xrange(0, repeat):
            # create batch
            input_batch, target_batch = create_batch(signals, batch_size, input_width, train_width)

            # convert to 1xW image whose #channels is equal to the quantization steps of audio
            # input_batch.shape = (BATCHSIZE, CHANNELS(=quantization_steps), HEIGHT(=1), WIDTH(=input_width))
            input_batch = data.onehot_pixel_image(input_batch, quantization_steps=params.quantization_steps)

            # training
            ## causal block
            output = wavenet.forward_causal_block(input_batch)
            ## remove causal padding
            # output = wavenet.slice_1d(output, len(params.causal_conv_channels))
            ## residual dilated conv block
            output, sum_skip_connections = wavenet.forward_residual_block(output)
            ## remove unnecessary elements
            output = wavenet.slice_1d(output, output.data.shape[3] - train_width)
            sum_skip_connections = wavenet.slice_1d(sum_skip_connections, sum_skip_connections.data.shape[3] - train_width)
            ## softmax block
            ## Note: do not apply F.softmax
            output = wavenet.forward_softmax_block(sum_skip_connections, apply_softmax=False)
            ## compute cross entropy
            loss = wavenet.cross_entropy(output, target_batch)
            ## update weights
            wavenet.backprop(loss)

            # logging
            sum_loss += float(loss.data)
            total_updates += 1
            if batch_index % 10 == 0:
                sys.stdout.write("\r {} - {} width; {}/{}".format(stdout.BOLD + filename + stdout.END, signals.size, batch_index, repeat))
                sys.stdout.flush()

    wavenet.save(args.model_dir)
    return sum_loss
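# A sketch of what `wavenet.slice_1d` is assumed to do: drop the first `offset`
# steps along the width axis of a (batch, channels, 1, width) variable, keeping
# only the trailing positions whose receptive fields lie entirely inside the
# input. Chainer variables support this slicing directly; the repository's
# helper may be implemented differently.
def slice_1d_sketch(x, offset):
    # x.shape = (batch, channels, 1, width); keep the last width - offset steps
    return x[:, :, :, offset:]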