def train():
    """Run one training epoch over the sampled subset and log the result."""
    model.train()
    epoch_loss, t0 = [], time.time()
    training_data_loader = DataLoader(
        training_set, batch_size=batch_size, num_workers=2,
        pin_memory=cuda, sampler=SubsetSampler(indices=sample_indecies))
    for i_batch, (indices, (data, targetY, targetX)) in enumerate(training_data_loader, 1):
        if cuda:
            # asynchronous host-to-device copies (pinned memory is enabled above)
            data = data.cuda(non_blocking=True)
            targetY = targetY.cuda(non_blocking=True)
            targetX = targetX.cuda(non_blocking=True)
        optimizer.zero_grad()
        mask = model(data)  # predicted mask
        loss = criterion(apply_mask(mask, targetY), targetX)
        epoch_loss.append(loss.item())
        loss.backward()
        optimizer.step()
        print("===> Epoch {:2} {:4.1f}% Loss: {:.4e}".format(
            epoch, i_batch / batch_per_epoch * 100, loss.item()))
    # assume each batch loss is the empirical mean of i.i.d. per-sample losses,
    # so the per-sample std is recovered by scaling with sqrt(batch_size)
    loss, loss_std, t = (np.mean(epoch_loss),
                         np.std(epoch_loss) * batch_size ** .5,
                         int(time.time() - t0))
    print("Epoch {} Complete: Avg. Loss: {:.4e} Std: {:.4e} Time: {} min".format(
        epoch, loss, loss_std, int(t / 60)))
    print(epoch, loss, loss_std, t, sep=',', end=',', file=open(logpath, 'a'))
def clean_sample_(model, mask, Y_m, Y_a, length, save_path):
    """Apply the predicted mask, invert back to a waveform, and save it."""
    Xh_m = apply_mask(mask, Y_m)  # masked (cleaned) spectrogram estimate
    y = model.inverse_transform(Xh_m, Y_a, new_length=length)  # back to the time domain
    save_audio_(y, save_path)
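
# Minimal usage sketch (an assumption, not part of the original script):
# `n_epochs` is a hypothetical setting; `train()` reads `epoch`, `model`,
# `optimizer`, `logpath`, etc. from module scope, so binding the loop
# variable `epoch` at module level is what makes it visible inside train().
if __name__ == '__main__':
    n_epochs = 10  # assumed number of passes over the sampled subset
    for epoch in range(1, n_epochs + 1):
        train()  # one epoch; appends "epoch,loss,loss_std,t," to the log file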