idxStart = bi * batch_size
inputData_np = test_X_std[idxStart:(idxStart + batch_size), :, :]
inputData_np_ori = test_X[idxStart:(idxStart + batch_size), :, :]
# outputData_np = test_Y_std[idxStart:(idxStart + batch_size), :, :]
# outputData_np_ori = test_Y[idxStart:(idxStart + batch_size), :, :]

inputData = Variable(torch.from_numpy(inputData_np)).cuda()        # (batch, 73, frameNum)
# outputGT = Variable(torch.from_numpy(outputData_np)).cuda()      # (batch, 73, frameNum)
# outputGT = Variable(torch.from_numpy(inputData_np)).cuda()       # (batch, 73, frameNum)

# ===================forward=====================
if "vae" in model.__class__.__name__:
    output, mu, logvar = model(inputData)
    loss, recon_loss, kld_loss = modelZoo.vae_loss_function(
        output, inputData, mu, logvar, criterion, args.weight_kld)
else:
    output = model(inputData)
    # loss = criterion(output, outputGT)
    loss = criterion(output, inputData)

print('loss: {}'.format(loss))

# De-standardize
output_np = output.data.cpu().numpy()                # (batch, featureDim, frames)
output_np = output_np * Xstd + Xmean
output_np = np.swapaxes(output_np, 1, 2)             # (batch, frames, featureDim)
output_np = np.reshape(output_np, (-1, featureDim))
output_np = np.swapaxes(output_np, 0, 1)             # (featureDim, frames)

inputData_np_ori = np.swapaxes(inputData_np_ori, 1, 2)
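# Note: the implementation of `modelZoo.vae_loss_function` is not part of this
# excerpt. A minimal sketch of a standard VAE objective (reconstruction loss plus
# a weighted KL divergence) that matches the call signature used above could look
# like the following; the name `vae_loss_function_sketch` and the exact weighting
# are assumptions, not the original implementation.
def vae_loss_function_sketch(recon_x, x, mu, logvar, criterion, weight_kld):
    # Reconstruction term: how well the decoder output matches the target.
    recon_loss = criterion(recon_x, x)
    # KL divergence between q(z|x) = N(mu, exp(logvar)) and the prior N(0, I).
    kld_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    # Total loss: reconstruction plus weighted KLD (beta-VAE-style weighting).
    return recon_loss + weight_kld * kld_loss, recon_loss, kld_loss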
inputData = Variable(torch.from_numpy(inputData_np)).cuda()        # (batch, 73, frameNum)
inputData_speech_cuda = Variable(torch.from_numpy(inputdata_speech[:, :, 0])).cuda()
# outputGT = Variable(torch.from_numpy(outputData_np)).cuda()      # (batch, 73, frameNum)
# outputGT = Variable(torch.from_numpy(inputData_np)).cuda()       # (batch, 73, frameNum)

#################### VAE Only ####################
# ===================forward=====================
output, mu, logvar = model(inputData, inputData_speech_cuda)
# loss = criterion(output, inputData)
# loss = modelZoo.vae_loss_function(output, inputData, mu, logvar, criterion)
# loss, recon_loss, kld_loss = modelZoo.vae_loss_function(output, inputData, mu, logvar, criterion, args.weight_kld)
loss, recon_loss, kld_loss = modelZoo.vae_loss_function(
    output, inputData[:, :-1, :], mu, logvar, criterion, args.weight_kld)  # ignore the label in inputData

# ===================backward====================
optimizer.zero_grad()
loss.backward()
optimizer.step()

# ===================log========================
# print('model: {}, epoch [{}/{}], loss:{:.4f} (recon: {:.4f}, kld {:.4f})'
#       .format(checkpointFolder_base, epoch + pretrain_epoch, num_epochs, loss.item(), recon_loss.item(), kld_loss.item()))
avgLoss += loss.item() * batch_size
avgReconLoss += recon_loss.item() * batch_size
avgKLDLoss += kld_loss.item() * batch_size

if tensorboard_bLog:
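    # The logging body is not included in this excerpt. A minimal sketch, assuming
    # `tb_writer` is a torch.utils.tensorboard.SummaryWriter and `iter_count` is a
    # running iteration counter (both hypothetical names), might record the
    # per-iteration losses like this:
    tb_writer.add_scalar('train/loss', loss.item(), iter_count)
    tb_writer.add_scalar('train/recon_loss', recon_loss.item(), iter_count)
    tb_writer.add_scalar('train/kld_loss', kld_loss.item(), iter_count)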