def __getitem__(self, idx):
    """Return the tracer sample at *idx*, applying the dataset transform if set."""
    # Torch samplers may hand us an index tensor; unwrap to plain Python ints.
    if torch.is_tensor(idx):
        idx = idx.tolist()
    item = get_tracer(idx)
    return self.transform(item) if self.transform else item
errG.backward()
optimizerG.step()

# Report training losses every 50 batches.
if i_batch % 50 == 0:
    print("Training Losses")
    print("Epoch: ", epoch, " | i: ", i_batch)
    print("Discriminator Loss: ", errD.item())
    print("Generator Loss: ", errG.item())
    print("Gen BCE Loss:", errBCE.item())
    print("Gen MSE Loss:", alpha * errMSE.item())

## Validation: pass each held-out snapshot through the networks and
## accumulate the MSE between the prediction and the next timestep.
errG_val_mse = 0
for i in val_ints:
    # Current tracer field; unsqueeze(1) inserts a singleton dimension.
    # NOTE(review): assumes get_tracer(i) returns a numpy array — confirm.
    val_tracer = get_tracer(i)
    val_tracer = torch.from_numpy(val_tracer).unsqueeze(1).to(
        device=device, dtype=torch.float)
    # Target is the tracer field at the next index (i + 1).
    val_tracer_incr = get_tracer(i + 1)
    val_tracer_incr = torch.from_numpy(val_tracer_incr).unsqueeze(1).to(
        device=device, dtype=torch.float)
    # Encoder + Generator forward pass; detach so no autograd graph is kept.
    val_outputEnc = netEnc(val_tracer).detach()
    val_outputG = netG(val_outputEnc).detach()
    errG_val_mse += mse_loss(val_outputG, val_tracer_incr)

## Average the validation MSE over the validation set and scale by alpha,
## matching the weighting used for the training MSE term printed above.
errG_val_mse /= len(val_ints)
errG_val_mse *= alpha
# Autoencoder reconstruction loss and optimization step for both halves.
errAE = mse_loss(output, data)
errAE.backward()
optimizerEnc.step()
optimizerDec.step()

# Report the training loss every 50 batches.
if i_batch % 50 == 0:
    print("Test Loss:")
    print("Epoch: ", epoch, " | i: ", i_batch)
    print("AutoEncoder Loss: ", errAE.item())

# Get error for Validation set
## Pass each validation snapshot through encoder + decoder and
## accumulate the reconstruction MSE.
errAE_Val = 0
for i in val_ints:
    # NOTE(review): assumes get_tracer(i) returns a numpy array — confirm.
    val_tracer = get_tracer(i)
    val_tracer = torch.from_numpy(val_tracer).unsqueeze(1).to(
        device=device, dtype=torch.float)
    # Encode then decode; detach so no autograd graph is retained.
    val_output = netEnc(val_tracer).detach()
    val_output = netDec(val_output).detach()
    errAE_Val += mse_loss(val_output, val_tracer)
errAE_Val /= len(val_ints)

# Store per-epoch training/validation losses for plotting later.
epoch_list.append(epoch)
loss_list.append(errAE.item())
val_loss_list.append(errAE_Val.item())
torch.cuda.is_available() and ngpu > 0) else "cpu") ################# # Instantiating # ################# netEnc = Encoder(ngpu).to(device) netG = Generator(ngpu).to(device) checkpoint = torch.load(filePathToModelAE) netEnc.load_state_dict(checkpoint['netEnc_state_dict']) checkpoint = torch.load(filePathToModelGAN) netG.load_state_dict(checkpoint['netG_state_dict']) mse_loss = nn.MSELoss() tracer_dataset = TracerDataset(transform=ToTensor()) batch_indicies = [] for i in range(3729): batch_indicies.append(i) for i in batch_indicies: data = get_tracer(i) data = torch.from_numpy(data).unsqueeze(0).to(device=device, dtype=torch.float) output = denormalise(netG(netEnc(data)), x_min, x_max) output = np.array(output.squeeze().cpu().detach()) create_tracer_VTU_GAN(i, output, "tGAN") print(i)