# Imports assumed by these snippets (module paths follow the Torchvision_sunner package layout)
import torchvision_sunner.data as sunnerData
import torchvision_sunner.transforms as sunnertransforms
import torchvision.transforms as transforms
import cv2

def main():
    # Split the dataset and persist the split result
    train_dataset = sunnerData.ImageDataset(
        root=[['/home/sunner/Music/waiting_for_you_dataset/wait'],
              ['/home/sunner/Music/waiting_for_you_dataset/real_world']],
        transform=None,
        split_ratio=0.1,
        save_file=True)
    del train_dataset

    # Reload the held-out split from the saved file
    test_dataset = sunnerData.ImageDataset(
        file_name='.split.pkl',
        transform=transforms.Compose([
            sunnertransforms.Resize((160, 320)),
            sunnertransforms.ToTensor(),
            sunnertransforms.Transpose(sunnertransforms.BHWC2BCHW),
            sunnertransforms.Normalize(),
        ]))

    # Create the data loader
    loader = sunnerData.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=2)

    # Wrap the loader to cap the number of iterations
    loader = sunnerData.IterationLoader(loader, max_iter=1)

    # Show!
    for batch_img, _ in loader:
        batch_img = sunnertransforms.asImg(batch_img, size=(160, 320))
        cv2.imshow('show_window', batch_img[0][:, :, ::-1])   # RGB -> BGR for OpenCV
        cv2.waitKey(0)
def main():
    # Create the fundamental data loader
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[['/home/sunner/Music/waiting_for_you_dataset/wait'],
              ['/home/sunner/Music/waiting_for_you_dataset/real_world']],
        transforms=transforms.Compose([
            sunnertransforms.Resize((160, 320)),
            sunnertransforms.ToTensor(),
            sunnertransforms.ToFloat(),
            sunnertransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])),
        batch_size=32, shuffle=False, num_workers=2)

    # Wrap the loader to cap the number of iterations
    loader = sunnerData.IterationLoader(loader, max_iter=1)

    # Show!
    for batch_tensor, _ in loader:
        batch_img = sunnertransforms.asImg(batch_tensor, size=(160, 320))
        cv2.imshow('show_window', batch_img[0][:, :, ::-1])   # RGB -> BGR for OpenCV
        cv2.waitKey(0)

        # Or show multiple images at once
        sunnertransforms.show(batch_tensor[:10], row=2, column=5)
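If you prefer to preview a whole batch with matplotlib instead of OpenCV, a small denormalize-and-grid sketch is shown below. It assumes the loader yields BCHW float tensors normalized to [-1, 1] (mean/std of 0.5, as in the Compose above); the show_grid helper is hypothetical and not part of the sunner API.

import torchvision.utils as vutils
import matplotlib.pyplot as plt

def show_grid(batch_tensor, row=2, column=5):
    # Hypothetical helper: assumes a BCHW float tensor normalized to [-1, 1]
    batch = batch_tensor[:row * column]
    batch = batch * 0.5 + 0.5                      # undo Normalize(mean=0.5, std=0.5)
    grid = vutils.make_grid(batch, nrow=column)    # build a single CHW grid image
    plt.imshow(grid.permute(1, 2, 0).clamp(0, 1).numpy())
    plt.axis('off')
    plt.show()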
def train(args):
    """ This function defines the training process

        Arg:    args    (namespace) - The parsed arguments
    """
    # Create the data loader
    loader = sunnerData.DataLoader(
        dataset=sunnerData.ImageDataset(
            root=[[args.train]],
            transforms=transforms.Compose([
                # transforms.RandomCrop(720, 720),
                # transforms.RandomRotation(45),
                # transforms.RandomHorizontalFlip(),
                # transforms.ColorJitter(brightness=0.5, contrast=0.5),
                sunnerTransforms.Resize(output_size=(args.H, args.W)),
                # transforms.RandomCrop(512, 512),
                sunnerTransforms.ToTensor(),
                sunnerTransforms.ToFloat(),
                # sunnerTransforms.Transpose(),
                sunnerTransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2)
    loader = sunnerData.IterationLoader(loader, max_iter=args.n_iter)

    # Create the model and load the previous checkpoint
    model = GANomaly2D(r=args.r, device=args.device)
    model.IO(args.resume, direction='load')
    model.train()

    # Train!
    bar = tqdm(loader)
    for i, (normal_img,) in enumerate(bar):
        model.forward(normal_img)
        model.backward()
        loss_G, loss_D = model.getLoss()
        bar.set_description("Loss_G: " + str(loss_G) + " loss_D: " + str(loss_D))
        bar.refresh()

        # Periodically visualize the reconstruction and save the model
        if i % args.record_iter == 0:
            model.eval()
            with torch.no_grad():
                z, z_ = model.forward(normal_img)
                img, img_ = model.getImg()
                visualizeEncoderDecoder(img, img_, z, z_, i)
            model.train()
            model.IO(args.det, direction='save')

    # Save the final model
    model.IO(args.det, direction='save')
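train() only consumes an already-parsed argument namespace. A minimal argparse sketch that would build such a namespace is given below; the flag names mirror the attributes accessed in train() (train, H, W, batch_size, n_iter, record_iter, r, device, resume, det), while every default value is an illustrative assumption rather than the project's actual configuration.

import argparse
import torch

def parse_args():
    # Hypothetical parser: flag names follow the attributes used in train(),
    # all defaults below are illustrative assumptions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=str, required=True, help='Folder of normal training images')
    parser.add_argument('--H', type=int, default=240, help='Height the inputs are resized to')
    parser.add_argument('--W', type=int, default=360, help='Width the inputs are resized to')
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--n_iter', type=int, default=10000, help='Total number of training iterations')
    parser.add_argument('--record_iter', type=int, default=100, help='Interval for visualization and checkpointing')
    parser.add_argument('--r', type=int, default=1, help='Hyper-parameter passed to GANomaly2D')
    parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
    parser.add_argument('--resume', type=str, default='model.pth', help='Checkpoint to load before training')
    parser.add_argument('--det', type=str, default='model.pth', help='Destination used when saving the model')
    return parser.parse_args()

if __name__ == '__main__':
    train(parse_args())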
def main():
    # Create the fundamental data loader
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[['/home/sunner/Music/waiting_for_you_dataset/wait'],
              ['/home/sunner/Music/waiting_for_you_dataset/real_world']],
        transform=transforms.Compose([
            sunnertransforms.Resize((160, 320)),
            sunnertransforms.ToTensor(),
            sunnertransforms.Transpose(sunnertransforms.BHWC2BCHW),
            sunnertransforms.Normalize(),
        ])),
        batch_size=32, shuffle=False, num_workers=2)

    # Wrap the loader to cap the number of iterations
    loader = sunnerData.IterationLoader(loader, max_iter=1)

    # Show!
    for batch_img, _ in loader:
        batch_img = sunnertransforms.asImg(batch_img, size=(160, 320))
        cv2.imshow('show_window', batch_img[0][:, :, ::-1])   # RGB -> BGR for OpenCV
        cv2.waitKey(0)