import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import transforms

# myDataset and AutoEncoder are assumed to be defined elsewhere in the
# project; a sketch of the train/val helpers follows main().


def main(opt):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Dataset
    print('Dataset....')
    transform = transforms.Compose([
        transforms.Resize((600, 600)),
        transforms.Grayscale(3),
        transforms.ToTensor()
    ])
    train_set = myDataset(image_path=opt.train_path, transform=transform)
    val_set = myDataset(image_path=opt.val_path, transform=transform)
    train_loader = DataLoader(train_set, batch_size=opt.train_batch_size)
    val_loader = DataLoader(val_set, batch_size=opt.val_batch_size)

    # Model
    print('Model....')
    model = AutoEncoder()
    model.to(device)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    loss_func = nn.MSELoss()

    # Train
    print('Training....')
    train_epoch_loss = []
    val_epoch_loss = []
    train_iter_losses = []
    val_iter_losses = []
    # initialize the best loss once, outside the loop; resetting it every
    # epoch (as the original did) overwrites the checkpoint unconditionally
    best = float('inf')
    for e in range(opt.epoch):
        train_iter_loss = train(opt, model, train_loader, optimizer,
                                loss_func, device, e)
        train_iter_losses += train_iter_loss
        train_epoch_loss.append(sum(train_iter_loss))

        val_iter_loss = val(opt, model, val_loader, loss_func, device, e)
        val_iter_losses += val_iter_loss
        val_epoch_loss.append(sum(val_iter_loss))

        # save the model whenever the validation loss improves
        if val_epoch_loss[-1] < best:
            print('Saving Model....')
            torch.save(model, 'weights/AutoEncoder_try1.pth')
            best = val_epoch_loss[-1]

    print('Saving Result')
    plt.figure(figsize=(10, 10))
    plt.plot(train_iter_losses)
    plt.plot(val_iter_losses)
    plt.legend(['Train_loss', 'Val_loss'])
    plt.savefig('Result.jpg')
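# A minimal sketch of the train/val helpers that main() assumes: each returns
# the list of per-iteration losses for one epoch, which main() sums and plots.
# Only the signatures are taken from the call sites above; the bodies are
# illustrative assumptions (e.g. that the loaders yield bare image tensors).
def train(opt, model, loader, optimizer, loss_func, device, epoch):
    model.train()
    iter_losses = []
    for x in loader:
        x = x.to(device)
        optimizer.zero_grad()
        loss = loss_func(model(x), x)  # autoencoder reconstruction loss
        loss.backward()
        optimizer.step()
        iter_losses.append(loss.item())
    return iter_losses


def val(opt, model, loader, loss_func, device, epoch):
    model.eval()
    iter_losses = []
    with torch.no_grad():
        for x in loader:
            x = x.to(device)
            iter_losses.append(loss_func(model(x), x).item())
    return iter_losses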
import torch

# AutoEncoder is assumed to be defined elsewhere in the project.


def train(dataloader, parameters, device):
    model = AutoEncoder(input_dim=1900,
                        nlayers=parameters.get('nlayers', 5),
                        latent=100)
    model = model.to(device)
    model.train()
    train_loss = 0
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=parameters.get('lr', 1e-5),
                                 weight_decay=parameters.get('weight_decay', 0.))
    loss_func = torch.nn.MSELoss()
    for epoch in range(parameters.get('epochs', 1000)):
        for index, (data,) in enumerate(dataloader, 1):
            data = data.to(device)  # keep the batch on the model's device
            optimizer.zero_grad()
            output = model(data)
            loss = loss_func(output, data)  # reconstruction loss
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
    return model
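# Usage sketch for the trainer above (assumed setup, not from the original).
# The inner loop unpacks each batch as a single-tensor tuple, which is exactly
# what a one-tensor TensorDataset yields.
if __name__ == '__main__':
    features = torch.randn(256, 1900)  # toy data matching input_dim=1900
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features), batch_size=32, shuffle=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    trained_model = train(loader, {'lr': 1e-4, 'epochs': 10}, device)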
# Excerpt: __init__ of a training/experiment class. AutoEncoder and
# get_test_image are assumed to be defined elsewhere in the project.
def __init__(self, args):
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.id = args.id
    self.dataset_path = args.dataset

    model = AutoEncoder(args.E, args.D)
    if args.load:
        model.load_state_dict(torch.load(args.load))
    self.model = model.to(self.device)

    self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr)
    self.metric = nn.MSELoss()

    self.epoch_num = args.epoch
    self.batch_size = args.bs
    self.cluster_num = args.cluster_num
    self.save = args.save
    self.testcase = args.testcase
    self.csv = args.csv
    self.record_file = None
    # float('inf') is a safer sentinel than an arbitrary constant such as 999:
    # the first epoch always improves on it, whatever the loss scale
    self.best = {'epoch': 0, 'loss': float('inf')}
    self.test_images = get_test_image(args.dataset)
import torch

# ChamferDistance, ShapeNet, AutoEncoder, and args are assumed to be defined
# or parsed elsewhere in the project.

DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

cd_loss = ChamferDistance()

test_dataset = ShapeNet(partial_path=args.partial_root,
                        gt_path=args.gt_root,
                        split='test')
test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.num_workers)

network = AutoEncoder()
network.load_state_dict(torch.load('log/lowest_loss.pth'))
network.to(DEVICE)

# testing: evaluate the mean cd loss
network.eval()
with torch.no_grad():
    total_loss, iter_count = 0, 0
    for i, data in enumerate(test_dataloader, 1):
        partial_input, coarse_gt, dense_gt = data
        partial_input = partial_input.to(DEVICE)
        coarse_gt = coarse_gt.to(DEVICE)
        dense_gt = dense_gt.to(DEVICE)

        # the network expects channels-first point clouds: (B, 3, N)
        partial_input = partial_input.permute(0, 2, 1)
        v, y_coarse, y_detail = network(partial_input)
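        # Assumed completion: the original snippet is truncated at this point.
        # Accumulate the Chamfer distance between the dense output and the
        # dense ground truth, then report the mean over the test set. This
        # assumes cd_loss returns a scalar and expects point-last (B, N, 3)
        # layout, hence the permute back; some ChamferDistance implementations
        # instead return a pair of per-direction distances.
        loss = cd_loss(y_detail.permute(0, 2, 1), dense_gt)
        total_loss += loss.item()
        iter_count += 1

    print('mean cd loss: {:.6f}'.format(total_loss / iter_count))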
import glob
import os
import random

import numpy as np
import torch
import torch.nn as nn

# MyDataset and AutoEncoder are assumed to be defined elsewhere in the
# project; a sketch of the add_noise() helper follows this function.


def train(args):
    print('Start')
    if torch.cuda.is_available():
        device = 'cuda'
        # newly created tensors default to the GPU, which is why batches from
        # the loaders below need no explicit .to(device)
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        device = 'cpu'

    train_epoch = args.train_epoch
    lr = args.lr
    beta1 = args.beta1
    beta2 = args.beta2
    batch_size = args.batch_size
    noise_var = args.noise_var
    h_dim = args.h_dim

    # 80/20 train/test split; the last 15 images are kept for visualization
    images_path = glob.glob(args.data_dir + '/face_images/*/*.png')
    random.shuffle(images_path)
    split_num = int(len(images_path) * 0.8)
    train_path = images_path[:split_num]
    test_path = images_path[split_num:]
    result_path = images_path[-15:]

    train_dataset = MyDataset(train_path)
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True)
    test_dataset = MyDataset(test_path)
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True)
    result_dataset = MyDataset(result_path)
    result_dataloader = torch.utils.data.DataLoader(result_dataset,
                                                    batch_size=len(result_dataset),
                                                    shuffle=False)
    result_images = next(iter(result_dataloader))  # fixed batch for qualitative results

    model = AutoEncoder(h_dim=h_dim).to(device)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr, (beta1, beta2))
    out_path = args.model_dir

    train_loss_list = []
    test_loss_list = []
    for epoch in range(train_epoch):
        # the model is already on the device; the original re-moved it every
        # epoch, which is redundant
        model.train()
        loss_train = 0
        for x in train_dataloader:
            # denoising objective: reconstruct the clean image from its
            # noisy version
            noised_x = add_noise(x, noise_var)
            recon_x = model(noised_x)
            loss = criterion(recon_x, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_train += loss.item()
        loss_train /= len(train_dataloader)
        train_loss_list.append(loss_train)

        # evaluate and save the loss curves every epoch (the original guard
        # `if epoch % 1 == 0` was always true)
        with torch.no_grad():
            model.eval()
            loss_test = 0
            for x_test in test_dataloader:
                recon_x_test = model(x_test)
                loss_test += criterion(recon_x_test, x_test).item()
            loss_test /= len(test_dataloader)
            test_loss_list.append(loss_test)
            np.save(os.path.join(out_path, 'train_loss.npy'),
                    np.array(train_loss_list))
            np.save(os.path.join(out_path, 'test_loss.npy'),
                    np.array(test_loss_list))
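# The loop above calls an add_noise() helper that is not shown. A minimal
# sketch, assuming noise_var is the variance of zero-mean Gaussian noise:
def add_noise(x, noise_var):
    # randn_like keeps the noise on the same device and dtype as the batch
    return x + torch.randn_like(x) * noise_var ** 0.5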
import os

import torch
import torch.nn as nn
from torch.optim import Adam
from torchvision.utils import save_image

# AutoEncoder and Visualizer are assumed to be defined elsewhere in the project.


class Trainer(object):
    def __init__(self, train_loader, test_loader, config):
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.config = config
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.num_epochs = config.num_epochs
        self.lr = config.lr
        self.in_channel = config.in_channel
        self.image_size = config.image_size
        self.hidden_dim = config.hidden_dim
        self.output_dim = config.output_dim

        self.log_interval = config.log_interval
        self.sample_interval = config.sample_interval
        self.ckpt_interval = config.ckpt_interval
        self.sample_folder = config.sample_folder
        self.ckpt_folder = config.ckpt_folder

        self.build_net()
        self.vis = Visualizer()

    def build_net(self):
        # define network
        self.net = AutoEncoder(self.in_channel, self.image_size,
                               self.hidden_dim, self.output_dim)

        if self.config.mode == 'test' and self.config.training_path == '':
            print("[*] Enter model path!")
            exit()

        # if a trained model exists, load its weights
        if self.config.training_path != '':
            self.net.load_state_dict(
                torch.load(self.config.training_path,
                           map_location=lambda storage, loc: storage))
            print("[*] Load weight from {}!".format(self.config.training_path))

        self.net.to(self.device)

    # add noise to image
    def add_noise(self, imgs):
        # randn_like keeps the noise on the same device as the input; the
        # original torch.randn(imgs.size()) produced CPU noise, which fails
        # for GPU inputs
        noise = torch.randn_like(imgs) * 0.4
        noisy_imgs = noise + imgs
        return noisy_imgs

    def train(self):
        # define loss functions
        bce_criterion = nn.BCELoss().to(self.device)
        mse_criterion = nn.MSELoss().to(self.device)

        # define optimizer
        optimizer = Adam(self.net.parameters(), self.lr)

        step = 0
        print("[*] Learning started!")

        # get fixed sample
        temp_iter = iter(self.train_loader)
        fixed_imgs, _ = next(temp_iter)
        fixed_imgs = fixed_imgs.to(self.device)

        # save fixed sample image
        x_path = os.path.join(self.sample_folder, 'fixed_input.png')
        save_image(fixed_imgs, x_path, normalize=True)
        print("[*] Save fixed input image!")

        # make fixed noisy sample and save it
        fixed_noisy_imgs = self.add_noise(fixed_imgs)
        noisy_x_path = os.path.join(self.sample_folder, 'fixed_noisy_input.png')
        save_image(fixed_noisy_imgs, noisy_x_path, normalize=True)
        print("[*] Save fixed noisy input image!")

        # flatten data tensors
        fixed_imgs = fixed_imgs.view(fixed_imgs.size(0), -1)
        fixed_noisy_imgs = fixed_noisy_imgs.view(fixed_noisy_imgs.size(0), -1)

        for epoch in range(self.num_epochs):
            for i, (imgs, _) in enumerate(self.train_loader):
                self.net.train()
                # move the clean images to the device too; they are the
                # reconstruction target (the original left them on the CPU)
                imgs = imgs.view(imgs.size(0), -1).to(self.device)
                noisy_imgs = self.add_noise(imgs)  # add noise

                # forward: noisy image as input, clean image as target
                outputs = self.net(noisy_imgs)
                bce_loss = bce_criterion(outputs, imgs)
                mse_loss = mse_criterion(outputs, imgs)

                # backward: only the BCE loss is optimized; MSE is logged
                optimizer.zero_grad()
                bce_loss.backward()
                optimizer.step()

                # logging
                if (step + 1) % self.log_interval == 0:
                    print("[{}/{}] [{}/{}] BCE loss: {:.3f}, MSE loss: {:.3f}".format(
                        epoch + 1, self.num_epochs, i + 1, len(self.train_loader),
                        bce_loss.item() / len(imgs), mse_loss.item() / len(imgs)))
                    self.vis.plot("BCE Loss plot", bce_loss.item() / len(imgs))
                    self.vis.plot("MSE Loss plot", mse_loss.item() / len(imgs))

                # sampling
                if (step + 1) % self.sample_interval == 0:
                    outputs = self.net(fixed_noisy_imgs)
                    x_hat = outputs.cpu().data.view(outputs.size(0), -1,
                                                    self.image_size, self.image_size)
                    x_hat_path = os.path.join(self.sample_folder,
                                              'output_epoch{}.png'.format(epoch + 1))
                    save_image(x_hat, x_hat_path, normalize=True)
                    print("[*] Save sample images!")

                step += 1

            if (epoch + 1) % self.ckpt_interval == 0:
                ckpt_path = os.path.join(self.ckpt_folder,
                                         'ckpt_epoch{}.pth'.format(epoch + 1))
                torch.save(self.net.state_dict(), ckpt_path)
                print("[*] Checkpoint saved!")

        print("[*] Learning finished!")
        ckpt_path = os.path.join(self.ckpt_folder, 'final_model.pth')
        torch.save(self.net.state_dict(), ckpt_path)
        print("[*] Final weight saved!")
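# Usage sketch (assumed wiring, not part of the original): every attribute read
# in Trainer.__init__ and build_net is supplied through a plain namespace, and
# MNIST stands in for the image data. AutoEncoder and Visualizer must be
# importable from the surrounding project for this to run.
from types import SimpleNamespace

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

config = SimpleNamespace(
    mode='train', training_path='',
    num_epochs=10, lr=1e-3,
    in_channel=1, image_size=28, hidden_dim=256, output_dim=64,
    log_interval=100, sample_interval=500, ckpt_interval=5,
    sample_folder='samples', ckpt_folder='checkpoints')

os.makedirs(config.sample_folder, exist_ok=True)
os.makedirs(config.ckpt_folder, exist_ok=True)

mnist = datasets.MNIST('data', train=True, download=True,
                       transform=transforms.ToTensor())
train_loader = DataLoader(mnist, batch_size=128, shuffle=True)

# test_loader is never used by train(), so None is acceptable here
trainer = Trainer(train_loader, test_loader=None, config=config)
trainer.train()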