# --- Load training data and validation/test data --- # labeled_name = 'DIDMDN.txt' unlabeled_name = 'JORDER_200L.txt' val_filename = 'JORDER_200L.txt' # --- Load training data and validation/test data --- # unlbl_train_data_loader = DataLoader(TrainData(crop_size, train_data_dir, unlabeled_name), batch_size=train_batch_size, shuffle=True, num_workers=8) lbl_train_data_loader = DataLoader(TrainData(crop_size, train_data_dir, labeled_name), batch_size=train_batch_size, shuffle=True, num_workers=8) val_data_loader = DataLoader(ValData(val_data_dir, val_filename), batch_size=val_batch_size, shuffle=False, num_workers=8) num_labeled = train_batch_size * len( lbl_train_data_loader) # number of labeled images num_unlabeled = train_batch_size * len( unlbl_train_data_loader) # number of unlabeled images # --- Previous PSNR and SSIM in testing --- # net.eval() old_val_psnr, old_val_ssim = validation(net, val_data_loader, device, category, exp_name) print('old_val_psnr: {0:.2f}, old_val_ssim: {1:.4f}'.format( old_val_psnr, old_val_ssim))
def main():
    """Entry point: parse CLI options, seed RNGs, build data loaders, model,
    losses and optimizer, optionally resume from a checkpoint, then run the
    train/checkpoint/test loop for opt.nEpochs epochs.

    Relies on module-level collaborators defined elsewhere in this file:
    parser, TrainData, ValData, final_Net, adjust_learning_rate_second,
    train, save_checkpoint, test.
    """
    global opt, name, logger, model, criterion_L1, criterion_mse, model_second, best_psnr, loss_network
    global edge_loss

    opt = parser.parse_args()
    print(opt)
    import random  # local import kept from original; only used for seeding below
    opt.best_psnr = 0

    # Tag_ResidualBlocks_BatchSize — run name used for TensorBoard logs/checkpoints.
    name = "%s_%d" % (opt.tag, opt.batchSize)
    logger = SummaryWriter("runs/" + name)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    # Seed both torch and Python's RNG with independent random seeds, and
    # print them so a run can be reproduced.
    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    opt.seed_python = random.randint(1, 10000)
    random.seed(opt.seed_python)
    print("Random Seed_python: ", opt.seed_python)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)
    # Let cuDNN pick the fastest conv algorithms for the fixed input size.
    cudnn.benchmark = True

    print("==========> Loading datasets")
    train_data_dir = opt.train
    val_data_dir = opt.test

    # --- Load training data and validation/test data --- #
    training_data_loader = DataLoader(TrainData([240, 240], train_data_dir),
                                      batch_size=opt.batchSize, shuffle=True, num_workers=12)
    indoor_test_loader = DataLoader(ValData(val_data_dir),
                                    batch_size=1, shuffle=False, num_workers=12)

    print("==========> Building model")
    model = final_Net()
    # FIX: size_average= was deprecated and has been removed from modern
    # PyTorch; reduction='mean' is the exact equivalent of size_average=True.
    criterion_mse = nn.MSELoss(reduction='mean')
    criterion_L1 = nn.L1Loss(reduction='mean')
    print(model)

    # Optionally resume training from a saved checkpoint.
    # NOTE(review): opt.start_epoch is only assigned here; it presumably has a
    # default from the argument parser — confirm, otherwise a fresh run fails.
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["state_dict"])
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # --- Set the GPU --- #
    print("==========> Setting GPU")
    if cuda:
        # Wrap for multi-GPU; idiomatic list(range(...)) replaces the
        # redundant [i for i in range(opt.gpus)] comprehension.
        model = nn.DataParallel(model, device_ids=list(range(opt.gpus))).cuda()
        criterion_L1 = criterion_L1.cuda()
        criterion_mse = criterion_mse.cuda()

    # --- Calculate all trainable parameters in network --- #
    pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Total_params: {}".format(pytorch_total_params))

    print("==========> Setting Optimizer")
    # --- Build optimizer --- #
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    print("==========> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        # Scheduler is 0-indexed while epochs start at opt.start_epoch.
        adjust_learning_rate_second(optimizer, epoch - 1)
        train(training_data_loader, indoor_test_loader, optimizer, epoch)
        save_checkpoint(model, epoch, name)
        test(indoor_test_loader, epoch)
# --- Load the network weight --- # try: net.load_state_dict(torch.load('{}_haze_best_{}_{}'.format(category, network_height, network_width))) print('--- weight loaded ---') except: print('--- no weight loaded ---') # --- Calculate all trainable parameters in network --- # pytorch_total_params = sum(p.numel() for p in net.parameters() if p.requires_grad) print("Total_params: {}".format(pytorch_total_params)) # --- Load training data and validation/test data --- # train_data_loader = DataLoader(TrainData(crop_size, train_data_dir), batch_size=train_batch_size, shuffle=True, num_workers=24) val_data_loader = DataLoader(ValData(val_data_dir), batch_size=val_batch_size, shuffle=False, num_workers=24) # --- Previous PSNR and SSIM in testing --- # old_val_psnr, old_val_ssim = validation(net, val_data_loader, device, category) print('old_val_psnr: {0:.2f}, old_val_ssim: {1:.4f}'.format(old_val_psnr, old_val_ssim)) for epoch in range(num_epochs): psnr_list = [] start_time = time.time() adjust_learning_rate(optimizer, epoch, category=category) for batch_id, train_data in enumerate(train_data_loader): haze, gt = train_data haze = haze.to(device)
# --- Set category-specific hyper-parameters --- # if category == 'indoor': val_data_dir = './data/test/SOTS/indoor/' elif category == 'outdoor': val_data_dir = './data/test/SOTS/outdoor/' else: raise Exception( 'Wrong image category. Set it to indoor or outdoor for RESIDE dateset.' ) # --- Gpu device --- # device_ids = [Id for Id in range(torch.cuda.device_count())] device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # --- Validation data loader --- # val_data_loader = DataLoader(ValData(val_data_dir), batch_size=val_batch_size, shuffle=False, num_workers=24) # --- Define the network --- # net = GridDehazeNet(height=network_height, width=network_width, num_dense_layer=num_dense_layer, growth_rate=growth_rate) # --- Multi-GPU --- # net = net.to(device) net = nn.DataParallel(net, device_ids=device_ids) # --- Load the network weight --- #