def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    self.ssim_loss = settings.ssim_loss
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    ensure_dir('../log_test')
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    if len(settings.device_id) > 1:
        self.net = nn.DataParallel(ODE_DerainNet()).cuda()
    else:
        torch.cuda.set_device(settings.device_id[0])
        self.net = ODE_DerainNet().cuda()
    self.l1 = nn.L1Loss().cuda()
    self.mse = nn.MSELoss().cuda()
    self.ssim = SSIM().cuda()
    self.step = 0
    self.save_steps = settings.save_steps
    self.num_workers = settings.num_workers
    self.batch_size = settings.batch_size
    self.writers = {}
    self.dataloaders = {}
    self.opt_net = Adam(self.net.parameters(), lr=settings.lr)
    # settings.l1 / settings.l2 are milestone step counts, not the loss modules above
    self.sche_net = MultiStepLR(self.opt_net,
                                milestones=[settings.l1, settings.l2],
                                gamma=0.1)
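# A minimal sketch (not part of the original file) of how one training step could use
# the attributes built above. The batch keys 'O' (rainy) and 'B' (clean), the output
# shape of ODE_DerainNet, and the exact loss mix are assumptions, not the authors' code.
def inf_batch_sketch(self, batch):
    O, B = batch['O'].cuda(), batch['B'].cuda()       # assumed rainy / clean pair
    derained = self.net(O)                            # forward pass (assumed single output)
    ssim = self.ssim(derained, B)                     # SSIM in [0, 1], higher is better
    loss = self.l1(derained, B) + (1 - ssim)          # L1 plus SSIM-based term
    self.opt_net.zero_grad()
    loss.backward()
    self.opt_net.step()
    self.sche_net.step()                              # lr decays at settings.l1 / settings.l2
    self.step += 1
    return loss.item(), ssim.item()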
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    self.net = TFN().cuda()
    self.crit = L1Loss().cuda()
    self.ssim = SSIM().cuda()
    self.msssim = MSSSIM().cuda()
    self.step = 0
    self.perceptual_weight = settings.perceptual_weight
    self.loss_weight = settings.loss_weight
    self.total_variation_weight = settings.total_variation_weight
    self.ssim_loss_weight = settings.ssim_loss_weight
    self.save_steps = settings.save_steps
    self.num_workers = settings.num_workers
    self.batch_size = settings.batch_size
    self.writers = {}
    self.dataloaders = {}
    self.opt = Adam(self.net.parameters(), lr=settings.lr)
    self.sche = MultiStepLR(self.opt,
                            milestones=[11000, 70000, 90000, 110000, 130000],
                            gamma=0.1)
def __init__(self):
    self.device = torch.device("cuda")
    self.log_dir = './logdir'
    self.model_dir = './model'
    ensure_dir(self.log_dir)
    ensure_dir(self.model_dir)
    self.log_name = 'train_derain'
    self.val_log_name = 'val_derain'
    logger.info('set log dir as %s' % self.log_dir)
    logger.info('set model dir as %s' % self.model_dir)
    self.test_data_path = 'testing/real_test_1000.txt'   # test dataset txt file path
    self.train_data_path = 'training/real_world.txt'     # train dataset txt file path
    self.multi_gpu = False
    torch.cuda.empty_cache()
    self.net = SPANet().to(self.device)
    print_network(self.net)
    self.l1 = nn.L1Loss().to(self.device)
    self.l2 = nn.MSELoss().to(self.device)
    self.ssim = SSIM().to(self.device)
    self.step = 0
    self.save_steps = 400
    self.num_workers = 1
    self.batch_size = 2
    self.writers = {}
    self.dataloaders = {}
    self.shuffle = True
    self.opt = Adam(self.net.parameters(), lr=5e-3)
    self.sche = MultiStepLR(self.opt, milestones=[500, 1500, 2000, 3000], gamma=0.1)
    self.step_time = 0
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    # self.net = RESCAN().cuda()
    if len(settings.device_id) > 1:
        self.net = nn.DataParallel(RESCAN()).cuda()
    else:
        self.net = RESCAN().cuda()
    self.crit = MSELoss().cuda()
    self.ssim = SSIM().cuda()
    self.step = 0
    self.save_steps = settings.save_steps
    self.num_workers = settings.num_workers
    self.batch_size = settings.batch_size
    self.writers = {}
    self.dataloaders = {}
    self.opt = Adam(self.net.parameters(), lr=settings.lr)
    self.sche = MultiStepLR(self.opt, milestones=[240000, 320000], gamma=0.1)
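# A minimal checkpoint save/load sketch (assumed helpers, not the original methods),
# built only from the model_dir, net, opt, sche and step fields initialised above;
# assumes `os` and `torch` are imported in this module.
def save_checkpoints_sketch(self, name):
    ckp_path = os.path.join(self.model_dir, name)
    torch.save({'net': self.net.state_dict(),
                'opt': self.opt.state_dict(),
                'clock': self.step}, ckp_path)

def load_checkpoints_sketch(self, name):
    ckp_path = os.path.join(self.model_dir, name)
    obj = torch.load(ckp_path)
    self.net.load_state_dict(obj['net'])
    self.opt.load_state_dict(obj['opt'])
    self.step = obj['clock']
    self.sche.last_epoch = self.step    # keep the lr schedule aligned with the restored step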
def __init__(self):
    self.show_dir = settings.show_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.show_dir)
    ensure_dir(settings.model_dir)
    logger.info('set show dir as %s' % settings.show_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    if len(settings.device_id) > 1:
        self.net = nn.DataParallel(Net()).cuda()
    else:
        self.net = Net().cuda()
    self.ssim = SSIM().cuda()
    self.dataloaders = {}
    self.a = 0
    self.t = 0
def __init__(self):
    self.show_dir = settings.show_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.show_dir)
    ensure_dir(settings.model_dir)
    logger.info('set show dir as %s' % settings.show_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    self.net = RESCAN().cuda()
    self.dataloaders = {}
    self.ssim = SSIM().cuda()
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    self.net = RESCAN().cuda()
    self.crit = MSELoss().cuda()
    self.ssim = SSIM().cuda()
    self.dataloaders = {}
def __init__(self):
    self.show_dir = settings.show_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.show_dir)
    ensure_dir(settings.model_dir)
    logger.info('set show dir as %s' % settings.show_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    if len(settings.device_id) > 1:
        self.net = nn.DataParallel(ODE_DerainNet()).cuda()
        # self.l2 = nn.DataParallel(MSELoss(), settings.device_id)
        # self.l1 = nn.DataParallel(nn.L1Loss(), settings.device_id)
        # self.ssim = nn.DataParallel(SSIM(), settings.device_id)
        # self.vgg = nn.DataParallel(VGG(), settings.device_id)
    else:
        torch.cuda.set_device(settings.device_id[0])
        self.net = ODE_DerainNet().cuda()
    self.ssim = SSIM().cuda()
    self.dataloaders = {}
    self.a = 0
    self.t = 0
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    if len(settings.device_id) > 1:
        self.net = nn.DataParallel(Net()).cuda()
    else:
        self.net = Net().cuda()
    self.l2 = MSELoss().cuda()
    self.l1 = nn.L1Loss().cuda()
    self.ssim = SSIM().cuda()
    self.dataloaders = {}
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    ensure_dir('../log_test')
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    if torch.cuda.is_available():
        self.net = Net().cuda()
        self.dis_rain_img = Discriminator_rain_img().cuda()
        self.dis_img = Discriminator_img().cuda()
        if len(device_ids) > 1:
            self.net = nn.DataParallel(Net()).cuda()
            self.dis_rain_img = nn.DataParallel(Discriminator_rain_img()).cuda()
            self.dis_img = nn.DataParallel(Discriminator_img()).cuda()
    self.l2 = MSELoss().cuda()
    self.l1 = nn.L1Loss().cuda()
    self.ssim = SSIM().cuda()
    # self.vgg = VGG().cuda()
    self.bceloss = nn.BCELoss().cuda()
    self.step = 0
    self.ssim_val = 0
    self.psnr_val = 0
    self.save_steps = settings.save_steps
    self.num_workers = settings.num_workers
    self.batch_size = settings.batch_size
    self.writers = {}
    self.dataloaders = {}
    self.opt_net = Adam(self.net.parameters(), lr=settings.lr)
    self.sche_net = MultiStepLR(self.opt_net,
                                milestones=[settings.l1, settings.l2],
                                gamma=0.1)
    self.opt_dis_rain_img = Adam(self.dis_rain_img.parameters(), lr=settings.lr)
    self.sche_dis_rain_img = MultiStepLR(self.opt_dis_rain_img,
                                         milestones=[settings.l1, settings.l2],
                                         gamma=0.1)
    self.opt_dis_img = Adam(self.dis_img.parameters(), lr=settings.lr)
    self.sche_dis_img = MultiStepLR(self.opt_dis_img,
                                    milestones=[settings.l1, settings.l2],
                                    gamma=0.1)
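# A minimal sketch (an assumption, not the original training loop) of how the image
# discriminator above could be updated with self.bceloss; the discriminator is assumed
# to output per-sample scores in [0, 1].
def update_dis_img_sketch(self, real_img, derained_img):
    pred_real = self.dis_img(real_img)
    pred_fake = self.dis_img(derained_img.detach())   # detach so only the discriminator updates
    loss_d = self.bceloss(pred_real, torch.ones_like(pred_real)) + \
             self.bceloss(pred_fake, torch.zeros_like(pred_fake))
    self.opt_dis_img.zero_grad()
    loss_d.backward()
    self.opt_dis_img.step()
    self.sche_dis_img.step()
    return loss_d.item()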
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    self.net = DetailNet().cuda()
    self.crit = MSELoss().cuda()
    self.ssim = SSIM().cuda()
    self.step = 0
    self.save_steps = settings.save_steps
    self.num_workers = settings.num_workers
    self.batch_size = settings.batch_size
    self.writers = {}
    self.dataloaders = {}
    self.opt = Adam(self.net.parameters(), lr=settings.lr)
    self.sche = MultiStepLR(self.opt, milestones=[15000, 17500], gamma=0.1)
def __init__(self, args):
    self.log_dir = args.log_dir
    self.model_dir = args.model_dir
    ensure_dir(self.log_dir)
    ensure_dir(self.model_dir)
    logger.info('set log dir as %s' % self.log_dir)
    logger.info('set model dir as %s' % self.model_dir)
    self.net = ERLNet(in_channels=3, out_channels=3).cuda()
    self.ssim = SSIM().cuda()
    self.image_size = args.image_size
    self.batch_size = args.batch_size
    self.num_workers = args.num_workers
    self.step = 0
    self.epoch = args.epochs
    self.now_epoch = 0
    self.start_epoch = 0
    self.writers = {}
    self.total_step = 0
    self.sessname = args.sessname
def __init__(self, args):
    self.log_dir = args.log_dir
    self.model_dir = args.model_dir
    ensure_dir(self.log_dir)
    ensure_dir(self.model_dir)
    logger.info('set log dir as %s' % self.log_dir)
    logger.info('set model dir as %s' % self.model_dir)
    self.net = detail_net().cuda()
    self.ssim = SSIM().cuda()
    self.image_size = args.image_size
    self.batch_size = args.batch_size
    self.num_workers = args.num_workers
    self.step = 0
    self.epoch = args.epochs
    self.now_epoch = 0
    self.start_epoch = 0
    self.writers = {}
    self.total_step = 0
    self.sessname = args.sessname
    if args.loss == "MSE":
        self.crit = MSELoss().cuda()
    elif args.loss == "L1Loss":
        self.crit = L1Loss().cuda()
    else:
        self.crit = Myloss().cuda()
    if args.opt == "SGD":
        self.opt = SGD(self.net.parameters(), lr=args.lr)
    else:
        self.opt = Adam(self.net.parameters(), lr=args.lr)
    self.sche = MultiStepLR(self.opt,
                            milestones=[30, 60, 90, 120, 150, 180, 210],
                            gamma=0.5)
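# A possible argparse setup (a sketch; the default values are assumptions) that supplies
# every field the constructor above reads from `args`.
def parse_args_sketch():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dir', default='./logdir')
    parser.add_argument('--model_dir', default='./model')
    parser.add_argument('--image_size', type=int, default=64)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--epochs', type=int, default=210)
    parser.add_argument('--sessname', default='derain')
    parser.add_argument('--loss', choices=['MSE', 'L1Loss', 'Myloss'], default='MSE')
    parser.add_argument('--opt', choices=['SGD', 'Adam'], default='Adam')
    parser.add_argument('--lr', type=float, default=1e-3)
    return parser.parse_args()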
def __init__(self):
    self.log_dir = settings.log_dir
    self.model_dir = settings.model_dir
    ensure_dir(settings.log_dir)
    ensure_dir(settings.model_dir)
    logger.info('set log dir as %s' % settings.log_dir)
    logger.info('set model dir as %s' % settings.model_dir)
    if torch.cuda.is_available():
        self.net = Net().cuda()
        self.dis_rain_img = Discriminator_rain_img().cuda()
        self.dis_img = Discriminator_img().cuda()
        if len(device_ids) > 1:
            self.net = nn.DataParallel(Net()).cuda()
            self.dis_rain_img = nn.DataParallel(Discriminator_rain_img()).cuda()
            self.dis_img = nn.DataParallel(Discriminator_img()).cuda()
    self.opt_net = Adam(self.net.parameters(), lr=settings.lr)
    self.sche_net = MultiStepLR(self.opt_net,
                                milestones=[settings.l1, settings.l2],
                                gamma=0.1)
    self.opt_dis_rain_img = Adam(self.dis_rain_img.parameters(), lr=settings.lr)
    self.sche_dis_rain_img = MultiStepLR(self.opt_dis_rain_img,
                                         milestones=[settings.l1, settings.l2],
                                         gamma=0.1)
    # optimiser/scheduler for the image discriminator
    self.opt_dis_img = Adam(self.dis_img.parameters(), lr=settings.lr)
    self.sche_dis_img = MultiStepLR(self.opt_dis_img,
                                    milestones=[settings.l1, settings.l2],
                                    gamma=0.1)
    self.l2 = MSELoss().cuda()
    self.l1 = nn.L1Loss().cuda()
    self.ssim = SSIM().cuda()
    self.vgg = VGG().cuda()
    self.dataloaders = {}
def main():
    print('Loading dataset ...\n')
    dataset_train = Dataset(data_path='datasets_arlo/')
    loader_train = DataLoader(dataset=dataset_train)
    print('# of training samples :', int(len(loader_train)))

    # define some hyper-parameters
    recurr_iter = 4
    use_GPU = True
    model_path = 'logs/real/latest.pth'
    num_epochs = 2

    # model = PRN(recurr_iter, use_GPU)
    # model = Generator_lstm(recurr_iter, use_GPU)
    torch.cuda.empty_cache()
    device = torch.device("cuda")
    model = SPANet().to(device)
    print_network(model)
    model.load_state_dict(torch.load(model_path))

    # losses
    L1 = nn.L1Loss()
    L2 = nn.MSELoss()
    # binary_cross_entropy = F.binary_cross_entropy
    criterion = SSIM()
    if use_GPU:
        model = model.cuda()
        L1.cuda()
        L2.cuda()
        criterion.cuda()
        # binary_cross_entropy.cuda()

    # optimizer and learning-rate schedule
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    scheduler = MultiStepLR(optimizer, milestones=[20, 40], gamma=0.1)  # note: not stepped in this function

    # record training
    writer = SummaryWriter('logs/')

    step = 0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        for i, (input_train, target_train, streak_train) in enumerate(loader_train, 0):
            start = timeit.default_timer()
            input_train = Variable(input_train, requires_grad=False)
            target_train = Variable(target_train, requires_grad=False)
            streak_train = Variable(streak_train, requires_grad=False)
            if use_GPU:
                input_train = input_train.cuda()
                target_train = target_train.cuda()
                streak_train = streak_train.cuda()

            optimizer.zero_grad()
            model.train()
            mask, out_train = model(input_train)
            # out_train = input_train - out_streak
            # out_streak = torch.clamp(out_streak[:, :, :, :], 0., 1.)
            l1 = L1(mask[:, 0, :, :], streak_train[:, 0, :, :])
            l2 = L2(streak_train[:, 0, :, :], mask[:, 0, :, :])
            ssim = criterion(target_train, out_train)
            pixel_metric = l1 + l2 + (1 - ssim)
            # loss = -pixel_metric
            loss = pixel_metric
            loss.backward()
            optimizer.step()

            model.eval()
            mask, out_train = model(input_train)
            stop = timeit.default_timer()
            print("[epoch %d][%d/%d] loss: %.4f, l1 loss: %.4f, l2 loss: %.4f, ssim: %.4f, step time: %.2f"
                  % (epoch + 1, i + 1, len(loader_train),
                     loss.item(), l1.item(), l2.item(), ssim.item(), stop - start))
            if step % 10 == 0:
                writer.add_scalar('loss', loss.item(), step)
            step += 1

        # log example images from the last batch and save the model at the end of each epoch
        model.eval()
        mask, out_train = model(input_train)
        im_target = utils.make_grid(target_train.data, nrow=8, normalize=True, scale_each=True)
        im_input = utils.make_grid(input_train.data, nrow=8, normalize=True, scale_each=True)
        out_target = utils.make_grid(out_train.data, nrow=8, normalize=True, scale_each=True)
        writer.add_image('clean image', im_target, epoch + 1)
        writer.add_image('rainy image', im_input, epoch + 1)
        writer.add_image('streak image', out_target, epoch + 1)
        torch.save(model.state_dict(), 'logs/real/latest.pth')
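# Entry-point guard (a sketch; the original script's guard is not shown in this excerpt).
if __name__ == '__main__':
    main()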