def eval_dir_func(true_dir, test_dir, mask_dir=None, metrics=MetricType.ALL):
    logger = get_logger(__name__)
    avg_scores = deepcopy(default_score_dict)
    flist = os.listdir(true_dir)
    file_num = len(flist)
    logger.info(
        'Eval image num: {}\nTrue img: [ {} ]\nTest img: [ {} ]\nMask img: [ {} ]'
        .format(file_num, true_dir, test_dir, mask_dir))
    pixel_types = ['all']
    if mask_dir:
        pixel_types += ['shadow', 'shadow_free']
    for fname in flist:
        ftrue = os.path.join(true_dir, fname)
        ftest = os.path.join(test_dir, fname)
        if mask_dir:
            fmask = os.path.join(mask_dir, fname)
            scores = eval_func(ftrue, ftest, fmask, metrics)
        else:
            scores = eval_func(ftrue, ftest, None, metrics)
        # if scores['rmse']['all'] < 3:
        #     visualize_result(true_dir, test_dir, mask_dir, fname, scores)
        # print(scores)
        for pixel_type in pixel_types:
            if metrics & MetricType.MSE:
                avg_scores['mse'][pixel_type] += scores['mse'][pixel_type] / file_num
            if metrics & MetricType.RMSE:
                avg_scores['rmse'][pixel_type] += scores['rmse'][pixel_type] / file_num
    logger.info(avg_scores)
    return avg_scores
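# For reference, a minimal sketch of the shape eval_dir_func assumes for
# default_score_dict (the real definition lives elsewhere in this module;
# the zero-initialised nesting below is inferred from the accesses above):
_sketch_default_score_dict = {
    'mse': {'all': 0.0, 'shadow': 0.0, 'shadow_free': 0.0},
    'rmse': {'all': 0.0, 'shadow': 0.0, 'shadow_free': 0.0},
}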
def print_network(net):  # fixed typo: was print_netowrk
    logger = get_logger(__name__)
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    logger.info(net)
    logger.info('Total number of parameters: {}'.format(num_params))
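# Quick sanity check for print_network; the tiny stand-in model below is
# hypothetical and not part of the project:
def _demo_print_network():
    import torch.nn as nn
    net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
    print_network(net)  # logs the module repr, then 'Total number of parameters: 161'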
def __init__(self, args, path):
    self.logger = get_logger(__name__)
    self.mdl_name = 'STCGAN'
    self.epoch = args.epochs
    self.batch_size = args.batch_size
    self.gpu_mode = args.gpu_mode
    self.mdl_dir = path.mdl_dir
    self.train_hist = {
        'G1_loss': [],
        'G2_loss': [],
        'D1_loss': [],
        'D2_loss': [],
    }

    # data loader: split the data into train / validation / test index lists
    train_size = len(os.listdir(path.train_shadow_dir))
    test_size = len(os.listdir(path.test_shadow_dir))
    train_img_list = list(range(train_size))
    test_img_list = list(range(test_size))
    # TODO: add augmentation here
    training_augmentation = get_composed_augmentation()
    if args.valid_ratio:
        split_size = int((1 - args.valid_ratio) * train_size)  # was: total_size (undefined)
        train_img_list, valid_img_list = (train_img_list[:split_size],
                                          train_img_list[split_size:])
        train_dataset = ShadowRemovalDataset(path, 'training', train_img_list,
                                             training_augmentation)
        valid_dataset = ShadowRemovalDataset(path, 'validation', valid_img_list)
        self.train_loader = DataLoader(train_dataset, batch_size=self.batch_size,
                                       shuffle=True, num_workers=8)
        self.valid_loader = DataLoader(valid_dataset, batch_size=self.batch_size,
                                       shuffle=False, num_workers=8)
    else:
        train_dataset = ShadowRemovalDataset(path, 'training', train_img_list,
                                             training_augmentation)
        self.train_loader = DataLoader(train_dataset, batch_size=self.batch_size,
                                       shuffle=True, num_workers=8)
    test_dataset = ShadowRemovalDataset(path, 'testing', test_img_list)
    self.test_loader = DataLoader(test_dataset, batch_size=self.batch_size,
                                  shuffle=False, num_workers=8)

    # model
    self.G1 = STCGAN_G1()
    self.G2 = STCGAN_G2()
    self.D1 = STCGAN_D1()
    self.D2 = STCGAN_D2()
    self.G_opt = optim.Adam(list(self.G1.parameters()) + list(self.G2.parameters()),
                            lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_opt = optim.Adam(list(self.D1.parameters()) + list(self.D2.parameters()),
                            lr=args.lrD, betas=(args.beta1, args.beta2))
    if self.gpu_mode:
        self.G1.cuda()
        self.G2.cuda()
        self.D1.cuda()
        self.D2.cuda()
        self.l1_loss = nn.L1Loss().cuda()
        self.adversarial_loss = nn.CrossEntropyLoss().cuda()  # fixed typo: was adversial_loss
    else:
        self.l1_loss = nn.L1Loss()
        self.adversarial_loss = nn.CrossEntropyLoss()

    self.logger.info('-' * 10 + ' Networks Architecture ' + '-' * 10)
    utils.print_network(self.G1)
    utils.print_network(self.G2)
    utils.print_network(self.D1)
    utils.print_network(self.D2)
    self.logger.info('-' * 43)
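# The validation split above, isolated as a helper for readability; a minimal
# sketch under the same convention (index lists over a directory listing), not
# a function the project defines:
def _split_indices(size, valid_ratio):
    """Return (train_indices, valid_indices) for the given validation ratio."""
    split = int((1 - valid_ratio) * size)
    indices = list(range(size))
    return indices[:split], indices[split:]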
def __init__(self, path=None, data_type='training', img_list=None, transform=None):
    self.logger = get_logger(__name__)
    if not path:
        from pathHandler import PathHandler
        self.path = PathHandler()
    else:
        self.path = path
    self.data_type = data_type
    self.img_list = img_list
    self.transform = transform
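# For context: torch's DataLoader only requires __len__ and __getitem__ on a
# dataset. A minimal sketch of that contract, assuming an index list like the
# one ShadowRemovalDataset keeps (the loading step here is a placeholder, not
# the project's implementation):
from torch.utils.data import Dataset

class _SketchDataset(Dataset):
    def __init__(self, img_list, transform=None):
        self.img_list = img_list
        self.transform = transform

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        sample = {'index': self.img_list[idx]}  # real code would load images here
        return self.transform(sample) if self.transform else sample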
    fig.suptitle(
        'Image: {}\n[ rmse score ] all: {:.3f}, shadow: {:.3f}, non-shadow: {:.3f}'
        .format(fname, scores['rmse']['all'], scores['rmse']['shadow'],
                scores['rmse']['shadow_free']))
    for pos, title, filename, kwargs in zip(pos_list, title_list,
                                            filename_list, kwargs_list):
        ax = fig.add_subplot(pos)
        ax.set_title(title)
        plt.imshow(mpimg.imread(filename), **kwargs)
    plt.show()


if __name__ == '__main__':
    log_file = os.path.join('log', os.path.basename(__file__) + '.log')
    set_logger(log_file)
    logger = get_logger(__name__)
    test_metrics = MetricType.MSE | MetricType.RMSE
    true_dir = os.path.join('processed_dataset', 'ISTD', 'test', 'non_shadow')
    mask_dir = os.path.join('processed_dataset', 'ISTD', 'test', 'mask')
    guo_dir = os.path.join('processed_dataset', 'ISTD', 'result', 'Guo')
    yang_dir = os.path.join('processed_dataset', 'ISTD', 'result', 'Yang')
    gong_dir = os.path.join('processed_dataset', 'ISTD', 'result', 'Gong')
    stcgan_dir = os.path.join('processed_dataset', 'ISTD', 'result', 'ST-CGAN')
    avg_eval_scores = eval_dir_func(true_dir, guo_dir, mask_dir, test_metrics)
    avg_eval_scores = eval_dir_func(true_dir, yang_dir, mask_dir, test_metrics)
    avg_eval_scores = eval_dir_func(true_dir, gong_dir, mask_dir, test_metrics)
    avg_eval_scores = eval_dir_func(true_dir, stcgan_dir, mask_dir, test_metrics)
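# MetricType above behaves like a bit flag (combined with '|', tested with
# '&'); a minimal sketch of a compatible definition, assuming enum.Flag
# (the project's actual enum may differ):
import enum

class _SketchMetricType(enum.Flag):
    MSE = enum.auto()
    RMSE = enum.auto()
    ALL = MSE | RMSE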
def __init__(self, args, path):
    self.logger = get_logger(__name__)
    self.mdl_name = 'STCGAN'
    self.epoch = args.epochs
    self.batch_size = args.batch_size
    self.valid_ratio = args.valid_ratio
    self.lambda1 = args.lambda1
    self.lambda2 = args.lambda2
    self.lambda3 = args.lambda3
    self.gpu_mode = args.gpu_mode
    # self.gpu_id = args.gpu_id
    self.path = path
    self.mdl_dir = path.mdl_dir
    self.train_hist = {
        'G_loss': [],
        'D_loss': [],
        'G1_loss': [],
        'G2_loss': [],
        'D1_loss': [],
        'D2_loss': [],
    }
    self.device = (torch.device('cuda:{}'.format(args.gpu_id))
                   if args.gpu_id else torch.device('cpu'))

    # data loader: split the data into train / validation / test index lists
    train_size = len(os.listdir(path.train_shadow_dir))
    test_size = len(os.listdir(path.test_shadow_dir))
    train_img_list = list(range(train_size))
    test_img_list = list(range(test_size))
    # TODO: add augmentation here
    training_augmentation = get_composed_augmentation()
    if self.valid_ratio:
        split_size = int((1 - args.valid_ratio) * train_size)
        train_img_list, valid_img_list = (train_img_list[:split_size],
                                          train_img_list[split_size:])
        train_dataset = ShadowRemovalDataset(path, 'training', train_img_list,
                                             training_augmentation)
        valid_dataset = ShadowRemovalDataset(path, 'validation', valid_img_list)
        self.train_loader = DataLoader(train_dataset, batch_size=self.batch_size,
                                       shuffle=True, num_workers=8)
        self.valid_loader = DataLoader(valid_dataset, batch_size=2,
                                       shuffle=False, num_workers=8)
        self.logger.info('Training size: {} Validation size: {}'.format(
            split_size, train_size - split_size))
    else:
        train_dataset = ShadowRemovalDataset(path, 'training', train_img_list,
                                             training_augmentation)
        self.train_loader = DataLoader(train_dataset, batch_size=self.batch_size,
                                       shuffle=True, num_workers=8)
    test_dataset = ShadowRemovalDataset(path, 'testing', test_img_list)
    self.test_loader = DataLoader(test_dataset, batch_size=self.batch_size,
                                  shuffle=False, num_workers=8)

    # model
    self.G1 = networks.define_G(input_nc=3, output_nc=1, ngf=64,
                                netG='unet_256', gpu_ids=[args.gpu_id])
    self.G2 = networks.define_G(input_nc=4, output_nc=3, ngf=64,
                                netG='unet_256', gpu_ids=[args.gpu_id])
    self.D1 = networks.define_D(input_nc=3 + 1, ndf=64, netD='pixel',
                                use_sigmoid=True, gpu_ids=[args.gpu_id])
    self.D2 = networks.define_D(input_nc=3 + 3 + 1, ndf=64, netD='pixel',
                                use_sigmoid=True, gpu_ids=[args.gpu_id])
    self.G_opt = optim.Adam(list(self.G1.parameters()) + list(self.G2.parameters()),
                            lr=args.lrG, betas=(args.beta1, args.beta2))
    self.D_opt = optim.Adam(list(self.D1.parameters()) + list(self.D2.parameters()),
                            lr=args.lrD, betas=(args.beta1, args.beta2))
    self.l1_loss = nn.L1Loss()
    self.mse_loss = nn.MSELoss()  # for validation
    # self.bce_loss = nn.BCELoss()
    self.gan_loss = networks.GANLoss(use_lsgan=False).to(self.device)
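# How the lambda weights above are presumably combined; a sketch of the
# ST-CGAN generator objective from the paper (the actual reduction lives in
# the training loop, which may weight the terms differently):
def _sketch_generator_loss(l1_g1, l1_g2, adv_d1, adv_d2,
                           lambda1, lambda2, lambda3):
    # L_G = L1(G1) + lambda1 * L1(G2) + lambda2 * L_cGAN(D1) + lambda3 * L_cGAN(D2)
    return l1_g1 + lambda1 * l1_g2 + lambda2 * adv_d1 + lambda3 * adv_d2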