def loading_data():
    """Build Venice train/val DataLoaders plus the de-normalization transform.

    Returns:
        (train_loader, val_loader, restore_transform)
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA

    # Training-time joint (image + density map) augmentation.
    # Horizontal flip is intentionally disabled here.
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        # own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)
    ])
    # Comment out the next line to validate on crops (like training)
    # instead of full-size images.
    val_main_transform = None

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.LabelNormalize(count_scale),
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    train_set = Venice(cfg_data.DATA_PATH + '/train', 'train',
                       main_transform=train_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = Venice(cfg_data.DATA_PATH + '/test', 'test',
                     main_transform=val_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
def loading_data():
    """Assemble SHHB train/val/test loaders and the de-normalization transform.

    Returns:
        (train_loader, val_loader, test_loader, restore_transform)
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA

    # Training augmentation: horizontal flip only (random crop disabled).
    train_main_transform = own_transforms.Compose([
        # own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])
    # Overridden to evaluate on full-size images.
    val_main_transform = None

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(count_scale)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    def build_eval_loader(subdir):
        # Val and test splits share mode 'test' and the same transforms.
        dataset = SHHB(cfg_data.DATA_PATH + subdir, 'test',
                       main_transform=val_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
        return DataLoader(dataset, batch_size=cfg_data.VAL_BATCH_SIZE,
                          num_workers=8, shuffle=True, drop_last=False)

    train_set = SHHB(cfg_data.DATA_PATH + '/train_data', 'train',
                     main_transform=train_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    test_loader = build_eval_loader('/test_data')
    val_loader = build_eval_loader('/val_data')

    return train_loader, val_loader, test_loader, restore_transform
def loading_data():
    """Build UCF50 cross-validation train/val loaders and the restore transform.

    The held-out fold index comes from cfg_data.VAL_INDEX; the remaining
    folds form the training set.

    Returns:
        (train_loader, val_loader, restore_transform)
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    # Validation also crops so eval batches match the training resolution.
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(count_scale)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    val_folder = cfg_data.VAL_INDEX
    train_folder = get_train_folder(val_folder)

    train_set = UCF50(cfg_data.DATA_PATH, train_folder, 'train',
                      main_transform=train_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0, collate_fn=SHHA_collate,
                              shuffle=True, drop_last=True)

    # NOTE(review): map() yields a lazy iterator over the digits of the fold
    # index — presumably UCF50 materializes it once; confirm it is never
    # iterated twice.
    val_set = UCF50(cfg_data.DATA_PATH, map(int, str(val_folder)), 'test',
                    main_transform=val_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0, collate_fn=SHHA_collate,
                            shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
def loading_data():
    """Build the WorldExpo (WE) train loader and one val loader per test scene.

    Returns:
        (train_loader, val_loader, restore_transform) where val_loader is a
        list with one DataLoader per folder in cfg_data.VAL_FOLDER.
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    # Validation also crops, so its batches match the training resolution.
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(count_scale)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    train_set = WE(cfg_data.DATA_PATH + '/train', 'train',
                   main_transform=train_main_transform,
                   img_transform=img_transform,
                   gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0, shuffle=True, drop_last=True)

    # One loader per test-scene subfolder.
    val_loader = [
        DataLoader(
            WE(cfg_data.DATA_PATH + '/test/' + scene, 'test',
               main_transform=val_main_transform,
               img_transform=img_transform,
               gt_transform=gt_transform),
            batch_size=cfg_data.VAL_BATCH_SIZE,
            num_workers=0, shuffle=True, drop_last=True)
        for scene in cfg_data.VAL_FOLDER
    ]

    return train_loader, val_loader, restore_transform
def data_transforms(cfg_data):
    """Return the five transforms for the dataset configured in cfg_data.

    Args:
        cfg_data: config namespace providing DATASET, MEAN_STD, LOG_PARA,
            TRAIN_SIZE and (for FDST) TRAIN_DOWNRATE.

    Returns:
        (train_main_transform, val_main_transform, img_transform,
         gt_transform, restore_transform)
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA
    dataset = cfg_data.DATASET

    # Joint (image + target) spatial transforms, dataset-dependent.
    if dataset == 'City':
        train_main_transform = own_transforms.Compose([
            own_transforms.RandomHorizontallyFlip()
        ])
        val_main_transform = None
    elif dataset in ['FDST', 'PETS']:
        # Both splits are rescaled to a fixed size.
        train_main_transform = standard_transforms.Compose([
            own_transforms.FreeScale(cfg_data.TRAIN_SIZE),
        ])
        val_main_transform = standard_transforms.Compose([
            own_transforms.FreeScale(cfg_data.TRAIN_SIZE),
        ])
    else:
        train_main_transform = own_transforms.Compose([
            own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
            own_transforms.RandomHorizontallyFlip()
        ])
        val_main_transform = None

    # Ground-truth transform: FDST additionally downscales the density map.
    if dataset == 'FDST':
        gt_transform = standard_transforms.Compose([
            own_transforms.GTScaleDown(cfg_data.TRAIN_DOWNRATE),
            own_transforms.LabelNormalize(count_scale)
        ])
    else:
        gt_transform = standard_transforms.Compose([
            own_transforms.LabelNormalize(count_scale)
        ])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage()
    ])

    return (train_main_transform, val_main_transform, img_transform,
            gt_transform, restore_transform)
def loading_data():
    """Build SHHA loaders; the train collate strategy depends on batch size.

    Returns:
        (train_loader, val_loader, restore_transform); train_loader stays
        None if cfg_data.TRAIN_BATCH_SIZE < 1.
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA
    down_factor = cfg_data.LABEL_FACTOR

    train_main_transform = own_transforms.Compose(
        [own_transforms.RandomHorizontallyFlip()])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(down_factor),
        own_transforms.LabelNormalize(count_scale),
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    train_set = SHHA(cfg_data.DATA_PATH + '/train_data', mode='train',
                     preload=True,
                     main_transform=train_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)

    # Batch size 1 keeps raw variable-size images; larger batches need the
    # cropping collate so all samples in a batch share one shape.
    batch = cfg_data.TRAIN_BATCH_SIZE
    train_loader = None
    if batch == 1:
        train_loader = DataLoader(train_set, batch_size=1, num_workers=0,
                                  collate_fn=SHHA_raw_collate,
                                  shuffle=True, drop_last=True)
    elif batch > 1:
        train_loader = DataLoader(train_set, batch_size=batch,
                                  num_workers=8,
                                  collate_fn=SHHA_crop_collate,
                                  shuffle=True, drop_last=True)

    val_set = SHHA(cfg_data.DATA_PATH + '/test_data', mode='test',
                   preload=True, main_transform=None,
                   img_transform=img_transform,
                   gt_transform=gt_transform)
    val_loader = DataLoader(val_set, batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0, collate_fn=SHHA_raw_collate,
                            shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
def loading_data():
    """Build GCC train/val loaders for the split selected by cfg_data.VAL_MODE.

    VAL_MODE selects the txt split files: 'rd' (random), 'cc' (cross-camera)
    or 'cl' (cross-location).

    Returns:
        (train_loader, val_loader, restore_transform)

    Raises:
        ValueError: if cfg_data.VAL_MODE is not 'rd', 'cc' or 'cl'.
            (Previously an unknown mode left test_list/train_list unbound
            and crashed later with a NameError.)
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    # Training augmentation: horizontal flip only (random crop disabled).
    train_main_transform = own_transforms.Compose([
        # own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    if cfg_data.VAL_MODE == 'rd':
        test_list = 'test_list.txt'
        train_list = 'train_list.txt'
    elif cfg_data.VAL_MODE == 'cc':
        test_list = 'cross_camera_test_list.txt'
        train_list = 'cross_camera_train_list.txt'
    elif cfg_data.VAL_MODE == 'cl':
        test_list = 'cross_location_test_list.txt'
        train_list = 'cross_location_train_list.txt'
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError("cfg_data.VAL_MODE must be 'rd', 'cc' or 'cl', "
                         "got %r" % (cfg_data.VAL_MODE,))

    train_set = GCC(cfg_data.DATA_PATH + '/txt_list/' + train_list, 'train',
                    main_transform=train_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = GCC(cfg_data.DATA_PATH + '/txt_list/' + test_list, 'test',
                  main_transform=None,
                  img_transform=img_transform,
                  gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
def createTrainData(datasetname, Dataset, cfg_data):
    """Build the training DataLoader for one supported counting dataset.

    Args:
        datasetname: one of 'SHHA', 'SHHB', 'QNRF', 'JHU', 'NWPU', 'FDST'.
        Dataset: dataset class, instantiated as Dataset(datasetname, 'train', ...).
        cfg_data: config providing DATA_PATH, TRAIN_LST, TRAIN_SIZE,
            MEAN_STD and TRAIN_BATCH_SIZE.

    Returns:
        A training DataLoader. FDST uses a with-replacement RandomSampler of
        1000 samples per epoch; all other datasets shuffle normally.

    Raises:
        ValueError: for an unsupported datasetname. (Previously this only
        printed 'dataset is not exist' and continued with list_file=None,
        crashing later inside the Dataset constructor.)
    """
    supported = ['SHHA', 'SHHB', 'QNRF', 'JHU', 'NWPU', 'FDST']
    if datasetname not in supported:
        raise ValueError('dataset does not exist: %r' % (datasetname,))

    list_file = [{
        'data_path': cfg_data.DATA_PATH,
        'imgId_txt': cfg_data.TRAIN_LST,
        'box_gt_txt': []
    }]

    # Joint (image + mask) augmentation: random rescale, crop, flip.
    main_transform = own_transforms.Compose([
        own_transforms.ScaleByRateWithMin([0.8, 1.2],
                                          cfg_data.TRAIN_SIZE[1],
                                          cfg_data.TRAIN_SIZE[0]),
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*cfg_data.MEAN_STD)
    ])
    mask_transform = standard_transforms.Compose(
        [standard_transforms.ToTensor()])

    train_set = Dataset(datasetname, 'train',
                        main_transform=main_transform,
                        img_transform=img_transform,
                        mask_transform=mask_transform,
                        list_file=list_file)

    if datasetname == 'FDST':
        # Fixed-size epoch: draw 1000 frames with replacement.
        train_sampler = RandomSampler(data_source=train_set,
                                      replacement=True,
                                      num_samples=1000)
        return DataLoader(train_set,
                          batch_size=cfg_data.TRAIN_BATCH_SIZE,
                          sampler=train_sampler,
                          num_workers=6,
                          drop_last=True)
    return DataLoader(train_set,
                      batch_size=cfg_data.TRAIN_BATCH_SIZE,
                      num_workers=6,
                      shuffle=True,
                      drop_last=True)
def loading_data():
    """Build QNRF train/val loaders with dataset-specific collate functions.

    Returns:
        (train_loader, val_loader, restore_transform)
    """
    norm_stats = cfg_data.MEAN_STD
    count_scale = cfg_data.LOG_PARA

    train_main_transform = own_transforms.Compose(
        [own_transforms.RandomHorizontallyFlip()])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(count_scale)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    train_set = QNRF(cfg_data.DATA_PATH + '/train', 'train',
                     main_transform=train_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform,
                     data_augment=1)
    # Training uses the multi-batch collate; validation the one-batch collate.
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0,
                              collate_fn=SHHA_collate_multibatch,
                              shuffle=True, drop_last=True)

    val_set = QNRF(cfg_data.DATA_PATH + '/test', 'test',
                   main_transform=None,
                   img_transform=img_transform,
                   gt_transform=gt_transform,
                   data_augment=1)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0,
                            collate_fn=SHHA_collate_onebatch,
                            shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
def loading_data():
    """Build UCF-QNRF train/val datasets and loaders.

    Returns:
        (train_set, train_loader, val_set, val_loader, restore_transform)
    """
    norm_stats = cfg.DATA.MEAN_STD
    count_scale = cfg.DATA.LOG_PARA
    down_factor = cfg.DATA.LABEL_FACTOR

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg.TRAIN.INPUT_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = None  # validate on full-size images

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(down_factor),
        own_transforms.LabelNormalize(count_scale),
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    train_set = UCF_QNRF(cfg.DATA.DATA_PATH + '/train', 'train',
                         main_transform=train_main_transform,
                         img_transform=img_transform,
                         gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = UCF_QNRF(cfg.DATA.DATA_PATH + '/test', 'test',
                       main_transform=val_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg.VAL.BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=False)

    return train_set, train_loader, val_set, val_loader, restore_transform
def loading_data():
    """Build ShanghaiTech part B train/val datasets and loaders.

    Returns:
        (train_set, train_loader, val_set, val_loader, restore_transform)
    """
    norm_stats = cfg.DATA.MEAN_STD

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg.TRAIN.INPUT_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = None

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*norm_stats),
    ])
    # Density maps are only tensorized here — no log-scale normalization.
    gt_transform = standard_transforms.Compose(
        [standard_transforms.ToTensor()])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*norm_stats),
        standard_transforms.ToPILImage(),
    ])

    train_set = SHT_B(cfg.DATA.DATA_PATH + '/train_data',
                      main_transform=train_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = SHT_B(cfg.DATA.DATA_PATH + '/test_data',
                    main_transform=val_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    # NOTE(review): drop_last=True on the validation loader silently skips
    # the final partial batch — confirm this is intended.
    val_loader = DataLoader(val_set,
                            batch_size=cfg.VAL.BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=True)

    return train_set, train_loader, val_set, val_loader, restore_transform
def createTrainData(datasetname, Dataset, cfg_data):
    """Build the training DataLoader for GCC or NWPU.

    Args:
        datasetname: 'GCC' or 'NWPU'.
        Dataset: dataset class, instantiated as
            Dataset(train_path, datasetname, 'train', ...).
        cfg_data: config providing DATA_PATH, VAL_MODE (GCC only),
            TRAIN_SIZE, MEAN_STD, LOG_PARA and TRAIN_BATCH_SIZE.

    Returns:
        A shuffled training DataLoader.

    Raises:
        ValueError: for any other dataset name. (Previously an unsupported
        name left train_path unassigned and raised a confusing NameError;
        the original also began with a dead `cfg_data.DATA_PATH + '/train'`
        expression, removed here.)
    """
    folder = None
    if datasetname == 'GCC':
        # The GCC split-list file depends on the configured validation mode.
        train_list = common.gccvalmode2list(cfg_data.VAL_MODE, True)
        list_file = os.path.join(cfg_data.DATA_PATH, 'txt_list', train_list)
        train_path = cfg_data.DATA_PATH
    elif datasetname == 'NWPU':
        list_file = os.path.join(cfg_data.DATA_PATH, 'txt_list/train.txt')
        train_path = cfg_data.DATA_PATH
    else:
        raise ValueError('unsupported dataset: %r' % (datasetname,))

    main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    # Photometric augmentation (grayscale with prob 0.1, gamma jitter in
    # [0.4, 2]) before tensor conversion and normalization.
    img_transform = standard_transforms.Compose([
        own_transforms.RGB2Gray(0.1),
        own_transforms.GammaCorrection([0.4, 2]),
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*cfg_data.MEAN_STD)
    ])
    # Dot maps: tensorize, multiply by 255.0 (presumably undoing ToTensor's
    # scaling — confirm against the dataset's dot-map format), then apply
    # log-para normalization.
    dot_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        own_transforms.tensormul(255.0),
        own_transforms.LabelNormalize(cfg_data.LOG_PARA),
    ])

    train_set = Dataset(train_path, datasetname, 'train',
                        main_transform=main_transform,
                        img_transform=img_transform,
                        dot_transform=dot_transform,
                        list_file=list_file,
                        folder=folder)
    return DataLoader(train_set,
                      batch_size=cfg_data.TRAIN_BATCH_SIZE,
                      num_workers=0, shuffle=True, drop_last=True)
def get_data(train_shape_views_folder, test_shape_views_folder,
             train_shape_flist, test_shape_flist, train_sketch_folder,
             test_sketch_folder, train_sketch_flist, test_sketch_flist,
             height, width, batch_size, workers, pk_flag=False):
    """Build sketch and shape-view train/test DataLoaders.

    Args:
        train_shape_views_folder / test_shape_views_folder: roots of the
            rendered shape-view images.
        train_shape_flist / test_shape_flist: file lists for the shape splits.
        train_sketch_folder / test_sketch_folder: roots of the sketch images.
        train_sketch_flist / test_sketch_flist: file lists for the sketch
            splits; the extension is inferred from train_sketch_flist.
        height, width: target size passed to RectScale.
        batch_size: base batch size (the non-pk sketch train loader uses 2x).
        workers: DataLoader worker count.
        pk_flag: when True, train loaders are built without shuffle (the
            identity sampler that used to go here is commented out).

    Returns:
        (train_sketch_loader, train_shape_loader,
         test_sketch_loader, test_shape_loader)
    """
    # ImageNet channel statistics.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        # T.RandomSizedRectCrop(height, width),
        T.RectScale(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
    ])
    test_transformer = T.Compose([
        T.RectScale(height, width),
        T.ToTensor(),
        normalizer,
    ])
    # define sketch dataset
    # Lists whose name contains 'image' hold .JPEG files; all others .png.
    extname = '.png' if 'image' not in train_sketch_flist else '.JPEG'
    sketch_train_data = sk_dataset.Sk_Dataset(train_sketch_folder,
                                              train_sketch_flist,
                                              transform=train_transformer,
                                              ext=extname)
    sketch_test_data = sk_dataset.Sk_Dataset(test_sketch_folder,
                                             test_sketch_flist,
                                             transform=test_transformer,
                                             ext=extname)
    # define shape views dataset
    shape_train_data = sh_views_dataset.Sh_Views_Dataset(
        train_shape_views_folder, train_shape_flist,
        transform=train_transformer)
    shape_test_data = sh_views_dataset.Sh_Views_Dataset(
        test_shape_views_folder, test_shape_flist,
        transform=test_transformer)
    # num_classes = sketch_train_data.num_classes
    if pk_flag:
        # NOTE(review): with the identity sampler commented out and no
        # shuffle flag, these loaders iterate in dataset order — confirm
        # this is intended for pk-style sampling.
        train_sketch_loader = DataLoader(
            sketch_train_data, batch_size=batch_size, num_workers=workers,
            # sampler=RandomIdentitySampler(sketch_train_data.imgs, num_instances),
            pin_memory=True, drop_last=True)
        train_shape_loader = DataLoader(
            shape_train_data, batch_size=batch_size, num_workers=workers,
            # sampler=RandomIdentitySampler(shape_train_data.imgs, num_instances),
            pin_memory=True, drop_last=True)
    else:
        # Sketches are drawn at twice the shape batch size.
        train_sketch_loader = DataLoader(
            sketch_train_data, batch_size=batch_size*2,
            num_workers=workers, shuffle=True,
            pin_memory=True, drop_last=True)
        train_shape_loader = DataLoader(
            shape_train_data, batch_size=batch_size,
            num_workers=workers, shuffle=True,
            pin_memory=True, drop_last=True)
    test_sketch_loader = DataLoader(
        sketch_test_data, batch_size=batch_size,
        num_workers=workers, shuffle=False, pin_memory=True)
    test_shape_loader = DataLoader(
        shape_test_data, batch_size=batch_size,
        num_workers=workers, shuffle=False, pin_memory=True)
    # sketch_weight = utils.get_weight(sketch_train_data.imgs)
    # shape_weight = utils.get_weight(shape_train_data.imgs)
    # cls_weight = sketch_weight / (train_sketch_loader.batch_size*1.0 / train_shape_loader.batch_size) + shape_weight
    # cls_weight = cls_weight / cls_weight.sum() * cls_weight.size
    # cls_weight = torch.Tensor(cls_weight)
    return train_sketch_loader, train_shape_loader, test_sketch_loader, test_shape_loader  # , cls_weight
def loading_data(args):
    """Build source (GCC) and target loaders for domain-adaptation training.

    For args.phase in ('DA_train', 'fine_tune') returns
        (sou_loader, tar_shot_loader, tar_val_loader, tar_test_loader,
         restore_transform),
    where the target loaders depend on args.target_dataset.
    For args.phase == 'pre_train' returns
        (train_loader, val_loader, restore_transform).

    NOTE(review): an unrecognized args.target_dataset only prints a message,
    so the subsequent return raises NameError (tar_shot_loader undefined);
    the message also lists UCF50 (no branch here) and omits MALL/WE.
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    # Source-domain augmentation: random crop + horizontal flip.
    sou_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
        # Rand_Augment()
    ])
    # converts a PIL Image(H*W*C) in the range[0,255]
    # to a torch.FloatTensor of shape (C*H*W) in the range[0.0, 1.0]
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])
    if args.phase == 'DA_train' or args.phase == 'fine_tune':
        # Load meta-train set
        # NOTE(review): the three IFS_path assignments below are dead code —
        # the GCC call passes IFS_path=None.
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2SHHB/s2t'
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2QNRF/s2t'
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2WE/s2t'
        trainset = GCC('train',
                       main_transform=sou_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform,
                       filter_rule=cfg_data.FILTER_RULE,
                       IFS_path=None)
        sou_loader = DataLoader(trainset,
                                batch_size=cfg_data.sou_batch_size,
                                shuffle=True, num_workers=12,
                                drop_last=True, pin_memory=True)
        if args.target_dataset == 'QNRF':
            tar_main_transform = own_transforms.Compose(
                [own_transforms.RandomHorizontallyFlip()])
            trainset = QNRF('train', main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True, num_workers=12,
                                         collate_fn=SHHA_collate,
                                         drop_last=True)
            valset = QNRF('val', img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset, batch_size=1,
                                        num_workers=8, pin_memory=True)
            testset = QNRF('test', img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset, batch_size=1,
                                         num_workers=8, pin_memory=True)
        elif args.target_dataset == 'SHHA':
            tar_main_transform = own_transforms.Compose(
                [own_transforms.RandomHorizontallyFlip()])
            trainset = SHHA('train', main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True, num_workers=12,
                                         collate_fn=SHHA_collate,
                                         drop_last=True)
            valset = SHHA('val', img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset, batch_size=1,
                                        num_workers=8, pin_memory=True)
            testset = SHHA('test', img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset, batch_size=1,
                                         num_workers=8, pin_memory=True)
        elif args.target_dataset == 'MALL':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.MALL_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip()
            ])
            trainset = MALL('train', main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True, num_workers=12,
                                         drop_last=True, pin_memory=True)
            valset = MALL('val', img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset, batch_size=8,
                                        num_workers=8, pin_memory=True)
            testset = MALL('test', img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset, batch_size=12,
                                         num_workers=8, pin_memory=True)
        elif args.target_dataset == 'UCSD':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.UCSD_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
            ])
            trainset = UCSD('train', main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True, num_workers=12,
                                         drop_last=True, pin_memory=True)
            valset = UCSD('val', img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset, batch_size=8,
                                        num_workers=8, pin_memory=True)
            testset = UCSD('test', img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset, batch_size=12,
                                         num_workers=8, pin_memory=True)
        elif args.target_dataset == 'SHHB':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.SHHB_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
                # Rand_Augment()
            ])
            trainset = SHHB('train', main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True, num_workers=8,
                                         drop_last=True, pin_memory=True)
            valset = SHHB('val', img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset, batch_size=8,
                                        num_workers=8, pin_memory=True)
            testset = SHHB('test', img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset, batch_size=8,
                                         num_workers=8, pin_memory=True)
        elif args.target_dataset == 'WE':
            # WE uses one test loader per scene, collected in a list.
            tar_test_loader = []
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.WE_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
                # Rand_Augment()
            ])
            trainset = WE(None, 'train', main_transform=tar_main_transform,
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True, num_workers=8,
                                         drop_last=True, pin_memory=True)
            valset = WE(None, 'val', main_transform=tar_main_transform,
                        img_transform=img_transform,
                        gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset, batch_size=12,
                                        shuffle=False, num_workers=8,
                                        drop_last=False, pin_memory=True)
            for subname in cfg_data.WE_test_list:
                sub_set = WE(subname, 'test',
                             img_transform=img_transform,
                             gt_transform=gt_transform)
                tar_test_loader.append(
                    DataLoader(sub_set, batch_size=12,
                               num_workers=8, pin_memory=True))
        else:
            print(
                "Please set the target dataset as one of them:SHHB, UCF50, QNRF, MALL, UCSD, SHHA"
            )
        return sou_loader, tar_shot_loader, tar_val_loader, tar_test_loader, restore_transform
    if args.phase == 'pre_train':
        trainset = GCC('train', main_transform=sou_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
        train_loader = DataLoader(trainset,
                                  batch_size=args.pre_batch_size,
                                  shuffle=True, num_workers=8,
                                  drop_last=True, pin_memory=True)
        valset = GCC('val', img_transform=img_transform,
                     gt_transform=gt_transform)
        val_loader = DataLoader(valset, batch_size=12,
                                num_workers=8, pin_memory=True)
        return train_loader, val_loader, restore_transform
def loading_data():
    """Build NTU train/val loaders for the split chosen by cfg_data.VAL_MODE.

    Most modes return (train_loader, val_loader, restore_transform); the
    'hall_DA' mode additionally returns a target-domain train loader:
    (train_loader, train_target_loader, val_loader, restore_transform).

    NOTE(review): an unrecognized VAL_MODE leaves train_list/test_list
    unbound and raises NameError below — consider an explicit else branch.
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    # Training augmentation: horizontal flip only (random crop disabled).
    train_main_transform = own_transforms.Compose([
        # own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])
    # Map the configured validation mode to its train/test split files.
    if cfg_data.VAL_MODE == 'normal':
        test_list = 'Train Test Splitting list/normal_training/NTU_test_correct.txt'
        train_list = 'Train Test Splitting list/normal_training/NTU_train_correct.txt'
    elif cfg_data.VAL_MODE == 'density':
        test_list = 'density_ab+ssc/NTU_density_test_ssc+ab.txt'
        train_list = 'density_ab+ssc/NTU_density_train_ssc+ab.txt'
    elif cfg_data.VAL_MODE == 'normal_ab_only':
        test_list = 'normal_ab_only/NTU_test_ab_only.txt'
        train_list = 'normal_ab_only/NTU_train_ab_only.txt'
    elif cfg_data.VAL_MODE == 'normal_ssc_only':
        test_list = 'normal_ssc_only/NTU_test_ssc_only.txt'
        train_list = 'normal_ssc_only/NTU_train_ssc_only.txt'
    elif cfg_data.VAL_MODE == 'density_ssc_only':
        test_list = 'density_ssc_only/NTU_density_test_ssc_only.txt'
        train_list = 'density_ssc_only/NTU_density_train_ssc_only.txt'
    elif cfg_data.VAL_MODE == 'density_ab_only':
        test_list = 'density_ab_only/NTU_density_test_ab_only.txt'
        train_list = 'density_ab_only/NTU_density_train_ab_only.txt'
    elif cfg_data.VAL_MODE == 'normal_train_ab_test_ssc':
        test_list = 'normal_train_ab_test_ssc/NTU_test_ssc_correct.txt'
        train_list = 'normal_train_ab_test_ssc/NTU_train_ab_correct.txt'
    elif cfg_data.VAL_MODE == 'normal_train_ssc_test_ab':
        test_list = 'normal_train_ssc_test_ab/NTU_test_ab_correct.txt'
        train_list = 'normal_train_ssc_test_ab/NTU_train_ssc_correct.txt'
    elif cfg_data.VAL_MODE == 'density_train_ssc_test_ab':
        test_list = 'density_train_ssc_test_ab/NTU_density_split_test_ab_correct.txt'
        train_list = 'density_train_ssc_test_ab/NTU_density_split_train_ssc_correct.txt'
    elif cfg_data.VAL_MODE == 'density_train_ab_test_ssc':
        test_list = 'density_train_ab_test_ssc/NTU_density_split_test_ssc_correct.txt'
        train_list = 'density_train_ab_test_ssc/NTU_density_split_train_ab_correct.txt'
    elif cfg_data.VAL_MODE == 'hall':
        test_list = 'new_split_list/test.txt'
        train_list = 'new_split_list/train.txt'
    elif cfg_data.VAL_MODE == 'hall_DA':
        # Domain adaptation: source = NTU normal split, target = hall split.
        test_list = 'new_split_list/test.txt'
        train_list = 'Train Test Splitting list/normal_training/NTU_train_correct.txt'
        train_target_list = 'new_split_list/train.txt'
    elif cfg_data.VAL_MODE == 'cycleGAN':
        test_list = 'new_split_list/cycle_test.txt'
        train_list = 'Train Test Splitting list/normal_training/NTU_train_correct.txt'
    train_set = NTU(cfg_data.DATA_PATH + train_list, 'train',
                    main_transform=train_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)
    val_set = NTU(cfg_data.DATA_PATH + test_list, 'test',
                  main_transform=None,
                  img_transform=img_transform,
                  gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=False)
    if cfg_data.VAL_MODE == 'hall_DA':
        # Extra loader over the target-domain training split (no augmentation).
        train_target_set = NTU(cfg_data.DATA_PATH + train_target_list, 'train',
                               main_transform=None,
                               img_transform=img_transform,
                               gt_transform=gt_transform)
        train_target_loader = DataLoader(train_target_set,
                                         batch_size=cfg_data.TRAIN_BATCH_SIZE,
                                         num_workers=8, shuffle=True,
                                         drop_last=True)
        print('source domain:', train_list)
        print('target domain:', train_target_list)
        return train_loader, train_target_loader, val_loader, restore_transform
    return train_loader, val_loader, restore_transform
def loading_data():
    """Build audio-visual crowd-counting (AC) train/val/test loaders.

    cfg_data.IS_CROSS_SCENE selects the cross-scene density folders; those
    dataset constructions omit the black-area/denoise arguments used by the
    in-scene splits.

    Returns:
        (train_loader, val_loader, test_loader, restore_transform)

    NOTE(review): train_loader stays None when cfg_data.TRAIN_BATCH_SIZE < 1.
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    factor = cfg_data.LABEL_FACTOR
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(factor),
        own_transforms.LabelNormalize(log_para)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])
    if cfg_data.IS_CROSS_SCENE:
        train_set = AC(img_path=cfg_data.IMAGE_PATH,
                       den_path=cfg_data.DENSITY_PATH + '/cross_scene_train',
                       aud_path=cfg_data.AUDIO_PATH,
                       mode='train',
                       main_transform=train_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform,
                       is_noise=cfg_data.IS_NOISE,
                       brightness_decay=cfg_data.BRIGHTNESS,
                       noise_sigma=cfg_data.NOISE_SIGMA,
                       longest_side=cfg_data.LONGEST_SIDE
                       )
    else:
        train_set = AC(img_path=cfg_data.IMAGE_PATH,
                       den_path=cfg_data.DENSITY_PATH + '/train',
                       aud_path=cfg_data.AUDIO_PATH,
                       mode='train',
                       main_transform=train_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform,
                       is_noise=cfg_data.IS_NOISE,
                       brightness_decay=cfg_data.BRIGHTNESS,
                       noise_sigma=cfg_data.NOISE_SIGMA,
                       longest_side=cfg_data.LONGEST_SIDE,
                       black_area_ratio=cfg_data.BLACK_AREA_RATIO,
                       is_random=cfg_data.IS_RANDOM,
                       is_denoise=cfg_data.IS_DENOISE
                       )
    # Batch size 1 needs no custom collate; larger batches use AC_collate.
    train_loader = None
    if cfg_data.TRAIN_BATCH_SIZE == 1:
        train_loader = DataLoader(train_set, batch_size=1, num_workers=8,
                                  shuffle=True, drop_last=True)
    elif cfg_data.TRAIN_BATCH_SIZE > 1:
        train_loader = DataLoader(train_set,
                                  batch_size=cfg_data.TRAIN_BATCH_SIZE,
                                  num_workers=8, collate_fn=AC_collate,
                                  shuffle=True, drop_last=True)
    if cfg_data.IS_CROSS_SCENE:
        val_set = AC(img_path=cfg_data.IMAGE_PATH,
                     den_path=cfg_data.DENSITY_PATH + '/cross_scene_val',
                     aud_path=cfg_data.AUDIO_PATH,
                     mode='val',
                     main_transform=None,
                     img_transform=img_transform,
                     gt_transform=gt_transform,
                     is_noise=cfg_data.IS_NOISE,
                     brightness_decay=cfg_data.BRIGHTNESS,
                     noise_sigma=cfg_data.NOISE_SIGMA,
                     longest_side=cfg_data.LONGEST_SIDE
                     )
    else:
        val_set = AC(img_path=cfg_data.IMAGE_PATH,
                     den_path=cfg_data.DENSITY_PATH + '/val',
                     aud_path=cfg_data.AUDIO_PATH,
                     mode='val',
                     main_transform=None,
                     img_transform=img_transform,
                     gt_transform=gt_transform,
                     is_noise=cfg_data.IS_NOISE,
                     brightness_decay=cfg_data.BRIGHTNESS,
                     noise_sigma=cfg_data.NOISE_SIGMA,
                     longest_side=cfg_data.LONGEST_SIDE,
                     black_area_ratio=cfg_data.BLACK_AREA_RATIO,
                     is_random=cfg_data.IS_RANDOM,
                     is_denoise=cfg_data.IS_DENOISE
                     )
    val_loader = DataLoader(val_set, batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=1, shuffle=False, drop_last=False)
    if cfg_data.IS_CROSS_SCENE:
        test_set = AC(img_path=cfg_data.IMAGE_PATH,
                      den_path=cfg_data.DENSITY_PATH + '/cross_scene_test',
                      aud_path=cfg_data.AUDIO_PATH,
                      mode='test',
                      main_transform=None,
                      img_transform=img_transform,
                      gt_transform=gt_transform,
                      is_noise=cfg_data.IS_NOISE,
                      brightness_decay=cfg_data.BRIGHTNESS,
                      noise_sigma=cfg_data.NOISE_SIGMA,
                      longest_side=cfg_data.LONGEST_SIDE
                      )
    else:
        test_set = AC(img_path=cfg_data.IMAGE_PATH,
                      den_path=cfg_data.DENSITY_PATH + '/test',
                      aud_path=cfg_data.AUDIO_PATH,
                      mode='test',
                      main_transform=None,
                      img_transform=img_transform,
                      gt_transform=gt_transform,
                      is_noise=cfg_data.IS_NOISE,
                      brightness_decay=cfg_data.BRIGHTNESS,
                      noise_sigma=cfg_data.NOISE_SIGMA,
                      longest_side=cfg_data.LONGEST_SIDE,
                      black_area_ratio=cfg_data.BLACK_AREA_RATIO,
                      is_random=cfg_data.IS_RANDOM,
                      is_denoise=cfg_data.IS_DENOISE
                      )
    test_loader = DataLoader(test_set, batch_size=cfg_data.VAL_BATCH_SIZE,
                             num_workers=1, shuffle=False, drop_last=False)
    return train_loader, val_loader, test_loader, restore_transform
from models.CC import CrowdCounter from config import cfg from misc.utils import * import scipy.io as sio from PIL import Image, ImageOps import shutil #torch.cuda.set_device(0) torch.backends.cudnn.benchmark = True exp_name = 'SHHA_results' mean_std = ([0.452016860247, 0.447249650955, 0.431981861591], [0.23242045939, 0.224925786257, 0.221840232611]) val_main_transform = own_transforms.Compose( [own_transforms.RandomCrop((576, 768))]) img_transform = standard_transforms.Compose( [standard_transforms.ToTensor(), standard_transforms.Normalize(*mean_std)]) restore = standard_transforms.Compose( [own_transforms.DeNormalize(*mean_std), standard_transforms.ToPILImage()]) pil_to_tensor = standard_transforms.ToTensor() dataRoot = 'datasets/ProcessedData/shanghaitech_part_A/test' model_path = 'D:/FromBinusServer/all_ep_37_mae_62.4_mse_108.0.pth' def main():