Example no. 1
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)
    ])
    val_main_transform = None  # comment out this line to validate on crops (as in training) instead of full images
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.LabelNormalize(log_para)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = Venice(cfg_data.DATA_PATH + '/train', 'train', main_transform=train_main_transform, img_transform=img_transform, gt_transform=gt_transform)
    train_loader = DataLoader(train_set, batch_size=cfg_data.TRAIN_BATCH_SIZE, num_workers=8, shuffle=True, drop_last=True)
    

    val_set = Venice(cfg_data.DATA_PATH+'/test', 'test', main_transform=val_main_transform, img_transform=img_transform, gt_transform=gt_transform)
    val_loader = DataLoader(val_set, batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=8, shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
Example no. 2
def loading_data(train_mode):

    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        # own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.LabelNormalize(log_para)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_loader = None
    if train_mode == 'DA':
        train_set = Mall(cfg_data.DATA_PATH + '/train', 'train', main_transform=train_main_transform, img_transform=img_transform, gt_transform=gt_transform)
        train_loader = DataLoader(train_set, batch_size=cfg_data.TRAIN_BATCH_SIZE, num_workers=0, shuffle=True, drop_last=True)
    

    val_set = Mall(cfg_data.DATA_PATH+'/test', 'test', main_transform=None, img_transform=img_transform, gt_transform=gt_transform)
    val_loader = DataLoader(val_set, batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=0, shuffle=False, drop_last=False)

    return train_loader, val_loader, restore_transform
Example no. 3
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    factor = cfg_data.LABEL_FACTOR
    train_main_transform = own_transforms.Compose(
        [own_transforms.RandomHorizontallyFlip()])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(factor),
        own_transforms.LabelNormalize(log_para)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = SHHA(cfg_data.DATA_PATH + '/train_data',
                     mode='train',
                     preload=True,
                     main_transform=train_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)

    train_loader = None

    if cfg_data.TRAIN_BATCH_SIZE == 1:
        train_loader = DataLoader(train_set,
                                  batch_size=1,
                                  num_workers=0,
                                  collate_fn=SHHA_raw_collate,
                                  shuffle=True,
                                  drop_last=True)

    elif cfg_data.TRAIN_BATCH_SIZE > 1:
        train_loader = DataLoader(train_set,
                                  batch_size=cfg_data.TRAIN_BATCH_SIZE,
                                  num_workers=8,
                                  collate_fn=SHHA_crop_collate,
                                  shuffle=True,
                                  drop_last=True)

    val_set = SHHA(cfg_data.DATA_PATH + '/test_data',
                   mode='test',
                   preload=True,
                   main_transform=None,
                   img_transform=img_transform,
                   gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0,
                            collate_fn=SHHA_raw_collate,
                            shuffle=True,
                            drop_last=False)

    return train_loader, val_loader, restore_transform
Example no. 4
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        # own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    if cfg_data.VAL_MODE == 'rd':
        test_list = 'test_list.txt'
        train_list = 'train_list.txt'
    elif cfg_data.VAL_MODE == 'cc':
        test_list = 'cross_camera_test_list.txt'
        train_list = 'cross_camera_train_list.txt'
    elif cfg_data.VAL_MODE == 'cl':
        test_list = 'cross_location_test_list.txt'
        train_list = 'cross_location_train_list.txt'
    else:
        raise ValueError('Unknown cfg_data.VAL_MODE: {}'.format(cfg_data.VAL_MODE))

    train_set = GCC(cfg_data.DATA_PATH + '/txt_list/' + train_list,
                    'train',
                    main_transform=train_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8,
                              shuffle=True,
                              drop_last=True)

    val_set = GCC(cfg_data.DATA_PATH + '/txt_list/' + test_list,
                  'test',
                  main_transform=None,
                  img_transform=img_transform,
                  gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=8,
                            shuffle=True,
                            drop_last=False)

    return train_loader, val_loader, restore_transform
Example no. 5
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    val_folder = cfg_data.VAL_INDEX
    train_folder = get_train_folder(val_folder)

    train_set = UCF50(cfg_data.DATA_PATH,
                      train_folder,
                      'train',
                      main_transform=train_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0,
                              collate_fn=SHHA_collate,
                              shuffle=True,
                              drop_last=True)

    val_set = UCF50(cfg_data.DATA_PATH,
                    map(int, str(val_folder)),
                    'test',
                    main_transform=val_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0,
                            collate_fn=SHHA_collate,
                            shuffle=True,
                            drop_last=False)

    return train_loader, val_loader, restore_transform
Example no. 6
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = WE(cfg_data.DATA_PATH + '/train',
                   'train',
                   main_transform=train_main_transform,
                   img_transform=img_transform,
                   gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0,
                              shuffle=True,
                              drop_last=True)

    test_name = cfg_data.VAL_FOLDER

    val_loader = []

    for subname in test_name:
        sub_set = WE(cfg_data.DATA_PATH + '/test/' + subname,
                     'test',
                     main_transform=val_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)
        val_loader.append(
            DataLoader(sub_set,
                       batch_size=cfg_data.VAL_BATCH_SIZE,
                       num_workers=0,
                       shuffle=True,
                       drop_last=True))

    return train_loader, val_loader, restore_transform
Example no. 7
def data_transforms(cfg_data):

    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    # train and val main data transformations
    if cfg_data.DATASET == 'City':
        train_main_transform = own_transforms.Compose([
            own_transforms.RandomHorizontallyFlip()
        ])
        val_main_transform = None
    elif cfg_data.DATASET in ['FDST', 'PETS']:
        train_main_transform = standard_transforms.Compose([
            own_transforms.FreeScale(cfg_data.TRAIN_SIZE),
        ])
        val_main_transform = standard_transforms.Compose([
            own_transforms.FreeScale(cfg_data.TRAIN_SIZE),
        ])
    else:
        train_main_transform = own_transforms.Compose([
            own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
            own_transforms.RandomHorizontallyFlip()
        ])

        val_main_transform = None

    # image and gt transformations
    if cfg_data.DATASET == 'FDST':
        gt_transform = standard_transforms.Compose([
            own_transforms.GTScaleDown(cfg_data.TRAIN_DOWNRATE),
            own_transforms.LabelNormalize(log_para)
        ])
    else:
        gt_transform = standard_transforms.Compose([
            own_transforms.LabelNormalize(log_para)
        ])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])

    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    return train_main_transform, val_main_transform, img_transform, gt_transform, restore_transform
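A minimal usage sketch for data_transforms above, assuming a dataset class with the same main_transform/img_transform/gt_transform keyword interface as the other examples; the class name SomeDataset and the paths are placeholders, not taken from the original code, and cfg_data/DataLoader are assumed to be in scope as in the surrounding examples.

# `SomeDataset` is a placeholder for the concrete dataset class (City, FDST, PETS, ...).
train_main_transform, val_main_transform, img_transform, gt_transform, restore_transform = \
    data_transforms(cfg_data)
train_set = SomeDataset(cfg_data.DATA_PATH + '/train', 'train',
                        main_transform=train_main_transform,
                        img_transform=img_transform,
                        gt_transform=gt_transform)
train_loader = DataLoader(train_set,
                          batch_size=cfg_data.TRAIN_BATCH_SIZE,
                          num_workers=8,
                          shuffle=True,
                          drop_last=True)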
Example no. 8
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    train_main_transform = own_transforms.Compose(
        [own_transforms.RandomHorizontallyFlip()])

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])

    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])

    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = QNRF(cfg_data.DATA_PATH + '/train',
                     'train',
                     main_transform=train_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform,
                     data_augment=1)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0,
                              collate_fn=SHHA_collate_multibatch,
                              shuffle=True,
                              drop_last=True)

    val_set = QNRF(cfg_data.DATA_PATH + '/test',
                   'test',
                   main_transform=None,
                   img_transform=img_transform,
                   gt_transform=gt_transform,
                   data_augment=1)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0,
                            collate_fn=SHHA_collate_onebatch,
                            shuffle=True,
                            drop_last=False)

    return train_loader, val_loader, restore_transform
Example no. 9
def loading_data():
    # UCF-QNRF
    mean_std = cfg.DATA.MEAN_STD
    log_para = cfg.DATA.LOG_PARA
    factor = cfg.DATA.LABEL_FACTOR
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg.TRAIN.INPUT_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = None
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(factor),
        own_transforms.LabelNormalize(log_para)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = UCF_QNRF(cfg.DATA.DATA_PATH + '/train',
                         'train',
                         main_transform=train_main_transform,
                         img_transform=img_transform,
                         gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=8,
                              shuffle=True,
                              drop_last=True)

    val_set = UCF_QNRF(cfg.DATA.DATA_PATH + '/test',
                       'test',
                       main_transform=val_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg.VAL.BATCH_SIZE,
                            num_workers=8,
                            shuffle=True,
                            drop_last=False)

    return train_set, train_loader, val_set, val_loader, restore_transform
Example no. 10
def loading_data():
    # ShanghaiTech Part B
    mean_std = cfg.DATA.MEAN_STD
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg.TRAIN.INPUT_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = None
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [standard_transforms.ToTensor()])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = SHT_B(cfg.DATA.DATA_PATH + '/train_data',
                      main_transform=train_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=8,
                              shuffle=True,
                              drop_last=True)

    val_set = SHT_B(cfg.DATA.DATA_PATH + '/test_data',
                    main_transform=val_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg.VAL.BATCH_SIZE,
                            num_workers=8,
                            shuffle=True,
                            drop_last=True)

    return train_set, train_loader, val_set, val_loader, restore_transform
Example no. 11
from config import cfg
from misc.utils import *
import scipy.io as sio
from PIL import Image, ImageOps
import time

# Assumed imports (module paths follow the usual C-3-Framework layout):
import torch
import torchvision.transforms as standard_transforms
import misc.transforms as own_transforms

torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True

mean_std = ([0.452016860247, 0.447249650955,
             0.431981861591], [0.23242045939, 0.224925786257, 0.221840232611])
img_transform = standard_transforms.Compose(
    [standard_transforms.ToTensor(),
     standard_transforms.Normalize(*mean_std)])
restore = standard_transforms.Compose(
    [own_transforms.DeNormalize(*mean_std),
     standard_transforms.ToPILImage()])
pil_to_tensor = standard_transforms.ToTensor()

model_path = 'all_ep_608_mae_215.3_mse_831.6_nae_1.009.pth'


def re_name_weight(weight_dict):
    #wts = torch.load('xxx.pth')
    new_wts = {}
    for i_key in weight_dict.keys():
        new_key = i_key.replace('module.', '')
        print(new_key)
        new_wts[new_key] = weight_dict[i_key]
    return new_wts
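A minimal usage sketch for re_name_weight, under the assumption that the checkpoint at model_path was saved from an nn.DataParallel-wrapped model; CrowdCounter is a placeholder class name, not taken from the original code.

# Strip the 'module.' prefix that nn.DataParallel adds to parameter names,
# then load the weights into a plain (single-GPU) model.
net = CrowdCounter()  # placeholder for the repository's actual network class
state_dict = torch.load(model_path, map_location='cpu')
net.load_state_dict(re_name_weight(state_dict))
net.cuda().eval()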
Example no. 12
def createRestore(mean_std):
    return standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])
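A short usage sketch for createRestore; img_tensor is assumed to be a normalized 3 x H x W float tensor produced by an img_transform like the ones in the examples above.

# Undo Normalize and convert the tensor back to a PIL image for visualization.
restore = createRestore(mean_std)
pil_img = restore(img_tensor.cpu())
pil_img.save('restored.png')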
Example no. 13
def loading_data(args):
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    sou_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
        # Rand_Augment()
    ])

    # converts a PIL Image (H x W x C) in the range [0, 255]
    # to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    if args.phase == 'DA_train' or args.phase == 'fine_tune':
        # Load meta-train set
        # NOTE: only the last IFS_path assignment below takes effect, and the GCC
        # call that follows passes IFS_path=None, so these paths are currently unused.
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2SHHB/s2t'
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2QNRF/s2t'
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2WE/s2t'
        trainset = GCC('train',
                       main_transform=sou_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform,
                       filter_rule=cfg_data.FILTER_RULE,
                       IFS_path=None)
        sou_loader = DataLoader(trainset,
                                batch_size=cfg_data.sou_batch_size,
                                shuffle=True,
                                num_workers=12,
                                drop_last=True,
                                pin_memory=True)

        if args.target_dataset == 'QNRF':
            tar_main_transform = own_transforms.Compose(
                [own_transforms.RandomHorizontallyFlip()])
            trainset = QNRF('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         collate_fn=SHHA_collate,
                                         drop_last=True)

            valset = QNRF('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=1,
                                        num_workers=8,
                                        pin_memory=True)

            testset = QNRF('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=1,
                                         num_workers=8,
                                         pin_memory=True)
        elif args.target_dataset == 'SHHA':
            tar_main_transform = own_transforms.Compose(
                [own_transforms.RandomHorizontallyFlip()])
            trainset = SHHA('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         collate_fn=SHHA_collate,
                                         drop_last=True)

            valset = SHHA('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=1,
                                        num_workers=8,
                                        pin_memory=True)

            testset = SHHA('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=1,
                                         num_workers=8,
                                         pin_memory=True)

        elif args.target_dataset == 'MALL':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.MALL_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip()
            ])
            trainset = MALL('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         drop_last=True,
                                         pin_memory=True)

            valset = MALL('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=8,
                                        num_workers=8,
                                        pin_memory=True)

            testset = MALL('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=12,
                                         num_workers=8,
                                         pin_memory=True)

        elif args.target_dataset == 'UCSD':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.UCSD_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
            ])
            trainset = UCSD('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         drop_last=True,
                                         pin_memory=True)

            valset = UCSD('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=8,
                                        num_workers=8,
                                        pin_memory=True)

            testset = UCSD('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=12,
                                         num_workers=8,
                                         pin_memory=True)
        elif args.target_dataset == 'SHHB':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.SHHB_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
                # Rand_Augment()
            ])

            trainset = SHHB('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=8,
                                         drop_last=True,
                                         pin_memory=True)

            valset = SHHB('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=8,
                                        num_workers=8,
                                        pin_memory=True)

            testset = SHHB('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=8,
                                         num_workers=8,
                                         pin_memory=True)

        elif args.target_dataset == 'WE':
            tar_test_loader = []
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.WE_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
                # Rand_Augment()
            ])
            trainset = WE(None,
                          'train',
                          main_transform=tar_main_transform,
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=8,
                                         drop_last=True,
                                         pin_memory=True)
            valset = WE(None,
                        'val',
                        main_transform=tar_main_transform,
                        img_transform=img_transform,
                        gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=12,
                                        shuffle=False,
                                        num_workers=8,
                                        drop_last=False,
                                        pin_memory=True)

            for subname in cfg_data.WE_test_list:
                sub_set = WE(subname,
                             'test',
                             img_transform=img_transform,
                             gt_transform=gt_transform)
                tar_test_loader.append(
                    DataLoader(sub_set,
                               batch_size=12,
                               num_workers=8,
                               pin_memory=True))
        else:
            print(
                "Please set args.target_dataset to one of: QNRF, SHHA, MALL, UCSD, SHHB, WE"
            )

        return sou_loader, tar_shot_loader, tar_val_loader, tar_test_loader, restore_transform

    if args.phase == 'pre_train':
        trainset = GCC('train',
                       main_transform=sou_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
        train_loader = DataLoader(trainset,
                                  batch_size=args.pre_batch_size,
                                  shuffle=True,
                                  num_workers=8,
                                  drop_last=True,
                                  pin_memory=True)

        valset = GCC('val',
                     img_transform=img_transform,
                     gt_transform=gt_transform)
        val_loader = DataLoader(valset,
                                batch_size=12,
                                num_workers=8,
                                pin_memory=True)

        return train_loader, val_loader, restore_transform
Example no. 14
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        # own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    if cfg_data.VAL_MODE == 'normal':
        test_list = 'Train Test Splitting list/normal_training/NTU_test_correct.txt'
        train_list = 'Train Test Splitting list/normal_training/NTU_train_correct.txt'
    elif cfg_data.VAL_MODE == 'density':
        test_list = 'density_ab+ssc/NTU_density_test_ssc+ab.txt'
        train_list = 'density_ab+ssc/NTU_density_train_ssc+ab.txt'
    elif cfg_data.VAL_MODE == 'normal_ab_only':
        test_list = 'normal_ab_only/NTU_test_ab_only.txt'
        train_list = 'normal_ab_only/NTU_train_ab_only.txt'
    elif cfg_data.VAL_MODE == 'normal_ssc_only':
        test_list = 'normal_ssc_only/NTU_test_ssc_only.txt'
        train_list = 'normal_ssc_only/NTU_train_ssc_only.txt'
    elif cfg_data.VAL_MODE == 'density_ssc_only':
        test_list = 'density_ssc_only/NTU_density_test_ssc_only.txt'
        train_list = 'density_ssc_only/NTU_density_train_ssc_only.txt'
    elif cfg_data.VAL_MODE == 'density_ab_only':
        test_list = 'density_ab_only/NTU_density_test_ab_only.txt'
        train_list = 'density_ab_only/NTU_density_train_ab_only.txt'
    elif cfg_data.VAL_MODE == 'normal_train_ab_test_ssc':
        test_list = 'normal_train_ab_test_ssc/NTU_test_ssc_correct.txt'
        train_list = 'normal_train_ab_test_ssc/NTU_train_ab_correct.txt'
    elif cfg_data.VAL_MODE == 'normal_train_ssc_test_ab':
        test_list = 'normal_train_ssc_test_ab/NTU_test_ab_correct.txt'
        train_list = 'normal_train_ssc_test_ab/NTU_train_ssc_correct.txt'
    elif cfg_data.VAL_MODE == 'density_train_ssc_test_ab':
        test_list = 'density_train_ssc_test_ab/NTU_density_split_test_ab_correct.txt'
        train_list = 'density_train_ssc_test_ab/NTU_density_split_train_ssc_correct.txt'
    elif cfg_data.VAL_MODE == 'density_train_ab_test_ssc':
        test_list = 'density_train_ab_test_ssc/NTU_density_split_test_ssc_correct.txt'
        train_list = 'density_train_ab_test_ssc/NTU_density_split_train_ab_correct.txt'

    elif cfg_data.VAL_MODE == 'hall':
        test_list = 'new_split_list/test.txt'
        train_list = 'new_split_list/train.txt'

    elif cfg_data.VAL_MODE == 'hall_DA':
        test_list = 'new_split_list/test.txt'
        train_list = 'Train Test Splitting list/normal_training/NTU_train_correct.txt'
        train_target_list = 'new_split_list/train.txt'

    elif cfg_data.VAL_MODE == 'cycleGAN':
        test_list = 'new_split_list/cycle_test.txt'
        train_list = 'Train Test Splitting list/normal_training/NTU_train_correct.txt'
    else:
        raise ValueError('Unknown cfg_data.VAL_MODE: {}'.format(cfg_data.VAL_MODE))

    train_set = NTU(cfg_data.DATA_PATH + train_list,
                    'train',
                    main_transform=train_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8,
                              shuffle=True,
                              drop_last=True)

    val_set = NTU(cfg_data.DATA_PATH + test_list,
                  'test',
                  main_transform=None,
                  img_transform=img_transform,
                  gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=8,
                            shuffle=True,
                            drop_last=False)

    if cfg_data.VAL_MODE == 'hall_DA':
        train_target_set = NTU(cfg_data.DATA_PATH + train_target_list,
                               'train',
                               main_transform=None,
                               img_transform=img_transform,
                               gt_transform=gt_transform)
        train_target_loader = DataLoader(train_target_set,
                                         batch_size=cfg_data.TRAIN_BATCH_SIZE,
                                         num_workers=8,
                                         shuffle=True,
                                         drop_last=True)
        print('source domain:', train_list)
        print('target domain:', train_target_list)
        return train_loader, train_target_loader, val_loader, restore_transform

    return train_loader, val_loader, restore_transform
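Note that the function above changes its return arity with cfg_data.VAL_MODE; a hedged caller sketch (variable names are illustrative, not from the original code):

# 'hall_DA' returns an extra loader for the target domain.
if cfg_data.VAL_MODE == 'hall_DA':
    train_loader, train_target_loader, val_loader, restore_transform = loading_data()
else:
    train_loader, val_loader, restore_transform = loading_data()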
Example no. 15
def loading_data():
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    factor = cfg_data.LABEL_FACTOR
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(factor),
        own_transforms.LabelNormalize(log_para)
    ])
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    if cfg_data.IS_CROSS_SCENE:
        train_set = AC(img_path=cfg_data.IMAGE_PATH, den_path=cfg_data.DENSITY_PATH + '/cross_scene_train',
                       aud_path=cfg_data.AUDIO_PATH,
                       mode='train', main_transform=train_main_transform, img_transform=img_transform,
                       gt_transform=gt_transform, is_noise=cfg_data.IS_NOISE, brightness_decay=cfg_data.BRIGHTNESS,
                       noise_sigma=cfg_data.NOISE_SIGMA, longest_side=cfg_data.LONGEST_SIDE
                       )
    else:
        train_set = AC(img_path=cfg_data.IMAGE_PATH, den_path=cfg_data.DENSITY_PATH + '/train',
                       aud_path=cfg_data.AUDIO_PATH,
                       mode='train', main_transform=train_main_transform, img_transform=img_transform,
                       gt_transform=gt_transform, is_noise=cfg_data.IS_NOISE, brightness_decay=cfg_data.BRIGHTNESS,
                       noise_sigma=cfg_data.NOISE_SIGMA, longest_side=cfg_data.LONGEST_SIDE,
                       black_area_ratio=cfg_data.BLACK_AREA_RATIO, is_random=cfg_data.IS_RANDOM, is_denoise=cfg_data.IS_DENOISE
                       )
    train_loader = None
    if cfg_data.TRAIN_BATCH_SIZE == 1:
        train_loader = DataLoader(train_set, batch_size=1, num_workers=8, shuffle=True, drop_last=True)
    elif cfg_data.TRAIN_BATCH_SIZE > 1:
        train_loader = DataLoader(train_set, batch_size=cfg_data.TRAIN_BATCH_SIZE, num_workers=8,
                                  collate_fn=AC_collate, shuffle=True, drop_last=True)

    if cfg_data.IS_CROSS_SCENE:
        val_set = AC(img_path=cfg_data.IMAGE_PATH, den_path=cfg_data.DENSITY_PATH + '/cross_scene_val',
                     aud_path=cfg_data.AUDIO_PATH,
                     mode='val', main_transform=None, img_transform=img_transform, gt_transform=gt_transform,
                     is_noise=cfg_data.IS_NOISE, brightness_decay=cfg_data.BRIGHTNESS,
                     noise_sigma=cfg_data.NOISE_SIGMA, longest_side=cfg_data.LONGEST_SIDE
                     )
    else:
        val_set = AC(img_path=cfg_data.IMAGE_PATH, den_path=cfg_data.DENSITY_PATH + '/val',
                     aud_path=cfg_data.AUDIO_PATH,
                     mode='val', main_transform=None, img_transform=img_transform, gt_transform=gt_transform,
                     is_noise=cfg_data.IS_NOISE, brightness_decay=cfg_data.BRIGHTNESS,
                     noise_sigma=cfg_data.NOISE_SIGMA, longest_side=cfg_data.LONGEST_SIDE,
                     black_area_ratio=cfg_data.BLACK_AREA_RATIO, is_random=cfg_data.IS_RANDOM, is_denoise=cfg_data.IS_DENOISE
                     )
    val_loader = DataLoader(val_set, batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=1, shuffle=False, drop_last=False)

    if cfg_data.IS_CROSS_SCENE:
        test_set = AC(img_path=cfg_data.IMAGE_PATH, den_path=cfg_data.DENSITY_PATH + '/cross_scene_test',
                      aud_path=cfg_data.AUDIO_PATH,
                      mode='test', main_transform=None, img_transform=img_transform, gt_transform=gt_transform,
                      is_noise=cfg_data.IS_NOISE, brightness_decay=cfg_data.BRIGHTNESS,
                      noise_sigma=cfg_data.NOISE_SIGMA, longest_side=cfg_data.LONGEST_SIDE
                      )
    else:
        test_set = AC(img_path=cfg_data.IMAGE_PATH, den_path=cfg_data.DENSITY_PATH + '/test',
                      aud_path=cfg_data.AUDIO_PATH,
                      mode='test', main_transform=None, img_transform=img_transform, gt_transform=gt_transform,
                      is_noise=cfg_data.IS_NOISE, brightness_decay=cfg_data.BRIGHTNESS,
                      noise_sigma=cfg_data.NOISE_SIGMA, longest_side=cfg_data.LONGEST_SIDE,
                      black_area_ratio=cfg_data.BLACK_AREA_RATIO, is_random=cfg_data.IS_RANDOM, is_denoise=cfg_data.IS_DENOISE
                      )
    test_loader = DataLoader(test_set, batch_size=cfg_data.VAL_BATCH_SIZE, num_workers=1, shuffle=False, drop_last=False)

    return train_loader, val_loader, test_loader, restore_transform