예제 #1
0
def loading_data():
    """Build train/val DataLoaders for the Venice dataset.

    Returns:
        (train_loader, val_loader, restore_transform)
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    # Joint image+GT augmentation for training: random crop then flip.
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
    ])
    # Validate on full images; comment the next line out to validate on
    # training-style crops instead.
    val_main_transform = None

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.LabelNormalize(log_para),
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])

    train_set = Venice(cfg_data.DATA_PATH + '/train', 'train',
                       main_transform=train_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = Venice(cfg_data.DATA_PATH + '/test', 'test',
                     main_transform=val_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=False)

    return train_loader, val_loader, restore_transform
예제 #2
0
def loading_data():
    """Build UCF-CC-50 cross-validation train/val DataLoaders.

    The validation fold(s) are selected by ``cfg_data.VAL_INDEX``; the
    remaining folds (via ``get_train_folder``) form the training set.

    Returns:
        (train_loader, val_loader, restore_transform)
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    val_folder = cfg_data.VAL_INDEX
    train_folder = get_train_folder(val_folder)

    train_set = UCF50(cfg_data.DATA_PATH,
                      train_folder,
                      'train',
                      main_transform=train_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform)
    # SHHA_collate handles variable-sized images within a batch.
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0,
                              collate_fn=SHHA_collate,
                              shuffle=True,
                              drop_last=True)

    # Materialize the fold digits as a list: the original passed a bare
    # ``map`` object, a one-shot iterator that would be exhausted after a
    # single pass if the dataset iterates it more than once.
    val_folders = [int(digit) for digit in str(val_folder)]
    val_set = UCF50(cfg_data.DATA_PATH,
                    val_folders,
                    'test',
                    main_transform=val_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0,
                            collate_fn=SHHA_collate,
                            shuffle=True,
                            drop_last=False)

    return train_loader, val_loader, restore_transform
예제 #3
0
def loading_data():
    """Assemble WorldExpo'10 loaders: one train loader plus one val
    loader per test scene.

    Returns:
        (train_loader, val_loaders, restore_transform) where
        ``val_loaders`` is a list with one DataLoader per entry in
        ``cfg_data.VAL_FOLDER``.
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.LabelNormalize(log_para),
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])

    train_set = WE(cfg_data.DATA_PATH + '/train', 'train',
                   main_transform=train_main_transform,
                   img_transform=img_transform,
                   gt_transform=gt_transform)
    train_loader = DataLoader(train_set,
                              batch_size=cfg_data.TRAIN_BATCH_SIZE,
                              num_workers=0, shuffle=True, drop_last=True)

    # One loader per test scene.
    # NOTE(review): shuffle=True and drop_last=True on a test loader look
    # unusual -- confirm this is intended before trusting val metrics.
    val_loader = [
        DataLoader(WE(cfg_data.DATA_PATH + '/test/' + subname, 'test',
                      main_transform=val_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform),
                   batch_size=cfg_data.VAL_BATCH_SIZE,
                   num_workers=0, shuffle=True, drop_last=True)
        for subname in cfg_data.VAL_FOLDER
    ]

    return train_loader, val_loader, restore_transform
예제 #4
0
def loading_data():
    """Build SHHB train/val/test DataLoaders.

    Returns:
        (train_loader, val_loader, test_loader, restore_transform)
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    # Training augmentation: horizontal flip only (random crop disabled).
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomHorizontallyFlip()
    ])
    val_main_transform = own_transforms.Compose(
        [own_transforms.RandomCrop(cfg_data.TRAIN_SIZE)])
    # Evaluate on full images rather than crops.
    val_main_transform = None

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.LabelNormalize(log_para),
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])

    def _build(subdir, mode, main_tf, batch_size, drop_last):
        # One dataset+loader pair; shared img/gt transforms.
        dataset = SHHB(cfg_data.DATA_PATH + subdir, mode,
                       main_transform=main_tf,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
        return DataLoader(dataset, batch_size=batch_size, num_workers=8,
                          shuffle=True, drop_last=drop_last)

    train_loader = _build('/train_data', 'train', train_main_transform,
                          cfg_data.TRAIN_BATCH_SIZE, True)
    test_loader = _build('/test_data', 'test', val_main_transform,
                         cfg_data.VAL_BATCH_SIZE, False)
    val_loader = _build('/val_data', 'test', val_main_transform,
                        cfg_data.VAL_BATCH_SIZE, False)

    return train_loader, val_loader, test_loader, restore_transform
예제 #5
0
def loading_data():
    """Build SHHA train/val DataLoaders.

    Batches larger than one need ``SHHA_collate`` (SHHA images vary in
    size); batch size 1 can use the default collate.

    Returns:
        (train_loader, val_loader, restore_transform)

    Raises:
        ValueError: if ``cfg_data.TRAIN_BATCH_SIZE`` < 1 (previously
            this silently returned ``train_loader=None``).
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    factor = cfg_data.LABEL_FACTOR
    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(factor),
        own_transforms.LabelNormalize(log_para)
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = SHHA(cfg_data.DATA_PATH + '/train',
                     'train',
                     main_transform=train_main_transform,
                     img_transform=img_transform,
                     gt_transform=gt_transform)
    batch_size = cfg_data.TRAIN_BATCH_SIZE
    if batch_size < 1:
        # Fail fast instead of silently falling through with
        # train_loader=None and crashing later in the training loop.
        raise ValueError(
            'cfg_data.TRAIN_BATCH_SIZE must be >= 1, got %r' % batch_size)
    loader_kwargs = {'num_workers': 0, 'shuffle': True, 'drop_last': True}
    if batch_size > 1:
        # Custom collate only needed for multi-image batches.
        loader_kwargs['collate_fn'] = SHHA_collate
    train_loader = DataLoader(train_set, batch_size=batch_size,
                              **loader_kwargs)

    val_set = SHHA(cfg_data.DATA_PATH + '/test',
                   'test',
                   main_transform=None,
                   img_transform=img_transform,
                   gt_transform=gt_transform)
    val_loader = DataLoader(val_set,
                            batch_size=cfg_data.VAL_BATCH_SIZE,
                            num_workers=0,
                            shuffle=True,
                            drop_last=False)

    return train_loader, val_loader, restore_transform
예제 #6
0
def createTrainData(datasetname, Dataset, cfg_data):
    """Create the training DataLoader for the given dataset.

    Args:
        datasetname: one of 'SHHA', 'SHHB', 'QNRF', 'JHU', 'NWPU', 'FDST'.
        Dataset: dataset class to instantiate.
        cfg_data: config providing DATA_PATH, TRAIN_LST, TRAIN_SIZE,
            MEAN_STD and TRAIN_BATCH_SIZE.

    Returns:
        A DataLoader over the training split.

    Raises:
        ValueError: if ``datasetname`` is not supported. (Previously a
            bare print('dataset is not exist') let execution continue
            with ``list_file=None`` and fail obscurely later.)
    """
    supported = ('SHHA', 'SHHB', 'QNRF', 'JHU', 'NWPU', 'FDST')
    if datasetname not in supported:
        raise ValueError('unsupported dataset: %r' % (datasetname,))

    list_file = [{
        'data_path': cfg_data.DATA_PATH,
        'imgId_txt': cfg_data.TRAIN_LST,
        'box_gt_txt': []
    }]

    # Joint image+GT augmentation: scale jitter, crop, flip.
    main_transform = own_transforms.Compose([
        own_transforms.ScaleByRateWithMin([0.8, 1.2], cfg_data.TRAIN_SIZE[1],
                                          cfg_data.TRAIN_SIZE[0]),
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*cfg_data.MEAN_STD)
    ])
    mask_transform = standard_transforms.Compose(
        [standard_transforms.ToTensor()])

    train_set = Dataset(datasetname,
                        'train',
                        main_transform=main_transform,
                        img_transform=img_transform,
                        mask_transform=mask_transform,
                        list_file=list_file)
    if datasetname == 'FDST':
        # Bound one FDST epoch to 1000 with-replacement random samples.
        train_sampler = RandomSampler(data_source=train_set,
                                      replacement=True,
                                      num_samples=1000)
        return DataLoader(train_set,
                          batch_size=cfg_data.TRAIN_BATCH_SIZE,
                          sampler=train_sampler,
                          num_workers=6,
                          drop_last=True)
    return DataLoader(train_set,
                      batch_size=cfg_data.TRAIN_BATCH_SIZE,
                      num_workers=6,
                      shuffle=True,
                      drop_last=True)
예제 #7
0
파일: utils.py 프로젝트: maohule/adacrowd
def data_transforms(cfg_data):
    """Build the transform pipelines for the configured dataset.

    Args:
        cfg_data: config providing DATASET, MEAN_STD, LOG_PARA,
            TRAIN_SIZE and (for FDST) TRAIN_DOWNRATE.

    Returns:
        (train_main_transform, val_main_transform, img_transform,
         gt_transform, restore_transform)
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA
    dataset = cfg_data.DATASET

    # Joint (image + GT) transforms differ per dataset family.
    if dataset == 'City':
        train_main_transform = own_transforms.Compose([
            own_transforms.RandomHorizontallyFlip()
        ])
        val_main_transform = None
    elif dataset in ('FDST', 'PETS'):
        # Fixed-size rescale for both train and val.
        train_main_transform = standard_transforms.Compose([
            own_transforms.FreeScale(cfg_data.TRAIN_SIZE),
        ])
        val_main_transform = standard_transforms.Compose([
            own_transforms.FreeScale(cfg_data.TRAIN_SIZE),
        ])
    else:
        train_main_transform = own_transforms.Compose([
            own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
            own_transforms.RandomHorizontallyFlip()
        ])
        val_main_transform = None

    # GT transform: FDST additionally scales the GT down first.
    gt_steps = []
    if dataset == 'FDST':
        gt_steps.append(own_transforms.GTScaleDown(cfg_data.TRAIN_DOWNRATE))
    gt_steps.append(own_transforms.LabelNormalize(log_para))
    gt_transform = standard_transforms.Compose(gt_steps)

    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    return train_main_transform, val_main_transform, img_transform, gt_transform, restore_transform
예제 #8
0
def loading_data():
    """Build UCF-QNRF train/val datasets and loaders.

    Returns:
        (train_set, train_loader, val_set, val_loader, restore_transform)
    """
    mean_std = cfg.DATA.MEAN_STD
    log_para = cfg.DATA.LOG_PARA
    factor = cfg.DATA.LABEL_FACTOR

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg.TRAIN.INPUT_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = None  # no joint augmentation at eval time
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std),
    ])
    gt_transform = standard_transforms.Compose([
        own_transforms.GTScaleDown(factor),
        own_transforms.LabelNormalize(log_para),
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])

    train_set = UCF_QNRF(cfg.DATA.DATA_PATH + '/train', 'train',
                         main_transform=train_main_transform,
                         img_transform=img_transform,
                         gt_transform=gt_transform)
    train_loader = DataLoader(train_set, batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = UCF_QNRF(cfg.DATA.DATA_PATH + '/test', 'test',
                       main_transform=val_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
    val_loader = DataLoader(val_set, batch_size=cfg.VAL.BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=False)

    return train_set, train_loader, val_set, val_loader, restore_transform
예제 #9
0
def loading_data():
    """Build Shanghai Tech Part B train/val datasets and loaders.

    Returns:
        (train_set, train_loader, val_set, val_loader, restore_transform)
    """
    mean_std = cfg.DATA.MEAN_STD

    train_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg.TRAIN.INPUT_SIZE),
        own_transforms.RandomHorizontallyFlip(),
    ])
    val_main_transform = None  # no joint augmentation at eval time
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std),
    ])
    # GT maps are only converted to tensors; no label scaling here.
    gt_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
    ])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])

    train_set = SHT_B(cfg.DATA.DATA_PATH + '/train_data',
                      main_transform=train_main_transform,
                      img_transform=img_transform,
                      gt_transform=gt_transform)
    train_loader = DataLoader(train_set, batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=8, shuffle=True, drop_last=True)

    val_set = SHT_B(cfg.DATA.DATA_PATH + '/test_data',
                    main_transform=val_main_transform,
                    img_transform=img_transform,
                    gt_transform=gt_transform)
    # NOTE(review): drop_last=True on the val loader discards a partial
    # final batch -- confirm this is intended.
    val_loader = DataLoader(val_set, batch_size=cfg.VAL.BATCH_SIZE,
                            num_workers=8, shuffle=True, drop_last=True)

    return train_set, train_loader, val_set, val_loader, restore_transform
예제 #10
0
def createTrainData(datasetname, Dataset, cfg_data):
    """Create the training DataLoader for GCC or NWPU.

    Args:
        datasetname: 'GCC' or 'NWPU'.
        Dataset: dataset class to instantiate.
        cfg_data: config providing DATA_PATH, VAL_MODE (GCC), TRAIN_SIZE,
            MEAN_STD, LOG_PARA and TRAIN_BATCH_SIZE.

    Returns:
        A DataLoader over the training split.

    Raises:
        ValueError: if ``datasetname`` is unsupported. (Previously this
            fell through and crashed later with UnboundLocalError on
            ``train_path``.)
    """
    # Removed a dead no-op expression statement
    # (``cfg_data.DATA_PATH + '/train'``) that computed a string and
    # discarded it.
    folder = None
    if datasetname == 'GCC':
        train_list = common.gccvalmode2list(cfg_data.VAL_MODE, True)
        list_file = os.path.join(cfg_data.DATA_PATH, 'txt_list', train_list)
        train_path = cfg_data.DATA_PATH
    elif datasetname == 'NWPU':
        list_file = os.path.join(cfg_data.DATA_PATH, 'txt_list/train.txt')
        train_path = cfg_data.DATA_PATH
    else:
        raise ValueError('unsupported dataset: %r' % (datasetname,))

    main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip()
    ])
    # Photometric augmentation before tensor conversion/normalization.
    img_transform = standard_transforms.Compose([
        own_transforms.RGB2Gray(0.1),
        own_transforms.GammaCorrection([0.4, 2]),
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*cfg_data.MEAN_STD)
    ])
    dot_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        own_transforms.tensormul(255.0),
        own_transforms.LabelNormalize(cfg_data.LOG_PARA),
    ])

    train_set = Dataset(train_path, datasetname, 'train',
                        main_transform=main_transform,
                        img_transform=img_transform,
                        dot_transform=dot_transform,
                        list_file=list_file,
                        folder=folder)
    return DataLoader(train_set, batch_size=cfg_data.TRAIN_BATCH_SIZE,
                      num_workers=0, shuffle=True, drop_last=True)
예제 #11
0
def loading_data(args):
    """Build source/target DataLoaders for GCC-based domain adaptation.

    Behavior depends on ``args.phase``:
      * 'DA_train' / 'fine_tune': returns (sou_loader, tar_shot_loader,
        tar_val_loader, tar_test_loader, restore_transform); the target
        loaders depend on ``args.target_dataset`` (QNRF, SHHA, MALL,
        UCSD, SHHB or WE; for WE the test loader is a list, one per
        scene in ``cfg_data.WE_test_list``).
      * 'pre_train': returns (train_loader, val_loader,
        restore_transform) over GCC only.
      * any other phase: implicitly returns None.
    """
    mean_std = cfg_data.MEAN_STD
    log_para = cfg_data.LOG_PARA

    # Source-domain (GCC) joint augmentation.
    sou_main_transform = own_transforms.Compose([
        own_transforms.RandomCrop(cfg_data.TRAIN_SIZE),
        own_transforms.RandomHorizontallyFlip(),
        # Rand_Augment()
    ])

    # converts a PIL Image(H*W*C) in the range[0,255]
    # to a torch.FloatTensor of shape (C*H*W) in the range[0.0, 1.0]
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    gt_transform = standard_transforms.Compose(
        [own_transforms.LabelNormalize(log_para)])
    # Inverse of img_transform: de-normalize and convert back to PIL.
    restore_transform = standard_transforms.Compose([
        own_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    if args.phase == 'DA_train' or args.phase == 'fine_tune':
        # Load meta-train set
        # NOTE(review): the three IFS_path assignments below are dead --
        # each overwrites the previous, and the GCC(...) call passes
        # IFS_path=None regardless. Confirm which path (if any) is meant.
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2SHHB/s2t'
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2QNRF/s2t'
        IFS_path = '/media/D/ht/C-3-Framework-trans/trans-display/GCC2WE/s2t'
        trainset = GCC('train',
                       main_transform=sou_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform,
                       filter_rule=cfg_data.FILTER_RULE,
                       IFS_path=None)
        sou_loader = DataLoader(trainset,
                                batch_size=cfg_data.sou_batch_size,
                                shuffle=True,
                                num_workers=12,
                                drop_last=True,
                                pin_memory=True)

        # Target-domain loaders: a few-shot train loader, a val loader
        # and a test loader for the selected dataset.
        if args.target_dataset == 'QNRF':
            tar_main_transform = own_transforms.Compose(
                [own_transforms.RandomHorizontallyFlip()])
            trainset = QNRF('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         collate_fn=SHHA_collate,
                                         drop_last=True)

            valset = QNRF('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=1,
                                        num_workers=8,
                                        pin_memory=True)

            testset = QNRF('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=1,
                                         num_workers=8,
                                         pin_memory=True)
        elif args.target_dataset == 'SHHA':
            tar_main_transform = own_transforms.Compose(
                [own_transforms.RandomHorizontallyFlip()])
            trainset = SHHA('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         collate_fn=SHHA_collate,
                                         drop_last=True)

            valset = SHHA('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=1,
                                        num_workers=8,
                                        pin_memory=True)

            testset = SHHA('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=1,
                                         num_workers=8,
                                         pin_memory=True)

        elif args.target_dataset == 'MALL':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.MALL_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip()
            ])
            trainset = MALL('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         drop_last=True,
                                         pin_memory=True)

            valset = MALL('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=8,
                                        num_workers=8,
                                        pin_memory=True)

            testset = MALL('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=12,
                                         num_workers=8,
                                         pin_memory=True)

        elif args.target_dataset == 'UCSD':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.UCSD_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
            ])
            trainset = UCSD('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=12,
                                         drop_last=True,
                                         pin_memory=True)

            valset = UCSD('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=8,
                                        num_workers=8,
                                        pin_memory=True)

            testset = UCSD('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=12,
                                         num_workers=8,
                                         pin_memory=True)
        elif args.target_dataset == 'SHHB':
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.SHHB_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
                # Rand_Augment()
            ])

            trainset = SHHB('train',
                            main_transform=tar_main_transform,
                            img_transform=img_transform,
                            gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=8,
                                         drop_last=True,
                                         pin_memory=True)

            valset = SHHB('val',
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=8,
                                        num_workers=8,
                                        pin_memory=True)

            testset = SHHB('test',
                           img_transform=img_transform,
                           gt_transform=gt_transform)
            tar_test_loader = DataLoader(testset,
                                         batch_size=8,
                                         num_workers=8,
                                         pin_memory=True)

        elif args.target_dataset == 'WE':
            # WE test data is split per scene: one loader per scene.
            tar_test_loader = []
            tar_main_transform = own_transforms.Compose([
                own_transforms.RandomCrop(cfg_data.WE_TRAIN_SIZE),
                own_transforms.RandomHorizontallyFlip(),
                # Rand_Augment()
            ])
            trainset = WE(None,
                          'train',
                          main_transform=tar_main_transform,
                          img_transform=img_transform,
                          gt_transform=gt_transform)
            tar_shot_loader = DataLoader(trainset,
                                         batch_size=cfg_data.target_shot_size,
                                         shuffle=True,
                                         num_workers=8,
                                         drop_last=True,
                                         pin_memory=True)
            valset = WE(None,
                        'val',
                        main_transform=tar_main_transform,
                        img_transform=img_transform,
                        gt_transform=gt_transform)
            tar_val_loader = DataLoader(valset,
                                        batch_size=12,
                                        shuffle=False,
                                        num_workers=8,
                                        drop_last=False,
                                        pin_memory=True)

            for subname in cfg_data.WE_test_list:
                sub_set = WE(subname,
                             'test',
                             img_transform=img_transform,
                             gt_transform=gt_transform)
                tar_test_loader.append(
                    DataLoader(sub_set,
                               batch_size=12,
                               num_workers=8,
                               pin_memory=True))
        else:
            # NOTE(review): on an unrecognized target_dataset only a
            # message is printed; the return below then raises
            # UnboundLocalError because tar_shot_loader / tar_val_loader
            # / tar_test_loader were never assigned.
            print(
                "Please set the target dataset as one of them:SHHB,  UCF50,  QNRF, MALL, UCSD, SHHA"
            )

        return sou_loader, tar_shot_loader, tar_val_loader, tar_test_loader, restore_transform

    if args.phase == 'pre_train':
        # Pre-training: GCC source domain only, no target loaders.
        trainset = GCC('train',
                       main_transform=sou_main_transform,
                       img_transform=img_transform,
                       gt_transform=gt_transform)
        train_loader = DataLoader(trainset,
                                  batch_size=args.pre_batch_size,
                                  shuffle=True,
                                  num_workers=8,
                                  drop_last=True,
                                  pin_memory=True)

        valset = GCC('val',
                     img_transform=img_transform,
                     gt_transform=gt_transform)
        val_loader = DataLoader(valset,
                                batch_size=12,
                                num_workers=8,
                                pin_memory=True)

        return train_loader, val_loader, restore_transform
예제 #12
0
from config import cfg
from misc.utils import *
import scipy.io as sio
from PIL import Image, ImageOps
import shutil

# Evaluation-script setup for Shanghai Tech Part A.
#torch.cuda.set_device(0)
torch.backends.cudnn.benchmark = True  # autotune cuDNN kernels for repeated input sizes

exp_name = 'SHHA_results'

# Per-channel (mean, std) used for input normalization and restoration.
mean_std = ([0.452016860247, 0.447249650955,
             0.431981861591], [0.23242045939, 0.224925786257, 0.221840232611])

# Random (576, 768) crop applied to validation images.
val_main_transform = own_transforms.Compose(
    [own_transforms.RandomCrop((576, 768))])
img_transform = standard_transforms.Compose(
    [standard_transforms.ToTensor(),
     standard_transforms.Normalize(*mean_std)])
# Inverse of img_transform: de-normalize and convert back to PIL.
restore = standard_transforms.Compose(
    [own_transforms.DeNormalize(*mean_std),
     standard_transforms.ToPILImage()])
pil_to_tensor = standard_transforms.ToTensor()

dataRoot = 'datasets/ProcessedData/shanghaitech_part_A/test'

# NOTE(review): machine-specific absolute Windows path -- make configurable.
model_path = 'D:/FromBinusServer/all_ep_37_mae_62.4_mse_108.0.pth'


def main():