示例#1
0
def conf():
    """Build the GAN training configuration (nets, optimizers, data loaders).

    Fixes the original's main defect: everything was assigned to locals and
    then discarded (the function returned None), so callers could not use
    any of it.  All configured objects are now returned in a dict.

    Returns:
        dict with keys 'device', 'netG', 'netD', 'optimizerG', 'optimizerD',
        'epoch', 'param', 'f', 'trainloader', 'testloader'.
    """
    device = 'cuda:0'
    netG = NetG().to(device)
    netD = NetD().to(device)
    # Asymmetric learning rates: generator 3e-4, discriminator 1e-4.
    optimizerG = optim.Adam(netG.parameters(), 0.0003, betas=(0.5, 0.999))
    optimizerD = optim.Adam(netD.parameters(), 0.0001, betas=(0.5, 0.999))
    epoch = 15
    param = None
    f = ConvNoise(17, 0.01)

    def _crop_pipeline(size):
        # dataset sample -> PIL -> center crop -> tensor normalized to [-1, 1]
        return transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

    def _dataset(split):
        # Train and test datasets are identical except for the split name:
        # 1024-crop input pipeline, raw-tensor pipeline, 256-crop pipeline.
        root = "/net/girlschool/besnier/Crow"
        return CrowdDataset(
            f"{root}/{split}",
            f"{root}/index{split}.txt", f,
            _crop_pipeline(1024),
            transforms.Compose([transforms.ToTensor()]),
            _crop_pipeline(256))

    def _loader(dataset):
        # Both loaders use the same settings (batch of 8, shuffled,
        # incomplete final batch dropped).
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=8,
                                           shuffle=True,
                                           num_workers=1,
                                           drop_last=True)

    trainloader = _loader(_dataset("train"))
    testloader = _loader(_dataset("test"))

    return {
        'device': device,
        'netG': netG,
        'netD': netD,
        'optimizerG': optimizerG,
        'optimizerD': optimizerD,
        'epoch': epoch,
        'param': param,
        'f': f,
        'trainloader': trainloader,
        'testloader': testloader,
    }
示例#2
0
def estimate_density_map(img_root, model_param_path, index):
    '''
    Show one estimated density map for a single test image.

    img_root: the root of the test image data.
    model_param_path: the path of the trained CSRNet parameters.
    index: the order of the test image in the test dataset.
    '''
    device = torch.device("cuda")
    model = CSRNet().to(device)
    # map_location keeps the load robust to the device the checkpoint
    # was saved from.
    model.load_state_dict(torch.load(model_param_path, map_location=device))
    dataset = CrowdDataset(img_root)
    # batch_size=1 and shuffle=False so batch i corresponds exactly to
    # dataset item i, making `index` meaningful.
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False)
    model.eval()
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for i, (img, gt_dmap) in enumerate(dataloader):
            if i != index:
                continue
            img = img.to(device)
            # forward propagation; drop the batch and channel dimensions
            # to get a 2-D map for plotting
            et_dmap = model(img).squeeze(0).squeeze(0).cpu().numpy()
            print(et_dmap.shape)
            plt.imshow(et_dmap, cmap=CM.jet)
            # Original never called show(), so nothing was displayed when
            # run as a script.
            plt.show()
            break
示例#3
0
def conf():
    """Build the SRGAN face-replacement training configuration.

    Fixes the original's main defect: all configured objects were assigned
    to locals and discarded (the function returned None).  They are now
    returned in a dict so callers can use them.

    Returns:
        dict with keys 'device', 'netG', 'netD', 'optimizerG', 'optimizerD',
        'epoch', 'cuda', 'file', 'f', 'trainloader', 'testloader'.
    """
    device = 'cuda:0'
    netG = NetG_srgan2().to(device)
    netD = NetD_patch().to(device)
    # TTUR-style setup: discriminator LR (4e-4) is twice the generator's.
    optimizerG = optim.Adam(netG.parameters(), 0.0002, betas=(0.5, 0.999))
    optimizerD = optim.Adam(netD.parameters(), 0.0004, betas=(0.5, 0.999))
    epoch = 100
    cuda = True
    file = '/SRGAN/Crow_SRGAN_face_replace2'
    f = ConvNoise(21, 0)

    def _crop_pipeline(size):
        # sample -> PIL -> center crop -> tensor normalized to [-1, 1]
        return transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop(size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

    traindataset = CrowdDataset(
        "/net/girlschool/besnier/Crow/train",
        "/net/girlschool/besnier/Crow/indextrain.txt", f,
        _crop_pipeline(1024),
        transforms.Compose([transforms.ToTensor()]),
        _crop_pipeline(256))

    # Test images are kept at full size: normalize only, no crop.
    # NOTE(review): Normalize without ToTensor assumes the dataset already
    # yields tensors here — confirm against CrowdDataset.
    testdataset = CrowdDataset(
        "/net/girlschool/besnier/Crow/test",
        "/net/girlschool/besnier/Crow/indextest.txt", f,
        transforms.Compose([
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]), transforms.Compose([transforms.ToTensor()]),
        transforms.Compose(
            [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))

    trainloader = torch.utils.data.DataLoader(traindataset,
                                              batch_size=8,
                                              shuffle=True,
                                              num_workers=1,
                                              drop_last=True)
    # Evaluation walks the test set one full-resolution image at a time,
    # in dataset order.
    testloader = torch.utils.data.DataLoader(testdataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             drop_last=True)

    return {
        'device': device,
        'netG': netG,
        'netD': netD,
        'optimizerG': optimizerG,
        'optimizerD': optimizerD,
        'epoch': epoch,
        'cuda': cuda,
        'file': file,
        'f': f,
        'trainloader': trainloader,
        'testloader': testloader,
    }
示例#4
0
def get_loader_json(args):
    """Build (train_loader, val_loader) from the JSON path lists in *args*."""

    def _load_list(json_path):
        # Each JSON file contains the list of samples for one split.
        with open(json_path, 'r') as fp:
            return json.load(fp)

    # Validation: whole images, one at a time, deterministic order.
    val_dataset = CrowdDataset(_load_list(args.val_json),
                               transform,
                               mode='one',
                               downsample_ratio=args.downsample,
                               test=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             shuffle=False,
                                             batch_size=1,
                                             pin_memory=True)

    # Training: cropped samples, shuffled, with worker processes.
    train_dataset = CrowdDataset(_load_list(args.train_json), transform,
                                 args.crop_mode, args.downsample,
                                 args.crop_scale)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               shuffle=True,
                                               batch_size=1,
                                               num_workers=8,
                                               pin_memory=True)

    return train_loader, val_loader
示例#5
0
from tqdm import tqdm

# hyper_param
# NOTE(review): the training loop is not in this chunk; the step/interval
# semantics below are inferred from the names — confirm against the loop.
start_step = 1        # presumably the first training step (inclusive)
end_step = 2000       # presumably the last training step
lr = 0.0001           # optimizer learning rate
momentum = 0.9        # optimizer momentum term
val_interval = 50     # presumably steps between validation runs
save_interval = 100   # presumably steps between checkpoint saves
batch_size = 8

# dataset
train_dir = 'data/train_data'
test_dir = 'data/test_data'

# gt_downsample=4 matches MCNN's 1/4-resolution output — TODO confirm
dataset_train = CrowdDataset(img_dir=train_dir, gt_downsample=4)
# Training loader: shuffled, incomplete final batch dropped.
dataloader_train = torch.utils.data.DataLoader(dataset=dataset_train,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               drop_last=True)

dataset_test = CrowdDataset(img_dir=test_dir, gt_downsample=4)
# Test loader: same batching but deterministic order (shuffle=False).
dataloader_test = torch.utils.data.DataLoader(dataset=dataset_test,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              num_workers=2,
                                              drop_last=True)

# model
net = MCNN()
示例#6
0
def get_loader(train_path, test_path, args):
    """Build train/test data loaders for one of three training modes.

    Mode selection:
      * 'bayes' in args.loss -> Bayesian-loss datasets (Crowd) with a
        custom collate function;
      * args.bn > 0          -> random-crop patch batches (PatchSet);
      * otherwise            -> single-image CrowdDataset training.

    Returns:
        (train_loader, test_loader, train_img_paths, test_img_paths).
    """
    # glob.glob already returns a list — no need for manual append loops.
    train_img_paths = glob.glob(os.path.join(train_path, '*.jpg'))
    test_img_paths = glob.glob(os.path.join(test_path, '*.jpg'))

    def _whole_image_test_loader():
        # Shared by the 'bn' and default branches (the original duplicated
        # this construction verbatim): one full image per batch, in order.
        return torch.utils.data.DataLoader(CrowdDataset(
            test_img_paths,
            transform,
            mode='one',
            downsample_ratio=args.downsample,
            test=True),
                                           shuffle=False,
                                           batch_size=1,
                                           pin_memory=True)

    if 'bayes' in args.loss:
        bayes_dataset = Crowd(train_path, args.crop_scale, args.downsample,
                              False, 'train')
        train_loader = torch.utils.data.DataLoader(bayes_dataset,
                                                   collate_fn=bayes_collate,
                                                   batch_size=args.bs,
                                                   shuffle=True,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(Crowd(
            test_path, args.crop_scale, args.downsample, False, 'val'),
                                                  batch_size=1,
                                                  num_workers=8,
                                                  pin_memory=True)
    elif args.bn > 0:
        bn_dataset = PatchSet(train_img_paths,
                              transform,
                              c_size=(args.crop_scale, args.crop_scale),
                              crop_n=args.random_crop_n)
        train_loader = torch.utils.data.DataLoader(bn_dataset,
                                                   collate_fn=my_collate_fn,
                                                   shuffle=True,
                                                   batch_size=args.bs,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = _whole_image_test_loader()
    else:
        single_dataset = CrowdDataset(train_img_paths, transform,
                                      args.crop_mode, args.downsample,
                                      args.crop_scale)
        train_loader = torch.utils.data.DataLoader(single_dataset,
                                                   shuffle=True,
                                                   batch_size=1,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = _whole_image_test_loader()

    return train_loader, test_loader, train_img_paths, test_img_paths