def setup(self):
        """initial the datasets, model, loss and optimizer"""
        args = self.args
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            # for code conciseness, we release only the single-GPU version
            assert self.device_count == 1
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")

        self.downsample_ratio = args.downsample_ratio
        self.datasets = {
            x: Crowd(os.path.join(args.data_dir, x), args.crop_size,
                     args.downsample_ratio, args.is_gray, x)
            for x in ['train', 'val']
        }
        self.dataloaders = {
            x: DataLoader(self.datasets[x],
                          collate_fn=(train_collate
                                      if x == 'train' else default_collate),
                          batch_size=(args.batch_size if x == 'train' else 1),
                          shuffle=(x == 'train'),
                          num_workers=args.num_workers * self.device_count,
                          pin_memory=(x == 'train'))
            for x in ['train', 'val']
        }
        self.model = vgg19()
        self.model.to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=args.lr,
                                    weight_decay=args.weight_decay)

        self.start_epoch = 0
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(
                    checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume,
                                                      self.device))

        self.post_prob = Post_Prob(args.sigma, args.crop_size,
                                   args.downsample_ratio,
                                   args.background_ratio, args.use_background,
                                   self.device)
        self.criterion = Bay_Loss(args.use_background, self.device)
        self.save_list = Save_Handle(max_num=args.max_model_num)
        self.best_mae = np.inf
        self.best_mse = np.inf
        self.best_mae_1 = np.inf
        self.best_mse_1 = np.inf
        self.best_count = 0
        self.best_count_1 = 0
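The training loader above needs a custom collate_fn because every image carries a different number of annotated head points, which default_collate cannot stack. A minimal sketch of a compatible train_collate, assuming each dataset item is an (image, points, targets, st_size) tuple as in the Bayesian-loss pipeline:

def train_collate(batch):
    # batch: list of (image, points, targets, st_size) tuples
    transposed_batch = list(zip(*batch))
    images = torch.stack(transposed_batch[0], 0)
    points = transposed_batch[1]  # variable-length; keep as a tuple of tensors
    targets = transposed_batch[2]
    st_sizes = torch.FloatTensor(transposed_batch[3])
    return images, points, targets, st_sizes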
Example #2
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--data-dir',
                    default='/home/teddy/UCF-Train-Val-Test',
                    help='training data directory')
parser.add_argument('--save-dir',
                    default='/home/teddy/vgg',
                    help='model directory')
parser.add_argument('--model', default='best_model_17.pth', help='model name')

parser.add_argument('--device', default='0', help='gpu device')
args = parser.parse_args()

if __name__ == '__main__':

    datasets = Crowd(os.path.join(args.data_dir, 'test'), method='test')
    dataloader = torch.utils.data.DataLoader(datasets,
                                             1,
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=True)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.device  # set visible GPU
    device = torch.device('cuda')

    model = fusion_model()
    model.to(device)
    model_path = os.path.join(args.save_dir, args.model)
    checkpoint = torch.load(model_path, device)
    model.load_state_dict(checkpoint)
    model.eval()
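The snippet stops before the test loop; a minimal sketch of the rest, assuming (as in the neighbouring examples) that the loader yields (inputs, count, name) and that the model outputs a density map whose spatial sum is the predicted count:

    epoch_minus = []
    with torch.no_grad():
        for inputs, count, name in dataloader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            epoch_minus.append(count[0].item() - torch.sum(outputs).item())
    mae = sum(abs(e) for e in epoch_minus) / len(epoch_minus)
    mse = (sum(e * e for e in epoch_minus) / len(epoch_minus)) ** 0.5
    print('mae {:.2f}, mse {:.2f}'.format(mae, mse))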
Example #3
    parser.add_argument('--data-dir',
                        default='/home/teddy/UCF-Train-Val-Test',
                        help='training data directory')
    parser.add_argument('--save-dir',
                        default='/home/teddy/vgg',
                        help='model directory')
    parser.add_argument('--device', default='0', help='assign device')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip()  # set visible GPU

    datasets = Crowd(os.path.join(args.data_dir, 'test'),
                     512,
                     8,
                     is_gray=False,
                     method='val')
    dataloader = torch.utils.data.DataLoader(datasets,
                                             1,
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=False)
    model = vgg19()
    device = torch.device('cuda')
    model.to(device)
    model.load_state_dict(
        torch.load(os.path.join(args.save_dir, 'best_model.pth'), device))
    epoch_minus = []

    for inputs, count, name in dataloader:
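        # (snippet truncated here; a minimal loop body sketch, assuming the
        #  model returns a density map whose spatial sum is the count)
        inputs = inputs.to(device)
        with torch.no_grad():
            outputs = model(inputs)
            epoch_minus.append(count[0].item() - torch.sum(outputs).item())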
Example #4
    def setup(self):
        """initial the datasets, model, loss and optimizer"""
        args = self.args
        self.skip_test = args.skip_test
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            # for code conciseness, we release only the single-GPU version
            assert self.device_count == 1
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")

        self.downsample_ratio = args.downsample_ratio
        # per-split image lists are unused here; pass None for every split
        lists = {'train': None, 'val': None, 'test': None}
        self.datasets = {x: Crowd(os.path.join(args.data_dir, x),
                                  args.crop_size,
                                  args.downsample_ratio,
                                  args.is_gray, x, args.resize,
                                  im_list=lists[x]) for x in ['train', 'val']}
        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          collate_fn=(train_collate
                                                      if x == 'train' else default_collate),
                                          batch_size=(args.batch_size
                                                      if x == 'train' else 1),
                                          shuffle=(x == 'train'),
                                          num_workers=args.num_workers * self.device_count,
                                          pin_memory=(x == 'train'))
                            for x in ['train', 'val']}
        self.datasets['test'] = Crowd(os.path.join(args.data_dir, 'test'),
                                      args.crop_size,
                                      args.downsample_ratio,
                                      args.is_gray, 'val', args.resize,
                                      im_list=lists['test'])
        self.dataloaders['test'] = DataLoader(self.datasets['test'],
                                              collate_fn=default_collate,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=args.num_workers * self.device_count,
                                              pin_memory=False)
        print(len(self.dataloaders['train']))
        print(len(self.dataloaders['val']))

        if self.args.net == 'csrnet':
            self.model = CSRNet()
        else:
            self.model = vgg19()

        self.refiner = IndivBlur8(s=args.s, downsample=self.downsample_ratio, softmax=args.soft)
        refine_params = list(self.refiner.adapt.parameters())

        self.model.to(self.device)
        self.refiner.to(self.device)
        params = list(self.model.parameters()) 
        self.optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
        # self.optimizer = optim.SGD(params, lr=args.lr, momentum=0.95, weight_decay=args.weight_decay)
        self.dml_optimizer = torch.optim.Adam(refine_params, lr=1e-7, weight_decay=args.weight_decay)

        self.start_epoch = 0
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.refiner.load_state_dict(checkpoint['refine_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume, self.device))

        self.crit = torch.nn.MSELoss(reduction='sum')

        self.save_list = Save_Handle(max_num=args.max_model_num)
        self.test_flag = False
        self.best_mae = {}
        self.best_mse = {}
        self.best_epoch = {}
        for stage in ['val', 'test']:
            self.best_mae[stage] = np.inf
            self.best_mse[stage] = np.inf
            self.best_epoch[stage] = 0
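Note the two optimizers above: the counting network trains at args.lr while the refiner's kernel-adaptation parameters take a much smaller step (1e-7). A hypothetical single training step (the refiner call signature is an assumption; the real one is defined by IndivBlur8):

            # hypothetical joint update -- both optimizers step on one loss
            outputs = self.model(inputs)
            targets = self.refiner(points, features)  # assumed signature
            loss = self.crit(outputs, targets)
            self.optimizer.zero_grad()
            self.dml_optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.dml_optimizer.step()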
Example #5
    def setup(self):
        """initial the datasets, model, loss and optimizer"""
        args = self.args
        self.loss = args.loss
        self.skip_test = args.skip_test
        self.add = args.add
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            # for code conciseness, we release only the single-GPU version
            assert self.device_count == 1
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")

        self.downsample_ratio = args.downsample_ratio
        # per-split image lists are unused here; pass None for every split
        lists = {'train': None, 'val': None, 'test': None}
        self.datasets = {
            x: Crowd(os.path.join(args.data_dir, x),
                     args.crop_size,
                     args.downsample_ratio,
                     args.is_gray,
                     x,
                     im_list=lists[x])
            for x in ['train', 'val']
        }
        self.dataloaders = {
            x: DataLoader(
                self.datasets[x],
                collate_fn=(train_collate
                            if x == 'train' else default_collate),
                batch_size=(self.args.batch_size if x == 'train' else 1),
                shuffle=(x == 'train'),
                num_workers=args.num_workers * self.device_count,
                pin_memory=(x == 'train'))
            for x in ['train', 'val']
        }
        self.datasets['test'] = Crowd(os.path.join(args.data_dir, 'test'),
                                      args.crop_size,
                                      args.downsample_ratio,
                                      args.is_gray,
                                      'val',
                                      im_list=lists['test'])
        self.dataloaders['test'] = DataLoader(self.datasets['test'],
                                              collate_fn=default_collate,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=args.num_workers * self.device_count,
                                              pin_memory=False)
        self.model = vgg19(down=self.downsample_ratio,
                           bn=args.bn,
                           o_cn=args.o_cn)
        self.model.to(self.device)
        params = list(self.model.parameters())
        self.optimizer = optim.Adam(params, lr=args.lr)

        self.start_epoch = 0
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(
                    checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume,
                                                      self.device))

        self.post_prob = Full_Post_Prob(args.sigma,
                                        args.alpha,
                                        args.crop_size,
                                        args.downsample_ratio,
                                        args.background_ratio,
                                        args.use_background,
                                        self.device,
                                        add=self.add,
                                        minx=args.minx,
                                        ratio=args.ratio)
        self.criterion = Full_Cov_Gaussian_Loss(args.use_background,
                                                self.device,
                                                weight=self.args.weight,
                                                reg=args.reg)

        self.save_list = Save_Handle(max_num=args.max_model_num)
        self.test_flag = False
        self.best_mae = {}
        self.best_mse = {}
        self.best_epoch = {}
        for stage in ['val', 'test']:
            self.best_mae[stage] = np.inf
            self.best_mse[stage] = np.inf
            self.best_epoch[stage] = 0
Example #6
    def setup(self):
        args = self.args
        sub_dir = '{}_input-{}_wot-{}_wtv-{}_reg-{}_nIter-{}_normCood-{}'.format(
            args.dataset, args.crop_size, args.wot, args.wtv, args.reg,
            args.num_of_iter_in_ot, args.norm_cood)

        self.save_dir = os.path.join(args.out_path, 'ckpts', sub_dir)
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

        time_str = datetime.strftime(datetime.now(), '%m%d-%H%M%S')
        self.logger = log_utils.get_logger(
            os.path.join(self.save_dir, 'train-{:s}.log'.format(time_str)))
        log_utils.print_config(vars(args), self.logger)

        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            assert self.device_count == 1
            self.logger.info('Using {} gpus'.format(self.device_count))
        else:
            raise Exception("Gpu is not available")

        dataset_name = args.dataset.lower()
        if dataset_name == 'qnrf':
            from datasets.crowd import Crowd_qnrf as Crowd
        elif dataset_name == 'nwpu':
            from datasets.crowd import Crowd_nwpu as Crowd
        elif dataset_name == 'sha' or dataset_name == 'shb':
            from datasets.crowd import Crowd_sh as Crowd
        else:
            raise NotImplementedError

        downsample_ratio = 8
        self.datasets = {
            'train':
            Crowd(os.path.join(args.data_path,
                               DATASET_PATHS[dataset_name]["train_path"]),
                  crop_size=args.crop_size,
                  downsample_ratio=downsample_ratio,
                  method='train'),
            'val':
            Crowd(os.path.join(args.data_path,
                               DATASET_PATHS[dataset_name]["val_path"]),
                  crop_size=args.crop_size,
                  downsample_ratio=downsample_ratio,
                  method='val')
        }

        self.dataloaders = {
            x: DataLoader(self.datasets[x],
                          collate_fn=(train_collate
                                      if x == 'train' else default_collate),
                          batch_size=(args.batch_size if x == 'train' else 1),
                          shuffle=(x == 'train'),
                          num_workers=args.num_workers * self.device_count,
                          pin_memory=(x == 'train'))
            for x in ['train', 'val']
        }
        self.model = vgg19()
        self.model.to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=args.lr,
                                    weight_decay=args.weight_decay)

        self.start_epoch = 0
        if args.resume:
            self.logger.info('loading pretrained model from ' + args.resume)
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(
                    checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume,
                                                      self.device))
        else:
            self.logger.info('random initialization')

        self.ot_loss = OT_Loss(args.crop_size, downsample_ratio,
                               args.norm_cood, self.device,
                               args.num_of_iter_in_ot, args.reg)
        self.tv_loss = nn.L1Loss(reduction='none').to(self.device)
        self.mse = nn.MSELoss().to(self.device)
        self.mae = nn.L1Loss().to(self.device)
        self.save_list = Save_Handle(max_num=1)
        self.best_mae = np.inf
        self.best_mse = np.inf
        self.best_count = 0
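All of the resume branches in these examples share one convention: a '.tar' checkpoint carries the full training state, while a '.pth' file carries bare model weights. A save-side sketch (file names illustrative) that produces checkpoints both branches can read:

        # inside the training loop, once per epoch (illustrative)
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
        }, os.path.join(self.save_dir, '{}_ckpt.tar'.format(epoch)))
        # weights only, e.g. when a new best MAE is reached
        torch.save(self.model.state_dict(),
                   os.path.join(self.save_dir, 'best_model.pth'))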
Example #7
    def setup(self):
        train_args = self.train_args
        datargs = self.datargs
        sub_dir = 'input-{}_wot-{}_wtv-{}_reg-{}_nIter-{}_normCood-{}'.format(
            train_args['crop_size'], train_args['wot'], train_args['wtv'],
            train_args['reg'], train_args['num_of_iter_in_ot'],
            train_args['norm_cood'])

        time_str = datetime.strftime(datetime.now(), '%m%d-%H%M%S')
        self.save_dir = os.path.join(train_args['out_path'], 'ckpts',
                                     train_args['conf_name'],
                                     train_args['dataset'], sub_dir, time_str)
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        log_dir = os.path.join(train_args['out_path'], 'runs',
                               train_args['dataset'], train_args['conf_name'],
                               time_str)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        # TODO: Verify args
        self.logger = SummaryWriter(log_dir)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            assert self.device_count == 1
        else:
            raise Exception("Gpu is not available")

        dataset_name = train_args['dataset'].lower()
        if dataset_name == 'qnrf':
            from datasets.crowd import Crowd_qnrf as Crowd
        elif dataset_name == 'nwpu':
            from datasets.crowd import Crowd_nwpu as Crowd
        elif dataset_name == 'sha' or dataset_name == 'shb':
            from datasets.crowd import Crowd_sh as Crowd
        elif dataset_name[:3] == 'ucf':
            from datasets.crowd import Crowd_ucf as Crowd
        else:
            raise NotImplementedError
        if dataset_name == 'sha' or dataset_name == 'shb':
            downsample_ratio = train_args['downsample_ratio']
            train_val = Crowd(os.path.join(datargs['data_path'],
                                           datargs["train_path"]),
                              crop_size=train_args['crop_size'],
                              downsample_ratio=downsample_ratio,
                              method='train')
            if dataset_name == 'sha':
                train_set, val = random_split(
                    train_val, [280, 20],
                    generator=torch.Generator().manual_seed(42))
                val_set = ValSubset(val)
            else:
                train_set, val = random_split(
                    train_val, [380, 20],
                    generator=torch.Generator().manual_seed(42))
                val_set = ValSubset(val)
            self.datasets = {'train': train_set, 'val': val_set}
        else:
            downsample_ratio = train_args['downsample_ratio']
            self.datasets = {
                'train':
                Crowd(os.path.join(datargs['data_path'],
                                   datargs["train_path"]),
                      crop_size=train_args['crop_size'],
                      downsample_ratio=downsample_ratio,
                      method='train'),
                'val':
                Crowd(os.path.join(datargs['data_path'], datargs["val_path"]),
                      crop_size=train_args['crop_size'],
                      downsample_ratio=downsample_ratio,
                      method='val')
            }
        self.dataloaders = {
            x: DataLoader(
                self.datasets[x],
                collate_fn=(train_collate
                            if x == 'train' else default_collate),
                batch_size=(train_args['batch_size'] if x == 'train' else 1),
                shuffle=(x == 'train'),
                num_workers=train_args['num_workers'] * self.device_count,
                pin_memory=(x == 'train'))
            for x in ['train', 'val']
        }
        self.model = vgg16dres(map_location=self.device)
        self.model.to(self.device)
        # for p in self.model.features.parameters():
        #     p.requires_grad = True
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=train_args['lr'],
                                    weight_decay=train_args['weight_decay'],
                                    amsgrad=False)
        # for _, p in zip(range(10000), next(self.model.children()).children()):
        #     p.requires_grad = False
        #     print("freeze: ", p)
        # print(self.optimizer.param_groups[0])
        self.start_epoch = 0
        self.ot_loss = OT_Loss(train_args['crop_size'], downsample_ratio,
                               train_args['norm_cood'], self.device,
                               self.logger, train_args['num_of_iter_in_ot'],
                               train_args['reg'])
        self.tv_loss = nn.L1Loss(reduction='none').to(self.device)
        self.mse = nn.MSELoss().to(self.device)
        self.mae = nn.L1Loss().to(self.device)
        self.save_list = Save_Handle(max_num=1)
        self.best_mae = np.inf
        self.best_mse = np.inf
        self.best_count = 0
        if train_args['resume']:
            self.logger.add_text(
                'log/train',
                'loading pretrained model from ' + train_args['resume'], 0)
            suf = train_args['resume'].rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(train_args['resume'], self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(
                    checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
                self.best_count = checkpoint['best_count']
                self.best_mae = checkpoint['best_mae']
                self.best_mse = checkpoint['best_mse']
                print(self.best_mae, self.best_mse, self.best_count)
            elif suf == 'pth':
                self.model.load_state_dict(
                    torch.load(train_args['resume'], self.device))
        else:
            self.logger.add_text('log/train', 'random initialization', 0)
        img_cnts = {
            'val_image_count': len(self.dataloaders['val']),
            'train_image_count': len(self.dataloaders['train'])
        }
        self.logger.add_hparams(
            {**self.train_args, **img_cnts},
            {'best_mse': np.inf, 'best_mae': np.inf, 'best_count': 0},
            run_name='hparams')
Example #8
    dataset_name = args.dataset.lower()
    if dataset_name == 'qnrf':
        from datasets.crowd import Crowd_qnrf as Crowd
    elif dataset_name == 'nwpu':
        from datasets.crowd import Crowd_nwpu as Crowd
    elif dataset_name == 'sha' or dataset_name == 'shb':
        from datasets.crowd import Crowd_sh as Crowd
    else:
        raise NotImplementedError

    dataset = Crowd(os.path.join(args.data_path,
                                 DATASET_PATHS[dataset_name]["val_path"]),
                    crop_size=args.crop_size,
                    downsample_ratio=DOWNSAMPLE_RATIO,
                    method='val')
    dataloader = torch.utils.data.DataLoader(dataset,
                                             1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    if args.pred_density_map_path:
        import cv2
        if not os.path.exists(args.pred_density_map_path):
            os.makedirs(args.pred_density_map_path)

    model = vgg19()
    model.to(device)
Example #9
def get_loader(train_path, test_path, downsample_ratio, args):
    train_img_paths = []
    for img_path in glob.glob(os.path.join(train_path, '*.jpg')):
        train_img_paths.append(img_path)
    bg_img_paths = []
    for bg_img_path in glob.glob(
            os.path.join('/home/datamining/Datasets/CrowdCounting/bg/',
                         '*.jpg')):
        bg_img_paths.append(bg_img_path)
    if args.use_bg:
        train_img_paths += bg_img_paths
    test_img_paths = []
    for img_path in glob.glob(os.path.join(test_path, '*.jpg')):
        test_img_paths.append(img_path)

    if args.loss == 'bayes':
        bayes_dataset = Crowd(train_path, args.crop_size, downsample_ratio,
                              False, 'train')
        train_loader = torch.utils.data.DataLoader(bayes_dataset,
                                                   collate_fn=bayes_collate,
                                                   batch_size=args.bs,
                                                   shuffle=True,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(Crowd(
            test_path, args.crop_size, downsample_ratio, False, 'val'),
                                                  batch_size=1,
                                                  num_workers=8,
                                                  pin_memory=True)
    elif args.bn > 0:
        bn_dataset = PatchSet(train_img_paths,
                              transform,
                              c_size=(args.crop_size, args.crop_size),
                              crop_n=args.random_crop_n)
        train_loader = torch.utils.data.DataLoader(bn_dataset,
                                                   collate_fn=my_collate_fn,
                                                   shuffle=True,
                                                   batch_size=args.bs,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(RawDataset(
            test_img_paths,
            transform,
            mode='one',
            downsample_ratio=downsample_ratio,
            test=True),
                                                  shuffle=False,
                                                  batch_size=1,
                                                  pin_memory=True)
    else:
        single_dataset = RawDataset(train_img_paths, transform, args.crop_mode,
                                    downsample_ratio, args.crop_scale)
        train_loader = torch.utils.data.DataLoader(single_dataset,
                                                   shuffle=True,
                                                   batch_size=1,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(RawDataset(
            test_img_paths,
            transform,
            mode='one',
            downsample_ratio=downsample_ratio,
            test=True),
                                                  shuffle=False,
                                                  batch_size=1,
                                                  pin_memory=True)

    return train_loader, test_loader, train_img_paths, test_img_paths
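A usage sketch for the loader factory above (hypothetical paths; args must carry the fields the function reads, e.g. loss, use_bg, bs, bn, crop_size, crop_mode, crop_scale, random_crop_n):

train_loader, test_loader, train_paths, test_paths = get_loader(
    '/path/to/train', '/path/to/test', downsample_ratio=8, args=args)
print('{} train images, {} test images'.format(
    len(train_paths), len(test_paths)))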
Example #10
from torchvision import transforms
import os
import torch
import numpy as np
from datasets.crowd import Crowd
from models.vgg import vgg19

patch = 1
# test_dir = '/home/teddy/crowd_data/Sh_A_Train_Val_NP/val'
test_dir = '/home/teddy/UCF-Train-Val-Test/test'
model_path = '/home/teddy/iccv-reproduce-new/1029-225909/best_model_3.pth'
root_dir = '/home/teddy/iccv-reproduce-new/1029-230053'
vis_dir = os.path.join(root_dir, 'vis_test')
if not os.path.exists(vis_dir):
    os.makedirs(vis_dir)

datasets = Crowd(test_dir, 512, 8, is_gray=False, method='val')

dataloader = torch.utils.data.DataLoader(datasets,
                                         1,
                                         shuffle=False,
                                         num_workers=8,
                                         pin_memory=True)

os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # set visible GPU
device = torch.device('cuda')

model = vgg19()
model.to(device)
# model.load_state_dict(torch.load(model_path, device)['model_state_dict'])
model.load_state_dict(torch.load(model_path, device))
epoch_minus = []
Example #11
    parser.add_argument('--data-dir',
                        default='/home/teddy/UCF-Train-Val-Test',
                        help='training data directory')
    parser.add_argument('--save-dir',
                        default='/home/teddy/vgg',
                        help='model directory')
    parser.add_argument('--sub-name',
                        default='/home/teddy/vgg',
                        help='submission name')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    datasets = Crowd(args.data_dir, 544, 8, is_gray=False, method='test')
    dataloader = torch.utils.data.DataLoader(datasets,
                                             1,
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=False)

    model_param_path = os.path.join(args.save_dir, 'best_model.pth')

    saveroot = os.path.join(args.save_dir, 'sub')
    if not os.path.exists(saveroot):
        os.makedirs(saveroot)
    crowd_counting(dataloader, model_param_path,
                   os.path.join(saveroot, args.sub_name))
Example #12
def get_loader(train_path, test_path, args):
    train_img_paths = []
    for img_path in glob.glob(os.path.join(train_path, '*.jpg')):
        train_img_paths.append(img_path)
    test_img_paths = []
    for img_path in glob.glob(os.path.join(test_path, '*.jpg')):
        test_img_paths.append(img_path)

    if 'bayes' in args.loss:
        bayes_dataset = Crowd(train_path, args.crop_scale, args.downsample,
                              False, 'train')
        train_loader = torch.utils.data.DataLoader(bayes_dataset,
                                                   collate_fn=bayes_collate,
                                                   batch_size=args.bs,
                                                   shuffle=True,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(Crowd(
            test_path, args.crop_scale, args.downsample, False, 'val'),
                                                  batch_size=1,
                                                  num_workers=8,
                                                  pin_memory=True)
    elif args.bn > 0:
        bn_dataset = PatchSet(train_img_paths,
                              transform,
                              c_size=(args.crop_scale, args.crop_scale),
                              crop_n=args.random_crop_n)
        train_loader = torch.utils.data.DataLoader(bn_dataset,
                                                   collate_fn=my_collate_fn,
                                                   shuffle=True,
                                                   batch_size=args.bs,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(CrowdDataset(
            test_img_paths,
            transform,
            mode='one',
            downsample_ratio=args.downsample,
            test=True),
                                                  shuffle=False,
                                                  batch_size=1,
                                                  pin_memory=True)
    else:
        single_dataset = CrowdDataset(train_img_paths, transform,
                                      args.crop_mode, args.downsample,
                                      args.crop_scale)
        train_loader = torch.utils.data.DataLoader(single_dataset,
                                                   shuffle=True,
                                                   batch_size=1,
                                                   num_workers=8,
                                                   pin_memory=True)
        test_loader = torch.utils.data.DataLoader(CrowdDataset(
            test_img_paths,
            transform,
            mode='one',
            downsample_ratio=args.downsample,
            test=True),
                                                  shuffle=False,
                                                  batch_size=1,
                                                  pin_memory=True)

    return train_loader, test_loader, train_img_paths, test_img_paths
Example #13
    dataset_name = args['dataset'].lower()
    if dataset_name == 'qnrf':
        from datasets.crowd import Crowd_qnrf as Crowd
    elif dataset_name == 'nwpu':
        from datasets.crowd import Crowd_nwpu as Crowd
    elif dataset_name == 'sha' or dataset_name == 'shb':
        from datasets.crowd import Crowd_sh as Crowd
    elif dataset_name[:3] == 'ucf':
        from datasets.crowd import Crowd_ucf as Crowd
    else:
        raise NotImplementedError
    # TODO: solve deleted checkpoint file issue
    dataset = Crowd(os.path.join(args['data_path'], args["val_path"]),
                    crop_size=crop_size,
                    downsample_ratio=8,
                    method='val')
    dataloader = torch.utils.data.DataLoader(dataset,
                                             1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)
    time_str = datetime.strftime(datetime.now(), '%m%d-%H%M%S')
    log_dir = os.path.join('runs', 'test_res', args['dataset'], time_str)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logger = SummaryWriter(log_dir)
    create_image = args['pred_density_map']

    model = ddm(map_location=device)
    # model = v(map_location=device)