Example #1
def init_data_loaders(args, use_gpu=True):
    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

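    # Training-time augmentation: Random2DTranslation enlarges the image and takes a random
    # crop, followed by a random horizontal flip and ImageNet-statistics normalization.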
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False


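    # RandomIdentitySampler draws N identities with K instances each,
    # so every training batch contains N * K tracklets.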
    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=args.seq_len, sample='random',transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='random', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='random', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    return dataset, trainloader, queryloader, galleryloader
Example #2
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'cent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

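    # Joint supervision: label-smoothed cross-entropy for identity classification plus center
    # loss, each with its own optimizer (Adam for the model, SGD for the class centers).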
    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_cent = CenterLoss(num_classes=dataset.num_train_pids,
                                feat_dim=model.feat_dim,
                                use_gpu=use_gpu)

    optimizer_model = torch.optim.Adam(model.parameters(),
                                       lr=args.lr,
                                       weight_decay=args.weight_decay)
    optimizer_cent = torch.optim.SGD(criterion_cent.parameters(),
                                     lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_cent, optimizer_model,
              optimizer_cent, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0: scheduler.step()

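        # Evaluate every eval_step epochs and always at the final epoch.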
        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Example #3
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    
    # set a learning rate 
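    # lr = lr_factor * 10**(-lr_base); when lr_factor is -1, a random factor in [0, 1)
    # is drawn, which gives a simple random search over the learning rate.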
    if args.lr_factor == -1:
        args.lr_factor = random()
    args.lr = args.lr_factor * 10**-args.lr_base
    #print(f"Choose learning rate {args.lr}")

    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    #assert torch.distributed.is_available()
    #print("Initializing DDP by nccl-tcp({}) rank({}) world_size({})".format(args.init_method, args.rank, args.world_size))
    #dist.init_process_group(backend='nccl', init_method=args.init_method, rank=args.rank, world_size=args.world_size)
        
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset, root=args.root)

    # Data augmentation
    spatial_transform_train = [
                ST.Scale((args.height, args.width), interpolation=3),
                ST.RandomHorizontalFlip(),
                ST.ToTensor(),
                ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]
    spatial_transform_train = ST.Compose(spatial_transform_train)

    temporal_transform_train = TT.TemporalRandomCrop(size=args.seq_len, stride=args.sample_stride)
    #temporal_transform_train = TT.TemporalRandomCropPick(size=args.seq_len, stride=args.sample_stride)

    spatial_transform_test = ST.Compose([
                ST.Scale((args.height, args.width), interpolation=3),
                ST.ToTensor(),
                ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
    temporal_transform_test = TT.TemporalBeginCrop(size=args.test_frames)

    pin_memory = True if use_gpu else False

    dataset_train = dataset.train
    if args.dataset == 'duke':
        dataset_train = dataset.train_dense
        print('process duke dataset')

    #sampler = RandomIdentitySampler(dataset_train, num_instances=args.num_instances)
    # The same identity sampler is used for all supported datasets (lsvid, mars, duke, ...).
    sampler = RandomIdentitySampler(dataset_train, num_instances=args.num_instances)
    trainloader = DataLoader(
        VideoDataset(dataset_train, spatial_transform=spatial_transform_train, temporal_transform=temporal_transform_train),
        sampler=sampler,
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )
    '''
    for batch_idx, (vids, pids, camids, img_paths) in enumerate(trainloader):
        print(batch_idx, pids, camids, img_paths)
        break
    return
    '''
    dataset_query = dataset.query
    dataset_gallery = dataset.gallery
    if args.dataset == 'lsvid':
        dataset_query = dataset.val_query
        dataset_gallery = dataset.val_gallery
        print('process lsvid dataset')
        
    queryloader = DataLoader(
        VideoDataset(dataset_query, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )

    galleryloader = DataLoader(
        VideoDataset(dataset_gallery, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )
    
    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, use_gpu=use_gpu, num_classes=dataset.num_train_pids, loss={'xent', 'htri'})
    #print(model)
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])

    criterion_xent = nn.CrossEntropyLoss() 
    criterion_htri = TripletLoss(margin=args.margin, distance=args.distance, use_gpu=use_gpu)
    criterion_htri_c = TripletInterCamLoss(margin=args.margin, distance=args.distance, use_gpu=use_gpu)
    #criterion_htri_c = TripletWeightedInterCamLoss(margin=args.margin, distance=args.distance, use_gpu=use_gpu, alpha=args.cam_alpha)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        #model = model.cuda()
        #model = nn.parallel.DistributedDataParallel(model)

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")
    
    for epoch in range(start_epoch, args.max_epoch):
        #print("Set sampler seed to {}".format(args.seed*epoch))
        #sampler.set_seed(args.seed*epoch)
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, criterion_htri_c, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()
        
        if (epoch+1) >= args.start_eval and (epoch+1) % args.eval_step == 0 or epoch == 0:
            print("==> Test")
            with torch.no_grad():
                rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best: 
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
Example #4
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        VideoDataset(dataset.train,
                     seq_len=args.seq_len,
                     sample='random',
                     transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    if args.arch == 'resnet503d':
        model = resnet3d.resnet50(num_classes=dataset.num_train_pids,
                                  sample_width=args.width,
                                  sample_height=args.height,
                                  sample_duration=args.seq_len)
        if not os.path.exists(args.pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(
                args.pretrained_model))
        print("Loading checkpoint from '{}'".format(args.pretrained_model))
        checkpoint = torch.load(args.pretrained_model)
        state_dict = {}
        for key in checkpoint['state_dict']:
            if 'fc' in key: continue
            state_dict[key.partition("module.")[2]] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    else:
        model = models.init_model(name=args.arch,
                                  num_classes=dataset.num_train_pids,
                                  loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

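    # Combined objective: label-smoothed cross-entropy ('xent') plus a margin-based triplet loss ('htri').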
    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf
    if args.arch == 'resnet503d':
        torch.backends.cudnn.benchmark = False
    for epoch in range(start_epoch, args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))

        train(model, criterion_xent, criterion_htri, optimizer, trainloader,
              use_gpu)

        if args.stepsize > 0: scheduler.step()

        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1
            if is_best: best_rank1 = rank1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #5
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    
    # set a learning rate 
    #if args.lr_factor == -1:
    #    args.lr_factor = random()
    #args.lr = args.lr_factor * 10**-args.lr_base
    #print(f"Choose learning rate {args.lr}")

    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    #assert torch.distributed.is_available()
    #print("Initializing DDP by nccl-tcp({}) rank({}) world_size({})".format(args.init_method, args.rank, args.world_size))
    #dist.init_process_group(backend='nccl', init_method=args.init_method, rank=args.rank, world_size=args.world_size)
        
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset, root=args.root)

    # Data augmentation
    spatial_transform_train = [
                ST.Scale((args.height, args.width), interpolation=3),
                ST.RandomHorizontalFlip(),
                ST.ToTensor(),
                ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]
    spatial_transform_train = ST.Compose(spatial_transform_train)

    temporal_transform_train = TT.TemporalRandomCrop(size=args.seq_len, stride=args.sample_stride)
    #temporal_transform_train = TT.TemporalRandomCropPick(size=args.seq_len, stride=args.sample_stride)

    spatial_transform_test = ST.Compose([
                ST.Scale((args.height, args.width), interpolation=3),
                ST.ToTensor(),
                ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
    temporal_transform_test = TT.TemporalBeginCrop(size=args.test_frames)

    pin_memory = True if use_gpu else False

    dataset_train = dataset.train
    if args.dataset == 'duke':
        dataset_train = dataset.train_dense
        print('process duke dataset')

    #sampler = RandomIdentitySampler(dataset_train, num_instances=args.num_instances)
    if args.dataset == 'lsvid':
        sampler = RandomIdentityCameraSampler(dataset_train, num_instances=args.num_instances, num_cam=15)
    elif args.dataset == 'mars':
        sampler = RandomIdentityCameraSampler(dataset_train, num_instances=args.num_instances, num_cam=6)
    trainloader = DataLoader(
        VideoDataset(dataset_train, spatial_transform=spatial_transform_train, temporal_transform=temporal_transform_train),
        sampler=sampler,
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )
    '''
    for batch_idx, (vids, pids, camids, img_paths) in enumerate(trainloader):
        print(batch_idx, pids, camids, img_paths)
        break
    return
    '''
    dataset_query = dataset.query
    dataset_gallery = dataset.gallery
    if args.dataset == 'lsvid':
        dataset_query = dataset.val_query
        dataset_gallery = dataset.val_gallery
        print('process lsvid dataset')
        
    queryloader = DataLoader(
        VideoDataset(dataset_query, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )

    galleryloader = DataLoader(
        VideoDataset(dataset_gallery, spatial_transform=spatial_transform_test, temporal_transform=temporal_transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )
    
    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, use_gpu=use_gpu, num_classes=dataset.num_train_pids, loss={'xent', 'htri'}, transformer_num_heads=args.transformer_num_heads, transformer_num_layers=args.transformer_num_layers, attention_flatness=True)
    #print(model)
    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])

    criterion_xent = nn.CrossEntropyLoss() 
    criterion_flat = FlatnessLoss(reduction='batchmean', use_gpu=use_gpu)
    criterion_htri_c = TripletInterCamLoss(margin=args.margin, distance=args.distance, use_gpu=use_gpu)
    #criterion_htri_c = TripletWeightedInterCamLoss(margin=args.margin, distance=args.distance, use_gpu=use_gpu, alpha=args.cam_alpha)

    #optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
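    # Linear scaling rule: the base lr is scaled with the effective batch size
    # (train_batch * number of GPUs) relative to a reference batch size of 512.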
    linear_scaled_lr = args.lr * args.train_batch * len(args.gpu_devices.split(',')) / 512.0
    args.lr = linear_scaled_lr
Example #6
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )
    if args.arch == 'resnet503d':
        cudnn.benchmark = False

    print("Initializing model: {}".format(args.arch))
    if args.arch == 'resnet503d':
        model = resnet3d.resnet50(num_classes=dataset.num_train_pids,
                                  sample_width=args.width,
                                  sample_height=args.height,
                                  sample_duration=args.seq_len)
        if not os.path.exists(args.best_model):
            raise IOError("Can't find best model: {}".format(args.best_model))
        print("Loading checkpoint from '{}'".format(args.best_model))
        checkpoint = torch.load(args.best_model)
        state_dict = {}
        for key in checkpoint['state_dict']:
            state_dict[key] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    else:
        model = models.init_model(name=args.arch,
                                  num_classes=dataset.num_train_pids,
                                  loss={'xent', 'htri'})
        if not os.path.exists(args.best_model):
            raise IOError("Can't find best model: {}".format(args.best_model))
        print("Loading checkpoint from '{}'".format(args.best_model))
        checkpoint = torch.load(args.best_model)
        state_dict = {}
        for key in checkpoint['state_dict']:
            state_dict[key] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        # distmat = test(model, queryloader, galleryloader, args.pool, use_gpu)  # do not do this with the RNN model, otherwise it runs out of memory
        # if args.vis_ranked_res:
        #     visualize_ranked_results(
        #         distmat, dataset,
        #         save_dir=osp.join(args.save_dir, 'ranked_results'),
        #         topk=20,
        #     )

        return
Example #7
def main():
    args.save_dir = args.save_dir + '/' + args.arch

    torch.manual_seed(args.seed)
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    # add data to save_dir
    args.save_dir = args.save_dir + '_' + args.dataset + '_combined_multisteplr11'
    if args.pretrained_model is not None:
        args.save_dir = os.path.dirname(args.pretrained_model)

    if not osp.exists(args.save_dir):
        os.makedirs(args.save_dir)

    log_name = 'test.log' if args.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    print("Train Transforms: \n\
        Random2DTranslation, \n\
        RandomHorizontalFlip, \n\
        ToTensor, \n\
        normalize\
        ")

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        # T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        # T.RandomErasing(p=0.5, scale=(0.02, 0.4), ratio=(0.3, 3.3), value=[0.485, 0.456, 0.406])
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        VideoDataset(dataset.train, seq_len=args.seq_len,
                     sample=args.data_selection, transform=transform_train),
        sampler=RandomIdentitySampler(
            dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len,
                     sample='dense', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len,
                     sample='dense', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, seq_len=args.seq_len)

    # pretrained model loading
    if args.pretrained_model is not None:
        if not os.path.exists(args.pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(
                args.pretrained_model))
        print("Loading checkpoint from '{}'".format(args.pretrained_model))
        pretrained_state = torch.load(args.pretrained_model)['state_dict']
        print(len(pretrained_state), ' keys in pretrained model')

        current_model_state = model.state_dict()
        pretrained_state = {key: val
                            for key, val in pretrained_state.items()
                            if key in current_model_state and val.size() == current_model_state[key].size()}

        print(len(pretrained_state),
              ' keys in pretrained model are available in current model')
        current_model_state.update(pretrained_state)
        model.load_state_dict(current_model_state)

    print("Model size: {:.5f}M".format(sum(p.numel()
                                           for p in model.parameters())/1000000.0))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(
            optimizer, step_size=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf

    is_first_time = True
    for epoch in range(start_epoch, args.max_epoch):
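        # Rough ETA: average wall-clock time per completed epoch multiplied by the remaining epochs.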
        eta_seconds = (time.time() - start_time) * (args.max_epoch - epoch) / max(epoch, 1)
        eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
        print("==> Epoch {}/{} \teta {}".format(epoch+1, args.max_epoch, eta_str))

        train(model, criterion_xent, criterion_htri,
              optimizer, trainloader, use_gpu)

        if args.stepsize > 0:
            scheduler.step()

        rank1 = 'NA'
        mAP = 'NA'
        is_best = False

        if args.eval_step > 0 and (epoch+1) % args.eval_step == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            rank1, mAP = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1

        # save the model as required
        if (epoch+1) % args.save_step == 0:
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, args.save_prefix, 'model' + '.pth.tar-' + str(epoch+1)))

        # after the first epoch, silence the one-time prints
        if not is_first_time:
            utils.disable_all_print_once()
        is_first_time = False

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #8
def main():
    torch.manual_seed(1)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    print(args)
    # GPU / CPU
    device = torch.device('cuda')

    print("Initializing dataset")
    dataset = data_manager.init_dataset('../imdb/dataset_GEI', 'id_list.csv',
                                        args.cooperative)

    transform = transforms.Compose([
        transforms.RandomAffine(degrees=0, translate=(0.05, 0.02)),
        transforms.ToTensor()
    ])
    transform_test = transforms.Compose([transforms.ToTensor()])
    # trainLoader
    trainLoader = DataLoader(ImageDataset(dataset.train,
                                          sample='random',
                                          transform=transform),
                             sampler=RandomIdentitySampler(dataset.train,
                                                           num_instances=2),
                             batch_size=args.train_batch,
                             num_workers=args.workers)

    # test/val queryLoader
    # test/val galleryLoader
    test_probeLoader = DataLoader(ImageDataset(dataset.test_probe,
                                               sample='dense',
                                               transform=transform_test),
                                  shuffle=False,
                                  batch_size=args.test_batch,
                                  drop_last=False)

    test_galleryLoader = DataLoader(ImageDataset(dataset.test_gallery,
                                                 sample='dense',
                                                 transform=transform_test),
                                    shuffle=False,
                                    batch_size=args.test_batch,
                                    drop_last=False)
    model = models.model.ICDNet_group_mask_mask_early_8().to(device=device)
    #model = models.model.ICDNet_mask()
    #model= nn.DataParallel(model).cuda()
    #model = models.model.icdnet().to(device=device)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

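    # Loss functions passed to train(): contrastive, triplet, similarity, MSE, and cross-entropy (label) losses.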
    criterion_cont = OnlineContrastiveLoss(margin=3)
    #criterion_trip = OnlineTripletLoss(3)
    criterion_trip = TripletLoss(3)
    criterion_sim = OnlineSimLoss()
    criterion_l2 = nn.MSELoss()
    criterion_label = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 betas=(0.5, 0.999))
    #scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
    scheduler = lr_scheduler.MultiStepLR(optimizer, [140],
                                         gamma=0.1,
                                         last_epoch=-1)

    #checkpoint = torch.load('./save_group_mask_early8_ones2_0002_sa3_500l2_01label_resbottle_shift002_all190_coo0/ep87.pth.tar')
    #model.load_state_dict(checkpoint['state_dict'])
    start_time = time.time()
    best_rank1 = -np.inf
    #args.max_epoch = 1
    cont_iter = 1
    for epoch in range(args.start_epoch, args.max_epoch):
        print("==> {}/{}".format(epoch + 1, args.max_epoch))
        cont_iter = train(epoch, model, criterion_cont, criterion_trip,
                          criterion_sim, criterion_l2, criterion_label,
                          optimizer, scheduler, trainLoader, device, cont_iter)
        if cont_iter > 250000:
            break
        if True:
            print("=============> Test")
            test_f.write("iter" + str(cont_iter) + '\n')
            rank1, correct_rate = test(model, test_probeLoader,
                                       test_galleryLoader, device)
            writer.add_scalar("Test/rank1", rank1, epoch)
            writer.add_scalar("Test/correct", correct_rate, epoch)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
            if is_best:
                state_dict = model.state_dict()
                save_checkpoint(
                    {
                        'state_dict': state_dict,
                        'epoch': epoch,
                        'optimizer': optimizer.state_dict(),
                    }, is_best,
                    osp.join(args.save_dir,
                             'ep' + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #9
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test1.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    # Data augmentation

    spatial_transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    temporal_transform_test = None

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(VideoDataset(
        dataset.query,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                             batch_size=1,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=pin_memory,
                             drop_last=False)

    galleryloader = DataLoader(VideoDataset(
        dataset.gallery,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                               batch_size=1,
                               shuffle=False,
                               num_workers=0,
                               pin_memory=pin_memory,
                               drop_last=False)

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              use_gpu=use_gpu,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    model.eval()
    with torch.no_grad():
        evaluation(model, args, queryloader, galleryloader, use_gpu)
Example #10
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))

    dataset = data_manager.init_dataset(root=args.root,
                                        name=args.dataset,
                                        cls_sample=args.cls_sample)
    # print(dataset.train)
    # print(1)
    # Build the transform pipeline for preprocessing image data:
    # T.Random2DTranslation: random crop (enlarge the image, then crop randomly)
    # T.RandomHorizontalFlip: random horizontal flip with a given probability
    # T.ToTensor: convert a PIL image or numpy array [0, 255] to a tensor [0.0, 1.0]
    # T.Normalize: normalize the tensor image with mean and std; mean[ , , ] holds one value per channel
    transform_train = T.Compose([
        # T.RandomCrop(224),
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        # T.Resize(256),
        # T.CenterCrop(224),
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    m = dataset.train
    print(1)
    # DataLoader provides the queueing and worker processes
    # ImageDataset: returns (img, pid, camid)
    # RandomIdentitySampler: defines the strategy for drawing samples from the dataset
    # num_workers: number of worker subprocesses
    # print(dataset.train)
    trainloader = DataLoader(
        AGE_Gender_ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    testloader = DataLoader(
        AGE_Gender_ImageDataset(dataset.test, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.train_num_class,
                              loss={'xent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

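    # Two classification heads with separate cross-entropy losses for age and gender;
    # the score used for model selection below is gender accuracy * 100 minus age MAE.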
    # criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.train_num_class, use_gpu=use_gpu)
    age_criterion_xent = nn.CrossEntropyLoss()
    gender_criterion_xent = nn.CrossEntropyLoss()
    # optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, testloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    # best_rank1 = -np.inf
    best_score = 0
    best_MAE = 0
    best_gender_acc = 0
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, age_criterion_xent, gender_criterion_xent,
              optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0: scheduler.step()

        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            MAE, Gender_acc = test(model, testloader, use_gpu)
            Score = Gender_acc * 100 - MAE
            is_best = Score > best_score
            if is_best:
                best_score = Score
                best_MAE = MAE
                best_gender_acc = Gender_acc
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': Score,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print(
        "==> Best best_score(Gender_acc-MAE) {} |Gender_acc {}\t MAE {}|achieved at epoch {}"
        .format(best_score, best_gender_acc, best_MAE, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Example #11
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = False

    trainloader = DataLoader(
        VideoDataset(dataset.train,
                     seq_len=args.seq_len,
                     sample='random',
                     transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))

    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    start_time = time.time()
    print(start_time)

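    # This example only iterates over the training loader as a data-loading check;
    # no forward or backward pass is performed.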
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        print(batch_idx)
        print('x')
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        imgs, pids = Variable(imgs), Variable(pids)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #12
def main():
    runId = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, runId)
    if not os.path.exists(cfg.OUTPUT_DIR):
        os.mkdir(cfg.OUTPUT_DIR)
    print(cfg.OUTPUT_DIR)
    torch.manual_seed(cfg.RANDOM_SEED)
    random.seed(cfg.RANDOM_SEED)
    np.random.seed(cfg.RANDOM_SEED)
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    use_gpu = torch.cuda.is_available() and cfg.MODEL.DEVICE == "cuda"
    if not cfg.EVALUATE_ONLY:
        sys.stdout = Logger(osp.join(cfg.OUTPUT_DIR, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(cfg.OUTPUT_DIR, 'log_test.txt'))

    print("==========\nConfigs:{}\n==========".format(cfg))

    if use_gpu:
        print("Currently using GPU {}".format(cfg.MODEL.DEVICE_ID))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(cfg.RANDOM_SEED)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(cfg.DATASETS.NAME))

    dataset = data_manager.init_dataset(root=cfg.DATASETS.ROOT_DIR,
                                        name=cfg.DATASETS.NAME)
    print("Initializing model: {}".format(cfg.MODEL.NAME))

    if cfg.MODEL.ARCH == 'video_baseline':
        torch.backends.cudnn.benchmark = False
        model = models.init_model(name=cfg.MODEL.ARCH,
                                  num_classes=625,
                                  pretrain_choice=cfg.MODEL.PRETRAIN_CHOICE,
                                  last_stride=cfg.MODEL.LAST_STRIDE,
                                  neck=cfg.MODEL.NECK,
                                  model_name=cfg.MODEL.NAME,
                                  neck_feat=cfg.TEST.NECK_FEAT,
                                  model_path=cfg.MODEL.PRETRAIN_PATH)

    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    transform_train = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TRAIN),
        T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
        T.Pad(cfg.INPUT.PADDING),
        T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.RandomErasing(probability=cfg.INPUT.RE_PROB,
                        mean=cfg.INPUT.PIXEL_MEAN)
    ])
    transform_test = T.Compose([
        T.Resize(cfg.INPUT.SIZE_TEST),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    pin_memory = True if use_gpu else False

    cfg.DATALOADER.NUM_WORKERS = 0

    trainloader = DataLoader(VideoDataset(
        dataset.train,
        seq_len=cfg.DATASETS.SEQ_LEN,
        sample=cfg.DATASETS.TRAIN_SAMPLE_METHOD,
        transform=transform_train,
        dataset_name=cfg.DATASETS.NAME),
                             sampler=RandomIdentitySampler(
                                 dataset.train,
                                 num_instances=cfg.DATALOADER.NUM_INSTANCE),
                             batch_size=cfg.SOLVER.SEQS_PER_BATCH,
                             num_workers=cfg.DATALOADER.NUM_WORKERS,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(VideoDataset(
        dataset.query,
        seq_len=cfg.DATASETS.SEQ_LEN,
        sample=cfg.DATASETS.TEST_SAMPLE_METHOD,
        transform=transform_test,
        max_seq_len=cfg.DATASETS.TEST_MAX_SEQ_NUM,
        dataset_name=cfg.DATASETS.NAME),
                             batch_size=cfg.TEST.SEQS_PER_BATCH,
                             shuffle=False,
                             num_workers=cfg.DATALOADER.NUM_WORKERS,
                             pin_memory=pin_memory,
                             drop_last=False)

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     seq_len=cfg.DATASETS.SEQ_LEN,
                     sample=cfg.DATASETS.TEST_SAMPLE_METHOD,
                     transform=transform_test,
                     max_seq_len=cfg.DATASETS.TEST_MAX_SEQ_NUM,
                     dataset_name=cfg.DATASETS.NAME),
        batch_size=cfg.TEST.SEQS_PER_BATCH,
        shuffle=False,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=pin_memory,
        drop_last=False,
    )

    if cfg.MODEL.SYN_BN:
        if use_gpu:
            model = nn.DataParallel(model)
        if cfg.SOLVER.FP_16:
            model = apex.parallel.convert_syncbn_model(model)
        model.cuda()

    start_time = time.time()
    xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids)
    tent = TripletLoss(cfg.SOLVER.MARGIN)

    optimizer = make_optimizer(cfg, model)

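    # WarmupMultiStepLR ramps the learning rate up over the warmup iterations and then
    # decays it by GAMMA at each milestone in SOLVER.STEPS.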
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                  cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_ITERS,
                                  cfg.SOLVER.WARMUP_METHOD)
    # metrics = test(model, queryloader, galleryloader, cfg.TEST.TEMPORAL_POOL_METHOD, use_gpu)
    no_rise = 0
    best_rank1 = 0
    start_epoch = 0
    for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCHS):
        # if no_rise == 10:
        #     break
        scheduler.step()
        print("noriase:", no_rise)
        print("==> Epoch {}/{}".format(epoch + 1, cfg.SOLVER.MAX_EPOCHS))
        print("current lr:", scheduler.get_lr()[0])

        train(model, trainloader, xent, tent, optimizer, use_gpu)
        if cfg.SOLVER.EVAL_PERIOD > 0 and (
            (epoch + 1) % cfg.SOLVER.EVAL_PERIOD == 0 or
            (epoch + 1) == cfg.SOLVER.MAX_EPOCHS):
            print("==> Test")

            metrics = test(model, queryloader, galleryloader,
                           cfg.TEST.TEMPORAL_POOL_METHOD, use_gpu)
            rank1 = metrics[0]
            if rank1 > best_rank1:
                best_rank1 = rank1
                no_rise = 0
            else:
                no_rise += 1
                continue

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            torch.save(
                state_dict,
                osp.join(
                    cfg.OUTPUT_DIR, "rank1_" + str(rank1) + '_checkpoint_ep' +
                    str(epoch + 1) + '.pth'))
            # best_p = osp.join(cfg.OUTPUT_DIR, "rank1_" + str(rank1) + '_checkpoint_ep' + str(epoch + 1) + '.pth')

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #13
0
def main():
    torch.manual_seed(args.seed)  # seed the CPU RNG so that results are reproducible
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices  # select which GPUs the code may use
    use_gpu = torch.cuda.is_available()  # True if the current environment supports CUDA, otherwise False
    if args.use_cpu:
        use_gpu = False

    if not args.evaluate:  # if we are not evaluating we are training, so write the training log; otherwise write the test log
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))  # 打印所有参数

    if use_gpu:  # when a GPU is used, report which one was selected
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True  # enabling this at startup gives a small speedup at essentially no extra cost
        torch.cuda.manual_seed_all(args.seed)  # seed the GPU RNGs so that results are reproducible
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)  # initialize the dataset, loaded via data_manager.py

    # import transforms as T.
    # T.Compose chains several transforms together.
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),  # with some probability, enlarge the image to (1 + 1/8) of the target size, then take a random crop
        T.RandomHorizontalFlip(),  # randomly flip the PIL image horizontally with probability 0.5
        T.ToTensor(),  # convert a ``PIL Image`` or ``numpy.ndarray`` to a tensor
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # normalize the tensor image with the given mean and standard deviation:
        # input[channel] = (input[channel] - mean[channel]) / std[channel]
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),  # resize the input PIL image to the given size
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # With pin_memory=True the tensors are created in page-locked (pinned) host memory, which makes copying them to GPU memory faster.
    pin_memory = True if use_gpu else False

    # DataLoader combines a dataset and a sampler and provides single- or multi-process iteration over the dataset.
    trainloader = DataLoader(
        # VideoDataset: dataset for video-based person re-id (training split, sequence length, random sampling, with data augmentation)
        VideoDataset(dataset.train, seq_len=args.seq_len, sample='random', transform=transform_train),
        # Randomly sample N identities, then K instances per identity, so the batch size is N * K.
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch,  # training batch size
        num_workers=args.workers,  # number of worker processes
        pin_memory=pin_memory,
        drop_last=True,
    )  # set drop_last=True to drop the final incomplete batch when the dataset size is not divisible by the batch size

    queryloader = DataLoader(
        VideoDataset(dataset.query, seq_len=args.seq_len, sample='dense', transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,  # set to True to reshuffle the data at every epoch (default: False)
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,  # with False, the last batch is simply smaller when the dataset size is not divisible by the batch size
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery, seq_len=args.seq_len, sample='dense', transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))  # 模型的初始化

    if args.arch == 'resnet503d':
        model = resnet3d.resnet50(num_classes=dataset.num_train_pids, sample_width=args.width,
                                  sample_height=args.height, sample_duration=args.seq_len)
        # raise an error if the pretrained model does not exist
        if not os.path.exists(args.pretrained_model):
            raise IOError("Can't find pretrained model: {}".format(args.pretrained_model))
        # load the pretrained model
        print("Loading checkpoint from '{}'".format(args.pretrained_model))
        checkpoint = torch.load(args.pretrained_model)
        state_dict = {}  # state dict: collect the parameters loaded from the checkpoint file
        for key in checkpoint['state_dict']:
            if 'fc' in key:
                continue
            state_dict[key.partition("module.")[2]] = checkpoint['state_dict'][key]
        model.load_state_dict(state_dict, strict=False)
    else:
        model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

    # Loss functions: xent is the softmax cross-entropy loss, htri is the triplet loss.
    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)
    # Optimizer: Adam
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # stepsize: step-wise learning-rate decay (> 0 means enabled)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
        # StepLR decays the learning rate of every parameter group by gamma once per step_size epochs.
    start_epoch = args.start_epoch  # manual epoch numbering (useful when restarting)

    if use_gpu:
        model = nn.DataParallel(model).cuda()  # multi-GPU training
        # nn.DataParallel wraps a module and replicates it across the available GPUs.

    if args.evaluate:  # evaluate alone is of little use here; code should be added to load a saved checkpoint before testing
        print("Evaluate only")  # run evaluation
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        return

    start_time = time.time()  # start time
    best_rank1 = -np.inf  # initialize to negative infinity
    if args.arch == 'resnet503d':  # when the model is resnet503d,
        torch.backends.cudnn.benchmark = False

    for epoch in range(start_epoch, args.max_epoch):  # train from start_epoch up to max_epoch
        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
        
        train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
        
        if args.stepsize > 0:
            scheduler.step()

        # Run an evaluation when eval_step > 0 and (epoch + 1) is divisible by eval_step, or when (epoch + 1) equals max_epoch.
        if args.eval_step > 0 and (epoch+1) % args.eval_step == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1  # True if the new rank-1 beats the best so far
            if is_best:
                best_rank1 = rank1

            if use_gpu:
                state_dict = model.module.state_dict()
                # state_dict() returns a dictionary containing the module's whole state, including parameters and buffers.
            else:
                state_dict = model.state_dict()
            # save the checkpoint file
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))
    # elapsed time
    elapsed = round(time.time() - start_time)  # round() returns the value rounded to the nearest integer
    elapsed = str(datetime.timedelta(seconds=elapsed))  # a timedelta represents the difference between two points in time
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #14
0
def attr_main():
    runId = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    args.save_dir = os.path.join(args.save_dir, runId)
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    print(args.save_dir)
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger('./log_train_' + runId + '.txt')
    else:
        sys.stdout = Logger('./log_test_' + runId + '.txt')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))

    dataset = data_manager.init_dataset(name=args.dataset,
                                        min_seq_len=args.seq_len,
                                        attr=True)
    args.attr_lens = dataset.attr_lens
    args.columns = dataset.columns
    print("Initializing model: {}".format(args.arch))
    # if args.arch == "resnet50ta_attr" or args.arch == "resnet50ta_attr_newarch":
    if args.arch == 'attr_resnet503d':
        model = models.init_model(name=args.arch,
                                  attr_lens=args.attr_lens,
                                  model_type=args.model_type,
                                  num_classes=dataset.num_train_pids,
                                  sample_width=args.width,
                                  sample_height=args.height,
                                  sample_duration=args.seq_len)
        torch.backends.cudnn.benchmark = False
    else:
        model = models.init_model(name=args.arch,
                                  attr_lens=args.attr_lens,
                                  model_type=args.model_type)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    if args.dataset == "duke":
        transform_train = T.Compose([
            T.Random2DTranslation(args.height, args.width),
            # T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        transform_test = T.Compose([
            T.Resize((args.height, args.width)),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    elif args.dataset == "mars":
        transform_train = T.Compose([
            T.Random2DTranslation(args.height, args.width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        transform_test = T.Compose([
            T.Resize((args.height, args.width)),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        VideoDataset(dataset.train,
                     seq_len=args.seq_len,
                     sample='random',
                     transform=transform_train,
                     attr=True,
                     attr_loss=args.attr_loss,
                     attr_lens=args.attr_lens),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = VideoDataset(dataset.query + dataset.gallery,
                               seq_len=args.seq_len,
                               sample='dense',
                               transform=transform_test,
                               attr=True,
                               attr_loss=args.attr_loss,
                               attr_lens=args.attr_lens)

    start_epoch = args.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    start_time = time.time()
    if args.arch == 'resnet503d':
        torch.backends.cudnn.benchmark = False

    # print("Run attribute pre-training")
    if args.attr_loss == "cropy":
        criterion = nn.CrossEntropyLoss()
    elif args.attr_loss == "mse":
        criterion = nn.MSELoss()

    if args.evaluate:
        print("Evaluate only")
        model_root = "/data/chenzy/models/mars/2019-02-26_21-02-13"
        model_paths = []
        for m in os.listdir(model_root):
            if m.endswith("pth"):
                model_paths.append(m)

        model_paths = sorted(model_paths,
                             key=lambda a: float(a.split("_")[1]),
                             reverse=True)
        # model_paths = ['rank1_2.8755379380596713_checkpoint_ep500.pth']
        for m in model_paths:
            model_path = os.path.join(model_root, m)
            print(model_path)

            old_weights = torch.load(model_path)
            new_weights = model.module.state_dict()
            for k in new_weights:
                if k in old_weights:
                    new_weights[k] = old_weights[k]
            model.module.load_state_dict(new_weights)
            avr_acc = attr_test(model, criterion, queryloader, use_gpu)
            # break
        # test(model, queryloader, galleryloader, args.pool, use_gpu)
        return
    if use_gpu:
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            model.module.parameters()),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            model.parameters()),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    # avr_acc = attr_test(model, criterion, queryloader, use_gpu)
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)

    best_avr = 0
    no_rise = 0
    for epoch in range(start_epoch, args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        attr_train(model, criterion, optimizer, trainloader, use_gpu)

        if args.stepsize > 0: scheduler.step()

        if args.eval_step > 0 and ((epoch + 1) % (args.eval_step) == 0 or
                                   (epoch + 1) == args.max_epoch):
            avr_acc = attr_test(model, criterion, queryloader, use_gpu)
            print("avr", avr_acc)
            if avr_acc > best_avr:
                no_rise = 0
                print("==> Test")
                best_avr = avr_acc
                if use_gpu:
                    state_dict = model.module.state_dict()
                else:
                    state_dict = model.state_dict()
                torch.save(
                    state_dict,
                    osp.join(
                        args.save_dir, "avr_" + str(avr_acc) +
                        '_checkpoint_ep' + str(epoch + 1) + '.pth'))
            else:
                no_rise += 1
                print("no_rise:", no_rise)
                if no_rise > 20:
                    break
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #15
0
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    train_dataset = data_manager.init_dataset(root=args.root,
                                              name='json',
                                              phase='train')
    valid_dataset = data_manager.init_dataset(root=args.root,
                                              name='json',
                                              phase='valid')

    test_dataset = data_manager.init_dataset(root=args.root,
                                             name='json',
                                             phase='test')

    test_mask = test_dataset.mask

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(JsonDataset(train_dataset),
                             num_workers=4,
                             batch_size=args.train_batch,
                             pin_memory=pin_memory,
                             drop_last=True)

    validloader = DataLoader(JsonDataset(valid_dataset),
                             num_workers=4,
                             batch_size=args.test_batch,
                             pin_memory=pin_memory,
                             drop_last=True)

    testloader = DataLoader(JsonDataset(test_dataset),
                            num_workers=4,
                            batch_size=args.test_batch,
                            pin_memory=pin_memory,
                            drop_last=True)

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, use_gpu=use_gpu)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    model.init_weights()
    criterion_label = LabelLoss()

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        print("Loading pretrained weights from '{}'".format(args.load_weights))
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)

    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}".format(start_epoch))

    # if use_gpu:
    # str_ids = args.gpu_devices.split(',')
    # gpu_ids = []
    # for str_id in str_ids:
    #     id = int(str_id)
    #     if id >= 0:
    #         gpu_ids.append(id)
    # model = nn.DataParallel(model, gpu_ids)
    device = torch.device(
        'cuda' if use_gpu and torch.cuda.is_available() else 'cpu')
    model.to(device)

    if args.evaluate:
        print("Evaluate only")
        start_evaluate_time = time.time()
        test_thetas = evaluate(model, testloader, use_gpu, args.test_batch,
                               test_mask)
        # test_thetas = evaluate(model, validloader, use_gpu, args.test_batch, test_mask)
        evaluate_time = time.time() - start_evaluate_time
        print('Evaluate: {} secs'.format(evaluate_time))
        with open("auto_sample.csv", "r") as csvfiler:
            with open("test_thetas.csv", "w") as csvfilew:
                reader = csv.reader(csvfiler)
                for item in reader:
                    if reader.line_num == 1:
                        writer = csv.writer(csvfilew)
                        writer.writerow(['test_id', 'result'])
                        continue
                    writer = csv.writer(csvfilew)
                    writer.writerow(
                        [item[0],
                         str(test_thetas[reader.line_num - 2])])
        # writer.writerows(map(lambda x: [x], test_thetas))
        return

    start_time = time.time()
    train_time = 0
    best_label_loss = np.inf
    best_epoch = 0
    #
    # print("==> Test")
    # label_loss = test(model, validloader, criterion_label, use_gpu, args.test_batch)
    # print("test label loss RMES() is {}".format(label_loss))
    #
    # print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_label, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        # save model every epoch
        if (epoch + 1) % args.save_step == 0:
            print("==> Now save epoch {} \'s model".format(epoch + 1))
            # if use_gpu:
            #     state_dict = model.state_dict() #  module.
            # else:
            state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch
            }, False, osp.join(args.save_dir, 'checkpoint_latest.pth'))

        # test model every eval_step
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or \
                (epoch + 1) == args.max_epoch:
            print("==> Test")
            label_loss = test(model, validloader, criterion_label, use_gpu,
                              args.test_batch)
            is_best = label_loss < best_label_loss

            if is_best:
                best_label_loss = label_loss
                best_epoch = epoch + 1

            # if use_gpu:
            #     state_dict = model.state_dict()
            # else:
            state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'label_loss': label_loss,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) +
                         '.pth'))  # .pth.tar

    print("==> Best Label Loss {:.3}, achieved at epoch {}".format(
        best_label_loss, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
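
The evaluate branch above pairs each test id from auto_sample.csv with the corresponding predicted theta and writes test_thetas.csv. A tidier sketch of the same rewrite (single writer, header skipped explicitly; the file names are taken from the script) could be:

import csv

def write_predictions(test_thetas, src="auto_sample.csv", dst="test_thetas.csv"):
    # Pair each test id from the source file (skipping its header row) with the
    # prediction at the same position and write them out as test_id,result.
    with open(src, "r") as fin, open(dst, "w", newline="") as fout:
        reader = csv.reader(fin)
        writer = csv.writer(fout)
        writer.writerow(["test_id", "result"])
        next(reader)  # skip the header row of the source file
        for i, row in enumerate(reader):
            writer.writerow([row[0], str(test_thetas[i])])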
Example #16
0
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset, root=args.root)

    # Data augmentation
    spatial_transform_train = [
        ST.Scale((args.height, args.width), interpolation=3),
        ST.RandomHorizontalFlip(),
        ST.ToTensor(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]
    spatial_transform_train = ST.Compose(spatial_transform_train)

    temporal_transform_train = TT.TemporalRandomCrop(size=args.seq_len,
                                                     stride=args.sample_stride)

    spatial_transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    temporal_transform_test = TT.TemporalBeginCrop(size=args.test_frames)

    pin_memory = True if use_gpu else False

    dataset_train = dataset.train
    if args.dataset == 'duke':
        dataset_train = dataset.train_dense
        print('process duke dataset')

    trainloader = DataLoader(
        VideoDataset(dataset_train,
                     spatial_transform=spatial_transform_train,
                     temporal_transform=temporal_transform_train),
        sampler=RandomIdentitySampler(dataset_train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(VideoDataset(
        dataset.query,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                             batch_size=args.test_batch,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=pin_memory,
                             drop_last=False)

    galleryloader = DataLoader(VideoDataset(
        dataset.gallery,
        spatial_transform=spatial_transform_test,
        temporal_transform=temporal_transform_test),
                               batch_size=args.test_batch,
                               shuffle=False,
                               num_workers=args.workers,
                               pin_memory=pin_memory,
                               drop_last=False)

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              use_gpu=use_gpu,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print(model)

    criterion_xent = nn.CrossEntropyLoss()
    criterion_htri = TripletLoss(margin=args.margin,
                                 distance=args.distance,
                                 use_gpu=use_gpu)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):

        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (epoch + 1) >= args.start_eval and (
                epoch + 1) % args.eval_step == 0 or epoch == 0:
            print("==> Test")
            with torch.no_grad():
                rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
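
This example separates spatial and temporal augmentation; TT.TemporalRandomCrop picks which frame indices of a tracklet are fed to the spatial transforms. Its exact behaviour is not shown here, so the sketch below is only an assumed version of a strided temporal crop (padding of short tracklets by repetition is a guess):

import random

def temporal_random_crop(frame_indices, size=4, stride=8):
    # Take `size` frame indices spaced `stride` apart starting at a random
    # position; if the tracklet is too short, repeat it (assumed behaviour).
    needed = (size - 1) * stride + 1
    if len(frame_indices) >= needed:
        start = random.randint(0, len(frame_indices) - needed)
        return [frame_indices[start + i * stride] for i in range(size)]
    padded = list(frame_indices)
    while len(padded) < size:
        padded += list(frame_indices)
    return padded[:size]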
Example #17
0
def main():
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    use_gpu = torch.cuda.is_available()
    np.random.seed(args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    cudnn.benchmark = True

    print("Initializing train dataset {}".format(args.train_dataset))
    train_dataset = data_manager.init_dataset(name=args.train_dataset)
    print("Initializing test dataset {}".format(args.test_dataset))
    test_dataset = data_manager.init_dataset(name=args.test_dataset)

    # print("Initializing train dataset {}".format(args.train_dataset, split_id=6))
    # train_dataset = data_manager.init_dataset(name=args.train_dataset)
    # print("Initializing test dataset {}".format(args.test_dataset, split_id=6))
    # test_dataset = data_manager.init_dataset(name=args.test_dataset)

    transform_train = T.Compose([
        T.Resize([args.height, args.width]),
        T.RandomHorizontalFlip(),
        T.Pad(10),
        T.RandomCrop([args.height, args.width]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # random_snip  first_snip constrain_random evenly
    trainloader = DataLoader(
        VideoDataset(train_dataset.train,
                     seq_len=args.seq_len,
                     sample='constrain_random',
                     transform=transform_train),
        sampler=RandomIdentitySampler(train_dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(test_dataset.query,
                     seq_len=args.seq_len,
                     sample='evenly',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(test_dataset.gallery,
                     seq_len=args.seq_len,
                     sample='evenly',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=train_dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    print("load model {0} from {1}".format(args.arch, args.load_model))
    if args.load_model != '':
        pretrained_model = torch.load(args.load_model)
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v
            for k, v in pretrained_model['state_dict'].items()
            if k in model_dict
        }
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = pretrained_model['epoch'] + 1
        best_rank1 = pretrained_model['rank1']
    else:
        start_epoch = args.start_epoch
        best_rank1 = -np.inf

    criterion = dict()
    criterion['triplet'] = WeightedRegularizedTriplet()
    criterion['xent'] = CrossEntropyLabelSmooth(
        num_classes=train_dataset.num_train_pids)
    criterion['center'] = CenterLoss(num_classes=train_dataset.num_train_pids,
                                     feat_dim=512,
                                     use_gpu=True)
    print(criterion)

    optimizer = dict()
    optimizer['model'] = model.get_optimizer(args)
    optimizer['center'] = torch.optim.SGD(criterion['center'].parameters(),
                                          lr=0.5)

    scheduler = lr_scheduler.MultiStepLR(optimizer['model'],
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    print(model)
    model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model,
                       queryloader,
                       galleryloader,
                       args.pool,
                       use_gpu,
                       return_distmat=True)
        return

    start_time = time.time()
    train_time = 0
    best_epoch = args.start_epoch
    print("==> Start training")
    for epoch in range(start_epoch, args.max_epoch):

        scheduler.step()
        print('Epoch', epoch, 'lr', scheduler.get_lr()[0])

        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (
                epoch + 1) % args.eval_step == 0 or (epoch +
                                                     1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
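
train() is not shown in this example, but with one optimizer for the model and a second one for the center-loss centers, one plausible shape of a single training step (the center-loss weight, the criterion call signatures, and the model returning (logits, features) are all assumptions) is:

def train_step(model, criterion, optimizer, imgs, pids, center_weight=0.0005):
    # Assumed combination of the three criteria built above; the model is
    # assumed to return (logits, features) and each criterion a scalar loss.
    outputs, features = model(imgs)
    loss = (criterion['xent'](outputs, pids)
            + criterion['triplet'](features, pids)
            + center_weight * criterion['center'](features, pids))
    optimizer['model'].zero_grad()
    optimizer['center'].zero_grad()
    loss.backward()
    optimizer['model'].step()
    optimizer['center'].step()
    return loss.item()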
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False
    # arch1 should use salience, and arch2 should use semantic parsing
    use_salience = models.use_salience(name=args.arch1)
    use_parsing = models.use_parsing(name=args.arch2)
    save_rank = True if args.save_rank else False
    use_re_ranking = True if args.use_re_ranking else False

    queryloader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test,
                     use_salience=use_salience,
                     use_parsing=use_parsing,
                     salience_base_path=dataset.salience_query_dir,
                     parsing_base_path=dataset.parsing_query_dir),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test,
                     use_salience=use_salience,
                     use_parsing=use_parsing,
                     salience_base_path=dataset.salience_gallery_dir,
                     parsing_base_path=dataset.parsing_gallery_dir),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch1))
    model1 = models.init_model(name=args.arch1,
                               num_classes=dataset.num_train_pids,
                               loss={'xent', 'htri'},
                               mid_layer=args.mid_layer)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model1.parameters()) / 1000000.0))
    print("Initializing model: {}".format(args.arch2))
    model2 = models.init_model(name=args.arch2,
                               num_classes=dataset.num_train_pids,
                               loss={'xent', 'htri'},
                               mid_layer=args.mid_layer)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model2.parameters()) / 1000000.0))

    print("Loading checkpoint from '{}'".format(args.resume1))
    checkpoint = torch.load(args.resume1)
    model1.load_state_dict(checkpoint['state_dict'])
    start_epoch = checkpoint['epoch']
    print("Resuming model 1 from epoch {}".format(start_epoch + 1))

    print("Loading checkpoint from '{}'".format(args.resume2))
    checkpoint = torch.load(args.resume2)
    model2.load_state_dict(checkpoint['state_dict'])
    start_epoch = checkpoint['epoch']
    print("Resuming model 2 from epoch {}".format(start_epoch + 1))

    if use_gpu:
        model1 = nn.DataParallel(model1).cuda()
        model2 = nn.DataParallel(model2).cuda()

    test(model1,
         model2,
         queryloader,
         galleryloader,
         use_gpu,
         use_salience=use_salience,
         use_parsing=use_parsing,
         save_dir=args.save_dir,
         epoch=-1,
         save_rank=save_rank,
         use_re_ranking=use_re_ranking)
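
test() here evaluates two backbones together (a salience-based arch1 and a parsing-based arch2). The fusion itself is not shown; a common way to ensemble two re-id models is to average their query-gallery distance matrices, sketched below under that assumption:

import torch

def ensemble_distmat(q_feats1, g_feats1, q_feats2, g_feats2):
    # Average the Euclidean query-gallery distance matrices of the two models.
    dist1 = torch.cdist(q_feats1, g_feats1, p=2)
    dist2 = torch.cdist(q_feats2, g_feats2, p=2)
    return 0.5 * (dist1 + dist2)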