Example #1
def main(args=None):

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    
    # data generator
    dataset = data_manager.init_imgreid_dataset(
        dataset_path=args.dataset_path, name=args.dataset
    )
    print('dataset.train', len(dataset.train), dataset.num_train_pids)
    train_generator = DataGenerator(dataset.train[0], 
                                    dataset.train[1], 
                                    batch_size=args.batch_size, 
                                    num_classes=dataset.num_train_pids, 
                                    shuffle=True, 
                                    target_size=(args.height, args.width), 
                                    learn_region=args.learn_region)

    # create the callbacks
    callbacks = create_callbacks(args)

    # create model
    model = modellib.HACNN(mode='training', 
                           num_classes=dataset.num_train_pids, 
                           batch_size=args.batch_size, 
                           learn_region=args.learn_region).model

    # save model-summary
    if args.save_summary:
        plot_model(model, show_shapes=True, to_file='model.png')
        model.summary()
        
    # load saved weight
    if args.snapshot:
        model.load_weights(args.snapshot, by_name=True, skip_mismatch=False)

    # compile model
    if args.learn_region:
        model.compile(
            loss='categorical_crossentropy',
            # loss_weights={"classifier_global": 0.5, "classifier_local": 0.5},
            metrics=['accuracy'], 
            optimizer=keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999),
        )
    else:
        model.compile(
            # loss=losses.cross_entropy_label_smooth(num_classes=dataset.num_train_pids),
            loss='categorical_crossentropy',
            metrics=['accuracy'], 
            optimizer=keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999),
        )

    model.fit_generator(
        generator=train_generator,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
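
# The create_callbacks() helper used above is not shown in this example. A minimal
# sketch with standard Keras callbacks follows; the attribute names args.save_dir and
# args.checkpoint_period are assumptions, not part of the original script.
import os
import keras

def create_callbacks(args):
    callbacks = [
        # periodically save weights (hypothetical args.checkpoint_period, default 1)
        keras.callbacks.ModelCheckpoint(
            filepath=os.path.join(args.save_dir, 'hacnn_ep{epoch:03d}.h5'),
            save_weights_only=True,
            period=getattr(args, 'checkpoint_period', 1),
        ),
        # log training curves for TensorBoard
        keras.callbacks.TensorBoard(log_dir=os.path.join(args.save_dir, 'logs')),
    ]
    return callbacks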
Example #2
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root, name=args.dataset, split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled, cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train,
                     use_lmdb=args.use_lmdb, lmdb_path=dataset.train_lmdb_path),
        batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test,
                     use_lmdb=args.use_lmdb, lmdb_path=dataset.query_lmdb_path),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test,
                     use_lmdb=args.use_lmdb, lmdb_path=dataset.gallery_lmdb_path),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'xent'}, use_gpu=use_gpu)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.fixbase_epoch > 0:
        if hasattr(model, 'classifier') and isinstance(model.classifier, nn.Module):
            optimizer_tmp = init_optim(args.optim, model.classifier.parameters(), args.fixbase_lr, args.weight_decay)
        else:
            print("Warn: model has no attribute 'classifier' and fixbase_epoch is reset to 0")
            args.fixbase_epoch = 0

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        print("Loading pretrained weights from '{}'".format(args.load_weights))
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)

    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        rank1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    if args.fixbase_epoch > 0:
        print("Train classifier for {} epochs while keeping base network frozen".format(args.fixbase_epoch))

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch, model, criterion, optimizer_tmp, trainloader, use_gpu, freeze_bn=True)
            train_time += round(time.time() - start_train_time)

        del optimizer_tmp
        print("Now open all layers for training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)
        
        scheduler.step()
        
        if (epoch+1) > args.start_eval and args.eval_step > 0 and (epoch+1) % args.eval_step == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
            
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
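
# CrossEntropyLabelSmooth is imported from the project's loss module and is not
# defined in this snippet; a minimal sketch of the usual label-smoothing
# cross-entropy (epsilon=0.1 is an assumed default) would look roughly like this:
import torch
import torch.nn as nn

class CrossEntropyLabelSmooth(nn.Module):
    def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        # inputs: (batch, num_classes) logits; targets: (batch,) class indices
        log_probs = self.logsoftmax(inputs)
        onehot = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
        if self.use_gpu:
            onehot = onehot.cuda()
        # soften the one-hot targets: (1 - eps) * y + eps / K
        onehot = (1 - self.epsilon) * onehot + self.epsilon / self.num_classes
        return (-onehot * log_probs).mean(0).sum()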
Example #3
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train,
                     transform=transform_train,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.train_lmdb_path),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.query_lmdb_path),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.gallery_lmdb_path),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        if check_isfile(args.load_weights):
            checkpoint = torch.load(args.load_weights)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {
                k: v
                for k, v in pretrain_dict.items()
                if k in model_dict and model_dict[k].size() == v.size()
            }
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            print("Loaded pretrained weights from '{}'".format(
                args.load_weights))

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(
                args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model,
                       queryloader,
                       galleryloader,
                       use_gpu,
                       return_distmat=True)
        if args.vis_ranked_res:
            visualize_ranked_results(
                distmat,
                dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if ((epoch + 1) > args.start_eval and args.eval_step > 0
                and (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
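
# RandomIdentitySampler (used for the triplet branch above) comes from the
# project's sampler module; a minimal sketch that draws num_instances images per
# identity, assuming dataset.train is a list of (img_path, pid, camid) tuples:
from collections import defaultdict
import numpy as np
from torch.utils.data import Sampler

class RandomIdentitySampler(Sampler):
    def __init__(self, data_source, num_instances=4):
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_instances = num_instances

    def __iter__(self):
        ret = []
        for i in np.random.permutation(len(self.pids)):
            idxs = self.index_dic[self.pids[i]]
            # sample with replacement when an identity has too few images
            replace = len(idxs) < self.num_instances
            chosen = np.random.choice(idxs, size=self.num_instances, replace=replace)
            ret.extend(chosen.tolist())
        return iter(ret)

    def __len__(self):
        return len(self.pids) * self.num_instances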
Example #4
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        # T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainset = ImageDataset(dataset.train,
                            transform=transform_train,
                            use_lmdb=args.use_lmdb,
                            lmdb_path=dataset.train_lmdb_path)
    trainloader = DataLoader(
        trainset,
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryset = ImageDataset(dataset.query,
                            transform=transform_test,
                            use_lmdb=args.use_lmdb,
                            lmdb_path=dataset.query_lmdb_path)
    queryloader = DataLoader(
        queryset,
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryset = ImageDataset(dataset.gallery,
                              transform=transform_test,
                              use_lmdb=args.use_lmdb,
                              lmdb_path=dataset.gallery_lmdb_path)
    galleryloader = DataLoader(
        galleryset,
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    # summary(model, (3, 160, 64))

    criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                        use_gpu=use_gpu)
    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.fixbase_epoch > 0:
        if hasattr(model, 'classifier') and isinstance(model.classifier,
                                                       nn.Module):
            optimizer_tmp = init_optim(args.optim,
                                       model.classifier.parameters(),
                                       args.fixbase_lr, args.weight_decay)
        else:
            print(
                "Warn: model has no attribute 'classifier' and fixbase_epoch is reset to 0"
            )
            args.fixbase_epoch = 0

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        print("Loading pretrained weights from '{}'".format(args.load_weights))
        if torch.cuda.is_available():
            checkpoint = torch.load(args.load_weights)
        else:
            checkpoint = torch.load(args.load_weights, map_location='cpu')
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)

    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        rank1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    if args.fixbase_epoch > 0:
        print(
            "Train classifier for {} epochs while keeping base network frozen".
            format(args.fixbase_epoch))

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch,
                  model,
                  criterion,
                  optimizer_tmp,
                  trainloader,
                  use_gpu,
                  freeze_bn=True)
            train_time += round(time.time() - start_train_time)

        del optimizer_tmp
        print("Now open all layers for training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if ((epoch + 1) > args.start_eval and args.eval_step > 0
                and (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
        '''
        if use_gpu:
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()
            
        save_checkpoint({
            'state_dict': state_dict,
            'epoch': epoch,
        }, True, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))
        '''

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
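
# count_num_param() and save_checkpoint() are small utilities imported from the
# project; plausible minimal versions consistent with how they are called above
# ("Model size: ... M", save_checkpoint(state, is_best, fpath)) might be:
import os
import os.path as osp
import shutil
import torch

def count_num_param(model):
    # total number of parameters, reported in millions
    return sum(p.numel() for p in model.parameters()) / 1e6

def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
    if osp.dirname(fpath):
        os.makedirs(osp.dirname(fpath), exist_ok=True)
    torch.save(state, fpath)
    if is_best:
        # keep a copy of the best-performing weights next to the checkpoint
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))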
Example #5
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(name=args.dataset,
                                                dataset_dir=args.root,
                                                fore_dir=args.fore_dir)

    transform_train = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.RandomHorizontalFlip(),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ST.RandomErasing(0.5)
    ])

    transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset_hardSplit_seg(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids)
    print(model)

    criterion_xent = CrossEntropyLabelSmooth(use_gpu=use_gpu)
    criterion_htri = TripletLoss()
    criterion_mask = MaskLoss()
    criterion_split = HardSplitLoss()
    criterion_cluster = ClusterLoss()

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(
                args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):

        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, criterion_mask,
              criterion_split, criterion_cluster, optimizer, trainloader,
              use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if (epoch + 1) > args.start_eval and (
                epoch + 1) % args.eval_step == 0 or epoch == 0:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
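
# init_optim() maps the --optim string to a torch.optim optimizer in every example
# above; a minimal dispatcher consistent with the call sites (the supported names
# and the SGD/RMSprop momentum value are assumptions):
import torch

def init_optim(optim, params, lr, weight_decay):
    if optim == 'adam':
        return torch.optim.Adam(params, lr=lr, weight_decay=weight_decay)
    elif optim == 'sgd':
        return torch.optim.SGD(params, lr=lr, momentum=0.9, weight_decay=weight_decay)
    elif optim == 'rmsprop':
        return torch.optim.RMSprop(params, lr=lr, momentum=0.9, weight_decay=weight_decay)
    else:
        raise KeyError("Unsupported optimizer: {}".format(optim))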
Example #6
def main(args=None):

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    
    # data generator
    dataset = data_manager.init_imgreid_dataset(
        dataset_path=args.dataset_path, name=args.dataset
    )

    query_generator = DataGenerator(dataset.query[0],
                                    dataset.query[1], 
                                    camids=dataset.query[2], 
                                    batch_size=args.batch_size, 
                                    num_classes=dataset.num_query_pids, 
                                    target_size=(args.height, args.width), 
                                    learn_region=args.learn_region,
                                    shuffle=True,
                                    mode='inference')

    gallery_generator = DataGenerator(dataset.gallery[0],
                                      dataset.gallery[1],
                                      camids=dataset.gallery[2],
                                      batch_size=args.batch_size,
                                      num_classes=dataset.num_gallery_pids,
                                      target_size=(args.height, args.width),
                                      learn_region=args.learn_region,
                                      shuffle=True,
                                      mode='inference')

    model = modellib.HACNN(mode='inference', 
                           num_classes=dataset.num_query_pids, 
                           batch_size=args.batch_size, 
                           learn_region=args.learn_region).model

    load_weights(model, filepath=args.snapshot, by_name=True)
    
    '''
    img_path = '/Users/luke/Documents/ml_datasets/person_re_id/videotag_scene/dataset_7_lite/bounding_box_train/3434_0096.jpg'
    img = image.load_img(img_path, target_size=(args.height, args.width))
    img = image.img_to_array(img)
    
    output = model.predict(np.array([img]), verbose=1)
    print('output', np.array(output).shape)
    print('output', np.argmax(output[0]), np.argmax(output[1]))
    '''
    # evaluate
    qf, q_pids, q_camids = [], [], []
    for index in range(len(query_generator)):
        imgs, pids, camids = query_generator[index]
        # print('query', index)
        features = model.predict(imgs, verbose=0)
        # print('features', features)
        qf.append(features)
        q_pids.extend(pids)
        q_camids.extend(camids)
    qf = np.concatenate(qf, axis=0)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    # print(qf.shape)
        
    gf, g_pids, g_camids = [], [], []
    for index in range(len(gallery_generator)):
        # print('gallery', index)
        imgs, pids, camids = gallery_generator[index]
        features = model.predict(imgs, verbose=0)
        # print('features', features)
        gf.append(features)
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = np.concatenate(gf, axis=0)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)
    
    m, n = qf.shape[0], gf.shape[0]
    '''
    # qf = qf*0.001
    qf_pow = np.square(qf)
    qf_sum = np.sum(qf_pow, axis=1, keepdims=True)
    qf_ext = np.repeat(qf_sum, n, axis=1)

    # gf = gf*0.01
    gf_pow = np.square(gf)
    gf_sum = np.sum(gf_pow, axis=1, keepdims=True)
    gf_ext = np.repeat(gf_sum, m, axis=1)
    gf_ext_t = gf_ext.T
    distmat = qf_ext + gf_ext_t
    distmat = distmat + np.dot(qf, gf.T)*(-2.0)
    '''
    
    print("Compute pairwise euclidean distances")
    qf = np.expand_dims(qf, axis=1)
    qf = np.repeat(qf, n, axis=1)

    gf = np.expand_dims(gf, axis=0)
    gf = np.repeat(gf, m, axis=0)
    
    distmat = np.linalg.norm(qf - gf, axis=2, keepdims=True)
    distmat = np.squeeze(distmat, axis=2)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, dataset_type=args.dataset)
    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    ranks = [1, 5, 10, 20]
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))

    # mean distance over matching vs. non-matching query-gallery pairs
    correct_matched = (g_pids == q_pids[:, np.newaxis]).astype(np.int32)
    correct_dist_aver = np.sum(distmat * correct_matched) / np.sum(correct_matched)

    wrong_matched = (g_pids != q_pids[:, np.newaxis]).astype(np.int32)
    wrong_dist_aver = np.sum(distmat * wrong_matched) / np.sum(wrong_matched)

    print('Average distance of correct matches : ', correct_dist_aver)
    print('Average distance of wrong matches   : ', wrong_dist_aver)

    print("------------------")
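
# The repeat-based distance above materialises an (m, n, d) tensor; the
# commented-out block hints at the ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
# identity, which avoids that. A sketch of the same distmat computed that way,
# with qf and gf being the 2-D feature matrices before expand_dims/repeat:
import numpy as np

def pairwise_euclidean(qf, gf):
    sq = np.sum(np.square(qf), axis=1, keepdims=True)    # (m, 1)
    sg = np.sum(np.square(gf), axis=1, keepdims=True).T  # (1, n)
    sqdist = sq + sg - 2.0 * np.dot(qf, gf.T)            # (m, n) squared distances
    return np.sqrt(np.maximum(sqdist, 0.0))              # clamp tiny negatives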
Example #7
def main():
    torch.manual_seed(1)
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_devices
    use_gpu = torch.cuda.is_available()

    sys.stdout = Logger(config.save_dir, config.checkpoint_suffix,
                        config.evaluate)
    print("\n==========\nArgs:")
    config.print_parameter()
    print("==========\n")

    if use_gpu:
        print("Currently using GPU {}".format(config.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(1)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(config.dataset))
    dataset = data_manager.init_imgreid_dataset(name=config.dataset,
                                                root=config.data_root)

    transform_train = T.Compose([
        T.Random2DTranslation(config.height, config.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=data_mean, std=data_std),
    ])

    transform_test = T.Compose([
        T.Resize((config.height, config.width)),
        T.ToTensor(),
        T.Normalize(mean=data_mean, std=data_std),
    ])

    pin_memory = True if use_gpu else False

    # train_batch_sampler = BalancedBatchSampler(dataset.train, n_classes=8, n_samples=8)
    # train_batch_sampler = CCLBatchSampler(dataset.train, n_classes=n_classes, n_samples=n_samples)
    # train_batch_sampler = CCLBatchSamplerV2(dataset.train, n_classes=n_classes, pos_samp_cnt=pos_samp_cnt,
    #                                         neg_samp_cnt=neg_samp_cnt, each_cls_max_cnt=each_cls_max_cnt)
    train_batch_sampler = ClassSampler(dataset.train,
                                       sample_cls_cnt=config.sample_cls_cnt,
                                       each_cls_cnt=config.each_cls_cnt)

    # trainloader = DataLoader(
    #     ImageDataset(dataset.train, transform=transform_train),
    #     batch_sampler=train_batch_sampler, batch_size=args.train_batch,
    #     shuffle=True, num_workers=args.workers, pin_memory=pin_memory, drop_last=True
    # )

    trainloader = DataLoader(ImageDatasetWCL(dataset,
                                             data_type='train',
                                             merge_h=256,
                                             merge_w=256,
                                             mean_std=[data_mean, data_std]),
                             batch_sampler=train_batch_sampler,
                             num_workers=config.workers,
                             pin_memory=pin_memory)

    queryloader = DataLoader(
        ImageDatasetWCL(dataset.query,
                        data_type='query',
                        merge_h=256,
                        merge_w=256,
                        mean_std=[data_mean, data_std]),
        batch_size=config.test_batch,
        shuffle=False,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDatasetWCL(dataset.gallery,
                        data_type='gallery',
                        merge_h=256,
                        merge_w=256,
                        mean_std=[data_mean, data_std]),
        batch_size=config.test_batch,
        shuffle=False,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    if config.dataset == 'vehicleid':
        train_query_loader = None
        train_gallery_loader = None
    else:
        train_query_loader = DataLoader(
            ImageDatasetWCL(dataset.train_query,
                            data_type='train_query',
                            merge_h=256,
                            merge_w=256,
                            mean_std=[data_mean, data_std]),
            batch_size=config.test_batch,
            shuffle=False,
            num_workers=config.workers,
            pin_memory=pin_memory,
            drop_last=False,
        )

        train_gallery_loader = DataLoader(
            ImageDatasetWCL(dataset.train_gallery,
                            data_type='train_gallery',
                            merge_h=256,
                            merge_w=256,
                            mean_std=[data_mean, data_std]),
            batch_size=config.test_batch,
            shuffle=False,
            num_workers=config.workers,
            pin_memory=pin_memory,
            drop_last=False,
        )

    print("Initializing model: {}".format(config.arch))
    model = init_model(name=config.arch,
                       num_classes=dataset.num_train_pids,
                       loss_type=config.loss_type)
    print("Model size: {:.3f} M".format(count_num_param(model)))

    if config.loss_type == 'xent':
        criterion = [nn.CrossEntropyLoss(), nn.CrossEntropyLoss()]
    elif config.loss_type == 'xent_triplet':
        criterion = XentTripletLoss(
            margin=config.margin,
            triplet_selector=RandomNegativeTripletSelector(
                margin=config.margin),
            each_cls_cnt=config.each_cls_cnt,
            n_class=config.sample_cls_cnt)
    elif config.loss_type == 'xent_tripletv2':
        criterion = XentTripletLossV2(
            margin=config.margin,
            triplet_selector=RandomNegativeTripletSelectorV2(
                margin=config.margin),
            each_cls_cnt=config.each_cls_cnt,
            n_class=config.sample_cls_cnt)
        # criterion = XentTripletLossV2(margin=0.04, triplet_selector=RandomNegativeTripletSelectorV2(margin=0.04),
        #                               each_cls_cnt=config.each_cls_cnt, n_class=config.sample_cls_cnt)
        # criterion = XentGroupTripletLossV2(margin=0.8, triplet_selector=AllTripletSelector(margin=0.8),
        #                               each_cls_cnt=config.each_cls_cnt, n_class=config.sample_cls_cnt)
    else:
        raise KeyError("Unsupported loss: {}".format(config.loss_type))

    optimizer = init_optim(config.optim, model.parameters(), config.lr,
                           config.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=config.stepsize,
                                         gamma=config.gamma)

    if config.resume is not None:
        if check_isfile(config.resume):
            checkpoint = torch.load(config.resume)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {
                k: v
                for k, v in pretrain_dict.items()
                if k in model_dict and model_dict[k].size() == v.size()
            }
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            config.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            if 'mAP' in checkpoint:
                mAP = checkpoint['mAP']
            else:
                mAP = 0
            print("Loaded checkpoint from '{}'".format(config.resume))
            print("- start_epoch: {}\n- rank1: {}\n- mAP: {}".format(
                config.start_epoch, rank1, mAP))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if config.evaluate:
        print("Evaluate only")
        test_model(model, queryloader, galleryloader, train_query_loader,
                   train_gallery_loader, use_gpu, config.test_batch,
                   config.loss_type, config.euclidean_distance_loss)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_map = 0
    best_epoch = 0

    for epoch in range(config.start_epoch, config.max_epoch):
        print("==> Start training")
        start_train_time = time.time()
        scheduler.step()
        print('epoch:', epoch, 'lr:', scheduler.get_lr())
        train(epoch, model, criterion, optimizer, trainloader,
              config.loss_type, config.print_freq)
        train_time += round(time.time() - start_train_time)

        if (epoch >= config.start_eval and config.eval_step > 0 and epoch % config.eval_step == 0) \
           or (epoch + 1) == config.max_epoch:
            print("==> Test")
            rank1, mAP = test_model(model, queryloader, galleryloader,
                                    train_query_loader, train_gallery_loader,
                                    use_gpu, config.test_batch,
                                    config.loss_type,
                                    config.euclidean_distance_loss)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_map = mAP
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'mAP': mAP,
                    'epoch': epoch + 1,
                },
                is_best,
                use_gpu_suo=False,
                fpath=osp.join(
                    config.save_dir, 'checkpoint_ep' + str(epoch + 1) +
                    config.checkpoint_suffix + '.pth.tar'))

    print("==> Best Rank-1 {:.2%}, mAP {:.2%}, achieved at epoch {}".format(
        best_rank1, best_map, best_epoch))
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
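
# Logger (assigned to sys.stdout in every example) tees console output to a log
# file; a minimal sketch, assuming construction from a file path as in the earlier
# examples (example #7 passes extra arguments that this sketch does not cover):
import os
import os.path as osp
import sys

class Logger(object):
    def __init__(self, fpath=None, mode='w'):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            if osp.dirname(fpath):
                os.makedirs(osp.dirname(fpath), exist_ok=True)
            self.file = open(fpath, mode)

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()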
Example #8
import os.path as osp

import numpy as np
import scipy.io
import torch
from torch.utils.data import DataLoader

import data_manager
import transforms as T
from data_manager.dataset_loader import ImageDatasetWGL, ImageDatasetWCL

# dataset = 'vehicleid'
# dataset = 'veri776wgl'
dataset = 'veri776wcl'
data_root = '/home/gysj/pytorch-workspace/pytorch-study/data'
batch_size = 1

print("Initializing dataset {}".format(dataset))
dataset = data_manager.init_imgreid_dataset(name=dataset, root=data_root)

# mean_std = scipy.io.loadmat(osp.join(data_root, 'data_mean_std/veri776_train_image_mean_std.mat'))
mean_std = scipy.io.loadmat(
    osp.join(data_root, 'data_mean_std/veri776wcl_train_image_mean_std.mat'))
# data_mean = mean_std['mean'][0].astype(np.float64)
# data_std = mean_std['std'][0].astype(np.float64)
data_mean = mean_std['mean'][0].astype(np.float32)
data_std = mean_std['std'][0].astype(np.float32)

transform_test = T.Compose([
    # T.Resize((100, 100)),
    # T.ToTensor(),
    T.Normalize(mean=data_mean, std=data_std),
])
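
# A possible continuation of this snippet: wrap the query split in a DataLoader,
# mirroring the query loader of example #7. The batch_size/num_workers values are
# arbitrary, and ImageDatasetWCL is assumed to take mean_std directly as above.
queryloader = DataLoader(
    ImageDatasetWCL(dataset.query,
                    data_type='query',
                    merge_h=256,
                    merge_w=256,
                    mean_std=[data_mean, data_std]),
    batch_size=batch_size,
    shuffle=False,
    num_workers=4,
    pin_memory=torch.cuda.is_available(),
    drop_last=False,
)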