Example no. 1
def main(check_model, mm=1):
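    # mm selects the evaluation mode: with mm == 1, test() returns (cost, recall, precision); otherwise it also returns a delay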

    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )
    transform_train = T.Compose([
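        # Random2DTranslation upscales the image, then takes a random (height, width) crop for augmentation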
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    '''trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )'''
    query = ImageDataset(dataset.query, transform=transform_test)
    if args.dataset == 'beijing':
        query = ImageDataset_forBeijing(dataset.query,
                                        transform=transform_test)

    #gallery = ImageDatasetLazy(dataset.gallery, transform=transform_test)
    gallery = ImageDataset(dataset.gallery, transform=transform_test)
    if args.dataset == 'beijing':
        gallery = ImageDataset_forBeijing(dataset.gallery,
                                          transform=transform_test)

    if args.evaluate:
        #print("Evaluate only")
        if mm == 1:
            cost, recall, precision = test(query, gallery, check_model, mm)
            return cost, recall, precision
        else:
            cost, recall, precision, delay = test(query, gallery, check_model,
                                                  mm)
            return cost, recall, precision, delay
Example no. 2
def main():
    torch.manual_seed(args.seed)
    #os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    print('whether to evaluate:', args.evaluate)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        # T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

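    # pinned (page-locked) host memory speeds up host-to-GPU copies from the DataLoader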
    pin_memory = True if use_gpu else False
    trainloader = DataLoader(
        ImageDataset(dataset.train_data,
                     dataset.train_label,
                     transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    testloader = DataLoader(
        ImageDataset(dataset.test_data,
                     dataset.test_label,
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=40,
                              loss={'xent'},
                              use_gpu=use_gpu)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion = nn.MultiLabelSoftMarginLoss()

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)
        print("Loaded pretrained weights from '{}'".format(args.load_weights))

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, testloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0: scheduler.step()

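        # evaluate every eval_step epochs once past start_eval, and always at the final epoch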
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (
                epoch + 1) % args.eval_step == 0 or (epoch +
                                                     1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, testloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'hamming_score': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Hamming Score {:.3%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        # T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainset = ImageDataset(dataset.train,
                            transform=transform_train,
                            use_lmdb=args.use_lmdb,
                            lmdb_path=dataset.train_lmdb_path)
    trainloader = DataLoader(
        trainset,
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryset = ImageDataset(dataset.query,
                            transform=transform_test,
                            use_lmdb=args.use_lmdb,
                            lmdb_path=dataset.query_lmdb_path)
    queryloader = DataLoader(
        queryset,
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryset = ImageDataset(dataset.gallery,
                              transform=transform_test,
                              use_lmdb=args.use_lmdb,
                              lmdb_path=dataset.gallery_lmdb_path)
    galleryloader = DataLoader(
        galleryset,
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    # summary(model, (3, 160, 64))

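    # label-smoothed cross-entropy over the training identities regularizes the ID classifier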
    criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids,
                                        use_gpu=use_gpu)
    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

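    # optional warm-up: train only the classifier head for fixbase_epoch epochs while the base network stays frozen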
    if args.fixbase_epoch > 0:
        if hasattr(model, 'classifier') and isinstance(model.classifier,
                                                       nn.Module):
            optimizer_tmp = init_optim(args.optim,
                                       model.classifier.parameters(),
                                       args.fixbase_lr, args.weight_decay)
        else:
            print(
                "Warn: model has no attribute 'classifier' and fixbase_epoch is reset to 0"
            )
            args.fixbase_epoch = 0

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        print("Loading pretrained weights from '{}'".format(args.load_weights))
        if torch.cuda.is_available():
            checkpoint = torch.load(args.load_weights)
        else:
            checkpoint = torch.load(args.load_weights, map_location='cpu')
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {
            k: v
            for k, v in pretrain_dict.items()
            if k in model_dict and model_dict[k].size() == v.size()
        }
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)

    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        rank1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    if args.fixbase_epoch > 0:
        print(
            "Train classifier for {} epochs while keeping base network frozen".
            format(args.fixbase_epoch))

        for epoch in range(args.fixbase_epoch):
            start_train_time = time.time()
            train(epoch,
                  model,
                  criterion,
                  optimizer_tmp,
                  trainloader,
                  use_gpu,
                  freeze_bn=True)
            train_time += round(time.time() - start_train_time)

        del optimizer_tmp
        print("Now open all layers for training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

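        # periodic evaluation: past start_eval and at an eval_step boundary, or unconditionally at the last epoch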
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (
                epoch + 1) % args.eval_step == 0 or (epoch +
                                                     1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
        '''
        if use_gpu:
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()
            
        save_checkpoint({
            'state_dict': state_dict,
            'epoch': epoch,
        }, True, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))
        '''

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Example no. 4
            y_maxrange = new_height - self.height
            # compute the random crop start point on the X and Y axes
            x_start = int(round(random.uniform(0, x_maxrange)))
            y_start = int(round(random.uniform(0, y_maxrange)))
            # perform the crop
            img = resize_img.crop((x_start, y_start, x_start + self.width,
                                   y_start + self.height))
        return img


if __name__ == '__main__':
    from dataset_manager import Market1501
    from dataset_loader import ImageDataset

    dataset = Market1501()
    train_loader = ImageDataset(dataset.train)
    plt.figure()
    j = 1
    # take the first two images from the training set, transform them, and display them with matplotlib
    for batch_id, (img, pid, cid) in enumerate(train_loader):
        if batch_id < 2:
            transform = Random2DTransform(64, 64, 0.5)
            img_t = transform(img)
            img_t = np.array(img_t)
            plt.subplot(1, 2, j)
            plt.imshow(img)  # show the original image
            plt.savefig('random2dtransform_demo.png')  # savefig() needs a filename; this one is illustrative
            j = j + 1
            plt.subplot(1, 2, j)
            plt.imshow(img_t)  # show the transformed image
            plt.show()
if __name__ == '__main__':
    # use the locally aligned model
    model = ReIDNet(num_classes=751, loss={'softmax', 'metric'}, aligned=True)
    # load the best saved parameters of the aligned model
    model.load_state_dict(
        torch.load('./model/param/aligned_trihard_net_params_best.pth'))
    # specify the dataset
    dataset = Market1501()
    # transform shared by the query and gallery data
    transform = T.Compose([
        T.Resize((height, width)),  # unify the image size
        T.ToTensor(),  # convert the image to a tensor
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # normalize with fixed parameters
    ])
    # DataLoader for the query set
    query_data_loader = DataLoader(
        ImageDataset(dataset.query, transform=transform),  # custom dataset using the transform above
        batch_size=batch_size,  # number of image tensors in one batch
        drop_last=True,  # drop the last batch if it is incomplete
    )
    # DataLoader for the gallery set
    gallery_data_loader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform),  # custom dataset using the transform above
        batch_size=batch_size,  # number of image tensors in one batch
        drop_last=True,  # drop the last batch if it is incomplete
    )
    # call test() to evaluate the algorithm's performance
    test(model, query_data_loader, gallery_data_loader)
Example no. 6
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'cent'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
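    # CenterLoss keeps learnable class centers, hence the separate SGD optimizer (optimizer_cent) below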
    criterion_cent = CenterLoss(num_classes=dataset.num_train_pids,
                                feat_dim=model.feat_dim,
                                use_gpu=use_gpu)

    optimizer_model = torch.optim.Adam(model.parameters(),
                                       lr=args.lr,
                                       weight_decay=args.weight_decay)
    optimizer_cent = torch.optim.SGD(criterion_cent.parameters(),
                                     lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_cent, optimizer_model,
              optimizer_cent, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0: scheduler.step()

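        # evaluate every eval_step epochs, and always at the last epoch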
        if args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
def main():
    use_gpu = torch.cuda.is_available()
    if args.use_cpu:
        use_gpu = False

    if use_gpu:
        pin_memory = True
    else:
        pin_memory = False

    if not args.evaluate:   # If not test model
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))   # Log file is saved in log_train.txt
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Uncomment when gpu is used
    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    dataset = data_manager.init_img_dataset(root=args.root, name=args.dataset)

    # 3 dataloader: train, query, gallery
    # Train needs augmentation
    transform_train = T.Compose([
        transform.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
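        # PK sampling: each batch holds num_instances images per identity, as the metric (TriHard) loss requires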
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'softmax', 'metric'})
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))

    criterion_class = nn.CrossEntropyLoss()
    criterion_metric = TriHardLoss(margin=args.margin)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Learning Rate Decay
    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    # Uncomment when gpu is used
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only!")
        test(model, queryloader, galleryloader, use_gpu)
        return 0

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Strat training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_class, criterion_metric, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.stepsize > 0:
            scheduler.step()

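        # note the precedence: (past start_eval and at an eval_step boundary) or final epoch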
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
            epoch + 1) == args.max_epoch:
            torch.save(model, 'logs/model_metric.pkl')
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
        use_lmdb=args.use_lmdb,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train,
                     transform=transform_train,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.train_lmdb_path),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.query_lmdb_path),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test,
                     use_lmdb=args.use_lmdb,
                     lmdb_path=dataset.gallery_lmdb_path),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        if check_isfile(args.load_weights):
            checkpoint = torch.load(args.load_weights)
            pretrain_dict = checkpoint['state_dict']
            model_dict = model.state_dict()
            pretrain_dict = {
                k: v
                for k, v in pretrain_dict.items()
                if k in model_dict and model_dict[k].size() == v.size()
            }
            model_dict.update(pretrain_dict)
            model.load_state_dict(model_dict)
            print("Loaded pretrained weights from '{}'".format(
                args.load_weights))

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(
                args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        distmat = test(model,
                       queryloader,
                       galleryloader,
                       use_gpu,
                       return_distmat=True)
        if args.vis_ranked_res:
            visualize_ranked_results(
                distmat,
                dataset,
                save_dir=osp.join(args.save_dir, 'ranked_results'),
                topk=20,
            )
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

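        # evaluate periodically after start_eval, and always at the final epoch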
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (
                epoch + 1) % args.eval_step == 0 or (epoch +
                                                     1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Example no. 9
# img_std = [0.229, 0.224, 0.225]
img_mean = [0.3464, 0.3639, 0.3659]
img_std = [0.2262, 0.2269, 0.2279]
epoch = 100
alpha = 0.4
batch_size = 64
num_workers = 16
# accumulation_steps = 2  # update once every 8 batches; the effective batch size is batch_size * accumulate_grad_batches

dataset = train_dataset(train_file, img_path)
random.shuffle(dataset)
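# the first 76800 shuffled samples form the training split; the remainder is held out for testing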
trainset = dataset[:76800]
testset = dataset[76800:]
# testset = train_dataset(test_file, img_path)
trans = Transform(width, height, img_mean, img_std)
trainLoader = DataLoader(ImageDataset(trainset, transform=trans),
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=num_workers,
                         pin_memory=True)
# mean, std = calculate_mean_and_std(trainLoader, len(trainset))
# print('mean and std:', mean, std)

testLoader = DataLoader(ImageDataset(testset, transform=trans),
                        batch_size=batch_size,
                        num_workers=num_workers,
                        pin_memory=True)

# net = EmbeddingNet()
net = resnet18(pretrained=False)
net = nn.DataParallel(net).cuda()
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop([args.height, args.width]),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        torchvision.transforms.RandomErasing(p=0.5,
                                             scale=(0.02, 0.4),
                                             ratio=(0.3, 3.33),
                                             value=(0.4914, 0.4822, 0.4465))
        # T.RandomErasing(probability=0.5, sh=0.4, mean=(0.4914, 0.4822, 0.4465)),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler2(dataset.train,
                                       batch_size=args.train_batch,
                                       num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    #embed()

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    # if args.stepsize > 0:
    #     scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    '''------Modify lr_schedule here------'''
    current_schedule = init_lr_schedule(schedule=args.schedule,
                                        warm_up_epoch=args.warm_up_epoch,
                                        half_cos_period=args.half_cos_period,
                                        lr_milestone=args.lr_milestone,
                                        gamma=args.gamma,
                                        stepsize=args.stepsize)

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                  lr_lambda=current_schedule)
    '''------Please refer to the args.xxx for details of hyperparams------'''
    #embed()
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        if args.schedule: scheduler.step()

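        # evaluate every eval_step epochs once past start_eval, plus a final evaluation at max_epoch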
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (
                epoch + 1) % args.eval_step == 0 or (epoch +
                                                     1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Example no. 11
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))

    # tensorboardX
    writer = SummaryWriter(log_dir=osp.join(args.save_dir,'summary'))

    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root, name=args.dataset, split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled, cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss=args.loss)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

        if use_gpu:
            model = nn.DataParallel(model).cuda()

    start_time = time.time()
    if args.resume_all:
        print("Loading all checkpoints from '{}'".format(args.resume_all))
        pths = glob.glob(osp.join(args.resume_all,'checkpoint_ep*.tar'))
        best_epoch = 0
        best_rank1 = -np.inf
        # best_rerankepoch = 0
        # best_rerank1 = -np.inf
        for pth in pths:
            epoch = list(map(int, re.findall(pattern=r'ep(\d+)\.pth',string=pth)))
            print("Test epoch {}".format(epoch[0]))
            checkpoint = torch.load(pth)
            model.load_state_dict(checkpoint['state_dict'])
            if use_gpu:
                model = nn.DataParallel(model).cuda()
            rank1 = test(epoch, model, queryloader, galleryloader, use_gpu=True, summary=writer)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch[0]
            model = model.module
        shutil.copyfile(osp.join(args.resume_all, 'checkpoint_ep' + str(best_epoch) + '.pth.tar'),
                        osp.join(args.resume_all, 'best_checkpoint.pth.tar'))
        print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))


    test_time = round(time.time() - start_time)
    test_time = str(datetime.timedelta(seconds=test_time))  
    print("Finished. Testtime (h:m:s): {}.".format(test_time))
Example no. 12
    if opt.use_dense:
        model_structure = ft_net_dense(n_classe)
    else:
        model_structure = ft_net(n_classe)
    model = load_network(model_structure)
    # Change to test mode
    model = model.eval()
    if use_gpu:
        model = model.cuda()

    dataset = data_manager.init_img_dataset(root=opt.dataset,
                                            name='cuhk03',
                                            split_id=0,
                                            cuhk03_classic_split=True)

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=data_transforms),
        batch_size=32,
        shuffle=False,
        num_workers=4,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=data_transforms),
        batch_size=32,
        shuffle=False,
        num_workers=4,
        drop_last=False,
    )
    test(model, queryloader, galleryloader, use_gpu)
Example no. 13
pin_memory = True if use_gpu else False

transform_train = T.Compose([
    T.Resize((args_height, args_width)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

dataset = data_manager.init_img_dataset(
    root=args_root,
    name=args_dataset,
    split_id=args_split_id,
)

data_loader = DataLoader(
    ImageDataset(dataset.train, transform=transform_train),
    batch_size=args_train_batch,
    shuffle=True,
    num_workers=args_workers,
    pin_memory=pin_memory,
    drop_last=True,
)

# for batch_idx, (imgs, pids, _) in enumerate(data_loader):
#     print('batch_idx:',batch_idx,'imgs:',imgs.size())


# VAE model
class VAE(nn.Module):
    def __init__(self, image_size=32 * 16 * 3, h_dim=400, z_dim=20):
        super(VAE, self).__init__()
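
The class body is cut off here. A minimal sketch of how a fully connected VAE could continue, using the image_size/h_dim/z_dim arguments above (layer names are illustrative, not from the original source):

        # encoder: flattened image -> hidden -> latent mean and log-variance
        self.fc1 = nn.Linear(image_size, h_dim)
        self.fc_mu = nn.Linear(h_dim, z_dim)
        self.fc_logvar = nn.Linear(h_dim, z_dim)
        # decoder: latent code -> hidden -> reconstructed image
        self.fc2 = nn.Linear(z_dim, h_dim)
        self.fc3 = nn.Linear(h_dim, image_size)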
Example no. 14
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent'},
                              use_gpu=use_gpu)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # criterion = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    # optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    # if args.stepsize > 0:
    #     scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    # start_epoch = args.start_epoch
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    print("Evaluate only")
    test(model, queryloader, galleryloader, use_gpu)
Example no. 15
def main():
    global Temporal_corr_prob
    global paths, sorted_frame_list_all, max_frame

    Temporal_corr_prob = np.zeros((9, 9, 5))

    Temporal_corr_prob = np.load('./t_5_corr.npy')
    Temporal_corr_prob[8, :, -1] = [
        0.51415094, 0.20754717, 0.07075472, 0.25, 0.08962264, 0.45283019,
        0.0754717, 0.49528302, 1
    ]
    #Temporal_corr_prob[8,:,-1] = [0.96295517, 0.96295517, 0.64197011, 0.96295517, 0.77036413, 0.96295517,0.64197011, 0.96295517, 1]
    #800 Temporal_corr_prob[8,:,-1] = [0.10991234, 0.03641268, 0.01213756, 0.04517869, 0.01416049, 0.09844909,0.01213756, 0.10249494,1]
    #480 Temporal_corr_prob[8,:,-1] = [0.34993271 , 0.12651413 , 0.04306864,  0.16689098 , 0.05114401,  0.30417227, 0.04306864, 0.32570659,1]
    #720 Temporal_corr_prob[8,:,-1] = [0.45967742, 0.1733871,  0.06451613, 0.22983871, 0.07258065, 0.40725806, 0.06451613, 0.43951613,1]
    #600 Temporal_corr_prob[8,:,-1] = [0.4169468,0.15467384,0.0537996,0.19166106,0.0672495,0.35642233,0.05716207,0.39340955,1]
    #1080 Temporal_corr_prob[8,:,-1] = [0.55533199, 0.23541247, 0.09054326, 0.28973843, 0.11468813, 0.52515091,0.09657948, 0.58551308,1]
    #Temporal_corr_prob[8,:,-1] = [0.49708912, 0.17913121, 0.06717421, 0.2507837,  0.08060905, 0.43439319, 0.07165249, 0.46574116, 1]
    #Temporal_corr_prob[8,2,29] += 0.1
    #Temporal_corr_prob[8,8,29] -= 0.1
    #Temporal_corr_prob[:,:,0:-1] = 0
    #Temporal_corr_prob[:,:,-1] = Spatial_corr_prob
    #Temporal_corr_prob[8,:,-1] = [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]
    dataset = data_manager.init_img_dataset(
        root=args.root,
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = False

    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        batch_size=args.train_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    #gallery = ImageDatasetLazy(dataset.gallery, transform=transform_test)
    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    paths = dict()
    life_frequency = []
    stand_frequency = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}

    frame_list = {
        0: dict(),
        1: dict(),
        2: dict(),
        3: dict(),
        4: dict(),
        5: dict(),
        6: dict(),
        7: dict()
    }

    change = np.zeros((9, 9))
    cal_v = np.zeros(9)
    total_number = 0
    min_frame = 10000000
    max_frame = -1
    with torch.no_grad():
        for batch_idx, (names, imgs, pids, camids,
                        fids) in enumerate(trainloader):
            for s_index in range(len(names)):
                if paths.get(int(pids[s_index])) is None:
                    paths[int(pids[s_index])] = []

                fid_ = fids[s_index] + cam_offsets[camids[s_index]]
                #if paths[int(pids[s_index])] != []:
                #    if paths
                paths[int(pids[s_index])].append((fid_, camids[s_index]))

                if int(fid_) >= max_frame:
                    max_frame = int(fid_)
                if int(fid_) <= min_frame:
                    min_frame = int(fid_)

                if frame_list[int(camids[s_index])].get(int(fid_)) is None:
                    frame_list[int(camids[s_index])][int(fid_)] = []

                frame_list[int(camids[s_index])][int(fid_)].append(
                    int(pids[s_index]))

                cal_v[camids[s_index]] += 1
                total_number += 1
    print("Max frame and min frame : ", max_frame, min_frame)
    print("")
    sorted_frame_list_all = []  #{ 0 : list(), 1:list(),2:list(),3:list(),4:list(),5:list(),6:list(),7:list()}
    for ind in range(8):
        for key in list(frame_list[ind].keys()):
            tmp_list = frame_list[ind][key]
            tmp_list.insert(0, ind)
            tmp_list.insert(0, key)

            #sorted_frame_list[ind].append(tmp_list)
            sorted_frame_list_all.append(tmp_list)
        #sorted_frame_list[ind] = sorted(sorted_frame_list[ind], key=lambda x: x[0])
        #print("Sorted Index : ",ind, "With term : ",len(sorted_frame_list[ind]))
    sorted_frame_list_all = sorted(sorted_frame_list_all, key=lambda x: x[0])
    #input()
    print("Sorted Index : ", ind, "With term : ", len(sorted_frame_list_all))
    #227540 49700

    #version1(paths,sorted_frame_list)
    #baseline(paths,sorted_frame_list)
    '''global frame_window_size
    global threshold_value
    a = np.zeros(100)
    b = np.zeros(100)
    for i in range(100):
        a[i] = 10*i + 10
        b[i] = pow(0.42,i)

    result = []
    for i in range(100):
        for j in range(100):
            frame_window_size = a[i]
            threshold_value = b[j]
            t,f = version3(paths,sorted_frame_list_all,max_frame)
            print("Time : ",i,j,"with tot : ",t, " and f : ",f, " setting : ",frame_window_size,threshold_value)

            if f < 50 and t < 600000:
                result.append((t,f))
    print(result)'''
    from bayes_opt import BayesianOptimization

    # Bounded region of parameter space

    #pbounds = {'t0':(0.6,1.5),'t1':(0.6,1.5),'t2':(0.2,0.8),'t3':(0.6,1.2),'t4':(0.2,0.8),'t5':(0.5,1.2),'t6':(0.2,0.8),'t7':(0.3,1.2),}

    #wrapper =

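    # sweep candidate threshold settings; wrapper() appears to return a total cost t and a failure count f per value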
    for i in [
            0.96, 0.965, 0.97, 0.975, 0.98, 0.99
    ]:  #1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0,2.1,2.2,2.3,2.4,2.8,3,4,5,6,10,100,1e5,1e6]:#0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.735,0.80,0.85,0.9,
        #for k in [7,13,17,20,23,27,31,71,103,143]:
        j = i
        t, f = wrapper(j)
        print(j, ",", t, ",", f)

    #version2(paths,sorted_frame_list_all,max_frame,j)
    '''optimizer = BayesianOptimization(
Example no. 16
def pred(data, save_path, return_feature=False):
    torch.manual_seed(config.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_devices
    use_gpu = torch.cuda.is_available()
    if config.use_cpu: use_gpu = False

    if use_gpu:
        print("Currently using GPU {}".format(config.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(config.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    transform_test = T.Compose([
        T.Resize((config.height, config.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    testloader = DataLoader(
        ImageDataset(data, transform=transform_test),
        batch_size=config.test_batch,
        shuffle=False,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    model = models.init_model(name=config.arch,
                              num_classes=config.nb_labels,
                              loss={'xent'},
                              use_gpu=use_gpu)

    checkpoint = torch.load(save_path)
    #model.load_state_dict(checkpoint['state_dict'])
    # the checkpoint saved from celeba.py stores a bare state_dict rather
    # than a {'state_dict': ...} dict, so it is loaded directly
    model.load_state_dict(checkpoint)
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    model.eval()

    with torch.no_grad():
        pred_list, feature_list = [], []
        float_logit_list = []
        for batch_idx, imgs in enumerate(testloader):
            if use_gpu:
                imgs = imgs.cuda()
            features, predA = model(imgs)
            predA = predA.cpu()
            # print('features shape {} predA shape'.format(features.shape, predA.shape))
            float_logit_list.append(torch.sigmoid(predA))
            if return_feature:
                feature_list.append(features.cpu())

            predAs = torch.round(torch.sigmoid(predA))
            pred_list.append(predAs)
        predA_t = np.array(torch.cat(pred_list, 0).float().numpy().tolist())
        # sigmoid probabilities are collected but not returned in this variant
        float_logit_list = np.array(
            torch.cat(float_logit_list, 0).float().numpy().tolist())
    if return_feature:
        feature_list = np.array(
            torch.cat(feature_list, 0).float().numpy().tolist())
        #print('return feature shape',feature_list.shape)
        return feature_list
    else:
        return predA_t
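For reference, a hedged usage sketch of pred (the data variable and checkpoint path are illustrative, not from the original): by default it returns the rounded 0/1 prediction matrix; with return_feature=True it returns the penultimate feature matrix instead.

# illustrative only; `test_images` and the path are assumptions
labels_01 = pred(test_images, 'logs/teacher_0.pth')
features = pred(test_images, 'logs/teacher_0.pth', return_feature=True)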
Esempio n. 17
0
def train_each_teacher(num_epoch, train_data, train_label, test_data,
                       test_label, save_path):

    torch.manual_seed(config.seed)
    print('len of train_data in network', len(train_data))
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_devices
    print('it is training now')
    use_gpu = torch.cuda.is_available()
    if config.use_cpu: use_gpu = False
    print('whether evaluate', config.evaluate)

    if use_gpu:
        print("Currently using GPU {}".format(config.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(config.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    transform_train = T.Compose([
        T.Random2DTranslation(config.height, config.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((config.height, config.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False
    trainloader = DataLoader(
        ImageDataset(train_data, label=train_label, transform=transform_train),
        batch_size=config.train_batch,
        shuffle=True,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    testloader = DataLoader(
        ImageDataset(test_data, label=test_label, transform=transform_test),
        batch_size=config.test_batch,
        shuffle=False,
        num_workers=config.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format('resnet50m'))
    model = models.init_model(name=config.arch,
                              num_classes=config.nb_labels,
                              loss={'xent'},
                              use_gpu=use_gpu)
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    criterion = nn.MultiLabelSoftMarginLoss()

    optimizer = init_optim(config.optim, model.parameters(), config.lr,
                           config.weight_decay)

    if config.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer,
                                        step_size=config.stepsize,
                                        gamma=config.gamma)

    print("==> Start training")

    start_time = time.time()
    for epoch in range(num_epoch):
        train(epoch, model, criterion, optimizer, trainloader, use_gpu)
        if config.stepsize > 0: scheduler.step()
        test(model, testloader, use_gpu)  # track test accuracy every epoch

    rank1 = test(model, testloader, use_gpu)  # final evaluation

    if use_gpu:
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    print('save model', save_path)
    torch.save(state_dict, save_path)

    #print("==>  Hamming Score {:.3%}".format(rank1))

    elapsed = str(datetime.timedelta(seconds=round(time.time() - start_time)))

    print("Finished. Training time (h:m:s): {}.".format(elapsed))
Esempio n. 18
0
    # 3 dataloader: train, query, gallery
    # Train needs augmentation
    transform_train = T.Compose([
        transform.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    queryloader = DataLoader(ImageDataset(dataset.query,
                                          transform=transform_test),
                             batch_size=args.test_batch,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=pin_memory,
                             drop_last=False)

    galleryloader = DataLoader(ImageDataset(dataset.gallery,
                                            transform=transform_test),
                               batch_size=args.test_batch,
                               shuffle=False,
                               num_workers=args.workers,
                               pin_memory=pin_memory,
                               drop_last=False)

    model = torch.load('logs/model_metric.pkl')
Esempio n. 19
0
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_vidreid_dataset(root=args.root,
                                                name=args.dataset)

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # decompose tracklets into images for image-based training
    new_train = []
    for img_paths, pid, camid in dataset.train:
        for img_path in img_paths:
            new_train.append((img_path, pid, camid))

    trainloader = DataLoader(
        ImageDataset(new_train, transform=transform_train),
        sampler=RandomIdentitySampler(new_train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     seq_len=args.seq_len,
                     sample='evenly',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     seq_len=args.seq_len,
                     sample='evenly',
                     transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(count_num_param(model)))

    criterion_xent = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, args.pool, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, optimizer,
              trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if ((epoch + 1) > args.start_eval and args.eval_step > 0 and
                (epoch + 1) % args.eval_step == 0) or (epoch + 1) == args.max_epoch:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, args.pool, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
Esempio n. 20
0
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))

    # tensorboardX
    # writer = SummaryWriter(log_dir=osp.join(args.save_dir,'summary'))

    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_img_dataset(
        root=args.root, name=args.dataset, split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled, cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    if args.random_erasing:
        transform_train = T.Compose([
            T.Random2DTranslation(args.height, args.width),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            RandomErasing(probability=args.probability, mean=[0.0, 0.0, 0.0]),
        ])
        

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    if args.loss == 'xent,htri':
        trainloader = DataLoader(
            ImageDataset(dataset.train, transform=transform_train),
            sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
            batch_size=args.train_batch, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=True,
        )
    elif args.loss == 'xent':
        trainloader = DataLoader(
            ImageDataset(dataset.train, transform=transform_train),
            batch_size=args.train_batch, shuffle=True, num_workers=args.workers,
            pin_memory=pin_memory, drop_last=True,
        )
    else:
        raise ValueError("Unsupported loss: {}".format(args.loss))

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss=args.loss)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))

    criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    criterion_htri = TripletLoss(margin=args.margin)
    
    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    if args.stepsize > 0:
        if not args.warmup:
            scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.resume:
        print("Loading checkpoint from '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return
    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            #lr = 1e-3 * len(args.gpu_devices)
            lr = 1e-3
        elif ep < 180:
            #lr = 1e-4 * len(args.gpu_devices)
            lr = 1e-4
        elif ep < 300:
            #lr = 1e-5 * len(args.gpu_devices)
            lr = 1e-5
        elif ep < 320:
            #lr = 1e-5 * 0.1 ** ((ep - 320) / 80) * len(args.gpu_devices)
            lr = 1e-5 * 0.1 ** ((ep - 320) / 80)
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            #lr = 1e-4 * len(args.gpu_devices)
            lr = 1e-4
        else:
            #lr = 1e-5 * len(args.gpu_devices)
            lr = 1e-5
        for p in optimizer.param_groups:
            p['lr'] = lr
    
    length = len(trainloader)
    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    #best_rerank1 = -np.inf
    #best_rerankepoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        if args.stepsize > 0:
            if args.warmup:
                adjust_lr(optimizer, epoch + 1)
            else:
                scheduler.step()
        train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu=use_gpu, summary=None, length=length)
        train_time += round(time.time() - start_train_time)
        
        if ((epoch+1) > args.start_eval and args.eval_step > 0 and (epoch+1) % args.eval_step == 0) or (epoch+1) == args.max_epoch:
            print("==> Test")
            rank1 = test(epoch, model, queryloader, galleryloader, use_gpu=use_gpu, summary=None)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1
            ####### Best Rerank
            #is_rerankbest = rerank1 > best_rerank1
            #if is_rerankbest:
            #    best_rerank1 = rerank1
            #    best_rerankepoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch+1) + '.pth.tar'))

    # writer.close()  # the SummaryWriter above is commented out, so there is no writer to close
    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))
    #print("==> Best Rerank-1 {:.1%}, achieved at epoch {}".format(best_rerank1, best_rerankepoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
Esempio n. 21
0
def main():
    # load the dataset
    dataset = Market1501()

    # transforms for the training data
    transform_train = T.Compose([
        T.Random2DTransform(height, width),  # unify scale, random crop
        T.RandomHorizontalFlip(),  # random horizontal flip
        T.ToTensor(),  # convert image to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # normalize, fixed ImageNet stats
    ])

    # transforms for the test data
    transform_test = T.Compose([
        T.Resize((height, width)),  # unify scale
        T.ToTensor(),  # convert image to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # normalize, fixed ImageNet stats
    ])

    # loader for the train split
    train_data_loader = DataLoader(
        ImageDataset(dataset.train,
                     transform=transform_train),  # custom dataset with the training transforms
        batch_size=train_batch_size,  # images per batch
        drop_last=True,  # drop the final incomplete batch
    )
    print("train_data_loader inited")

    # loader for the query split
    query_data_loader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test),  # custom dataset with the test transforms
        batch_size=test_batch_size,  # images per batch
        shuffle=False,  # keep order fixed
        drop_last=True,  # drop the final incomplete batch
    )
    print("query_data_loader inited")

    # loader for the gallery split
    gallery_data_loader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test),  # custom dataset with the test transforms
        batch_size=test_batch_size,  # images per batch
        shuffle=False,  # keep order fixed
        drop_last=True,  # drop the final incomplete batch
    )
    print("gallery_data_loader inited\n")

    # load the model; class count and loss type decide what the forward pass returns
    model = ReIDNet(num_classes=751, loss={'softmax'})
    print("=>ReIDNet loaded")
    print("Model size: {:.5f}M\n".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # loss function
    criterion_class = nn.CrossEntropyLoss()
    """
    Optimizer
    arg 1: the parameters to optimize
    arg 2: the learning rate
    arg 3: the weight decay
    """
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=train_lr,
                                weight_decay=5e-04)
    """
    Dynamic learning rate
    arg 1: the optimizer whose rate is adjusted
    arg 2: mode, 'min' lowers the rate when the monitored value stops falling,
           'max' lowers it when the value stops rising
    arg 3: factor, how much the rate is reduced each time
    arg 4: patience, how many non-improving epochs are tolerated before reducing
    arg 5: min_lr, the lower bound on the learning rate
    """
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,
                                               mode='min',
                                               factor=dy_step_gamma,
                                               patience=10,
                                               min_lr=0.0001)

    # evaluation only
    if evaluate:
        test(model, query_data_loader, gallery_data_loader)
        return 0
    # otherwise train
    print('---- model start training ----\n')
    bt = time.time()  # training start time
    for epoch in range(start_epoch, end_epoch):
        model.train(True)
        train(epoch, model, criterion_class, optimizer, scheduler,
              train_data_loader)
    et = time.time()  # training end time
    print('** training finished, saving final weights to {} **\n'.format(final_model_path))
    torch.save(model.state_dict(), final_model_path)
    print('---- total training time: {:.2f} hours ----'.format((et - bt) / 3600.0))
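Unlike the StepLR and MultiStepLR schedulers in the other examples, ReduceLROnPlateau must be stepped with the monitored quantity. A self-contained sketch of the calling pattern (the toy model and data are illustrative only):

import torch
import torch.nn as nn
from torch.optim import SGD, lr_scheduler

model = nn.Linear(4, 1)
optimizer = SGD(model.parameters(), lr=0.1)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                           factor=0.5, patience=2)
loss_fn = nn.MSELoss()
x, y = torch.randn(16, 4), torch.randn(16, 1)

for epoch in range(20):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()
    scheduler.step(loss.item())  # unlike StepLR.step(), this takes the monitored metric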
Esempio n. 22
0
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'), mode='a')
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'), mode='a')
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(name=args.dataset,
                                                dataset_dir=args.root,
                                                fore_dir=args.fore_dir)

    transform_train = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.RandomHorizontalFlip(),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ST.RandomErasing(0.5)
    ])

    transform_test = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToTensor(),
        ST.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(
        ImageDataset_hardSplit_seg(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids)
    print(model)

    criterion_xent = CrossEntropyLabelSmooth(use_gpu=use_gpu)
    criterion_htri = TripletLoss()
    criterion_mask = MaskLoss()
    criterion_split = HardSplitLoss()
    criterion_cluster = ClusterLoss()

    optimizer = init_optim(args.optim, model.parameters(), args.lr,
                           args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=args.stepsize,
                                         gamma=args.gamma)

    if args.resume:
        if check_isfile(args.resume):
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            rank1 = checkpoint['rank1']
            print("Loaded checkpoint from '{}'".format(args.resume))
            print("- start_epoch: {}\n- rank1: {}".format(
                args.start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.start_epoch, args.max_epoch):

        start_train_time = time.time()
        train(epoch, model, criterion_xent, criterion_htri, criterion_mask,
              criterion_split, criterion_cluster, optimizer, trainloader,
              use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        if ((epoch + 1) > args.start_eval and
                (epoch + 1) % args.eval_step == 0) or epoch == 0:
            print("==> Test")
            rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint(
                {
                    'state_dict': state_dict,
                    'rank1': rank1,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(
        best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
    tfms.Random2DTranslation(height, width),
    tfms.RandomHorizontalFlip(),
    tfms.ToTensor(),
    tfms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

tfms_test = tfms.Compose([
    tfms.Resize(size=(height, width)),
    tfms.ToTensor(),
    tfms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

pin_memory = True

trainloader = DataLoader(
    ImageDataset(dataset.train, transform=tfms_train),
    sampler=RandomIdentitySampler(dataset.train, num_instances=num_instances),
    batch_size=train_batch,
    num_workers=workers,
    pin_memory=pin_memory,
    drop_last=True,
)

queryloader = DataLoader(
    ImageDataset(dataset.query, transform=tfms_test),
    batch_size=test_batch,
    shuffle=False,
    num_workers=workers,
    pin_memory=pin_memory,
    drop_last=False,
)
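RandomIdentitySampler is what makes the triplet-loss examples here workable: it builds batches in which every drawn identity contributes num_instances images, so each anchor has in-batch positives. A simplified sketch of the idea (an assumption-level reimplementation, not the repo's class):

import random
from collections import defaultdict
from torch.utils.data import Sampler

class SimpleIdentitySampler(Sampler):
    """Yields dataset indices so that each pid contributes num_instances samples."""

    def __init__(self, data_source, num_instances=4):
        # data_source: list of (img_path, pid, camid) tuples, as in dataset.train
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        self.num_instances = num_instances

    def __iter__(self):
        ret = []
        for pid in random.sample(self.pids, len(self.pids)):
            idxs = self.index_dic[pid]
            if len(idxs) >= self.num_instances:
                ret.extend(random.sample(idxs, self.num_instances))
            else:  # too few images for this pid: sample with replacement
                ret.extend(random.choices(idxs, k=self.num_instances))
        return iter(ret)

    def __len__(self):
        return len(self.pids) * self.num_instances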
Esempio n. 24
0
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(
        name=args.dataset,
        split_id=args.split_id,
        cuhk03_labeled=args.cuhk03_labeled,
        cuhk03_classic_split=args.cuhk03_classic_split,
    )

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False
    #arch1 should use salience, and arch2 should use semantic parsing
    use_salience = models.use_salience(name=args.arch1)
    use_parsing = models.use_parsing(name=args.arch2)
    save_rank = bool(args.save_rank)
    use_re_ranking = bool(args.use_re_ranking)

    queryloader = DataLoader(
        ImageDataset(dataset.query,
                     transform=transform_test,
                     use_salience=use_salience,
                     use_parsing=use_parsing,
                     salience_base_path=dataset.salience_query_dir,
                     parsing_base_path=dataset.parsing_query_dir),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.gallery,
                     transform=transform_test,
                     use_salience=use_salience,
                     use_parsing=use_parsing,
                     salience_base_path=dataset.salience_gallery_dir,
                     parsing_base_path=dataset.parsing_gallery_dir),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing model: {}".format(args.arch1))
    model1 = models.init_model(name=args.arch1,
                               num_classes=dataset.num_train_pids,
                               loss={'xent', 'htri'},
                               mid_layer=args.mid_layer)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model1.parameters()) / 1000000.0))
    print("Initializing model: {}".format(args.arch2))
    model2 = models.init_model(name=args.arch2,
                               num_classes=dataset.num_train_pids,
                               loss={'xent', 'htri'},
                               mid_layer=args.mid_layer)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model2.parameters()) / 1000000.0))

    print("Loading checkpoint from '{}'".format(args.resume1))
    checkpoint = torch.load(args.resume1)
    model1.load_state_dict(checkpoint['state_dict'])
    start_epoch = checkpoint['epoch']
    print("Resuming model 1 from epoch {}".format(start_epoch + 1))

    print("Loading checkpoint from '{}'".format(args.resume2))
    checkpoint = torch.load(args.resume2)
    model2.load_state_dict(checkpoint['state_dict'])
    start_epoch = checkpoint['epoch']
    print("Resuming model 2 from epoch {}".format(start_epoch + 1))

    if use_gpu:
        model1 = nn.DataParallel(model1).cuda()
        model2 = nn.DataParallel(model2).cuda()

    test(model1,
         model2,
         queryloader,
         galleryloader,
         use_gpu,
         use_salience=use_salience,
         use_parsing=use_parsing,
         save_dir=args.save_dir,
         epoch=-1,
         save_rank=save_rank,
         use_re_ranking=use_re_ranking)
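The final test() call scores the salience and parsing models jointly. The fusion logic itself is not shown in this snippet; a common minimal approach, sketched here as an assumption rather than the author's method, is to weight and sum the two models' query-gallery distance matrices before ranking:

import numpy as np

def fuse_and_rank(distmat1, distmat2, w=0.5):
    # distmat1/distmat2: (num_query, num_gallery) distances, smaller = closer
    fused = w * distmat1 + (1.0 - w) * distmat2
    # per-query gallery indices, best match first
    return np.argsort(fused, axis=1)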