def __init__(self, architecture='resnet101', pretrained_type='gl18', soa_layers='45', mode='train'):
        """Build a ResNet feature backbone with optional second-order attention (SOA) blocks.

        Args:
            architecture: torchvision ResNet variant name (e.g. 'resnet101').
            pretrained_type: which custom pretrained features to load into the
                backbone ('caffenet', 'SfM120k' or 'gl18'); any other value
                keeps the ImageNet-pretrained torchvision weights.
            soa_layers: string of stage digits that receive an SOA block
                ('4', '5' or '45').
            mode: accepted but not used here — presumably consumed by other
                methods of this class; TODO confirm.
        """
        super(ResNetSOAs, self).__init__()

        # Instantiate the torchvision model, remember the channel width of the
        # last residual stage, then strip the avgpool + fc classification head.
        base_model = vars(models)[architecture](pretrained=True)
        last_feat_in = base_model.inplanes
        base_model = nn.Sequential(*list(base_model.children())[:-2])

        # Optionally overwrite the backbone weights with custom pretrained
        # features downloaded via model_zoo (cached under <data_root>/networks).
        if pretrained_type=='caffenet' and architecture in FEATURES:
            print(">> {}: for '{}' custom pretrained features '{}' are used"
                .format(os.path.basename(__file__), architecture, os.path.basename(FEATURES[architecture])))
            model_dir = os.path.join(get_data_root(), 'networks')
            base_model.load_state_dict(model_zoo.load_url(FEATURES[architecture], model_dir=model_dir))
        elif pretrained_type in ['SfM120k', 'gl18'] and architecture in FEATURES:
            # End-to-end checkpoints store more than the backbone; extract just
            # the feature weights before loading.
            pretrained_name = pretrained_type + '-tl-' + architecture + '-gem-w'
            print(">> {}: for '{}' custom pretrained features '{}' are used"
                .format(os.path.basename(__file__), architecture, os.path.basename(PRETRAINED[pretrained_name])))
            model_dir = os.path.join(get_data_root(), 'networks')
            base_model.load_state_dict(extract_features_from_e2e(model_zoo.load_url(PRETRAINED[pretrained_name], model_dir=model_dir)))

        # Re-split the sequential backbone into the canonical ResNet stages so
        # the SOA blocks can be inserted after conv4_x / conv5_x.
        res_blocks = list(base_model.children())

        self.conv1 = nn.Sequential(*res_blocks[0:2])
        self.conv2_x = nn.Sequential(*res_blocks[2:5])
        self.conv3_x = res_blocks[5]
        self.conv4_x = res_blocks[6]
        self.conv5_x = res_blocks[7]

        # Attach SOA blocks to the requested stages; conv4_x has half the
        # channels of the final stage, hence last_feat_in // 2.
        self.soa_layers = soa_layers
        if '4' in self.soa_layers:
            print("SOA_4:")
            self.soa4 = SOABlock(in_ch=last_feat_in // 2, k=4)
        if '5' in self.soa_layers:
            print("SOA_5:")
            self.soa5 = SOABlock(in_ch=last_feat_in, k=2)
def main():
    """Extract global descriptors for the revisitop1m distractor set.

    Loads the network named on the command line, runs multi-scale descriptor
    extraction over all database images, and saves the resulting vectors to
    '<network>_vecs_revisitop1m.pt' in the working directory.
    """
    args = parser.parse_args()

    # check if test dataset are downloaded
    # and download if they are not
    download_distractors(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network
    net = load_network(network_name=args.network)
    net.mode = 'test'

    print(">>>> loaded network: ")
    print(net.meta_repr())

    # setting up the multi-scale parameters
    # NOTE(review): eval() on a CLI string executes arbitrary code; if the
    # scale list is a plain literal, ast.literal_eval would be safer.
    ms = list(eval(args.multiscale))

    print(">>>> Evaluating scales: {}".format(ms))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    dataset = 'revisitop1m'
    start = time.time()

    print('>> {}: Extracting...'.format(dataset))

    # prepare config structure for the test dataset
    cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
    images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
    # NOTE(review): qimages and bbxs are computed but never used below —
    # only database vectors are extracted for the distractor set.
    qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
    try:
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
    except:
        bbxs = None  # for holidaysmanrot and copydays

    # extract database and query vectors
    print('>> {}: database images...'.format(dataset))
    vecs = extract_vectors(net,
                           images,
                           args.image_size,
                           transform,
                           ms=ms,
                           mode='test')
    torch.save(vecs, args.network + '_vecs_' + dataset + '.pt')

    print('>> {}: elapsed time: {}'.format(dataset,
                                           htime(time.time() - start)))
def load_network(network_name='resnet101-solar-best.pth'):
    """Load a saved retrieval-network checkpoint and rebuild the model.

    The checkpoint is expected under <data_root>/networks/ and must carry a
    'meta' dict (architecture, pooling, mean, std, soa, soa_layers, ...) plus
    a 'state_dict' with the trained weights.
    """
    # loading network
    # pretrained networks (downloaded automatically)
    print(">> Loading network:\n>>>> '{}'".format(network_name))
    state = torch.load(os.path.join(get_data_root(), 'networks', network_name))

    # parsing net params from meta
    # architecture, pooling, mean, std required;
    # optional entries fall back to defaults for older checkpoints
    meta = state['meta']
    net_params = {
        'architecture': meta['architecture'],
        'pooling': meta['pooling'],
        'local_whitening': meta.get('local_whitening', False),
        'regional': meta.get('regional', False),
        'whitening': meta.get('whitening', False),
        'mean': meta['mean'],
        'std': meta['std'],
        # do not re-download pretrained features; the checkpoint weights
        # are loaded right below
        'pretrained': False,
        'pretrained_type': None,
        'soa': meta['soa'],
        'soa_layers': meta['soa_layers'],
    }

    net = init_network(net_params)
    net.load_state_dict(state['state_dict'])

    return net
# ---- Esempio n. 4 ----
def test(datasets, net, epoch, summary):
    """Evaluate `net` on the given test datasets with multi-scale extraction.

    Args:
        datasets: comma-separated dataset names (str) or an iterable of names.
        net: retrieval network exposing .meta (mean/std) and a .mode attribute.
        epoch: current epoch, forwarded to compute_map_and_print for logging.
        summary: tensorboard writer forwarded to extraction/eval helpers.

    Side effects: moves `net` to `device`, switches it to eval mode, prints
    mAP results, and optionally learns/applies whitening (args.test_whiten).
    """
    # multi-scale factors used for descriptor extraction
    #ms = [1]
    ms = [1, 2**(1 / 2), 1 / 2**(1 / 2)]
    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.mode = 'test'
    net.to(device)
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.test_whiten:
        start = time.time()

        print('>> {}: Learning whitening...'.format(args.test_whiten))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [
            cid2filename(db['cids'][i], ims_root)
            for i in range(len(db['cids']))
        ]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.test_whiten))
        # FIX(review): the original passed a spurious leading None argument
        # here, inconsistent with the extract_vectors calls below — dropped.
        wvecs = extract_vectors(net, images, image_size,
                                transform)  # implemented with torch.no_grad

        # learning whitening
        print('>> {}: Learning...'.format(args.test_whiten))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.test_whiten,
                                               htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    # FIX(review): honour the `datasets` parameter instead of silently
    # overwriting it from args.test_datasets (callers pass the same string,
    # so behavior is unchanged for existing call sites).
    if isinstance(datasets, str):
        datasets = datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               image_size,
                               transform,
                               summary=summary,
                               mode='test',
                               ms=ms)  # implemented with torch.no_grad

        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                image_size,
                                transform,
                                bbxs,
                                summary=summary,
                                mode='test',
                                ms=ms)  # implemented with torch.no_grad

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print: cosine similarity via dot product of
        # L2-normalised descriptors, ranked best-first per query column
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset,
                              ranks,
                              cfg['gnd'],
                              summary=summary,
                              epoch=epoch)

        #       for protocol in ['hard']: #'easy', 'medium', 'hard']:
        #           plot_ranks(qimages, images, ranks, cfg['gnd'], bbxs, summary, dataset, epoch, 20, protocol)
        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten',
                                  ranks,
                                  cfg['gnd'],
                                  summary=summary,
                                  epoch=epoch)

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
# ---- Esempio n. 5 ----
def main():
    """Train a SOLAR retrieval network end-to-end.

    Parses CLI args, builds an export directory name encoding the full
    hyper-parameter configuration, initialises the model/criterion/optimizer,
    optionally resumes from a checkpoint, then runs the train/validate/test
    loop, checkpointing after every epoch.
    """
    global args, min_loss, device
    args = parser.parse_args()

    # manually check if there are unknown test datasets
    for dataset in args.test_datasets.split(','):
        if dataset not in test_datasets_names:
            raise ValueError(
                'Unsupported or unknown test dataset: {}!'.format(dataset))

    # check if test dataset are downloaded
    # and download if they are not
    download_test(get_data_root())

    # create export dir if it doesnt exist
    # (directory name encodes every hyper-parameter so runs never collide)
    directory = "{}".format(args.training_dataset)
    directory += "_{}".format(args.arch)
    directory += "_{}".format(args.pool) + str(args.p)
    if args.local_whitening:
        directory += "_lwhiten"
    if args.regional:
        directory += "_r"
    if args.whitening:
        directory += "_whiten"
    if not args.pretrained:
        directory += "_notpretrained"
    if args.soa:
        directory += "_soa_"
        directory += args.soa_layers
        directory = os.path.join('second_order_attn', directory)
    if args.unfreeze_last:
        directory += "_unfreeze_last"
    if args.sos:
        directory += "_SOS_lambda{:.2f}".format(args._lambda)
    directory += "_{}".format(args.pretrained_type)
    directory += "_{}_m{:.2f}".format(args.loss, args.loss_margin)
    directory += "_{}_lr{:.1e}_lrd{:.1e}_wd{:.1e}".format(
        args.optimizer, args.lr, args.lr_decay, args.weight_decay)
    directory += "_nnum{}_qsize{}_psize{}".format(args.neg_num,
                                                  args.query_size,
                                                  args.pool_size)
    directory += "_bsize{}_uevery{}_imsize{}".format(args.batch_size,
                                                     args.update_every,
                                                     args.image_size)

    device = torch.device('cuda:' + args.gpu_id)

    args.directory = os.path.join(args.directory, directory)
    print(">> Creating directory if it does not exist:\n>> '{}'".format(
        args.directory))
    if not os.path.exists(args.directory):
        os.makedirs(args.directory)

    # set random seeds for reproducible initialisation
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    np.random.seed(0)
    random.seed(0)

    # initialize model
    if args.pretrained:
        print(">> Using pre-trained model '{}'".format(args.arch))
    else:
        print(">> Using model from scratch (random weights) '{}'".format(
            args.arch))
    model_params = {}
    model_params['architecture'] = args.arch
    model_params['pooling'] = args.pool
    model_params['p'] = args.p
    model_params['local_whitening'] = args.local_whitening
    model_params['regional'] = args.regional
    model_params['whitening'] = args.whitening
    # model_params['mean'] = ...  # will use default
    # model_params['std'] = ...  # will use default
    model_params['pretrained'] = args.pretrained
    model_params['pretrained_type'] = args.pretrained_type
    model_params['flatten_desc'] = args.flatten_desc
    model_params['soa'] = args.soa
    model_params['soa_layers'] = args.soa_layers

    model = init_network(model_params)

    # move network to gpu
    model.to(device)
    # define loss function (criterion) and optimizer
    if args.loss == 'contrastive':
        criterion = ContrastiveLoss(margin=args.loss_margin).to(device)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.loss_margin).to(device)
    else:
        raise (RuntimeError("Loss {} not available!".format(args.loss)))

    # optional second-order-similarity auxiliary loss
    if args.sos:
        criterionB = SOSLoss().to(device)
    else:
        criterionB = None

    # parameters split into features, pool, whitening
    # IMPORTANT: no weight decay for pooling parameter p in GeM or regional-GeM
    parameters = []
    # add feature parameters
    if args.soa:
        # with SOA training, freeze the early backbone stages and train only
        # the SOA blocks (and optionally the last stage)
        for p in model.features.conv1.parameters():
            p.requires_grad = False
        for p in model.features.conv2_x.parameters():
            p.requires_grad = False
        for p in model.features.conv3_x.parameters():
            p.requires_grad = False
        for p in model.features.conv4_x.parameters():
            p.requires_grad = False
        if args.unfreeze_last:
            # NOTE(review): lr is multiplied by 0.0, so conv5_x receives no
            # updates despite being "unfrozen" — confirm this is intended
            parameters.append({
                'params': model.features.conv5_x.parameters(),
                'lr': args.lr * 0.0
            })  #, 'weight_decay': 0})
        if '4' in args.soa_layers:
            parameters.append({'params': model.features.soa4.parameters()
                               })  #, 'lr': args.lr*10}) #, 'weight_decay': 0})
        if '5' in args.soa_layers:
            parameters.append({'params': model.features.soa5.parameters()
                               })  #, 'lr': args.lr*10}) #, 'weight_decay': 0})
    else:
        parameters.append({'params': model.features.parameters()})
    # add local whitening if exists
    if model.lwhiten is not None:
        parameters.append({'params': model.lwhiten.parameters()})
    # add pooling parameters (or regional whitening which is part of the pooling layer!)
    if not args.regional:
        # global, only pooling parameter p weight decay should be 0
        if args.pool == 'gem':
            parameters.append({
                'params': model.pool.parameters(),
                'lr': args.lr * 100,
                'weight_decay': 0
            })
        elif args.pool == 'gemmp':
            parameters.append({
                'params': model.pool.parameters(),
                'lr': args.lr * 100,
                'weight_decay': 0
            })
    else:
        # regional, pooling parameter p weight decay should be 0,
        # and we want to add regional whitening if it is there
        if args.pool == 'gem':
            parameters.append({
                'params': model.pool.rpool.parameters(),
                'lr': args.lr * 1,
                'weight_decay': 0
            })
        elif args.pool == 'gemmp':
            parameters.append({
                'params': model.pool.rpool.parameters(),
                'lr': args.lr * 100,
                'weight_decay': 0
            })
        if model.pool.whiten is not None:
            parameters.append({'params': model.pool.whiten.parameters()})
    # add final whitening if exists
    if model.whiten is not None:
        parameters.append({'params': model.whiten.parameters()})
    # define optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(parameters,
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(parameters,
                                     args.lr,
                                     weight_decay=args.weight_decay)

    # define learning rate decay schedule
    # TODO: maybe pass as argument in future implementation?
    exp_decay = math.exp(-args.lr_decay)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                       gamma=exp_decay)

    # optionally resume from a checkpoint
    start_epoch = 0
    checkpoint_exists = False
    if args.resume:
        args.resume = os.path.join(args.directory, args.resume)
        if os.path.isfile(args.resume):
            # load checkpoint weights and update model and optimizer
            print(">> Loading checkpoint:\n>> '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            min_loss = checkpoint['min_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print(">>>> loaded checkpoint:\n>>>> '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            # important not to forget scheduler updating
            scheduler = torch.optim.lr_scheduler.ExponentialLR(
                optimizer, gamma=exp_decay, last_epoch=checkpoint['epoch'] - 1)
            checkpoint_exists = True
        else:
            print(">> No checkpoint found at '{}'".format(args.resume))

    # Data loading code
    #if args.training_dataset.startswith('megadepth'):
    #    model.meta['mean'] += sum(mean for mean in model.meta['mean']) / 3
    #    model.meta['std'] += sum(std for std in model.meta['std']) / 3

    normalize = transforms.Normalize(mean=model.meta['mean'],
                                     std=model.meta['std'])
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # gl18-style datasets use the batched tuples loader
    if args.training_dataset.startswith('gl18'):
        train_dataset = TuplesBatchedDataset(name=args.training_dataset,
                                             mode='train',
                                             imsize=args.image_size,
                                             nnum=args.neg_num,
                                             qsize=args.query_size,
                                             poolsize=args.pool_size,
                                             transform=transform)
    else:
        train_dataset = TuplesDataset(name=args.training_dataset,
                                      mode='train',
                                      imsize=args.image_size,
                                      nnum=args.neg_num,
                                      qsize=args.query_size,
                                      poolsize=args.pool_size,
                                      transform=transform)

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        sampler=None,
        drop_last=True  #, collate_fn=collate_tuples
    )
    if args.val:
        if args.training_dataset.startswith('gl18'):
            val_dataset = TuplesBatchedDataset(name=args.training_dataset,
                                               mode='val',
                                               imsize=args.image_size,
                                               nnum=args.neg_num,
                                               qsize=args.val_query_size,
                                               poolsize=args.val_pool_size,
                                               transform=transform)
        else:
            val_dataset = TuplesDataset(name=args.training_dataset,
                                        mode='val',
                                        imsize=args.image_size,
                                        nnum=args.neg_num,
                                        qsize=args.val_query_size,
                                        poolsize=args.val_pool_size,
                                        transform=transform)
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 drop_last=True)

    # set up tensorboad
    summary = tb_setup(args.directory, checkpoint_exists)
    ################################################### evaluate the network before starting
    # this might not be necessary?
    #   test(args.test_datasets, model, start_epoch, summary)

    for epoch in range(start_epoch, args.epochs):

        # set manual seeds per epoch so each epoch's sampling is
        # reproducible regardless of resume point

        random.seed(epoch)
        np.random.seed(epoch)
        torch.manual_seed(epoch)
        torch.cuda.manual_seed_all(epoch)

        loss = train(train_loader, model, criterion, criterionB, optimizer,
                     epoch, summary)

        # evaluate on test datasets every test_freq epochs
        with torch.no_grad():
            if args.val:
                # NOTE(review): when validation is enabled, checkpoint
                # selection below uses the validation loss, not train loss
                loss = validate(val_loader, model, criterion, criterionB,
                                epoch, summary)
            if (epoch + 1) % args.test_freq == 0:
                # evaluate on validation set
                test(args.test_datasets, model, epoch + 1, summary)

        # remember best loss and save checkpoint
        is_best = loss < min_loss
        min_loss = min(loss, min_loss)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'meta': model.meta,
                'state_dict': model.state_dict(),
                'min_loss': min_loss,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.directory)

        # train for one epoch on train set
        # adjust learning rate for each epoch
        scheduler.step()
# ---- Esempio n. 6 ----
def init_network(params):
    """Construct a SOLAR global-retrieval network from a parameter dict.

    Args:
        params: dict with optional keys 'architecture', 'local_whitening',
            'pooling', 'p', 'regional', 'whitening', 'mean', 'std',
            'pretrained', 'soa', 'soa_layers', 'pretrained_type',
            'flatten_desc', 'mode' — missing keys fall back to the defaults
            parsed below.

    Returns:
        A SOLAR_Global_Retrieval network assembled from backbone features,
        optional local/regional/final whitening layers, a pooling layer, and
        a meta dict describing the configuration.

    Raises:
        ValueError: if the architecture family is not supported.
    """

    # parse params with default values
    architecture = params.get('architecture', 'resnet101')
    local_whitening = params.get('local_whitening', False)
    pooling = params.get('pooling', 'gem')
    p = params.get('p', 3.)
    regional = params.get('regional', False)
    whitening = params.get('whitening', False)
    mean = params.get('mean', [0.485, 0.456, 0.406])
    std = params.get('std', [0.229, 0.224, 0.225])
    pretrained = params.get('pretrained', True)
    soa = params.get('soa', False)
    soa_layers = params.get('soa_layers', '45')
    pretrained_type = params.get('pretrained_type', 'SfM120k')
    flatten_desc = params.get('flatten_desc', False)
    mode = params.get('mode', 'train')

    # get output dimensionality size
    dim = OUTPUT_DIM[architecture]

    # loading network from torchvision
    if pretrained:
        if architecture not in FEATURES:
            # initialize with network pretrained on imagenet in pytorch
            net_in = getattr(torchvision.models, architecture)(pretrained=True)
        else:
            # initialize with random weights, later on we will fill features with custom pretrained network
            net_in = getattr(torchvision.models,
                             architecture)(pretrained=False)
    else:
        # initialize with random weights
        net_in = getattr(torchvision.models, architecture)(pretrained=False)

    # initialize features
    # take only convolutions for features,
    # always ends with ReLU to make last activations non-negative
    if architecture.startswith('alexnet'):
        features = list(net_in.features.children())[:-1]
    elif architecture.startswith('vgg'):
        features = list(net_in.features.children())[:-1]
    elif architecture.startswith('resnet'):
        #feat0.weight.data = torch.nn.init.kaiming_normal_(feat0.weight.data)
        # drop avgpool + fc head, keep all residual stages
        features = list(net_in.children())[:-2]
    elif architecture.startswith('densenet'):
        features = list(net_in.features.children())
        features.append(nn.ReLU(inplace=True))
    elif architecture.startswith('squeezenet'):
        features = list(net_in.features.children())
    else:
        raise ValueError(
            'Unsupported or unknown architecture: {}!'.format(architecture))

    # initialize local whitening
    if local_whitening:
        lwhiten = nn.Linear(dim, dim, bias=True)
        # TODO: lwhiten with possible dimensionality reduce

        if pretrained:
            # load precomputed whitening weights when available for this
            # architecture; otherwise keep the random initialisation
            lw = architecture
            if lw in L_WHITENING:
                print(
                    ">> {}: for '{}' custom computed local whitening '{}' is used"
                    .format(os.path.basename(__file__), lw,
                            os.path.basename(L_WHITENING[lw])))
                whiten_dir = os.path.join(get_data_root(), 'whiten')
                lwhiten.load_state_dict(
                    model_zoo.load_url(L_WHITENING[lw], model_dir=whiten_dir))
            else:
                print(
                    ">> {}: for '{}' there is no local whitening computed, random weights are used"
                    .format(os.path.basename(__file__), lw))

    else:
        lwhiten = None

    # initialize pooling
    if pooling == 'gemmp':
        pool = POOLING[pooling](p=p, mp=dim)
    else:
        pool = POOLING[pooling](p=p)

    # initialize regional pooling
    if regional:
        # wrap the base pooling in a regional pooling layer with its own
        # whitening transform
        rpool = pool
        rwhiten = nn.Linear(dim, dim, bias=True)
        # TODO: rwhiten with possible dimensionality reduce

        if pretrained:
            rw = '{}-{}-r'.format(architecture, pooling)
            if rw in R_WHITENING:
                print(
                    ">> {}: for '{}' custom computed regional whitening '{}' is used"
                    .format(os.path.basename(__file__), rw,
                            os.path.basename(R_WHITENING[rw])))
                whiten_dir = os.path.join(get_data_root(), 'whiten')
                rwhiten.load_state_dict(
                    model_zoo.load_url(R_WHITENING[rw], model_dir=whiten_dir))
            else:
                print(
                    ">> {}: for '{}' there is no regional whitening computed, random weights are used"
                    .format(os.path.basename(__file__), rw))

        pool = Rpool(rpool, rwhiten)

    # initialize whitening
    if whitening:
        whiten = nn.Linear(dim, dim, bias=True)
        # TODO: whiten with possible dimensionality reduce

        if pretrained:
            # build the lookup key encoding architecture + whitening/pooling
            # configuration, e.g. 'resnet101-lw-gem-r'
            w = architecture
            if local_whitening:
                w += '-lw'
            w += '-' + pooling
            if regional:
                w += '-r'
            if w in WHITENING:
                print(">> {}: for '{}' custom computed whitening '{}' is used".
                      format(os.path.basename(__file__), w,
                             os.path.basename(WHITENING[w])))
                whiten_dir = os.path.join(get_data_root(), 'whiten')
                whiten.load_state_dict(
                    model_zoo.load_url(WHITENING[w], model_dir=whiten_dir))
            else:
                print(
                    ">> {}: for '{}' there is no whitening computed, random weights are used"
                    .format(os.path.basename(__file__), w))
    else:
        whiten = None

    # create meta information to be stored in the network
    meta = {
        'architecture': architecture,
        'local_whitening': local_whitening,
        'pooling': pooling,
        'regional': regional,
        'whitening': whitening,
        'mean': mean,
        'std': std,
        'outputdim': dim,
        'soa': soa,
        'soa_layers': soa_layers,
    }

    # create a generic image retrieval network
    net = SOLAR_Global_Retrieval(architecture,
                                 features,
                                 lwhiten,
                                 pool,
                                 whiten,
                                 meta,
                                 pretrained_type=pretrained_type,
                                 soa_layers=soa_layers,
                                 mode=mode)

    return net
# ---- Esempio n. 7 ----
    def __init__(self, name, mode, imsize=None, nnum=5, qsize=2000, poolsize=20000, transform=None, loader=default_loader):
        """Initialise a tuples dataset for metric-learning training/validation.

        Args:
            name: dataset name; 'retrieval-SfM*' or 'gl*' families supported.
            mode: 'train' or 'val' — selects the split inside the pickled db.
            imsize: optional maximum image size.
            nnum: number of negatives per tuple.
            qsize: number of queries sampled per epoch (capped by pool size).
            poolsize: negative-pool size (capped by number of images).
            transform: optional torchvision transform applied to images.
            loader: image-loading callable.

        Raises:
            RuntimeError: for an unknown mode or dataset name.
        """

        if not (mode == 'train' or mode == 'val'):
            raise(RuntimeError("MODE should be either train or val, passed as string"))

        if name.startswith('retrieval-SfM'):
            # setting up paths
            data_root = get_data_root()
            db_root = os.path.join(data_root, 'train', name)
            ims_root = os.path.join(db_root, 'ims')

            # loading db
            db_fn = os.path.join(db_root, '{}.pkl'.format(name))
            with open(db_fn, 'rb') as f:
                db = pickle.load(f)[mode]

            # setting fullpath for images
            self.images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

        elif name.startswith('gl'):
            ## TODO: NOT IMPLEMENTED YET PROPOERLY (WITH AUTOMATIC DOWNLOAD)

            # setting up paths
            data_root = get_data_root()
            db_root = os.path.join(data_root, 'train', name)

            # NOTE(review): hard-coded machine-specific image root — should be
            # made configurable before this runs anywhere else
            self.ims_root = os.path.join('/media/ssd/datasets/gl18', 'train')

            # loading db
            db_fn = os.path.join(db_root, 'db_{}.pkl'.format(name))
            with open(db_fn, 'rb') as f:
                db = pickle.load(f)[mode]

            # setting fullpath for images
            self.images = [os.path.join(self.ims_root, db['cids'][i]+'.jpg') for i in range(len(db['cids']))]

            self.pfns = db['pidxs']

        else:
            raise(RuntimeError("Unknown dataset name!"))

        # initializing tuples dataset
        self.name = name
        self.mode = mode
        self.imsize = imsize
        self.clusters = db['cluster']
        self.qpool = db['qidxs']
        self.ppool = db['pidxs']
        self.bbxs = db['bbxs']

        if mode == 'train':
            print('__'*50)
            print('Dataset:', name)
            print('__'*50)
        print('__'*50)
        print('Total number of', mode, 'samples: ', len(self.qpool))
        print('__'*50)

        ## If we want to keep only unique q-p pairs
        ## However, ordering of pairs will change, although that is not important
        # qpidxs = list(set([(self.qidxs[i], self.pidxs[i]) for i in range(len(self.qidxs))]))
        # self.qidxs = [qpidxs[i][0] for i in range(len(qpidxs))]
        # self.pidxs = [qpidxs[i][1] for i in range(len(qpidxs))]

        # size of training subset for an epoch; the per-epoch tuple index
        # lists start empty and are filled by a re-mining step elsewhere
        self.nnum = nnum
        self.qsize = min(qsize, len(self.qpool))
        self.poolsize = min(poolsize, len(self.images))
        self.qidxs = None
        self.pidxs = None
        self.nidxs = None

        self.transform = transform
        self.loader = loader

        self.print_freq = 10
# ---- Esempio n. 8 ----
def main():
    """Evaluate the trained network on the configured test datasets.

    For each dataset: extracts multi-scale database and query descriptors,
    ranks the database by dot-product similarity, prints mAP, and writes
    ranking/embedding visualisations to tensorboard summaries.

    Raises:
        ValueError: if ``--datasets`` names a dataset not in ``datasets_names``.
    """
    args = parser.parse_args()

    # reject unknown datasets up front, before any heavy setup
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded, and download if they are not
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network
    net = load_network(network_name=args.network)
    net.mode = 'test'

    print(">>>> loaded network: ")
    print(net.meta_repr())

    # setting up the multi-scale parameters
    # NOTE(review): eval() on a CLI string is unsafe for untrusted input.
    # It is kept because typical values such as '[1, 2**(1/2), 1/2**(1/2)]'
    # contain expressions that ast.literal_eval cannot parse.
    ms = list(eval(args.multiscale))

    print(">>>> Evaluating scales: {}".format(ms))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform (normalisation statistics come from the network meta)
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        summary_ranks = tb_setup(
            os.path.join('specs/ranks/', dataset, args.network))
        summary_embeddings = tb_setup(
            os.path.join('specs/embeddings/', dataset, args.network))
        start = time.time()

        print('')
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        try:
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        except (KeyError, TypeError):
            # datasets without query bounding boxes (e.g. holidaysmanrot,
            # copydays) have no 'bbx' entry in the ground truth
            bbxs = None

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               ms=ms,
                               mode='test')
        vecs = vecs.numpy()

        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                args.image_size,
                                transform,
                                bbxs=bbxs,
                                ms=ms,
                                mode='test')
        qvecs = qvecs.numpy()

        print('>> {}: Evaluating...'.format(dataset))

        # search, rank, and print: descriptors are column vectors, so the
        # Gram matrix vecs.T @ qvecs holds one similarity column per query
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'])

        print('')

        # plot retrieval rankings and save to tensorboard summary
        for protocol in ['easy', 'medium', 'hard']:
            plot_ranks(qimages, images, ranks, cfg['gnd'], bbxs, summary_ranks,
                       dataset, 'solar-best: ', 20, protocol)

        print('')

        # plot embeddings for cluster visualisation in tensorboard/projector
        plot_embeddings(images,
                        vecs,
                        summary_embeddings,
                        imsize=64,
                        sample_freq=1)

        print('')
        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
# --- Esempio n. 9 (example 9 from the scraped source) ---
def main():
    """Evaluate the trained network with the revisitop1m distractor set.

    Same pipeline as plain evaluation, except the database descriptors are
    augmented with precomputed descriptors of the 1M distractor images
    (loaded from ``<network>_vecs_revisitop1m.pt``, which must already
    exist on disk) before ranking and computing mAP.

    Raises:
        ValueError: if ``--datasets`` names a dataset not in ``datasets_names``.
    """
    args = parser.parse_args()

    # reject unknown datasets up front, before any heavy setup
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded, and download if they are not
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network
    net = load_network(network_name=args.network)
    net.mode = 'test'

    print(">>>> loaded network: ")
    print(net.meta_repr())

    # setting up the multi-scale parameters
    # NOTE(review): eval() on a CLI string is unsafe for untrusted input.
    # It is kept because typical values such as '[1, 2**(1/2), 1/2**(1/2)]'
    # contain expressions that ast.literal_eval cannot parse.
    ms = list(eval(args.multiscale))

    print(">>>> Evaluating scales: {}".format(ms))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform (normalisation statistics come from the network meta)
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        try:
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        except (KeyError, TypeError):
            # datasets without query bounding boxes (e.g. holidaysmanrot,
            # copydays) have no 'bbx' entry in the ground truth
            bbxs = None

        # extract database and query vectors; database vectors stay as a
        # torch tensor here so the distractor vectors can be concatenated
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               ms=ms,
                               mode='test')

        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                args.image_size,
                                transform,
                                bbxs=bbxs,
                                ms=ms,
                                mode='test')
        qvecs = qvecs.numpy()

        print('>> {}: Evaluating...'.format(dataset))

        # append the precomputed 1M-distractor descriptors along the image
        # (column) dimension, then convert to numpy for ranking
        vecs_1m = torch.load(args.network + '_vecs_' + 'revisitop1m' + '.pt')
        vecs = torch.cat([vecs, vecs_1m], dim=1)
        vecs = vecs.numpy()

        # search, rank, and print: descriptors are column vectors, so the
        # Gram matrix vecs.T @ qvecs holds one similarity column per query
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))