コード例 #1
0
def testmodel(mymodel, args, cuda_gpu):
    """Evaluate a retrieval model on a query set against a database.

    Extracts one descriptor per query image and per database image, ranks
    the database by dot-product similarity, and reports metrics via
    ``compute_map_and_print``.

    Args:
        mymodel: trained network; ``mymodel(batch)`` must return one
            descriptor of size ``OUTPUT_DIM[args.backbone]`` per image.
        args: namespace providing ``json_file``, ``height``, ``width``,
            ``autoaugment``, ``valdata_dir`` and ``backbone``.
        cuda_gpu: truthy when CUDA is available; only gates moving the
            database batches to the GPU.  NOTE(review): the descriptor
            buffers and query batches are sent to the GPU unconditionally,
            so this function requires CUDA regardless of the flag — confirm
            whether CPU-only evaluation was ever intended.
    """
    # Database side: the training json defines the database images, and its
    # resize/transform settings are reused for the query loader below.
    mytraindata = myDataset(path=args.json_file,
                            height=args.height,
                            width=args.width,
                            autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=1,
                                                shuffle=False)
    # Ground truth: list of dicts, one per query (keys seen here: 'queryimgid').
    gnd = loadquery(args.valdata_dir)

    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        # One descriptor column per query image.
        # qoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        qoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda())
            qoolvecs[:, i] = out.data.squeeze()
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')

        # One descriptor column per database image (batch_size=1 above, so
        # len(mytrainloader) == number of database images).
        poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                               len(mytrainloader)).cuda()
        idlist = []  # image ids in database order, used to map ranks to gt
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()

            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            # NOTE(review): unlike the query loop this assigns without
            # .squeeze(); relies on broadcasting a (1, D) output into (D,).
            poolvecs[:, index] = out
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print
        # scores[d, q] = <database d, query q>; argsort(-scores) ranks the
        # database best-first for each query column.
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = args.json_file.split('/')[-1].replace("all.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
コード例 #2
0
def extract_local_vectors(net,
                          images,
                          image_size,
                          transform,
                          bbxs=None,
                          ms=(1,),
                          msp=1,
                          print_freq=10):
    """Extract per-image local descriptors with ``net`` over ``images``.

    Args:
        net: network; moved to the GPU and put in eval mode, then consumed
            by ``extract_ssl`` one image at a time.
        images: list of image paths handed to ``ImagesFromList``.
        image_size: image resize target passed as ``imsize``.
        transform: torchvision-style transform applied per image.
        bbxs: optional per-image bounding boxes.
        ms: scale factors.  Only single scale (one entry) is implemented.
            The default is a tuple instead of the original mutable ``[1]``
            list — a mutable default is shared across calls, a classic
            Python pitfall; behavior for all callers is unchanged.
        msp: multi-scale pooling power (unused until multi-scale exists).
        print_freq: progress print interval.

    Returns:
        list with one ``extract_ssl`` result per input image.

    Raises:
        NotImplementedError: when more than one scale is requested.
    """
    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # creating dataset loader; batch_size=1 because images are not
    # guaranteed to share a common size after the resize.
    loader = torch.utils.data.DataLoader(ImagesFromList(root='',
                                                        images=images,
                                                        imsize=image_size,
                                                        bbxs=bbxs,
                                                        transform=transform),
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=8,
                                         pin_memory=True)

    # extracting vectors
    with torch.no_grad():
        vecs = []
        # 'batch' rather than 'input' to avoid shadowing the builtin.
        for i, batch in enumerate(loader):
            batch = batch.cuda()

            if len(ms) == 1:
                vecs.append(extract_ssl(net, batch))
            else:
                # TODO: not implemented yet
                # vecs.append(extract_msl(net, batch, ms, msp))
                raise NotImplementedError

            if (i + 1) % print_freq == 0 or (i + 1) == len(images):
                print('\r>>>> {}/{} done...'.format((i + 1), len(images)),
                      end='')
        print('')

    return vecs
コード例 #3
0
def extract_vectors(net,
                    images,
                    image_size,
                    transform,
                    bbxs=None,
                    ms=(1,),
                    msp=1,
                    print_freq=10):
    """Extract one global descriptor per image with ``net``.

    Args:
        net: network with ``net.meta['outputdim']``; moved to the GPU and
            put in eval mode.
        images: list of image paths handed to ``ImagesFromList``.
        image_size: image resize target passed as ``imsize``.
        transform: torchvision-style transform applied per image.
        bbxs: optional per-image bounding boxes.
        ms: scale factors; ``(1,)`` means single-scale (``extract_ss``),
            anything else triggers multi-scale (``extract_ms``).  The
            default is a tuple instead of the original mutable ``[1]``
            list — mutable defaults are shared across calls; behavior for
            all callers is unchanged.
        msp: power parameter forwarded to ``extract_ms``.
        print_freq: progress print interval.

    Returns:
        torch.Tensor of shape (outputdim, len(images)), one descriptor
        per column, kept on the CPU.
    """
    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # creating dataset loader; batch_size=1 because images are not
    # guaranteed to share a common size after the resize.
    loader = torch.utils.data.DataLoader(ImagesFromList(root='',
                                                        images=images,
                                                        imsize=image_size,
                                                        bbxs=bbxs,
                                                        transform=transform),
                                         batch_size=1,
                                         shuffle=False)

    # extracting vectors
    with torch.no_grad():
        vecs = torch.zeros(net.meta['outputdim'], len(images))
        # 'batch' rather than 'input' to avoid shadowing the builtin.
        for i, batch in enumerate(loader):
            batch = batch.cuda()

            if len(ms) == 1 and ms[0] == 1:
                vecs[:, i] = extract_ss(net, batch)
            else:
                vecs[:, i] = extract_ms(net, batch, ms, msp)

            if (i + 1) % print_freq == 0 or (i + 1) == len(images):
                print('\r>>>> {}/{} done...'.format((i + 1), len(images)),
                      end='')
        print('')

    return vecs
コード例 #4
0
    def forward(self, x, need_feature=True):
        """Run the backbone while recording every intermediate activation,
        then pool, flatten, and project into the embedding space.

        Each backbone submodule's output is stored in ``self.features``
        under the submodule's name; the pooled map is stored under the key
        ``'pool'``.  ``need_feature`` is accepted for interface
        compatibility but is not consulted here.

        Args:
            x: input tensor fed to the first backbone submodule.
            need_feature: unused flag (kept for callers).

        Returns:
            The embedding produced by ``self.embedding_linear``.
        """
        out = x
        for layer_name, layer in self._baselayer._modules.items():
            out = layer(out)
            self.features[layer_name] = out

        pooled = self.pool(out)
        self.features['pool'] = pooled
        flattened = self.Flatten(pooled)
        return self.embedding_linear(flattened)


if __name__ == '__main__':
    # Smoke test: push a single hard-coded image through the Delg wrapper
    # around a pretrained resnet50 and print the raw model output.
    qloader = torch.utils.data.DataLoader(ImagesFromList(
        root='', images=['/home/shibaorong/modelTorch/test.jpg'], imsize=224),
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=0,
                                          pin_memory=True)
    # Keyword arguments forwarded to the Delg constructor.
    kargs = {}
    kargs['name'] = 'resnet50'
    kargs['num_classes'] = 11
    initmodel = networks_map['resnet50'](pretrained=True)
    # NOTE(review): 're' shadows the stdlib regex module name.
    re = Delg(initmodel, **kargs)

    for input in qloader:
        out = re(input)
        print(out)
コード例 #5
0
    def create_triplet_classbased_2(self, mymodel, args):
        """Mine (anchor, positive, negative) triplets from ranked retrieval.

        Extracts a descriptor for every image in ``self.filenames``, picks
        anchors via ``self.sample(args.classnum)``, ranks all images against
        each anchor, and forms a triplet whenever a same-label image is
        ranked immediately after a different-label one (the preceding
        wrong-label image becomes the negative).  The result is stored in
        ``self.triplet_pool``.
        """
        print('create_triplet_classed_based_2....................')

        #candidatesindexs = random.sample([i for i in range(len(self.filenames))], int(len(self.filenames) / args.classnum))
        # Anchor indices into self.filenames.
        candidatesindexs = self.sample(args.classnum)

        # One descriptor column per image (CPU tensor).
        vecs = torch.zeros([OUTPUT_DIM[args.backbone], len(self.filenames)])
        tripletlist = []

        mymodel.eval()
        with torch.no_grad():
            print('>> Extracting descriptors for query images...')
            qloader = torch.utils.data.DataLoader(ImagesFromList(
                root='',
                images=self.filenames,
                imsize=self.height,
                transform=self.transform),
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=80,
                                                  pin_memory=True)
            for i, input in enumerate(qloader):
                out = mymodel(input.cuda())
                # Some model heads return a dict; the embedding lives
                # under 'feature'.
                if isinstance(out, dict):
                    out = out['feature']
                vecs[:, i] = out
                if (i + 1) % 10 == 0:
                    print('\r>>>> {}/{} done...'.format(
                        i + 1, len(self.filenames)),
                          end='')

        # Rank every image against each anchor, best match first.
        qvecs = vecs[:, candidatesindexs]
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)

        for i in range(len(candidatesindexs)):

            qlabel = self.labels[candidatesindexs[i]]
            aimg = self.filenames[candidatesindexs[i]]

            rank = ranks[:, i]
            # 'mark' is True when the previously-seen ranked image had a
            # different label than the anchor.
            mark = False
            for indj in range(len(self.filenames)):
                # Only consider the top self.qscale ranked positions.
                if indj > self.qscale:
                    continue
                j = rank[indj]
                rlabel = self.labels[j]

                if qlabel == rlabel:
                    if mark:
                        # Positive ranked directly below a negative: use
                        # that preceding wrong-label image as the negative.
                        pimg = self.filenames[j]
                        nimg = self.filenames[rank[indj - 1]]
                        nlabel = self.labels[rank[indj - 1]]
                        tripletlist.append([(aimg, rlabel), (pimg, rlabel),
                                            (nimg, nlabel)])
                    mark = False

                else:
                    mark = True

            # NOTE(review): this assignment sits inside the anchor loop and
            # so runs once per anchor; the value is the same list object
            # each time, so the net effect is unchanged, but it likely
            # belongs one indentation level out.
            self.triplet_pool = tripletlist
コード例 #6
0
def testmodel(args, cuda_gpu, type='extractor', similartype='dot'):
    """Build a model from a checkpoint, extract query/database descriptors,
    rank, and report mAP and MRR.

    Args:
        args: namespace with ``backbone``, ``classnum``, ``gpu``,
            ``train_dir`` (checkpoint path), ``json_file``, ``height``,
            ``width``, ``autoaugment``, ``batch_size`` and ``valdata_dir``.
        cuda_gpu: truthy when CUDA is available (gates .cuda() on batches).
        type: 'base' (classifier logits as descriptor, dim = classnum) or
            'extractor' (embedding, dim = OUTPUT_DIM[backbone]).
            NOTE(review): shadows the builtin ``type``.
        similartype: 'dot' (similarity, descending) or 'euclidean'
            (distance, ascending) ranking.

    Returns:
        (map, mrr) averaged over queries that have positives.
    """
    mymodel = builGraph.getModel(args.backbone,
                                 args.classnum,
                                 args.gpu,
                                 type,
                                 cuda_gpu=cuda_gpu,
                                 pretrained=False)
    # Restore weights when a checkpoint exists; otherwise evaluate the
    # randomly initialized model.
    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    print(similartype)
    mytraindata = myDataset(path=args.json_file,
                            height=args.height,
                            width=args.width,
                            autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=args.batch_size,
                                                num_workers=10,
                                                shuffle=False)

    # Ground truth per query: dict with 'queryimgid', 'ok' and
    # (optionally) 'junk' image-id lists.
    gnd = parseeye(args.json_file, args.valdata_dir)
    #gnd=random.sample(gnd,50)
    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height,
            transform=mytraindata.transform),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        # Descriptor dimensionality depends on which head is evaluated.
        if type == 'base':
            qoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        elif type == 'extractor':
            qoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        lenq = len(qloader)
        train_acc = 0.
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda())
            if isinstance(out, list):
                out = torch.cat(out, dim=0)

            # NOTE(review): with batch_size > 1 this writes only one
            # column per batch — descriptor columns and gnd rows drift
            # apart unless batch_size == 1; confirm intended usage.
            qoolvecs[:, i] = out.data.squeeze()
            prediction = torch.argmax(out, 1)  # NOTE(review): unused

            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')

        if type == 'extractor':
            poolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                   len(mytrainloader)).cuda()
        elif type == 'base':
            poolvecs = torch.zeros(args.classnum, len(mytrainloader)).cuda()
        idlist = []  # database image ids in iteration order
        train_acc = 0
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
                batch_y = batch_y.cuda()

            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            if isinstance(out, list):
                out = torch.cat(out, dim=0)

            # Side computation of classification accuracy on the database.
            # NOTE(review): 'acc' divides the cumulative correct count by
            # the current batch size, not by the number of samples seen,
            # and is never used afterwards.
            prediction = torch.argmax(out, 1)
            train_acc += (prediction == batch_y).sum().float()
            acc = train_acc / len(batch_x)

            poolvecs[:, index] = out
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print
        #scores = np.dot(vecs.T, qvecs)
        #ranks = np.argsort(-scores, axis=0)
        if similartype == 'dot':
            # Higher similarity is better: sort descending.
            scores = np.dot(vecs.T, qvecs)
            ranks = np.argsort(-scores, axis=0)
        elif similartype == 'euclidean':
            # Squared L2 distance per (database, query) pair; lower is
            # better, so sort ascending.
            dis = np.zeros([vecs.shape[1], qvecs.shape[1]])

            for j in range(qvecs.shape[1]):
                d = (vecs - np.reshape(qvecs[:, j], (qvecs.shape[0], 1)))**2
                disj = np.sum(d, axis=0)
                dis[:, j] = disj
            ranks = np.argsort(dis, axis=0)
        #compute_map_and_print(dataset, ranks, gnd, idlist)
        '''scale = [5,10,20,30,40,50,60]
        reranks = ranks
        for s in scale:
            rerankvec = np.zeros(qvecs.shape)

            for i in range(qvecs.shape[1]):
                features = np.asarray([vecs[:, j] for j in reranks[:s, i]])
                rerankvec[:, i] = np.average(features, axis=0)
            scores = np.dot(vecs.T, rerankvec) + scores
            reranks = np.argsort(-scores, axis=0)

        ranks=reranks'''
        print('RQE.....................')
        # mAP / MRR computation in the style of the Oxford/Paris revisited
        # evaluation protocol: junk images are removed from the ranking
        # before average precision is computed.
        map = 0.  # NOTE(review): shadows the builtin 'map'
        mrr = 0.
        nq = len(gnd)  # number of queries
        aps = np.zeros(nq)
        nempty = 0

        for i in np.arange(nq):
            qgnd = np.array(gnd[i]['ok'])

            # no positive images, skip from the average
            if qgnd.shape[0] == 0:
                aps[i] = float('nan')
                nempty += 1
                continue

            # NOTE(review): bare except silently treats any error as
            # "no junk list"; narrowing to KeyError would be safer.
            try:
                qgndj = np.array(gnd[i]['junk'])
            except:
                qgndj = np.empty(0)
            # Ranked database image ids for query i.
            r = [idlist[j] for j in ranks[:, i]]
            # sorted positions of positive and junk images (0 based)
            pos = np.arange(ranks.shape[0])[np.in1d(r, qgnd)]
            junk = np.arange(ranks.shape[0])[np.in1d(r, qgndj)]

            k = 0
            ij = 0
            if len(junk):
                # decrease positions of positives based on the number of
                # junk images appearing before them
                ip = 0
                while (ip < len(pos)):
                    while (ij < len(junk) and pos[ip] > junk[ij]):
                        k += 1
                        ij += 1
                    pos[ip] = pos[ip] - k
                    ip += 1

            # compute ap
            ap = compute_ap(pos, len(qgnd))
            # Reciprocal rank of the first positive (1-based position).
            mr = 1 / (pos[0] + 1)
            map = map + ap
            mrr = mrr + mr
            aps[i] = ap

            # compute precision @ k
            pos += 1  # get it to 1-based

        # Average over queries that actually have positives.
        map = map / (nq - nempty)
        mrr = mrr / (nq - nempty)
        print(type)
        print('>> {}: mAP {:.2f}'.format('eye', np.around(map * 100,
                                                          decimals=2)))
        print('>> {}: MRR {:.2f}'.format('eye', np.around(mrr * 100,
                                                          decimals=2)))
        return map, mrr
コード例 #7
0
  def create_triplet(self,mymodel,featuredim=2048):
    """Mine (anchor, positive, negative) triplets from ranked retrieval.

    Samples self.K labels, gathers their images from self.datapool,
    extracts a descriptor per image, then for each of
    self.mining_batch_size randomly chosen anchors walks the ranked list:
    whenever a same-label image directly follows a different-label one,
    the pair (that image, the preceding one) becomes (positive, negative).
    Results are stored in self.triplets.

    Args:
      mymodel: network producing one descriptor per image.
      featuredim: descriptor dimensionality (default 2048).
    """
    choose_labels=random.sample(self.labels,self.K)
    # candidates: [filename, label] for every image of the chosen labels.
    candidates=[]
    for label in choose_labels:
      for name in self.datapool[label]:
        candidates.append([name,label])
    #candidates=random.sample(candidates,self.mining_batch_size)

    random.shuffle(candidates)
    #vecs=torch.zeros([featuredim,self.mining_batch_size])
    # One descriptor column per candidate image (CPU tensor).
    vecs = torch.zeros([featuredim, len(candidates)])
    tripletlist=[]

    mymodel.eval()
    with torch.no_grad():
      print('>> Extracting descriptors for query images...')
      qloader = torch.utils.data.DataLoader(
        ImagesFromList(root='', images=[i[0] for i in candidates], imsize=self.height,
                       transform=self.transform),
        batch_size=1, shuffle=False, num_workers=0, pin_memory=True
      )
      for i, input in enumerate(qloader):
        out = mymodel(input.cuda())
        vecs[:, i] = out
        if (i + 1) % 10 == 0:
          print('\r>>>> {}/{} done...'.format(i + 1, len(candidates)), end='')

    # Anchor subset; rank every candidate against each anchor, best first.
    qindexs = random.sample([i for i in range(len(candidates))], self.mining_batch_size)
    qvecs = vecs[:, qindexs]
    scores = np.dot(vecs.T, qvecs)
    ranks = np.argsort(-scores, axis=0)

    for i in range(ranks.shape[1]):

      qlabel=candidates[qindexs[i]][1]
      aimg=candidates[qindexs[i]][0]
      # NOTE(review): pimg/nimg/pmark/nmark are leftovers of the
      # commented-out strategy below and are unused by the active logic.
      pimg=None
      nimg=None
      pmark=0
      nmark=0
      rank=ranks[:,i]
      # 'mark' is True when the previous ranked image had a different label.
      mark = False
      for indj in range(len(candidates)):
        # Only the top self.T ranked positions are considered.
        if indj>self.T:
          continue
        j=rank[indj]
        rlabel=candidates[j][1]

        if qlabel==rlabel:
          if mark:
            # Positive ranked directly below a negative: the preceding
            # wrong-label image becomes the negative of the triplet.
            pimg = candidates[j][0]
            nimg=candidates[rank[indj-1]][0]
            tripletlist.append((aimg, pimg, nimg))
          mark=False
        else:
          mark=True
        '''if (rlabel!=qlabel) and (nimg is None):
          nimg=candidates[j][0]
          nmark=indj
          if indj>2:
            pimg=candidates[rank[indj-1]][0]
            pmark=indj

        elif (rlabel==qlabel) and (nimg is not None):
          pimg=candidates[j][0]
          pmark=indj
        if pimg is not None and nimg is not None and pmark-nmark<self.T:
          tripletlist.append((aimg,pimg,nimg))
          break'''


    self.triplets=tripletlist
    '''if len(self.triplets)==0:
コード例 #8
0
  def create_epoch_tuples(self, net):
    """Select (query, positive, negatives) training tuples for one epoch.

    Draws self.qsize random query/positive pairs, then hard-mines
    self.nnum negatives per query from a random pool of self.poolsize
    images: negatives are the highest-scoring pool images whose cluster
    differs from the query's, with at most one image taken per cluster.

    Args:
      net: network with net.meta['outputdim'] and net.meta_repr().

    Returns:
      Average L2 distance between queries and their mined negatives
      (0 when self.nnum == 0 and mining is skipped).
    """

    print('>> Creating tuples for an epoch of {}-{}...'.format(self.name, self.mode))
    print(">>>> used network: ")
    print(net.meta_repr())

    ## ------------------------
    ## SELECTING POSITIVE PAIRS
    ## ------------------------

    # draw qsize random queries for tuples
    idxs2qpool = torch.randperm(len(self.qpool))[:self.qsize]
    self.qidxs = [self.qpool[i] for i in idxs2qpool]
    self.pidxs = [self.ppool[i] for i in idxs2qpool]

    ## ------------------------
    ## SELECTING NEGATIVE PAIRS
    ## ------------------------

    # if nnum = 0 create dummy nidxs
    # useful when only positives used for training
    if self.nnum == 0:
      self.nidxs = [[] for _ in range(len(self.qidxs))]
      return 0

    # draw poolsize random images for pool of negatives images
    idxs2images = torch.randperm(len(self.images))[:self.poolsize]

    # prepare network
    net.cuda()
    net.eval()

    # no gradients computed, to reduce memory and increase speed
    with torch.no_grad():

      print('>> Extracting descriptors for query images...')
      # prepare query loader
      loader = torch.utils.data.DataLoader(
        ImagesFromList(root='', images=[self.images[i] for i in self.qidxs], imsize=self.imsize,
                       transform=self.transform),
        batch_size=1, shuffle=False, num_workers=8, pin_memory=True
      )
      # extract query vectors (one column per query, on the GPU)
      qvecs = torch.zeros(net.meta['outputdim'], len(self.qidxs)).cuda()
      for i, input in enumerate(loader):
        qvecs[:, i] = net(input.cuda()).data.squeeze()
        if (i + 1) % self.print_freq == 0 or (i + 1) == len(self.qidxs):
          print('\r>>>> {}/{} done...'.format(i + 1, len(self.qidxs)), end='')
      print('')

      print('>> Extracting descriptors for negative pool...')
      # prepare negative pool data loader
      loader = torch.utils.data.DataLoader(
        ImagesFromList(root='', images=[self.images[i] for i in idxs2images], imsize=self.imsize,
                       transform=self.transform),
        batch_size=1, shuffle=False, num_workers=8, pin_memory=True
      )
      # extract negative pool vectors
      poolvecs = torch.zeros(net.meta['outputdim'], len(idxs2images)).cuda()
      for i, input in enumerate(loader):
        poolvecs[:, i] = net(input.cuda()).data.squeeze()
        if (i + 1) % self.print_freq == 0 or (i + 1) == len(idxs2images):
          print('\r>>>> {}/{} done...'.format(i + 1, len(idxs2images)), end='')
      print('')

      print('>> Searching for hard negatives...')
      # compute dot product scores and ranks on GPU
      scores = torch.mm(poolvecs.t(), qvecs)
      scores, ranks = torch.sort(scores, dim=0, descending=True)
      avg_ndist = torch.tensor(0).float().cuda()  # for statistics
      n_ndist = torch.tensor(0).float().cuda()  # for statistics
      # selection of negative examples
      self.nidxs = []
      for q in range(len(self.qidxs)):
        # do not use query cluster,
        # those images are potentially positive
        qcluster = self.clusters[self.qidxs[q]]
        clusters = [qcluster]
        nidxs = []
        r = 0
        # walk the ranking until nnum cluster-distinct negatives are found
        while len(nidxs) < self.nnum:
          potential = idxs2images[ranks[r, q]]
          # take at most one image from the same cluster
          if not self.clusters[potential] in clusters:
            nidxs.append(potential)
            clusters.append(self.clusters[potential])
            # 1e-6 keeps sqrt's gradient finite for identical vectors
            # (harmless here under no_grad; kept from the original).
            avg_ndist += torch.pow(qvecs[:, q] - poolvecs[:, ranks[r, q]] + 1e-6, 2).sum(dim=0).sqrt()
            n_ndist += 1
          r += 1
        self.nidxs.append(nidxs)
      print('>>>> Average negative l2-distance: {:.2f}'.format(avg_ndist / n_ndist))
      print('>>>> Done')

    return (avg_ndist / n_ndist).item()  # return average negative l2-distance
コード例 #9
0
    def create_epoch(self, branch_c, branch_p):
        """Mine cross-branch triplets between the ``c`` and ``p`` sets.

        Encodes a random two-thirds of ``self.c`` with ``branch_c`` as
        queries and all of ``self.p`` with ``branch_p`` as the gallery,
        ranks the gallery per query, and for each query builds one
        [(anchor, label), (positive, label), (negative, label)] triplet:
        the first wrong-label gallery hit is the negative and the nearest
        same-label hit around it is the positive.  Stores the shuffled
        result in ``self.triplet_pool``.

        Note: descriptor dimensionality is hard-coded to 2048 here.
        """
        querylen = int(len(self.c) * 2 / 3)
        candidatesindexs = random.sample([i for i in range(len(self.c))],
                                         querylen)
        qvecs = torch.zeros([2048, querylen])
        vecs = torch.zeros([2048, len(self.p)])

        with torch.no_grad():
            print('>> Extracting descriptors for query images...')
            qloader = torch.utils.data.DataLoader(ImagesFromList(
                root='',
                images=[self.c[i][1] for i in candidatesindexs],
                imsize=self.imsize,
                transform=self.transform),
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=0,
                                                  pin_memory=True)
            for i, input in enumerate(qloader):
                out = branch_c(input.cuda())
                # Dict-returning heads keep the embedding under 'feature'.
                if isinstance(out, dict):
                    out = out['feature']
                qvecs[:, i] = out
                if (i + 1) % 10 == 0:
                    print('\r>>>> {}/{} done...'.format(i + 1, querylen),
                          end='')

            print('>> Extracting descriptors for gallery images...')
            qloader = torch.utils.data.DataLoader(ImagesFromList(
                root='',
                images=[i[1] for i in self.p],
                imsize=self.imsize,
                transform=self.transform),
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=0,
                                                  pin_memory=True)
            for i, input in enumerate(qloader):
                out = branch_p(input.cuda())
                if isinstance(out, dict):
                    out = out['feature']
                vecs[:, i] = out
                if (i + 1) % 10 == 0:
                    print('\r>>>> {}/{} done...'.format(i + 1, len(self.p)),
                          end='')

        # Rank the gallery per query, best match first.
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        tripletlist = []

        for i in range(len(candidatesindexs)):

            # self.c entries are (label, filename) pairs.
            qlabel = self.c[candidatesindexs[i]][0]
            aimg = self.c[candidatesindexs[i]][1]
            pimg = None
            nimg = None

            rank = ranks[:, i]
            for indj in range(len(self.p)):
                # Only consider the top self.qscale ranked positions.
                if indj > self.qscale:
                    continue
                j = rank[indj]
                rlabel = self.p[j][0]
                if (rlabel != qlabel) and (nimg is None):
                    # First wrong-label hit becomes the negative; the
                    # previously ranked image (if deep enough) doubles as
                    # a provisional positive.
                    nimg = self.p[j][1]
                    nlabel = rlabel
                    if indj > 2:
                        pimg = self.p[rank[indj - 1]][1]
                        plabel = self.p[rank[indj - 1]][0]
                elif (rlabel == qlabel) and (nimg is not None):
                    # First same-label hit after the negative is the positive.
                    pimg = self.p[j][1]
                    plabel = self.p[j][0]

                if pimg is not None and nimg is not None:
                    tripletlist.append([(aimg, qlabel), (pimg, plabel),
                                        (nimg, nlabel)])
                    break

        random.shuffle(tripletlist)
        self.triplet_pool = tripletlist
コード例 #10
0
def testmodel(args, cuda_gpu, type='multitrain', similartype='dot'):
    """Evaluate a multi-head retrieval model and report mAP/MRR per head.

    The model is expected to return a dict with keys 'out' (classifier
    logits, dim = args.classnum), 'feature' and 'intersect' (embeddings,
    dim = OUTPUT_DIM[args.backbone]).  Each head is ranked independently
    by dot-product similarity and scored with the junk-aware AP protocol.

    Args:
        args: namespace with ``backbone``, ``classnum``, ``gpu``,
            ``train_dir``, ``json_file``, ``height``, ``width``,
            ``autoaugment``, ``batch_size`` and ``valdata_dir``.
        cuda_gpu: truthy when CUDA is available (gates .cuda() on database
            batches; descriptor buffers are placed on the GPU regardless).
        type: model-type string forwarded to builGraph.getModel.
            NOTE(review): shadows the builtin ``type``.
        similartype: printed only; ranking here is always dot-product.

    Returns:
        dict mapping head name (via the module-level ``mmap`` index→name
        table — defined outside this view, presumably
        ['base', 'extractor', 'intersect']; TODO confirm) to
        {'MAP': float, 'MRR': float}.
    """
    res = {}
    mymodel = builGraph.getModel(args.backbone,
                                 args.classnum,
                                 args.gpu,
                                 type,
                                 cuda_gpu=cuda_gpu,
                                 pretrained=False)
    if os.path.exists(args.train_dir):
        print(args.train_dir)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        mymodel.load_state_dict(checkpoint['model_state_dict'])
    print(similartype)
    mytraindata = myDataset(path=args.json_file,
                            height=args.height,
                            width=args.width,
                            autoaugment=args.autoaugment)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=args.batch_size,
                                                num_workers=50,
                                                shuffle=False)

    # Ground truth per query: 'queryimgid', 'ok' and optional 'junk' lists.
    gnd = parseeye(args.json_file, args.valdata_dir)

    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height,
            transform=mytraindata.transform),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=50,
                                              pin_memory=True)

        # One buffer per head: classifier, feature embedding, intersect.
        cqoolvecs = torch.zeros(args.classnum, len(gnd)).cuda()
        eqoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()
        iqoolvecs = torch.zeros(OUTPUT_DIM[args.backbone], len(gnd)).cuda()

        lenq = len(qloader)
        for i, input in enumerate(qloader):
            out = mymodel(input.cuda())
            if isinstance(out, list):
                out = torch.cat(out, dim=0)

            # NOTE(review): with batch_size > 1 this writes only one
            # column per batch; query columns then drift out of step with
            # gnd — confirm batch_size is 1 in practice.
            cqoolvecs[:, i] = out['out']
            eqoolvecs[:, i] = out['feature']
            iqoolvecs[:, i] = out['intersect']
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')

        epoolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                len(mytrainloader)).cuda()
        ipoolvecs = torch.zeros(OUTPUT_DIM[args.backbone],
                                len(mytrainloader)).cuda()
        cpoolvecs = torch.zeros(args.classnum, len(mytrainloader)).cuda()

        idlist = []  # database image ids in iteration order
        print('>> Extracting descriptors for database images...')
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()

            batch_x = batch_x.float()
            # batch_x, batch_y = Variable(batch_x), Variable(batch_y)
            out = mymodel(batch_x)
            if isinstance(out, list):
                out = torch.cat(out, dim=0)
            cpoolvecs[:, index] = out['out']
            epoolvecs[:, index] = out['feature']
            ipoolvecs[:, index] = out['intersect']
            if (index + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(index + 1,
                                                    len(mytrainloader)),
                      end='')

        cvecs = cpoolvecs.cpu().numpy()
        evecs = epoolvecs.cpu().numpy()
        ivecs = ipoolvecs.cpu().numpy()
        cqvecs = cqoolvecs.cpu().numpy()
        eqvecs = eqoolvecs.cpu().numpy()
        iqvecs = iqoolvecs.cpu().numpy()

        # search, rank, and print
        #scores = np.dot(vecs.T, qvecs)
        #ranks = np.argsort(-scores, axis=0)

        # Rank each head independently, best match first.
        cscores = np.dot(cvecs.T, cqvecs)
        cranks = np.argsort(-cscores, axis=0)

        escores = np.dot(evecs.T, eqvecs)
        eranks = np.argsort(-escores, axis=0)

        iscores = np.dot(ivecs.T, iqvecs)
        iranks = np.argsort(-iscores, axis=0)
        '''cscores = torch.mm(cpoolvecs.t(), cqoolvecs)
        cranks = torch.argsort(-cscores, axis=0).cpu().numpy()

        escores = torch.mm(epoolvecs.t(), eqoolvecs)
        eranks = torch.argsort(-escores, axis=0).cpu().numpy()

        iscores = torch.mm(ipoolvecs.t(), iqoolvecs)
        iranks = torch.argsort(-iscores, axis=0).cpu().numpy()'''

        rrank = [cranks, eranks, iranks]

        # Score each head's ranking with the junk-aware AP protocol.
        for index, ranks in enumerate(rrank):
            if index == 0:
                print('base................')
            elif index == 1:
                print('extractor.....................')
            else:
                print('intersect......................')

            map = 0.  # NOTE(review): shadows the builtin 'map'
            mrr = 0.
            nq = len(gnd)  # number of queries
            aps = np.zeros(nq)
            nempty = 0

            for i in np.arange(nq):
                qgnd = np.array(gnd[i]['ok'])

                # no positive images, skip from the average
                if qgnd.shape[0] == 0:
                    aps[i] = float('nan')
                    nempty += 1
                    continue

                # NOTE(review): bare except treats any error as "no junk
                # list"; narrowing to KeyError would be safer.
                try:
                    qgndj = np.array(gnd[i]['junk'])
                except:
                    qgndj = np.empty(0)
                # Ranked database image ids for query i.
                r = [idlist[j] for j in ranks[:, i]]
                # sorted positions of positive and junk images (0 based)
                pos = np.arange(ranks.shape[0])[np.in1d(r, qgnd)]
                junk = np.arange(ranks.shape[0])[np.in1d(r, qgndj)]

                k = 0
                ij = 0
                if len(junk):
                    # decrease positions of positives based on the number of
                    # junk images appearing before them
                    ip = 0
                    while (ip < len(pos)):
                        while (ij < len(junk) and pos[ip] > junk[ij]):
                            k += 1
                            ij += 1
                        pos[ip] = pos[ip] - k
                        ip += 1

                # compute ap
                ap = compute_ap(pos, len(qgnd))
                # Reciprocal rank of the first positive (1-based position).
                mr = 1 / (pos[0] + 1)
                map = map + ap
                mrr = mrr + mr
                aps[i] = ap

                # compute precision @ k
                pos += 1  # get it to 1-based

            # Average over queries that actually have positives.
            map = map / (nq - nempty)
            mrr = mrr / (nq - nempty)
            print(type)
            print('>> {}: mAP {:.2f}'.format('eye',
                                             np.around(map * 100, decimals=2)))
            print('>> {}: MRR {:.2f}'.format('eye',
                                             np.around(mrr * 100, decimals=2)))

            res[mmap[index]] = {'MAP': map, 'MRR': mrr}

        return res
コード例 #11
0
def testTriplet(params, transform):
    """Evaluate a triplet-trained retrieval model.

    Extracts L2-space descriptors for every query image and every database
    image, ranks the database by dot-product similarity against each query,
    and reports retrieval metrics via ``compute_map_and_print``.

    Args:
        params: dict of run settings; keys used here: 'data_dir',
            'autoaugment', 'class_num', 'height', 'modelName', 'Gpu',
            'train_dir' (checkpoint path), 'valdata_dir' (query ground truth).
        transform: image transform applied by the dataset/loaders.

    Side effects: prints progress and metrics; loads the model checkpoint
    from ``params['train_dir']`` when it exists. Requires CUDA.
    """
    mytraindata = OnlineTripletData(path=params['data_dir'],
                                    autoaugment=params['autoaugment'],
                                    outputdim=params['class_num'],
                                    imsize=params['height'],
                                    transform=transform)
    cuda_gpu = torch.cuda.is_available()
    miningmodel = builGraph.getModel(params['modelName'],
                                     params['class_num'],
                                     params['Gpu'],
                                     'triplet',
                                     cuda_gpu=cuda_gpu)
    gnd = loadquery(params['valdata_dir'])

    # Resume weights from a checkpoint when one is available.
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        miningmodel.load_state_dict(checkpoint['model_state_dict'])

    miningmodel.eval()

    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.imsize,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        qoolvecs = torch.zeros(params['class_num'], len(gnd)).cuda()
        # BUGFIX: progress totals previously used mytraindata.qsize (a
        # training-time mining attribute), not the actual number of images
        # in each loader, so the "{}/{}" counters were wrong.
        nquery = len(qloader)
        for i, input in enumerate(qloader):
            out, _ = miningmodel(input.cuda())
            qoolvecs[:, i] = out.data.squeeze()
            if (i + 1) % mytraindata.print_freq == 0 or (i + 1) == nquery:
                print('\r>>>> {}/{} done...'.format(i + 1, nquery), end='')
        print('')

        print('>> Extracting descriptors for data images...')
        dloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['filenames'] for i in mytraindata.data],
            imsize=mytraindata.imsize,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        poolvecs = torch.zeros(params['class_num'],
                               len(mytraindata.data)).cuda()
        idlist = [i['filenames'] for i in mytraindata.data]
        ndata = len(dloader)  # database size, not the query-set size
        for i, input in enumerate(dloader):
            out, _ = miningmodel(input.cuda())
            poolvecs[:, i] = out.data.squeeze()
            if (i + 1) % mytraindata.print_freq == 0 or (i + 1) == ndata:
                print('\r>>>> {}/{} done...'.format(i + 1, ndata), end='')
        print('')

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print: scores[d, q] = <db descriptor d, query q>;
        # argsort descending gives database ranks per query column.
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = params['data_dir'].split('/')[-1].replace("train.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
コード例 #12
0
def testOnlinepair(params, transform):
    """Evaluate an online-pair model loaded in 'triplet' mode.

    Extracts descriptors for the query set and the full training set by
    feeding each image to the model as a degenerate triplet (same tensor
    three times), ranks the database by dot-product similarity, and reports
    retrieval metrics via ``compute_map_and_print``.

    Args:
        params: dict of run settings; keys used here: 'data_dir', 'height',
            'width', 'autoaugment', 'class_num', 'modelName', 'Gpu',
            'train_dir' (checkpoint path), 'valdata_dir' (query ground truth).
        transform: image transform applied by the dataset/loaders.

    Side effects: prints progress and metrics; loads the model checkpoint
    from ``params['train_dir']`` when it exists. Requires CUDA for the
    descriptor matrices.
    """
    mytraindata = myDataset(path=params['data_dir'],
                            height=params['height'],
                            width=params['width'],
                            autoaugment=params['autoaugment'],
                            transform=transform)
    mytrainloader = torch.utils.data.DataLoader(mytraindata,
                                                batch_size=1,
                                                shuffle=False)
    gnd = loadquery(params['valdata_dir'])
    cuda_gpu = torch.cuda.is_available()
    # The model is built in 'triplet' mode regardless of params['model_type'],
    # so its forward expects three inputs and returns three outputs.
    mymodel = builGraph.getModel(params['modelName'],
                                 params['class_num'],
                                 params['Gpu'],
                                 'triplet',
                                 cuda_gpu=cuda_gpu)

    # Resume weights from a checkpoint when one is available.
    if os.path.exists(params['train_dir']):
        checkpoint = torch.load(params['train_dir'])
        mymodel.load_state_dict(checkpoint['model_state_dict'])

    mymodel.eval()
    with torch.no_grad():
        print('>> Extracting descriptors for query images...')
        qloader = torch.utils.data.DataLoader(ImagesFromList(
            root='',
            images=[i['queryimgid'] for i in gnd],
            imsize=mytraindata.height,
            transform=mytraindata.transform),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0,
                                              pin_memory=True)
        qoolvecs = torch.zeros(params['class_num'], len(gnd)).cuda()
        lenq = len(qloader)
        for i, input in enumerate(qloader):
            # Degenerate triplet: anchor/positive/negative are all the query.
            out, _, _ = mymodel(input.cuda(), input.cuda(), input.cuda())
            qoolvecs[:, i] = out[0].data.squeeze()
            if (i + 1) % 10 == 0:
                print('\r>>>> {}/{} done...'.format(i + 1, lenq), end='')
        print('')

        poolvecs = torch.zeros(params['class_num'], len(mytrainloader)).cuda()
        idlist = []
        for index, data in enumerate(mytrainloader):
            batch_x, batch_y, batch_id = data
            idlist.append(batch_id[0])
            if cuda_gpu:
                batch_x = batch_x.cuda()
            # BUGFIX: dropped the batch_y.cuda() transfer — the labels are
            # never used here, so copying them to the GPU was pure waste.
            batch_x = batch_x.float()
            out, _, _ = mymodel(batch_x, batch_x, batch_x)
            poolvecs[:, index] = out[0]

        vecs = poolvecs.cpu().numpy()
        qvecs = qoolvecs.cpu().numpy()

        # search, rank, and print: scores[d, q] = <db descriptor d, query q>;
        # argsort descending gives database ranks per query column.
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        dataset = params['data_dir'].split('/')[-1].replace("train.json", "")
        compute_map_and_print(dataset, ranks, gnd, idlist)
    '''relu_ip1_list = []