Example #1
    def on_epoch_end(self, epoch, logs=None):
        """Compute the MAP of the validation set predictions."""
        y_true, y_pred = self.predict()
        map_k = evaluation.compute_map(y_true, y_pred, self.k)
        map_k_min = min(evaluation.compute_map(y_true, y_pred, self.k, True))

        # Log the computed values
        logs = logs or {}
        logs['val_map'] = map_k
        logs['val_map_min'] = map_k_min
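
For context, on_epoch_end(self, epoch, logs=None) is the signature of a Keras Callback hook, so this snippet is most likely a method of a keras.callbacks.Callback subclass. Below is a minimal, self-contained sketch of how such a callback could be assembled and registered; the MapEvaluation class name and its constructor arguments are assumptions, and evaluation.compute_map is the project-specific metric used above.

import evaluation  # project-specific module providing compute_map (as used above)
from tensorflow import keras


class MapEvaluation(keras.callbacks.Callback):
    """Hypothetical wrapper: computes MAP@k on a held-out split after each epoch."""

    def __init__(self, x_val, y_val, k=10):
        super().__init__()
        self.x_val, self.y_val, self.k = x_val, y_val, k

    def predict(self):
        # Return (y_true, y_pred) for the held-out split.
        return self.y_val, self.model.predict(self.x_val)

    def on_epoch_end(self, epoch, logs=None):
        y_true, y_pred = self.predict()
        map_k = evaluation.compute_map(y_true, y_pred, self.k)
        logs = logs or {}
        logs['val_map'] = map_k


# model.fit(x_train, y_train, epochs=10,
#           callbacks=[MapEvaluation(x_val, y_val, k=5)])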
Example #2
    def get_mAP(self):
        a = 0  # running sum of per-image AP
        if not self.cat_flag:
            t = time.time()
            print('Running per-image evaluation........')
            for i in range(len(self.pred_boxes)):
                try:
                    per_image = my_eval.compute_map(
                        self.gt_boxes[i + 1],
                        self.gt_class_ids[i + 1],
                        self.pred_boxes[i + 1],
                        self.pred_class_ids[i + 1],
                        self.pred_scores[i + 1],
                        iou_threshold=self.iou_threshold)
                except Exception:  # skip images where evaluation fails
                    continue

                b = per_image[0]  # per-image AP
                F = per_image[1]  # error-type counts (FP/FN breakdown)
                self.eval['FP_bg'] += F['FP_bg']
                self.eval['FP_cls'] += F['FP_cls']
                self.eval['FN'] += F['FN']
                # if b < 0.8:
                #     print("Image:%d hasn't been fed up =.=" % i)
                a += b
            print('Accumulating evaluation results........')
            a = a / len(self.pred_boxes)
            self.eval['mAP'] = a
            print('Done (t=%fs).' % (time.time() - t))
            print(
                "Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=all ] = %f"
                % a)
        else:
            IoU = np.arange(0.5, 1, 0.05)
            AP = np.zeros([10])
            t = time.time()
            print('Running per-class evaluation........')
            for i in range(10):
                try:
                    AP[i] = my_eval.compute_map_cat(self.gt_boxes_cat,
                                                    self.pred_boxes_cat,
                                                    self.pred_scores_cat,
                                                    self.cat_num, IoU[i])
                except Exception:
                    print('AP[%d] computation fails' % i)
            print('Accumulating evaluation results........')
            print('Done (t=%fs).' % (time.time() - t))
            mAP = np.mean(AP)
            self.eval['mAP'] = mAP
            self.eval['mAP50'] = AP[0]
            self.eval['mAP75'] = AP[5]
            print(
                'Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=all ] = %f'
                % mAP)
            print(
                'Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=all ] = %f'
                % AP[0])
            print(
                'Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=all ] = %f'
                % AP[5])
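
The else branch follows the COCO convention: AP is evaluated at ten IoU thresholds (0.50, 0.55, ..., 0.95), the mean over all ten is reported as mAP, and AP[0] and AP[5] are reported separately as mAP50 and mAP75. Here is a standalone sketch of that averaging step, with compute_ap_at_iou standing in (hypothetically) for a per-threshold evaluator such as my_eval.compute_map_cat.

import numpy as np


def coco_style_map(compute_ap_at_iou):
    """Average AP over the COCO IoU grid 0.50:0.95 (step 0.05)."""
    thresholds = np.arange(0.5, 1.0, 0.05)              # ten thresholds
    ap = np.array([compute_ap_at_iou(t) for t in thresholds])
    return {'mAP': ap.mean(),   # mean over all ten thresholds
            'mAP50': ap[0],     # IoU = 0.50
            'mAP75': ap[5]}     # IoU = 0.75


# Toy check: if AP falls linearly from 0.70 at IoU=0.50 to 0.25 at IoU=0.95,
# mAP is the mean of those ten values, i.e. 0.475.
print(coco_style_map(lambda t: 0.70 - (t - 0.5)))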
Example #3
    def on_epoch_end(self, epoch, logs=None):
        """Compute the MAP of the validation set predictions."""
        x, y_true = self.validation_data[:2]
        y_pred = self.model.predict(x)
        map_k = evaluation.compute_map(y_true, y_pred, self.k)

        # Log the computed value
        logs = logs or {}
        logs['val_map'] = map_k
Example #4
    def err_analysis(self):  # unfinished
        print('Running error analysis......')
        t = time.time()
        if self.cat_flag:
            self.fetch_data()
        for i in range(len(self.pred_boxes)):
            per_image = my_eval.compute_map(self.gt_boxes[i + 1],
                                            self.gt_class_ids[i + 1],
                                            self.pred_boxes[i + 1],
                                            self.pred_class_ids[i + 1],
                                            self.pred_scores[i + 1],
                                            iou_threshold=self.iou_threshold)
            b = per_image[0]  # per-image AP (unused here)
            F = per_image[1]  # error-type counts
            self.eval['FP_bg'] += F['FP_bg']
            self.eval['FP_cls'] += F['FP_cls']
            self.eval['FN'] += F['FN']
        print('Analysis finished (t=%fs)' % (time.time() - t))
        print('Attention: threshold is set to %f' % self.threshold)
        print('Number of FP_cls : %d' % self.eval['FP_cls'])
        print('Number of FP_bg : %d' % self.eval['FP_bg'])
        print('Number of FN : %d' % self.eval['FN'])
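
The three counters reported above are read here under a common error taxonomy (an assumption about what compute_map's second return value contains, not something stated in the snippet): a prediction that overlaps no ground-truth box is a background false positive (FP_bg), one whose best-matching box belongs to a different class is a classification false positive (FP_cls), and any ground-truth box left unmatched is a false negative (FN). A minimal per-image sketch of that classification:

def classify_errors(gt_class_ids, pred_class_ids, iou, iou_threshold=0.5):
    """Assumed error taxonomy; `iou` is a (num_pred, num_gt) IoU matrix."""
    errors = {'FP_bg': 0, 'FP_cls': 0, 'FN': 0}
    matched_gt = set()
    for p in range(len(pred_class_ids)):
        if iou.shape[1] == 0 or iou[p].max() < iou_threshold:
            errors['FP_bg'] += 1            # overlaps no ground-truth box
            continue
        g = int(iou[p].argmax())
        if pred_class_ids[p] != gt_class_ids[g]:
            errors['FP_cls'] += 1           # right location, wrong class
        else:
            matched_gt.add(g)
    errors['FN'] = len(gt_class_ids) - len(matched_gt)   # ground truth never matched
    return errors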
def validate(sketch_dataloader, shape_dataloader, model, criterion, epoch, opt):
    """Evaluate for one epoch on the test set."""
    sketch_losses = utils.AverageMeter()
    sketch_top1 = utils.AverageMeter()

    shape_losses = utils.AverageMeter()
    shape_top1 = utils.AverageMeter()

    net_whole, net_bp, net_vp, net_ap, net_cls = model
    # optim_sketch, optim_shape, optim_centers = optimizer
    crt_cls, crt_tlc, w1, w2 = criterion

    net_whole.eval()
    net_bp.eval()
    net_vp.eval()
    net_ap.eval()
    net_cls.eval()

    sketch_features = []
    sketch_scores = []
    sketch_labels = []

    shape_features = []
    shape_scores = []
    shape_labels = []

    batch_time = utils.AverageMeter()
    end = time.time()

    for i, (sketches, k_labels) in enumerate(sketch_dataloader):
        sketches_v = Variable(sketches.cuda())
        k_labels_v = Variable(k_labels.long().cuda())
        sketch_feat = net_whole(sketches_v)
        sketch_score = net_cls(sketch_feat)

        loss = crt_cls(sketch_score, k_labels_v)

        prec1 = utils.accuracy(sketch_score.data, k_labels_v.data, topk=(1,))[0]
        sketch_losses.update(loss.data[0], sketch_score.size(0)) # batchsize
        sketch_top1.update(prec1[0], sketch_score.size(0))
        sketch_features.append(sketch_feat.data.cpu())
        sketch_labels.append(k_labels)
        sketch_scores.append(sketch_score.data.cpu())

        batch_time.update(time.time() - end)
        end = time.time()

        if i % opt.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(sketch_dataloader), batch_time=batch_time, loss=sketch_losses,
                      top1=sketch_top1))
    print(' *Sketch Prec@1 {top1.avg:.3f}'.format(top1=sketch_top1))

    batch_time = utils.AverageMeter()
    end = time.time()
    for i, (shapes, p_labels) in enumerate(shape_dataloader):
        shapes = shapes.view(shapes.size(0)*shapes.size(1), shapes.size(2), shapes.size(3), shapes.size(4))
        # expanding: (bz * 12) x 3 x 224 x 224
        shapes = shapes.expand(shapes.size(0), 3, shapes.size(2), shapes.size(3))

        shapes_v = Variable(shapes.cuda())
        p_labels_v = Variable(p_labels.long().cuda())

        o_bp = net_bp(shapes_v)
        o_vp = net_vp(o_bp)
        shape_feat = net_ap(o_vp)
        shape_score = net_cls(shape_feat)

        loss = crt_cls(shape_score, p_labels_v)

        prec1 = utils.accuracy(shape_score.data, p_labels_v.data, topk=(1,))[0]
        shape_losses.update(loss.data[0], shape_score.size(0)) # batchsize
        shape_top1.update(prec1[0], shape_score.size(0))
        shape_features.append(shape_feat.data.cpu())
        shape_labels.append(p_labels)
        shape_scores.append(shape_score.data.cpu())

        batch_time.update(time.time() - end)
        end = time.time()

        if i % opt.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(shape_dataloader), batch_time=batch_time, loss=shape_losses,
                      top1=shape_top1))
    print(' *Shape Prec@1 {top1.avg:.3f}'.format(top1=shape_top1))

    shape_features = torch.cat(shape_features, 0).numpy()
    sketch_features = torch.cat(sketch_features, 0).numpy()

    shape_scores = torch.cat(shape_scores, 0).numpy()
    sketch_scores = torch.cat(sketch_scores, 0).numpy()

    shape_labels = torch.cat(shape_labels, 0).numpy()
    sketch_labels = torch.cat(sketch_labels, 0).numpy()

    # d = compute_distance(sketch_features.copy(), shape_features.copy(), l2=False)
    # scio.savemat('test/example.mat',{'d':d, 'feat':dataset_features, 'labels':dataset_labels})
    # AUC, mAP = map_and_auc(sketch_labels.copy(), shape_labels.copy(), d)
    # print(' * Feature AUC {0:.5}   mAP {0:.5}'.format(AUC, mAP))

    d_feat = compute_distance(sketch_features.copy(), shape_features.copy(), l2=False)
    d_feat_norm = compute_distance(sketch_features.copy(), shape_features.copy(), l2=True)
    mAP_feat = compute_map(sketch_labels.copy(), shape_labels.copy(), d_feat)
    mAP_feat_norm = compute_map(sketch_labels.copy(), shape_labels.copy(), d_feat_norm)
    print(' * Feature mAP {0:.5%}\tNorm Feature mAP {1:.5%}'.format(mAP_feat, mAP_feat_norm))


    d_score = compute_distance(sketch_scores.copy(), shape_scores.copy(), l2=False)
    mAP_score = compute_map(sketch_labels.copy(), shape_labels.copy(), d_score)
    d_score_norm = compute_distance(sketch_scores.copy(), shape_scores.copy(), l2=True)
    mAP_score_norm = compute_map(sketch_labels.copy(), shape_labels.copy(), d_score_norm)
    if opt.sf:
        shape_paths = [img[0] for img in shape_dataloader.dataset.shape_target_path_list]
        sketch_paths = [img[0] for img in sketch_dataloader.dataset.sketch_target_path_list]
        scio.savemat('{}/test_feat_temp.mat'.format(opt.checkpoint_folder),
                     {'score_dist': d_score, 'score_dist_norm': d_score_norm,
                      'feat_dist': d_feat, 'feat_dist_norm': d_feat_norm,
                      'sketch_features': sketch_features, 'sketch_labels': sketch_labels,
                      'sketch_scores': sketch_scores, 'shape_features': shape_features,
                      'shape_labels': shape_labels, 'sketch_paths': sketch_paths,
                      'shape_paths': shape_paths})
    print(' * Score mAP {0:.5%}\tNorm Score mAP {1:.5%}'.format(mAP_score, mAP_score_norm))
    return [sketch_top1.avg, shape_top1.avg, mAP_feat, mAP_feat_norm, mAP_score, mAP_score_norm]
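
The mAP values returned here are computed from one label vector per modality plus a sketch-to-shape distance matrix of shape (num_sketches, num_shapes), as produced by compute_distance above. As a rough illustration of what such a retrieval mAP measures (a generic sketch of ranking-based average precision, not necessarily how this project's compute_map is implemented):

import numpy as np


def retrieval_map(query_labels, gallery_labels, dist):
    """Mean average precision for label-based retrieval over a distance matrix."""
    aps = []
    for q in range(dist.shape[0]):
        order = np.argsort(dist[q])                        # nearest gallery items first
        rel = gallery_labels[order] == query_labels[q]     # relevance of each ranked item
        if not rel.any():
            continue
        hits = np.cumsum(rel)
        precision_at_hits = hits[rel] / (np.flatnonzero(rel) + 1)
        aps.append(precision_at_hits.mean())
    return float(np.mean(aps))


# Under this reading, mAP_feat above would correspond to
# retrieval_map(sketch_labels, shape_labels, d_feat).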