def evaluate1(map_imgid2y, map_imgid2o, N=50, S=1000):
    """Monte-Carlo evaluation of overall mAP and class-wise AP.

    Draws N random samples of S image indices (from index 60000 onward --
    presumably the held-out split, TODO confirm), accumulates the
    ground-truth / output tensors into torchnet meters, and prints results.

    :param map_imgid2y: dict image-id -> ground-truth label tensor
    :param map_imgid2o: dict image-id -> model output tensor
    :param N: number of random subsets to evaluate
    :param S: subset size (images per draw)
    """
    print('Starting Evaluation')
    mAPM = meter.mAPMeter()
    APM = meter.APMeter()

    # One image name per line; an empty line (or EOF) terminates the list,
    # matching the original readline-based loop.
    imglist = []
    with open('images_coco.txt') as f:
        for line in f:
            imgname = line.strip('\n')
            if imgname == '':
                break
            imglist.append(imgname)

    for _ in range(N):
        # random.sample accepts a range directly -- no throwaway list needed
        picked_imgs = rd.sample(range(60000, len(map_imgid2y)), S)
        y_pile = []
        out_pile = []
        for i in picked_imgs:
            key = imglist[i][:-4]  # drop the 4-char file extension (e.g. ".jpg")
            y_pile.append(map_imgid2y[key])
            out_pile.append(map_imgid2o[key])

        y_pile = torch.stack(y_pile).cuda()
        out_pile = torch.stack(out_pile).cuda()
        mAPM.add(out_pile, y_pile)
        APM.add(out_pile, y_pile)

    print('Overall mAP:', 100*mAPM.value())
    print('Classwise AP:', 100*APM.value())
Beispiel #2
0
def evaluate_scores(gt: torch.Tensor, scores: torch.Tensor, model_cfg):
    """Evaluate prediction scores against ground-truth labels.

    Computes precision@k, nDCG@k and their propensity-scored variants via
    pyxclib's xc_metrics, plus mAP via torchnet's mAPMeter.

    :param gt: (num_ins, num_labels) ground-truth tensor, sparse or dense
    :param scores: (num_ins, num_labels) score tensor or ndarray
    :param model_cfg: dict providing propensity params "ps_A", "ps_B" and "at_k"
    :return: dict with keys "prec", "ndcg", "psp", "psn", "mAP"
    """
    num_ins, num_labels = gt.shape
    # xc_metrics expects the ground truth as a scipy sparse matrix.
    if gt.is_sparse:
        gt_np = gt.coalesce()
        gt_np = scipy.sparse.coo_matrix(
            (gt_np.values().cpu().numpy(), gt_np.indices().cpu().numpy()),
            shape=(num_ins, num_labels))
    else:
        gt_np = scipy.sparse.coo_matrix(gt.cpu().numpy(),
                                        shape=(num_ins, num_labels))
    if isinstance(scores, torch.Tensor):
        # .cpu() first: .numpy() raises on CUDA tensors
        scores_np = scores.cpu().numpy()
    else:
        scores_np = scores

    inv_propen = xc_metrics.compute_inv_propesity(gt_np, model_cfg["ps_A"],
                                                  model_cfg["ps_B"])

    acc = xc_metrics.Metrics(true_labels=gt_np,
                             inv_psp=inv_propen,
                             remove_invalid=False)
    map_meter = meter.mAPMeter()
    # map meter requires a dense tensor
    gt_dense = gt if not gt.is_sparse else gt.to_dense()
    map_meter.add(scores, gt_dense)

    prec, ndcg, PSprec, PSnDCG = acc.eval(scores_np, model_cfg["at_k"])
    d = {
        "prec": prec,
        "ndcg": ndcg,
        "psp": PSprec,
        "psn": PSnDCG,
        "mAP": [map_meter.value()]
    }
    return d
Beispiel #3
0
def compute_scores(model, loader, label_mapping=None, b=None, weight=None):
    """
        Get all scores. For the sake of inverse propensity, we need to first collect all labels.
        TODO: -  of course we can compute it in advance
    :param model: torch module; moved to GPU if CUDA is available and it is not already there
    :param loader: iterable of (X, y) batches; X may be a sparse tensor
    :param label_mapping: optional original->hashed label map; NOTE(review):
        the loss meter and mAP meter are only updated when this is given,
        so with label_mapping=None the returned loss/mAP come from empty meters
    :param b: bucket count forwarded to get_mapped_labels
    :param weight: optional per-label weight for BCEWithLogitsLoss
    :return: gt & pred numpy ndarray: num_instances x num_labels. loss: scalar
    :return: mAP: scalar
    """
    model.eval()
    cuda = torch.cuda.is_available()
    torch.cuda.empty_cache()
    # move model to GPU unless it is DataParallel-wrapped or already on GPU
    if cuda and not isinstance(model,
                               torch.nn.DataParallel) and not model.is_cuda():
        model = model.cuda()
    gt = []
    scores = []
    if weight is not None:
        loss_func = torch.nn.BCEWithLogitsLoss(weight=weight)
    else:
        loss_func = torch.nn.BCEWithLogitsLoss()
    loss_meter = AverageMeter()
    map_meter = meter.mAPMeter()
    with torch.no_grad():
        for i, data in enumerate(loader):
            X, y = data
            X = X.to_dense()
            if label_mapping is not None:
                # map original to hashed labels
                y = get_mapped_labels(y, label_mapping, b)
            if cuda:
                X = X.cuda()
                y = y.cuda()
            out = model(X)
            if label_mapping is not None:
                loss_meter.update(loss_func(out, y), X.shape[0])
            out = torch.sigmoid(out)
            if label_mapping is not None:
                map_meter.add(out.detach(),
                              y)  # map_meter uses softmax scores -
            # or whatever? scoring function is monotonic
            # append cuda tensor
            gt.append(y)
            scores.append(out.cpu().detach())
    # NOTE(review): gt stays on the GPU (when cuda) while scores become numpy;
    # this does not match the docstring's "gt ... numpy ndarray" -- confirm callers.
    gt = torch.cat(gt)
    scores = torch.cat(scores)  # scores must be dense
    scores = scores.numpy()
    mAP = map_meter.value()
    return gt, scores, loss_meter.avg, mAP
Beispiel #4
0
    def testmAPMeter(self):
        """mAPMeter sanity checks: unweighted, weighted, and multi-column targets."""
        mtr = meter.mAPMeter()
        target = torch.Tensor([0, 1, 0, 1])
        output = torch.Tensor([0.1, 0.2, 0.3, 4])
        weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
        mtr.add(output, target)

        # hand-computed AP for the ranking 4 > 0.3 > 0.2 > 0.1 (two positives)
        ap = mtr.value()
        val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2.0 * 1.0 / 3.0 +
               0 * 1.0 / 4.0) / 2.0
        self.assertTrue(math.fabs(ap - val) < 0.01, msg="mAP test1 failed")

        mtr.reset()
        mtr.add(output, target, weight)
        ap = mtr.value()
        # weighted AP: precision terms use cumulative sample weights
        val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 +
               0 * 1.0 / 4.0) / 2.0
        self.assertTrue(math.fabs(ap - val) < 0.01, msg="mAP test2 failed")

        # Test multiple K's
        target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
        output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2,
                                                    1]]).transpose(0, 1)
        weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
        mtr.reset()
        mtr.add(output, target, weight)
        ap = mtr.value()
        # mAP is the mean over the two per-column weighted APs
        self.assertTrue(
            math.fabs(ap - torch.Tensor([
                (1 * 3.0 / 3.0 + 0 * 3.0 / 5.0 + 3.5 * 1 / 5.5 +
                 0 * 3.5 / 6.5) / 2.0,
                (0 * 1.0 / 1.0 + 1 * 0.5 / 1.5 + 0 * 0.5 / 3.5 +
                 1 * 3.5 / 6.5) / 2.0,
            ]).mean()) < 0.01,
            msg="mAP test3 failed",
        )

        mtr.reset()
        mtr.add(output, target)
        ap = mtr.value()
        self.assertTrue(
            math.fabs(ap - torch.Tensor([
                (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) /
                2.0,
                (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) /
                2.0,
            ]).mean()) < 0.01,
            msg="mAP test4 failed",
        )
Beispiel #5
0
def evaluate(net, dataloader, logger=None):
    """Evaluate *net*: average cross-entropy loss, accuracy, and mAP.

    Restores the network's previous train/eval mode before returning.

    :param net: classification network producing (batch, N_CLASSES) logits
    :param dataloader: yields (inputs, targets, img_path) batches
    :param logger: optional logger with a .log(msg) method; prints otherwise
    :return: dict with 'val_loss' (mean loss over samples), 'acc' (percent
        accuracy) and 'mAP' (torchnet mAP meter value)
    """
    is_training = net.training
    net.eval()
    criterion = nn.CrossEntropyLoss()

    val_cm = meter.ConfusionMeter(N_CLASSES)
    val_mAP = meter.mAPMeter()

    total_loss = 0.0
    total = 0.0

    with torch.no_grad():
        for inputs, targets, img_path in dataloader:
            batch_size = inputs.size(0)
            inputs, targets = inputs.to(device), targets.to(device)

            score = net(inputs)
            loss = criterion(score, targets)

            total_loss += loss.item() * batch_size
            total += batch_size

            # *********************** confusion matrix and mAP ***********************
            # one-hot width uses N_CLASSES (was hard-coded 3, inconsistent
            # with the meters above)
            one_hot = torch.zeros(targets.size(0), N_CLASSES).scatter_(1, targets.data.cpu().unsqueeze(1), 1)
            val_cm.add(F.softmax(score, dim=1).data, targets.data)
            val_mAP.add(F.softmax(score, dim=1).data, one_hot)

    val_cm = val_cm.value()
    val_acc = 100. * sum([val_cm[c][c] for c in range(N_CLASSES)]) / val_cm.sum()

    results = {
        'val_loss': total_loss / total,
        'acc': val_acc,
        'mAP': val_mAP.value(),  # previously accumulated but discarded
    }

    msg = '  Val   loss: %.3f | Acc: %.3f%% (%d)' % \
          (results['val_loss'], results['acc'], total)
    if logger:
        logger.log(msg)
    else:
        print(msg)

    net.train(is_training)
    return results
Beispiel #6
0
    def testmAPMeter(self):
        """mAPMeter checks (compact-formatting variant): plain, weighted, multi-column."""
        mtr = meter.mAPMeter()
        target = torch.Tensor([0, 1, 0, 1])
        output = torch.Tensor([0.1,  0.2, 0.3, 4])
        weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
        mtr.add(output, target)

        # expected AP for ranking 4 > 0.3 > 0.2 > 0.1 with two positives
        ap = mtr.value()
        val = (1*1.0/1.0 + 0*1.0/2.0 + 2.0*1.0/3.0 + 0*1.0/4.0)/2.0
        self.assertTrue(
          math.fabs(ap-val) < 0.01,
          msg='mAP test1 failed'
        )

        mtr.reset()
        mtr.add(output, target, weight)
        ap = mtr.value()
        # weighted AP: precision terms use cumulative sample weights
        val = (1*0.1/0.1 + 0*2.0/2.1 + 1.1*1/3.1 + 0*1.0/4.0)/2.0
        self.assertTrue(
            math.fabs(ap-val) < 0.01, msg='mAP test2 failed')

        # Test multiple K's
        target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
        output = torch.Tensor([[.1, .2, .3, 4], [4, 3, 2, 1]]).transpose(0, 1)
        weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
        mtr.reset()
        mtr.add(output, target, weight)
        ap = mtr.value()
        # mAP is the mean over the two per-column weighted APs
        self.assertTrue(
          math.fabs(ap -
                    torch.Tensor([
                        (1*3.0/3.0 + 0*3.0/5.0 + 3.5*1/5.5 + 0*3.5/6.5)/2.0,
                        (0*1.0/1.0 + 1*0.5/1.5 + 0*0.5/3.5 + 1*3.5/6.5)/2.0
                        ]).mean()) < 0.01, msg='mAP test3 failed')

        mtr.reset()
        mtr.add(output, target)
        ap = mtr.value()
        self.assertTrue(
          math.fabs(ap -
                    torch.Tensor([
                        (1*1.0 + 0*1.0/2.0 + 2*1.0/3.0 + 0*1.0/4.0)/2.0,
                        (0*1.0 + 1*1.0/2.0 + 0*1.0/3.0 + 2*1.0/4.0)/2.0
                        ]).mean()) < 0.01, msg='mAP test4 failed')
def val_3class(model, dataloader, data_scale):
    """Validate a 3-class classifier that consumes (last, current, next) frame triplets.

    Accumulates a confusion matrix and mAP on the *current* frame's label,
    then derives accuracy, specificity and sensitivity per class.

    :param model: network taking (last, cur, next) images, returning (score, _, _)
    :param dataloader: yields (image, label, image_path) with 3-element image/label
    :param data_scale: per-class scale factors used to undo balancing in the
        returned confusion matrix
    :return: (confusion matrix as int32, mAP ndarray, specificity list,
        sensitivity list, accuracy)
    """
    # ============================ Prepare Metrics ==========================
    val_cm = meter.ConfusionMeter(config.num_classes)
    val_mAP = meter.mAPMeter()

    softmax = functional.softmax

    # ================================ Validate ==============================
    for i, (image, label, image_path) in tqdm(enumerate(dataloader)):

        # ******************* prepare input and go through the model *******************
        if config.use_gpu:
            last_image, cur_image, next_image = image[0].cuda(), image[1].cuda(), image[2].cuda()
            last_label, cur_label, next_label = label[0].cuda(), label[1].cuda(), label[2].cuda()
        else:
            last_image, cur_image, next_image = image[0], image[1], image[2]
            last_label, cur_label, next_label = label[0], label[1], label[2]

        # inference only -- no gradients needed on inputs or labels
        last_image.requires_grad = False
        cur_image.requires_grad = False
        next_image.requires_grad = False
        last_label.requires_grad = False
        cur_label.requires_grad = False
        next_label.requires_grad = False

        score, _, _ = model(last_image, cur_image, next_image)

        # *********************** confusion matrix and mAP ***********************
        one_hot = torch.zeros(cur_label.size(0), 3).scatter_(1, cur_label.data.cpu().unsqueeze(1), 1)

        val_cm.add(softmax(score, dim=1).data, cur_label.data)
        val_mAP.add(softmax(score, dim=1).data, one_hot)

    # *********************** accuracy and sensitivity ***********************
    val_cm = val_cm.value()
    val_accuracy = 100. * sum([val_cm[c][c] for c in range(config.num_classes)]) / val_cm.sum()
    val_sp = [100. * (val_cm.sum() - val_cm.sum(0)[i] - val_cm.sum(1)[i] + val_cm[i][i]) / (val_cm.sum() - val_cm.sum(1)[i])
              for i in range(config.num_classes)]
    val_se = [100. * val_cm[i][i] / val_cm.sum(1)[i] for i in range(config.num_classes)]
    val_cm = val_cm / np.expand_dims(np.array(data_scale), axis=1)  # metrics are computed on the balanced matrix; undo the balancing for display

    return val_cm.astype(dtype=np.int32), val_mAP.value().numpy(), val_sp, val_se, val_accuracy
    gt = None
    logging.info("Evaluating config %s" % (a.model))
    logging.info("Dataset config %s" % (a.dataset))
    if a.cost:
        logging.info("Evaluating cost-sensitive method: %s" % (a.cost))

    # get inverse propensity

    _, labels, _, _, _ = data_utils.read_data(test_file)
    inv_propen = xc_metrics.compute_inv_propesity(labels, model_cfg["ps_A"],
                                                  model_cfg["ps_B"])
    gts = []
    scaled_eval_flags = []
    eval_flags = []
    ps_eval_flags = []
    map_meter = meter.mAPMeter()

    for i, data in enumerate(tqdm.tqdm(test_loader)):
        print(i, 'th data')
        pred_avg_meter = AverageMeter()
        X, gt = data
        bs = X.shape[0]
        for r in range(R):
            print("REP", r, end='\t')
            x = X
            feat_mapping = get_feat_hash(feat_path, r)
            if model_cfg['is_feat_hash']:
                x = x.coalesce()
                ind = x.indices()
                v = x.values()
                ind[1] = torch.from_numpy(feat_mapping[ind[1]])
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable as V

from torchnet import meter
from config.config import cfg
from util.visualize import Visualizer
import cv2
import numpy as np
from lib.core.visImage import tensor_to_np

# create visualized env (visdom-backed dashboard on port 8097)
vis = Visualizer("reidatt", port=8097)
# measures created: per-class AP, mean AP, and a running loss average
AP = meter.APMeter()
mAP = meter.mAPMeter()
Loss_meter = meter.AverageValueMeter()
# set cuda env: pin this process to GPU 1
os.environ["CUDA_VISIBLE_DEVICES"] = "1"


def inverse_normalize(img):
    """Undo the approximate normalization on *img* for visualization.

    Uses a single mean/std pair (std ~= 0.225, mean ~= 0.45) shared across
    channels, clamps to [0, 1], and scales to the 0..255 range.
    (For caffe-pretrained weights the per-channel BGR mean-add variant
    would be needed instead.)
    """
    restored = img * 0.225 + 0.45
    return restored.clip(min=0, max=1) * 255


def show_keypoint(initimg, mask, title=None):
    #mask = mask.repeat(3, 1, 1)
def val_epoch(epoch, data_loader, model, criterion, opt, vis,vallogwindow):
    """Run one validation epoch: loss, accuracy, mAP, per-class AP, top-1/3/5.

    Prints per-batch progress to stdout and appends a summary line to the
    visdom text window *vallogwindow*.

    :return: (average loss, mAP value) for the epoch
    """
    print('validation at epoch {}'.format(epoch))

    model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()
    mmap = meter.mAPMeter()
    AP = meter.APMeter()
    top = meter.ClassErrorMeter(topk=[1, 3, 5], accuracy=True)
    mmap.reset()
    AP.reset()
    top.reset()
    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)
        # inputs may be a list of tensors (multi-clip) or a single tensor
        if type(inputs) is list:
            inputs = [Variable(inputs[ii].cuda()) for ii in range(len(inputs))]
        else:
            inputs = Variable(inputs.cuda())
        targets = targets.cuda()
        with torch.no_grad():
            #inputs = Variable(inputs)
            targets = Variable(targets)
            outputs ,context= model(inputs)
            #if i %5==0:
            #for jj in range(num):
            #    org_img = inverse_normalize(inputs[0,jj,:,:,:].detach().cpu().numpy())
            #    show_keypoint(org_img, context[0].detach().cpu(),vis=vis,title = str(jj+1))

            loss = criterion(outputs, targets)
            acc = calculate_accuracy(outputs, targets)

            losses.update(loss.data.item(), targets.detach().size(0))
            accuracies.update(acc, targets.detach().size(0))
            # AP/mAP meters need one-hot targets matching the output width
            one_hot = torch.zeros_like(outputs).cuda().scatter_(1, targets.view(-1, 1), 1)
            mmap.add(outputs.detach(), one_hot.detach())
            top.add(outputs.detach(), targets.detach())
            AP.add(outputs.detach(), one_hot.detach())
        batch_time.update(time.time() - end_time)
        end_time = time.time()
        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})\t'
              'mmap {mmap}\t'
              'top1 3 5: {top}\t'.format(
            epoch,
            i + 1,
            len(data_loader),
            batch_time=batch_time,
            data_time=data_time,
            loss=losses,
            acc=accuracies,
            mmap=mmap.value(),
            top=top.value() ))

    vis.text("gpu:{}, epoch: {},loss: {},accu:{},mAP:{}, top135 {}\nAP:{}".format(torch.cuda.current_device(),epoch,losses.avg,accuracies.avg,mmap.value(),top.value(),AP.value())
    ,win=vallogwindow,append=True)
    #exit()
    #if epoch==10:
    #    exit()
    return losses.avg, mmap.value()
Beispiel #11
0
 def __init__(self, class_num=20):
     """Per-class statistics holder wrapping a torchnet mAPMeter.

     :param class_num: number of label classes tracked
     """
     self.mAPMeter = mAPMeter()
     self.class_num = class_num
     # presumably per-class true-positive tallies -- TODO confirm against usage
     self.true_array = np.zeros((1, class_num))
     # total number of samples seen so far
     self.total = 0
     self.cnt_array = np.zeros((1, class_num)) # count occurrence of labels
Beispiel #12
0
        root = '/DB/rhome/bllai/PyTorchProjects/Vertebrae_Collapse'
        test_paths = [os.path.join(root, 'dataset/test_VB.csv')]
        test_data = VB_Dataset(test_paths,
                               num_classes=3,
                               phase='test',
                               useRGB=True,
                               usetrans=True,
                               padding=True,
                               balance='upsample')
        test_dataloader = DataLoader(test_data,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=4)

        test_cm = meter.ConfusionMeter(3)
        test_mAP = meter.mAPMeter()
        softmax = functional.softmax

        for image, label, image_path in tqdm(test_dataloader):
            image.requires_grad = True

            if args.use_cuda:
                model.cuda()
                image = image.cuda()

            score = model(image)

            prob = softmax(score, dim=1).detach().cpu().numpy()[0]
            one_hot = torch.zeros(label.size(0),
                                  3).scatter_(1,
                                              label.data.cpu().unsqueeze(1), 1)
Beispiel #13
0
def main(args):
    """Load the saved act_lstm model and evaluate mAP on the test set."""
    val_perf = []  # summary of validation performance, and the training loss

    test_data, test_vid2name = dl.read_data(args, "test")

    num_steps_test = int(
        math.ceil(test_data.num_examples / float(args.batch_size)))
    """ load st-gcn model """
    model = Next_Pred(
        in_channels=2,
        num_class=30,
    ).cuda()
    """ init learnable weights """
    saved_file = torch.load('./weights/act_lstm.pth')
    model.load_state_dict(saved_file['model_state_dict'])
    model.eval()
    print(model)

    with torch.no_grad():
        # for batch in tqdm(val_data.get_batches(args.batch_size,
        gt_list = []
        score_list = []
        mrt = meter.mAPMeter()
        ''' init data structure for tensorflow mAP eval '''
        future_act_scores = {actid: [] for actid in activity2id.values()}
        future_act_labels = {actid: [] for actid in activity2id.values()}
        act_ap = None

        for batch in tqdm(test_data.get_batches(args.batch_size,
                                                num_steps=num_steps_test,
                                                shuffle=False,
                                                full=True),
                          total=num_steps_test,
                          ascii=True):

            batch_idx = batch[0]
            batch_val = batch[1]

            data, other_boxes_seq = get_data_feed(batch_val,
                                                  data_type='test',
                                                  N=args.batch_size)

            # process gt data
            # multi-hot encode the future-activity ids into a 30-dim vector
            gt = data['future_activity']
            labels = [
                torch.zeros(30).scatter_(0, torch.tensor(x), 1.) for x in gt
            ]
            gt = torch.stack(labels, 0)

            # process kp data # [batch, seq, 34]
            input_tensor = np.stack(data['obs_kp_rel'])
            input_tensor = torch.from_numpy(input_tensor)
            input_tensor = input_tensor.view(args.batch_size, args.obs_len, -1)

            # process appear data ==> [batch, seq, dim]
            input_appear_tensor = torch.from_numpy(
                np.mean(data['obs_person_feat'], (2, 3)))

            # process obs_other_box_class data ==> [batch, seq, 15, 10]
            other_box_cls = np.stack(data['obs_other_box_class'])
            other_box_cls = torch.from_numpy(other_box_cls)
            # other_box_feat = other_box_feat.unsqueeze(-1).permute(0, 3, 1, 2, -1)

            # process obs_other_box_geo data ==> [batch, seq, 15, 4]
            other_box_geo = np.stack(data['obs_other_box'])
            other_box_geo = torch.from_numpy(other_box_geo)

            # process grid class data ==> [batch, seq, 1]
            obs_grid_cls = np.stack(
                data['obs_grid_class'])[:, 0, :].astype('long')
            obs_grid_cls = np.reshape(obs_grid_cls,
                                      (args.batch_size, args.obs_len, 1))
            obs_grid_cls = torch.from_numpy(obs_grid_cls)
            # labels = [torch.zeros(576).scatter_(0, torch.tensor(x), 1.) for x in obs_grid_cls]
            # obs_grid_cls = torch.stack(labels, 0).view(args.batch_size, args.obs_len, -1)

            # process grid target data ==> [batch, seq, 4]
            obs_grid_target = np.stack(data['obs_grid_target'])[:, 0]
            obs_grid_target = torch.from_numpy(obs_grid_target)

            # process traj data ==> [batch, seq, 2]
            input_traj_tensor = np.stack(data['obs_traj_rel'])
            input_traj_tensor = torch.from_numpy(input_traj_tensor)

            # convert to gpu
            gt = gt.cuda()
            input_tensor = input_tensor.cuda()
            input_appear_tensor = input_appear_tensor.cuda()
            other_box_cls = other_box_cls.cuda()
            other_box_geo = other_box_geo.cuda()
            obs_grid_cls = obs_grid_cls.cuda()
            obs_grid_target = obs_grid_target.cuda()
            input_traj_tensor = input_traj_tensor.cuda()

            # NOTE(review): only keypoints + appearance are fed to the model;
            # the other prepared tensors above are unused here
            out = model(input_tensor, input_appear_tensor, \
                mode='train')
            mrt.add(out, gt)

        print("Average Precision is {}".format(mrt.value()))
 def __init__(self):
     """Metric wrapper holding a torchnet mAPMeter instance."""
     super(mAPMetric, self).__init__()
     self.m_ap = mAPMeter()
Beispiel #15
0
def main(args):
    """Run training.

    Trains Next_Pred with BCEWithLogitsLoss on multi-hot future-activity
    targets, evaluates mAP on the test set every 300 steps, and checkpoints
    whenever the mAP improves.
    """
    val_perf = []  # summary of validation performance, and the training loss

    train_data, train_vid2name = dl.read_data(args, "train")
    val_data, val_vid2name = dl.read_data(args, "val")
    test_data, test_vid2name = dl.read_data(args, "test")
    train_vid2name, val_vid2name = train_vid2name.item(), val_vid2name.item()

    args.train_num_examples = train_data.num_examples

    num_steps = int(math.ceil(
        train_data.num_examples / float(args.batch_size))) * args.num_epochs
    num_steps_test = int(
        math.ceil(test_data.num_examples / float(args.batch_size)))
    num_steps_val = int(
        math.ceil(val_data.num_examples / float(args.batch_size)))
    """ load st-gcn model """
    model = Next_Pred(
        in_channels=2,
        num_class=30,
    ).cuda()
    """ init learnable weights """
    model = model.apply(weights_init)
    """ init multi-class loss func """
    # criterion = nn.CrossEntropyLoss()
    multi_criterion = nn.BCEWithLogitsLoss()
    # multi_criterion = nn.MultiLabelSoftMarginLoss()
    """ init optim """
    # learning_rate = 0.1
    # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0001)
    optimizer = torch.optim.Adadelta(model.parameters())
    best_ap = 0.0
    """ Loop batch """
    for idx, batch in enumerate(
            tqdm(train_data.get_batches(args.batch_size, num_steps=num_steps),
                 total=num_steps,
                 ascii=True)):

        batch_idx = batch[0]
        batch_train = batch[1]

        data, other_boxes_seq = get_data_feed(batch_train,
                                              data_type='train',
                                              N=args.batch_size)

        # process gt data
        # multi-hot encode future-activity ids into a 30-dim target vector
        gt = data['future_activity']
        labels = [torch.zeros(30).scatter_(0, torch.tensor(x), 1.) for x in gt]
        gt = torch.stack(labels, 0)

        # process kp data # [batch, seq, 34]
        # input_tensor = np.stack(data['obs_kp_rel'])
        input_tensor = np.stack(data['obs_kp_rel'])
        input_tensor = torch.from_numpy(input_tensor)
        input_tensor = input_tensor.view(args.batch_size, args.obs_len, -1)

        # process appear data ==> [batch, seq, dim]
        input_appear_tensor = torch.from_numpy(
            np.mean(data['obs_person_feat'], (2, 3)))

        # process obs_other_box_class data ==> [batch, seq, 15, 10]
        other_box_cls = np.stack(data['obs_other_box_class'])
        other_box_cls = torch.from_numpy(other_box_cls)
        # other_box_feat = other_box_feat.unsqueeze(-1).permute(0, 3, 1, 2, -1)

        # process obs_other_box_geo data ==> [batch, seq, 15, 4]
        other_box_geo = np.stack(data['obs_other_box'])
        other_box_geo = torch.from_numpy(other_box_geo)

        # process grid class data ==> [batch, seq, 1]
        obs_grid_cls = np.stack(data['obs_grid_class'])[:, 0, :].astype('long')
        obs_grid_cls = np.reshape(obs_grid_cls,
                                  (args.batch_size, args.obs_len, 1))
        obs_grid_cls = torch.from_numpy(obs_grid_cls)
        # labels = [torch.zeros(576).scatter_(0, torch.tensor(x), 1.) for x in obs_grid_cls]
        # obs_grid_cls = torch.stack(labels, 0).view(args.batch_size, args.obs_len, -1)

        # process grid target data ==> [batch, seq, 4]
        obs_grid_target = np.stack(data['obs_grid_target'])[:, 0]
        obs_grid_target = torch.from_numpy(obs_grid_target)

        # process traj data ==> [batch, seq, 2]
        input_traj_tensor = np.stack(data['obs_traj'])
        input_traj_tensor = torch.from_numpy(input_traj_tensor)
        ''' obtain perseon-object adjacency matrix '''
        # convert to gpu
        gt = gt.cuda()
        input_tensor = input_tensor.cuda()
        input_appear_tensor = input_appear_tensor.cuda()
        other_box_cls = other_box_cls.cuda()
        other_box_geo = other_box_geo.cuda()
        obs_grid_cls = obs_grid_cls.cuda()
        obs_grid_target = obs_grid_target.cuda()
        input_traj_tensor = input_traj_tensor.cuda()

        # NOTE(review): only keypoints + appearance are consumed by the model
        out = model(input_tensor, input_appear_tensor, \
                    mode='train')

        optimizer.zero_grad()

        # produce loss given out & gt
        loss = multi_criterion(out, gt)
        if math.isnan(loss):
            print("loss is nan, stop training")
            exit()
        else:
            print("Loss is {}".format(loss))

        # perf optim
        loss.backward()
        optimizer.step()
        """ run evaluation every 10 steps """
        # NOTE(review): despite the string above, evaluation runs every 300 steps
        if (idx + 1) % 300 == 0:
            with torch.no_grad():
                # for batch in tqdm(val_data.get_batches(args.batch_size,
                gt_list = []
                score_list = []
                mrt = meter.mAPMeter()
                ''' init data structure for tensorflow mAP eval '''
                future_act_scores = {
                    actid: []
                    for actid in activity2id.values()
                }
                future_act_labels = {
                    actid: []
                    for actid in activity2id.values()
                }
                act_ap = None

                for batch in tqdm(test_data.get_batches(
                        args.batch_size,
                        num_steps=num_steps_test,
                        shuffle=False,
                        full=True),
                                  total=num_steps_test,
                                  ascii=True):

                    batch_idx = batch[0]
                    batch_val = batch[1]

                    data, other_boxes_seq = get_data_feed(batch_val,
                                                          data_type='test',
                                                          N=args.batch_size)

                    # process gt data
                    gt = data['future_activity']
                    labels = [
                        torch.zeros(30).scatter_(0, torch.tensor(x), 1.)
                        for x in gt
                    ]
                    gt = torch.stack(labels, 0)

                    # process kp data # [batch, seq, 34]
                    input_tensor = np.stack(data['obs_kp_rel'])
                    input_tensor = torch.from_numpy(input_tensor)
                    input_tensor = input_tensor.view(args.batch_size,
                                                     args.obs_len, -1)

                    # process appear data ==> [batch, seq, dim]
                    input_appear_tensor = torch.from_numpy(
                        np.mean(data['obs_person_feat'], (2, 3)))

                    # process obs_other_box_class data ==> [batch, seq, 15, 10]
                    other_box_cls = np.stack(data['obs_other_box_class'])
                    other_box_cls = torch.from_numpy(other_box_cls)
                    # other_box_feat = other_box_feat.unsqueeze(-1).permute(0, 3, 1, 2, -1)

                    # process obs_other_box_geo data ==> [batch, seq, 15, 4]
                    other_box_geo = np.stack(data['obs_other_box'])
                    other_box_geo = torch.from_numpy(other_box_geo)

                    # process grid class data ==> [batch, seq, 1]
                    obs_grid_cls = np.stack(
                        data['obs_grid_class'])[:, 0, :].astype('long')
                    obs_grid_cls = np.reshape(
                        obs_grid_cls, (args.batch_size, args.obs_len, 1))
                    obs_grid_cls = torch.from_numpy(obs_grid_cls)
                    # labels = [torch.zeros(576).scatter_(0, torch.tensor(x), 1.) for x in obs_grid_cls]
                    # obs_grid_cls = torch.stack(labels, 0).view(args.batch_size, args.obs_len, -1)

                    # process grid target data ==> [batch, seq, 4]
                    obs_grid_target = np.stack(data['obs_grid_target'])[:, 0]
                    obs_grid_target = torch.from_numpy(obs_grid_target)

                    # process traj data ==> [batch, seq, 2]
                    input_traj_tensor = np.stack(data['obs_traj_rel'])
                    input_traj_tensor = torch.from_numpy(input_traj_tensor)
                    ''' obtain perseon-object adjacency matrix '''
                    # convert to gpu
                    gt = gt.cuda()
                    input_tensor = input_tensor.cuda()
                    input_appear_tensor = input_appear_tensor.cuda()
                    other_box_cls = other_box_cls.cuda()
                    other_box_geo = other_box_geo.cuda()
                    obs_grid_cls = obs_grid_cls.cuda()
                    obs_grid_target = obs_grid_target.cuda()
                    input_traj_tensor = input_traj_tensor.cuda()

                    out = model(input_tensor, input_appear_tensor, \
                        mode='train')
                    mrt.add(out, gt)

                    # ''' perf mAP eval from tensorflow code '''
                    # for i in range(len(gt)):
                    #     this_future_act_labels = gt[i]
                    #     for j in range(len(this_future_act_labels)):
                    #         actid = j
                    #         future_act_labels[actid].append(this_future_act_labels[j])
                    #         future_act_scores[actid].append(out[i, j])

            # ''' one-shot for mAP '''
            # act_ap = []
            # for actid in future_act_labels:
            #     list_ = [{"score": future_act_scores[actid][i],
            #                 "label": future_act_labels[actid][i]}
            #             for i in range(len(future_act_labels[actid]))]
            #     ap = compute_ap(list_)
            #     act_ap.append(ap)

            # act_ap = np.mean(act_ap)
            # print("Mean Average Precision of TF code is {}".format(act_ap))
            print(out[0])
            print("Average Precision is {}".format(mrt.value()))
            print("Saved best perf mAP model is {}".format(best_ap))
            ''' check to save the model '''
            if mrt.value() > best_ap:

                best_ap = mrt.value()
                save_path = './tf_replic_v2/test_set/'
                if os.path.isdir(save_path) is False:
                    os.mkdir(save_path)

                # NOTE(review): 'act_mAP' stores act_ap, which is always None
                # here because the TF-style AP computation above is commented out
                torch.save(
                    {
                        'epoch': idx,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'act_mAP': act_ap,
                    },
                    os.path.join(save_path,
                                 'model_best_act_{}.pth'.format(best_ap)))
Beispiel #16
0
def train_epoch(epoch, data_loader, model, criterion, optimizer, opt, vis,
                trainlogwindow):
    """Run one training epoch and return ``(avg_loss, mAP)``.

    For every batch the loss is computed, backpropagated, and one optimizer
    step is taken.  Running loss/accuracy averages plus mAP and top-1/3/5
    accuracy meters are updated, progress is appended to a visdom text
    window and printed, and — on CUDA device 0 only — a checkpoint is
    written every ``opt.MODEL.CKP_DURING`` epochs.

    Args:
        epoch: current epoch number (used in logs and the checkpoint name).
        data_loader: yields ``(inputs, targets)``; ``inputs`` may be a single
            tensor or a list of tensors (multi-input models).
        model: network; called as ``model(inputs)`` and expected to return
            ``(outputs, context)`` — only ``outputs`` is consumed here.
        criterion: loss function applied to ``(outputs, targets)``.
        optimizer: optimizer stepped once per batch.
        opt: config object; reads ``opt.MODEL.CKP_DURING``,
            ``opt.MODEL.RESULT`` and ``opt.MODEL.NAME``.
        vis: visdom connection used for text logging.
        trainlogwindow: visdom window handle the log lines are appended to.

    Returns:
        Tuple ``(losses.avg, mmap.value())`` for the epoch.
    """
    print('train at epoch {}'.format(epoch))

    model.train()

    # Running averages over the epoch.
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    # torchnet meters: mean average precision and top-1/3/5 accuracy.
    mmap = meter.mAPMeter()
    top = meter.ClassErrorMeter(topk=[1, 3, 5], accuracy=True)
    mmap.reset()
    top.reset()
    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)
        targets = targets.cuda()
        if type(inputs) is list:
            # Multi-input model: move every stream to the GPU.
            inputs = [Variable(inputs[ii]).cuda() for ii in range(len(inputs))]
        else:
            inputs = inputs.cuda()
            # Disabled mixup augmentation path, kept for reference:
            #inputs, targets_a, targets_b, lam = mixup_data(inputs, targets, opt.DATASET.ALPHA, True)
            #inputs, targets_a, targets_b = Variable(inputs), Variable(targets_a), Variable(targets_b)
            inputs = Variable(inputs)
        #print(targets)
        targets = Variable(targets)

        outputs, context = model(inputs)
        #loss_func = mixup_criterion(targets_a, targets_b, lam)
        #loss = loss_func(criterion, outputs)
        loss = criterion(outputs, targets)
        #print(outputs.shape)
        #print(targets)
        acc = calculate_accuracy(outputs, targets)
        # mAPMeter wants one-hot targets, so scatter the class indices
        # into a zero tensor shaped like the outputs.
        one_hot = torch.zeros_like(outputs).cuda().scatter_(
            1, targets.view(-1, 1), 1)
        mmap.add(outputs.detach(), one_hot.detach())
        top.add(outputs.detach(), targets.detach())
        losses.update(loss.data.item(), targets.detach().size(0))
        accuracies.update(acc, targets.detach().size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end_time)
        end_time = time.time()

        # Stream per-batch stats to the visdom log window.
        vis.text(
            "gpu{}, epoch: {},batch:{},iter: {},loss: {},acc:{},lr: {}\n".format(torch.cuda.current_device(),epoch, i + 1,(epoch - 1) * len(data_loader) + (i + 1),losses.val, \
                                                  accuracies.val,optimizer.param_groups[0]['lr'])
                                                    ,win=trainlogwindow,append=True)

        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})\t'
              'mmap {mmap}\t'
              'top1 3 5: {top}\t'.format(epoch,
                                         i + 1,
                                         len(data_loader),
                                         batch_time=batch_time,
                                         data_time=data_time,
                                         loss=losses,
                                         acc=accuracies,
                                         mmap=mmap.value(),
                                         top=top.value()))
    # Epoch summary line to the same visdom window.
    vis.text(
        "total:\n gpu:{} epoch: {},loss: {},lr: {}, accu:{},mAP:{}, top135 {}\n"
        .format(torch.cuda.current_device(), epoch, losses.avg,
                optimizer.param_groups[0]['lr'], accuracies.avg, mmap.value(),
                top.value()),
        win=trainlogwindow,
        append=True)
    # Only rank/device 0 writes checkpoints (avoids duplicate saves in
    # multi-GPU runs), and only every CKP_DURING epochs.
    if torch.cuda.current_device() == 0:
        print("saveing ckp ########################################")
        if epoch % opt.MODEL.CKP_DURING == 0:
            save_file_path = os.path.join(opt.MODEL.RESULT, opt.MODEL.NAME,
                                          'save_{}.pth'.format(epoch))
            if not os.path.exists(
                    os.path.join(opt.MODEL.RESULT, opt.MODEL.NAME)):
                os.makedirs(os.path.join(opt.MODEL.RESULT, opt.MODEL.NAME))
            states = {
                'epoch': epoch + 1,
                'arch': opt.MODEL.NAME,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            torch.save(states, save_file_path)
    return losses.avg, mmap.value()
Beispiel #17
0
def test_3class(**kwargs):
    """Evaluate a 3-class classifier on the test set and report metrics.

    Loads the test dataset and a ResNet18 checkpoint (per ``config``), runs
    inference, and prints accuracy, per-class specificity/sensitivity,
    per-class AUC, mAP and the confusion matrix.  Also renders a t-SNE plot
    of the logits and optionally writes per-image predictions to a CSV file.

    Args:
        **kwargs: configuration overrides forwarded to ``config.parse``.
    """
    config.parse(kwargs)

    # ============================================= Prepare Data =============================================
    test_data = VB_Dataset(config.test_paths,
                           phase='test',
                           num_classes=config.num_classes,
                           useRGB=config.useRGB,
                           usetrans=config.usetrans,
                           padding=config.padding,
                           balance=config.data_balance)
    test_dataloader = DataLoader(test_data,
                                 batch_size=config.batch_size,
                                 shuffle=False,
                                 num_workers=config.num_workers)

    test_dist, test_scale = test_data.dist(), test_data.scale

    print('Test Image:', test_data.__len__())
    print('Test Data Distribution:', test_dist)

    # ============================================= Prepare Model ============================================
    # model = AlexNet(num_classes=config.num_classes)
    # model = Vgg16(num_classes=config.num_classes)
    model = ResNet18(num_classes=config.num_classes)
    # model = ResNet50(num_classes=config.num_classes)
    # print(model)

    if config.load_model_path:
        model.load(config.load_model_path)
        print('Model has been loaded!')
    else:
        print("Don't load model")
    if config.use_gpu:
        model.cuda()
    if config.parallel:
        model = torch.nn.DataParallel(model,
                                      device_ids=list(range(
                                          config.num_of_gpu)))
    model.eval()

    # ============================ Prepare Metrics ==========================
    test_cm = meter.ConfusionMeter(config.num_classes)
    test_mAP = meter.mAPMeter()
    # Per-class binary label/score accumulators for one-vs-rest AUC.
    y_true_0, y_scores_0 = [], []
    y_true_1, y_scores_1 = [], []
    y_true_2, y_scores_2 = [], []
    results = []
    features, colors = [], []  # for t-SNE

    softmax = functional.softmax

    # ================================== Test ===============================
    for i, (image, label, image_path) in tqdm(enumerate(test_dataloader)):

        # ******************* prepare input and go through the model *******************
        if config.use_gpu:
            image = image.cuda()
            label = label.cuda()
        image.requires_grad = False
        label.requires_grad = False

        score = model(image)

        # *********************** t-SNE feature and colors ***********************
        # Raw logits are used as the embedding; one color per class.
        features.append(score.detach().cpu().numpy())
        for c in label.cpu().numpy().tolist():
            if c == 0:
                colors.append('springgreen')
            elif c == 1:
                colors.append('mediumblue')
            elif c == 2:
                colors.append('red')
            else:
                raise ValueError

        # *********************** confusion matrix and mAP ***********************
        # mAPMeter needs one-hot targets; scatter class indices into 3 columns.
        one_hot = torch.zeros(label.size(0),
                              3).scatter_(1,
                                          label.data.cpu().unsqueeze(1), 1)

        test_cm.add(softmax(score, dim=1).data, label.data)
        test_mAP.add(softmax(score, dim=1).data, one_hot)

        positive_score_0 = [
            item[0]
            for item in softmax(score, dim=1).data.cpu().numpy().tolist()
        ]
        positive_score_1 = [
            item[1]
            for item in softmax(score, dim=1).data.cpu().numpy().tolist()
        ]
        positive_score_2 = [
            item[2]
            for item in softmax(score, dim=1).data.cpu().numpy().tolist()
        ]
        label_0 = [
            1 if item == 0 else 0
            for item in label.data.cpu().numpy().tolist()
        ]
        label_1 = [
            1 if item == 1 else 0
            for item in label.data.cpu().numpy().tolist()
        ]
        label_2 = [
            1 if item == 2 else 0
            for item in label.data.cpu().numpy().tolist()
        ]

        y_true_0.extend(label_0)  # collected for sklearn AUC/ROC computation
        y_true_1.extend(label_1)
        y_true_2.extend(label_2)
        y_scores_0.extend(positive_score_0)
        y_scores_1.extend(positive_score_1)
        y_scores_2.extend(positive_score_2)

        # ******************************** record prediction results ******************************
        # Argmax by pairwise comparison of the three softmax probabilities.
        for l, p, ip in zip(label.detach(),
                            softmax(score, dim=1).detach(), image_path):
            if p[0] > p[1] and p[0] > p[2]:
                results.append((ip, int(l), 0, round(float(p[0]), 4),
                                round(float(p[1]), 4), round(float(p[2]), 4)))
            elif p[1] > p[0] and p[1] > p[2]:
                results.append((ip, int(l), 1, round(float(p[0]), 4),
                                round(float(p[1]), 4), round(float(p[2]), 4)))
            else:
                results.append((ip, int(l), 2, round(float(p[0]), 4),
                                round(float(p[1]), 4), round(float(p[2]), 4)))

    # ================================== accuracy and sensitivity ==================================
    # One-vs-rest AUC for each class.
    AUC_0 = roc_auc_score(np.array(y_true_0),
                          np.array(y_scores_0),
                          average='weighted')
    AUC_1 = roc_auc_score(np.array(y_true_1),
                          np.array(y_scores_1),
                          average='weighted')
    AUC_2 = roc_auc_score(np.array(y_true_2),
                          np.array(y_scores_2),
                          average='weighted')

    test_cm = test_cm.value()
    test_accuracy = 100. * sum(
        [test_cm[c][c] for c in range(config.num_classes)]) / test_cm.sum()
    # Specificity: TN / (TN + FP) per class, from the confusion matrix.
    test_sp = [
        100. * (test_cm.sum() - test_cm.sum(0)[i] - test_cm.sum(1)[i] +
                test_cm[i][i]) / (test_cm.sum() - test_cm.sum(1)[i])
        for i in range(config.num_classes)
    ]
    # Sensitivity (recall): TP / (TP + FN) per class.
    test_se = [
        100. * test_cm[i][i] / test_cm.sum(1)[i]
        for i in range(config.num_classes)
    ]
    test_cm = test_cm / np.expand_dims(
        np.array(test_scale), axis=1)  # metrics use the balanced matrix; undo the balancing for display

    # ============================================ t-SNE ===========================================
    features = np.concatenate(features, axis=0)
    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
    Y = tsne.fit_transform(features)  # 2-D embedding of the logits
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1)
    plt.scatter(Y[:, 0], Y[:, 1], c=colors, cmap=plt.cm.Spectral)
    ax.xaxis.set_major_formatter(NullFormatter())  # hide axis tick labels
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.savefig(
        f'results/{config.load_model_path.split("/")[-1][:-4]}_logits2.png')
    # ipdb.set_trace()

    # ================================ Save and Print Prediction Results ===========================
    if config.result_file:
        write_csv(os.path.join('results', config.result_file),
                  tag=['path', 'label', 'predict', 'p1', 'p2', 'p3'],
                  content=results)

    print('test_acc:', test_accuracy)
    print('test_sp0:', test_sp[0], 'test_sp1:', test_sp[1], 'test_sp2:',
          test_sp[2])
    print('test_se0:', test_se[0], 'test_se1:', test_se[1], 'test_se2:',
          test_se[2])
    print('mSP:', round(sum(test_sp) / 3, 5), 'mSE:',
          round(sum(test_se) / 3, 5))
    print('test_mAUC:', (AUC_0 + AUC_1 + AUC_2) / 3)
    print('test_mAP:', test_mAP.value().numpy())
    print('test_cm:')
    print(test_cm.astype(dtype=np.int32))
Beispiel #18
0
def val_3class(model, dataloader, data_scale):
    """Validate a 3-class classifier and return the aggregated metrics.

    Runs ``model`` over ``dataloader``, accumulating a confusion matrix,
    mAP, per-class one-vs-rest AUC, specificity and sensitivity.

    Args:
        model: network returning class logits for a batch of images.
        dataloader: yields ``(image, label, image_path)`` batches.
        data_scale: per-class balancing factors; the displayed confusion
            matrix is divided by these to undo dataset balancing.

    Returns:
        Tuple ``(confusion_matrix_int, mAP_ndarray, specificity_list,
        sensitivity_list, mean_AUC, accuracy)``.
    """
    # ============================ Prepare Metrics ==========================
    val_cm = meter.ConfusionMeter(config.num_classes)
    val_mAP = meter.mAPMeter()
    # Per-class binary label/score accumulators for one-vs-rest AUC.
    y_true_0, y_scores_0 = [], []
    y_true_1, y_scores_1 = [], []
    y_true_2, y_scores_2 = [], []

    softmax = functional.softmax

    # ================================ Validate ==============================
    for i, (image, label, image_path) in tqdm(enumerate(dataloader)):

        # ******************* prepare input and go through the model *******************
        if config.use_gpu:
            image = image.cuda()
            label = label.cuda()

        image.requires_grad = False
        label.requires_grad = False

        score = model(image)

        # *********************** confusion matrix and mAP ***********************
        # mAPMeter needs one-hot targets; scatter class indices into 3 columns.
        one_hot = torch.zeros(label.size(0),
                              3).scatter_(1,
                                          label.data.cpu().unsqueeze(1), 1)

        val_cm.add(softmax(score, dim=1).data, label.data)
        val_mAP.add(softmax(score, dim=1).data, one_hot)

        positive_score_0 = [
            item[0]
            for item in softmax(score, dim=1).data.cpu().numpy().tolist()
        ]
        positive_score_1 = [
            item[1]
            for item in softmax(score, dim=1).data.cpu().numpy().tolist()
        ]
        positive_score_2 = [
            item[2]
            for item in softmax(score, dim=1).data.cpu().numpy().tolist()
        ]
        label_0 = [
            1 if item == 0 else 0
            for item in label.data.cpu().numpy().tolist()
        ]
        label_1 = [
            1 if item == 1 else 0
            for item in label.data.cpu().numpy().tolist()
        ]
        label_2 = [
            1 if item == 2 else 0
            for item in label.data.cpu().numpy().tolist()
        ]

        y_true_0.extend(label_0)  # collected for sklearn AUC/ROC computation
        y_true_1.extend(label_1)
        y_true_2.extend(label_2)
        y_scores_0.extend(positive_score_0)
        y_scores_1.extend(positive_score_1)
        y_scores_2.extend(positive_score_2)

    # *********************** accuracy and sensitivity ***********************
    # One-vs-rest AUC for each class.
    AUC_0 = roc_auc_score(np.array(y_true_0),
                          np.array(y_scores_0),
                          average='weighted')
    AUC_1 = roc_auc_score(np.array(y_true_1),
                          np.array(y_scores_1),
                          average='weighted')
    AUC_2 = roc_auc_score(np.array(y_true_2),
                          np.array(y_scores_2),
                          average='weighted')

    val_cm = val_cm.value()
    val_accuracy = 100. * sum(
        [val_cm[c][c] for c in range(config.num_classes)]) / val_cm.sum()
    # Specificity: TN / (TN + FP) per class, from the confusion matrix.
    val_sp = [
        100. *
        (val_cm.sum() - val_cm.sum(0)[i] - val_cm.sum(1)[i] + val_cm[i][i]) /
        (val_cm.sum() - val_cm.sum(1)[i]) for i in range(config.num_classes)
    ]
    # Sensitivity (recall): TP / (TP + FN) per class.
    val_se = [
        100. * val_cm[i][i] / val_cm.sum(1)[i]
        for i in range(config.num_classes)
    ]
    val_cm = val_cm / np.expand_dims(
        np.array(data_scale), axis=1)  # metrics use the balanced matrix; undo the balancing for display

    return val_cm.astype(dtype=np.int32), val_mAP.value().numpy(
    ), val_sp, val_se, (AUC_0 + AUC_1 + AUC_2) / 3, val_accuracy
Beispiel #19
0
def train(train_loads_iter, train_loaders, model, criterion, optimizer, epoch,
          args, print_func):
    """Train for one epoch, alternating batches from several datasets.

    For each outer iteration one batch is drawn from every dataset, losses
    are backpropagated (gradients accumulate across datasets), and a single
    optimizer step is taken.  Exhausted loaders are restarted transparently.

    Bug fixes vs. the original:
    - the mAP meter was created as ``mAPs`` but used as the undefined name
      ``APs`` (``APs.append`` / ``APs.update`` / ``sum(APs)`` were NameErrors);
      it now consistently uses ``mAPs.add`` and reports ``mAPs.value()``.
    - the progress line passed an undefined ``top1`` keyword to ``format``.
    - Python-2 ``iterator.next()`` replaced with the ``next()`` builtin.
    - removed the unused local ``kout``.

    Args:
        train_loads_iter: list of live iterators, one per dataset; entries
            are replaced in place when a loader is exhausted.
        train_loaders: the underlying DataLoaders used to restart iterators.
        model: network; ``model(input)`` is expected to return one output
            head per dataset, indexed by ``args.ind``.
        criterion: loss applied to ``(output_head, target)``.
        optimizer: stepped once per outer iteration.
        epoch: epoch number, used only in the progress line.
        args: config namespace; reads ``fix_BN``, ``num_iter``, ``batch_size``,
            ``num_datasets``, ``gpu``, ``print_freq`` and writes ``args.ind``.
        print_func: callable used to emit progress lines.
    """
    batch_time = CNN_utils.AverageMeter()
    data_time = CNN_utils.AverageMeter()
    losses = CNN_utils.AverageMeter()
    mAPs = mAPMeter()

    # switch to train mode
    model.train()
    if args.fix_BN:
        CNN_utils.fix_BN(model)

    batch_iters = math.ceil(args.num_iter / args.batch_size)
    for i in range(batch_iters):
        start = time.time()
        l_loss = []

        optimizer.zero_grad()
        for ds in range(args.num_datasets):
            # Tell the model (and anything reading args) which head is active.
            args.ind = ds

            end = time.time()
            try:
                (input, target) = next(train_loads_iter[ds])
            except StopIteration:
                # Loader exhausted: restart it and draw the first batch.
                train_loads_iter[ds] = iter(train_loaders[ds])
                (input, target) = next(train_loads_iter[ds])

            # measure data loading time
            data_time.update(time.time() - end)

            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)

            target = target.cuda(args.gpu, non_blocking=True)

            output = model(input)
            output_i = output[args.ind]
            loss = criterion(output_i, target)
            l_loss.append(loss.item())
            # Backprop per dataset; gradients accumulate until the single
            # optimizer.step() below.
            loss.backward()

            mAPs.add(output_i.detach(), target)

        # Sum of per-dataset losses for this outer iteration.
        losses.update(sum(l_loss), input.size(0))

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - start)

        if i % args.print_freq == 0:
            print_func('Epoch: [{0}][{1}/{2}]\t'
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                       'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                       'mAP {mAP:.3f}'.format(
                           epoch,
                           i,
                           batch_iters,
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=losses,
                           mAP=mAPs.value()))
Beispiel #20
0
def test():
    """Evaluate the checkpoint in ``ARGS.net`` on the test split and log metrics.

    Loads the checkpoint, runs the network over the test set without
    gradients, accumulates a confusion matrix and an mAP meter, then logs
    loss, accuracy, per-class specificity/sensitivity and mAP.

    Bug fix vs. the original: the one-hot target width was hard-coded to 3
    while the rest of the function uses ``N_CLASSES``; it now uses
    ``N_CLASSES`` so the scatter matches the score width for any class
    count.  The previously computed-but-unreported mAP/SP/SE are now logged.
    """
    # The log directory is the directory part of the checkpoint path.
    logdir = ARGS.net.split(ARGS.net.split('/')[-1])[0]
    logger = Logger(logdir, train=False)
    LOGDIR = logger.logdir

    test_data = My_Dataset(ARGS.test_paths, phase='test', num_classes=N_CLASSES, padding=ARGS.padding)
    test_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=8)

    net = ResNet18(N_CLASSES)
    net = net.to(device)

    criterion = nn.CrossEntropyLoss()

    ckpt_t = torch.load(ARGS.net)
    net.load_state_dict(ckpt_t['net'])
    BEST_EPOCH = ckpt_t['epoch']

    # log args
    args_msg = 'ARGS \rBEST_EPOCH : {} \r'\
               'test_path: {} \r'\
               'MODEL_LOAD_PATH: {}\r '.format(BEST_EPOCH, ARGS.test_paths, ARGS.net)
    logger.log(args_msg)

    test_cm = meter.ConfusionMeter(N_CLASSES)
    test_mAP = meter.mAPMeter()

    total_loss = 0.0
    total = 0.0

    net.eval()

    with torch.no_grad():
        for inputs, targets, img_path in test_loader:
            batch_size = inputs.size(0)
            inputs, targets = inputs.to(device), targets.to(device)

            score = net(inputs)
            loss = criterion(score, targets)

            total_loss += loss.item() * batch_size
            total += batch_size

            # *********************** confusion matrix and mAP ***********************
            # mAPMeter needs one-hot targets; width must match the score width.
            one_hot = torch.zeros(targets.size(0), N_CLASSES).scatter_(1, targets.data.cpu().unsqueeze(1), 1)
            test_cm.add(F.softmax(score, dim=1).data, targets.data)
            test_mAP.add(F.softmax(score, dim=1).data, one_hot)

    test_cm = test_cm.value()
    test_acc = 100. * sum([test_cm[c][c] for c in range(N_CLASSES)]) / test_cm.sum()
    # Specificity: TN / (TN + FP) per class, from the confusion matrix.
    test_sp = [100. * (test_cm.sum() - test_cm.sum(0)[i] - test_cm.sum(1)[i] + test_cm[i][i]) / (test_cm.sum() - test_cm.sum(1)[i])
                for i in range(N_CLASSES)]
    # Sensitivity (recall): TP / (TP + FN) per class.
    test_se = [100. * test_cm[i][i] / test_cm.sum(1)[i] for i in range(N_CLASSES)]

    results = {
        'loss': total_loss / total,
        'acc': test_acc
    }

    msg = 'Test   loss: %.3f | Acc: %.3f%% (%d)' % \
          (results['loss'], results['acc'], total)
    if logger:
        logger.log(msg)
    else:
        print(msg)

    # These metrics were computed but never reported before — log them too.
    logger.log("Test mAP : {}".format(test_mAP.value()))
    logger.log("Test SP per class : {}".format(test_sp))
    logger.log("Test SE per class : {}".format(test_se))
    logger.log("Best Accuracy : {}".format(test_acc))
Beispiel #21
0
def calculate_mAP(output, target):
    """Return the mean average precision of ``output`` scores w.r.t. ``target``.

    Args:
        output: per-class prediction scores, shape ``(N, num_classes)``.
        target: one-hot / multi-label ground truth of the same shape.

    Returns:
        The mAP as computed by ``torchnet.meter.mAPMeter``.
    """
    from torchnet import meter as tnt_meter

    ap_meter = tnt_meter.mAPMeter()
    ap_meter.add(output, target)
    return ap_meter.value()
def test_3class(**kwargs):
    """Evaluate a 3-class context model on the test set and report metrics.

    Loads the context test dataset and a ContextNet checkpoint (per
    ``config``), runs inference on (previous, current, next) image triplets,
    and prints accuracy, per-class specificity/sensitivity, mAP and the
    confusion matrix.  Optionally writes per-image predictions to CSV.

    Args:
        **kwargs: configuration overrides forwarded to ``config.parse``.
    """
    config.parse(kwargs)

    # ============================================= Prepare Data =============================================
    test_data = ContextVB_Dataset(config.test_paths, phase='test', num_classes=config.num_classes, useRGB=config.useRGB,
                                  usetrans=config.usetrans, padding=config.padding, balance=config.data_balance)
    test_dataloader = DataLoader(test_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)

    test_dist, test_scale = test_data.dist(), test_data.scale

    print('Test Image:', test_data.__len__())
    print('Test Data Distribution:', test_dist)

    # ============================================= Prepare Model ============================================
    model = ContextNet(num_classes=config.num_classes)
    print(model)

    if config.load_model_path:
        model.load(config.load_model_path)
        print('Model has been loaded!')
    else:
        print("Don't load model")
    if config.use_gpu:
        model.cuda()
    if config.parallel:
        model = torch.nn.DataParallel(model, device_ids=[x for x in range(config.num_of_gpu)])
    model.eval()

    # ============================ Prepare Metrics ==========================
    test_cm = meter.ConfusionMeter(config.num_classes)
    test_mAP = meter.mAPMeter()
    results = []

    softmax = functional.softmax

    # ================================== Test ===============================
    for i, (image, label, image_path) in tqdm(enumerate(test_dataloader)):

        # ******************* prepare input and go through the model *******************
        # Each sample is a (previous, current, next) triplet of images/labels;
        # only the current label is scored below.
        if config.use_gpu:
            last_image, cur_image, next_image = image[0].cuda(), image[1].cuda(), image[2].cuda()
            last_label, cur_label, next_label = label[0].cuda(), label[1].cuda(), label[2].cuda()
        else:
            last_image, cur_image, next_image = image[0], image[1], image[2]
            last_label, cur_label, next_label = label[0], label[1], label[2]

        last_image.requires_grad = False
        cur_image.requires_grad = False
        next_image.requires_grad = False
        last_label.requires_grad = False
        cur_label.requires_grad = False
        next_label.requires_grad = False

        score, diff1, diff2 = model(last_image, cur_image, next_image)

        # *********************** confusion matrix and mAP ***********************
        # mAPMeter needs one-hot targets; scatter class indices into 3 columns.
        one_hot = torch.zeros(cur_label.size(0), 3).scatter_(1, cur_label.data.cpu().unsqueeze(1), 1)

        test_cm.add(softmax(score, dim=1).data, cur_label.data)
        test_mAP.add(softmax(score, dim=1).data, one_hot)

        # ******************************** record prediction results ******************************
        # Argmax by pairwise comparison of the three softmax probabilities.
        for l, p, ip in zip(cur_label.detach(), softmax(score, dim=1).detach(), image_path):
            if p[0] > p[1] and p[0] > p[2]:
                results.append((ip, int(l), 0, round(float(p[0]), 4), round(float(p[1]), 4), round(float(p[2]), 4)))
            elif p[1] > p[0] and p[1] > p[2]:
                results.append((ip, int(l), 1, round(float(p[0]), 4), round(float(p[1]), 4), round(float(p[2]), 4)))
            else:
                results.append((ip, int(l), 2, round(float(p[0]), 4), round(float(p[1]), 4), round(float(p[2]), 4)))

    # ================================== accuracy and sensitivity ==================================
    test_cm = test_cm.value()
    test_accuracy = 100. * sum([test_cm[c][c] for c in range(config.num_classes)]) / test_cm.sum()
    # Specificity: TN / (TN + FP) per class; sensitivity: TP / (TP + FN).
    test_sp = [100. * (test_cm.sum() - test_cm.sum(0)[i] - test_cm.sum(1)[i] + test_cm[i][i]) / (test_cm.sum() - test_cm.sum(1)[i])
               for i in range(config.num_classes)]
    test_se = [100. * test_cm[i][i] / test_cm.sum(1)[i] for i in range(config.num_classes)]
    test_cm = test_cm / np.expand_dims(np.array(test_scale), axis=1)  # metrics use the balanced matrix; undo the balancing for display

    # ================================ Save and Print Prediction Results ===========================
    if config.result_file:
        write_csv(os.path.join('results', config.result_file), tag=['path', 'label', 'predict', 'p1', 'p2', 'p3'], content=results)

    print('test_acc:', test_accuracy)
    print('test_sp0:', test_sp[0], 'test_sp1:', test_sp[1], 'test_sp2:', test_sp[2])
    print('test_se0:', test_se[0], 'test_se1:', test_se[1], 'test_se2:', test_se[2])
    print('test_mAP:', test_mAP.value().numpy())
    print('test_cm:')
    print(test_cm.astype(dtype=np.int32))