Example #1
import torch
import torch.nn.functional as F
from torchnet.meter import APMeter
from tqdm import tqdm


def eval_by_dl(model, dl, device, zero_labels=False, wb=None, caption="train"):
    model.eval()
    positive = 0
    dataset_len = len(dl.dataset)
    apmeter = APMeter()

    with torch.no_grad():
        for X, tgt in tqdm(dl):
            tgt_ = torch.Tensor(tgt["label"]).long().to(device)
            tgt_class = torch.zeros_like(tgt_) if zero_labels else tgt_
            tgt_ = F.one_hot(tgt_, num_classes=5).long()
            tgt_class = F.one_hot(tgt_class,
                                  num_classes=5).unsqueeze(dim=1).float()
            X = X.to(device)
            X = X.squeeze(dim=1).permute(0, 2, 1)
            emb, output = model(X, tgt_class)

            A = torch.argmax(output, dim=-1).reshape(-1)
            B = torch.argmax(tgt_, dim=-1).reshape(-1)

            # APMeter ranks examples by score, so feed it the raw class
            # scores (shaped like the one-hot targets), not argmax indices
            apmeter.add(output.reshape(tgt_.shape), tgt_)
            positive += torch.sum(A == B)

    accuracy = positive / dataset_len
    if wb:
        wb.log({f"{caption}, accuracy": accuracy})
    print(f"Accuracy on {caption.upper()} dataset: {accuracy * 100:.2f}%\n")
    print(f"mAP on {caption.upper()} dataset: "
          f"{apmeter.value().mean() * 100:.2f}%\n")
Example #2
from torchnet import meter
from torchnet.meter import APMeter


class mAPMeter(meter.Meter):
    """
    The mAPMeter measures the mean average precision over all classes.

    It operates on `NxK` Tensors `output` and `target`, and optionally an
    `Nx1` Tensor `weight`, where (1) `output` contains model scores for `N`
    examples and `K` classes that should be higher when the model is more
    confident the example is positive and lower when it believes the example
    is negative (for instance, the output of a sigmoid); (2) `target`
    contains only the values 0 (negative examples) and 1 (positive examples);
    and (3) `weight` ( > 0) is a per-sample weight.
    """
    def __init__(self):
        super(mAPMeter, self).__init__()
        self.apmeter = APMeter()
        self.val = 0

    def reset(self):
        self.apmeter.reset()
        self.val = 0

    def update(self, output, target, weight=None):
        output = output.detach()
        target = target.detach()
        self.apmeter.add(output, target, weight)
        self.val = self.apmeter.value().mean()

    def value(self):
        return self.val
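A quick usage sketch for the class above (random toy tensors):

import torch
import torch.nn.functional as F

m = mAPMeter()
output = torch.rand(8, 5)  # scores for N=8 samples, K=5 classes
target = F.one_hot(torch.randint(0, 5, (8,)), num_classes=5).float()
m.update(output, target)
print(m.value())  # mean AP over the 5 classes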
Example #3
File: artosisnet.py  Project: eqy/autotosis
def train(train_loader, model, criterion, optimizer, epoch, args):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    precision = AverageMeter('Precis:', ':1.6f')
    recall = AverageMeter('Recall:', ':1.6f')
    average_precision = APMeter()
    average_precision_wrapper = APMeterWrapper(average_precision)
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, average_precision_wrapper],
        prefix="Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if args.fp16:
            images = images.half()

        if args.gpu is not None and torch.cuda.device_count():
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.device_count():
            target = target.cuda(args.gpu, non_blocking=True)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, prec, rec, prec_weight, rec_weight = accuracy(output, target)
        losses.update(loss.detach().item(), images.size(0))
        top1.update(acc1, images.size(0))
        # the precision and recall meters are not updated here, so they
        # keep their initial values

        # compute gradient and do SGD step; setting grads to None is
        # equivalent to optimizer.zero_grad(set_to_none=True) and skips
        # the memset that zeroing would do
        for param in model.parameters():
            param.grad = None
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            average_precision.add(output[:, 1], (target == 1).float())

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            progress.display(i)
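APMeterWrapper is project-specific and not shown in this excerpt. Assuming ProgressMeter formats each meter with str(meter), as the stock PyTorch ImageNet example does, a plausible minimal adapter could look like the following sketch (an assumption, not the project's actual code):

import torch

class APMeterWrapper:
    """Hypothetical adapter: lets an APMeter sit in a ProgressMeter's
    meter list, which renders each entry via str()."""

    def __init__(self, apmeter, name='AP'):
        self.apmeter = apmeter
        self.name = name

    def __str__(self):
        val = self.apmeter.value()
        # APMeter.value() returns 0 before any add() call, a tensor afterwards
        if torch.is_tensor(val) and val.numel():
            return '{} {:.3f}'.format(self.name, val.mean().item())
        return '{} n/a'.format(self.name)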
Example #4
File: artosisnet.py  Project: eqy/autotosis
def validate(val_loader, model, criterion, args):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    precision = AverageMeter('Precis:', ':1.6f')
    recall = AverageMeter('Recall:', ':1.6f')
    average_precision = APMeter()
    average_precision_meter = APMeterWrapper(average_precision)

    progress = ProgressMeter(
        len(val_loader), [batch_time, losses, top1, average_precision_meter],
        prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.fp16:
                images = images.half()

            if args.gpu is not None and torch.cuda.device_count():
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.device_count():
                target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, prec, rec, prec_weight, rec_weight = accuracy(output, target)
            losses.update(loss.item(), images.size(0))
            top1.update(acc1, images.size(0))
            # the precision and recall meters are not updated here either

            average_precision.add(output[:, 1], (target == 1).float())

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.display(i)

        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} AP: {ap:.3f}'.format(
            top1=top1, ap=average_precision.value()[0]))

    # note: precision and recall were never updated above, so their
    # averages remain at the initial value of 0
    return top1.avg, precision.avg, recall.avg
Example #5
            raise Exception("You must have at least one model.")
        else:
            # the averaged scores can sum to more than 1; that is by design,
            # not a problem
            d = evaluate_scores(gt, scores, model_cfg)
            log_eval_results(d)

    if 'trim_eval' in types or 'only_tail' in types:
        # find tail labels using the training set
        filepath = 'data/{n1}/{n1}_train.txt'.format(n1=name)
        print(filepath)
        rate = [float(f) for f in a.rate.split(',')]
        discard_sets, count_np = get_discard_set(filepath, 'cumsum', rate)
        all_label_set = set(range(num_labels))
        rest_labels = [all_label_set - d for d in discard_sets]
        if 'trim_eval' in types:
            for r, dis_set, rest in zip(rate, discard_sets, rest_labels):
                logging.info(
                    "Evaluate when trimming off {num_dis} labels (cumsum rate: {rate:.2f}%, actual rate: {r2:.2f}%)".format(
                        num_dis=len(dis_set), rate=r * 100, r2=len(dis_set) / num_labels * 100))
                dis_list = sorted(list(dis_set))
                rest_list = sorted(list(rest))
                new_score = np.copy(scores)
                new_score[:, dis_list] = 0
                log_eval_results(evaluate_scores(gt, new_score, model_cfg))

                # eval on head and tail labels, using original scores
                ap = APMeter()
                ap.add(scores, gt.todense())
                logging.info("AP of tail labels and head labels: %.2f, %.2f.\n" % (
                    ap.value()[dis_list].mean() * 100, ap.value()[rest_list].mean() * 100))
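Because ap.value() returns one AP per label in a 1-D tensor, comparing label subsets only needs index-list slicing, as above. A minimal sketch with random toy data and a hypothetical head/tail split:

import torch
from torchnet.meter import APMeter

scores = torch.rand(100, 10)              # N=100 samples, K=10 labels
gt = (torch.rand(100, 10) > 0.8).float()  # random multi-label ground truth

ap = APMeter()
ap.add(scores, gt)
per_class = ap.value()                    # 1-D tensor, one AP per label

tail = [7, 8, 9]                          # hypothetical tail-label indices
head = [i for i in range(10) if i not in tail]
print(per_class[tail].mean() * 100, per_class[head].mean() * 100)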
Example #6
import numpy as np

import torch
from torchnet.meter import APMeter

X_train = np.load('SVM_PurePose/train_feature_list_hmdb.npy')
X_train = np.transpose(X_train)
y_train = np.load('SVM_PurePose/train_labels_list_hmdb.npy')
print('Training data loaded!')

X_test = np.load('SVM_PurePose/test_feature_list_hmdb.npy')
X_test = np.transpose(X_test)
y_test = np.load('SVM_PurePose/test_labels_list_hmdb.npy')
print('Test data loaded!')

meter = APMeter()


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    # count the correct predictions within the top k for each requested k
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
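A short sketch exercising the completed accuracy() and the module-level APMeter together (toy tensors; 51 classes is an assumption based on the HMDB file names above):

import torch.nn.functional as F

output = torch.rand(16, 51)           # scores for 16 samples, 51 classes
target = torch.randint(0, 51, (16,))
top1, = accuracy(output, target, topk=(1,))
print('top-1 accuracy: {:.2f}%'.format(top1.item()))

meter.add(output, F.one_hot(target, num_classes=51).float())
print('mAP: {:.4f}'.format(meter.value().mean().item()))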