Code Example #1
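# NOTE: Timer, AverageMeter, Confusion, and prepare_task_target are helpers
# from the surrounding project; their definitions are not shown on this page
# (an illustrative sketch of the two timing helpers follows this example).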
def train(epoch, train_loader, learner, args):
    # This function optimizes the training objective for one epoch

    # Initialize all meters
    data_timer = Timer()
    batch_timer = Timer()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    confusion = Confusion(args.out_dim)

    # Setup learner's configuration
    print('\n\n==== Epoch:{0} ===='.format(epoch))
    learner.train()
    learner.step_schedule(epoch)

    # The optimization loop
    data_timer.tic()
    batch_timer.tic()
    if args.print_freq > 0:  # Print the mini-log header
        print('Itr            |Batch time     |Data Time      |Loss')
    for i, (input, target) in enumerate(train_loader):

        data_time.update(data_timer.toc())  # measure data loading time

        # Prepare the inputs
        if args.use_gpu:
            input = input.cuda()
            target = target.cuda()
        train_target, eval_target = prepare_task_target(input, target, args)

        # Optimization
        loss, output = learner.learn(input, train_target)

        # Update the performance meter
        confusion.add(output, eval_target)

        # Measure elapsed time; toc() also restarts a timer, so the second
        # call resets data_timer for the next batch's data-loading measurement
        batch_time.update(batch_timer.toc())
        data_timer.toc()

        # Mini-Logs
        losses.update(loss, input.size(0))
        if args.print_freq > 0 and ((i % args.print_freq == 0) or
                                    (i == len(train_loader) - 1)):
            print('[{0:6d}/{1:6d}]\t'
                  '{batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                  '{data_time.val:.4f} ({data_time.avg:.4f})\t'
                  '{loss.val:.3f} ({loss.avg:.3f})'.format(
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

    # Loss-specific information
    if args.loss == 'CE':
        print('[Train] ACC: ', confusion.acc())
    elif args.loss in ['KCL', 'MCL']:
        args.cluster2Class = confusion.optimal_assignment(
            train_loader.num_classes
        )  # Save the mapping in args to use in eval
        if args.out_dim <= 20:  # Avoid printing a large confusion matrix
            confusion.show()
        print('Clustering scores:', confusion.clusterscores())
        print('[Train] ACC: ', confusion.acc())
    elif args.loss == 'DPS':
        confusion.show(width=15,
                       row_labels=['GT_dis-simi', 'GT_simi'],
                       column_labels=['Pred_dis-simi', 'Pred_simi'])
        print('[Train] similar pair f1-score:',
              confusion.f1score(1))  # f1-score for similar pair (label:1)
        print('[Train] dissimilar pair f1-score:', confusion.f1score(0))
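
The train() loop above depends on the tic()/toc() interface of Timer and on the .val/.avg fields of AverageMeter. Below is a minimal illustrative sketch of those two helpers, matching the behavior the loop assumes (in particular, toc() restarting the timer); it is not the project's actual implementation.

import time

class Timer:
    """Stopwatch with the tic()/toc() interface used in train() (illustrative)."""
    def __init__(self):
        self.time = time.time()

    def tic(self):
        # Start (or restart) the stopwatch
        self.time = time.time()

    def toc(self):
        # Return seconds since the last tic()/toc(), then restart; this is why
        # train() can call data_timer.toc() just to reset it after each batch
        interval = time.time() - self.time
        self.time = time.time()
        return interval


class AverageMeter:
    """Tracks the latest value (.val) and the running average (.avg), the two
    fields the mini-log format string reads (illustrative)."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # n is the batch size when averaging a per-sample quantity such as loss
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count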
Code Example #2
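import torch  # needed for torch.no_grad() below

# NOTE: Confusion and prepare_task_target are project helpers, as in Example #1.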
def evaluate(eval_loader, model, args):

    # Initialize all meters
    confusion = Confusion(args.out_dim)

    print('---- Evaluation ----')
    model.eval()
    for i, (input, target) in enumerate(eval_loader):

        # Prepare the inputs
        if args.use_gpu:
            input = input.cuda()
            target = target.cuda()
        _, eval_target = prepare_task_target(input, target, args)

        # Inference under no_grad() so the forward pass builds no autograd graph
        with torch.no_grad():
            output = model(input)

        # Update the performance meter
        confusion.add(output, eval_target)

    # Loss-specific information
    KPI = 0
    if args.loss == 'CE':
        KPI = confusion.acc()
        print('[Test] ACC: ', KPI)
    elif args.loss in ['KCL', 'MCL']:
        confusion.optimal_assignment(eval_loader.num_classes,
                                     args.cluster2Class)
        if args.out_dim <= 20:
            confusion.show()
        print('Clustering scores:', confusion.clusterscores())
        KPI = confusion.acc()
        print('[Test] ACC: ', KPI)
    elif args.loss == 'DPS':
        confusion.show(width=15,
                       row_labels=['GT_dis-simi', 'GT_simi'],
                       column_labels=['Pred_dis-simi', 'Pred_simi'])
        KPI = confusion.f1score(1)
        print('[Test] similar pair f1-score:',
              KPI)  # f1-score for similar pair (label:1)
        print('[Test] dissimilar pair f1-score:', confusion.f1score(0))
    return KPI
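
For context, a driver that wires the two functions together could look like the sketch below. Only train(), evaluate(), and the args fields referenced above (out_dim, use_gpu, print_freq, loss) come from the examples; the function name, the num_epochs parameter, and the best-KPI tracking are hypothetical.

def run_experiment(args, learner, model, train_loader, eval_loader, num_epochs):
    # Hypothetical driver: alternate training and evaluation, keep the best KPI
    # (ACC for 'CE'/'KCL'/'MCL', similar-pair f1-score for 'DPS')
    best_kpi = 0
    for epoch in range(num_epochs):
        train(epoch, train_loader, learner, args)
        kpi = evaluate(eval_loader, model, args)
        best_kpi = max(best_kpi, kpi)
    print('Best KPI:', best_kpi)
    return best_kpi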