Example #1
# Assumed file-level imports for both examples; AverageMeter, accuracy,
# class_accuracy and Bar are the source repository's own utilities
# (Bar matches the progress.bar.Bar interface).
import os
import time

import numpy as np
import pandas as pd
import torch


def output_attention(val_loader, model, epoch, use_cuda, save_dir):
    """Dump each sample's flattened attention map to attention.txt while
    tracking top-1/top-5 accuracy over the validation set."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))

    fw = open(os.path.join(save_dir, 'attention.txt'), 'w')
    for batch_idx, (inputs, targets) in enumerate(val_loader):
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # volatile=True is the PyTorch <= 0.3 way to run inference without
        # autograd; on PyTorch >= 0.4 wrap the loop body in `with torch.no_grad():`
        inputs = torch.autograd.Variable(inputs, volatile=True)
        targets = torch.autograd.Variable(targets)
        probs, attention = model(inputs)

        # attention maps come out as (N, C, H, W); collapse the channel
        # dimension and flatten each map to one row per sample
        bs, c, h, w = attention.size()
        attention = attention.sum(1)
        attention = attention.cpu().data.numpy()
        attention = attention.reshape((bs, -1))
        for index in range(bs):
            # one whitespace-separated attention map per output line
            hot = ' '.join('{:.3f}'.format(v) for v in attention[index])
            fw.write(hot + '\n')
        prec1, prec5 = accuracy(probs.data, targets.data, topk=(1,5))
        top1.update(prec1[0], inputs.size(0))
        top5.update(prec5[0], inputs.size(0))
        batch_time.update(time.time() - end)
        end = time.time()

        # `suffix` (not `shuffix`) is the attribute progress-style bars render
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(val_loader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            top1=top1.avg,
            top5=top5.avg
        )
        bar.next()
    bar.finish()
    fw.close()
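
A minimal sketch of how output_attention might be driven. The toy network, dataset path and transform below are assumptions for illustration, not taken from the original repository; the only contract the function actually relies on is that model(inputs) returns (probs, attention) with attention shaped (N, C, H, W).

import os

import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder

class TinyAttentionNet(nn.Module):
    # toy stand-in for the real network: forward() returns (logits, attention)
    def __init__(self, num_classes=10):
        super(TinyAttentionNet, self).__init__()
        self.features = nn.Conv2d(3, 16, 3, padding=1)
        self.attn = nn.Conv2d(16, 1, 1)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(16, num_classes)

    def forward(self, x):
        feats = self.features(x)
        attn = torch.sigmoid(self.attn(feats))            # (N, 1, H, W)
        pooled = self.pool(feats * attn).view(x.size(0), -1)  # (N, 16)
        return self.fc(pooled), attn

use_cuda = torch.cuda.is_available()
model = TinyAttentionNet().cuda() if use_cuda else TinyAttentionNet()

val_set = ImageFolder('data/val',  # hypothetical dataset path
                      transform=transforms.Compose([
                          transforms.Resize(256),
                          transforms.CenterCrop(224),
                          transforms.ToTensor()]))
val_loader = DataLoader(val_set, batch_size=32, shuffle=False, num_workers=4)

os.makedirs('results', exist_ok=True)  # output_attention assumes save_dir exists
output_attention(val_loader, model, epoch=0, use_cuda=use_cuda, save_dir='results')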
Example #2
def output_prob(val_loader, model, epoch, use_cuda, save_dir, image_names,
                criterion):
    """Ten-crop evaluation: write each image's top-20 class probabilities to a
    text file and its top-5 predictions to a submission CSV, then report
    average per-class top-1/top-5 accuracy."""
    global avg_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(val_loader))

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    fw = open(os.path.join(save_dir, 'test_top10_probability_336.txt'), 'w')
    submit_result = []
    count = 0
    class_preds = []
    class_targets = []
    for batch_idx, (inputs, targets, weights) in enumerate(val_loader):
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # volatile=True is the PyTorch <= 0.3 inference idiom; prefer
        # `with torch.no_grad():` on PyTorch >= 0.4
        inputs = torch.autograd.Variable(inputs, volatile=True)
        targets = torch.autograd.Variable(targets)

        # inputs arrive as (batch, ncrops, C, H, W) from a TenCrop-style
        # transform: fold the crops into the batch dimension, run the model,
        # then average the per-crop logits back to one prediction per image
        bs, ncrops, c, h, w = inputs.size()
        inputs = inputs.view(-1, c, h, w)
        outputs, sub_outputs = model(inputs)
        outputs = outputs.view(bs, ncrops, -1).mean(1)
        targets = torch.squeeze(targets)
        #loss = criterion(outputs, targets)
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))

        probs = torch.nn.functional.softmax(outputs, dim=1)  # dim is required on newer PyTorch
        # top-20 probabilities per image (despite the "top10" in the file
        # name) and top-5 class indices, flattened for the submission rows
        topk_probs, topk_preds = probs.topk(20, 1, True, True)
        pred_float, pred = outputs.data.topk(5, 1, True, True)  # values unused
        pred = pred.cpu().numpy().reshape(-1)
        output_probs = topk_probs.data.cpu().numpy()
        output_preds = topk_preds.data.cpu().numpy()
        real_targets = targets.cpu().data.numpy()
        #real_targets = real_targets.reshape(-1)

        #loss_num = loss.data
        for index in range(outputs.data.size(0)):
            # one line per image: true label followed by "class:prob" pairs
            probability = str(real_targets[index])
            #probability += ' {} {:.3f}'.format(image_names[count], loss_num[index])
            for i in range(20):
                probability += ' {}:{:.3f}'.format(output_preds[index][i],
                                                   output_probs[index][i])
            fw.write(probability)
            fw.write('\n')

            # submission row: image id followed by its top-5 predicted classes
            submit_each = [image_names[count]]
            submit_each.extend(list(pred[index * 5:(index + 1) * 5]))
            submit_result.append(submit_each)
            count += 1
            class_preds.append(list(pred[index * 5:(index + 1) * 5]))
            class_targets.append(real_targets[index])

        top1.update(prec1[0], bs)  # use the true batch size, not bs * ncrops
        top5.update(prec5[0], bs)
        batch_time.update(time.time() - end)
        end = time.time()

        bar.suffix = '({batch}/{size}) | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(val_loader),
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            top1=top1.avg,
            top5=top5.avg)
        bar.next()
    bar.finish()

    df = pd.DataFrame(submit_result,
                      columns=[
                          'image_id', 'predicted_0', 'predicted_1',
                          'predicted_2', 'predicted_3', 'predicted_4'
                      ])
    df.to_csv(os.path.join(save_dir, 'test_280.csv'),
              index=False,
              header=False,
              sep=' ')
    class_acc1, class_acc5 = class_accuracy(np.array(class_targets),
                                            np.array(class_preds))
    print('average per class accuracy: top1: {:.4f} top5: {:.4f}'.format(
        class_acc1, class_acc5))
    fw.close()
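
For context, a minimal sketch of the kind of ten-crop input pipeline that yields the (batch, ncrops, C, H, W) tensors output_prob expects. The sizes (384/336, echoing the "336" in the output file name), dataset path and two-tuple loader are assumptions; the original loader additionally yields per-sample weights from a custom Dataset.

import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder

# TenCrop returns a tuple of 10 PIL crops; stack them into one (10, C, H, W)
# tensor per sample so the default collate produces (batch, 10, C, H, W)
ten_crop = transforms.Compose([
    transforms.Resize(384),
    transforms.TenCrop(336),
    transforms.Lambda(lambda crops: torch.stack(
        [transforms.ToTensor()(crop) for crop in crops])),
])

test_set = ImageFolder('data/test', transform=ten_crop)  # hypothetical path
test_loader = DataLoader(test_set, batch_size=8, shuffle=False, num_workers=4)

inputs, targets = next(iter(test_loader))
print(inputs.size())  # torch.Size([8, 10, 3, 336, 336])

Inside output_prob, outputs.view(bs, ncrops, -1).mean(1) then averages the ten per-crop logits back into a single prediction per image.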