Example 1
def validate_10crop(val_loader: DataLoader, model: nn.Module, cuda=False):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader),
                             batch_time,
                             losses,
                             top1,
                             top5,
                             prefix='Test: ')

    # we use only this criterion at the moment
    criterion = nn.CrossEntropyLoss()
    # move the model and criterion to the GPU if requested
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if cuda:
                input, target = input.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            # input comes from an n-crop transform: (batch_size, n_crops, C, H, W)
            bs, ncrops, c, h, w = input.size()
            # fold the crops into the batch dimension, run the model,
            # then average the per-crop outputs for each sample
            temp_output = model(input.view(-1, c, h, w))
            output = temp_output.view(bs, ncrops, -1).mean(1)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                progress.print(i)

        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))
Example 2
def eval_video(video_data):
    i, data, label = video_data
    num_crop = args.test_crops

    if args.modality == 'RGB':
        length = 3
    elif args.modality == 'Flow':
        # length = 10
        length = 20
    elif args.modality == 'RGBDiff':
        length = 18
    else:
        raise ValueError("Unknown modality " + args.modality)

    # input_var = torch.autograd.Variable(data.view(-1, length, data.size(2), data.size(3)),
    # volatile=True)
    input_var = torch.autograd.Variable(data.view(-1, args.test_segments,
                                                  length, data.size(2),
                                                  data.size(3)),
                                        volatile=True)
    # print(input_var.size())
    # input('...')
    outputs = net(input_var)
    rst = outputs.data.cpu().numpy().copy()
    # print(rst.size)
    # input('...')
    target = label.cuda(non_blocking=True)  # `async` is a reserved word in Python 3.7+
    # prec1, prec5 = accuracy(outputs.view((num_crop, num_class)).mean(dim=0, keepdim=True),
    # target, topk=(1,5))
    prec1, prec5 = accuracy(outputs.data, target, topk=(1, 5))
    top1.update(prec1[0], 1)
    top5.update(prec5[0], 1)
    # return i, rst.reshape((num_crop, args.test_segments, num_class)).mean(axis=0).reshape(
    # (args.test_segments, 1, num_class)
    # ), label[0]
    # rst = net(input_var).data.cpu().numpy().copy()
    # print(rst.shape)
    # # input('...')
    return i, rst.reshape((num_crop, num_class)).mean(axis=0), label[0]
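
Example 2 targets pre-0.4 PyTorch (torch.autograd.Variable with volatile=True, outputs.data). For reference, here is a minimal sketch of the same inference step on current PyTorch; net, num_class and the tensor layout are carried over from the snippet, while the function name and explicit arguments are illustrative.

import torch

def eval_video_no_grad(i, data, label, net, length, test_segments, num_crop, num_class):
    # torch.no_grad() replaces the removed volatile=True flag.
    with torch.no_grad():
        input_var = data.view(-1, test_segments, length, data.size(2), data.size(3))
        outputs = net(input_var)
    rst = outputs.cpu().numpy().copy()
    # Average the per-crop class scores, as in the original return statement.
    return i, rst.reshape((num_crop, num_class)).mean(axis=0), label[0]
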
Example 3
	elif(str_val == 9):
		X_train,Y_train = hd.convert_to_matrix(df,test = False)
		X_test = hd.convert_to_matrix(df_test,test = True)
		print("Shape of X_train, Y_train and X_test are " + str(X_train.shape) + str(Y_train.shape) + str(X_test.shape))
		print()

	elif(str_val == 10):
		print("Are u Sure you have trained the Model and got your Predictions")
		check_pred_str = input("So you want to continue\n")
		
		if(check_pred_str == 'n' or check_pred_str == 'N'):
			print("Revert back then")
			continue

		test_acc = mn.accuracy(Y_test,true_pred)
		train_acc = mn.accuracy(train_y,Y_train)

		credits = mn.prize(test_acc,train_acc,credits)

		print("Your Test Prediction Value is " + str(test_acc))
		print("Your Train accuracy is " + str(train_acc))
		print()
		print("Good! You got some Credits")
		print("Available Credits are " + str(credits))
		print()

	elif(str_val == 11):
		print("Options Available")
		print()
		print("1 - Shows a Column")
Example 4
def test_accuracy():
    labels = jnp.array([0, 2, 1])
    logits = jnp.array([[-1., 2., 1.], [5., -6., 2.], [-1., 0., -2.]])
    assert accuracy(logits, labels) == pytest.approx(0.333, 0.01)
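
Example 4 shows only the test. A definition of accuracy consistent with it (the fraction of rows whose argmax matches the integer label) could look like the sketch below; for the logits in the test the argmaxes are [1, 0, 1] against labels [0, 2, 1], so one of three predictions is correct and the result is 1/3 ≈ 0.333.

import jax.numpy as jnp

def accuracy(logits, labels):
    # Fraction of samples whose highest-scoring class equals the label.
    return jnp.mean(jnp.argmax(logits, axis=-1) == labels)
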
Example 5
def evaluate(test_loader, model1, model2, criterion, eval_logger, softmax,
             analysis_recorder):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    s_n_top1 = AverageMeter()
    s_n_top5 = AverageMeter()

    # switch to evaluate mode
    model1.eval()
    model2.eval()

    end = time.time()
    for i, (inputs, target) in enumerate(test_loader):
        target = target.cuda(non_blocking=True)
        # print(inputs.size())
        # input('...')
        input_var = torch.autograd.Variable(inputs[0], volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        v_path = inputs[1][0].replace(' ', '-')
        # compute output
        output1 = model1(input_var)
        output2 = model2(input_var)
        # print(output1.size())
        # print(output2.size())
        # input('...')
        output1_b = softmax(output1)
        output2_b = softmax(output2)
        # loss = criterion(output, target_var)
        loss = criterion(output1_b, output2_b)
        loss = torch.sqrt(loss)

        prec1, prec5 = accuracy(output1.data, target, topk=(1, 5))
        top1.update(prec1[0], 1)
        top5.update(prec5[0], 1)
        prec1, prec5 = accuracy(output2.data, target, topk=(1, 5))
        s_n_top1.update(prec1[0], 1)
        s_n_top5.update(prec5[0], 1)

        _, n_n_pred = output1_b.max(1)
        _, s_n_pred = output2_b.max(1)
        GT_class_name = class_to_name[target.cpu().numpy()[0]]
        if (n_n_pred.data == target).cpu().numpy():
            if_correct = 1
        else:
            if_correct = 0
        # print('v_path:', v_path)
        # print('n_n_pred:', n_n_pred)
        # print('s_n_pred:', s_n_pred)
        # print('target:', target)
        # print('GT_class_name:', GT_class_name)
        # print('if_correct:', if_correct)
        # input('...')

        losses.update(loss.data[0], 1)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        analysis_data_line = ('{path} {if_correct} {loss.val:.4f} '
                              '{GT_class_name} {GT_class_index} '
                              '{n_n_pred} {s_n_pred}'.format(
                                  path=v_path,
                                  if_correct=if_correct,
                                  loss=losses,
                                  GT_class_name=GT_class_name,
                                  GT_class_index=target.cpu().numpy()[0],
                                  n_n_pred=n_n_pred.data.cpu().numpy()[0],
                                  s_n_pred=s_n_pred.data.cpu().numpy()[0]))
        with open(analysis_recorder, 'a') as f:
            f.write(analysis_data_line + '\n')

        if i % 20 == 0:
            log_line = (
                'Test: [{0}/{1}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
                's_n_Prec@1 {s_n_top1.val:.3f} ({s_n_top1.avg:.3f})\t'
                's_n_Prec@5 {s_n_top5.val:.3f} ({s_n_top5.avg:.3f})'.format(
                    i,
                    len(test_loader),
                    batch_time=batch_time,
                    loss=losses,
                    top1=top1,
                    top5=top5,
                    s_n_top1=s_n_top1,
                    s_n_top5=s_n_top5))
            print(log_line)
            # eval_logger.write(log_line+'\n')
            with open(eval_logger, 'a') as f:
                f.write(log_line + '\n')

    # print(('Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
    # .format(top1=top1, top5=top5, loss=losses)))
    log_line = ('Testing Results: Loss {loss.avg:.5f}'.format(loss=losses))
    print(log_line)
    with open(eval_logger, 'a') as f:
        f.write(log_line + '\n\n')

    return
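
Example 5 never shows how criterion is constructed. The small runnable snippet below illustrates one reading that fits the code: if criterion is nn.MSELoss(), then torch.sqrt(criterion(...)) is the root-mean-square error between the two models' softmax distributions. The criterion choice and the logits are assumptions, not taken from the source.

import torch
import torch.nn as nn

softmax = nn.Softmax(dim=1)
criterion = nn.MSELoss()                     # assumed; the source never defines it

logits1 = torch.tensor([[2.0, 0.5, -1.0]])   # stand-in outputs of model1
logits2 = torch.tensor([[1.5, 1.0, -0.5]])   # stand-in outputs of model2
loss = torch.sqrt(criterion(softmax(logits1), softmax(logits2)))
print(loss.item())                           # small when the two distributions agree
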
Example 6
def evaluate(test_loader, model, criterion, eval_logger, softmax, 
        analysis_recorder):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    n_s_top1 = AverageMeter()
    n_s_top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (inputs, target) in enumerate(test_loader):
        # print(inputs[0].size())
        # input('...')
        target = target.cuda(non_blocking=True)
        norm_input_var = torch.autograd.Variable(inputs[0], volatile=True)
        abnorm_input_var = torch.autograd.Variable(inputs[1], volatile=True)
        v_path = inputs[4][0].replace(' ', '-')
        # idx_list = inputs[2]
        # t_to_idx = {x:i for i, x in enumerate(idx_list[0])}
        # ab_idx_list = inputs[3]
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        norm_output = model(norm_input_var)
        abnorm_output = model(abnorm_input_var)
        # print(norm_output)
        # print(norm_output.size())
        # print(abnorm_output)
        # print(abnorm_output.size())
        # input('..')
        # loss = criterion(output, target_var)
        norm_sm = softmax(norm_output)
        abnorm_sm = softmax(abnorm_output)
        loss = criterion(norm_sm, abnorm_sm)
        # print('np_loss:', np_loss)
        loss = torch.sqrt(loss)
        # print('loss:', loss)
        # input('...')
        prec1, prec5 = accuracy(norm_output.data, target, topk=(1,5))
        top1.update(prec1[0], 1)
        top5.update(prec5[0], 1)
        prec1, prec5 = accuracy(abnorm_output.data, target, topk=(1,5))
        n_s_top1.update(prec1[0], 1)
        n_s_top5.update(prec5[0], 1)

        _, n_n_pred = norm_sm.max(1)
        _, n_s_pred = abnorm_sm.max(1)
        GT_class_name = class_to_name[target.cpu().numpy()[0]]
        # print(norm_sm)
        # print('v_path:', v_path)
        # print('n_n_pred:', n_n_pred)
        # print('n_s_pred:', n_s_pred)
        # print('target:', target)
        # print('GT_class_name:', GT_class_name)
        if (n_n_pred.data == target).cpu().numpy():
            if_correct = 1
        else:
            if_correct = 0
        # print('if_correct:', if_correct)
        # input('...')


        losses.update(loss.data[0], 1)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        analysis_data_line = ('{path} {if_correct} {loss.val:.4f} '
                '{GT_class_name} {GT_class_index} '
                '{n_n_pred} {n_s_pred}'.format(
                    path=v_path, if_correct=if_correct, loss=losses, 
                    GT_class_name=GT_class_name, 
                    GT_class_index=target.cpu().numpy()[0], 
                    n_n_pred=n_n_pred.data.cpu().numpy()[0], 
                    n_s_pred=n_s_pred.data.cpu().numpy()[0]))
        with open(analysis_recorder, 'a') as f:
            f.write(analysis_data_line+'\n')

        if i % 20 == 0:
            log_line = ('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
                  'n_s_Prec@1 {n_s_top1.val:.3f} ({n_s_top1.avg:.3f})\t'
                  'n_s_Prec@5 {n_s_top5.val:.3f} ({n_s_top5.avg:.3f})'.format(
                   i, len(test_loader), batch_time=batch_time, loss=losses, 
                   top1=top1, top5=top5, n_s_top1=n_s_top1, n_s_top5=n_s_top5))
            print(log_line)
            # eval_logger.write(log_line+'\n')
            with open(eval_logger, 'a') as f:
                f.write(log_line+'\n')

    # print(('Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
          # .format(top1=top1, top5=top5, loss=losses)))
    log_line = ('Testing Results: Loss {loss.avg:.5f}'
          .format(loss=losses))
    print(log_line)
    with open(eval_logger, 'a') as f:
        f.write(log_line+'\n\n')

    return