def validation(data_loader, model, criterion, thresholds):
    model.eval()

    loss_handler = AverageMeter()
    accuracy_handler = [AverageMeter() for _ in thresholds]
    score_handler = [AverageMeter() for _ in thresholds]

    with torch.no_grad():
        for i, (image, target) in enumerate(data_loader):
            image = image.to(device)
            target = target.to(device)

            output = model(image).view(-1)

            loss = criterion(output, target)
            loss_handler.update(loss.item())

            target = target.byte()
            # Use a separate index so the batch index `i` is not shadowed.
            for j, threshold in enumerate(thresholds):
                pred = torch.sigmoid(output) > threshold

                accuracy = metrics.accuracy(pred, target)
                score = metrics.min_c(pred, target)

                accuracy_handler[j].update(accuracy)
                score_handler[j].update(score)

    return (loss_handler.avg, [meter.avg for meter in accuracy_handler],
            [meter.avg for meter in score_handler])
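
Every snippet on this page relies on an AverageMeter helper that is not shown. A minimal sketch of the usual implementation, assuming update(value, n=1) accumulates a weighted running mean exposed as .avg, looks like this:

class AverageMeter:
    """Tracks a running sum and count; the current mean is exposed as .avg."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n
        self.avg = self.sum / self.count
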
def train(data_loader, teacher, student, criterion, optimizer, epoch, config):
    # Knowledge-distillation step: the teacher (kept in eval mode) supervises
    # the student through a weighted sum of cross-entropy and MSE losses.
    top_1 = AverageMeter()
    top_n = AverageMeter()
#     kl_losses = AverageMeter()
    ce_losses = AverageMeter()
    mse_losses = AverageMeter()
    
    teacher.eval()
    student.train()

    tq = tqdm(total=len(data_loader) * config['batch_size'])
    
    for i, (image, target) in enumerate(data_loader):
        image = image.to(device)
        target = target.to(device)
        
        teacher_output, teacher_cosine, teacher_feature = teacher(image, target)
        student_output, student_cosine, student_feature = student(image, target)

        ce_loss, mse_loss = criterion(teacher_output, teacher_cosine, teacher_feature, 
                                      student_output, student_cosine, student_feature, 
                                      target)

#         kl_losses.update(kl_loss.item())
        ce_losses.update(ce_loss.item())
        mse_losses.update(mse_loss.item())

        loss = ce_loss * config['ce_weight'] + mse_loss * config['mse_weight']
        
        acc_1, acc_n = accuracy(student_cosine, target, topk=(1, 10))
        top_1.update(acc_1.item())
        top_n.update(acc_n.item())

        loss.backward()
        if (i + 1) % config['step'] == 0:
            optimizer.step()
            optimizer.zero_grad()

        current_lr = get_learning_rate(optimizer)

        tq.set_description('Epoch {}, lr {:.2e}'.format(epoch + 1, current_lr))
        tq.set_postfix(ce_loss='{:.4f}'.format(ce_losses.avg),
#                        kl_loss='{:.4f}'.format(kl_losses.avg),
                       mse_loss='{:.4f}'.format(mse_losses.avg),
                       top_1='{:.4f}'.format(top_1.avg),
                       top_n='{:.4f}'.format(top_n.avg))
        tq.update(config['batch_size'])

    tq.close()
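
The get_learning_rate helper used above is assumed rather than shown; a common one-liner, assuming a single parameter group (or that the first group's learning rate is representative), is:

def get_learning_rate(optimizer):
    # Report the learning rate of the first parameter group.
    return optimizer.param_groups[0]['lr']
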
def train(data_loader, model, criterion, optimizer, epoch, config):
    model.train()

    loss_handler = AverageMeter()
    accuracy_handler = AverageMeter()
    score_handler = AverageMeter()

    tq = tqdm(total=len(data_loader) * config['batch_size'])
    tq.set_description('Epoch {}, lr {:.2e}'.format(
        epoch + 1, get_learning_rate(optimizer)))

    for i, (image, target) in enumerate(data_loader):
        image = image.to(device)
        target = target.to(device)

        output = model(image).view(-1)

        loss = criterion(output, target)
        loss.backward()

        batch_size = image.size(0)

        if (i + 1) % config['step'] == 0:
            optimizer.step()
            optimizer.zero_grad()

        pred = torch.sigmoid(output) > 0.5
        target = target > 0.5

        accuracy = metrics.accuracy(pred, target)
        score = metrics.min_c(pred, target)

        loss_handler.update(loss.item())
        accuracy_handler.update(accuracy)
        score_handler.update(score)


        tq.update(batch_size)
        tq.set_postfix(loss='{:.4f}'.format(loss_handler.avg),
                       accuracy='{:.5f}'.format(accuracy_handler.avg),
                       score='{:.5f}'.format(score_handler.avg))
    tq.close()
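
For context, a hypothetical epoch loop wiring this binary-classification train function to the validation function at the top of the page could look like the sketch below. The names train_loader, val_loader, and config['num_epochs'] are illustrative, and it assumes the min_c metric is a cost to be minimized when sweeping thresholds:

thresholds = [0.3, 0.4, 0.5, 0.6, 0.7]
for epoch in range(config['num_epochs']):
    train(train_loader, model, criterion, optimizer, epoch, config)
    val_loss, val_acc, val_score = validation(val_loader, model, criterion, thresholds)
    # Pick the decision threshold with the lowest validation cost.
    best = min(range(len(thresholds)), key=lambda j: val_score[j])
    print('epoch {}: val_loss {:.4f}, best threshold {} (min_c {:.4f})'.format(
        epoch + 1, val_loss, thresholds[best], val_score[best]))
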
Example #4
def train(data_loader, model, margin, criterion, optimizer, epoch, config):
    top_1 = AverageMeter()
    top_n = AverageMeter()
    losses = AverageMeter()

    model.train()
    margin.train()

    tq = tqdm(total=len(data_loader) * config['batch_size'])

    for i, (image, target) in enumerate(data_loader):
        image = image.to(device)
        target = target.to(device)

        feature = model(image)
        output, cosine = margin(feature, target)

        loss = criterion(output, target)
        losses.update(loss.item(), image.size(0))

        acc_1, acc_n = accuracy(cosine, target, topk=(1, 10))
        top_1.update(acc_1[0], image.size(0))
        top_n.update(acc_n[0], image.size(0))

        loss.backward()
        if (i + 1) % config['step'] == 0:
            optimizer.step()
            optimizer.zero_grad()

        current_lr = get_learning_rate(optimizer)

        tq.set_description('Epoch {}, lr {:.2e}'.format(epoch + 1, current_lr))
        tq.set_postfix(loss='{:.4f}'.format(losses.avg),
                       top_1='{:.4f}'.format(top_1.avg),
                       top_n='{:.4f}'.format(top_n.avg))
        tq.update(config['batch_size'])

    tq.close()
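
Both margin-based examples call an accuracy(output, target, topk=(1, 10)) helper that this page does not include. The version below follows the widely used PyTorch ImageNet-example pattern and returns one percentage tensor per requested k; treat it as an assumed implementation rather than the project's own:

def accuracy(output, target, topk=(1,)):
    # Top-k precision over a batch of logits or cosine scores, in percent.
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
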
Example #5
File: base.py  Project: lvaitzxc/npml
    def score(self, y_true: ndarray, y_pred: ndarray) -> float:
        # Fraction of samples whose predicted label matches the true label.
        return accuracy(y_true, y_pred)
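
The accuracy function imported by base.py in the npml project is not reproduced here; for plain label arrays it is typically just the fraction of matching entries, for example:

import numpy as np

def accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    # Share of positions where the predicted label equals the true label.
    return float(np.mean(y_true == y_pred))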