Example #1
def compute_loss(input, target, smoothing):
    if config.loss.type == 'focal':
        compute_class_loss = FocalLoss(gamma=config.loss.focal.gamma)
    elif config.loss.type == 'f2':
        compute_class_loss = f2_loss
    elif config.loss.type == 'lsep':
        compute_class_loss = lsep_loss
    elif config.loss.type == 'lovasz':
        compute_class_loss = lovasz_loss
    else:
        raise ValueError('invalid loss {}'.format(config.loss.type))

    if smoothing is not None:
        # label smoothing: move the hard 0/1 targets towards 0.5 by `smoothing`
        target = (1 - smoothing) * target + smoothing / 2

    if config.model.predict_thresh:
        # the model outputs per-class logits and predicted thresholds in a
        # single tensor; split them and keep one threshold per sample
        logits, thresholds = input.split(input.size(-1) // 2, -1)
        thresholds = thresholds[:, :1]

        # classification loss on the raw logits, plus a BCE term on the
        # threshold-shifted logits
        class_loss = compute_class_loss(input=logits, target=target)
        thresh_loss = bce_loss(input=logits - thresholds, target=target)

        loss = (class_loss + thresh_loss) / 2
    else:
        loss = compute_class_loss(input=input, target=target)

    assert loss.dim() == 0

    return loss
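
The label-smoothing step in compute_loss is self-contained and easy to sanity-check in isolation. A minimal sketch (the function name and tensor values are made up for illustration):

import torch

def smooth_targets(target, smoothing):
    # move hard 0/1 targets towards 0.5 by `smoothing`, as in compute_loss above
    return (1 - smoothing) * target + smoothing / 2

print(smooth_targets(torch.tensor([[0., 1., 1., 0.]]), 0.1))
# tensor([[0.0500, 0.9500, 0.9500, 0.0500]])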
Example #2
def train():
    model.train()

    acc_loss, acc_crit, acc_reg = [], [], []

    for iter, (data, target, _) in enumerate(dl_train):
        target = torch.clamp(target[:, aus], 0, 1)
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data).float(), Variable(target).float()

        optimizer.zero_grad()
        pred, mu, logvar = model(data)

        # debug: squared distances between the anchor embedding and the
        # positive / negative embeddings
        # print(torch.mean((mu[0:1, :] - mu[1:2, :]) ** 2, 1))
        # print(torch.mean((mu[0:1, :] - mu[2:, :]) ** 2, 1))

        crit_val = bce_loss(pred, target)
        metric_val = npair_loss(mu[0:1, :], mu[1:2, :], mu[2:, :])
        reg_val = kld(mu, logvar) / len(data)

        # only the metric and KL terms are backpropagated here;
        # the BCE criterion is computed for logging only
        loss = args.alpha * metric_val + args.beta * reg_val
        loss.backward()
        optimizer.step()

        if iter % args.log_interval == 0:
            print('Train Epoch: {} [{}/{}]\tBCE: {:.4f}\tLoss: {}*{:.4f} + {}*{:.8f} = {:.4f}'.format(
                epoch, iter, n_iter_train, crit_val.data, args.alpha,
                metric_val.data, args.beta, reg_val.data, loss.data))

            info = {
                'loss_train': loss.data,
                'crit_train': crit_val.data,
                'reg_train': reg_val.data
            }

            for tag, value in info.items():
                logger.scalar_summary(
                    tag, value, n_iter_train * (epoch - 1) + iter + 1)

        acc_loss.append(loss.data.cpu().numpy())
        acc_crit.append(crit_val.data.cpu().numpy())
        acc_reg.append(reg_val.data.cpu().numpy())

    return np.mean(acc_loss), np.mean(acc_crit), np.mean(acc_reg)
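
npair_loss is not defined on this page; it is called with the first embedding in the batch as the anchor, the second as the positive, and the remaining ones as negatives. A hedged sketch of one common N-pair formulation (Sohn, 2016), offered as an assumption about what such a function might compute, not as the actual implementation:

import torch

def npair_loss_sketch(anchor, positive, negatives):
    # anchor, positive: (1, d); negatives: (n, d)
    # log(1 + sum_j exp(anchor . neg_j - anchor . pos))
    pos_sim = (anchor * positive).sum(dim=1)      # (1,)
    neg_sim = anchor @ negatives.t()              # (1, n)
    return torch.log1p(torch.exp(neg_sim - pos_sim.unsqueeze(1)).sum(dim=1)).mean()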
Example #3
def train():
    model.train()

    acc_loss, acc_crit, acc_reg = [], [], []

    for iter, (data, target, _) in enumerate(dl_train):
        target = torch.clamp(target[:, aus], 0, 1)
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data).float(), Variable(target).float()

        optimizer.zero_grad()
        pred, mu, logvar = model(data)
        crit_val = bce_loss(pred, target)
        reg_val = kld(mu, logvar) / len(data)

        # BCE criterion plus a beta-weighted KL regularizer
        loss = crit_val + beta * reg_val
        loss.backward()
        optimizer.step()

        if iter % args.log_interval == 0:
            print('Train Epoch: {} [{}/{}]\tLoss: {:.4f} + {}*{:.8f} = {:.4f}'.
                  format(epoch, iter, n_iter_train, crit_val.data, beta,
                         reg_val.data, loss.data))

            info = {
                'loss_train': loss.data,
                'crit_train': crit_val.data,
                'reg_train': reg_val.data
            }

            for tag, value in info.items():
                logger.scalar_summary(tag, value,
                                      n_iter_train * (epoch - 1) + iter + 1)

        acc_loss.append(loss.data.cpu().numpy())
        acc_crit.append(crit_val.data.cpu().numpy())
        acc_reg.append(reg_val.data.cpu().numpy())

    return np.mean(acc_loss), np.mean(acc_crit), np.mean(acc_reg)
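
kld is likewise not shown here. Since the result is divided by the batch size, a sum-reduced KL divergence between the encoder distribution and a unit Gaussian is a reasonable guess; a minimal sketch under that assumption:

import torch

def kld_sketch(mu, logvar):
    # KL(N(mu, sigma^2) || N(0, I)), summed over the batch and latent dimensions
    return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())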
Example #4
def test(model, data_loader, params):
    # NOTE: the test loss only tracks the BCE; it is not the full loss used during training.
    # The test loss is the -log() of the predicted probability of the correct class; lower is better.
    model.eval()
    loss_avg = ut.AverageMeter()

    inds = torch.arange(params['batch_size']).to(params['device'])
    with torch.no_grad():

        for loc_feat, loc_class in data_loader:
            '''
            loc_feat: (batch_size, input_feat_dim)
            loc_class: (batch_size)
            '''
            # loc_pred: (batch_size, num_classes)
            loc_pred = model(loc_feat)
            # index out each example's predicted probability for its true class
            # pos_loss: (batch_size)
            pos_loss = lo.bce_loss(loc_pred[inds[:loc_feat.shape[0]],
                                            loc_class])
            loss = pos_loss.mean()

            loss_avg.update(loss.item(), loc_feat.shape[0])

    print('Test loss   : {:.4f}'.format(loss_avg.avg))
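
lo.bce_loss here receives only the probabilities that the model assigns to the correct classes, and the note above describes the result as the -log() of those probabilities. A hedged sketch consistent with that description (the function name and clamp epsilon are assumptions, not taken from the library):

import torch

def neg_log_prob(p, eps=1e-5):
    # -log of the predicted probability of the correct class; lower is better
    return -torch.log(p.clamp(min=eps))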
Example #5
def test():
    model.eval()
    f1s, acc_loss, acc_crit, acc_reg = [], [], [], []
    for i, dl_test_pose in enumerate(dl_test):
        targets, preds = [], []
        print(
            '-----------------------------------Evaluating POSE {} ------------------------- '
            .format(poses[i]))
        for iter, (data, target, _) in enumerate(dl_test_pose):
            target = torch.clamp(target[:, aus], 0, 1)
            data, target = data.cuda(), target.cuda()
            data, target = Variable(data).float(), Variable(target).float()

            with torch.no_grad():
                pred, mu, logvar = model(data)

                pred = F.sigmoid(pred)
                crit_val = bce_loss(pred, target)
                reg_val = kld(mu, logvar) / len(data)
                loss = crit_val + beta * reg_val

                acc_crit.append(crit_val.data.cpu().numpy())
                acc_reg.append(reg_val.data.cpu().numpy())
                acc_loss.append(loss.data.cpu().numpy())

                # move predictions to the CPU so they can be concatenated with numpy below
                preds.append(pred.data.cpu().numpy())
                targets.append(target.data.cpu().numpy())

        preds = np.asarray(np.concatenate(preds))
        print('preds min:{}, max:{}, mean:{}'.format(preds.min(), preds.max(),
                                                     np.mean(preds)))
        targets = np.clip(np.rint(np.concatenate(targets)), 0,
                          1).astype(np.uint8)
        ''' Evaluate model per pose'''
        f1_pose = []
        for t in eval_thresholds:
            preds_f = np.copy(preds)
            preds_f[np.where(preds_f < t)] = 0
            preds_f[np.where(preds_f >= t)] = 1

            preds_f = np.reshape(preds_f, (-1, n_classes))

            if t == 0.5:
                print('--------EVAL PRED------ t = {}'.format(t))
                _, _, f1, _, _ = evaluate_model(targets, preds_f, verbose=True)
            else:
                _, _, f1, _, _ = evaluate_model(targets,
                                                preds_f,
                                                verbose=False)

            f1_pose.append(f1)

        f1s.append(f1_pose)
    ''' Log validation loss '''
    info = {
        'loss_test': np.mean(acc_loss),
        'crit_test': np.mean(acc_crit),
        'reg_test': np.mean(acc_reg)
    }

    for tag, value in info.items():
        logger.scalar_summary(tag, value, epoch)
    ''' Log F1 per threshold'''
    f1s = np.mean(f1s, axis=0)
    for i, t in enumerate(eval_thresholds):
        info = {'f1_val_t_' + str(t): f1s[i]}

        for tag, value in info.items():
            logger.scalar_summary(tag, value, epoch)

    return np.mean(acc_loss), np.mean(acc_crit), np.mean(acc_reg), f1s
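
evaluate_model is not part of this page. The threshold sweep above binarises the sigmoid outputs at each threshold t and scores them against the binary targets; a hedged sketch of that kind of evaluation with scikit-learn (the macro averaging and zero_division handling are assumptions):

import numpy as np
from sklearn.metrics import f1_score

def f1_at_threshold(targets, probs, t):
    # binarise multi-label probabilities at threshold t and compute macro F1
    preds = (probs >= t).astype(np.uint8)
    return f1_score(targets, preds, average='macro', zero_division=0)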