# Example #1
def loss_fn(output, label, batch_size, trans_feat, num_classes):
    """Compute NLL classification loss with optional transform regularization.

    Parameters
    ----------
    output : Tensor of log-probabilities; reshaped to (-1, num_classes).
    label : Tensor of class indices; flattened to align with output rows.
    batch_size : int — when > 0, the loss is scaled by it.
    trans_feat : feature-transform matrix handed to the regularizer.
    num_classes : int — number of output classes.

    NOTE(review): relies on module-level `opt` and
    `feature_transform_regularizer` defined elsewhere in this file.
    """
    loss = F.nll_loss(output.view(-1, num_classes), label.flatten())
    # Bug fix: the original tested/used the undefined name `B`; the unused
    # `batch_size` parameter was clearly the intended scaling factor.
    if batch_size > 0:
        loss *= batch_size
    if opt.feature_transform:
        # Penalize deviation of the learned feature transform (weight 0.001).
        loss += feature_transform_regularizer(trans_feat) * 0.001
    return loss
 def loss_fn(output, label, batch_size, trans_feat):
     """Compute NLL classification loss, optionally scaled and regularized.

     NOTE(review): `B` is not defined in this scope — it may be a global
     multiplier or the batch size; `output.view(B * batch_size, -1)` makes
     the intent ambiguous, so confirm before relying on this variant.
     `args` and `feature_transform_regularizer` are also module-level names
     defined elsewhere in the file.
     """
     if B > 0:
         # Flatten output to (B * batch_size, num_classes) rows, then scale
         # the resulting loss by B.
         loss = B * F.nll_loss(output.view(B * batch_size, -1), label)
     else:
         loss = F.nll_loss(output, label)
     if args.feature_transform:
         # Regularize the feature-transform matrix (weight 0.001).
         loss += feature_transform_regularizer(trans_feat) * 0.001
     return loss
    # --- one training epoch (interior of an enclosing function; `dataloader`,
    # `classifier`, `optimizer`, `opt`, `epoch`, `start_time` come from the
    # enclosing scope / module level) ---
    train_correct = 0    # running count of correct predictions this epoch
    total_trainset = 0   # running count of samples seen this epoch

    for i, data in enumerate(dataloader, 0):
        points, target = data
        # Labels arrive with a trailing dim; keep only the class column.
        target = target[:, 0]
        # Swap last two dims — presumably (B, N, C) -> (B, C, N) for a
        # channels-first point-cloud model; TODO confirm against the dataset.
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()

        optimizer.zero_grad()
        classifier = classifier.train()
        pred, trans, trans_feat = classifier(points)
        loss = F.nll_loss(pred, target)
        if opt.feature_transform:
            # Regularize the feature-transform matrix (weight 0.001).
            loss += feature_transform_regularizer(trans_feat) * 0.001
        loss.backward()
        optimizer.step()

        # argmax over class dimension -> predicted class index per sample
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        train_correct += correct.item()
        total_trainset += points.size()[0]

        #print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

    # Epoch summary: wall-clock time and training accuracy.
    print('[%d] time: %f' % (epoch, time.time() - start_time))
    print("train accuracy {}".format(train_correct / float(total_trainset)))

    # if i % 10 == 0:
    # j, data = next(enumerate(testdataloader, 0))