Exemplo n.º 1
0
def evaluate(model_test):
    """Run one full pass over the evaluation split and report accuracy.

    Puts ``model_test`` into eval mode for the pass, restores train mode
    afterwards, prints the overall correct rate, and appends it to the
    global log file.

    Relies on module globals: ``args``, ``is_GPU``, ``pts_cls_dataset``,
    ``pts_collate``, ``logname``.

    Args:
        model_test: the classification network to evaluate; its forward
            returns ``(pred, trans)`` with ``pred`` of shape [N, classes].
    """
    model_test.eval()
    total_correct = 0

    data_eval = pts_cls_dataset(datalist_path=args.data_eval,
                                data_argument=False,
                                num_points=args.num_pts,
                                use_extra_feature=args.normal)
    eval_loader = torch.utils.data.DataLoader(
        data_eval, batch_size=4, shuffle=True, collate_fn=pts_collate)
    print("dataset size:", len(eval_loader.dataset))

    for batch_idx, (pts, label) in enumerate(eval_loader):
        if is_GPU:
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
        # BUG FIX: forward through the model that was passed in, not the
        # module-global `net` — otherwise the eval()/train() toggling above
        # applies to one model while a different one is scored.
        pred, trans = model_test(pts)

        _, pred_index = torch.max(pred, dim=1)
        num_correct = (pred_index.eq(label)).data.cpu().sum().item()
        total_correct += num_correct

    print('the average correct rate:{}'.format(
        total_correct * 1.0 / (len(eval_loader.dataset))))

    model_test.train()
    with open(logname, 'a') as f:
        f.write('\nthe evaluate average accuracy:{}'.format(
            total_correct * 1.0 / (len(eval_loader.dataset))))
Exemplo n.º 2
0
def evaluate(model_test):
    """Evaluate ``model_test`` on the eval split with per-class accuracy.

    Computes the classification loss plus the feature-transform
    regularizer, overall accuracy, and per-class accuracy over
    ``NUM_CLASSES`` shape categories, printing progress per batch.

    Relies on module globals: ``args``, ``is_GPU``, ``pts_cls_dataset``,
    ``pts_collate``, ``critenrion``, ``NUM_CLASSES``, ``SHAPE_NAMES``.

    Args:
        model_test: network whose forward returns ``(pred, trans)`` where
            ``trans`` is the [N, K, K] feature-alignment matrix.
    """
    model_test.eval()
    total_correct = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    data_eval = pts_cls_dataset(datalist_path=args.data_eval,
                                data_argument=False,
                                num_points=args.num_pts,
                                use_extra_feature=args.normal)
    eval_loader = torch.utils.data.DataLoader(
        data_eval, batch_size=args.batch_size, shuffle=True,
        collate_fn=pts_collate)
    print("dataset size:", len(eval_loader.dataset))

    for batch_idx, (pts, label) in enumerate(eval_loader):
        if is_GPU:
            pts = Variable(pts.cuda())
            label = Variable(label.cuda())
        else:
            pts = Variable(pts)
            label = Variable(label)
        # BUG FIX: forward through the model under evaluation, not the
        # module-global training `net`.
        pred, trans = model_test(pts)

        loss = critenrion(pred, label)
        # Orthogonality regularizer: || T·Tᵀ - I ||² on the KxK transform.
        K = trans.size(1)
        reg_loss = torch.bmm(trans, trans.transpose(2, 1))
        if is_GPU:
            iden = Variable(torch.eye(K).cuda())
        else:
            iden = Variable(torch.eye(K))
        reg_loss -= iden
        reg_loss = reg_loss * reg_loss

        loss = loss + reg_loss.sum()
        loss_sum += loss.item()

        _, pred_index = torch.max(pred, dim=1)
        num_correct = (pred_index.eq(label)).data.cpu().sum().item()
        total_correct += num_correct

        for idx, l in enumerate(label):
            cls_id = int(l)
            total_seen_class[cls_id] += 1
            # BUG FIX: convert the 0-d comparison tensor to a plain int so
            # the per-class counters stay Python ints (np.array below would
            # otherwise be fed tensors).
            total_correct_class[cls_id] += int(
                pred_index[idx].eq(label[idx]).item())

        print('finish {}/{}'.format(batch_idx * args.batch_size,
                                    len(eval_loader.dataset)))

    # BUG FIX: np.float was removed in NumPy >= 1.24; use np.float64.
    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        print('%10s:\t%0.3f' % (name, class_accuracies[i]))

    print('eval accuracy:{}'.format(
        total_correct * 1.0 / (len(eval_loader.dataset))))
    print('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))))
Exemplo n.º 3
0
else:
    resume = args.resume

logname = os.path.join(LOG_DIR, 'log.txt')
optfile = os.path.join(LOG_DIR, 'opt.txt')

# Dump every parsed command-line option to opt.txt for reproducibility.
with open(optfile, 'wt') as opt_f:
    opt_f.write('------------ Options -------------\n')
    for k, v in sorted(vars(args).items()):
        opt_f.write('%s: %s\n' % (str(k), str(v)))
    opt_f.write('-------------- End ----------------\n')

if is_GPU:
    torch.cuda.set_device(args.gpu)


my_dataset = pts_cls_dataset(datalist_path=args.data,
                             num_points=args.num_pts,
                             use_extra_feature=args.normal)
data_loader = torch.utils.data.DataLoader(my_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=True,
                                          num_workers=4,
                                          collate_fn=pts_collate)

# Each point carries xyz, plus three normal channels when requested.
pts_featdim = 6 if args.normal else 3

net = PointNet_cls(num_pts=args.num_pts, feat_dim=pts_featdim)
if is_GPU:
    net = net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.999))
critenrion = nn.NLLLoss()  # name kept as spelled: referenced elsewhere in the file
def save_checkpoint(epoch,model,num_iter):
    torch.save({
Exemplo n.º 4
0
        x = self.conv4(x)
        x = self.conv5(x)  ##[N,1024,P]

        feature_vector = torch.squeeze(self.max_pool(x))  ##[N,1024]
        scores = self.classifer(feature_vector)  ##[N,40]
        pred = F.log_softmax(scores, dim=-1)

        return pred, trans2


if __name__ == '__main__':
    # Smoke test: pull batches from the ModelNet test split and push them
    # through an untrained network on the CPU.
    from data_utils import pts_cls_dataset, pts_collate

    num_pts = 2048
    use_cuda = False  # hard-wired to the CPU path, as in the original

    my_dataset = pts_cls_dataset(
        datalist_path='/home/yuzhe/Downloads/3d_data/modelnet/test_files.txt',
        num_points=num_pts,
        use_extra_feature=True)
    data_loader = torch.utils.data.DataLoader(my_dataset,
                                              batch_size=2,
                                              shuffle=True,
                                              collate_fn=pts_collate)

    net = PointNet_cls(num_pts=num_pts, feat_dim=6)
    for batch_idx, (pts, label) in enumerate(data_loader):
        if use_cuda:
            pts, label = Variable(pts.cuda()), Variable(label.cuda())
        else:
            pts, label = Variable(pts), Variable(label)
        pred, _ = net(pts)