def inicializar_pointnet():
    
    print("Loading 3D Object Classification Model:")
    start = time.time()
    global classifier
    classifier = PointNetCls(k=2)  # IMPORTANT: 2 is the number of classes; change this later.
    classifier.cuda()
    #classifier.load_state_dict(torch.load('/home/socialab/FCHarDNet/cls_model_40.pth'))
    classifier.load_state_dict(torch.load('/home/socialab/human_vision/FCHarDNet/cls_model_40.pth'))    
    classifier.eval()
    end = time.time()
    print("  (time): " + str(end-start))
Example No. 2
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes,
                         feature_transform=opt.feature_transform)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    scheduler.step()
    for i, data in enumerate(dataloader, 0):
        points, target = data
        target = target[:, 0]
Example No. 3
        attack = (adversarial_untargeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter})
    elif adv == 'JGBA_sw':
        attack = (adversarial_untargeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter})

    attack_fn = attack[0]
    attack_param = attack[1]

    save_dir = os.path.join('save', model_name, adv + '-' + str(eps) + '-' + str(int(n)) + '-' + str(eps_iter))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    
    with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid:
        whole_data, whole_label = pkl.load(fid)

    if model_name == 'PointNet':
        from pointnet.model import PointNetCls
        model = PointNetCls(k=40, feature_transform=True, predict_logit=True)
        checkpoint = 'pointnet/cls_model_201.pth'
    else:
        raise ValueError('No such model architecture: %s' % model_name)

    model = model.to(device)
    model.load_state_dict(torch.load(checkpoint))
    model.eval()

    pytorch_utils.requires_grad_(model, False)
    
    print("Model name\t%s" % model_name)

    cnt = 0        # adv pointcloud successfully attacked
    CNT = 0        # clean pointcloud correctly classified
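The excerpt cuts off after the counters. A hypothetical continuation of the evaluation loop they imply is sketched below; the attack_fn call signature, the (num_samples, N, 3) layout of whole_data, and the raw-logit return implied by predict_logit=True are all assumptions, not taken from the source.

    # Hypothetical continuation sketch (signatures and shapes are assumptions):
    for data, label in zip(whole_data, whole_label):
        points = torch.from_numpy(data[None, ...]).float().to(device)  # (1, N, 3)
        if model(points.transpose(2, 1)).argmax(dim=1).item() != label:
            continue                                  # skip clean misclassifications
        CNT += 1                                      # clean point cloud correctly classified
        adv_points = attack_fn(model, points, label, **attack_param)
        if model(adv_points.transpose(2, 1)).argmax(dim=1).item() != label:
            cnt += 1                                  # the attack succeeded
    print("Success rate\t%.4f" % (cnt / float(CNT)))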
opt = parser.parse_args()
print(opt)

test_dataset = ShapeNetDataset(
    root='../shapenetcore_partanno_segmentation_benchmark_v0',
    split='test',
    classification=True,
    npoints=opt.num_points,
    data_augmentation=False)

testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=32,
                                             shuffle=True)

classifier = PointNetCls(k=len(test_dataset.classes))
classifier.cuda()
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

for i, data in enumerate(testdataloader, 0):
    start_time = time.time()  # start the per-batch timer read below
    points, target = data
    points, target = Variable(points), Variable(target[:, 0])  # Variable is a legacy no-op wrapper in modern PyTorch
    print(points.shape)
    points = points.transpose(2, 1)
    print('---------')
    print(target)
    points, target = points.cuda(), target.cuda()
    pred, _, _ = classifier(points)
    loss = F.nll_loss(pred, target)
    print(torch.exp(pred))
    end_time = time.time()
    print("classification~ cost:{:.1f}ms ".format(
        (end_time - start_time) * 1000))
    print('-------------------------------------')


def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('obj_classify', anonymous=True)
    rospy.Subscriber("fused_lidar", pc2, callback)
    global pub
    pub = rospy.Publisher("classification", pc2, queue_size=20)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()


if __name__ == '__main__':
    # Load the network
    classifier = PointNetCls(k=num_classes)  # the trained model covers 16 classes
    classifier.cuda()
    classifier.load_state_dict(torch.load(path_model))
    classifier.eval()  # evaluation mode

    pub = None
    listener()
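The listener above subscribes with a callback the excerpt never shows. A hypothetical sketch, assuming pc2 aliases sensor_msgs.msg.PointCloud2, that rospy is already imported, and that the classifier and pub globals come from __main__:

import numpy as np
import torch
import sensor_msgs.point_cloud2 as point_cloud2

def callback(msg):
    # Gather the incoming cloud into an (N, 3) array; the x/y/z field names are an assumption.
    pts = np.array(list(point_cloud2.read_points(
        msg, field_names=('x', 'y', 'z'), skip_nans=True)), dtype=np.float32)
    points = torch.from_numpy(pts).unsqueeze(0).transpose(2, 1).cuda()
    with torch.no_grad():
        pred, _, _ = classifier(points)
    rospy.loginfo('predicted class: %d', pred.argmax(dim=1).item())
    pub.publish(msg)  # republish the cloud on the "classification" topic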
Example No. 6
def our_main():
    from utils.show3d_balls import showpoints
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batchSize', type=int, default=32, help='input batch size')
    parser.add_argument(
        '--num_points', type=int, default=2000, help='number of points per point cloud')
    parser.add_argument(
        '--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument(
        '--nepoch', type=int, default=250, help='number of epochs to train for')
    parser.add_argument('--outf', type=str, default='cls', help='output folder')
    parser.add_argument('--model', type=str, default='', help='model path')
    parser.add_argument('--dataset', type=str, required=True, help="dataset path")
    parser.add_argument('--dataset_type', type=str, default='shapenet', help="dataset type shapenet|modelnet40")
    parser.add_argument('--feature_transform', action='store_true', help="use feature transform")

    opt = parser.parse_args()
    print(opt)

    blue = lambda x: '\033[94m' + x + '\033[0m'

    opt.manualSeed = random.randint(1, 10000)  # pick a random seed and fix it below
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if opt.dataset_type == 'shapenet':
        dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            npoints=opt.num_points)

        test_dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    elif opt.dataset_type == 'modelnet40':
        dataset = ModelNetDataset(
            root=opt.dataset,
            npoints=opt.num_points,
            split='trainval')

        test_dataset = ModelNetDataset(
            root=opt.dataset,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    else:
        exit('wrong dataset type')


    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batchSize,
        shuffle=True,
        num_workers=int(opt.workers))

    testdataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batchSize,
            shuffle=True,
            num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))
    num_classes = len(dataset.classes)
    print('classes', num_classes)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    classifier = PointNetCls(k=num_classes, feature_transform=opt.feature_transform)

    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))


    optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    classifier.cuda()

    num_batch = len(dataset) / opt.batchSize

    ## python train_classification.py --dataset ../dataset --nepoch=4 --dataset_type  shapenet
    for epoch in range(opt.nepoch):
        scheduler.step()  # note: PyTorch >= 1.1 expects scheduler.step() after the epoch's optimizer steps
        for i, data in enumerate(dataloader, 0):
            points, target = data
            target = target[:, 0]
            showpoints(points[0].numpy())
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, trans, trans_feat = classifier(points)
            loss = F.nll_loss(pred, target)
            if opt.feature_transform:
                loss += feature_transform_regularizer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

            if i % 10 == 0:
                j, data = next(enumerate(testdataloader, 0))  # fetch one (shuffled) test batch
                points, target = data
                target = target[:, 0]
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                classifier = classifier.eval()
                pred, _, _ = classifier(points)
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize)))

        torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))

    total_correct = 0
    total_testset = 0
    for i,data in tqdm(enumerate(testdataloader, 0)):
        points, target = data
        target = target[:, 0]
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        classifier = classifier.eval()
        pred, _, _ = classifier(points)
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        total_correct += correct.item()
        total_testset += points.size()[0]

    print("final accuracy {}".format(total_correct / float(total_testset)))
Example No. 7
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

if opt.model_num == 'all':
    print('Compute all model...')
    acc_list = []
    for i in range(250):
        print('model %d' % i)
        model_num = i

        classifier = PointNetCls(k=num_classes,
                                 feature_transform=opt.feature_transform)

        model_path = 'trained/cls/cls_model_' + str(model_num) + '.pth'
        classifier.load_state_dict(torch.load(model_path))

        optimizer = optim.Adam(classifier.parameters(),
                               lr=0.001,
                               betas=(0.9, 0.999))
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=20,
                                              gamma=0.5)
        classifier.cuda()

        total_correct = 0
        total_testset = 0
        for i, data in tqdm(enumerate(testdataloader, 0)):
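The excerpt is truncated inside the per-checkpoint test loop. A hedged continuation, mirroring the evaluation loop of Example No. 6 and feeding the acc_list declared above, might read:

            # Hypothetical continuation, mirroring Example No. 6's test loop:
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            pred, _, _ = classifier(points)
            pred_choice = pred.data.max(1)[1]
            total_correct += pred_choice.eq(target.data).cpu().sum().item()
            total_testset += points.size()[0]
        acc_list.append(total_correct / float(total_testset))  # accuracy of checkpoint model_num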
Example No. 8
                                   data_augmentation=False)
elif opt.dataset_type == "hdf5_modelnet40":
    test_dataset = HDF5_ModelNetDataset(root=opt.dataset,
                                        split='test',
                                        npoints=opt.num_points,
                                        data_augmentation=False)
else:
    exit('wrong dataset_type')

print('{}: number of test examples:{}'.format(opt.dataset_type,
                                              len(test_dataset)))

testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=1)  #, shuffle=True)

classifier = PointNetCls(k=len(test_dataset.classes),
                         feature_transform=opt.feature_transform)
classifier.cuda()
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

class_id2name = {v: k for k, v in test_dataset.cat.items()}  # class index -> class name
failure_cases = dict()
count = 0
for i, data in enumerate(testdataloader, 0):
    points, target, filename = data
    points, target = Variable(points), Variable(target[:, 0])
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
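The loop is truncated here. A hypothetical continuation that fills the failure_cases dict set up above (the per-class bookkeeping is an assumption suggested by class_id2name):

    # Hypothetical continuation (batch_size is 1, so .item() is safe):
    pred, _, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    if pred_choice.item() != target.item():
        true_name = class_id2name[target.item()]
        failure_cases.setdefault(true_name, []).append(filename[0])
        count += 1
print('total failure cases:', count)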
Example No. 9
# X holds the new test samples; label holds the ground-truth labels of the 3D models read in batches of 32 (see Section 3.5.1 for details)


print(len(dataset), len(test_dataset))  # 12137 training samples and 2874 test samples, served in batches of 32
num_classes = len(dataset.classes)      # the training set contains 16 object classes
print('classes', num_classes)           # classes 16


try:
    os.makedirs(opt.outf)
except OSError:
    pass


# 3. Define the network for the classification task; see class PointNetCls(nn.Module) in model.py
classifier = PointNetCls(k=num_classes, feature_transform=opt.feature_transform)
print(classifier)   # printing the model shows the detailed network structure

if opt.model != '':     # load a checkpoint if one was given
    classifier.load_state_dict(torch.load(opt.model))


# 4. Define the optimization algorithm; optimizer is an instance of the optim class, and scheduler adjusts the learning rate
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))       # Adam optimizer; betas are its hyperparameters
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)           # the learning rate is halved after every step_size (20) calls to scheduler.step()
classifier.cuda()   # train the model on the GPU

print(len(dataset)) # 12137 training samples in total
num_batch = len(dataset) / opt.batchSize    # 12137 / 32 ≈ 379 batches per epoch
print('num_batch: %d' % (num_batch))
Example No. 10
import torch.nn.functional as F
import numpy as np
import time


parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default = '',  help='model path')
parser.add_argument('--num_points', type=int, default=2500, help='number of points per point cloud')
parser.add_argument('--folder', type=str, default='', help='folder with input files')

opt = parser.parse_args()
print(opt)

# Create a PointNet classifier, but only to extract features
classifier = PointNetCls(k=16, only_feature=True, feature_transform=True)
classifier.cuda()

# Load a pre-trained model
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

# There are 200 models
for model in range(1, 201):
    desc = []
    start_time = time.time()

    # Each model has 200 patches
    for patch in range(200):
        # Read the point cloud
        filepath = opt.folder + '/' + str(model) + '_' + str(patch) + '.xyz'
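The patch loop is truncated here. A hedged continuation, assuming np.loadtxt can parse the .xyz files and that only_feature=True makes the classifier return a single global-feature tensor:

        # Hypothetical continuation (file format and return value are assumptions):
        pts = np.loadtxt(filepath).astype(np.float32)                 # (N, 3) points
        points = torch.from_numpy(pts).unsqueeze(0).transpose(2, 1).cuda()
        with torch.no_grad():
            feat = classifier(points)                                 # global feature
        desc.append(feat.squeeze(0).cpu().numpy())
    print('model %d done in %.1fs' % (model, time.time() - start_time))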
Example No. 11
        test_dataset,
        batch_size=opt.batchSize,
        shuffle=True,
        drop_last=True,
        num_workers=int(opt.workers))

print(len(dataset), len(test_dataset)) # 12137 2874
num_classes = len(dataset.classes)
print('classes', num_classes) #classes 16

try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetCls(k=num_classes, feature_transform=opt.feature_transform) # instantiate the classifier via PointNetCls from model.py

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model)) # load the pre-trained model if one is given


optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999)) # optimizer; could be swapped for SGD or similar
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize # number of batches per epoch

for epoch in range(opt.nepoch): # within one epoch
    scheduler.step()
    for i, data in enumerate(dataloader, 0):
        points, target = data # read the point clouds and labels to train on
Example No. 12
                                             num_workers=int(opt.workers))

    testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=opt.batchSize,
                                                 shuffle=True,
                                                 num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))
    num_classes = len(dataset.classes)
    print('classes', num_classes)
    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    classifier = PointNetCls(k=num_classes,
                             feature_transform=opt.feature_transform)
    if opt.gpu != -1:
        classifier = torch.nn.DataParallel(classifier).to(device)
    else:
        classifier.to(device)

    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))

    optimizer = optim.Adam(classifier.parameters(),
                           lr=0.001,
                           betas=(0.9, 0.999))
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)

    num_batch = len(dataset) / opt.batchSize