def inicializar_pointnet():
    print("Loading 3D Object Classification Model:")
    start = time.time()
    global classifier
    classifier = PointNetCls(k=2)  # IMPORTANT: 2 is the number of classes; change this later.
    classifier.cuda()
    # classifier.load_state_dict(torch.load('/home/socialab/FCHarDNet/cls_model_40.pth'))
    classifier.load_state_dict(torch.load('/home/socialab/human_vision/FCHarDNet/cls_model_40.pth'))
    classifier.eval()
    end = time.time()
    print("  (time): " + str(end - start))
Example #2
            loss += feature_transform_regularizer(trans_feat) * 0.001
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' %
              (epoch, i, num_batch, loss.item(),
               correct.item() / float(opt.batchSize)))

        if i % 10 == 0:
            j, data = next(enumerate(testdataloader, 0))
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            pred, _, _ = classifier(points)
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                  (epoch, i, num_batch, blue('test'), loss.item(),
                   correct.item() / float(opt.batchSize)))

    torch.save(classifier.state_dict(),
               '%s/cls_model_%d.pth' % (opt.outf, epoch))

total_correct = 0
total_testset = 0
for i, data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
Example #3
        os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)))
    
    with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid:
        whole_data, whole_label = pkl.load(fid)

    if model_name == 'PointNet':
        from pointnet.model import PointNetCls
        model = PointNetCls(k=40, feature_transform=True, predict_logit=True)
        checkpoint = 'pointnet/cls_model_201.pth'
    else:
        print('No such model architecture')
        assert False

    model = model.to(device)
    model.load_state_dict(torch.load(checkpoint))
    model.eval()

    pytorch_utils.requires_grad_(model, False)
    
    print("Model name\t%s" % model_name)

    cnt = 0        # adv pointcloud successfully attacked
    CNT = 0        # clean pointcloud correctly classified
    
    for idx in tqdm(range(len(whole_data))):
        x = whole_data[idx]
        label = whole_label[idx]
        
        with torch.no_grad():
            y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device))
            y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten())
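
The snippet is cut off right after the clean prediction. A minimal sketch of how the two counters introduced above (CNT for correctly classified clean clouds, cnt for successful attacks) might be updated is shown below; the attack(...) call and its signature are assumptions, not part of the source:

        # Illustrative continuation (assumed): update the counters described above.
        if y_pred_idx == label:
            CNT += 1                                    # clean point cloud classified correctly
            x_adv = attack(model, x, label, eps, n, eps_iter)    # hypothetical attack helper
            with torch.no_grad():
                y_adv = model(torch.from_numpy(x_adv[np.newaxis, :, :]).float().to(device))
            if np.argmax(y_adv.detach().cpu().numpy().flatten()) != label:
                cnt += 1                                # the attack changed the prediction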
Example #4
        optimizer.step()  # gradient step: update the model parameters
        pred_choice = pred.data.max(1)[1]  # max(1) returns each row's maximum and its index; [1] keeps the index, i.e. the predicted class
        correct = pred_choice.eq(
            target.data).cpu().sum()  # count how many predictions match the targets
        print('[%d: %d/%d] train loss: %f accuracy: %f' %
              (epoch, i, num_batch, loss.item(),
               correct.item() / float(opt.batchSize)))

        # run a quick evaluation every 10 batches
        if i % 10 == 0:
            j, data = next(enumerate(testdataloader, 0))
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()  # eval mode: freezes BatchNorm and dropout
            pred, _, _ = classifier(points)
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                  (epoch, i, num_batch, blue('test'), loss.item(),
                   correct.item() / float(opt.batchSize)))

    torch.save(classifier.state_dict(),
               '%s/cls_model_%d.pth' % (opt.outf, epoch))

total_correct = 0
total_testset = 0
for i, data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
Example #5

    end_time = time.time()
    print("classification cost: {:.1f} ms".format(
        (end_time - start_time) * 1000))
    print('-------------------------------------')


def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('obj_classify', anonymous=True)
    rospy.Subscriber("fused_lidar", pc2, callback)
    global pub
    pub = rospy.Publisher("classification", pc2, queue_size=20)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()


if __name__ == '__main__':
    # load the network
    classifier = PointNetCls(k=num_classes)  # the trained model uses 16 classes
    classifier.cuda()
    classifier.load_state_dict(torch.load(path_model))
    classifier.eval()  # evaluation mode

    pub = None
    listener()
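
Only the tail of the callback (the end_time / cost print further above) survives in this snippet. A minimal sketch of how the callback registered on fused_lidar could look is below; the point_cloud2 decoding and the republish on the classification topic are assumptions, not the author's code:

import numpy as np
import torch
from sensor_msgs import point_cloud2

def callback(msg):
    # Illustrative sketch (assumed): decode the cloud, classify it, republish the message.
    start_time = time.time()
    xyz = np.array(list(point_cloud2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True)),
                   dtype=np.float32)
    pts = torch.from_numpy(xyz).unsqueeze(0).transpose(2, 1).cuda()   # (1, 3, N) layout for PointNetCls
    with torch.no_grad():
        pred, _, _ = classifier(pts)
    print("predicted class:", pred.data.max(1)[1].item())
    pub.publish(msg)
    # ...followed by the end_time / cost print shown earlier in this example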
Example #6
def our_main():
    from utils.show3d_balls import showpoints
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batchSize', type=int, default=32, help='input batch size')
    parser.add_argument(
        '--num_points', type=int, default=2000, help='number of points sampled per cloud')
    parser.add_argument(
        '--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument(
        '--nepoch', type=int, default=250, help='number of epochs to train for')
    parser.add_argument('--outf', type=str, default='cls', help='output folder')
    parser.add_argument('--model', type=str, default='', help='model path')
    parser.add_argument('--dataset', type=str, required=True, help="dataset path")
    parser.add_argument('--dataset_type', type=str, default='shapenet', help="dataset type shapenet|modelnet40")
    parser.add_argument('--feature_transform', action='store_true', help="use feature transform")

    opt = parser.parse_args()
    print(opt)

    blue = lambda x: '\033[94m' + x + '\033[0m'

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if opt.dataset_type == 'shapenet':
        dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            npoints=opt.num_points)

        test_dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    elif opt.dataset_type == 'modelnet40':
        dataset = ModelNetDataset(
            root=opt.dataset,
            npoints=opt.num_points,
            split='trainval')

        test_dataset = ModelNetDataset(
            root=opt.dataset,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    else:
        exit('wrong dataset type')


    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batchSize,
        shuffle=True,
        num_workers=int(opt.workers))

    testdataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batchSize,
            shuffle=True,
            num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))
    num_classes = len(dataset.classes)
    print('classes', num_classes)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    classifier = PointNetCls(k=num_classes, feature_transform=opt.feature_transform)

    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))


    optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    classifier.cuda()

    num_batch = len(dataset) / opt.batchSize

    ## python train_classification.py --dataset ../dataset --nepoch=4 --dataset_type  shapenet
    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            points, target = data
            target = target[:, 0]
            showpoints(points[0].numpy())  # visualize the first point cloud of the batch
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, trans, trans_feat = classifier(points)
            loss = F.nll_loss(pred, target)
            if opt.feature_transform:
                loss += feature_transform_regularizer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

            if i % 10 == 0:
                j, data = next(enumerate(testdataloader, 0))
                points, target = data
                target = target[:, 0]
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                classifier = classifier.eval()
                pred, _, _ = classifier(points)
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize)))

        scheduler.step()  # step the LR scheduler once per epoch, after the optimizer updates (PyTorch's recommended ordering)
        torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))

    total_correct = 0
    total_testset = 0
    for i,data in tqdm(enumerate(testdataloader, 0)):
        points, target = data
        target = target[:, 0]
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        classifier = classifier.eval()
        pred, _, _ = classifier(points)
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        total_correct += correct.item()
        total_testset += points.size()[0]

    print("final accuracy {}".format(total_correct / float(total_testset)))
Example #7
        if opt.feature_transform:
            loss += feature_transform_regularizer(trans_feat) * 0.001
        loss.backward()                                 # backpropagate the mini-batch loss through the model parameters
        optimizer.step()                                # one optimizer step per batch: update the weights and biases
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum().item()
        # n += target.shape[0]
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct / float(opt.batchSize)))

        if i % 10 == 0:         # validate every 10 batches, i.e. every 10 * batchSize point clouds
            j, data = next(enumerate(testdataloader, 0))
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()   # move the batch to the GPU
            classifier = classifier.eval()                  # set the model to evaluation mode
            pred, _, _ = classifier(points)
            # print(pred.shape)                             # torch.Size([32, 16]): 32 input clouds give a 32 x 16 matrix, one row of 16 class scores per cloud
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]               # index of the maximum along each row
            correct = pred_choice.eq(target.data).cpu().sum().item()
            print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss.item(), correct/float(opt.batchSize)))
    # print('n = ', n)
    torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))      # save a checkpoint after every epoch


total_correct = 0
total_testset = 0
for i,data in tqdm(enumerate(testdataloader, 0)):           # iterate over the test loader, with the batch index starting at 0
    points, target = data
    target = target[:, 0]