示例#1
0
def pred_one(sess, ops, pointcloud_data):
    """Classify one point cloud by averaging logits over rotated votes.

    The cloud is rotated `FLAGS.num_votes` times about the up axis, the
    network logits are summed, and the argmax of the sum is reported.
    Optionally shows the cloud when the module-level VISUALIZE flag is set.
    """
    votes = FLAGS.num_votes
    summed_logits = np.zeros((1, NUM_CLASSES))

    for vote in range(votes):
        # Evenly spaced rotation angles over a full revolution.
        angle = vote / float(votes) * np.pi * 2
        rotated = provider.rotate_point_cloud_by_angle(
            pointcloud_data, angle)

        logits = sess.run([ops['pred']], feed_dict={
            ops['pointclouds_pl']: rotated,
            ops['is_training_pl']: False,
        })[0]
        summed_logits += logits

        print("Predicted shape as: '{}' with rotation: {}".format(
            SHAPE_NAMES[np.argmax(logits)], np.degrees(angle)))

    print("Final prediction:", SHAPE_NAMES[np.argmax(summed_logits)])

    if VISUALIZE:
        from show3d_balls import showpoints

        showpoints(pointcloud_data[0])
 def show_result(self, pointcloud, label, gt_label=None):
     """Render a segmented point cloud with GT vs. predicted colours.

     pointcloud: (N, >=3) array; columns 0..2 are xyz.
     label: per-point predicted class indices used to look up colours.
     gt_label: optional per-point ground-truth indices; defaults to all
         zeros (class 0) when absent.
     """
     if gt_label is None:
         # No ground truth supplied: colour every point as class 0.
         gt_label = np.zeros(self.num_points,dtype=int)
     
     else:
         gt_label = gt_label.astype(int)
     
     # 5-colour HSV palette; keep only the RGB components.
     cmap = plt.cm.get_cmap("hsv", 5)
     cmap = np.array([cmap(i) for i in range(5)])[:,:3]
     gt = cmap[gt_label, :]
     pred = cmap[label, :]
     ps = pointcloud[:,0:3]
     show3d_balls.showpoints(ps, gt, pred, ballradius=3)
示例#3
0
def test(gripper_dir=None, gripper_name="robotiq_3f"):
    """Encode every gripper point cloud in `gripper_dir` through the
    autoencoder, save mean/max/min feature statistics, and visualise the
    reconstructions.

    Relies on module-level `sess` and the `*_tf` graph tensors.
    """
    # Collect the .npy point-cloud files for the requested gripper.
    in_gripper_file_list = [
        line for line in os.listdir(gripper_dir)
        if line.startswith(gripper_name)
    ]
    in_gripper_list = []
    for idx, env_i in enumerate(in_gripper_file_list):
        env_dir = os.path.join(gripper_dir, env_i)
        obj_pcs = np.load(env_dir)
        in_gripper_list.append(obj_pcs)

    in_gripper = np.array(in_gripper_list)
    # Autoencoder: the input doubles as its own reconstruction target.
    gt_gripper = in_gripper
    out_gripper, gripper_feat = sess.run([out_gripper_tf, gripper_feat_tf],
                                         feed_dict={
                                             in_gripper_tf: in_gripper,
                                             gt_gripper_tf: gt_gripper
                                         })

    print(gripper_feat.shape)
    # Aggregate the latent features over all samples.
    gripper_mean = np.mean(gripper_feat, axis=0)
    gripper_max = np.max(gripper_feat, axis=0)
    gripper_min = np.min(gripper_feat, axis=0)
    print(gripper_mean.shape)

    # Persist the statistics next to the input clouds.
    recon_dir = gripper_dir
    mean_feat_file = os.path.join(recon_dir, 'mean.npy')
    max_feat_file = os.path.join(recon_dir, 'max.npy')
    min_feat_file = os.path.join(recon_dir, 'min.npy')

    print(mean_feat_file)
    print(max_feat_file)
    print(min_feat_file)
    np.save(mean_feat_file, gripper_mean)
    np.save(max_feat_file, gripper_max)
    np.save(min_feat_file, gripper_min)

    if 1:  # visualisation: reconstruction (red) stacked over GT (green)
        for gj in range(len(in_gripper)):
            green = np.zeros((4096, 3))
            green[:2048, 0] = 255.0
            green[2048:, 1] = 255.0
            pred_gripper = np.copy(out_gripper[gj])

            gt__gripper = np.copy(gt_gripper[gj])
            # assumes each cloud has 2048 points -- TODO confirm
            gripper_two = np.zeros((4096, 3))
            gripper_two[:2048, :] = pred_gripper
            gripper_two[2048:, :] = gt__gripper
            showpoints(gripper_two, c_gt=green, waittime=50,
                       freezerot=False)  ### GRB
示例#4
0
def viz_pcl(ballradius=3):
    '''
    Visualize the input image, GT and predicted point cloud.

    Iterates the module-level `models`/`names` lists, shows the RGB render
    with cv2 and the point clouds with show3d_balls.
    '''
    for idx in range(n_plots):
        img_name, img_id = models[idx][0].split('_')

        # Load the gt and pred point clouds
        gt_pcl = np.load(join(pcl_data_dir, img_name,
                'pcl_1024_fps_trimesh.npy'))
        pcl = np.load(names[idx])[:,:3]
        pcl = remove_outliers(pcl)

        # Load and display input image
        ip_img = sc.imread(join(data_dir,
            img_name,'render_%s.png'%(img_id)))

        # RGB to BGR for cv2 display
        ip_img = np.flip(ip_img[:,:,:3], -1)
        cv2.imshow('', ip_img)

        # GT alone, prediction alone, then both side by side.
        show3d_balls.showpoints(gt_pcl, ballradius=ballradius)
        show3d_balls.showpoints(pcl, ballradius=ballradius)
        saveBool = show3d_balls.showtwopoints(gt_pcl, pcl, ballradius=ballradius)
示例#5
0
print("model %d/%d" % (idx, len(d)))  # pick one model, e.g. index 0 of 704
point, seg = d[idx]
print(point.size(), seg.size())  # number of points and seg label sizes
point_np = point.numpy()

# 10-colour HSV palette for per-part colouring; labels are 1-based.
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]

state_dict = torch.load(opt.model)  # load the trained model weights
classifier = PointNetDenseCls(k=state_dict['conv4.weight'].size()[0])
classifier.load_state_dict(state_dict)
classifier.eval()  # eval mode: freeze batchnorm/dropout

point = point.transpose(1, 0).contiguous()  # to channel-first (3, N) layout
point = Variable(point.view(
    1,
    point.size()[0],
    point.size()[1]))  # size()[0] is 3, size()[1] is 2500; view acts like a resize
pred, _, _ = classifier(point)  # run prediction
pred_choice = pred.data.max(2)[
    1]  # per-point class, e.g. tensor([[1, 0, 0, ..., 0, 0, 2]])
print(pred_choice)

#print(pred_choice.size()) #torch.size[1,2500]
pred_color = cmap[pred_choice.numpy()[0], :]  # colour each point by predicted class

#print(pred_color.shape)#torch.size[3,2500]
showpoints(point_np, gt, pred_color)  # render with the show3d_balls viewer
            # NOTE(review): tail of a __getitem__ whose head is outside this
            # view -- returns (points, normals, seg[, class]) tuples.
            if self.return_cls_label:
                return point_set, normal, seg, cls
            else:
                return point_set, normal, seg

    def __len__(self):
        # Dataset size is the number of datapath entries.
        return len(self.datapath)


if __name__ == '__main__':
    # Smoke test: fetch one trainval sample and visualise it.
    # FIX: Python-2-only `print x` statements converted to print() calls so
    # the script also runs under Python 3. (Multi-argument print() output
    # formatting assumes Python 3 as the target.)
    d = PartNormalDataset(root = '../data/shapenetcore_partanno_segmentation_benchmark_v0_normal', split='trainval', npoints=3000)
    print(len(d))

    i = 500
    ps, normal, seg = d[i]
    print(d.datapath[i])
    print(np.max(seg), np.min(seg))
    print(ps.shape, seg.shape, normal.shape)
    print(ps)
    print(normal)

    sys.path.append('../utils')
    import show3d_balls
    # Shift normals (assumed in [-1, 1] -- TODO confirm) to use as colours.
    show3d_balls.showpoints(ps, normal+1, ballradius=8)

    # Same dataset in classification mode.
    d = PartNormalDataset(root = '../data/shapenetcore_partanno_segmentation_benchmark_v0_normal', classification = True)
    print(len(d))
    ps, normal, cls = d[0]
    print(ps.shape, type(ps), cls.shape,type(cls))

示例#7
0
import numpy as np
from show3d_balls import showpoints

# Hard-coded dataset location -- adjust to the local modelnet30 paths.
data = np.load(
    "/data/lixin_backup/3DDLComparison/data/modelnet30/pc_train.npy")
label = np.load(
    "/data/lixin_backup/3DDLComparison/data/modelnet30/label_train.npy")

# Display every training cloud whose label is class 0.
for i in range(len(data)):
    if label[i] == 0:
        showpoints(data[i])
示例#8
0
sys.path.append('/home/s/pointnet.pytorch')

# points=np.loadtxt('./point_test.pts')
# NOTE: the two adjacent string literals concatenate to 'point_test.pts'.
points=np.loadtxt(''
                  'point_test.pts', dtype=np.float32)  # inference requires float32 input

print(points.shape)

# Visualisation: 10-colour HSV palette.
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
# gt = cmap[seg.numpy() - 1, :]

# Show the raw point cloud.
showpoints(points)

# Randomly resample to 2500 points (with replacement).
choice = np.random.choice(len(points), 2500, replace=True)
# print('choice:{}'.format(choice))
points = points[choice, :]
print('points[choice, :]:{}'.format(points))
point_np=points



# Load the trained segmentation model; k comes from the last conv layer.
state_dict = torch.load('seg/seg_model_Chair_1.pth')
classifier = PointNetDenseCls(k= state_dict['conv4.weight'].size()[0])
classifier.load_state_dict(state_dict)
classifier.eval()  # switch to evaluation mode
示例#9
0
            # NOTE(review): Python 2 fragment (print statements, xrange);
            # enclosing loop/function is outside this view.
            # Rescale both clouds, then evaluate Chamfer / EMD metrics.
            _gt_scaled, _pred_scaled = sess.run([gt_pcl_scaled, pred_pcl_scaled], 
                feed_dict={gt_pcl:batch_gt, pred_pcl:_pred_pcl})
            C,F,B,E = sess.run([chamfer_distance, dists_forward, 
                dists_backward, emd], 
                feed_dict={gt_pcl_scaled:_gt_scaled, 
                    pred_pcl_scaled:_pred_scaled})

            # visualize
            if args.visualize:
                # Rotate point clouds to align axes
                pr = rotate(_pred_scaled,-90,-90).eval()
                gt = rotate(_gt_scaled,-90,-90).eval()
                for b in xrange(BATCH_SIZE):
                    print 'Model:{} C:{:.6f} F:{:.6f} B:{:.6f} E:{:.6f}'.format(fids[b],C[b],F[b],B[b],E[b])
                    cv2.imshow('', batch_ip[b])
                    show3d_balls.showpoints(pr[b], ballradius=3)
                    show3d_balls.showpoints(gt[b], ballradius=3)
                    saveBool = show3d_balls.showtwopoints(gt[b], pr[b],
                            ballradius=args.ballradius)

            # screenshots and gifs
            elif args.save_screenshots or args.save_gifs:
                # Rotate point clouds to align axes
                pr = rotate(_pred_scaled,-90,-90).eval()
                gt = rotate(_gt_scaled,-90,-90).eval()
                for b in xrange(BATCH_SIZE):
                    save_screenshots(gt[b], pr[b], batch_ip[b], 
                           screenshot_dir, fids[b], args.eval_set, args)
                print 'done'

            # save metrics to csv

            # save metrics to csv
def fun(xyz1,xyz2,pts2):
    """Interpolate the features `pts2` (defined at coordinates xyz2) onto
    the coordinates xyz1 via inverse-distance-weighted 3-NN interpolation.

    Returns the interpolated array with the batch dimension squeezed out.
    """
    with tf.device('/cpu:0'):
        # Add a leading batch dimension for the TF ops.
        feats = tf.constant(np.expand_dims(pts2, 0))
        query = tf.constant(np.expand_dims(xyz1, 0))
        support = tf.constant(np.expand_dims(xyz2, 0))

        dist, idx = three_nn(query, support)
        dist = tf.maximum(dist, 1e-10)  # guard against division by zero
        inv = 1.0 / dist
        # Normalise the three inverse distances so the weights sum to 1,
        # broadcast across the three neighbours.
        norm = tf.tile(tf.reduce_sum(inv, axis=2, keep_dims=True), [1, 1, 3])
        print(norm)
        weight = inv / norm
        interpolated_points = three_interpolate(feats, idx, weight)

    with tf.Session('') as sess:
        _, out, _, _ = sess.run([query, interpolated_points, dist, weight])

    return out.squeeze()

# Interpolate pts2 onto the 100 query points, then display the support,
# query, and combined (104-point) clouds.
pts1 = fun(xyz1,xyz2,pts2)
all_pts = np.zeros((104,3))
all_pts[0:100,:] = pts1
all_pts[100:,:] = pts2
all_xyz = np.zeros((104,3))
all_xyz[0:100,:]=xyz1
all_xyz[100:,:]=xyz2
showpoints(xyz2, pts2, ballradius=8)
showpoints(xyz1, pts1, ballradius=8)
showpoints(all_xyz, all_pts, ballradius=8)
        # NOTE(review): tail of a __getitem__ whose head is outside this
        # view -- resamples via `choice` and returns (points, class|seg).
        point_set = point_set[choice, :]
        seg = seg[choice]
        if self.classification:
            return point_set, cls
        else:
            return point_set, seg

    def __len__(self):
        # Dataset size is the number of datapath entries.
        return len(self.datapath)


if __name__ == '__main__':
    # Smoke test: load one Chair sample, time the fetch, and visualise it.
    d = PartDataset(root = os.path.join(BASE_DIR, 'data/shapenetcore_partanno_segmentation_benchmark_v0'), class_choice = ['Chair'], split='trainval')
    print(len(d))
    import time
    tic = time.time()
    i = 100
    ps, seg = d[i]
    print(np.max(seg), np.min(seg))
    print(time.time() - tic)  # per-sample fetch latency
    print(ps.shape, type(ps), seg.shape,type(seg))
    sys.path.append('utils')
    import show3d_balls
    show3d_balls.showpoints(ps, ballradius=8)

    # Same dataset in classification mode.
    d = PartDataset(root = os.path.join(BASE_DIR, 'data/shapenetcore_partanno_segmentation_benchmark_v0'), classification = True)
    print(len(d))
    ps, cls = d[0]
    print(ps.shape, type(ps), cls.shape,type(cls))

示例#12
0
    data_augmentation=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))
point, seg = d[idx]
print(point.size(), seg.size())
point_np = point.numpy()

# 10-colour HSV palette; seg labels are 1-based, hence the -1.
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]

# Restore the trained dense-classification model; k is recovered from the
# final conv layer's weight shape.
state_dict = torch.load(opt.model)
classifier = PointNetDenseCls(k= state_dict['conv4.weight'].size()[0])
classifier.load_state_dict(state_dict)
classifier.eval()

point = point.transpose(1, 0).contiguous()  # to channel-first (3, N)

point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _, _ = classifier(point)
pred_choice = pred.data.max(2)[1]  # per-point argmax class
print(pred_choice)

#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]

#print(pred_color.shape)
showpoints(point_np, gt, pred_color)
def fun(xyz1,xyz2,pts2):
    """Interpolate the features `pts2` (defined at coordinates xyz2) onto
    the coordinates xyz1 via inverse-distance-weighted 3-NN interpolation.

    Returns the interpolated array with the batch dimension squeezed out.
    """
    with tf.device('/cpu:0'):
        # Add a leading batch dimension for the TF ops.
        points = tf.constant(np.expand_dims(pts2,0))
        xyz1 = tf.constant(np.expand_dims(xyz1,0))
        xyz2 = tf.constant(np.expand_dims(xyz2,0))
        dist, idx = three_nn(xyz1, xyz2)
        #weight = tf.ones_like(dist)/3.0
        dist = tf.maximum(dist, 1e-10)  # guard against division by zero
        # Normalise inverse distances so the three weights sum to 1.
        norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
        norm = tf.tile(norm, [1,1,3])
        # FIX: `print norm` is Python-2-only syntax and a SyntaxError under
        # Python 3; use the print() function instead.
        print(norm)
        weight = (1.0/dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session('') as sess:
        tmp,pts1,d,w = sess.run([xyz1, interpolated_points, dist, weight])
        #print w
        pts1 = pts1.squeeze()
    return pts1

# Interpolate pts2 onto the 100 query points, then display the support,
# query, and combined (104-point) clouds.
pts1 = fun(xyz1,xyz2,pts2)
all_pts = np.zeros((104,3))
all_pts[0:100,:] = pts1
all_pts[100:,:] = pts2
all_xyz = np.zeros((104,3))
all_xyz[0:100,:]=xyz1
all_xyz[100:,:]=xyz2
showpoints(xyz2, pts2, ballradius=8)
showpoints(xyz1, pts1, ballradius=8)
showpoints(all_xyz, all_pts, ballradius=8)
                                       max_num,
                                       unit_density=unit_density,
                                       keep_prob=keep_prob,
                                       sparse=sparse)
            cloud.append(cloud_point)
            labels.append(l)
            print i
        pickle.dump(cloud, fp)
        pickle.dump(labels, fp)


if __name__ == '__main__':
    # visualize one scene
    point_cloud, batch_label = gen_scene(1.5,
                                         1.5,
                                         3.0,
                                         0.2,
                                         10,
                                         20,
                                         unit_density=100000,
                                         keep_prob=0.9,
                                         sparse=False)
    # FIX: Python-2-only print statement converted to a print() call so the
    # script also runs under Python 3; the space-separated output is the same.
    print("len(point_cloud)", len(point_cloud), batch_label)
    # Colour each point by its integer label via the 3-colour lookup table.
    c_gt = np.zeros((batch_label.shape[0], 3))
    color_list = np.asarray([[64, 224, 208], [220, 20, 60], [173, 255, 47]])
    for i in range(batch_label.shape[0]):
        c_gt[i, :] = color_list[int(batch_label[i]), :]
    showpoints(point_cloud, c_gt=c_gt, normalizecolor=False)
    # generate dataset
    # gen_data_set("prim_train_overlaps_20", 2000, 0.9, 1, 10, 20, 0.2, 1.5, 1.5, 3.0, unit_density = 100000, sparse=False)
    # gen_data_set("prim_test_overlaps_20", 500, 0.9, 1, 10, 20, 0.2, 1.5, 1.5, 3.0, unit_density = 100000, sparse=False)
示例#15
0
        else:
            # presumably the branch above handles TP+FP == 0 -- confirm
            precision = float(TP) / (TP + FP)
        recall = float(TP) / (TP + FN)

        #print "precision", precision
        #print "recall", recall
        total_prec = total_prec + precision
        total_recall = total_recall + recall

        #-------------------
        # assumes 4000 points per sample -- TODO confirm
        acc = correct / 4000.
        #print acc
        gt = cmap[seg, :]
        result = cmap[segp, :]
        ps_show = ps[:, 0:3]
        show3d_balls.showpoints(ps_show, gt, result, ballradius=3)
        total_acc += acc
        # Build (x, y, z, r, g, b) rows: default green, then recolour points
        # whose label exceeds 0.9 to (0, 80, 255) in both pred and GT arrays.
        fakecolor = np.zeros((ps_show.shape[0], 3))
        fakecolor[:, 1] = 255
        predict_result = np.c_[ps_show, fakecolor]
        gt_result = np.copy(predict_result)
        predict_result[segp > 0.9, 3] = 0
        predict_result[segp > 0.9, 4] = 80
        predict_result[segp > 0.9, 5] = 255
        gt_result[seg > 0.9, 3] = 0
        gt_result[seg > 0.9, 4] = 80
        gt_result[seg > 0.9, 5] = 255

        # Input cloud coloured white, with the 4th input channel highlighted.
        input_ps = np.c_[ps_show, np.ones((ps_show.shape[0], 3))]
        input_ps[ps[:, 3] > 0, 3] = 255
        #export_ply(input_ps,"input.ply")
示例#16
0
                    chamfer_distance_scaled
                ], feed_dict)
                # Fetch the rescaled GT and predicted clouds.
                _pcl_gt, _pcl_out = sess.run([pcl_gt_scaled, pcl_out_scaled],
                                             feed_dict)
                pdb.set_trace()  # NOTE(review): leftover debugging breakpoint

                # Accumulate running error metrics for this split.
                N_ERR += n_err
                fwd_dist += np.mean(fwd)
                bwd_dist += np.mean(bwd)
                chamfer_dist += np.mean(chamfer)

                if FLAGS.display:
                    cv2.imshow('img', ip_img)
                    # Rotate for display so the model faces the camera.
                    _pcl_gt[0] = rotate(_pcl_gt[0], 90, 90)
                    _pcl_out[0] = rotate(_pcl_out[0], 90, 90)
                    show3d_balls.showpoints(_pcl_gt[0], ballradius=3)
                    show3d_balls.showpoints(_pcl_out[0], ballradius=3)
                    saveBool = show3d_balls.showtwopoints(_pcl_gt[0],
                                                          _pcl_out[0],
                                                          ballradius=3)
                    # NOTE(review): Python 2 print statement -- py2 fragment.
                    print 'Model:%s, Ch:%.5f, fwd:%.5f, bwd:%.5f' % (
                        model_name, chamfer, fwd, bwd)

                elif FLAGS.save_outputs:
                    gt_rot = rotate(_pcl_gt[0], 90)
                    pred_rot = rotate(_pcl_out[0], 90)
                    save_screenshots(gt_rot, pred_rot, ip_img, out_dir,
                                     model_name + '_' + model_id, mode)

            # Convert summed distances to per-sample averages in millimetres.
            fwd_dist = (fwd_dist / cnt) * 1000
            bwd_dist = (bwd_dist / cnt) * 1000
示例#17
0
File: test.py  Project: joosm/pointnet2
               'loss': loss}
        return sess, ops

def inference(sess, ops, pc, batch_size):
    ''' pc: BxNx3 array, return BxN pred '''
    assert pc.shape[0]%batch_size == 0
    # FIX: floor division -- under Python 3 `/` yields a float and
    # range(float) raises TypeError. Unchanged result under Python 2.
    num_batches = pc.shape[0]//batch_size
    logits = np.zeros((pc.shape[0], pc.shape[1], NUM_CLASSES))
    for i in range(num_batches):
        # Slice one batch and run the network in inference mode.
        feed_dict = {ops['pointclouds_pl']: pc[i*batch_size:(i+1)*batch_size,...],
                     ops['is_training_pl']: False}
        batch_logits = sess.run(ops['pred'], feed_dict=feed_dict)
        logits[i*batch_size:(i+1)*batch_size,...] = batch_logits
    # Per-point argmax over the class dimension.
    return np.argmax(logits, 2)

if __name__=='__main__':

    import matplotlib.pyplot as plt
    # NOTE(review): the palette has 4 bins but is sampled at indices 0..9;
    # indices past the last bin repeat the final colour -- confirm intended.
    cmap = plt.cm.get_cmap("hsv", 4)
    cmap = np.array([cmap(i) for i in range(10)])[:,:3]

    for i in range(len(TEST_DATASET)):
        ps, seg = TEST_DATASET[i]
        # A fresh model per sample so num_point matches the cloud size.
        sess, ops = get_model(batch_size=1, num_point=ps.shape[0])
        segp = inference(sess, ops, np.expand_dims(ps,0), batch_size=1) 
        segp = segp.squeeze()

        # GT colours vs. predicted colours side by side in the viewer.
        gt = cmap[seg, :]
        pred = cmap[segp, :]
        show3d_balls.showpoints(ps, gt, pred, ballradius=8)
示例#18
0
    def __len__(self):
        # Dataset size is the number of datapath entries.
        return len(self.datapath)


if __name__ == '__main__':
    # Smoke test: fetch one trainval sample and visualise it.
    d = PartNormalDataset(
        root='../data/shapenetcore_partanno_segmentation_benchmark_v0_normal',
        split='trainval',
        npoints=3000)
    print(len(d))

    i = 500
    ps, normal, seg = d[i]
    print(d.datapath[i])
    print(np.max(seg), np.min(seg))
    print(ps.shape, seg.shape, normal.shape)
    print(ps)
    print(normal)

    sys.path.append('../utils')
    import show3d_balls
    # Shift normals (assumed in [-1, 1] -- TODO confirm) to use as colours.
    show3d_balls.showpoints(ps, normal + 1, ballradius=8)

    # Same dataset in classification mode.
    d = PartNormalDataset(
        root='../data/shapenetcore_partanno_segmentation_benchmark_v0_normal',
        classification=True)
    print(len(d))
    ps, normal, cls = d[0]
    print(ps.shape, type(ps), cls.shape, type(cls))
示例#19
0
        }
        batch_logits = sess.run(ops['pred'], feed_dict=feed_dict)
        logits[i * batch_size:(i + 1) * batch_size, ...] = batch_logits
    return logits


if __name__ == '__main__':

    # One random RGB colour per group.
    num_group = FLAGS.num_group
    color_list = []
    for i in range(num_group):
        color_list.append(np.random.random((3, )))

    sess, ops = get_model(batch_size=1, num_point=NUM_POINT)
    # Visit the test set in random order.
    indices = np.arange(len(TEST_DATASET))
    np.random.shuffle(indices)
    for i in range(len(TEST_DATASET)):
        ps, seg = TEST_DATASET[indices[i]]
        pred = inference(sess, ops, np.expand_dims(ps, 0), batch_size=1)
        pred = pred.squeeze()

        show3d_balls.showpoints(ps, ballradius=8)
        show3d_balls.showpoints(pred, ballradius=8)

        if num_group > 1:
            c_gt = np.zeros_like(pred)
            # FIX: floor division -- `/` yields float slice bounds under
            # Python 3 (TypeError); also rename the inner loop variable so
            # it no longer shadows the outer `i`.
            for g in range(num_group):
                start = g * NUM_POINT // num_group
                stop = (g + 1) * NUM_POINT // num_group
                c_gt[start:stop, :] = color_list[g]
            show3d_balls.showpoints(pred, c_gt=c_gt, ballradius=8)
示例#20
0
            ops['pointclouds_pl']: batch_data,
            ops['is_training_pl']: False
        }
        batch_logits = sess.run(ops['pred'], feed_dict=feed_dict)
        logits[i * batch_size:(i + 1) * batch_size, ...] = batch_logits
    return np.argmax(logits, 2)


if __name__ == '__main__':

    import matplotlib.pyplot as plt
    # NOTE(review): the palette has 4 bins but is sampled at indices 0..9;
    # indices past the last bin repeat the final colour -- confirm intended.
    cmap = plt.cm.get_cmap("hsv", 4)
    cmap = np.array([cmap(i) for i in range(10)])[:, :3]

    for i in range(len(TEST_DATASET)):
        # File name of the source point cloud, used for the output CSV.
        _, file_pointcloud = TEST_DATASET.datapath[i]
        filename = os.path.basename(file_pointcloud)

        ps, _, seg, centroid, m = TEST_DATASET[i]
        sess, ops = get_model(batch_size=1, num_point=ps.shape[0])
        segp = inference(sess, ops, np.expand_dims(ps, 0), batch_size=1)
        gt = cmap[seg, :]
        pred = cmap[segp, :]

        # Undo the normalisation, then save xyz + predicted label per point.
        original_pointcloud = part_dataset_all_normal.retrieve_original_pointcloud(
            ps, centroid, m)
        result = np.concatenate((original_pointcloud, segp.T), axis=1)
        np.savetxt('infer/' + filename, result, delimiter=',', fmt='%f')

        show3d_balls.showpoints(ps, gt, pred, ballradius=8)
            # Reshape the flat prediction into an rsz x rsz x 3 map and show
            # each coordinate channel as a separate heat map window.
            rsz = int(pred[i].shape[0]**0.5 + 0.5)
            cv2.imshow('x',
                       big(heatmap(pred[i].reshape((rsz, rsz, 3))[:, :, 0])))
            cv2.imshow('y',
                       big(heatmap(pred[i].reshape((rsz, rsz, 3))[:, :, 1])))
            cv2.imshow('z',
                       big(heatmap(pred[i].reshape((rsz, rsz, 3))[:, :, 2])))
            cv2.imshow('data', data[i])

            # Interactive viewer loop: 'c' toggles colour mode, 'z' toggles
            # the depth channel, space advances, 'q' quits.
            while True:
                cmd = show3d.showpoints(
                    showpoints,
                    c0=c0,
                    c1=c1,
                    c2=c2,
                    waittime=100,
                    magnifyBlue=(0 if colorflag == 1 else 0),
                    background=((128, 128, 128) if colorflag == 1 else
                                (0, 0, 0)),
                    ballradius=(2 if colorflag == 1 else 12)) % 256
                if cmd == ord('c'):
                    colorflag = 1 - colorflag
                    updatecolor()
                if cmd == ord('z'):
                    showz = 1 - showz
                    updatecolor()
                if cmd == ord(' '):
                    break
                if cmd == ord('q'):
                    break
            if cmd == ord('q'):
示例#22
0
def test(base, pc_real):
    """Run the staged grasp contact-point predictor on the real point cloud
    `pc_real` and visualise the predicted contact points.

    NOTE(review): depends on many module-level names (sess, the *_tf graph
    tensors, GRIPPER_TOP_DIR, TOP_K2, showpoints); `base` appears unused in
    this function.
    """
    nnn = 1
    gripper_size = nnn
    num_batch = 1
    for batch_id in range(int(num_batch)):
        in_gripper_feat_list = []
        in_objenv_list = []
        in_objnor_list = []
        gripper_id_list = []

        gripper_max_list = []
        gripper_mean_list = []
        gripper_min_list = []

        old_id_new_list = []

        # Gripper id 11 maps to 'robotiq_3f' (see the id mapping below).
        gripper_index = np.array([11])
        #in_gripper_id = 2
        #input_gripper_index = np.array([5])
        #gripper_index = np.array([11])#np.random.choice(np.array([1,2,3,4,5,7,8,9,11,12,13]),FLAGS.batch_size,replace=True)
        rra = np.random.uniform(0, 1)

        for bbi in range(int(1)):
            # Random z-rotation (multiple of 30 deg) and a small translation;
            # NOTE(review): rotmat/transl are built but not applied below.
            rotation_degree_array = np.arange(0, 360, 30)
            rotation_degree = np.random.choice(rotation_degree_array, 1)
            rotmat = np.zeros((3, 3))
            rotmat[0, 0] = np.cos(rotation_degree)
            rotmat[0, 1] = -np.sin(rotation_degree)
            rotmat[1, 0] = np.sin(rotation_degree)
            rotmat[1, 1] = np.cos(rotation_degree)
            rotmat[2, 2] = 1
            translx, transly, translz = np.random.uniform(-0.05,
                                                          0.05,
                                                          size=(3, ))
            transl = np.array([translx, transly, translz / 5.0])

            objenv_list = []
            for ggi in range(gripper_size):
                gripper_id = gripper_index[ggi]
                bi = bbi * gripper_size + ggi
                gripper_id_list.append(gripper_id)
                gripper_id = str(gripper_id)

                # Load the precomputed (mean, max, min) latent features for
                # this gripper; ids < 11 live in 'G<id>' directories.
                if int(gripper_id) < 11:
                    #in_gripper_name = 'robotiq_2f'
                    #gripper_path_mean = os.path.join(GRIPPER_TOP_DIR,in_gripper_name,'mean.npy')
                    #gripper_path_max = os.path.join(GRIPPER_TOP_DIR,in_gripper_name,'max.npy')
                    #gripper_path_min = os.path.join(GRIPPER_TOP_DIR,in_gripper_name,'min.npy')

                    #gripper_path_mean = os.path.join(GRIPPER_TOP_DIR,'G'+str(in_gripper_id),'mean.npy')
                    #gripper_path_max = os.path.join(GRIPPER_TOP_DIR,'G'+str(in_gripper_id),'max.npy')
                    #gripper_path_min = os.path.join(GRIPPER_TOP_DIR,'G'+str(in_gripper_id),'min.npy')

                    gripper_path_mean = os.path.join(GRIPPER_TOP_DIR,
                                                     'G' + str(gripper_id),
                                                     'mean.npy')
                    gripper_path_max = os.path.join(GRIPPER_TOP_DIR,
                                                    'G' + str(gripper_id),
                                                    'max.npy')
                    gripper_path_min = os.path.join(GRIPPER_TOP_DIR,
                                                    'G' + str(gripper_id),
                                                    'min.npy')

                    gripper_feat_mean = np.load(gripper_path_mean)
                    gripper_feat_max = np.load(gripper_path_max)
                    gripper_feat_min = np.load(gripper_path_min)
                    # Concatenate mean/max/min into one feature vector.
                    gripper_feat = np.hstack([
                        gripper_feat_mean, gripper_feat_max, gripper_feat_min
                    ])[0]
                    in_gripper_feat_list.append(gripper_feat)
                else:
                    if 1:
                        # Named grippers: 11=robotiq_3f, 12=bh_282,
                        # 13=kinova_kg3.
                        gripper_name = 'None'
                        if int(gripper_id) == 12:
                            gripper_ends_with = '_par_bh282tmp_label_stage1.npy'
                            gripper_name = 'bh_282'
                        elif int(gripper_id) == 11:
                            gripper_ends_with = '_par_robotiq_3f_fullest_tmp_label_stage1.npy'
                            gripper_name = 'robotiq_3f'
                        elif int(gripper_id) == 13:
                            gripper_ends_with = '_par_kinova_3f_fullest_tmp_labelkinova_stage1.npy'
                            gripper_name = 'kinova_kg3'
                        gripper_path_mean = os.path.join(
                            GRIPPER_TOP_DIR, str(gripper_name), 'mean.npy')
                        gripper_path_max = os.path.join(
                            GRIPPER_TOP_DIR, str(gripper_name), 'max.npy')
                        gripper_path_min = os.path.join(
                            GRIPPER_TOP_DIR, str(gripper_name), 'min.npy')

                        gripper_feat_mean = np.load(gripper_path_mean)
                        gripper_feat_max = np.load(gripper_path_max)
                        gripper_feat_min = np.load(gripper_path_min)
                        gripper_feat = np.hstack([
                            gripper_feat_mean, gripper_feat_max,
                            gripper_feat_min
                        ])[0]
                        in_gripper_feat_list.append(gripper_feat)

                # The same real cloud is used for every gripper slot.
                in_objenv_list.append(pc_real)

        in_gripper_feat = np.array(in_gripper_feat_list)
        in_objenv = np.array(in_objenv_list)
        print("in_objenv", in_objenv.shape)

        # Run the predictor repeatedly on the same input (30 passes).
        for _ in range(30):
            # Stage 1: per-point contact scores / top-point indices.
            pred_label, out_single_point_top_1024_index, out_single_point_top_index = sess.run(
                [
                    pred_label_tf, out_single_point_top_1024_index_tf,
                    out_single_point_top_index_tf
                ],
                feed_dict={
                    gripper_feat_tf: in_gripper_feat,
                    obj_pc_tf: in_objenv
                })

            if 0:  # optional stage-1 debug visualisation (disabled)
                for gj in range(1):
                    s_p = np.copy(in_objenv[gj])
                    s_p[:, 2] *= -1.0
                    c_c = np.zeros((2048, 3))
                    c_c[:, 0] = 255.0  #194.0
                    pred_c = out_single_point_top_index[gj][0:120]
                    c_c[pred_c, 0] = 0.0  # Prediction Red
                    c_c[pred_c, 1] = 255.0
                    showpoints(s_p, c_gt=c_c, waittime=5,
                               freezerot=False)  ### GRB
                    #input("raw")

            # stage2
            if 1:
                out_single_point_top_1024_index_v2, out_two_points_top_index = sess.run(
                    [
                        out_single_point_top_1024_index_tf,
                        out_two_points_top_index_tf
                    ],
                    feed_dict={
                        gripper_feat_tf: in_gripper_feat,
                        obj_pc_tf: in_objenv
                    })

                # Sanity check: both runs must agree on the top-1024 indices.
                assert np.all(out_single_point_top_1024_index_v2 ==
                              out_single_point_top_1024_index)

                for gj in range(1):
                    # Decode the flattened pair label into the two point
                    # indices (first // TOP_K2, second % TOP_K2).
                    two_points_label = out_two_points_top_index[gj]

                    top_two_points_index_1 = two_points_label // TOP_K2
                    top_two_points_index_1 = out_single_point_top_index[gj][
                        top_two_points_index_1]
                    top_two_points_index_2 = out_single_point_top_1024_index[
                        gj][two_points_label % TOP_K2]

                    top1_two_points_index_1 = top_two_points_index_1[0:12]
                    top1_two_points_index_2 = top_two_points_index_2[0:12]

                    gtt = np.vstack([[top1_two_points_index_1],
                                     [top1_two_points_index_2]]).T[:50]

                    if 0:  # optional stage-2 debug visualisation (disabled)
                        s_p = np.copy(in_objenv[gj])
                        s_p[:, 2] *= -1.0
                        c_pred = np.zeros((2048, 3))
                        c_pred[:, 0] = 255.0  #255#194.0
                        c_pred[top1_two_points_index_1, 0] = 0.0
                        c_pred[top1_two_points_index_1, 1] = 255.0
                        c_pred[top1_two_points_index_1, 2] = 0.0
                        c_pred[top1_two_points_index_2, 0] = 0.0
                        c_pred[top1_two_points_index_2, 2] = 255.0
                        showpoints(s_p,
                                   c_gt=c_pred,
                                   waittime=20,
                                   ballradius=4,
                                   freezerot=False)  ## GRB
                        #input("raw")

            # stage3 oldr
            if 1:
                out_corr_top_index_stage3 = sess.run(
                    out_corr_top_index_stage3_tf,
                    feed_dict={
                        gripper_feat_tf: in_gripper_feat,
                        obj_pc_tf: in_objenv
                    })

                for gj in range(nnn):
                    # Decode the stage-3 label into a (pair, third-point)
                    # combination using the same // and % scheme as stage 2.
                    third_point_set_label = out_corr_top_index_stage3[gj]

                    top_corr2_index = third_point_set_label // TOP_K2

                    two_points_label_ = out_two_points_top_index[gj]
                    top_two_points_index_1_ = two_points_label_ // TOP_K2
                    top_two_points_index_1_ = out_single_point_top_index[gj][
                        top_two_points_index_1_]
                    top_two_points_index_2_ = out_single_point_top_1024_index[
                        gj][two_points_label_ % TOP_K2]

                    top_f1_index_1 = top_two_points_index_1_[top_corr2_index]
                    top_f2_index_2 = top_two_points_index_2_[top_corr2_index]
                    top_f3_index_3 = out_single_point_top_1024_index[gj][
                        third_point_set_label % TOP_K2]

                    top1_f1_index_1 = top_f1_index_1[0:12]
                    top1_f2_index_2 = top_f2_index_2[0:12]
                    top1_f3_index_3 = top_f3_index_3[0:12]

                    gggt = np.vstack([[top1_f1_index_1], [top1_f2_index_2],
                                      [top1_f3_index_3]]).T

                    # Paint the three predicted contact sets green/red/blue
                    # on a white cloud and show them.
                    #if 1:
                    s_p = np.copy(in_objenv[0])
                    s_p[:, 2] *= -1.0
                    c_pred = np.zeros((2048, 3))
                    c_pred[:, 0] = 255.0  #255#194.0
                    c_pred[:, 1] = 255  #194.0
                    c_pred[:, 2] = 255.0  #214.0
                    c_pred[top1_f1_index_1, 0] = 0.0
                    c_pred[top1_f1_index_1, 1] = 255.0
                    c_pred[top1_f1_index_1, 2] = 0.0
                    c_pred[top1_f2_index_2, 0] = 255.0
                    c_pred[top1_f2_index_2, 1] = 0.0
                    c_pred[top1_f2_index_2, 2] = 0.0
                    c_pred[top1_f3_index_3, 0] = 0.0
                    c_pred[top1_f3_index_3, 1] = 0.0
                    c_pred[top1_f3_index_3, 2] = 255.0
                    showpoints(s_p,
                               c_gt=c_pred,
                               waittime=5,
                               ballradius=4,
                               freezerot=False)  ## GRB
示例#23
0
# print(point.shape)
# To channel-first (3, N) with a leading batch dimension.
point = point.transpose(1, 0).contiguous()
point = Variable(point.view(1, point.size()[0], point.size()[1]))
print(point.shape)

if torch.cuda.is_available():
    print("cuda!!")
    classifier.cuda()
    point, cls, seg = point.cuda(), cls.cuda(), seg.cuda()

print(cls)
# Joint classification + part-segmentation forward pass.
cls_pred, seg_pred, _ = classifier(point, to_categorical(cls, num_classes))
seg_pred = seg_pred.contiguous().view(-1, num_part)
# print(seg_pred.shape)
print(cls_pred)
print(cls_pred.shape)
print(cls_pred.max(0)[1])
print(cls_pred.max(1)[1])
pred_choice = seg_pred.data.max(1)[1]  # per-point predicted part label
print(max(pred_choice), min(pred_choice))
correct = pred_choice.eq(seg.data).cpu().sum()
print("coreect", correct)
print("acc", correct.item() / 2500)  # assumes 2500 points -- TODO confirm
print(pred_choice.shape)
# 1 where prediction matches GT, 0 where it differs.
dif = (pred_choice == seg).cpu().numpy().astype(int)
print(dif)
dif_color = cmap[dif, :]
# pred_color = cmap[pred_choice.cpu().numpy(),:]

showpoints(point_np, dif_color)  # colour-code per-point correctness