コード例 #1
0
ファイル: data_util.py プロジェクト: liuyuan-pal/PCDL
def test_compute_covars():
    """Smoke-test Points2Voxel.ComputeCovars on a ModelNet HDF5 file, then
    K-means cluster the per-point covariance features of one model and dump
    the clusters as a colored point cloud ('cluster.txt') for inspection.
    """
    import time
    import Points2Voxel
    import sys
    sys.path.append('..')
    from s3dis.draw_util import output_points

    points, nidxs, labels=read_modelnet_v2('../data/ModelNetTrain/nidxs/ply_data_train2.h5')
    begin=time.time()

    # ComputeCovars requires contiguous float32 points and int32 neighbor indices
    nidxs=np.ascontiguousarray(nidxs,dtype=np.int32)
    points=np.ascontiguousarray(points,dtype=np.float32)

    covars=Points2Voxel.ComputeCovars(points,nidxs,16,0)
    # print() call form runs on both Python 2 and Python 3
    print('cost {} s'.format(time.time()-begin))

    from sklearn.cluster import KMeans
    kmeans=KMeans(5)
    # cluster the covariance features of model #1 into 5 groups
    preds=kmeans.fit_predict(covars[1])
    colors=np.random.randint(0,255,[5,3])
    output_points('cluster.txt',points[1],colors[preds,:])
コード例 #2
0
ファイル: data_util.py プロジェクト: liuyuan-pal/PCDL
def test_nidxs():
    """Visualize the neighborhoods of one random model: for each point, dump
    the point plus its stored neighbors to test<t>.txt, one random RGB color
    per neighborhood.
    """
    import sys
    sys.path.append('..')
    from s3dis.draw_util import output_points
    points, nidxs, label=read_modelnet_v2('ply_data_train0.h5')
    k=np.random.randint(0,points.shape[0])
    t=0
    for pt,idx in zip(points[k],nidxs[k]):
        colors=np.random.randint(0,255,3)
        # repeat along axis 0 to get one RGB row per output point, shape (17,3);
        # the original np.repeat(colors[None,:],[17]) had no axis= and therefore
        # flattened the result to shape (51,)
        colors=np.repeat(colors[None,:],17,axis=0)
        pts=np.concatenate([pt[None,:],points[k][idx]],axis=0)
        output_points('test{}.txt'.format(t),pts,colors)
        t+=1
コード例 #3
0
def test_voxel_filling_color_net():
    """Overfit fc_voxel_decoder on random data as a smoke test: the combined
    voxel filling + color loss should decrease; predicted color voxels are
    dumped every 100 steps for visual inspection.
    """
    import numpy as np
    from s3dis.draw_util import output_points
    voxel_num = 64000
    batch_size = 32
    # NOTE(review): points_pl declares 15 features per point but the feed
    # below supplies only 3 -- confirm the intended feature width
    points_pl = tf.placeholder(tf.float32, [batch_size, 4096, 15], 'points')
    true_state_pl = tf.placeholder(tf.float32, [batch_size, voxel_num],
                                   'voxel_true_state')
    true_color_pl = tf.placeholder(tf.float32, [batch_size, voxel_num, 3],
                                   'voxel_true_color')

    feats, _ = vanilla_pointnet_encoder(points_pl)
    voxel_state, voxel_color = fc_voxel_decoder(feats, voxel_num, True)

    filling_loss = voxel_filling_loss(voxel_state, true_state_pl)
    color_loss = voxel_color_loss(voxel_color, true_color_pl)
    loss = filling_loss + color_loss

    opt = tf.train.AdamOptimizer(1e-3)
    minimize_op = opt.minimize(loss)

    points = np.random.uniform(-1, 1, [batch_size, 4096, 3])
    # sparse binary occupancy target: ~10% of voxels filled
    true_state = np.random.uniform(0, 1, [batch_size, voxel_num])
    true_state = np.asarray(true_state > 0.9, np.float32)

    true_color = np.random.uniform(-1, 1, [batch_size, voxel_num, 3])

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    output_points('true.txt', voxel2points(true_color[0]))
    begin = time.time()
    for i in range(100000):
        _, loss_val, pred_state, pred_color = sess.run(
            [minimize_op, loss, voxel_state, voxel_color], {
                points_pl: points,
                true_state_pl: true_state,
                true_color_pl: true_color
            })

        if i % 100 == 0:
            output_points('pred{}.txt'.format(i), voxel2points(pred_color[0]))
            print('step {} loss val {} | {} examples/s'.format(
                i, loss_val, 100 * batch_size / (time.time() - begin)))
            # reset the timer so throughput reflects the last 100 steps,
            # not the average since training started
            begin = time.time()
コード例 #4
0
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict):
    """Run one evaluation epoch of the point autoencoder: feed every batch of
    testset, accumulate the reconstruction loss, dump a few (input, recon)
    point-cloud pairs for inspection, log the epoch loss and checkpoint.
    """
    total = 0
    begin_time = time.time()
    test_loss = []
    left_size = FLAGS.dump_num  # how many example pairs remain to be dumped
    for i, feed_in in enumerate(testset):
        points_list, covars_list = unpack_feats_labels(feed_in, FLAGS.num_gpus)

        feed_dict[pls['points']] = points_list[:, :, :3]
        feed_dict[pls['covars']] = covars_list
        total += points_list.shape[0]

        loss, gen_pts = sess.run([ops['total_loss'], ops['gen_pts']],
                                 feed_dict)
        test_loss.append(loss / FLAGS.num_gpus)

        # randomly dump up to 3 (input, reconstruction) pairs per batch
        for _ in range(3):
            if left_size > 0 and random.random() < 0.8:
                # np.int was removed in NumPy 1.24; the default dtype is fine
                idx = np.random.randint(0, points_list.shape[0])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_true.txt'.format(epoch_num, left_size))
                output_points(fn, points_list[idx, :, :3])

                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_recon.txt'.format(epoch_num, left_size))
                output_points(fn, gen_pts[idx, :, :3])
                left_size -= 1

    test_loss = np.mean(np.asarray(test_loss))
    log_str(
        'epoch {} test_loss {} cost {} s'.format(epoch_num, test_loss,
                                                 time.time() - begin_time),
        FLAGS.log_file)

    checkpoint_path = os.path.join(FLAGS.save_dir,
                                   'unsupervise{}.ckpt'.format(epoch_num))
    saver.save(sess, checkpoint_path)
コード例 #5
0
def test_random_rotate_sample_block():
    """Read one random S3DIS room, shift its xy origin to zero, sample
    uniform blocks via PointsUtil.UniformSampleBlock and dump each block
    to test/<i>.txt for inspection.
    """
    train_list,test_list=get_block_train_test_split()
    import random
    random.shuffle(train_list)
    for fn in train_list[:1]:
        points,labels=read_room_pkl('../data/S3DIS/room_block_10_10/{}'.format(fn))

        labels=np.asarray(labels,dtype=np.int32)
        points=np.ascontiguousarray(points,dtype=np.float32)
        # shift xy so the room starts at the origin
        points[:,:2]-=np.min(points[:,:2],axis=0,keepdims=True)
        max_coor=np.max(points[:,:2],axis=0)
        maxx,maxy=max_coor[0],max_coor[1]
        begin = time.time()
        block_points_list,block_labels_list=PointsUtil.UniformSampleBlock(points,labels,1.0,5.0,0.8,10,maxx,maxy)

        # print() call form runs on both Python 2 and Python 3
        print('cost {} s'.format(time.time()-begin))
        for i,pts in enumerate(block_points_list):
            output_points('test/{}.txt'.format(i),pts)
コード例 #6
0
def output_gen_points(points_list, voxel_state_list, voxel_color_list,
                      gen_state, gen_color, left_size, epoch_num):
    """Dump one random example (input points, true/predicted state voxels,
    true/predicted color voxels) to FLAGS.dump_dir for visual inspection.

    Unlike the original, this no longer mutates the caller's arrays: the
    point rescaling works on a copy and the clipping uses np.clip instead
    of writing through views of gen_state / gen_color.
    """
    # np.int was removed in NumPy 1.24; the default int dtype is fine here
    idx = np.random.randint(0, points_list.shape[0])

    # copy so the in-place += / *= below do not write back into points_list
    pts = points_list[idx, :, :].copy()
    pts[:, :2] += 0.5   # assumes xy in [-0.5, 0.5] -- TODO confirm range
    pts[:, 3:] += 1.0   # assumes colors in [-1, 1], mapped to [0, 254]
    pts[:, 3:] *= 127
    fn = os.path.join(FLAGS.dump_dir,
                      '{}_{}_points.txt'.format(epoch_num, left_size))
    output_points(fn, pts)

    true_state_pts = voxel2points(voxel_state_list[idx])
    fn = os.path.join(FLAGS.dump_dir,
                      '{}_{}_state_true.txt'.format(epoch_num, left_size))
    output_points(fn, true_state_pts)

    # clip predicted occupancies into [0, 1] without mutating gen_state
    pred_state_pts = voxel2points(np.clip(gen_state[idx], 0.0, 1.0))
    fn = os.path.join(FLAGS.dump_dir,
                      '{}_{}_state_pred.txt'.format(epoch_num, left_size))
    output_points(fn, pred_state_pts)

    true_color_pts = voxel2points(voxel_color_list[idx])
    fn = os.path.join(FLAGS.dump_dir,
                      '{}_{}_color_true.txt'.format(epoch_num, left_size))
    output_points(fn, true_color_pts)

    pred_color_pts = voxel2points(np.clip(gen_color[idx], 0.0, 1.0))
    # keep predicted colors only where predicted occupancy is high (> 127)
    pred_color_pts = pred_color_pts[pred_state_pts[:, 3] > 127, :]
    fn = os.path.join(FLAGS.dump_dir,
                      '{}_{}_color_pred.txt'.format(epoch_num, left_size))
    output_points(fn, pred_color_pts)
コード例 #7
0
def output_gen_points(pts, voxels, gen_state, file_idx, epoch_num, dump_dir):
    """Dump the optional input points, true state voxels and predicted state
    voxels of one example to dump_dir; any argument may be None to skip it.

    Unlike the original, this no longer mutates its inputs: the point
    rescaling and the occupancy clipping are done on copies.
    """
    if pts is not None:
        # rescale from [-1, 1] to [0, 1] on a copy, not in place
        scaled = (pts + 1.0) / 2.0
        fn = os.path.join(dump_dir,
                          '{}_{}_points.txt'.format(epoch_num, file_idx))
        output_points(fn, scaled)

    if voxels is not None:
        true_state_pts = voxel2points(voxels)
        fn = os.path.join(dump_dir,
                          '{}_{}_state_true.txt'.format(epoch_num, file_idx))
        output_points(fn, true_state_pts)

    if gen_state is not None:
        # clip predicted occupancies into [0, 1] without mutating gen_state
        pred_state_pts = voxel2points(np.clip(gen_state, 0.0, 1.0))
        fn = os.path.join(dump_dir,
                          '{}_{}_state_pred.txt'.format(epoch_num, file_idx))
        output_points(fn, pred_state_pts)
コード例 #8
0
def test_data_iter():
    """Iterate a ProviderV3 training pipeline for a couple of batches, print
    the unpacked tensor shapes, and dump per-class and per-neighborhood point
    clouds to test_result/ for visual inspection.
    """
    from provider import ProviderV3
    from s3dis.draw_util import output_points
    from s3dis.data_util import get_class_colors
    train_list, test_list = get_block_train_test_split()
    import random
    random.shuffle(train_list)

    train_list = [
        'data/S3DIS/room_block_10_10/' + fn for fn in train_list[:20]
    ]
    test_list = ['data/S3DIS/room_block_10_10/' + fn for fn in test_list]
    train_provider = ProviderV3(train_list, 'train', 4, read_fn)
    try:
        begin = time.time()
        i = 0
        colors = get_class_colors()
        count = 0
        for data in train_provider:
            data2 = unpack_feats_labels(data, 4)
            # print() calls run on Python 3 as well as Python 2
            for part in range(5):
                print(data2[part][0].shape)
            points = data[0][0]
            labels = data[1][0]
            nidxs = data2[2][0]
            nidxs_lens = data2[3][0]
            nidxs_bgs = data2[4][0]
            output_points('test_result/class{}.txt'.format(i), points,
                          colors[labels[:, 0], :])
            # dump each neighborhood as its own cloud (xrange -> range for py3)
            for npti in range(len(nidxs_lens)):
                output_points(
                    'test_result/nidxs{}.txt'.format(npti),
                    points[nidxs[nidxs_bgs[npti]:nidxs_bgs[npti] +
                                 nidxs_lens[npti]]])
            if i >= 1:
                break
            i += 1

        print('cost {} s'.format(time.time() - begin))

        print(count)
    finally:
        train_provider.close()
コード例 #9
0
        maxx,maxy=max_coor[0],max_coor[1]
        begin = time.time()
        block_points_list,block_labels_list=PointsUtil.UniformSampleBlock(points,labels,1.0,5.0,0.8,10,maxx,maxy)

        print 'cost {} s'.format(time.time()-begin)
        for i,pts in enumerate(block_points_list):
            output_points('test/{}.txt'.format(i),pts)

if __name__=="__main__":
    # Smoke-test PointsUtil.GridDownSample on one random S3DIS room:
    # dump the original cloud and the 5cm-grid downsampled cloud to compare.
    train_list,test_list=get_block_train_test_split()
    import random
    random.shuffle(train_list)
    for fn in train_list[:1]:
        points,labels=read_room_pkl('../data/S3DIS/room_block_10_10/'+fn)
        points=np.ascontiguousarray(points,dtype=np.float32)
        labels=np.ascontiguousarray(labels,dtype=np.int32)
        # shift the room so it starts at the origin
        points[:,:3]-=np.min(points[:,:3],axis=0,keepdims=True)
        output_points('original.txt', points)
        begin = time.time()
        points,labels=PointsUtil.GridDownSample(points,labels,0.05)
        # print() call form runs on both Python 2 and Python 3
        print('cost {} s'.format(time.time()-begin))

        colors=get_class_colors()

        output_points('downsample.txt', points)





コード例 #10
0
    train_epoch = 50000
    split_num = 30

    output_epoch = 1000
    log_epoch = 30
    dump_dir = 'unsupervise/modelnet_voxel_experiment'

    sess, pls, ops = network(split_num)

    points, nidxs, labels = read_modelnet_v2(
        'data/ModelNetTrain/nidxs/ply_data_train0.h5')
    points = points[:train_num]
    nidxs = nidxs[:train_num]
    labels = labels[:train_num]

    output_points(dump_dir + '/points.txt', points[0])

    for i in range(train_epoch):
        rot_points = rotate(points)
        # rot_points=np.copy(points)
        voxels = points2voxel_gpu_modelnet(rot_points, split_num, 0)
        covars = points2covars_gpu(rot_points, nidxs, 16, 0)

        feed_dict = {}
        feed_dict[pls['points']] = rot_points
        feed_dict[pls['covars']] = covars
        feed_dict[pls['states']] = voxels
        feed_dict[pls['is_training']] = True

        _, loss, gen_state, lr = sess.run(
            [ops['train'], ops['loss'], ops['gen_state'], ops['lr']],
コード例 #11
0
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict):
    """Run one evaluation epoch of the voxel state+color autoencoder: feed
    every batch of testset, accumulate the loss, dump a few example voxel
    reconstructions, log the epoch loss and checkpoint the model.
    """
    total = 0
    begin_time = time.time()
    test_loss = []
    left_size = FLAGS.dump_num  # how many example dumps remain to be written
    for i, feed_in in enumerate(testset):
        points_list, covars_list, voxel_state_list, voxel_color_list=\
            unpack_feats_labels(feed_in,FLAGS.num_gpus)

        feed_dict[pls['points']] = points_list
        feed_dict[pls['covars']] = covars_list
        feed_dict[pls['voxel_state']] = voxel_state_list
        feed_dict[pls['voxel_color']] = voxel_color_list
        total += points_list.shape[0]

        loss, gen_state, gen_color = sess.run(
            [ops['total_loss'], ops['voxel_state'], ops['voxel_color']],
            feed_dict)
        test_loss.append(loss / FLAGS.num_gpus)

        # randomly dump up to 3 examples per batch; use '_' so the inner loop
        # no longer shadows the batch index i of the enumerate above
        for _ in range(3):
            if left_size > 0 and random.random() < 0.9:
                # np.int was removed in NumPy 1.24; the default dtype is fine
                idx = np.random.randint(0, points_list.shape[0])

                # copy so the rescaling below does not write into points_list
                pts = points_list[idx, :, :].copy()
                pts[:, :2] += 0.5   # assumes xy in [-0.5,0.5] -- TODO confirm
                pts[:, 3:] += 1.0   # assumes colors in [-1,1] -> [0,254]
                pts[:, 3:] *= 127
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_points.txt'.format(epoch_num, left_size))
                output_points(fn, pts)

                true_state_pts = voxel2points(voxel_state_list[idx])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_state_true.txt'.format(epoch_num, left_size))
                output_points(fn, true_state_pts)

                # clip into [0,1] on a copy instead of mutating gen_state
                pred_state_pts = voxel2points(np.clip(gen_state[idx], 0.0, 1.0))
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_state_pred.txt'.format(epoch_num, left_size))
                output_points(fn, pred_state_pts)

                true_color_pts = voxel2points(voxel_color_list[idx])
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_color_true.txt'.format(epoch_num, left_size))
                output_points(fn, true_color_pts)

                pred_color_pts = voxel2points(np.clip(gen_color[idx], 0.0, 1.0))
                fn = os.path.join(
                    FLAGS.dump_dir,
                    '{}_{}_color_pred.txt'.format(epoch_num, left_size))
                output_points(fn, pred_color_pts)

                left_size -= 1

    test_loss = np.mean(np.asarray(test_loss))
    log_str(
        'epoch {} test_loss {} cost {} s'.format(epoch_num, test_loss,
                                                 time.time() - begin_time),
        FLAGS.log_file)

    checkpoint_path = os.path.join(FLAGS.save_dir,
                                   'unsupervise{}.ckpt'.format(epoch_num))
    saver.save(sess, checkpoint_path)
コード例 #12
0
ファイル: point_evaluate.py プロジェクト: liuyuan-pal/PCDL
    what = saver.restore(sess, model_path)

    path = 'data/S3DIS/point/fpfh/'
    print 'miou aiou macc oacc'
    colors = get_class_colors()
    all_labels = []
    all_preds = []
    for fn in test_list:
        feats, labels = read_points_feats(path + fn + '.h5')
        logit_vals = sess.run(logits,
                              feed_dict={
                                  pls['feats']: feats,
                                  pls['is_training']: False
                              })
        preds = np.argmax(logit_vals, axis=1)
        output_points(fn + '_true.txt', feats[:, :3], colors[labels, :])
        output_points(fn + '_pred.txt', feats[:, :3], colors[preds, :])

        all_labels.append(labels)
        all_preds.append(preds)

    all_labels = np.concatenate(all_labels, axis=0)
    all_preds = np.concatenate(all_preds, axis=0)

    get_class_names()

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)
    print 'miou {} oiou {} macc {} oacc {}'.format(miou, oiou, macc, oacc)
    for i, name in enumerate(get_class_names()):
        print '{} iou: {}'.format(name, iou[i])
コード例 #13
0
def test_one_epoch(ops,pls,sess,saver,testset,epoch_num,feed_dict):
    """Evaluate the classifier for one epoch: collect predictions over
    testset, compute accuracies, and either log + checkpoint (training mode)
    or, in eval mode, print per-class accuracy plus optional confusion
    matrix / misclassified-model dumps.
    """
    total=0
    begin_time=time.time()
    test_loss=[]
    all_preds,all_labels=[],[]
    all_error_models,all_error_preds,all_error_gts=[],[],[]
    for i,feed_in in enumerate(testset):
        points_list, covars_list, labels_list= unpack_feats_labels(feed_in,FLAGS.num_gpus)

        feed_dict[pls['points']]=points_list
        feed_dict[pls['covars']]=covars_list
        feed_dict[pls['labels']]=labels_list[:,0]
        feed_dict[pls['is_training']]=False

        total+=points_list.shape[0]

        loss,preds=sess.run([ops['total_loss'],ops['preds']],feed_dict)
        test_loss.append(loss)

        preds=preds.flatten()
        labels_list=labels_list.flatten()
        all_preds.append(preds)
        all_labels.append(labels_list)

        # remember the misclassified models for the optional dump below
        mask=preds!=labels_list
        all_error_models.append(points_list[mask])  # n,k,3
        all_error_preds.append(preds[mask])         # n
        all_error_gts.append(labels_list[mask])     # n

    all_preds=np.concatenate(all_preds,axis=0)
    all_labels=np.concatenate(all_labels,axis=0)

    test_loss=np.mean(np.asarray(test_loss))

    acc,macc,oacc=compute_acc(all_labels,all_preds,FLAGS.num_classes)

    if not FLAGS.eval:
        log_str('mean acc {:.5} overall acc {:.5} loss {:.5} cost {:.3} s'.format(
            macc, oacc, test_loss, time.time()-begin_time
        ),FLAGS.log_file)
        checkpoint_path = os.path.join(FLAGS.save_dir, 'unsupervise{}.ckpt'.format(epoch_num))
        saver.save(sess,checkpoint_path)
    else:
        # print() calls run on Python 3 as well as Python 2
        print('mean acc {:.5} overall acc {:.5} loss {:.5} cost {:.3} s'.format(
            macc, oacc, test_loss, time.time()-begin_time
        ))
        names=get_classes_name()
        for name,accuracy in zip(names,acc):
            print('{} : {}'.format(name,accuracy))

        if FLAGS.confusion_matrix:
            from s3dis.draw_util import plot_confusion_matrix
            plot_confusion_matrix(all_preds,all_labels,names,save_path=FLAGS.confusion_matrix_path)

        if FLAGS.output_error_models:
            from s3dis.draw_util import output_points
            all_error_models=np.concatenate(all_error_models,axis=0)
            all_error_preds=np.concatenate(all_error_preds,axis=0)
            all_error_gts=np.concatenate(all_error_gts,axis=0)
            error_num=all_error_gts.shape[0]
            # sanity check: per-batch error masks must agree with the totals
            assert np.sum(all_labels!=all_preds)==error_num
            # xrange -> range for Python 3 compatibility
            for k in range(error_num):
                output_points(FLAGS.output_error_path+'{}_{}_{}.txt'.format(
                    names[all_error_gts[k]],names[all_error_preds[k]],k),all_error_models[k])