Example 1
def test_time():
    from draw_util import output_points, get_class_colors
    from sklearn.cluster import KMeans
    colors = get_class_colors()

    train_list, test_list = get_block_train_test_split()
    random.shuffle(train_list)
    train_list = ['data/S3DIS/room_block_10_10/' + fn for fn in train_list]
    # filename=train_list[0]
    filename = 'data/S3DIS/room_block_10_10/49_Area_1_office_8.pkl'
    points, labels = read_room_pkl(filename)  # [n,6],[n,1]
    for i in xrange(10):
        t = time.time()
        xyzs, rgbs, covars, lbls = sample_block(points,
                                                labels,
                                                0.1,
                                                3.0,
                                                1.5,
                                                min_pn=2048,
                                                use_rescale=True,
                                                use_flip=True,
                                                use_rotate=True)
        print 'sample block cost {} s '.format(time.time() - t)

    t = time.time()
    xyzs, rgbs, covars, lbls, nidxs, nidxs_lens, nidxs_bgs, cidxs=\
        normalize_block(xyzs,rgbs,covars,lbls,0.2,True,0.8,1.0,True,2.5)
    print 'normalize cost {} s '.format(time.time() - t)
Example 2
def test_normalize():
    from draw_util import output_points, get_class_colors
    from sklearn.cluster import KMeans
    colors = get_class_colors()

    train_list, test_list = get_block_train_test_split()
    random.shuffle(train_list)
    train_list = ['data/S3DIS/room_block_10_10/' + fn for fn in train_list]
    # filename=train_list[0]
    filename = 'data/S3DIS/room_block_10_10/49_Area_1_office_8.pkl'
    points, labels = read_room_pkl(filename)  # [n,6],[n,1]
    begin = time.time()
    xyzs, rgbs, covars, lbls = sample_block(points,
                                            labels,
                                            SAMPLE_STRIDE,
                                            BLOCK_SIZE,
                                            BLOCK_STRIDE,
                                            min_pn=2048,
                                            use_rescale=True,
                                            use_flip=True,
                                            use_rotate=True)
    print 'sample_block cost {} s'.format(time.time() - begin)

    # for j in xrange(len(xyzs)):
    #     print np.min(xyzs[j],axis=0),np.max(xyzs[j],axis=0)
    #     print np.min(rgbs[j],axis=0),np.max(rgbs[j],axis=0)
    #     print xyzs[j].shape,lbls[j].shape
    # output_points('test_result/label_init{}.txt'.format(j),xyzs[j],colors[lbls[j].flatten(),:])
    # output_points('test_result/lrgbs_init{}.txt'.format(j),xyzs[j],rgbs[j])

    xyzs, rgbs, covars, lbls, nidxs, nidxs_lens, nidxs_bgs, cidxs=\
        normalize_block(xyzs,rgbs,covars,lbls,0.2,True,0.8,1.0,True,2.5)

    for j in xrange(len(xyzs)):
        print xyzs[j].shape, rgbs[j].shape, covars[j].shape, lbls[
            j].shape, nidxs[j].shape, nidxs_lens[j].shape, nidxs_bgs[
                j].shape, cidxs[j].shape
        print np.min(xyzs[j], axis=0), np.max(xyzs[j], axis=0)
        print np.min(rgbs[j], axis=0), np.max(rgbs[j], axis=0)
        print 'avg nn size: {}'.format(len(nidxs[j]) / float(len(xyzs[j])))
        # print xyzs[j].shape,lbls[j].shape
        output_points('test_result/label{}.txt'.format(j), xyzs[j],
                      colors[lbls[j].flatten(), :])
        output_points('test_result/lrgbs{}.txt'.format(j), xyzs[j],
                      np.asarray(rgbs[j] * 128 + 127, np.int32))

    for j in xrange(len(xyzs[0])):
        output_points(
            'test_result/nn{}.txt'.format(j),
            xyzs[0][nidxs[0][nidxs_bgs[0][j]:nidxs_bgs[0][j] +
                             nidxs_lens[0][j]], :])
def visual_room():
    """Stitch office rooms back together from their saved blocks and write a
    colored label cloud per room to test_result/."""
    from draw_util import get_class_colors, output_points
    train_list, test_list = get_block_train_test_split()
    train_list += test_list
    office_files = [fn for fn in train_list if fn.split('_')[-2] == 'office']
    colors = get_class_colors()
    for fn in office_files:
        xyzs, rgbs, covars, labels, block_mins = read_pkl(
            'data/S3DIS/office_block/' + fn)
        # undo the per-block translation so blocks line up in room coordinates
        for k, bmin in enumerate(block_mins):
            xyzs[k] += bmin
        xyzs = np.concatenate(xyzs, axis=0)
        labels = np.concatenate(labels, axis=0)
        output_points('test_result/{}.txt'.format(fn), xyzs, colors[labels])
Example 4
def test_sample():
    from draw_util import output_points, get_class_colors
    from sklearn.cluster import KMeans
    colors = get_class_colors()

    train_list, test_list = get_block_train_test_split()
    random.shuffle(train_list)
    train_list = ['data/S3DIS/room_block_10_10/' + fn for fn in train_list]
    filename = train_list[0]
    # filename='data/S3DIS/room_block_10_10/58_Area_2_auditorium_2.pkl'
    points, labels = read_room_pkl(filename)  # [n,6],[n,1]
    print np.min(points, axis=0)
    begin = time.time()
    xyzs, rgbs, covars, lbls = sample_block(points,
                                            labels,
                                            0.075,
                                            1.5,
                                            1.5,
                                            min_pn=2048 / 2)
    #use_rescale=True,use_flip=True,use_rotate=True)
    print 'sample_block cost {} s'.format(time.time() - begin)

    print np.min(np.concatenate(xyzs, axis=0), axis=0)
    kc = np.random.randint(0, 255, [5, 3])
    for j in xrange(len(xyzs)):
        # print xyzs[j].shape,lbls[j].shape
        output_points('test_result/label{}.txt'.format(j), xyzs[j],
                      colors[lbls[j].flatten(), :])
        output_points('test_result/lrgbs{}.txt'.format(j), xyzs[j], rgbs[j])

    kmeans = KMeans(5)
    preds = kmeans.fit_predict(np.concatenate(covars, axis=0))
    output_points('test_result/kmeans.txt', np.concatenate(xyzs, axis=0),
                  kc[preds.flatten(), :])

    pt_num = [len(xyz) for xyz in xyzs]
    print 'avg pt num: {}'.format(np.mean(pt_num))
def test_one_epoch(ops,
                   pls,
                   sess,
                   saver,
                   testset,
                   epoch_num,
                   feed_dict,
                   summary_writer=None):
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    colors = get_class_colors()
    weights = get_class_loss_weights()
    for i, feed_in in enumerate(testset):
        xyzs, rgbs, covars, lbls, nidxs, nidxs_lens, nidxs_bgs, cidxs, block_bgs, block_lens=\
            default_unpack_feats_labels(feed_in,FLAGS.num_gpus)

        for k in xrange(FLAGS.num_gpus):
            feed_dict[pls['xyzs'][k]] = xyzs[k]
            feed_dict[pls['rgbs'][k]] = rgbs[k]
            feed_dict[pls['covars'][k]] = covars[k]
            feed_dict[pls['lbls'][k]] = lbls[k]
            feed_dict[pls['nidxs'][k]] = nidxs[k]
            feed_dict[pls['nidxs_lens'][k]] = nidxs_lens[k]
            feed_dict[pls['nidxs_bgs'][k]] = nidxs_bgs[k]
            feed_dict[pls['cidxs'][k]] = cidxs[k]
            all_labels.append(lbls[k])
            if FLAGS.weighted_loss:
                feed_dict[pls['weights'][k]] = weights[lbls[k]]

        feed_dict[pls['is_training']] = False

        if FLAGS.eval and FLAGS.num_monitor:
            loss, preds, summary = sess.run(
                [ops['total_loss'], ops['preds'], ops['summary']], feed_dict)
            summary_writer.add_summary(summary)
        else:
            loss, preds = sess.run([ops['total_loss'], ops['preds']],
                                   feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)

        # output labels and true
        if FLAGS.eval and FLAGS.eval_output:
            cur = 0
            for k in xrange(FLAGS.num_gpus):
                restore_xyzs = xyzs[k]
                restore_xyzs[:, :2] = restore_xyzs[:, :2] * 1.5 + 1.5
                restore_xyzs[:, 2] += 1.0
                restore_xyzs[:, 2] *= block_lens[k][2] / 2
                restore_xyzs += block_bgs[k]
                output_points('test_result/{}_{}_true.txt'.format(i, k),
                              restore_xyzs, colors[lbls[k], :])
                output_points('test_result/{}_{}_pred.txt'.format(i, k),
                              restore_xyzs,
                              colors[preds[cur:cur + len(xyzs[k])], :])
                cur += len(xyzs[k])

        if FLAGS.eval and FLAGS.num_monitor and i >= 2:
            break

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), FLAGS.log_file)

    if not FLAGS.eval:
        checkpoint_path = os.path.join(FLAGS.save_dir,
                                       'model{}.ckpt'.format(epoch_num))
        saver.save(sess, checkpoint_path)
    else:
        names = get_class_names()
        for i in xrange(len(names)):
            print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])
        qn = qxyzs.shape[0]
        rn = 1000000
        qrn = qn / rn
        if qn % rn != 0: qrn += 1
        # print 'qrn {} sxyzs num {}'.format(qrn,sxyzs.shape[0])
        qprobs = []
        for t in xrange(qrn):
            beg_idxs = t * rn
            end_idxs = min((t + 1) * rn, qn)
            qrprobs = interpolate(sxyzs, sprobs, qxyzs[beg_idxs:end_idxs])
            qprobs.append(qrprobs)

        qprobs = np.concatenate(qprobs, axis=0)
        qpreds = np.argmax(qprobs, axis=1)

        colors = get_class_colors()
        spreds = np.argmax(sprobs, axis=1)

        fp, tp, fn = acc_val(labels.flatten(), qpreds.flatten(), fp, tp, fn)

        # if fi<=5:
        #     idxs=libPointUtil.gridDownsampleGPU(sxyzs,0.01,False)
        #     sxyzs=sxyzs[idxs]
        #     spreds=spreds[idxs]
        #     slbls=slbls[idxs]
        #     output_points('test_result/{}spreds.txt'.format(fi),sxyzs,colors[spreds,:])
        #     output_points('test_result/{}slabel.txt'.format(fi),sxyzs,colors[slbls,:])
        #
        #     idxs=libPointUtil.gridDownsampleGPU(qxyzs,0.01,False)
        #     qxyzs=qxyzs[idxs]
        #     qpreds=qpreds[idxs]
def test_one_epoch(ops,
                   pls,
                   sess,
                   saver,
                   testset,
                   epoch_num,
                   feed_dict,
                   summary_writer=None):
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    colors = get_class_colors()
    # incorrect_rate=[]
    for i, feed_in in enumerate(testset):
        _, batch_labels, block_mins = fill_feed_dict(feed_in, feed_dict, pls,
                                                     FLAGS.num_gpus)

        feed_dict[pls['is_training']] = False
        all_labels += batch_labels

        loss, preds = sess.run([ops['total_loss'], ops['preds']], feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)

        # cur_labels=np.concatenate(batch_labels,axis=0)
        # cur_preds=preds
        # cur_rate=np.sum(cur_labels != cur_preds) / float(len(cur_preds))
        # incorrect_rate.append(cur_rate)

        # output labels and true
        if (FLAGS.eval and FLAGS.eval_output):
            cur = 0
            for k in xrange(FLAGS.num_gpus):
                xyzs = feed_dict[pls['cxyzs'][k][0]]
                lbls = feed_dict[pls['lbls'][k]]
                xyzs += block_mins[k]
                output_points('test_result/{}_{}_true.txt'.format(i, k), xyzs,
                              colors[lbls, :])
                output_points('test_result/{}_{}_pred.txt'.format(i, k), xyzs,
                              colors[preds[cur:cur + len(xyzs)], :])
                cur += len(xyzs)

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    # print np.histogram(incorrect_rate,30)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), FLAGS.log_file)

    if not FLAGS.eval:
        checkpoint_path = os.path.join(FLAGS.save_dir,
                                       'model{}.ckpt'.format(epoch_num))
        saver.save(sess, checkpoint_path)
    else:
        names = get_class_names()
        for i in xrange(len(names)):
            print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])