Example #1
0
def test_read_semantic_dataset():
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_semantic3d_block_train_list()
    # print train_list
    # exit(0)
    train_list = [
        'data/Semantic3D.Net/block/sampled/merged/' + fn for fn in train_list
    ]
    test_list = [
        'data/Semantic3D.Net/block/sampled/merged/' + fn for fn in test_list
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, rgbs, covars, lbls, = default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                print len(cxyzs[k])

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        train_provider.close()
        test_provider.close()
Example #2
0
def fill_feed_dict(feed_in, feed_dict, pls, num_gpus):
    """Fill `feed_dict` with one hierarchical point-cloud batch per GPU tower.

    feed_in: packed batch; unpacked into per-GPU lists of multi-level
        coordinates, neighborhood index tensors and voxel groupings.
    feed_dict: dict populated in place with placeholder -> array entries.
    pls: nested placeholder dict, indexed as pls[name][gpu] or
        pls[name][gpu][level].
    num_gpus: number of GPU towers to feed.

    Returns (batch_pt_num, batch_labels, block_mins): total point count,
    per-GPU label arrays, and the per-block minimum coordinates.
    """
    cxyzs, dxyzs, covars, lbls, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens, block_mins = \
        default_unpack_feats_labels(feed_in, num_gpus)

    batch_pt_num = 0
    batch_labels = []
    for k in xrange(num_gpus):
        # three hierarchy levels of coordinates + neighborhood index tensors
        for t in xrange(3):
            feed_dict[pls['cxyzs'][k][t]] = cxyzs[k][t]
            feed_dict[pls['nidxs'][k][t]] = nidxs[k][t]
            feed_dict[pls['nidxs_lens'][k][t]] = nidxs_lens[k][t]
            feed_dict[pls['nidxs_bgs'][k][t]] = nidxs_bgs[k][t]
            feed_dict[pls['cidxs'][k][t]] = cidxs[k][t]

        feed_dict[pls['covars'][k]] = covars[k]
        feed_dict[pls['lbls'][k]] = lbls[k]
        # NOTE(review): `lbl_weights` is not defined in this function —
        # presumably a module-level per-class weight array; confirm it is
        # initialized before this function is called.
        feed_dict[pls['weights'][k]] = lbl_weights[lbls[k]]

        # two coarsening levels of voxel decomposition tensors
        for t in xrange(2):
            feed_dict[pls['dxyzs'][k][t]] = dxyzs[k][t]
            feed_dict[pls['vlens'][k][t]] = vlens[k][t]
            feed_dict[pls['vlens_bgs'][k][t]] = vlens_bgs[k][t]
            feed_dict[pls['vcidxs'][k][t]] = vcidxs[k][t]

        batch_pt_num += lbls[k].shape[0]
        batch_labels.append(lbls[k])

    return batch_pt_num, batch_labels, block_mins
def train_one_epoch(ops, pls, sess, summary_writer, trainset, epoch_num,
                    feed_dict):
    """Run one training epoch over `trainset`.

    For every batch: fills the per-GPU placeholders, runs the gradient
    update, and accumulates loss/accuracy statistics. Every
    FLAGS.log_step batches a summary is written, a progress line is
    logged, and the running statistics are reset.
    """
    epoch_begin = time.time()
    total_correct, total_block, total_points = 0, 0, 0
    begin_time = time.time()
    total_losses = []
    weights = get_class_loss_weights()
    for i, feed_in in enumerate(trainset):
        xyzs, rgbs, covars, lbls, nidxs, nidxs_lens, nidxs_bgs, cidxs, block_bgs, block_lens=\
            default_unpack_feats_labels(feed_in,FLAGS.num_gpus)

        # bind this batch's arrays to each GPU tower's placeholders
        for k in xrange(FLAGS.num_gpus):
            feed_dict[pls['xyzs'][k]] = xyzs[k]
            feed_dict[pls['rgbs'][k]] = rgbs[k]
            feed_dict[pls['covars'][k]] = covars[k]
            feed_dict[pls['lbls'][k]] = lbls[k]
            feed_dict[pls['nidxs'][k]] = nidxs[k]
            feed_dict[pls['nidxs_lens'][k]] = nidxs_lens[k]
            feed_dict[pls['nidxs_bgs'][k]] = nidxs_bgs[k]
            feed_dict[pls['cidxs'][k]] = cidxs[k]

            total_points += lbls[k].shape[0]
            # per-point loss weights looked up from per-class weights
            if FLAGS.weighted_loss:
                feed_dict[pls['weights'][k]] = weights[lbls[k]]

        feed_dict[pls['is_training']] = True
        total_block += FLAGS.num_gpus

        _, loss_val, correct_num = sess.run(
            [ops['apply_grad'], ops['total_loss'], ops['correct_num']],
            feed_dict)
        total_losses.append(loss_val)
        total_correct += correct_num

        if i % FLAGS.log_step == 0:
            summary, global_step = sess.run(
                [ops['summary'], ops['global_step']], feed_dict)

            log_str(
                'epoch {} step {} loss {:.5} acc {:.5} | {:.5} examples/s'.
                format(epoch_num, i, np.mean(np.asarray(total_losses)),
                       float(total_correct) / total_points,
                       float(total_block) / (time.time() - begin_time)),
                FLAGS.log_file)

            summary_writer.add_summary(summary, global_step)
            # reset running statistics so each log line covers one window
            total_correct, total_block, total_points = 0, 0, 0
            begin_time = time.time()
            total_losses = []

    log_str('epoch {} cost {} s'.format(epoch_num,
                                        time.time() - epoch_begin),
            FLAGS.log_file)
def fill_feed_dict(feed_in, feed_dict, pls, num_gpus):
    """Bind finest-level coordinates, covariance feats and labels per GPU.

    Returns (batch_pt_num, batch_labels, block_mins).
    """
    cxyzs, covars, lbls, block_mins = default_unpack_feats_labels(feed_in, num_gpus)

    batch_pt_num = 0
    batch_labels = []
    for gpu_id in xrange(num_gpus):
        # only level-0 (finest) coordinates are fed
        feed_dict[pls['xyzs'][gpu_id]] = cxyzs[gpu_id][0]
        feed_dict[pls['feats'][gpu_id]] = covars[gpu_id]
        feed_dict[pls['lbls'][gpu_id]] = lbls[gpu_id]

        batch_pt_num += lbls[gpu_id].shape[0]
        batch_labels.append(lbls[gpu_id])

    return batch_pt_num, batch_labels, block_mins
def fill_feed_dict(feed_in, feed_dict, pls, num_gpus):
    """Bind coordinates, rgb features and labels to each GPU's placeholders.

    Covariance features are unpacked but intentionally not fed.
    Returns (batch_pt_num, batch_labels, block_mins).
    """
    cxyzs, rgbs, covars, lbls, block_mins = default_unpack_feats_labels(feed_in, num_gpus)

    batch_pt_num = 0
    batch_labels = []
    for gpu_id in xrange(num_gpus):
        feed_dict[pls['xyzs'][gpu_id]] = cxyzs[gpu_id]
        # rgb colors alone serve as the point features here
        feed_dict[pls['feats'][gpu_id]] = rgbs[gpu_id]
        feed_dict[pls['lbls'][gpu_id]] = lbls[gpu_id]

        batch_pt_num += lbls[gpu_id].shape[0]
        batch_labels.append(lbls[gpu_id])

    return batch_pt_num, batch_labels, block_mins
def fill_feed_dict(feed_in, feed_dict, pls, num_gpus):
    """Bind one batch to the per-GPU placeholders.

    Accepts batches with or without a trailing extra entry (e.g. block
    minima). Returns the total number of points fed.
    """
    cur_data = default_unpack_feats_labels(feed_in, num_gpus)
    if len(cur_data) == 4:
        xyzs, rgbs, covars, lbls = cur_data
    else:
        # trailing element (block mins) is present but unused
        xyzs, rgbs, covars, lbls, _ = cur_data

    total_pts = 0
    for gpu_id in xrange(num_gpus):
        feed_dict[pls['xyzs'][gpu_id]] = xyzs[gpu_id]
        # rgb colors alone serve as the point features here
        feed_dict[pls['feats'][gpu_id]] = rgbs[gpu_id]
        feed_dict[pls['lbls'][gpu_id]] = lbls[gpu_id]
        total_pts += lbls[gpu_id].shape[0]

    return total_pts
def fill_feed_dict(feed_in, feed_dict, pls, num_gpus):
    """Fill placeholders for a batch that also carries context points/indices.

    Point features are the concatenation of rgb colors and covariance
    descriptors. Returns the total number of points in the batch.
    """
    xyzs, rgbs, covars, lbls, ctx_xyzs, ctx_idxs, block_mins = \
        default_unpack_feats_labels(feed_in, num_gpus)

    batch_pt_num = 0
    for k in xrange(num_gpus):
        feed_dict[pls['xyzs'][k]] = xyzs[k]
        # per-point feature = color + covariance, concatenated column-wise
        feats = np.concatenate([rgbs[k], covars[k]], axis=1)
        feed_dict[pls['feats'][k]] = feats
        # fixed: the labels placeholder was assigned twice in a row;
        # the redundant second assignment is removed
        feed_dict[pls['lbls'][k]] = lbls[k]
        feed_dict[pls['ctx_pts'][k]] = ctx_xyzs[k]
        feed_dict[pls['ctx_idxs'][k]] = ctx_idxs[k]

        batch_pt_num += lbls[k].shape[0]

    return batch_pt_num
Example #8
0
def test_read_s3dis_dataset():
    """Iterate sampled S3DIS blocks and sanity-check block coordinates.

    Reads every train+test block through a Provider and asserts that each
    block's minimum xyz, offset by (-1.5, -1.5, 0.0), floors to
    non-negative indices.
    """
    from provider import Provider, default_unpack_feats_labels
    train_list, test_list = get_block_train_test_split()
    train_list = ['data/S3DIS/sampled_train/' + fn for fn in train_list]
    test_list = ['data/S3DIS/sampled_test/' + fn for fn in test_list]
    # check the whole dataset: train and test blocks together
    train_list += test_list

    def fn(model, filename):
        # pick the fields used by this check out of the pickled record
        data = read_pkl(filename)
        return data[0], data[2], data[3], data[4], data[12]

    train_provider = Provider(train_list, 'train', 4, fn)
    test_provider = Provider(test_list, 'test', 4, fn)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, rgbs, covars, lbls, block_mins = default_unpack_feats_labels(
                data, 4)
            for k in xrange(4):
                min_xyz = np.min(cxyzs[k][0], axis=0)
                # print min_xyz
                # eps guards against floating-point values a hair below the bound
                eps = 1e-5
                min_val = np.asarray([-1.5, -1.5, 0.0]) - eps
                val = np.asarray(np.floor(min_xyz - min_val), np.int32)
                print val
                assert val[0] >= 0
                assert val[1] >= 0
                assert val[2] >= 0

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        train_provider.close()
        test_provider.close()
def test_one_epoch(ops,
                   pls,
                   sess,
                   saver,
                   testset,
                   epoch_num,
                   feed_dict,
                   summary_writer=None):
    """Evaluate one epoch over `testset`, log IoU/accuracy, and checkpoint.

    Collects predictions and labels across all batches, computes
    mean/overall IoU and accuracy, and either saves a checkpoint (training
    mode) or prints per-class metrics (FLAGS.eval mode). Optionally writes
    colored point-cloud dumps of predictions vs. ground truth.
    """
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    colors = get_class_colors()
    weights = get_class_loss_weights()
    for i, feed_in in enumerate(testset):
        xyzs, rgbs, covars, lbls, nidxs, nidxs_lens, nidxs_bgs, cidxs, block_bgs, block_lens=\
            default_unpack_feats_labels(feed_in,FLAGS.num_gpus)

        # bind this batch's arrays to each GPU tower's placeholders
        for k in xrange(FLAGS.num_gpus):
            feed_dict[pls['xyzs'][k]] = xyzs[k]
            feed_dict[pls['rgbs'][k]] = rgbs[k]
            feed_dict[pls['covars'][k]] = covars[k]
            feed_dict[pls['lbls'][k]] = lbls[k]
            feed_dict[pls['nidxs'][k]] = nidxs[k]
            feed_dict[pls['nidxs_lens'][k]] = nidxs_lens[k]
            feed_dict[pls['nidxs_bgs'][k]] = nidxs_bgs[k]
            feed_dict[pls['cidxs'][k]] = cidxs[k]
            all_labels.append(lbls[k])
            if FLAGS.weighted_loss:
                feed_dict[pls['weights'][k]] = weights[lbls[k]]

        feed_dict[pls['is_training']] = False

        if FLAGS.eval and FLAGS.num_monitor:
            loss, preds, summary = sess.run(
                [ops['total_loss'], ops['preds'], ops['summary']], feed_dict)
            summary_writer.add_summary(summary)
        else:
            loss, preds = sess.run([ops['total_loss'], ops['preds']],
                                   feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)

        # output labels and true
        if FLAGS.eval and FLAGS.eval_output:
            # `preds` is flat over all towers; `cur` walks it tower by tower
            cur = 0
            for k in xrange(FLAGS.num_gpus):
                # NOTE(review): this arithmetic appears to undo the block
                # normalization applied at preprocessing (xy scaled around
                # 1.5, z scaled by half the block height) -- confirm against
                # the dataset preprocessing code.
                restore_xyzs = xyzs[k]
                restore_xyzs[:, :2] = restore_xyzs[:, :2] * 1.5 + 1.5
                restore_xyzs[:, 2] += 1.0
                restore_xyzs[:, 2] *= block_lens[k][2] / 2
                restore_xyzs += block_bgs[k]
                output_points('test_result/{}_{}_true.txt'.format(i, k),
                              restore_xyzs, colors[lbls[k], :])
                output_points('test_result/{}_{}_pred.txt'.format(i, k),
                              restore_xyzs,
                              colors[preds[cur:cur + len(xyzs[k])], :])
                cur += len(xyzs[k])

        # monitoring mode only needs a few batches
        if FLAGS.eval and FLAGS.num_monitor and i >= 2:
            break

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), FLAGS.log_file)

    if not FLAGS.eval:
        # training run: persist a checkpoint for this epoch
        checkpoint_path = os.path.join(FLAGS.save_dir,
                                       'model{}.ckpt'.format(epoch_num))
        saver.save(sess, checkpoint_path)
    else:
        # eval run: report per-class iou/accuracy instead
        names = get_class_names()
        for i in xrange(len(names)):
            print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])
Example #10
0
def test_scannet():
    """Iterate the sampled ScanNet training set and print per-class counts.

    Streams every training batch through a Provider, accumulates a label
    histogram over the 21 ScanNet classes, and reports the class counts,
    total batch count and wall-clock time.
    """
    from provider import Provider, default_unpack_feats_labels
    with open('cached/scannet_train_filenames.txt', 'r') as f:
        train_list = [line.strip('\n') for line in f.readlines()]
    train_list = [
        'data/ScanNet/sampled_train/{}'.format(fn) for fn in train_list
    ]
    test_list = [
        'data/ScanNet/sampled_test/test_{}.pkl'.format(i) for i in xrange(312)
    ]
    read_fn = lambda model, filename: read_pkl(filename)

    train_provider = Provider(train_list, 'train', 4, read_fn)
    test_provider = Provider(test_list, 'test', 4, read_fn)

    try:
        begin = time.time()
        i = 0
        # running histogram over labels 0..20
        class_count = np.zeros(21)
        for data in train_provider:
            i += 1
            cxyzs, dxyzs, covars, lbls, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens, block_mins = \
                default_unpack_feats_labels(data, 4)
            for t in xrange(4):
                # bin edges 0..21 -> one bin per class label
                cur_count, _ = np.histogram(lbls[t], np.arange(22))
                class_count += cur_count
            if i % 500 == 0:
                print i
            # for k in xrange(4):
            #     for t in xrange(3):
            #         print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(i,k,t,np.min(cxyzs[k][t],axis=0),
            #                                                                            np.max(cxyzs[k][t],axis=0),
            #                                                                            cxyzs[k][t].shape[0])
            #         assert cidxs[k][t].shape[0]==nidxs[k][t].shape[0]
            #         assert nidxs_bgs[k][t].shape[0]==cxyzs[k][t].shape[0]
            #         assert nidxs_lens[k][t].shape[0]==cxyzs[k][t].shape[0]
            #         assert np.sum(nidxs_lens[k][t])==nidxs[k][t].shape[0]
            #         assert nidxs_bgs[k][t][-1]+nidxs_lens[k][t][-1]==nidxs[k][t].shape[0]
            #         assert np.max(cidxs[k][t])==cxyzs[k][t].shape[0]-1
            #         print 'lvl {} avg nsize {}'.format(t,cidxs[k][t].shape[0]/float(cxyzs[k][t].shape[0]))
            #
            #     # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
            #     # print np.min(covars[k],axis=0)
            #     # print np.max(covars[k],axis=0)
            #
            #     for t in xrange(2):
            #         print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(i,k,t,np.min(dxyzs[k][t],axis=0),
            #                                                                            np.max(dxyzs[k][t],axis=0),
            #                                                                            dxyzs[k][t].shape[0])
            #         assert vlens[k][t].shape[0]==cxyzs[k][t+1].shape[0]
            #         assert vlens_bgs[k][t].shape[0]==cxyzs[k][t+1].shape[0]
            #         assert np.sum(vlens[k][t])==cxyzs[k][t].shape[0]
            #         assert vlens_bgs[k][t][-1]+vlens[k][t][-1]==cxyzs[k][t].shape[0]
            #         assert np.max(vcidxs[k][t])==cxyzs[k][t+1].shape[0]-1
            #     print '////////////////////'
            #
            # colors=np.random.randint(0,256,[21,3])
            # print lbls[0].shape,colors[lbls[0],:].shape,cxyzs[0][0].shape
            # output_hierarchy(cxyzs[0][0],cxyzs[0][1],cxyzs[0][2],
            #                  np.ones([cxyzs[0][0].shape[0],3]),lbls[0],
            #                  vlens[0][0],vlens[0][1],dxyzs[0][0],dxyzs[0][1],0.15,0.5,colors=colors)
            # break

        class_names = get_scannet_class_names()
        for count, name in zip(class_count, class_names):
            print '{}:\t\t{}'.format(name, count)

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)
    finally:
        train_provider.close()
        test_provider.close()
Example #11
0
def test_model_read():
    """Read the pickled ModelNet40 hierarchy data and validate index tensors.

    For each batch, asserts internal consistency of the neighborhood index
    arrays at 3 levels and of the voxel grouping arrays at 2 levels.
    """
    from provider import Provider, default_unpack_feats_labels
    train_list = [
        'data/ModelNet40/ply_data_train{}.pkl'.format(i) for i in xrange(5)
    ]
    test_list = [
        'data/ModelNet40/ply_data_test{}.pkl'.format(i) for i in xrange(2)
    ]
    fn = lambda model, filename: read_pkl(filename)
    train_provider = Provider(train_list, 'train', 4, fn, max_cache=1)
    test_provider = Provider(test_list, 'test', 4, fn, max_cache=1)

    try:
        begin = time.time()
        i = 0
        for data in train_provider:
            print len(data[0])
            i += 1
            print 'cost {}s'.format(time.time() - begin)
            labels, cxyzs, dxyzs, covars, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens = \
                default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                # three hierarchy levels of neighborhood index tensors
                for t in xrange(3):
                    print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(cxyzs[k][t], axis=0),
                        np.max(cxyzs[k][t], axis=0), cxyzs[k][t].shape[0])
                    # center/neighbor index arrays must be pairwise aligned
                    assert cidxs[k][t].shape[0] == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert nidxs_lens[k][t].shape[0] == cxyzs[k][t].shape[0]
                    # segment lengths/offsets must cover the flat index array exactly
                    assert np.sum(nidxs_lens[k][t]) == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t][-1] + nidxs_lens[k][t][-1] == nidxs[
                        k][t].shape[0]
                    assert np.max(cidxs[k][t]) == cxyzs[k][t].shape[0] - 1
                    print 'lvl {} avg nsize {}'.format(
                        t, cidxs[k][t].shape[0] / float(cxyzs[k][t].shape[0]))

                # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
                # print np.min(covars[k],axis=0)
                # print np.max(covars[k],axis=0)

                # two coarsening levels of voxel grouping tensors
                for t in xrange(2):
                    print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(dxyzs[k][t], axis=0),
                        np.max(dxyzs[k][t], axis=0), dxyzs[k][t].shape[0])
                    # each coarse point owns a contiguous run of fine points
                    assert vlens[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert vlens_bgs[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert np.sum(vlens[k][t]) == cxyzs[k][t].shape[0]
                    assert vlens_bgs[k][t][-1] + vlens[k][t][-1] == cxyzs[k][
                        t].shape[0]
                    assert np.max(vcidxs[k][t]) == cxyzs[k][t + 1].shape[0] - 1
                print '////////////////////'

            # output_hierarchy(cxyzs[0][0],cxyzs[0][1],cxyzs[0][2],
            #                  np.ones([cxyzs[0][0].shape[0],3]),
            #                  np.ones([cxyzs[0][0].shape[0]],dtype=np.int32),
            #                  vlens[0][0],vlens[0][1],dxyzs[0][0],dxyzs[0][1],0.2,0.5)

        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
    finally:
        train_provider.close()
        test_provider.close()
Example #12
0
def test_model_hierarchy():
    """Read ModelNet40 h5 hierarchy data, validate indices, and dump output.

    Iterates the test provider only (the train provider is commented out),
    asserting index-tensor consistency and writing a visualization of the
    first model's hierarchy via `output_hierarchy`.
    """
    from provider import Provider, default_unpack_feats_labels
    train_list = [
        'data/ModelNet40/ply_data_train{}.h5'.format(i) for i in xrange(5)
    ]
    test_list = [
        'data/ModelNet40/ply_data_test{}.h5'.format(i) for i in xrange(2)
    ]

    # train_provider = Provider(train_list,'train',4,read_model_hierarchy,max_cache=1)
    test_provider = Provider(test_list[1:],
                             'test',
                             4,
                             read_model_hierarchy,
                             max_cache=1)

    # NOTE(review): prints the train list length although only the test
    # provider is iterated below.
    print len(train_list)
    try:
        begin = time.time()
        i = 0
        for data in test_provider:
            print data[0][0]
            i += 1
            print 'cost {}s'.format(time.time() - begin)
            labels, cxyzs, dxyzs, covars, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens = \
                default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                # three hierarchy levels of neighborhood index tensors
                for t in xrange(3):
                    print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(cxyzs[k][t], axis=0),
                        np.max(cxyzs[k][t], axis=0), cxyzs[k][t].shape[0])
                    # center/neighbor index arrays must be pairwise aligned
                    assert cidxs[k][t].shape[0] == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert nidxs_lens[k][t].shape[0] == cxyzs[k][t].shape[0]
                    # segment lengths/offsets must cover the flat index array exactly
                    assert np.sum(nidxs_lens[k][t]) == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t][-1] + nidxs_lens[k][t][-1] == nidxs[
                        k][t].shape[0]
                    assert np.max(cidxs[k][t]) == cxyzs[k][t].shape[0] - 1
                    print 'lvl {} avg nsize {}'.format(
                        t, cidxs[k][t].shape[0] / float(cxyzs[k][t].shape[0]))

                # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
                # print np.min(covars[k],axis=0)
                # print np.max(covars[k],axis=0)

                # two coarsening levels of voxel grouping tensors
                for t in xrange(2):
                    print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(dxyzs[k][t], axis=0),
                        np.max(dxyzs[k][t], axis=0), dxyzs[k][t].shape[0])
                    # each coarse point owns a contiguous run of fine points
                    assert vlens[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert vlens_bgs[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert np.sum(vlens[k][t]) == cxyzs[k][t].shape[0]
                    assert vlens_bgs[k][t][-1] + vlens[k][t][-1] == cxyzs[k][
                        t].shape[0]
                    assert np.max(vcidxs[k][t]) == cxyzs[k][t + 1].shape[0] - 1
                print '////////////////////'

            output_hierarchy(cxyzs[0][0], cxyzs[0][1], cxyzs[0][2],
                             np.ones([cxyzs[0][0].shape[0], 3]),
                             np.ones([cxyzs[0][0].shape[0]],
                                     dtype=np.int32), vlens[0][0], vlens[0][1],
                             dxyzs[0][0], dxyzs[0][1], 0.2, 0.5)

            # only visualize the first couple of batches
            if i > 1:
                break
        print 'batch_num {}'.format(i * 4)
        print 'test set cost {} s'.format(time.time() - begin)
        # begin = time.time()
        # i = 0
        # for data in train_provider:
        #     i+=1
        #     print data[0]
        #     if i%2500==0:
        #         print 'cost {} s'.format(time.time()-begin)
        #
        # print 'batch_num {}'.format(i * 4)
        # print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        # train_provider.close()
        test_provider.close()
Example #13
0
def test_data_iter_hierarchy():
    """Iterate hierarchical S3DIS blocks, validate indices, and visualize.

    Uses a truncated subset of the train/test lists, asserts index-tensor
    consistency at every level, and dumps a colored hierarchy of the first
    sub-batch via `output_hierarchy`.
    """
    from provider import Provider, default_unpack_feats_labels
    from draw_util import output_points, get_class_colors
    import time
    import random

    train_list, test_list = get_block_train_test_split_ds()
    # random.shuffle(train_list)
    train_list = [
        'data/S3DIS/room_block_10_10_ds0.03/' + fn for fn in train_list
    ]
    test_list = [
        'data/S3DIS/room_block_10_10_ds0.03/' + fn for fn in test_list
    ]
    # truncate both lists to keep the run short (Python 2 integer division)
    train_list = train_list[:251]
    test_list = test_list[:len(test_list) / 5]

    train_provider = Provider(train_list, 'train', 4, read_fn_hierarchy)
    test_provider = Provider(test_list, 'test', 4, read_fn_hierarchy)
    print len(train_list)
    try:
        # begin=time.time()
        # i=0
        # for data in test_provider:
        #     i+=1
        #     pass
        # print 'batch_num {}'.format(i*4)
        # print 'test set cost {} s'.format(time.time()-begin)
        begin = time.time()
        i = 0
        for data in train_provider:
            i += 1
            cxyzs, dxyzs, rgbs, covars, lbls, vlens, vlens_bgs, vcidxs, cidxs, nidxs, nidxs_bgs, nidxs_lens, block_mins = \
                default_unpack_feats_labels(data, 4)
            for k in xrange(4):
                # three hierarchy levels of neighborhood index tensors
                for t in xrange(3):
                    print 'batch {} data {} lvl {} cxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(cxyzs[k][t], axis=0),
                        np.max(cxyzs[k][t], axis=0), cxyzs[k][t].shape[0])
                    # center/neighbor index arrays must be pairwise aligned
                    assert cidxs[k][t].shape[0] == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t].shape[0] == cxyzs[k][t].shape[0]
                    assert nidxs_lens[k][t].shape[0] == cxyzs[k][t].shape[0]
                    # segment lengths/offsets must cover the flat index array exactly
                    assert np.sum(nidxs_lens[k][t]) == nidxs[k][t].shape[0]
                    assert nidxs_bgs[k][t][-1] + nidxs_lens[k][t][-1] == nidxs[
                        k][t].shape[0]
                    assert np.max(cidxs[k][t]) == cxyzs[k][t].shape[0] - 1
                    print 'lvl {} avg nsize {}'.format(
                        t, cidxs[k][t].shape[0] / float(cxyzs[k][t].shape[0]))

                print 'rgb min {} max {}'.format(np.min(rgbs[k], axis=0),
                                                 np.max(rgbs[k], axis=0))
                # print 'covars min {} max {}'.format(np.min(covars[k],axis=0),np.max(covars[k],axis=0))
                # print np.min(covars[k],axis=0)
                # print np.max(covars[k],axis=0)

                # two coarsening levels of voxel grouping tensors
                for t in xrange(2):
                    print 'batch {} data {} lvl {} dxyz min {} max {} ptnum {}'.format(
                        i, k, t, np.min(dxyzs[k][t], axis=0),
                        np.max(dxyzs[k][t], axis=0), dxyzs[k][t].shape[0])
                    # each coarse point owns a contiguous run of fine points
                    assert vlens[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert vlens_bgs[k][t].shape[0] == cxyzs[k][t + 1].shape[0]
                    assert np.sum(vlens[k][t]) == cxyzs[k][t].shape[0]
                    assert vlens_bgs[k][t][-1] + vlens[k][t][-1] == cxyzs[k][
                        t].shape[0]
                    assert np.max(vcidxs[k][t]) == cxyzs[k][t + 1].shape[0] - 1
                print '////////////////////'

            # NOTE(review): rgbs appear normalized to roughly [-1, 1] here
            # (rescaled back to [1, 255] for display) -- confirm against the
            # preprocessing code.
            output_hierarchy(cxyzs[0][0], cxyzs[0][1], cxyzs[0][2],
                             rgbs[0] * 127 + 128, lbls[0], vlens[0][0],
                             vlens[0][1], dxyzs[0][0], dxyzs[0][1], 0.2, 0.5)

            # only visualize the first couple of batches
            if i > 1:
                break

        print 'batch_num {}'.format(i * 4)
        print 'train set cost {} s'.format(time.time() - begin)

    finally:
        print 'done'
        train_provider.close()
        test_provider.close()