Example #1
def compute_weight():
    from io_util import get_block_train_test_split, get_class_names
    import numpy as np
    import matplotlib.pyplot as plt  # needed for the distribution plot below
    train_list, test_list = get_block_train_test_split()

    test_list = ['data/S3DIS/sampled_test/' + fs for fs in test_list]
    train_list = ['data/S3DIS/sampled_train/' + fs for fs in train_list]
    test_list += train_list
    labels = []
    for fs in test_list:
        labels += read_pkl(fs)[4]  # read_pkl is assumed to live in io_util; field 4 holds the per-point labels
    labels = np.concatenate(labels, axis=0)

    labelweights, _ = np.histogram(labels, range(14))  # counts over the 13 S3DIS classes (bin edges 0..13)
    plt.figure(0, figsize=(10, 8), dpi=80)
    plt.bar(np.arange(len(labelweights)),
            labelweights,
            tick_label=get_class_names())
    plt.savefig('s3dis_dist.png')
    plt.close()

    print labelweights
    # normalize counts to class frequencies, then down-weight frequent classes:
    # the weight 1 / log(1.2 + freq) grows as a class becomes rarer
    labelweights = labelweights.astype(np.float32)
    labelweights = labelweights / np.sum(labelweights)
    labelweights = 1 / np.log(1.2 + labelweights)

    print labelweights
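
# For intuition, a small self-contained check of the 1 / log(1.2 + freq)
# re-weighting used in compute_weight above; the class frequencies here are
# made up for illustration, not taken from S3DIS.
import numpy as np

freqs = np.array([0.5, 0.3, 0.15, 0.05], dtype=np.float32)  # hypothetical, sums to 1
weights = 1 / np.log(1.2 + freqs)
print(weights)  # approx [1.88 2.47 3.33 4.48]: rarer classes get larger weights
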
def test_one_epoch(ops, pls, sess, testset, epoch_num, feed_dict):
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    for i, feed_in in enumerate(testset):
        fill_feed_dict(feed_in, feed_dict, pls, FLAGS.num_gpus)
        feed_dict[pls['is_training']] = False

        loss, batch_labels, preds = sess.run(
            [ops['total_loss'], ops['labels'], ops['preds']], feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)
        all_labels.append(batch_labels)

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), feats_log)

    names = get_class_names()
    for i in xrange(len(names)):
        log_str('{} iou {} acc {}'.format(names[i], iou[i], acc[i]), feats_log,
                False)
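
# compute_iou is defined elsewhere in the repo; the sketch below only illustrates
# a plausible reading of its six return values (per-class IoU, mean IoU, overall
# IoU, per-class accuracy, mean accuracy, overall accuracy), with num_classes=13
# assumed for S3DIS. compute_iou_sketch is a made-up name, not the repo's code.
import numpy as np

def compute_iou_sketch(labels, preds, num_classes=13):
    tp = np.zeros(num_classes)
    fp = np.zeros(num_classes)
    fn = np.zeros(num_classes)
    for c in range(num_classes):
        tp[c] = np.sum((labels == c) & (preds == c))
        fp[c] = np.sum((labels != c) & (preds == c))
        fn[c] = np.sum((labels == c) & (preds != c))
    iou = tp / np.maximum(tp + fp + fn, 1)          # per-class IoU
    acc = tp / np.maximum(tp + fn, 1)               # per-class accuracy (recall)
    oiou = tp.sum() / max((tp + fp + fn).sum(), 1)  # micro-averaged overall IoU
    oacc = tp.sum() / max(float(len(labels)), 1.0)  # overall point accuracy
    return iou, iou.mean(), oiou, acc, acc.mean(), oacc
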
def test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict,
                   summary_writer=None):
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    for i, feed_in in enumerate(testset):
        fill_feed_dict(feed_in, feed_dict, pls, FLAGS.num_gpus)
        feed_dict[pls['is_training']] = False

        loss, batch_labels, preds = sess.run(
            [ops['total_loss'], ops['labels'], ops['preds']], feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)
        all_labels.append(batch_labels)

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), FLAGS.log_file)

    if not FLAGS.eval:
        # during training: snapshot the model after every test pass
        checkpoint_path = os.path.join(FLAGS.save_dir,
                                       'model{}.ckpt'.format(epoch_num))
        saver.save(sess, checkpoint_path)
    else:
        # during evaluation: report per-class IoU and accuracy
        names = get_class_names()
        for i in xrange(len(names)):
            print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])
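
# The FLAGS.eval branch assumes the session was restored from a checkpoint written
# earlier by saver.save. A minimal TF1-style sketch of that save/restore round trip;
# the stand-in variable w and the path 'model30.ckpt' are made up for illustration.
import tensorflow as tf

w = tf.get_variable('w', shape=[3], initializer=tf.zeros_initializer())  # stand-in variable
saver = tf.train.Saver(max_to_keep=500)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ckpt_path = 'model30.ckpt'      # hypothetical epoch-30 snapshot name
    saver.save(sess, ckpt_path)     # what the training branch above does each epoch
    saver.restore(sess, ckpt_path)  # what an eval run does before calling test_one_epoch
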
Example #4
def sort_bar_classes():
    fns = ['sort_' + fn + '.log' for fn in feats_sort_name]

    for cn in get_class_names():
        mean_iou = []
        for fi, fn in enumerate(fns):
            if fi == 0:
                # the first entry is read from a differently named log file
                fn = 'feats_stage2_pool.log'
            mious = read_iou_class('pointnet_10_concat_pre_compare/' + fn, cn)
            # average this class's IoU over epochs 10 and later
            mean_iou.append(np.mean(mious[10:]))

        plt.figure(0, figsize=(16, 12), dpi=80)
        plt.bar(
            np.arange(len(mean_iou)),
            mean_iou,
        )
        plt.xticks(np.arange(len(mean_iou)), feats_sort_name)
        plt.legend()
        plt.savefig(
            'test_result/sort_feats_compare_bar_mean_{}.png'.format(cn))
        plt.close()
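
# read_iou_class is defined elsewhere; given that the evaluation code in the other
# examples logs per-class lines of the form '<class> iou <value> acc <value>', a
# plausible minimal parser looks like the sketch below (the name
# read_iou_class_sketch and its exact behavior are assumptions, not repo code).
def read_iou_class_sketch(log_file, class_name):
    ious = []
    with open(log_file, 'r') as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) >= 5 and parts[0] == class_name and parts[1] == 'iou':
                ious.append(float(parts[2]))  # one IoU value per logged epoch
    return ious
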
Example #5
def test_one_epoch(ops,
                   pls,
                   sess,
                   saver,
                   testset,
                   epoch_num,
                   feed_dict,
                   summary_writer=None):
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    colors = get_class_colors()
    weights = get_class_loss_weights()
    for i, feed_in in enumerate(testset):
        # unpack one batch into its per-GPU pieces
        xyzs, rgbs, covars, lbls, nidxs, nidxs_lens, nidxs_bgs, cidxs, block_bgs, block_lens = \
            default_unpack_feats_labels(feed_in, FLAGS.num_gpus)

        for k in xrange(FLAGS.num_gpus):
            feed_dict[pls['xyzs'][k]] = xyzs[k]
            feed_dict[pls['rgbs'][k]] = rgbs[k]
            feed_dict[pls['covars'][k]] = covars[k]
            feed_dict[pls['lbls'][k]] = lbls[k]
            feed_dict[pls['nidxs'][k]] = nidxs[k]
            feed_dict[pls['nidxs_lens'][k]] = nidxs_lens[k]
            feed_dict[pls['nidxs_bgs'][k]] = nidxs_bgs[k]
            feed_dict[pls['cidxs'][k]] = cidxs[k]
            all_labels.append(lbls[k])
            if FLAGS.weighted_loss:
                # per-point loss weights looked up from the per-class weight table
                feed_dict[pls['weights'][k]] = weights[lbls[k]]

        feed_dict[pls['is_training']] = False

        if FLAGS.eval and FLAGS.num_monitor:
            loss, preds, summary = sess.run(
                [ops['total_loss'], ops['preds'], ops['summary']], feed_dict)
            summary_writer.add_summary(summary)
        else:
            loss, preds = sess.run([ops['total_loss'], ops['preds']],
                                   feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)

        # dump ground-truth and predicted point clouds for visual inspection
        if FLAGS.eval and FLAGS.eval_output:
            cur = 0
            for k in xrange(FLAGS.num_gpus):
                # undo the block normalization to recover the original coordinates
                restore_xyzs = xyzs[k]
                restore_xyzs[:, :2] = restore_xyzs[:, :2] * 1.5 + 1.5
                restore_xyzs[:, 2] += 1.0
                restore_xyzs[:, 2] *= block_lens[k][2] / 2
                restore_xyzs += block_bgs[k]
                output_points('test_result/{}_{}_true.txt'.format(i, k),
                              restore_xyzs, colors[lbls[k], :])
                output_points('test_result/{}_{}_pred.txt'.format(i, k),
                              restore_xyzs,
                              colors[preds[cur:cur + len(xyzs[k])], :])
                cur += len(xyzs[k])

        # when only monitoring summaries, stop after the first few batches
        if FLAGS.eval and FLAGS.num_monitor and i >= 2:
            break

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), FLAGS.log_file)

    if not FLAGS.eval:
        checkpoint_path = os.path.join(FLAGS.save_dir,
                                       'model{}.ckpt'.format(epoch_num))
        saver.save(sess, checkpoint_path)
    else:
        names = get_class_names()
        for i in xrange(len(names)):
            print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])
        # --- fragment of a separate whole-scene evaluation routine (not part of
        # test_one_epoch above): inside its per-scene loop, acc_val accumulates
        # per-class fp/tp/fn counts, which val2iou turns into IoU/accuracy below ---
        fp, tp, fn = acc_val(labels.flatten(), qpreds.flatten(), fp, tp, fn)

        # if fi<=5:
        #     idxs=libPointUtil.gridDownsampleGPU(sxyzs,0.01,False)
        #     sxyzs=sxyzs[idxs]
        #     spreds=spreds[idxs]
        #     slbls=slbls[idxs]
        #     output_points('test_result/{}spreds.txt'.format(fi),sxyzs,colors[spreds,:])
        #     output_points('test_result/{}slabel.txt'.format(fi),sxyzs,colors[slbls,:])
        #
        #     idxs=libPointUtil.gridDownsampleGPU(qxyzs,0.01,False)
        #     qxyzs=qxyzs[idxs]
        #     qpreds=qpreds[idxs]
        #     labels=labels[idxs]
        #     points=points[idxs]
        #     output_points('test_result/{}qpreds.txt'.format(fi),qxyzs,colors[qpreds,:])
        #     output_points('test_result/{}qlabel.txt'.format(fi),qxyzs,colors[labels.flatten(),:])
        #     output_points('test_result/{}qcolor.txt'.format(fi),points)

        # iou, miou, oiou, acc, macc, oacc=val2iou(fp,tp,fn)
        # print 'mean iou {:.5} overall iou {:5} \nmean acc {:5} overall acc {:5}'.format(miou, oiou, macc, oacc)
        # break

    iou, miou, oiou, acc, macc, oacc = val2iou(fp, tp, fn)
    print 'mean iou {:.5} overall iou {:5} \nmean acc {:5} overall acc {:5}'.format(
        miou, oiou, macc, oacc)

    names = get_class_names()
    for i in xrange(len(names)):
        print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])
def test_one_epoch(ops,
                   pls,
                   sess,
                   saver,
                   testset,
                   epoch_num,
                   feed_dict,
                   summary_writer=None):
    begin_time = time.time()
    test_loss = []
    all_preds, all_labels = [], []
    colors = get_class_colors()
    # incorrect_rate=[]
    for i, feed_in in enumerate(testset):
        # fill_feed_dict populates the per-GPU placeholders and returns the batch
        # labels plus each block's minimum corner (used below to restore coordinates)
        _, batch_labels, block_mins = fill_feed_dict(feed_in, feed_dict, pls,
                                                     FLAGS.num_gpus)

        feed_dict[pls['is_training']] = False
        all_labels += batch_labels

        loss, preds = sess.run([ops['total_loss'], ops['preds']], feed_dict)
        test_loss.append(loss)
        all_preds.append(preds)

        # cur_labels=np.concatenate(batch_labels,axis=0)
        # cur_preds=preds
        # cur_rate=np.sum(cur_labels != cur_preds) / float(len(cur_preds))
        # incorrect_rate.append(cur_rate)

        # dump ground-truth and predicted point clouds for visual inspection
        if FLAGS.eval and FLAGS.eval_output:
            cur = 0
            for k in xrange(FLAGS.num_gpus):
                xyzs = feed_dict[pls['cxyzs'][k][0]]
                lbls = feed_dict[pls['lbls'][k]]
                xyzs += block_mins[k]  # shift back to the block's original position
                output_points('test_result/{}_{}_true.txt'.format(i, k), xyzs,
                              colors[lbls, :])
                output_points('test_result/{}_{}_pred.txt'.format(i, k), xyzs,
                              colors[preds[cur:cur + len(xyzs)], :])
                cur += len(xyzs)

    all_preds = np.concatenate(all_preds, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    test_loss = np.mean(np.asarray(test_loss))

    iou, miou, oiou, acc, macc, oacc = compute_iou(all_labels, all_preds)

    # print np.histogram(incorrect_rate,30)

    log_str(
        'mean iou {:.5} overall iou {:5} loss {:5} \n mean acc {:5} overall acc {:5} cost {:3} s'
        .format(miou, oiou, test_loss, macc, oacc,
                time.time() - begin_time), FLAGS.log_file)

    if not FLAGS.eval:
        checkpoint_path = os.path.join(FLAGS.save_dir,
                                       'model{}.ckpt'.format(epoch_num))
        saver.save(sess, checkpoint_path)
    else:
        names = get_class_names()
        for i in xrange(len(names)):
            print '{} iou {} acc {}'.format(names[i], iou[i], acc[i])
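
# A hedged sketch of how test_one_epoch above is typically driven from a training
# loop. trainset, train_one_epoch and FLAGS.train_epoch_num are hypothetical
# stand-ins, not names taken from this repo.
def train(sess, ops, pls, saver, trainset, testset):
    feed_dict = {}
    for epoch_num in xrange(FLAGS.train_epoch_num):
        train_one_epoch(ops, pls, sess, trainset, epoch_num, feed_dict)  # hypothetical
        # evaluate after every epoch; when FLAGS.eval is False this also checkpoints
        test_one_epoch(ops, pls, sess, saver, testset, epoch_num, feed_dict)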