Example #1
def get_hard_samples(sess, ops):
    is_training = True
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE
    hard_neg_idxs = []
    # test on training set
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data, batch_cls_label, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_feature_vec = \
            get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['features_pl']: batch_feature_vec,
            ops['cls_label_pl']: batch_cls_label,
            ops['is_training_pl']: is_training,
        }
        cls_logits_val = sess.run(ops['cls_logits'], feed_dict=feed_dict)
        cls_preds_val = np.argmax(cls_logits_val, 1)
        incorrect = cls_preds_val != batch_cls_label
        false_positive = np.logical_and(incorrect, batch_cls_label == 3)
        for i, sample_idx in enumerate(range(start_idx, end_idx)):
            # if false_positive[i]:
            if incorrect[i]:
                hard_neg_idxs.append(sample_idx)
    log_string("Find {0} hard negative samples".format(len(hard_neg_idxs)))
    return hard_neg_idxs
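
A minimal usage sketch (not part of the original script): the indices returned by get_hard_samples() could be oversampled when building the index list for the next training epoch. build_epoch_idxs() and its repeat factor are hypothetical names.

import numpy as np

def build_epoch_idxs(train_len, hard_neg_idxs, repeat=2):
    # train_len stands in for len(TRAIN_DATASET); repeat is an assumed oversampling factor.
    idxs = np.arange(train_len)
    if len(hard_neg_idxs) > 0:
        idxs = np.concatenate([idxs, np.repeat(np.asarray(hard_neg_idxs), repeat)])
    np.random.shuffle(idxs)
    return idxs
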
Example #2
def main_batch_from_rgb_detection(output_filename, result_dir=None):
    ps_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    print(len(TEST_DATASET))
    raw_input()
    batch_size = 32
    num_batches = int((len(TEST_DATASET)+batch_size-1)/batch_size)
    
    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, NUM_CLASS))
    sess, ops = get_model(batch_size=batch_size, num_point=NUM_POINT)
    for batch_idx in range(num_batches):
        print(batch_idx)
        start_idx = batch_idx * batch_size
        end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)
        cur_batch_size = end_idx - start_idx

        batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx, NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)
        batch_data_to_feed[0:cur_batch_size,...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec
        batch_output, batch_center_pred, batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data_to_feed, batch_one_hot_to_feed,
                batch_size=batch_size)
        print(batch_hclass_pred.shape, batch_hres_pred.shape)
        print(batch_sclass_pred.shape, batch_sres_pred.shape)

        for i in range(cur_batch_size):
            ps_list.append(batch_data[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            #score_list.append(batch_scores[i] + np.log(batch_rgb_prob[i])) # Combine 3D BOX score and 2D RGB detection score
            score_list.append(batch_rgb_prob[i]) # 2D RGB detection score

    if FLAGS.dump_result:
        save_zipped_pickle([ps_list, segp_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list], output_filename) 

    # Write detection results for KITTI evaluation
    print(len(ps_list))
    raw_input()
    write_detection_results(result_dir, TEST_DATASET.id_list, TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list)
    # Make sure for each frame (no matter if we have a measurement for that frame), there is a TXT file
    output_dir = os.path.join(result_dir, 'data')
    to_fill_filename_list = [line.rstrip()+'.txt' for line in open(FLAGS.idx_path)]
    fill_files(output_dir, to_fill_filename_list)
Example #3
def test(output_filename, result_dir=None):
    ''' Test frustum pointnets with GT 2D boxes.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    total_time = 0.0

    ps_list = []
    seg_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = len(TEST_DATASET) // batch_size

    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    correct_cnt = 0
    profile_list = []
    for batch_idx in range(RUN_SIZE):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx + 1) * batch_size

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        print("batch_data: ", batch_data.shape)
        print("batch_one_hot_vec: ", batch_one_hot_vec.shape)

        #batch_output, batch_center_pred, \
        #batch_hclass_pred, batch_hres_pred, \
        #batch_sclass_pred, batch_sres_pred, batch_scores = \
        t1 = inference(sess,
                       ops,
                       batch_data,
                       batch_one_hot_vec,
                       batch_size=batch_size)

        if (batch_idx < 5): continue

        # total_time += (end_time - start_time)
        total_time += t1

    print("Avg time: ", total_time / RUN_SIZE)
Example #4
def eval_one_epoch(sess, ops, test_writer, tracks=False, lstm_params=None):
    ''' Simple evaluation for one epoch on the frustum dataset.
    ops is a dict mapping from string to tf ops.
    '''
    global EPOCH_CNT
    is_training = False
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0
    iou3d_correct_cnt_05 = 0
    # E: This is necessary to collect features of batches before the evaluation
    if tracks:
        for batch_idx in range(int(num_batches)):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            # E: Get also batch_indices which shows the (world_id,frame_id,track_id) of the objects in the batch
            # E: Batch indices are valid (non-empty) only if the tracks flag is True
            batch_data, batch_label, batch_center, \
            batch_hclass, batch_hres, \
            batch_sclass, batch_sres, \
            batch_rot_angle, batch_one_hot_vec, batch_indices = \
                get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                          NUM_POINT, NUM_CHANNEL,tracks=tracks)

            # Emec added the feature line
            # E: Get the features at the prev time steps of the objects in the batch
            batch_feat_lstm = get_batch_features(
                TEST_DATASET.feature_dict,
                batch_wft=batch_indices,
                tau=lstm_params['tau'],
                feat_len=lstm_params['feat_vec_len'],
                rev_order=True)
            # E: Get the number of tracks at the tau previous time steps for each object in
            # the batch, i.e. how many of the tau-1 frames before the object's current frame
            # contain the same object with the same track id
            batch_seq_len = batch_track_num(
                feature_dict=TEST_DATASET.feature_dict, wfts=batch_indices)

            feed_dict = {
                ops['pointclouds_pl']: batch_data,
                ops['one_hot_vec_pl']: batch_one_hot_vec,
                ops['labels_pl']: batch_label,
                ops['centers_pl']: batch_center,
                ops['heading_class_label_pl']: batch_hclass,
                ops['heading_residual_label_pl']: batch_hres,
                ops['size_class_label_pl']: batch_sclass,
                ops['size_residual_label_pl']: batch_sres,
                ops['is_training_pl']: is_training,
                ops['end_points']['lstm_layer']['feat_input']: batch_feat_lstm,
                ops['end_points']['lstm_layer']['pf_seq_len']: batch_seq_len
            }
            '''
            summary, step, loss_val, logits_val, iou2ds, iou3ds, box_est_feature_vec = \
                sess.run([ops['merged'], ops['step'],
                          ops['loss'], ops['logits'],
                          ops['end_points']['iou2ds'], ops['end_points']['iou3ds'],
                          ops['end_points']['box_est_feature_vec']],
                         feed_dict=feed_dict)
            '''
            box_est_feature_vec = \
                sess.run(ops['end_points']['box_est_feature_vec'],
                         feed_dict=feed_dict)

            update_batch_features(feature_dict=TEST_DATASET.feature_dict,
                                  batch_wft=batch_indices,
                                  batch_feat_vecs=box_est_feature_vec)

    # Simple evaluation with batches
    for batch_idx in range(int(num_batches)):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        # E: Get also batch_indices which shows the (world_id,frame_id,track_id) of the objects in the batch
        # E: Batch indices are valid (non-empty) only if the tracks flag is True
        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec, batch_indices = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                      NUM_POINT, NUM_CHANNEL,tracks=tracks)

        if tracks:
            # Emec added the feature line
            # E: Get the features at the prev time steps of the objects in the batch
            batch_feat_lstm = get_batch_features(
                TEST_DATASET.feature_dict,
                batch_wft=batch_indices,
                tau=lstm_params['tau'],
                feat_len=lstm_params['feat_vec_len'],
                rev_order=True)
            # E: Get the number of tracks at the tau previous time steps for each object in
            # the batch, i.e. how many of the tau-1 frames before the object's current frame
            # contain the same object with the same track id
            batch_seq_len = batch_track_num(
                feature_dict=TEST_DATASET.feature_dict, wfts=batch_indices)

            feed_dict = {
                ops['pointclouds_pl']: batch_data,
                ops['one_hot_vec_pl']: batch_one_hot_vec,
                ops['labels_pl']: batch_label,
                ops['centers_pl']: batch_center,
                ops['heading_class_label_pl']: batch_hclass,
                ops['heading_residual_label_pl']: batch_hres,
                ops['size_class_label_pl']: batch_sclass,
                ops['size_residual_label_pl']: batch_sres,
                ops['is_training_pl']: is_training,
                ops['end_points']['lstm_layer']['feat_input']: batch_feat_lstm,
                ops['end_points']['lstm_layer']['pf_seq_len']: batch_seq_len
            }

            summary, step, loss_val, logits_val, iou2ds, iou3ds, box_est_feature_vec = \
                sess.run([ops['merged'], ops['step'],
                          ops['loss'], ops['logits'],
                          ops['end_points']['iou2ds'], ops['end_points']['iou3ds'],
                          ops['end_points']['box_est_feature_vec']],
                         feed_dict=feed_dict)

            update_batch_features(feature_dict=TEST_DATASET.feature_dict,
                                  batch_wft=batch_indices,
                                  batch_feat_vecs=box_est_feature_vec)
        else:
            feed_dict = {
                ops['pointclouds_pl']: batch_data,
                ops['one_hot_vec_pl']: batch_one_hot_vec,
                ops['labels_pl']: batch_label,
                ops['centers_pl']: batch_center,
                ops['heading_class_label_pl']: batch_hclass,
                ops['heading_residual_label_pl']: batch_hres,
                ops['size_class_label_pl']: batch_sclass,
                ops['size_residual_label_pl']: batch_sres,
                ops['is_training_pl']: is_training
            }

            summary, step, loss_val, logits_val, iou2ds, iou3ds = \
                sess.run([ops['merged'], ops['step'],
                          ops['loss'], ops['logits'],
                          ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                         feed_dict=feed_dict)
        test_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label == l)
            total_correct_class[l] += (np.sum((preds_val == l)
                                              & (batch_label == l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.nansum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds >= 0.7)
        iou3d_correct_cnt_05 += np.sum(iou3ds >= 0.5)
        for i in range(BATCH_SIZE):
            segp = preds_val[i, :]
            segl = batch_label[i, :]
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                    part_ious[l] = 1.0  # class not present
                else:
                    part_ious[l] = np.sum((segl == l) & (segp == l)) / \
                                   float(np.sum((segl == l) | (segp == l)))

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval segmentation accuracy: %f' % \
               (total_correct / float(total_seen)))
    log_string('eval segmentation avg class acc: %f' % \
               (np.mean(np.array(total_correct_class) / \
                        np.array(total_seen_class, dtype=float))))
    log_string('eval box IoU (ground/3D): %f / %f' % \
               (iou2ds_sum / float(num_batches * BATCH_SIZE), iou3ds_sum / \
                float(num_batches * BATCH_SIZE)))
    log_string('eval box estimation accuracy (IoU=0.7): %f' % \
               (float(iou3d_correct_cnt) / float(num_batches * BATCH_SIZE)))
    log_string('eval box estimation accuracy (IoU=0.5): %f' % \
               (float(iou3d_correct_cnt_05) / float(num_batches * BATCH_SIZE)))

    EPOCH_CNT += 1
    eval_box_est_acc = float(iou3d_correct_cnt) / float(
        num_batches * BATCH_SIZE)
    return eval_box_est_acc
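
A toy illustration (not the project's get_batch_features/update_batch_features) of the per-track feature cache the two-pass loop above relies on: in the first pass, box-estimation features are stored under (world_id, frame_id, track_id) keys; in the second pass, the features of the tau previous frames of the same track are looked up, padding with zeros where no entry exists.

import numpy as np

def update_cache(feature_dict, batch_wft, batch_feat_vecs):
    # First pass: remember the feature vector of every (world, frame, track) key.
    for key, vec in zip(batch_wft, batch_feat_vecs):
        feature_dict[tuple(key)] = vec

def gather_prev_features(feature_dict, key, tau, feat_len):
    # Second pass: collect features from the tau previous frames of the same track.
    world_id, frame_id, track_id = key
    return np.stack([feature_dict.get((world_id, frame_id - t, track_id),
                                      np.zeros(feat_len))
                     for t in range(1, tau + 1)])
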
Example #5
def test_from_rgb_detection(output_filename, result_dir=None):
    ''' Test frustum pointnets with 2D boxes from an RGB detector.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    ps_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []
    onehot_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    print(len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = int((len(TEST_DATASET)+batch_size-1)/batch_size)
    
    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, 3))
    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    for batch_idx in range(num_batches):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)
        cur_batch_size = end_idx - start_idx

        batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)
        batch_data_to_feed[0:cur_batch_size,...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec

        # Run one batch inference
        batch_output, batch_center_pred, batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data_to_feed, batch_one_hot_to_feed,
                batch_size=batch_size)

        for i in range(cur_batch_size):
            ps_list.append(batch_data[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            #score_list.append(batch_scores[i])
            score_list.append(batch_rgb_prob[i]) # 2D RGB detection score
            onehot_list.append(batch_one_hot_vec[i])

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)
            pickle.dump(onehot_list, fp)

    # Write detection results for KITTI evaluation
    print('Number of point clouds: %d' % (len(ps_list)))
    write_detection_results(result_dir, TEST_DATASET.id_list,
        TEST_DATASET.type_list, TEST_DATASET.box2d_list,
        center_list, heading_cls_list, heading_res_list,
        size_cls_list, size_res_list, rot_angle_list, score_list)
    # Make sure for each frame (no matter if we have a measurement for that frame),
    # there is a TXT file
    output_dir = os.path.join(result_dir, 'data')
    if FLAGS.idx_path is not None:
        to_fill_filename_list = [line.rstrip()+'.txt' \
            for line in open(FLAGS.idx_path)]
        fill_files(output_dir, to_fill_filename_list)
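
Because the dump block above issues ten consecutive pickle.dump() calls on a single file handle, reading the results back requires the same number of pickle.load() calls in the same order; a minimal sketch:

import pickle

def load_dumped_lists(filename, n_lists=10):
    # Each pickle.load() returns one of the lists written by the dump block above.
    lists = []
    with open(filename, 'rb') as fp:
        for _ in range(n_lists):
            lists.append(pickle.load(fp))
    return lists
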
Example #6
def train_one_epoch(sess, ops, train_writer):
    ''' Training for one epoch on the frustum dataset.
    ops is a dict mapping from string to tf ops
    '''
    is_training = True
    log_string(str(datetime.now()))

    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Training with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['one_hot_vec_pl']: batch_one_hot_vec,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training,
        }

        summary, step, _, loss_val, logits_val, centers_pred_val, \
        iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['logits'], ops['centers_pred'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)

        train_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        print("iou2ds", iou2ds)
        print("iou3ds", iou3ds)
        print("iou2ds_sum", iou2ds_sum)
        print("iou3ds_sum", iou3ds_sum)
        print("centers_pred_val", centers_pred_val)
        print("box center", batch_center)
        print("logits_val", logits_val)
        iou3d_correct_cnt += np.sum(iou3ds >= 0.5)

        if (batch_idx + 1) % 10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx + 1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('segmentation accuracy: %f' % \
                (total_correct / float(total_seen)))
            log_string('box IoU (ground/3D): %f / %f' % \
                (iou2ds_sum / float(BATCH_SIZE*10), iou3ds_sum / float(BATCH_SIZE*10)))
            log_string('box estimation accuracy (IoU=0.5): %f' % \
                (float(iou3d_correct_cnt)/float(BATCH_SIZE*10)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
            iou3d_correct_cnt = 0
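
A hypothetical driver loop for the two epoch functions (MAX_EPOCH is an assumed hyperparameter, not taken from this file); sess, ops and the summary writers come from the surrounding training script:

MAX_EPOCH = 200  # assumed value

for epoch in range(MAX_EPOCH):
    log_string('**** EPOCH %03d ****' % (epoch))
    train_one_epoch(sess, ops, train_writer)
    eval_one_epoch(sess, ops, test_writer)
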
Example #7
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    global EPOCH_CNT
    is_training = False
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        batch_data, batch_label, batch_center, batch_hclass, batch_hres, batch_sclass, batch_sres, batch_rot_angle, batch_one_hot_vec = get_batch(
            TEST_DATASET, test_idxs, start_idx, end_idx, NUM_POINT,
            NUM_CHANNEL)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['one_hot_vec_pl']: batch_one_hot_vec,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training
        }
        summary, step, loss_val, logits_val, iou2ds, iou3ds = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'], ops['logits'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']
            ],
            feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label == l)
            total_correct_class[l] += (np.sum((preds_val == l)
                                              & (batch_label == l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)

        for i in range(BATCH_SIZE):
            segp = preds_val[i, :]
            segl = batch_label[i, :]
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl == l) == 0) and (
                        np.sum(segp == l)
                        == 0):  # part is not present, no prediction either
                    part_ious[l] = 1.0
                else:
                    part_ious[l] = np.sum((segl == l) & (segp == l)) / float(
                        np.sum((segl == l) | (segp == l)))
            shape_ious.append(np.mean(part_ious))

        #if batch_idx == 100:
        #    break

    print(len(shape_ious))
    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=float))))
    log_string('eval mIoU: %f' % (np.mean(shape_ious)))
    log_string('eval box IoU (ground/3D): %f / %f' %
               (iou2ds_sum / float(num_batches * BATCH_SIZE),
                iou3ds_sum / float(num_batches * BATCH_SIZE)))

    EPOCH_CNT += 1
    return loss_sum / float(num_batches)
Example #8
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    global EPOCH_CNT
    is_training = False
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET)//BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    shape_ious = []
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
    
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        batch_data, batch_label, batch_center, batch_hclass, batch_hres, batch_sclass, batch_sres, batch_rot_angle, batch_one_hot_vec = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx, NUM_POINT, NUM_CHANNEL)

        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training}
        summary, step, loss_val, logits_val, iou2ds, iou3ds = sess.run([ops['merged'], ops['step'],
            ops['loss'], ops['logits'], 
            ops['end_points']['iou2ds'], ops['end_points']['iou3ds']], feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label==l)
            total_correct_class[l] += (np.sum((preds_val==l) & (batch_label==l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)

        for i in range(BATCH_SIZE):
            segp = preds_val[i,:]
            segl = batch_label[i,:] 
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): # part is not present, no prediction either
                    part_ious[l] = 1.0
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / float(np.sum((segl==l) | (segp==l)))
            shape_ious.append(np.mean(part_ious))
        
        #if batch_idx == 100:
        #    break

    print(len(shape_ious))
    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=float))))
    log_string('eval mIoU: %f' % (np.mean(shape_ious)))
    log_string('eval box IoU (ground/3D): %f / %f' % (iou2ds_sum / float(num_batches*BATCH_SIZE), iou3ds_sum / float(num_batches*BATCH_SIZE)))
         
    EPOCH_CNT += 1
    return loss_sum/float(num_batches)
Example #9
def eval_one_epoch(sess, ops, test_writer):
    ''' Simple evaluation for one epoch on the frustum dataset.
    ops is a dict mapping from string to tf ops.
    '''
    global EPOCH_CNT
    is_training = False
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    # To collect statistics
    total_cls_correct = 0
    total_cls_seen = 0
    total_seen_class = [0 for _ in range(NUM_OBJ_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_OBJ_CLASSES)]
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_obj_sample = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Simple evaluation with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data, batch_cls_label, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_feature_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)
        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['features_pl']: batch_feature_vec,
            ops['cls_label_pl']: batch_cls_label,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training
        }

        summary, step, loss_val, loss_endpoints, cls_logits_val, logits_val, iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['loss_endpoints'], ops['cls_logits'], ops['logits'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        if np.isnan(loss_val):
            print('nan loss in batch: ', batch_idx)
            print('loss_endpoints: ', loss_endpoints)

        # classification acc
        cls_preds_val = np.argmax(cls_logits_val, 1)
        cls_correct = np.sum(cls_preds_val == batch_cls_label)
        total_cls_correct += cls_correct
        total_cls_seen += BATCH_SIZE
        for l in range(NUM_OBJ_CLASSES):
            total_seen_class[l] += np.sum(batch_cls_label == l)
            total_correct_class[l] += (np.sum((cls_preds_val == l)
                                              & (batch_cls_label == l)))

        # only calculate seg acc and regression performance with object labels
        obj_mask = batch_cls_label < g_type2onehotclass['NonObject']
        obj_sample_num = np.sum(obj_mask)
        total_obj_sample += obj_sample_num
        # segmentation acc
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val[obj_mask] == batch_label[obj_mask])
        total_correct += correct
        total_seen += (obj_sample_num * NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds[obj_mask])
        iou3ds_sum += np.sum(iou3ds[obj_mask])
        iou3d_correct_cnt += np.sum(iou3ds[obj_mask] >= 0.7)

        # for i in range(BATCH_SIZE):
        #     segp = preds_val[i,:]
        #     segl = batch_label[i,:]
        #     part_ious = [0.0 for _ in range(NUM_SEG_CLASSES)]
        #     for l in range(NUM_SEG_CLASSES):
        #         if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0):
        #             part_ious[l] = 1.0 # class not present
        #         else:
        #             part_ious[l] = np.sum((segl==l) & (segp==l)) / \
        #                 float(np.sum((segl==l) | (segp==l)))

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('classification accuracy: %f' % \
        (total_cls_correct / float(total_cls_seen)))
    log_string('eval segmentation accuracy: %f'% \
        (total_correct / float(total_seen)))
    avg_cls_acc = np.mean(np.array(total_correct_class) / \
        np.array(total_seen_class,dtype=float))
    log_string('eval classification avg class acc: %f' % avg_cls_acc)
    log_string('eval box IoU (ground/3D): %f / %f' % \
        (iou2ds_sum / float(total_obj_sample), iou3ds_sum / \
            float(total_obj_sample)))
    log_string('eval box estimation accuracy (IoU=0.7): %f' % \
        (float(iou3d_correct_cnt)/float(total_obj_sample)))
    box_estimation_acc = float(iou3d_correct_cnt) / float(total_obj_sample)
    mean_loss = loss_sum / float(num_batches)
    EPOCH_CNT += 1
    return mean_loss, avg_cls_acc
Example #10
def eval_one_epoch(fpointnet,device):
    '''
    @author chonepieceyb
    :param fpointnet: the network (model) object
    :param device: device to run on
    :return:
    '''
    # get data
    global EPOCH_CNT
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    fpointnet.eval()  # evaluation mode
    for batch_idx in range(int(num_batches)):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1)* BATCH_SIZE
        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                      NUM_POINT, NUM_CHANNEL)

        # convert to torch tensors and change the data format
        batch_data_gpu = torch.from_numpy(batch_data).permute(0, 2, 1).to(device, dtype=torch.float32)
        batch_label_gpu= torch.from_numpy(batch_label).to(device,dtype=torch.int64)
        batch_center_gpu = torch.from_numpy(batch_center).to(device,dtype=torch.float32)
        batch_hclass_gpu = torch.from_numpy(batch_hclass).to(device,dtype=torch.int64)
        batch_hres_gpu = torch.from_numpy(batch_hres).to(device,dtype=torch.float32)
        batch_sclass_gpu = torch.from_numpy(batch_sclass).to(device,dtype=torch.int64)
        batch_sres_gpu = torch.from_numpy(batch_sres).to(device,dtype=torch.float32)
        batch_one_hot_vec_gpu  = torch.from_numpy(batch_one_hot_vec).to(device ,dtype=torch.float32)

        # eval
        with torch.no_grad():
            end_points = fpointnet.forward(batch_data_gpu,batch_one_hot_vec_gpu)
            loss, losses = get_loss(batch_label_gpu,batch_center_gpu,batch_hclass_gpu,batch_hres_gpu,batch_sclass_gpu,batch_sres_gpu,end_points)
        # get data and transform its format from torch style to tensorflow style
        loss_val = loss.cpu().data.numpy()
        logits_val = end_points['mask_logits'].data.cpu().numpy()
        iou2ds,iou3ds,accuracy = compute_summary(end_points,batch_label_gpu,batch_center,batch_hclass,batch_hres,batch_sclass,batch_sres)
        preds_val = np.argmax(logits_val, 1)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label == l)
            total_correct_class[l] += (np.sum((preds_val == l) & (batch_label == l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds >= 0.7)

        for i in range(BATCH_SIZE):
            segp = preds_val[i,:]
            segl = batch_label[i,:]
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0):
                    part_ious[l] = 1.0 # class not present
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / \
                        float(np.sum((segl==l) | (segp==l)))

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval segmentation accuracy: %f' % \
               (total_correct / float(total_seen)))
    log_string('eval segmentation avg class acc: %f' % \
               (np.mean(np.array(total_correct_class) / \
                        np.array(total_seen_class, dtype=float))))
    log_string('eval box IoU (ground/3D): %f / %f' % \
               (iou2ds_sum / float(num_batches * BATCH_SIZE), iou3ds_sum / \
                float(num_batches * BATCH_SIZE)))
    log_string('eval box estimation accuracy (IoU=0.7): %f' % \
               (float(iou3d_correct_cnt) / float(num_batches * BATCH_SIZE)))

    EPOCH_CNT += 1
Example #11
def train_one_epoch(fpointnet,device,optimizer):
    '''
    @author Qiao
    :param fpointnet: the network (model)
    :param device: device to run on
    :return:
    '''
    global EPOCH_CNT

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d TRAINING ----' % (EPOCH_CNT))
    # Use the full original dataset size when no explicit batch count is given
    if FLAGS.train_batch_num is None:
        train_idxs = np.arange(0, len(TRAIN_DATASET))
        np.random.shuffle(train_idxs)  # shuffle randomly
        num_batches = len(TRAIN_DATASET)//BATCH_SIZE
    else:
        num_batches = int(FLAGS.train_batch_num)
        num_batches = min(num_batches,len(TRAIN_DATASET)//BATCH_SIZE)
        train_idxs = np.arange(0, BATCH_SIZE*num_batches)
        np.random.shuffle(train_idxs)
    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Training with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
        get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        pointclouds_pl = torch.from_numpy(batch_data)
        pointclouds_pl = pointclouds_pl.permute(0, 2, 1)
        pointclouds_pl = pointclouds_pl.to(device,dtype=torch.float32)
        one_hot_vec_pl = torch.from_numpy(batch_one_hot_vec)
        one_hot_vec_pl = one_hot_vec_pl.to(device,dtype=torch.float32)

        labels_pl = torch.from_numpy(batch_label).to(device,dtype=torch.int64)
        centers_pl = torch.from_numpy(batch_center).to(device,dtype=torch.float32)
        heading_class_label_pl = torch.from_numpy(batch_hclass).to(device,dtype=torch.int64)
        heading_residual_label_pl = torch.from_numpy(batch_hres).to(device,dtype=torch.float32)
        size_class_label_pl = torch.from_numpy(batch_sclass).to(device,dtype=torch.int64)
        size_residual_label_pl = torch.from_numpy(batch_sres).to(device,dtype=torch.float32)

        fpointnet.train()

        end_points = fpointnet.forward(pointclouds_pl, one_hot_vec_pl)
        loss, losses = get_loss(labels_pl, centers_pl,\
                  heading_class_label_pl, heading_residual_label_pl,\
                  size_class_label_pl, size_residual_label_pl, end_points)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_val = loss.cpu().detach().numpy()
        logits_val = end_points['mask_logits'].cpu().detach().numpy()
        iou2ds,iou3ds,accuracy = compute_summary(end_points,labels_pl ,batch_center,\
                                                 batch_hclass,batch_hres,batch_sclass,batch_sres)
        preds_val = np.argmax(logits_val, 1)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds>=0.7)

        iou2d_t = np.sum(iou2ds)/float(BATCH_SIZE)
        iou3d_t = np.sum(iou3ds)/float(BATCH_SIZE)
        writer.add_scalar('iou2ds', iou2d_t, global_step=EPOCH_CNT*batch_idx)
        writer.add_scalar('iou3ds', iou3d_t, global_step=EPOCH_CNT*batch_idx)
        for key,value in losses.items():
            writer.add_scalar(key, losses[key].cpu().data.numpy(), global_step=EPOCH_CNT*batch_idx)
        # writer.add_scalar('total_loss', loss, global_step=EPOCH_CNT*batch_idx)
        for param_group in optimizer.param_groups:
            learning_rate = param_group['lr']
        writer.add_scalar('learning_rate', learning_rate, global_step=EPOCH_CNT*batch_idx)
        writer.add_scalar('segmentation accuracy', accuracy, global_step=EPOCH_CNT*batch_idx)

        if (batch_idx+1)%10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('segmentation accuracy: %f' % \
                (total_correct / float(total_seen)))
            log_string('box IoU (ground/3D): %f / %f' % \
                (iou2ds_sum / float(BATCH_SIZE*10), iou3ds_sum / float(BATCH_SIZE*10)))
            log_string('box estimation accuracy (IoU=0.7): %f' % \
                (float(iou3d_correct_cnt)/float(BATCH_SIZE*10)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
            iou3d_correct_cnt = 0
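
A minimal sketch of how this PyTorch train_one_epoch() might be driven; the optimizer type, learning rate and epoch count are assumptions, and fpointnet is the model constructed elsewhere in the script:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
fpointnet = fpointnet.to(device)  # model built elsewhere
optimizer = torch.optim.Adam(fpointnet.parameters(), lr=1e-3)  # assumed optimizer and lr

for epoch in range(10):  # assumed number of epochs
    train_one_epoch(fpointnet, device, optimizer)
    eval_one_epoch(fpointnet, device)
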
Example #12
def test(output_filename, result_dir=None):
    ''' Test frustum pointnets with GT 2D boxes.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    ps_list = []
    seg_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = len(TEST_DATASET)//batch_size

    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    correct_cnt = 0
    for batch_idx in range(num_batches):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx+1) * batch_size

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        batch_output, batch_center_pred, \
        batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data,
                batch_one_hot_vec, batch_size=batch_size)

        correct_cnt += np.sum(batch_output==batch_label)

        for i in range(batch_output.shape[0]):
            ps_list.append(batch_data[i,...])
            seg_list.append(batch_label[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            score_list.append(batch_scores[i])

    print("Segmentation accuracy: %f" % \
        (correct_cnt / float(batch_size*num_batches*NUM_POINT)))

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(seg_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)

    # Write detection results for KITTI evaluation
    write_detection_results(result_dir, TEST_DATASET.id_list,
        TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,
        heading_cls_list, heading_res_list,
        size_cls_list, size_res_list, rot_angle_list, score_list)
Example #13
def test_from_rgb_detection(output_filename, result_dir=None):
    ''' Test frustum pointnets with 2D boxes from an RGB detector.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    ps_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []
    onehot_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    print(len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = int((len(TEST_DATASET)+batch_size-1)/batch_size)
    
    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, 3))
    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    for batch_idx in range(num_batches):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)
        cur_batch_size = end_idx - start_idx

        batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)
        batch_data_to_feed[0:cur_batch_size,...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec

        # Run one batch inference
        batch_output, batch_center_pred, \
        batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data_to_feed,
                batch_one_hot_to_feed, batch_size=batch_size)

        for i in range(cur_batch_size):
            ps_list.append(batch_data[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            #score_list.append(batch_scores[i])
            score_list.append(batch_rgb_prob[i]) # 2D RGB detection score
            onehot_list.append(batch_one_hot_vec[i])

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)
            pickle.dump(onehot_list, fp)

    # Write detection results for KITTI evaluation
    print('Number of point clouds: %d' % (len(ps_list)))
    write_detection_results(result_dir, TEST_DATASET.id_list,
        TEST_DATASET.type_list, TEST_DATASET.box2d_list,
        center_list, heading_cls_list, heading_res_list,
        size_cls_list, size_res_list, rot_angle_list, score_list)
    # Make sure for each frame (no matter if we have a measurement for that frame),
    # there is a TXT file
    output_dir = os.path.join(result_dir, 'data')
    if FLAGS.idx_path is not None:
        to_fill_filename_list = [line.rstrip()+'.txt' \
            for line in open(FLAGS.idx_path)]
        fill_files(output_dir, to_fill_filename_list)
Example #14
def test_from_video_detection(dataset, output_filename, result_dir=None, sequence_dir=None):
    ''' Test frustum pointnets with 2D boxes from an RGB detector.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    ps_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []
    onehot_list = []
    hypotheses_list = []
    is_hypotheses = dataset.is_hypotheses

    test_idxs = np.arange(0, len(dataset))
    print(len(dataset))
    batch_size = BATCH_SIZE
    num_batches = int((len(dataset) + batch_size - 1) / batch_size)

    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, 3))
    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    for batch_idx in range(num_batches):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = min(len(dataset), (batch_idx + 1) * batch_size)
        cur_batch_size = end_idx - start_idx

        cur_batch = get_batch(dataset, test_idxs, start_idx, end_idx,
                      NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)
        batch_data, batch_rot_angle, batch_rgb_prob = cur_batch[:3]
        cur_batch = list(cur_batch[3:])  # list() so pop() below works even if get_batch returns a tuple
        if dataset.one_hot:
            batch_one_hot_vec = cur_batch.pop(0)

        if dataset.is_hypotheses:
            batch_hypotheses = cur_batch.pop(0)

        batch_data_to_feed[0:cur_batch_size, ...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size, :] = batch_one_hot_vec

        # Run one batch inference
        batch_output, batch_center_pred, \
        batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data_to_feed,
                      batch_one_hot_to_feed, batch_size=batch_size)

        for i in range(cur_batch_size):
            ps_list.append(batch_data[i, ...])
            segp_list.append(batch_output[i, ...])
            center_list.append(batch_center_pred[i, :])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i, :])
            rot_angle_list.append(batch_rot_angle[i])
            # score_list.append(batch_scores[i])
            score_list.append(batch_rgb_prob[i])  # 2D RGB detection score
            onehot_list.append(batch_one_hot_vec[i])
            if is_hypotheses:
                hypotheses_list.append(batch_hypotheses[i])

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)
            pickle.dump(onehot_list, fp)
            if is_hypotheses:
                pickle.dump(hypotheses_list, fp)

    # Write detection results for KITTI evaluation
    print('Number of point clouds: %d' % (len(ps_list)))
    write_detection_results(result_dir, dataset.id_list,
                            dataset.type_list, dataset.box2d_list,
                            center_list, heading_cls_list, heading_res_list,
                            size_cls_list, size_res_list, rot_angle_list, score_list, hypotheses_list)
    # Make sure for each frame (no matter if we have a measurement for that frame),
    # there is a TXT file
    output_dir = os.path.join(result_dir, 'data')
    if sequence_dir is not None:
        image_dir = os.path.join(sequence_dir, "image_03/data")
        to_fill_filename_list = [os.path.splitext(f)[0] + '.txt' \
                                 for f in os.listdir(image_dir)]
        fill_files(output_dir, to_fill_filename_list)
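
fill_files() itself lives elsewhere in the repository; an assumption about its behavior (so the call above is easier to follow) is that it creates an empty KITTI label file for every expected frame that received no detections, roughly:

import os

def fill_files_sketch(output_dir, to_fill_filename_list):
    # Hypothetical stand-in: write an empty label file for every frame not yet covered.
    for filename in to_fill_filename_list:
        filepath = os.path.join(output_dir, filename)
        if not os.path.exists(filepath):
            open(filepath, 'w').close()
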
Example #15
def eval_one_epoch(sess, ops, test_writer):
    ''' Simple evaluation for one epoch on the frustum dataset.
    ops is a dict mapping from string to tf ops.
    '''
    global EPOCH_CNT
    is_training = False
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET)//BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0
   
    # Simple evaluation with batches 
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training}

        summary, step, loss_val, logits_val, iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['logits'], 
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)
        test_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label==l)
            total_correct_class[l] += (np.sum((preds_val==l) & (batch_label==l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds>=0.7)

        for i in range(BATCH_SIZE):
            segp = preds_val[i,:]
            segl = batch_label[i,:] 
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): 
                    part_ious[l] = 1.0 # class not present
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / \
                        float(np.sum((segl==l) | (segp==l)))

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval segmentation accuracy: %f'% \
        (total_correct / float(total_seen)))
    log_string('eval segmentation avg class acc: %f' % \
        (np.mean(np.array(total_correct_class) / \
            np.array(total_seen_class,dtype=np.float))))
    log_string('eval box IoU (ground/3D): %f / %f' % \
        (iou2ds_sum / float(num_batches*BATCH_SIZE), iou3ds_sum / \
            float(num_batches*BATCH_SIZE)))
    log_string('eval box estimation accuracy (IoU=0.7): %f' % \
        (float(iou3d_correct_cnt)/float(num_batches*BATCH_SIZE)))
         
    EPOCH_CNT += 1
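Note that the per-class part_ious values computed inside the evaluation loop above are never accumulated or reported. A small sketch of how they could be folded into a mean segmentation IoU, reusing the same per-class rule (the mean_ious accumulator and the helper name are hypothetical):

import numpy as np

def sample_mean_iou(segl, segp, num_classes):
    # Same per-class IoU rule as in the loop above, averaged over classes.
    part_ious = []
    for l in range(num_classes):
        if np.sum(segl == l) == 0 and np.sum(segp == l) == 0:
            part_ious.append(1.0)  # class not present in GT or prediction
        else:
            part_ious.append(np.sum((segl == l) & (segp == l)) /
                             float(np.sum((segl == l) | (segp == l))))
    return np.mean(part_ious)

# hypothetical use inside eval_one_epoch:
#   mean_ious.append(sample_mean_iou(segl, segp, NUM_CLASSES))   # per sample
#   log_string('eval mean IoU: %f' % np.mean(mean_ious))         # after the loop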
Example #16
0
def train_one_epoch(sess, ops, train_writer):
    ''' Training for one epoch on the frustum dataset.
    ops is dict mapping from string to tf ops
    '''
    is_training = True
    log_string(str(datetime.now()))
    
    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Training with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training,}

        summary, step, _, loss_val, logits_val, centers_pred_val, \
        iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['logits'], ops['centers_pred'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']], 
                feed_dict=feed_dict)

        train_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds>=0.7)

        if (batch_idx+1)%10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('segmentation accuracy: %f' % \
                (total_correct / float(total_seen)))
            log_string('box IoU (ground/3D): %f / %f' % \
                (iou2ds_sum / float(BATCH_SIZE*10), iou3ds_sum / float(BATCH_SIZE*10)))
            log_string('box estimation accuracy (IoU=0.7): %f' % \
                (float(iou3d_correct_cnt)/float(BATCH_SIZE*10)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
            iou3d_correct_cnt = 0
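The docstrings above describe ops only as a "dict mapping from string to tf ops". Judging from the feed_dict and sess.run keys used in these loops, the placeholder part of that dict could be assembled roughly as follows; this is a TF1-style sketch, and the model/loss builder that adds 'logits', 'loss', 'train_op', 'end_points', etc. is assumed:

import tensorflow as tf

def build_placeholder_ops(batch_size, num_point, num_channel, num_classes):
    # Sketch of the placeholder entries referenced by the train/eval loops above;
    # shapes are inferred from how the batches are indexed, not taken from the source.
    return {
        'pointclouds_pl': tf.placeholder(tf.float32, (batch_size, num_point, num_channel)),
        'one_hot_vec_pl': tf.placeholder(tf.float32, (batch_size, num_classes)),
        'labels_pl': tf.placeholder(tf.int32, (batch_size, num_point)),
        'centers_pl': tf.placeholder(tf.float32, (batch_size, 3)),
        'heading_class_label_pl': tf.placeholder(tf.int32, (batch_size,)),
        'heading_residual_label_pl': tf.placeholder(tf.float32, (batch_size,)),
        'size_class_label_pl': tf.placeholder(tf.int32, (batch_size,)),
        'size_residual_label_pl': tf.placeholder(tf.float32, (batch_size, 3)),
        'is_training_pl': tf.placeholder(tf.bool, shape=()),
    }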
Example #17
0
def train_one_epoch(sess, ops, train_writer, idxs_to_use=None):
    ''' Training for one epoch on the frustum dataset.
    ops is dict mapping from string to tf ops
    '''
    is_training = True
    log_string(str(datetime.now()))

    # Shuffle train samples
    if not idxs_to_use:
        train_idxs = np.arange(0, len(TRAIN_DATASET))
    else:
        log_string('Training with classification hard samples.')
        train_idxs = idxs_to_use
    np.random.shuffle(train_idxs)
    num_batches = len(train_idxs) // BATCH_SIZE

    # To collect statistics
    total_cls_correct = 0
    total_cls_seen = 0
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_obj_sample = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Training with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data, batch_cls_label, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_feature_vec = \
            get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['features_pl']: batch_feature_vec,
            ops['cls_label_pl']: batch_cls_label,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training,
        }

        summary, step, _, loss_val, cls_logits_val, logits_val, centers_pred_val, \
        iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['cls_logits'], ops['logits'], ops['centers_pred'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)

        train_writer.add_summary(summary, step)

        # classification acc
        cls_preds_val = np.argmax(cls_logits_val, 1)
        cls_correct = np.sum(cls_preds_val == batch_cls_label)
        total_cls_correct += cls_correct
        total_cls_seen += BATCH_SIZE
        # only calculate seg acc and regression performance with object labels
        obj_mask = batch_cls_label < g_type2onehotclass['NonObject']
        obj_sample_num = np.sum(obj_mask)
        total_obj_sample += obj_sample_num
        # segmentation acc
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val[obj_mask] == batch_label[obj_mask])
        total_correct += correct
        total_seen += (obj_sample_num * NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds[obj_mask])
        iou3ds_sum += np.sum(iou3ds[obj_mask])
        iou3d_correct_cnt += np.sum(iou3ds[obj_mask] >= 0.7)

        if (batch_idx + 1) % 10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx + 1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('classification accuracy: %f' % \
                (total_cls_correct / float(total_cls_seen)))
            if total_seen > 0:
                log_string('segmentation accuracy: %f' % \
                    (total_correct / float(total_seen)))
            obj_norm = float(max(total_obj_sample, 1))  # guard against windows with no object samples
            log_string('box IoU (ground/3D): %f / %f' % \
                (iou2ds_sum / obj_norm, iou3ds_sum / obj_norm))
            log_string('box estimation accuracy (IoU=0.7): %f' % \
                (float(iou3d_correct_cnt) / obj_norm))
            total_cls_correct = 0
            total_correct = 0
            total_cls_seen = 0
            total_seen = 0
            total_obj_sample = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
            iou3d_correct_cnt = 0
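A hedged usage sketch for the idxs_to_use argument above: a driver loop could alternate full epochs with extra passes over previously misclassified samples. The hard_idxs list and MAX_EPOCH constant are placeholders supplied by the caller; sess, ops and train_writer come from the surrounding training script.

# hypothetical driver loop
for epoch in range(MAX_EPOCH):
    train_one_epoch(sess, ops, train_writer)          # regular pass over the whole set

    hard_idxs = []  # fill with dataset indices the caller considers hard
    if hard_idxs:
        # extra pass restricted to the hard samples
        train_one_epoch(sess, ops, train_writer, idxs_to_use=hard_idxs)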
Example #18
0
def evaluate(sess, ops, test_writer):
    ''' Per-frustum evaluation on the test set: logs every frustum whose 3D box
    IoU falls below 0.7, then prints summary statistics.
    ops: dict mapping from string to tf ops.
    '''
    # frustum_map: global lookup from frustum index to its image index and 2D box index
    global frustum_map

    is_training = False
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    for batch_idx in range(num_batches):  # was hard-coded as range(17334)
        
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training}

        summary, loss_val, logits_val, iou2ds, iou3ds = \
            sess.run([ops['merged'],
                ops['loss'], ops['logits'], 
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)

        test_writer.add_summary(summary)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val

        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label==l)
            total_correct_class[l] += (np.sum((preds_val==l) & (batch_label==l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds>=0.7)

        for i in range(BATCH_SIZE):
            segp = preds_val[i,:]
            segl = batch_label[i,:] 
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0): 
                    part_ious[l] = 1.0 # class not present
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / \
                        float(np.sum((segl==l) | (segp==l)))

        # Per-frustum reporting: assumes BATCH_SIZE == 1, so correct/iou2ds/iou3ds refer to a single frustum.
        if iou3ds < 0.7:
            log_string('**** Frustum Number %03d from Image %06d and 2D box %06d ****' % (batch_idx, frustum_map[batch_idx]['img_indx'], frustum_map[batch_idx]['obj_indx']))
            log_string('loss: %f' % (loss_val))
            log_string('Frustum segmentation accuracy: %f' % (correct / float(NUM_POINT)))
            log_string('Frustum box IoU (ground/3D): %f / %f' % (iou2ds, iou3ds))
        
    log_string("*********** Summary Results **********")
    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval segmentation accuracy: %f'% \
        (total_correct / float(total_seen)))
    log_string('eval segmentation avg class acc: %f' % \
        (np.mean(np.array(total_correct_class) / \
            np.array(total_seen_class,dtype=np.float))))
    log_string('eval box IoU (ground/3D): %f / %f' % \
        (iou2ds_sum / float(num_batches*BATCH_SIZE), iou3ds_sum / \
            float(num_batches*BATCH_SIZE)))
    log_string('eval box estimation accuracy (IoU=0.7): %f' % \
        (float(iou3d_correct_cnt)/float(num_batches*BATCH_SIZE)))
Example #19
0
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE
    
    log_string(str(datetime.now()))

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        batch_data, batch_label, batch_center, batch_hclass, batch_hres, batch_sclass, batch_sres, batch_rot_angle, batch_one_hot_vec = get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx, NUM_POINT, NUM_CHANNEL)
        #print 'Ground truth centers: ', batch_center
        # Augment batched point clouds by rotation and jittering
        aug_data = batch_data
        #aug_data = provider.random_scale_point_cloud(batch_data)
        #aug_data = provider.jitter_point_cloud(aug_data)
        feed_dict = {ops['pointclouds_pl']: aug_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training,}
        summary, step, _, loss_val, logits_val, centers_pred_val, iou2ds, iou3ds = sess.run([ops['merged'], ops['step'],
            ops['train_op'], ops['loss'], ops['logits'], ops['centers_pred'],
            ops['end_points']['iou2ds'], ops['end_points']['iou3ds']], feed_dict=feed_dict)
        #print 'Predicted centers: ', centers_pred_val
        train_writer.add_summary(summary, step)
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)

        if (batch_idx+1)%10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx+1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('accuracy: %f' % (total_correct / float(total_seen)))
            log_string('Box IoU (ground/3D): %f / %f' % (iou2ds_sum / float(BATCH_SIZE*10), iou3ds_sum / float(BATCH_SIZE*10)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
Example #20
0
def main_batch(output_filename, result_dir=None):
    ps_list = []
    seg_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    batch_size = 32
    num_batches = int((len(TEST_DATASET)+batch_size-1)/batch_size)

    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, NUM_CLASS))
    sess, ops = get_model(batch_size=batch_size, num_point=NUM_POINT)
    correct_cnt = 0
    for batch_idx in range(num_batches):
        #if batch_idx == 50: break #TODO: remove this line!!
        print(batch_idx)
        start_idx = batch_idx * batch_size
        end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)
        cur_batch_size = end_idx - start_idx

        batch_data, batch_label, batch_center, batch_hclass, batch_hres, batch_sclass, batch_sres, batch_rot_angle, batch_one_hot_vec = get_batch(TEST_DATASET, test_idxs, start_idx, end_idx, NUM_POINT, NUM_CHANNEL)
        batch_data_to_feed[0:cur_batch_size,...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec
        batch_output, batch_center_pred, batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data_to_feed, batch_one_hot_to_feed, batch_size=batch_size)
        print(batch_hclass_pred.shape, batch_hres_pred.shape)
        print(batch_sclass_pred.shape, batch_sres_pred.shape)
        #raw_input()
        correct_cnt += np.sum(batch_output[0:cur_batch_size,...]==batch_label[0:cur_batch_size,...])
        for i in range(cur_batch_size):
            ps_list.append(batch_data[i,...])
            seg_list.append(batch_label[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            score_list.append(batch_scores[i])
    print("Accuracy: ", correct_cnt / float(len(TEST_DATASET)*NUM_POINT))

    save_zipped_pickle([ps_list, segp_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list], output_filename) 

    # Write detection results for KITTI evaluation
    write_detection_results(result_dir, TEST_DATASET.id_list, TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, rot_angle_list, score_list)
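save_zipped_pickle above is assumed to be a small project helper; one plausible sketch, a gzip-compressed pickle and not necessarily the author's implementation:

import gzip
import pickle

def save_zipped_pickle(obj, filename, protocol=-1):
    # Assumed helper: serialize the result lists with pickle and gzip-compress the file.
    with gzip.open(filename, 'wb') as f:
        pickle.dump(obj, f, protocol)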
Example #21
0
def test_from_rgb_detection(output_filename, result_dir=None):
    ''' Test frustum pointnets with 2D boxes from an RGB detector.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    ps_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []
    onehot_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    print(len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = int((len(TEST_DATASET) + batch_size - 1) / batch_size)
    total_time = 0.0
    profile_list = []
    time_list = []
    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, 3))
    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    for batch_idx in range(RUN_SIZE):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = min(len(TEST_DATASET), (batch_idx + 1) * batch_size)
        cur_batch_size = end_idx - start_idx

        batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)

        print("batch data: ", batch_data.shape)
        print("batch_one_hot_vec: ", batch_one_hot_vec.shape)
        batch_data_to_feed[0:cur_batch_size, ...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size, :] = batch_one_hot_vec

        # Run one batch inference
        time_diff = inference(sess,
                              ops,
                              batch_data_to_feed,
                              batch_one_hot_to_feed,
                              batch_size=batch_size)

        if (batch_idx < 3): continue
        print(time_diff)
        total_time += time_diff
        time_list.append(time_diff)
        '''
        profile = parse_timeline("timeline.json")
        tmp_list = dict2list(profile, pointnet_ops)
        profile_list.append(tmp_list)
        print(tmp_list)
        '''

    print('Avg. time:', statistics.mean(time_list))
    print('Time stddev: ', statistics.stdev(time_list))
    # Write detection results for KITTI evaluation

    avg_list = []
    dev_list = []
    # profile_list is only populated when the timeline-profiling block above is enabled
    if profile_list:
        for i in range(len(profile_list[0])):
            avg_list.append(statistics.mean([item[i] for item in profile_list]))
            dev_list.append(statistics.stdev([item[i] for item in profile_list]))

        print(avg_list)
        print(dev_list)
Example #22
0
def train_one_epoch(sess, ops, train_writer):
    ''' Training for one epoch on the frustum dataset.
    ops is dict mapping from string to tf ops
    '''
    is_training = True
    log_string(str(datetime.now()))
    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET)//BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Training with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training,}

        summary, step, _, loss_val, logits_val, centers_pred_val, \
        iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['end_points']['mask_logits'], ops['end_points']['center'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)

        train_writer.add_summary(summary, step)


        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds>=0.7)

        if (batch_idx+1)%LOG_FREQ == 0:
            segacc = total_correct / float(total_seen)
            iou2, iou3 = iou2ds_sum / float(BATCH_SIZE*LOG_FREQ), iou3ds_sum / float(BATCH_SIZE*LOG_FREQ)
            boxacc = float(iou3d_correct_cnt)/float(BATCH_SIZE*LOG_FREQ)
            content = ' %03d / %03d loss: %2.4f segacc: %.4f IoU(ground/3D): %.4f / %.4f boxAcc(0.7): %.4f' \
                %(batch_idx+1, num_batches, loss_sum / LOG_FREQ, segacc, iou2, iou3, boxacc)
            log_string(content)
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
            iou3d_correct_cnt = 0
Example #23
0
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE

    log_string(str(datetime.now()))

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        batch_data, batch_label, batch_center, batch_hclass, batch_hres, batch_sclass, batch_sres, batch_rot_angle, batch_one_hot_vec = get_batch(
            TRAIN_DATASET, train_idxs, start_idx, end_idx, NUM_POINT,
            NUM_CHANNEL)
        #print 'Ground truth centers: ', batch_center
        # Augment batched point clouds by rotation and jittering
        aug_data = batch_data
        #aug_data = provider.random_scale_point_cloud(batch_data)
        #aug_data = provider.jitter_point_cloud(aug_data)
        feed_dict = {
            ops['pointclouds_pl']: aug_data,
            ops['one_hot_vec_pl']: batch_one_hot_vec,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, logits_val, centers_pred_val, iou2ds, iou3ds = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['logits'], ops['centers_pred'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']
            ],
            feed_dict=feed_dict)
        #print 'Predicted centers: ', centers_pred_val
        train_writer.add_summary(summary, step)
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)

        if (batch_idx + 1) % 10 == 0:
            log_string(' -- %03d / %03d --' % (batch_idx + 1, num_batches))
            log_string('mean loss: %f' % (loss_sum / 10))
            log_string('accuracy: %f' % (total_correct / float(total_seen)))
            log_string('Box IoU (ground/3D): %f / %f' %
                       (iou2ds_sum / float(BATCH_SIZE * 10),
                        iou3ds_sum / float(BATCH_SIZE * 10)))
            total_correct = 0
            total_seen = 0
            loss_sum = 0
            iou2ds_sum = 0
            iou3ds_sum = 0
Example #24
0
def eval_one_epoch(sess, ops, test_writer, epoch, saver):
    ''' Simple evaluation for one epoch on the frustum dataset.
    ops: dict mapping from string to tf ops.
    '''
    global EPOCH_CNT, best_boxacc, best_epoch
    is_training = False
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET)//BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Simple evaluation with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {ops['pointclouds_pl']: batch_data,
                     ops['one_hot_vec_pl']: batch_one_hot_vec,
                     ops['labels_pl']: batch_label,
                     ops['centers_pl']: batch_center,
                     ops['heading_class_label_pl']: batch_hclass,
                     ops['heading_residual_label_pl']: batch_hres,
                     ops['size_class_label_pl']: batch_sclass,
                     ops['size_residual_label_pl']: batch_sres,
                     ops['is_training_pl']: is_training}

        summary, step, loss_val, logits_val, iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['end_points']['mask_logits'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)
        test_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
        #print("preds_val:",preds_val.shape)
        #print("batch_label:",batch_label.shape)
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label==l)
            total_correct_class[l] += (np.sum((preds_val==l) & (batch_label==l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds>=0.7)

        for i in range(BATCH_SIZE):
            segp = preds_val[i,:]
            segl = batch_label[i,:]
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl==l) == 0) and (np.sum(segp==l) == 0):
                    part_ious[l] = 1.0 # class not present
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / \
                        float(np.sum((segl==l) | (segp==l)))
    mean_loss = loss_sum / float(num_batches)
    segacc = total_correct / float(total_seen)
    segclassacc = np.mean(np.array(total_correct_class) / \
        np.array(total_seen_class,dtype=np.float))
    iou2 = iou2ds_sum / float(num_batches*BATCH_SIZE)
    iou3 = iou3ds_sum / float(num_batches*BATCH_SIZE)
    boxacc = float(iou3d_correct_cnt)/float(num_batches*BATCH_SIZE)
    content = ' loss: %2.4f segacc: %.4f seg_C_acc: %.4f IoU(ground/3D): %.4f / %.4f boxAcc(0.7): %.4f' \
        %(mean_loss, segacc, segclassacc, iou2, iou3, boxacc)
    log_string(content)

    model_name = "model_%s_%1.3f.ckpt" % (epoch, boxacc)
    save_path = saver.save(sess, os.path.join(LOG_DIR, model_name))
    log_string("Model saved in file: %s" % save_path)
    if boxacc > best_boxacc:
        best_boxacc = boxacc
        best_epoch = epoch
        best_model_path = os.path.join(LOG_DIR, "model_best.ckpt")
        best_save_path = saver.save(sess, best_model_path)  # save the best checkpoint under its own name
        log_string("Best model (%s) saved in file: %s" % (model_name, best_save_path))

    EPOCH_CNT += 1
Example #25
0
def eval_one_epoch(sess, ops, test_dataset, res, split):
    ''' Simple evaluation for one epoch on the frustum dataset.
    ops: dict mapping from string to tf ops.
    '''
    global EPOCH_CNT
    is_training = False
    log_string(str(datetime.now()))
    log_string(res + '---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
    test_idxs = np.arange(0, len(test_dataset))
    num_batches = len(test_dataset) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0
    box_pred_nbr_sum = 0

    ps_list = []
    seg_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []
    center_GT = []
    heading_class_GT = []
    heading_res_GT = []
    size_class_GT = []
    size_residual_GT = []

    # Simple evaluation with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(test_dataset, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['one_hot_vec_pl']: batch_one_hot_vec,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training
        }

        summary, step, loss_val, logits_val, \
        centers_pred_val,heading_scores, heading_residuals, size_scores, size_residuals,\
        iou2ds, iou3ds, box_pred_nbr  = \
            sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['logits'],
                ops['end_points']['center'],ops['end_points']['heading_scores'],
                ops['end_points']['heading_residuals'],ops['end_points']['size_scores'],
                ops['end_points']['size_residuals'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds'],ops['end_points']['box_pred_nbr']],

                feed_dict=feed_dict)
        #test_writer.add_summary(summary, step)
        batch_seg_prob = softmax(logits_val)[:, :, 1]  # BxN
        batch_seg_mask = np.argmax(logits_val, 2)  # BxN
        mask_mean_prob = np.sum(batch_seg_prob * batch_seg_mask, 1)  # B,
        mask_mean_prob = mask_mean_prob / np.sum(batch_seg_mask, 1)
        heading_prob = np.max(softmax(heading_scores), 1)  # B
        size_prob = np.max(softmax(size_scores), 1)  # B,
        batch_scores = np.log(mask_mean_prob) + np.log(heading_prob) + np.log(
            size_prob)

        heading_cls = np.argmax(heading_scores, 1)  # B
        size_cls = np.argmax(size_scores, 1)  # B
        heading_res = np.array([heading_residuals[i, heading_cls[i]] \
                                for i in range(batch_data.shape[0])])
        size_res = np.vstack([size_residuals[i, size_cls[i], :] \
                              for i in range(batch_data.shape[0])])

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val

        for i in range(batch_data.shape[0]):
            ps_list.append(batch_data[i, ...])
            seg_list.append(batch_label[i, ...])
            segp_list.append(preds_val[i, ...])
            center_list.append(centers_pred_val[i, :])
            heading_cls_list.append(heading_cls[i])
            heading_res_list.append(heading_res[i])
            size_cls_list.append(size_cls[i])
            size_res_list.append(size_res[i, :])
            rot_angle_list.append(batch_rot_angle[i])
            score_list.append(batch_scores[i])
            center_GT.append(batch_center[i])
            heading_class_GT.append(batch_hclass[i])
            heading_res_GT.append(batch_hres[i])
            size_class_GT.append(batch_sclass[i])
            size_residual_GT.append(batch_sres[i])
            correct = np.sum(preds_val == batch_label)

        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label == l)
            total_correct_class[l] += (np.sum((preds_val == l)
                                              & (batch_label == l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds >= 0.5)
        box_pred_nbr_sum += np.sum(box_pred_nbr)

        for i in range(BATCH_SIZE):
            segp = preds_val[i, :]
            segl = batch_label[i, :]

            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                    part_ious[l] = 1.0  # class not present
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / \
                        float(np.sum((segl==l) | (segp==l)))

    print("num_batches", num_batches)
    log_string(res + 'eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string(res+'eval segmentation accuracy: %f'% \
        (total_correct / float(total_seen)))
    log_string(res+'eval segmentation avg class acc: %f' % \
        (np.mean(np.array(total_correct_class) / \
            np.array(total_seen_class,dtype=np.float))))
    log_string(res+'eval box IoU (ground/3D): %f / %f' % \
        (iou2ds_sum / max(float(box_pred_nbr_sum),1.0), iou3ds_sum / \
            max(float(box_pred_nbr_sum),1.0)))
    log_string(res+'eval box estimation accuracy (IoU=0.5): %f' % \
        (float(iou3d_correct_cnt)/max(float(box_pred_nbr_sum),1.0)))
    IOU3d, GT_box_list, pred_box_list = compare_box_iou(
        res, split, test_dataset.id_list, test_dataset.indice_box,
        size_residual_GT, size_class_GT, heading_res_GT, heading_class_GT,
        center_GT, score_list, size_res_list, size_cls_list, heading_res_list,
        heading_cls_list, center_list, segp_list, seg_list)
    eval_per_frame(test_dataset.id_list, test_dataset.indice_box, ps_list,
                   seg_list, segp_list, GT_box_list, pred_box_list, IOU3d,
                   score_list)

    write_detection_results_test(res, split, test_dataset.id_list, center_list,
                                 heading_cls_list, heading_res_list,
                                 size_cls_list, size_res_list, rot_angle_list,
                                 segp_list, score_list)
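The softmax helper used above is not defined in the excerpt; a minimal NumPy sketch, assuming it normalizes over the last axis (consistent with how its output is indexed above):

import numpy as np

def softmax(x):
    # Numerically stable softmax over the last axis:
    # (B, N, 2) segmentation logits -> per-point class probabilities,
    # (B, K) heading/size scores    -> per-bin probabilities.
    x = x - np.max(x, axis=-1, keepdims=True)
    e = np.exp(x)
    return e / np.sum(e, axis=-1, keepdims=True)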
Example #26
0
def test_from_rgb_detection(output_filename, result_dir=None):
    ''' Test frustum pointnets with 2D boxes from an RGB detector.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    global lstm_parameters
    ps_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []
    onehot_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    print(len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = len(TEST_DATASET) // batch_size  # alternative: int((len(TEST_DATASET)+batch_size-1)/batch_size) keeps the last partial batch
    
    batch_data_to_feed = np.zeros((batch_size, NUM_POINT, NUM_CHANNEL))
    batch_one_hot_to_feed = np.zeros((batch_size, 3))
    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT, num_channel=NUM_CHANNEL,
                                    lstm_params=lstm_parameters)
    
    # To get features of all frames
    if FLAGS.tracks:
        for batch_idx in range(int(num_batches)):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            # E: Get also batch_indices which shows the (world_id,frame_id,track_id) of the objects in the batch
            # E: Batch indices are valid (non-empty) only if the tracks flag is True
            '''
            batch_data, batch_label, batch_center, \
            batch_hclass, batch_hres, \
            batch_sclass, batch_sres, \
            batch_rot_angle, batch_one_hot_vec, batch_indices = \
            '''
            batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec, batch_indices = \
                get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                          NUM_POINT, NUM_CHANNEL,tracks=FLAGS.tracks, from_rgb_detection=True)

            # Emec added the feature line
            # E: Get the features at the prev time steps of the objects in the batch
            batch_feat_lstm = get_batch_features(TEST_DATASET.feature_dict,
                                                 batch_wft=batch_indices,tau=FLAGS.tau,
                                                 feat_len=feat_len,rev_order=True)
            # E: Get the number of tracks at the tau prev. time steps for each object in the batch: How many of the tau-1 frames before the current frames of the objects contain the same object with the same track id 
            batch_seq_len = batch_track_num(feature_dict=TEST_DATASET.feature_dict,wfts=batch_indices)

            feed_dict = {ops['pointclouds_pl']: batch_data,
                         ops['one_hot_vec_pl']: batch_one_hot_vec,
                         ops['is_training_pl']: False,
                         ops['end_points']['lstm_layer']['feat_input']:batch_feat_lstm,
                         ops['end_points']['lstm_layer']['pf_seq_len']:batch_seq_len}

            box_est_feature_vec = \
                sess.run(ops['end_points']['box_est_feature_vec'],
                         feed_dict=feed_dict)
                
            update_batch_features(feature_dict=TEST_DATASET.feature_dict,batch_wft=batch_indices,
                                  batch_feat_vecs=box_est_feature_vec)
            
    for batch_idx in range(int(num_batches)):
        # print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = min(len(TEST_DATASET), (batch_idx+1) * batch_size)
        cur_batch_size = end_idx - start_idx

        batch_data, batch_rot_angle, batch_rgb_prob, batch_one_hot_vec, batch_indices = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL, from_rgb_detection=True)
        batch_data_to_feed[0:cur_batch_size,...] = batch_data
        batch_one_hot_to_feed[0:cur_batch_size,:] = batch_one_hot_vec

        # Run one batch inference
        if FLAGS.tracks:
            batch_output, batch_center_pred, \
            batch_hclass_pred, batch_hres_pred, \
            batch_sclass_pred, batch_sres_pred, batch_scores = \
                inference(sess, ops, batch_data_to_feed,
                    batch_one_hot_to_feed, batch_size=batch_size,
                        track_data=[batch_feat_lstm,batch_seq_len])
        else:
            batch_output, batch_center_pred, \
            batch_hclass_pred, batch_hres_pred, \
            batch_sclass_pred, batch_sres_pred, batch_scores = \
                inference(sess, ops, batch_data_to_feed,
                    batch_one_hot_to_feed, batch_size=batch_size)

        for i in range(cur_batch_size):
            ps_list.append(batch_data[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            #score_list.append(batch_scores[i])
            score_list.append(batch_rgb_prob[i]) # 2D RGB detection score
            onehot_list.append(batch_one_hot_vec[i])

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)
            pickle.dump(onehot_list, fp)

    # Write detection results for KITTI evaluation
    # print('Number of point clouds: %d' % (len(ps_list)))
    # Write detection results for KITTI evaluation
    if TRACKING:
        write_track_detection_results(result_dir, TEST_DATASET.id_list,
            TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,
            heading_cls_list, heading_res_list,
            size_cls_list, size_res_list, rot_angle_list, score_list,dataset=TEST_DATASET)
        
    #write_detection_results(result_dir, TEST_DATASET.id_list,
    #    TEST_DATASET.type_list, TEST_DATASET.box2d_list,
    #    center_list, heading_cls_list, heading_res_list,
    #    size_cls_list, size_res_list, rot_angle_list, score_list)
    # Make sure for each frame (no matter if we have a measurement for that frame),
    # there is a TXT file
Example #27
0
def eval_one_epoch(sess, ops, test_writer):
    ''' Simple evaluation for one epoch on the frustum dataset.
    ops: dict mapping from string to tf ops.
    '''
    global EPOCH_CNT
    is_training = False
    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))
    test_idxs = np.arange(0, len(TEST_DATASET))
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0

    # Simple evaluation with batches
    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        feed_dict = {
            ops['pointclouds_pl']: batch_data,
            ops['one_hot_vec_pl']: batch_one_hot_vec,
            ops['labels_pl']: batch_label,
            ops['centers_pl']: batch_center,
            ops['heading_class_label_pl']: batch_hclass,
            ops['heading_residual_label_pl']: batch_hres,
            ops['size_class_label_pl']: batch_sclass,
            ops['size_residual_label_pl']: batch_sres,
            ops['is_training_pl']: is_training
        }

        summary, step, loss_val, logits_val, iou2ds, iou3ds = \
            sess.run([ops['merged'], ops['step'],
                ops['loss'], ops['logits'],
                ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],
                feed_dict=feed_dict)
        test_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_CLASSES):
            total_seen_class[l] += np.sum(batch_label == l)
            total_correct_class[l] += (np.sum((preds_val == l)
                                              & (batch_label == l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds >= 0.5)

        for i in range(BATCH_SIZE):
            segp = preds_val[i, :]
            segl = batch_label[i, :]
            part_ious = [0.0 for _ in range(NUM_CLASSES)]
            for l in range(NUM_CLASSES):
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                    part_ious[l] = 1.0  # class not present
                else:
                    part_ious[l] = np.sum((segl==l) & (segp==l)) / \
                        float(np.sum((segl==l) | (segp==l)))

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval segmentation accuracy: %f'% \
        (total_correct / float(total_seen)))
    log_string('eval segmentation avg class acc: %f' % \
        (np.mean(np.array(total_correct_class) / \
            np.array(total_seen_class,dtype=np.float))))
    log_string('eval box IoU (ground/3D): %f / %f' % \
        (iou2ds_sum / float(num_batches*BATCH_SIZE), iou3ds_sum / \
            float(num_batches*BATCH_SIZE)))
    log_string('eval box estimation accuracy (IoU=0.5): %f' % \
        (float(iou3d_correct_cnt)/float(num_batches*BATCH_SIZE)))

    EPOCH_CNT += 1
Example #28
0
def test(output_filename, result_dir=None):
    ''' Test frustum pointnets with GT 2D boxes.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    global lstm_parameters
    ps_list = []
    seg_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = len(TEST_DATASET)/batch_size

    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT, num_channel=NUM_CHANNEL,
                                    lstm_params=lstm_parameters)
    correct_cnt = 0
    
    # To get features of all frames
    if FLAGS.tracks:
        for batch_idx in range(int(num_batches)):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE
            # E: Get also batch_indices which shows the (world_id,frame_id,track_id) of the objects in the batch
            # E: Batch indices are valid (non-empty) only if the tracks flag is True
            batch_data, batch_label, batch_center, \
            batch_hclass, batch_hres, \
            batch_sclass, batch_sres, \
            batch_rot_angle, batch_one_hot_vec, batch_indices = \
                get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                          NUM_POINT, NUM_CHANNEL,tracks=FLAGS.tracks)

            # Emec added the feature line
            # E: Get the features at the prev time steps of the objects in the batch
            batch_feat_lstm = get_batch_features(TEST_DATASET.feature_dict,
                                                 batch_wft=batch_indices,tau=FLAGS.tau,
                                                 feat_len=feat_len,rev_order=True)
            # E: Get the number of tracks at the tau prev. time steps for each object in the batch: How many of the tau-1 frames before the current frames of the objects contain the same object with the same track id 
            batch_seq_len = batch_track_num(feature_dict=TEST_DATASET.feature_dict,wfts=batch_indices)

            feed_dict = {ops['pointclouds_pl']: batch_data,
                         ops['one_hot_vec_pl']: batch_one_hot_vec,
                         ops['labels_pl']: batch_label,
                         ops['centers_pl']: batch_center,
                         ops['heading_class_label_pl']: batch_hclass,
                         ops['heading_residual_label_pl']: batch_hres,
                         ops['size_class_label_pl']: batch_sclass,
                         ops['size_residual_label_pl']: batch_sres,
                         ops['is_training_pl']: False,
                         ops['end_points']['lstm_layer']['feat_input']:batch_feat_lstm,
                         ops['end_points']['lstm_layer']['pf_seq_len']:batch_seq_len}

            box_est_feature_vec = \
                sess.run(ops['end_points']['box_est_feature_vec'],
                         feed_dict=feed_dict)
                
            update_batch_features(feature_dict=TEST_DATASET.feature_dict,batch_wft=batch_indices,
                                  batch_feat_vecs=box_est_feature_vec)

    
    print('Inference started!')
    for batch_idx in range(int(num_batches)):
        
        # print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx+1) * batch_size

        
        if FLAGS.tracks:
            batch_data, batch_label, batch_center, \
            batch_hclass, batch_hres, batch_sclass, batch_sres, \
            batch_rot_angle, batch_one_hot_vec,batch_indices = \
                get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL,tracks=FLAGS.tracks)
            batch_feat_lstm = get_batch_features(TEST_DATASET.feature_dict,
                                                 batch_wft=batch_indices,tau=FLAGS.tau,
                                                 feat_len=feat_len,rev_order=True)
            batch_seq_len = batch_track_num(feature_dict=TEST_DATASET.feature_dict,wfts=batch_indices)
            batch_output, batch_center_pred, \
            batch_hclass_pred, batch_hres_pred, \
            batch_sclass_pred, batch_sres_pred, batch_scores = \
                inference(sess, ops, batch_data,
                    batch_one_hot_vec, batch_size=batch_size,
                    track_data=[batch_feat_lstm,batch_seq_len])
            
            
        else:
            batch_data, batch_label, batch_center, \
            batch_hclass, batch_hres, batch_sclass, batch_sres, \
            batch_rot_angle, batch_one_hot_vec,batch_indices = \
                get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                    NUM_POINT, NUM_CHANNEL,tracks=FLAGS.tracking)
            batch_output, batch_center_pred, \
            batch_hclass_pred, batch_hres_pred, \
            batch_sclass_pred, batch_sres_pred, batch_scores = \
                inference(sess, ops, batch_data,
                    batch_one_hot_vec, batch_size=batch_size)

        correct_cnt += np.sum(batch_output==batch_label)

        for i in range(batch_output.shape[0]):
            ps_list.append(batch_data[i,...])
            seg_list.append(batch_label[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            score_list.append(batch_scores[i])

    print("Segmentation accuracy: %f" % \
        (correct_cnt / float(batch_size*num_batches*NUM_POINT)))

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(seg_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)

    # Write detection results for KITTI evaluation
    if TRACKING:
        write_track_detection_results(result_dir, TEST_DATASET.id_list,
            TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,
            heading_cls_list, heading_res_list,
            size_cls_list, size_res_list, rot_angle_list, score_list,dataset=TEST_DATASET)
    else:
        write_detection_results(result_dir, TEST_DATASET.id_list,
            TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,
            heading_cls_list, heading_res_list,
            size_cls_list, size_res_list, rot_angle_list, score_list)
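The comments in Examples #26 and #28 describe feature_dict as keyed by (world_id, frame_id, track_id). A hedged sketch of the gathering pattern that get_batch_features / batch_track_num are described as performing, assuming one fixed-length feature vector per observation and zero-padding for missing frames; the function names below are hypothetical and the actual implementations may differ:

import numpy as np

def gather_track_features(feature_dict, batch_wft, tau, feat_len, rev_order=True):
    # For each (world_id, frame_id, track_id), collect the same track's feature
    # vectors from the tau previous frames; frames without that track stay zero.
    feats = np.zeros((len(batch_wft), tau, feat_len), dtype=np.float32)
    for i, (world_id, frame_id, track_id) in enumerate(batch_wft):
        for t in range(1, tau + 1):
            key = (world_id, frame_id - t, track_id)
            if key in feature_dict:
                feats[i, t - 1, :] = feature_dict[key]
    if rev_order:
        feats = feats[:, ::-1, :]  # flip between oldest-first and newest-first ordering
    return feats

def count_track_history(feature_dict, wfts, tau):
    # How many of the tau previous frames contain the same track id.
    return np.array([sum((w, f - t, tid) in feature_dict for t in range(1, tau + 1))
                     for (w, f, tid) in wfts], dtype=np.int32)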
Example #29
0
def test(output_filename, result_dir=None):
    ''' Test frustum pointnets with GT 2D boxes.
    Write test results to KITTI format label files.
    todo (rqi): support variable number of points.
    '''
    ps_list = []
    seg_list = []
    segp_list = []
    center_list = []
    heading_cls_list = []
    heading_res_list = []
    size_cls_list = []
    size_res_list = []
    rot_angle_list = []
    score_list = []

    test_idxs = np.arange(0, len(TEST_DATASET))
    batch_size = BATCH_SIZE
    num_batches = int(len(TEST_DATASET)/batch_size)

    sess, ops = get_session_and_ops(batch_size=batch_size, num_point=NUM_POINT)
    correct_cnt = 0
    for batch_idx in range(num_batches):
        print('batch idx: %d' % (batch_idx))
        start_idx = batch_idx * batch_size
        end_idx = (batch_idx+1) * batch_size

        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec = \
            get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,
                NUM_POINT, NUM_CHANNEL)

        batch_output, batch_center_pred, batch_hclass_pred, batch_hres_pred, \
        batch_sclass_pred, batch_sres_pred, batch_scores = \
            inference(sess, ops, batch_data, batch_one_hot_vec, batch_size=batch_size)

        correct_cnt += np.sum(batch_output==batch_label)
	
        for i in range(batch_output.shape[0]):
            ps_list.append(batch_data[i,...])
            seg_list.append(batch_label[i,...])
            segp_list.append(batch_output[i,...])
            center_list.append(batch_center_pred[i,:])
            heading_cls_list.append(batch_hclass_pred[i])
            heading_res_list.append(batch_hres_pred[i])
            size_cls_list.append(batch_sclass_pred[i])
            size_res_list.append(batch_sres_pred[i,:])
            rot_angle_list.append(batch_rot_angle[i])
            score_list.append(batch_scores[i])

    print("Segmentation accuracy: %f" % \
        (correct_cnt / float(batch_size*num_batches*NUM_POINT)))

    if FLAGS.dump_result:
        with open(output_filename, 'wb') as fp:
            pickle.dump(ps_list, fp)
            pickle.dump(seg_list, fp)
            pickle.dump(segp_list, fp)
            pickle.dump(center_list, fp)
            pickle.dump(heading_cls_list, fp)
            pickle.dump(heading_res_list, fp)
            pickle.dump(size_cls_list, fp)
            pickle.dump(size_res_list, fp)
            pickle.dump(rot_angle_list, fp)
            pickle.dump(score_list, fp)

    # Write detection results for KITTI evaluation
    write_detection_results(result_dir, TEST_DATASET.id_list,
        TEST_DATASET.type_list, TEST_DATASET.box2d_list, center_list,
        heading_cls_list, heading_res_list,
        size_cls_list, size_res_list, rot_angle_list, score_list)
Example #30
0
def train_one_epoch(sess, ops, train_writer, tracks=False, lstm_params=None):
    ''' Training for one epoch on the frustum dataset.
    ops is a dict mapping from string to tf ops.

    tracks: if True, use the track ids from the dataset and also build feature dictionaries.
    '''
    is_training = True
    log_string(str(datetime.now()))

    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_DATASET))
    np.random.shuffle(train_idxs)
    num_batches = len(TRAIN_DATASET) // BATCH_SIZE

    # To collect statistics
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    iou2ds_sum = 0
    iou3ds_sum = 0
    iou3d_correct_cnt = 0
    all_return = []
    # Training with batches
    for batch_idx in range(int(num_batches)):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE
        # Also fetch batch_indices, which gives the (world_id, frame_id, track_id)
        # of each object in the batch. Batch indices are valid (non-empty) only
        # if the tracks flag is True.
        batch_data, batch_label, batch_center, \
        batch_hclass, batch_hres, \
        batch_sclass, batch_sres, \
        batch_rot_angle, batch_one_hot_vec, batch_indices = \
            get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx,
                      NUM_POINT, NUM_CHANNEL, tracks=tracks)
        # If the LSTM layers are used
        if tracks:
            # Get the features from the previous time steps of the objects in the batch
            batch_feat_lstm = get_batch_features(
                TRAIN_DATASET.feature_dict,
                batch_wft=batch_indices,
                tau=lstm_params['tau'],
                feat_len=lstm_params['feat_vec_len'],
                rev_order=True)
            # Get the sequence length for each object in the batch: how many of the
            # tau-1 frames before the object's current frame contain the same object
            # with the same track id.
            batch_seq_len = batch_track_num(
                feature_dict=TRAIN_DATASET.feature_dict, wfts=batch_indices)
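            # Presumably batch_feat_lstm has shape (BATCH_SIZE, tau, feat_vec_len),
            # holding the features of the tau previous time steps (most recent
            # first when rev_order=True), and batch_seq_len has shape (BATCH_SIZE,).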

            # Build the feed dictionary, including the LSTM inputs
            feed_dict = {
                ops['pointclouds_pl']: batch_data,
                ops['one_hot_vec_pl']: batch_one_hot_vec,
                ops['labels_pl']: batch_label,
                ops['centers_pl']: batch_center,
                ops['heading_class_label_pl']: batch_hclass,
                ops['heading_residual_label_pl']: batch_hres,
                ops['size_class_label_pl']: batch_sclass,
                ops['size_residual_label_pl']: batch_sres,
                ops['is_training_pl']: is_training,
                ops['end_points']['lstm_layer']['feat_input']: batch_feat_lstm,
                ops['end_points']['lstm_layer']['pf_seq_len']: batch_seq_len
            }

            # Also fetch box_est_feature_vec from the box estimation network
            summary, step, _, loss_val, logits_val, centers_pred_val, iou2ds, iou3ds, box_est_feature_vec = \
                sess.run(
                    [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['logits'], ops['centers_pred'],
                     ops['end_points']['iou2ds'], ops['end_points']['iou3ds'], ops['end_points']['box_est_feature_vec']],
                    feed_dict=feed_dict)
        else:

            feed_dict = {
                ops['pointclouds_pl']: batch_data,
                ops['one_hot_vec_pl']: batch_one_hot_vec,
                ops['labels_pl']: batch_label,
                ops['centers_pl']: batch_center,
                ops['heading_class_label_pl']: batch_hclass,
                ops['heading_residual_label_pl']: batch_hres,
                ops['size_class_label_pl']: batch_sclass,
                ops['size_residual_label_pl']: batch_sres,
                ops['is_training_pl']: is_training
            }
            # Also fetch box_est_feature_vec from the box estimation network
            summary, step, _, loss_val, logits_val, centers_pred_val, iou2ds, iou3ds, box_est_feature_vec = \
                sess.run(
                    [ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['logits'], ops['centers_pred'],
                     ops['end_points']['iou2ds'], ops['end_points']['iou3ds'], ops['end_points']['box_est_feature_vec']],
                    feed_dict=feed_dict)

        train_writer.add_summary(summary, step)

        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.nansum(iou3ds)
        iou3d_correct_cnt += np.sum(iou3ds >= 0.7)
        # Cache the batch features and update the track feature dictionary
        if tracks:
            all_return.append(
                [batch_indices, box_est_feature_vec, batch_feat_lstm])
            update_batch_features(feature_dict=TRAIN_DATASET.feature_dict,
                                  batch_wft=batch_indices,
                                  batch_feat_vecs=box_est_feature_vec)

    log_string('number of batches: %d' % num_batches)
    log_string('training mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('training segmentation accuracy: %f' %
               (total_correct / float(total_seen)))
    log_string('training box IoU (ground/3D): %f / %f' %
               (iou2ds_sum / float(num_batches * BATCH_SIZE),
                iou3ds_sum / float(num_batches * BATCH_SIZE)))
    log_string('training box estimation accuracy (IoU=0.7): %f' %
               (float(iou3d_correct_cnt) / float(num_batches * BATCH_SIZE)))
    if tracks:
        return all_return
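
As a usage note, here is a minimal sketch of how an outer loop might drive train_one_epoch with tracking enabled; sess, ops, train_writer and MAX_EPOCH are assumed to come from the surrounding training script, and the lstm_params values below are placeholders, not taken from the source.

lstm_params = {'tau': 5, 'feat_vec_len': 128}  # placeholder values
for epoch in range(MAX_EPOCH):
    log_string('**** EPOCH %03d ****' % epoch)
    # With tracks=True the function returns the per-batch
    # (batch_indices, box_est_feature_vec, batch_feat_lstm) triples and keeps
    # TRAIN_DATASET.feature_dict updated via update_batch_features().
    epoch_track_features = train_one_epoch(sess, ops, train_writer,
                                           tracks=True, lstm_params=lstm_params)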