def eval_one_epoch(sess, ops, test_writer):
    """Run one AP-only evaluation epoch and bump the global epoch counter.

    ops: dict mapping from string to tf ops.
    sess: active tf.Session used by main_batch to run the graph.
    test_writer: tf summary writer that add_ap_summary logs into.

    Runs two prediction passes over TEST_DATASET (an intermediate 'F_' pass
    and the final 'F2_' pass), evaluates AP for each, and logs the results.

    NOTE(fix): the original also computed num_batches = len(TEST_DATASET) /
    BATCH_SIZE — float division under Python 3 — plus a dozen accumulator
    locals (total_correct, shape_ious, W_iou2ds_sum, ...) that were never
    read in this variant. All of that dead state has been removed; this
    function only performs the AP evaluation.
    """
    global EPOCH_CNT

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))

    # ==================================== AP EVALUATION ====================================
    # Calculate mean AP (intermediate prediction head)
    eval_prefix1 = 'F_'
    predictions = main_batch(TEST_DATASET, FLAGS.TEST_CLS, len(ALL_CLASSES),
                             NUM_POINT, FLAGS.NUM_CHANNELS,
                             prefix=eval_prefix1, sess_ops=(sess, ops),
                             output_filename=None)
    rec, prec, ap, mean_ap = evaluate_predictions(predictions, DATASET, ALL_CLASSES,
                                                  TEST_DATASET, FLAGS.TEST_CLS)
    add_ap_summary(ap, mean_ap, test_writer, EPOCH_CNT, prefix='AP_Intermediate/')
    log_string(get_ap_info(ap, mean_ap))

    # Final result that we are interested in
    eval_prefix2 = 'F2_'
    predictions = main_batch(TEST_DATASET, FLAGS.TEST_CLS, len(ALL_CLASSES),
                             NUM_POINT, FLAGS.NUM_CHANNELS,
                             prefix=eval_prefix2, sess_ops=(sess, ops),
                             output_filename=None)
    rec, prec, ap, mean_ap = evaluate_predictions(predictions, DATASET, ALL_CLASSES,
                                                  TEST_DATASET, FLAGS.TEST_CLS)
    add_ap_summary(ap, mean_ap, test_writer, EPOCH_CNT, prefix='AP_Test/')
    log_string(get_ap_info(ap, mean_ap))

    EPOCH_CNT += 1
def save_predictions(data_path, train_preds, val_preds, test_preds, model):
    """Write prediction sets to timestamped files, then evaluate them.

    The train split is optional: when train_preds is None, both its save
    and its evaluation are skipped. Validation and test predictions are
    always saved; only validation is evaluated against its ground truth.
    """
    # Ground-truth CSVs live alongside the data.
    val_path = data_path + 'train-val.csv'
    train_path = data_path + 'train-train.csv'

    # One shared timestamp so the three output files sort together.
    timestr = time.strftime("%Y%m%d-%H%M%S")
    train_fname = '%s-%s-train.out' % (timestr, model)
    val_fname = '%s-%s-val.out' % (timestr, model)
    test_fname = '%s-%s-test.out' % (timestr, model)

    # Persist each available prediction set.
    if train_preds is not None:
        save_df(train_preds, fname=train_fname)
    save_df(val_preds, fname=val_fname)
    save_df(test_preds, fname=test_fname)

    # Score the saved predictions against ground truth.
    if train_preds is not None:
        print('Evaluating train...')
        evaluate.evaluate_predictions(pred_path=train_fname, true_path=train_path)
    print('Evaluating validation...')
    evaluate.evaluate_predictions(pred_path=val_fname, true_path=val_path)
def predict_eval(self, model_path, input_data, input_labels, output_path):
    """Make predictions, evaluate them, and report the prediction time.

    Args:
        model_path: path of the saved model passed to make_predictions.
        input_data: data fed to make_predictions.
        input_labels: ground-truth labels handed to evaluate_predictions.
        output_path: file where predictions are written, then re-read by
            evaluate_predictions.

    Returns:
        The evaluation results dict with an added 'Time' entry holding
        whatever make_predictions returned (its timing measurement).
    """
    from evaluate import evaluate_predictions
    # FIX: the original bound this to a local named `time`, shadowing the
    # stdlib `time` module used elsewhere in this file.
    elapsed = self.make_predictions(model_path, input_data, output_path)
    results = evaluate_predictions(output_path, input_labels, verbose=self.verbose)
    results['Time'] = elapsed
    return results
def eval_one_epoch(sess, ops, test_writer):
    """Run one full evaluation epoch: AP evaluation plus per-batch metrics.

    ops: dict mapping from string to tf ops.
    sess: active tf.Session.
    test_writer: tf summary writer receiving per-step and per-epoch summaries.

    Returns the mean evaluation loss over all full batches.
    """
    global EPOCH_CNT
    is_training = False
    test_idxs = np.arange(0, len(TEST_DATASET))
    # FIX: use floor division — Python 3 true division yields a float and
    # range(num_batches) below would raise TypeError. Any trailing partial
    # batch is skipped, matching the original intent.
    num_batches = len(TEST_DATASET) // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    shape_ious = []  # per-object [background_iou, instance_iou] rows
    total_seen_class = [0 for _ in range(NUM_SEG_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_SEG_CLASSES)]
    iou2ds_sum = 0
    iou3ds_sum = 0

    log_string(str(datetime.now()))
    log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))

    # Calculate mean AP
    # Final result that we are interested in
    predictions = main_batch(TEST_DATASET, FLAGS.TEST_CLS, len(ALL_CLASSES),
                             NUM_POINT, FLAGS.NUM_CHANNELS,
                             prefix='', sess_ops=(sess, ops), output_filename=None)
    rec, prec, ap, mean_ap = evaluate_predictions(predictions, DATASET, ALL_CLASSES,
                                                  TEST_DATASET, FLAGS.TEST_CLS)
    add_ap_summary(ap, mean_ap, test_writer, EPOCH_CNT, prefix='AP_Test/')
    log_string(get_ap_info(ap, mean_ap))

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Prepare data generally
        batch_data, batch_img_crop, batch_label, batch_center, batch_hclass, batch_hres, \
        batch_sclass, batch_sres, batch_box2d, batch_rtilt, batch_k, batch_rot_angle, \
        batch_img_dims, batch_one_hot_vec = \
            TEST_DATASET.get_batch(test_idxs, start_idx, end_idx, NUM_POINT,
                                   FLAGS.NUM_CHANNELS)
        # Evaluation batches are all treated as 3D-annotated data.
        batch_is_data_2D = np.zeros(BATCH_SIZE)

        # Setup data + run_ops according to training type (strong/semi/weak)
        # FIX: the original listed ops['y_seg_pl']: batch_label twice; the
        # duplicate (identical) entry has been removed.
        feed_dict = {
            ops['pc_pl']: batch_data,
            #ops['img_pl'] : batch_img_crop,
            ops['one_hot_vec_pl']: batch_one_hot_vec,
            ops['y_seg_pl']: batch_label,
            ops['y_centers_pl']: batch_center,
            ops['y_orient_cls_pl']: batch_hclass,
            ops['y_orient_reg_pl']: batch_hres,
            ops['y_dims_cls_pl']: batch_sclass,
            ops['y_dims_reg_pl']: batch_sres,
            ops['is_training_pl']: is_training,
            ops['box2D_pl']: batch_box2d,
            ops['Rtilt_pl']: batch_rtilt,
            ops['K_pl']: batch_k,
            ops['rot_frust_pl']: np.expand_dims(batch_rot_angle, axis=1),
            ops['img_dim_pl']: batch_img_dims,
            ops['is_data_2D_pl']: batch_is_data_2D,
        }
        run_ops = [ops['merged'], ops['step'], ops['logits'],
                   ops['end_points']['iou2ds'], ops['end_points']['iou3ds'],
                   ops['semi_loss']]
        summary, step, logits_val, iou2ds, iou3ds, loss_val = \
            sess.run(run_ops, feed_dict=feed_dict)

        # Statistics
        preds_val = np.argmax(logits_val, 2)
        correct = np.sum(preds_val == batch_label)
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val
        for l in range(NUM_SEG_CLASSES):
            total_seen_class[l] += np.sum(batch_label == l)
            total_correct_class[l] += (np.sum((preds_val == l) & (batch_label == l)))
        iou2ds_sum += np.sum(iou2ds)
        iou3ds_sum += np.sum(iou3ds)

        # Per-object segmentation IoU per class.
        for i in range(BATCH_SIZE):
            segp = preds_val[i, :]
            segl = batch_label[i, :]
            part_ious = [0.0 for _ in range(NUM_SEG_CLASSES)]
            for l in range(NUM_SEG_CLASSES):
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                    # Part is not present and not predicted: count as perfect.
                    part_ious[l] = 1.0
                else:
                    part_ious[l] = np.sum((segl == l) & (segp == l)) / float(
                        np.sum((segl == l) | (segp == l)))
            shape_ious.append(part_ious)

        test_writer.add_summary(summary, step)

    # Add summaries
    shape_ious = np.array(shape_ious)
    test_writer.add_summary(
        tf.Summary(value=[
            tf.Summary.Value(tag='Seg_IOU/background_new',
                             simple_value=np.mean(shape_ious[:, 0]))
        ]), EPOCH_CNT)
    test_writer.add_summary(
        tf.Summary(value=[
            tf.Summary.Value(tag='Seg_IOU/instance_new',
                             simple_value=np.mean(shape_ious[:, 1]))
        ]), EPOCH_CNT)
    test_writer.add_summary(
        tf.Summary(value=[
            tf.Summary.Value(tag='Seg_IOU/Mean',
                             simple_value=np.mean(shape_ious))
        ]), EPOCH_CNT)

    log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    # FIX: np.float was removed from NumPy; plain float is the equivalent.
    log_string('eval avg class acc: %f' % \
        (np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=float))))
    log_string('eval mIoU: %f' % (np.mean(shape_ious)))
    # FIX: rejoined the format string that was broken across two lines.
    log_string('eval box IoU (ground/3D) : %f / %f' %
               (iou2ds_sum / float(num_batches * BATCH_SIZE),
                iou3ds_sum / float(num_batches * BATCH_SIZE)))

    EPOCH_CNT += 1
    return loss_sum / float(num_batches)
def vis_predictions3D(pred_files, gt_file, number_to_show=10, filenums=None):
    """Build VTK actors visualizing 3D box/segmentation predictions.

    For each pickled prediction file in pred_files, renders a row of sampled
    objects: point clouds (plain and seg-colored), GT and predicted 3D boxes,
    cropped images with 2D boxes, and per-object / per-model IoU and AP text.

    Args:
        pred_files: list of paths to zipped-pickle prediction dumps; each
            unpacks into 14 parallel lists (points, seg GT/pred, box params,
            scores, class ids, file numbers, 2D/3D boxes).
        gt_file: dataset pickle path handed to ROISegBoxDataset as
            overwritten_data_path (used for AP evaluation).
        number_to_show: number of objects sampled per model row; also the
            column count of the layout grid.
        filenums: unused here — presumably a file-number filter in a sibling
            variant; TODO confirm before removing.

    Returns:
        (vtk_pcs_with_col, key_to_actors_to_hide) — the colored point-cloud
        actors, and a dict mapping keyboard keys ('g', 'p', 'i', 'c', 't')
        to actor groups that the viewer can toggle.
    """
    from roi_seg_box3d_dataset import class2type
    idx = 0                      # running cell index into the row/col grid
    COLS = number_to_show
    ap_infos = {}                # classname -> list of formatted AP strings, one per model
    classes, file_nums, mean_box_ious, mean_seg_ious, box_ious, seg_ious = [], [], [], [], [], []
    vtk_pcs_with_col, vtk_pcs_wo_col, vtk_imgs, vtk_gt_boxes, vtk_pred_boxes, vtk_texts = [], [], [], [], [], []
    choices = []                 # sampled object indices, shared across all models
    test_dataset = ROISegBoxDataset(WHITE_LIST, npoints=2048, split='val',
                                    rotate_to_center=True,
                                    overwritten_data_path=gt_file,
                                    from_rgb_detection=False)
    for n, pred_file in enumerate(pred_files):
        # Lists of different items from predictions
        predictions = load_zipped_pickle(pred_file)
        ps_l, seg_gt_l, seg_pred_l, center_l, heading_cls_l, heading_res_l, size_cls_l, size_res_l, rot_angle_l, \
        score_l, cls_type_l, file_num_l, box2d_l, box3d_l = predictions
        if n == 0:
            # Choosing equal number of objects per class to display.
            # Sampled once from the first model so every row shows the
            # same objects (indices are assumed aligned across files).
            cls_types = []       # whitelist classes in first-seen order
            options = {}         # cls_type -> indices of its objects
            for i, cls_type in enumerate(cls_type_l):
                if not class2type[cls_type] in WHITE_LIST:
                    continue
                if options.get(cls_type) is None:
                    options[cls_type] = [i]
                    cls_types.append(cls_type)
                else:
                    options[cls_type].append(i)
            # Make use of array_split to divide into fairly equal groups
            arr = np.array_split([1] * number_to_show, len(options.keys()))
            random.shuffle(arr)
            for i, group in enumerate(arr):
                cls_type = cls_types[i]
                # len(group) objects of this class, sampled without repeats
                choice_list = np.random.choice(options[cls_type], len(group),
                                               replace=False)  #replace=True)
                choices.extend(choice_list)
            print('Number of objects in whitelist: %d' % len(options))

        # Compute overall statistics (AP + mean IoUs) — only possible when
        # ground truth exists, i.e. not running on rgb-detection output.
        if not FLAGS.rgb_detection:
            print('==== Computing overall statistics for %s ====' % pred_file)
            from evaluate import evaluate_predictions, get_ap_info
            rec, prec, ap, mean_ap = evaluate_predictions(predictions, dataset,
                                                          CLASSES, test_dataset,
                                                          WHITE_LIST)
            ap['Mean AP'] = mean_ap
            for classname in ap.keys():
                if ap_infos.get(classname) is None:
                    ap_infos[classname] = []
                ap_infos[classname].append('%11s: [%.1f]' % (classname,
                                                             100. * ap[classname]))
            # Mean box / seg IoU over every object of this model.
            box_iou_sum, seg_iou_sum = 0, 0
            for i in range(len(ps_l)):
                seg_gt = seg_gt_l[i]
                box3d = box3d_l[i]
                seg_pred = seg_pred_l[i]
                center = center_l[i]
                heading_cls = heading_cls_l[i]
                heading_res = heading_res_l[i]
                size_cls = size_cls_l[i]
                size_res = size_res_l[i]
                rot_angle = rot_angle_l[i]
                # Rotate GT box into the same frustum-rotated frame as preds.
                gt_box3d = rotate_pc_along_y(np.copy(box3d), rot_angle)
                heading_angle = class2angle(heading_cls, heading_res, NUM_HEADING_BIN)
                box_size = class2size(size_cls, size_res)
                pred_box3d = get_3d_box(box_size, heading_angle, center)
                # Box IOU — shift_arr reorders GT corners to match the
                # predicted box's corner ordering before IoU.
                shift_arr = np.array([4,5,6,7,0,1,2,3])
                box_iou3d, _ = box3d_iou(gt_box3d[shift_arr,:], pred_box3d)
                # Seg IOU
                seg_iou = get_seg_iou(seg_gt, seg_pred, 2)
                box_iou_sum += box_iou3d
                seg_iou_sum += seg_iou
            mean_box_iou = box_iou_sum / len(ps_l)
            mean_seg_iou = seg_iou_sum / len(ps_l)
            mean_box_ious.append(mean_box_iou)
            mean_seg_ious.append(mean_seg_iou)

        # Build the actors for each sampled object; idx keeps advancing
        # across models so each model occupies its own row.
        for i in choices:
            row, col = idx // COLS, idx % COLS
            idx += 1
            ps = ps_l[i]
            seg_pred = seg_pred_l[i]
            center = center_l[i]
            heading_cls = heading_cls_l[i]
            heading_res = heading_res_l[i]
            size_cls = size_cls_l[i]
            size_res = size_res_l[i]
            rot_angle = rot_angle_l[i]
            score = score_l[i]
            cls_type = cls_type_l[i]
            file_num = file_num_l[i]
            # GT seg / 3D box are unavailable for rgb-detection dumps.
            seg_gt = seg_gt_l[i] if not FLAGS.rgb_detection else []
            box2d = box2d_l[i]
            box3d = box3d_l[i] if not FLAGS.rgb_detection else []

            # Visualize point cloud (with and without color)
            vtk_pc_wo_col = vis.VtkPointCloud(ps)
            vtk_pc = vis.VtkPointCloud(ps, gt_points=seg_gt, pred_points=seg_pred)
            vis.vtk_transform_actor(vtk_pc_wo_col.vtk_actor, translate=(SEP*col,SEP*row,0))
            vis.vtk_transform_actor(vtk_pc.vtk_actor, translate=(SEP*col,SEP*row,0))
            vtk_pcs_wo_col.append(vtk_pc_wo_col.vtk_actor)
            vtk_pcs_with_col.append(vtk_pc.vtk_actor)

            # Visualize GT 3D box
            if FLAGS.rgb_detection:
                # No GT box in the dump: pull labels from the dataset and
                # draw every whitelisted object of this frame.
                objects = dataset.get_label_objects(file_num)
                calib = dataset.get_calibration(file_num)
                for obj in objects:
                    if obj.classname not in WHITE_LIST:
                        continue
                    box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
                    box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
                    box3d_pts_3d = rotate_pc_along_y(np.copy(box3d_pts_3d), rot_angle)
                    vtk_box3D = vis.vtk_box_3D(box3d_pts_3d, color=vis.Color.LightGreen)
                    vis.vtk_transform_actor(vtk_box3D, translate=(SEP*col,SEP*row,0))
                    vtk_gt_boxes.append(vtk_box3D)
            else:
                gt_box3d = rotate_pc_along_y(np.copy(box3d), rot_angle)
                vtk_gt_box3D = vis.vtk_box_3D(gt_box3d, color=vis.Color.LightGreen)
                vis.vtk_transform_actor(vtk_gt_box3D, translate=(SEP*col,SEP*row,0))
                vtk_gt_boxes.append(vtk_gt_box3D)

            # Visualize Pred 3D box
            heading_angle = class2angle(heading_cls, heading_res, NUM_HEADING_BIN)
            box_size = class2size(size_cls, size_res)
            pred_box3d = get_3d_box(box_size, heading_angle, center)
            vtk_pred_box3D = vis.vtk_box_3D(pred_box3d, color=vis.Color.White)
            vis.vtk_transform_actor(vtk_pred_box3D, translate=(SEP*col,SEP*row,0))
            vtk_pred_boxes.append(vtk_pred_box3D)

            # Visualize Images — green 2D box when GT-based, orange when
            # the box came from an rgb detector.
            box2d_col = vis.Color.LightGreen if not FLAGS.rgb_detection else vis.Color.Orange
            img_filename = os.path.join(IMG_DIR, '%06d.jpg' % file_num)
            vtk_img = vis.vtk_image(img_filename, box2Ds_list=[[box2d]], box2Ds_cols=[box2d_col])
            vis.vtk_transform_actor(vtk_img, scale=(IMG_SCALE,IMG_SCALE,IMG_SCALE),
                                    rot=(0,180,180),
                                    translate=(-2+SEP*col,2+SEP*row,10))
            vtk_imgs.append(vtk_img)

            # Other information (per-cell text labels)
            classes.append(class2type[cls_type].capitalize())
            file_nums.append(str(file_num))
            if not FLAGS.rgb_detection:
                # gt_box3d comes from the else-branch above (GT available).
                shift_arr = np.array([4,5,6,7,0,1,2,3])
                box_iou3d, _ = box3d_iou(gt_box3d[shift_arr,:], pred_box3d)
                box_ious.append(box_iou3d)
                seg_iou = get_seg_iou(seg_gt, seg_pred, 2)
                seg_ious.append(seg_iou)

    # Visualize overall statistics (left margin: one line per model)
    vtk_texts.extend(vis.vtk_text([('Model: %s' % pred_file.split('/')[-1])
                                   for pred_file in pred_files],
                                  arr_type='text', sep=SEP, cols=1,
                                  scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(-14.5,2.5,2)))
    vtk_texts.extend(vis.vtk_text(['Mean Box IOU:'] * len(pred_files),
                                  arr_type='text', sep=SEP, cols=1,
                                  scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(-14.5,3,2)))
    vtk_texts.extend(vis.vtk_text(mean_box_ious, arr_type='float', color=True,
                                  sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(-10,3,2)))
    vtk_texts.extend(vis.vtk_text(['Mean Seg IOU:'] * len(pred_files),
                                  arr_type='text', sep=SEP, cols=1,
                                  scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(-14.5,3.5,2)))
    vtk_texts.extend(vis.vtk_text(mean_seg_ious, arr_type='float', color=True,
                                  sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(-10,3.5,2)))
    for i, (cls_name, ap_info) in enumerate(ap_infos.items()):
        vtk_texts.extend(vis.vtk_text(ap_info, arr_type='text', color=True,
                                      sep=SEP, cols=1, scale=TEXT_SCALE,
                                      rot=TEXT_ROT,
                                      translate=(-14.5,4+i*0.5,2)))

    # Visualize text information (per-cell labels above each object)
    vtk_texts.extend(vis.vtk_text(['Class:'] * len(classes), arr_type='text',
                                  sep=SEP, cols=COLS, scale=TEXT_SCALE,
                                  rot=TEXT_ROT, translate=(-1.5,3,2)))
    vtk_texts.extend(vis.vtk_text(classes, arr_type='text', sep=SEP, cols=COLS,
                                  scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(0.5,3,2)))
    vtk_texts.extend(vis.vtk_text(['File:'] * len(file_nums), arr_type='text',
                                  sep=SEP, cols=COLS, scale=TEXT_SCALE,
                                  rot=TEXT_ROT, translate=(-1.5,3.5,2)))
    vtk_texts.extend(vis.vtk_text(file_nums, arr_type='text', sep=SEP, cols=COLS,
                                  scale=TEXT_SCALE, rot=TEXT_ROT,
                                  translate=(0.25,3.5,2)))
    if not FLAGS.rgb_detection:
        vtk_texts.extend(vis.vtk_text(['Box:'] * len(box_ious), arr_type='text',
                                      sep=SEP, cols=COLS, scale=TEXT_SCALE,
                                      rot=TEXT_ROT, translate=(-1.5,4,2)))
        vtk_texts.extend(vis.vtk_text(box_ious, arr_type='float', color=True,
                                      sep=SEP, cols=COLS, scale=TEXT_SCALE,
                                      rot=TEXT_ROT, translate=(0,4,2)))
        vtk_texts.extend(vis.vtk_text(['Seg:'] * len(seg_ious), arr_type='text',
                                      sep=SEP, cols=COLS, scale=TEXT_SCALE,
                                      rot=TEXT_ROT, translate=(-1.5,4.5,2)))
        vtk_texts.extend(vis.vtk_text(seg_ious, arr_type='float', color=True,
                                      sep=SEP, cols=COLS, scale=TEXT_SCALE,
                                      rot=TEXT_ROT, translate=(0,4.5,2)))

    # Keyboard toggles for the viewer: hide/show each actor family.
    key_to_actors_to_hide = {
        'g': vtk_gt_boxes,
        'p': vtk_pred_boxes,
        'i': vtk_imgs,
        'c': vtk_pcs_wo_col,
        't': vtk_texts
    }
    return vtk_pcs_with_col, key_to_actors_to_hide