def evaluate_scan(pred_file, gt_file, confusion):
    """Accumulate per-vertex instance-id agreement into `confusion`.

    Ids are encoded as label_id * 1000 + instance_id; `% 1000` extracts the
    instance part.  Returns the raw ground-truth id array.

    Args:
        pred_file: path to the per-vertex predicted ids (one id per line).
        gt_file: path to the per-vertex ground-truth ids.
        confusion: 2-D accumulator indexed [gt_instance][pred_instance].
    """
    try:
        pred_ids = util_3d.load_ids(pred_file)
    except Exception as e:
        util.print_error('unable to load ' + pred_file + ': ' + str(e))
    try:
        gt_ids = util_3d.load_ids(gt_file)
    except Exception as e:
        util.print_error('unable to load ' + gt_file + ': ' + str(e))
    # sanity checks
    # truncate predictions to the ground-truth length first, so the shape
    # check below only trips when FEWER predictions than vertices were given
    pred_ids = pred_ids[0:gt_ids.shape[0]]
    if not pred_ids.shape == gt_ids.shape:
        util.print_error(
            '%s: number of predicted values does not match number of vertices'
            % pred_file, user_fault=True)
    for (gt_val, pred_val) in izip(gt_ids.flatten(), pred_ids.flatten()):
        gt_instance_val = gt_val % 1000
        pred_instance_val = pred_val % 1000
        if gt_instance_val not in VALID_INSTANCE_ID:
            continue
        if pred_instance_val not in VALID_INSTANCE_ID:
            # BUG FIX: the original assigned UNKNOWN_ID to `pred_val`, which is
            # never read again, and then indexed the confusion matrix with the
            # unmapped (possibly out-of-set) instance value.  Remap the value
            # that is actually used as the index, matching the sibling
            # class-id variants of this function.
            pred_instance_val = UNKNOWN_ID
        confusion[gt_instance_val][pred_instance_val] += 1
    return gt_ids
def evaluate_scan(pred_file, gt_file, confusion):
    """Accumulate per-vertex semantic-label agreement into `confusion`.

    Args:
        pred_file: path to the per-vertex predicted class ids.
        gt_file: path to the per-vertex ground-truth class ids.
        confusion: 2-D accumulator indexed [gt_class_id][pred_class_id].
    """
    try:
        pred_ids = util_3d.load_ids(pred_file)
    except Exception as e:
        # BUG FIX: the original bare `except:` referenced `e`, which was never
        # bound, so the handler itself raised NameError (and a bare except
        # also swallows KeyboardInterrupt/SystemExit).
        util.print_error('unable to load ' + pred_file + ': ' + str(e))
    try:
        gt_ids = util_3d.load_ids(gt_file)
    except Exception as e:
        util.print_error('unable to load ' + gt_file + ': ' + str(e))
    # sanity checks
    if not pred_ids.shape == gt_ids.shape:
        util.print_error(
            '%s: number of predicted values does not match number of vertices'
            % pred_file, user_fault=True)
    for (gt_val, pred_val) in izip(gt_ids.flatten(), pred_ids.flatten()):
        if gt_val not in VALID_CLASS_IDS:
            continue
        # fold predictions outside the evaluated label set into UNKNOWN_ID
        if pred_val not in VALID_CLASS_IDS:
            pred_val = UNKNOWN_ID
        confusion[gt_val][pred_val] += 1
def visualize(pred_file, mesh_file, output_file):
    """Write a copy of `mesh_file` with vertices colored by predicted label.

    Args:
        pred_file: per-vertex predicted nyu40 label ids (one per line).
        mesh_file: input PLY mesh whose vertex count must match `pred_file`.
        output_file: destination path; must end in '.ply'.
    """
    if not output_file.endswith('.ply'):
        util.print_error('output file must be a .ply file')
    colors = util.create_color_palette()
    num_colors = len(colors)
    ids = util_3d.load_ids(pred_file)
    with open(mesh_file, 'rb') as f:
        plydata = PlyData.read(f)
    num_verts = plydata['vertex'].count
    if num_verts != len(ids):
        # BUG FIX: the original message concatenated "...<count>vs #mesh..."
        # with no separating space, garbling the diagnostic.
        util.print_error('#predicted labels = ' + str(len(ids)) +
                         ' vs #mesh vertices = ' + str(num_verts))
    # *_vh_clean_2.ply has colors already
    for i in range(num_verts):
        if ids[i] >= num_colors:
            util.print_error('found predicted label ' + str(ids[i]) +
                             ' not in nyu40 label set')
        color = colors[ids[i]]
        plydata['vertex']['red'][i] = color[0]
        plydata['vertex']['green'][i] = color[1]
        plydata['vertex']['blue'][i] = color[2]
    plydata.write(output_file)
for (li,label_name) in enumerate(CLASS_LABELS): avg_dict["classes"][label_name] = {} #avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25]) avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50]) avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25]) return avg_dict def assign_instances_for_scan(pred_file, gt_file, pred_path): try: pred_info = util_3d.read_instance_prediction_file(pred_file, pred_path) except Exception, e: util.print_error('unable to load ' + pred_file + ': ' + str(e)) try: gt_ids = util_3d.load_ids(gt_file) except Exception, e: util.print_error('unable to load ' + gt_file + ': ' + str(e)) # get gt instances gt_instances = util_3d.get_instances(gt_ids, VALID_CLASS_IDS, CLASS_LABELS, ID_TO_LABEL) # associate gt2pred = deepcopy(gt_instances) for label in gt2pred: for gt in gt2pred[label]: gt['matched_pred'] = [] pred2gt = {} for label in CLASS_LABELS: pred2gt[label] = [] num_pred_instances = 0 # mask of void labels in the groundtruth
def assign_instances_for_scan(pred_file, gt_file, pred_path):
    """Match predicted instance masks against ground-truth instances.

    For every predicted mask with a valid label, computes its overlap with
    each ground-truth instance of the same label and with the void region.

    Args:
        pred_file: instance-prediction index file (mask path, label, conf).
        gt_file: per-vertex ground-truth ids (label_id * 1000 + instance).
        pred_path: base directory used to resolve mask paths in `pred_file`.

    Returns:
        (gt2pred, pred2gt): per-label dicts of gt instances with their
        'matched_pred' lists, and of prediction records with 'matched_gt'.
    """
    # BUG FIX: the original called read_instance_prediction_file once OUTSIDE
    # the try block (so a load failure crashed before the handler) and then
    # again inside it; the handlers also used bare `except:` and dropped the
    # exception detail that the sibling variant of this function reports.
    try:
        pred_info = util_3d.read_instance_prediction_file(pred_file, pred_path)
    except Exception as e:
        util.print_error('unable to load ' + pred_file + ': ' + str(e))
    try:
        gt_ids = util_3d.load_ids(gt_file)
    except Exception as e:
        util.print_error('unable to load ' + gt_file + ': ' + str(e))
    # get gt instances
    gt_instances = util_3d.get_instances(gt_ids, VALID_CLASS_IDS,
                                         CLASS_LABELS, ID_TO_LABEL)
    # associate
    gt2pred = deepcopy(gt_instances)
    for label in gt2pred:
        for gt in gt2pred[label]:
            gt['matched_pred'] = []
    pred2gt = {}
    for label in CLASS_LABELS:
        pred2gt[label] = []
    num_pred_instances = 0
    # mask of void labels in the groundtruth (label part of the encoded id
    # not in the evaluated set)
    bool_void = np.logical_not(np.in1d(gt_ids // 1000, VALID_CLASS_IDS))
    # go thru all prediction masks
    for pred_index, pred_mask_file in enumerate(pred_info):
        label_id = int(pred_info[pred_mask_file]['label_id'])
        conf = pred_info[pred_mask_file]['conf']
        if not label_id in ID_TO_LABEL:
            continue
        label_name = ID_TO_LABEL[label_id]
        # read the mask
        pred_mask = util_3d.load_ids(pred_mask_file)
        if len(pred_mask) != len(gt_ids):
            util.print_error(
                'wrong number of lines in ' + pred_mask_file +
                '(%d) vs #mesh vertices (%d), please double check and/or '
                're-download the mesh' % (len(pred_mask), len(gt_ids)))
        # convert to binary
        pred_mask = np.not_equal(pred_mask, 0)
        num = np.count_nonzero(pred_mask)
        if num < opt.min_region_sizes[0]:
            continue  # skip if empty
        pred_instance = {}
        pred_instance['filename'] = pred_mask_file
        pred_instance['pred_id'] = num_pred_instances
        pred_instance['label_id'] = label_id
        pred_instance['vert_count'] = num
        pred_instance['confidence'] = conf
        pred_instance['void_intersection'] = np.count_nonzero(
            np.logical_and(bool_void, pred_mask))
        # matched gt instances
        matched_gt = []
        # go thru all gt instances with matching label
        for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
            intersection = np.count_nonzero(
                np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
            if intersection > 0:
                # record the match symmetrically on both sides
                gt_copy = gt_inst.copy()
                pred_copy = pred_instance.copy()
                gt_copy['intersection'] = intersection
                pred_copy['intersection'] = intersection
                matched_gt.append(gt_copy)
                gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
        pred_instance['matched_gt'] = matched_gt
        num_pred_instances += 1
        pred2gt[label_name].append(pred_instance)
    return gt2pred, pred2gt
def evaluate_scan(pred_file, gt_file, confusion): try: pred_ids = util_3d.load_ids(pred_file) except Exception, e: util.print_error('unable to load ' + pred_file + ': ' + str(e))
'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture' ] VALID_CLASS_IDS = np.array( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1 def evaluate_scan(pred_file, gt_file, confusion): try: pred_ids = util_3d.load_ids(pred_file) except Exception, e: util.print_error('unable to load ' + pred_file + ': ' + str(e)) try: gt_ids = util_3d.load_ids(gt_file) except Exception, e: util.print_error('unable to load ' + gt_file + ': ' + str(e)) # sanity checks if not pred_ids.shape == gt_ids.shape: util.print_error( '%s: number of predicted values does not match number of vertices' % pred_file, user_fault=True) for (gt_val, pred_val) in izip(gt_ids.flatten(), pred_ids.flatten()): if gt_val not in VALID_CLASS_IDS: continue if pred_val not in VALID_CLASS_IDS: pred_val = UNKNOWN_ID confusion[gt_val][pred_val] += 1