Example #1
import os
import numpy as np

# Note: this excerpt relies on names defined elsewhere in the original
# script (FLAGS, MODEL_FILE, NUM_POINT, check_mkdir, force_mkdir,
# log_string, eval).
def main():
    # FLAGS.log_dir is expected to hold a list of log directories.
    for LOG_DIR in FLAGS.log_dir:
        CKPT_DIR = os.path.join(LOG_DIR, 'trained_models')
        if not os.path.exists(LOG_DIR):
            print('ERROR: log_dir %s does not exist! Please Check!' % LOG_DIR)
            exit(1)
        VALID_DIR = os.path.join(LOG_DIR, FLAGS.valid_dir)
        if not os.path.exists(VALID_DIR):
            print('ERROR: valid_dir %s does not exist! Run valid.py first!' % VALID_DIR)
            exit(1)
        LOG_DIR = os.path.join(LOG_DIR, FLAGS.eval_dir)  # LOG_DIR now points at the eval output dir
        check_mkdir(LOG_DIR)
        PRED_DIR = os.path.join(LOG_DIR, FLAGS.pred_dir)
        force_mkdir(PRED_DIR)
        VISU_DIR = None  # stays None unless visualization output is requested
        if FLAGS.visu_dir is not None:
            VISU_DIR = os.path.join(LOG_DIR, FLAGS.visu_dir)
            force_mkdir(VISU_DIR)

        os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
        os.system('cp %s %s' % (__file__, LOG_DIR)) # bkp of train procedure
        LOG_FOUT = open(os.path.join(LOG_DIR, 'log_eval.txt'), 'w')
        LOG_FOUT.write(str(FLAGS)+'\n')

        data_in_dir = '../../data/ins_seg_h5_for_sgpn/%s-%d/' % (FLAGS.category, FLAGS.level_id)
        test_h5_fn_list = []
        for item in os.listdir(data_in_dir):
            if item.endswith('.h5') and item.startswith('test-'):
                test_h5_fn_list.append(os.path.join(data_in_dir, item))

        NUM_CLASSES = 1
        print('Forced Number of Semantic Labels: ', NUM_CLASSES)
        NUM_INS = FLAGS.num_ins
        print('Number of Instances: ', NUM_INS)

        # load validation hyper-parameters
        pw_sim_thres = np.loadtxt(os.path.join(VALID_DIR, 'per_category_pointwise_similarity_threshold.txt')).reshape(NUM_CLASSES)
        avg_group_size = np.loadtxt(os.path.join(VALID_DIR, 'per_category_average_group_size.txt')).reshape(NUM_CLASSES)
        min_group_size = 0.25 * avg_group_size

        for i in range(NUM_CLASSES):
            print('%d %f %d' % (i, pw_sim_thres[i], min_group_size[i]))

        # main
        log_string('pid: %s'%(str(os.getpid())), LOG_FOUT)
        eval(NUM_CLASSES=NUM_CLASSES, NUM_POINT=NUM_POINT, NUM_INS=NUM_INS, CKPT_DIR=CKPT_DIR,
             test_h5_fn_list=test_h5_fn_list, min_group_size=min_group_size, pw_sim_thres=pw_sim_thres,
             PRED_DIR=PRED_DIR, VISU_DIR=VISU_DIR, LOG_FOUT=LOG_FOUT)
        LOG_FOUT.close()
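
check_mkdir and force_mkdir are used throughout these examples but are not shown on this page; below is a minimal sketch consistent with how they are called (the names come from the source, the bodies are an assumption):

import os
import shutil

def check_mkdir(d):
    # Assumed behavior: create the directory only if it is missing.
    if not os.path.exists(d):
        os.makedirs(d)

def force_mkdir(d):
    # Assumed behavior: recreate the directory from scratch.
    if os.path.exists(d):
        shutil.rmtree(d)
    os.makedirs(d)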
Example #2
import os
import sys
import json

from progressbar import ProgressBar
from subprocess import call
from PIL import Image
import scipy.misc as misc  # NOTE: removed in recent SciPy releases; see the shim sketched below

from detect_adj import compute_adj
from detect_ref_sym import compute_ref_sym, atob_ref_sym
from detect_trans_sym import compute_trans_sym, atob_trans_sym
from detect_rot_sym import compute_rot_sym, atob_rot_sym

anno_id = sys.argv[1]
in_dir = os.path.join('../../data', anno_id)
render_dir = os.path.join(in_dir, 'parts_render_after_merging')

out_dir = os.path.join('results', anno_id)
check_mkdir(out_dir)
visu_dir = os.path.join(out_dir, 'visu')
force_mkdir(visu_dir)

parent_dir = os.path.join(visu_dir, 'parent')
os.mkdir(parent_dir)
info_dir = os.path.join(visu_dir, 'info')
os.mkdir(info_dir)
child_dir = os.path.join(visu_dir, 'child')
os.mkdir(child_dir)

json_fn = os.path.join(in_dir, 'result_after_merging.json')
with open(json_fn, 'r') as fin:
    data = json.load(fin)[0]

found_edges = dict()
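
The full script presumably uses scipy.misc for image I/O; those helpers were deprecated and later removed from SciPy, so the import above fails on recent releases. A small compatibility shim over Pillow (an assumption, not part of the original code) keeps the old call sites working:

import numpy as np
from PIL import Image

# Hypothetical stand-in for `import scipy.misc as misc` on modern SciPy.
class misc:
    @staticmethod
    def imread(fn):
        return np.array(Image.open(fn))

    @staticmethod
    def imsave(fn, arr):
        Image.fromarray(np.asarray(arr, dtype=np.uint8)).save(fn)

    @staticmethod
    def imresize(arr, size):
        # scipy's imresize took (height, width); PIL resize takes (width, height).
        return np.array(Image.fromarray(arr).resize((size[1], size[0])))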
Example #3
# Note: load_gt_h5, load_pred_h5 and check_mkdir are helpers defined
# elsewhere in the original script; compute_ap is sketched after this
# example.
def eval_per_class_ap(stat_fn, gt_dir, pred_dir, iou_threshold=0.5, plot_dir=None):
    """ Input:  stat_fn contains all part ids and names
                gt_dir contains test-xx.h5
                pred_dir contains test-xx.h5
        Output: aps: per-part-category Average Precision (AP), evaluated on all test shapes
                mAP: mean AP over part categories that appear in the ground truth
    """
    print('Evaluation Start.')
    print('Ground-truth Directory: %s' % gt_dir)
    print('Prediction Directory: %s' % pred_dir)

    if plot_dir is not None:
        check_mkdir(plot_dir)

    # read stat_fn
    with open(stat_fn, 'r') as fin:
        part_name_list = [item.rstrip().split()[1] for item in fin.readlines()]
    print('Part Name List: ', part_name_list)
    n_labels = len(part_name_list)
    print('Total Number of Semantic Labels: %d' % n_labels)

    # check all h5 files
    test_h5_list = []
    for item in os.listdir(gt_dir):
        if item.startswith('test-') and item.endswith('.h5'):
            if not os.path.exists(os.path.join(pred_dir, item)):
                print('ERROR: h5 file %s is in gt directory but not in pred directory.' % item)
                exit(1)
            test_h5_list.append(item)

    # read each h5 file and collect per-part-category true_pos, false_pos and confidence scores
    true_pos_list = [[] for item in part_name_list]
    false_pos_list = [[] for item in part_name_list]
    conf_score_list = [[] for item in part_name_list]

    gt_npos = np.zeros((n_labels), dtype=np.int32)

    for item in test_h5_list:
        print('Testing %s' % item)

        gt_mask, gt_mask_label, gt_mask_valid, gt_mask_other = load_gt_h5(os.path.join(gt_dir, item))
        pred_mask, pred_label, pred_valid, pred_conf = load_pred_h5(os.path.join(pred_dir, item))

        n_shape = gt_mask.shape[0]
        gt_n_ins = gt_mask.shape[1]
        pred_n_ins = pred_mask.shape[1]

        for i in range(n_shape):
            cur_pred_mask = pred_mask[i, ...]
            cur_pred_label = pred_label[i, :]
            cur_pred_conf = pred_conf[i, :]
            cur_pred_valid = pred_valid[i, :]
            
            cur_gt_mask = gt_mask[i, ...]
            cur_gt_label = gt_mask_label[i, :]
            cur_gt_valid = gt_mask_valid[i, :]
            cur_gt_other = gt_mask_other[i, :]

            # classify all valid gt masks by part categories
            gt_mask_per_cat = [[] for item in part_name_list]
            for j in range(gt_n_ins):
                if cur_gt_valid[j]:
                    sem_id = cur_gt_label[j]
                    gt_mask_per_cat[sem_id].append(j)
                    gt_npos[sem_id] += 1

            # sort prediction and match iou to gt masks
            cur_pred_conf[~cur_pred_valid] = 0.0
            order = np.argsort(-cur_pred_conf)

            gt_used = np.zeros((gt_n_ins), dtype=bool)  # np.bool was removed in NumPy 1.24

            for j in range(pred_n_ins):
                idx = order[j]
                if cur_pred_valid[idx]:
                    sem_id = cur_pred_label[idx]

                    iou_max = 0.0
                    cor_gt_id = -1
                    for k in gt_mask_per_cat[sem_id]:
                        if not gt_used[k]:
                            # Remove points with gt label *other* from the prediction
                            # We will not evaluate them in the IoU since they can be assigned any label
                            clean_cur_pred_mask = (cur_pred_mask[idx, :] & (~cur_gt_other))

                            intersect = np.sum(cur_gt_mask[k, :] & clean_cur_pred_mask)
                            union = np.sum(cur_gt_mask[k, :] | clean_cur_pred_mask)
                            iou = intersect * 1.0 / union
                            
                            if iou > iou_max:
                                iou_max = iou
                                cor_gt_id = k
                                
                    if iou_max > iou_threshold:
                        gt_used[cor_gt_id] = True

                        # add in a true positive
                        true_pos_list[sem_id].append(True)
                        false_pos_list[sem_id].append(False)
                        conf_score_list[sem_id].append(cur_pred_conf[idx])
                    else:
                        # add in a false positive
                        true_pos_list[sem_id].append(False)
                        false_pos_list[sem_id].append(True)
                        conf_score_list[sem_id].append(cur_pred_conf[idx])

    # compute per-part-category AP
    aps = np.zeros((n_labels), dtype=np.float32)
    ap_valids = np.ones((n_labels), dtype=bool)  # np.bool was removed in NumPy 1.24
    for i in range(n_labels):
        has_pred = (len(true_pos_list[i]) > 0)
        has_gt = (gt_npos[i] > 0)

        if not has_gt:
            ap_valids[i] = False
            continue

        if has_gt and not has_pred:
            continue

        cur_true_pos = np.array(true_pos_list[i], dtype=np.float32)
        cur_false_pos = np.array(false_pos_list[i], dtype=np.float32)
        cur_conf_score = np.array(conf_score_list[i], dtype=np.float32)

        # sort according to confidence score again
        order = np.argsort(-cur_conf_score)
        sorted_true_pos = cur_true_pos[order]
        sorted_false_pos = cur_false_pos[order]

        out_plot_fn = None
        if plot_dir is not None:
            out_plot_fn = os.path.join(plot_dir, part_name_list[i].replace('/', '-')+'.png')

        aps[i] = compute_ap(sorted_true_pos, sorted_false_pos, gt_npos[i], plot_fn=out_plot_fn)

    # compute mean AP
    mean_ap = np.sum(aps * ap_valids) / np.sum(ap_valids)

    return aps, ap_valids, gt_npos, mean_ap
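
compute_ap is not shown on this page; below is a minimal sketch of the standard precision-recall AP computation matching the call site above (the interface comes from the source, the body is an assumption, and the plot_fn argument is ignored here):

import numpy as np

def compute_ap(sorted_true_pos, sorted_false_pos, n_gt, plot_fn=None):
    # Inputs are 0/1 arrays already sorted by descending confidence.
    tp = np.cumsum(sorted_true_pos)
    fp = np.cumsum(sorted_false_pos)
    recall = tp / max(n_gt, 1)
    precision = tp / np.maximum(tp + fp, 1e-12)
    # Make precision monotonically non-increasing, then integrate it
    # over recall (VOC-style AP).
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])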
Example #4
# Note: load_gt_h5, load_pred_h5_nosem and check_mkdir are helpers defined
# elsewhere in the original script; linear_sum_assignment comes from
# scipy.optimize (import not shown in this excerpt).
def eval_recall_iou_nosem_ab(stat_fn,
                             gt_dir,
                             pred_dir,
                             level_id,
                             iou_threshold=0.5,
                             plot_dir=None):
    """ Input:  stat_fn contains all part ids and names
                gt_dir contains test-xx.h5
                pred_dir contains Level_3/test-xx.h5
        Output: recall_iou_arr: recall of ground-truth instances at IoU
                thresholds 0.50, 0.55, ..., 0.95 (semantic labels are ignored)
    """
    #print('Evaluation Start.')
    #print('Ground-truth Directory: %s' % gt_dir)
    #print('Prediction Directory: %s' % pred_dir)

    if plot_dir is not None and not os.path.exists(plot_dir):
        check_mkdir(plot_dir)

    # read stat_fn
    with open(stat_fn, 'r') as fin:
        part_name_list = [item.rstrip().split()[1] for item in fin.readlines()]
    #print('Part Name List: ', part_name_list)
    n_labels = len(part_name_list)
    #print('Total Number of Semantic Labels: %d' % n_labels)

    # check all h5 files
    test_h5_list = []
    for item in os.listdir(gt_dir):
        if item.startswith('test-') and item.endswith('.h5'):
            # NOTE: the prediction path hard-codes 'Level_3'; the level_id
            # argument is unused in this excerpt.
            if not os.path.exists(os.path.join(pred_dir, 'Level_3', item)):
                print('ERROR: h5 file %s is in gt directory but not in pred directory.' % item)
                #exit(1)
            test_h5_list.append(item)

    # read each h5 file and collect per-part-category true_pos, false_pos and confidence scores
    #true_pos_list = [[] for item in part_name_list]
    #false_pos_list = [[] for item in part_name_list]
    #conf_score_list = [[] for item in part_name_list]
    true_pos_list = []
    false_pos_list = []
    conf_score_list = []
    iou_list = []

    gt_npos = np.zeros((n_labels), dtype=np.int32)
    gt_ins_num = 0

    for item in test_h5_list:
        print('Testing %s' % item)

        gt_mask, gt_mask_label, gt_mask_valid, gt_mask_other = load_gt_h5(
            os.path.join(gt_dir, item))
        pred_mask, pred_valid, pred_conf = load_pred_h5_nosem(
            os.path.join(pred_dir, 'Level_3', item))

        n_shape = gt_mask.shape[0]
        gt_n_ins = gt_mask.shape[1]
        pred_n_ins = pred_mask.shape[1]

        for i in range(n_shape):
            cur_pred_mask = pred_mask[i, ...]
            cur_pred_conf = pred_conf[i, :]
            cur_pred_valid = pred_valid[i, :]

            cur_gt_mask = gt_mask[i, ...]
            cur_gt_valid = gt_mask_valid[i, :]
            cur_gt_other = gt_mask_other[i, :]

            gt_ins_num += np.sum(cur_gt_valid)
            ## classify all valid gt masks by part categories
            #gt_mask_per_cat = [[] for item in part_name_list]
            #for j in range(gt_n_ins):
            #    if cur_gt_valid[j]:
            #        sem_id = cur_gt_label[j]
            #        gt_mask_per_cat[sem_id].append(j)
            #        gt_npos[sem_id] += 1

            # sort prediction and match iou to gt masks
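            # GT masks are assumed to be packed at the front of the array:
            # every row past the valid count must be all-zero.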
            assert np.sum(cur_gt_mask[np.sum(cur_gt_valid):]) == 0
            cur_gt_mask = cur_gt_mask[:np.sum(cur_gt_valid)]
            cur_pred_mask = cur_pred_mask[np.where(cur_pred_valid)]
            # remove other mask
            cur_pred_mask = cur_pred_mask & ~np.tile(
                np.expand_dims(cur_gt_other, axis=0),
                [cur_pred_mask.shape[0], 1])
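            # Pairwise |A ∩ B| and |A ∪ B| for all (gt, pred) pairs via two
            # matrix products; the union uses |A ∪ B| = n_points - |~A ∩ ~B|.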
            intersect = np.matmul(cur_gt_mask.astype(
                np.int32), (cur_pred_mask.transpose(1, 0).astype(np.int32)))
            union = cur_gt_mask.shape[1] - np.matmul(
                1 - cur_gt_mask.astype(np.int32), 1 -
                (cur_pred_mask.transpose(1, 0).astype(np.int32)))
            iou = intersect / (union + 1e-6)
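            # Hungarian matching (scipy.optimize.linear_sum_assignment, import
            # not shown in this excerpt) pairs each GT instance with at most
            # one prediction so that the total matched IoU is maximized.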
            row, column = linear_sum_assignment(-iou)
            iou_list += list(iou[row, column])
            if row.shape[0] < cur_gt_mask.shape[0]:
                iou_list += list(np.zeros(cur_gt_mask.shape[0] - row.shape[0]))

    iou_arr = np.array(iou_list)
    recall_iou_list = []
    #xx = np.linspace(0.05,0.55,11)-0.05+0.5
    xx = np.linspace(0.5, 0.95, 10)  # IoU thresholds 0.50, 0.55, ..., 0.95
    for i in range(10):
        p = xx[i]
        recall_iou_list.append(np.sum(iou_arr >= p) / iou_arr.shape[0])
    recall_iou_arr = np.array(recall_iou_list)

    if plot_dir is not None:
        plot_fn = os.path.join(plot_dir,
                               gt_dir.split('/')[-2] + '_realliou' + '.png')
        import matplotlib
        matplotlib.use('Agg')  # select the non-interactive backend before importing pyplot
        import matplotlib.pyplot as plt
        fig = plt.figure()
        plt.plot(xx, recall_iou_arr, 'b-')
        plt.title('Recall versus IoU (AR: %4.2f%%)' %
                  (np.mean(recall_iou_arr) * 100))  # report AR as a percentage
        plt.xlabel('IoU')
        plt.ylabel('Recall')
        plt.xlim([0.5, 1])
        plt.ylim([0, 1])
        fig.savefig(plot_fn)
        plt.close(fig)

    # compute mean AP
    #mean_ap = np.sum(aps * ap_valids) / np.sum(ap_valids)

    return recall_iou_arr  #, ap_valids, gt_npos, mean_ap
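
The vectorized block above computes the IoU of every (GT, prediction) pair with two matrix products and then matches pairs one-to-one with the Hungarian algorithm; here is a self-contained toy run of the same trick:

import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy boolean masks: 2 GT instances and 3 predictions over 6 points.
gt = np.array([[1, 1, 0, 0, 0, 0],
               [0, 0, 1, 1, 1, 0]], dtype=np.int32)
pred = np.array([[1, 1, 1, 0, 0, 0],
                 [0, 0, 0, 1, 1, 0],
                 [0, 0, 0, 0, 0, 1]], dtype=np.int32)

# |A ∩ B| for all pairs as one matrix product over the point axis.
intersect = gt @ pred.T
# |A ∪ B| = n_points - |~A ∩ ~B|, again as one matrix product.
union = gt.shape[1] - (1 - gt) @ (1 - pred).T
iou = intersect / (union + 1e-6)           # (n_gt, n_pred) pairwise IoU

# One-to-one matching that maximizes total IoU.
row, col = linear_sum_assignment(-iou)
print(iou[row, col])                        # matched IoU per GT instance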
Example #5
import os
import argparse
import importlib

# Note: BASE_DIR and check_mkdir are defined elsewhere in the original
# script, and only the --log_dir argument is reproduced here; FLAGS.model,
# FLAGS.category and FLAGS.level_id come from arguments omitted on this page.
parser = argparse.ArgumentParser()

# Input & Output Settings
parser.add_argument('--log_dir', type=str, default='log',
                    help='Directory that stores all training logs and trained models [default: log]')

FLAGS = parser.parse_args()

# save settings
MODEL = importlib.import_module(FLAGS.model)  # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model + '.py')
LOG_DIR = FLAGS.log_dir
check_mkdir(LOG_DIR)

os.system('cp %s %s' % (MODEL_FILE, LOG_DIR))  # bkp of model def
os.system('cp %s %s' % (__file__, LOG_DIR))  # bkp of train procedure
flog = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
flog.write(str(FLAGS) + '\n')


def printout(flog, data):
    print(data)
    flog.write(data + '\n')


# load meta data files
stat_in_fn = '../../stats/after_merging_label_ids/%s-level-%d.txt' % (
    FLAGS.category, FLAGS.level_id)