コード例 #1
0
def write_chip_table(internal_dir,
                     cx2_cid,
                     cx2_gid,
                     cx2_nid,
                     cx2_roi,
                     cx2_theta,
                     prop_dict=None):
    """Write chip_table.csv into `internal_dir`.

    Args:
        internal_dir: directory the CSV file is written into.
        cx2_cid, cx2_gid, cx2_nid: per-chip id columns (ints).
        cx2_roi: per-chip [tl_x, tl_y, w, h] regions of interest.
        cx2_theta: per-chip orientation column (floats).
        prop_dict: optional {label: column} mapping of extra string
            properties appended as additional CSV columns.
    """
    helpers.__PRINT_WRITES__ = True
    print('Writing Chip Table')
    # Make chip_table.csv
    header = '# chip table'
    column_labels = [
        'ChipID', 'ImgID', 'NameID', 'roi[tl_x  tl_y  w  h]', 'theta'
    ]
    column_list = [cx2_cid, cx2_gid, cx2_nid, cx2_roi, cx2_theta]
    column_type = [int, int, int, list, float]
    if prop_dict is not None:
        # .items() works on Python 2 and 3; .iteritems() is Py2-only.
        for key, val in prop_dict.items():
            column_labels.append(key)
            column_list.append(val)
            column_type.append(str)

    chip_table = ld2.make_csv_table(column_labels, column_list, header,
                                    column_type)
    chip_table_fpath = join(internal_dir, ld2.CHIP_TABLE_FNAME)
    write_to_wrapper(chip_table_fpath, chip_table)
コード例 #2
0
def write_name_table(internal_dir, nx2_nid, nx2_name):
    """Write name_table.csv, skipping the first two placeholder entries."""
    helpers.__PRINT_WRITES__ = True
    # Drop the leading two '____' rows for backwards compatibility.
    labels = ['nid', 'name']
    columns = [nx2_nid[2:], nx2_name[2:]]
    table_str = ld2.make_csv_table(labels, columns, '# name table')
    fpath = join(internal_dir, ld2.NAME_TABLE_FNAME)
    write_to_wrapper(fpath, table_str)
コード例 #3
0
ファイル: convert_db.py プロジェクト: Erotemic/hotspotter
def write_name_table(internal_dir, nx2_nid, nx2_name):
    """Write name_table.csv into `internal_dir` (drops the 2 dummy names)."""
    helpers.__PRINT_WRITES__ = True
    header = '# name table'
    # Slice off the first two entries (unnamed placeholders) for back-compat.
    nid_col = nx2_nid[2:]
    name_col = nx2_name[2:]
    csv_text = ld2.make_csv_table(['nid', 'name'], [nid_col, name_col], header)
    write_to_wrapper(join(internal_dir, ld2.NAME_TABLE_FNAME), csv_text)
コード例 #4
0
ファイル: convert_db.py プロジェクト: Erotemic/hotspotter
def write_image_table(internal_dir, gx2_gid, gx2_gname):
    """Write image_table.csv with a constant 'aif' column for back-compat."""
    helpers.__PRINT_WRITES__ = True
    # The 'aif' column is kept for backwards compatibility; every entry is 1.
    aif_col = np.ones(len(gx2_gid), dtype=np.uint32)
    labels = ['gid', 'gname', 'aif']
    columns = [gx2_gid, gx2_gname, aif_col]
    csv_text = ld2.make_csv_table(labels, columns, '# image table')
    fpath = join(internal_dir, ld2.IMAGE_TABLE_FNAME)
    write_to_wrapper(fpath, csv_text)
コード例 #5
0
def write_image_table(internal_dir, gx2_gid, gx2_gname):
    """Write image_table.csv into `internal_dir`."""
    helpers.__PRINT_WRITES__ = True
    header = '# image table'
    # 'aif' (all ones) exists only for backwards compatibility with old readers.
    gx2_aif = np.ones(len(gx2_gid), dtype=np.uint32)
    table = ld2.make_csv_table(['gid', 'gname', 'aif'],
                               [gx2_gid, gx2_gname, gx2_aif],
                               header)
    write_to_wrapper(join(internal_dir, ld2.IMAGE_TABLE_FNAME), table)
コード例 #6
0
ファイル: convert_db.py プロジェクト: Erotemic/hotspotter
def write_chip_table(internal_dir, cx2_cid, cx2_gid, cx2_nid,
                     cx2_roi, cx2_theta, prop_dict=None):
    """Write chip_table.csv into `internal_dir`.

    Args:
        internal_dir: directory the CSV file is written into.
        cx2_cid, cx2_gid, cx2_nid: per-chip id columns (ints).
        cx2_roi: per-chip [tl_x, tl_y, w, h] regions of interest.
        cx2_theta: per-chip orientation column (floats).
        prop_dict: optional {label: column} mapping of extra string
            properties appended as additional CSV columns.
    """
    helpers.__PRINT_WRITES__ = True
    print('Writing Chip Table')
    # Make chip_table.csv
    header = '# chip table'
    column_labels = ['ChipID', 'ImgID', 'NameID', 'roi[tl_x  tl_y  w  h]', 'theta']
    column_list   = [cx2_cid, cx2_gid, cx2_nid, cx2_roi, cx2_theta]
    column_type   = [int, int, int, list, float]
    if prop_dict is not None:
        # .items() works on Python 2 and 3; .iteritems() is Py2-only.
        for key, val in prop_dict.items():
            column_labels.append(key)
            column_list.append(val)
            column_type.append(str)

    chip_table = ld2.make_csv_table(column_labels, column_list, header, column_type)
    chip_table_fpath  = join(internal_dir, ld2.CHIP_TABLE_FNAME)
    write_to_wrapper(chip_table_fpath, chip_table)
コード例 #7
0
def oxsty_mAP_results(allres):
    """Compute Oxford-style mAP scores for every query in allres.

    Writes per-query ranked lists under a suffixed results directory,
    scores each query with philbin's external compute_ap executable, and
    returns (csv_table_string, scalar_mAP_comment_string).
    """
    print('oxsty_results> Building oxsty results')
    hs = allres.hs
    qcx2_res = allres.qcx2_res
    SV = allres.SV
    # Check directorys where ranked lists of images names will be put
    oxsty_qres_dname = 'oxsty_ranked_lists' +allres.title_suffix
    oxsty_qres_dpath = join(hs.dirs.qres_dir, oxsty_qres_dname)
    helpers.ensure_path(oxsty_qres_dpath)

    # Ground-truth directory must already exist inside the database dir.
    oxford_gt_dir = join(hs.dirs.db_dir, 'oxford_style_gt')
    helpers.assertpath(oxford_gt_dir)
    # Look for the compute_ap binary next to the gt dir first.
    compute_ap_exe = normpath(join(oxford_gt_dir, '../compute_ap'))
    if not helpers.checkpath(compute_ap_exe):
        # NOTE(review): join() with an absolute second component discards
        # oxford_gt_dir on POSIX, so this fallback resolves to '/compute_ap'.
        # Possibly intended to be 'compute_ap' inside oxford_gt_dir — confirm.
        compute_ap_exe = normpath(join(oxford_gt_dir, '/compute_ap'))
    helpers.assertpath(compute_ap_exe)
    # Get the mAP scores using philbins program
    query_mAP_list = []
    query_mAP_cx   = []
    for qcx in iter(hs.test_sample_cx):
        res = qcx2_res[qcx]
        mAP = get_oxsty_mAP_score_from_res(hs, res, SV, oxsty_qres_dpath,
                                           compute_ap_exe, oxford_gt_dir)
        query_mAP_list.append(mAP)
        query_mAP_cx.append(qcx)
    print('')
    # Calculate the scalar mAP score for the experiemnt
    scalar_mAP = np.mean(np.array(query_mAP_list))
    scalar_mAP_str = '# mAP score = %r\n' % scalar_mAP
    # build a CSV file with the results
    header  = '# Oxford Style Map Scores: title_suffix=%r\n' % allres.title_suffix
    header += scalar_mAP_str
    header += helpers.get_timestamp(format='comment')+'\n'
    header += '# Full Parameters: \n#' + params.param_string().replace('\n','\n#')+'\n\n'
    column_labels = ['QCX', 'mAP']
    column_list   = [query_mAP_cx, query_mAP_list]
    oxsty_map_csv = load_data2.make_csv_table(column_labels, column_list, header)
    return oxsty_map_csv, scalar_mAP_str
コード例 #8
0
def build_rankres_str(allres):
    """Build csv text summarizing the cxs/scores/ranks of the query results.

    Attaches `rankres_str`, `scalar_summary`, `greater1_cxs`, `greater5_cxs`,
    `problem_true_pairs` and `problem_false_pairs` to `allres`.
    """
    hs = allres.hs
    #SV = allres.SV
    #qcx2_res = allres.qcx2_res
    cx2_cid = hs.tables.cx2_cid
    #cx2_nx = hs.tables.cx2_nx
    test_samp = hs.test_sample_cx
    train_samp = hs.train_sample_cx
    indx_samp = hs.indexed_sample_cx
    # Get organized data for csv file
    (qcx2_top_true_rank,
     qcx2_top_true_score,
     qcx2_top_true_cx)  = allres.top_true_qcx_arrays

    (qcx2_bot_true_rank,
     qcx2_bot_true_score,
     qcx2_bot_true_cx)  = allres.bot_true_qcx_arrays

    (qcx2_top_false_rank,
     qcx2_top_false_score,
     qcx2_top_false_cx) = allres.top_false_qcx_arrays
    # Number of groundtruth per query (-2 marks chips that are not queries)
    qcx2_numgt = np.zeros(len(cx2_cid)) - 2
    for qcx in test_samp:
        qcx2_numgt[qcx] = len(hs.get_other_indexed_cxs(qcx))
    # Easy to digest results
    num_chips = len(test_samp)
    num_nonquery = len(np.setdiff1d(indx_samp, test_samp))
    # Find the test samples WITH ground truth
    test_samp_with_gt = np.array(test_samp)[qcx2_numgt[test_samp] > 0]
    if len(test_samp_with_gt) == 0:
        warnings.warn('[rr2] there were no queries with ground truth')
    #train_nxs_set = set(cx2_nx[train_samp])
    flag_cxs_fn = hs.flag_cxs_with_name_in_sample

    def ranks_less_than_(thresh, intrain=None):
        """Count queries whose top-true rank is <= thresh.

        intrain=None reports all queries; True/False restricts to queries
        whose name is / is not in the training sample.
        Returns (cxs_ranked_worse_than_thresh, format_tuple).
        """
        # Get statistics with respect to the training set
        if len(test_samp_with_gt) == 0:
            test_cxs_ = np.array([])
        elif intrain is None:  # report all
            test_cxs_ =  test_samp_with_gt
        else:  # report either or
            in_train_flag = flag_cxs_fn(test_samp_with_gt, train_samp)
            if intrain is False:
                # FIX: `True - bool_array` raises TypeError on modern numpy;
                # use logical negation instead.
                in_train_flag = np.logical_not(in_train_flag)
            test_cxs_ =  test_samp_with_gt[in_train_flag]
        # number of test samples with ground truth
        num_with_gt = len(test_cxs_)
        if num_with_gt == 0:
            return [], ('NoGT', 'NoGT', -1, 'NoGT')
        # find tests with ranks greater and less than thresh
        testcx2_ttr = qcx2_top_true_rank[test_cxs_]
        greater_cxs = test_cxs_[np.where(testcx2_ttr > thresh)[0]]
        num_greater = len(greater_cxs)
        num_less    = num_with_gt - num_greater
        num_greater = num_with_gt - num_less
        frac_less   = 100.0 * num_less / num_with_gt
        fmt_tup     = (num_less, num_with_gt, frac_less, num_greater)
        return greater_cxs, fmt_tup

    greater5_cxs, fmt5_tup = ranks_less_than_(5)
    greater1_cxs, fmt1_tup = ranks_less_than_(1)
    #
    gt5_intrain_cxs, fmt5_in_tup = ranks_less_than_(5, intrain=True)
    gt1_intrain_cxs, fmt1_in_tup = ranks_less_than_(1, intrain=True)
    #
    gt5_outtrain_cxs, fmt5_out_tup = ranks_less_than_(5, intrain=False)
    gt1_outtrain_cxs, fmt1_out_tup = ranks_less_than_(1, intrain=False)
    #
    allres.greater1_cxs = greater1_cxs
    allres.greater5_cxs = greater5_cxs
    #print('greater5_cxs = %r ' % (allres.greater5_cxs,))
    #print('greater1_cxs = %r ' % (allres.greater1_cxs,))
    # CSV Metadata
    header = '# Experiment allres.title_suffix = ' + allres.title_suffix + '\n'
    header +=  helpers.get_timestamp(format_='comment') + '\n'
    # Scalar summary
    scalar_summary  = '# Num Query Chips: %d \n' % num_chips
    scalar_summary += '# Num Query Chips with at least one match: %d \n' % len(test_samp_with_gt)
    scalar_summary += '# Num NonQuery Chips: %d \n' % num_nonquery
    scalar_summary += '# Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_tup)
    scalar_summary += '# Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_tup)

    scalar_summary += '# InTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_in_tup)
    scalar_summary += '# InTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_in_tup)

    scalar_summary += '# OutTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_out_tup)
    scalar_summary += '# OutTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_out_tup)
    header += scalar_summary
    # Experiment parameters
    #header += '# Full Parameters: \n' + helpers.indent(params.param_string(), '#') + '\n\n'
    # More Metadata
    header += textwrap.dedent('''
    # Rank Result Metadata:
    #   QCX  = Query chip-index
    # QGNAME = Query images name
    # NUMGT  = Num ground truth matches
    #    TT  = top true
    #    BT  = bottom true
    #    TF  = top false''').strip()
    # Build the CSV table
    test_sample_gx = hs.tables.cx2_gx[test_samp]
    test_sample_gname = hs.tables.gx2_gname[test_sample_gx]
    test_sample_gname = [g.replace('.jpg', '') for g in test_sample_gname]
    column_labels = ['QCX', 'NUM GT',
                     'TT CX', 'BT CX', 'TF CX',
                     'TT SCORE', 'BT SCORE', 'TF SCORE',
                     'TT RANK', 'BT RANK', 'TF RANK',
                     'QGNAME', ]
    column_list = [
        test_samp, qcx2_numgt[test_samp],
        qcx2_top_true_cx[test_samp], qcx2_bot_true_cx[test_samp],
        qcx2_top_false_cx[test_samp], qcx2_top_true_score[test_samp],
        qcx2_bot_true_score[test_samp], qcx2_top_false_score[test_samp],
        qcx2_top_true_rank[test_samp], qcx2_bot_true_rank[test_samp],
        qcx2_top_false_rank[test_samp], test_sample_gname, ]
    column_type = [int, int, int, int, int,
                   float, float, float, int, int, int, str, ]
    rankres_str = ld2.make_csv_table(column_labels, column_list, header, column_type)
    # Put some more data at the end
    # FIX: materialize the pairs; on Python 3 repr(zip(...)) would print an
    # opaque zip object instead of the pair list, and the attached attribute
    # would be a one-shot iterator.
    problem_true_pairs = list(zip(allres.problem_true.qcxs, allres.problem_true.cxs))
    problem_false_pairs = list(zip(allres.problem_false.qcxs, allres.problem_false.cxs))
    problem_str = '\n'.join( [
        '#Problem Cases: ',
        '# problem_true_pairs = ' + repr(problem_true_pairs),
        '# problem_false_pairs = ' + repr(problem_false_pairs)])
    rankres_str += '\n' + problem_str
    # Attach results to allres structure (duplicate assignments removed)
    allres.rankres_str = rankres_str
    allres.scalar_summary = scalar_summary
    allres.problem_false_pairs = problem_false_pairs
    allres.problem_true_pairs = problem_true_pairs
コード例 #9
0
def build_rankres_str(allres):
    """Build csv text summarizing the cxs/scores/ranks of the query results.

    Attaches `rankres_str`, `scalar_summary`, `greater1_cxs`, `greater5_cxs`,
    `problem_true_pairs` and `problem_false_pairs` to `allres`.
    """
    hs = allres.hs
    #SV = allres.SV
    #qcx2_res = allres.qcx2_res
    cx2_cid = hs.tables.cx2_cid
    #cx2_nx = hs.tables.cx2_nx
    test_samp = hs.test_sample_cx
    train_samp = hs.train_sample_cx
    indx_samp = hs.indexed_sample_cx
    # Get organized data for csv file
    (qcx2_top_true_rank, qcx2_top_true_score,
     qcx2_top_true_cx) = allres.top_true_qcx_arrays

    (qcx2_bot_true_rank, qcx2_bot_true_score,
     qcx2_bot_true_cx) = allres.bot_true_qcx_arrays

    (qcx2_top_false_rank, qcx2_top_false_score,
     qcx2_top_false_cx) = allres.top_false_qcx_arrays
    # Number of groundtruth per query (-2 marks chips that are not queries)
    qcx2_numgt = np.zeros(len(cx2_cid)) - 2
    for qcx in test_samp:
        qcx2_numgt[qcx] = len(hs.get_other_indexed_cxs(qcx))
    # Easy to digest results
    num_chips = len(test_samp)
    num_nonquery = len(np.setdiff1d(indx_samp, test_samp))
    # Find the test samples WITH ground truth
    test_samp_with_gt = np.array(test_samp)[qcx2_numgt[test_samp] > 0]
    if len(test_samp_with_gt) == 0:
        warnings.warn('[rr2] there were no queries with ground truth')
    #train_nxs_set = set(cx2_nx[train_samp])
    flag_cxs_fn = hs.flag_cxs_with_name_in_sample

    def ranks_less_than_(thresh, intrain=None):
        """Count queries whose top-true rank is <= thresh.

        intrain=None reports all queries; True/False restricts to queries
        whose name is / is not in the training sample.
        Returns (cxs_ranked_worse_than_thresh, format_tuple).
        """
        # Get statistics with respect to the training set
        if len(test_samp_with_gt) == 0:
            test_cxs_ = np.array([])
        elif intrain is None:  # report all
            test_cxs_ = test_samp_with_gt
        else:  # report either or
            in_train_flag = flag_cxs_fn(test_samp_with_gt, train_samp)
            if intrain is False:
                # FIX: `True - bool_array` raises TypeError on modern numpy;
                # use logical negation instead.
                in_train_flag = np.logical_not(in_train_flag)
            test_cxs_ = test_samp_with_gt[in_train_flag]
        # number of test samples with ground truth
        num_with_gt = len(test_cxs_)
        if num_with_gt == 0:
            return [], ('NoGT', 'NoGT', -1, 'NoGT')
        # find tests with ranks greater and less than thresh
        testcx2_ttr = qcx2_top_true_rank[test_cxs_]
        greater_cxs = test_cxs_[np.where(testcx2_ttr > thresh)[0]]
        num_greater = len(greater_cxs)
        num_less = num_with_gt - num_greater
        num_greater = num_with_gt - num_less
        frac_less = 100.0 * num_less / num_with_gt
        fmt_tup = (num_less, num_with_gt, frac_less, num_greater)
        return greater_cxs, fmt_tup

    greater5_cxs, fmt5_tup = ranks_less_than_(5)
    greater1_cxs, fmt1_tup = ranks_less_than_(1)
    #
    gt5_intrain_cxs, fmt5_in_tup = ranks_less_than_(5, intrain=True)
    gt1_intrain_cxs, fmt1_in_tup = ranks_less_than_(1, intrain=True)
    #
    gt5_outtrain_cxs, fmt5_out_tup = ranks_less_than_(5, intrain=False)
    gt1_outtrain_cxs, fmt1_out_tup = ranks_less_than_(1, intrain=False)
    #
    allres.greater1_cxs = greater1_cxs
    allres.greater5_cxs = greater5_cxs
    #print('greater5_cxs = %r ' % (allres.greater5_cxs,))
    #print('greater1_cxs = %r ' % (allres.greater1_cxs,))
    # CSV Metadata
    header = '# Experiment allres.title_suffix = ' + allres.title_suffix + '\n'
    header += helpers.get_timestamp(format_='comment') + '\n'
    # Scalar summary
    scalar_summary = '# Num Query Chips: %d \n' % num_chips
    scalar_summary += '# Num Query Chips with at least one match: %d \n' % len(
        test_samp_with_gt)
    scalar_summary += '# Num NonQuery Chips: %d \n' % num_nonquery
    scalar_summary += '# Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_tup)
    scalar_summary += '# Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (
        fmt1_tup)

    scalar_summary += '# InTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (
        fmt5_in_tup)
    scalar_summary += '# InTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (
        fmt1_in_tup)

    scalar_summary += '# OutTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (
        fmt5_out_tup)
    scalar_summary += '# OutTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (
        fmt1_out_tup)
    header += scalar_summary
    # Experiment parameters
    #header += '# Full Parameters: \n' + helpers.indent(params.param_string(), '#') + '\n\n'
    # More Metadata
    header += textwrap.dedent('''
    # Rank Result Metadata:
    #   QCX  = Query chip-index
    # QGNAME = Query images name
    # NUMGT  = Num ground truth matches
    #    TT  = top true
    #    BT  = bottom true
    #    TF  = top false''').strip()
    # Build the CSV table
    test_sample_gx = hs.tables.cx2_gx[test_samp]
    test_sample_gname = hs.tables.gx2_gname[test_sample_gx]
    test_sample_gname = [g.replace('.jpg', '') for g in test_sample_gname]
    column_labels = [
        'QCX',
        'NUM GT',
        'TT CX',
        'BT CX',
        'TF CX',
        'TT SCORE',
        'BT SCORE',
        'TF SCORE',
        'TT RANK',
        'BT RANK',
        'TF RANK',
        'QGNAME',
    ]
    column_list = [
        test_samp,
        qcx2_numgt[test_samp],
        qcx2_top_true_cx[test_samp],
        qcx2_bot_true_cx[test_samp],
        qcx2_top_false_cx[test_samp],
        qcx2_top_true_score[test_samp],
        qcx2_bot_true_score[test_samp],
        qcx2_top_false_score[test_samp],
        qcx2_top_true_rank[test_samp],
        qcx2_bot_true_rank[test_samp],
        qcx2_top_false_rank[test_samp],
        test_sample_gname,
    ]
    column_type = [
        int,
        int,
        int,
        int,
        int,
        float,
        float,
        float,
        int,
        int,
        int,
        str,
    ]
    rankres_str = ld2.make_csv_table(column_labels, column_list, header,
                                     column_type)
    # Put some more data at the end
    # FIX: materialize the pairs; on Python 3 repr(zip(...)) would print an
    # opaque zip object instead of the pair list, and the attached attribute
    # would be a one-shot iterator.
    problem_true_pairs = list(zip(allres.problem_true.qcxs,
                                  allres.problem_true.cxs))
    problem_false_pairs = list(zip(allres.problem_false.qcxs,
                                   allres.problem_false.cxs))
    problem_str = '\n'.join([
        '#Problem Cases: ',
        '# problem_true_pairs = ' + repr(problem_true_pairs),
        '# problem_false_pairs = ' + repr(problem_false_pairs)
    ])
    rankres_str += '\n' + problem_str
    # Attach results to allres structure (duplicate assignments removed)
    allres.rankres_str = rankres_str
    allres.scalar_summary = scalar_summary
    allres.problem_false_pairs = problem_false_pairs
    allres.problem_true_pairs = problem_true_pairs