Example #1
import numpy as np
import utool as ut
import vtool as vt


def get_namescore_nonvoting_feature_flags(fm_list, fs_list, dnid_list, name_groupxs, kpts1=None):
    r"""
    fm_list = [fm[:min(len(fm), 10)] for fm in fm_list]
    fs_list = [fs[:min(len(fs), 10)] for fs in fs_list]
    """
    fx1_list = [fm.T[0] for fm in fm_list]
    # Group annotation matches by name
    name_grouped_fx1_list = vt.apply_grouping_(fx1_list, name_groupxs)
    name_grouped_fs_list  = vt.apply_grouping_(fs_list,  name_groupxs)
    # Stack up all matches to a particular name; keep track of original indices via offsets
    name_invertable_flat_fx1_list = list(map(ut.invertible_flatten2_numpy, name_grouped_fx1_list))
    name_grouped_fx1_flat = ut.get_list_column(name_invertable_flat_fx1_list, 0)
    name_grouped_invertable_cumsum_list = ut.get_list_column(name_invertable_flat_fx1_list, 1)
    name_grouped_fs_flat = list(map(np.hstack, name_grouped_fs_list))
    if kpts1 is not None:
        xys1_ = vt.get_xys(kpts1).T
        kpts_xyid_list = vt.compute_unique_data_ids(xys1_)
        # Make nested group for every name by query feature index
        # (keypoints at the same xy location, e.g. duplicate orientations, share one id)
        name_grouped_xyid_flat = list(kpts_xyid_list.take(fx1) for fx1 in name_grouped_fx1_flat)
        xyid_groupxs_list = list(vt.group_indices(xyid_flat)[1] for xyid_flat in name_grouped_xyid_flat)
        name_group_fx1_groupxs_list = xyid_groupxs_list
    else:
        # Make nested group for every name by query feature index
        fx1_groupxs_list = [vt.group_indices(fx1_flat)[1] for fx1_flat in name_grouped_fx1_flat]
        name_group_fx1_groupxs_list = fx1_groupxs_list
    name_grouped_fid_grouped_fs_list = [
        vt.apply_grouping(fs_flat, fid_groupxs)
        for fs_flat, fid_groupxs in zip(name_grouped_fs_flat, name_group_fx1_groupxs_list)
    ]

    # Flag which features are valid in this grouped space.
    # Only one keypoint should be able to vote for each group.
    name_grouped_fid_grouped_isvalid_list = [
        [fs_group.max() == fs_group for fs_group in fid_grouped_fs_list]
        for fid_grouped_fs_list in name_grouped_fid_grouped_fs_list
    ]

    # Go back to being grouped only in name space
    name_grouped_isvalid_flat_list = [
        vt.invert_apply_grouping2(fid_grouped_isvalid_list, fid_groupxs, dtype=bool)
        for fid_grouped_isvalid_list, fid_groupxs in zip(name_grouped_fid_grouped_isvalid_list, name_group_fx1_groupxs_list)
    ]

    name_grouped_isvalid_unflat_list = [
        ut.unflatten2(isvalid_flat, invertable_cumsum_list)
        for isvalid_flat, invertable_cumsum_list in zip(name_grouped_isvalid_flat_list, name_grouped_invertable_cumsum_list)
    ]

    # Reports which features were valid in name scoring for every annotation
    featflag_list = vt.invert_apply_grouping(name_grouped_isvalid_unflat_list, name_groupxs)
    return featflag_list
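
The key step above is the per-group max flagging: within each group of match scores that share a query feature, only the top score (and any ties) is allowed to vote. A minimal, self-contained sketch of that rule in plain NumPy with toy data (no vtool/utool required; the array names here are hypothetical):

import numpy as np

fx1_flat = np.array([0, 0, 1, 2, 2, 2])             # query feature per match
fs_flat = np.array([0.2, 0.9, 0.5, 0.1, 0.7, 0.7])  # match scores

# Within each feature group, flag the max score (ties all stay valid,
# matching the fs_group.max() == fs_group rule above).
isvalid = np.zeros(len(fs_flat), dtype=bool)
for fx in np.unique(fx1_flat):
    idxs = np.flatnonzero(fx1_flat == fx)
    isvalid[idxs] = fs_flat[idxs] == fs_flat[idxs].max()

print(isvalid)  # [False  True  True False  True  True]
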
Example #2
    def execute(request, parent_rowids=None, use_cache=None, postprocess=True):
        """ HACKY REIMPLEMENTATION """
        ut.colorprint('[req] Executing request %s' % (request, ), 'yellow')
        table = request.depc[request.tablename]
        if use_cache is None:
            use_cache = not ut.get_argflag('--nocache')
        if parent_rowids is None:
            parent_rowids = request.parent_rowids
        else:
            logger.info('given %d specific parent_rowids' %
                        (len(parent_rowids), ))

        # vsone hack (i,j) same as (j,i)
        if request._symmetric:
            import vtool as vt

            directed_edges = np.array(parent_rowids)
            undirected_edges = vt.to_undirected_edges(directed_edges)
            edge_ids = vt.compute_unique_data_ids(undirected_edges)
            unique_rows, unique_rowx, inverse_idx = np.unique(
                edge_ids, return_index=True, return_inverse=True)
            parent_rowids_ = ut.take(parent_rowids, unique_rowx)
        else:
            parent_rowids_ = parent_rowids

        # Compute and cache any uncomputed results
        rowids = table.get_rowid(parent_rowids_,
                                 config=request,
                                 recompute=not use_cache)
        # Load all results
        result_list = table.get_row_data(rowids)

        if request._symmetric:
            result_list = ut.take(result_list, inverse_idx)

        if postprocess and hasattr(request, 'postprocess_execute'):
            logger.info('Converting results')
            result_list = request.postprocess_execute(parent_rowids,
                                                      result_list)
        return result_list
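
The symmetric branch above avoids computing both (i, j) and (j, i): directed edges are canonicalized to an undirected form, each unique pair is computed once, and return_inverse restores one result per original edge. A self-contained sketch of that deduplication with toy data (plain NumPy; vt.to_undirected_edges and vt.compute_unique_data_ids are approximated here by sorting each pair and collapsing it to a scalar id):

import numpy as np

directed_edges = np.array([(1, 2), (2, 1), (3, 4), (1, 2)])
# Canonicalize: (i, j) and (j, i) become the same sorted pair.
undirected_edges = np.sort(directed_edges, axis=1)
# Collapse each pair to a scalar id so np.unique can deduplicate rows.
n = undirected_edges.max() + 1
edge_ids = undirected_edges[:, 0] * n + undirected_edges[:, 1]
_, unique_rowx, inverse_idx = np.unique(edge_ids, return_index=True,
                                        return_inverse=True)

unique_edges = directed_edges[unique_rowx]  # compute results only for these
results = ['result_%d' % i for i in range(len(unique_edges))]  # stand-ins
full_results = [results[i] for i in inverse_idx]  # one per original edge
print(full_results)  # ['result_0', 'result_0', 'result_1', 'result_0']
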
Example #3
import numpy as np
import utool as ut
import vtool as vt


def case_type_sample(testres, num_per_group=1, with_success=True,
                     with_failure=True, min_success_diff=0):
    category_poses = testres.partition_case_types(min_success_diff=min_success_diff)
    # Stratified sample of cases from groups
    rng = np.random.RandomState(0)
    ignore_keys = ['total_failure', 'total_success']
    flat_sample_dict = ut.ddict(list)

    modes = []
    if with_success:
        modes += ['success']
    if with_failure:
        modes += ['failure']

    for mode in modes:
        for truth in ['gt', 'gf']:
            type2_poses = category_poses[mode + '_' + truth]
            for key, poses in type2_poses.items():
                if key not in ignore_keys:
                    if num_per_group is not None:
                        sample_poses = ut.random_sample(poses, num_per_group, rng=rng)
                    else:
                        sample_poses = poses

                    flat_sample_dict[mode + '_' + truth + '_' + key].append(sample_poses)

    sample_keys = list(flat_sample_dict.keys())
    sample_vals = list(map(np.vstack, flat_sample_dict.values()))

    has_sample = np.array(list(map(len, sample_vals))) > 0
    has_sample_idx = np.nonzero(has_sample)[0]

    print('Unsampled categories = %s' % (
        ut.list_str(ut.compress(sample_keys, ~has_sample))))
    print('Sampled categories = %s' % (
        ut.list_str(ut.compress(sample_keys, has_sample))))

    sampled_type_list = ut.take(sample_keys, has_sample_idx)
    sampled_cases_list = ut.take(sample_vals, has_sample_idx)

    if len(sampled_cases_list) == 0:
        return [], []
    sampled_lbl_list = ut.flatten([[lbl] * len(cases)
                                   for lbl, cases in zip(sampled_type_list, sampled_cases_list)])
    sampled_case_list = np.vstack(sampled_cases_list)

    # Computes unique test cases and groups them with all case labels
    caseid_list = vt.compute_unique_data_ids(sampled_case_list)
    unique_case_ids = ut.unique_ordered(caseid_list)
    labels_list = ut.dict_take(ut.group_items(sampled_lbl_list, caseid_list), unique_case_ids)
    grouped_cases = ut.dict_take(ut.group_items(sampled_case_list, caseid_list), unique_case_ids)
    cases_list = np.vstack(ut.get_list_column(grouped_cases, 0))

    case_pos_list = cases_list
    case_labels_list = labels_list
    return case_pos_list, case_labels_list
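
The final step above deduplicates case rows that were sampled under several categories while keeping every label that selected them. A minimal sketch of that group-by-unique-row step with toy data (plain Python/NumPy; no vtool/utool required):

import numpy as np
from collections import OrderedDict

case_rows = np.array([[0, 1], [2, 3], [0, 1], [4, 5]])
labels = ['success_gt_a', 'failure_gt_b', 'failure_gf_a', 'success_gt_c']

# Group labels by row identity, preserving first-seen order.
groups = OrderedDict()
for row, lbl in zip(map(tuple, case_rows), labels):
    groups.setdefault(row, []).append(lbl)

case_pos_list = np.vstack(list(groups.keys()))   # each unique case once
case_labels_list = list(groups.values())         # all labels per unique case
print(case_pos_list.tolist())   # [[0, 1], [2, 3], [4, 5]]
print(case_labels_list)         # [['success_gt_a', 'failure_gf_a'], ...]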