Example #1
def modify_tags(tags_list, direct_map=None, regex_map=None, regex_aug=None,
                delete_unmapped=False, return_unmapped=False,
                return_map=False):
    import utool as ut
    tag_vocab = ut.unique(ut.flatten(tags_list))
    alias_map = ut.odict()
    if regex_map is not None:
        alias_map.update(**ut.build_alias_map(regex_map, tag_vocab))
    if direct_map is not None:
        alias_map.update(ut.odict(direct_map))

    new_tags_list = tags_list
    new_tags_list = ut.alias_tags(new_tags_list, alias_map)

    if regex_aug is not None:
        alias_aug = ut.build_alias_map(regex_aug, tag_vocab)
        aug_tags_list = ut.alias_tags(new_tags_list, alias_aug)
        new_tags_list = [ut.unique(t1 + t2) for t1, t2 in zip(new_tags_list, aug_tags_list)]

    unmapped = list(set(tag_vocab) - set(alias_map.keys()))
    if delete_unmapped:
        new_tags_list = [ut.setdiff(tags, unmapped) for tags in new_tags_list]

    toreturn = None
    if return_map:
        toreturn = (alias_map,)

    if return_unmapped:
        toreturn = toreturn + (unmapped,)

    if toreturn is None:
        toreturn = new_tags_list
    else:
        toreturn = (new_tags_list,) + toreturn
    return toreturn
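
A minimal usage sketch for modify_tags (assuming utool is installed; the tags and rules here are made up for illustration):

tags_list = [['cat', 'kitten'], ['dog'], []]
# the regex rule folds 'kitten' into 'cat'; the direct rule renames 'dog'
new_tags = modify_tags(tags_list,
                       direct_map=[('dog', 'canine')],
                       regex_map=[('kitten$', 'cat')])
# each inner list now carries the aliased tags, e.g. 'dog' -> 'canine'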
Example #2
    def __init__(self, img, **kwargs):
        super(PaintInteraction, self).__init__(**kwargs)
        init_mask = kwargs.get('init_mask', None)
        if init_mask is None:
            mask = np.full(img.shape, 255, dtype=np.uint8)
        else:
            mask = init_mask
        self.mask = mask
        self.img = img
        self.brush_size = 75
        import wbia.plottool as pt

        self.valid_colors1 = ut.odict([
            # ('background', (255 * pt.BLACK).tolist()),
            ('scenery', (255 * pt.BLACK).tolist()),
            ('photobomb', (255 * pt.RED).tolist()),
        ])
        self.valid_colors2 = ut.odict([('foreground',
                                        (255 * pt.WHITE).tolist())])
        self.color1_idx = 0
        self.color1 = self.valid_colors1['scenery']
        self.color2 = self.valid_colors2['foreground']
        self.background = None
        self.last_stroke = None
        self.finished_callback = None
        self._imshow_running = True
Example #3
 def __init__(self, img, **kwargs):
     super(PaintInteraction, self).__init__(**kwargs)
     init_mask = kwargs.get('init_mask', None)
     if init_mask is None:
         mask = np.full(img.shape, 255, dtype=np.uint8)
     else:
         mask = init_mask
     self.mask = mask
     self.img = img
     self.brush_size = 75
     import plottool as pt
     self.valid_colors1 = ut.odict([
         #('background', (255 * pt.BLACK).tolist()),
         ('scenery', (255 * pt.BLACK).tolist()),
         ('photobomb', (255 * pt.RED).tolist()),
     ])
     self.valid_colors2 = ut.odict([
         ('foreground', (255 * pt.WHITE).tolist()),
     ])
     self.color1_idx = 0
     self.color1 = self.valid_colors1['scenery']
     self.color2 = self.valid_colors2['foreground']
     self.background = None
     self.last_stroke = None
     self.finished_callback = None
     self._imshow_running = True
Example #4
 def __init__(db, sqldb_dpath='.', sqldb_fname='database.sqlite3',
              text_factory=unicode):
     """ Creates db and opens connection """
     with utool.Timer('New SQLDatabaseController'):
         #printDBG('[sql.__init__]')
         # Table info
         db.table_columns     = utool.odict()
         db.table_constraints = utool.odict()
         db.table_docstr      = utool.odict()
         # TODO:
         db.stack = []
         db.cache = {}  # key \in [tblname][colnames][rowid]
         # Get SQL file path
         db.dir_  = sqldb_dpath
         db.fname = sqldb_fname
         assert exists(db.dir_), '[sql] db.dir_=%r does not exist!' % db.dir_
         fpath    = join(db.dir_, db.fname)
         if not exists(fpath):
             print('[sql] Initializing new database')
         # Open the SQL database connection with support for custom types
         #lite.enable_callback_tracebacks(True)
         #fpath = ':memory:'
         db.connection = lite.connect2(fpath)
         db.connection.text_factory = text_factory
         #db.connection.isolation_level = None  # turns sqlite3 autocommit off
         COPY_TO_MEMORY = utool.get_flag('--copy-db-to-memory')
         if COPY_TO_MEMORY:
             db._copy_to_memory()
             db.connection.text_factory = text_factory
         # Get a cursor which will perform sql commands / queries / executions
         db.cur = db.connection.cursor()
Example #5
class META_DECISION(object):  # NOQA
    """
    Enumerated types of review codes and texts

    Notes:
        unreviewed: we don't have a meta decision
        same: we know this is the same animal through non-visual means
        diff: we know this is a different animal through non-visual means

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.constants import *  # NOQA
        >>> assert hasattr(META_DECISION, 'CODE')
        >>> assert hasattr(META_DECISION, 'NICE')
        >>> code1 = META_DECISION.INT_TO_CODE[META_DECISION.NULL]
        >>> code2 = META_DECISION.CODE.NULL
        >>> assert code1 == code2
        >>> nice1 = META_DECISION.INT_TO_NICE[META_DECISION.NULL]
        >>> nice2 = META_DECISION.NICE.NULL
        >>> assert nice1 == nice2
    """

    NULL = None
    DIFF = 0
    SAME = 1
    INT_TO_CODE = ut.odict([(NULL, 'null'), (DIFF, 'diff'), (SAME, 'same')])
    INT_TO_NICE = ut.odict([(NULL, 'NULL'), (DIFF, 'Different'),
                            (SAME, 'Same')])
    CODE_TO_NICE = ut.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ut.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ut.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ut.invert_dict(INT_TO_NICE)
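
The four derived tables let callers round-trip between the integer, code, and nice-name spellings of a decision; a quick sketch using the values defined above:

assert META_DECISION.INT_TO_CODE[META_DECISION.SAME] == 'same'
assert META_DECISION.CODE_TO_INT['same'] == META_DECISION.SAME
assert META_DECISION.CODE_TO_NICE['diff'] == 'Different'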
Example #6
class CONFIDENCE(object):
    UNKNOWN = None
    GUESSING = 1
    NOT_SURE = 2
    PRETTY_SURE = 3
    ABSOLUTELY_SURE = 4

    INT_TO_CODE = ut.odict([
        (ABSOLUTELY_SURE, 'absolutely_sure'),
        (PRETTY_SURE, 'pretty_sure'),
        (NOT_SURE, 'not_sure'),
        (GUESSING, 'guessing'),
        (UNKNOWN, 'unspecified'),
    ])

    INT_TO_NICE = ut.odict([
        (ABSOLUTELY_SURE, 'Doubtless'),
        (PRETTY_SURE, 'Sure'),
        (NOT_SURE, 'Unsure'),
        (GUESSING, 'Guessing'),
        (UNKNOWN, 'Unspecified'),
    ])

    CODE_TO_NICE = ut.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ut.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ut.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ut.invert_dict(INT_TO_NICE)
Example #7
class QUAL(object):
    EXCELLENT = 5
    GOOD = 4
    OK = 3
    POOR = 2
    JUNK = 1
    UNKNOWN = None

    INT_TO_CODE = ut.odict([
        (EXCELLENT, 'excellent'),
        (GOOD, 'good'),
        (OK, 'ok'),
        (POOR, 'poor'),
        (JUNK, 'junk'),
        (UNKNOWN, 'unspecified'),
    ])

    INT_TO_NICE = ut.odict([
        (EXCELLENT, 'Excellent'),
        (GOOD, 'Good'),
        (OK, 'OK'),
        (POOR, 'Poor'),
        (JUNK, 'Junk'),
        (UNKNOWN, 'Unspecified'),
    ])

    CODE_TO_NICE = ut.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ut.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ut.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ut.invert_dict(INT_TO_NICE)
Example #8
def parse_abbrev():
    ref_text = 'Acoustics Acoust.\nAdministration Admin.\nAdministrative Administ.\nAmerican Amer.\nAnalysis Anal.\nAnnals Ann.\nAnnual Annu.\nApparatus App.\nApplications Appl.\nApplied Appl.\nAssociation Assoc.\nAutomatic Automat.\nBritish Brit.\nBroadcasting Broadcast.\nBusiness Bus.\nCanadian Can.\nChinese Chin.\nCommunications Commun.\nComputer(s) Comput.\nConference Conf.\nCongress Congr.\nConvention Conv.\nCorrespondence Corresp.\nCybernetics Cybern.\nDepartment Dept.\nDesign Des.\nDevelopment Develop.\nDigest Dig.\nEconomic(s) Econ.\nEducation Edu.\nElectric Elect.\nElectrical Elect.\nElectronic Electron.\nElectronics Electron.\nEngineering Eng.\nErgonomics Ergonom.\nEuropean Eur.\nEvolutionary Evol.\nExpress Express\nFoundation Found.\nGeoscience Geosci.\nGraphics Graph.\nIndustrial Ind.\nInformation Inform.\nInstitute Inst.\nIntelligence Intell.\nInternational Int.\nJapan Jpn.\nJournal J.\nLetter(s) Lett.\nMachine Mach.\nMagazine Mag.\nMagnetics Magn.\nManagement Manage.\nManaging Manag.\nMathematical Math.\nMechanical Mech.\nMeeting Meeting\nNational Nat.\nNewsletter Newslett.\nNuclear Nucl.\nOccupation Occupat.\nOperational Oper.\nOptical Opt.\nOptics Opt.\nOrganization Org.\nPhilosophical Philosoph.\nProceedings Proc.\nProcessing Process.\nProduction Prod.\nProductivity Productiv.\nQuarterly Quart.\nRecord Rec.\nReliability Rel.\nReport Rep.\nResearch Res.\nReview Rev.\nRoyal Roy.\nScience Sci.\nSelected Select.\nSociety Soc.\nSociological Sociol.\nStatistics Statist.\nStudies Stud.\nSupplement Suppl.\nSymposium Symp.\nSystems Syst.\nTechnical Tech.\nTechniques Techn.\nTechnology Technol.\nTelecommunications Telecommun.\nTransactions Trans.\nVehicular Veh.\nWorking Work.'
    abbrev_map = {}
    for line in ref_text.split('\n'):
        full, accro = line.split(' ')
        if full.endswith('(s)'):
            full_single = full[:-3]
            abbrev_map[full_single] = accro
            abbrev_map[full_single + 's'] = accro
        else:
            abbrev_map[full] = accro
    return ut.odict(abbrev_map)
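
With the return statement in place, a quick sanity check on entries taken from ref_text above:

abbrev_map = parse_abbrev()
assert abbrev_map['Journal'] == 'J.'
# '(s)' entries expand to both singular and plural forms
assert abbrev_map['Letter'] == abbrev_map['Letters'] == 'Lett.'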
Example #9
def make_layer_json_dict(layer, layer_info, layer_to_id, extra=True):
    """
    >>> from ibeis_cnn.net_strs import *  # NOQA
    """
    #layer_type = layer_info['classname']
    #attr_key_list = layer_info['layer_attrs']
    json_dict = ut.odict([])
    if extra:
        json_dict['name'] = layer_info['name']

    json_dict['type'] = layer_info['classname']
    json_dict['id'] = layer_to_id[layer]

    if hasattr(layer, 'input_layer'):
        #json_dict['input_layer'] = layer.input_layer.name
        # HACK FOR WHEN DROPOUT LAYERS DONT EXIST
        def get_mrp_id(in_layer):
            if in_layer in layer_to_id:
                return layer_to_id[in_layer]
            else:
                return get_mrp_id(in_layer.input_layer)
        json_dict['input_layer'] = get_mrp_id(layer.input_layer)
    if hasattr(layer, 'input_layers'):
        #json_dict['input_layers'] = [l.name for l in layer.input_layers]
        json_dict['input_layers'] = [layer_to_id[l] for l in layer.input_layers]

    json_dict.update(**layer_info['layer_attrs'])
    nonlin = layer_info.get('nonlinearity', None)
    if nonlin is not None:
        json_dict['nonlinearity'] = nonlin

    json_params = []
    for param_info in layer_info['param_infos']:
        p = ut.odict()
        p['name'] = param_info['basename']
        if extra:
            init = param_info.get('init', None)
            if init is not None:
                p['init'] = init
        tags = param_info.get('tags', None)
        if tags is not None:
            if extra:
                p['tags'] = list(tags)
            else:
                if len(tags) > 0:
                    p['tags'] = list(tags)
        json_params.append(p)
    if len(json_params) > 0:
        json_dict['params'] = json_params
    return json_dict
Example #10
    def inference_stats(infr_list_):
        relabel_stats = []
        for infr in infr_list_:
            num_ccs, num_inconsistent = infr.relabel_using_reviews()
            state_hist = ut.dict_hist(nx.get_edge_attributes(infr.graph, 'decision').values())
            if POSTV not in state_hist:
                state_hist[POSTV] = 0
            hist = ut.dict_hist(nx.get_edge_attributes(infr.graph, '_speed_split').values())

            subgraphs = infr.positive_connected_compoments()
            subgraph_sizes = [len(g) for g in subgraphs]

            info = ut.odict([
                ('num_nonmatch_edges', state_hist[NEGTV]),
                ('num_match_edges', state_hist[POSTV]),
                ('frac_nonmatch_edges',  state_hist[NEGTV] / (state_hist[POSTV] + state_hist[NEGTV])),
                ('num_inconsistent', num_inconsistent),
                ('num_ccs', num_ccs),
                ('edges_flipped', hist.get('flip', 0)),
                ('edges_unchanged', hist.get('orig', 0)),
                ('bad_unreviewed_edges', hist.get('new', 0)),
                ('orig_size', len(infr.graph)),
                ('new_sizes', subgraph_sizes),
            ])
            relabel_stats.append(info)
        return relabel_stats
Example #11
def precompute_data():
    """
    Ensure features and such are computed
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-precompute_data

    Example:
        >>> from ibeis.scripts.gen_cand_expts import *
        >>> make_standard_test_scripts(precompute_data())
    """
    #basecmd = 'python -m ibeis.expt.experiment_printres
    #--exec-print_latexsum --rank-lt-list=1,5,10,100 '
    varydict = ut.odict([
        ('preload_flags', [
            #'--preload-chip',
            #'--preload-feat',
            #'--preload-feeatweight',
            '--preload',
            '--preindex',
        ]),
        ('dbname', get_dbnames()),
        ('acfg_name', ['default:qaids=allgt,species=primary,view=primary,is_known=True']),
        ('cfg_name', ['default', 'candidacy_baseline', 'candidacy_invariance']),
    ])
    return (varydict, 'preload', 'preload')
Example #13
def _debug_repr_cpd(cpd):
    import re
    import utool as ut
    code_fmt = ut.codeblock(
        '''
        {variable} = pgmpy.factors.TabularCPD(
            variable={variable_repr},
            variable_card={variable_card_repr},
            values={get_cpd_repr},
            evidence={evidence_repr},
            evidence_card={evidence_card_repr},
        )
        ''')
    keys = ['variable', 'variable_card', 'values', 'evidence', 'evidence_card']
    dict_ = ut.odict(zip(keys, [getattr(cpd, key) for key in keys]))
    # HACK
    dict_['values'] = cpd.get_cpd()
    r = ut.repr2(dict_, explicit=True, nobraces=True, nl=True)
    print(r)

    # Parse props that are needed for this fmtstr
    fmt_keys = [match.groups()[0] for match in re.finditer('{(.*?)}', code_fmt)]
    need_reprs = [key[:-5] for key in fmt_keys if key.endswith('_repr')]
    need_keys = [key for key in fmt_keys if not key.endswith('_repr')]
    # Get corresponding props
    # Call methods if needbe
    tmp = [(prop, getattr(cpd, prop)) for prop in need_reprs]
    tmp = [(x, y()) if ut.is_funclike(y) else (x, y) for (x, y) in tmp]
    fmtdict = dict(tmp)
    fmtdict = ut.map_dict_vals(ut.repr2, fmtdict)
    fmtdict = ut.map_dict_keys(lambda x: x + '_repr', fmtdict)
    tmp2 = [(prop, getattr(cpd, prop)) for prop in need_keys]
    fmtdict.update(dict(tmp2))
    code = code_fmt.format(**fmtdict)
    return code
Example #14
    def isect_info(self, other):
        set1 = set(self.rel_fpath_list)
        set2 = set(other.rel_fpath_list)

        set_comparisons = ut.odict([
            ('s1', set1),
            ('s2', set2),
            ('union', set1.union(set2)),
            ('isect', set1.intersection(set2)),
            ('s1 - s2', set1.difference(set2)),
            ('s2 - s1', set2.difference(set1)),
        ])
        stat_stats = ut.map_vals(len, set_comparisons)
        print(ut.repr4(stat_stats))
        return set_comparisons

        if False:
            idx_lookup1 = ut.make_index_lookup(self.rel_fpath_list)
            idx_lookup2 = ut.make_index_lookup(other.rel_fpath_list)

            uuids1 = ut.take(self.uuids,
                             ut.take(idx_lookup1, set_comparisons['union']))
            uuids2 = ut.take(other.uuids,
                             ut.take(idx_lookup2, set_comparisons['union']))

            uuids1 == uuids2
Example #15
    def get_cfgstr(nnindexer, noquery=False):
        r""" returns string which uniquely identified configuration and support data

        Args:
            noquery (bool): if True cfgstr is only relevant to building the
                index. No search params are returned (default = False)

        Returns:
            str: flann_cfgstr

        CommandLine:
            python -m wbia.algo.hots.neighbor_index --test-get_cfgstr

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.algo.hots.neighbor_index import *  # NOQA
            >>> import wbia
            >>> cfgdict = dict(fg_on=False)
            >>> qreq_ = wbia.testdata_qreq_(defaultdb='testdb1', p='default:fg_on=False')
            >>> qreq_.load_indexer()
            >>> nnindexer = qreq_.indexer
            >>> noquery = True
            >>> flann_cfgstr = nnindexer.get_cfgstr(noquery)
            >>> result = ('flann_cfgstr = %s' % (str(flann_cfgstr),))
            >>> print(result)
            flann_cfgstr = _FLANN((algo=kdtree,seed=42,t=8,))_VECS((11260,128)gj5nea@ni0%f3aja)
        """
        flann_cfgstr_list = []
        use_params_hash = True
        use_data_hash = True
        if use_params_hash:
            flann_defaults = vt.get_flann_params(
                nnindexer.flann_params['algorithm'])
            # flann_params_clean = flann_defaults.copy()
            flann_params_clean = ut.sort_dict(flann_defaults)
            ut.update_existing(flann_params_clean, nnindexer.flann_params)
            if noquery:
                ut.delete_dict_keys(flann_params_clean, ['checks'])
            shortnames = dict(algorithm='algo',
                              checks='chks',
                              random_seed='seed',
                              trees='t')
            short_params = ut.odict([
                (shortnames.get(key, key), str(val)[0:7])
                for key, val in six.iteritems(flann_params_clean)
            ])
            flann_valsig_ = ut.repr2(short_params,
                                     nl=False,
                                     explicit=True,
                                     strvals=True)
            flann_valsig_ = flann_valsig_.lstrip('dict').replace(' ', '')
            # flann_valsig_ = str(list(flann_params.values()))
            # flann_valsig = ut.remove_chars(flann_valsig_, ', \'[]')
            flann_cfgstr_list.append('_FLANN(' + flann_valsig_ + ')')
        if use_data_hash:
            vecs_hashstr = ut.hashstr_arr(nnindexer.idx2_vec, '_VECS')
            flann_cfgstr_list.append(vecs_hashstr)
        flann_cfgstr = ''.join(flann_cfgstr_list)
        return flann_cfgstr
Example #16
 def as_dict(self):
     return ut.odict([
         ('type', self.type),
         ('accro', self._accro),
         ('full', self._full),
         ('abbrev', self.abbrev()),
         ('publisher', self._publisher),
     ])
Example #17
 def get_annot_sex_stats(aid_list):
     annot_sextext_list = ibs.get_annot_sex_texts(aid_list)
     sextext2_aids = ut.group_items(aid_list, annot_sextext_list)
     sex_keys = list(ibs.const.SEX_TEXT_TO_INT.keys())
     assert set(sex_keys) >= set(annot_sextext_list), 'bad keys: ' + str(set(annot_sextext_list) - set(sex_keys))
     sextext2_nAnnots = ut.odict([(key, len(sextext2_aids.get(key, []))) for key in sex_keys])
     # Filter 0's
     sextext2_nAnnots = {key: val for key, val in six.iteritems(sextext2_nAnnots) if val != 0}
     return sextext2_nAnnots
Example #18
 def measure_error_edges(infr):
     for edge, data in infr.edges(data=True):
         true_state = data['truth']
         pred_state = data.get('evidence_decision', UNREV)
         if pred_state != UNREV:
             if true_state != pred_state:
                 error = ut.odict([('real', true_state),
                                   ('pred', pred_state)])
                 yield edge, error
Example #19
def parse_theano_flags():
    import os
    theano_flags_str = os.environ.get('THEANO_FLAGS', '')
    theano_flags_itemstrs = theano_flags_str.split(',')
    theano_flags = ut.odict([
        itemstr.split('=') for itemstr in theano_flags_itemstrs
        if len(itemstr) > 0
    ])
    return theano_flags
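
A minimal sketch of the parsing behaviour, seeding the environment variable by hand:

import os
os.environ['THEANO_FLAGS'] = 'device=cpu,floatX=float32'
assert parse_theano_flags() == {'device': 'cpu', 'floatX': 'float32'}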
Example #20
class EVIDENCE_DECISION(object):  # NOQA
    """
    TODO: change to EVIDENCE_DECISION / VISUAL_DECISION
    Enumerated types of review codes and texts

    Notes:
        unreviewed: not compared yet
        nomatch: visually comparable, but different
        match: visually comparable and the same
        notcomp: not comparable; it is actually impossible to determine
        unknown: it was reviewed, but we just can't figure it out
    """

    UNREVIEWED = None
    NEGATIVE = 0
    POSITIVE = 1
    INCOMPARABLE = 2
    UNKNOWN = 3

    INT_TO_CODE = ut.odict([
        (POSITIVE, 'match'),
        (NEGATIVE, 'nomatch'),
        (INCOMPARABLE, 'notcomp'),
        (UNKNOWN, 'unknown'),
        (UNREVIEWED, 'unreviewed'),
    ])

    INT_TO_NICE = ut.odict([
        (POSITIVE, 'Positive'),
        (NEGATIVE, 'Negative'),
        (INCOMPARABLE, 'Incomparable'),
        (UNKNOWN, 'Unknown'),
        (UNREVIEWED, 'Unreviewed'),
    ])

    CODE_TO_NICE = ut.map_keys(INT_TO_CODE, INT_TO_NICE)
    CODE_TO_INT = ut.invert_dict(INT_TO_CODE)
    NICE_TO_CODE = ut.invert_dict(CODE_TO_NICE)
    NICE_TO_INT = ut.invert_dict(INT_TO_NICE)

    MATCH_CODE = CODE_TO_INT
Example #21
def parse_theano_flags():
    """
    export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
    export THEANO_FLAGS="device=gpu2,print_active_device=True,enable_initial_driver_test=False"
    set THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
    """
    theano_flags_str = os.environ.get('THEANO_FLAGS', '')
    theano_flags_itemstrs = theano_flags_str.split(',')
    theano_flags = ut.odict([
        itemstr.split('=') for itemstr in theano_flags_itemstrs
        if len(itemstr) > 0
    ])
    return theano_flags
Example #22
 def simulate_user_feedback(infr):
     qreq_ = infr.qreq_
     aid_pairs = np.array(ut.take_column(infr.needs_review_list, [0, 1]))
     nid_pairs = qreq_.ibs.get_annot_nids(aid_pairs)
     truth = nid_pairs.T[0] == nid_pairs.T[1]
     user_feedback = ut.odict([
         ('aid1', aid_pairs.T[0]),
         ('aid2', aid_pairs.T[1]),
         ('p_match', truth.astype(float)),  # np.float is removed in newer numpy
         ('p_nomatch', 1.0 - truth),
         ('p_notcomp', np.array([0.0] * len(aid_pairs))),
     ])
     return user_feedback
Example #24
    def get_cfgstr(nnindexer, noquery=False):
        r""" returns string which uniquely identified configuration and support data

        Args:
            noquery (bool): if True cfgstr is only relevant to building the
                index. No search params are returned (default = False)

        Returns:
            str: flann_cfgstr

        CommandLine:
            python -m ibeis.algo.hots.neighbor_index --test-get_cfgstr

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
            >>> import ibeis
            >>> cfgdict = dict(fg_on=False)
            >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', p='default:fg_on=False')
            >>> qreq_.load_indexer()
            >>> nnindexer = qreq_.indexer
            >>> noquery = True
            >>> flann_cfgstr = nnindexer.get_cfgstr(noquery)
            >>> result = ('flann_cfgstr = %s' % (str(flann_cfgstr),))
            >>> print(result)
            flann_cfgstr = _FLANN((algo=kdtree,seed=42,t=8,))_VECS((11260,128)gj5nea@ni0%f3aja)
        """
        flann_cfgstr_list = []
        use_params_hash = True
        use_data_hash = True
        if use_params_hash:
            flann_defaults = vt.get_flann_params(nnindexer.flann_params['algorithm'])
            #flann_params_clean = flann_defaults.copy()
            flann_params_clean = ut.sort_dict(flann_defaults)
            ut.updateif_haskey(flann_params_clean, nnindexer.flann_params)
            if noquery:
                ut.delete_dict_keys(flann_params_clean, ['checks'])
            shortnames = dict(algorithm='algo', checks='chks', random_seed='seed', trees='t')
            short_params = ut.odict([(shortnames.get(key, key), str(val)[0:7])
                                     for key, val in six.iteritems(flann_params_clean)])
            flann_valsig_ = ut.dict_str(
                short_params, nl=False, explicit=True, strvals=True)
            flann_valsig_ = flann_valsig_.lstrip('dict').replace(' ', '')
            #flann_valsig_ = str(list(flann_params.values()))
            #flann_valsig = ut.remove_chars(flann_valsig_, ', \'[]')
            flann_cfgstr_list.append('_FLANN(' + flann_valsig_ + ')')
        if use_data_hash:
            vecs_hashstr = ut.hashstr_arr(nnindexer.idx2_vec, '_VECS')
            flann_cfgstr_list.append(vecs_hashstr)
        flann_cfgstr = ''.join(flann_cfgstr_list)
        return flann_cfgstr
Example #25
    def get_addtable_kw(table):
        primary_coldef = [(table.rowid_colname, 'INTEGER PRIMARY KEY')]
        parent_coldef = [(key, 'INTEGER NOT NULL') for key in table.parent_rowid_colnames]
        config_coldef = [(CONFIG_ROWID, 'INTEGER DEFAULT 0')]
        internal_data_coldef = list(zip(table._internal_data_colnames,
                                        table._internal_data_coltypes))

        coldef_list = primary_coldef + parent_coldef + config_coldef + internal_data_coldef
        add_table_kw = ut.odict([
            ('tablename', table.tablename,),
            ('coldef_list', coldef_list,),
            ('docstr', table.docstr,),
            ('superkeys', [table.superkey_colnames],),
            ('dependson', table.parents),
        ])
        return add_table_kw
Example #26
 def initializer_info(initclass):
     initclassname = initclass.__class__.__name__
     if initclassname == 'Constant':
         spec = initclass.val
     else:
         spec = ut.odict()
         spec['type'] = initclassname
         for key, val in initclass.__dict__.items():
             if isinstance(val, lasagne.init.Initializer):
                 spec[key] = initializer_info(val)
             elif isinstance(val, type) and issubclass(val, lasagne.init.Initializer):
                 spec[key] = val.__name__
                 #initializer_info(val())
             else:
                 spec[key] = val
     return spec
Example #27
 def infodict2(player):
     infodict = ut.odict(
         [
             # ('id_', player.id_),
             ("turn", player.turn),
             ("life", player.life),
             ("library_hash", ut.hashstr27(str(player.deck.library))),
             ("library_size", len(player.deck)),
             ("hand_size", len(player.hand)),
             ("bfield_size", len(player.bfield)),
             ("graveyard_size", len(player.graveyard)),
             ("exiled_size", len(player.exiled)),
             ("hand", mtgobjs.CardGroup(player.hand).infohist),
             ("bfield", mtgobjs.CardGroup(player.bfield).infohist),
             ("graveyard", mtgobjs.CardGroup(player.graveyard).infohist),
         ]
     )
     return infodict
Example #28
def experiments_k():
    """
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_k
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_k --full
        ./experiment_k.sh

    Example:
        >>> from ibeis.scripts.gen_cand_expts import *
        >>> make_standard_test_scripts(experiments_k())
    """
    varydict = ut.odict([
        ('acfg_name', ACFG_OPTION_VARYSIZE),
        ('cfg_name', ['candidacy_k']),
        ('dbname', get_dbnames(exclude_list=['PZ_FlankHack', 'PZ_MTEST'])),
    ])
    #return (varydict, 'k', ['surface3d', 'surface2d'])
    return (varydict, 'k', ['surface2d'])
Example #30
def infer_monitor_specs(res_w, res_h, inches_diag):
    """
    monitors = [
        dict(name='work1', inches_diag=23, res_w=1920, res_h=1080),
        dict(name='work2', inches_diag=24, res_w=1920, res_h=1200),

        dict(name='hp-129', inches_diag=25, res_w=1920, res_h=1080),
        dict(name='?-26', inches_diag=26, res_w=1920, res_h=1080),
        dict(name='?-27', inches_diag=27, res_w=1920, res_h=1080),
    ]
    for info in monitors:
        name = info['name']
        inches_diag = info['inches_diag']
        res_h = info['res_h']
        res_w = info['res_w']
        print('---')
        print(name)
        inches_w = inches_diag * res_w / np.sqrt(res_h**2 + res_w**2)
        inches_h = inches_diag * res_h / np.sqrt(res_h**2 + res_w**2)
        print('inches diag = %.2f' % (inches_diag))
        print('inches WxH = %.2f x %.2f' % (inches_w, inches_h))

    #inches_w = inches_diag * res_w/sqrt(res_h**2 + res_w**2)
    """
    import sympy

    # Build a system of equations and solve it
    inches_w, inches_h = sympy.symbols(
        'inches_w inches_h'.split(), real=True, positive=True
    )
    res_w, res_h = sympy.symbols('res_w res_h'.split(), real=True, positive=True)
    (inches_diag,) = sympy.symbols('inches_diag'.split(), real=True, positive=True)
    equations = [
        sympy.Eq(inches_diag, (inches_w ** 2 + inches_h ** 2) ** 0.5),
        sympy.Eq(res_w / res_h, inches_w / inches_h),
    ]
    print('Possible solutions:')
    query_vars = [inches_w, inches_h]
    for solution in sympy.solve(equations, query_vars):
        print('Solution:')
        reprstr = ut.repr3(
            ut.odict(zip(query_vars, solution)), explicit=True, nobr=1, with_comma=False
        )
        print(ut.indent(ut.autopep8_format(reprstr)))
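
The symbolic solve reduces to the closed form the docstring already uses; a numeric check for a hypothetical 23-inch 1920x1080 panel:

import numpy as np
res_w, res_h, inches_diag = 1920, 1080, 23
diag_px = np.sqrt(res_w ** 2 + res_h ** 2)
inches_w = inches_diag * res_w / diag_px  # ~20.05 inches
inches_h = inches_diag * res_h / diag_px  # ~11.28 inches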
Example #31
File: pipinfo.py  Project: Erotemic/utool
def module_stdinfo_dict(module, versionattr='__version__', version=None,
                        libdep=None, name=None, **kwargs):
    infodict = ut.odict()
    if module is None:
        infodict['__version__'] = version
        infodict['__name__'] = name
        infodict['__file__'] = 'None'
    else:
        if version is not None:
            infodict['__version__'] = version
        else:
            infodict['__version__'] = getattr(module, versionattr, None)
        infodict['__name__'] = getattr(module, '__name__', name)
        infodict['__file__'] = getattr(module, '__file__', None)

    if libdep is not None:
        infodict['libdep'] = libdep
    infodict.update(kwargs)
    return infodict
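
A small usage sketch with a builtin module; attributes the module lacks fall back to None via getattr:

import math
info = module_stdinfo_dict(math)
assert info['__name__'] == 'math'
assert info['__version__'] is None  # builtins define no __version__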
Example #32
    def initialize(depc):
        print('[depc] INITIALIZE DEPCACHE')

        if depc._use_globals:
            print(' * registering %d global preproc funcs' % (len(__PREPROC_REGISTER__),))
            for args_, kwargs_ in __PREPROC_REGISTER__:
                depc._register_prop(*args_, **kwargs_)

        ut.ensuredir(depc.cache_dpath)
        #print('depc.cache_dpath = %r' % (depc.cache_dpath,))
        config_addtable_kw = ut.odict(
            [
                ('tablename', CONFIG_TABLE,),
                ('coldef_list', [
                    (CONFIG_ROWID, 'INTEGER PRIMARY KEY'),
                    (CONFIG_HASHID, 'TEXT'),
                ],),
                ('docstr', 'table for algo configurations'),
                ('superkeys', [(CONFIG_HASHID,)]),
                ('dependson', [])
            ]
        )
        #print(ut.repr3(config_addtable_kw))

        #print('depc.fname_to_db.keys = %r' % (depc.fname_to_db,))
        for fname in depc.fname_to_db.keys():
            #print('fname = %r' % (fname,))
            if fname == ':memory:':
                fpath = fname
            else:
                fname_ = ut.ensure_ext(fname, '.sqlite')
                fpath = ut.unixjoin(depc.cache_dpath, fname_)
            #print('fpath = %r' % (fpath,))
            db = SQLDatabaseController(fpath=fpath, simple=True)
            if not db.has_table(CONFIG_TABLE):
                db.add_table(**config_addtable_kw)
            depc.fname_to_db[fname] = db
        print('[depc] Finished initialization')

        for table in depc.cachetable_dict.values():
            table.initialize()
Example #33
def inspect_annotation_configs():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-inspect_annotation_configs
        python -m ibeis.scripts.gen_cand_expts --exec-inspect_annotation_configs --full

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> make_standard_test_scripts(inspect_annotation_configs())
    """
    testdef_list = [func() for func in TEST_GEN_FUNCS]
    acfg_name_list = ut.flatten([tup[0]['acfg_name'] for tup in testdef_list])
    acfg_name_list = list(set(acfg_name_list))
    varydict = ut.odict([
        #('acfg_name', ['controlled']),
        #('acfg_name', ['controlled', 'controlled2']),
        ('acfg_name', [' '.join(acfg_name_list)]),
        ('dbname', get_dbnames()),
    ])
    return varydict, 'inspect_acfg', 'inspect_acfg'
Example #34
def experiments_baseline():
    """
    Generates the experiments we are doing on invariance

    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_baseline
        ./experiment_baseline.sh

    Example:
        >>> from ibeis.scripts.gen_cand_expts import *
        >>> make_standard_test_scripts(experiments_baseline())
    """
    # Invariance Experiments
    varydict = ut.odict([
        #('acfg_name', ['controlled']),
        #('acfg_name', ['controlled', 'controlled2']),
        ('acfg_name', ACFG_OPTION_CONTROLLED),
        ('cfg_name', ['candidacy_baseline']),
        ('dbname', get_dbnames()),
    ])
    return (varydict, 'baseline', 'cumhist')
Example #35
def experiments_viewpoint():
    """
    Generates the experiments we are doing on invariance

    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_viewpoint --full
        ./experiment_view.sh

    Example:
        >>> from ibeis.scripts.gen_cand_expts import *
        >>> make_standard_test_scripts(experiments_viewpoint())
    """
    #basecmd = 'python -m ibeis.expt.experiment_printres
    #--exec-print_latexsum --rank-lt-list=1,5,10,100 '
    varydict = ut.odict([
        ('acfg_name', ['viewpoint_compare']),
        ('cfg_name', ['default']),
        #('dbname', ['NNP_Master3', 'PZ_Master0']),
        ('dbname', ['PZ_Master1']),
    ])
    return (varydict, 'view', 'cumhist')
Example #39
def experiments_namescore():
    """
    Generates the experiments we are doing on invariance

    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_namescore --full
        ./experiment_namescore.sh

        python -m ibeis.scripts.gen_cand_expts --exec-experiments_namescore --full
        python -m ibeis.expt.experiment_helpers --exec-get_annotcfg_list:0 -a candidacy_namescore --db PZ_Master1  # NOQA

    Example:
        >>> from ibeis.scripts.gen_cand_expts import *
        >>> make_standard_test_scripts(experiments_namescore())
    """
    varydict = ut.odict([
        #('acfg_name', ['controlled', 'controlled2']),
        ('acfg_name', ACFG_OPTION_CONTROLLED + ACFG_OPTION_VARYPERNAME),
        ('cfg_name', ['candidacy_namescore', 'candidacy_namescore:K=1']),
        ('dbname', get_dbnames()),
    ])
    return (varydict, 'namescore', 'cumhist')
Example #40
def experiments_invariance():
    """
    Generates the experiments we are doing on invariance

    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_invariance
        python -m ibeis.scripts.gen_cand_expts --exec-experiments_invariance --full

    Example:
        >>> from ibeis.scripts.gen_cand_expts import *
        >>> make_standard_test_scripts(experiments_invariance())
    """
    # Invariance Experiments
    #static_flags += ' --dpi=512 --figsize=11,4 --clipwhite'
    varydict = ut.odict([
        #('acfg_name', ['controlled']),
        #('acfg_name', ['controlled', 'controlled2']),
        ('acfg_name', ACFG_OPTION_CONTROLLED),
        ('cfg_name', ['candidacy_invariance']),
        ('dbname', get_dbnames()),
    ])
    return (varydict, 'invar', 'cumhist')
Example #43
def _debug_repr_cpd(cpd):
    import re
    import utool as ut

    code_fmt = ut.codeblock("""
        {variable} = pgmpy.factors.TabularCPD(
            variable={variable_repr},
            variable_card={variable_card_repr},
            values={get_cpd_repr},
            evidence={evidence_repr},
            evidence_card={evidence_card_repr},
        )
        """)
    keys = ['variable', 'variable_card', 'values', 'evidence', 'evidence_card']
    dict_ = ut.odict(zip(keys, [getattr(cpd, key) for key in keys]))
    # HACK
    dict_['values'] = cpd.get_cpd()
    r = ut.repr2(dict_, explicit=True, nobraces=True, nl=True)
    logger.info(r)

    # Parse props that are needed for this fmtstr
    fmt_keys = [
        match.groups()[0] for match in re.finditer('{(.*?)}', code_fmt)
    ]
    need_reprs = [key[:-5] for key in fmt_keys if key.endswith('_repr')]
    need_keys = [key for key in fmt_keys if not key.endswith('_repr')]
    # Get corresponding props
    # Call methods if needbe
    tmp = [(prop, getattr(cpd, prop)) for prop in need_reprs]
    tmp = [(x, y()) if ut.is_funclike(y) else (x, y) for (x, y) in tmp]
    fmtdict = dict(tmp)
    fmtdict = ut.map_dict_vals(ut.repr2, fmtdict)
    fmtdict = ut.map_dict_keys(lambda x: x + '_repr', fmtdict)
    tmp2 = [(prop, getattr(cpd, prop)) for prop in need_keys]
    fmtdict.update(dict(tmp2))
    code = code_fmt.format(**fmtdict)
    return code
Example #44
def build_alias_map(regex_map, tag_vocab):
    """
    Constructs explicit mapping. Order of items in regex map matters.
    Items at top are given preference.

    Example:
        >>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
        >>> tag_vocab = ut.flat_unique(*tags_list)
        >>> regex_map = [('t[3-4]', 'A9'), ('t0', 'a0')]
        >>> alias_map = build_alias_map(regex_map, tag_vocab)
        >>> unmapped = list(set(tag_vocab) - set(alias_map.keys()))
    """
    import utool as ut
    import re
    alias_map = ut.odict([])
    for pats, new_tag in reversed(regex_map):
        pats = ut.ensure_iterable(pats)
        for pat in pats:
            flags = [re.match(pat, t) for t in tag_vocab]
            for old_tag in ut.compress(tag_vocab, flags):
                alias_map[old_tag] = new_tag
    identity_map = ut.take_column(regex_map, 1)
    for tag in ut.filter_Nones(identity_map):
        alias_map[tag] = tag
    return alias_map
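
Working the doctest values through by hand (a sketch; only utool and the anchored-at-start semantics of re.match are assumed):

tag_vocab = ['t1', 't2', 't3', 't4', 't5']
regex_map = [('t[3-4]', 'A9'), ('t0', 'a0')]
alias_map = build_alias_map(regex_map, tag_vocab)
# t3 and t4 match 't[3-4]' and are redirected to 'A9'; the new tags
# also map to themselves via the identity pass at the end
assert alias_map['t3'] == alias_map['t4'] == 'A9'
assert alias_map['A9'] == 'A9' and alias_map['a0'] == 'a0'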
Example #45
def graph_info(graph, verbose=False):
    import utool as ut
    node_attrs = list(graph.node.values())
    edge_attrs = list(ut.take_column(graph.edges(data=True), 2))
    node_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in node_attrs]))
    edge_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in edge_attrs]))
    node_type_hist = ut.dict_hist(list(map(type, graph.nodes())))
    info_dict = ut.odict([
        ('directed', graph.is_directed()),
        ('multi', graph.is_multigraph()),
        ('num_nodes', len(graph)),
        ('num_edges', len(list(graph.edges()))),
        ('edge_attr_hist', ut.sort_dict(edge_attr_hist)),
        ('node_attr_hist', ut.sort_dict(node_attr_hist)),
        ('node_type_hist', ut.sort_dict(node_type_hist)),
        ('graph_attrs', graph.graph),
        ('graph_name', graph.name),
    ])
    #unique_attrs = ut.map_dict_vals(ut.unique, ut.dict_accum(*node_attrs))
    #ut.dict_isect_combine(*node_attrs))
    #[list(attrs.keys())]
    if verbose:
        print(ut.repr3(info_dict))
    return info_dict
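
A sketch of calling graph_info on a toy graph; the body reads graph.node, so networkx 1.x is assumed:

import networkx as nx
graph = nx.Graph()
graph.add_edge(1, 2, weight=1.0)
graph.add_node(3, color='red')
info = graph_info(graph, verbose=True)
# expect info['num_nodes'] == 3 and info['num_edges'] == 1, with the
# attr histograms counting 'weight' and 'color' once each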
Example #46
    def isect_info(self, other):
        set1 = set(self.rel_fpath_list)
        set2 = set(other.rel_fpath_list)

        set_comparisons = ut.odict([
            ('s1', set1),
            ('s2', set2),
            ('union', set1.union(set2)),
            ('isect', set1.intersection(set2)),
            ('s1 - s2', set1.difference(set2)),
            ('s2 - s1', set2.difference(set1)),
        ])
        stat_stats = ut.map_vals(len, set_comparisons)
        print(ut.repr4(stat_stats))
        return set_comparisons

        if False:
            idx_lookup1 = ut.make_index_lookup(self.rel_fpath_list)
            idx_lookup2 = ut.make_index_lookup(other.rel_fpath_list)

            uuids1 = ut.take(self.uuids, ut.take(idx_lookup1, set_comparisons['union']))
            uuids2 = ut.take(other.uuids, ut.take(idx_lookup2, set_comparisons['union']))

            uuids1 == uuids2
Example #47
def classification_report2(y_true, y_pred, target_names=None,
                           sample_weight=None, verbose=True):
    """
    References:
        https://csem.flinders.edu.au/research/techreps/SIE07001.pdf
        https://www.mathworks.com/matlabcentral/fileexchange/5648-bm-cm-?requestedDomain=www.mathworks.com
        Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN
            Error Measures in MultiClass Prediction

    Example:
        >>> from ibeis.algo.verif.sklearn_utils import *  # NOQA
        >>> y_true = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3]
        >>> y_pred = [1, 2, 1, 3, 1, 2, 2, 3, 2, 2, 3, 3, 2, 3, 3, 3, 1, 3]
        >>> target_names = None
        >>> sample_weight = None
        >>> verbose = True
        >>> report = classification_report2(y_true, y_pred, verbose=verbose)

    Ignore:
        >>> size = 100
        >>> rng = np.random.RandomState(0)
        >>> p_classes = np.array([.90, .05, .05][0:2])
        >>> p_classes = p_classes / p_classes.sum()
        >>> p_wrong   = np.array([.03, .01, .02][0:2])
        >>> y_true = testdata_ytrue(p_classes, p_wrong, size, rng)
        >>> rs = []
        >>> for x in range(17):
        >>>     p_wrong += .05
        >>>     y_pred = testdata_ypred(y_true, p_wrong, rng)
        >>>     report = classification_report2(y_true, y_pred, verbose='hack')
        >>>     rs.append(report)
        >>> import plottool as pt
        >>> pt.qtensure()
        >>> df = pd.DataFrame(rs).drop(['raw'], axis=1)
        >>> delta = df.subtract(df['target'], axis=0)
        >>> sqrd_error = np.sqrt((delta ** 2).sum(axis=0))
        >>> print('Error')
        >>> print(sqrd_error.sort_values())
        >>> ys = df.to_dict(orient='list')
        >>> pt.multi_plot(ydata_list=ys)
    """
    import sklearn.metrics
    from sklearn.preprocessing import LabelEncoder

    if target_names is None:
        unique_labels = np.unique(np.hstack([y_true, y_pred]))
        if len(unique_labels) == 1 and (unique_labels[0] == 0 or unique_labels[0] == 1):
            target_names = np.array([False, True])
            y_true_ = y_true
            y_pred_ = y_pred
        else:
            lb = LabelEncoder()
            lb.fit(unique_labels)
            y_true_ = lb.transform(y_true)
            y_pred_ = lb.transform(y_pred)
            target_names = lb.classes_
    else:
        y_true_ = y_true
        y_pred_ = y_pred

    # Real data is on the rows,
    # Pred data is on the cols.

    cm = sklearn.metrics.confusion_matrix(
        y_true_, y_pred_, sample_weight=sample_weight)
    confusion = cm  # NOQA

    k = len(cm)  # number of classes
    N = cm.sum()  # number of examples

    real_total = cm.sum(axis=1)
    pred_total = cm.sum(axis=0)

    # the number of "positive" cases **per class**
    n_pos = real_total  # NOQA
    # the number of times a class was predicted.
    n_neg = N - n_pos  # NOQA

    # number of true positives per class
    n_tps = np.diag(cm)
    # number of false positives per class (column sum minus the diagonal)
    n_fps = (cm - np.diagflat(np.diag(cm))).sum(axis=0)

    tprs = n_tps / real_total  # true pos rate (recall)
    tpas = n_tps / pred_total  # true pos accuracy (precision)

    unused = (real_total + pred_total) == 0

    fprs = n_fps / n_neg  # false positive rate
    fprs[unused] = np.nan
    # tnrs = 1 - fprs

    rprob = real_total / N
    pprob = pred_total / N

    if len(cm) == 2:
        [[A, B],
         [C, D]] = cm
        # binary MCC; the value is computed but currently unused
        (A * D - B * C) / np.sqrt((A + C) * (B + D) * (A + B) * (C + D))

        # c2 = vt.ConfusionMetrics().fit(scores, y)

    # bookmaker is analogous to recall, but unbiased by class frequency
    rprob_mat = np.tile(rprob, [k, 1]).T - (1 - np.eye(k))
    bmcm = cm.T / rprob_mat
    bms = np.sum(bmcm.T, axis=0) / N

    # markedness is analogous to precision, but unbiased by class frequency
    pprob_mat = np.tile(pprob, [k, 1]).T - (1 - np.eye(k))
    mkcm = cm / pprob_mat
    mks = np.sum(mkcm.T, axis=0) / N

    mccs = np.sign(bms) * np.sqrt(np.abs(bms * mks))

    perclass_data = ut.odict([
        ('precision', tpas),
        ('recall', tprs),
        ('fpr', fprs),
        ('markedness', mks),
        ('bookmaker', bms),
        ('mcc', mccs),
        ('support', real_total),
    ])

    tpa = np.nansum(tpas * rprob)
    tpr = np.nansum(tprs * rprob)

    fpr = np.nansum(fprs * rprob)

    mk = np.nansum(mks * rprob)
    bm = np.nansum(bms * pprob)

    # The simple mean seems to do the best
    mccs_ = mccs[~np.isnan(mccs)]
    if len(mccs_) == 0:
        mcc_combo = np.nan
    else:
        mcc_combo = np.nanmean(mccs_)

    combined_data = ut.odict([
        ('precision', tpa),
        ('recall', tpr),
        ('fpr', fpr),
        ('markedness', mk),
        ('bookmaker', bm),
        # ('mcc', np.sign(bm) * np.sqrt(np.abs(bm * mk))),
        ('mcc', mcc_combo),
        # np.sign(bm) * np.sqrt(np.abs(bm * mk))),
        ('support', real_total.sum())
    ])

    # Not sure how to compute this. Should it agree with the sklearn impl?
    if verbose == 'hack':
        verbose = False
        mcc_known = sklearn.metrics.matthews_corrcoef(
            y_true, y_pred, sample_weight=sample_weight)
        mcc_raw = np.sign(bm) * np.sqrt(np.abs(bm * mk))

        import scipy as sp
        def gmean(x, w=None):
            if w is None:
                return sp.stats.gmean(x)
            return np.exp(np.nansum(w * np.log(x)) / np.nansum(w))

        def hmean(x, w=None):
            if w is None:
                return sp.stats.hmean(x)
            return 1 / (np.nansum(w * (1 / x)) / np.nansum(w))

        def amean(x, w=None):
            if w is None:
                return np.mean(x)
            return np.nansum(w * x) / np.nansum(w)

        report = {
            'target': mcc_known,
            'raw': mcc_raw,
        }

        # print('%r <<<' % (mcc_known,))
        means = {
            'a': amean,
            # 'h': hmean,
            'g': gmean,
        }
        weights = {
            'p': pprob,
            'r': rprob,
            '': None,
        }
        for mean_key, mean in means.items():
            for w_key, w in weights.items():
                # Hack of very wrong items
                if mean_key == 'g':
                    if w_key in ['r', 'p', '']:
                        continue
                if mean_key == 'g':
                    if w_key in ['r']:
                        continue
                m = mean(mccs, w)
                r_key = '{} {}'.format(mean_key, w_key)
                report[r_key] = m
                # print(r_key)
                # print(np.abs(m - mcc_known))

        # print(ut.repr4(report, precision=8))
        return report
        # print('mcc_known = %r' % (mcc_known,))
        # print('mcc_combo1 = %r' % (mcc_combo1,))
        # print('mcc_combo2 = %r' % (mcc_combo2,))
        # print('mcc_combo3 = %r' % (mcc_combo3,))

    # if target_names is None:
    #     target_names = list(range(k))
    index = pd.Index(target_names, name='class')

    perclass_df = pd.DataFrame(perclass_data, index=index)
    # combined_df = pd.DataFrame(combined_data, index=['ave/sum'])
    combined_df = pd.DataFrame(combined_data, index=['combined'])

    metric_df = pd.concat([perclass_df, combined_df])
    metric_df.index.name = 'class'
    metric_df.columns.name = 'metric'

    pred_id = ['%s' % m for m in target_names]
    real_id = ['%s' % m for m in target_names]
    confusion_df = pd.DataFrame(confusion, columns=pred_id, index=real_id)

    confusion_df = confusion_df.append(pd.DataFrame(
        [confusion.sum(axis=0)], columns=pred_id, index=['Σp']))
    confusion_df['Σr'] = np.hstack([confusion.sum(axis=1), [0]])
    confusion_df.index.name = 'real'
    confusion_df.columns.name = 'pred'

    if np.all(confusion_df - np.floor(confusion_df) < .000001):
        confusion_df = confusion_df.astype(int)  # np.int is removed in newer numpy
    confusion_df.iloc[(-1, -1)] = N
    if np.all(confusion_df - np.floor(confusion_df) < .000001):
        confusion_df = confusion_df.astype(int)
    # np.nan

    if verbose:
        cfsm_str = confusion_df.to_string(float_format=lambda x: '%.1f' % (x,))
        print('Confusion Matrix (real × pred) :')
        print(ut.hz_str('    ', cfsm_str))

        # ut.cprint('\nExtended Report', 'turquoise')
        print('\nEvaluation Metric Report:')
        float_precision = 2
        float_format = '%.' + str(float_precision) + 'f'
        ext_report = metric_df.to_string(float_format=float_format)
        print(ut.hz_str('    ', ext_report))

    report = {
        'metrics': metric_df,
        'confusion': confusion_df,
    }

    # FIXME: What is the difference between sklearn multiclass-MCC
    # and BM * MK MCC?

    def matthews_corrcoef(y_true, y_pred, sample_weight=None):
        from sklearn.metrics.classification import (
            _check_targets, LabelEncoder, confusion_matrix)
        y_type, y_true, y_pred = _check_targets(y_true, y_pred)
        if y_type not in {"binary", "multiclass"}:
            raise ValueError("%s is not supported" % y_type)
        lb = LabelEncoder()
        lb.fit(np.hstack([y_true, y_pred]))
        y_true = lb.transform(y_true)
        y_pred = lb.transform(y_pred)
        C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
        t_sum = C.sum(axis=1)
        p_sum = C.sum(axis=0)
        n_correct = np.trace(C)
        n_samples = p_sum.sum()
        cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
        cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
        cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
        mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
        if np.isnan(mcc):
            return 0.
        else:
            return mcc

    try:
        # mcc = sklearn.metrics.matthews_corrcoef(
        #     y_true, y_pred, sample_weight=sample_weight)
        mcc = matthews_corrcoef(y_true, y_pred, sample_weight=sample_weight)
        # These scales are chosen somewhat arbitrarily in the context of a
        # computer vision application with relatively reasonable quality data
        # https://stats.stackexchange.com/questions/118219/how-to-interpret
        mcc_significance_scales = ut.odict([
            (1.0, 'perfect'),
            (0.9, 'very strong'),
            (0.7, 'strong'),
            (0.5, 'significant'),
            (0.3, 'moderate'),
            (0.2, 'weak'),
            (0.0, 'negligible'),
        ])
        for k, v in mcc_significance_scales.items():
            if np.abs(mcc) >= k:
                if verbose:
                    print('classifier correlation is %s' % (v,))
                break
        if verbose:
            float_precision = 2
            print(('MCC\' = %.' + str(float_precision) + 'f') % (mcc,))
        report['mcc'] = mcc
    except ValueError:
        pass
    return report
Example #48
            'view_pername': '#primary>0&#primary1>1',
            'sample_per_ref_name': 1,
            'sample_per_name': 1,
            # TODO: need to make this consistent across both experiment modes
            'sample_size': None,
        }),
}


viewdiff = vp = viewpoint_compare = {
    'qcfg': ut.augdict(
        ctrl['qcfg'], ut.odict([
            ('sample_size', None),
            # To be a query you must have at least one primary view and at
            # least one primary1 view
            ('view_pername', '#primary>0&#primary1>0'),
            ('force_const_size', True),
            ('view', 'primary1'),
            ('sample_per_name', 1),
        ])),

    'dcfg': ut.augdict(
        ctrl['dcfg'], {
            'view': ['primary'],
            'force_const_size': True,
            'sample_per_ref_name': 1,
            'sample_per_name': 1,  # None seems to produce odd results where
                                   # per_ref is still more than 1
            'sample_size': None,
            'view_pername': '#primary>0&#primary1>0',
        }),
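A minimal sketch of the assumed ut.augdict semantics (inferred from usage here, not from the library's documented contract): copy the base config, then apply the overriding keys.

def augdict_sketch(base, overrides):
    out = dict(base)       # shallow copy of the base config
    out.update(overrides)  # overriding keys win
    return out

ctrl_qcfg = {'sample_size': 128, 'view': 'primary', 'sample_per_name': None}
qcfg = augdict_sketch(ctrl_qcfg, {'sample_size': None, 'view': 'primary1'})
assert qcfg['view'] == 'primary1' and qcfg['sample_size'] is None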
Example #49
    def group_items(self, labels):
        """ Group items by label, returning an ordered dict. """
        unique_labels, groups = self.group(labels)
        label_to_group = ut.odict(zip(unique_labels, groups))
        return label_to_group
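A hedged usage sketch; the class owning group_items is not shown, so GroupSketch below is an illustrative stand-in whose .group() partitions items by label.

import utool as ut

class GroupSketch(object):
    def __init__(self, items):
        self.items = items

    def group(self, labels):
        unique_labels = ut.unique(labels)
        groups = [[item for item, lbl in zip(self.items, labels) if lbl == u]
                  for u in unique_labels]
        return unique_labels, groups

    def group_items(self, labels):
        unique_labels, groups = self.group(labels)
        return ut.odict(zip(unique_labels, groups))

g = GroupSketch(['a1', 'a2', 'b1'])
assert g.group_items(['a', 'a', 'b']) == {'a': ['a1', 'a2'], 'b': ['b1']}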
Example #50
    #db.modify_table(const.FEATURE_WEIGHT_TABLE, dependson=[const.FEATURE_TABLE, const.PROBCHIP_TABLE])
    #db.modify_table(const.RESIDUAL_TABLE, dependson=[const.FEATURE_TABLE, const.VOCAB_TABLE])
    #db.modify_table(const.PROBCHIP_TABLE, dependson=[const.CHIP_TABLE])


# ========================
# Valid Versions & Mapping
# ========================


base = const.BASE_DATABASE_VERSION
VALID_VERSIONS = ut.odict([
    #version:   (Pre-Update Function,  Update Function,    Post-Update Function)
    (base   ,    (None,                 None,               None,)),
    ('1.0.0',    (None,                 update_1_0_0,       None,)),
    ('1.0.1',    (None,                 update_1_0_1,       None,)),
    ('1.0.2',    (None,                 update_1_0_2,       None,)),
    ('1.0.3',    (None,                 update_1_0_3,       None,)),
    ('1.0.4',    (None,                 update_1_0_4,       None,)),
])

LEGACY_UPDATE_FUNCTIONS = [
    ('1.0.4',  _sql_helpers.fix_metadata_consistency),
]
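

# A hypothetical driver sketch (the real updater lives in _sql_helpers and
# may differ): walk VALID_VERSIONS in insertion order from the database's
# current version, firing the (pre, update, post) hooks for each newer one.
def walk_updates_sketch(db, current_version):
    versions = list(VALID_VERSIONS.keys())
    for version in versions[versions.index(current_version) + 1:]:
        pre_, update_, post_ = VALID_VERSIONS[version]
        for hook in (pre_, update_, post_):
            if hook is not None:
                hook(db)
        # record progress; this metadata setter name is an assumption
        db.set_metadata_val('database_version', version)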


def autogen_dbcache_schema():
    """
    autogen_dbcache_schema

    Example:
Example #51
    def make_annot_inference_dict(infr, internal=False):
        ibs = infr.qreq_.ibs

        def convert_to_name_uuid(nid):
            # The name's UUID is stored as the name's text
            text = ibs.get_name_texts(nid, apply_fix=False)
            if text is None:
                text = 'NEWNAME_%s' % (str(nid),)
            return text

        if internal:
            get_annot_uuids = ut.identity
        else:
            get_annot_uuids = ibs.get_annot_uuids

        # Compile the cluster_dict
        col_list = ['aid_list', 'orig_nid_list', 'new_nid_list',
                    'exemplar_flag_list', 'error_flag_list']
        cluster_dict = dict(zip(col_list, ut.listT(infr.cluster_tuples)))
        cluster_dict['annot_uuid_list'] = get_annot_uuids(cluster_dict['aid_list'])
        cluster_dict['orig_name_list'] = [convert_to_name_uuid(nid)
                                          for nid in cluster_dict['orig_nid_list']]
        cluster_dict['new_name_list'] = [convert_to_name_uuid(nid)
                                         for nid in cluster_dict['new_nid_list']]
        # Keep only the keys we want to send back in the dictionary
        key_list = ['annot_uuid_list', 'orig_name_list', 'new_name_list',
                    'exemplar_flag_list', 'error_flag_list']
        cluster_dict = ut.dict_subset(cluster_dict, key_list)

        # Compile the annot_pair_dict
        col_list = ['aid_1_list', 'aid_2_list', 'p_same_list',
                    'confidence_list', 'raw_score_list']
        annot_pair_dict = dict(zip(col_list, ut.listT(infr.needs_review_list)))
        annot_pair_dict['annot_uuid_1_list'] = get_annot_uuids(annot_pair_dict['aid_1_list'])
        annot_pair_dict['annot_uuid_2_list'] = get_annot_uuids(annot_pair_dict['aid_2_list'])
        zipped = zip(annot_pair_dict['annot_uuid_1_list'],
                     annot_pair_dict['annot_uuid_2_list'],
                     annot_pair_dict['p_same_list'])
        annot_pair_dict['review_pair_list'] = [
            {
                'annot_uuid_key'       : annot_uuid_1,
                'annot_uuid_1'         : annot_uuid_1,
                'annot_uuid_2'         : annot_uuid_2,
                'prior_matching_state' : {
                    'p_match'   : p_same,
                    'p_nomatch' : 1.0 - p_same,
                    'p_notcomp' : 0.0,
                }
            }
            for (annot_uuid_1, annot_uuid_2, p_same) in zipped
        ]
        # Keep only the keys we want to send back in the dictionary
        key_list = ['review_pair_list', 'confidence_list']
        annot_pair_dict = ut.dict_subset(annot_pair_dict, key_list)

        # Compile the inference dict
        inference_dict = ut.odict([
            ('cluster_dict', cluster_dict),
            ('annot_pair_dict', annot_pair_dict),
            ('_internal_state', None),
        ])
        return inference_dict
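For orientation, a hedged sketch of the shape this function returns (every value below is an illustrative placeholder, not real data):

example_inference_dict = {
    'cluster_dict': {
        'annot_uuid_list': ['<annot-uuid-1>', '<annot-uuid-2>'],
        'orig_name_list': ['NEWNAME_1', 'name_a'],
        'new_name_list': ['name_a', 'name_a'],
        'exemplar_flag_list': [True, False],
        'error_flag_list': [False, False],
    },
    'annot_pair_dict': {
        'review_pair_list': [{
            'annot_uuid_key': '<annot-uuid-1>',
            'annot_uuid_1': '<annot-uuid-1>',
            'annot_uuid_2': '<annot-uuid-2>',
            'prior_matching_state': {
                'p_match': 0.9,
                'p_nomatch': 0.1,
                'p_notcomp': 0.0,
            },
        }],
        'confidence_list': [0.5],
    },
    '_internal_state': None,
}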
Example #52
def get_orgres_desc_match_dists(allres, orgtype_list=['false', 'true'],
                                distkey_list=['L2'],
                                verbose=True):
    r"""
    computes distances between matching descriptors of orgtypes in allres

    Args:
        allres (AllResults): AllResults object
        orgtype_list (list): of strings denoting the type of results to compare
        distkey_list (list): list of requested distance types

    Returns:
        dict: orgres2_descmatch_dists mapping from orgtype to dicts of distances (ndarrays)

    Notes:
        Just SIFT distance seems to have a very interesting property

    CommandLine:
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --show
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_Master0 --show
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_Master0 --distkeys=fs,lnbnn,bar_L2_sift --show
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=fs,lnbnn,bar_L2_sift,cos_sift --show
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_Master0 --distkeys=fs,lnbnn,bar_L2_sift,cos_sift --show
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=cos_sift --show
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_Master0 --distkeys=fs,lnbnn,bar_L2_sift,cos_sift --show --nosupport

        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=lnbnn --show --feat_type=hesaff+siam128
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=lnbnn --show --feat_type=hesaff+sift

        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=lnbnn --show --feat_type=hesaff+sift --num-top-fs=2
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=lnbnn --show --feat_type=hesaff+sift --num-top-fs=10
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=lnbnn --show --feat_type=hesaff+sift --num-top-fs=1000
        python -m ibeis.expt.results_analyzer --test-get_orgres_desc_match_dists --db PZ_MTEST --distkeys=lnbnn --show --feat_type=hesaff+siam128 --num-top-fs=1

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.expt.results_analyzer import *  # NOQA
        >>> from ibeis.expt import results_all
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> qaid_list = ibs.get_valid_aids(hasgt=True)
        >>> from ibeis.model import Config
        >>> cfgdict = ut.argparse_dict(dict(Config.parse_config_items(Config.QueryConfig())), only_specified=True)
        >>> allres = results_all.get_allres(ibs, qaid_list, cfgdict=cfgdict)
        >>> # {'feat_type': 'hesaff+siam128'})
        >>> orgtype_list = ['false', 'top_true']
        >>> verbose = True
        >>> distkey_list = ut.get_argval('--distkeys', type_=list, default=['fs', 'lnbnn', 'bar_L2_sift'])
        >>> #distkey_list = ['hist_isect']
        >>> #distkey_list = ['L2_sift', 'bar_L2_sift']
        >>> # execute function
        >>> orgres2_descmatch_dists = get_orgres_desc_match_dists(allres, orgtype_list, distkey_list, verbose)
        >>> #print('orgres2_descmatch_dists = ' + ut.dict_str(orgres2_descmatch_dists, truncate=-1, precision=3))
        >>> stats_ = {key: ut.dict_val_map(val, ut.get_stats) for key, val in orgres2_descmatch_dists.items()}
        >>> print('orgres2_descmatch_dists = ' + ut.dict_str(stats_, truncate=2, precision=3, nl=4))
        >>> # ------ VISUALIZE ------------
        >>> ut.quit_if_noshow()
        >>> import vtool as vt
        >>> # If viewing a large amount of data this might help on OverFlowError
        >>> #ut.embed()
        >>> # http://stackoverflow.com/questions/20330475/matplotlib-overflowerror-allocated-too-many-blocks
        >>> # http://matplotlib.org/1.3.1/users/customizing.html
        >>> limit_ = len(qaid_list) > 100
        >>> if limit_ or True:
        >>>     import matplotlib as mpl
        >>>     mpl.rcParams['agg.path.chunksize'] = 100000
        >>> # visualize the descriptor scores
        >>> for fnum, distkey in enumerate(distkey_list, start=1):
        >>>     encoder = vt.ScoreNormalizer()
        >>>     tn_scores, tp_scores = ut.get_list_column(ut.dict_take(orgres2_descmatch_dists, orgtype_list), distkey)
        >>>     encoder.fit_partitioned(tp_scores, tn_scores, verbose=False)
        >>>     figtitle = 'Descriptor Distance: %r. db=%r\norgtype_list=%r' % (distkey, ibs.get_dbname(), orgtype_list)
        >>>     use_support = not ut.get_argflag('--nosupport')
        >>>     encoder.visualize(figtitle=figtitle, use_stems=not limit_, fnum=fnum, with_normscore=use_support, with_scores=use_support)
        >>> ut.show_if_requested()
    """
    import vtool as vt
    orgres2_descmatch_dists = {}
    desc_dist_xs, other_xs = vt.index_partition(distkey_list, vt.VALID_DISTS)
    distkey_list1 = ut.list_take(distkey_list, desc_dist_xs)
    distkey_list2 = ut.list_take(distkey_list, other_xs)

    for orgtype in orgtype_list:
        if verbose:
            print('[rr2] getting orgtype=%r distances between vecs' % orgtype)
        orgres = allres.get_orgtype(orgtype)
        qaids = orgres.qaids
        aids  = orgres.aids
        # Do distances that need real computation
        if len(desc_dist_xs) > 0:
            try:
                stacked_qvecs, stacked_dvecs = get_matching_descriptors(allres, qaids, aids)
            except Exception as ex:
                orgres.printme3()
                ut.printex(ex)
                raise
            if verbose:
                print('[rr2]  * stacked_qvecs.shape = %r' % (stacked_qvecs.shape,))
                print('[rr2]  * stacked_dvecs.shape = %r' % (stacked_dvecs.shape,))
            hist1 = np.asarray(stacked_qvecs, dtype=np.float32)
            hist2 = np.asarray(stacked_dvecs, dtype=np.float32)
            # returns an ordered dictionary
            distances1 = vt.compute_distances(hist1, hist2, distkey_list1)
        else:
            distances1 = {}
        # Do precomputed distances like fs (true weights) or lnbnn
        if len(other_xs) > 0:
            distances2 = ut.odict([(disttype, []) for disttype in distkey_list2])
            for qaid, daid in zip(qaids, aids):
                try:
                    qres = allres.qaid2_qres[qaid]
                    for disttype in distkey_list2:
                        if disttype == 'fs':
                            # hack in the full feature-score column
                            vals = qres.aid2_fs[daid]
                        else:
                            assert disttype in qres.filtkey_list, 'no score labeled %r' % (disttype,)
                            index = qres.filtkey_list.index(disttype)
                            vals = qres.aid2_fsv[daid].T[index]
                        if len(vals) == 0:
                            continue
                        num_top_vec_scores = ut.get_argval('--num-top-fs', type_=int, default=None)
                        if num_top_vec_scores is not None:
                            # Take only the best matching descriptor scores
                            # for each pair, to see how dependable the BEST
                            # descriptor score is for each match
                            vals = vals[vals.argsort()[::-1][0:num_top_vec_scores]]
                        distances2[disttype].extend(vals)
                except KeyError:
                    continue
            # convert to numpy array
            for disttype in distkey_list2:
                distances2[disttype] = np.array(distances2[disttype])
        else:
            distances2 = {}
        # Put things back in expected order
        dist1_vals = ut.dict_take(distances1, distkey_list1)
        dist2_vals = ut.dict_take(distances2, distkey_list2)
        dist_vals = vt.rebuild_partition(dist1_vals, dist2_vals, desc_dist_xs, other_xs)
        distances = ut.odict(list(zip(distkey_list, dist_vals)))
        orgres2_descmatch_dists[orgtype] = distances
    return orgres2_descmatch_dists
Example #53
    def _asdict(cm):
        return ut.odict([
            (field, None if getattr(cm, field) is None else getattr(cm, field).copy())
            for field in cm._oldfields
        ])
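A hedged usage sketch showing what an _asdict-style copy yields; Stub below is an illustrative stand-in for the real class, and its _oldfields tuple is made up:

import numpy as np
import utool as ut

class Stub(object):
    _oldfields = ('qaid', 'score_list')

    def __init__(self):
        self.qaid = None
        self.score_list = np.array([0.9, 0.1])

    def _asdict(cm):
        return ut.odict([
            (field, None if getattr(cm, field) is None else getattr(cm, field).copy())
            for field in cm._oldfields
        ])

stub = Stub()
d = stub._asdict()
assert d['qaid'] is None
assert d['score_list'] is not stub.score_list      # .copy() breaks aliasing
assert np.all(d['score_list'] == stub.score_list)  # but the values match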