Code Example #1
File: util_git.py  Project: Erotemic/utool
            def exec_(script):
                import utool as ut
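                # NOTE: ``repo`` is not defined in this snippet; in the original
                # source ``exec_`` is a nested helper that captures it from the
                # enclosing scope.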

                print("+**** exec %s script *******" % (script.type_))
                print("repo = %r" % (repo,))
                with ut.ChdirContext(repo.dpath):
                    if script.is_fpath_valid():
                        normbuild_flag = "--no-rmbuild"
                        if ut.get_argflag(normbuild_flag):
                            ut.cmd(script.fpath + " " + normbuild_flag)
                        else:
                            ut.cmd(script.fpath)
                    else:
                        if script.text is not None:
                            print("ABOUT TO EXECUTE")
                            ut.print_code(script.text, "bash")
                            if ut.are_you_sure("execute above script?"):
                                from os.path import join

                                scriptdir = ut.ensure_app_resource_dir("utool", "build_scripts")
                                script_path = join(
                                    scriptdir, "script_" + script.type_ + "_" + ut.hashstr27(script.text) + ".sh"
                                )
                                ut.writeto(script_path, script.text)
                                _ = ut.cmd("bash ", script_path)  # NOQA
                        else:
                            print("CANT QUITE EXECUTE THIS YET")
                            ut.print_code(script.text, "bash")
                # os.system(scriptname)
                print("L**** exec %s script *******" % (script.type_))
Code Example #2
File: util_git.py  Project: SU-ECE-18-7/utool
 def exec_(script):
     import utool as ut
     print('+**** exec %s script *******' % (script.type_))
     print('repo = %r' % (repo,))
     with ut.ChdirContext(repo.dpath):
         if script.is_fpath_valid():
             normbuild_flag = '--no-rmbuild'
             if ut.get_argflag(normbuild_flag):
                 ut.cmd(script.fpath + ' ' + normbuild_flag)
             else:
                 ut.cmd(script.fpath)
         else:
             if script.text is not None:
                 print('ABOUT TO EXECUTE')
                 ut.print_code(script.text, 'bash')
                 if ut.are_you_sure('execute above script?'):
                     from os.path import join
                     scriptdir = ut.ensure_app_resource_dir('utool',
                                                            'build_scripts')
                     script_path = join(scriptdir,
                                        'script_' + script.type_ + '_' +
                                        ut.hashstr27(script.text) + '.sh')
                     ut.writeto(script_path, script.text)
                     _ = ut.cmd('bash ', script_path)  # NOQA
             else:
                 print("CANT QUITE EXECUTE THIS YET")
                 ut.print_code(script.text, 'bash')
     #os.system(scriptname)
     print('L**** exec %s script *******' % (script.type_))
Code Example #3
File: _grave_scorenorm.py  Project: Erotemic/ibeis
def delete_all_learned_normalizers():
    r"""
    DELETES ALL CACHED NORMALIZERS IN ALL DATABASES

    CommandLine:
        python -m ibeis.algo.hots.score_normalization --test-delete_all_learned_normalizers
        #-y

    Example:
        >>> # DOCTEST_DISABLE
        >>> from ibeis.algo.hots import score_normalization
        >>> score_normalization.delete_all_learned_normalizers()
    """
    from ibeis.algo.hots import score_normalization
    import utool as ut
    print('DELETE_ALL_LEARNED_NORMALIZERS')
    normalizer_fpath_list = score_normalization.list_available_score_normalizers()
    print('The following normalizers will be deleted: ' + ut.indentjoin(normalizer_fpath_list, '\n  '))
    if ut.are_you_sure('Deleting all learned normalizers'):
        ut.remove_fpaths(normalizer_fpath_list, verbose=True)
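The same list-confirm-delete pattern can be written with only the standard library. The sketch below is illustrative; the function name, cache directory argument, and glob pattern are placeholders, not the paths ibeis actually uses:

    import glob
    import os

    def delete_cached_files(cache_dir, pattern='*.cPkl'):
        # Gather the candidate files first so the user can review them.
        fpaths = sorted(glob.glob(os.path.join(cache_dir, pattern)))
        print('The following files will be deleted:')
        for fpath in fpaths:
            print('  ' + fpath)
        # Ask for explicit confirmation before removing anything.
        answer = input('Delete %d files? [y/N] ' % len(fpaths))
        if answer.strip().lower().startswith('y'):
            for fpath in fpaths:
                os.remove(fpath)
                print('removed ' + fpath)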
Code Example #4
def remerge_subset():
    """
    Assumes ibs1 is an updated subset of ibs2.
    Re-merges ibs1 back into ibs2.

    TODO: annotmatch table must have non-directional edges for this to work.
    I.e. u < v

    Ignore:

        # Ensure annotmatch and names are up to date with staging

        # Load graph
        import wbia
        ibs = wbia.opendb('PZ_PB_RF_TRAIN')
        infr = wbia.AnnotInference(aids='all', ibs=ibs, verbose=3)
        infr.reset_feedback('staging', apply=True)
        infr.relabel_using_reviews()

        # Check deltas
        infr.wbia_name_group_delta_info()
        infr.wbia_delta_info()

        # Write if it looks good
        infr.write_wbia_annotmatch_feedback()
        infr.write_wbia_name_assignment()

    Ignore:
        import wbia
        ibs = wbia.opendb('PZ_Master1')
        infr = wbia.AnnotInference(ibs, 'all')
        infr.reset_feedback('annotmatch', apply=True)

    CommandLine:
        python -m wbia.dbio.export_subset remerge_subset
    """
    import wbia
    import utool as ut  # used below as ``ut``; imported at module level in the original source
    # (``logger`` below refers to the module-level logger of the original file)

    ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')
    ibs2 = wbia.opendb('PZ_Master1')

    gids1, gids2 = ibs1.images(), ibs2.images()
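    # Match images across the two databases by UUID; idxs1/idxs2 index into
    # gids1/gids2 respectively for the images present in both.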
    idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)
    isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)

    assert all(
        set.issubset(set(a1), set(a2))
        for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids))

    annot_uuids = ut.flatten(isect_gids1.annot_uuids)
    # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)
    # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)
    aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)
    aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)
    import numpy as np

    to_aids2 = dict(zip(aids1, aids2))
    # to_aids1 = dict(zip(aids2, aids1))

    # Step 1) Update individual annot properties
    # These annots need updates
    # np.where(aids1.visual_uuids != aids2.visual_uuids)
    # np.where(aids1.semantic_uuids != aids2.semantic_uuids)

    annot_unary_props = [
        # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']
        'yaws',
        'bboxes',
        'thetas',
        'qual',
        'species',
        'case_tags',
        'multiple',
        'age_months_est_max',
        'age_months_est_min',  # 'sex_texts'
    ]
    to_change = {}
    for key in annot_unary_props:
        prop1 = getattr(aids1, key)
        prop2 = getattr(aids2, key)
        diff_idxs = set(np.where(prop1 != prop2)[0])
        if diff_idxs:
            diff_prop1 = ut.take(prop1, diff_idxs)
            diff_prop2 = ut.take(prop2, diff_idxs)
            logger.info('key = %r' % (key, ))
            logger.info('diff_prop1 = %r' % (diff_prop1, ))
            logger.info('diff_prop2 = %r' % (diff_prop2, ))
            to_change[key] = diff_idxs
    if to_change:
        changed_idxs = ut.unique(ut.flatten(to_change.values()))
        logger.info('Found %d annots that need updated properties' %
                    len(changed_idxs))
        logger.info('changing unary attributes: %r' % (to_change, ))
        if False and ut.are_you_sure('apply change'):
            for key, idxs in to_change.items():
                subaids1 = aids1.take(idxs)
                subaids2 = aids2.take(idxs)
                prop1 = getattr(subaids1, key)
                # prop2 = getattr(subaids2, key)
                setattr(subaids2, key, prop1)
    else:
        logger.info('Annot properties are in sync. Nothing to change')

    # Step 2) Update annotmatch - pairwise relationships
    infr1 = wbia.AnnotInference(aids=aids1.aids,
                                ibs=ibs1,
                                verbose=3,
                                autoinit=False)

    # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)
    aids2 = ibs2.get_valid_aids(is_known=True)
    infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)
    infr2.reset_feedback('annotmatch', apply=True)

    # map feedback from ibs1 onto ibs2 using ibs2 aids.
    fb1 = infr1.read_wbia_annotmatch_feedback()
    fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}
    fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)

    # Add transformed feedback into ibs2
    infr2.add_feedback_from(fb1_df_t)

    # Now ensure that dummy connectivity exists to preserve original names
    # from wbia.algo.graph import nx_utils
    # for (u, v) in infr2.find_mst_edges('name_label'):
    #     infr2.draw_aids((u, v))
    #     cc1 = infr2.pos_graph.connected_to(u)
    #     cc2 = infr2.pos_graph.connected_to(v)
    #     logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))
    #     infr2.neg_redundancy(cc1, cc2)
    #     infr2.pos_redundancy(cc2)

    infr2.relabel_using_reviews(rectify=True)
    infr2.apply_nondynamic_update()

    if False:
        infr2.wbia_delta_info()
        infr2.wbia_name_group_delta_info()

    if len(list(infr2.inconsistent_components())) > 0:
        raise NotImplementedError('need to fix inconsistencies first')
        # Make it so it just loops until inconsistencies are resolved
        infr2.prioritize()
        infr2.qt_review_loop()
    else:
        infr2.write_wbia_staging_feedback()
        infr2.write_wbia_annotmatch_feedback()
        infr2.write_wbia_name_assignment()

    # if False:
    #     # Fix any inconsistency
    #     infr2.start_qt_interface(loop=False)
    #     test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,
    #                   5338, 2654]
    #     import networkx as nx
    #     nx.is_connected(infr2.graph.subgraph(test_nodes))
    #     # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)

    #     # randomly sample some new labels to verify
    #     import wbia.guitool as gt
    #     from wbia.gui import inspect_gui
    #     gt.ensure_qapp()
    #     ut.qtensure()
    #     old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])
    #     del old_groups['____']

    #     new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])

    #     from wbia.algo.hots import simulate
    #     c = simulate.compare_groups(
    #         list(new_groups.values()),
    #         list(old_groups.values()),
    #     )
    #     ut.map_vals(len, c)
    #     for aids in c['pred_splits']:
    #         old_nids = ibs2.get_annot_nids(aids)
    #         new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)
    #         split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)
    #         aid1, aid2 = split_aids[0:2]

    #         if False:
    #             inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)
    #     infr2.start_qt_interface(loop=False)

    # if False:
    #     # import wbia
    #     ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')
    #     infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)
    #     infr1.initialize_graph()
    #     # infr1.reset_feedback('staging')
    #     infr1.reset_feedback('annotmatch')
    #     infr1.apply_feedback_edges()
    #     infr1.relabel_using_reviews()
    #     infr1.apply_review_inference()
    #     infr1.start_qt_interface(loop=False)
    # delta = infr2.match_state_delta()
    # logger.info('delta = %r' % (delta,))

    # infr2.ensure_mst()
    # infr2.relabel_using_reviews()
    # infr2.apply_review_inference()

    # mst_edges = infr2.find_mst_edges()
    # set(infr2.graph.edges()).intersection(mst_edges)

    return
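The core of the annotmatch transfer above is re-keying pairwise feedback from one database's annotation ids into the other's, using the UUID-based mapping ``to_aids2``. A standalone sketch of that step follows; the helper name is hypothetical and the ids and labels are made up rather than taken from the wbia data structures:

    def translate_pairwise_feedback(feedback, id_map):
        # Re-key (u, v) -> label feedback from the source id space into the
        # target id space; both endpoints of every edge must be in ``id_map``.
        return {(id_map[u], id_map[v]): label
                for (u, v), label in feedback.items()}

    # Toy usage with made-up ids:
    id_map = {1: 101, 2: 102, 3: 103}          # aids in ibs1 -> aids in ibs2
    feedback = {(1, 2): 'match', (2, 3): 'nomatch'}
    print(translate_pairwise_feedback(feedback, id_map))
    # -> {(101, 102): 'match', (102, 103): 'nomatch'}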
    """