Example #1
    def make_pipeline_text(self):
        blocks = []

        blocks.append(ub.codeblock(
            '''
            # ----------------------
            # nodes
            #
            '''))
        for proc in self.procs.values():
            node_text = proc.make_node_text()
            if node_text:
                blocks.append(node_text)

        blocks.append(ub.codeblock(
            '''
            # ----------------------
            # connections
            #
            '''))
        for proc in self.procs.values():
            edge_text = proc.make_edge_text()
            if edge_text:
                blocks.append(edge_text)

        blocks.append(ub.codeblock(
            '''
            # ----------------------
            # global pipeline config
            #
            '''))
        blocks.append(self.make_global_text())

        text = '\n\n'.join(blocks)
        return text
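All of these examples lean on the same behavior: ub.codeblock dedents a triple-quoted string and strips its leading and trailing blank lines, so multi-line text blocks can be written at the natural indentation of the surrounding code. A minimal standalone sketch:

import ubelt as ub

# the block below is indented in source, but comes out flush-left
text = ub.codeblock(
    '''
    # ----------------------
    # nodes
    #
    ''')
print(text)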
Example #2
def make_initd(fpath):
    name = splitext(basename(fpath))[0]
    text = INITD_TEMPLATE.format(name=name, fpath=fpath)
    startup_hook = expanduser(join('~/local/ubuntu/startup/init.d', name))
    ub.writeto(startup_hook, text)
    print(ub.codeblock(
        '''
        RUN:
        sudo cp {startup_hook} /etc/init.d/{name}
        sudo chmod +x /etc/init.d/{name}
        # sudo update-rc.d /etc/init.d/{name} defaults
        sudo update-rc.d {name} defaults
        service {name} start
        ''').format(**locals()))
Example #3
 def make_node_text(self):
     """
     Creates a text-based definition of this node for a .pipe file
     """
     fmtstr = ub.codeblock(
         '''
         process {name}
           :: {type}
         ''')
     parts = [fmtstr.format(name=self.name, type=self.type)]
     if self.config:
         if isinstance(self.config, six.string_types):
             parts.extend(self.config.splitlines())
         else:
             for key, val in self.config.items():
                 parts.append('  :{key} {val}'.format(key=key, val=val))
     text = '\n'.join(parts)
     return text
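To see the text this produces, here is a standalone sketch of the same formatting logic; the node name, type, and config key are hypothetical:

import ubelt as ub

fmtstr = ub.codeblock(
    '''
    process {name}
      :: {type}
    ''')
parts = [fmtstr.format(name='detector', type='image_object_detector')]
# dict-style config entries render as indented ':key value' lines
parts.append('  :{key} {val}'.format(key='threshold', val=0.5))
print('\n'.join(parts))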
Example #4
 def make_edge_text(self):
     """
     Creates a text-based definition of all incoming connections to this node
     for a .pipe file
     """
     fmtstr = ub.codeblock(
         '''
         connect from {oport_abs_name}
                 to   {iport_abs_name}
         ''')
     parts = []
     for iport in self.iports.values():
         for oport in iport.connections:
             if oport is not None:
                 part = fmtstr.format(
                     oport_abs_name=oport.absname(),
                     iport_abs_name=iport.absname(),
                 )
                 parts.append(part)
     text = '\n'.join(parts)
     return text
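The connection blocks follow the same pattern. A sketch with hypothetical port names standing in for the real absname() results:

import ubelt as ub

fmtstr = ub.codeblock(
    '''
    connect from {oport_abs_name}
            to   {iport_abs_name}
    ''')
print(fmtstr.format(
    oport_abs_name='detector.detected_object_set',
    iport_abs_name='tracker.detected_object_set',
))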
Example #5
    def argparse(self, parser=None, special_options=False):
        """
        construct or update an argparse.ArgumentParser CLI parser

        Args:
            parser (None | argparse.ArgumentParser): if specified, this
                parser is updated with options from this config.

            special_options (bool, default=False):
                adds special scriptconfig options, namely: --config, --dumps,
                and --dump.

        Returns:
            argparse.ArgumentParser : a new or updated argument parser

        CommandLine:
            xdoctest -m scriptconfig.config Config.argparse:0
            xdoctest -m scriptconfig.config Config.argparse:1

        TODO:
            A good CLI spec for lists might be

            # In the case where ``key`` ends with an ``=``, assume the list is
            # given as a comma separated string with optional square brackets
            # at each end.

            --key=[f]

            # In the case where ``key`` does not end with an equals sign and
            # we know the value is supposed to be a list, then we consume
            # arguments until we hit the next one that starts with '--'
            # (which means that list items cannot start with -- but they can
            # contain commas)

        FIXME:

            * In the case where we have an nargs='+' action, and we specify
              the option with an `=`, and then we give position args after it
              there is no way to modify behavior of the action to just look at
              the data in the string without modifying the ArgumentParser
              itself. The action object has no control over it. For example
              `--foo=bar baz biz` will parse as `[baz, biz]` which is really
              not what we want. We may be able to overload ArgumentParser to
              fix this.

        Example:
            >>> # You can now make instances of this class
            >>> import scriptconfig
            >>> self = scriptconfig.Config.demo()
            >>> parser = self.argparse()
            >>> parser.print_help()
            >>> # xdoctest: +REQUIRES(PY3)
            >>> # Python2 argparse does a hard sys.exit instead of raise
            >>> ns, extra = parser.parse_known_args()

        Example:
            >>> # You can now make instances of this class
            >>> import scriptconfig as scfg
            >>> class MyConfig(scfg.Config):
            >>>     description = 'my CLI description'
            >>>     default = {
            >>>         'path1':  scfg.Value(None, position=1, alias='src'),
            >>>         'path2':  scfg.Value(None, position=2, alias='dst'),
            >>>         'dry':  scfg.Value(False, isflag=True),
            >>>         'approx':  scfg.Value(False, isflag=False, alias=['a1', 'a2']),
            >>>     }
            >>> self = MyConfig()
            >>> special_options = True
            >>> parser = None
            >>> parser = self.argparse(special_options=special_options)
            >>> parser.print_help()
            >>> self._read_argv(argv=['objection', '42', '--path1=overruled!'])
            >>> print('self = {!r}'.format(self))

        Ignore:
            >>> self._read_argv(argv=['hi','--path1=foobar'])
            >>> self._read_argv(argv=['hi', 'hello', '--path1=foobar'])
            >>> self._read_argv(argv=['hi', 'hello', '--path1=foobar', '--help'])
            >>> self._read_argv(argv=['--path1=foobar', '--path1=baz'])
            >>> print('self = {!r}'.format(self))
        """
        import argparse

        if parser is None:
            parserkw = self._parserkw()
            parser = argparse.ArgumentParser(**parserkw)

        # Use a custom action to mark which values were explicitly set on
        # the command line
        parser._explicitly_given = set()

        parent = self

        class ParseAction(argparse.Action):
            def __init__(self, *args, **kwargs):
                super(ParseAction, self).__init__(*args, **kwargs)
                # With scriptconfig nothing should be required by default;
                # all positional arguments should have keyword arg variants.
                # Setting required=False here will prevent positional args
                # from erroring if they are not specified. I don't think
                # there are other side effects, but we should make sure that
                # is actually the case.
                self.required = False

                if self.type is None:
                    # Is this the right place to put this?
                    def _mytype(value):
                        key = self.dest
                        template = parent.default[key]
                        if not isinstance(template, Value):
                            # smartcast non-valued params from commandline
                            value = smartcast.smartcast(value)
                        else:
                            value = template.cast(value)
                        return value

                    self.type = _mytype

                # print('self.type = {!r}'.format(self.type))

            def __call__(action, parser, namespace, values, option_string=None):
                # print('CALL action = {!r}'.format(action))
                # print('option_string = {!r}'.format(option_string))
                # print('values = {!r}'.format(values))

                if isinstance(values, list) and len(values):
                    # We got a list of lists, which we hack into a flat list
                    if isinstance(values[0], list):
                        import itertools as it
                        values = list(it.chain(*values))

                setattr(namespace, action.dest, values)
                parser._explicitly_given.add(action.dest)

        # IIRC: this ensures each key has a real Value class
        _metadata = {
            key: self._data[key]
            for key, value in self._default.items()
            if isinstance(self._data[key], Value)
        }  # :type: Dict[str, Value]
        _positions = {k: v.position for k, v in _metadata.items()
                      if v.position is not None}
        if _positions:
            if ub.find_duplicates(_positions.values()):
                raise Exception('two values have the same position')
            _keyorder = ub.oset(ub.argsort(_positions))
            _keyorder |= (ub.oset(self._default) - _keyorder)
        else:
            _keyorder = list(self._default.keys())

        def _add_arg(parser, name, key, argkw, positional, isflag, isalias):
            _argkw = argkw.copy()

            if isalias:
                _argkw['help'] = 'alias of {}'.format(key)
                _argkw.pop('default', None)
                # flags cannot have flag aliases
                isflag = False

            elif positional:
                parser.add_argument(name, **_argkw)

            if isflag:
                # Can we support both flag and setitem methods of cli
                # parsing?
                if not isinstance(_argkw.get('default', None), bool):
                    raise ValueError('can only use isflag with bools')
                _argkw.pop('type', None)
                _argkw.pop('choices', None)
                _argkw.pop('action', None)
                _argkw.pop('nargs', None)
                _argkw['dest'] = key

                _argkw_true = _argkw.copy()
                _argkw_true['action'] = 'store_true'

                _argkw_false = _argkw.copy()
                _argkw_false['action'] = 'store_false'
                _argkw_false.pop('help', None)

                parser.add_argument('--' + name, **_argkw_true)
                parser.add_argument('--no-' + name, **_argkw_false)
            else:
                parser.add_argument('--' + name, **_argkw)

        mode = 1

        alias_registry = []
        for key, value in self._data.items():
            # key: str
            # value: Any | Value
            argkw = {}
            argkw['help'] = ''
            positional = None
            isflag = False
            if key in _metadata:
                # Use the metadata in the Value class to enhance argparse
                _value = _metadata[key]
                argkw.update(_value.parsekw)
                value = _value.value
                isflag = _value.isflag
                positional = _value.position
            else:
                _value = value if isinstance(value, Value) else None

            if not argkw['help']:
                argkw['help'] = '<undocumented>'

            argkw['default'] = value
            argkw['action'] = ParseAction

            name = key
            _add_arg(parser, name, key, argkw, positional, isflag, isalias=False)

            if _value is not None:
                if _value.alias:
                    alts = _value.alias
                    alts = alts if ub.iterable(alts) else [alts]
                    for alias in alts:
                        tup = (alias, key, argkw)
                        alias_registry.append(tup)
                        if mode == 0:
                            name = alias
                            _add_arg(parser, name, key, argkw, positional, isflag, isalias=True)

        if mode == 1:
            for tup in alias_registry:
                (alias, key, argkw) = tup
                name = alias
                dest = key
                _add_arg(parser, name, dest, argkw, positional, isflag, isalias=True)

        if special_options:
            parser.add_argument('--config', default=None, help=ub.codeblock(
                '''
                special scriptconfig option that accepts the path to an on-disk
                configuration file, and loads that into this {!r} object.
                ''').format(self.__class__.__name__))

            parser.add_argument('--dump', default=None, help=ub.codeblock(
                '''
                If specified, dump this config to disk.
                ''').format(self.__class__.__name__))

            parser.add_argument('--dumps', action='store_true', help=ub.codeblock(
                '''
                If specified, dump this config to stdout.
                ''').format(self.__class__.__name__))

        return parser
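As a hedged usage sketch of the --flag / --no-flag pairs that _add_arg registers for isflag values (the config class here is hypothetical, but _read_argv and scfg.Value(..., isflag=True) both appear in the docstring above):

import scriptconfig as scfg

class DemoConfig(scfg.Config):
    default = {
        'dry': scfg.Value(False, isflag=True),
    }

config = DemoConfig()
config._read_argv(argv=['--dry'])     # the store_true action sets dry=True
print(config['dry'])
config._read_argv(argv=['--no-dry'])  # the store_false twin sets dry=False
print(config['dry'])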
Example #6
def main():
    candidates = None
    mode = 'ultra'
    if mode == 'great':
        candidate_csv_text = ub.codeblock(
            '''
            registeel,LOCK_ON,FLASH_CANNON,FOCUS_BLAST,22,10,14,15
            stunfisk_galarian,MUD_SHOT,ROCK_SLIDE,EARTHQUAKE,25,11,14,14
            # altaria,DRAGON_BREATH,SKY_ATTACK,DRAGON_PULSE,26.5,14,12,13

            skarmory,AIR_SLASH,SKY_ATTACK,FLASH_CANNON,26,11,13,10

            azumarill,BUBBLE,ICE_BEAM,HYDRO_PUMP,38,12,15,13
            dewgong,ICE_SHARD,ICY_WIND,WATER_PULSE,26.5,15,08,15

            # umbreon,SNARL,FOUL_PLAY,LAST_RESORT,24.5,15,10,15
            # farfetchd_galarian,FURY_CUTTER,LEAF_BLADE,BRAVE_BIRD,33.5,12,15,15

            hypno,CONFUSION,SHADOW_BALL,THUNDER_PUNCH,25.5,13,15,14
            # hypno,CONFUSION,SHADOW_BALL,FOCUS_BLAST,25.5,13,15,14

            # machamp-shadow,COUNTER,ROCK_SLIDE,CROSS_CHOP,18,5,11,10
            victreebel_shadow-shadow,RAZOR_LEAF,LEAF_BLADE,FRUSTRATION,22.5,4,14,14
            ''')

        candidate_explicit = [
            Pokemon('medicham', ivs=[7, 15, 14], level=41.5),
            Pokemon('medicham', ivs=[7, 15, 14], level=43.0),
            Pokemon('medicham', ivs=[7, 15, 14]).maximize(1500),
            Pokemon('machamp', [1, 15, 6], cp=1493),
            Pokemon('altaria', [1, 11, 8], cp=1496),
            Pokemon('skarmory', [0, 15, 13], cp=1495),
            Pokemon('umbreon', [1, 8, 8], cp=1495),
            Pokemon('registeel', [10, 14, 15], cp=1487),
            Pokemon('stunfisk', [11, 14, 14], form='Galarian', cp=1498),
            Pokemon('cresselia', [7, 14, 8], cp=1493),
            Pokemon('vigoroth', [0, 10, 9], cp=1495),
            Pokemon('drifblim', [4, 14, 13], cp=1498),
            Pokemon('haunter', [6, 13, 15], cp=1498),
            Pokemon('mantine', [6, 13, 14], cp=1497),
            Pokemon('politoed', [3, 5, 13], cp=1493),
            Pokemon('charizard', [3, 15, 14], cp=1485),
            Pokemon('gengar', [5, 11, 14], cp=1483),
            Pokemon('mew', [15, 12, 11], cp=1470),
            Pokemon('dewgong', [15, 8, 15]).maximize(1500),
            Pokemon('azumarill', [12, 15, 13]).maximize(1500),
            Pokemon('hypno', [13, 15, 14]).maximize(1500),
        ]
        for cand in candidate_explicit:
            cand.populate_cp()
        stat_products = [cand.stat_product for cand in candidate_explicit]
        sortx = ub.argsort(stat_products)
        candidate_explicit = list(ub.take(candidate_explicit, sortx))
        stat_products = list(ub.take(stat_products, sortx))
        print('stat_products = {}'.format(ub.repr2(stat_products, nl=1)))
        print('candidate_explicit = {}'.format(ub.repr2(candidate_explicit, nl=1)))

        for cand in candidate_explicit:
            print('cand.adjusted = {}, {:.2f}, {}'.format(ub.repr2(cand.adjusted, nl=0, precision=2), cand.stat_product, cand))

    if mode == 'ultra':
        candidate_csv_text = ub.codeblock(
            '''
            cresselia,PSYCHO_CUT,MOONBLAST,FUTURE_SIGHT
            togekiss,CHARM,FLAMETHROWER,ANCIENT_POWER
            articuno,ICE_SHARD,ICY_WIND,HURRICANE
            swampert,MUD_SHOT,MUDDY_WATER,EARTHQUAKE
            venusaur,VINE_WHIP,FRENZY_PLANT,SLUDGE_BOMB
            ''')

        candidates = [
            Pokemon('Gengar', (7, 14, 14), cp=2500, moves=['SHADOW_CLAW', 'SHADOW_PUNCH', 'SHADOW_BALL']),
            Pokemon('Togekiss', (15, 15, 14), cp=2469, moves=['CHARM', 'FLAMETHROWER', 'AERIAL_ACE']),
            Pokemon('Venusaur', (15, 13, 13), cp=2482, moves=['VINE_WHIP', 'FRENZY_PLANT', 'SLUDGE_BOMB']),
            Pokemon('Muk', (9, 7, 4), cp=2486, form='Alola', moves=['SNARL', 'DARK_PULSE', 'SLUDGE_WAVE']),
            Pokemon('Swampert', (0, 2, 14), cp=2500, moves=['WATER_GUN', 'HYDRO_CANNON', 'SLUDGE_WAVE']),
            Pokemon('Empoleon', (0, 10, 14), cp=2495, moves=['WATERFALL', 'HYDRO_CANNON', 'DRILL_PECK']),
            Pokemon('sirfetch’d', (4, 11, 12), cp=2485, form='Galarian', moves=['COUNTER', 'CLOSE_COMBAT', 'LEAF_BLADE']),
        ]
    # else:
    #     raise KeyError(mode)

    if candidates is None:
        candidates = []
        for line in candidate_csv_text.split('\n'):
            line = line.strip()
            if line.startswith('#'):
                continue
            if line:
                row = line.split(',')
                cand = Pokemon.from_pvpoke_row(row)
                candidates.append(cand)

    print(ub.repr2(api.learnable))

    if mode == 'ultra':
        base = 'https://pvpoke.com/team-builder/all/2500'
        base = 'https://pvpoke.com/team-builder/premier/2500'
    elif mode == 'great':
        base = 'https://pvpoke.com/team-builder/all/1500'
    sep = '%2C'
    import itertools as it
    print('candidates = {!r}'.format(candidates))
    for team in it.combinations(candidates, 3):
        # if not any('registeel' in p.name for p in team):
        #     continue
        # if not any('victree' in p.name for p in team):
        #     continue
        # if len(set(p.name for p in team)) != 3:
        #     continue
        suffix = sep.join([p.to_pvpoke_url() for p in team])
        url = base + '/' + suffix
        print(url)
Example #7
def main():
    mode = 'great'
    if mode == 'great':
        candidate_csv_text = ub.codeblock('''
            registeel,LOCK_ON,FLASH_CANNON,FOCUS_BLAST,22,10,14,15
            stunfisk_galarian,MUD_SHOT,ROCK_SLIDE,EARTHQUAKE,25,11,14,14
            # altaria,DRAGON_BREATH,SKY_ATTACK,DRAGON_PULSE,26.5,14,12,13

            skarmory,AIR_SLASH,SKY_ATTACK,FLASH_CANNON,26,11,13,10

            azumarill,BUBBLE,ICE_BEAM,HYDRO_PUMP,38,12,15,13
            dewgong,ICE_SHARD,ICY_WIND,WATER_PULSE,26.5,15,08,15

            # umbreon,SNARL,FOUL_PLAY,LAST_RESORT,24.5,15,10,15
            # farfetchd_galarian,FURY_CUTTER,LEAF_BLADE,BRAVE_BIRD,33.5,12,15,15

            hypno,CONFUSION,SHADOW_BALL,THUNDER_PUNCH,25.5,13,15,14
            # hypno,CONFUSION,SHADOW_BALL,FOCUS_BLAST,25.5,13,15,14

            # machamp-shadow,COUNTER,ROCK_SLIDE,CROSS_CHOP,18,5,11,10
            victreebel_shadow-shadow,RAZOR_LEAF,LEAF_BLADE,FRUSTRATION,22.5,4,14,14
            ''')
    elif mode == 'ultra':
        candidate_csv_text = ub.codeblock('''
            cresselia,PSYCHO_CUT,MOONBLAST,FUTURE_SIGHT
            togekiss,CHARM,FLAMETHROWER,ANCIENT_POWER
            articuno,ICE_SHARD,ICY_WIND,HURRICANE
            swampert,MUD_SHOT,MUDDY_WATER,EARTHQUAKE
            venusaur,VINE_WHIP,FRENZY_PLANT,SLUDGE_BOMB
            ''')
    else:
        raise KeyError(mode)

    candidates = []
    for line in candidate_csv_text.split('\n'):
        line = line.strip()
        if line.startswith('#'):
            continue
        if line:
            row = line.split(',')
            cand = Pokemon.from_pvpoke_row(row)
            candidates.append(cand)

    # for self in candidates:
    #     self.populate_stats()

    # for self in candidates:
    #     print('self = {!r}'.format(self))
    #     print(self.calc_cp())

    print(ub.repr2(api.learnable))

    if mode == 'ultra':
        base = 'https://pvpoke.com/team-builder/all/2500'
    elif mode == 'great':
        base = 'https://pvpoke.com/team-builder/all/1500'
    sep = '%2C'
    import itertools as it
    print('candidates = {!r}'.format(candidates))
    for team in it.combinations(candidates, 3):
        # if not any('registeel' in p.name for p in team):
        #     continue
        if not any('victree' in p.name for p in team):
            continue
        if len(set(p.name for p in team)) != 3:
            continue
        suffix = sep.join([p.to_pvpoke_url() for p in team])
        url = base + '/' + suffix
        print(url)
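The URL construction at the end amounts to joining each team member's pvpoke fragment with a URL-encoded comma. A sketch with hypothetical fragments standing in for Pokemon.to_pvpoke_url():

import itertools as it

members = ['azumarill-38-12-15-13', 'skarmory-26-11-13-10',
           'hypno-25.5-13-15-14', 'registeel-22-10-14-15']
base = 'https://pvpoke.com/team-builder/all/1500'
sep = '%2C'  # URL-encoded comma
for team in it.combinations(members, 3):
    print(base + '/' + sep.join(team))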
Example #8
    candidates = seagate_msrps
    for cand in candidates:
        cand['cost_per_TB'] = cand['cost'] / cand['TB']

    import pandas as pd
    df = pd.DataFrame(candidates)
    df = df.sort_values('cost_per_TB')
    print(df)


    print('candidates = {}'.format(ub.repr2(candidates, nl=1, precision=2, align=':')))


considering = ub.codeblock(
    '''
    https://www.lg.com/us/monitors/lg-27UK650-W-4k-uhd-led-monitor#

    https://www.lg.com/us/monitors/lg-27GL850-B-gaming-monitor

    https://www.amazon.com/Dell-U2720QM-UltraSharp-Ultra-Thin-DisplayPort/dp/B08F5J8S6Y?ref_=ast_sto_dp


    https://www.amazon.com/Dell-U2720QM-UltraSharp-Ultra-Thin-DisplayPort/dp/B08F5J8S6Y?ref_=ast_sto_dp

    # Has 2560x1440 resolution, which is what I like anyway
    #
    https://www.amazon.com/LG-27GL83A-B-Ultragear-Compatible-Monitor/dp/B07YGZL8XF/ref=sr_1_2_sspa?dchild=1&keywords=4k+monitor&qid=1619557709&sr=8-2-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEzNjBXRVNRRkNMWU5QJmVuY3J5cHRlZElkPUEwMDgxNzQ4MUQ3RE9SM0dTTkkyTCZlbmNyeXB0ZWRBZElkPUEwNTM2NTc0MTE5SVJJVzQySjJGQyZ3aWRnZXROYW1lPXNwX2F0ZiZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU=

    WQHD
    ''')
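The cost-per-TB ranking at the top of this example is self-contained once data is supplied. A runnable sketch with hypothetical MSRP rows standing in for seagate_msrps:

import pandas as pd
import ubelt as ub

candidates = [
    {'model': 'drive_a_16tb', 'cost': 330.0, 'TB': 16},  # hypothetical rows
    {'model': 'drive_b_8tb', 'cost': 200.0, 'TB': 8},
]
for cand in candidates:
    cand['cost_per_TB'] = cand['cost'] / cand['TB']
df = pd.DataFrame(candidates).sort_values('cost_per_TB')
print(df)
print('candidates = {}'.format(ub.repr2(candidates, nl=1, precision=2)))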
Example #9
 def __repr__(self):
     return ub.codeblock('''
         <This repr has newlines, and the first line is long:
            * line1
            * line2>
         ''')
Example #10
def main():
    import ubelt as ub
    header = ub.codeblock('''
        import ubelt as ub
        ti = ub.Timerit(100, bestof=10, verbose=2)

        d = {
            'keyboard_debug': False,
            'snapshot_after_error': True,  # Try to checkpoint before crashing
            'show_prog': True,
            'use_tqdm': None,
            'prog_backend': 'progiter',
            'ignore_inf_loss_parts': False,
            'use_tensorboard': True,
            'export_modules': [],
            'large_loss': 1000,
            'num_keep': 2,
            'keep_freq': 20,
        }

        num_inner_loops = 10000

        def access_dict_direct():
            for i in range(num_inner_loops):
                if d['ignore_inf_loss_parts']:
                    pass
        for timer in ti.reset('access_dict_direct'):
            with timer:
                access_dict_direct()

        ''')

    parts = [header]

    for n in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100, 1000]:
        func_text, funcname = _gen_cluttered_func(n=n)
        time_text = ub.codeblock('''
            {func_text}
            for timer in ti.reset('{funcname}'):
                with timer:
                    {funcname}()
            ''').format(func_text=func_text, funcname=funcname)
        parts.append(time_text)

    block = '\n'.join(parts)

    prog_text = ub.codeblock('''
        import ubelt as ub
        def main():
        {block}

        if __name__ == '__main__':
            main()
        ''').format(block=ub.indent(block))

    # prog_text = 'def main():\n' + ub.indent(block) + 'if __name__ == "__main__":\n    main()'
    fpath = 'bench_local_clutter.py'
    with open(fpath, 'w') as file:
        file.write(prog_text)

    ub.cmd('python ' + fpath, verbose=3)
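The generate-then-execute pattern above works independently of the benchmark content. A minimal sketch with a trivial body in place of the generated timing code:

import ubelt as ub

body = ub.codeblock(
    '''
    print('hello from a generated script')
    ''')
prog_text = ub.codeblock(
    '''
    def main():
    {body}

    if __name__ == '__main__':
        main()
    ''').format(body=ub.indent(body))
fpath = 'tmp_generated_demo.py'
with open(fpath, 'w') as file:
    file.write(prog_text)
ub.cmd('python ' + fpath, verbose=3)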
Example #11
def make_dummy_package(dpath, pkgname='mkinit_dummy_module'):
    """
    Creates a dummy package structure with or without __init__ files
    """
    root = ub.ensuredir(join(dpath, pkgname))
    ub.delete(root)
    ub.ensuredir(root)
    paths = {
        'root': root,
        'submod1': ub.touch(join(root, 'submod1.py')),
        'submod2': ub.touch(join(root, 'submod2.py')),
        'subdir1': ub.ensuredir(join(root, 'subdir1')),
        'subdir2': ub.ensuredir(join(root, 'subdir2')),
    }
    paths['subdir1_init'] = ub.touch(join(paths['subdir1'], '__init__.py'))
    paths['subdir2_init'] = ub.touch(join(paths['subdir2'], '__init__.py'))
    paths['root_init'] = ub.touch(join(paths['root'], '__init__.py'))

    ub.writeto(paths['subdir1_init'], ub.codeblock(
        '''
        simple_subattr1 = "hello world"
        simple_subattr2 = "hello world"
        _private_attr = "hello world"
        '''))

    ub.writeto(paths['subdir2_init'], ub.codeblock(
        '''
        __all__ = ['public_attr']

        public_attr = "hello world"
        private_attr = "hello world"
        '''))

    ub.writeto(paths['submod1'], ub.codeblock(
        '''
        import six

        attr1 = True
        attr2 = six.moves.zip

        # ------------------------

        if True:
            good_attr_01 = None

        if False:
            bad_attr_false1 = None

        if None:
            bad_attr_none1 = None

        # ------------------------

        if True:
            good_attr_02 = None
        else:
            bad_attr_true2 = None

        if False:
            bad_attr_false2 = None
        else:
            good_attr_03 = None

        if None:
            bad_attr_none2 = None
        else:
            good_attr_04 = None

        # ------------------------

        if True:
            good_attr_05 = None
        elif False:
            bad_attr3 = None
        else:
            bad_attr3 = None

        if False:
            bad_attr_elif_True3_0 = None
        elif True:
            good_attr_06 = None
        else:
            bad_attr_elif_True3_1 = None

        # ------------------------
        import sys

        if sys.version_info.major == 3:
            good_attr_07 = 'py3'
            bad_attr_uncommon4_1 = None
        else:
            good_attr_07 = 'py2'
            bad_attr_uncommon4_0 = None

        # ------------------------
        # This is static, so maybe another_val exists as a global
        if sys.version_info.major == good_attr_07:
            good_attr_08 = None
            bad_attr_uncommon5_1 = None
            bad_attr_uncommon5_0 = None
        elif sys:
            good_attr_08 = None
            bad_attr_uncommon5_1 = None
        else:
            good_attr_08 = None
            bad_attr_uncommon5_0 = None

        # ------------------------
        flag1 = sys.version_info.major < 10
        flag2 = sys.version_info.major > 10
        flag3 = sys.version_info.major > 10

        if flag1:
            bad_attr_num6 = 1
        elif flag2:
            bad_attr_num6 = 1
        elif flag3:
            bad_attr_num6 = 1

        if flag1:
            bad_attr_num6_0 = 1
        elif 0:
            bad_attr_num0 = 1
        elif 1:
            bad_attr_09 = 1
        else:
            bad_attr13 = 1

        if flag1:
            good_attr_09 = 1
        elif 1:
            good_attr_09 = 1
            bad_attr_09_1 = 1
        elif 2 == 3:
            pass

        # ------------------------

        if 'foobar':
            good_attr_10 = 1

        if False:
            bad_attr_str7 = 1
        elif (1, 2):
            good_attr_11 = 1
        elif True:
            bad_attr_true8 = 1

        # ------------------------

        if flag1 != flag2:
            good_attr_12 = None
        else:
            bad_attr_12 = None
            raise Exception

        # ------------------------

        try:
            good_attr_13 = None
            bad_attr_13 = None
        except Exception:
            good_attr_13 = None

        # ------------------------

        try:
            good_attr_14 = None
        except Exception:
            bad_attr_14 = None
            raise

        # ------------------------

        def func1():
            pass

        class class1():
            pass

        if __name__ == '__main__':
            bad_attr_main = None

        if __name__ == 'something_else':
            bad_something_else = None
        '''))
    return paths
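A hedged usage sketch: build the dummy package in a temporary directory and inspect the generated paths (assumes make_dummy_package above is in scope):

import tempfile

dpath = tempfile.mkdtemp()
paths = make_dummy_package(dpath, pkgname='mkinit_dummy_module')
print(paths['root'])
with open(paths['submod1']) as file:
    print(file.read()[:80])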
Example #12
def main():

    # TODO: find a better place for root
    ROOT = join(os.getcwd())
    # ROOT = '.'
    os.chdir(ROOT)

    NAME = 'pyhesaff'
    VERSION = '0.1.2'
    DOCKER_TAG = '{}-{}'.format(NAME, VERSION)

    QUAY_REPO = 'quay.io/erotemic/manylinux-for'
    DOCKER_URI = '{QUAY_REPO}:{DOCKER_TAG}'.format(**locals())

    dockerfile_fpath = join(ROOT, 'Dockerfile')

    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    """
    Notes:
        docker run --rm -it quay.io/pypa/manylinux2010_x86_64 /bin/bash
        ---
        ls /opt/python
    """

    BASE_IMAGE = 'quay.io/pypa/manylinux2010_x86_64'

    docker_code = ub.codeblock(f'''
        FROM {BASE_IMAGE}

        RUN yum install lz4-devel -y

        RUN MB_PYTHON_TAG=cp27-cp27m  && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp27-cp27mu  && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp35-cp35m  && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp36-cp36m  && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp37-cp37m  && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp38-cp38  && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja
        ''')

    docker_code2 = '\n\n'.join(
        [ub.paragraph(p) for p in docker_code.split('\n\n')])

    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code2, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code2)

    docker_build_cli = ' '.join([
        'docker',
        'build',
        '--tag {}'.format(DOCKER_TAG),
        '-f {}'.format(dockerfile_fpath),
        '.',
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    if ub.argflag('--dry'):
        print('DRY RUN')
        print('WOULD RUN')
        print(docker_build_cli)
    else:
        info = ub.cmd(docker_build_cli, verbose=3, shell=True)
        if info['ret'] != 0:
            print(ub.color_text('\n--- FAILURE ---', 'red'))
            print('Failed command:')
            print(info['command'])
            print(info['err'])
            print('NOTE: sometimes rerunning the command manually works')
            raise Exception('Building docker failed with exit code {}'.format(
                info['ret']))
        else:
            print(ub.color_text('\n--- SUCCESS ---', 'green'))

    print(
        ub.highlight_code(
            ub.codeblock(r'''
        # Finished creating the docker image.
        # To test / export / publish you can do something like this:

        # Test that we can get a bash terminal
        docker run -it {DOCKER_TAG} /bin/bash

        # Create a tag for the docker image
        docker tag {DOCKER_TAG} {DOCKER_URI}

        # Export your docker image to a file
        docker save -o ${ROOT}/{DOCKER_TAG}.docker.tar {DOCKER_TAG}

        # Login to a docker registry (we are using quay)

        # In some cases this works,
        docker login

        # But you may need to specify secret credentials
        load_secrets
        echo "QUAY_USERNAME = $QUAY_USERNAME"
        docker login -u $QUAY_USERNAME -p $QUAY_PASSWORD quay.io
        unload_secrets

        # Upload the docker image to quay.io
        docker push {DOCKER_URI}
        ''').format(
                NAME=NAME,
                ROOT=ROOT,
                DOCKER_TAG=DOCKER_TAG,
                DOCKER_URI=DOCKER_URI,
            ),
            'bash',
        ))

    PUBLISH = 0
    if PUBLISH:
        cmd1 = 'docker tag {DOCKER_TAG} {DOCKER_URI}'.format(**locals())
        cmd2 = 'docker push {DOCKER_URI}'.format(**locals())
        print('-- <push cmds> ---')
        print(cmd1)
        print(cmd2)
        print('-- </push cmds> ---')
Example #13
"""
Looks at the `git_*.{sh,py}` scripts and makes corresponding `git-*` scripts
"""
import glob
from os.path import dirname, join, basename, splitext
import ubelt as ub


SCRIPT_HEADER = ub.codeblock(
    r'''
    #!/bin/bash
    # References:
    # https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within

    SOURCE="${BASH_SOURCE[0]}"
    while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
      DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
      SOURCE="$(readlink "$SOURCE")"
      [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
    done

    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    ''')

SCRIPT_FOOTER_FMT = '$DIR/../{fname} "$@"'


def setup_git_scripts():
    dpath = dirname(__file__)

    git_sh_scripts = list(glob.glob(join(dpath, 'git_*.sh')))
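The excerpt cuts off here; a sketch of how one git-* wrapper would be assembled from SCRIPT_HEADER and SCRIPT_FOOTER_FMT (the source filename is hypothetical):

fname = 'git_squash.sh'
wrapper_text = SCRIPT_HEADER + '\n' + SCRIPT_FOOTER_FMT.format(fname=fname)
print(wrapper_text)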
Example #14
class MathSymbolsUnicode:
    """
    https://en.wikipedia.org/wiki/Mathematical_operators_and_symbols_in_Unicode

    https://www.quora.com/What-do-mathbb-C-mathbb-F-mathbb-H-mathbb-N-mathbb-Q-mathbb-R-mathbb-S-and-mathbb-Z-mean

    https://mathworld.wolfram.com/Doublestruck.html

    https://peterjamesthomas.com/maths-science/a-brief-taxonomy-of-numbers/

    http://xahlee.info/comp/unicode_math_operators.html

    # Example:
    #     >>>
    """
    sym_elementof   = 'ϵ'
    sym_finitefield = '𝔽'

    sym_natural     = 'ℕ'
    sym_integral    = 'ℤ'
    sym_rational    = 'ℚ'

    sym_complex     = 'ℂ'
    sym_quaternions = 'ℍ'
    sym_octernion   = '𝕆'

    sym_irrational  = 'ℙ'
    sym_real        = 'ℝ'

    sym_floating    = '𝕃'
    sym_list        = '[]'

    _greek_alphabet = ub.codeblock(
        """
        Α    α      Alpha     a
        Β    β      Beta      b
        Γ    γ      Gamma     g
        Δ    δ      Delta     d
        Ε    ε      Epsilon   e
        Ζ    ζ      Zeta      z
        Η    η      Eta       h
        Θ    θ      Theta     th
        Ι    ι      Iota      i
        Κ    κ      Kappa     k
        Λ    λ      Lambda    l
        Μ    μ      Mu        m
        Ν    ν      Nu        n
        Ξ    ξ      Xi        x
        Ο    ο      Omicron   o
        Π    π      Pi        p
        Ρ    ρ      Rho       r
        Σ    σ,ς *  Sigma     s
        Τ    τ      Tau       t
        Υ    υ      Upsilon   u
        Φ    φ      Phi       ph
        Χ    χ      Chi       ch
        Ψ    ψ      Psi       ps
        Ω    ω      Omega     o

        Superscripts: ⁰ ¹ ² ³ ⁴ ⁵ ⁶ ⁷ ⁸ ⁹ ⁺ ⁻ ⁼ ⁽ ⁾ ⁿ ⁱ
        Subscripts: ₀ ₁ ₂ ₃ ₄ ₅ ₆ ₇ ₈ ₉ ₊ ₋ ₌ ₍ ₎ ₐ ₑ ₕ ᵢ ⱼ ₖ ₗ ₘ ₙ ₒ ₚ ᵣ ₛ ₜ ᵤ ᵥ ₓ
        """)

    _notes = ub.codeblock(
        """
        # Variables
        alpha = α
        beta = β
        delta = δ
        epsilon = ε
        theta = θ
        lambda = λ
        mu = μ
        phi = φ
        psi = ψ
        omega = Ω

        alpha   = α
        beta    = β
        gamma   = γ
        delta   = δ
        epsilon = ε
        zeta    = ζ
        eta     = η
        theta   = θ
        iota    = ι
        kappa   = κ
        lambda  = λ
        nu      = ν
        mu      = μ
        xi      = ξ
        omicron = ο
        pi      = π
        rho     = ρ
        sigma   = σ
        tau     = τ
        upsilon = υ
        phi     = φ
        chi     = χ
        psi     = ψ
        omega   = ω


        # Special meta-numeric symbols
        pi            = π  # 3.14... ratio of circle circumference to the diameter
        tau           = 𝜏  # 6.28... ratio of circle circumference to its radius... it makes a tad more sense
        infinity      = ∞

        # Existential quantifiers
        forall        = ∀
        exists        = ∃
        forall        = ∀
        not_exists    = ∄

        delta_upper   = ∆

        # Calculus
        partial       = ∂
        integral      = ∫

        # Relational
        eq        = =
        ne        = ≠
        le        = ≦
        ge        = ≥
        lt        = <
        gt        = >
        approx_eq = ≈
        approx_ne = ≇
        strict_approx_ne = ≆
        propor    = ∝
        equiv     = ≡
        not_equiv = ≢

        # n-ary operations
        product       = ∏
        summation     = ∑
        big_isect     = ⋂
        big_union     = ⋃

        union = ∪

        # Root operations
        square_root   = √
        cube_root     = ∛
        quad_root     = ∜

        # Logical
        and           = ∧
        or            = ∨
        not           = ¬

        # Set operations
        emptyset      = ∅
        isect         = ∩

        # Membership
        elementof     = ϵ
        not_elementof = ∉

        subset_lt = ⊂  # strict subset of
        subset_gt = ⊃  # strict superset of
        subset_le = ⊆  # subset
        subset_ge = ⊇  # superset
        subset_not_ge = ⊉
        subset_not_gt = ⊅
        subset_not_lt = ⊄
        subset_not_le = ⊈
        """)
Example #15
CORE_LAYERS = ub.codeblock(
    '''
    layer {{
      bottom: "data"
      top: "conv1_1"
      name: "conv1_1"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 64
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv1_1"
      top: "conv1_1"
      name: "conv1_1_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv1_1"
      top: "conv1_1"
      name: "relu1_1"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv1_1"
      top: "conv1_2"
      name: "conv1_2"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 64
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv1_2"
      top: "conv1_2"
      name: "conv1_2_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv1_2"
      top: "conv1_2"
      name: "relu1_2"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv1_2"
      top: "pool1"
      top: "pool1_mask"
      name: "pool1"
      type: "Pooling"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      bottom: "pool1"
      top: "conv2_1"
      name: "conv2_1"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 128
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv2_1"
      top: "conv2_1"
      name: "conv2_1_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv2_1"
      top: "conv2_1"
      name: "relu2_1"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv2_1"
      top: "conv2_2"
      name: "conv2_2"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 128
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv2_2"
      top: "conv2_2"
      name: "conv2_2_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv2_2"
      top: "conv2_2"
      name: "relu2_2"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv2_2"
      top: "pool2"
      top: "pool2_mask"
      name: "pool2"
      type: "Pooling"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      bottom: "pool2"
      top: "conv3_1"
      name: "conv3_1"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 256
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv3_1"
      top: "conv3_1"
      name: "conv3_1_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv3_1"
      top: "conv3_1"
      name: "relu3_1"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv3_1"
      top: "conv3_2"
      name: "conv3_2"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 256
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv3_2"
      top: "conv3_2"
      name: "conv3_2_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv3_2"
      top: "conv3_2"
      name: "relu3_2"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv3_2"
      top: "conv3_3"
      name: "conv3_3"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 256
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv3_3"
      top: "conv3_3"
      name: "conv3_3_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv3_3"
      top: "conv3_3"
      name: "relu3_3"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv3_3"
      top: "pool3"
      top: "pool3_mask"
      name: "pool3"
      type: "Pooling"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      bottom: "pool3"
      top: "conv4_1"
      name: "conv4_1"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv4_1"
      top: "conv4_1"
      name: "conv4_1_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv4_1"
      top: "conv4_1"
      name: "relu4_1"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv4_1"
      top: "conv4_2"
      name: "conv4_2"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv4_2"
      top: "conv4_2"
      name: "conv4_2_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv4_2"
      top: "conv4_2"
      name: "relu4_2"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv4_2"
      top: "conv4_3"
      name: "conv4_3"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv4_3"
      top: "conv4_3"
      name: "conv4_3_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv4_3"
      top: "conv4_3"
      name: "relu4_3"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv4_3"
      top: "pool4"
      top: "pool4_mask"
      name: "pool4"
      type: "Pooling"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      bottom: "pool4"
      top: "conv5_1"
      name: "conv5_1"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv5_1"
      top: "conv5_1"
      name: "conv5_1_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv5_1"
      top: "conv5_1"
      name: "relu5_1"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv5_1"
      top: "conv5_2"
      name: "conv5_2"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv5_2"
      top: "conv5_2"
      name: "conv5_2_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv5_2"
      top: "conv5_2"
      name: "relu5_2"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv5_2"
      top: "conv5_3"
      name: "conv5_3"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv5_3"
      top: "conv5_3"
      name: "conv5_3_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv5_3"
      top: "conv5_3"
      name: "relu5_3"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv5_3"
      top: "pool5"
      top: "pool5_mask"
      name: "pool5"
      type: "Pooling"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      name: "upsample5"
      type: "Upsample"
      bottom: "pool5"
      top: "pool5_D"
      bottom: "pool5_mask"
      upsample_param {{
        scale: 2
        upsample_w: 30
        upsample_h: 23
      }}
    }}
    layer {{
      bottom: "pool5_D"
      top: "conv5_3_D"
      name: "conv5_3_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv5_3_D"
      top: "conv5_3_D"
      name: "conv5_3_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv5_3_D"
      top: "conv5_3_D"
      name: "relu5_3_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv5_3_D"
      top: "conv5_2_D"
      name: "conv5_2_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv5_2_D"
      top: "conv5_2_D"
      name: "conv5_2_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv5_2_D"
      top: "conv5_2_D"
      name: "relu5_2_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv5_2_D"
      top: "conv5_1_D"
      name: "conv5_1_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv5_1_D"
      top: "conv5_1_D"
      name: "conv5_1_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv5_1_D"
      top: "conv5_1_D"
      name: "relu5_1_D"
      type: "ReLU"
    }}
    layer {{
      name: "upsample4"
      type: "Upsample"
      bottom: "conv5_1_D"
      top: "pool4_D"
      bottom: "pool4_mask"
      upsample_param {{
        scale: 2
        upsample_w: 60
        upsample_h: 45
      }}
    }}
    layer {{
      bottom: "pool4_D"
      top: "conv4_3_D"
      name: "conv4_3_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv4_3_D"
      top: "conv4_3_D"
      name: "conv4_3_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv4_3_D"
      top: "conv4_3_D"
      name: "relu4_3_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv4_3_D"
      top: "conv4_2_D"
      name: "conv4_2_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 512
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv4_2_D"
      top: "conv4_2_D"
      name: "conv4_2_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv4_2_D"
      top: "conv4_2_D"
      name: "relu4_2_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv4_2_D"
      top: "conv4_1_D"
      name: "conv4_1_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 256
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv4_1_D"
      top: "conv4_1_D"
      name: "conv4_1_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv4_1_D"
      top: "conv4_1_D"
      name: "relu4_1_D"
      type: "ReLU"
    }}
    layer {{
      name: "upsample3"
      type: "Upsample"
      bottom: "conv4_1_D"
      top: "pool3_D"
      bottom: "pool3_mask"
      upsample_param {{
        scale: 2
      }}
    }}
    layer {{
      bottom: "pool3_D"
      top: "conv3_3_D"
      name: "conv3_3_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 256
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv3_3_D"
      top: "conv3_3_D"
      name: "conv3_3_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv3_3_D"
      top: "conv3_3_D"
      name: "relu3_3_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv3_3_D"
      top: "conv3_2_D"
      name: "conv3_2_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 256
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv3_2_D"
      top: "conv3_2_D"
      name: "conv3_2_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv3_2_D"
      top: "conv3_2_D"
      name: "relu3_2_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv3_2_D"
      top: "conv3_1_D"
      name: "conv3_1_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 128
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv3_1_D"
      top: "conv3_1_D"
      name: "conv3_1_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv3_1_D"
      top: "conv3_1_D"
      name: "relu3_1_D"
      type: "ReLU"
    }}
    layer {{
      name: "upsample2"
      type: "Upsample"
      bottom: "conv3_1_D"
      top: "pool2_D"
      bottom: "pool2_mask"
      upsample_param {{
        scale: 2
      }}
    }}
    layer {{
      bottom: "pool2_D"
      top: "conv2_2_D"
      name: "conv2_2_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 128
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv2_2_D"
      top: "conv2_2_D"
      name: "conv2_2_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv2_2_D"
      top: "conv2_2_D"
      name: "relu2_2_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv2_2_D"
      top: "conv2_1_D"
      name: "conv2_1_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 64
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv2_1_D"
      top: "conv2_1_D"
      name: "conv2_1_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv2_1_D"
      top: "conv2_1_D"
      name: "relu2_1_D"
      type: "ReLU"
    }}
    layer {{
      name: "upsample1"
      type: "Upsample"
      bottom: "conv2_1_D"
      top: "pool1_D"
      bottom: "pool1_mask"
      upsample_param {{
        scale: 2
      }}
    }}
    layer {{
      bottom: "pool1_D"
      top: "conv1_2_D"
      name: "conv1_2_D"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: 64
        pad: 1
        kernel_size: 3
      }}
    }}
    layer {{
      bottom: "conv1_2_D"
      top: "conv1_2_D"
      name: "conv1_2_D_bn"
      type: "BN"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 1
        decay_mult: 0
      }}
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      bottom: "conv1_2_D"
      top: "conv1_2_D"
      name: "relu1_2_D"
      type: "ReLU"
    }}
    layer {{
      bottom: "conv1_2_D"
      top: "conv1_1_D_output{n_classes}"
      name: "conv1_1_D_output{n_classes}"
      type: "Convolution"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
        num_output: {n_classes}
        pad: 1
        kernel_size: 3
      }}
    }}
    ''')
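The doubled braces throughout these templates are escape sequences for Python's str.format, so that only placeholders like {n_classes} are substituted. A minimal sketch of the same pattern (the tiny layer below is illustrative, not the real template):

import ubelt as ub

# Doubled braces survive .format() as literal prototxt braces.
template = ub.codeblock(
    '''
    layer {{
      name: "conv1_1_D_output{n_classes}"
      type: "Convolution"
      convolution_param {{
        num_output: {n_classes}
      }}
    }}
    ''')
print(template.format(n_classes=12))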
Example #16
def main():
    import os
    # ROOT = os.getcwd()  # alternative: run from the current checkout
    ROOT = ub.expandpath('~/code/hesaff')
    os.chdir(ROOT)

    VERSION = setup.version
    PY_VER = sys.version_info.major
    NAME = 'pyhesaff'
    tag = '{}-{}-py{}'.format(NAME, VERSION, PY_VER)

    # context_dpath = ub.ensuredir((ROOT, 'docker/context'))
    staging_dpath = ub.ensuredir((ROOT, 'docker/staging'))

    # Prestage the multibuild repo
    if not exists(join(staging_dpath, 'multibuild')):
        # FIXME: make robust in the case this fails
        info = ub.cmd(
            'git clone https://github.com/matthew-brett/multibuild.git',
            cwd=staging_dpath,
            verbose=3)

    if not exists(join(staging_dpath, 'opencv')):
        # FIXME: make robust in the case this fails
        opencv_version = '4.1.0'
        fpath = ub.grabdata(
            'https://github.com/opencv/opencv/archive/{}.zip'.format(
                opencv_version),
            verbose=1)
        ub.cmd('ln -s {} .'.format(fpath), cwd=staging_dpath, verbose=3)
        ub.cmd('unzip {}'.format(fpath), cwd=staging_dpath, verbose=3)
        import shutil
        shutil.move(join(staging_dpath, 'opencv-' + opencv_version),
                    join(staging_dpath, 'opencv'))

    stage_self(ROOT, staging_dpath)

    dockerfile_fpath = join(ROOT, 'Dockerfile')
    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    docker_code = ub.codeblock('''
        FROM quay.io/skvark/manylinux1_x86_64

        # SETUP ENV
        ARG MB_PYTHON_VERSION=3.6
        ARG ENABLE_CONTRIB=0
        ARG ENABLE_HEADLESS=1
        ENV PYTHON_VERSION=3.6
        ENV PYTHON_ROOT=/opt/python/cp36-cp36m/
        ENV PYTHONPATH=/opt/python/cp36-cp36m/lib/python3.6/site-packages/
        ENV PATH=/opt/python/cp36-cp36m/bin:$PATH
        ENV PYTHON_EXE=/opt/python/cp36-cp36m/python
        ENV MULTIBUILD_DIR=/root/code/multibuild
        ENV HOME=/root
        # params to bdist_wheel. used to set osx build target.
        ENV TEST_DEPENDS="numpy==1.11.1"
        ENV BDIST_PARAMS=""
        ENV USE_CCACHE=1
        ENV PLAT=x86_64
        ENV UNICODE_WIDTH=32

        # -e BUILD_COMMANDS="$build_cmds" \
        # -e PYTHON_VERSION="$MB_PYTHON_VERSION" \
        # -e UNICODE_WIDTH="$UNICODE_WIDTH" \
        # -e BUILD_COMMIT="$BUILD_COMMIT" \
        # -e CONFIG_PATH="$CONFIG_PATH" \
        # -e ENV_VARS_PATH="$ENV_VARS_PATH" \
        # -e WHEEL_SDIR="$WHEEL_SDIR" \
        # -e MANYLINUX_URL="$MANYLINUX_URL" \
        # -e BUILD_DEPENDS="$BUILD_DEPENDS" \
        # -e USE_CCACHE="$USE_CCACHE" \
        # -e REPO_DIR="$repo_dir" \
        # -e PLAT="$PLAT" \

        # These are defined in the parent image
        # ENV JPEG_INCLUDE_DIR=/opt/libjpeg-turbo/include
        # ENV JPEG_LIBRARY=/opt/libjpeg-turbo/lib64/libjpeg.a

        RUN mkdir -p /io
        WORKDIR /root

        # Setup code / scripts
        COPY docker/staging/multibuild /root/code/multibuild
        # Hack: rename gh-clone to gh_clone in the multibuild scripts
        RUN find $MULTIBUILD_DIR -iname "*.sh" -type f -exec sed -i 's/gh-clone/gh_clone/g' {} +

        # Copy bash configs (mirrors the environs)
        COPY docker/config.sh /root/config.sh
        COPY docker/bashrc.sh /root/.bashrc

        # Setup a virtualenv
        RUN source /root/.bashrc && \
            $PYTHON_EXE -m pip install --upgrade pip && \
            $PYTHON_EXE -m pip install virtualenv && \
            $PYTHON_EXE -m virtualenv --python=$PYTHON_EXE $HOME/venv

        # Install packages in virtual environment
        RUN source /root/.bashrc && \
            pip install cmake ninja scikit-build wheel numpy

        # This is very different for different operating systems
        # https://github.com/skvark/opencv-python/blob/master/setup.py
        COPY docker/staging/opencv /root/code/opencv
        RUN source /root/.bashrc && \
            source code/multibuild/common_utils.sh && \
            source code/multibuild/travis_linux_steps.sh && \
            mkdir -p /root/code/opencv/build && \
            cd /root/code/opencv/build && \
            cmake -G "Unix Makefiles" \
                   -DINSTALL_CREATE_DISTRIB=ON \
                   -DOPENCV_SKIP_PYTHON_LOADER=ON \
                   -DBUILD_opencv_apps=OFF \
                   -DBUILD_SHARED_LIBS=OFF \
                   -DBUILD_TESTS=OFF \
                   -DBUILD_PERF_TESTS=OFF \
                   -DBUILD_DOCS=OFF \
                   -DWITH_QT=OFF \
                   -DWITH_IPP=OFF \
                   -DWITH_V4L=ON \
                   -DBUILD_JPEG=OFF \
                   -DENABLE_PRECOMPILED_HEADERS=OFF \
                   -DJPEG_INCLUDE_DIR=/opt/libjpeg-turbo/include \
                   -DJPEG_LIBRARY=/opt/libjpeg-turbo/lib64/libjpeg.a \
                /root/code/opencv

        # Note: there is no need to compile the above with python
        # -DPYTHON3_EXECUTABLE=$PYTHON_EXE \
        # -DBUILD_opencv_python3=ON \
        # -DOPENCV_PYTHON3_INSTALL_PATH=python \

        RUN source /root/.bashrc && \
            source code/multibuild/common_utils.sh && \
            source code/multibuild/travis_linux_steps.sh && \
            cd /root/code/opencv/build && \
            make -j9 && make install

        COPY docker/staging/hesaff /root/code/hesaff

        # # Use skbuild to build hesaff
        # RUN source /root/.bashrc && \
        #     cd /root/code/hesaff && \
        #     CMAKE_FIND_LIBRARY_SUFFIXES=".a;.so" python setup.py build_ext --inplace
        # export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'

        # Use cmake to build hesaff (maybe not needed?)
        # RUN source /root/.bashrc && \
        #     mkdir -p /root/code/hesaff/build && \
        #     cd /root/code/hesaff/build && \
        #     CXXFLAGS="-std=c++11 $CXXFLAGS" cmake -G "Unix Makefiles" /root/code/hesaff && \
        #     make

        # Use skbuild to build hesaff
        RUN source /root/.bashrc && \
            cd /root/code/hesaff && \
            python setup.py build && \
            python setup.py bdist_wheel

        # RUN source /root/.bashrc && \
        #     pip install xdoctest
        ''')

    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code)

    docker_build_cli = ' '.join([
        'docker',
        'build',
        # '--build-arg PY_VER={}'.format(PY_VER),
        '--tag {}'.format(tag),
        '-f {}'.format(dockerfile_fpath),
        '.'
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    info = ub.cmd(docker_build_cli, verbose=3, shell=True)

    if info['ret'] != 0:
        print(ub.color_text('\n--- FAILURE ---', 'red'))
        print('Failed command:')
        print(info['command'])
        print(info['err'])
        print('NOTE: sometimes rerunning the command manually works')
        raise Exception('Building docker failed with exit code {}'.format(
            info['ret']))
    else:
        print(ub.color_text('\n--- SUCCESS ---', 'green'))

    DEPLOY = True

    if DEPLOY:
        VMNT_DIR = '{ROOT}/{NAME}-docker/vmnt'.format(NAME=NAME, ROOT=ROOT)
        print('VMNT_DIR = {!r}'.format(VMNT_DIR))
        ub.ensuredir(VMNT_DIR)

        # TODO: Correctly mangle the ffmpeg libs as done via
        # ls -a ~/.local/conda/envs/py36/lib/python3.6/site-packages/cv2/
        # ls ~/.local/conda/envs/py36/lib/python3.6/site-packages/cv2/.libs

        # cp code/hesaff/build/libhesaff.so /root/vmnt
        # cp /root/ffmpeg_build/lib/libavcodec.so.58 /root/vmnt
        # cp /root/ffmpeg_build/lib/libavformat.so.58 /root/vmnt
        # cp /root/ffmpeg_build/lib/libavutil.so.56 /root/vmnt
        # cp /root/ffmpeg_build/lib/libswscale.so.5 /root/vmnt
        inside_cmds = ' && '.join(
            ub.codeblock('''
            cp code/hesaff/dist/pyhesaff*.whl /root/vmnt
            ''').split('\n'))

        docker_run_cli = ' '.join([
            'docker', 'run', '-v {}:/root/vmnt/'.format(VMNT_DIR), '-it', tag,
            'bash -c "{}"'.format(inside_cmds)
        ])
        print(docker_run_cli)
        info = ub.cmd(docker_run_cli, verbose=3)
        assert info['ret'] == 0

        # import shutil
        # PKG_DIR = join(ROOT, 'pyhesaff')
        # shutil.copy(join(VMNT_DIR, 'libhesaff.so'), join(PKG_DIR, 'libhesaff-manylinux1_x86_64.so'))

        # TODO: do this correctly
        # shutil.copy(join(VMNT_DIR, 'libhesaff.so'), join(PKG_DIR, 'libavcodec.so.58'))
        # shutil.copy(join(VMNT_DIR, 'libavformat.so.58'), join(PKG_DIR, 'libavformat.so.58'))
        # shutil.copy(join(VMNT_DIR, 'libavutil.so.56'), join(PKG_DIR, 'libavutil.so.56'))
        # shutil.copy(join(VMNT_DIR, 'libswscale.so.5'), join(PKG_DIR, 'libswscale.so.5'))

    # print(ub.highlight_code(ub.codeblock(
    print(
        ub.highlight_code(
            ub.codeblock(r'''
        # Finished creating the docker image.
        # To test / export you can do something like this:

        VMNT_DIR={ROOT}/{NAME}-docker/vmnt
        mkdir -p $VMNT_DIR
        TAG={tag}

        # Test that we can get a bash terminal
        docker run -v $VMNT_DIR:/root/vmnt -it {tag} bash

        # Move deployment to the vmnt directory
        docker run -v $VMNT_DIR:/root/vmnt -it {tag} bash -c 'cd /root/code/hesaff && python3 -m xdoctest pyhesaff'

        # Run system tests
        docker run -v $VMNT_DIR:/root/vmnt -it {tag} bash -c 'cd /root/code/hesaff && python3 run_doctests.sh'

        # Inside bash test that we can fit a new model
        python -m pyhesaff demo

        mkdir -p {ROOT}/{NAME}-docker/dist
        docker save -o {ROOT}/{NAME}-docker/dist/{tag}.docker.tar {tag}
        ''').format(NAME=NAME, ROOT=ROOT, tag=tag), 'bash'))
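For reference, ub.cmd returns an info dict whose 'ret', 'out', and 'err' keys drive the failure handling above; a minimal sketch:

import ubelt as ub

# Run a trivial command and inspect the captured output and exit code.
info = ub.cmd('echo hello')
assert info['ret'] == 0
print(info['out'].strip())  # -> hello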
Example #17
def run_ibeis():
    r"""
    CommandLine:
        python -m ibeis
        python -m ibeis find_installed_tomcat
        python -m ibeis get_annot_groundtruth:1
    """
    import ibeis  # NOQA
    #ut.set_process_title('IBEIS_main')
    #main_locals = ibeis.main()
    #ibeis.main_loop(main_locals)
    #ut.set_process_title('IBEIS_main')
    cmdline_varags = ut.get_cmdline_varargs()
    if len(cmdline_varags) > 0 and cmdline_varags[0] == 'rsync':
        from ibeis.scripts import rsync_ibeisdb
        rsync_ibeisdb.rsync_ibsdb_main()
        sys.exit(0)

    if ub.argflag('--devcmd'):
        # Hack to let devs mess around when using an installer version
        # TODO: add more hacks
        #import utool.tests.run_tests
        #utool.tests.run_tests.run_tests()
        ut.embed()
    # Run the tests of other modules
    elif ub.argflag('--run-utool-tests'):
        raise Exception('Deprecated functionality')
    elif ub.argflag('--run-vtool_ibeis-tests'):
        raise Exception('Deprecated functionality')
    elif ub.argflag(('--run-ibeis-tests', '--run-tests')):
        raise Exception('Deprecated functionality')

    if ub.argflag('-e'):
        """
        ibeis -e print -a default -t default
        """
        # Run dev script if -e given
        import ibeis.dev  # NOQA
        ibeis.dev.devmain()
        print('... exiting')
        sys.exit(0)

    # Attempt to run a test using the function name alone
    # with the --tf flag
    # if False:
    #     import ibeis.tests.run_tests
    #     import ibeis.tests.reset_testdbs
    #     import ibeis.scripts.thesis
    #     ignore_prefix = [
    #         #'ibeis.tests',
    #         'ibeis.control.__SQLITE3__',
    #         '_autogen_explicit_controller']
    #     ignore_suffix = ['_grave']
    #     func_to_module_dict = {
    #         'demo_bayesnet': 'ibeis.unstable.demobayes',
    #     }
    #     ut.main_function_tester('ibeis', ignore_prefix, ignore_suffix,
    #                             func_to_module_dict=func_to_module_dict)

    #if ub.argflag('-e'):
    #    import ibeis
    #    expt_kw = ut.get_arg_dict(ut.get_func_kwargs(ibeis.run_experiment),
    #    prefix_list=['--', '-'])
    #    ibeis.run_experiment(**expt_kw)
    #    sys.exit(0)

    doctest_modname = ut.get_argval(
        ('--doctest-module', '--tmod', '-tm', '--testmod'),
        type_=str, default=None, help_='specify a module to doctest')
    if doctest_modname is not None:
        """
        Allow any doctest to be run from the main ibeis script

        python -m ibeis --tmod utool.util_str --test-align:0
        python -m ibeis --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show
        python -m ibeis --tf request_ibeis_query_L0:0 --show
        ./dist/ibeis/IBEISApp --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show  # NOQA
        ./dist/ibeis/IBEISApp --tmod utool.util_str --test-align:0
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --tmod utool.util_str --test-align:0
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-utool-tests
        ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-vtool_ibeis-tests
        """
        print('[ibeis] Testing module')
        mod_alias_list = {
            'exptdraw': 'ibeis.expt.experiment_drawing'
        }
        doctest_modname = mod_alias_list.get(doctest_modname, doctest_modname)
        module = ut.import_modname(doctest_modname)
        (nPass, nTotal, failed_list, error_report_list) = ut.doctest_funcs(module=module)
        retcode = 1 - (len(failed_list) == 0)
        #print(module)
        sys.exit(retcode)

    import ibeis
    main_locals = ibeis.main()
    execstr = ibeis.main_loop(main_locals)
    # <DEBUG CODE>
    if 'back' in main_locals and CMD:
        back = main_locals['back']
        front = getattr(back, 'front', None)  # NOQA
        #front = back.front
        #ui = front.ui
    ibs = main_locals['ibs']  # NOQA
    print('-- EXECSTR --')
    print(ub.codeblock(execstr))
    print('-- /EXECSTR --')
    exec(execstr)
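The CLI dispatch above leans on ub.argflag, which simply checks sys.argv for the given flag; a minimal sketch (the flag name is made up for illustration):

import sys
import ubelt as ub

# Pretend the flag was passed on the command line.
sys.argv.append('--demo-flag')
assert ub.argflag('--demo-flag')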
Example #18
def write_default_ipython_profile():
    """
    CommandLine:
        python ~/local/init/init_ipython_config.py

        python -c "import xdev, ubelt; xdev.startfile(ubelt.truepath('~/.ipython/profile_default'))"
        python -c "import xdev, ubelt; xdev.editfile(ubelt.truepath('~/.ipython/profile_default/ipython_config.py'))"

    References:
        http://2sn.org/python/ipython_config.py
    """
    dpath = ub.expandpath('~/.ipython/profile_default')
    ub.ensuredir(dpath)
    ipy_config_fpath = join(dpath, 'ipython_config.py')
    ipy_config_text = ub.codeblock(
        r'''
        # STARTBLOCK
        import six
        c = get_config()  # NOQA
        c.InteractiveShellApp.exec_lines = []
        if six.PY2:
            future_line = (
                'from __future__ import absolute_import, division, print_function, with_statement, unicode_literals')
            c.InteractiveShellApp.exec_lines.append(future_line)
            # Fix sip versions
            try:
                import sip
                # http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
                sip.setapi('QVariant', 2)
                sip.setapi('QString', 2)
                sip.setapi('QTextStream', 2)
                sip.setapi('QTime', 2)
                sip.setapi('QUrl', 2)
                sip.setapi('QDate', 2)
                sip.setapi('QDateTime', 2)
                if hasattr(sip, 'setdestroyonexit'):
                    sip.setdestroyonexit(False)  # This prevents a crash on windows
            except ImportError as ex:
                pass
            except ValueError as ex:
                print('Warning: Value Error: %s' % str(ex))
                pass
        c.InteractiveShellApp.exec_lines.append('%load_ext autoreload')
        c.InteractiveShellApp.exec_lines.append('%autoreload 2')
        #c.InteractiveShellApp.exec_lines.append('%pylab qt4')
        c.InteractiveShellApp.exec_lines.append('import numpy as np')
        c.InteractiveShellApp.exec_lines.append('import ubelt as ub')
        c.InteractiveShellApp.exec_lines.append('import xdev')
        c.InteractiveShellApp.exec_lines.append('import pandas as pd')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.max_columns = 40')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.width = 160')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.max_rows = 20')
        c.InteractiveShellApp.exec_lines.append('pd.options.display.float_format = lambda x: \'%.4f\' % (x,)')
        c.InteractiveShellApp.exec_lines.append('import networkx as nx')
        c.InteractiveShellApp.exec_lines.append('from os.path import *')
        c.InteractiveShellApp.exec_lines.append('from six.moves import cPickle as pickle')
        #c.InteractiveShellApp.exec_lines.append('if \'verbose\' not in vars():\\n    verbose = True')
        import ubelt as ub
        c.InteractiveShellApp.exec_lines.append(ub.codeblock(
            """
            class classproperty(property):
                def __get__(self, cls, owner):
                    return classmethod(self.fget).__get__(None, owner)()
            class vim(object):
                @classproperty
                def focus(cls):
                    from vimtk.cplat_ctrl import Window
                    Window.find('GVIM').focus()
                @classproperty
                def copy(cls):
                    import time
                    from vimtk.cplat_ctrl import Window
                    gvim_window = Window.find('GVIM')
                    gvim_window.focus()
                    import vimtk
                    import IPython
                    ipy = IPython.get_ipython()
                    lastline = ipy.history_manager.input_hist_parsed[-2]
                    vimtk.cplat.copy_text_to_clipboard(lastline)
                    from vimtk import xctrl
                    xctrl.XCtrl.do(
                        ('focus', 'GVIM'),
                        ('key', 'ctrl+v'),
                        ('focus', 'x-terminal-emulator.X-terminal-emulator')
                    )
            """
        ))
        #c.InteractiveShell.autoindent = True
        #c.InteractiveShell.colors = 'LightBG'
        #c.InteractiveShell.confirm_exit = False
        #c.InteractiveShell.deep_reload = True
        c.InteractiveShell.editor = 'gvim'
        #c.InteractiveShell.xmode = 'Context'
        # ENDBLOCK
        '''
    )
    with open(ipy_config_fpath, 'w') as file:
        file.write(ipy_config_text + '\n')
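The config text can be written at the function's indentation level because ub.codeblock dedents and strips the surrounding newlines; a minimal sketch:

import ubelt as ub

# The common leading indentation is removed; relative indentation is kept.
text = ub.codeblock(
    '''
    line one
        indented line
    ''')
assert text == 'line one\n    indented line'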
Example #19
def compare_results():
    print('Comparing results')
    import pandas as pd
    from tabulate import tabulate

    # Read in output of demo script
    measure_fpath = 'measurements_haul83.csv'
    py_df = pd.read_csv(measure_fpath)
    # Convert python length output from mm into cm for consistency
    py_df['fishlen'] = py_df['fishlen'] / 10
    py_df['current_frame'] = py_df['current_frame'].astype(int)

    # janky CSV parsing
    py_df['box_pts1'] = py_df['box_pts1'].map(lambda p: eval(p.replace(';', ','), np.__dict__))
    py_df['box_pts2'] = py_df['box_pts2'].map(lambda p: eval(p.replace(';', ','), np.__dict__))

    py_df['obox1'] = [ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(int)))
                      for pts in py_df['box_pts1']]
    py_df['obox2'] = [ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(int)))
                      for pts in py_df['box_pts2']]
    py_df.drop(['box_pts1', 'box_pts2'], axis=1, inplace=True)

    # Remap to matlab names
    py_df = py_df.rename(columns={
        'error': 'Err',
        'fishlen': 'fishLength',
        'range': 'fishRange',
    })

    # Load matlab results
    mat_df = _read_kresimir_results()

    FORCE_COMPARABLE_RANGE = True
    # FORCE_COMPARABLE_RANGE = False
    if FORCE_COMPARABLE_RANGE:
        # Be absolutely certain we are in comparable regions (may slightly bias
        # results, against python and in favor of matlab)
        min_frame = max(mat_df.current_frame.min(), py_df.current_frame.min())
        max_frame = min(mat_df.current_frame.max(), py_df.current_frame.max())
        print('min_frame = {!r}'.format(min_frame))
        print('max_frame = {!r}'.format(max_frame))

        mat_df = mat_df[(mat_df.current_frame >= min_frame) &
                        (mat_df.current_frame <= max_frame)]
        py_df = py_df[(py_df.current_frame >= min_frame) &
                      (py_df.current_frame <= max_frame)]

    intersect_frames = np.intersect1d(mat_df.current_frame, py_df.current_frame)
    print('intersecting frames = {} / {} (matlab)'.format(
        len(intersect_frames), len(set(mat_df.current_frame))))
    print('intersecting frames = {} / {} (python)'.format(
        len(intersect_frames), len(set(py_df.current_frame))))

    #  Reuse the hungarian algorithm implementation from ctalgo
    min_assign = ctalgo.FishStereoMeasurments.minimum_weight_assignment

    correspond = []
    for f in intersect_frames:
        pidxs = np.where(py_df.current_frame == f)[0]
        midxs = np.where(mat_df.current_frame == f)[0]

        pdf = py_df.iloc[pidxs]
        mdf = mat_df.iloc[midxs]

        ppts1 = np.array([o.center for o in pdf['obox1']])
        mpts1 = np.array([o.center for o in mdf['obox1']])

        ppts2 = np.array([o.center for o in pdf['obox2']])
        mpts2 = np.array([o.center for o in mdf['obox2']])

        dists1 = sklearn.metrics.pairwise.pairwise_distances(ppts1, mpts1)
        dists2 = sklearn.metrics.pairwise.pairwise_distances(ppts2, mpts2)

        # arbitrarily chosen threshold
        thresh = 100
        for i, j in min_assign(dists1):
            d1 = dists1[i, j]
            d2 = dists2[i, j]
            if d1 < thresh and d2 < thresh and abs(d1 - d2) < thresh / 4:
                correspond.append((pidxs[i], midxs[j]))
    correspond = np.array(correspond)

    # pflags = np.array(ub.boolmask(correspond.T[0], len(py_df)))
    mflags = np.array(ub.boolmask(correspond.T[1], len(mat_df)))
    # print('there are {} detections that seem to be in common'.format(len(correspond)))
    # print('The QC flags of the common detections are:       {}'.format(
    #     ub.dict_hist(mat_df[mflags]['QC'].values)))
    # print('The QC flags of the other matlab detections are: {}'.format(
    #     ub.dict_hist(mat_df[~mflags]['QC'].values)))

    print('\n\n----\n## All stats\n')
    print(ub.codeblock(
        '''
        Overall, the matlab script made {nmat} length measurements and the
        python script made {npy} length measurements.  Here is a table
        summarizing the average lengths / ranges / errors of each script:
        ''').format(npy=len(py_df), nmat=len(mat_df)))
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(py_df[key].mean(), py_df[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(mat_df[key].mean(), mat_df[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))

    print('\n\n----\n## Only COMMON detections\n')
    py_df_c = py_df.iloc[correspond.T[0]]
    mat_df_c = mat_df.iloc[correspond.T[1]]
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(py_df_c[key].mean(), py_df_c[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(mat_df_c[key].mean(), mat_df_c[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))

    print(ub.codeblock(
        '''
        Now, we investigate how many detections matlab and python made in common.
        (Note, choosing which detections in one version correspond to which in
         another is done using a heuristic based on distances between bbox
         centers and a thresholded minimum assignment problem).

        Python made {npy_c}/{nmat} = {percent:.2f}% of the detections matlab made

        ''').format(npy_c=len(py_df_c), nmat=len(mat_df),
                    percent=100 * len(py_df_c) / len(mat_df)))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))

    print('\n\n----\n## Evaluation using the QC code\n')
    hist_hit = ub.dict_hist(mat_df[mflags]['QC'].values)
    hist_miss = ub.dict_hist(mat_df[~mflags]['QC'].values)
    print(ub.codeblock(
        '''
        However, not all of those matlab detections were good. Because we have
        detections in correspondence with each other we can assign the python
        detections QC codes.

        Here is a histogram of the QC codes for these python detections:
        {}
        (Note: read histogram as <QC-code>: <frequency>)

        Here is a histogram of the other matlab detections that python did not
        find:
        {}

        To summarize:
            python correctly rejected {:.2f}% of the matlab QC=0 detections
            python correctly accepted {:.2f}% of the matlab QC=1 detections
            python correctly accepted {:.2f}% of the matlab QC=2 detections

            Note that because python made detections that matlab did not make,
            the remaining {} detections may be right or wrong, but there is
            no way to tell from this analysis.

        Lastly, here are the statistics for the common detections that had a
        non-zero QC code.
        ''').format(
            ub.repr2(hist_hit, nl=1),
            ub.repr2(hist_miss, nl=1),
            100 * hist_miss[0] / (hist_hit[0] + hist_miss[0]),
            100 * hist_hit[1] / (hist_hit[1] + hist_miss[1]),
            100 * hist_hit[2] / (hist_hit[2] + hist_miss[2]),
            len(py_df) - len(py_df_c)
                   )
    )

    is_qc = (mat_df_c['QC'] > 0).values
    mat_df_c = mat_df_c[is_qc]
    py_df_c = py_df_c[is_qc]
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(py_df_c[key].mean(), py_df_c[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(mat_df_c[key].mean(), mat_df_c[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))
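The correspondence step above solves a minimum-weight assignment between detection centers and then thresholds the matched distances. A minimal sketch of the same idea, using scipy's linear_sum_assignment as a stand-in for the ctalgo implementation:

import numpy as np
from scipy.optimize import linear_sum_assignment

# Pairwise center distances between two python and two matlab detections.
dists = np.array([[1.0, 50.0],
                  [40.0, 2.0]])
rows, cols = linear_sum_assignment(dists)
thresh = 100
pairs = [(int(i), int(j)) for i, j in zip(rows, cols) if dists[i, j] < thresh]
print(pairs)  # -> [(0, 0), (1, 1)]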
Example #20
CORE_LAYERS = ub.codeblock("""
    layer {{
      name: "norm"
      type: "LRN"
      bottom: "data"
      top: "norm"
      lrn_param {{
        local_size: 5
        alpha: 0.0001
        beta: 0.75
      }}
    }}
    layer {{
      name: "conv1"
      type: "Convolution"
      bottom: "norm"
      top: "conv1"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv1"
      top: "conv1"
      name: "conv1_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "relu1"
      type: "ReLU"
      bottom: "conv1"
      top: "conv1"
    }}
    layer {{
      name: "pool1"
      type: "Pooling"
      bottom: "conv1"
      top: "pool1"
      top: "pool1_mask"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      name: "conv2"
      type: "Convolution"
      bottom: "pool1"
      top: "conv2"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv2"
      top: "conv2"
      name: "conv2_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "relu2"
      type: "ReLU"
      bottom: "conv2"
      top: "conv2"
    }}
    layer {{
      name: "pool2"
      type: "Pooling"
      bottom: "conv2"
      top: "pool2"
      top: "pool2_mask"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      name: "conv3"
      type: "Convolution"
      bottom: "pool2"
      top: "conv3"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv3"
      top: "conv3"
      name: "conv3_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "relu3"
      type: "ReLU"
      bottom: "conv3"
      top: "conv3"
    }}
    layer {{
      name: "pool3"
      type: "Pooling"
      bottom: "conv3"
      top: "pool3"
      top: "pool3_mask"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      name: "conv4"
      type: "Convolution"
      bottom: "pool3"
      top: "conv4"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv4"
      top: "conv4"
      name: "conv4_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "relu4"
      type: "ReLU"
      bottom: "conv4"
      top: "conv4"
    }}
    layer {{
      name: "pool4"
      type: "Pooling"
      bottom: "conv4"
      top: "pool4"
      top: "pool4_mask"
      pooling_param {{
        pool: MAX
        kernel_size: 2
        stride: 2
      }}
    }}
    layer {{
      name: "upsample4"
      type: "Upsample"
      bottom: "pool4"
      bottom: "pool4_mask"
      top: "upsample4"
      upsample_param {{
        scale: 2
        pad_out_h: true
      }}
    }}
    layer {{
      name: "conv_decode4"
      type: "Convolution"
      bottom: "upsample4"
      top: "conv_decode4"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv_decode4"
      top: "conv_decode4"
      name: "conv_decode4_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "upsample3"
      type: "Upsample"
      bottom: "conv_decode4"
      bottom: "pool3_mask"
      top: "upsample3"
      upsample_param {{
        scale: 2
      }}
    }}
    layer {{
      name: "conv_decode3"
      type: "Convolution"
      bottom: "upsample3"
      top: "conv_decode3"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv_decode3"
      top: "conv_decode3"
      name: "conv_decode3_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "upsample2"
      type: "Upsample"
      bottom: "conv_decode3"
      bottom: "pool2_mask"
      top: "upsample2"
      upsample_param {{
        scale: 2
      }}
    }}
    layer {{
      name: "conv_decode2"
      type: "Convolution"
      bottom: "upsample2"
      top: "conv_decode2"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv_decode2"
      top: "conv_decode2"
      name: "conv_decode2_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "upsample1"
      type: "Upsample"
      bottom: "conv_decode2"
      bottom: "pool1_mask"
      top: "upsample1"
      upsample_param {{
        scale: 2
      }}
    }}
    layer {{
      name: "conv_decode1"
      type: "Convolution"
      bottom: "upsample1"
      top: "conv_decode1"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: 64
        kernel_size: 7
        pad: 3
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    layer {{
      bottom: "conv_decode1"
      top: "conv_decode1"
      name: "conv_decode1_bn"
      type: "BN"
      bn_param {{
        bn_mode: INFERENCE
        scale_filler {{
          type: "constant"
          value: 1
        }}
        shift_filler {{
          type: "constant"
          value: 0.001
        }}
     }}
    }}
    layer {{
      name: "conv_classifier_output{n_classes}"
      type: "Convolution"
      bottom: "conv_decode1"
      top: "conv_classifier_output{n_classes}"
      param {{
        lr_mult: 1
        decay_mult: 1
      }}
      param {{
        lr_mult: 2
        decay_mult: 0
      }}
      convolution_param {{
        num_output: {n_classes}
        kernel_size: 1
        weight_filler {{
          type: "msra"
        }}
        bias_filler {{
          type: "constant"
        }}
      }}
    }}
    """)
Example #21
                                core_layer_auto,
                                num_context_lines=10)))
    return core_layer_auto


FIT_FOOTER = ub.codeblock('''
    layer {{
      name: "loss"
      type: "SoftmaxWithLoss"
      bottom: "conv1_1_D_output{n_classes}"
      bottom: "label"
      top: "loss"
      softmax_param {{engine: CAFFE}}
      loss_param: {{
        weight_by_label_freqs: true
        {class_weights_text}
      }}
    }}
    layer {{
      name: "accuracy"
      type: "Accuracy"
      bottom: "conv1_1_D_output{n_classes}"
      bottom: "label"
      top: "accuracy"
      top: "per_class_accuracy"
    }}
    ''')

PREDICT_FOOTER = ub.codeblock('''
    layer {{
      name: "prob"
      type: "Softmax"
Example #22
def make_prototext(image_list_fpath, arch, mode='fit', batch_size=1,
                   n_classes=None, class_weights=None, ignore_label=None,
                   shuffle=None, params=None):

    assert mode in {'fit', 'predict'}
    mod = model_modules[arch]
    if shuffle is None:
        shuffle = (mode == 'fit')

    if n_classes is None:
        n_classes = len(class_weights)
    elif ignore_label is not None:
        # this is really weird
        # with 12 classes we need to make the number of outputs be 11 because
        # we are ignoring the last label. However, when class_weights are
        # passed in we only send it the used weights, so that's already the
        # right number. Not sure what happens when ignore_label=0 and not 11
        n_classes -= 1

    fmtdict = {
        'shuffle': str(shuffle).lower(),
        'batch_size': batch_size,
        'image_list_fpath': image_list_fpath,
        'n_classes': n_classes,
        'arch_name': arch,
    }

    if image_list_fpath is None:
        # Input layer when we use blobs
        # maybe use this def instead?
        # layer {
        #   name: "input"
        #   type: "Input"
        #   top: "data"
        #   input_param {
        #     shape {
        #       dim: 1
        #       dim: 3
        #       dim: 360
        #       dim: 480
        #     }
        #   }
        # }
        input_layer_fmt = ub.codeblock(
            '''
            input: "data"
            input_dim: {batch_size}
            input_dim: 3
            input_dim: 360
            input_dim: 480
            ''')
    else:
        # Layer when input is specified in a txt
        input_layer_fmt = ub.codeblock(
            '''
            name: "{arch_name}"
            layer {{
              name: "data"
              type: "DenseImageData"
              top: "data"
              top: "label"
              dense_image_data_param {{
                source: "{image_list_fpath}"
                batch_size: {batch_size}
                shuffle: {shuffle}
              }}
            }}
            '''
        )

    input_layer = input_layer_fmt.format(**fmtdict)

    if hasattr(mod, 'make_core_layers'):
        if params is not None:
            freeze_before = params['freeze_before']
            finetune_decay = params['finetune_decay']
        else:
            freeze_before = 0
            finetune_decay = 1
        core = mod.make_core_layers(n_classes, freeze_before, finetune_decay)
    else:
        core = mod.CORE_LAYERS.format(**fmtdict)

    if mode == 'fit':
        # remove batch-norm inference when fitting
        core = re.sub(r'^\s*bn_mode:\s*INFERENCE$', '', core, flags=re.M)
        class_weights_line = ['class_weighting: {}'.format(w) for w in class_weights]
        class_weights_line += ['ignore_label: {}'.format(ignore_label)]
        class_weights_text = ub.indent('\n'.join(class_weights_line), ' ' * 4).lstrip()
        fmtdict['class_weights_text'] = class_weights_text
        footer_fmt = mod.FIT_FOOTER
    else:
        footer_fmt = mod.PREDICT_FOOTER

    footer = footer_fmt.format(**fmtdict)

    text = '\n'.join([input_layer, core, footer])
    return text
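The re.sub call above relies on re.M so that ^ and $ match at each line, blanking every bn_mode line inside the template; a minimal sketch:

import re

# Remove the inference flag line-by-line, as the fit branch does.
core = 'bn_param {\n  bn_mode: INFERENCE\n  other_field: 1\n}'
core = re.sub(r'^\s*bn_mode:\s*INFERENCE$', '', core, flags=re.M)
print(core)  # the INFERENCE line is now empty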
Example #23
def main():
    from os.path import join
    import six
    import pytest
    import ubelt as ub
    import os
    from xdoctest import static_analysis as static
    dpath = ub.ensure_app_cache_dir('import_abi_test')
    text = ub.codeblock('''
        a = 42
        ''')

    os.chdir(dpath)

    modpath = join(dpath, 'foo.py')
    ub.writeto(modpath, text)
    import foo
    assert foo.a == 42

    tag = static._extension_module_tags()[0]

    # Q: Can we augment .py files with an ABI tag?
    # A: No
    modpath = join(dpath, 'bar.' + tag + '.py')
    ub.writeto(modpath, text)
    with pytest.raises(ImportError, match=".*No module named .?bar.*"):
        import bar  # NOQA

    # Q: Can we augment .so files with an ABI tag
    # A: Yes (note it's not a valid .so, so the import fails)
    modpath = join(dpath, 'baz.' + tag + '.so')
    ub.writeto(modpath, text)
    with pytest.raises(ImportError, match=".*file too short"):
        import baz  # NOQA

    # Q: Can we augment .so files with an **arbitrary** tag?
    # A: No
    modpath = join(dpath, 'buz.' + 'junktag' + '.so')
    ub.writeto(modpath, text)
    with pytest.raises(ImportError, match="No module named .*"):
        import buz  # NOQA

    # Q: Can we augment .so files with an "abi3" tag?
    # A: Yes, but not in python2
    modpath = join(dpath, 'biz.' + 'abi3' + '.so')
    ub.writeto(modpath, text)
    if six.PY3:
        with pytest.raises(ImportError, match=".*file too short"):
            import biz  # NOQA
    else:
        with pytest.raises(ImportError, match="No module named .*"):
            import biz  # NOQA

    # Q: Can we augment .so files with a semi-nice tag?
    # A: No
    modpath = join(dpath, 'spam.' + 'linux' + '.so')
    ub.writeto(modpath, text)
    with pytest.raises(ImportError, match="No module named .*"):
        import spam  # NOQA

    modpath = join(dpath, 'eggs.' + 'cp3-abi3-linux_x86_64' + '.so')
    ub.writeto(modpath, text)
    with pytest.raises(ImportError, match="No module named .*"):
        import eggs  # NOQA

    modpath = join(dpath, 'ham.' + 'py3-none-any' + '.so')
    ub.writeto(modpath, text)
    with pytest.raises(ImportError, match="No module named .*"):
        import ham  # NOQA
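The valid ABI tags probed here come from the interpreter's own extension-module suffix, which can be inspected directly (output varies by platform and version):

import sysconfig

# e.g. '.cpython-36m-x86_64-linux-gnu.so' on CPython 3.6 / linux
print(sysconfig.get_config_var('EXT_SUFFIX'))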
Example #24
def test_list_of_numpy():
    try:
        import numpy as np
    except ImportError:
        import pytest
        pytest.skip('numpy is optional')

    import ubelt as ub

    data = [
        np.zeros((3, 3), dtype=np.int32),
        np.zeros((3, 10), dtype=np.int32),
        np.zeros((3, 20), dtype=np.int32),
        np.zeros((3, 30), dtype=np.int32),
    ]
    text = ub.repr2(data, nl=2)
    print(text)
    assert repr(data) == repr(eval(text)), 'should produce eval-able code'
    assert text == ub.codeblock('''
        [
            np.array([[0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
        ]
        ''')
    text = ub.repr2(data, max_line_width=10000, nl=2)
    print(text)
    assert text == ub.codeblock('''
        [
            np.array([[0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
        ]
        ''')
    text = ub.repr2(data, nl=1)
    print(text)
    assert text == ub.codeblock('''
        [
            np.array([[0, 0, 0],[0, 0, 0],[0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
        ]
        ''')
    text = ub.repr2(data, nl=0)
    print(text)
    assert text == ub.codeblock('''
        [np.array([[0, 0, 0],[0, 0, 0],[0, 0, 0]], dtype=np.int32), np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)]
        ''')
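
For reference, the `nl` argument exercised by these asserts controls the depth at which `ub.repr2` inserts newlines. A minimal sketch on a small nested structure (assuming only that ubelt is installed):

import ubelt as ub

data = {'a': [1, 2], 'b': [3, 4]}
print(ub.repr2(data, nl=0))  # single line: {'a': [1, 2], 'b': [3, 4]}
print(ub.repr2(data, nl=1))  # newlines between the top-level items only
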
Example #25
0
def main():

    def argval(clikey, envkey=None, default=ub.NoParam):
        if envkey is not None:
            envval = os.environ.get(envkey)
            if envval:
                default = envval
        return ub.argval(clikey, default=default)

    DEFAULT_PY_VER = '{}.{}'.format(sys.version_info.major, sys.version_info.minor)
    PY_VER = argval('--pyver', 'MB_PYTHON_VERSION', default=DEFAULT_PY_VER)

    dpath = argval('--dpath', None, default=os.getcwd())
    PLAT = argval('--plat', 'PLAT', default='x86_64')

    UNICODE_WIDTH = argval('--unicode_width', 'UNICODE_WIDTH', '32')

    import multiprocessing
    MAKE_CPUS = argval('--make_cpus', 'MAKE_CPUS', multiprocessing.cpu_count() + 1)

    OPENCV_VERSION = '4.1.0'

    os.chdir(dpath)

    BASE = 'manylinux1_{}'.format(PLAT)
    BASE_REPO = 'quay.io/skvark'

    PY_TAG = 'cp{ver}-cp{ver}m'.format(ver=PY_VER.replace('.', ''))

    # do we need the unicode width in this tag?
    DOCKER_TAG = '{}-opencv{}-py{}'.format(BASE, OPENCV_VERSION, PY_VER)

    if not exists(join(dpath, 'opencv-' + OPENCV_VERSION)):
        # FIXME: make robust in the case this fails
        fpath = ub.grabdata('https://github.com/opencv/opencv/archive/{}.zip'.format(OPENCV_VERSION), dpath=dpath, verbose=1)
        ub.cmd('ln -s {} .'.format(fpath), cwd=dpath, verbose=3)
        ub.cmd('unzip {}'.format(fpath), cwd=dpath, verbose=3)

    dockerfile_fpath = join(dpath, 'Dockerfile_' + DOCKER_TAG)
    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    docker_code = ub.codeblock(
        '''
        FROM {BASE_REPO}/{BASE}

        # SETUP ENV
        ARG MB_PYTHON_VERSION={PY_VER}
        ENV PYTHON_VERSION={PY_VER}
        ENV PYTHON_ROOT=/opt/python/{PY_TAG}/
        ENV PYTHONPATH=/opt/python/{PY_TAG}/lib/python{PY_VER}/site-packages/
        ENV PATH=/opt/python/{PY_TAG}/bin:$PATH
        ENV PYTHON_EXE=/opt/python/{PY_TAG}/bin/python
        ENV HOME=/root
        ENV PLAT={PLAT}
        ENV UNICODE_WIDTH={UNICODE_WIDTH}

        # Update python environment
        RUN echo "$PYTHON_EXE"
        RUN $PYTHON_EXE -m pip install --upgrade pip && \
            $PYTHON_EXE -m pip install cmake ninja scikit-build wheel numpy

        # This is very different for different operating systems
        # https://github.com/skvark/opencv-python/blob/master/setup.py
        COPY opencv-{OPENCV_VERSION} /root/code/opencv
        RUN mkdir -p /root/code/opencv/build && \
            cd /root/code/opencv/build && \
            cmake -G "Unix Makefiles" \
                   -DINSTALL_CREATE_DISTRIB=ON \
                   -DOPENCV_SKIP_PYTHON_LOADER=ON \
                   -DBUILD_opencv_apps=OFF \
                   -DBUILD_SHARED_LIBS=OFF \
                   -DBUILD_TESTS=OFF \
                   -DBUILD_PERF_TESTS=OFF \
                   -DBUILD_DOCS=OFF \
                   -DWITH_QT=OFF \
                   -DWITH_IPP=OFF \
                   -DWITH_V4L=ON \
                   -DBUILD_JPEG=OFF \
                   -DENABLE_PRECOMPILED_HEADERS=OFF \
                /root/code/opencv

        # Note: there is no need to compile the above with python
        # -DPYTHON3_EXECUTABLE=$PYTHON_EXE \
        # -DBUILD_opencv_python3=ON \
        # -DOPENCV_PYTHON3_INSTALL_PATH=python \

        RUN cd /root/code/opencv/build && make -j{MAKE_CPUS} && make install
        '''.format(**locals()))

    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code)

    docker_build_cli = ' '.join([
        'docker', 'build',
        # '--build-arg PY_VER={}'.format(PY_VER),
        '--tag {}'.format(DOCKER_TAG),
        '-f {}'.format(dockerfile_fpath),
        '.'
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    info = ub.cmd(docker_build_cli, verbose=3, shell=True)

    if info['ret'] != 0:
        print(ub.color_text('\n--- FAILURE ---', 'red'))
        print('Failed command:')
        print(info['command'])
        print(info['err'])
        print('NOTE: sometimes rerunning the command manually works')
        raise Exception('Building docker failed with exit code {}'.format(info['ret']))
    else:
        # write out what the tag is
        with open(join(dpath, 'opencv-docker-tag.txt'), 'w') as file:
            file.write(DOCKER_TAG)
        print(ub.color_text('\n--- SUCCESS ---', 'green'))
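
The `argval` helper defined at the top of `main` resolves each setting by preferring an explicit CLI flag, then an environment variable, then a hard-coded default. A minimal sketch of that lookup order (the environment value here is hypothetical):

import os
import ubelt as ub

os.environ['MB_PYTHON_VERSION'] = '3.8'  # hypothetical environment setting
default = '3.10'
envval = os.environ.get('MB_PYTHON_VERSION')
if envval:
    default = envval
pyver = ub.argval('--pyver', default=default)
print(pyver)  # '3.8' unless --pyver was passed on the command line
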
Example #26
0
def test_list_of_numpy():
    import numpy as np
    import ubelt as ub

    data = [
        np.zeros((3, 3), dtype=np.int32),
        np.zeros((3, 10), dtype=np.int32),
        np.zeros((3, 20), dtype=np.int32),
        np.zeros((3, 30), dtype=np.int32),
    ]
    string = ub.repr2(data, nl=2)
    print(string)
    assert repr(data) == repr(eval(string)), 'should produce eval-able code'
    assert string == ub.codeblock('''
        [
            np.array([[0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
        ]
        ''')
    string = ub.repr2(data, max_line_width=10000, nl=2)
    print(string)
    assert string == ub.codeblock('''
        [
            np.array([[0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
        ]
        ''')
    string = ub.repr2(data, nl=1)
    print(string)
    assert string == ub.codeblock('''
        [
            np.array([[0, 0, 0],[0, 0, 0],[0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
            np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32),
        ]
        ''')
    string = ub.repr2(data, nl=0)
    print(string)
    assert string == ub.codeblock('''
        [np.array([[0, 0, 0],[0, 0, 0],[0, 0, 0]], dtype=np.int32), np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)]
        ''')
Example #27
0
def export_model_code(dpath, model, initkw=None, export_modules=[]):
    """
    Exports the class used to define a pytorch model as a new python module.

    Exports the minimum amount of code needed to make a self-contained Python
    module defining the pytorch model class. This exports the actual source
    code. The advantage of using this over pickle is that the original code can
    change arbitrarily because all dependencies on the original code are
    removed in the exported code.

    Args:
        dpath (str): directory to dump the model
        model (tuple or type or object): class or class instance (e.g. torch.nn.Module)
        initkw (dict): if specified, creates the function `make`, which
            initializes the network with the specific arguments.
        export_modules (List[str]): A list of modules that the exported code
            should not depend on. Any code referenced from these modules will
            be statically extracted and copied into the model definition.
            Note that this feature is experimental.

    Returns:
        str: static_modpath: path to the saved model file.
            While you could put the output path in your PYTHONPATH, it is best
            to use `ub.import_module_from_path` to "load" the model instead.

    Example:
        >>> from torch_liberator.exporter import export_model_code
        >>> from torchvision.models import densenet
        >>> import torchvision
        >>> from os.path import basename
        >>> initkw = {'growth_rate': 16}
        >>> model = densenet.DenseNet(**initkw)
        >>> dpath = ub.ensure_app_cache_dir('torch_liberator/tests')
        >>> static_modpath = export_model_code(dpath, model, initkw)
        >>> print('static_modpath = {!r}'.format(static_modpath))
        ...
        >>> mod_fname = (basename(static_modpath))
        >>> print('mod_fname = {!r}'.format(mod_fname))
        >>> if torchvision.__version__ == '0.2.2':
        >>>     if six.PY2:
        >>>         assert mod_fname == 'DenseNet_b7ec43.py', 'got={}'.format(mod_fname)
        >>>     else:
        >>>         assert mod_fname == 'DenseNet_256629.py', 'got={}'.format(mod_fname)
        >>> # now the module can be loaded
        >>> module = ub.import_module_from_path(static_modpath)
        >>> loaded = module.make()
        >>> assert model.features.denseblock1.denselayer1.conv2.out_channels == 16
        >>> assert loaded.features.denseblock1.denselayer1.conv2.out_channels == 16
        >>> assert model is not loaded
    """
    if isinstance(model, type):
        model_class = model
    else:
        model_class = model.__class__
    classname = model_class.__name__

    if initkw is None:
        raise NotImplementedError(
            'ERROR: The params passed to the model __init__ must be available')
        footer = ''
    else:
        # First see if we can get away with a simple encoding of initkw
        try:
            # Do not use repr. The text produced is non-deterministic for
            # dictionaries. Instead, use ub.repr2, which is deterministic.
            init_text = ub.repr2(initkw, nl=1)
            eval(init_text, {})
            init_code = ub.codeblock('initkw = {}').format(init_text)
        except Exception:
            # fallback to pickle
            warnings.warn('Initialization params might not be serialized '
                          'deterministically')
            init_bytes = repr(pickle.dumps(initkw, protocol=0))
            init_code = ub.codeblock('''
                import pickle
                initkw = pickle.loads({})
                ''').format(init_bytes)
        init_code = ub.indent(init_code).lstrip()
        # create a function to instantiate the class
        footer = '\n\n' + ub.codeblock('''
            __pt_export_version__ = '{__pt_export_version__}'


            def get_initkw():
                """ creates an instance of the model """
                {init_code}
                return initkw


            def get_model_cls():
                model_cls = {classname}
                return model_cls


            def make():
                """ creates an instance of the model """
                initkw = get_initkw()
                model_cls = get_model_cls()
                model = model_cls(**initkw)
                return model
            ''').format(classname=classname,
                        init_code=init_code,
                        __pt_export_version__=__pt_export_version__)

        # TODO: assert that the name "make" is not used in the model body

    body = closer.source_closure(model_class, expand_names=export_modules)

    body_footer = body + footer + '\n'
    # dont need to hash the header, because comments are removed anyway

    # with open('debug-closer.py', 'w') as file:
    #     file.write(body_footer)
    hashid = hash_code(body_footer)

    header = ub.codeblock('''
        """
        This module was autogenerated by torch_liberator/export/exporter.py
        original_module={}
        classname={}
        timestamp={}
        hashid={}
        """
        ''').format(model_class.__module__, classname, ub.timestamp(), hashid)

    sourcecode = header + '\n' + body_footer

    static_modname = classname + '_' + hashid[0:6]
    static_modpath = join(dpath, static_modname + '.py')
    with open(static_modpath, 'w') as file:
        file.write(sourcecode)
    return static_modpath
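
The pickle fallback above embeds `initkw` directly into the generated source. A minimal sketch of that round trip, using protocol 0 as the function does (the dict is illustrative):

import pickle

initkw = {'growth_rate': 16}
init_bytes = repr(pickle.dumps(initkw, protocol=0))
generated = 'import pickle\ninitkw = pickle.loads({})'.format(init_bytes)
namespace = {}
exec(generated, namespace)
assert namespace['initkw'] == initkw  # the generated module reproduces initkw
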
Example #28
0
INITD_TEMPLATE = ub.codeblock(
    '''
    #!/bin/sh
    ### BEGIN INIT INFO
    # Provides:          {name}
    # Required-Start:    $local_fs $network
    # Required-Stop:     $local_fs
    # Default-Start:     2 3 4 5
    # Default-Stop:      0 1 6
    # Short-Description: custom script
    # Description:       custom script
    ### END INIT INFO


    PIDFILE="/var/run/{name}.pid"
    NAME="{name}"
    DAEMON="{fpath}"

    # Return 0, process already started.
    # Return 1, process started
    do_start()
    {{
            if [ -f $PIDFILE ]; then
                    return 0
            fi
            $DAEMON &
            echo "$!" > $PIDFILE
            return 1
    }}

    # Return 0, process not started.
    # Return 1, kill process
    do_stop()
    {{
            if [ ! -f $PIDFILE ]; then
                    return 0
            fi
            kill -9 `cat $PIDFILE`
            rm $PIDFILE
            return 1
    }}

    case "$1" in
      start)
            do_start
            case "$?" in
                    0) echo "$NAME already started." ;;
                    1) echo "Started $NAME." ;;
            esac
            ;;
      stop)
            do_stop
            case "$?" in
                    0) echo "$NAME has not started." ;;
                    1) echo "Killed $NAME." ;;
            esac
            ;;

      status)
            if [ ! -r "$PIDFILE" ]; then
                    echo "$NAME is not running."
                    exit 3
            fi
            if read pid < "$PIDFILE" && ps -p "$pid" > /dev/null 2>&1; then
                    echo "$NAME is running."
                    exit 0
            else
                    echo "$NAME is not running but $PIDFILE exists."
                    exit 1
            fi
            ;;
      *)
            N=/etc/init.d/$NAME
            echo "Usage: $N {{start|stop|status}}" >&2
            exit 1
            ;;
    esac

    exit 0
    ''')
Example #29
0
def run_pvpoke_ultra_experiment():
    """
    https://pvpoke.com/battle/matrix/

    !pip install selenium
    """
    """
    Relevant page items:

    <button class="add-poke-btn button">+ Add Pokemon</button>
    '//*[@id="main"]/div[3]/div[3]/div/div[1]/button[1]'
    '/html/body/div[1]/div/div[3]/div[3]/div/div[1]/button[1]'

    <input class="poke-search" type="text" placeholder="Search name">
    /html/body/div[5]/div/div[3]/div[1]/input


    /html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/a/span[1]


    Level Cap
    /html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[5]


    # IV GROUP
    ivs-group

    save-poke

    import sys, ubelt
    sys.path.append(ubelt.expandpath('~/code/pypogo'))
    from pypogo.pvpoke_experiment import *  # NOQA
    from pypogo.pvpoke_experiment import _oldstuff
    """
    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
    from selenium.webdriver.support.ui import Select
    import ubelt as ub
    import os
    import pathlib
    import time
    import pandas as pd
    import pypogo

    # Requires the driver be in the PATH
    fpath = ensure_selenium_chromedriver()
    os.environ['PATH'] = os.pathsep.join(
        ub.oset(os.environ['PATH'].split(os.pathsep))
        | ub.oset([str(fpath.parent)]))

    url = 'https://pvpoke.com/battle/matrix/'
    # chrome_exe = ub.find_exe("google-chrome")
    driver = webdriver.Chrome()
    driver.get(url)

    league = 'Great'
    # league = 'Master40'
    if league == 'Great':
        league_box_target = 'Great League (CP 1500)'

        have_ivs = list(
            ub.oset([
                tuple([int(x) for x in p.strip().split(',') if x])
                for p in ub.codeblock('''
            10, 10, 12,
            10, 12, 14,
            10, 12, 14,
            10, 13, 10,
            10, 13, 12,
            10, 14, 14,
            11, 12, 14,
            11, 14, 12,
            11, 14, 15,
            11, 15, 11,
            11, 15, 11,
            11, 15, 12,
            11, 15, 12,
            12, 10, 12,
            12, 11, 12,
            12, 12, 15,
            12, 14, 11,
            12, 14, 15,
            12, 15, 11,
            12, 15, 12
            12, 15, 12,
            13, 11, 13
            13, 12, 10
            13, 12, 13,
            13, 13, 10,
            13, 13, 11,
            13, 15, 10,
            13, 15, 11,
            13, 15, 11,
            14, 10, 12,
            14, 11, 10,
            14, 11, 10,
            14, 13, 11
            14, 13, 14,
            15, 10, 12
            15, 11, 10,
            15, 11, 11,
            15, 12, 11
            ''').split('\n')
            ]))
        to_check_mons = [
            pypogo.Pokemon('Deoxys',
                           form='defense',
                           ivs=ivs,
                           moves=['Counter', 'Rock Slide',
                                  'Psycho Boost']).maximize(1500)
            for ivs in have_ivs
        ]
        meta_text = 'Great League Meta'
    elif league == 'Master40':
        league_box_target = 'Master League (Level 40)'
        meta_text = 'Master League Meta'
        # Test the effect of best buddies vs the master league
        to_check_mons = [
            pypogo.Pokemon('Mewtwo', ivs=[15, 15, 15], level=40),
            pypogo.Pokemon('Mewtwo', ivs=[15, 15, 15], level=41),
            pypogo.Pokemon('Garchomp', ivs=[15, 15, 15], level=40),
            pypogo.Pokemon('Garchomp', ivs=[15, 15, 15], level=41),
            pypogo.Pokemon('Dragonite', ivs=[15, 14, 15], level=40),
            pypogo.Pokemon('Dragonite', ivs=[15, 14, 15], level=41),
            pypogo.Pokemon('Giratina',
                           form='origin',
                           ivs=[15, 14, 15],
                           level=40),
            pypogo.Pokemon('Giratina',
                           form='origin',
                           ivs=[15, 14, 15],
                           level=41),
            pypogo.Pokemon('Kyogre', ivs=[15, 15, 14], level=40),
            pypogo.Pokemon('Kyogre', ivs=[15, 15, 14], level=41),
            pypogo.Pokemon('Groudon', ivs=[14, 14, 13], level=40),
            pypogo.Pokemon('Groudon', ivs=[14, 14, 13], level=41),
            pypogo.Pokemon('Togekiss', ivs=[15, 15, 14], level=40),
            pypogo.Pokemon('Togekiss', ivs=[15, 15, 14], level=41),
        ]
        for mon in to_check_mons:
            mon.populate_all()
    else:
        pass

    league_select = driver.find_elements_by_class_name('league-select')[0]
    league_select.click()
    league_select.send_keys(league_box_target)
    league_select.click()

    league_select.text.split('\n')
    league_select.send_keys('\n')
    league_select.send_keys('\n')

    def add_pokemon(mon):
        add_poke1_button = driver.find_elements_by_class_name(
            'add-poke-btn')[0]
        add_poke1_button.click()

        select_drop = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/select')

        if 1:
            import xdev
            all_names = select_drop.text.split('\n')
            distances = xdev.edit_distance(mon.display_name(), all_names)
            chosen_name = all_names[ub.argmin(distances)]
        else:
            chosen_name = mon.name

        search_box = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/input')
        search_box.send_keys(chosen_name)

        advanced_ivs_arrow = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/a/span[1]')
        advanced_ivs_arrow.click()

        level40_cap = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[2]'
        )
        level41_cap = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[3]'
        )
        level50_cap = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[4]'
        )
        level51_cap = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[5]'
        )

        if mon.level >= 51:
            level51_cap.click()
        elif mon.level >= 50:
            level50_cap.click()
        elif mon.level >= 41:
            level41_cap.click()
        elif mon.level >= 40:
            level40_cap.click()

        level_box = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/input'
        )
        level_box.click()
        level_box.clear()
        level_box.clear()
        level_box.send_keys(str(mon.level))

        iv_a = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[1]'
        )
        iv_d = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[2]'
        )
        iv_s = driver.find_element_by_xpath(
            '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[3]'
        )

        # TODO
        # driver.find_elements_by_class_name('move-select')

        iv_a.clear()
        iv_a.send_keys(str(mon.ivs[0]))

        iv_d.clear()
        iv_d.send_keys(str(mon.ivs[1]))

        iv_s.clear()
        iv_s.send_keys(str(mon.ivs[2]))

        # USE_MOVES = 1
        if mon.moves is not None:
            # mon.populate_all()

            fast_select = driver.find_element_by_xpath(
                '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[1]')
            fast_select.click()
            fast_select.send_keys(mon.pvp_fast_move['name'])
            fast_select.send_keys(Keys.ENTER)

            charge1_select = driver.find_element_by_xpath(
                '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[2]')
            charge1_select.click()
            charge1_select.send_keys(mon.pvp_charge_moves[0]['name'])
            charge1_select.send_keys(Keys.ENTER)

            charge2_select = driver.find_element_by_xpath(
                '/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[3]')
            charge2_select.click()
            charge2_select.send_keys(mon.pvp_charge_moves[1]['name'])
            charge2_select.send_keys(Keys.ENTER)

        save_button = driver.find_elements_by_class_name('save-poke')[0]
        save_button.click()

    quickfills = driver.find_elements_by_class_name('quick-fill-select')
    quickfill = quickfills[1]
    quickfill.text.split('\n')
    quickfill.click()
    quickfill.send_keys(meta_text)
    quickfill.click()

    import pypogo
    # mon1 = pypogo.Pokemon('Mewtwo', ivs=[15, 15, 15], level=40)
    # mon2 = pypogo.Pokemon('Mewtwo', ivs=[15, 15, 15], level=41)

    if 1:
        for mon in to_check_mons:
            pass
            add_pokemon(mon)

    shield_selectors = driver.find_elements_by_class_name('shield-select')
    shield_selectors[2].click()
    shield_selectors[2].send_keys('No shields')
    shield_selectors[2].send_keys(Keys.ENTER)

    shield_selectors[3].click()
    shield_selectors[3].send_keys('No shields')
    shield_selectors[3].send_keys(Keys.ENTER)

    shield_selectors[0].click()

    battle_btn = driver.find_elements_by_class_name('battle-btn')[0]
    battle_btn.click()

    # Clear previous downloaded files
    dlfolder = pathlib.Path(ub.expandpath('$HOME/Downloads'))
    for old_fpath in list(dlfolder.glob('_vs*.csv')):
        old_fpath.unlink()

    time.sleep(2.0)

    # Download new data
    dl_btn = driver.find_element_by_xpath(
        '//*[@id="main"]/div[4]/div[9]/div/a')
    dl_btn.click()

    while len(list(dlfolder.glob('_vs*.csv'))) < 1:
        time.sleep(0.1)  # poll gently instead of busy-waiting for the download

    new_fpaths = list(dlfolder.glob('_vs*.csv'))
    assert len(new_fpaths) == 1
    fpath = new_fpaths[0]

    data = pd.read_csv(fpath, header=0, index_col=0)

    if 1:
        # GROUP ANALYSIS
        data.sum(axis=1).sort_values()
        (data > 500).sum(axis=1).sort_values()

        flipped = []
        for key, col in data.T.iterrows():
            if not ub.allsame(col > 500):
                flipped.append(key)

        flip_df = data.loc[:, flipped]

        def color(x):
            if x > 500:
                return ub.color_text(str(x), 'green')
            else:
                return ub.color_text(str(x), 'red')

        print(flip_df.applymap(color))
        print(flip_df.columns.tolist())

        (data > 500)
    else:
        # PAIR ANALYSIS
        pairs = list(ub.iter_window(range(len(data)), step=2))
        for i, j in pairs:
            print('-----')
            matchup0 = data.iloc[i]
            matchup1 = data.iloc[j]
            delta = matchup1 - matchup0
            print(delta[delta != 0])

            wins0 = matchup0 > 500
            wins1 = matchup1 > 500
            flips = (wins0 != wins1)
            flipped_vs = matchup0.index[flips]
            num_flips = sum(flips)
            print('flipped_vs = {!r}'.format(flipped_vs))
            print('num_flips = {!r}'.format(num_flips))
            print(matchup0.mean())
            print(matchup1.mean())
            print(matchup1.mean() / matchup0.mean())
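
The pair analysis above walks the result rows two at a time via `ub.iter_window` with `step=2`; a minimal sketch of what that iterator yields:

import ubelt as ub

print(list(ub.iter_window(range(6), size=2, step=2)))
# [(0, 1), (2, 3), (4, 5)]
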
Example #30
0
    def build_pyproject(self):
        # data = toml.loads((self.template_dpath / 'pyproject.toml').read_text())
        # print('data = {}'.format(ub.repr2(data, nl=5)))
        pyproj_config = ub.AutoDict()
        # {'tool': {}}
        if 'binpy' in self.config['tags']:
            pyproj_config['build-system']['requires'] = [
                "setuptools>=41.0.1",
                # setuptools_scm[toml]
                "wheel",
                "scikit-build>=0.9.0",
                "numpy",
                "ninja"
            ]
            pyproj_config['tool']['cibuildwheel'].update({
                'build': "cp37-* cp38-* cp39-* cp310-*",
                'build-frontend': "build",
                'skip': "pp* cp27-* cp34-* cp35-* cp36-* *-musllinux_*",
                'build-verbosity': 1,
                'test-requires': ["-r requirements/tests.txt"],
                'test-command': "python {project}/run_tests.py"
            })

            if True:
                cibw = pyproj_config['tool']['cibuildwheel']
                req_commands = {
                    'linux': [
                        'yum install epel-release lz4 lz4-devel -y',
                    ],
                    'windows': [
                        'choco install lz4 -y',
                    ],
                    'macos': [
                        'brew install lz4',
                    ]
                }
                for plat in req_commands.keys():
                    cmd = ' && '.join(req_commands[plat])
                    cibw[plat]['before-all'] = cmd

        WITH_PYTEST_INI = 1
        if WITH_PYTEST_INI:
            pytest_ini_opts = pyproj_config['tool']['pytest']['ini_options']
            pytest_ini_opts['addopts'] = "-p no:doctest --xdoctest --xdoctest-style=google --ignore-glob=setup.py"
            pytest_ini_opts['norecursedirs'] = ".git ignore build __pycache__ dev _skbuild"
            pytest_ini_opts['filterwarnings'] = [
                "default",
                "ignore:.*No cfgstr given in Cacher constructor or call.*:Warning",
                "ignore:.*Define the __nice__ method for.*:Warning",
                "ignore:.*private pytest class or function.*:Warning",
            ]

        WITH_COVERAGE = 1
        if WITH_COVERAGE:
            pyproj_config['tool']['coverage'].update(
                toml.loads(
                    ub.codeblock('''
                [run]
                branch = true

                [report]
                exclude_lines =[
                    "pragma: no cover",
                    ".*  # pragma: no cover",
                    ".*  # nocover",
                    "def __repr__",
                    "raise AssertionError",
                    "raise NotImplementedError",
                    "if 0:",
                    "if trace is not None",
                    "verbose = .*",
                    "^ *raise",
                    "^ *pass *$",
                    "if _debug:",
                    "if __name__ == .__main__.:",
                    ".*if six.PY2:"
                ]

                omit=[
                    "{REPO_NAME}/__main__.py",
                    "*/setup.py"
                ]
                ''').format(REPO_NAME=self.repo_name)))

        text = toml.dumps(pyproj_config)
        return text
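
`build_pyproject` leans on `ub.AutoDict`, which auto-vivifies intermediate keys; that is what allows assignments like `pyproj_config['tool']['cibuildwheel']` without first creating the nested tables. A minimal sketch:

import ubelt as ub

conf = ub.AutoDict()
conf['tool']['cibuildwheel']['build-verbosity'] = 1
print(conf)  # {'tool': {'cibuildwheel': {'build-verbosity': 1}}}
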
Example #31
0
def get_func_sourcecode(func,
                        strip_def=False,
                        strip_ret=False,
                        strip_docstr=False,
                        strip_comments=False,
                        remove_linenums=None,
                        strip_decor=False):
    """
    wrapper around inspect.getsource that takes utool decorators into account.
    The strip flags are very hacky as of now.

    Args:
        func (function):
        strip_def (bool):
        strip_ret (bool): (default = False)
        strip_docstr (bool): (default = False)
        strip_comments (bool): (default = False)
        remove_linenums (None): (default = None)

    Example:
        >>> # build test data
        >>> func = get_func_sourcecode
        >>> strip_def = True
        >>> strip_ret = True
        >>> sourcecode = get_func_sourcecode(func, strip_def)
        >>> print('sourcecode = {}'.format(sourcecode))
    """
    inspect.linecache.clearcache()  # HACK: fix inspect bug
    sourcefile = inspect.getsourcefile(func)
    if hasattr(func, '_utinfo'):
        # DEPRECATE
        func2 = func._utinfo['orig_func']
        sourcecode = get_func_sourcecode(func2)
    elif sourcefile is not None and (sourcefile != '<string>'):
        try_limit = 2
        for num_tries in range(try_limit):
            try:
                #print(func)
                sourcecode = inspect.getsource(func)
                if not isinstance(sourcecode, six.text_type):
                    sourcecode = sourcecode.decode('utf-8')
                #print(sourcecode)
            except (IndexError, OSError, SyntaxError):
                print('WARNING: Error getting source')
                inspect.linecache.clearcache()
                if num_tries + 1 != try_limit:
                    tries_left = try_limit - num_tries - 1
                    print('Attempting %d more time(s)' % (tries_left))
                else:
                    raise
    else:
        sourcecode = None
    if strip_def:
        # hacky
        # TODO: use redbaron or something like that for a more robust approach
        sourcecode = textwrap.dedent(sourcecode)
        regex_decor = '^@.' + REGEX_NONGREEDY
        regex_defline = '^def [^:]*\\):\n'
        patern = '(' + regex_decor + ')?' + regex_defline
        RE_FLAGS = re.MULTILINE | re.DOTALL
        RE_KWARGS = {'flags': RE_FLAGS}
        nodef_source = re.sub(patern, '', sourcecode, **RE_KWARGS)
        sourcecode = textwrap.dedent(nodef_source)
        #print(sourcecode)
        pass
    if strip_ret:
        r""" \s is a whitespace char """
        return_ = named_field('return', 'return .*$')
        prereturn = named_field('prereturn', r'^\s*')
        return_bref = bref_field('return')
        prereturn_bref = bref_field('prereturn')
        regex = prereturn + return_
        repl = prereturn_bref + 'pass  # ' + return_bref
        sourcecode_ = re.sub(regex, repl, sourcecode, flags=re.MULTILINE)
        sourcecode = sourcecode_
        pass
    if strip_docstr or strip_comments:
        # pip install pyminifier
        # References: http://code.activestate.com/recipes/576704/
        #from pyminifier import minification, token_utils
        def remove_docstrings_or_comments(source):
            """
            TODO: commit clean version to pyminifier
            """
            import tokenize
            from six.moves import StringIO
            io_obj = StringIO(source)
            out = ''
            prev_toktype = tokenize.INDENT
            last_lineno = -1
            last_col = 0
            for tok in tokenize.generate_tokens(io_obj.readline):
                token_type = tok[0]
                token_string = tok[1]
                start_line, start_col = tok[2]
                end_line, end_col = tok[3]
                if start_line > last_lineno:
                    last_col = 0
                if start_col > last_col:
                    out += (' ' * (start_col - last_col))
                # Remove comments:
                if strip_comments and token_type == tokenize.COMMENT:
                    pass
                elif strip_docstr and token_type == tokenize.STRING:
                    if prev_toktype != tokenize.INDENT:
                        # This is likely a docstring; double-check we're not inside an operator:
                        if prev_toktype != tokenize.NEWLINE:
                            if start_col > 0:
                                out += token_string
                else:
                    out += token_string
                prev_toktype = token_type
                last_col = end_col
                last_lineno = end_line
            return out

        sourcecode = remove_docstrings_or_comments(sourcecode)
        #sourcecode = minification.remove_comments_and_docstrings(sourcecode)
        #tokens = token_utils.listified_tokenizer(sourcecode)
        #minification.remove_comments(tokens)
        #minification.remove_docstrings(tokens)
        #token_utils.untokenize(tokens)

    if strip_decor:
        try:
            import redbaron
            red = redbaron.RedBaron(ub.codeblock(sourcecode))
        except Exception:
            hack_text = ub.ensure_unicode(ub.codeblock(sourcecode)).encode(
                'ascii', 'replace')
            red = redbaron.RedBaron(hack_text)
            pass
        if len(red) == 1:
            redfunc = red[0]
            if redfunc.type == 'def':
                # Remove decorators
                del redfunc.decorators[:]
                sourcecode = redfunc.dumps()

    if remove_linenums is not None:
        source_lines = sourcecode.strip('\n').split('\n')
        delete_items_by_index(source_lines, remove_linenums)
        sourcecode = '\n'.join(source_lines)
    return sourcecode
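
The inner `remove_docstrings_or_comments` helper is a tokenize round-trip. A rough standalone sketch of the same idea, stripping comments only (`tokenize.untokenize` does not preserve spacing exactly, which is why the helper above tracks columns by hand):

import io
import tokenize

def strip_comments(source):
    toks = [tok for tok in tokenize.generate_tokens(io.StringIO(source).readline)
            if tok.type != tokenize.COMMENT]
    return tokenize.untokenize(toks)

print(strip_comments('x = 1  # this comment is removed\n'))
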
Example #32
0
    def main(cls, cmdline=0, **kwargs):
        """
        Ignore:
            repodir = ub.Path('~/code/pyflann_ibeis').expand()
            kwargs = {
                'repodir': repodir,
                'tags': ['binpy', 'erotemic', 'github'],
            }
            cmdline = 0

        Example:
            repodir = ub.Path.appdir('pypkg/demo/my_new_repo')
            import sys, ubelt
            sys.path.append(ubelt.expandpath('~/misc/templates/PYPKG'))
            from apply_template import *  # NOQA
            kwargs = {
                'repodir': repodir,
            }
            cmdline = 0
        """
        import ubelt as ub
        config = TemplateConfig(cmdline=cmdline, data=kwargs)
        repo_dpath = ub.Path(config['repodir'])
        repo_dpath.ensuredir()

        IS_NEW_REPO = 0

        create_new_repo_info = ub.codeblock('''
            # TODO:
            # At least instructions on how to create a new repo, or maybe an
            # API call
            https://github.com/new

            git init
            ''')
        print(create_new_repo_info)

        if IS_NEW_REPO:
            # TODO: git init
            # TODO: github or gitlab register
            pass

        self = TemplateApplier(config)
        self.setup().gather_tasks()

        self.setup().apply()

        if config['setup_secrets']:
            setup_secrets_fpath = self.repo_dpath / 'dev/setup_secrets.sh'
            if 'erotemic' in self.config['tags']:
                environ_export = 'setup_package_environs_github_erotemic'
                upload_secret_cmd = 'upload_github_secrets'
            elif 'pyutils' in self.config['tags']:
                environ_export = 'setup_package_environs_github_pyutils'
                upload_secret_cmd = 'upload_github_secrets'
            elif 'kitware' in self.config['tags']:
                environ_export = 'setup_package_environs_gitlab_kitware'
                upload_secret_cmd = 'upload_gitlab_repo_secrets'
            else:
                raise Exception

            import cmd_queue
            script = cmd_queue.Queue.create()
            script.submit(
                ub.codeblock(f'''
                cd {self.repo_dpath}
                source {setup_secrets_fpath}
                {environ_export}
                load_secrets
                export_encrypted_code_signing_keys
                git commit -am "Updated secrets"
                {upload_secret_cmd}
                '''))
            script.rprint()
Example #33
0
#!/usr/bin/env python
"""
Looks at the `git_*.{sh,py}` scripts and makes corresponding `git-*` scripts
"""
import glob
from os.path import dirname, join, basename, splitext
import ubelt as ub

SCRIPT_HEADER = ub.codeblock(r'''
    #!/bin/bash
    # References:
    # https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within

    SOURCE="${BASH_SOURCE[0]}"
    while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
      DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
      SOURCE="$(readlink "$SOURCE")"
      [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
    done

    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    ''')

SCRIPT_FOOTER_FMT = '$DIR/../{fname} "$@"'


def setup_git_scripts():
    dpath = dirname(__file__)

    git_sh_scripts = list(glob.glob(join(dpath, 'git_*.sh')))
    git_py_scripts = list(glob.glob(join(dpath, 'git_*.py')))
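
The listing cuts off here, but the pieces above are enough to sketch what one generated wrapper would contain (the filename is hypothetical):

fname = 'git_squash.sh'  # hypothetical script found by the glob above
script_text = SCRIPT_HEADER + '\n' + SCRIPT_FOOTER_FMT.format(fname=fname)
# After the header resolves the real script directory, the wrapper dispatches:
#   $DIR/../git_squash.sh "$@"
print(script_text)
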
Example #34
0
    def main(cls, cmdline=False, **kwargs):
        config = cls(cmdline=cmdline, data=kwargs)
        print(ub.codeblock(config['text']))
Example #35
0
def make_baseline_truthfiles():
    work_dir = ub.truepath('~/work')
    data_dir = ub.truepath('~/data')

    challenge_data_dir = join(data_dir, 'viame-challenge-2018')
    challenge_work_dir = join(work_dir, 'viame-challenge-2018')

    ub.ensuredir(challenge_work_dir)

    img_root = join(challenge_data_dir, 'phase0-imagery')
    annot_dir = join(challenge_data_dir, 'phase0-annotations')
    fpaths = list(glob.glob(join(annot_dir, '*.json')))
    # ignore the non-bounding box nwfsc and afsc datasets for now

    # exclude = ('nwfsc', 'afsc', 'mouss', 'habcam')
    # exclude = ('nwfsc', 'afsc', 'mouss',)
    # fpaths = [p for p in fpaths if not basename(p).startswith(exclude)]

    import json
    dsets = ub.odict()
    for fpath in fpaths:
        key = basename(fpath).split('.')[0]
        dsets[key] = json.load(open(fpath, 'r'))

    print('Merging')
    merged = coco_union(dsets)

    merged_fpath = join(challenge_work_dir, 'phase0-merged.mscoco.json')
    with open(merged_fpath, 'w') as fp:
        json.dump(merged, fp, indent=4)

    import copy
    self = CocoDataset(copy.deepcopy(merged), img_root=img_root, autobuild=False)
    self._build_index()
    self.run_fixes()

    if True:
        # remove all point annotations
        print('Remove point annotations')
        to_remove = []
        for ann in self.dataset['annotations']:
            if ann['roi_shape'] == 'point':
                to_remove.append(ann)
        for ann in to_remove:
            self.dataset['annotations'].remove(ann)
        self._build_index()

        # remove empty images
        print('Remove empty images')
        to_remove = []
        for gid in self.imgs.keys():
            aids = self.gid_to_aids.get(gid, [])
            if not aids:
                to_remove.append(self.imgs[gid])
        for img in to_remove:
            self.dataset['images'].remove(img)
        self._build_index()

    print('# self.anns = {!r}'.format(len(self.anns)))
    print('# self.imgs = {!r}'.format(len(self.imgs)))
    print('# self.cats = {!r}'.format(len(self.cats)))

    catname_to_nannots = ub.map_keys(lambda x: self.cats[x]['name'],
                                     ub.map_vals(len, self.cid_to_aids))
    catname_to_nannots = ub.odict(sorted(catname_to_nannots.items(),
                                         key=lambda kv: kv[1]))
    print(ub.repr2(catname_to_nannots))

    if False:
        # aid = list(self.anns.values())[0]['id']
        # self.show_annotation(aid)
        gids = sorted([gid for gid, aids in self.gid_to_aids.items() if aids])
        # import utool as ut
        # for gid in ut.InteractiveIter(gids):
        for gid in gids:
            from matplotlib import pyplot as plt
            fig = plt.figure(1)
            fig.clf()
            self.show_annotation(gid=gid)
            fig.canvas.draw()

        for ann in self.anns.values():
            primary_aid = ann['id']
            print('primary_aid = {!r}'.format(primary_aid))
            print(len(self.gid_to_aids[ann['image_id']]))

            if 'roi_shape' not in ann:
                ann['roi_shape'] = 'bounding_box'

            if ann['roi_shape'] == 'boundingBox':
                pass

            if ann['roi_shape'] == 'point':
                primary_aid = ann['id']
                print('primary_aid = {!r}'.format(primary_aid))
                print(len(self.gid_to_aids[ann['image_id']]))
                break

    # Split into train / test  set
    print('Splitting')
    skf = StratifiedGroupKFold(n_splits=2)
    groups = [ann['image_id'] for ann in self.anns.values()]
    y = [ann['category_id'] for ann in self.anns.values()]
    X = [ann['id'] for ann in self.anns.values()]
    split = list(skf.split(X=X, y=y, groups=groups))[0]
    train_idx, test_idx = split

    print('Taking subsets')
    aid_to_gid = {aid: ann['image_id'] for aid, ann in self.anns.items()}
    train_aids = list(ub.take(X, train_idx))
    test_aids = list(ub.take(X, test_idx))
    train_gids = sorted(set(ub.take(aid_to_gid, train_aids)))
    test_gids = sorted(set(ub.take(aid_to_gid, test_aids)))

    train_dset = self.subset(train_gids)
    test_dset = self.subset(test_gids)

    print('---------')
    print('# train_dset.anns = {!r}'.format(len(train_dset.anns)))
    print('# train_dset.imgs = {!r}'.format(len(train_dset.imgs)))
    print('# train_dset.cats = {!r}'.format(len(train_dset.cats)))
    print('---------')
    print('# test_dset.anns = {!r}'.format(len(test_dset.anns)))
    print('# test_dset.imgs = {!r}'.format(len(test_dset.imgs)))
    print('# test_dset.cats = {!r}'.format(len(test_dset.cats)))

    train_dset._ensure_imgsize()
    test_dset._ensure_imgsize()

    print('Writing')
    with open(join(challenge_work_dir, 'phase0-merged-train.mscoco.json'), 'w') as fp:
        json.dump(train_dset.dataset, fp, indent=4)

    with open(join(challenge_work_dir, 'phase0-merged-test.mscoco.json'), 'w') as fp:
        json.dump(test_dset.dataset, fp, indent=4)

    # Make a detectron yaml file
    config_text = ub.codeblock(
        """
        MODEL:
          TYPE: generalized_rcnn
          CONV_BODY: ResNet.add_ResNet50_conv4_body
          NUM_CLASSES: {num_classes}
          FASTER_RCNN: True
        NUM_GPUS: 1
        SOLVER:
          WEIGHT_DECAY: 0.0001
          LR_POLICY: steps_with_decay
          BASE_LR: 0.01
          GAMMA: 0.1
          # 1x schedule (note TRAIN.IMS_PER_BATCH: 1)
          MAX_ITER: 180000
          STEPS: [0, 120000, 160000]
        RPN:
          SIZES: (32, 64, 128, 256, 512)
        FAST_RCNN:
          ROI_BOX_HEAD: ResNet.add_ResNet_roi_conv5_head
          ROI_XFORM_METHOD: RoIAlign
        TRAIN:
          WEIGHTS: https://s3-us-west-2.amazonaws.com/detectron/ImageNetPretrained/MSRA/R-50.pkl
          DATASETS: ('/work/viame-challenge-2018/phase0-merged-train.mscoco.json',)
          IM_DIR: '/data/viame-challenge-2018/phase0-imagery'
          SCALES: (800,)
          MAX_SIZE: 1333
          IMS_PER_BATCH: 1
          BATCH_SIZE_PER_IM: 512
        TEST:
          DATASETS: ('/work/viame-challenge-2018/phase0-merged-test.mscoco.json',)
          IM_DIR: '/data/viame-challenge-2018/phase0-imagery'
          SCALES: (800,)
          MAX_SIZE: 1333
          NMS: 0.5
          RPN_PRE_NMS_TOP_N: 6000
          RPN_POST_NMS_TOP_N: 1000
        OUTPUT_DIR: /work/viame-challenge-2018/output
        """)
    config_text = config_text.format(
        num_classes=len(self.cats),
    )
    ub.writeto(join(challenge_work_dir, 'phase0-faster-rcnn.yaml'), config_text)

    docker_cmd = ('nvidia-docker run '
                  '-v {work_dir}:/work -v {data_dir}:/data '
                  '-it detectron:c2-cuda9-cudnn7 bash').format(
                      work_dir=work_dir, data_dir=data_dir)

    train_cmd = ('python2 tools/train_net.py '
                 '--cfg /work/viame-challenge-2018/phase0-faster-rcnn.yaml '
                 'OUTPUT_DIR /work/viame-challenge-2018/output')

    hacks = ub.codeblock(
        """
        git remote add Erotemic https://github.com/Erotemic/Detectron.git
        git fetch --all
        git checkout general_dataset

        # curl https://github.com/Erotemic/Detectron/blob/42d44b2d155c775dc509b6a44518d0c582f8cdf5/tools/train_net.py
        # wget https://github.com/Erotemic/Detectron/blob/42d44b2d155c775dc509b6a44518d0c582f8cdf5/lib/core/config.py
        """)

    print(docker_cmd)
    print(train_cmd)
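
The train/test split above keeps every annotation from a given image in the same fold by grouping on `image_id`. Modern scikit-learn ships an equivalent `StratifiedGroupKFold`; a minimal sketch of the group-preserving behavior on toy data (assuming scikit-learn >= 1.0):

import numpy as np
from sklearn.model_selection import StratifiedGroupKFold

X = np.arange(8).reshape(-1, 1)     # stand-ins for annotation ids
y = [0, 0, 1, 1, 0, 1, 0, 1]        # category labels
groups = [1, 1, 2, 2, 3, 3, 4, 4]   # image ids; annotations share a group
train_idx, test_idx = next(iter(StratifiedGroupKFold(n_splits=2).split(X, y, groups)))
# every group id lands entirely in train or entirely in test
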
Example #36
0
def compare_results():
    print('Comparing results')
    import pandas as pd
    from tabulate import tabulate

    # Read in output of demo script
    measure_fpath = 'measurements_haul83.csv'
    py_df = pd.read_csv(measure_fpath)  # DataFrame.from_csv was removed in modern pandas
    # Convert python length output from mm into cm for consistency
    py_df['fishlen'] = py_df['fishlen'] / 10
    py_df['current_frame'] = py_df['current_frame'].astype(int)  # np.int was removed from numpy

    # janky CSV parsing
    py_df['box_pts1'] = py_df['box_pts1'].map(
        lambda p: eval(p.replace(';', ','), np.__dict__))
    py_df['box_pts2'] = py_df['box_pts2'].map(
        lambda p: eval(p.replace(';', ','), np.__dict__))

    py_df['obox1'] = [
        ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(int)))
        for pts in py_df['box_pts1']
    ]
    py_df['obox2'] = [
        ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(int)))
        for pts in py_df['box_pts2']
    ]
    py_df.drop(['box_pts1', 'box_pts2'], axis=1, inplace=True)

    # Remap to matlab names
    py_df = py_df.rename(columns={
        'error': 'Err',
        'fishlen': 'fishLength',
        'range': 'fishRange',
    })

    # Load matlab results
    mat_df = _read_kresimir_results()

    FORCE_COMPARABLE_RANGE = True
    # FORCE_COMPARABLE_RANGE = False
    if FORCE_COMPARABLE_RANGE:
        # Be absolutely certain we are in comparable regions (may slightly bias
        # results, against python and in favor of matlab)
        min_frame = max(mat_df.current_frame.min(), py_df.current_frame.min())
        max_frame = min(mat_df.current_frame.max(), py_df.current_frame.max())
        print('min_frame = {!r}'.format(min_frame))
        print('max_frame = {!r}'.format(max_frame))

        mat_df = mat_df[(mat_df.current_frame >= min_frame)
                        & (mat_df.current_frame <= max_frame)]
        py_df = py_df[(py_df.current_frame >= min_frame)
                      & (py_df.current_frame <= max_frame)]

    intersect_frames = np.intersect1d(mat_df.current_frame,
                                      py_df.current_frame)
    print('intersecting frames = {} / {} (matlab)'.format(
        len(intersect_frames), len(set(mat_df.current_frame))))
    print('intersecting frames = {} / {} (python)'.format(
        len(intersect_frames), len(set(py_df.current_frame))))

    #  Reuse the hungarian algorithm implementation from ctalgo
    min_assign = ctalgo.FishStereoMeasurments.minimum_weight_assignment

    correspond = []
    for f in intersect_frames:
        pidxs = np.where(py_df.current_frame == f)[0]
        midxs = np.where(mat_df.current_frame == f)[0]

        pdf = py_df.iloc[pidxs]
        mdf = mat_df.iloc[midxs]

        ppts1 = np.array([o.center for o in pdf['obox1']])
        mpts1 = np.array([o.center for o in mdf['obox1']])

        ppts2 = np.array([o.center for o in pdf['obox2']])
        mpts2 = np.array([o.center for o in mdf['obox2']])

        dists1 = sklearn.metrics.pairwise.pairwise_distances(ppts1, mpts1)
        dists2 = sklearn.metrics.pairwise.pairwise_distances(ppts2, mpts2)

        # arbitrarily chosen threshold
        thresh = 100
        for i, j in min_assign(dists1):
            d1 = dists1[i, j]
            d2 = dists2[i, j]
            if d1 < thresh and d2 < thresh and abs(d1 - d2) < thresh / 4:
                correspond.append((pidxs[i], midxs[j]))
    correspond = np.array(correspond)

    # pflags = np.array(ub.boolmask(correspond.T[0], len(py_df)))
    mflags = np.array(ub.boolmask(correspond.T[1], len(mat_df)))
    # print('there are {} detections that seem to be in common'.format(len(correspond)))
    # print('The QC flags of the common detections are:       {}'.format(
    #     ub.dict_hist(mat_df[mflags]['QC'].values)))
    # print('The QC flags of the other matlab detections are: {}'.format(
    #     ub.dict_hist(mat_df[~mflags]['QC'].values)))

    print('\n\n----\n## All stats\n')
    print(
        ub.codeblock('''
        Overall, the matlab script made {nmat} length measurements and the
        python script made {npy} length measurements.  Here is a table
        summarizing the average lengths / ranges / errors of each script:
        ''').format(npy=len(py_df), nmat=len(mat_df)))
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(
            py_df[key].mean(), py_df[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(
            mat_df[key].mean(), mat_df[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))

    print('\n\n----\n## Only COMMON detections\n')
    py_df_c = py_df.iloc[correspond.T[0]]
    mat_df_c = mat_df.iloc[correspond.T[1]]
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(
            py_df_c[key].mean(), py_df_c[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(
            mat_df_c[key].mean(), mat_df_c[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))

    print(
        ub.codeblock('''
        Now, we investigate how many detections matlab and python made in common.
        (Note, choosing which detections in one version correspond to which in
         another is done using a heuristic based on distances between bbox
         centers and a thresholded minimum assignment problem).

        Python made {npy_c}/{nmat} = {percent:.2f}% of the detections matlab made

        ''').format(npy_c=len(py_df_c),
                    nmat=len(mat_df),
                    percent=100 * len(py_df_c) / len(mat_df)))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))

    print('\n\n----\n## Evaluation using the QC code\n')
    hist_hit = ub.dict_hist(mat_df[mflags]['QC'].values)
    hist_miss = ub.dict_hist(mat_df[~mflags]['QC'].values)
    print(
        ub.codeblock('''
        However, not all of those matlab detections were good. Because we have
        detections in correspondence with each other we can assign the python
        detections QC codes.

        Here is a histogram of the QC codes for these python detections:
        {}
        (Note: read histogram as <QC-code>: <frequency>)

        Here is a histogram of the other matlab detections that python did not
        find:
        {}

        To summarize:
            python correctly rejected {:.2f}% of the matlab QC=0 detections
            python correctly accepted {:.2f}% of the matlab QC=1 detections
            python correctly accepted {:.2f}% of the matlab QC=2 detections

            Note, that because python made detections that matlab did not make,
            the remaining {} detections may be right or wrong, but there is
            no way to tell from this analysis.

        Lastly, here are the statistics for the common detections that had a
        non-zero QC code.
        ''').format(ub.repr2(hist_hit, nl=1), ub.repr2(hist_miss, nl=1),
                    100 * hist_miss[0] / (hist_hit[0] + hist_miss[0]),
                    100 * hist_hit[1] / (hist_hit[1] + hist_miss[1]),
                    100 * hist_hit[2] / (hist_hit[2] + hist_miss[2]),
                    len(py_df) - len(py_df_c)))

    is_qc = (mat_df_c['QC'] > 0).values
    mat_df_c = mat_df_c[is_qc]
    py_df_c = py_df_c[is_qc]
    stats = pd.DataFrame(columns=['python', 'matlab'])
    for key in ['fishLength', 'fishRange', 'Err']:
        stats.loc[key, 'python'] = '{:6.2f} ± {:6.2f}'.format(
            py_df_c[key].mean(), py_df_c[key].std())
        stats.loc[key, 'matlab'] = '{:6.2f} ± {:6.2f}'.format(
            mat_df_c[key].mean(), mat_df_c[key].std())

    stats.loc['nTotal', 'python'] = '{}'.format(len(py_df_c))
    stats.loc['nTotal', 'matlab'] = '{}'.format(len(mat_df_c))
    print(tabulate(stats, headers='keys', tablefmt='psql', stralign='right'))
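
`FishStereoMeasurments.minimum_weight_assignment` plays the role of a Hungarian matcher here; an equivalent sketch with scipy (assuming scipy is available):

import numpy as np
from scipy.optimize import linear_sum_assignment

dists = np.array([[1.0, 9.0],
                  [8.0, 2.0]])
rows, cols = linear_sum_assignment(dists)
print(rows.tolist(), cols.tolist())  # [0, 1] [0, 1]: the cheap diagonal wins
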
Example #37
0
def _complete_source(line, state_indent, line_iter):
    """
    helper
    remove lines from the iterator if they are needed to complete source

    This uses :func:`static.is_balanced_statement` to do the heavy lifting

    Example:
        >>> from xdoctest.parser import *  # NOQA
        >>> from xdoctest.parser import _complete_source
        >>> state_indent = 0
        >>> line = '>>> x = { # The line is not finished'
        >>> remain_lines = ['>>> 1:2,', '>>> 3:4,', '>>> 5:6}', '>>> y = 7']
        >>> line_iter = enumerate(remain_lines, start=1)
        >>> finished = list(_complete_source(line, state_indent, line_iter))
        >>> final = chr(10).join([t[1] for t in finished])
        >>> print(final)
    """
    norm_line = line[state_indent:]  # Normalize line indentation
    prefix = norm_line[:4]
    suffix = norm_line[4:]
    assert prefix.strip() in {'>>>', '...'}, (
        'unexpected prefix: {!r}'.format(prefix))
    yield line, norm_line

    source_parts = [suffix]

    # These hacks actually modify the input doctest slightly
    HACK_TRIPLE_QUOTE_FIX = True

    try:
        while not static.is_balanced_statement(source_parts, only_tokens=True):
            line_idx, next_line = next(line_iter)
            norm_line = next_line[state_indent:]
            prefix = norm_line[:4]
            suffix = norm_line[4:]

            if prefix.strip() not in {'>>>', '...', ''}:  # nocover
                error = True
                if HACK_TRIPLE_QUOTE_FIX:
                    # TODO: make a more robust patch
                    if any("'''" in s or '"""' in s for s in source_parts):
                        # print('HACK FIXING TRIPLE QUOTE')
                        next_line = next_line[:state_indent] + '... ' + norm_line
                        norm_line = '... ' + norm_line
                        prefix = ''
                        suffix = norm_line
                        error = False

                if error:
                    if DEBUG:
                        print(' * !!!ERROR!!!')
                        print(' * source_parts = {!r}'.format(source_parts))
                        print(' * prefix = {!r}'.format(prefix))
                        print(' * norm_line = {!r}'.format(norm_line))
                        print(' * !!!!!!!!!!!!!')

                    raise SyntaxError(
                        'Bad indentation in doctest on line {}: {!r}'.format(
                            line_idx, next_line))
            source_parts.append(suffix)
            yield next_line, norm_line
    except StopIteration:
        if DEBUG:
            import ubelt as ub
            print('<FAIL DID NOT COMPLETE SOURCE>')
            import traceback
            tb_text = traceback.format_exc()
            tb_text = ub.highlight_code(tb_text)
            tb_text = ub.indent(tb_text)
            print(tb_text)
            # print(' * line_iter = {!r}'.format(line_iter))
            print(' * state_indent = {!r}'.format(state_indent))
            print(' * line = {!r}'.format(line))
            # print('source =\n{}'.format('\n'.join(source_parts)))
            print('# Ensure that the following line should actually fail')
            print('source_parts = {}'.format(ub.repr2(source_parts, nl=2)))
            print(
                ub.codeblock(r'''
                from xdoctest import static_analysis as static
                static.is_balanced_statement(source_parts, only_tokens=False)
                static.is_balanced_statement(source_parts, only_tokens=True)
                text = '\n'.join(source_parts)
                print(text)
                static.six_axt_parse(text)
                '''))
            print('</FAIL DID NOT COMPLETE SOURCE>')
            # sys.exit(1)
        # TODO: use AST to reparse all doctest parts to discover where the
        # syntax error in the doctest is and then raise it.
        raise exceptions.IncompleteParseError(
            'ill-formed doctest: all parts have been processed '
            'but the doctest source is not balanced')
    else:
        if DEBUG > 1:
            import ubelt as ub
            print('<SUCCESS COMPLETED SOURCE>')
            # print(' * line_iter = {!r}'.format(line_iter))
            print('source_parts = {}'.format(ub.repr2(source_parts, nl=2)))
            print('</SUCCESS COMPLETED SOURCE>')
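
# A rough sketch of the "balanced statement" idea that _complete_source relies
# on, approximated here with the stdlib codeop module; this is only an analogy
# for illustration, not xdoctest's actual static.is_balanced_statement.
import codeop

def looks_balanced(source_parts):
    # Join the accumulated doctest parts into one candidate statement
    text = '\n'.join(source_parts)
    try:
        # compile_command returns None while the input is incomplete, and a
        # code object once the statement can be compiled
        return codeop.compile_command(text, symbol='exec') is not None
    except SyntaxError:
        # A hard syntax error means accumulating more lines will not help
        return True

print(looks_balanced(['x = {']))       # False: the dict literal is still open
print(looks_balanced(['x = {', '}']))  # True: the statement is complete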
Exemple #38
0
def find_chain(head, authors=None, preserve_tags=True, oldest_commit=None):
    """
    Find a chain of commits starting at the HEAD.  If `authors` is specified
    the commits must be from one of these authors.

    The term chain is used in the graph-theory sense. It is a list of commits
    where all non-endpoint commits have exactly one parent and one child.

    TODO:
        - [ ] allow a chain to include branches if all messages on all branches
              conform to the chain pattern (e.g. wip)


        def search(node, current_path):
            if current_path:
                pass

            child_paths = []
            for parent in node.parents:
                path = search(parent, current_path)
                child_paths.append(path)

            if len(child_paths) == 0:
                pass
            if len(child_paths) == 1:
                # normal one parent case
                pass
            else:
                pass
                # Branching case
                # ACCEPT THE BRANCHING PATHS IF:
                #  * PARENT OF ALL PATHS HAVE A COMMON ENDPOINT
                #  * HANDLE CASE WHERE PATHS OVERLAPS

    Args:
        head (git.Commit): starting point
        authors (set): valid authors
        preserve_tags (bool, default=True): if True the chain is not allowed
            to extend past any tags. If a set, then we will not proceed past
            any tag with a name in the set.

    Example:
        >>> # assuming you are in a git repo
        >>> chain = find_chain(git.Repo().head.commit)
    """
    chain = []
    commit = head

    if preserve_tags:
        tags = head.repo.tags
        if isinstance(preserve_tags, (set, list, tuple)):
            tags = {tag for tag in tags if tag.name in preserve_tags}
        tagged_commits = {tag.commit for tag in tags}

    while len(commit.parents) <= 1:
        if authors is not None and commit.author.name not in authors:
            break
        if len(commit.parents) == 0:
            # Including the initial commit in a chain is known to cause
            # problems, so issue a warning instead
            warnings.warn(ub.codeblock(
                '''
                This script contains a known issue, where the initial commit is
                not included when it "should" be part of a streak.

                To squash the entire branch, use the following workaround:
                    git branch -m master old_master
                    git checkout --orphan master
                    git commit -am "initial commit"
                    git branch -D old_master
                '''))

            break

        if oldest_commit is not None:
            if commit.hexsha.startswith(oldest_commit):
                break

            # Guard: tagged_commits is only defined when preserve_tags is set
            if preserve_tags and oldest_commit in tagged_commits:
                break

        if preserve_tags:
            # If we are preserving tags, break the chain once we find one
            if commit in tagged_commits:
                break

        chain.append(commit)
        if len(commit.parents) > 0:
            commit = commit.parents[0]
        else:
            break

    return chain
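
# A minimal usage sketch, assuming GitPython is installed and the working
# directory is a git repository; the author name is a hypothetical example.
import git

repo = git.Repo('.')
# Collect the squashable chain of commits reachable from HEAD
chain = find_chain(repo.head.commit, authors={'Jane Doe'})
for commit in chain:
    print(commit.hexsha[:8], commit.summary)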