def test_global_color_disable():
    """
    Check that the global ``NO_COLOR`` flag disables all text coloration.

    CommandLine:
        xdoctest -m /home/joncrall/code/ubelt/tests/test_color.py test_global_color_disable
    """
    import ubelt as ub
    text = 'text = "hi"'
    # Coloration may be unavailable in this environment (e.g. no pygments),
    # in which case the colored and uncolored variants are identical.
    has_color = ub.color_text(text, 'red') != ub.color_text(text, None)

    colored = ub.color_text(text, 'red')
    highlighted = ub.highlight_code(text)
    if has_color:
        assert text != colored
        assert text != highlighted

    # Force colors to be disabled; output must be the unmodified text
    prev = ub.util_colors.NO_COLOR
    try:
        ub.util_colors.NO_COLOR = True
        assert text == ub.color_text(text, 'red')
        assert text == ub.highlight_code(text)
    finally:
        # Re-enable coloration
        ub.util_colors.NO_COLOR = prev

    # Once the flag is restored, coloration should work again
    if has_color:
        assert text != ub.color_text(text, 'red')
        assert text != ub.highlight_code(text)
def color_pytb(text):
    """
    Syntax-highlight a Python traceback string.

    Args:
        text (str): traceback text to highlight

    Returns:
        str: the highlighted traceback
    """
    highlighted = ub.highlight_code(text, lexer_name='pytb', stripall=True)
    return highlighted
def difftext(text1, text2, context_lines=0, ignore_whitespace=False,
             colored=False):
    r"""
    Uses difflib to return a difference string between two similar texts

    Args:
        text1 (str): old text
        text2 (str): new text
        context_lines (int): number of lines of unchanged context around
            each changed line. If None, the entire diff is returned.
        ignore_whitespace (bool): if True, trailing whitespace is stripped
            and whitespace-only lines/characters are treated as junk
        colored (bool): if true highlight the diff

    Returns:
        str: formatted difference text message

    References:
        http://www.java2s.com/Code/Python/Utility/IntelligentdiffbetweentextfilesTimPeters.htm

    Example:
        >>> # build test data
        >>> text1 = 'one\ntwo\nthree'
        >>> text2 = 'one\ntwo\nfive'
        >>> # execute function
        >>> result = difftext(text1, text2)
        >>> # verify results
        >>> print(result)
        - three
        + five

    Example:
        >>> # build test data
        >>> text1 = 'one\ntwo\nthree\n3.1\n3.14\n3.1415\npi\n3.4\n3.5\n4'
        >>> text2 = 'one\ntwo\nfive\n3.1\n3.14\n3.1415\npi\n3.4\n4'
        >>> # execute function
        >>> context_lines = 1
        >>> result = difftext(text1, text2, context_lines, colored=True)
        >>> # verify results
        >>> print(result)
    """
    import ubelt as ub
    import difflib
    text1 = ub.ensure_unicode(text1)
    text2 = ub.ensure_unicode(text2)
    text1_lines = text1.splitlines()
    text2_lines = text2.splitlines()
    if ignore_whitespace:
        text1_lines = [t.rstrip() for t in text1_lines]
        text2_lines = [t.rstrip() for t in text2_lines]
        ndiff_kw = dict(linejunk=difflib.IS_LINE_JUNK,
                        charjunk=difflib.IS_CHARACTER_JUNK)
    else:
        ndiff_kw = {}
    all_diff_lines = list(difflib.ndiff(text1_lines, text2_lines, **ndiff_kw))

    if context_lines is None:
        diff_lines = all_diff_lines
    else:
        # boolean for every line if it is marked or not
        ismarked_list = [
            len(line) > 0 and line[0] in '+-?'
            for line in all_diff_lines
        ]
        # flag lines that are within context_lines away from a diff line
        isvalid_list = ismarked_list[:]
        for i in range(1, context_lines + 1):
            isvalid_list[:-i] = list(
                map(any, zip(isvalid_list[:-i], ismarked_list[i:])))
            isvalid_list[i:] = list(
                map(any, zip(isvalid_list[i:], ismarked_list[:-i])))
        # NOTE: an earlier revision could insert a visual
        # '<... FILTERED CONTEXT ...>' break between discontiguous context
        # regions, but that path was permanently disabled behind ``if False``
        # and produced output identical to plain filtering; the dead code has
        # been removed.
        diff_lines = list(ub.compress(all_diff_lines, isvalid_list))
    text = '\n'.join(diff_lines)
    if colored:
        text = ub.highlight_code(text, lexer_name='diff')
    return text
def parse(self, string, info=None):
    """
    Divide the given string into examples and interleaving text.

    Args:
        string (str): string representing the doctest
        info (dict): info about where the string came from in case of an
            error

    Returns:
        list : a list of `DoctestPart` objects

    CommandLine:
        python -m xdoctest.parser DoctestParser.parse

    Example:
        >>> s = 'I am a dummy example with two parts'
        >>> x = 10
        >>> print(s)
        I am a dummy example with two parts
        >>> s = 'My purpose it so demonstrate how wants work here'
        >>> print('The new want applies ONLY to stdout')
        >>> print('given before the last want')
        >>> '''
            this wont hurt the test at all
            even though its multiline '''
        >>> y = 20
        The new want applies ONLY to stdout
        given before the last want
        >>> # Parts from previous examples are executed in the same context
        >>> print(x + y)
        30

        this is simply text, and doesnt apply to the previous doctest the
        <BLANKLINE> directive is still in effect.

    Example:
        >>> from xdoctest import parser
        >>> from xdoctest.docstr import docscrape_google
        >>> from xdoctest import core
        >>> self = parser.DoctestParser()
        >>> docstr = self.parse.__doc__
        >>> blocks = docscrape_google.split_google_docblocks(docstr)
        >>> doclineno = self.parse.__func__.__code__.co_firstlineno
        >>> key, (string, offset) = blocks[-2]
        >>> self._label_docsrc_lines(string)
        >>> doctest_parts = self.parse(string)
        >>> # each part with a want-string needs to be broken in two
        >>> assert len(doctest_parts) == 6
    """
    if DEBUG > 1:
        print('\n===== PARSE ====')
    # Python 2 strings must be coerced to unicode before parsing
    if sys.version_info.major == 2:  # nocover
        string = utils.ensure_unicode(string)

    if not isinstance(string, six.string_types):
        raise TypeError('Expected string but got {!r}'.format(string))

    # Normalize tabs so column arithmetic is consistent
    string = string.expandtabs()

    # If all lines begin with the same indentation, then strip it.
    min_indent = _min_indentation(string)
    if min_indent > 0:
        string = '\n'.join([l[min_indent:] for l in string.splitlines()])

    # The three-stage pipeline below records intermediate results so the
    # except-block can report exactly which stage failed.
    labeled_lines = None
    grouped_lines = None
    all_parts = None
    try:
        labeled_lines = self._label_docsrc_lines(string)
        grouped_lines = self._group_labeled_lines(labeled_lines)
        all_parts = list(self._package_groups(grouped_lines))
    except Exception as orig_ex:
        # Whichever intermediate result is still None identifies the stage
        # that raised.
        if labeled_lines is None:
            failpoint = '_label_docsrc_lines'
        elif grouped_lines is None:
            failpoint = '_group_labeled_lines'
        elif all_parts is None:
            failpoint = '_package_groups'
        if DEBUG:
            # Verbose dump of every intermediate result for debugging
            print('<FAILPOINT>')
            print('!!! FAILED !!!')
            print('failpoint = {!r}'.format(failpoint))
            import ubelt as ub
            import traceback
            tb_text = traceback.format_exc()
            tb_text = ub.highlight_code(tb_text)
            tb_text = ub.indent(tb_text)
            print(tb_text)
            print('Failed to parse string = <{[<{[<{[')
            print(string)
            print(']}>a]}>]}> # end string')
            print('info = {}'.format(ub.repr2(info)))
            print('-----')
            print('orig_ex = {}'.format(orig_ex))
            print('labeled_lines = {}'.format(ub.repr2(labeled_lines)))
            print('grouped_lines = {}'.format(ub.repr2(grouped_lines, nl=3)))
            print('all_parts = {}'.format(ub.repr2(all_parts)))
            print('</FAILPOINT>')
            # sys.exit(1)
        # Re-wrap the failure with parse context for the caller
        raise exceptions.DoctestParseError(
            'Failed to parse doctest in {}'.format(failpoint),
            string=string, info=info, orig_ex=orig_ex)
    if DEBUG > 1:
        print('\n===== FINISHED PARSE ====')
    return all_parts
def _complete_source(line, state_indent, line_iter):
    """
    helper
    remove lines from the iterator if they are needed to complete source

    Consumes continuation lines from ``line_iter`` until the accumulated
    source forms a balanced statement, yielding ``(line, norm_line)`` pairs
    for every line that belongs to the statement.

    Args:
        line (str): the first source line of the statement (starts with '>>>')
        state_indent (int): number of leading indentation columns to strip
        line_iter: iterator of ``(line_idx, line)`` pairs for the remaining
            doctest lines

    Yields:
        tuple[str, str]: the raw line and its indentation-normalized form

    Raises:
        SyntaxError: if a continuation line is badly indented
        exceptions.IncompleteParseError: if the iterator is exhausted before
            the statement is balanced
    """
    norm_line = line[state_indent:]  # Normalize line indentation
    prefix = norm_line[:4]
    suffix = norm_line[4:]
    assert prefix.strip() in {'>>>', '...'}, '{}'.format(prefix)
    yield line, norm_line

    source_parts = [suffix]

    # These hacks actually modify the input doctest slighly
    HACK_TRIPLE_QUOTE_FIX = True

    try:
        # Keep pulling lines until the statement tokenizes as balanced
        while not static.is_balanced_statement(source_parts, only_tokens=True):
            line_idx, next_line = next(line_iter)
            norm_line = next_line[state_indent:]
            prefix = norm_line[:4]
            suffix = norm_line[4:]

            if prefix.strip() not in {'>>>', '...', ''}:  # nocover
                error = True
                if HACK_TRIPLE_QUOTE_FIX:
                    # TODO: make a more robust patch
                    # Inside an open triple-quoted string an unprefixed line
                    # is legitimate; rewrite it as a '... ' continuation.
                    if any("'''" in s or '"""' in s for s in source_parts):
                        # print('HACK FIXING TRIPLE QUOTE')
                        next_line = next_line[:state_indent] + '... ' + norm_line
                        norm_line = '... ' + norm_line
                        prefix = ''
                        suffix = norm_line
                        error = False

                if error:
                    if DEBUG:
                        print(' * !!!ERROR!!!')
                        print(' * source_parts = {!r}'.format(source_parts))
                        print(' * prefix = {!r}'.format(prefix))
                        print(' * norm_line = {!r}'.format(norm_line))
                        print(' * !!!!!!!!!!!!!')

                    raise SyntaxError(
                        'Bad indentation in doctest on line {}: {!r}'.format(
                            line_idx, next_line))
            source_parts.append(suffix)
            yield next_line, norm_line
    except StopIteration:
        # Ran out of lines before the statement balanced: the doctest is
        # ill-formed. Dump debugging state and a reproduction snippet.
        if DEBUG:
            import ubelt as ub
            print('<FAIL DID NOT COMPLETE SOURCE>')
            import traceback
            tb_text = traceback.format_exc()
            tb_text = ub.highlight_code(tb_text)
            tb_text = ub.indent(tb_text)
            print(tb_text)
            # print(' * line_iter = {!r}'.format(line_iter))
            print(' * state_indent = {!r}'.format(state_indent))
            print(' * line = {!r}'.format(line))
            # print('source =\n{}'.format('\n'.join(source_parts)))
            print('# Ensure that the following line should actually fail')
            print('source_parts = {}'.format(ub.repr2(source_parts, nl=2)))
            print(ub.codeblock(
                r'''
                from xdoctest import static_analysis as static
                static.is_balanced_statement(source_parts, only_tokens=False)
                static.is_balanced_statement(source_parts, only_tokens=True)
                text = '\n'.join(source_parts)
                print(text)
                static.six_axt_parse(text)
                '''))
            print('</FAIL DID NOT COMPLETE SOURCE>')
            # sys.exit(1)
        # TODO: use AST to reparse all doctest parts to discover where the
        # syntax error in the doctest is and then raise it.
        raise exceptions.IncompleteParseError(
            'ill-formed doctest: all parts have been processed '
            'but the doctest source is not balanced')
    else:
        if DEBUG > 1:
            import ubelt as ub
            print('<SUCCESS COMPLETED SOURCE>')
            # print(' * line_iter = {!r}'.format(line_iter))
            print('source_parts = {}'.format(ub.repr2(source_parts, nl=2)))
            print('</SUCCESS COMPLETED SOURCE>')
def close(closer, visitor):
    """
    Populate all undefined names using the context from a module.

    Repeatedly scans the source accumulated by ``closer`` for undefined
    names and pulls in their definitions from ``visitor`` until no
    undefined names remain or no further progress can be made.

    Args:
        closer: accumulates the extracted source; must provide
            ``current_sourcecode()``, ``_add_definition()``, ``body_defs``
            and ``debug()``
        visitor: resolves names to definitions; must provide
            ``extract_definition()`` and ``definitions``

    Raises:
        AssertionError: if (when DEBUG is falsy) an iteration makes no
            progress on the remaining undefined names
    """
    # Parse the parent module to find only the relevant global variables and
    # include those in the extracted source code.
    closer.debug('closing')
    current_sourcecode = closer.current_sourcecode()

    # Loop until all undefined names are defined
    names = True
    while names:
        # Determine if there are any variables needed from the parent scope
        current_sourcecode = closer.current_sourcecode()
        # Make sure we process names in the same order for hashability
        prev_names = names
        names = sorted(undefined_names(current_sourcecode))
        closer.debug(' * undefined_names = {}'.format(names))
        if names == prev_names:
            # The previous iteration resolved nothing, so we are stuck.
            print('visitor.definitions = {}'.format(
                ub.repr2(visitor.definitions, si=1)))
            if DEBUG:
                # BUGFIX: message previously read "unable do do anything"
                warnings.warn(
                    'We were unable to do anything about undefined names')
                return
            else:
                current_sourcecode = closer.current_sourcecode()
                print('--- <ERROR> ---')
                print('Unable to define names')
                print(' * names = {!r}'.format(names))
                print('<<< CURRENT_SOURCE >>>\n{}\n<<<>>>'.format(
                    ub.highlight_code(current_sourcecode)))
                print('--- </ERROR> ---')
                raise AssertionError(
                    'unable to define names: {}'.format(names))
        for name in names:
            try:
                try:
                    # BUGFIX: log the single name being tried; previously this
                    # formatted the whole ``names`` list, which made the debug
                    # output misleading.
                    closer.debug(
                        ' * try visitor.extract_definition({})'.format(
                            name))
                    d = visitor.extract_definition(name)
                except KeyError as ex:
                    closer.debug(' * encountered issue: {!r}'.format(ex))
                    # There is a corner case where we have the definition,
                    # we just need to move it to the top.
                    flag = False
                    for d_ in closer.body_defs.values():
                        if name == d_.name:
                            closer.debug(
                                ' * corner case: move definition to top')
                            closer._add_definition(d_)
                            flag = True
                            break
                    if not flag:
                        raise
                else:
                    closer.debug(' * add extracted def {}'.format(name))
                    closer._add_definition(d)
                # type_, text = visitor.extract_definition(name)
            except Exception as ex:
                # NOTE(review): the extraction error is printed and then
                # swallowed; the while-loop retries on the next pass. Confirm
                # this best-effort behavior is intended before tightening it.
                closer.debug(
                    ' * unable to extract def {} due to {!r}'.format(
                        name, ex))
                current_sourcecode = closer.current_sourcecode()
                print('--- <ERROR> ---')
                print('Error computing source code extract_definition')
                print(' * failed to close name = {!r}'.format(name))
                # print('<<< CURRENT_SOURCE >>>\n{}\n<<<>>>'.format(ub.highlight_code(current_sourcecode)))
                print('--- </ERROR> ---')
def main():
    """
    Build a manylinux1 docker image that compiles a static OpenCV.

    Reads configuration from CLI flags and environment variables
    (``--pyver``/``MB_PYTHON_VERSION``, ``--dpath``, ``--plat``/``PLAT``,
    ``--unicode_width``/``UNICODE_WIDTH``, ``--make_cpus``/``MAKE_CPUS``),
    downloads the OpenCV source, writes a Dockerfile, and runs
    ``docker build``. On success the image tag is written to
    ``opencv-docker-tag.txt``; on failure an Exception is raised.

    NOTE(review): relies on module-level ``os``, ``sys``, ``ub``, ``join``
    and ``exists`` (presumably ``os.path.join``/``os.path.exists``) —
    confirm against the file header.
    """
    def argval(clikey, envkey=None, default=ub.NoParam):
        # CLI flag takes priority; otherwise fall back to the environment
        # variable, then to the hard-coded default.
        if envkey is not None:
            envval = os.environ.get(envkey)
            if envval:
                default = envval
        return ub.argval(clikey, default=default)

    DEFAULT_PY_VER = '{}.{}'.format(sys.version_info.major,
                                    sys.version_info.minor)

    PY_VER = argval('--pyver', 'MB_PYTHON_VERSION', default=DEFAULT_PY_VER)
    dpath = argval('--dpath', None, default=os.getcwd())
    PLAT = argval('--plat', 'PLAT', default='x86_64')

    UNICODE_WIDTH = argval('--unicode_width', 'UNICODE_WIDTH', '32')

    import multiprocessing
    MAKE_CPUS = argval('--make_cpus', 'MAKE_CPUS',
                       multiprocessing.cpu_count() + 1)

    OPENCV_VERSION = '4.1.0'

    os.chdir(dpath)

    BASE = 'manylinux1_{}'.format(PLAT)
    BASE_REPO = 'quay.io/skvark'

    # e.g. '3.6' -> 'cp36-cp36m'
    PY_TAG = 'cp{ver}-cp{ver}m'.format(ver=PY_VER.replace('.', ''))

    # do we need the unicode width in this tag?
    DOCKER_TAG = '{}-opencv{}-py{}'.format(BASE, OPENCV_VERSION, PY_VER)

    # Download and unpack the OpenCV source if not already present
    if not exists(join(dpath, 'opencv-' + OPENCV_VERSION)):
        # FIXME: make robust in the case this fails
        fpath = ub.grabdata(
            'https://github.com/opencv/opencv/archive/{}.zip'.format(
                OPENCV_VERSION), dpath=dpath, verbose=1)
        ub.cmd('ln -s {} .'.format(fpath), cwd=dpath, verbose=3)
        ub.cmd('unzip {}'.format(fpath), cwd=dpath, verbose=3)

    dockerfile_fpath = join(dpath, 'Dockerfile_' + DOCKER_TAG)
    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    docker_code = ub.codeblock(
        '''
        FROM {BASE_REPO}/{BASE}

        # SETUP ENV
        ARG MB_PYTHON_VERSION={PY_VER}
        ENV PYTHON_VERSION={PY_VER}
        ENV PYTHON_ROOT=/opt/python/{PY_TAG}/
        ENV PYTHONPATH=/opt/python/{PY_TAG}/lib/python{PY_VER}/site-packages/
        ENV PATH=/opt/python/{PY_TAG}/bin:$PATH
        ENV PYTHON_EXE=/opt/python/{PY_TAG}/bin/python
        ENV HOME=/root
        ENV PLAT={PLAT}
        ENV UNICODE_WIDTH={UNICODE_WIDTH}

        # Update python environment
        RUN echo "$PYTHON_EXE"
        RUN $PYTHON_EXE -m pip install --upgrade pip && \
            $PYTHON_EXE -m pip install cmake ninja scikit-build wheel numpy

        # This is very different for different operating systems
        # https://github.com/skvark/opencv-python/blob/master/setup.py
        COPY opencv-{OPENCV_VERSION} /root/code/opencv
        RUN mkdir -p /root/code/opencv/build && \
            cd /root/code/opencv/build && \
            cmake -G "Unix Makefiles" \
                   -DINSTALL_CREATE_DISTRIB=ON \
                   -DOPENCV_SKIP_PYTHON_LOADER=ON \
                   -DBUILD_opencv_apps=OFF \
                   -DBUILD_SHARED_LIBS=OFF \
                   -DBUILD_TESTS=OFF \
                   -DBUILD_PERF_TESTS=OFF \
                   -DBUILD_DOCS=OFF \
                   -DWITH_QT=OFF \
                   -DWITH_IPP=OFF \
                   -DWITH_V4L=ON \
                   -DBUILD_JPEG=OFF \
                   -DENABLE_PRECOMPILED_HEADERS=OFF \
                /root/code/opencv

        # Note: there is no need to compile the above with python
        # -DPYTHON3_EXECUTABLE=$PYTHON_EXE \
        # -DBUILD_opencv_python3=ON \
        # -DOPENCV_PYTHON3_INSTALL_PATH=python \

        RUN cd /root/code/opencv/build && make -j{MAKE_CPUS} && make install
        '''.format(**locals()))

    # Best-effort pretty-print of the generated Dockerfile
    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code)

    docker_build_cli = ' '.join([
        'docker', 'build',
        # '--build-arg PY_VER={}'.format(PY_VER),
        '--tag {}'.format(DOCKER_TAG),
        '-f {}'.format(dockerfile_fpath),
        '.'
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    info = ub.cmd(docker_build_cli, verbose=3, shell=True)

    if info['ret'] != 0:
        print(ub.color_text('\n--- FAILURE ---', 'red'))
        print('Failed command:')
        print(info['command'])
        print(info['err'])
        print('NOTE: sometimes reruning the command manually works')
        raise Exception(
            'Building docker failed with exit code {}'.format(info['ret']))
    else:
        # write out what the tag is
        with open(join(dpath, 'opencv-docker-tag.txt'), 'w') as file:
            file.write(DOCKER_TAG)
        print(ub.color_text('\n--- SUCCESS ---', 'green'))
def main():
    """
    Build the ``pyhesaff`` manylinux docker base image.

    Writes a Dockerfile (based on manylinux2010_x86_64, with a build
    virtualenv prepared for each supported CPython tag), runs
    ``docker build``, and on success prints instructions for tagging,
    exporting, and publishing the image to quay.io.

    NOTE(review): relies on module-level ``os``, ``ub`` and ``join``
    (presumably ``os.path.join``) — confirm against the file header.
    """
    # TODO: find a better place for root
    ROOT = join(os.getcwd())
    # ROOT = '.'
    os.chdir(ROOT)

    NAME = 'pyhesaff'
    VERSION = '0.1.2'
    DOCKER_TAG = '{}-{}'.format(NAME, VERSION)
    QUAY_REPO = 'quay.io/erotemic/manylinux-for'
    DOCKER_URI = '{QUAY_REPO}:{DOCKER_TAG}'.format(**locals())

    dockerfile_fpath = join(ROOT, 'Dockerfile')
    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    """
    Notes:
        docker run --rm -it quay.io/pypa/manylinux2010_x86_64 /bin/bash
        ---
        ls /opt/python
    """
    BASE_IMAGE = 'quay.io/pypa/manylinux2010_x86_64'
    # One RUN block per supported CPython ABI tag; each prepares a
    # virtualenv with the scikit-build toolchain preinstalled.
    docker_code = ub.codeblock(f'''
        FROM {BASE_IMAGE}

        RUN yum install lz4-devel -y

        RUN MB_PYTHON_TAG=cp27-cp27m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp27-cp27mu && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp35-cp35m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp36-cp36m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp37-cp37m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp38-cp38 && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja
        ''')

    # Collapse each paragraph onto a single line for a compact Dockerfile
    docker_code2 = '\n\n'.join(
        [ub.paragraph(p) for p in docker_code.split('\n\n')])

    # Best-effort pretty-print of the generated Dockerfile
    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code2, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code2)

    docker_build_cli = ' '.join([
        'docker', 'build',
        '--tag {}'.format(DOCKER_TAG),
        '-f {}'.format(dockerfile_fpath),
        '.',
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    if ub.argflag('--dry'):
        print('DRY RUN')
        print('WOULD RUN')
        print(docker_build_cli)
    else:
        info = ub.cmd(docker_build_cli, verbose=3, shell=True)
        if info['ret'] != 0:
            print(ub.color_text('\n--- FAILURE ---', 'red'))
            print('Failed command:')
            print(info['command'])
            print(info['err'])
            print('NOTE: sometimes reruning the command manually works')
            raise Exception('Building docker failed with exit code {}'.format(
                info['ret']))
        else:
            print(ub.color_text('\n--- SUCCESS ---', 'green'))

            # Print follow-up instructions for testing and publishing
            print(
                ub.highlight_code(
                    ub.codeblock(r'''
                    # Finished creating the docker image.
                    # To test / export / publish you can do something like this:

                    # Test that we can get a bash terminal
                    docker run -it {DOCKER_TAG} /bin/bash

                    # Create a tag for the docker image
                    docker tag {DOCKER_TAG} {DOCKER_URI}

                    # Export your docker image to a file
                    docker save -o ${ROOT}/{DOCKER_TAG}.docker.tar {DOCKER_TAG}

                    # Login to a docker registry (we are using quay)

                    # In some cases this works,
                    docker login

                    # But you may need to specify secret credentials
                    load_secrets
                    echo "QUAY_USERNAME = $QUAY_USERNAME"
                    docker login -u $QUAY_USERNAME -p $QUAY_PASSWORD quay.io
                    unload_secrets

                    # Upload the docker image to quay.io
                    docker push {DOCKER_URI}
                    ''').format(
                        NAME=NAME,
                        ROOT=ROOT,
                        DOCKER_TAG=DOCKER_TAG,
                        DOCKER_URI=DOCKER_URI,
                    ),
                    'bash',
                ))

    # Manual publish path, disabled by default
    PUBLISH = 0
    if PUBLISH:
        cmd1 = 'docker tag {DOCKER_TAG} {DOCKER_URI}'.format(**locals())
        cmd2 = 'docker push {DOCKER_URI}'.format(**locals())
        print('-- <push cmds> ---')
        print(cmd1)
        print(cmd2)
        print('-- </push cmds> ---')
def main():
    """
    Build (and optionally deploy) the ``pyhesaff`` wheel inside docker.

    Stages the multibuild repo, the OpenCV source, and this project into
    ``docker/staging``, writes a Dockerfile that compiles a static OpenCV
    and then builds the hesaff wheel with scikit-build, runs
    ``docker build``, and finally copies the built wheel out of the
    container into a host-mounted directory.

    NOTE(review): relies on module-level ``os``, ``sys``, ``ub``, ``join``,
    ``exists``, ``setup`` (for ``setup.version``) and ``stage_self`` —
    confirm against the file header.
    """
    import os
    ROOT = join(os.getcwd())
    # NOTE(review): this immediately overwrites the cwd-based ROOT above —
    # the hard-coded path wins; presumably intentional for local dev.
    ROOT = ub.expandpath('~/code/hesaff')
    os.chdir(ROOT)

    VERSION = setup.version
    PY_VER = sys.version_info.major
    NAME = 'pyhesaff'
    tag = '{}-{}-py{}'.format(NAME, VERSION, PY_VER)

    # context_dpath = ub.ensuredir((ROOT, 'docker/context'))
    staging_dpath = ub.ensuredir((ROOT, 'docker/staging'))

    # Prestage the multibuild repo
    if not exists(join(staging_dpath, 'multibuild')):
        # FIXME: make robust in the case this fails
        info = ub.cmd(
            'git clone https://github.com/matthew-brett/multibuild.git',
            cwd=staging_dpath, verbose=3)

    # Prestage the OpenCV source
    if not exists(join(staging_dpath, 'opencv')):
        # FIXME: make robust in the case this fails
        opencv_version = '4.1.0'
        fpath = ub.grabdata(
            'https://github.com/opencv/opencv/archive/{}.zip'.format(
                opencv_version), verbose=1)
        ub.cmd('ln -s {} .'.format(fpath), cwd=staging_dpath, verbose=3)
        ub.cmd('unzip {}'.format(fpath), cwd=staging_dpath, verbose=3)
        import shutil
        shutil.move(join(staging_dpath, 'opencv-' + opencv_version),
                    join(staging_dpath, 'opencv'))

    # Copy this project itself into the staging area
    stage_self(ROOT, staging_dpath)

    dockerfile_fpath = join(ROOT, 'Dockerfile')
    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    docker_code = ub.codeblock('''
        FROM quay.io/skvark/manylinux1_x86_64

        # SETUP ENV
        ARG MB_PYTHON_VERSION=3.6
        ARG ENABLE_CONTRIB=0
        ARG ENABLE_HEADLESS=1
        ENV PYTHON_VERSION=3.6
        ENV PYTHON_ROOT=/opt/python/cp36-cp36m/
        ENV PYTHONPATH=/opt/python/cp36-cp36m/lib/python3.6/site-packages/
        ENV PATH=/opt/python/cp36-cp36m/bin:$PATH
        ENV PYTHON_EXE=/opt/python/cp36-cp36m/python
        ENV MULTIBUILD_DIR=/root/code/multibuild
        ENV HOME=/root
        # params to bdist_wheel. used to set osx build target.
        ENV TEST_DEPENDS="numpy==1.11.1"
        ENV BDIST_PARAMS=""
        ENV USE_CCACHE=1
        ENV PLAT=x86_64
        ENV UNICODE_WIDTH=32

        # -e BUILD_COMMANDS="$build_cmds" \
        # -e PYTHON_VERSION="$MB_PYTHON_VERSION" \
        # -e UNICODE_WIDTH="$UNICODE_WIDTH" \
        # -e BUILD_COMMIT="$BUILD_COMMIT" \
        # -e CONFIG_PATH="$CONFIG_PATH" \
        # -e ENV_VARS_PATH="$ENV_VARS_PATH" \
        # -e WHEEL_SDIR="$WHEEL_SDIR" \
        # -e MANYLINUX_URL="$MANYLINUX_URL" \
        # -e BUILD_DEPENDS="$BUILD_DEPENDS" \
        # -e USE_CCACHE="$USE_CCACHE" \
        # -e REPO_DIR="$repo_dir" \
        # -e PLAT="$PLAT" \

        # These are defined in the parent image
        # ENV JPEG_INCLUDE_DIR=/opt/libjpeg-turbo/include
        # ENV JPEG_LIBRARY=/opt/libjpeg-turbo/lib64/libjpeg.a

        RUN mkdir -p /io
        WORKDIR /root

        # Setup code / scripts
        COPY docker/staging/multibuild /root/code/multibuild
        # Hack to fix issue
        RUN find $MULTIBUILD_DIR -iname "*.sh" -type f -exec sed -i 's/gh-clone/gh_clone/g' {} +

        # Copy bash configs (mirrors the environs)
        COPY docker/config.sh /root/config.sh
        COPY docker/bashrc.sh /root/.bashrc

        # Setup a virtualenv
        RUN source /root/.bashrc && \
            $PYTHON_EXE -m pip install --upgrade pip && \
            $PYTHON_EXE -m pip install virtualenv && \
            $PYTHON_EXE -m virtualenv --python=$PYTHON_EXE $HOME/venv

        # Install packages in virtual environment
        RUN source /root/.bashrc && \
            pip install cmake ninja scikit-build wheel numpy

        # This is very different for different operating systems
        # https://github.com/skvark/opencv-python/blob/master/setup.py
        COPY docker/staging/opencv /root/code/opencv
        RUN source /root/.bashrc && \
            source code/multibuild/common_utils.sh && \
            source code/multibuild/travis_linux_steps.sh && \
            mkdir -p /root/code/opencv/build && \
            cd /root/code/opencv/build && \
            cmake -G "Unix Makefiles" \
                   -DINSTALL_CREATE_DISTRIB=ON \
                   -DOPENCV_SKIP_PYTHON_LOADER=ON \
                   -DBUILD_opencv_apps=OFF \
                   -DBUILD_SHARED_LIBS=OFF \
                   -DBUILD_TESTS=OFF \
                   -DBUILD_PERF_TESTS=OFF \
                   -DBUILD_DOCS=OFF \
                   -DWITH_QT=OFF \
                   -DWITH_IPP=OFF \
                   -DWITH_V4L=ON \
                   -DBUILD_JPEG=OFF \
                   -DENABLE_PRECOMPILED_HEADERS=OFF \
                   -DJPEG_INCLUDE_DIR=/opt/libjpeg-turbo/include \
                   -DJPEG_LIBRARY=/opt/libjpeg-turbo/lib64/libjpeg.a \
                /root/code/opencv

        # Note: there is no need to compile the above with python
        # -DPYTHON3_EXECUTABLE=$PYTHON_EXE \
        # -DBUILD_opencv_python3=ON \
        # -DOPENCV_PYTHON3_INSTALL_PATH=python \

        RUN source /root/.bashrc && \
            source code/multibuild/common_utils.sh && \
            source code/multibuild/travis_linux_steps.sh && \
            cd /root/code/opencv/build && \
            make -j9 && make install

        COPY docker/staging/hesaff /root/code/hesaff

        # # Use skbuild to build hesaff
        # RUN source /root/.bashrc && \
        #     cd /root/code/hesaff && \
        #     CMAKE_FIND_LIBRARY_SUFFIXES=".a;.so" python setup.py build_ext --inplace

        # export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'

        # Use cmake to build hesaff 9maybe not needed?)
        # RUN source /root/.bashrc && \
        #     mkdir -p /root/code/hesaff/build && \
        #     cd /root/code/hesaff/build && \
        #     CXXFLAGS="-std=c++11 $CXXFLAGS" cmake -G "Unix Makefiles" /root/code/hesaff && \
        #     make

        # Use skbuild to build hesaff
        RUN source /root/.bashrc && \
            cd /root/code/hesaff && \
            python setup.py build && \
            python setup.py bdist_wheel

        # RUN source /root/.bashrc && \
        #     pip install xdoctest
        ''')

    # Best-effort pretty-print of the generated Dockerfile
    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code)

    docker_build_cli = ' '.join([
        'docker', 'build',
        # '--build-arg PY_VER={}'.format(PY_VER),
        '--tag {}'.format(tag),
        '-f {}'.format(dockerfile_fpath),
        '.'
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    info = ub.cmd(docker_build_cli, verbose=3, shell=True)

    if info['ret'] != 0:
        print(ub.color_text('\n--- FAILURE ---', 'red'))
        print('Failed command:')
        print(info['command'])
        print(info['err'])
        print('NOTE: sometimes reruning the command manually works')
        raise Exception('Building docker failed with exit code {}'.format(
            info['ret']))
    else:
        print(ub.color_text('\n--- SUCCESS ---', 'green'))

    # Copy the built wheel out of the container via a mounted directory
    DEPLOY = True
    if DEPLOY:
        VMNT_DIR = '{ROOT}/{NAME}-docker/vmnt'.format(NAME=NAME, ROOT=ROOT)
        print('VMNT_DIR = {!r}'.format(VMNT_DIR))
        ub.ensuredir(VMNT_DIR)

        # TODO: Correctly mangle the ffmpeg libs as done via
        # ls -a ~/.local/conda/envs/py36/lib/python3.6/site-packages/cv2/
        # ls ~/.local/conda/envs/py36/lib/python3.6/site-packages/cv2/.libs
        # cp code/hesaff/build/libhesaff.so /root/vmnt
        # cp /root/ffmpeg_build/lib/libavcodec.so.58 /root/vmnt
        # cp /root/ffmpeg_build/lib/libavformat.so.58 /root/vmnt
        # cp /root/ffmpeg_build/lib/libavutil.so.56 /root/vmnt
        # cp /root/ffmpeg_build/lib/libswscale.so.5 /root/vmnt
        inside_cmds = ' && '.join(
            ub.codeblock('''
                cp code/hesaff/dist/pyhesaff*.whl /root/vmnt
                ''').split('\n'))

        docker_run_cli = ' '.join([
            'docker', 'run',
            '-v {}:/root/vmnt/'.format(VMNT_DIR),
            '-it', tag,
            'bash -c "{}"'.format(inside_cmds)
        ])
        print(docker_run_cli)
        info = ub.cmd(docker_run_cli, verbose=3)
        assert info['ret'] == 0

        # import shutil
        # PKG_DIR = join(ROOT, 'pyhesaff')
        # shutil.copy(join(VMNT_DIR, 'libhesaff.so'), join(PKG_DIR, 'libhesaff-manylinux1_x86_64.so'))
        # TODO: do this correctly
        # shutil.copy(join(VMNT_DIR, 'libhesaff.so'), join(PKG_DIR, 'libavcodec.so.58'))
        # shutil.copy(join(VMNT_DIR, 'libavformat.so.58'), join(PKG_DIR, 'libavformat.so.58'))
        # shutil.copy(join(VMNT_DIR, 'libavutil.so.56'), join(PKG_DIR, 'libavutil.so.56'))
        # shutil.copy(join(VMNT_DIR, 'libswscale.so.5'), join(PKG_DIR, 'libswscale.so.5'))

    # print(ub.highlight_code(ub.codeblock(
    # Print follow-up instructions for testing and exporting the image
    print(
        ub.highlight_code(
            ub.codeblock(r'''
            # Finished creating the docker image.
            # To test / export you can do something like this:

            VMNT_DIR={ROOT}/{NAME}-docker/vmnt
            mkdir -p VMNT_DIR
            TAG={tag}

            # Test that we can get a bash terminal
            docker run -v $VMNT_DIR:/root/vmnt -it {tag} bash

            # Move deployment to the vmnt directory
            docker run -v $VMNT_DIR:/root/vmnt -it {tag} bash -c 'cd /root/code/hesaff && python3 -m xdoctest pyhesaff'

            # Run system tests
            docker run -v $VMNT_DIR:/root/vmnt -it {tag} bash -c 'cd /root/code/hesaff && python3 run_doctests.sh'

            # Inside bash test that we can fit a new model
            python -m pyhessaff demo

            mkdir -p ${ROOT}/{NAME}-docker/dist
            docker save -o ${ROOT}/{NAME}-docker/dist/{tag}.docker.tar {tag}
            ''').format(NAME=NAME, ROOT=ROOT, tag=tag), 'bash'))