Esempio n. 1
0
def test_build_with_conda_not_on_path(testing_workdir):
    """Verify a build still succeeds when a broken conda is first on PATH."""
    with put_bad_conda_on_path(testing_workdir):
        # A subprocess is the simplest way to guarantee the build sees the
        # PATH manipulated by the context manager above.
        recipe = os.path.join(metadata_dir, "python_run")
        build_cmd = 'conda-build {0} --no-anaconda-upload'.format(recipe)
        check_call_env(build_cmd.split(), env=os.environ)
def test_skeleton_pypi(testing_workdir):
    """published in docs at http://conda.pydata.org/docs/build_tutorials/pkgs.html"""
    bin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'
    conda_path = os.path.join(sys.prefix, bin_dirname, 'conda')
    # Generate a recipe for Click from PyPI metadata, then build it.
    check_call_env('{0} skeleton pypi Click'.format(conda_path).split())
    check_call_env('{0} build click'.format(conda_path).split())
Esempio n. 3
0
def apply_patch(src_dir, path, config, git=None):
    """Apply the patch file *path* to the source tree at *src_dir*.

    Uses ``git am`` when *git* is provided and the patch is in git
    format-patch style; otherwise falls back to the ``patch`` executable.
    Exits the process if the patch file is missing or no patch tool is found.
    """
    if not isfile(path):
        sys.exit('Error: no such patch: %s' % path)

    files, is_git_format = _get_patch_file_details(path)
    if git and is_git_format:
        # Prevents git from asking interactive questions,
        # also necessary to achieve sha1 reproducibility;
        # as is --committer-date-is-author-date. By this,
        # we mean a round-trip of git am/git format-patch
        # gives the same file.
        # Copy the environment: the previous code aliased os.environ and
        # therefore leaked GIT_COMMITTER_* into the rest of the process.
        git_env = os.environ.copy()
        git_env['GIT_COMMITTER_NAME'] = 'conda-build'
        git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'
        check_call_env([git, 'am', '--committer-date-is-author-date', path],
                       cwd=src_dir, stdout=None, env=git_env)
    else:
        print('Applying patch: %r' % path)
        patch = external.find_executable('patch', config.build_prefix)
        if patch is None:
            sys.exit("""\
        Error:
            Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
            You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
            or conda, m2-patch (Windows),
        """ % (os.pathsep.join(external.dir_paths)))
        patch_strip_level = _guess_patch_strip_level(files, src_dir)
        patch_args = ['-p%d' % patch_strip_level, '-i', path]
        if sys.platform == 'win32':
            # GNU patch needs LF line endings in the patch file on Windows.
            patch_args[-1] = _ensure_unix_line_endings(path)
        check_call_env([patch] + patch_args, cwd=src_dir)
        if sys.platform == 'win32' and os.path.exists(patch_args[-1]):
            os.remove(patch_args[-1])  # clean up .patch_unix file
Esempio n. 4
0
def build(m, bld_bat, stats):
    """Write and run the Windows build script (bld.bat) for metadata *m*.

    :param m: recipe metadata object
    :param bld_bat: path to the recipe's bld.bat script
    :param stats: dict that receives subprocess resource-usage statistics
    """
    with path_prepended(m.config.build_prefix):
        with path_prepended(m.config.host_prefix):
            env = environ.get_dict(m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # hard-code this because we never want pip's build isolation
    #    https://github.com/conda/conda-build/pull/2972#discussion_r198290241
    #
    # Note that pip env "NO" variables are inverted logic.
    #      PIP_NO_BUILD_ISOLATION=False means don't use build isolation.
    #
    # Use the *string* 'False' here: the bld.bat writer below skips falsy
    # values ("if value:"), so a Python bool False would never be written
    # out and the setting would silently have no effect.
    env["PIP_NO_BUILD_ISOLATION"] = 'False'
    # some other env vars to have pip ignore dependencies.
    # we supply them ourselves instead.
    #    See note above about inverted logic on "NO" variables
    env["PIP_NO_DEPENDENCIES"] = True
    env["PIP_IGNORE_INSTALLED"] = True

    # pip's cache directory (PIP_NO_CACHE_DIR) should not be
    # disabled as this results in .egg-info rather than
    # .dist-info directories being created, see gh-3094
    # set PIP_CACHE_DIR to a path in the work dir that does not exist.
    env['PIP_CACHE_DIR'] = m.config.pip_cache_dir

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Make sure the Library BIN/INC/LIB dirs exist before the build starts.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = m.config.work_dir
    if os.path.isfile(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            # more debuggable with echo on
            fo.write('@echo on\n')
            for key, value in env.items():
                if value:
                    fo.write('set "{key}={value}"\n'.format(key=key, value=value))
            if not m.uses_new_style_compiler_activation:
                fo.write(msvc_env_cmd(bits=m.config.host_arch, config=m.config,
                                    override=m.get_value('build/msvc_compiler', None)))
            # Reset echo on, because MSVC scripts might have turned it off
            fo.write('@echo on\n')
            fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
            fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
            if m.config.activate and m.name() != 'conda':
                _write_bat_activation_text(fo, m)
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = ['cmd.exe', '/c', 'bld.bat']
        check_call_env(cmd, cwd=src_dir, stats=stats)

    fix_staged_scripts(join(m.config.host_prefix, 'Scripts'), config=m.config)
Esempio n. 5
0
def test_skeleton_pypi(testing_workdir):
    """published in docs at https://docs.conda.io/projects/conda-build/en/latest/user-guide/tutorials/build-pkgs-skeleton.html"""
    subdir = 'Scripts' if sys.platform == 'win32' else 'bin'
    conda_path = os.path.join(sys.prefix, subdir, 'conda')
    # First create the recipe from PyPI, then build the resulting package.
    check_call_env((conda_path + ' skeleton pypi click').split())
    check_call_env((conda_path + ' build click').split())
Esempio n. 6
0
def testing_env(testing_workdir, request, monkeypatch):
    """Create a throwaway conda env in the test workdir and put it on PATH."""
    env_path = os.path.join(testing_workdir, 'env')

    python_spec = 'python={}'.format(".".join(sys.version.split('.')[:2]))
    check_call_env(['conda', 'create', '-yq', '-p', env_path, python_spec])
    new_path = prepend_bin_path(os.environ.copy(), env_path,
                                prepend_prefix=True)['PATH']
    # monkeypatch restores PATH automatically at teardown.
    monkeypatch.setenv('PATH', new_path)
    # cleanup is done by just cleaning up the testing_workdir
    return env_path
Esempio n. 7
0
def testing_env(testing_workdir, request, monkeypatch):
    """Provision a fresh conda environment and prepend its bin dir to PATH."""
    env_path = os.path.join(testing_workdir, 'env')

    major_minor = ".".join(sys.version.split('.')[:2])
    check_call_env(['conda', 'create', '-yq', '-p', env_path,
                    'python={0}'.format(major_minor)])
    patched = prepend_bin_path(os.environ.copy(), env_path, prepend_prefix=True)
    monkeypatch.setenv('PATH', patched['PATH'])
    # cleanup is done by just cleaning up the testing_workdir
    return env_path
Esempio n. 8
0
def build(m, bld_bat, stats, provision_only=False):
    """Write the Windows build scripts for *m* and (optionally) run them.

    :param m: recipe metadata object
    :param bld_bat: path to the recipe's bld.bat script
    :param stats: dict that receives subprocess resource-usage statistics
    :param provision_only: when True, only write the scripts; do not execute
    """
    # Resolve the build environment with both prefixes temporarily on PATH.
    with path_prepended(m.config.host_prefix):
        with path_prepended(m.config.build_prefix):
            env = environ.get_dict(m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # hard-code this because we never want pip's build isolation
    #    https://github.com/conda/conda-build/pull/2972#discussion_r198290241
    #
    # Note that pip env "NO" variables are inverted logic.
    #      PIP_NO_BUILD_ISOLATION=False means don't use build isolation.
    #
    env["PIP_NO_BUILD_ISOLATION"] = 'False'
    # some other env vars to have pip ignore dependencies.
    # we supply them ourselves instead.
    #    See note above about inverted logic on "NO" variables
    env["PIP_NO_DEPENDENCIES"] = True
    env["PIP_IGNORE_INSTALLED"] = True

    # pip's cache directory (PIP_NO_CACHE_DIR) should not be
    # disabled as this results in .egg-info rather than
    # .dist-info directories being created, see gh-3094
    # set PIP_CACHE_DIR to a path in the work dir that does not exist.
    env['PIP_CACHE_DIR'] = m.config.pip_cache_dir

    # tell pip to not get anything from PyPI, please.  We have everything we need
    # locally, and if we don't, it's a problem.
    env["PIP_NO_INDEX"] = True

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Ensure the Library BIN/INC/LIB directories exist before the build runs.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    work_script, env_script = write_build_scripts(m, env, bld_bat)

    if not provision_only and os.path.isfile(work_script):
        cmd = ['cmd.exe', '/c', os.path.basename(work_script)]
        # rewrite long paths in stdout back to their env variables
        if m.config.debug:
            rewrite_env = None
        else:
            rewrite_env = {
                k: env[k]
                for k in ['PREFIX', 'BUILD_PREFIX', 'SRC_DIR'] if k in env
            }
            print("Rewriting env in output: %s" % pprint.pformat(rewrite_env))
        check_call_env(cmd,
                       cwd=m.config.work_dir,
                       stats=stats,
                       rewrite_stdout_env=rewrite_env)
        fix_staged_scripts(join(m.config.host_prefix, 'Scripts'),
                           config=m.config)
Esempio n. 9
0
def test_subprocess_stats_call(testing_workdir):
    """check_call_env/check_output_env must populate the stats dict and
    still raise on failing commands."""
    call_stats = {}
    utils.check_call_env(['hostname'], stats=call_stats, cwd=testing_workdir)
    assert call_stats
    output_stats = {}
    captured = utils.check_output_env(['hostname'], stats=output_stats,
                                      cwd=testing_workdir)
    assert captured
    assert output_stats
    # A non-zero exit status must surface as CalledProcessError.
    with pytest.raises(subprocess.CalledProcessError):
        utils.check_call_env(['bash', '-c', 'exit 1'], cwd=testing_workdir)
Esempio n. 10
0
def test_subprocess_stats_call(testing_workdir):
    """Both subprocess wrappers should record stats for successful runs."""
    for runner in (utils.check_call_env, utils.check_output_env):
        run_stats = {}
        result = runner(['hostname'], stats=run_stats, cwd=testing_workdir)
        if runner is utils.check_output_env:
            assert result
        assert run_stats
    # Failures still propagate as CalledProcessError.
    with pytest.raises(subprocess.CalledProcessError):
        utils.check_call_env(['bash', '-c', 'exit 1'], cwd=testing_workdir)
Esempio n. 11
0
def test_subprocess_stats_call():
    """stats dicts are filled in by the env-aware subprocess helpers."""
    call_stats = {}
    utils.check_call_env(['ls'], stats=call_stats)
    assert call_stats
    output_stats = {}
    listing = utils.check_output_env(['ls'], stats=output_stats)
    assert listing
    assert output_stats
    # A failing command must raise rather than return.
    with pytest.raises(subprocess.CalledProcessError):
        utils.check_call_env(['bash', '-c', 'exit 1'])
Esempio n. 12
0
def test_subprocess_stats_call():
    """Exercise check_call_env/check_output_env stats collection and errors."""
    stats_a = {}
    utils.check_call_env(['ls'], stats=stats_a)
    assert stats_a

    stats_b = {}
    out = utils.check_output_env(['ls'], stats=stats_b)
    assert out and stats_b

    with pytest.raises(subprocess.CalledProcessError):
        utils.check_call_env(['bash', '-c', 'exit 1'])
Esempio n. 13
0
 def try_apply_patch(patch, patch_args, cwd, stdout, stderr):
     """Dry-run *patch* with *patch_args*, then apply it for real.

     Raises whatever check_call_env raises when the dry run (or the real
     application) fails; the reject temp file is always cleaned up.
     """
     # An old reference: https://unix.stackexchange.com/a/243748/34459
     #
     # I am worried that '--ignore-whitespace' may be destructive. If so we should
     # avoid passing it, particularly in the initial (most likely to succeed) calls.
     #
     # From here-in I define a 'native' patch as one which has:
     # 1. LF for the patch block metadata.
     # 2. CRLF or LF for the actual patched lines matching those of the source lines.
     #
     # Calls to a raw 'patch' are destructive in various ways:
     # 1. It leaves behind .rej and .orig files
     # 2. If you pass it a patch with incorrect CRLF changes and do not pass --binary and
     #    if any of those blocks *can* be applied, then the whole file gets written out with
     #    LF.  This cannot be reversed either; the text changes will be reversed but not
     #    line-feed changes (since all line-endings get changed, not just those of the of
     #    patched lines)
     # 3. If patching fails, the bits that succeeded remain, so patching is not at all
     #    atomic.
     #
     # Still, we do our best to mitigate all of this as follows:
     # 1. We use --dry-run to test for applicability first.
     # 2 We check for native application of a native patch (--binary, without --ignore-whitespace)
     #
     # Some may bemoan the loss of patch failure artifacts, but it is fairly random which
     # patch and patch attempt they apply to so their informational value is low, besides that,
     # they are ugly.
     #
     import tempfile
     # mkstemp is the supported API (tempfile._get_candidate_names is
     # private/undocumented).  'patch -r' overwrites the reject file, so a
     # pre-existing empty file is fine.
     temp_fd, temp_name = tempfile.mkstemp(suffix='.rej')
     os.close(temp_fd)
     base_patch_args = ['--no-backup-if-mismatch', '--batch'
                        ] + patch_args + ['-r', temp_name]
     log = get_logger(__name__)
     try:
         try_patch_args = base_patch_args[:]
         try_patch_args.append('--dry-run')
         log.debug("dry-run applying with\n{} {}".format(
             patch, try_patch_args))
         check_call_env([patch] + try_patch_args,
                        cwd=cwd,
                        stdout=stdout,
                        stderr=stderr)
         # Dry run succeeded: apply for real.  (The previous
         # 'except Exception as e: raise e' wrapper was a no-op and has
         # been removed; failures simply propagate.)
         check_call_env([patch] + base_patch_args,
                        cwd=cwd,
                        stdout=stdout,
                        stderr=stderr)
     finally:
         if os.path.exists(temp_name):
             os.unlink(temp_name)
Esempio n. 14
0
def package_exists(package_name):
    """Return True when cpan can locate *package_name*, else False."""
    cmd = ['cpan', '-D', package_name]
    if on_win:
        # cpan is not directly executable on Windows; run it via cmd.exe.
        cmd = ['cmd.exe', '/c'] + cmd
    try:
        check_call_env(cmd)
    except subprocess.CalledProcessError:
        return False
    return True
Esempio n. 15
0
def package_exists(package_name):
    """Query cpan for *package_name*; a zero exit status means it exists."""
    command = ['cpan', '-D', package_name]
    if on_win:
        command.insert(0, '/c')
        command.insert(0, 'cmd.exe')
    try:
        check_call_env(command)
    except subprocess.CalledProcessError:
        in_repo = False
    else:
        in_repo = True
    return in_repo
Esempio n. 16
0
def test_checkout_tool_as_dependency(testing_workdir, testing_config, monkeypatch):
    """Build a recipe whose checkout tool (svn) must come from its deps."""
    # "hide" svn by putting a known bad one on PATH
    exename = dummy_executable(testing_workdir, "svn")
    monkeypatch.setenv("PATH", testing_workdir, prepend=os.pathsep)
    FNULL = open(os.devnull, 'w')
    # Sanity check: the dummy svn must be the one found, and it must fail.
    # (pytest.raises' ``message`` kwarg was removed in pytest 4.0; it only
    # customized the no-exception failure text, so dropping it is safe.)
    with pytest.raises(subprocess.CalledProcessError):
        check_call_env([exename, '--version'], stderr=FNULL)
    FNULL.close()
    env = dict(os.environ)
    env["PATH"] = os.pathsep.join([testing_workdir, env["PATH"]])
    api.build(os.path.join(metadata_dir, '_checkout_tool_as_dependency'), config=testing_config)
Esempio n. 17
0
def _clean(setup_py):
    '''
    This invokes:
    $ python setup.py clean

    :param setup_py: path to setup.py
    '''
    # first call setup.py clean
    clean_cmd = ['python', setup_py, 'clean']
    check_call_env(clean_cmd)
    print("Completed: %s" % " ".join(clean_cmd))
    print("===============================================")
Esempio n. 18
0
def _clean(setup_py):
    '''
    This invokes:
    $ python setup.py clean

    :param setup_py: path to setup.py
    '''
    # Run "setup.py clean" and report what was executed.
    command = ['python', setup_py, 'clean']
    check_call_env(command)
    print("Completed: " + " ".join(command))
    print("===============================================")
Esempio n. 19
0
def svn_source(metadata, config):
    ''' Download a source from SVN repo. '''
    # Silence svn output unless verbose; FNULL is closed at the end.
    if config.verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    meta = metadata.get_section('source')

    def parse_bool(s):
        # Accept common truthy spellings used in meta.yaml values.
        return str(s).lower().strip() in ('yes', 'true', '1', 'on')

    svn = external.find_executable('svn', config.build_prefix)
    if not svn:
        sys.exit("Error: svn is not installed")
    svn_url = meta['svn_url']
    svn_revision = meta.get('svn_rev') or 'head'
    svn_ignore_externals = parse_bool(meta.get('svn_ignore_externals') or 'no')
    if not isdir(config.svn_cache):
        os.makedirs(config.svn_cache)
    # Derive a filesystem-safe cache directory name from the URL.
    svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
    cache_repo = join(config.svn_cache, svn_dn)
    if svn_ignore_externals:
        extra_args = ['--ignore-externals']
    else:
        extra_args = []
    if isdir(cache_repo):
        # Cache hit: update the existing checkout to the requested revision.
        check_call_env([svn, 'up', '-r', svn_revision] + extra_args,
                       cwd=cache_repo,
                       stdout=stdout,
                       stderr=stderr)
    else:
        check_call_env([svn, 'co', '-r', svn_revision] + extra_args +
                       [svn_url, cache_repo],
                       stdout=stdout,
                       stderr=stderr)
        assert isdir(cache_repo)

    # now copy into work directory
    copy_into(cache_repo,
              config.work_dir,
              config.timeout,
              symlinks=True,
              locking=config.locking)

    if not config.verbose:
        FNULL.close()

    return config.work_dir
Esempio n. 20
0
def build(m, bld_bat, stats, provision_only=False):
    """Write the Windows build scripts for *m* and (optionally) execute them.

    :param m: recipe metadata object
    :param bld_bat: path to the recipe's bld.bat script
    :param stats: dict that receives subprocess resource-usage statistics
    :param provision_only: when True, only write the scripts; do not run
    """
    # Resolve the build environment with both prefixes temporarily on PATH.
    with path_prepended(m.config.host_prefix):
        with path_prepended(m.config.build_prefix):
            env = environ.get_dict(m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # hard-code this because we never want pip's build isolation
    #    https://github.com/conda/conda-build/pull/2972#discussion_r198290241
    #
    # Note that pip env "NO" variables are inverted logic.
    #      PIP_NO_BUILD_ISOLATION=False means don't use build isolation.
    #
    env["PIP_NO_BUILD_ISOLATION"] = 'False'
    # some other env vars to have pip ignore dependencies.
    # we supply them ourselves instead.
    #    See note above about inverted logic on "NO" variables
    env["PIP_NO_DEPENDENCIES"] = True
    env["PIP_IGNORE_INSTALLED"] = True

    # pip's cache directory (PIP_NO_CACHE_DIR) should not be
    # disabled as this results in .egg-info rather than
    # .dist-info directories being created, see gh-3094
    # set PIP_CACHE_DIR to a path in the work dir that does not exist.
    env['PIP_CACHE_DIR'] = m.config.pip_cache_dir

    # tell pip to not get anything from PyPI, please.  We have everything we need
    # locally, and if we don't, it's a problem.
    env["PIP_NO_INDEX"] = True

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Ensure the Library BIN/INC/LIB directories exist before the build runs.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    work_script, env_script = write_build_scripts(m, env, bld_bat)

    if not provision_only and os.path.isfile(work_script):
        # /d skips AutoRun registry commands so the shell starts clean.
        cmd = ['cmd.exe', '/d', '/c', os.path.basename(work_script)]
        # rewrite long paths in stdout back to their env variables
        if m.config.debug:
            rewrite_env = None
        else:
            rewrite_env = {
                k: env[k]
                for k in ['PREFIX', 'BUILD_PREFIX', 'SRC_DIR'] if k in env
            }
            print("Rewriting env in output: %s" % pprint.pformat(rewrite_env))
        check_call_env(cmd, cwd=m.config.work_dir, stats=stats, rewrite_stdout_env=rewrite_env)
        fix_staged_scripts(join(m.config.host_prefix, 'Scripts'), config=m.config)
def test_checkout_tool_as_dependency(testing_workdir, test_config):
    """Build a recipe whose checkout tool (svn) must come from its deps."""
    # temporarily necessary because we have custom rebuilt svn for longer prefix here
    test_config.channel_urls = ('conda_build_test', )
    # "hide" svn by putting a known bad one on PATH
    exename = dummy_executable(testing_workdir, "svn")
    os.environ["PATH"] = os.pathsep.join([testing_workdir, os.environ["PATH"]])
    FNULL = open(os.devnull, 'w')
    # Sanity check: the dummy svn must be the one found, and it must fail.
    # (pytest.raises' ``message`` kwarg was removed in pytest 4.0; it only
    # customized the no-exception failure text, so dropping it is safe.)
    with pytest.raises(subprocess.CalledProcessError):
        check_call_env([exename, '--version'], stderr=FNULL)
    FNULL.close()
    env = dict(os.environ)
    env["PATH"] = os.pathsep.join([testing_workdir, env["PATH"]])
    api.build(os.path.join(metadata_dir, '_checkout_tool_as_dependency'), config=test_config)
Esempio n. 22
0
def test_checkout_tool_as_dependency(testing_workdir, test_config):
    """The recipe's own svn dependency must be used, not the PATH dummy."""
    # temporarily necessary because we have custom rebuilt svn for longer prefix here
    test_config.channel_urls = ('conda_build_test', )
    # "hide" svn by putting a known bad one on PATH
    exename = dummy_executable(testing_workdir, "svn")
    os.environ["PATH"] = os.pathsep.join([testing_workdir, os.environ["PATH"]])
    FNULL = open(os.devnull, 'w')
    # The ``message`` kwarg to pytest.raises was removed in pytest 4.0 and
    # never asserted anything about the exception, so it is dropped here.
    with pytest.raises(subprocess.CalledProcessError):
        check_call_env([exename, '--version'], stderr=FNULL)
    FNULL.close()
    env = dict(os.environ)
    env["PATH"] = os.pathsep.join([testing_workdir, env["PATH"]])
    api.build(os.path.join(metadata_dir, '_checkout_tool_as_dependency'), config=test_config)
Esempio n. 23
0
def hg_source(source_dict, src_dir, hg_cache, verbose):
    ''' Download a source from Mercurial repo. '''
    # Suppress hg output unless verbose; FNULL is closed at the end.
    if verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    hg_url = source_dict['hg_url']
    if not isdir(hg_cache):
        os.makedirs(hg_cache)
    # Derive a filesystem-safe cache directory name from the URL.
    hg_dn = hg_url.split(':')[-1].replace('/', '_')
    cache_repo = join(hg_cache, hg_dn)
    if isdir(cache_repo):
        # Cache hit: refresh the local mirror.
        check_call_env(['hg', 'pull'], cwd=cache_repo, stdout=stdout, stderr=stderr)
    else:
        check_call_env(['hg', 'clone', hg_url, cache_repo], stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # now clone in to work directory
    update = source_dict.get('hg_tag') or 'tip'
    if verbose:
        print('checkout: %r' % update)

    check_call_env(['hg', 'clone', cache_repo, src_dir], stdout=stdout,
                   stderr=stderr)
    check_call_env(['hg', 'update', '-C', update], cwd=src_dir, stdout=stdout,
                   stderr=stderr)

    if not verbose:
        FNULL.close()

    return src_dir
Esempio n. 24
0
def test_checkout_tool_as_dependency(testing_workdir, testing_config,
                                     monkeypatch):
    """Build a recipe whose checkout tool (svn) must come from its deps."""
    # "hide" svn by putting a known bad one on PATH
    exename = dummy_executable(testing_workdir, "svn")
    monkeypatch.setenv("PATH", testing_workdir, prepend=os.pathsep)
    FNULL = open(os.devnull, 'w')
    # Sanity check: the dummy svn must be the one found, and it must fail.
    # (pytest.raises' ``message`` kwarg was removed in pytest 4.0; it only
    # customized the no-exception failure text, so dropping it is safe.)
    with pytest.raises(subprocess.CalledProcessError):
        check_call_env([exename, '--version'], stderr=FNULL)
    FNULL.close()
    env = dict(os.environ)
    env["PATH"] = os.pathsep.join([testing_workdir, env["PATH"]])
    api.build(os.path.join(metadata_dir, '_checkout_tool_as_dependency'),
              config=testing_config)
Esempio n. 25
0
def hg_source(source_dict, src_dir, hg_cache, verbose):
    ''' Download a source from Mercurial repo. '''
    # Route subprocess output to /dev/null unless verbose was requested.
    if verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    hg_url = source_dict['hg_url']
    if not isdir(hg_cache):
        os.makedirs(hg_cache)
    # The cache directory name is the URL path with '/' made filesystem-safe.
    hg_dn = hg_url.split(':')[-1].replace('/', '_')
    cache_repo = join(hg_cache, hg_dn)
    if isdir(cache_repo):
        # Existing mirror: just pull new changesets.
        check_call_env(['hg', 'pull'], cwd=cache_repo, stdout=stdout, stderr=stderr)
    else:
        check_call_env(['hg', 'clone', hg_url, cache_repo], stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # now clone in to work directory
    update = source_dict.get('hg_tag') or 'tip'
    if verbose:
        print('checkout: %r' % update)

    check_call_env(['hg', 'clone', cache_repo, src_dir], stdout=stdout,
                   stderr=stderr)
    check_call_env(['hg', 'update', '-C', update], cwd=src_dir, stdout=stdout,
                   stderr=stderr)

    if not verbose:
        FNULL.close()

    return src_dir
Esempio n. 26
0
def testing_env(testing_workdir, request):
    """Create a test conda env, prepend it to PATH, and restore PATH later."""
    env_path = os.path.join(testing_workdir, 'env')

    python_spec = 'python={0}'.format(".".join(sys.version.split('.')[:2]))
    check_call_env(['conda', 'create', '-yq', '-p', env_path, python_spec])
    path_backup = os.environ['PATH']
    os.environ['PATH'] = prepend_bin_path(os.environ.copy(), env_path,
                                          prepend_prefix=True)['PATH']

    def reset_path():
        # Undo the PATH mutation; the env dir itself goes with testing_workdir.
        os.environ['PATH'] = path_backup

    request.addfinalizer(reset_path)
    return env_path
Esempio n. 27
0
def svn_source(source_dict,
               src_dir,
               svn_cache,
               verbose=True,
               timeout=900,
               locking=True):
    ''' Download a source from SVN repo.

    :param source_dict: 'source' section values (svn_url, svn_rev, ...)
    :param src_dir: work directory the checkout is copied into
    :param svn_cache: directory holding cached svn checkouts
    :param verbose: when False, svn output is sent to /dev/null
    :param timeout: passed through to copy_into
    :param locking: passed through to copy_into
    '''
    if verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'wb')
        stdout = FNULL
        stderr = FNULL

    def parse_bool(s):
        # Accept common truthy spellings used in meta.yaml values.
        return str(s).lower().strip() in ('yes', 'true', '1', 'on')

    svn_url = source_dict['svn_url']
    svn_revision = source_dict.get('svn_rev') or 'head'
    svn_ignore_externals = parse_bool(
        source_dict.get('svn_ignore_externals') or 'no')
    if not isdir(svn_cache):
        os.makedirs(svn_cache)
    # Derive a filesystem-safe cache directory name from the URL.
    svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
    cache_repo = join(svn_cache, svn_dn)
    if svn_ignore_externals:
        extra_args = ['--ignore-externals']
    else:
        extra_args = []
    if isdir(cache_repo):
        # Cache hit: update the existing checkout to the requested revision.
        check_call_env(['svn', 'up', '-r', svn_revision] + extra_args,
                       cwd=cache_repo,
                       stdout=stdout,
                       stderr=stderr)
    else:
        check_call_env(['svn', 'co', '-r', svn_revision] + extra_args +
                       [svn_url, cache_repo],
                       stdout=stdout,
                       stderr=stderr)
        assert isdir(cache_repo)

    # now copy into work directory
    copy_into(cache_repo, src_dir, timeout, symlinks=True, locking=locking)

    if not verbose:
        FNULL.close()

    return src_dir
Esempio n. 28
0
def build(m, bld_bat, stats):
    """Write and run the Windows build script (bld.bat) for metadata *m*.

    :param m: recipe metadata object
    :param bld_bat: path to the recipe's bld.bat script
    :param stats: dict that receives subprocess resource-usage statistics
    """
    with path_prepended(m.config.build_prefix):
        with path_prepended(m.config.host_prefix):
            env = environ.get_dict(m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # hard-code this because we never want pip's build isolation
    #    https://github.com/conda/conda-build/pull/2972#discussion_r198290241
    # Use the *string* 'False': the bld.bat writer below skips falsy values
    # ("if value:"), so a Python bool False would never be written out and
    # the setting would silently have no effect.  (PIP "NO" variables use
    # inverted logic: PIP_NO_BUILD_ISOLATION=False disables isolation.)
    env["PIP_NO_BUILD_ISOLATION"] = 'False'

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Make sure the Library BIN/INC/LIB dirs exist before the build starts.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = m.config.work_dir
    if os.path.isfile(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            # more debuggable with echo on
            fo.write('@echo on\n')
            for key, value in env.items():
                if value:
                    fo.write('set "{key}={value}"\n'.format(key=key,
                                                            value=value))
            if not m.uses_new_style_compiler_activation:
                fo.write(
                    msvc_env_cmd(bits=m.config.host_arch,
                                 config=m.config,
                                 override=m.get_value('build/msvc_compiler',
                                                      None)))
            # Reset echo on, because MSVC scripts might have turned it off
            fo.write('@echo on\n')
            fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
            fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
            if m.config.activate and m.name() != 'conda':
                _write_bat_activation_text(fo, m)
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = ['cmd.exe', '/c', 'bld.bat']
        check_call_env(cmd, cwd=src_dir, stats=stats)

    fix_staged_scripts(join(m.config.host_prefix, 'Scripts'), config=m.config)
Esempio n. 29
0
def _build_ext(setup_py):
    '''
    Define a develop function - similar to build function
    todo: need to test on win32 and linux

    It invokes:
    $ python setup.py build_ext --inplace

    :param setup_py: path to setup.py
    '''

    # Build extension modules in place and report what was executed.
    build_cmd = ['python', setup_py, 'build_ext', '--inplace']
    check_call_env(build_cmd)
    print("Completed: %s" % " ".join(build_cmd))
    print("===============================================")
Esempio n. 30
0
def _build_ext(setup_py):
    '''
    Define a develop function - similar to build function
    todo: need to test on win32 and linux

    It invokes:
    $ python setup.py build_ext --inplace

    :param setup_py: path to setup.py
    '''

    # next call setup.py develop
    command = ['python', setup_py, 'build_ext', '--inplace']
    check_call_env(command)
    print("Completed: " + " ".join(command))
    print("===============================================")
Esempio n. 31
0
def svn_source(metadata, config):
    ''' Download a source from SVN repo. '''
    # Route subprocess output to /dev/null unless verbose was requested.
    if config.verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    meta = metadata.get_section('source')

    def parse_bool(s):
        # Accept common truthy spellings used in meta.yaml values.
        return str(s).lower().strip() in ('yes', 'true', '1', 'on')

    svn = external.find_executable('svn', config.build_prefix)
    if not svn:
        sys.exit("Error: svn is not installed")
    svn_url = meta['svn_url']
    svn_revision = meta.get('svn_rev') or 'head'
    svn_ignore_externals = parse_bool(meta.get('svn_ignore_externals') or 'no')
    if not isdir(config.svn_cache):
        os.makedirs(config.svn_cache)
    # The cache directory name is the URL with separators made filesystem-safe.
    svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
    cache_repo = join(config.svn_cache, svn_dn)
    if svn_ignore_externals:
        extra_args = ['--ignore-externals']
    else:
        extra_args = []
    if isdir(cache_repo):
        # Existing checkout: update it to the requested revision.
        check_call_env([svn, 'up', '-r', svn_revision] + extra_args, cwd=cache_repo,
                       stdout=stdout, stderr=stderr)
    else:
        check_call_env([svn, 'co', '-r', svn_revision] + extra_args + [svn_url, cache_repo],
                       stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # now copy into work directory
    copy_into(cache_repo, config.work_dir, config.timeout, symlinks=True, locking=config.locking)

    if not config.verbose:
        FNULL.close()

    return config.work_dir
Esempio n. 32
0
def build(m, bld_bat):
    """Write and run the Windows build script (bld.bat) for metadata *m*.

    :param m: recipe metadata object
    :param bld_bat: path to the recipe's bld.bat script
    """
    with path_prepended(m.config.build_prefix):
        env = environ.get_dict(config=m.config, m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Make sure the Library BIN/INC/LIB dirs exist before the build starts.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = m.config.work_dir
    if os.path.isfile(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            # more debuggable with echo on
            fo.write('@echo on\n')
            for key, value in env.items():
                fo.write('set "{key}={value}"\n'.format(key=key, value=value))
            # NOTE(review): `bits` is not defined anywhere in this function —
            # presumably a module-level global; other variants of this
            # function pass m.config.host_arch here. Confirm before relying
            # on this code path.
            fo.write(
                msvc_env_cmd(bits=bits,
                             config=m.config,
                             override=m.get_value('build/msvc_compiler',
                                                  None)))
            # Reset echo on, because MSVC scripts might have turned it off
            fo.write('@echo on\n')
            fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
            fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
            if m.config.activate:
                fo.write(
                    'call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
                        conda_root=root_script_dir,
                        prefix=m.config.build_prefix))
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = ['cmd.exe', '/c', 'bld.bat']
        check_call_env(cmd, cwd=src_dir)

    fix_staged_scripts(join(m.config.build_prefix, 'Scripts'))
Esempio n. 33
0
def svn_source(meta, config):
    """Check out (or update a cached checkout of) an SVN source.

    A per-URL cache repo is kept under ``config.svn_cache`` and then copied
    into ``config.work_dir``.

    :param meta: source section dict with ``svn_url`` and optional
        ``svn_rev`` / ``svn_ignore_externals`` keys
    :param config: build configuration (provides svn_cache, work_dir, ...)
    :return: the work directory containing the checkout
    """
    def parse_bool(s):
        # Accept the common truthy spellings found in meta.yaml values.
        return str(s).lower().strip() in ("yes", "true", "1", "on")

    if config.verbose:
        FNULL = None
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, "w")
        stdout = FNULL
        stderr = FNULL

    # try/finally so the devnull handle is closed even if svn or the copy
    # fails (the original code leaked it on error).
    try:
        svn = external.find_executable("svn", config.build_prefix)
        if not svn:
            sys.exit("Error: svn is not installed")
        svn_url = meta["svn_url"]
        svn_revision = meta.get("svn_rev") or "head"
        svn_ignore_externals = parse_bool(meta.get("svn_ignore_externals") or "no")
        if not isdir(config.svn_cache):
            os.makedirs(config.svn_cache)
        # Flatten the URL into a single filesystem-safe cache directory name.
        svn_dn = svn_url.split(":", 1)[-1].replace("/", "_").replace(":", "_")
        cache_repo = join(config.svn_cache, svn_dn)
        if svn_ignore_externals:
            extra_args = ["--ignore-externals"]
        else:
            extra_args = []
        if isdir(cache_repo):
            # Cache hit: bring the existing checkout up to the wanted revision.
            check_call_env([svn, "up", "-r", svn_revision] + extra_args, cwd=cache_repo,
                           stdout=stdout, stderr=stderr)
        else:
            check_call_env(
                [svn, "co", "-r", svn_revision] + extra_args + [svn_url, cache_repo],
                stdout=stdout, stderr=stderr
            )
            assert isdir(cache_repo)

        # now copy into work directory
        copy_into(cache_repo, config.work_dir, config.timeout, symlinks=True)
    finally:
        if FNULL is not None:
            FNULL.close()

    return config.work_dir
Esempio n. 34
0
def build(m, bld_bat, stats):
    """Run the Windows build for package metadata ``m``, recording stats.

    Writes a generated ``bld.bat`` (env header + the recipe's script) in the
    work directory and executes it with ``cmd.exe``, passing ``stats``
    through to ``check_call_env`` for resource accounting.

    :param m: metadata object for the package being built
    :param bld_bat: path to the recipe's bld.bat script
    :param stats: dict-like collector passed to check_call_env
    """
    # Put both the build and host prefixes on PATH while computing the env.
    with path_prepended(m.config.build_prefix):
        with path_prepended(m.config.host_prefix):
            env = environ.get_dict(config=m.config, m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Ensure the %LIBRARY_BIN%/%LIBRARY_INC%/%LIBRARY_LIB% directories exist.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = m.config.work_dir
    if os.path.isfile(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            # more debuggable with echo on
            fo.write('@echo on\n')
            for key, value in env.items():
                # Skip empty values; `set "X="` would unset the variable.
                if value:
                    fo.write('set "{key}={value}"\n'.format(key=key, value=value))
            # Legacy MSVC setup is only needed for old-style compiler recipes.
            if not m.uses_new_style_compiler_activation:
                fo.write(msvc_env_cmd(bits=m.config.host_arch, config=m.config,
                                    override=m.get_value('build/msvc_compiler', None)))
            # Reset echo on, because MSVC scripts might have turned it off
            fo.write('@echo on\n')
            fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
            fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
            # Never activate when building conda itself.
            if m.config.activate and m.name() != 'conda':
                _write_bat_activation_text(fo, m)
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = ['cmd.exe', '/c', 'bld.bat']
        check_call_env(cmd, cwd=src_dir, stats=stats)

    # Repair scripts staged into the host prefix's Scripts/ directory.
    fix_staged_scripts(join(m.config.host_prefix, 'Scripts'), config=m.config)
Esempio n. 35
0
def svn_source(source_dict, src_dir, svn_cache, verbose=True, timeout=90, locking=True):
    ''' Download a source from SVN repo. '''
    def parse_bool(s):
        # Accept the common truthy spellings found in meta.yaml values.
        return str(s).lower().strip() in ('yes', 'true', '1', 'on')

    # Silence svn output unless running verbosely.
    devnull = None if verbose else open(os.devnull, 'w')
    stdout = devnull
    stderr = devnull

    svn_url = source_dict['svn_url']
    svn_revision = source_dict.get('svn_rev') or 'head'
    svn_ignore_externals = parse_bool(source_dict.get('svn_ignore_externals') or 'no')
    if not isdir(svn_cache):
        os.makedirs(svn_cache)
    # Flatten the URL into a filesystem-safe cache directory name.
    svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
    cache_repo = join(svn_cache, svn_dn)
    extra_args = ['--ignore-externals'] if svn_ignore_externals else []
    if isdir(cache_repo):
        # Cache hit: update the existing checkout to the requested revision.
        check_call_env(['svn', 'up', '-r', svn_revision] + extra_args,
                       cwd=cache_repo, stdout=stdout, stderr=stderr)
    else:
        # Fresh checkout straight into the cache location.
        check_call_env(['svn', 'co', '-r', svn_revision] + extra_args + [svn_url, cache_repo],
                       stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # Copy the cached checkout into the work directory.
    copy_into(cache_repo, src_dir, timeout, symlinks=True, locking=locking)

    if devnull is not None:
        devnull.close()

    return src_dir
Esempio n. 36
0
def hg_source(metadata, config):
    ''' Download a source from Mercurial repo. '''
    # Silence hg output unless running verbosely.
    devnull = None if config.verbose else open(os.devnull, 'w')
    stdout = devnull
    stderr = devnull

    meta = metadata.get_section('source')

    hg = external.find_executable('hg', config.build_prefix)
    if not hg:
        sys.exit('Error: hg not installed')
    hg_url = meta['hg_url']
    if not isdir(config.hg_cache):
        os.makedirs(config.hg_cache)
    # Flatten the URL into a filesystem-safe cache directory name.
    hg_dn = hg_url.split(':')[-1].replace('/', '_')
    cache_repo = join(config.hg_cache, hg_dn)
    if isdir(cache_repo):
        # Cache hit: just pull new changesets into the mirror.
        check_call_env([hg, 'pull'], cwd=cache_repo, stdout=stdout, stderr=stderr)
    else:
        check_call_env([hg, 'clone', hg_url, cache_repo], stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # Clone the cached repo into the work directory and check out the tag.
    update = meta.get('hg_tag') or 'tip'
    if config.verbose:
        print('checkout: %r' % update)

    check_call_env([hg, 'clone', cache_repo, config.work_dir], stdout=stdout, stderr=stderr)
    check_call_env([hg, 'update', '-C', update], cwd=config.work_dir,
                   stdout=stdout, stderr=stderr)

    if devnull is not None:
        devnull.close()

    return config.work_dir
Esempio n. 37
0
def apply_patch(src_dir, path, config, git=None):
    """Apply a single patch file to ``src_dir``.

    Git-format patches are applied with ``git am`` (when ``git`` is given);
    everything else falls back to the ``patch`` executable.

    :param src_dir: directory the patch is applied in
    :param path: path to the patch file
    :param config: build configuration (provides build_prefix, counters)
    :param git: path to a git executable, or None to force plain `patch`
    """
    if not isfile(path):
        sys.exit('Error: no such patch: %s' % path)

    files, is_git_format = _get_patch_file_details(path)
    if git and is_git_format:
        # Prevents git from asking interactive questions,
        # also necessary to achieve sha1 reproducibility;
        # as is --committer-date-is-author-date. By this,
        # we mean a round-trip of git am/git format-patch
        # gives the same file.
        # Use a copy: assigning os.environ directly and mutating it would
        # leak GIT_COMMITTER_* into the whole build process.
        git_env = os.environ.copy()
        git_env['GIT_COMMITTER_NAME'] = 'conda-build'
        git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'
        check_call_env([git, 'am', '--committer-date-is-author-date', path],
                       cwd=src_dir, stdout=None, env=git_env)
        config.git_commits_since_tag += 1
    else:
        print('Applying patch: %r' % path)
        patch = external.find_executable('patch', config.build_prefix)
        if patch is None:
            sys.exit("""\
        Error:
            Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
            You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
            or conda, m2-patch (Windows),
        """ % (os.pathsep.join(external.dir_paths)))
        patch_strip_level = _guess_patch_strip_level(files, src_dir)
        patch_args = ['-p%d' % patch_strip_level, '-i', path]
        if sys.platform == 'win32':
            # GNU patch on Windows needs LF line endings in the patch file.
            patch_args[-1] = _ensure_unix_line_endings(path)
        check_call_env([patch] + patch_args, cwd=src_dir)
        if sys.platform == 'win32' and os.path.exists(patch_args[-1]):
            os.remove(patch_args[-1])  # clean up .patch_unix file
Esempio n. 38
0
def build(m, bld_bat, config):
    """Run the Windows build for package metadata ``m``.

    Writes a generated ``bld.bat`` (environment header followed by the
    recipe's own script) in the work directory and executes it with
    ``cmd.exe``.

    :param m: metadata object for the package being built
    :param bld_bat: path to the recipe's bld.bat script
    :param config: build configuration (prefixes, work_dir, activate flag)
    """
    with path_prepended(config.build_prefix):
        env = environ.get_dict(config=config, m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # Ensure the %LIBRARY_BIN%/%LIBRARY_INC%/%LIBRARY_LIB% directories exist.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = config.work_dir
    if os.path.isfile(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            # more debuggable with echo on
            fo.write('@echo on\n')
            # Export every build-environment variable into the batch script.
            for key, value in env.items():
                fo.write('set "{key}={value}"\n'.format(key=key, value=value))
            # NOTE(review): `bits` is a module-level name not visible in this
            # block -- presumably the target architecture width; confirm at
            # module scope.
            fo.write(msvc_env_cmd(bits=bits, config=config,
                                  override=m.get_value('build/msvc_compiler', None)))
            # Reset echo on, because MSVC scripts might have turned it off
            fo.write('@echo on\n')
            fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
            fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
            if config.activate:
                fo.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
                    conda_root=root_script_dir,
                    prefix=config.build_prefix))
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = ['cmd.exe', '/c', 'bld.bat']
        check_call_env(cmd, cwd=src_dir)

    # Repair scripts staged into Scripts/ during the build.
    fix_staged_scripts(join(config.build_prefix, 'Scripts'))
Esempio n. 39
0
def hg_source(metadata, config):
    ''' Download a source from Mercurial repo. '''
    if config.verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    meta = metadata.get_section('source')

    hg = external.find_executable('hg', config.build_prefix)
    if not hg:
        sys.exit('Error: hg not installed')
    hg_url = meta['hg_url']
    if not isdir(config.hg_cache):
        os.makedirs(config.hg_cache)
    # Flatten the URL into a filesystem-safe cache directory name.
    cache_repo = join(config.hg_cache, hg_url.split(':')[-1].replace('/', '_'))
    if isdir(cache_repo):
        # Cache hit: just pull new changesets into the mirror.
        check_call_env([hg, 'pull'],
                       cwd=cache_repo,
                       stdout=stdout,
                       stderr=stderr)
    else:
        check_call_env([hg, 'clone', hg_url, cache_repo],
                       stdout=stdout,
                       stderr=stderr)
        assert isdir(cache_repo)

    # Clone the cached repo into the work directory and check out the tag.
    update = meta.get('hg_tag') or 'tip'
    if config.verbose:
        print('checkout: %r' % update)

    check_call_env([hg, 'clone', cache_repo, config.work_dir],
                   stdout=stdout,
                   stderr=stderr)
    check_call_env([hg, 'update', '-C', update],
                   cwd=config.work_dir,
                   stdout=stdout,
                   stderr=stderr)

    if not config.verbose:
        FNULL.close()

    return config.work_dir
Esempio n. 40
0
def hg_source(meta, config):
    """ Download a source from Mercurial repo.

    Maintains a per-URL cache repo under ``config.hg_cache``, then clones
    from the cache into ``config.work_dir`` and checks out the requested tag.

    :param meta: source section dict with ``hg_url`` and optional ``hg_tag``
    :param config: build configuration (provides hg_cache, work_dir, ...)
    :return: the work directory containing the checkout
    """
    if config.verbose:
        stdout = None
        stderr = None
    else:
        # Discard hg's output when not verbose.
        FNULL = open(os.devnull, "w")
        stdout = FNULL
        stderr = FNULL

    hg = external.find_executable("hg", config.build_prefix)
    if not hg:
        sys.exit("Error: hg not installed")
    hg_url = meta["hg_url"]
    if not isdir(config.hg_cache):
        os.makedirs(config.hg_cache)
    # Flatten the URL into a filesystem-safe cache directory name.
    hg_dn = hg_url.split(":")[-1].replace("/", "_")
    cache_repo = join(config.hg_cache, hg_dn)
    if isdir(cache_repo):
        # Cache hit: just pull new changesets into the mirror.
        check_call_env([hg, "pull"], cwd=cache_repo, stdout=stdout, stderr=stderr)
    else:
        check_call_env([hg, "clone", hg_url, cache_repo], stdout=stdout, stderr=stderr)
        assert isdir(cache_repo)

    # now clone in to work directory
    update = meta.get("hg_tag") or "tip"
    if config.verbose:
        print("checkout: %r" % update)

    check_call_env([hg, "clone", cache_repo, config.work_dir], stdout=stdout, stderr=stderr)
    # NOTE(review): `get_dir(config)` is defined elsewhere -- presumably it
    # resolves to the work directory just cloned into; confirm it matches
    # config.work_dir, as sibling variants of this function use that directly.
    check_call_env([hg, "update", "-C", update], cwd=get_dir(config), stdout=stdout, stderr=stderr)

    if not config.verbose:
        FNULL.close()

    return config.work_dir
Esempio n. 41
0
def build(m, bld_bat):
    """Run the Windows build for package metadata ``m``.

    Writes a generated ``bld.bat`` (environment header, MSVC setup,
    activation -- including a cross-compilation double-activation hack --
    followed by the recipe's own script) in the work directory and executes
    it with ``cmd.exe``.

    :param m: metadata object for the package being built
    :param bld_bat: path to the recipe's bld.bat script
    """
    with path_prepended(m.config.build_prefix):
        env = environ.get_dict(config=m.config, m=m)
    env["CONDA_BUILD_STATE"] = "BUILD"

    # set variables like CONDA_PY in the test environment
    env.update(set_language_env_vars(m.config.variant))

    # Ensure the %LIBRARY_BIN%/%LIBRARY_INC%/%LIBRARY_LIB% directories exist.
    for name in 'BIN', 'INC', 'LIB':
        path = env['LIBRARY_' + name]
        if not isdir(path):
            os.makedirs(path)

    src_dir = m.config.work_dir
    if os.path.isfile(bld_bat):
        with open(bld_bat) as fi:
            data = fi.read()
        with open(join(src_dir, 'bld.bat'), 'w') as fo:
            # more debuggable with echo on
            fo.write('@echo on\n')
            for key, value in env.items():
                # Skip empty values; `set "X="` would unset the variable.
                if value:
                    fo.write('set "{key}={value}"\n'.format(key=key, value=value))
            # Legacy MSVC setup is only needed for old-style compiler recipes.
            # NOTE(review): `bits` is a module-level name not visible in this
            # block -- presumably the target architecture width.
            if not m.uses_new_style_compiler_activation:
                fo.write(msvc_env_cmd(bits=bits, config=m.config,
                                    override=m.get_value('build/msvc_compiler', None)))
            # Reset echo on, because MSVC scripts might have turned it off
            fo.write('@echo on\n')
            fo.write('set "INCLUDE={};%INCLUDE%"\n'.format(env["LIBRARY_INC"]))
            fo.write('set "LIB={};%LIB%"\n'.format(env["LIBRARY_LIB"]))
            if m.config.activate:
                fo.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
                    conda_root=root_script_dir,
                    prefix=m.config.build_prefix))
                if m.is_cross:
                    # HACK: we need both build and host envs "active" - i.e. on PATH,
                    #     and with their activate.d scripts sourced. Conda only
                    #     lets us activate one, though. This is a
                    #     vile hack to trick conda into "stacking"
                    #     two environments.
                    #
                    # Net effect: binaries come from host first, then build
                    #
                    # Conda 4.4 may break this by reworking the activate scripts.
                    #  ^^ shouldn't be true
                    # In conda 4.4, export CONDA_MAX_SHLVL=2 to stack envs to two
                    #   levels deep.
                    # conda 4.4 does require that a conda-meta/history file
                    #   exists to identify a valid conda environment
                    history_file = join(m.config.host_prefix, 'conda-meta', 'history')
                    if not isfile(history_file):
                        if not isdir(dirname(history_file)):
                            os.makedirs(dirname(history_file))
                        open(history_file, 'a').close()
                    # removing this placeholder should make conda double-activate with conda 4.3
                    fo.write('set "PATH=%PATH:CONDA_PATH_PLACEHOLDER;=%"\n')
                    fo.write('set CONDA_MAX_SHLVL=2\n')
                    fo.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
                        conda_root=root_script_dir,
                        prefix=m.config.host_prefix))
            fo.write("REM ===== end generated header =====\n")
            fo.write(data)

        cmd = ['cmd.exe', '/c', 'bld.bat']
        check_call_env(cmd, cwd=src_dir)

    # Repair scripts staged into Scripts/ during the build.
    fix_staged_scripts(join(m.config.build_prefix, 'Scripts'))
Esempio n. 42
0
def test_relative_git_url_submodule_clone(testing_workdir, monkeypatch):
    """
    A multi-part test encompassing the following checks:

    1. That git submodules identified with both relative and absolute URLs can be mirrored
       and cloned.

    2. That changes pushed to the original repository are updated in the mirror and finally
       reflected in the package version and filename via `GIT_DESCRIBE_TAG`.

    3. That `source.py` is using `check_call_env` and `check_output_env` and that those
       functions are using tools from the build env.
    """

    # Three repos: a toplevel superproject plus one submodule referenced by a
    # relative URL and one referenced by an absolute URL.
    toplevel = os.path.join(testing_workdir, 'toplevel')
    os.mkdir(toplevel)
    relative_sub = os.path.join(testing_workdir, 'relative_sub')
    os.mkdir(relative_sub)
    absolute_sub = os.path.join(testing_workdir, 'absolute_sub')
    os.mkdir(absolute_sub)

    # Fixed author/committer identity so commits (and SHAs) are reproducible.
    sys_git_env = os.environ.copy()
    sys_git_env['GIT_AUTHOR_NAME'] = 'conda-build'
    sys_git_env['GIT_AUTHOR_EMAIL'] = '*****@*****.**'
    sys_git_env['GIT_COMMITTER_NAME'] = 'conda-build'
    sys_git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'

    # Find the git executable before putting our dummy one on PATH.
    git = find_executable('git')

    # Put the broken git on os.environ["PATH"]
    exename = dummy_executable(testing_workdir, 'git')
    monkeypatch.setenv("PATH", testing_workdir, prepend=os.pathsep)
    # .. and ensure it gets run (and fails).
    FNULL = open(os.devnull, 'w')
    # Strangely ..
    #   stderr=FNULL suppresses the output from echo on OS X whereas
    #   stdout=FNULL suppresses the output from echo on Windows
    with pytest.raises(subprocess.CalledProcessError,
                       message="Dummy git was not executed"):
        check_call_env([exename, '--version'], stdout=FNULL, stderr=FNULL)
    FNULL.close()

    # Two rounds: tag 0 sets the repos up, tag 1 pushes changes and verifies
    # the mirror picks them up.
    for tag in range(2):
        os.chdir(absolute_sub)
        if tag == 0:
            check_call_env([git, 'init'], env=sys_git_env)
        with open('absolute', 'w') as f:
            f.write(str(tag))
        check_call_env([git, 'add', 'absolute'], env=sys_git_env)
        check_call_env([git, 'commit', '-m', 'absolute{}'.format(tag)],
                       env=sys_git_env)

        os.chdir(relative_sub)
        if tag == 0:
            check_call_env([git, 'init'], env=sys_git_env)
        with open('relative', 'w') as f:
            f.write(str(tag))
        check_call_env([git, 'add', 'relative'], env=sys_git_env)
        check_call_env([git, 'commit', '-m', 'relative{}'.format(tag)],
                       env=sys_git_env)

        os.chdir(toplevel)
        if tag == 0:
            check_call_env([git, 'init'], env=sys_git_env)
        with open('toplevel', 'w') as f:
            f.write(str(tag))
        check_call_env([git, 'add', 'toplevel'], env=sys_git_env)
        check_call_env([git, 'commit', '-m', 'toplevel{}'.format(tag)],
                       env=sys_git_env)
        if tag == 0:
            # First round: wire the two submodules into the superproject.
            check_call_env([
                git, 'submodule', 'add',
                convert_path_for_cygwin_or_msys2(git, absolute_sub), 'absolute'
            ],
                           env=sys_git_env)
            check_call_env(
                [git, 'submodule', 'add', '../relative_sub', 'relative'],
                env=sys_git_env)
        else:
            # Once we use a more recent Git for Windows than 2.6.4 on Windows or m2-git we
            # can change this to `git submodule update --recursive`.
            check_call_env([git, 'submodule', 'foreach', git, 'pull'],
                           env=sys_git_env)
        check_call_env(
            [git, 'commit', '-am', 'added submodules@{}'.format(tag)],
            env=sys_git_env)
        # Annotated tag feeds GIT_DESCRIBE_TAG for the package version.
        check_call_env(
            [git, 'tag', '-a',
             str(tag), '-m', 'tag {}'.format(tag)],
            env=sys_git_env)

        # It is possible to use `Git for Windows` here too, though you *must* not use a different
        # (type of) git than the one used above to add the absolute submodule, because .gitmodules
        # stores the absolute path and that is not interchangeable between MSYS2 and native Win32.
        #
        # Also, git is set to False here because it needs to be rebuilt with the longer prefix. As
        # things stand, my _b_env folder for this test contains more than 80 characters.
        requirements = ('requirements',
                        OrderedDict([('build', [
                            'git            # [False]',
                            'm2-git         # [win]', 'm2-filesystem  # [win]'
                        ])]))

        # Build an in-memory meta.yaml whose build script records each
        # submodule's latest commit subject into summaries.txt.
        filename = os.path.join(testing_workdir, 'meta.yaml')
        data = OrderedDict([
            ('package',
             OrderedDict([('name', 'relative_submodules'),
                          ('version', '{{ GIT_DESCRIBE_TAG }}')])),
            ('source',
             OrderedDict([('git_url', toplevel),
                          ('git_tag', str(tag))])), requirements,
            ('build',
             OrderedDict([('script', [
                 'git --no-pager submodule --quiet foreach git log -n 1 --pretty=format:%%s > '
                 '%PREFIX%\\summaries.txt  # [win]',
                 'git --no-pager submodule --quiet foreach git log -n 1 --pretty=format:%s > '
                 '$PREFIX/summaries.txt   # [not win]'
             ])])),
            ('test',
             OrderedDict([('commands', [
                 'echo absolute{}relative{} > %PREFIX%\\expected_summaries.txt       # [win]'
                 .format(tag, tag),
                 'fc.exe /W %PREFIX%\\expected_summaries.txt %PREFIX%\\summaries.txt # [win]',
                 'echo absolute{}relative{} > $PREFIX/expected_summaries.txt         # [not win]'
                 .format(tag, tag),
                 'diff -wuN ${PREFIX}/expected_summaries.txt ${PREFIX}/summaries.txt # [not win]'
             ])]))
        ])

        with open(filename, 'w') as outfile:
            outfile.write(
                yaml.dump(data, default_flow_style=False, width=999999999))
        # Reset the path because our broken, dummy `git` would cause `render_recipe`
        # to fail, while no `git` will cause the build_dependencies to be installed.
        monkeypatch.undo()
        # This will (after one spin round the loop) install and run 'git' with the
        # build env prepended to os.environ[]
        output = api.get_output_file_path(testing_workdir)[0]
        assert ("relative_submodules-{}-".format(tag) in output)
        api.build(testing_workdir)
Esempio n. 43
0
def apply_patch(src_dir, path, config, git=None):
    """Apply a single patch file to ``src_dir``.

    Git-format patches are applied with ``git am`` (when ``git`` is given);
    everything else falls back to the ``patch`` executable, retrying with
    converted line endings on Windows when the first attempt fails.

    :param src_dir: directory the patch is applied in
    :param path: path to the patch file
    :param config: build configuration (provides build_prefix, counters)
    :param git: path to a git executable, or None to force plain `patch`
    """
    if not isfile(path):
        sys.exit('Error: no such patch: %s' % path)

    files, is_git_format = _get_patch_file_details(path)
    if git and is_git_format:
        # Prevents git from asking interactive questions,
        # also necessary to achieve sha1 reproducibility;
        # as is --committer-date-is-author-date. By this,
        # we mean a round-trip of git am/git format-patch
        # gives the same file.
        # Use a copy: assigning os.environ directly and mutating it would
        # leak GIT_COMMITTER_* into the whole build process.
        git_env = os.environ.copy()
        git_env['GIT_COMMITTER_NAME'] = 'conda-build'
        git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'
        check_call_env([git, 'am', '--committer-date-is-author-date', path],
                       cwd=src_dir, stdout=None, env=git_env)
        config.git_commits_since_tag += 1
    else:
        print('Applying patch: %r' % path)
        patch = external.find_executable('patch', config.build_prefix)
        if patch is None:
            sys.exit("""\
        Error:
            Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
            You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
            or conda, m2-patch (Windows),
        """ % (os.pathsep.join(external.dir_paths)))
        patch_strip_level = _guess_patch_strip_level(files, src_dir)
        patch_args = ['-p%d' % patch_strip_level, '--ignore-whitespace', '-i', path]

        # line endings are a pain.
        # https://unix.stackexchange.com/a/243748/34459
        log = get_logger(__name__)
        try:
            log.info("Trying to apply patch as-is")
            check_call_env([patch] + patch_args, cwd=src_dir)
        except CalledProcessError:
            # Only retry with converted line endings on Windows; elsewhere
            # the failure is real.
            if sys.platform != 'win32':
                raise
            unix_ending_file = _ensure_unix_line_endings(path)
            patch_args[-1] = unix_ending_file
            try:
                log.info("Applying unmodified patch failed.  "
                         "Convert to unix line endings and trying again.")
                check_call_env([patch] + patch_args, cwd=src_dir)
            except Exception:
                # (was a bare `except:`; narrowed so Ctrl-C still aborts)
                log.info("Applying unix patch failed.  "
                         "Convert to CRLF line endings and trying again with --binary.")
                patch_args.insert(0, '--binary')
                win_ending_file = _ensure_win_line_endings(path)
                patch_args[-1] = win_ending_file
                try:
                    check_call_env([patch] + patch_args, cwd=src_dir)
                finally:
                    if os.path.exists(win_ending_file):
                        os.remove(win_ending_file)  # clean up .patch_win file
            finally:
                if os.path.exists(unix_ending_file):
                    os.remove(unix_ending_file)  # clean up .patch_unix file
Esempio n. 44
0
 def patch_or_reverse(patch, patch_args, cwd, stdout, stderr):
     """Apply a patch; if it fails, reverse whatever partially applied.

     An old reference: https://unix.stackexchange.com/a/243748/34459

     Calls to a raw 'patch' are destructive in various ways:
     1. It leaves behind .rej and .orig files
     2. If you pass it a patch with incorrect CRLF changes and do not pass
        --binary, and any of those blocks *can* be applied, the whole file
        gets written out with LF, which cannot be reversed.
     3. If patching fails, the bits that succeeded remain, so patching is
        not at all atomic.

     We mitigate this by disabling .orig/.rej files (rejects go to a temp
     file instead) and, when patch indicates failure, calling it again with
     -R to reverse the damage before re-raising the original error.

     :param patch: path to the patch executable
     :param patch_args: extra arguments for patch (not mutated)
     :param cwd: directory to run patch in
     :param stdout: stream for patch's stdout (or None)
     :param stderr: stream for patch's stderr (or None)
     :raises: whatever check_call_env raised on the failed application
     """
     import tempfile
     # Reject output goes here instead of littering the source tree.
     # mkstemp is the public API (the original used the private
     # tempfile._get_candidate_names()); close the fd, patch rewrites the file.
     fd, temp_name = tempfile.mkstemp(suffix='.patch_rejects')
     os.close(fd)
     # Work on a copy so the caller's patch_args list is not mutated.
     args = ['--no-backup-if-mismatch', '--batch'] + list(patch_args) + ['-r', temp_name]
     log = get_logger(__name__)
     try:
         log.debug("Applying with\n{} {}".format(patch, args))
         check_call_env([patch] + args,
                        cwd=cwd,
                        stdout=stdout,
                        stderr=stderr)
         # You can use this to pretend the patch failed so as to test reversal!
         # raise CalledProcessError(-1, ' '.join([patch] + args))
     except Exception as e:
         try:
             # Reverse with the most permissive flags; whitespace fuzzing
             # must be off so the reversal matches what was applied.
             if '--ignore-whitespace' in args:
                 args.remove('--ignore-whitespace')
             args.insert(0, '-R')
             args.append('--binary')
             args.append('--force')
             log.debug("Reversing with\n{} {}".format(patch, args))
             check_call_env([patch] + args,
                            cwd=cwd,
                            stdout=stdout,
                            stderr=stderr)
         except Exception:
             # Reversal is best-effort; the original failure is what matters.
             # (was a bare `except:`; narrowed so Ctrl-C still aborts)
             pass
         raise e
     finally:
         if os.path.exists(temp_name):
             os.unlink(temp_name)
Esempio n. 45
0
def run_setuppy(src_dir, temp_dir, python_version, config, setup_options):
    '''
    Patch distutils and then run setup.py in a subprocess.

    :param src_dir: Directory containing the source code
    :type src_dir: str
    :param temp_dir: Temporary directory for doing for storing pkginfo.yaml
    :type temp_dir: str
    :param python_version: Python version string used to pin the build env
    :param config: build configuration (provides build_prefix, build_python)
    :param setup_options: extra arguments appended to `setup.py install`
    '''
    specs = ['python %s*' % python_version, 'pyyaml', 'setuptools']
    # Only pull in numpy when setup.py actually imports it.
    with open(os.path.join(src_dir, "setup.py")) as setup:
        text = setup.read()
        if 'import numpy' in text or 'from numpy' in text:
            specs.append('numpy')
    # Do everything in the build env in case the setup.py install goes
    # haywire.
    # TODO: Try with another version of Python if this one fails. Some
    # packages are Python 2 or Python 3 only.

    create_env(config.build_prefix,
               specs=specs,
               clear_cache=False,
               config=config)
    stdlib_dir = join(
        config.build_prefix,
        'Lib' if sys.platform == 'win32' else 'lib/python%s' % python_version)

    # Write the distutils monkey-patch (captures package metadata during
    # install) into the temp dir, then apply it to the env's distutils.
    patch = join(temp_dir, 'pypi-distutils.patch')
    with open(patch, 'w') as f:
        f.write(DISTUTILS_PATCH.format(temp_dir.replace('\\', '\\\\')))

    if exists(join(stdlib_dir, 'distutils', 'core.py-copy')):
        # A pristine copy exists from a previous run: restore it before
        # re-applying the patch.
        rm_rf(join(stdlib_dir, 'distutils', 'core.py'))
        copy2(join(stdlib_dir, 'distutils', 'core.py-copy'),
              join(stdlib_dir, 'distutils', 'core.py'))
        # Avoid race conditions. Invalidate the cache.
        if PY3:
            rm_rf(
                join(stdlib_dir, 'distutils', '__pycache__',
                     'core.cpython-%s%s.pyc' % sys.version_info[:2]))
            rm_rf(
                join(stdlib_dir, 'distutils', '__pycache__',
                     'core.cpython-%s%s.pyo' % sys.version_info[:2]))
        else:
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyc'))
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyo'))
    else:
        # First run: keep a pristine copy so future runs can restore it.
        copy2(join(stdlib_dir, 'distutils', 'core.py'),
              join(stdlib_dir, 'distutils', 'core.py-copy'))
    apply_patch(join(stdlib_dir, 'distutils'), patch, config=config)

    # Save PYTHONPATH for later
    env = os.environ.copy()
    if 'PYTHONPATH' in env:
        env[str('PYTHONPATH')] = str(src_dir + ':' + env['PYTHONPATH'])
    else:
        env[str('PYTHONPATH')] = str(src_dir)
    cwd = getcwd()
    chdir(src_dir)
    cmdargs = [config.build_python, 'setup.py', 'install']
    cmdargs.extend(setup_options)
    try:
        check_call_env(cmdargs, env=env)
    except subprocess.CalledProcessError:
        print('$PYTHONPATH = %s' % env['PYTHONPATH'])
        sys.exit('Error: command failed: %s' % ' '.join(cmdargs))
    finally:
        # Always restore the original working directory.
        chdir(cwd)
Esempio n. 46
0
def apply_one_patch(src_dir, recipe_dir, rel_path, config, git=None):
    """Apply the patch ``rel_path`` (relative to ``recipe_dir``) to ``src_dir``.

    Git-format patches are applied with ``git am`` (when ``git`` is supplied)
    so that sha1s stay reproducible; anything else falls back to the ``patch``
    executable with arguments chosen by ``_get_patch_attributes``.

    :return: human-readable string describing the detected patch attributes.
    :raises RuntimeError: if the patch file is missing, or no usable ``patch``
        executable can be found for a non-git patch.
    """
    path = os.path.join(recipe_dir, rel_path)
    if config.verbose:
        print('Applying patch: {}'.format(path))

    def try_apply_patch(patch, patch_args, cwd, stdout, stderr):
        # An old reference: https://unix.stackexchange.com/a/243748/34459
        #
        # I am worried that '--ignore-whitespace' may be destructive. If so we should
        # avoid passing it, particularly in the initial (most likely to succeed) calls.
        #
        # From here-in I define a 'native' patch as one which has:
        # 1. LF for the patch block metadata.
        # 2. CRLF or LF for the actual patched lines matching those of the source lines.
        #
        # Calls to a raw 'patch' are destructive in various ways:
        # 1. It leaves behind .rej and .orig files
        # 2. If you pass it a patch with incorrect CRLF changes and do not pass --binary and
        #    if any of those blocks *can* be applied, then the whole file gets written out with
        #    LF.  This cannot be reversed either; the text changes will be reversed but not
        #    line-feed changes (since all line-endings get changed, not just those of the of
        #    patched lines)
        # 3. If patching fails, the bits that succeeded remain, so patching is not at all
        #    atomic.
        #
        # Still, we do our best to mitigate all of this as follows:
        # 1. We use --dry-run to test for applicability first.
        # 2 We check for native application of a native patch (--binary, without --ignore-whitespace)
        #
        # Some may bemoan the loss of patch failure artifacts, but it is fairly random which
        # patch and patch attempt they apply to so their informational value is low, besides that,
        # they are ugly.
        #
        import tempfile
        temp_name = os.path.join(tempfile.gettempdir(),
                                 next(tempfile._get_candidate_names()))
        # '-r temp_name' sends any reject file to a disposable location so the
        # source tree is not littered with .rej files.  (Previously temp_name
        # was created and cleaned up, but never actually passed to patch.)
        base_patch_args = (['--no-backup-if-mismatch', '--batch'] +
                           patch_args + ['-r', temp_name])
        try:
            # Probe with --dry-run first; only apply for real if it would succeed.
            try_patch_args = base_patch_args[:]
            try_patch_args.append('--dry-run')
            log.debug("dry-run applying with\n{} {}".format(
                patch, try_patch_args))
            check_call_env([patch] + try_patch_args,
                           cwd=cwd,
                           stdout=stdout,
                           stderr=stderr)
            # You can use this to pretend the patch failed so as to test reversal!
            # raise CalledProcessError(-1, ' '.join([patch] + patch_args))
            check_call_env([patch] + base_patch_args,
                           cwd=cwd,
                           stdout=stdout,
                           stderr=stderr)
        finally:
            if os.path.exists(temp_name):
                os.unlink(temp_name)

    exception = None
    if not isfile(path):
        raise RuntimeError('Error: no such patch: %s' % path)

    if config.verbose:
        stdout = None
        stderr = None
    else:
        # subprocess.DEVNULL avoids leaking an open file handle (the previous
        # open(os.devnull, 'wb') was never closed).
        stdout = subprocess.DEVNULL
        stderr = subprocess.DEVNULL

    attributes_output = ""
    # find_executable can return None or '' when nothing is found; normalize
    # to '' so the truthiness checks below are safe (len(None) would raise).
    patch_exe = (external.find_executable('patch', config.build_prefix) or
                 external.find_executable('patch', config.host_prefix) or '')
    with TemporaryDirectory() as tmpdir:
        patch_attributes = _get_patch_attributes(path, patch_exe, git, src_dir,
                                                 stdout, stderr, tmpdir)
        attributes_output += _patch_attributes_debug(patch_attributes,
                                                     rel_path,
                                                     config.build_prefix)
        if git and patch_attributes['format'] == 'git':
            # Prevents git from asking interactive questions,
            # also necessary to achieve sha1 reproducibility;
            # as is --committer-date-is-author-date. By this,
            # we mean a round-trip of git am/git format-patch
            # gives the same file.
            # Copy rather than alias os.environ so the parent env is untouched.
            git_env = os.environ.copy()
            git_env['GIT_COMMITTER_NAME'] = 'conda-build'
            git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'
            check_call_env(
                [git, 'am', '-3', '--committer-date-is-author-date', path],
                cwd=src_dir,
                stdout=stdout,
                stderr=stderr,
                env=git_env)
            config.git_commits_since_tag += 1
        else:
            if not patch_exe:
                errstr = ("""\
            Error:
                Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
                You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
                or conda, m2-patch (Windows),
            """ % (os.pathsep.join(external.dir_paths)))
                raise RuntimeError(errstr)
            patch_args = patch_attributes['args']

            if config.verbose:
                print('Applying patch: {} with args:\n{}'.format(
                    path, patch_args))

            try:
                try_apply_patch(patch_exe,
                                patch_args,
                                cwd=src_dir,
                                stdout=stdout,
                                stderr=stderr)
            except Exception as e:
                exception = e
        if exception:
            raise exception
    return attributes_output
Example no. 47
0
def run_test(
    recipedir_or_package_or_metadata,
    config,
    stats,
    move_broken=True,
    provision_only=False,
    solver=None,
):
    """
    Execute any test scripts for the given package.

    :param recipedir_or_package_or_metadata: a recipe directory, the path to a
        built package, or a metadata object (anything with ``dist``/``config``
        attributes is treated as metadata).
    :param config: conda-build Config controlling prefixes, verbosity, etc.
    :param stats: dict for accumulating per-phase performance stats
        (currently only partially wired up; see the commented TODO below).
    :param move_broken: when a test fails, move the package to the broken dir.
    :param provision_only: set up the test environment and scripts but do not
        actually run the tests.
    :param solver: optional pre-built solver to reuse; when ``None`` one is
        created for ``metadata.config.host_subdir``.
    :return: True on success (including the "nothing to test" case).
    :raises RuntimeError: if test packages could not be downloaded.
    """

    # we want to know if we're dealing with package input.  If so, we can move the input on success.
    hash_input = {}

    # store this name to keep it consistent.  By changing files, we change the hash later.
    #    It matches the build hash now, so let's keep it around.
    test_package_name = (
        recipedir_or_package_or_metadata.dist()
        if hasattr(recipedir_or_package_or_metadata, "dist")
        else recipedir_or_package_or_metadata
    )

    if not provision_only:
        print("TEST START:", test_package_name)

    # Metadata input is used directly; otherwise metadata is constructed from
    # the recipe dir or package path.
    if hasattr(recipedir_or_package_or_metadata, "config"):
        metadata = recipedir_or_package_or_metadata
        utils.rm_rf(metadata.config.test_dir)
    else:
        metadata, hash_input = construct_metadata_for_test(
            recipedir_or_package_or_metadata, config
        )

    # shell trace flag ("-x") propagated into the generated test scripts
    trace = "-x " if metadata.config.debug else ""

    # Must download *after* computing build id, or else computing build id will change
    #     folder destination
    _extract_test_files_from_package(metadata)

    # When testing a .tar.bz2 in the pkgs dir, clean_pkg_cache() will remove it.
    # Prevent this. When https://github.com/conda/conda/issues/5708 gets fixed
    # I think we can remove this call to clean_pkg_cache().
    in_pkg_cache = (
        not hasattr(recipedir_or_package_or_metadata, "config")
        and os.path.isfile(recipedir_or_package_or_metadata)
        and recipedir_or_package_or_metadata.endswith(CONDA_PACKAGE_EXTENSIONS)
        and os.path.dirname(recipedir_or_package_or_metadata) in pkgs_dirs[0]
    )
    if not in_pkg_cache:
        environ.clean_pkg_cache(metadata.dist(), metadata.config)

    copy_test_source_files(metadata, metadata.config.test_dir)
    # this is also copying tests/source_files from work_dir to testing workdir

    _, pl_files, py_files, r_files, lua_files, shell_files = create_all_test_files(
        metadata
    )

    # Bail out early when there are no test scripts of any flavor and no
    # post-run testing was requested.
    if (
        not any([py_files, shell_files, pl_files, lua_files, r_files])
        and not metadata.config.test_run_post
    ):
        print("Nothing to test for:", test_package_name)
        return True

    if metadata.config.remove_work_dir:
        for name, prefix in (
            ("host", metadata.config.host_prefix),
            ("build", metadata.config.build_prefix),
        ):
            if os.path.isdir(prefix):
                # move host folder to force hardcoded paths to host env to break during tests
                #    (so that they can be properly addressed by recipe author)
                dest = os.path.join(
                    os.path.dirname(prefix),
                    "_".join(
                        (
                            "%s_prefix_moved" % name,
                            metadata.dist(),
                            getattr(metadata.config, "%s_subdir" % name),
                        )
                    ),
                )
                # Needs to come after create_files in case there's test/source_files
                shutil_move_more_retrying(prefix, dest, "{} prefix".format(prefix))

        # nested if so that there's no warning when we just leave the empty workdir in place
        if metadata.source_provided:
            dest = os.path.join(
                os.path.dirname(metadata.config.work_dir),
                "_".join(("work_moved", metadata.dist(), metadata.config.host_subdir)),
            )
            # Needs to come after create_files in case there's test/source_files
            # NOTE(review): source is config.work_dir while dest is derived from
            # metadata.config — confirm both refer to the same config object.
            shutil_move_more_retrying(config.work_dir, dest, "work")
    else:
        log.warn(
            "Not moving work directory after build.  Your package may depend on files "
            "in the work directory that are not included with your package"
        )

    # looks like a dead function to me
    # get_build_metadata(metadata)

    # conda specs needed to run the collected test scripts
    specs = metadata.get_test_deps(py_files, pl_files, lua_files, r_files)

    with utils.path_prepended(metadata.config.test_prefix):
        env = dict(os.environ.copy())
        env.update(environ.get_dict(m=metadata, prefix=config.test_prefix))
        env["CONDA_BUILD_STATE"] = "TEST"
        env["CONDA_BUILD"] = "1"
        if env_path_backup_var_exists:
            env["CONDA_PATH_BACKUP"] = os.environ["CONDA_PATH_BACKUP"]

    if not metadata.config.activate or metadata.name() == "conda":
        # prepend bin (or Scripts) directory
        env = utils.prepend_bin_path(
            env, metadata.config.test_prefix, prepend_prefix=True
        )

    if utils.on_win:
        env["PATH"] = metadata.config.test_prefix + os.pathsep + env["PATH"]

    env["PREFIX"] = metadata.config.test_prefix
    if "BUILD_PREFIX" in env:
        del env["BUILD_PREFIX"]

    # In the future, we will need to support testing cross compiled
    #     packages on physical hardware. until then it is expected that
    #     something like QEMU or Wine will be used on the build machine,
    #     therefore, for now, we use host_subdir.

    # ensure that the test prefix isn't kept between variants
    utils.rm_rf(metadata.config.test_prefix)

    if solver is None:
        solver, pkg_cache_path = get_solver(metadata.config.host_subdir)
    else:
        pkg_cache_path = PackageCacheData.first_writable().pkgs_dir

    # Solve, fetch and extract everything the test environment needs.
    solver.replace_channels()
    transaction = solver.solve(specs, [pkg_cache_path])

    downloaded = transaction.fetch_extract_packages(
        pkg_cache_path, solver.repos + list(solver.local_repos.values()),
    )
    if not downloaded:
        raise RuntimeError("Did not succeed in downloading packages.")

    mkdir_p(metadata.config.test_prefix)
    transaction.execute(
        PrefixData(metadata.config.test_prefix), pkg_cache_path,
    )

    # Rebuild the script environment now that the test prefix actually exists.
    with utils.path_prepended(metadata.config.test_prefix):
        env = dict(os.environ.copy())
        env.update(environ.get_dict(m=metadata, prefix=metadata.config.test_prefix))
        env["CONDA_BUILD_STATE"] = "TEST"
        if env_path_backup_var_exists:
            env["CONDA_PATH_BACKUP"] = os.environ["CONDA_PATH_BACKUP"]

    if config.test_run_post:
        from conda_build.utils import get_installed_packages

        installed = get_installed_packages(metadata.config.test_prefix)
        files = installed[metadata.meta["package"]["name"]]["files"]
        replacements = get_all_replacements(metadata.config)
        try_download(metadata, False, True)
        create_info_files(metadata, replacements, files, metadata.config.test_prefix)
        post_build(metadata, files, None, metadata.config.test_prefix, True)

    # when workdir is removed, the source files are unavailable.  There's the test/source_files
    #    entry that lets people keep these files around.  The files are copied into test_dir for
    #    intuitive relative path behavior, though, not work_dir, so we need to adjust where
    #    SRC_DIR points.  The initial CWD during tests is test_dir.
    if metadata.config.remove_work_dir:
        env["SRC_DIR"] = metadata.config.test_dir

    test_script, _ = write_test_scripts(
        metadata, env, py_files, pl_files, lua_files, r_files, shell_files, trace
    )

    if utils.on_win:
        cmd = [os.environ.get("COMSPEC", "cmd.exe"), "/d", "/c", test_script]
    else:
        cmd = (
            [shell_path]
            + (["-x"] if metadata.config.debug else [])
            + ["-o", "errexit", test_script]
        )
    try:
        test_stats = {}
        if not provision_only:
            # rewrite long paths in stdout back to their env variables
            if metadata.config.debug or metadata.config.no_rewrite_stdout_env:
                rewrite_env = None
            else:
                rewrite_env = {k: env[k] for k in ["PREFIX", "SRC_DIR"] if k in env}
                if metadata.config.verbose:
                    for k, v in rewrite_env.items():
                        print(
                            "{0} {1}={2}".format(
                                "set" if test_script.endswith(".bat") else "export",
                                k,
                                v,
                            )
                        )
            utils.check_call_env(
                cmd,
                env=env,
                cwd=metadata.config.test_dir,
                stats=test_stats,
                rewrite_stdout_env=rewrite_env,
            )
            log_stats(test_stats, "testing {}".format(metadata.name()))
            # TODO need to implement metadata.get_used_loop_vars
            # if stats is not None and metadata.config.variants:
            #     stats[
            #         stats_key(metadata, "test_{}".format(metadata.name()))
            #     ] = test_stats
            # A TEST_FAILED marker file in test_dir signals failure (presumably
            # written by the test script) — treat it as a failed run.
            if os.path.exists(join(metadata.config.test_dir, "TEST_FAILED")):
                raise subprocess.CalledProcessError(-1, "")
            print("TEST END:", test_package_name)

    except subprocess.CalledProcessError as _:  # noqa
        tests_failed(
            metadata,
            move_broken=move_broken,
            broken_dir=metadata.config.broken_dir,
            config=metadata.config,
        )
        raise

    # clean up the temporary recipe dir created for non-metadata input
    if config.need_cleanup and config.recipe_dir is not None and not provision_only:
        utils.rm_rf(config.recipe_dir)

    return True
Example no. 48
0
def run_setuppy(src_dir, temp_dir, python_version, extra_specs, config, setup_options):
    '''
    Patch distutils and then run setup.py in a subprocess.

    :param src_dir: Directory containing the source code
    :type src_dir: str
    :param temp_dir: Temporary directory for storing pkginfo.yaml
    :type temp_dir: str
    :param python_version: python version spec (e.g. '3.8') for the build env
    :param extra_specs: additional conda specs to install into the build env
    :param config: conda-build Config (prefixes, subdir, build python, ...)
    :param setup_options: extra arguments appended to ``setup.py install``
    '''
    # TODO: we could make everyone's lives easier if we include packaging here, because setuptools
    #    needs it in recent versions.  At time of writing, it is not a package in defaults, so this
    #    actually breaks conda-build right now.  Omit it until packaging is on defaults.
    # specs = ['python %s*' % python_version, 'pyyaml', 'setuptools', 'six', 'packaging', 'appdirs']
    specs = ['python %s*' % python_version, 'pyyaml']
    with open(os.path.join(src_dir, "setup.py")) as setup:
        text = setup.read()
        if 'import numpy' in text or 'from numpy' in text:
            specs.append('numpy')

    specs.extend(extra_specs)

    # Do everything in the build env in case the setup.py install goes
    # haywire.
    # TODO: Try with another version of Python if this one fails. Some
    # packages are Python 2 or Python 3 only.

    if not os.path.isdir(config.build_prefix) or not os.listdir(config.build_prefix):
        create_env(config.build_prefix, specs_or_actions=specs,
                   subdir=config.build_subdir,
                   clear_cache=False,
                   config=config)
    stdlib_dir = join(config.build_prefix,
                      'Lib' if sys.platform == 'win32'
                      else 'lib/python%s' % python_version)

    patch = join(temp_dir, 'pypi-distutils.patch')
    with open(patch, 'w') as f:
        f.write(DISTUTILS_PATCH.format(temp_dir.replace('\\', '\\\\')))

    if exists(join(stdlib_dir, 'distutils', 'core.py-copy')):
        # A pristine copy exists from a previous run: restore it before patching.
        rm_rf(join(stdlib_dir, 'distutils', 'core.py'))
        copy2(join(stdlib_dir, 'distutils', 'core.py-copy'),
              join(stdlib_dir, 'distutils', 'core.py'))
        # Avoid race conditions. Invalidate the cache.
        if PY3:
            rm_rf(join(stdlib_dir, 'distutils', '__pycache__',
                'core.cpython-%s%s.pyc' % sys.version_info[:2]))
            rm_rf(join(stdlib_dir, 'distutils', '__pycache__',
                'core.cpython-%s%s.pyo' % sys.version_info[:2]))
        else:
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyc'))
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyo'))
    else:
        # First run: stash a pristine copy so later runs can restore it.
        copy2(join(stdlib_dir, 'distutils', 'core.py'), join(stdlib_dir,
            'distutils', 'core.py-copy'))
    apply_patch(join(stdlib_dir, 'distutils'), patch, config=config)

    # Save PYTHONPATH for later
    env = os.environ.copy()
    if 'PYTHONPATH' in env:
        # Use os.pathsep (';' on Windows, ':' elsewhere); the previous
        # hard-coded ':' broke PYTHONPATH on Windows.
        env[str('PYTHONPATH')] = str(src_dir + os.pathsep + env['PYTHONPATH'])
    else:
        env[str('PYTHONPATH')] = str(src_dir)
    cwd = getcwd()
    chdir(src_dir)
    cmdargs = [config.build_python, 'setup.py', 'install']
    cmdargs.extend(setup_options)
    try:
        check_call_env(cmdargs, env=env)
    except subprocess.CalledProcessError:
        print('$PYTHONPATH = %s' % env['PYTHONPATH'])
        sys.exit('Error: command failed: %s' % ' '.join(cmdargs))
    finally:
        chdir(cwd)
Example no. 49
0
def apply_patch(src_dir, path, config, git=None):
    """Apply the patch file at ``path`` to the source tree ``src_dir``.

    Git-format patches are applied via ``git am`` when ``git`` is provided
    (keeps sha1s reproducible); otherwise the ``patch`` executable is used,
    retrying on Windows with the patch converted first to LF and then to CRLF
    line endings (with ``--binary``).

    Exits the process (``sys.exit``) when the patch file or a usable patch
    tool is missing; re-raises when every patch attempt fails.
    """
    if not isfile(path):
        sys.exit('Error: no such patch: %s' % path)

    files, is_git_format = _get_patch_file_details(path)
    if git and is_git_format:
        # Prevents git from asking interactive questions,
        # also necessary to achieve sha1 reproducibility;
        # as is --committer-date-is-author-date. By this,
        # we mean a round-trip of git am/git format-patch
        # gives the same file.
        # Copy rather than alias os.environ so the parent env stays clean.
        git_env = os.environ.copy()
        git_env['GIT_COMMITTER_NAME'] = 'conda-build'
        git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'
        check_call_env([git, 'am', '--committer-date-is-author-date', path],
                       cwd=src_dir, stdout=None, env=git_env)
        config.git_commits_since_tag += 1
    else:
        print('Applying patch: %r' % path)
        patch = external.find_executable('patch', config.build_prefix)
        # find_executable can return None *or* '' — treat both as "not found"
        # (the previous 'is None' check let '' slip through).
        if not patch:
            sys.exit("""\
        Error:
            Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
            You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
            or conda, m2-patch (Windows),
        """ % (os.pathsep.join(external.dir_paths)))
        patch_strip_level = _guess_patch_strip_level(files, src_dir)
        patch_args = ['-p%d' % patch_strip_level, '-i', path]

        # line endings are a pain.
        # https://unix.stackexchange.com/a/243748/34459

        # Define the logger outside the try so the except paths can use it.
        log = get_logger(__name__)
        try:
            log.info("Trying to apply patch as-is")
            check_call_env([patch] + patch_args, cwd=src_dir)
        except CalledProcessError:
            if sys.platform == 'win32':
                # CRLF/LF mismatch is the usual culprit on Windows: retry with
                # the patch converted to LF, then to CRLF with --binary.
                unix_ending_file = _ensure_unix_line_endings(path)
                patch_args[-1] = unix_ending_file
                try:
                    log.info("Applying unmodified patch failed.  "
                             "Convert to unix line endings and trying again.")
                    check_call_env([patch] + patch_args, cwd=src_dir)
                except Exception:
                    # narrow from a bare 'except:' so KeyboardInterrupt et al.
                    # still propagate
                    log.info("Applying unix patch failed.  "
                             "Convert to CRLF line endings and trying again with --binary.")
                    patch_args.insert(0, '--binary')
                    win_ending_file = _ensure_win_line_endings(path)
                    patch_args[-1] = win_ending_file
                    try:
                        check_call_env([patch] + patch_args, cwd=src_dir)
                    finally:
                        if os.path.exists(win_ending_file):
                            os.remove(win_ending_file)  # clean up .patch_win file
                finally:
                    if os.path.exists(unix_ending_file):
                        os.remove(unix_ending_file)  # clean up .patch_unix file
            else:
                raise
Example no. 50
0
def test_skeleton_pypi(testing_workdir):
    """published in docs at http://conda.pydata.org/docs/build_tutorials/pkgs.html"""
    # Skeleton the pyinstrument recipe from PyPI, then build it.
    for command in ('conda skeleton pypi pyinstrument',
                    'conda build pyinstrument'):
        check_call_env(command.split())
Example no. 51
0
def run_setuppy(src_dir, temp_dir, python_version, extra_specs, config,
                setup_options):
    '''
    Patch distutils and then run setup.py in a subprocess.

    :param src_dir: Directory containing the source code
    :type src_dir: str
    :param temp_dir: Temporary directory for storing pkginfo.yaml
    :type temp_dir: str
    :param python_version: python version spec (e.g. '3.8') for the host env
    :param extra_specs: additional conda specs to install into the host env
    :param config: conda-build Config (prefixes, subdir, host python, ...)
    :param setup_options: extra arguments appended to ``setup.py install``
    '''
    # TODO: we could make everyone's lives easier if we include packaging here, because setuptools
    #    needs it in recent versions.  At time of writing, it is not a package in defaults, so this
    #    actually breaks conda-build right now.  Omit it until packaging is on defaults.
    # specs = ['python %s*' % python_version, 'pyyaml', 'setuptools', 'six', 'packaging', 'appdirs']
    specs = ['python %s*' % python_version, 'pyyaml', 'setuptools']
    with open(os.path.join(src_dir, "setup.py")) as setup:
        text = setup.read()
        if 'import numpy' in text or 'from numpy' in text:
            specs.append('numpy')

    specs.extend(extra_specs)

    # Recreate the host env from scratch so the setup.py run is isolated.
    rm_rf(config.host_prefix)
    create_env(config.host_prefix,
               specs_or_actions=specs,
               env='host',
               subdir=config.host_subdir,
               clear_cache=False,
               config=config)
    stdlib_dir = join(
        config.host_prefix,
        'Lib' if sys.platform == 'win32' else 'lib/python%s' % python_version)

    patch = join(temp_dir, 'pypi-distutils.patch')
    with open(patch, 'w') as f:
        f.write(DISTUTILS_PATCH.format(temp_dir.replace('\\', '\\\\')))

    if exists(join(stdlib_dir, 'distutils', 'core.py-copy')):
        # A pristine copy exists from a previous run: restore it before patching.
        rm_rf(join(stdlib_dir, 'distutils', 'core.py'))
        copy2(join(stdlib_dir, 'distutils', 'core.py-copy'),
              join(stdlib_dir, 'distutils', 'core.py'))
        # Avoid race conditions. Invalidate the cache.
        if PY3:
            rm_rf(
                join(stdlib_dir, 'distutils', '__pycache__',
                     'core.cpython-%s%s.pyc' % sys.version_info[:2]))
            rm_rf(
                join(stdlib_dir, 'distutils', '__pycache__',
                     'core.cpython-%s%s.pyo' % sys.version_info[:2]))
        else:
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyc'))
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyo'))
    else:
        # First run: stash a pristine copy so later runs can restore it.
        copy2(join(stdlib_dir, 'distutils', 'core.py'),
              join(stdlib_dir, 'distutils', 'core.py-copy'))
    apply_patch(join(stdlib_dir, 'distutils'), patch, config=config)

    # Save PYTHONPATH for later
    env = os.environ.copy()
    if 'PYTHONPATH' in env:
        # Use os.pathsep (';' on Windows, ':' elsewhere); the previous
        # hard-coded ':' broke PYTHONPATH on Windows.
        env[str('PYTHONPATH')] = str(src_dir + os.pathsep + env['PYTHONPATH'])
    else:
        env[str('PYTHONPATH')] = str(src_dir)
    cwd = getcwd()
    chdir(src_dir)
    cmdargs = [config.host_python, 'setup.py', 'install']
    cmdargs.extend(setup_options)
    try:
        check_call_env(cmdargs, env=env)
    except subprocess.CalledProcessError:
        print('$PYTHONPATH = %s' % env['PYTHONPATH'])
        sys.exit('Error: command failed: %s' % ' '.join(cmdargs))
    finally:
        chdir(cwd)
Example no. 52
0
def apply_patch(src_dir, path, config, git=None):
    """Apply the patch file at ``path`` to the source tree ``src_dir``.

    Git-format patches are applied with ``git am`` (when ``git`` is given) to
    keep sha1s reproducible.  Everything else goes through the ``patch``
    executable, escalating through progressively more destructive attempts:
    native (``--binary``), ``--ignore-whitespace``, an LF-converted copy of
    the patch, and finally a CRLF-converted copy with ``--binary``.  The
    first captured failure is re-raised if no later attempt succeeds.
    """
    def try_apply_patch(patch, patch_args, cwd, stdout, stderr):
        # An old reference: https://unix.stackexchange.com/a/243748/34459
        #
        # I am worried that '--ignore-whitespace' may be destructive. If so we should
        # avoid passing it, particularly in the initial (most likely to succeed) calls.
        #
        # From here-in I define a 'native' patch as one which has:
        # 1. LF for the patch block metadata.
        # 2. CRLF or LF for the actual patched lines matching those of the source lines.
        #
        # Calls to a raw 'patch' are destructive in various ways:
        # 1. It leaves behind .rej and .orig files
        # 2. If you pass it a patch with incorrect CRLF changes and do not pass --binary and
        #    if any of those blocks *can* be applied, then the whole file gets written out with
        #    LF.  This cannot be reversed either; the text changes will be reversed but not
        #    line-feed changes (since all line-endings get changed, not just those of the of
        #    patched lines)
        # 3. If patching fails, the bits that succeeded remain, so patching is not at all
        #    atomic.
        #
        # Still, we do our best to mitigate all of this as follows:
        # 1. We use --dry-run to test for applicability first.
        # 2 We check for native application of a native patch (--binary, without --ignore-whitespace)
        #
        # Some may bemoan the loss of patch failure artifacts, but it is fairly random which
        # patch and patch attempt they apply to so their informational value is low, besides that,
        # they are ugly.
        #
        import tempfile
        # '-r temp_name' sends any reject file to a disposable temp location;
        # it is unlinked in the finally block below.
        temp_name = os.path.join(tempfile.gettempdir(),
                                 next(tempfile._get_candidate_names()))
        base_patch_args = ['--no-backup-if-mismatch', '--batch'
                           ] + patch_args + ['-r', temp_name]
        log = get_logger(__name__)
        try:
            # Probe with --dry-run first; only apply for real if it would succeed.
            try_patch_args = base_patch_args[:]
            try_patch_args.append('--dry-run')
            log.debug("dry-run applying with\n{} {}".format(
                patch, try_patch_args))
            check_call_env([patch] + try_patch_args,
                           cwd=cwd,
                           stdout=stdout,
                           stderr=stderr)
            # You can use this to pretend the patch failed so as to test reversal!
            # raise CalledProcessError(-1, ' '.join([patch] + patch_args))
        except Exception as e:
            # NOTE(review): 'raise e' here is equivalent to a plain 'raise'.
            raise e
        else:
            # Dry run succeeded — apply for real.
            check_call_env([patch] + base_patch_args,
                           cwd=cwd,
                           stdout=stdout,
                           stderr=stderr)
        finally:
            if os.path.exists(temp_name):
                os.unlink(temp_name)

    # first failure captured here; re-raised at the end if no attempt succeeds
    exception = None
    if not isfile(path):
        raise RuntimeError('Error: no such patch: %s' % path)

    if config.verbose:
        stdout = None
        stderr = None
    else:
        # NOTE(review): FNULL is never closed, so this handle leaks for the
        # life of the process — consider subprocess.DEVNULL instead.
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    files, is_git_format = _get_patch_file_details(path)
    if git and is_git_format:
        # Prevents git from asking interactive questions,
        # also necessary to achieve sha1 reproducibility;
        # as is --committer-date-is-author-date. By this,
        # we mean a round-trip of git am/git format-patch
        # gives the same file.
        # NOTE(review): this aliases (and thus mutates) os.environ rather
        # than copying it — the committer settings leak into the parent env.
        git_env = os.environ
        git_env['GIT_COMMITTER_NAME'] = 'conda-build'
        git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'
        check_call_env(
            [git, 'am', '-3', '--committer-date-is-author-date', path],
            cwd=src_dir,
            stdout=stdout,
            stderr=stderr,
            env=git_env)
        config.git_commits_since_tag += 1
    else:
        if config.verbose:
            print('Applying patch: %r' % path)
        patch = external.find_executable('patch', config.build_prefix)
        if patch is None or len(patch) == 0:
            sys.exit("""\
        Error:
            Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
            You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
            or conda, m2-patch (Windows),
        """ % (os.pathsep.join(external.dir_paths)))
        patch_strip_level = _guess_patch_strip_level(files, src_dir)
        # '-i <file>' is kept separate so retries can swap in re-encoded copies
        path_args = ['-i', path]
        patch_args = ['-p%d' % patch_strip_level]

        try:
            log = get_logger(__name__)
            # This is the case we check first of all as it is the case that allows a properly line-ended
            # patch to apply correctly to a properly line-ended source tree, modifying it following the
            # patch chunks exactly.
            try_apply_patch(patch,
                            patch_args + ['--binary'] + path_args,
                            cwd=src_dir,
                            stdout=stdout,
                            stderr=stderr)
        except CalledProcessError as e:
            # Capture the first exception
            exception = e
            if config.verbose:
                log.info(
                    "Applying patch natively failed.  "
                    "Trying to apply patch non-binary with --ignore-whitespace"
                )
            try:
                # Attempt 2: non-binary with --ignore-whitespace.
                try_apply_patch(patch,
                                patch_args + ['--ignore-whitespace'] +
                                path_args,
                                cwd=src_dir,
                                stdout=stdout,
                                stderr=stderr)
            except CalledProcessError as e:  # noqa
                # Attempt 3: convert the patch itself to LF line endings.
                unix_ending_file = _ensure_unix_line_endings(path)
                path_args[-1] = unix_ending_file
                try:
                    if config.verbose:
                        log.info(
                            "Applying natively *and* non-binary failed!  "
                            "Converting to unix line endings and trying again.  "
                            "WARNING :: This is destructive to the source file line-endings."
                        )
                    # If this succeeds, it will change the source files' CRLFs to LFs. This can
                    # mess things up both for subsequent attempts (this line-ending change is not
                    # reversible) but worse, for subsequent, correctly crafted (I'm calling these
                    # "native" from now on) patches.
                    try_apply_patch(patch,
                                    patch_args + ['--ignore-whitespace'] +
                                    path_args,
                                    cwd=src_dir,
                                    stdout=stdout,
                                    stderr=stderr)
                except CalledProcessError:
                    if config.verbose:
                        log.warning(
                            "Applying natively, non-binary *and* unix attempts all failed!?  "
                            "Converting to CRLF line endings and trying again with "
                            "--ignore-whitespace and --binary. This can be destructive (even"
                            "with attempted reversal) to the source files' line-endings."
                        )
                    # Attempt 4 (last resort): CRLF-converted patch with --binary.
                    win_ending_file = _ensure_win_line_endings(path)
                    path_args[-1] = win_ending_file
                    try:
                        try_apply_patch(patch,
                                        patch_args +
                                        ['--ignore-whitespace', '--binary'] +
                                        path_args,
                                        cwd=src_dir,
                                        stdout=stdout,
                                        stderr=stderr)
                    except:
                        # final attempt failed too; the first exception
                        # (captured above) is re-raised below
                        pass
                    else:
                        exception = None
                    finally:
                        if os.path.exists(win_ending_file):
                            os.remove(
                                win_ending_file)  # clean up .patch_unix file
                else:
                    exception = None
                finally:
                    if os.path.exists(unix_ending_file):
                        os.remove(unix_ending_file)
    if exception:
        raise exception
Example no. 53
0
def execute_build_script(m, src_dir, env, provision_only=False):
    """Run the recipe's build script for output ``m``.

    Resolves the script from ``build/script`` in meta.yaml or from
    ``bld.bat`` / ``build.sh`` next to the recipe, then executes it
    (on non-Windows) via ``shell_path`` inside ``src_dir``.

    Parameters
    ----------
    m : recipe metadata object (provides config, paths and values)
    src_dir : directory the script is executed in; nothing runs if absent
    env : environment mapping for the subprocess.  Mutated in place:
        ``CONDA_BUILD`` is removed and ``PKG_NAME`` is set.
    provision_only : when True, only write the build scripts to disk
        without executing them.

    Side effects: spawns the build subprocess, prints the rewrite
    variables, records build stats, and strips ``__pycache__`` from the
    host prefix.
    """
    script = utils.ensure_list(m.get_value("build/script", None))
    if script:
        script = "\n".join(script)

    # Sub-outputs without their own script have nothing to build.
    if not m.output.is_first and not script:
        console.print("No build script found and not top-level build")
        return

    if isdir(src_dir):
        build_stats = {}
        if utils.on_win:
            build_file = join(m.path, "bld.bat")
            if isfile(build_file) or script:
                if isinstance(script, str) and script.endswith(".bat"):
                    build_file = os.path.join(m.path, script)
            windows.build(m,
                          build_file,
                          stats=build_stats,
                          provision_only=provision_only)
        else:
            build_file = join(m.path, "build.sh")
            # There is no sense in trying to run an empty build script.
            if isfile(build_file) or script:
                if isinstance(script, str) and script.endswith(".sh"):
                    build_file = os.path.join(m.path, script)

                work_file, _ = write_build_scripts(m, script, build_file)

                if not provision_only:
                    cmd = ([shell_path] + (["-x"] if m.config.debug else []) +
                           ["-o", "errexit", work_file])

                    # Rewrite long prefix paths in stdout back to their env
                    # variables so build logs stay readable.
                    rewrite_vars = ["PREFIX", "SRC_DIR"]
                    if not m.build_is_host:
                        rewrite_vars.insert(1, "BUILD_PREFIX")
                    rewrite_env = {
                        k: env[k]
                        for k in rewrite_vars if k in env
                    }
                    for k, v in rewrite_env.items():
                        console.print("{0} {1}={2}".format(
                            "set" if build_file.endswith(".bat") else "export",
                            k,
                            v,
                        ))

                    # Clear this so that the activate script will get run as
                    # necessary.  pop() instead of del: do not crash when the
                    # caller's env never had CONDA_BUILD set.
                    env.pop("CONDA_BUILD", None)
                    env["PKG_NAME"] = m.get_value("package/name")

                    utils.check_call_env(
                        cmd,
                        env=env,
                        rewrite_stdout_env=rewrite_env,
                        cwd=src_dir,
                        stats=build_stats,
                    )

                    utils.remove_pycache_from_scripts(m.config.host_prefix)

        if build_stats and not provision_only:
            log_stats(build_stats, "building {}".format(m.name()))
Esempio n. 54
0
def git_mirror_checkout_recursive(git, mirror_dir, checkout_dir, git_url, config, git_ref=None,
                                  git_depth=-1, is_top_level=True):
    """ Mirror (and checkout) a Git repository recursively.

        It's not possible to use `git submodule` on a bare
        repository, so the checkout must be done before we
        know which submodules there are.

        Worse, submodules can be identified by using either
        absolute URLs or relative paths.  If relative paths
        are used those need to be relocated upon mirroring,
        but you could end up with `../../../../blah` and in
        that case conda-build could be tricked into writing
        to the root of the drive and overwriting the system
        folders unless steps are taken to prevent that.

        :param git: path to the git executable to invoke
        :param mirror_dir: bare mirror location; must live under config.git_cache
        :param checkout_dir: working-tree clone created from the mirror
        :param git_url: upstream URL or local path being mirrored
        :param config: build config (verbose flag, git_cache path)
        :param git_ref: ref to check out at the top level (only used when
            is_top_level is True)
        :param git_depth: if > 0, passed as --depth to the initial clone
        :param is_top_level: False for recursive submodule invocations, which
            skips the checkout and final `submodule update` steps
    """

    # Suppress subprocess output unless verbose.  NOTE(review): FNULL is only
    # closed at the end when not verbose, so an exception in between leaks the
    # handle; each recursive call also opens its own.
    if config.verbose:
        stdout = None
        stderr = None
    else:
        FNULL = open(os.devnull, 'w')
        stdout = FNULL
        stderr = FNULL

    # Safety check against the relative-path escape described in the docstring:
    # refuse to mirror anywhere outside the configured git cache.
    if not mirror_dir.startswith(config.git_cache + os.sep):
        sys.exit("Error: Attempting to mirror to %s which is outside of GIT_CACHE %s"
                 % (mirror_dir, config.git_cache))

    # This is necessary for Cygwin git and m2-git, although it is fixed in newer MSYS2.
    git_mirror_dir = convert_path_for_cygwin_or_msys2(git, mirror_dir)
    git_checkout_dir = convert_path_for_cygwin_or_msys2(git, checkout_dir)

    if not isdir(os.path.dirname(mirror_dir)):
        os.makedirs(os.path.dirname(mirror_dir))
    if isdir(mirror_dir):
        # Mirror already exists: just refresh it.
        if git_ref != 'HEAD':
            check_call_env([git, 'fetch'], cwd=mirror_dir, stdout=stdout, stderr=stderr)
        else:
            # Unlike 'git clone', fetch doesn't automatically update the cache's HEAD,
            # So here we explicitly store the remote HEAD in the cache's local refs/heads,
            # and then explicitly set the cache's HEAD.
            # This is important when the git repo is a local path like "git_url: ../",
            # but the user is working with a branch other than 'master' without
            # explicitly providing git_rev.
            check_call_env([git, 'fetch', 'origin', '+HEAD:_conda_cache_origin_head'],
                       cwd=mirror_dir, stdout=stdout, stderr=stderr)
            check_call_env([git, 'symbolic-ref', 'HEAD', 'refs/heads/_conda_cache_origin_head'],
                       cwd=mirror_dir, stdout=stdout, stderr=stderr)
    else:
        # First time: create the bare mirror (optionally shallow).
        args = [git, 'clone', '--mirror']
        if git_depth > 0:
            args += ['--depth', str(git_depth)]
        try:
            check_call_env(args + [git_url, git_mirror_dir], stdout=stdout, stderr=stderr)
        except CalledProcessError:
            # on windows, remote URL comes back to us as cygwin or msys format.  Python doesn't
            # know how to normalize it.  Need to convert it to a windows path.
            if sys.platform == 'win32' and git_url.startswith('/'):
                git_url = convert_unix_path_to_win(git_url)

            if os.path.exists(git_url):
                # Local filepaths are allowed, but make sure we normalize them
                git_url = normpath(git_url)
            check_call_env(args + [git_url, git_mirror_dir], stdout=stdout, stderr=stderr)
        assert isdir(mirror_dir)

    # Now clone from mirror_dir into checkout_dir.
    check_call_env([git, 'clone', git_mirror_dir, git_checkout_dir], stdout=stdout, stderr=stderr)
    if is_top_level:
        checkout = git_ref
        if git_url.startswith('.'):
            # NOTE(review): if check_output_env forwards kwargs to
            # subprocess.check_output, passing stdout= here raises ValueError
            # (check_output manages stdout itself) -- confirm the wrapper's
            # signature.
            output = check_output_env([git, "rev-parse", checkout], stdout=stdout, stderr=stderr)
            checkout = output.decode('utf-8')
        if config.verbose:
            print('checkout: %r' % checkout)
        if checkout:
            check_call_env([git, 'checkout', checkout],
                           cwd=checkout_dir, stdout=stdout, stderr=stderr)

    # submodules may have been specified using relative paths.
    # Those paths are relative to git_url, and will not exist
    # relative to mirror_dir, unless we do some work to make
    # it so.
    try:
        # stderr is pointed at the stdout sink here (FNULL or None).
        submodules = check_output_env([git, 'config', '--file', '.gitmodules', '--get-regexp',
                                   'url'], stderr=stdout, cwd=checkout_dir)
        submodules = submodules.decode('utf-8').splitlines()
    except CalledProcessError:
        # No .gitmodules file (or no url entries): nothing to recurse into.
        submodules = []
    for submodule in submodules:
        matches = git_submod_re.match(submodule)
        # Only relative submodule URLs (starting with '.') need relocation.
        if matches and matches.group(2)[0] == '.':
            submod_name = matches.group(1)
            submod_rel_path = matches.group(2)
            submod_url = urljoin(git_url + '/', submod_rel_path)
            submod_mirror_dir = os.path.normpath(
                os.path.join(mirror_dir, submod_rel_path))
            if config.verbose:
                print('Relative submodule %s found: url is %s, submod_mirror_dir is %s' % (
                      submod_name, submod_url, submod_mirror_dir))
            # Recurse with a throwaway checkout dir just to mirror the submodule.
            with TemporaryDirectory() as temp_checkout_dir:
                git_mirror_checkout_recursive(git, submod_mirror_dir, temp_checkout_dir, submod_url,
                                              config, git_ref, git_depth, False)

    if is_top_level:
        # Now that all relative-URL-specified submodules are locally mirrored to
        # relatively the same place we can go ahead and checkout the submodules.
        check_call_env([git, 'submodule', 'update', '--init',
                    '--recursive'], cwd=checkout_dir, stdout=stdout, stderr=stderr)
        git_info(config)
    if not config.verbose:
        FNULL.close()
Esempio n. 55
0
def test_skeleton_pypi(testing_workdir):
    """published in docs at http://conda.pydata.org/docs/build_tutorials/pkgs.html"""
    # Generate a recipe from PyPI, then build it.
    for cmd in ('conda skeleton pypi click', 'conda build click'):
        check_call_env(cmd.split())
Esempio n. 56
0
def test_relative_git_url_submodule_clone(testing_workdir, monkeypatch):
    """
    A multi-part test encompassing the following checks:

    1. That git submodules identified with both relative and absolute URLs can be mirrored
       and cloned.

    2. That changes pushed to the original repository are updated in the mirror and finally
       reflected in the package version and filename via `GIT_DESCRIBE_TAG`.

    3. That `source.py` is using `check_call_env` and `check_output_env` and that those
       functions are using tools from the build env.
    """

    # Three repositories: a top-level repo plus one submodule referenced via a
    # relative URL and one via an absolute URL.
    toplevel = os.path.join(testing_workdir, 'toplevel')
    os.mkdir(toplevel)
    relative_sub = os.path.join(testing_workdir, 'relative_sub')
    os.mkdir(relative_sub)
    absolute_sub = os.path.join(testing_workdir, 'absolute_sub')
    os.mkdir(absolute_sub)

    # Fixed author/committer identity keeps commits reproducible across runs.
    sys_git_env = os.environ.copy()
    sys_git_env['GIT_AUTHOR_NAME'] = 'conda-build'
    sys_git_env['GIT_AUTHOR_EMAIL'] = '*****@*****.**'
    sys_git_env['GIT_COMMITTER_NAME'] = 'conda-build'
    sys_git_env['GIT_COMMITTER_EMAIL'] = '*****@*****.**'

    # Find the git executable before putting our dummy one on PATH.
    git = find_executable('git')

    # Put the broken git on os.environ["PATH"]
    exename = dummy_executable(testing_workdir, 'git')
    monkeypatch.setenv("PATH", testing_workdir, prepend=os.pathsep)
    # .. and ensure it gets run (and fails).
    FNULL = open(os.devnull, 'w')
    # Strangely ..
    #   stderr=FNULL suppresses the output from echo on OS X whereas
    #   stdout=FNULL suppresses the output from echo on Windows
    # NOTE(review): pytest.raises(message=...) was deprecated in pytest 4 and
    # removed in 5.0 -- confirm the pinned pytest version still accepts it.
    with pytest.raises(subprocess.CalledProcessError, message="Dummy git was not executed"):
        check_call_env([exename, '--version'], stdout=FNULL, stderr=FNULL)
    FNULL.close()

    # Two passes: tag 0 creates the repos and wires up the submodules; tag 1
    # adds new commits so the mirror-refresh path is exercised too.
    for tag in range(2):
        os.chdir(absolute_sub)
        if tag == 0:
            check_call_env([git, 'init'], env=sys_git_env)
        with open('absolute', 'w') as f:
            f.write(str(tag))
        check_call_env([git, 'add', 'absolute'], env=sys_git_env)
        check_call_env([git, 'commit', '-m', 'absolute{}'.format(tag)],
                                env=sys_git_env)

        os.chdir(relative_sub)
        if tag == 0:
            check_call_env([git, 'init'], env=sys_git_env)
        with open('relative', 'w') as f:
            f.write(str(tag))
        check_call_env([git, 'add', 'relative'], env=sys_git_env)
        check_call_env([git, 'commit', '-m', 'relative{}'.format(tag)],
                                env=sys_git_env)

        os.chdir(toplevel)
        if tag == 0:
            check_call_env([git, 'init'], env=sys_git_env)
        with open('toplevel', 'w') as f:
            f.write(str(tag))
        check_call_env([git, 'add', 'toplevel'], env=sys_git_env)
        check_call_env([git, 'commit', '-m', 'toplevel{}'.format(tag)],
                                env=sys_git_env)
        if tag == 0:
            # Absolute submodule needs the path translated for cygwin/msys git.
            check_call_env([git, 'submodule', 'add',
                            convert_path_for_cygwin_or_msys2(git, absolute_sub), 'absolute'],
                           env=sys_git_env)
            check_call_env([git, 'submodule', 'add', '../relative_sub', 'relative'],
                           env=sys_git_env)
        else:
            # Once we use a more recent Git for Windows than 2.6.4 on Windows or m2-git we
            # can change this to `git submodule update --recursive`.
            check_call_env([git, 'submodule', 'foreach', git, 'pull'], env=sys_git_env)
        check_call_env([git, 'commit', '-am', 'added submodules@{}'.format(tag)],
                              env=sys_git_env)
        # The tag drives GIT_DESCRIBE_TAG, and therefore the package version.
        check_call_env([git, 'tag', '-a', str(tag), '-m', 'tag {}'.format(tag)],
                                env=sys_git_env)

        # It is possible to use `Git for Windows` here too, though you *must* not use a different
        # (type of) git than the one used above to add the absolute submodule, because .gitmodules
        # stores the absolute path and that is not interchangeable between MSYS2 and native Win32.
        #
        # Also, git is set to False here because it needs to be rebuilt with the longer prefix. As
        # things stand, my _b_env folder for this test contains more than 80 characters.
        requirements = ('requirements', OrderedDict([
                        ('build',
                         ['git            # [False]',
                          'm2-git         # [win]',
                          'm2-filesystem  # [win]'])]))

        # Synthesize a meta.yaml whose build script records each submodule's
        # latest commit subject, and whose tests compare against expectations.
        filename = os.path.join(testing_workdir, 'meta.yaml')
        data = OrderedDict([
            ('package', OrderedDict([
                ('name', 'relative_submodules'),
                ('version', '{{ GIT_DESCRIBE_TAG }}')])),
            ('source', OrderedDict([
                ('git_url', toplevel),
                ('git_tag', str(tag))])),
            requirements,
            ('build', OrderedDict([
                ('script',
                 ['git --no-pager submodule --quiet foreach git log -n 1 --pretty=format:%%s > '
                       '%PREFIX%\\summaries.txt  # [win]',
                  'git --no-pager submodule --quiet foreach git log -n 1 --pretty=format:%s > '
                       '$PREFIX/summaries.txt   # [not win]'])
            ])),
            ('test', OrderedDict([
                ('commands',
                 ['echo absolute{}relative{} > %PREFIX%\\expected_summaries.txt       # [win]'
                      .format(tag, tag),
                  'fc.exe /W %PREFIX%\\expected_summaries.txt %PREFIX%\\summaries.txt # [win]',
                  'echo absolute{}relative{} > $PREFIX/expected_summaries.txt         # [not win]'
                      .format(tag, tag),
                  'diff -wuN ${PREFIX}/expected_summaries.txt ${PREFIX}/summaries.txt # [not win]'])
            ]))
        ])

        with open(filename, 'w') as outfile:
            outfile.write(yaml.dump(data, default_flow_style=False, width=999999999))
        # Reset the path because our broken, dummy `git` would cause `render_recipe`
        # to fail, while no `git` will cause the build_dependencies to be installed.
        monkeypatch.undo()
        # This will (after one spin round the loop) install and run 'git' with the
        # build env prepended to os.environ[]
        output = api.get_output_file_path(testing_workdir)[0]
        # Package filename must embed the version derived from the tag.
        assert ("relative_submodules-{}-".format(tag) in output)
        api.build(testing_workdir)