Example #1
  def test_refresh_with_conflict(self):
    with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
      self.assertEqual(set(), self.git.changed_files())
      self.assertEqual(set(['README']), self.git.changed_files(from_commit='HEAD^'))
      self.assertEqual(set(['README']), self.git.changes_in('HEAD'))

      # Create a change on this branch that is incompatible with the change to master
      with open(self.readme_file, 'w') as readme:
        readme.write('Conflict')

      subprocess.check_call(['git', 'commit', '-am', 'Conflict'])

      self.assertEquals(set([]), self.git.changed_files(include_untracked=True, from_commit='HEAD'))
      with self.assertRaises(Scm.LocalException):
        self.git.refresh(leave_clean=False)
      # The repo is dirty
      self.assertEquals(set(['README']), self.git.changed_files(include_untracked=True, from_commit='HEAD'))

      with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
        subprocess.check_call(['git', 'reset', '--hard', 'HEAD'])

      # Now try with leave_clean
      with self.assertRaises(Scm.LocalException):
        self.git.refresh(leave_clean=True)
      # The repo is clean
      self.assertEquals(set([]), self.git.changed_files(include_untracked=True, from_commit='HEAD'))
Example #2
 def test_environment_negation(self):
   with temporary_file() as output:
     with environment_as(HORK='BORK'):
       with environment_as(HORK=None):
         # test that the variable is cleared
         subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
                          stdout=output).wait()
         output.seek(0)
         self.assertEquals('False\n', output.read())
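
The examples above use environment_as as a context manager: keyword arguments are applied to os.environ on entry, a value of None unsets the variable (as this example checks), and the previous values are restored on exit. Below is a minimal sketch of a helper with those save/restore semantics, written for illustration only; it is not the pants.util.contextutil implementation.

from contextlib import contextmanager
import os

@contextmanager
def simple_environment_as(**kwargs):
  # Illustrative stand-in for environment_as: set each keyword for the duration
  # of the block, treat None as "unset", and restore the prior values on exit.
  saved = {key: os.environ.get(key) for key in kwargs}

  def apply_env(mapping):
    for key, value in mapping.items():
      if value is None:
        os.environ.pop(key, None)
      else:
        os.environ[key] = value

  try:
    apply_env(kwargs)
    yield
  finally:
    apply_env(saved)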
Example #3
  def test_coverage_paths(self):
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    with environment_as(PANTS_PY_COVERAGE='paths:does_not_exist/,nor_does_this/'):
      # paths: should trump .coverage
      self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([1, 2, 5, 6], not_run_statements)

    with environment_as(PANTS_PY_COVERAGE='paths:core.py'):
      self.run_failing_tests(targets=[self.all], failed_targets=[self.all])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([], not_run_statements)
Example #4
 def _do_run_tests_with_args(self, pex, workunit, args):
   try:
     # The pytest runner we use accepts a --pdb argument that will launch an interactive pdb
     # session on any test failure.  In order to support use of this pass-through flag we must
     # turn off stdin buffering that otherwise occurs.  Setting the PYTHONUNBUFFERED env var to
     # any value achieves this in python2.7.  We'll need a different solution when we support
     # running pants under CPython 3 which does not unbuffer stdin using this trick.
     env = {
       'PYTHONUNBUFFERED': '1',
     }
     profile = self.get_options().profile
     if profile:
       env['PEX_PROFILE_FILENAME'] = '{0}.subprocess.{1:.6f}'.format(profile, time.time())
     with environment_as(**env):
       rc = self._spawn_and_wait(pex, workunit, args=args, setsid=True)
       return PythonTestResult.rc(rc)
   except TestFailedTaskError:
     # _spawn_and_wait wraps the test runner in a timeout, so it could
     # fail with a TestFailedTaskError. We can't just set PythonTestResult
     # to a failure because the resultslog doesn't have all the failures
     # when tests are killed with a timeout. Therefore we need to re-raise
     # here.
     raise
   except Exception:
     self.context.log.error('Failed to run test!')
     self.context.log.info(traceback.format_exc())
     return PythonTestResult.exception()
Example #5
 def _maybe_scrubbed_env(cls):
   for env_var in cls._SCRUBBED_ENV:
     value = os.getenv(env_var)
     if value:
       log.warn('Scrubbing {env_var}={value}'.format(env_var=env_var, value=value))
   with environment_as(**cls._SCRUBBED_ENV):
     yield
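
This snippet is a generator, so in its original context it is presumably wrapped with contextlib.contextmanager, and _SCRUBBED_ENV is presumably a class-level dict mapping each variable to None so that environment_as removes it (the clearing behaviour shown in Example #2). A hedged sketch of that wiring, using purely illustrative class and variable names, might be:

import logging
import os
from contextlib import contextmanager

from pants.util.contextutil import environment_as

log = logging.getLogger(__name__)


class ToolRunner(object):
  # Hypothetical names; mapping each variable to None asks environment_as to
  # unset it for the duration of the block.
  _SCRUBBED_ENV = {
    'TOOL_OPTS': None,
    'TOOL_HOME': None,
  }

  @classmethod
  @contextmanager
  def _maybe_scrubbed_env(cls):
    for env_var in cls._SCRUBBED_ENV:
      value = os.getenv(env_var)
      if value:
        log.warning('Scrubbing {env_var}={value}'.format(env_var=env_var, value=value))
    with environment_as(**cls._SCRUBBED_ENV):
      yield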
Example #6
  def _run_tests(self, targets, stdout, stderr):
    if not targets:
      return PythonTestResult.rc(0)

    sources = list(itertools.chain(*[t.sources_relative_to_buildroot() for t in targets]))
    if not sources:
      return PythonTestResult.rc(0)

    with self._test_runner(targets, stdout, stderr) as (pex, test_args):
      args = ['-s'] if self._debug else []
      args.extend(test_args)
      args.extend(self._args)
      args.extend(sources)

      try:
        # The pytest runner we use accepts a --pdb argument that will launch an interactive pdb
        # session on any test failure.  In order to support use of this pass-through flag we must
        # turn off stdin buffering that otherwise occurs.  Setting the PYTHONUNBUFFERED env var to
        # any value achieves this in python2.7.  We'll need a different solution when we support
        # running pants under CPython 3 which does not unbuffer stdin using this trick.
        with environment_as(PYTHONUNBUFFERED='1'):
          rc = pex.run(args=args, setsid=True, stdout=stdout, stderr=stderr)
          return PythonTestResult.rc(rc)
      except Exception:
        print('Failed to run test!', file=stderr)
        traceback.print_exc()
        return PythonTestResult.exception()
Example #7
  def test_simple(self):
    with environment_as(ZINCUTILS_SORTED_ANALYSIS='1'):
      def get_test_analysis_path(name):
        return os.path.join(os.path.dirname(__file__), 'testdata', 'simple', name)

      def get_analysis_text(name):
        with open(get_test_analysis_path(name), 'r') as fp:
          return fp.read()

      def rebase(analysis_file, java_home=None):
        orig = get_analysis_text(analysis_file)
        buf = StringIO.StringIO()
        ZincAnalysisParser().rebase(iter(orig.splitlines(True)), buf,
                                    {b'/src/pants': AnalysisTools._PANTS_BUILDROOT_PLACEHOLDER,
                                     b'/src/pants/.pants.d': AnalysisTools._PANTS_WORKDIR_PLACEHOLDER}, java_home)
        return buf.getvalue()

      # Now check rebasing.
      rebased = rebase('simple.analysis')
      expected_rebased = get_analysis_text('simple.rebased.analysis')
      self.assertMultiLineEqual(expected_rebased, rebased)

      # And rebasing+filtering.
      rebased = rebase('simple.analysis', b'/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk')
      expected_filtered_rebased = get_analysis_text('simple.rebased.filtered.analysis')
      self.assertMultiLineEqual(expected_filtered_rebased, rebased)

      # Check parse_deps is returning both bin and src dependencies.
      infile = iter(get_analysis_text('simple.analysis').splitlines(True))
      deps = ZincAnalysisParser().parse_deps(infile, '')
      self.assertItemsEqual(deps['/src/pants/examples/src/scala/org/pantsbuild/example/hello/exe/Exe.scala'], [
          '/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/jre/lib/rt.jar',
          'org/pantsbuild/example/hello/welcome/WelcomeEverybody$.class',
        ])
Example #8
  def test_junit_runner_extra_env_vars_none(self):
    with environment_as(THIS_VARIABLE="12", THAT_VARIABLE="This is a variable."):
      self.make_target(
        spec='foo:foo_test',
        target_type=JavaTests,
        sources=['FooTest.java'],
        extra_env_vars={
          'HELLO': None,
          'THERE': False,
          'THIS_VARIABLE': None
        },
      )

      self.execute_junit_runner(dedent("""
          import org.junit.Test;
          import static org.junit.Assert.assertEquals;
          import static org.junit.Assert.assertFalse;
          public class FooTest {
            @Test
            public void testFoo() {
              assertEquals("False", System.getenv().get("THERE"));
              assertEquals("This is a variable.", System.getenv().get("THAT_VARIABLE"));
              assertFalse(System.getenv().containsKey("HELLO"));
              assertFalse(System.getenv().containsKey("THIS_VARIABLE"));
            }
          }
        """), target_name='foo:foo_test')
Example #9
  def test_junit_xml(self):
    # We expect xml of the following form:
    # <testsuite errors=[Ne] failures=[Nf] skips=[Ns] tests=[Nt] ...>
    #   <testcase classname="..." name="..." .../>
    #   <testcase classname="..." name="..." ...>
    #     <failure ...>...</failure>
    #   </testcase>
    # </testsuite>

    report_basedir = os.path.join(self.build_root, 'dist', 'junit')
    with environment_as(JUNIT_XML_BASE=report_basedir):
      self.run_failing_tests(targets=[self.red, self.green], failed_targets=[self.red])

      files = glob.glob(os.path.join(report_basedir, '*.xml'))
      self.assertEqual(1, len(files))
      junit_xml = files[0]
      with open(junit_xml) as fp:
        print(fp.read())

      root = DOM.parse(junit_xml).documentElement
      self.assertEqual(2, len(root.childNodes))
      self.assertEqual(2, int(root.getAttribute('tests')))
      self.assertEqual(1, int(root.getAttribute('failures')))
      self.assertEqual(0, int(root.getAttribute('errors')))
      self.assertEqual(0, int(root.getAttribute('skips')))

      children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
      self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
      self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
      self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)
Example #10
def initialize_repo(worktree, gitdir=None):
  """Initialize a git repository for the given `worktree`.

  NB: The given `worktree` must contain at least one file which will be committed to form an initial
  commit.

  :param string worktree: The path to the git work tree.
  :param string gitdir: An optional path to the `.git` dir to use.
  :returns: A `Git` repository object that can be used to interact with the repo.
  :rtype: :class:`pants.scm.git.Git`
  """
  @contextmanager
  def use_gitdir():
    if gitdir:
      yield gitdir
    else:
      with temporary_dir() as d:
        yield d

  with use_gitdir() as git_dir, environment_as(GIT_DIR=git_dir, GIT_WORK_TREE=worktree):
    subprocess.check_call(['git', 'init'])
    subprocess.check_call(['git', 'config', 'user.email', '*****@*****.**'])
    subprocess.check_call(['git', 'config', 'user.name', 'Your Name'])
    subprocess.check_call(['git', 'add', '.'])
    subprocess.check_call(['git', 'commit', '-am', 'Add project files.'])

    yield Git(gitdir=git_dir, worktree=worktree)
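
Since this function yields a Git object, it is evidently meant to be used as a context manager (the create_isolated_git_repo helper later in this listing consumes it that way). A hedged usage sketch, with hypothetical file names, might look like:

import os

from pants.util.contextutil import temporary_dir
from pants.util.dirutil import touch

with temporary_dir() as project_dir:
  touch(os.path.join(project_dir, 'README'))  # the worktree must already hold at least one file
  with initialize_repo(project_dir) as git:
    touch(os.path.join(project_dir, 'hello.txt'))
    git.add(os.path.join(project_dir, 'hello.txt'))
    git.commit('Add hello.txt.')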
Example #11
  def execute(self):
    def is_python_test(target):
      # Note that we ignore PythonTestSuite, because we'll see the PythonTests targets
      # it depends on anyway, so if we don't, we'll end up running the tests twice.
      # TODO(benjy): Once we're off the 'build' command we can get rid of python_test_suite,
      # or make it an alias of dependencies().
      return isinstance(target, PythonTests)

    test_targets = list(filter(is_python_test, self.context.targets()))
    if test_targets:
      self.context.release_lock()

      debug = self.get_options().level == 'debug'

      args = [] if self.get_options().no_colors else ['--color', 'yes']
      for options in self.get_options().options + self.get_passthru_args():
        args.extend(safe_shlex_split(options))
      test_builder = PythonTestBuilder(context=self.context,
                                       targets=test_targets,
                                       args=args,
                                       interpreter=self.interpreter,
                                       fast=self.get_options().fast,
                                       debug=debug)
      with self.context.new_workunit(name='run',
                                     labels=[WorkUnit.TOOL, WorkUnit.TEST]) as workunit:
        # pytest uses py.io.terminalwriter for output. That class detects the terminal
        # width and attempts to use all of it. However we capture and indent the console
        # output, leading to weird-looking line wraps. So we trick the detection code
        # into thinking the terminal window is narrower than it is.
        cols = os.environ.get('COLUMNS', 80)
        with environment_as(COLUMNS=str(int(cols) - 30)):
          stdout = workunit.output('stdout') if workunit else None
          stderr = workunit.output('stderr') if workunit else None
          if test_builder.run(stdout=stdout, stderr=stderr):
            raise TaskError()
Example #12
  def test_simple(self):
    with environment_as(ZINCUTILS_SORTED_ANALYSIS='1'):
      def get_test_analysis_path(name):
        return os.path.join(os.path.dirname(__file__), 'testdata', 'simple', name)

      def get_analysis_text(name):
        with open(get_test_analysis_path(name), 'r') as fp:
          return fp.read()

      # Now check rebasing.
      orig = iter(get_analysis_text('simple.analysis').splitlines(True))
      expected_rebased = get_analysis_text('simple.rebased.analysis')
      buf = StringIO.StringIO()
      ZincAnalysisParser().rebase(orig, buf, b'/src/pants', b'$PANTS_HOME')
      rebased = buf.getvalue()
      self.assertMultiLineEqual(expected_rebased, rebased)

      # And rebasing+filtering.
      orig = iter(get_analysis_text('simple.analysis').splitlines(True))
      expected_filtered_rebased = get_analysis_text('simple.rebased.filtered.analysis')
      buf = StringIO.StringIO()
      ZincAnalysisParser().rebase(orig, buf, b'/src/pants', b'$PANTS_HOME',
                                  b'/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk')
      filtered_rebased = buf.getvalue()
      self.assertMultiLineEqual(expected_filtered_rebased, filtered_rebased)

      # Check parse_deps is returning both bin and src dependencies.
      infile = iter(get_analysis_text('simple.analysis').splitlines(True))
      deps = ZincAnalysisParser().parse_deps(infile, '')
      f = '/src/pants/examples/src/scala/org/pantsbuild/example/hello/exe/Exe.scala'
      self.assertItemsEqual(deps[f], [
          '/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/jre/lib/rt.jar',
          '/src/pants/examples/src/scala/org/pantsbuild/example/hello/welcome/Welcome.scala',
        ])
Example #13
  def test_pantsd_client_env_var_is_inherited_by_pantsd_runner_children(self):
    EXPECTED_KEY = 'TEST_ENV_VAR_FOR_PANTSD_INTEGRATION_TEST'
    EXPECTED_VALUE = '333'
    with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
      # First, launch the daemon without any local env vars set.
      pantsd_run(['help'])
      checker.assert_started()

      # Then, set an env var on the secondary call.
      # We additionally set the `HERMETIC_ENV` env var to allow the integration test harness
      # to pass this variable through.
      env = {
          EXPECTED_KEY: EXPECTED_VALUE,
          'HERMETIC_ENV': EXPECTED_KEY,
        }
      with environment_as(**env):
        result = pantsd_run(
          ['-q',
           'run',
           'testprojects/src/python/print_env',
           '--',
           EXPECTED_KEY]
        )
        checker.assert_running()

      self.assertEqual(EXPECTED_VALUE, ''.join(result.stdout_data).strip())
Example #14
def initialize_repo(worktree, gitdir=None):
  """Initialize a git repository for the given `worktree`.

  NB: The given `worktree` must contain at least one file which will be committed to form an initial
  commit.

  :param string worktree: The path to the git work tree.
  :param string gitdir: An optional path to the `.git` dir to use.
  :returns: A `Git` repository object that can be used to interact with the repo.
  :rtype: :class:`pants.scm.git.Git`
  """
  @contextmanager
  def use_gitdir():
    if gitdir:
      yield gitdir
    else:
      with temporary_dir() as d:
        yield d

  with use_gitdir() as git_dir, environment_as(GIT_DIR=git_dir, GIT_WORK_TREE=worktree):
    subprocess.check_call(['git', 'init'])
    subprocess.check_call(['git', 'config', 'user.email', '*****@*****.**'])
    # TODO: This method inherits the global git settings, so if a developer has gpg signing on, this
    # will turn that off. We should probably just disable reading from the global config somehow:
    # https://git-scm.com/docs/git-config.
    subprocess.check_call(['git', 'config', 'commit.gpgSign', 'false'])
    subprocess.check_call(['git', 'config', 'user.name', 'Your Name'])
    subprocess.check_call(['git', 'add', '.'])
    subprocess.check_call(['git', 'commit', '-am', 'Add project files.'])

    yield Git(gitdir=git_dir, worktree=worktree)
Example #15
  def _maybe_emit_coverage_data(self, targets, chroot, pex, stdout, stderr):
    coverage = os.environ.get('PANTS_PY_COVERAGE')
    if coverage is None:
      yield []
      return

    def read_coverage_list(prefix):
      return coverage[len(prefix):].split(',')

    coverage_modules = None
    if coverage.startswith('modules:'):
      # NB: pytest-cov maps these modules to the `[run] source` config.  So for
      # `modules:pants.base,pants.util` the config emitted has:
      # [run]
      # source =
      #   pants.base
      #   pants.util
      #
      # Now even though these are not paths, coverage sees the dots and switches to a module
      # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
      # like `pants.` serve to engage this module prefix-matching as one might hope.  It
      # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
      # module prefix name.
      coverage_modules = read_coverage_list('modules:')
    elif coverage.startswith('paths:'):
      coverage_modules = []
      for path in read_coverage_list('paths:'):
        if not os.path.exists(path) and not os.path.isabs(path):
          # Look for the source in the PEX chroot since it's not available from the CWD.
          path = os.path.join(chroot, path)
        coverage_modules.append(path)

    with self._cov_setup(targets,
                         chroot,
                         coverage_modules=coverage_modules) as (args, coverage_rc):
      try:
        yield args
      finally:
        with environment_as(PEX_MODULE='coverage.cmdline:main'):
          # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
          # This swaps the /tmp pex chroot source paths for the local original source paths
          # the pex was generated from and which the user understands.
          shutil.move('.coverage', '.coverage.raw')
          pex.run(args=['combine', '--rcfile', coverage_rc], stdout=stdout, stderr=stderr)

          pex.run(args=['report', '-i', '--rcfile', coverage_rc], stdout=stdout, stderr=stderr)

          # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
          # intermediate .html that points to each of the coverage reports generated and
          # webbrowser.open to that page.
          # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
          # consider combining coverage files from all runs in this Tasks's execute and then
          # producing just 1 console and 1 html report whether or not the tests are run in fast
          # mode.
          relpath = Target.maybe_readable_identify(targets)
          pants_distdir = Config.from_cache().getdefault('pants_distdir')
          target_dir = os.path.join(pants_distdir, 'coverage', relpath)
          safe_mkdir(target_dir)
          pex.run(args=['html', '-i', '--rcfile', coverage_rc, '-d', target_dir],
                  stdout=stdout, stderr=stderr)
Example #16
  def setUp(self):
    self.origin = safe_mkdtemp()
    with pushd(self.origin):
      subprocess.check_call(['git', 'init', '--bare'])

    self.gitdir = safe_mkdtemp()
    self.worktree = safe_mkdtemp()

    self.readme_file = os.path.join(self.worktree, 'README')

    with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
      self.init_repo('depot', self.origin)

      touch(self.readme_file)
      subprocess.check_call(['git', 'add', 'README'])
      subprocess.check_call(['git', 'commit', '-am', 'initial commit with decode -> \x81b'])
      subprocess.check_call(['git', 'tag', 'first'])
      subprocess.check_call(['git', 'push', '--tags', 'depot', 'master'])
      subprocess.check_call(['git', 'branch', '--set-upstream', 'master', 'depot/master'])

      with safe_open(self.readme_file, 'w') as readme:
        readme.write('Hello World.')
      subprocess.check_call(['git', 'commit', '-am', 'Update README.'])

    self.clone2 = safe_mkdtemp()
    with pushd(self.clone2):
      self.init_repo('origin', self.origin)
      subprocess.check_call(['git', 'pull', '--tags', 'origin', 'master:master'])

      with safe_open(os.path.realpath('README'), 'a') as readme:
        readme.write('--')
      subprocess.check_call(['git', 'commit', '-am', 'Update README 2.'])
      subprocess.check_call(['git', 'push', '--tags', 'origin', 'master'])

    self.git = Git(gitdir=self.gitdir, worktree=self.worktree)
Example #17
  def test_coverage_simple(self):
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    with environment_as(PANTS_PY_COVERAGE='1'):
      self.run_tests(targets=[self.green])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([6], not_run_statements)

      self.run_failing_tests(targets=[self.red], failed_targets=[self.red])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([2], not_run_statements)

      self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([], not_run_statements)

      # The `all` target has no coverage attribute and the code under test does not follow the
      # auto-discover pattern, so we should get no coverage.
      self.run_failing_tests(targets=[self.all], failed_targets=[self.all])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([1, 2, 5, 6], not_run_statements)

      self.run_failing_tests(targets=[self.all_with_coverage], failed_targets=[self.all_with_coverage])
      all_statements, not_run_statements = self.load_coverage_data(covered_file)
      self.assertEqual([1, 2, 5, 6], all_statements)
      self.assertEqual([], not_run_statements)
Example #18
  def test_setuptools_version(self):
    self.create_file('src/python/foo/__init__.py')
    self.create_python_library(
      relpath='src/python/foo/commands',
      name='commands',
      source_contents_map={
        'print_sys_path.py': dedent("""
          import os
          import sys
          from setuptools import Command


          class PrintSysPath(Command):
            user_options = []

            def initialize_options(self):
              pass

            def finalize_options(self):
              pass

            def run(self):
              with open(os.path.join(os.path.dirname(__file__), 'sys_path.txt'), 'w') as fp:
                fp.write(os.linesep.join(sys.path))
          """)
      },
    )
    foo = self.create_python_library(
      relpath='src/python/foo',
      name='foo',
      dependencies=[
        'src/python/foo/commands',
      ],
      provides=dedent("""
      setup_py(
        name='foo',
        version='0.0.0',
      )
      """)
    )
    self.set_options(run='print_sys_path')

    # Make sure setup.py can see our custom distutils Command 'print_sys_path'.
    sdist_srcdir = os.path.join(self.distdir, 'foo-0.0.0', 'src')
    with environment_as(PYTHONPATH=sdist_srcdir):
      with self.run_execute(foo):
        with open(os.path.join(sdist_srcdir, 'foo', 'commands', 'sys_path.txt')) as fp:
          def assert_extra(name, expected_version):
            package = Package.from_href(fp.readline().strip())
            self.assertEqual(name, package.name)
            self.assertEqual(expected_version, package.raw_version)

          # The 1st two elements of the sys.path should be our custom SetupPyRunner Installer's
          # setuptools and wheel mixins, which should match the setuptools and wheel versions
          # specified by the PythonSetup subsystem.
          init_subsystem(PythonSetup)
          python_setup = PythonSetup.global_instance()
          assert_extra('setuptools', python_setup.setuptools_version)
          assert_extra('wheel', python_setup.wheel_version)
Example #19
 def tool(self, name):
   with temporary_dir() as tool_root:
     tool_path = os.path.join(tool_root, name)
     touch(tool_path)
     chmod_plus_x(tool_path)
     new_path = os.pathsep.join([tool_root] + os.environ.get('PATH', '').split(os.pathsep))
     with environment_as(PATH=new_path):
       yield tool_path
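
This fixture yields the path of an empty executable stub whose directory has been prepended to PATH. A hedged usage sketch inside a test case that provides the fixture above (the tool name 'thrift' is arbitrary) could be:

# Illustrative only; assumes `self` is a test case exposing the `tool` fixture above.
with self.tool('thrift') as tool_path:
  # The stub's directory was prepended to PATH, so a PATH lookup of 'thrift'
  # resolves to the stub we just created.
  first_dir = os.environ['PATH'].split(os.pathsep)[0]
  self.assertEqual(tool_path, os.path.join(first_dir, 'thrift'))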
Example #20
    def test_simple(self):
        def get_test_analysis_path(name):
            return os.path.join(os.path.dirname(__file__), "testdata", "simple", name)

        def get_analysis_text(name):
            with open(get_test_analysis_path(name), "r") as fp:
                return fp.read()

        def parse_analysis(name):
            return JMakeAnalysisParser().parse_from_path(get_test_analysis_path(name))

        def analysis_to_string(analysis):
            buf = StringIO.StringIO()
            analysis.write(buf)
            return buf.getvalue()

        with environment_as(JMAKE_SORTED_ANALYSIS="1"):
            full_analysis = parse_analysis("simple.analysis")

            analysis_splits = full_analysis.split(
                [
                    [b"/src/pants/examples/src/java/org/pantsbuild/example/hello/greet/Greeting.java"],
                    [b"/src/pants/examples/src/java/org/pantsbuild/example/hello/main/HelloMain.java"],
                ]
            )
            self.assertEquals(len(analysis_splits), 2)

            def compare_split(i):
                expected_filename = "simple_split{0}.analysis".format(i)

                # First compare as objects.
                expected_analysis = parse_analysis(expected_filename)
                self.assertTrue(expected_analysis.is_equal_to(analysis_splits[i]))

                # Then compare as text.
                expected = get_analysis_text(expected_filename)
                actual = analysis_to_string(analysis_splits[i])
                self.assertMultiLineEqual(expected, actual)

            compare_split(0)
            compare_split(1)

            # Now merge and check that we get what we started with.
            merged_analysis = JMakeAnalysis.merge(analysis_splits)
            # Check that they compare as objects.
            self.assertTrue(full_analysis.is_equal_to(merged_analysis))
            # Check that they compare as text.
            expected = get_analysis_text("simple.analysis")
            actual = analysis_to_string(merged_analysis)
            self.assertMultiLineEqual(expected, actual)

            # Now check rebasing.
            orig = iter(get_analysis_text("simple.analysis").splitlines(True))
            expected_rebased = get_analysis_text("simple.rebased.analysis")
            buf = StringIO.StringIO()
            JMakeAnalysisParser().rebase(orig, buf, b"/src/pants", b"$PANTS_HOME")
            rebased = buf.getvalue()
            self.assertMultiLineEqual(expected_rebased, rebased)
Example #21
  def test_changelog_utf8(self):
    with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
      def commit_contents_to_files(message, encoding, content, *files):
        for path in files:
          with safe_open(os.path.join(self.worktree, path), 'w') as fp:
            fp.write(content)
        subprocess.check_call(['git', 'add', '.'])

        subprocess.check_call(['git', 'config', '--local', '--add', 'i18n.commitencoding',
                               encoding])
        try:
          subprocess.check_call(['git', 'commit', '-m', message.encode(encoding)])
        finally:
          subprocess.check_call(['git', 'config', '--local', '--unset-all', 'i18n.commitencoding'])

        return subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()

      # Mix a non-UTF-8 author into all commits to exercise that the corner case described here does
      # not adversely impact the ability to render the changelog (even if rendering of certain
      # characters is incorrect): http://comments.gmane.org/gmane.comp.version-control.git/262685
      non_utf8_config = dedent("""
      [user]
        name = Noralf Trønnes
      """).encode('iso-8859-1')

      with open(os.path.join(self.gitdir, 'config'), 'wb') as fp:
        fp.write(non_utf8_config)

      # Note the copyright symbol is used as the non-ascii character in the next 3 commits
      commit_contents_to_files('START1 © END', 'iso-8859-1', '1', 'foo')
      commit_contents_to_files('START2 © END', 'latin1', '1', 'bar')
      commit_contents_to_files('START3 © END', 'utf-8', '1', 'baz')

      commit_contents_to_files('START4 ~ END', 'us-ascii', '1', 'bip')

      # Prove our non-utf-8 encodings were stored in the commit metadata.
      log = subprocess.check_output(['git', 'log', '--format=%e'])
      self.assertEqual(['us-ascii', 'latin1', 'iso-8859-1'], filter(None, log.strip().splitlines()))

      # And show that git log nonetheless successfully transcodes all the commits to utf-8.
      changelog = self.git.changelog()

      # The ascii commit should combine with the iso-8859-1 author and fail to transcode the
      # o-with-stroke character, so it should be replaced with the utf-8 replacement character
      # \uFFFD or �.
      self.assertIn('Noralf Tr�nnes', changelog)
      self.assertIn('Noralf Tr\uFFFDnnes', changelog)

      # For the other 3 commits, each of iso-8859-1, latin1 and utf-8 has an encoding for the
      # o-with-stroke character - \u00F8 or ø - so we should find it:
      self.assertIn('Noralf Trønnes', changelog)
      self.assertIn('Noralf Tr\u00F8nnes', changelog)

      self.assertIn('START1 © END', changelog)
      self.assertIn('START2 © END', changelog)
      self.assertIn('START3 © END', changelog)
      self.assertIn('START4 ~ END', changelog)
Example #22
 def test_hermetic_environment_unicode(self):
   UNICODE_CHAR = '¡'
   ENCODED_CHAR = UNICODE_CHAR.encode('utf-8')
   expected_output = UNICODE_CHAR if PY3 else ENCODED_CHAR
   with environment_as(**dict(XXX=UNICODE_CHAR)):
     self.assertEquals(os.environ['XXX'], expected_output)
     with hermetic_environment_as(**dict(AAA=UNICODE_CHAR)):
       self.assertIn('AAA', os.environ)
       self.assertEquals(os.environ['AAA'], expected_output)
     self.assertEquals(os.environ['XXX'], expected_output)
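
hermetic_environment_as is not defined anywhere in this listing; the assertions above only show that it behaves like environment_as for the variables it is given. Assuming "hermetic" means the block sees only those variables, a rough sketch of the idea (not the pants implementation) could be:

from contextlib import contextmanager
import os

@contextmanager
def sketch_hermetic_environment_as(**kwargs):
  # Illustrative assumption: run the block with os.environ containing only the
  # given variables, then restore the full prior environment on exit.
  saved = dict(os.environ)
  try:
    os.environ.clear()
    for key, value in kwargs.items():
      if value is not None:
        os.environ[key] = value
    yield
  finally:
    os.environ.clear()
    os.environ.update(saved)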
Example #23
 def _scrub_cov_env_vars(self):
   cov_env_vars = {k: v for k, v in os.environ.items() if self._is_coverage_env_var(k)}
   if cov_env_vars:
     self.context.log.warn('Scrubbing coverage environment variables\n\t{}'
                           .format('\n\t'.join(sorted('{}={}'.format(k, v)
                                                      for k, v in cov_env_vars.items()))))
     with environment_as(**{k: None for k in cov_env_vars}):
       yield
   else:
     yield
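
The _is_coverage_env_var predicate used above is not shown in this listing; a hypothetical version that flags coverage.py-related variables, purely for illustration, might be:

def _is_coverage_env_var(self, name):
  # Hypothetical predicate: treat COVERAGE and any COV_-prefixed variable as
  # coverage-related so it gets scrubbed (mapped to None) before the test run.
  return name.startswith('COV_') or name == 'COVERAGE'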
Example #24
 def config(self, overrides=''):
   """Returns a config valid for the test build root."""
   if overrides:
     with temporary_file() as fp:
       fp.write(overrides)
       fp.close()
       with environment_as(PANTS_CONFIG_OVERRIDE=fp.name):
         return Config.load()
   else:
     return Config.load()
Example #25
 def _setup_py_invocation_environment(self, pythonpath):
   setup_py_env = self._request_single(
     SetupPyInvocationEnvironment, self._native_toolchain_instance())
   env = setup_py_env.as_env_dict()
   if pythonpath:
     self.context.log.debug('Setting PYTHONPATH with setup_requires site directory: {}'
                            .format(pythonpath))
     env['PYTHONPATH'] = pythonpath
   with environment_as(**env):
     yield
Example #26
  def _cov_setup(self, targets, chroot, coverage_modules=None):
    def compute_coverage_modules(target):
      if target.coverage:
        return target.coverage
      else:
        # This makes the assumption that tests/python/<target> will be testing src/python/<target>.
        # Note in particular that this doesn't work for pants' own tests, as those are under
        # the top level package 'pants_tests', rather than just 'pants'.
        # TODO(John Sirois): consider failing fast if there is no explicit coverage scheme; but also
        # consider supporting configuration of a global scheme whether that be parallel
        # dirs/packages or some arbitrary function that can be registered that takes a test target
        # and hands back the source packages or paths under test.
        return set(os.path.dirname(source).replace(os.sep, '.')
                   for source in target.sources_relative_to_source_root())

    if coverage_modules is None:
      coverage_modules = set(itertools.chain(*[compute_coverage_modules(t) for t in targets]))

    # Hack in turning off pytest_cov reporting to the console - we want to control this ourselves.
    # Take the approach of registering a plugin that replaces the pycov plugin's
    # `pytest_terminal_summary` callback with a noop.
    with temporary_dir() as plugin_root:
      plugin_root = os.path.realpath(plugin_root)
      with safe_open(os.path.join(plugin_root, 'pants_reporter.py'), 'w') as fp:
        fp.write(dedent("""
          def pytest_configure(__multicall__, config):
            # This executes the rest of the pytest_configures ensuring the `pytest_cov` plugin is
            # registered so we can grab it below.
            __multicall__.execute()
            pycov = config.pluginmanager.getplugin('_cov')
            # Squelch console reporting
            pycov.pytest_terminal_summary = lambda *args, **kwargs: None
        """))

      pythonpath = os.environ.get('PYTHONPATH')
      existing_pythonpath = pythonpath.split(os.pathsep) if pythonpath else []
      with environment_as(PYTHONPATH=os.pathsep.join(existing_pythonpath + [plugin_root])):
        def is_python_lib(tgt):
          return tgt.has_sources('.py') and not isinstance(tgt, PythonTests)

        source_mappings = {}
        for target in targets:
          libs = (tgt for tgt in target.closure() if is_python_lib(tgt))
          for lib in libs:
            source_mappings[lib.target_base] = [chroot]

        cp = self._generate_coverage_config(source_mappings=source_mappings)
        with temporary_file() as fp:
          cp.write(fp)
          fp.close()
          coverage_rc = fp.name
          args = ['-p', 'pants_reporter', '-p', 'pytest_cov', '--cov-config', coverage_rc]
          for module in coverage_modules:
            args.extend(['--cov', module])
          yield args, coverage_rc
Example #27
  def test_expand_interpreter_search_paths(self):
    with environment_as(PATH='/env/path1:/env/path2'):
      with setup_pexrc_with_pex_python_path(['/pexrc/path1:/pexrc/path2']):
        with fake_pyenv_root(['2.7.14', '3.5.5']) as (pyenv_root, expected_pyenv_paths):
          paths = ['/foo', '<PATH>', '/bar', '<PEXRC>', '/baz', '<PYENV>', '/qux']
          expanded_paths = PythonSetup.expand_interpreter_search_paths(
            paths, pyenv_root_func=lambda: pyenv_root)

    expected = ['/foo', '/env/path1', '/env/path2', '/bar', '/pexrc/path1', '/pexrc/path2',
                '/baz'] + expected_pyenv_paths + ['/qux']
    self.assertListEqual(expected, expanded_paths)
Example #28
def distribution(files=None, executables=None, java_home=None):
  with temporary_dir() as dist_root:
    with environment_as(DIST_ROOT=os.path.join(dist_root, java_home) if java_home else dist_root):
      for f in maybe_list(files or ()):
        touch(os.path.join(dist_root, f))
      for executable in maybe_list(executables or (), expected_type=EXE):
        path = os.path.join(dist_root, executable.relpath)
        with safe_open(path, 'w') as fp:
          fp.write(executable.contents or '')
        chmod_plus_x(path)
      yield dist_root
Example #29
  def setUp(self):
    self.origin = safe_mkdtemp()
    with pushd(self.origin):
      subprocess.check_call(['git', 'init', '--bare'])

    self.gitdir = safe_mkdtemp()
    self.worktree = safe_mkdtemp()

    self.readme_file = os.path.join(self.worktree, 'README')

    with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
      self.init_repo('depot', self.origin)

      touch(self.readme_file)
      subprocess.check_call(['git', 'add', 'README'])
      safe_mkdir(os.path.join(self.worktree, 'dir'))
      with open(os.path.join(self.worktree, 'dir', 'f'), 'w') as f:
        f.write("file in subdir")

      # Make some symlinks
      os.symlink('f', os.path.join(self.worktree, 'dir', 'relative-symlink'))
      os.symlink('no-such-file', os.path.join(self.worktree, 'dir', 'relative-nonexistent'))
      os.symlink('dir/f', os.path.join(self.worktree, 'dir', 'not-absolute\u2764'))
      os.symlink('../README', os.path.join(self.worktree, 'dir', 'relative-dotdot'))
      os.symlink('dir', os.path.join(self.worktree, 'link-to-dir'))
      os.symlink('README/f', os.path.join(self.worktree, 'not-a-dir'))
      os.symlink('loop1', os.path.join(self.worktree, 'loop2'))
      os.symlink('loop2', os.path.join(self.worktree, 'loop1'))

      subprocess.check_call(['git', 'add', 'README', 'dir', 'loop1', 'loop2',
                             'link-to-dir', 'not-a-dir'])
      subprocess.check_call(['git', 'commit', '-am', 'initial commit with decode -> \x81b'])
      self.initial_rev = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
      subprocess.check_call(['git', 'tag', 'first'])
      subprocess.check_call(['git', 'push', '--tags', 'depot', 'master'])
      subprocess.check_call(['git', 'branch', '--set-upstream-to', 'depot/master'])

      with safe_open(self.readme_file, 'w') as readme:
        readme.write('Hello World.\u2764'.encode('utf-8'))
      subprocess.check_call(['git', 'commit', '-am', 'Update README.'])

      self.current_rev = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()

    self.clone2 = safe_mkdtemp()
    with pushd(self.clone2):
      self.init_repo('origin', self.origin)
      subprocess.check_call(['git', 'pull', '--tags', 'origin', 'master:master'])

      with safe_open(os.path.realpath('README'), 'a') as readme:
        readme.write('--')
      subprocess.check_call(['git', 'commit', '-am', 'Update README 2.'])
      subprocess.check_call(['git', 'push', '--tags', 'origin', 'master'])

    self.git = Git(gitdir=self.gitdir, worktree=self.worktree)
Example #30
  def test_diffspec(self):
    """Test finding changes in a diffspecs

    To some extent this is just testing functionality of git not pants, since all pants says
    is that it will pass the diffspec to git diff-tree, but this should serve to at least document
    the functionality we belive works.
    """
    with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
      def commit_contents_to_files(content, *files):
        for path in files:
          with safe_open(os.path.join(self.worktree, path), 'w') as fp:
            fp.write(content)
        subprocess.check_call(['git', 'add', '.'])
        subprocess.check_call(['git', 'commit', '-m', 'change '+path])
        return subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()

      # We can get changes in HEAD or by SHA
      c1 = commit_contents_to_files('1', 'foo')
      self.assertEqual(set(['foo']), self.git.changes_in('HEAD'))
      self.assertEqual(set(['foo']), self.git.changes_in(c1))

      # Changes in new HEAD, from old-to-new HEAD, in old HEAD, or from old-old-head to new.
      c2 = commit_contents_to_files('2', 'bar')
      self.assertEqual(set(['bar']), self.git.changes_in('HEAD'))
      self.assertEqual(set(['bar']), self.git.changes_in('HEAD^..HEAD'))
      self.assertEqual(set(['foo']), self.git.changes_in('HEAD^'))
      self.assertEqual(set(['foo']), self.git.changes_in('HEAD~1'))
      self.assertEqual(set(['foo', 'bar']), self.git.changes_in('HEAD^^..HEAD'))

      # New commit doesn't change results-by-sha
      self.assertEqual(set(['foo']), self.git.changes_in(c1))

      # Files changed in multiple diffs within a range
      c3 = commit_contents_to_files('3', 'foo')
      self.assertEqual(set(['foo', 'bar']), self.git.changes_in('{}..{}'.format(c1, c3)))

      # Changes in a tag
      subprocess.check_call(['git', 'tag', 'v1'])
      self.assertEqual(set(['foo']), self.git.changes_in('v1'))

      # Introduce a new filename
      c4 = commit_contents_to_files('4', 'baz')
      self.assertEqual(set(['baz']), self.git.changes_in('HEAD'))

      # Tag-to-sha
      self.assertEqual(set(['baz']), self.git.changes_in('{}..{}'.format('v1', c4)))

      # We can get multiple changes from one ref
      c5 = commit_contents_to_files('5', 'foo', 'bar')
      self.assertEqual(set(['foo', 'bar']), self.git.changes_in('HEAD'))
      self.assertEqual(set(['foo', 'bar', 'baz']), self.git.changes_in('HEAD~4..HEAD'))
      self.assertEqual(set(['foo', 'bar', 'baz']), self.git.changes_in('{}..HEAD'.format(c1)))
      self.assertEqual(set(['foo', 'bar', 'baz']), self.git.changes_in('{}..{}'.format(c1, c4)))
Example #31
 def test_testprojects_v2_engine(self):
     with environment_as(PANTS_ENABLE_V2_ENGINE='true'):
         self.test_testprojects()
Example #32
    def _maybe_emit_coverage_data(self, targets, chroot, pex, workunit):
        coverage = self.get_options().coverage
        if coverage is None:
            yield []
            return

        def read_coverage_list(prefix):
            return coverage[len(prefix):].split(',')

        coverage_modules = None
        if coverage.startswith('modules:'):
            # NB: pytest-cov maps these modules to the `[run] source` config.  So for
            # `modules:pants.base,pants.util` the config emitted has:
            # [run]
            # source =
            #   pants.base
            #   pants.util
            #
            # Now even though these are not paths, coverage sees the dots and switches to a module
            # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
            # like `pants.` serve to engage this module prefix-matching as one might hope.  It
            # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
            # module prefix name.
            coverage_modules = read_coverage_list('modules:')
        elif coverage.startswith('paths:'):
            coverage_modules = []
            for path in read_coverage_list('paths:'):
                if not os.path.exists(path) and not os.path.isabs(path):
                    # Look for the source in the PEX chroot since it's not available from the CWD.
                    path = os.path.join(chroot, path)
                coverage_modules.append(path)

        with self._cov_setup(
                targets, chroot,
                coverage_modules=coverage_modules) as (args, coverage_rc):
            try:
                yield args
            finally:
                with environment_as(PEX_MODULE='coverage.cmdline:main'):

                    def pex_run(args):
                        return self._pex_run(pex, workunit, args=args)

                    # On failures or timeouts, the .coverage file won't be written.
                    if not os.path.exists('.coverage'):
                        self.context.log.warn(
                            'No .coverage file was found! Skipping coverage reporting.'
                        )
                    else:
                        # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
                        # This swaps the /tmp pex chroot source paths for the local original source paths
                        # the pex was generated from and which the user understands.
                        shutil.move('.coverage', '.coverage.raw')
                        pex_run(args=['combine', '--rcfile', coverage_rc])
                        pex_run(args=['report', '-i', '--rcfile', coverage_rc])

                        # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
                        # intermediate .html that points to each of the coverage reports generated and
                        # webbrowser.open to that page.
                        # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
                        # consider combining coverage files from all runs in this Tasks's execute and then
                        # producing just 1 console and 1 html report whether or not the tests are run in fast
                        # mode.
                        if self.get_options().coverage_output_dir:
                            target_dir = self.get_options().coverage_output_dir
                        else:
                            relpath = Target.maybe_readable_identify(targets)
                            pants_distdir = self.context.options.for_global_scope(
                            ).pants_distdir
                            target_dir = os.path.join(pants_distdir,
                                                      'coverage', relpath)
                        safe_mkdir(target_dir)
                        pex_run(args=[
                            'html', '-i', '--rcfile', coverage_rc, '-d',
                            target_dir
                        ])
                        coverage_xml = os.path.join(target_dir, 'coverage.xml')
                        pex_run(args=[
                            'xml', '-i', '--rcfile', coverage_rc, '-o',
                            coverage_xml
                        ])
Example #33
 def test_get_configdir(self) -> None:
     with environment_as(XDG_CONFIG_HOME=""):
         self.assertEqual(os.path.expanduser("~/.config/pants"),
                          get_pants_configdir())
Example #34
 def test_set_cachedir(self) -> None:
     with temporary_file() as temp:
         with environment_as(XDG_CACHE_HOME=temp.name):
             self.assertEqual(os.path.join(temp.name, "pants"),
                              get_pants_cachedir())
Example #35
def create_isolated_git_repo():
    # Isolated Git Repo Structure:
    # worktree
    # |--README
    # |--pants.ini
    # |--3rdparty
    #    |--BUILD
    # |--src
    #    |--resources
    #       |--org/pantsbuild/resourceonly
    #          |--BUILD
    #          |--README.md
    #    |--java
    #       |--org/pantsbuild/helloworld
    #          |--BUILD
    #          |--helloworld.java
    #    |--python
    #       |--python_targets
    #          |--BUILD
    #          |--test_binary.py
    #          |--test_library.py
    #          |--test_unclaimed_src.py
    #       |--sources
    #          |--BUILD
    #          |--sources.py
    #          |--sources.txt
    # |--tests
    #    |--scala
    #       |--org/pantsbuild/cp-directories
    #          |--BUILD
    #          |--ClasspathDirectoriesSpec.scala
    with temporary_dir(root_dir=get_buildroot()) as worktree:

        def create_file(path, content):
            """Creates a file in the isolated git repo."""
            write_path = os.path.join(worktree, path)
            with safe_open(write_path, 'w') as f:
                f.write(dedent(content))
            return write_path

        def copy_into(path, to_path=None):
            """Copies a file from the real git repo into the isolated git repo."""
            write_path = os.path.join(worktree, to_path or path)
            if os.path.isfile(path):
                safe_mkdir(os.path.dirname(write_path))
                shutil.copyfile(path, write_path)
            else:
                shutil.copytree(path, write_path)
            return write_path

        create_file('README', 'N.B. This is just a test tree.')
        create_file(
            'pants.ini', """
      [GLOBAL]
      pythonpath: [
          "{0}/contrib/go/src/python",
          "{0}/pants-plugins/src/python"
        ]
      backend_packages: +[
          "internal_backend.utilities",
          "pants.contrib.go"
        ]
      """.format(get_buildroot()))
        copy_into('.gitignore')

        with initialize_repo(worktree=worktree,
                             gitdir=os.path.join(worktree, '.git')) as git:

            def add_to_git(commit_msg, *files):
                git.add(*files)
                git.commit(commit_msg)

            add_to_git(
                'a go target with default sources',
                create_file('src/go/tester/BUILD', 'go_binary()'),
                create_file(
                    'src/go/tester/main.go', """
          package main
          import "fmt"
          func main() {
            fmt.Println("hello, world")
          }
          """))

            add_to_git(
                'resource file',
                create_file(
                    'src/resources/org/pantsbuild/resourceonly/BUILD', """
          resources(
            name='resource',
            sources=['README.md']
          )
          """),
                create_file(
                    'src/resources/org/pantsbuild/resourceonly/README.md',
                    'Just a resource.'))

            add_to_git(
                'hello world java program with a dependency on a resource file',
                create_file(
                    'src/java/org/pantsbuild/helloworld/BUILD', """
          jvm_binary(
            dependencies=[
              'src/resources/org/pantsbuild/resourceonly:resource',
            ],
            source='helloworld.java',
            main='org.pantsbuild.helloworld.HelloWorld',
          )
          """),
                create_file(
                    'src/java/org/pantsbuild/helloworld/helloworld.java', """
          package org.pantsbuild.helloworld;

          class HelloWorld {
            public static void main(String[] args) {
              System.out.println("Hello, World!\n");
            }
          }
          """))

            add_to_git(
                'scala test target',
                copy_into(
                    'testprojects/tests/scala/org/pantsbuild/testproject/cp-directories',
                    'tests/scala/org/pantsbuild/cp-directories'))

            add_to_git(
                'python targets',
                copy_into('testprojects/src/python/python_targets',
                          'src/python/python_targets'))

            add_to_git(
                'a python_library with resources=["filename"]',
                copy_into('testprojects/src/python/sources',
                          'src/python/sources'))

            add_to_git('3rdparty/BUILD', copy_into('3rdparty/BUILD'))

            with environment_as(PANTS_BUILDROOT_OVERRIDE=worktree):
                yield worktree
Example #36
  def _run_tests(self, tests_to_targets):
    if self._coverage:
      extra_jvm_options = self._coverage.extra_jvm_options
      classpath_prepend = self._coverage.classpath_prepend
      classpath_append = self._coverage.classpath_append
    else:
      extra_jvm_options = []
      classpath_prepend = ()
      classpath_append = ()

    tests_by_properties = self._tests_by_properties(
      tests_to_targets,
      self._infer_workdir,
      lambda target: target.test_platform,
      lambda target: target.payload.extra_jvm_options,
      lambda target: target.payload.extra_env_vars,
    )

    # the below will be None if not set, and we'll default back to runtime_classpath
    classpath_product = self.context.products.get_data('instrument_classpath')

    result = 0
    for (workdir, platform, target_jvm_options, target_env_vars), tests in tests_by_properties.items():
      for batch in self._partition(tests):
        # Batches of test classes will likely exist within the same targets: dedupe them.
        relevant_targets = set(map(tests_to_targets.get, batch))
        complete_classpath = OrderedSet()
        complete_classpath.update(classpath_prepend)
        complete_classpath.update(self.tool_classpath('junit'))
        complete_classpath.update(self.classpath(relevant_targets,
                                                 classpath_product=classpath_product))
        complete_classpath.update(classpath_append)
        distribution = JvmPlatform.preferred_jvm_distribution([platform], self._strict_jvm_version)
        with binary_util.safe_args(batch, self.get_options()) as batch_tests:
          self.context.log.debug('CWD = {}'.format(workdir))
          self.context.log.debug('platform = {}'.format(platform))
          with environment_as(**dict(target_env_vars)):
            result += abs(self._spawn_and_wait(
              executor=SubprocessExecutor(distribution),
              distribution=distribution,
              classpath=complete_classpath,
              main=JUnitRun._MAIN,
              jvm_options=self.jvm_options + extra_jvm_options + list(target_jvm_options),
              args=self._args + batch_tests + [u'-xmlreport'],
              workunit_factory=self.context.new_workunit,
              workunit_name='run',
              workunit_labels=[WorkUnitLabel.TEST],
              cwd=workdir,
              synthetic_jar_dir=self.workdir,
              create_synthetic_jar=self.synthetic_classpath,
            ))

          if result != 0 and self._fail_fast:
            break

    if result != 0:
      failed_targets_and_tests = self._get_failed_targets(tests_to_targets)
      failed_targets = sorted(failed_targets_and_tests, key=lambda target: target.address.spec)
      error_message_lines = []
      if self._failure_summary:
        for target in failed_targets:
          error_message_lines.append('\n{0}{1}'.format(' '*4, target.address.spec))
          for test in sorted(failed_targets_and_tests[target]):
            error_message_lines.append('{0}{1}'.format(' '*8, test))
      error_message_lines.append(
        '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
          .format(main=JUnitRun._MAIN, code=result, failed=len(failed_targets),
                  targets=pluralize(len(failed_targets), 'target'))
      )
      raise TestFailedTaskError('\n'.join(error_message_lines), failed_targets=list(failed_targets))
Example #37
 def test_user_expansion(self):
   with environment_as(HOME='/tmp/jake'):
     self.assertEquals('/tmp/jake/bob', expand_path('~/bob'))
Example #38
 def test_access_to_env(self):
     with environment_as(SOME_ENV_VAR='twelve'):
         self.do_test_repl(
             code=['import os', 'print(os.environ.get("SOME_ENV_VAR"))'],
             expected=['twelve'],
             targets=[self.library])
Example #39
 def test_empty_environment(self):
     with environment_as():
         pass
Example #40
    def test_changes_in(self):
        """Test finding changes in a diffspecs

    To some extent this is just testing functionality of git not pants, since all pants says
    is that it will pass the diffspec to git diff-tree, but this should serve to at least document
    the functionality we belive works.
    """
        with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):

            def commit_contents_to_files(content, *files):
                for path in files:
                    with safe_open(os.path.join(self.worktree, path),
                                   'w') as fp:
                        fp.write(content)
                subprocess.check_call(['git', 'add', '.'])
                subprocess.check_call(
                    ['git', 'commit', '-m', 'change {}'.format(files)])
                return subprocess.check_output(['git', 'rev-parse',
                                                'HEAD']).strip()

            # We can get changes in HEAD or by SHA
            c1 = commit_contents_to_files('1', 'foo')
            self.assertEqual({'foo'}, self.git.changes_in('HEAD'))
            self.assertEqual({'foo'}, self.git.changes_in(c1))

            # Changes in new HEAD, from old-to-new HEAD, in old HEAD, or from old-old-head to new.
            commit_contents_to_files('2', 'bar')
            self.assertEqual({'bar'}, self.git.changes_in('HEAD'))
            self.assertEqual({'bar'}, self.git.changes_in('HEAD^..HEAD'))
            self.assertEqual({'foo'}, self.git.changes_in('HEAD^'))
            self.assertEqual({'foo'}, self.git.changes_in('HEAD~1'))
            self.assertEqual({'foo', 'bar'},
                             self.git.changes_in('HEAD^^..HEAD'))

            # New commit doesn't change results-by-sha
            self.assertEqual({'foo'}, self.git.changes_in(c1))

            # Files changed in multiple diffs within a range
            c3 = commit_contents_to_files('3', 'foo')
            self.assertEqual({'foo', 'bar'},
                             self.git.changes_in('{}..{}'.format(c1, c3)))

            # Changes in a tag
            subprocess.check_call(['git', 'tag', 'v1'])
            self.assertEqual({'foo'}, self.git.changes_in('v1'))

            # Introduce a new filename
            c4 = commit_contents_to_files('4', 'baz')
            self.assertEqual({'baz'}, self.git.changes_in('HEAD'))

            # Tag-to-sha
            self.assertEqual({'baz'},
                             self.git.changes_in('{}..{}'.format('v1', c4)))

            # We can get multiple changes from one ref
            commit_contents_to_files('5', 'foo', 'bar')
            self.assertEqual({'foo', 'bar'}, self.git.changes_in('HEAD'))
            self.assertEqual({'foo', 'bar', 'baz'},
                             self.git.changes_in('HEAD~4..HEAD'))
            self.assertEqual({'foo', 'bar', 'baz'},
                             self.git.changes_in('{}..HEAD'.format(c1)))
            self.assertEqual({'foo', 'bar', 'baz'},
                             self.git.changes_in('{}..{}'.format(c1, c4)))
示例#41
0
 def execute_task(self, target_roots=None, resolve_local=False):
     with self.resolve_configuration(resolve_local=resolve_local):
         with environment_as(PANTS_DEV=None, PEX_VERBOSE="9"):
             context = self.context(target_roots=target_roots)
             return self.create_task(context).execute()
示例#42
0
 def test_buildroot_override(self):
     with temporary_dir() as root:
         with environment_as(PANTS_BUILDROOT_OVERRIDE=root):
             self.assertEqual(BuildRoot().path, root)
示例#43
0
 def empty_path(self):
     with temporary_dir() as path:
         with environment_as(PATH=path):
             yield path
示例#44
0
 def test_env_var_expansion(self):
   with self.root() as root:
     with environment_as(A='B', C='D'):
       self.assertEquals(os.path.join(root, 'B/D/E'), expand_path('$A/${C}/E'))
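
Taken together with the HOME-based expansion test in 示例#37, this suggests expand_path composes the
standard library's user and variable expansion before absolutizing. A minimal sketch under that
assumption (not the real implementation):

import os

def expand_path(path):
    # Expand ~user and $VAR / ${VAR} references, then normalize to an absolute path
    # relative to the current working directory.
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))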
示例#45
0
 def test_go_command_no_gopath_overrides_user_gopath_issue2321(self):
     # Without proper GOPATH scrubbing, this bogus entry leads to a `go env` failure as explained
     # here: https://github.com/pantsbuild/pants/issues/2321
     # Before that fix, the `go env` command would raise.
     with environment_as(GOPATH=':/bogus/first/entry'):
         self.assert_no_gopath()
示例#46
0
    def setUp(self):
        self.origin = safe_mkdtemp()
        with pushd(self.origin):
            subprocess.check_call(['git', 'init', '--bare'])

        self.gitdir = safe_mkdtemp()
        self.worktree = safe_mkdtemp()

        self.readme_file = os.path.join(self.worktree, 'README')

        with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
            self.init_repo('depot', self.origin)

            touch(self.readme_file)
            subprocess.check_call(['git', 'add', 'README'])
            safe_mkdir(os.path.join(self.worktree, 'dir'))
            with open(os.path.join(self.worktree, 'dir', 'f'), 'w') as f:
                f.write("file in subdir")

            # Make some symlinks
            os.symlink('f',
                       os.path.join(self.worktree, 'dir', 'relative-symlink'))
            os.symlink(
                'no-such-file',
                os.path.join(self.worktree, 'dir', 'relative-nonexistent'))
            os.symlink(
                'dir/f',
                os.path.join(self.worktree, 'dir', 'not-absolute\u2764'))
            os.symlink('../README',
                       os.path.join(self.worktree, 'dir', 'relative-dotdot'))
            os.symlink('dir', os.path.join(self.worktree, 'link-to-dir'))
            os.symlink('README/f', os.path.join(self.worktree, 'not-a-dir'))
            os.symlink('loop1', os.path.join(self.worktree, 'loop2'))
            os.symlink('loop2', os.path.join(self.worktree, 'loop1'))

            subprocess.check_call([
                'git', 'add', 'README', 'dir', 'loop1', 'loop2', 'link-to-dir',
                'not-a-dir'
            ])
            subprocess.check_call([
                'git', 'commit', '-am', 'initial commit with decode -> \x81b'
            ])
            self.initial_rev = subprocess.check_output(
                ['git', 'rev-parse', 'HEAD']).strip()
            subprocess.check_call(['git', 'tag', 'first'])
            subprocess.check_call(['git', 'push', '--tags', 'depot', 'master'])
            subprocess.check_call(
                ['git', 'branch', '--set-upstream-to', 'depot/master'])

            with safe_open(self.readme_file, 'wb') as readme:
                readme.write('Hello World.\u2764'.encode('utf-8'))
            subprocess.check_call(['git', 'commit', '-am', 'Update README.'])

            self.current_rev = subprocess.check_output(
                ['git', 'rev-parse', 'HEAD']).strip()

        self.clone2 = safe_mkdtemp()
        with pushd(self.clone2):
            self.init_repo('origin', self.origin)
            subprocess.check_call(
                ['git', 'pull', '--tags', 'origin', 'master:master'])

            with safe_open(os.path.realpath('README'), 'a') as readme:
                readme.write('--')
            subprocess.check_call(['git', 'commit', '-am', 'Update README 2.'])
            subprocess.check_call(
                ['git', 'push', '--tags', 'origin', 'master'])

        self.git = Git(gitdir=self.gitdir, worktree=self.worktree)
示例#47
0
    def _run_tests(self, test_registry, output_dir, coverage=None):
        if coverage:
            extra_jvm_options = coverage.extra_jvm_options
            classpath_prepend = coverage.classpath_prepend
            classpath_append = coverage.classpath_append
        else:
            extra_jvm_options = []
            classpath_prepend = ()
            classpath_append = ()

        tests_by_properties = test_registry.index(
            lambda tgt: tgt.cwd if tgt.cwd is not None else self._working_dir,
            lambda tgt: tgt.test_platform,
            lambda tgt: tgt.payload.extra_jvm_options,
            lambda tgt: tgt.payload.extra_env_vars,
            lambda tgt: tgt.concurrency, lambda tgt: tgt.threads)

        # The 'instrument_classpath' product below will be None if not set, and we'll default
        # back to the runtime_classpath product.
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for properties, tests in tests_by_properties.items():
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties
            for batch in self._partition(tests):
                # Batches of test classes will likely exist within the same targets: dedupe them.
                relevant_targets = {
                    test_registry.get_owning_target(t)
                    for t in batch
                }
                complete_classpath = OrderedSet()
                complete_classpath.update(classpath_prepend)
                complete_classpath.update(
                    JUnit.global_instance().runner_classpath(self.context))
                complete_classpath.update(
                    self.classpath(relevant_targets,
                                   classpath_product=classpath_product))
                complete_classpath.update(classpath_append)
                distribution = JvmPlatform.preferred_jvm_distribution(
                    [platform], self._strict_jvm_version)

                # Override cmdline args with values from junit_test() target that specify concurrency:
                args = self._args(output_dir) + [u'-xmlreport']

                if concurrency is not None:
                    args = remove_arg(args, '-default-parallel')
                    if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='SERIAL')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_CLASSES')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_METHODS')
                    elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                        args = ensure_arg(args,
                                          '-default-concurrency',
                                          param='PARALLEL_CLASSES_AND_METHODS')

                if threads is not None:
                    args = remove_arg(args,
                                      '-parallel-threads',
                                      has_param=True)
                    args += ['-parallel-threads', str(threads)]

                batch_test_specs = [test.render_test_spec() for test in batch]
                with argfile.safe_args(batch_test_specs,
                                       self.get_options()) as batch_tests:
                    self.context.log.debug('CWD = {}'.format(workdir))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        result += abs(
                            self._spawn_and_wait(
                                executor=SubprocessExecutor(distribution),
                                distribution=distribution,
                                classpath=complete_classpath,
                                main=JUnit.RUNNER_MAIN,
                                jvm_options=self.jvm_options +
                                extra_jvm_options + list(target_jvm_options),
                                args=args + batch_tests,
                                workunit_factory=self.context.new_workunit,
                                workunit_name='run',
                                workunit_labels=[WorkUnitLabel.TEST],
                                cwd=workdir,
                                synthetic_jar_dir=output_dir,
                                create_synthetic_jar=self.synthetic_classpath,
                            ))

                    if result != 0 and self._fail_fast:
                        break

        if result != 0:

            def error_handler(parse_error):
                # Just log and move on since the result is only used to characterize failures, and raising
                # an error here would just distract from the underlying test failures.
                self.context.log.error(
                    'Error parsing test result file {path}: {cause}'.format(
                        path=parse_error.junit_xml_path,
                        cause=parse_error.cause))

            target_to_failed_test = parse_failed_targets(
                test_registry, output_dir, error_handler)
            failed_targets = sorted(target_to_failed_test,
                                    key=lambda t: t.address.spec)
            error_message_lines = []
            if self._failure_summary:
                for target in failed_targets:
                    error_message_lines.append('\n{indent}{address}'.format(
                        indent=' ' * 4, address=target.address.spec))
                    for test in sorted(target_to_failed_test[target]):
                        error_message_lines.append(
                            '{indent}{classname}#{methodname}'.format(
                                indent=' ' * 8,
                                classname=test.classname,
                                methodname=test.methodname))
            error_message_lines.append(
                '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
                .format(main=JUnit.RUNNER_MAIN,
                        code=result,
                        failed=len(failed_targets),
                        targets=pluralize(len(failed_targets), 'target')))
            raise TestFailedTaskError('\n'.join(error_message_lines),
                                      failed_targets=list(failed_targets))
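
The remove_arg/ensure_arg helpers used above (and again in the later run_tests example) are not
shown here; a hedged sketch of flag-editing helpers with the behaviour the call sites imply (names
and signatures inferred from usage, not the real pants implementations):

def remove_arg(args, arg, has_param=False):
    # Drop every occurrence of `arg` (and, when has_param=True, the value that follows it).
    out, skip_next = [], False
    for a in args:
        if skip_next:
            skip_next = False
        elif a == arg:
            skip_next = has_param
        else:
            out.append(a)
    return out

def ensure_arg(args, arg, param=None):
    # Ensure `arg` appears exactly once, optionally followed by `param`.
    args = remove_arg(args, arg, has_param=param is not None)
    return args + ([arg, param] if param is not None else [arg])

# ensure_arg(['-xmlreport'], '-default-concurrency', param='SERIAL')
# -> ['-xmlreport', '-default-concurrency', 'SERIAL']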
示例#48
0
    def test_integration(self):
        self.assertEqual(set(), self.git.changed_files())
        self.assertEqual({'README'},
                         self.git.changed_files(from_commit='HEAD^'))

        tip_sha = self.git.commit_id
        self.assertTrue(tip_sha)

        self.assertTrue(tip_sha in self.git.changelog())

        merge_base = self.git.merge_base()
        self.assertTrue(merge_base)

        self.assertTrue(merge_base in self.git.changelog())

        with self.assertRaises(Scm.LocalException):
            self.git.server_url

        with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):
            with self.mkremote('origin') as origin_uri:
                # We shouldn't be fooled by remotes with origin in their name.
                with self.mkremote('temp_origin'):
                    origin_url = self.git.server_url
                    self.assertEqual(origin_url, origin_uri)

        self.assertTrue(self.git.tag_name.startswith('first-'),
                        msg='un-annotated tags should be found')
        self.assertEqual('master', self.git.branch_name)

        def edit_readme():
            with open(self.readme_file, 'a') as fp:
                fp.write('More data.')

        edit_readme()
        with open(os.path.join(self.worktree, 'INSTALL'), 'w') as untracked:
            untracked.write('make install')
        self.assertEqual({'README'}, self.git.changed_files())
        self.assertEqual({'README', 'INSTALL'},
                         self.git.changed_files(include_untracked=True))

        # Confirm that files outside of a given relative_to path are ignored
        self.assertEqual(set(),
                         self.git.changed_files(relative_to='non-existent'))

        self.git.commit('API Changes.')
        try:
            # These changes should be rejected because our branch point from origin is 1 commit behind
            # the changes pushed there in clone 2.
            self.git.push()
        except Scm.RemoteException:
            with environment_as(GIT_DIR=self.gitdir,
                                GIT_WORK_TREE=self.worktree):
                subprocess.check_call(
                    ['git', 'reset', '--hard', 'depot/master'])
            self.git.refresh()
            edit_readme()

        self.git.commit('''API '"' " Changes.''')
        self.git.push()
        # HEAD is merged into master
        self.assertEqual(self.git.commit_date(self.git.merge_base()),
                         self.git.commit_date('HEAD'))
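        # (The next assertion reads as a sanity check that repeated commit_date calls
        # for the same rev agree.)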
        self.assertEqual(self.git.commit_date('HEAD'),
                         self.git.commit_date('HEAD'))
        self.git.tag('second', message='''Tagged ' " Changes''')

        with temporary_dir() as clone:
            with pushd(clone):
                self.init_repo('origin', self.origin)
                subprocess.check_call(
                    ['git', 'pull', '--tags', 'origin', 'master:master'])

                with open(os.path.realpath('README'), 'r') as readme:
                    self.assertEqual('--More data.', readme.read())

                git = Git()

                # Check that we can pick up committed and uncommitted changes.
                with safe_open(os.path.realpath('CHANGES'), 'w') as changes:
                    changes.write('none')
                subprocess.check_call(['git', 'add', 'CHANGES'])
                self.assertEqual({'README', 'CHANGES'},
                                 git.changed_files(from_commit='first'))

                self.assertEqual('master', git.branch_name)
                self.assertEqual('second',
                                 git.tag_name,
                                 msg='annotated tags should be found')
示例#49
0
 def test_get_cachedir(self) -> None:
     with environment_as(XDG_CACHE_HOME=""):
         self.assertEqual(os.path.expanduser("~/.cache/pants"),
                          get_pants_cachedir())
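
The assertion implies get_pants_cachedir follows the XDG convention, treating an empty
XDG_CACHE_HOME like an unset one and falling back to ~/.cache; a rough sketch of that behaviour
(assumed from the test, not the real implementation):

import os

def get_pants_cachedir():
    # An empty or missing XDG_CACHE_HOME falls back to the ~/.cache default.
    cache_home = os.environ.get("XDG_CACHE_HOME") or os.path.expanduser("~/.cache")
    return os.path.join(cache_home, "pants")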
示例#50
0
def create_isolated_git_repo():
    # Isolated Git Repo Structure:
    # worktree
    # |--README
    # |--pants.ini
    # |--3rdparty
    #    |--BUILD
    # |--src
    #    |--resources
    #       |--org/pantsbuild/resourceonly
    #          |--BUILD
    #          |--README.md
    #    |--java
    #       |--org/pantsbuild/helloworld
    #          |--BUILD
    #          |--helloworld.java
    #    |--python
    #       |--python_targets
    #          |--BUILD
    #          |--test_binary.py
    #          |--test_library.py
    #          |--test_unclaimed_src.py
    #       |--sources
    #          |--BUILD
    #          |--sources.py
    #          |--sources.txt
    # |--tests
    #    |--scala
    #       |--org/pantsbuild/cp-directories
    #          |--BUILD
    #          |--ClasspathDirectories.scala
    with temporary_dir(root_dir=get_buildroot()) as worktree:
        with safe_open(os.path.join(worktree, 'README'), 'w') as fp:
            fp.write('Just a test tree.')

        # Create an empty pants config file.
        touch(os.path.join(worktree, 'pants.ini'))

        # Copy .gitignore to new repo.
        shutil.copyfile('.gitignore', os.path.join(worktree, '.gitignore'))

        with initialize_repo(worktree=worktree,
                             gitdir=os.path.join(worktree, '.git')) as git:
            # Resource File
            resource_file = os.path.join(
                worktree,
                'src/resources/org/pantsbuild/resourceonly/README.md')
            with safe_open(resource_file, 'w') as fp:
                fp.write('Just resource.')

            resource_build_file = os.path.join(
                worktree, 'src/resources/org/pantsbuild/resourceonly/BUILD')
            with safe_open(resource_build_file, 'w') as fp:
                fp.write(
                    dedent("""
        resources(
          name='resource',
          sources=['README.md'],
        )
        """))

            git.add(resource_file, resource_build_file)
            git.commit('Check in a resource target.')

            # Java Program
            src_file = os.path.join(
                worktree, 'src/java/org/pantsbuild/helloworld/helloworld.java')
            with safe_open(src_file, 'w') as fp:
                fp.write(
                    dedent("""
        package org.pantsbuild.helloworld;

        class HelloWorld {
          public static void main(String[] args) {
            System.out.println("Hello, World!\n");
          }
        }
        """))

            src_build_file = os.path.join(
                worktree, 'src/java/org/pantsbuild/helloworld/BUILD')
            with safe_open(src_build_file, 'w') as fp:
                fp.write(
                    dedent("""
        jvm_binary(
          dependencies=[
            '{}',
          ],
          source='helloworld.java',
          main='org.pantsbuild.helloworld.HelloWorld',
        )
        """.format('src/resources/org/pantsbuild/resourceonly:resource')))

            git.add(src_file, src_build_file)
            git.commit(
                'hello world java program with a dependency on a resource file.'
            )

            # Scala Program
            scala_src_dir = os.path.join(
                worktree, 'tests/scala/org/pantsbuild/cp-directories')
            safe_mkdir(os.path.dirname(scala_src_dir))
            shutil.copytree(
                'testprojects/tests/scala/org/pantsbuild/testproject/cp-directories',
                scala_src_dir)
            git.add(scala_src_dir)
            git.commit('Check in a scala test target.')

            # Python library and binary
            python_src_dir = os.path.join(worktree,
                                          'src/python/python_targets')
            safe_mkdir(os.path.dirname(python_src_dir))
            shutil.copytree('testprojects/src/python/python_targets',
                            python_src_dir)
            git.add(python_src_dir)
            git.commit('Check in python targets.')

            # A `python_library` with `resources=['file.name']`.
            python_src_dir = os.path.join(worktree, 'src/python/sources')
            safe_mkdir(os.path.dirname(python_src_dir))
            shutil.copytree('testprojects/src/python/sources', python_src_dir)
            git.add(python_src_dir)
            git.commit('Check in a python library with resource dependency.')

            # Copy 3rdparty/BUILD.
            _3rdparty_build = os.path.join(worktree, '3rdparty/BUILD')
            safe_mkdir(os.path.dirname(_3rdparty_build))
            shutil.copyfile('3rdparty/BUILD', _3rdparty_build)
            git.add(_3rdparty_build)
            git.commit('Check in 3rdparty/BUILD.')

            with environment_as(PANTS_BUILDROOT_OVERRIDE=worktree):
                yield worktree
示例#51
0
 def wrapper(self, *args, **kwargs):
     for env_var_value in ('ivy', 'coursier'):
         with environment_as(HERMETIC_ENV='PANTS_RESOLVER_RESOLVER',
                             PANTS_RESOLVER_RESOLVER=env_var_value):
             f(self, *args, **kwargs)
示例#52
0
def env(**kwargs):
    environment = dict(JDK_HOME=None, JAVA_HOME=None, PATH=None)
    environment.update(**kwargs)
    with environment_as(**environment):
        yield
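
As written, the snippet above would only work as a context manager if it is decorated at its real
definition site; a self-contained variant under that assumption (environment_as imported from
pants.util.contextutil, which is where these examples appear to get it):

from contextlib import contextmanager

from pants.util.contextutil import environment_as


@contextmanager
def env(**kwargs):
    # Clear the JVM-related variables by default; callers may override or add values.
    environment = dict(JDK_HOME=None, JAVA_HOME=None, PATH=None)
    environment.update(**kwargs)
    with environment_as(**environment):
        yield


# Example use: JDK_HOME and JAVA_HOME are unset inside the block, PATH is overridden.
with env(PATH='/usr/bin'):
    pass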
示例#53
0
 def test_stack_command_stack_root_overrides_user_stack_root(self):
     with environment_as(STACK_ROOT='/dev/null'):
         self.assert_stack_root()
示例#54
0
def create_isolated_git_repo():
    # Isolated Git Repo Structure:
    # worktree
    # |--README
    # |--pants.toml
    # |--src
    #    |--resources
    #       |--org/pantsbuild/resourceonly
    #          |--BUILD
    #          |--README.md
    #    |--python
    #       |--python_targets
    #          |--BUILD
    #          |--test_binary.py
    #          |--test_library.py
    #          |--test_unclaimed_src.py
    #       |--sources
    #          |--BUILD
    #          |--sources.py
    #          |--sources.txt
    with temporary_dir(root_dir=get_buildroot()) as worktree:

        def create_file(path, content):
            """Creates a file in the isolated git repo."""
            return create_file_in(worktree, path, content)

        def copy_into(path, to_path=None):
            """Copies a file from the real git repo into the isolated git repo."""
            write_path = os.path.join(worktree, to_path or path)
            if os.path.isfile(path):
                safe_mkdir(os.path.dirname(write_path))
                shutil.copyfile(path, write_path)
            else:
                shutil.copytree(path, write_path)
            return write_path

        create_file("README", "N.B. This is just a test tree.")
        create_file(
            "pants.toml",
            """
            [GLOBAL]
            backend_packages.add = ["pants.backend.python"]
            """,
        )
        copy_into(".gitignore")

        with initialize_repo(worktree=worktree,
                             gitdir=os.path.join(worktree, ".git")) as git:

            def add_to_git(commit_msg, *files):
                git.add(*files)
                git.commit(commit_msg)

            add_to_git(
                "resource file",
                create_file(
                    "src/resources/org/pantsbuild/resourceonly/BUILD",
                    """
                    resources(
                      name='resource',
                      sources=['README.md']
                    )
                    """,
                ),
                create_file(
                    "src/resources/org/pantsbuild/resourceonly/README.md",
                    "Just a resource."),
            )

            add_to_git(
                "python targets",
                copy_into("testprojects/src/python/python_targets",
                          "src/python/python_targets"),
            )

            add_to_git(
                'a python_library with resources=["filename"]',
                copy_into("testprojects/src/python/sources",
                          "src/python/sources"),
            )

            with environment_as(PANTS_BUILDROOT_OVERRIDE=worktree):
                yield worktree
示例#55
0
    def _cov_setup(self, targets, chroot, coverage_modules=None):
        def compute_coverage_modules(target):
            if target.coverage:
                return target.coverage
            else:
                # This makes the assumption that tests/python/<target> will be testing src/python/<target>.
                # Note in particular that this doesn't work for pants' own tests, as those are under
                # the top level package 'pants_tests', rather than just 'pants'.
                # TODO(John Sirois): consider failing fast if there is no explicit coverage scheme; but also
                # consider supporting configuration of a global scheme whether that be parallel
                # dirs/packages or some arbitrary function that can be registered that takes a test target
                # and hands back the source packages or paths under test.
                return set(
                    os.path.dirname(source).replace(os.sep, '.')
                    for source in target.sources_relative_to_source_root())

        if coverage_modules is None:
            coverage_modules = set(
                itertools.chain(
                    *[compute_coverage_modules(t) for t in targets]))

        # Hack in turning off pytest_cov reporting to the console - we want to control this ourselves.
        # Take the approach of registering a plugin that replaces the pycov plugin's
        # `pytest_terminal_summary` callback with a noop.
        with temporary_dir() as plugin_root:
            plugin_root = os.path.realpath(plugin_root)
            with safe_open(os.path.join(plugin_root, 'pants_reporter.py'),
                           'w') as fp:
                fp.write(
                    dedent("""
          def pytest_configure(__multicall__, config):
            # This executes the rest of the pytest_configures ensuring the `pytest_cov` plugin is
            # registered so we can grab it below.
            __multicall__.execute()
            pycov = config.pluginmanager.getplugin('_cov')
            # Squelch console reporting
            pycov.pytest_terminal_summary = lambda *args, **kwargs: None
        """))

            pythonpath = os.environ.get('PYTHONPATH')
            existing_pythonpath = pythonpath.split(
                os.pathsep) if pythonpath else []
            with environment_as(
                    PYTHONPATH=os.pathsep.join(existing_pythonpath +
                                               [plugin_root])):

                def is_python_lib(tgt):
                    return tgt.has_sources('.py') and not isinstance(
                        tgt, PythonTests)

                source_mappings = {}
                for target in targets:
                    libs = (tgt for tgt in target.closure()
                            if is_python_lib(tgt))
                    for lib in libs:
                        source_mappings[lib.target_base] = [chroot]

                cp = self._generate_coverage_config(
                    source_mappings=source_mappings)
                with temporary_file() as fp:
                    cp.write(fp)
                    fp.close()
                    coverage_rc = fp.name
                    args = [
                        '-p', 'pants_reporter', '-p', 'pytest_cov',
                        '--cov-config', coverage_rc
                    ]
                    for module in coverage_modules:
                        args.extend(['--cov', module])
                    yield args, coverage_rc
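
Stripped of the pants machinery, the trick above amounts to putting a one-hook plugin module on
PYTHONPATH and loading it by name; a minimal sketch under those assumptions (module name, paths,
and the pytest invocation are illustrative only, and the original's __multicall__ ordering trick
is glossed over):

import os
import subprocess
import tempfile
import textwrap

plugin_root = tempfile.mkdtemp()
with open(os.path.join(plugin_root, 'quiet_cov.py'), 'w') as fp:
    fp.write(textwrap.dedent("""
        def pytest_configure(config):
            # Mirror the pants_reporter hack: replace pytest-cov's console summary with a noop.
            pycov = config.pluginmanager.getplugin('_cov')
            if pycov:
                pycov.pytest_terminal_summary = lambda *args, **kwargs: None
    """))

env = dict(os.environ)
env['PYTHONPATH'] = os.pathsep.join(
    [p for p in (env.get('PYTHONPATH'), plugin_root) if p])
subprocess.call(['pytest', '-p', 'quiet_cov', '-p', 'pytest_cov'], env=env)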
示例#56
0
    def test_setuptools_version(self):
        self.create_file('src/python/foo/__init__.py')
        self.create_python_library(
            relpath='src/python/foo/commands',
            name='commands',
            source_contents_map={
                'print_sys_path.py':
                dedent("""
          import os
          import sys
          from setuptools import Command


          class PrintSysPath(Command):
            user_options = []

            def initialize_options(self):
              pass

            def finalize_options(self):
              pass

            def run(self):
              with open(os.path.join(os.path.dirname(__file__), 'sys_path.txt'), 'w') as fp:
                fp.write(os.linesep.join(sys.path))
          """)
            },
        )
        foo = self.create_python_library(relpath='src/python/foo',
                                         name='foo',
                                         dependencies=[
                                             'src/python/foo/commands',
                                         ],
                                         provides=dedent("""
      setup_py(
        name='foo',
        version='0.0.0',
      )
      """))
        self.set_options(run='print_sys_path')

        # Make sure setup.py can see our custom distutils Command 'print_sys_path'.
        sdist_srcdir = os.path.join(self.distdir, 'foo-0.0.0', 'src')
        with environment_as(PYTHONPATH=sdist_srcdir):
            with self.run_execute(foo):
                with open(
                        os.path.join(sdist_srcdir, 'foo', 'commands',
                                     'sys_path.txt'), 'r') as fp:
                    load_package = lambda: Package.from_href(fp.readline().strip())
                    # We don't care about the ordering of `wheel` and `setuptools` on the
                    # `sys.path`, just that, as a group, they come first.
                    extras = {
                        p.name: p
                        for p in (load_package(), load_package())
                    }

                    def assert_extra(name, expected_version):
                        package = extras.get(name)
                        self.assertIsNotNone(package)
                        self.assertEqual(expected_version, package.raw_version)

                    # The 1st two elements of the sys.path should be our custom SetupPyRunner Installer's
                    # setuptools and wheel mixins, which should match the setuptools and wheel versions
                    # specified by the PythonSetup subsystem.
                    init_subsystem(PythonSetup)
                    python_setup = PythonSetup.global_instance()
                    assert_extra('setuptools', python_setup.setuptools_version)
                    assert_extra('wheel', python_setup.wheel_version)
示例#57
0
    def run_tests(self, fail_fast, test_targets, output_dir, coverage):
        test_registry = self._collect_test_targets(test_targets)
        if test_registry.empty:
            return TestResult.successful

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                'Error parsing test result file {path}: {cause}'.format(
                    path=parse_error.xml_path, cause=parse_error.cause))

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to the runtime_classpath product.
        classpath_product = self.context.products.get_data(
            'instrument_classpath')

        result = 0
        for batch_id, (properties,
                       batch) in enumerate(self._iter_batches(test_registry)):
            (workdir, platform, target_jvm_options, target_env_vars,
             concurrency, threads) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir,
                                                'batch-{}'.format(batch_id))

            run_modifications = coverage.run_modifications(batch_output_dir)

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {
                test_registry.get_owning_target(t)
                for t in batch
            }

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(
                self.context))
            complete_classpath.update(
                self.classpath(relevant_targets,
                               classpath_product=classpath_product))

            distribution = JvmPlatform.preferred_jvm_distribution(
                [platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + [u'-xmlreport']

            if concurrency is not None:
                args = remove_arg(args, '-default-parallel')
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='SERIAL')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_METHODS')
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(args,
                                      '-default-concurrency',
                                      param='PARALLEL_CLASSES_AND_METHODS')

            if threads is not None:
                args = remove_arg(args, '-parallel-threads', has_param=True)
                args += ['-parallel-threads', str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs,
                                   self.get_options()) as batch_tests:
                with self.chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug('CWD = {}'.format(chroot))
                    self.context.log.debug('platform = {}'.format(platform))
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self.spawn_and_wait(
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options + extra_jvm_options +
                            list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name='run',
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            'JUnit subprocess exited with result ({})'.format(
                                subprocess_result))
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(batch_output_dir,
                                                  parse_error_handler,
                                                  ['classname'])
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info['classname'], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info)

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.successful

        target_to_failed_test = parse_failed_targets(test_registry, output_dir,
                                                     parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else None

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else '<Unknown Target>'

            for target in failed_targets:
                error_message_lines.append('\n{indent}{owner}'.format(
                    indent=' ' * 4, owner=render_owning_target(target)))
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(
                        '{indent}{classname}#{methodname}'.format(
                            indent=' ' * 8,
                            classname=test.classname,
                            methodname=test.methodname))
        error_message_lines.append(
            '\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.'
            .format(main=JUnit.RUNNER_MAIN,
                    code=result,
                    failed=len(failed_targets),
                    targets=pluralize(len(failed_targets), 'target')))
        return TestResult(msg='\n'.join(error_message_lines),
                          rc=result,
                          failed_targets=failed_targets)
示例#58
0
 def wrapper(self, *args, **kwargs):
   for env_var_value in ('false', 'true'):
     with environment_as(HERMETIC_ENV='PANTS_ENABLE_V2_ENGINE', PANTS_ENABLE_V2_ENGINE=env_var_value):
       f(self, *args, **kwargs)
示例#59
0
 def wrapper(self, *args, **kwargs):
     for env_var_value in ("ivy", "coursier"):
         with environment_as(HERMETIC_ENV="PANTS_RESOLVER_RESOLVER",
                             PANTS_RESOLVER_RESOLVER=env_var_value):
             f(self, *args, **kwargs)
示例#60
0
    def test_changelog_utf8(self):
        with environment_as(GIT_DIR=self.gitdir, GIT_WORK_TREE=self.worktree):

            def commit_contents_to_files(message, encoding, content, *files):
                for path in files:
                    with safe_open(os.path.join(self.worktree, path),
                                   'w') as fp:
                        fp.write(content)
                subprocess.check_call(['git', 'add', '.'])

                subprocess.check_call([
                    'git', 'config', '--local', '--add', 'i18n.commitencoding',
                    encoding
                ])
                try:
                    subprocess.check_call(
                        ['git', 'commit', '-m',
                         message.encode(encoding)])
                finally:
                    subprocess.check_call([
                        'git', 'config', '--local', '--unset-all',
                        'i18n.commitencoding'
                    ])

                return subprocess.check_output(['git', 'rev-parse',
                                                'HEAD']).strip()

            # Mix a non-UTF-8 author into all commits to verify that the corner case described here
            # does not adversely impact the ability to render the changelog (even if rendering for
            # certain characters is incorrect): http://comments.gmane.org/gmane.comp.version-control.git/262685
            # NB: This method of override requires we include `user.name` and `user.email` even though
            # we only use `user.name` to exercise non-UTF-8.  Without `user.email`, it will be unset
            # and commits can then fail on machines without a proper hostname setup for git to fall
            # back to when concocting a last-ditch `user.email`.
            non_utf8_config = dedent("""
      [user]
        name = Noralf Trønnes
        email = [email protected]
      """).encode('iso-8859-1')

            with open(os.path.join(self.gitdir, 'config'), 'wb') as fp:
                fp.write(non_utf8_config)

            # Note the copyright symbol is used as the non-ascii character in the next 3 commits
            commit_contents_to_files('START1 © END', 'iso-8859-1', '1', 'foo')
            commit_contents_to_files('START2 © END', 'latin1', '1', 'bar')
            commit_contents_to_files('START3 © END', 'utf-8', '1', 'baz')

            commit_contents_to_files('START4 ~ END', 'us-ascii', '1', 'bip')

            # Prove our non-utf-8 encodings were stored in the commit metadata.
            log = subprocess.check_output(['git', 'log', '--format=%e'])
            self.assertEqual([b'us-ascii', b'latin1', b'iso-8859-1'],
                             [_f for _f in log.strip().splitlines() if _f])

            # And show that the git log nonetheless successfully transcodes all the commits to utf-8.
            changelog = self.git.changelog()

            # The ascii commit should combine with the iso-8859-1 author and fail to transcode the
            # o-with-stroke character, and so it should be replaced with the utf-8 replacement
            # character \uFFFD or �.
            self.assertIn('Noralf Tr�nnes', changelog)
            self.assertIn('Noralf Tr\uFFFDnnes', changelog)

            # For the other 3 commits, each of iso-8859-1, latin1 and utf-8 has an encoding for the
            # o-with-stroke character - \u00F8 or ø - so we should find it:
            self.assertIn('Noralf Trønnes', changelog)
            self.assertIn('Noralf Tr\u00F8nnes', changelog)

            self.assertIn('START1 © END', changelog)
            self.assertIn('START2 © END', changelog)
            self.assertIn('START3 © END', changelog)
            self.assertIn('START4 ~ END', changelog)