Example #1
 def _copy_files(dest_dir, target):
   if isinstance(target, Files):
     for source in target.sources_relative_to_buildroot():
       src = os.path.join(get_buildroot(), source)
       dest = os.path.join(dest_dir, source)
       safe_mkdir_for(dest)
       shutil.copy(src, dest)
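Every example on this page calls safe_mkdir_for(path) to ensure the parent directory of a file exists before writing, copying, renaming, or symlinking to that path. As a reference point, here is a minimal sketch of the helper, assuming it behaves like pants.util.dirutil.safe_mkdir_for (the errno handling is an assumption based on that module, not a quote of it):

 import errno
 import os

 def safe_mkdir_for(path):
   # Create the parent directory of `path` (and any missing ancestors);
   # an already-existing directory is not an error.
   try:
     os.makedirs(os.path.dirname(path))
   except OSError as e:
     if e.errno != errno.EEXIST:
       raise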
Example #2
 def __init__(self, path=None):
   # Map path -> timing in seconds (a float)
   self._timings_by_path = defaultdict(float)
   self._tool_labels = set()
   self._path = path
   if path:
     safe_mkdir_for(self._path)
Example #3
 def generate_jar(path, *class_name):
   jar_path = os.path.join(self.test_workdir, 'jars', path)
   safe_mkdir_for(jar_path)
   with open_zip(jar_path, 'w') as zipfile:
     for clazz in class_name:
       zipfile.write(clazz, os.path.relpath(clazz, self.classes_dir))
     return jar_path
Example #4
 def run_modifications(self, output_dir):
   datafile = os.path.join(output_dir, self._DATAFILE_NAME)
   safe_mkdir_for(datafile)
   shutil.copy(self.canonical_datafile, datafile)
   datafile_option = '-Dnet.sourceforge.cobertura.datafile={datafile}'.format(datafile=datafile)
   return self.RunModifications(classpath_prepend=self._settings.tool_classpath('cobertura-run'),
                                extra_jvm_options=[datafile_option])
Example #5
  def execute(self):
    binaries = self.context.targets(self.is_binary)

    # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
    names = {}
    for binary in binaries:
      name = binary.name
      if name in names:
        raise TaskError('Cannot build two binaries with the same name in a single invocation. '
                        '{} and {} both have the name {}.'.format(binary, names[name], name))
      names[name] = binary

    with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
      python_deployable_archive = self.context.products.get('deployable_archives')
      python_pex_product = self.context.products.get('pex_archives')
      for vt in invalidation_check.all_vts:
        pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if not vt.valid:
          self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
          self._create_binary(vt.target, vt.results_dir)
        else:
          self.context.log.debug('using cache for {}'.format(vt.target))

        basename = os.path.basename(pex_path)
        python_pex_product.add(vt.target, os.path.dirname(pex_path)).append(basename)
        python_deployable_archive.add(vt.target, os.path.dirname(pex_path)).append(basename)
        self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))

        # Create a copy for pex.
        pex_copy = os.path.join(self._distdir, os.path.basename(pex_path))
        safe_mkdir_for(pex_copy)
        atomic_copy(pex_path, pex_copy)
        self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))
Example #6
 def _store_tarball(self, cache_key, src):
   dest = self._cache_file_for_key(cache_key)
   safe_mkdir_for(dest)
   os.rename(src, dest)
   if self._permissions:
     os.chmod(dest, self._permissions)
   self.prune(os.path.dirname(dest))  # Remove old cache files.
   return dest
Example #7
 def report(self):
   self._logger.debug('Generating JUnit HTML report...')
   testsuites = self._parse_xml_files(self._xml_dir)
   safe_mkdir_for(self._report_file_path)
   with open(self._report_file_path, 'wb') as fp:
     fp.write(ensure_binary(self._generate_html(testsuites)))
   self._logger.debug('JUnit HTML report generated to {}'.format(self._report_file_path))
   return self._report_file_path
Example #8
File: artifact.py Project: amedina/pants
 def extract(self):
   for dir_name, _, filenames in safe_walk(self._directory):
     for filename in filenames:
       filename = os.path.join(dir_name, filename)
       relpath = os.path.relpath(filename, self._directory)
       dst = os.path.join(self._artifact_root, relpath)
       safe_mkdir_for(dst)
       shutil.copy(filename, dst)
       self._relpaths.add(relpath)
Example #9
 def _create_interpreter_path_file(self, interpreter_path_file, targets):
   interpreter_cache = self._interpreter_cache()
   interpreter = interpreter_cache.select_interpreter_for_targets(targets)
   safe_mkdir_for(interpreter_path_file)
   with open(interpreter_path_file, 'w') as outfile:
     # The file is opened in text mode, so write str rather than bytes.
     outfile.write('{}\t{}\n'.format(interpreter.binary, str(interpreter.identity)))
     for dist, location in interpreter.extras.items():
       dist_name, dist_version = dist
       outfile.write('{}\t{}\t{}\n'.format(dist_name, dist_version, location))
Example #10
 def __init__(self, info_file):
   self._info_file = info_file
   safe_mkdir_for(self._info_file)
   self._info = {}
   if os.path.exists(self._info_file):
     with open(self._info_file, 'r') as infile:
       info = infile.read()
     for m in re.finditer("""^([^:]+):(.*)$""", info, re.MULTILINE):
       self._info[m.group(1).strip()] = m.group(2).strip()
Example #11
 def _symlink_lib(self, gopath, lib, source_iter, required_links):
   src_dir = os.path.join(gopath, 'src', lib.import_path)
   safe_mkdir(src_dir)
   for path, dest in source_iter:
     src_link = os.path.join(src_dir, dest)
     safe_mkdir_for(src_link)
     if not os.path.islink(src_link):
       os.symlink(path, src_link)
     required_links.add(src_link)
Example #12
 def output(self, name):
   """Returns the output buffer for the specified output name (e.g., 'stdout')."""
   m = WorkUnit._valid_name_re.match(name)
   if not m or m.group(0) != name:
     raise Exception('Invalid output name: %s' % name)
   if name not in self._outputs:
     path = os.path.join(self.run_tracker.info_dir, 'tool_outputs', '%s.%s' % (self.id, name))
     safe_mkdir_for(path)
     self._outputs[name] = FileBackedRWBuf(path)
   return self._outputs[name]
Example #13
 def report(self, output_dir):
   self._logger.debug('Generating JUnit HTML report...')
   testsuites = self._parse_xml_files()
   report_file_path = os.path.join(output_dir, 'reports', 'junit-report.html')
   safe_mkdir_for(report_file_path)
   with open(report_file_path, 'w') as fp:
     fp.write(self._generate_html(testsuites))
   self._logger.debug('JUnit HTML report generated to {}'.format(report_file_path))
   if self._open_report:
     return report_file_path
Example #14
  def add_file(self, root, path, content):
    """Add a file with specified contents

    :param str root: Root directory for path.
    :param str path: Path relative to root.
    :param str content: Content to write to file.
    """
    fullpath = os.path.join(root, path)
    safe_mkdir_for(fullpath)
    with open(fullpath, 'w') as outfile:
      outfile.write(content)
Example #15
 def _create_interpreter_path_file(self, interpreter_path_file, targets):
   interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
                                              PythonRepos.global_instance(),
                                              logger=self.context.log.debug)
   interpreter = interpreter_cache.select_interpreter_for_targets(targets)
   safe_mkdir_for(interpreter_path_file)
   with open(interpreter_path_file, 'w') as outfile:
     # The file is opened in text mode, so write str rather than bytes.
     outfile.write('{}\n'.format(interpreter.binary))
     for dist, location in interpreter.extras.items():
       dist_name, dist_version = dist
       outfile.write('{}\t{}\t{}\n'.format(dist_name, dist_version, location))
Example #16
 def _copy_sources(self, dist_tgt, dist_target_dir):
   # Copy sources and setup.py over to vt results directory for packaging.
   # NB: The directory structure of the destination directory needs to match 1:1
   # with the directory structure that setup.py expects.
   all_sources = list(dist_tgt.sources_relative_to_target_base())
   for src_relative_to_target_base in all_sources:
     src_rel_to_results_dir = os.path.join(dist_target_dir, src_relative_to_target_base)
     safe_mkdir_for(src_rel_to_results_dir)
     abs_src_path = os.path.join(get_buildroot(),
                                 dist_tgt.address.spec_path,
                                 src_relative_to_target_base)
     shutil.copyfile(abs_src_path, src_rel_to_results_dir)
Example #17
  def _bootstrap_shaded_jvm_tool(self, key, scope, tools, main, custom_rules=None):
    shaded_jar = os.path.join(self._tool_cache_path,
                              'shaded_jars', scope, key, '{}.jar'.format(main))

    targets = list(self._resolve_tool_targets(tools, key, scope))
    fingerprint_strategy = ShadedToolFingerprintStrategy(key, scope, main,
                                                         custom_rules=custom_rules)
    with self.invalidated(targets,
                          # We're the only dependent in reality since we shade.
                          invalidate_dependents=False,
                          fingerprint_strategy=fingerprint_strategy) as invalidation_check:

      if not invalidation_check.invalid_vts and os.path.exists(shaded_jar):
        return [shaded_jar]

      # Ensure we have a single binary jar we can shade.
      binary_jar = os.path.join(self._tool_cache_path,
                                'binary_jars', scope, key, '{}.jar'.format(main))
      safe_mkdir_for(binary_jar)

      classpath = self._bootstrap_classpath(key, targets)
      if len(classpath) == 1:
        shutil.copy(classpath[0], binary_jar)
      else:
        with self.open_jar(binary_jar) as jar:
          for classpath_jar in classpath:
            jar.writejar(classpath_jar)
          jar.main(main)

      # Now shade the binary jar and return that single jar as the safe tool classpath.
      safe_mkdir_for(shaded_jar)
      with self.shader.binary_shader(shaded_jar,
                                     main,
                                     binary_jar,
                                     custom_rules=custom_rules,
                                     jvm_options=self.get_options().jvm_options) as shader:
        try:
          result = util.execute_runner(shader,
                                       workunit_factory=self.context.new_workunit,
                                       workunit_name='shade-{}'.format(key))
          if result != 0:
            raise TaskError("Shading of tool '{key}' with main class {main} for {scope} failed "
                            "with exit code {result}, command run was:\n\t{cmd}"
                            .format(key=key, main=main, scope=scope, result=result, cmd=shader.cmd))
        except Executor.Error as e:
          raise TaskError("Shading of tool '{key}' with main class {main} for {scope} failed "
                          "with: {exception}".format(key=key, main=main, scope=scope, exception=e))

      if self.artifact_cache_writes_enabled():
        tool_vts = self.tool_vts(invalidation_check)
        self.update_artifact_cache([(tool_vts, [shaded_jar])])

      return [shaded_jar]
Example #18
 def get_cookie_jar(self):
   """Returns our cookie jar."""
   cookie_file = self._get_cookie_file()
   cookie_jar = LWPCookieJar(cookie_file)
   if os.path.exists(cookie_file):
     cookie_jar.load()
   else:
     safe_mkdir_for(cookie_file)
     # Save an empty cookie jar so we can change the file perms on it before writing data to it.
     with self._lock:
       cookie_jar.save()
     os.chmod(cookie_file, 0o600)
   return cookie_jar
Example #19
  def execute(self):
    interpreter = None
    python_tgts = self.context.targets(lambda tgt: isinstance(tgt, PythonTarget))
    fs = PythonInterpreterFingerprintStrategy(task=self)
    with self.invalidated(python_tgts, fingerprint_strategy=fs) as invalidation_check:
      # If there are no relevant targets, we still go through the motions of selecting
      # an interpreter, to prevent downstream tasks from having to check for this special case.
      if invalidation_check.all_vts:
        target_set_id = VersionedTargetSet.from_versioned_targets(
            invalidation_check.all_vts).cache_key.hash
      else:
        target_set_id = 'no_targets'
      interpreter_path_file = os.path.join(self.workdir, target_set_id, 'interpreter.path')
      if not os.path.exists(interpreter_path_file):
        interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
                                                   PythonRepos.global_instance(),
                                                   logger=self.context.log.debug)

        # We filter the interpreter cache itself (and not just the interpreters we pull from it)
        # because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us
        # an escape hatch.
        filters = self.get_options().constraints or [b'']

        # Cache setup's requirement fetching can hang if run concurrently by another pants proc.
        self.context.acquire_lock()
        try:
          interpreter_cache.setup(filters=filters)
        finally:
          self.context.release_lock()

        interpreter = interpreter_cache.select_interpreter_for_targets(python_tgts)
        safe_mkdir_for(interpreter_path_file)
        with open(interpreter_path_file, 'w') as outfile:
          outfile.write('{}\t{}\n'.format(interpreter.binary, str(interpreter.identity)))
          for dist, location in interpreter.extras.items():
            dist_name, dist_version = dist
            outfile.write('{}\t{}\t{}\n'.format(dist_name, dist_version, location))

    if not interpreter:
      with open(interpreter_path_file, 'r') as infile:
        lines = infile.readlines()
        binary, identity = lines[0].strip().split('\t')
        extras = {}
        for line in lines[1:]:
          dist_name, dist_version, location = line.strip().split('\t')
          extras[(dist_name, dist_version)] = location

      interpreter = PythonInterpreter(binary, PythonIdentity.from_path(identity), extras)

    self.context.products.get_data(PythonInterpreter, lambda: interpreter)
Example #20
  def execute_codegen(self, target, target_workdir):
    for source in target.sources_relative_to_buildroot():
      abs_source = os.path.join(get_buildroot(), source)

      output_file = os.path.join(target_workdir, calculate_genfile(abs_source))
      safe_mkdir_for(output_file)

      args = [self.ragel_binary, '-J', '-o', output_file, abs_source]

      self.context.log.debug('Executing: {args}'.format(args=' '.join(args)))
      process = subprocess.Popen(args)
      result = process.wait()
      if result != 0:
        raise TaskError('{binary} ... exited non-zero ({result})'
                        .format(binary=self.ragel_binary, result=result))
Example #21
    def try_insert(self, cache_key, paths):
        tarfile = self._cache_file_for_key(cache_key)
        safe_mkdir_for(tarfile)
        # Write to a temporary name (on the same filesystem), and move it atomically, so if we
        # crash in the middle we don't leave an incomplete or missing artifact.
        tarfile_tmp = tarfile + '.' + str(uuid.uuid4()) + '.tmp'
        if os.path.exists(tarfile_tmp):
            os.unlink(tarfile_tmp)

        artifact = TarballArtifact(self.artifact_root, tarfile_tmp,
                                   self._compress)
        artifact.collect(paths)
        # Note: Race condition here if multiple pants runs (in different workspaces)
        # try to write the same thing at the same time. However since rename is atomic,
        # this should not result in corruption. It may, however, result in a missing
        # artifact if we crash between the unlink and the rename. But that's OK.
        if os.path.exists(tarfile):
            os.unlink(tarfile)
        os.rename(tarfile_tmp, tarfile)
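The comments above capture the crash-safety argument: build the artifact under a temporary name on the same filesystem, then publish it with a single os.rename, which is atomic on POSIX, so readers never observe a partial file. A standalone sketch of that pattern (atomic_publish and write_fn are hypothetical names, not Pants APIs):

 import os
 import uuid

 def atomic_publish(final_path, write_fn):
   # `write_fn` writes the artifact to the temporary path; the rename then
   # publishes it atomically (both paths must be on the same filesystem).
   tmp_path = '{}.{}.tmp'.format(final_path, uuid.uuid4())
   try:
     write_fn(tmp_path)
     os.rename(tmp_path, final_path)
   finally:
     if os.path.exists(tmp_path):  # left behind only if something failed
       os.unlink(tmp_path)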
Example #22
File: workunit.py Project: Xaelias/pants
    def output(self, name):
        """Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if
    necessary.

    :API: public
    """
        m = WorkUnit._valid_name_re.match(name)
        if not m or m.group(0) != name:
            raise Exception('Invalid output name: {}'.format(name))
        if name not in self._outputs:
            workunit_name = re.sub(r'\W', '_', self.name)
            path = os.path.join(
                self.run_info_dir, 'tool_outputs',
                '{workunit_name}-{id}.{output_name}'.format(
                    workunit_name=workunit_name, id=self.id, output_name=name))
            safe_mkdir_for(path)
            self._outputs[name] = FileBackedRWBuf(path)
            self._output_paths[name] = path
        return self._outputs[name]
Example #23
    def _add_artifacts(self, dist_target_dir, shared_libs_product,
                       native_artifact_targets):
        all_shared_libs = []
        for tgt in native_artifact_targets:
            product_mapping = shared_libs_product.get(tgt)
            base_dir = assert_single_element(product_mapping.keys())
            shared_lib = assert_single_element(product_mapping[base_dir])
            all_shared_libs.append(shared_lib)

        for shared_lib in all_shared_libs:
            basename = os.path.basename(shared_lib.path)
            # NB: We convert everything to .so here so that the setup.py can just
            # declare .so to build for either platform.
            resolved_outname = re.sub(r'\..*\Z', '.so', basename)
            dest_path = os.path.join(dist_target_dir, resolved_outname)
            safe_mkdir_for(dest_path)
            shutil.copyfile(shared_lib.path, dest_path)

        return all_shared_libs
Example #24
    def execute(self):
        binaries = self.context.targets(self.is_binary)

        # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
        names = {}
        for binary in binaries:
            name = binary.name
            if name in names:
                raise TaskError(
                    f"Cannot build two binaries with the same name in a single invocation. "
                    "{binary} and {names[name]} both have the name {name}.")
            names[name] = binary

        with self.invalidated(
                binaries, invalidate_dependents=True) as invalidation_check:
            python_deployable_archive = self.context.products.get(
                "deployable_archives")
            python_pex_product = self.context.products.get("pex_archives")
            for vt in invalidation_check.all_vts:
                pex_path = os.path.join(vt.results_dir,
                                        f"{vt.target.name}.pex")
                if not vt.valid:
                    self.context.log.debug(
                        f"cache for {vt.target} is invalid, rebuilding")
                    self._create_binary(vt.target, vt.results_dir)
                else:
                    self.context.log.debug(f"using cache for {vt.target}")

                basename = os.path.basename(pex_path)
                python_pex_product.add(
                    vt.target, os.path.dirname(pex_path)).append(basename)
                python_deployable_archive.add(
                    vt.target, os.path.dirname(pex_path)).append(basename)
                self.context.log.debug("created {}".format(
                    os.path.relpath(pex_path, get_buildroot())))

                # Create a copy for pex.
                pex_copy = os.path.join(self._distdir,
                                        os.path.basename(pex_path))
                safe_mkdir_for(pex_copy)
                atomic_copy(pex_path, pex_copy)
                self.context.log.info("created pex {}".format(
                    os.path.relpath(pex_copy, get_buildroot())))
Example #25
    def execute_codegen(self, invalid_targets):
        for target in invalid_targets:
            output_dir = self.codegen_workdir(target)
            for source in target.sources_relative_to_buildroot():
                abs_source = os.path.join(get_buildroot(), source)

                output_file = os.path.join(output_dir,
                                           calculate_genfile(abs_source))
                safe_mkdir_for(output_file)

                args = [self.ragel_binary, '-J', '-o', output_file, abs_source]

                self.context.log.debug(
                    'Executing: {args}'.format(args=' '.join(args)))
                process = subprocess.Popen(args)
                result = process.wait()
                if result != 0:
                    raise TaskError(
                        '{binary} ... exited non-zero ({result})'.format(
                            binary=self.ragel_binary, result=result))
Example #26
  def execute(self):
    interpreter = None
    python_tgts = self.context.targets(lambda tgt: isinstance(tgt, PythonTarget))
    fs = PythonInterpreterFingerprintStrategy(task=self)
    with self.invalidated(python_tgts, fingerprint_strategy=fs) as invalidation_check:
      # If there are no relevant targets, we still go through the motions of selecting
      # an interpreter, to prevent downstream tasks from having to check for this special case.
      if invalidation_check.all_vts:
        target_set_id = VersionedTargetSet.from_versioned_targets(
            invalidation_check.all_vts).cache_key.hash
      else:
        target_set_id = 'no_targets'
      interpreter_path_file = os.path.join(self.workdir, target_set_id, 'interpreter.path')
      if not os.path.exists(interpreter_path_file):
        interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
                                                   PythonRepos.global_instance(),
                                                   logger=self.context.log.debug)

        # We filter the interpreter cache itself (and not just the interpreters we pull from it)
        # because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us
        # an escape hatch.
        filters = self.get_options().interpreter or [b'']

        # Cache setup's requirement fetching can hang if run concurrently by another pants proc.
        self.context.acquire_lock()
        try:
          interpreter_cache.setup(filters=filters)
        finally:
          self.context.release_lock()

        interpreter = interpreter_cache.select_interpreter_for_targets(python_tgts)
        safe_mkdir_for(interpreter_path_file)
        with open(interpreter_path_file, 'w') as outfile:
          outfile.write('{}\t{}'.format(interpreter.binary, interpreter.identity))

    if not interpreter:
      with open(interpreter_path_file, 'r') as infile:
        binary, identity = infile.read().split('\t')
      interpreter = PythonInterpreter(binary, identity)

    self.context.products.get_data(PythonInterpreter, lambda: interpreter)
Example #27
    def execute(self):
        binaries = self.context.targets(self.is_binary)

        # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
        names = {}
        for binary in binaries:
            name = binary.name
            if name in names:
                raise TaskError(
                    'Cannot build two binaries with the same name in a single invocation. '
                    '{} and {} both have the name {}.'.format(
                        binary, names[name], name))
            names[name] = binary

        with self.invalidated(
                binaries, invalidate_dependents=True) as invalidation_check:
            python_deployable_archive = self.context.products.get(
                'deployable_archives')
            python_pex_product = self.context.products.get('pex_archives')
            for vt in invalidation_check.all_vts:
                pex_path = os.path.join(vt.results_dir,
                                        '{}.pex'.format(vt.target.name))
                if not vt.valid:
                    self.create_binary(vt.target, vt.results_dir)

                python_pex_product.add(vt.target,
                                       os.path.dirname(pex_path)).append(
                                           os.path.basename(pex_path))
                python_deployable_archive.add(
                    vt.target, os.path.dirname(pex_path)).append(
                        os.path.basename(pex_path))
                self.context.log.debug('created {}'.format(
                    os.path.relpath(pex_path, get_buildroot())))

                # Create a copy for pex.
                pex_copy = os.path.join(self._distdir,
                                        os.path.basename(pex_path))
                safe_mkdir_for(pex_copy)
                atomic_copy(pex_path, pex_copy)
                self.context.log.info('created pex copy {}'.format(
                    os.path.relpath(pex_copy, get_buildroot())))
Example #28
  def dist(self) -> Distribution:
    """Return the `Distribution` selected for Zinc based on execution strategy."""
    underlying_dist = self.underlying_dist
    if self._execution_strategy == NailgunTaskBase.HERMETIC:
      return underlying_dist
    # symlink .pants.d/.jdk -> /some/java/home/
    jdk_home_symlink = Path(
      self._zinc_factory.get_options().pants_workdir, '.jdk'
    ).relative_to(get_buildroot())

    # Since this code can be run in multi-threading mode due to multiple
    # zinc workers, we need to make sure the file operations below are atomic.
    with self._lock:
      # Create the symlink if it does not exist, or points to a file that doesn't exist,
      # (e.g., a JDK that is no longer present), or points to the wrong JDK.
      if not jdk_home_symlink.exists() or jdk_home_symlink.resolve() != Path(underlying_dist.home):
        safe_delete(str(jdk_home_symlink))  # Safe-delete, in case it's a broken symlink.
        safe_mkdir_for(jdk_home_symlink)
        jdk_home_symlink.symlink_to(underlying_dist.home)

    return Distribution(home_path=jdk_home_symlink)
Example #29
@contextmanager
def setup_pexrc_with_pex_python_path(interpreter_paths):
  """A helper function for writing interpreter paths to a PEX_PYTHON_PATH variable in a .pexrc file.

  NB: Mutates HOME and XDG_CACHE_HOME to ensure a `~/.pexrc` that won't trample any existing file
  and will also be found.

  :param list interpreter_paths: a list of paths to interpreter binaries to include on
                                 PEX_PYTHON_PATH.
  """
  cache_dir = get_pants_cachedir()
  with temporary_dir() as home:
    xdg_cache_home = os.path.join(home, '.cache')
    with environment_as(HOME=home, XDG_CACHE_HOME=xdg_cache_home):
      target = os.path.join(xdg_cache_home, os.path.basename(cache_dir))
      safe_mkdir_for(target)
      os.symlink(cache_dir, target)

      with open(os.path.join(home, '.pexrc'), 'w') as pexrc:
        pexrc.write('PEX_PYTHON_PATH={}'.format(':'.join(interpreter_paths)))

      yield
Example #30
  def execute(self):
    targets = self.get_targets(self._is_python_lambda)
    with self.invalidated(targets=targets, invalidate_dependents=True) as invalidation_check:
      python_lambda_product = self.context.products.get_data('python_aws_lambda', dict)
      for vt in invalidation_check.all_vts:
        lambda_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if not vt.valid:
          self.context.log.debug('Existing lambda for {} is invalid, rebuilding'.format(vt.target))
          self._create_lambda(vt.target, lambda_path)
        else:
          self.context.log.debug('Using existing lambda for {}'.format(vt.target))

        python_lambda_product[vt.target] = lambda_path
        self.context.log.debug('created {}'.format(os.path.relpath(lambda_path, get_buildroot())))

        # Put a copy in distdir.
        lambda_copy = os.path.join(self.get_options().pants_distdir, os.path.basename(lambda_path))
        safe_mkdir_for(lambda_copy)
        atomic_copy(lambda_path, lambda_copy)
        self.context.log.info('created lambda {}'.format(
          os.path.relpath(lambda_copy, get_buildroot())))
Example #31
  def _compile(self, target, results_dir, source):
    """Compile given source to an object file."""
    obj = self._objpath(target, results_dir, source)
    safe_mkdir_for(obj)

    abs_source = os.path.join(get_buildroot(), source)

    # TODO: include dir should include dependent work dir when headers are copied there.
    include_dirs = []
    for dep in target.dependencies:
      if self.is_library(dep):
        include_dirs.extend([os.path.join(get_buildroot(), dep.target_base)])

    cmd = [self.cpp_toolchain.compiler]
    cmd.extend(['-c'])
    cmd.extend(('-I{0}'.format(i) for i in include_dirs))
    cmd.extend(['-o' + obj, abs_source])
    cmd.extend(self.get_options().cc_options)

    # TODO: submit_async_work with self.run_command, [(cmd)] as a Work object.
    with self.context.new_workunit(name='cpp-compile', labels=[WorkUnitLabel.COMPILER]) as workunit:
      self.run_command(cmd, workunit)

    self.context.log.info('Built c++ object: {0}'.format(obj))
Example #32
 def _get_junit_xml_path(self, targets):
     xml_path = os.path.join(
         self.workdir, 'junitxml',
         'TEST-{}.xml'.format(Target.maybe_readable_identify(targets)))
     safe_mkdir_for(xml_path)
     return xml_path
Example #33
 def _select_interpreter(self, interpreter_path_file, targets):
   interpreter = self._interpreter_cache.select_interpreter_for_targets(targets)
   safe_mkdir_for(interpreter_path_file)
   with open(interpreter_path_file, 'w') as outfile:
     outfile.write('{}\n'.format(interpreter.binary))
   return interpreter
Example #34
 def add_file(path, content):
     fullpath = os.path.join(fake_buildroot, path)
     safe_mkdir_for(fullpath)
     with open(fullpath, 'w') as outfile:
         outfile.write(content)
Example #35
 def __init__(self, path=None):
     # Map path -> timing in seconds (a float)
     self._timings_by_path = defaultdict(float)
     self._tool_labels = set()
     self._path = path
     # Guard: path may be None (per the signature default, and as in Example #2),
     # so only create its parent directory when a real path was given.
     if path:
         safe_mkdir_for(self._path)
Example #36
 def write_file(file_path, contents):
   full_file_path = os.path.join(tmp_dir, *file_path.split(os.pathsep))
   safe_mkdir_for(full_file_path)
   with open(full_file_path, 'w') as fh:
     fh.write(contents)
Example #37
 def copy(src, rel_dst):
     dst = os.path.join(self.artifact_root, rel_dst)
     safe_mkdir_for(dst)
     shutil.copy(src, dst)
Example #38
def initialize_stdio(
        global_bootstrap_options: OptionValueContainer) -> Iterator[None]:
    """Mutates sys.std* and logging to route stdio for a Pants process to thread local destinations.

    In this context, `sys.std*` and logging handlers will route through Rust code that uses
    thread-local information to decide whether to write to a file, or to stdio file handles.

    To control the stdio destination set by this method, use the `stdio_destination` context manager.

    This is called in two different processes:
    * PantsRunner, after it has determined that LocalPantsRunner will be running in process, and
      immediately before setting a `stdio_destination` for the remainder of the run.
    * PantsDaemon, immediately on startup. The process will then default to sending stdio to the log
      until client connections arrive, at which point `stdio_destination` is used per-connection.
    """
    global_level = global_bootstrap_options.level
    log_show_rust_3rdparty = global_bootstrap_options.log_show_rust_3rdparty
    show_target = global_bootstrap_options.show_log_target
    log_levels_by_target = _get_log_levels_by_target(global_bootstrap_options)
    print_stacktrace = global_bootstrap_options.print_stacktrace
    local_cleanup = global_bootstrap_options.process_execution_local_cleanup

    literal_filters = []
    regex_filters = []
    for filt in cast("list[str]", global_bootstrap_options.ignore_warnings):
        if filt.startswith("$regex$"):
            regex_filters.append(strip_prefix(filt, "$regex$"))
        else:
            literal_filters.append(filt)

    # Set the pants log destination.
    log_path = str(
        pants_log_path(PurePath(global_bootstrap_options.pants_workdir)))
    safe_mkdir_for(log_path)

    # Initialize thread-local stdio, and replace sys.std* with proxies.
    original_stdin, original_stdout, original_stderr = sys.stdin, sys.stdout, sys.stderr
    try:
        raw_stdin, sys.stdout, sys.stderr = native_engine.stdio_initialize(
            global_level.level,
            log_show_rust_3rdparty,
            show_target,
            {k: v.level
             for k, v in log_levels_by_target.items()},
            tuple(literal_filters),
            tuple(regex_filters),
            log_path,
        )
        sys.stdin = TextIOWrapper(
            BufferedReader(raw_stdin),
            # NB: We set the default encoding explicitly to bypass logic in the TextIOWrapper
            # constructor that would poke the underlying file (which is not valid until a
            # `stdio_destination` is set).
            encoding=locale.getpreferredencoding(False),
        )

        sys.__stdin__, sys.__stdout__, sys.__stderr__ = sys.stdin, sys.stdout, sys.stderr
        # Install a Python logger that will route through the Rust logger.
        with _python_logging_setup(global_level,
                                   print_stacktrace=print_stacktrace,
                                   local_cleanup=local_cleanup):
            yield
    finally:
        sys.stdin, sys.stdout, sys.stderr = original_stdin, original_stdout, original_stderr
        sys.__stdin__, sys.__stdout__, sys.__stderr__ = sys.stdin, sys.stdout, sys.stderr
Example #39
 def _store_tarball(self, cache_key, src):
     dest = self._cache_file_for_key(cache_key)
     safe_mkdir_for(dest)
     os.rename(src, dest)
     self.prune(os.path.dirname(dest))  # Remove old cache files.
     return dest
Example #40
    def execute(self):

        exported_targets_map = self.generate_targets_map(
            self.context.targets())
        export_result = json.dumps(exported_targets_map,
                                   indent=4,
                                   separators=(',', ': '))

        with temporary_dir() as tmpdir:
            export_outfile = os.path.join(tmpdir, 'export-out.json')
            # json.dumps returns str, so write the export in text mode.
            with open(export_outfile, 'w') as outf:
                outf.write(export_result)

            ensime_gen_jar = self.context.products.get_data(EnsimeGenJar)
            ensime_gen_classpath = [ensime_gen_jar.tool_jar_path]

            # TODO: use JvmPlatform for jvm options!
            reported_scala_version = self.get_options().reported_scala_version
            if not reported_scala_version:
                reported_scala_version = self._scala_platform.version

            zinc_compile_dir = os.path.join(self.get_options().pants_workdir,
                                            'compile/zinc')

            output_file = os.path.join(get_buildroot(),
                                       self.get_options().output_file)
            safe_mkdir_for(output_file)

            # This is what we depend on in 3rdparty/jvm:ensime-server.
            ensime_server_version = '2.0.1'

            ensime_server_jars = self.tool_classpath_from_products(
                self.context.products,
                'ensime-server',
                scope=self.options_scope)

            scala_compiler_jars = self._scala_platform.compiler_classpath(
                self.context.products)

            argv = [
                get_buildroot(),
                reported_scala_version,
                self._make_ensime_cache_dir(),
                zinc_compile_dir,
                output_file,
                ensime_server_version,
            ]

            env = {
                'SCALAC_ARGS': json.dumps(self.get_options().scalac_options),
                'JAVAC_ARGS': json.dumps(self.get_options().javac_options),
                'ENSIME_SERVER_JARS_CLASSPATH': ':'.join(ensime_server_jars),
                'SCALA_COMPILER_JARS_CLASSPATH': ':'.join(scala_compiler_jars),
            }

            with open(export_outfile, 'rb') as inf:
                with environment_as(**env):
                    execute_java(ensime_gen_classpath,
                                 'pingpong.ensime.EnsimeFileGen',
                                 args=argv,
                                 workunit_name='ensime-gen-invoke',
                                 workunit_labels=[WorkUnitLabel.TOOL],
                                 distribution=DistributionLocator.cached(),
                                 stdin=inf)
Example #41
 def junitxml_path(self, *targets):
     xml_path = os.path.join(
         self.root_dir, 'junitxml',
         'TEST-{}.xml'.format(self.target_set_id(*targets)))
     safe_mkdir_for(xml_path)
     return xml_path
Example #42
File: logging.py Project: jriddy/pants
def initialize_stdio(
        global_bootstrap_options: OptionValueContainer) -> Iterator[None]:
    """Mutates sys.std* and logging to route stdio for a Pants process to thread local destinations.

    In this context, `sys.std*` and logging handlers will route through Rust code that uses
    thread-local information to decide whether to write to a file, or to stdio file handles.

    To control the stdio destination set by this method, use the `stdio_destination` context manager.

    This is called in two different processes:
    * PantsRunner, after it has determined that LocalPantsRunner will be running in process, and
      immediately before setting a `stdio_destination` for the remainder of the run.
    * PantsDaemon, immediately on startup. The process will then default to sending stdio to the log
      until client connections arrive, at which point `stdio_destination` is used per-connection.
    """
    global_level = global_bootstrap_options.level
    log_show_rust_3rdparty = global_bootstrap_options.log_show_rust_3rdparty
    use_color = global_bootstrap_options.colors
    show_target = global_bootstrap_options.show_log_target
    log_levels_by_target = _get_log_levels_by_target(global_bootstrap_options)
    message_regex_filters = global_bootstrap_options.ignore_pants_warnings
    print_stacktrace = global_bootstrap_options.print_stacktrace

    # Set the pants log destination.
    deprecated_log_path = os.path.join(global_bootstrap_options.pants_workdir,
                                       "pantsd", "pantsd.log")
    log_path = os.path.join(global_bootstrap_options.pants_workdir,
                            "pants.log")
    safe_mkdir_for(deprecated_log_path)
    safe_mkdir_for(log_path)
    # NB: We append to the deprecated log location with a deprecated conditional that never
    # triggers, because there is nothing that the user can do about the deprecation.
    deprecated_conditional(
        predicate=lambda: False,
        removal_version="2.5.0.dev0",
        entity_description=f"Logging to {deprecated_log_path}",
        hint_message=f"Refer to {log_path} instead.",
    )
    with open(deprecated_log_path, "a") as a:
        a.write(
            f"This log location is deprecated: please refer to {log_path} instead.\n"
        )

    # Initialize thread-local stdio, and replace sys.std* with proxies.
    original_stdin, original_stdout, original_stderr = sys.stdin, sys.stdout, sys.stderr
    try:
        sys.stdin, sys.stdout, sys.stderr = native_engine.stdio_initialize(
            global_level.level,
            log_show_rust_3rdparty,
            use_color,
            show_target,
            {k: v.level
             for k, v in log_levels_by_target.items()},
            tuple(message_regex_filters),
            log_path,
        )
        # Install a Python logger that will route through the Rust logger.
        with _python_logging_setup(global_level, print_stacktrace):
            yield
    finally:
        sys.stdin, sys.stdout, sys.stderr = original_stdin, original_stdout, original_stderr
Example #43
    def _bootstrap_shaded_jvm_tool(self, jvm_tool, targets):
        fingerprint_strategy = ShadedToolFingerprintStrategy(
            jvm_tool.main, custom_rules=jvm_tool.custom_rules)

        with self.invalidated(
                targets,
                # We're the only dependent in reality since we shade.
                invalidate_dependents=False,
                fingerprint_strategy=fingerprint_strategy
        ) as invalidation_check:

            # If there are no vts, then there are no resolvable targets, so we exit early with an empty
            # classpath.  This supports the optional tool classpath case.
            if not invalidation_check.all_vts:
                return []

            tool_vts = self.tool_vts(invalidation_check)
            jar_name = '{main}-{hash}.jar'.format(main=jvm_tool.main,
                                                  hash=tool_vts.cache_key.hash)
            shaded_jar = os.path.join(self._tool_cache_path, 'shaded_jars',
                                      jar_name)

            if not invalidation_check.invalid_vts and os.path.exists(
                    shaded_jar):
                return [shaded_jar]

            # Ensure we have a single binary jar we can shade.
            binary_jar = os.path.join(self._tool_cache_path, 'binary_jars',
                                      jar_name)
            safe_mkdir_for(binary_jar)

            classpath = self._bootstrap_classpath(jvm_tool, targets)
            if len(classpath) == 1:
                shutil.copy(classpath[0], binary_jar)
            else:
                with self.open_jar(binary_jar) as jar:
                    for classpath_jar in classpath:
                        jar.writejar(classpath_jar)
                    jar.main(jvm_tool.main)

            # Now shade the binary jar and return that single jar as the safe tool classpath.
            safe_mkdir_for(shaded_jar)
            with self.shader.binary_shader(
                    shaded_jar,
                    jvm_tool.main,
                    binary_jar,
                    custom_rules=jvm_tool.custom_rules,
                    jvm_options=self.get_options().jvm_options) as shader:
                try:
                    result = util.execute_runner(
                        shader,
                        workunit_factory=self.context.new_workunit,
                        workunit_name='shade-{}'.format(jvm_tool.key))
                    if result != 0:
                        raise TaskError(
                            "Shading of tool '{key}' with main class {main} for {scope} failed "
                            "with exit code {result}, command run was:\n\t{cmd}"
                            .format(key=jvm_tool.key,
                                    main=jvm_tool.main,
                                    scope=jvm_tool.scope,
                                    result=result,
                                    cmd=shader.cmd))
                except Executor.Error as e:
                    raise TaskError(
                        "Shading of tool '{key}' with main class {main} for {scope} failed "
                        "with: {exception}".format(key=jvm_tool.key,
                                                   main=jvm_tool.main,
                                                   scope=jvm_tool.scope,
                                                   exception=e))

            if self.artifact_cache_writes_enabled():
                self.update_artifact_cache([(tool_vts, [shaded_jar])])

            return [shaded_jar]
Example #44
File: cookies.py Project: wiwa/pants
 def _lock(self):
     """An identity-keyed inter-process lock around the cookie file."""
     lockfile = "{}.lock".format(self._get_cookie_file())
     safe_mkdir_for(lockfile)
     return OwnerPrintingInterProcessFileLock(lockfile)
Example #45
 def _store_tarball(self, cache_key, src):
     dest = self._cache_file_for_key(cache_key)
     safe_mkdir_for(dest)
     os.rename(src, dest)
     return dest
Example #46
 @contextmanager
 def _connection(self):
   safe_mkdir_for(self._path)
   conn = sqlite3.connect(self._path)
   yield conn
   conn.commit()
   conn.close()
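The yield in Example #46 only makes sense with the @contextmanager decorator restored above it. A self-contained sketch of the same pattern (TimingStore and the table schema are hypothetical; the pants.util.dirutil import path is an assumption):

 import sqlite3
 from contextlib import contextmanager

 from pants.util.dirutil import safe_mkdir_for  # assumed import path

 class TimingStore(object):
   def __init__(self, path):
     self._path = path

   @contextmanager
   def _connection(self):
     # Ensure the parent directory exists before sqlite creates the db file.
     safe_mkdir_for(self._path)
     conn = sqlite3.connect(self._path)
     yield conn
     conn.commit()
     conn.close()

 # Usage: statements run in the block are committed when it exits cleanly.
 store = TimingStore('/tmp/timings/timings.sqlite')
 with store._connection() as conn:
   conn.execute('CREATE TABLE IF NOT EXISTS timings (path TEXT, secs REAL)')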