def execute(self):
    """Build local python_dist targets into wheels and publish them as products.

    For every built wheel a synthetic requirements library is injected into the
    build graph and the wheel is registered under the 'local_wheels' product.
    """
    targets = self.context.targets(is_local_python_dist)
    if not targets:
        return

    interpreter = self.context.products.get_data(PythonInterpreter)
    shared_libs = self.context.products.get(SharedLibrary)

    # Invalidate dependents so consumers of a rebuilt dist get rebuilt too.
    with self.invalidated(targets, invalidate_dependents=True) as check:
        # Only the invalidated targets need an actual (re)build.
        for vt in check.invalid_vts:
            self._prepare_and_create_dist(interpreter, shared_libs, vt)

        wheel_products = self.context.products.get('local_wheels')
        for vt in check.all_vts:
            whl = self._get_whl_from_dir(vt.results_dir)
            addr = Address.parse('{}__req_lib'.format(vt.target.address.spec))
            self._inject_synthetic_dist_requirements(whl, addr)
            # Make any target that depends on the dist depend on the synthetic
            # req_lib, for downstream consumption.
            for dep in self.context.build_graph.dependents_of(vt.target.address):
                self.context.build_graph.inject_dependency(dep, addr)
            whl_dir, whl_base = split_basename_and_dirname(whl)
            wheel_products.add(vt.target, whl_dir).append(whl_base)
    def _inject_synthetic_dist_requirements(self, dist, req_lib_addr):
        """Inject a synthetic requirements library that references a local wheel.

        :param dist: Path of the locally built wheel to reference.
        :param req_lib_addr: :class:`Address` to give to the synthetic target.
        :return: a :class:`PythonRequirementLibrary` referencing the locally-built wheel.
        """
        repo_dir, whl_filename = split_basename_and_dirname(dist)
        # Wheel filenames are '-'-separated; the first two fields are the
        # distribution name and version.
        fields = whl_filename.split('-')
        requirement = PythonRequirement('=='.join([fields[0], fields[1]]),
                                        repository=repo_dir)
        self.context.build_graph.inject_synthetic_target(
            req_lib_addr, PythonRequirementLibrary, requirements=[requirement])
  def _inject_synthetic_dist_requirements(self, dist, req_lib_addr):
    """Inject a synthetic requirements library that references a local wheel.

    :param dist: Path of the locally built wheel to reference.
    :param req_lib_addr:  :class: `Address` to give to the synthetic target.
    :return: a :class: `PythonRequirementLibrary` referencing the locally-built wheel.
    """
    whl_dir, base = split_basename_and_dirname(dist)
    # Wheel filenames are '-'-separated; fields 0 and 1 are the distribution
    # name and version (PEP 427 escapes '-' inside the name to '_').
    whl_metadata = base.split('-')
    req_name = '=='.join([whl_metadata[0], whl_metadata[1]])
    # Point the requirement's repository at the local directory holding the wheel.
    req = PythonRequirement(req_name, repository=whl_dir)
    self.context.build_graph.inject_synthetic_target(req_lib_addr, PythonRequirementLibrary,
                                                     requirements=[req])
# Example #4 (score: 0) — scraping artifact separating code samples; not Python code.
    def _invoke_xz(self, xz_input_file):
        """Run the xz command and yield a file object for its stdout.

    This allows streaming the decompressed tar archive directly into a tar decompression stream,
    which is significantly faster in practice than making a temporary file.

    :param xz_input_file: Path of the .xz file to decompress.
    :raises XZArchiverError: if xz cannot be invoked, or exits with a non-zero code.
    """
        # Split the configured binary path into its directory and executable name.
        (xz_bin_dir,
         xz_filename) = split_basename_and_dirname(self._xz_binary_path)

        # TODO(cosmicexplorer): --threads=0 is supposed to use "the number of processor cores on the
        # machine", but I see no more than 100% cpu used at any point. This seems like it could be a
        # bug? If performance is an issue, investigate further.
        cmd = [
            xz_filename, '--decompress', '--stdout', '--keep', '--threads=0',
            xz_input_file
        ]
        env = {
            # Isolate the path so we know we're using our provided version of xz.
            'PATH': xz_bin_dir,
            # Only allow our xz's lib directory to resolve the liblzma.so dependency at runtime.
            'LD_LIBRARY_PATH': self._xz_library_path,
        }
        try:
            # Pipe stderr to our own stderr, but leave stdout open so we can yield it.
            process = subprocess.Popen(cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=sys.stderr,
                                       env=env)
        except OSError as e:
            raise self.XZArchiverError(
                "Error invoking xz with command {} and environment {} for input file {}: {}"
                .format(cmd, env, xz_input_file, e), e)

        # This is a file object.
        yield process.stdout

        # Only reached after the caller resumes the generator (i.e. has finished
        # reading): reap the process and surface any decompression failure.
        rc = process.wait()
        if rc != 0:
            raise self.XZArchiverError(
                "Error decompressing xz input with command {} and environment {} for input file {}. "
                "Exit code was: {}. ".format(cmd, env, xz_input_file, rc))
# Example #5 (score: 0) — scraping artifact separating code samples; not Python code.
  def _invoke_xz(self, xz_input_file):
    """Run the xz command and yield a file object for its stdout.

    This allows streaming the decompressed tar archive directly into a tar decompression stream,
    which is significantly faster in practice than making a temporary file.
    """
    xz_bin_dir, xz_filename = split_basename_and_dirname(self._xz_binary_path)

    # TODO(cosmicexplorer): --threads=0 is supposed to use "the number of processor cores on the
    # machine", but I see no more than 100% cpu used at any point. This seems like it could be a
    # bug? If performance is an issue, investigate further.
    cmd = [xz_filename, '--decompress', '--stdout', '--keep', '--threads=0', xz_input_file]
    # Isolate PATH so we know we're using our provided version of xz, and restrict
    # LD_LIBRARY_PATH so the liblzma.so dependency resolves only from our xz's lib dir.
    env = {
      'PATH': xz_bin_dir,
      'LD_LIBRARY_PATH': self._xz_library_path,
    }
    try:
      # Pipe stderr through to our own, but keep stdout open so it can be yielded.
      proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=sys.stderr, env=env)
    except OSError as e:
      raise self.XZArchiverError(
        "Error invoking xz with command {} and environment {} for input file {}: {}"
        .format(cmd, env, xz_input_file, e),
        e)

    # Hand the decompressed stream (a file object) to the caller.
    yield proc.stdout

    # After the caller resumes us, reap the process and surface any failure.
    returncode = proc.wait()
    if returncode != 0:
      raise self.XZArchiverError(
        "Error decompressing xz input with command {} and environment {} for input file {}. "
        "Exit code was: {}. "
        .format(cmd, env, xz_input_file, returncode))
  def execute(self):
    """Build invalidated local python_dist targets into wheels and expose them.

    Each built wheel gets a synthetic requirements library injected into the build
    graph, and the wheel file is registered under the 'local_wheels' product.
    """
    dist_targets = self.context.targets(is_local_python_dist)

    if dist_targets:
      interpreter = self.context.products.get_data(PythonInterpreter)
      shared_libs_product = self.context.products.get(SharedLibrary)

      # Invalidating dependents ensures consumers of a rebuilt dist are rebuilt too.
      with self.invalidated(dist_targets, invalidate_dependents=True) as invalidation_check:
        # Only invalidated targets need an actual (re)build.
        for vt in invalidation_check.invalid_vts:
          self._prepare_and_create_dist(interpreter, shared_libs_product, vt)

        local_wheel_products = self.context.products.get('local_wheels')
        for vt in invalidation_check.all_vts:
          dist = self._get_whl_from_dir(vt.results_dir)
          # Synthetic address derived from the dist target's own spec.
          req_lib_addr = Address.parse('{}__req_lib'.format(vt.target.address.spec))
          self._inject_synthetic_dist_requirements(dist, req_lib_addr)
          # Make any target that depends on the dist depend on the synthetic req_lib,
          # for downstream consumption.
          for dependent in self.context.build_graph.dependents_of(vt.target.address):
            self.context.build_graph.inject_dependency(dependent, req_lib_addr)
          dist_dir, dist_base = split_basename_and_dirname(dist)
          local_wheel_products.add(vt.target, dist_dir).append(dist_base)