Example #1
 def group(value_component_type, process_val_func) -> list[RankedValue]:
     # We group any values that are merged together, so that the history can reflect
     # merges vs. replacements in a useful way. E.g., if we merge [a, b] and [c],
     # and then replace it with [d, e], the history will contain:
     #   - [d, e] (from command-line flag)
     #   - [a, b, c] (from env var, from config)
     # And similarly for dicts.
     grouped: list[list[RankedValue]] = [[]]
     for ranked_val in ranked_vals:
         if ranked_val.value and ranked_val.value.action == value_component_type.REPLACE:
             grouped.append([])
         grouped[-1].append(ranked_val)
     return [
         RankedValue(
             grp[-1].rank,
             process_val_func(
                 value_component_type.merge(
                     rv.value for rv in grp
                     if rv.value is not None).val),
             ", ".join(rv.details for rv in grp if rv.details),
         ) for grp in grouped if grp
     ]
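
A minimal, self-contained sketch of the grouping idea above, using toy stand-ins (ToyRankedValue, ToyComponent, Action) rather than the real Pants classes; it only illustrates how REPLACE boundaries split the history into merge groups.

from dataclasses import dataclass
from enum import Enum
from typing import List


class Action(Enum):
    MERGE = "merge"
    REPLACE = "replace"


@dataclass
class ToyComponent:
    action: Action
    val: List[str]


@dataclass
class ToyRankedValue:
    rank: int
    value: ToyComponent
    details: str = ""


def group_by_replace(ranked_vals: List[ToyRankedValue]) -> List[List[ToyRankedValue]]:
    # Start a fresh group whenever a value REPLACEs everything before it.
    grouped: List[List[ToyRankedValue]] = [[]]
    for rv in ranked_vals:
        if rv.value.action is Action.REPLACE:
            grouped.append([])
        grouped[-1].append(rv)
    return [grp for grp in grouped if grp]


history = [
    ToyRankedValue(1, ToyComponent(Action.MERGE, ["a", "b"]), "from config"),
    ToyRankedValue(2, ToyComponent(Action.MERGE, ["c"]), "from env var"),
    ToyRankedValue(3, ToyComponent(Action.REPLACE, ["d", "e"]), "from command-line flag"),
]
for grp in group_by_replace(history):
    merged = [x for rv in grp for x in rv.value.val]
    print(merged, "|", ", ".join(rv.details for rv in grp if rv.details))
# ['a', 'b', 'c'] | from config, from env var
# ['d', 'e'] | from command-line flag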
Example #2
    def format_option(self, ohi: OptionHelpInfo) -> List[str]:
        """Format the help output for a single option.

        :param ohi: Extracted information for option to print
        :return: Formatted help text for this option
        """

        def maybe_parens(s: Optional[str]) -> str:
            return f" ({s})" if s else ""

        def format_value(val: RankedValue, prefix: str, left_padding: str) -> List[str]:
            if isinstance(val.value, (list, dict)):
                val_lines = json.dumps(val.value, sort_keys=True, indent=4).split("\n")
            else:
                val_lines = [to_help_str(val.value)]
            val_lines[0] = f"{prefix}{val_lines[0]}"
            val_lines[-1] = f"{val_lines[-1]}{maybe_parens(val.details)}"
            val_lines = [self.maybe_cyan(f"{left_padding}{line}") for line in val_lines]
            return val_lines

        indent = "      "
        arg_lines = [f"  {self.maybe_magenta(args)}" for args in ohi.display_args]
        arg_lines.append(self.maybe_magenta(f"  {ohi.env_var}"))
        arg_lines.append(self.maybe_magenta(f"  {ohi.config_key}"))
        choices = "" if ohi.choices is None else f"one of: [{', '.join(ohi.choices)}]"
        choices_lines = [
            f"{indent}{'  ' if i != 0 else ''}{self.maybe_cyan(s)}"
            for i, s in enumerate(wrap(f"{choices}", 96))
        ]
        default_lines = format_value(RankedValue(Rank.HARDCODED, ohi.default), "default: ", indent)
        if not ohi.value_history:
            # Should never happen, but this keeps mypy happy.
            raise ValueError("No value history - options not parsed.")
        final_val = ohi.value_history.final_value
        curr_value_lines = format_value(final_val, "current value: ", indent)

        interesting_ranked_values = [
            rv
            for rv in reversed(ohi.value_history.ranked_values)
            if rv.rank not in (Rank.NONE, Rank.HARDCODED, final_val.rank)
        ]
        value_derivation_lines = [
            line
            for rv in interesting_ranked_values
            for line in format_value(rv, "overrode: ", f"{indent}    ")
        ]
        description_lines = ohi.help.splitlines()
        # wrap() returns [] for an empty line, but we want to emit those, hence the "or [line]".
        description_lines = [
            f"{indent}{s}" for line in description_lines for s in wrap(line, 96) or [line]
        ]
        lines = [
            *arg_lines,
            *choices_lines,
            *default_lines,
            *curr_value_lines,
            *value_derivation_lines,
            *description_lines,
        ]
        if ohi.deprecated_message:
            lines.append(self.maybe_red(f"{indent}{ohi.deprecated_message}."))
            if ohi.removal_hint:
                lines.append(self.maybe_red(f"{indent}{ohi.removal_hint}"))
        return lines
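
A standalone sketch of the rendering pattern used by format_value above, with plain strings in place of RankedValue and the color helpers: list/dict values are pretty-printed as multi-line JSON, the prefix goes on the first line and the details on the last. render_value is a hypothetical helper, and the "in pants.ini" string is just illustrative.

import json
from typing import Any, List, Optional


def render_value(value: Any, prefix: str, details: Optional[str], left_padding: str) -> List[str]:
    # Multi-line JSON for containers, a single line for everything else.
    if isinstance(value, (list, dict)):
        lines = json.dumps(value, sort_keys=True, indent=4).split("\n")
    else:
        lines = [str(value)]
    lines[0] = f"{prefix}{lines[0]}"
    if details:
        lines[-1] = f"{lines[-1]} ({details})"
    return [f"{left_padding}{line}" for line in lines]


print("\n".join(render_value({"b": 2, "a": 1}, "default: ", "in pants.ini", "  ")))
# Prints (two-space padding on every line):
#   default: {
#       "a": 1,
#       "b": 2
#   } (in pants.ini)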
Example #3
  def _compute_value(self, dest, kwargs, flag_vals):
    """Compute the value to use for an option.

    The source of the default value is chosen according to the ranking in RankedValue.
    """
    is_fromfile = kwargs.get('fromfile', False)
    action = kwargs.get('action')
    if is_fromfile and action and action != 'append':
      raise ParseError('Cannot fromfile {} with an action ({}) in scope {}'
                       .format(dest, action, self._scope))

    config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope
    udest = dest.upper()
    if self._scope == GLOBAL_SCOPE:
      # For convenience, we allow three forms of env var for global scope options.
      # The fully-specified env var is PANTS_DEFAULT_FOO, which is uniform with PANTS_<SCOPE>_FOO
      # for all the other scopes.  However we also allow simply PANTS_FOO. And if the option name
      # itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of
      # PANTS_PANTS_WORKDIR or PANTS_DEFAULT_PANTS_WORKDIR. We take the first specified value we
      # find, in this order: PANTS_DEFAULT_FOO, PANTS_FOO, FOO.
      env_vars = ['PANTS_DEFAULT_{0}'.format(udest), 'PANTS_{0}'.format(udest)]
      if udest.startswith('PANTS_'):
        env_vars.append(udest)
    else:
      sanitized_env_var_scope = self._ENV_SANITIZER_RE.sub('_', config_section.upper())
      env_vars = ['PANTS_{0}_{1}'.format(sanitized_env_var_scope, udest)]

    value_type = self.str_to_bool if is_boolean_flag(kwargs) else kwargs.get('type', str)

    env_val_str = None
    if self._env:
      for env_var in env_vars:
        if env_var in self._env:
          env_val_str = self._env.get(env_var)
          break

    config_val_str = self._config.get(config_section, dest, default=None)
    config_source_file = self._config.get_source_for_option(config_section, dest)
    if config_source_file is not None:
      config_source_file = os.path.relpath(config_source_file)

    def expand(val_str):
      if is_fromfile and val_str and val_str.startswith('@') and not val_str.startswith('@@'):
        fromfile = val_str[1:]
        try:
          with open(fromfile) as fp:
            return fp.read().strip()
        except IOError as e:
          raise self.FromfileError('Failed to read {} in {} from file {}: {}'.format(
            dest, self._scope_str(), fromfile, e))
      else:
        # Support a literal @ for fromfile values via @@.
        return val_str[1:] if is_fromfile and val_str.startswith('@@') else val_str

    def parse_typed_list(val_str):
      return None if val_str is None else [value_type(x) for x in list_option(expand(val_str))]

    def parse_typed_item(val_str):
      return None if val_str is None else value_type(expand(val_str))

    flag_val = None
    if flag_vals:
      if action == 'append':
        flag_val = [parse_typed_item(v) for v in flag_vals]
      elif len(flag_vals) > 1:
        raise ParseError('Multiple cmd line flags specified for option {} in {}'.format(
          dest, self._scope_str()))
      else:
        flag_val = parse_typed_item(flag_vals[0])

    default, parse = ([], parse_typed_list) if action == 'append' else (None, parse_typed_item)

    config_val = parse(config_val_str)
    env_val = parse(env_val_str)
    hardcoded_val = kwargs.get('default')

    config_details = 'in {}'.format(config_source_file) if config_source_file else None

    # Note: ranked_vals is guaranteed to have at least one element, and none of the values
    # of any of its elements will be None.
    ranked_vals = list(reversed(list(RankedValue.prioritized_iter(
      flag_val, env_val, config_val, hardcoded_val, default))))
    choices = kwargs.get('choices')
    for ranked_val in ranked_vals:
      details = config_details if ranked_val.rank == RankedValue.CONFIG else None
      self._option_tracker.record_option(scope=self._scope,
                                         option=dest,
                                         value=ranked_val.value,
                                         rank=ranked_val.rank,
                                         deprecation_version=kwargs.get('deprecated_version'),
                                         details=details)

    def check(val):
      if choices is not None and val is not None and val not in choices:
        raise ParseError('{} is not an allowed value for option {} in {}. '
                         'Must be one of: {}'.format(
          val, dest, self._scope_str(), choices
        ))
      return val

    if action == 'append':
      merged_rank = ranked_vals[-1].rank
      merged_val = [check(val) for vals in ranked_vals for val in vals.value]
      return RankedValue(merged_rank, merged_val)
    else:
      # Check each contributing value explicitly; map() is lazy in Python 3, so the
      # checks would otherwise never run.
      for rv in ranked_vals:
        check(rv.value)
      return ranked_vals[-1]
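
A small sketch of the environment variable lookup order described in the comment above, pulled out of the parser. The names mirror this snippet (PANTS_DEFAULT_ prefix for the global scope); the sanitizer regex, the empty-string global scope, and env_var_candidates itself are assumptions for illustration, not the real Pants internals.

import re

_ENV_SANITIZER_RE = re.compile(r"[.-]")
GLOBAL_SCOPE = ""  # Assumption: the global scope is represented by the empty string.


def env_var_candidates(scope: str, dest: str) -> list:
    """Return env var names to try, in priority order."""
    udest = dest.upper()
    if scope == GLOBAL_SCOPE:
        candidates = [f"PANTS_DEFAULT_{udest}", f"PANTS_{udest}"]
        if udest.startswith("PANTS_"):
            candidates.append(udest)  # e.g. PANTS_WORKDIR itself
        return candidates
    sanitized = _ENV_SANITIZER_RE.sub("_", scope.upper())
    return [f"PANTS_{sanitized}_{udest}"]


print(env_var_candidates("", "pants_workdir"))
# ['PANTS_DEFAULT_PANTS_WORKDIR', 'PANTS_PANTS_WORKDIR', 'PANTS_WORKDIR']
print(env_var_candidates("cache.compile", "read_from"))
# ['PANTS_CACHE_COMPILE_READ_FROM']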
Example #4
    def _compute_value(self, dest, kwargs, flag_val_strs):
        """Compute the value to use for an option.

        The source of the default value is chosen according to the ranking in RankedValue.
        """

        # Helper function to convert a string to a value of the option's type.
        def to_value_type(val_str):
            if val_str is None:
                return None
            elif kwargs.get('type') == bool:
                return self._ensure_bool(val_str)
            else:
                return self._wrap_type(kwargs.get('type', str))(val_str)

        # Helper function to expand a fromfile=True value string, if needed.
        def expand(val_str):
            if kwargs.get('fromfile',
                          False) and val_str and val_str.startswith('@'):
                if val_str.startswith(
                        '@@'
                ):  # Support a literal @ for fromfile values via @@.
                    return val_str[1:]
                else:
                    fromfile = val_str[1:]
                    try:
                        with open(fromfile) as fp:
                            return fp.read().strip()
                    except IOError as e:
                        raise self.FromfileError(
                            'Failed to read {} in {} from file {}: {}'.format(
                                dest, self._scope_str(), fromfile, e))
            else:
                return val_str

        # Get value from config files, and capture details about its derivation.
        config_details = None
        config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope
        config_val_str = expand(
            self._config.get(config_section, dest, default=None))
        config_source_file = self._config.get_source_for_option(
            config_section, dest)
        if config_source_file is not None:
            config_source_file = os.path.relpath(config_source_file)
            config_details = 'in {}'.format(config_source_file)

        # Get value from environment, and capture details about its derivation.
        udest = dest.upper()
        if self._scope == GLOBAL_SCOPE:
            # For convenience, we allow three forms of env var for global scope options.
            # The fully-specified env var is PANTS_GLOBAL_FOO, which is uniform with PANTS_<SCOPE>_FOO
            # for all the other scopes.  However we also allow simply PANTS_FOO. And if the option name
            # itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of
            # PANTS_PANTS_WORKDIR or PANTS_GLOBAL_PANTS_WORKDIR. We take the first specified value we
            # find, in this order: PANTS_GLOBAL_FOO, PANTS_FOO, FOO.
            env_vars = [
                'PANTS_GLOBAL_{0}'.format(udest), 'PANTS_{0}'.format(udest)
            ]
            if udest.startswith('PANTS_'):
                env_vars.append(udest)
        else:
            sanitized_env_var_scope = self._ENV_SANITIZER_RE.sub(
                '_', self._scope.upper())
            env_vars = ['PANTS_{0}_{1}'.format(sanitized_env_var_scope, udest)]

        env_val_str = None
        env_details = None
        if self._env:
            for env_var in env_vars:
                if env_var in self._env:
                    env_val_str = expand(self._env.get(env_var))
                    env_details = 'from env var {}'.format(env_var)
                    break

        # Get value from cmd-line flags.
        flag_vals = [to_value_type(expand(x)) for x in flag_val_strs]
        if is_list_option(kwargs):
            # Note: It's important to set flag_val to None if no flags were specified, so we can
            # distinguish between no flags set vs. explicit setting of the value to [].
            flag_val = ListValueComponent.merge(
                flag_vals) if flag_vals else None
        elif is_dict_option(kwargs):
            # Note: It's important to set flag_val to None if no flags were specified, so we can
            # distinguish between no flags set vs. explicit setting of the value to {}.
            flag_val = DictValueComponent.merge(
                flag_vals) if flag_vals else None
        elif len(flag_vals) > 1:
            raise ParseError(
                'Multiple cmd line flags specified for option {} in {}'.format(
                    dest, self._scope_str()))
        elif len(flag_vals) == 1:
            flag_val = flag_vals[0]
        else:
            flag_val = None

        # Rank all available values.
        # Note that some of these values may already be of the value type, but type conversion
        # is idempotent, so this is OK.

        values_to_rank = [
            to_value_type(x) for x in [
                flag_val, env_val_str, config_val_str,
                kwargs.get('default'), None
            ]
        ]
        # Note that ranked_vals will always have at least one element, and all elements will be
        # instances of RankedValue (so none will be None, although they may wrap a None value).
        ranked_vals = list(
            reversed(list(RankedValue.prioritized_iter(*values_to_rank))))

        # Record info about the derivation of each of the values.
        for ranked_val in ranked_vals:
            if ranked_val.rank == RankedValue.CONFIG:
                details = config_details
            elif ranked_val.rank == RankedValue.ENVIRONMENT:
                details = env_details
            else:
                details = None
            self._option_tracker.record_option(
                scope=self._scope,
                option=dest,
                value=ranked_val.value,
                rank=ranked_val.rank,
                deprecation_version=kwargs.get('removal_version'),
                details=details)

        # Helper function to check various validity constraints on final option values.
        def check(val):
            if val is not None:
                choices = kwargs.get('choices')
                if choices is not None and val not in choices:
                    raise ParseError(
                        '`{}` is not an allowed value for option {} in {}. '
                        'Must be one of: {}'.format(val, dest,
                                                    self._scope_str(),
                                                    choices))
                elif kwargs.get(
                        'type') == dir_option and not os.path.isdir(val):
                    raise ParseError(
                        'Directory value `{}` for option {} in {} does not exist.'
                        .format(val, dest, self._scope_str()))
                elif kwargs.get(
                        'type') == file_option and not os.path.isfile(val):
                    raise ParseError(
                        'File value `{}` for option {} in {} does not exist.'.
                        format(val, dest, self._scope_str()))

        # Generate the final value from all available values, and check that it (or its members,
        # if a list) are in the set of allowed choices.
        if is_list_option(kwargs):
            merged_rank = ranked_vals[-1].rank
            merged_val = ListValueComponent.merge(
                [rv.value for rv in ranked_vals if rv.value is not None]).val
            merged_val = [
                self._convert_member_type(kwargs.get('member_type', str), x)
                for x in merged_val
            ]
            # Validate each member explicitly; map() is lazy in Python 3 and the checks
            # would never run.
            for val in merged_val:
                check(val)
            ret = RankedValue(merged_rank, merged_val)
        elif is_dict_option(kwargs):
            merged_rank = ranked_vals[-1].rank
            merged_val = DictValueComponent.merge(
                [rv.value for rv in ranked_vals if rv.value is not None]).val
            # Validate explicitly; map() is lazy in Python 3 and the checks would never run.
            for val in merged_val:
                check(val)
            ret = RankedValue(merged_rank, merged_val)
        else:
            ret = ranked_vals[-1]
            check(ret.value)

        # All done!
        return ret
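
A toy illustration of the ranking step above: each source gets a rank, sources whose value is None drop out, and the last (highest-ranked) survivor wins. This sketches the idea behind RankedValue.prioritized_iter; the Rank enum and prioritized() helper here are illustrative stand-ins, not the real API.

from enum import IntEnum
from typing import Any, List, Tuple


class Rank(IntEnum):
    NONE = 0
    HARDCODED = 1
    CONFIG = 2
    ENVIRONMENT = 3
    FLAG = 4


def prioritized(flag: Any, env: Any, config: Any, hardcoded: Any) -> List[Tuple[Rank, Any]]:
    """Return (rank, value) pairs, lowest rank first, for every non-None source.

    The NONE sentinel is always kept, so the result has at least one element.
    """
    pairs = [
        (Rank.FLAG, flag),
        (Rank.ENVIRONMENT, env),
        (Rank.CONFIG, config),
        (Rank.HARDCODED, hardcoded),
        (Rank.NONE, None),
    ]
    return [(rank, val) for rank, val in reversed(pairs) if val is not None or rank is Rank.NONE]


ranked_vals = prioritized(flag=None, env="from_env", config="from_config", hardcoded="default")
print(ranked_vals[-1])  # (<Rank.ENVIRONMENT: 3>, 'from_env'): env beats config and the default.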
Example #5
  def test_rsc_dep_for_scala_java_and_test_targets(self):
    self.set_options(workflow=RankedValue(
      value=RscCompile.JvmCompileWorkflowType.rsc_and_zinc,
      rank=RankedValue.CONFIG,
    ))
    self.init_dependencies_for_scala_libraries()

    scala_dep = self.make_target(
      'scala/classpath:scala_dep',
      target_type=ScalaLibrary,
      sources=['com/example/Bar.scala']
    )
    java_target = self.make_target(
      'java/classpath:java_lib',
      target_type=JavaLibrary,
      sources=['com/example/Foo.java'],
      dependencies=[scala_dep],
      tags={'use-compiler:zinc-only'}
    )
    scala_target = self.make_target(
      'scala/classpath:scala_lib',
      target_type=ScalaLibrary,
      sources=['com/example/Foo.scala'],
      dependencies=[scala_dep]
    )

    test_target = self.make_target(
      'scala/classpath:scala_test',
      target_type=JUnitTests,
      sources=['com/example/Test.scala'],
      dependencies=[scala_target],
      tags={'use-compiler:zinc-only'}
    )

    with temporary_dir() as tmp_dir:
      invalid_targets = [java_target, scala_target, scala_dep, test_target]
      task = self.create_task_with_target_roots(
        target_roots=[java_target, scala_target, test_target]
      )

      jobs = task._create_compile_jobs(
        compile_contexts=self.create_compile_contexts(invalid_targets, task, tmp_dir),
        invalid_targets=invalid_targets,
        invalid_vts=self.wrap_in_vts(invalid_targets),
        classpath_product=None)

      dependee_graph = self.construct_dependee_graph_str(jobs, task)

      self.assertEqual(dedent("""
                     zinc[zinc-java](java/classpath:java_lib) <- {}
                     rsc(scala/classpath:scala_lib) <- {
                       zinc[zinc-only](scala/classpath:scala_test)
                     }
                     zinc[rsc-and-zinc](scala/classpath:scala_lib) <- {}
                     rsc(scala/classpath:scala_dep) <- {
                       rsc(scala/classpath:scala_lib),
                       zinc[rsc-and-zinc](scala/classpath:scala_lib),
                       zinc[zinc-only](scala/classpath:scala_test)
                     }
                     zinc[rsc-and-zinc](scala/classpath:scala_dep) <- {
                       zinc[zinc-java](java/classpath:java_lib)
                     }
                     zinc[zinc-only](scala/classpath:scala_test) <- {}""").strip(),
        dependee_graph)
Example #6
    def format_option(self, ohi: OptionHelpInfo) -> List[str]:
        """Format the help output for a single option.

        :param ohi: Extracted information for option to print
        :return: Formatted help text for this option
        """
        def maybe_parens(s: Optional[str]) -> str:
            return f" ({s})" if s else ""

        def format_value(ranked_val: RankedValue, prefix: str,
                         left_padding: str) -> List[str]:
            if isinstance(ranked_val.value, (list, dict)):
                is_enum_list = (isinstance(ranked_val.value, list)
                                and len(ranked_val.value) > 0
                                and isinstance(ranked_val.value[0], Enum))
                normalized_val = ([
                    enum_elmt.value for enum_elmt in ranked_val.value
                ] if is_enum_list else ranked_val.value)
                val_lines = json.dumps(normalized_val,
                                       sort_keys=True,
                                       indent=4).split("\n")
            else:
                val_lines = [to_help_str(ranked_val.value)]
            val_lines[0] = f"{prefix}{val_lines[0]}"
            val_lines[-1] = f"{val_lines[-1]}{maybe_parens(ranked_val.details)}"
            val_lines = [
                self.maybe_cyan(f"{left_padding}{line}") for line in val_lines
            ]
            return val_lines

        indent = "      "
        arg_lines = [
            f"  {self.maybe_magenta(args)}" for args in ohi.display_args
        ]
        arg_lines.append(self.maybe_magenta(f"  {ohi.env_var}"))
        arg_lines.append(self.maybe_magenta(f"  {ohi.config_key}"))
        choices = "" if ohi.choices is None else f"one of: [{', '.join(ohi.choices)}]"
        choices_lines = [
            f"{indent}{'  ' if i != 0 else ''}{self.maybe_cyan(s)}"
            for i, s in enumerate(textwrap.wrap(f"{choices}", self._width))
        ]
        default_lines = format_value(RankedValue(Rank.HARDCODED, ohi.default),
                                     "default: ", indent)
        if not ohi.value_history:
            # Should never happen, but this keeps mypy happy.
            raise ValueError("No value history - options not parsed.")
        final_val = ohi.value_history.final_value
        curr_value_lines = format_value(final_val, "current value: ", indent)

        interesting_ranked_values = [
            rv for rv in reversed(ohi.value_history.ranked_values)
            if rv.rank not in (Rank.NONE, Rank.HARDCODED, final_val.rank)
        ]
        value_derivation_lines = [
            line for rv in interesting_ranked_values
            for line in format_value(rv, "overrode: ", f"{indent}    ")
        ]
        description_lines = hard_wrap(ohi.help,
                                      indent=len(indent),
                                      width=self._width)
        lines = [
            *arg_lines,
            *choices_lines,
            *default_lines,
            *curr_value_lines,
            *value_derivation_lines,
            *description_lines,
        ]
        if ohi.deprecated_message:
            maybe_colorize = self.maybe_red if ohi.deprecation_active else self.maybe_yellow
            lines.append(maybe_colorize(f"{indent}{ohi.deprecated_message}"))
            if ohi.removal_hint:
                lines.append(maybe_colorize(f"{indent}{ohi.removal_hint}"))
        return lines
Example #7
    def _compute_value(self, dest, kwargs, flag_val_strs):
        """Compute the value to use for an option.

        The source of the default value is chosen according to the ranking in RankedValue.
        """

        # Helper function to convert a string to a value of the option's type.
        def to_value_type(val_str):
            if val_str is None:
                return None
            if kwargs.get("type") == bool:
                return self._ensure_bool(val_str)
            type_arg = kwargs.get("type", str)
            try:
                return self._wrap_type(type_arg)(val_str)
            except (TypeError, ValueError) as e:
                raise ParseError(
                    f"Error applying type '{type_arg.__name__}' to option value '{val_str}', for option "
                    f"'--{dest}' in {self._scope_str()}: {e}")

        # Helper function to expand a fromfile=True value string, if needed.
        # May return a string or a dict/list decoded from a json/yaml file.
        def expand(val_or_str):
            if (kwargs.get("fromfile", True) and isinstance(val_or_str, str)
                    and val_or_str.startswith("@")):
                if val_or_str.startswith(
                        "@@"
                ):  # Support a literal @ for fromfile values via @@.
                    return val_or_str[1:]
                else:
                    fromfile = val_or_str[1:]
                    try:
                        with open(fromfile, "r") as fp:
                            s = fp.read().strip()
                            if fromfile.endswith(".json"):
                                return json.loads(s)
                            elif fromfile.endswith(
                                    ".yml") or fromfile.endswith(".yaml"):
                                return yaml.safe_load(s)
                            else:
                                return s
                    except (IOError, ValueError, yaml.YAMLError) as e:
                        raise FromfileError(
                            f"Failed to read {dest} in {self._scope_str()} from file {fromfile}: {e!r}"
                        )
            else:
                return val_or_str

        # Get value from config files, and capture details about its derivation.
        config_details = None
        config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope
        config_default_val_or_str = expand(
            self._config.get(Config.DEFAULT_SECTION, dest, default=None))
        config_val_or_str = expand(
            self._config.get(config_section, dest, default=None))
        config_source_file = self._config.get_source_for_option(
            config_section, dest) or self._config.get_source_for_option(
                Config.DEFAULT_SECTION, dest)
        if config_source_file is not None:
            config_source_file = os.path.relpath(config_source_file)
            config_details = f"in {config_source_file}"

        # Get value from environment, and capture details about its derivation.
        udest = dest.upper()
        if self._scope == GLOBAL_SCOPE:
            # For convenience, we allow three forms of env var for global scope options.
            # The fully-specified env var is PANTS_GLOBAL_FOO, which is uniform with PANTS_<SCOPE>_FOO
            # for all the other scopes.  However we also allow simply PANTS_FOO. And if the option name
            # itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of
            # PANTS_PANTS_WORKDIR or PANTS_GLOBAL_PANTS_WORKDIR. We take the first specified value we
            # find, in this order: PANTS_GLOBAL_FOO, PANTS_FOO, FOO.
            env_vars = [f"PANTS_GLOBAL_{udest}", f"PANTS_{udest}"]
            if udest.startswith("PANTS_"):
                env_vars.append(udest)
        else:
            sanitized_env_var_scope = self._ENV_SANITIZER_RE.sub(
                "_", self._scope.upper())
            env_vars = [f"PANTS_{sanitized_env_var_scope}_{udest}"]

        env_val_or_str = None
        env_details = None
        if self._env:
            for env_var in env_vars:
                if env_var in self._env:
                    env_val_or_str = expand(self._env.get(env_var))
                    env_details = f"from env var {env_var}"
                    break

        # Get value from cmd-line flags.
        flag_vals = [to_value_type(expand(x)) for x in flag_val_strs]
        if is_list_option(kwargs):
            # Note: It's important to set flag_val to None if no flags were specified, so we can
            # distinguish between no flags set vs. explicit setting of the value to [].
            flag_val = ListValueComponent.merge(
                flag_vals) if flag_vals else None
        elif is_dict_option(kwargs):
            # Note: It's important to set flag_val to None if no flags were specified, so we can
            # distinguish between no flags set vs. explicit setting of the value to {}.
            flag_val = DictValueComponent.merge(
                flag_vals) if flag_vals else None
        elif len(flag_vals) > 1:
            raise ParseError(
                "Multiple cmd line flags specified for option {} in {}".format(
                    dest, self._scope_str()))
        elif len(flag_vals) == 1:
            flag_val = flag_vals[0]
        else:
            flag_val = None

        # Rank all available values.
        # Note that some of these values may already be of the value type, but type conversion
        # is idempotent, so this is OK.

        values_to_rank = [
            to_value_type(x) for x in [
                flag_val,
                env_val_or_str,
                config_val_or_str,
                config_default_val_or_str,
                kwargs.get("default"),
                None,
            ]
        ]
        # Note that ranked_vals will always have at least one element, and all elements will be
        # instances of RankedValue (so none will be None, although they may wrap a None value).
        ranked_vals = list(
            reversed(list(RankedValue.prioritized_iter(*values_to_rank))))

        def record_option(value, rank, option_details=None):
            deprecation_version = kwargs.get("removal_version")
            self._option_tracker.record_option(
                scope=self._scope,
                option=dest,
                value=value,
                rank=rank,
                deprecation_version=deprecation_version,
                details=option_details,
            )

        # Record info about the derivation of each of the contributing values.
        detail_history = []
        for ranked_val in ranked_vals:
            if ranked_val.rank in (RankedValue.CONFIG,
                                   RankedValue.CONFIG_DEFAULT):
                details = config_details
            elif ranked_val.rank == RankedValue.ENVIRONMENT:
                details = env_details
            else:
                details = None
            if details:
                detail_history.append(details)
            record_option(value=ranked_val.value,
                          rank=ranked_val.rank,
                          option_details=details)

        # Helper function to check various validity constraints on final option values.
        def check(val):
            if val is None:
                return
            choices = kwargs.get("choices")
            type_arg = kwargs.get("type")
            if choices is None and "type" in kwargs:
                if inspect.isclass(type_arg) and issubclass(type_arg, Enum):
                    choices = list(type_arg)
            # TODO: convert this into an enum() pattern match!
            if choices is not None and val not in choices:
                raise ParseError(
                    "`{}` is not an allowed value for option {} in {}. "
                    "Must be one of: {}".format(val, dest, self._scope_str(),
                                                choices))

            if type_arg == file_option:
                check_file_exists(val)
            if type_arg == dir_option:
                check_dir_exists(val)

        def check_file_exists(val) -> None:
            error_prefix = f"File value `{val}` for option `{dest}` in `{self._scope_str()}`"
            try:
                path = Path(val)
                path_with_buildroot = Path(get_buildroot(), val)
            except TypeError:
                raise ParseError(
                    f"{error_prefix} cannot be parsed as a file path.")
            if not path.is_file() and not path_with_buildroot.is_file():
                raise ParseError(f"{error_prefix} does not exist.")

        def check_dir_exists(val) -> None:
            error_prefix = f"Directory value `{val}` for option `{dest}` in `{self._scope_str()}`"
            try:
                path = Path(val)
                path_with_buildroot = Path(get_buildroot(), val)
            except TypeError:
                raise ParseError(
                    f"{error_prefix} cannot be parsed as a directory path.")
            if not path.is_dir() and not path_with_buildroot.is_dir():
                raise ParseError(f"{error_prefix} does not exist.")

        # Generate the final value from all available values, and check that it (or its members,
        # if a list) are in the set of allowed choices.
        if is_list_option(kwargs):
            merged_rank = ranked_vals[-1].rank
            merged_val = ListValueComponent.merge(
                [rv.value for rv in ranked_vals if rv.value is not None]).val
            # TODO: run `check()` for all elements of a list option too!!!
            merged_val = [
                self._convert_member_type(kwargs.get("member_type", str), x)
                for x in merged_val
            ]
            if kwargs.get("member_type") == shell_str:
                merged_val = flatten_shlexed_list(merged_val)
            for val in merged_val:
                check(val)
            ret = RankedValue(merged_rank, merged_val)
        elif is_dict_option(kwargs):
            # TODO: convert `member_type` for dict values too!
            merged_rank = ranked_vals[-1].rank
            merged_val = DictValueComponent.merge(
                [rv.value for rv in ranked_vals if rv.value is not None]).val
            for val in merged_val:
                check(val)
            ret = RankedValue(merged_rank, merged_val)
        else:
            ret = ranked_vals[-1]
            check(ret.value)

        # Record info about the derivation of the final value.
        merged_details = ", ".join(detail_history) if detail_history else None
        record_option(value=ret.value,
                      rank=ret.rank,
                      option_details=merged_details)

        # All done!
        return ret
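
A standalone sketch of the @/@@ fromfile convention implemented by expand() above: "@path" loads the value from a file (decoding .json/.yml/.yaml contents), while "@@" escapes a literal "@". expand_fromfile is a hypothetical helper; it assumes PyYAML is importable and trims the error handling.

import json

import yaml


def expand_fromfile(val_or_str):
    if not (isinstance(val_or_str, str) and val_or_str.startswith("@")):
        return val_or_str
    if val_or_str.startswith("@@"):  # Literal @, e.g. "@@foo" -> "@foo".
        return val_or_str[1:]
    path = val_or_str[1:]
    with open(path) as fp:
        s = fp.read().strip()
    if path.endswith(".json"):
        return json.loads(s)
    if path.endswith((".yml", ".yaml")):
        return yaml.safe_load(s)
    return s


# Example: a flag value of "@opts.json" would be replaced by the decoded contents of
# opts.json, while "@@literal" simply becomes "@literal".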
Example #8
    def execute(self):
        requested_compiler = JvmPlatform.global_instance().get_options().compiler
        if requested_compiler != self.compiler_name:
            return
        deprecated_conditional(
            lambda: requested_compiler == self.Compiler.ZINC,
            removal_version='1.20.0.dev0',
            entity_description='Requested a deprecated compiler: [{}].'.format(
                requested_compiler),
            hint_message='Compiler will be defaulted to [{}].'.format(
                self.compiler_name))

        if requested_compiler == self.Compiler.ZINC and self.compiler_name == self.Compiler.RSC:
            # Issue a deprecation warning (above) and rewrite zinc to rsc, as zinc is being deprecated.
            JvmPlatform.global_instance().get_options().compiler = RankedValue(
                0, self.compiler_name)
        elif requested_compiler != self.compiler_name:
            # If the requested compiler is not the one supported by this task, log and abort
            self.context.log.debug(
                'Requested an unsupported compiler [{}], aborting'.format(
                    requested_compiler))
            return

        # In case we have no relevant targets and return early, create the requested product maps.
        self.create_empty_extra_products()

        relevant_targets = list(self.context.targets(predicate=self.select))

        if not relevant_targets:
            return

        # Clone the compile_classpath to the runtime_classpath.
        classpath_product = self.create_runtime_classpath()

        fingerprint_strategy = DependencyContext.global_instance(
        ).create_fingerprint_strategy(classpath_product)
        # Note, JVM targets are validated (`vts.update()`) as they succeed.  As a result,
        # we begin writing artifacts out to the cache immediately instead of waiting for
        # all targets to finish.
        with self.invalidated(relevant_targets,
                              invalidate_dependents=True,
                              fingerprint_strategy=fingerprint_strategy,
                              topological_order=True) as invalidation_check:

            compile_contexts = {
                vt.target: self.create_compile_context(vt.target,
                                                       vt.results_dir)
                for vt in invalidation_check.all_vts
            }

            self.do_compile(
                invalidation_check,
                compile_contexts,
                classpath_product,
            )

            if not self.get_options().use_classpath_jars:
                # Once compilation has completed, replace the classpath entry for each target with
                # its jar'd representation.
                for ccs in compile_contexts.values():
                    cc = self.select_runtime_context(ccs)
                    for conf in self._confs:
                        classpath_product.remove_for_target(
                            cc.target, [(conf, cc.classes_dir)])
                        classpath_product.add_for_target(
                            cc.target, [(conf, cc.jar_file)])
Example #9
  def _test_outlining_dep_for_scala_java_and_test_targets(self, youtline):
    if youtline:
      workflow = RscCompile.JvmCompileWorkflowType.outline_and_zinc
      key_str = "outline"
    else:
      workflow = RscCompile.JvmCompileWorkflowType.rsc_and_zinc
      key_str = "rsc"

    self.set_options(workflow=RankedValue(
      value=workflow,
      rank=RankedValue.CONFIG,
    ))
    self.init_dependencies_for_scala_libraries()

    scala_dep = self.make_target(
      'scala/classpath:scala_dep',
      target_type=ScalaLibrary,
      sources=['com/example/Bar.scala']
    )
    java_target = self.make_target(
      'java/classpath:java_lib',
      target_type=JavaLibrary,
      sources=['com/example/Foo.java'],
      dependencies=[scala_dep],
      tags={'use-compiler:zinc-only'}
    )
    scala_target = self.make_target(
      'scala/classpath:scala_lib',
      target_type=ScalaLibrary,
      sources=['com/example/Foo.scala'],
      dependencies=[scala_dep]
    )

    test_target = self.make_target(
      'scala/classpath:scala_test',
      target_type=JUnitTests,
      sources=['com/example/Test.scala'],
      dependencies=[scala_target],
      tags={'use-compiler:zinc-only'}
    )

    with temporary_dir(root_dir=self.build_root) as tmp_dir:
      invalid_targets = [java_target, scala_target, scala_dep, test_target]
      task = self.create_task_with_target_roots(
        target_roots=[java_target, scala_target, test_target]
      )

      jobs = task._create_compile_jobs(
        compile_contexts=self.create_compile_contexts(invalid_targets, task, tmp_dir),
        invalid_targets=invalid_targets,
        invalid_vts=self.wrap_in_vts(invalid_targets),
        classpath_product=None)

      dependee_graph = self.construct_dependee_graph_str(jobs, task)

      self.maxDiff = None
      # Double curly braces {{}} because this is an f-string.
      self.assertEqual(dedent(f"""
                     double_check_cache(java/classpath:java_lib) <- {{
                       zinc[zinc-java](java/classpath:java_lib)
                     }}
                     zinc[zinc-java](java/classpath:java_lib) <- {{
                       write_to_cache(java/classpath:java_lib)
                     }}
                     write_to_cache(java/classpath:java_lib) <- {{}}
                     double_check_cache(scala/classpath:scala_lib) <- {{
                       {key_str}(scala/classpath:scala_lib),
                       zinc[{key_str}-and-zinc](scala/classpath:scala_lib)
                     }}
                     {key_str}(scala/classpath:scala_lib) <- {{
                       write_to_cache(scala/classpath:scala_lib),
                       double_check_cache(scala/classpath:scala_test),
                       zinc[zinc-only](scala/classpath:scala_test)
                     }}
                     zinc[{key_str}-and-zinc](scala/classpath:scala_lib) <- {{
                       write_to_cache(scala/classpath:scala_lib)
                     }}
                     write_to_cache(scala/classpath:scala_lib) <- {{}}
                     double_check_cache(scala/classpath:scala_dep) <- {{
                       {key_str}(scala/classpath:scala_dep),
                       zinc[{key_str}-and-zinc](scala/classpath:scala_dep)
                     }}
                     {key_str}(scala/classpath:scala_dep) <- {{
                       double_check_cache(scala/classpath:scala_lib),
                       {key_str}(scala/classpath:scala_lib),
                       zinc[{key_str}-and-zinc](scala/classpath:scala_lib),
                       write_to_cache(scala/classpath:scala_dep),
                       double_check_cache(scala/classpath:scala_test),
                       zinc[zinc-only](scala/classpath:scala_test)
                     }}
                     zinc[{key_str}-and-zinc](scala/classpath:scala_dep) <- {{
                       double_check_cache(java/classpath:java_lib),
                       zinc[zinc-java](java/classpath:java_lib),
                       write_to_cache(scala/classpath:scala_dep)
                     }}
                     write_to_cache(scala/classpath:scala_dep) <- {{}}
                     double_check_cache(scala/classpath:scala_test) <- {{
                       zinc[zinc-only](scala/classpath:scala_test)
                     }}
                     zinc[zinc-only](scala/classpath:scala_test) <- {{
                       write_to_cache(scala/classpath:scala_test)
                     }}
                     write_to_cache(scala/classpath:scala_test) <- {{}}
                     """).strip(),
        dependee_graph)
Example #10
    def _compute_value(self, dest, kwargs, flag_val_strs):
        """Compute the value to use for an option.

        The source of the default value is chosen according to the ranking in RankedValue.
        """

        # Helper function to convert a string to a value of the option's type.
        def to_value_type(val_str):
            if val_str is None:
                return None
            elif kwargs.get('type') == bool:
                return self._ensure_bool(val_str)
            else:
                type_arg = kwargs.get('type', str)
                try:
                    return self._wrap_type(type_arg)(val_str)
                except TypeError as e:
                    raise ParseError(
                        "Error applying type '{}' to option value '{}', for option '--{}' in {}: {}"
                        .format(type_arg.__name__, val_str, dest,
                                self._scope_str(), e))

        # Helper function to expand a fromfile=True value string, if needed.
        # May return a string or a dict/list decoded from a json/yaml file.
        def expand(val_or_str):
            if (kwargs.get('fromfile', True) and val_or_str
                    and isinstance(val_or_str, str)
                    and val_or_str.startswith('@')):
                if val_or_str.startswith(
                        '@@'
                ):  # Support a literal @ for fromfile values via @@.
                    return val_or_str[1:]
                else:
                    fromfile = val_or_str[1:]
                    try:
                        with open(fromfile, 'r') as fp:
                            s = fp.read().strip()
                            if fromfile.endswith('.json'):
                                return json.loads(s)
                            elif fromfile.endswith(
                                    '.yml') or fromfile.endswith('.yaml'):
                                return yaml.safe_load(s)
                            else:
                                return s
                    except (IOError, ValueError, yaml.YAMLError) as e:
                        raise self.FromfileError(
                            'Failed to read {} in {} from file {}: {}'.format(
                                dest, self._scope_str(), fromfile, e))
            else:
                return val_or_str

        # Get value from config files, and capture details about its derivation.
        config_details = None
        config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope
        config_default_val_or_str = expand(
            self._config.get(Config.DEFAULT_SECTION, dest, default=None))
        config_val_or_str = expand(
            self._config.get(config_section, dest, default=None))
        config_source_file = (self._config.get_source_for_option(
            config_section, dest) or self._config.get_source_for_option(
                Config.DEFAULT_SECTION, dest))
        if config_source_file is not None:
            config_source_file = os.path.relpath(config_source_file)
            config_details = 'in {}'.format(config_source_file)

        # Get value from environment, and capture details about its derivation.
        udest = dest.upper()
        if self._scope == GLOBAL_SCOPE:
            # For convenience, we allow three forms of env var for global scope options.
            # The fully-specified env var is PANTS_GLOBAL_FOO, which is uniform with PANTS_<SCOPE>_FOO
            # for all the other scopes.  However we also allow simply PANTS_FOO. And if the option name
            # itself starts with 'pants-' then we also allow simply FOO. E.g., PANTS_WORKDIR instead of
            # PANTS_PANTS_WORKDIR or PANTS_GLOBAL_PANTS_WORKDIR. We take the first specified value we
            # find, in this order: PANTS_GLOBAL_FOO, PANTS_FOO, FOO.
            env_vars = [
                'PANTS_GLOBAL_{0}'.format(udest), 'PANTS_{0}'.format(udest)
            ]
            if udest.startswith('PANTS_'):
                env_vars.append(udest)
        else:
            sanitized_env_var_scope = self._ENV_SANITIZER_RE.sub(
                '_', self._scope.upper())
            env_vars = ['PANTS_{0}_{1}'.format(sanitized_env_var_scope, udest)]

        env_val_or_str = None
        env_details = None
        if self._env:
            for env_var in env_vars:
                if env_var in self._env:
                    env_val_or_str = expand(self._env.get(env_var))
                    env_details = 'from env var {}'.format(env_var)
                    break

        # Get value from cmd-line flags.
        flag_vals = [to_value_type(expand(x)) for x in flag_val_strs]
        if is_list_option(kwargs):
            # Note: It's important to set flag_val to None if no flags were specified, so we can
            # distinguish between no flags set vs. explicit setting of the value to [].
            flag_val = ListValueComponent.merge(
                flag_vals) if flag_vals else None
        elif is_dict_option(kwargs):
            # Note: It's important to set flag_val to None if no flags were specified, so we can
            # distinguish between no flags set vs. explicit setting of the value to {}.
            flag_val = DictValueComponent.merge(
                flag_vals) if flag_vals else None
        elif len(flag_vals) > 1:
            raise ParseError(
                'Multiple cmd line flags specified for option {} in {}'.format(
                    dest, self._scope_str()))
        elif len(flag_vals) == 1:
            flag_val = flag_vals[0]
        else:
            flag_val = None

        # Rank all available values.
        # Note that some of these values may already be of the value type, but type conversion
        # is idempotent, so this is OK.

        values_to_rank = [
            to_value_type(x) for x in [
                flag_val, env_val_or_str, config_val_or_str,
                config_default_val_or_str,
                kwargs.get('default'), None
            ]
        ]
        # Note that ranked_vals will always have at least one element, and all elements will be
        # instances of RankedValue (so none will be None, although they may wrap a None value).
        ranked_vals = list(
            reversed(list(RankedValue.prioritized_iter(*values_to_rank))))

        def record_option(value, rank, option_details=None):
            deprecation_version = kwargs.get('removal_version')
            self._option_tracker.record_option(
                scope=self._scope,
                option=dest,
                value=value,
                rank=rank,
                deprecation_version=deprecation_version,
                details=option_details)

        # Record info about the derivation of each of the contributing values.
        detail_history = []
        for ranked_val in ranked_vals:
            if ranked_val.rank in (RankedValue.CONFIG,
                                   RankedValue.CONFIG_DEFAULT):
                details = config_details
            elif ranked_val.rank == RankedValue.ENVIRONMENT:
                details = env_details
            else:
                details = None
            if details:
                detail_history.append(details)
            record_option(value=ranked_val.value,
                          rank=ranked_val.rank,
                          option_details=details)

        # Helper function to check various validity constraints on final option values.
        def check(val):
            if val is not None:
                choices = kwargs.get('choices')
                # If the `type` argument has an `all_variants` attribute, use that as `choices` if not
                # already set. Using an attribute instead of checking a subclass allows `type` arguments
                # which are functions to have an implicit fallback `choices` set as well.
                if choices is None and 'type' in kwargs:
                    type_arg = kwargs.get('type')
                    if hasattr(type_arg, 'all_variants'):
                        choices = list(type_arg.all_variants)
                # TODO: convert this into an enum() pattern match!
                if choices is not None and val not in choices:
                    raise ParseError(
                        '`{}` is not an allowed value for option {} in {}. '
                        'Must be one of: {}'.format(val, dest,
                                                    self._scope_str(),
                                                    choices))
                elif kwargs.get(
                        'type') == dir_option and not os.path.isdir(val):
                    raise ParseError(
                        'Directory value `{}` for option {} in {} does not exist.'
                        .format(val, dest, self._scope_str()))
                elif kwargs.get(
                        'type') == file_option and not os.path.isfile(val):
                    raise ParseError(
                        'File value `{}` for option {} in {} does not exist.'.
                        format(val, dest, self._scope_str()))

        # Generate the final value from all available values, and check that it (or its members,
        # if a list) are in the set of allowed choices.
        if is_list_option(kwargs):
            merged_rank = ranked_vals[-1].rank
            merged_val = ListValueComponent.merge(
                [rv.value for rv in ranked_vals if rv.value is not None]).val
            # TODO: run `check()` for all elements of a list option too!!!
            merged_val = [
                self._convert_member_type(kwargs.get('member_type', str), x)
                for x in merged_val
            ]
            for val in merged_val:
                check(val)
            ret = RankedValue(merged_rank, merged_val)
        elif is_dict_option(kwargs):
            # TODO: convert `member_type` for dict values too!
            merged_rank = ranked_vals[-1].rank
            merged_val = DictValueComponent.merge(
                [rv.value for rv in ranked_vals if rv.value is not None]).val
            for val in merged_val:
                check(val)
            ret = RankedValue(merged_rank, merged_val)
        else:
            ret = ranked_vals[-1]
            check(ret.value)

        # Record info about the derivation of the final value.
        merged_details = ', '.join(detail_history) if detail_history else None
        record_option(value=ret.value,
                      rank=ret.rank,
                      option_details=merged_details)

        # All done!
        return ret
Example #11
 def do_test(expected_default, **kwargs):
     kwargs["default"] = RankedValue(Rank.HARDCODED, kwargs["default"])
     assert expected_default == HelpInfoExtracter.compute_default(**kwargs)
Example #12
    def setUp(self):
        super().setUp()

        # We're tied tightly to pex implementation details here, faking out a python binary that outputs
        # only one value no matter what arguments, environment or input stream it has attached. That
        # value is the interpreter identity which is a JSON dict with exactly the following keys:
        # binary, python_tag, abi_tag, platform_tag, version, supported_tags, env_markers.
        def fake_interpreter(python_tag: str, abi_tag: str,
                             version: Tuple[int, int, int]):
            interpreter_dir = safe_mkdtemp()
            binary = os.path.join(interpreter_dir, "python")
            values = dict(
                binary=binary,
                python_tag=python_tag,
                abi_tag=abi_tag,
                platform_tag="",
                version=version,
                supported_tags=[],
                env_markers={},
            )
            id_str = json.dumps(values)
            with open(binary, "w") as fp:
                fp.write(
                    dedent(f"""
                        #!{PythonInterpreter.get().binary}
                        from __future__ import print_function

                        print({id_str!r})
                        """).strip())
            chmod_plus_x(binary)
            return PythonInterpreter.from_binary(binary)

        # impl, abi, impl_version, major, minor, patch
        self.fake_interpreters = [
            fake_interpreter(python_tag="ip",
                             abi_tag="ip2",
                             version=(2, 77, 777)),
            fake_interpreter(python_tag="ip",
                             abi_tag="ip2",
                             version=(2, 88, 888)),
            fake_interpreter(python_tag="ip",
                             abi_tag="ip2",
                             version=(2, 99, 999)),
        ]

        self.set_options_for_scope(
            PythonSetup.options_scope,
            interpreter_constraints=RankedValue(Rank.CONFIG,
                                                ["IronPython>=2.55"]),
            interpreter_search_paths=[
                interpreter.binary for interpreter in self.fake_interpreters
            ],
        )

        self.reqtgt = self.make_target(
            spec="req",
            target_type=PythonRequirementLibrary,
            requirements=[],
        )
        self.tgt1 = self._fake_target("tgt1")
        self.tgt2 = self._fake_target("tgt2",
                                      compatibility=["IronPython>2.77.777"])
        self.tgt3 = self._fake_target("tgt3",
                                      compatibility=["IronPython>2.88.888"])
        self.tgt4 = self._fake_target("tgt4",
                                      compatibility=["IronPython<2.99.999"])
        self.tgt20 = self._fake_target("tgt20", dependencies=[self.tgt2])
        self.tgt30 = self._fake_target("tgt30", dependencies=[self.tgt3])
        self.tgt40 = self._fake_target("tgt40", dependencies=[self.tgt4])
Example #13
def test_is_python2(constraints, compatibilities):
  Subsystem.reset()
  init_subsystem(PythonSetup, {PythonSetup.options_scope: {
    'interpreter_constraints': RankedValue(RankedValue.CONFIG, constraints)}})
  assert is_python2(compatibilities, PythonSetup.global_instance())