def generate_run_combinations(named_tuple: NamedTupleMeta[T],
                              opts_dict: Dict[str, List[Optional[object]]],
                              loop_count: int = 1) -> Iterable[T]:
    """
  Create all possible combinations given the values in opts_dict[named_tuple._fields].

  :type T: type annotation for the named_tuple type.
  :param named_tuple: named tuple type, whose fields are used to make combinations for
  :param opts_dict: dictionary of keys to value list. keys correspond to the named_tuple fields.
  :param loop_count: number of repetitions.
  :return: an iterable over named_tuple instances.
  """
    combinations_list = []
    for k in named_tuple._fields:
        # the key can be either singular or plural, e.g. 'package' or 'packages'
        val = dict_lookup_any_key(opts_dict, k, k + "s")

        # Treat {'x': None} key-value pairs as if the value were [None];
        # otherwise itertools.product throws an exception about not being able to iterate None.
        combinations_list.append(val or [None])

    print_utils.debug_print("opts_dict: ", opts_dict)
    print_utils.debug_print_nd("named_tuple: ", named_tuple)
    print_utils.debug_print("combinations_list: ", combinations_list)

    for i in range(loop_count):
        for combo in itertools.product(*combinations_list):
            yield named_tuple(*combo)
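
# Usage sketch for generate_run_combinations (hypothetical field/option names,
# chosen only to illustrate the singular/plural key lookup above):
#
#   Args = collections.namedtuple('Args', ['package', 'readahead'])
#   opts = {'packages': ['com.example.app'], 'readahead': ['cold', 'warm']}
#   list(generate_run_combinations(Args, opts))
#   # -> [Args(package='com.example.app', readahead='cold'),
#   #     Args(package='com.example.app', readahead='warm')]
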
def parse_metrics_output(input: str) -> List[Tuple[str, str]]:
    """Parses output of app startup into metrics and corresponding values.

    It converts 'a=b\nc=d\ne=f\n...' into [(a, b), (c, d), (e, f)].
    Anything after a second '=' on a line is reported as corruption and dropped.

    Returns:
      A list of (metric name, metric value) tuples.
    """
    all_metrics = []
    for line in input.split('\n'):
        if not line:
            continue
        splits = line.split('=')
        if len(splits) < 2:
            print_utils.error_print('Bad line "{}"'.format(line))
            continue
        metric_name = splits[0]
        metric_value = splits[1]
        rest = splits[2] if len(splits) > 2 else ''
        if rest:
            print_utils.error_print('Corrupt line "{}"'.format(line))
        print_utils.debug_print('metric: "{metric_name}", '
                                'value: "{metric_value}" '.format(
                                    metric_name=metric_name,
                                    metric_value=metric_value))

        all_metrics.append((metric_name, metric_value))
    return all_metrics
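
# Example (illustrative input): parse_metrics_output('TotalTime=123\nWaitTime=150')
# returns [('TotalTime', '123'), ('WaitTime', '150')]. The metric names here are
# made up; any 'name=value' lines in the app-startup output are accepted.
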
def find_all_compiler_filters(dexopt_state: DexoptState,
                              package: str) -> List[CompilerFilterInfo]:

    lst = []
    package_tree = find_parse_subtree(dexopt_state,
                                      re.escape("[%s]" % package))

    if not package_tree:
        raise AssertionError(
            "Could not find any package subtree for package %s" % (package))

    print_utils.debug_print("package tree: ", package_tree)

    for path_tree in find_parse_children(package_tree, "path: "):
        print_utils.debug_print("path tree: ", path_tree)

        matchre = re.compile(
            r"([^:]+):\s+\[status=([^\]]+)\]\s+\[reason=([^\]]+)\]")

        for isa_node in find_parse_children(path_tree, matchre):

            matches = matchre.match(isa_node.label).groups()

            info = CompilerFilterInfo(*matches)
            lst.append(info)

    return lst
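
# The lines matched above come from 'dumpsys package' dexopt output and look
# roughly like (illustrative example; values vary by device and package):
#   arm64: [status=speed-profile] [reason=install]
# Each match becomes a CompilerFilterInfo built from the three captured groups
# (isa, status and reason).
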
def convert_to_dict(trace_file: TraceFile) -> Dict[str, FileEntry]:
  trace_file_index = trace_file.index

  # entry.id -> entry.file_name
  entries_map = {}

  index_entries = trace_file_index.entries
  for entry in index_entries:
    entries_map[entry.id] = entry.file_name

  final_map = {}

  file_entries_map = {}
  file_entries = trace_file.list.entries
  for entry in file_entries:
    print_utils.debug_print(entry)

    lst = file_entries_map.get(entry.index_id, [])
    file_entries_map[entry.index_id] = lst

    file_name = entries_map[entry.index_id]
    file_entry = \
        FileEntry(id=entry.index_id, name=file_name, offset=entry.file_offset, length=entry.file_length)

    lst.append(file_entry)

    final_map[file_name] = lst

  return final_map
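
# Shape of the returned mapping (illustrative; actual file names and sizes come
# from the trace):
#   {'/system/framework/framework.jar':
#        [FileEntry(id=..., name=..., offset=..., length=...), ...],
#    ...}
# i.e. every file name maps to the list of FileEntry records read from it.
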
def main():
    global _debug

    opts = parse_options()
    _debug = opts.debug
    if _DEBUG_FORCE is not None:
        _debug = _DEBUG_FORCE

    print_utils.DEBUG = _debug
    cmd_utils.SIMULATE = opts.simulate

    print_utils.debug_print("parsed options: ", opts)

    output_file = opts.output and open(opts.output, 'w') or sys.stdout

    combos = lambda: args_utils.generate_run_combinations(
        RunCommandArgs, coerce_to_list(vars(opts)), opts.loop_count)
    print_utils.debug_print_gen("run combinations: ", combos())

    grouped_combos = lambda: args_utils.generate_group_run_combinations(
        combos(), CollectorPackageInfo)

    print_utils.debug_print_gen("grouped run combinations: ", grouped_combos())
    requires_trace_collection = any(i in _TRACING_READAHEADS
                                    for i in opts.readaheads)
    exec = execute_run_combos(grouped_combos(), opts.simulate, opts.inodes,
                              opts.timeout, opts.compiler_type,
                              requires_trace_collection)

    results = gather_results(exec, _COMBINATORIAL_OPTIONS, combos())

    eval_and_save_to_csv(output_file, results)

    return 0
def dict_lookup_any_key(dictionary: dict, *keys: str):
    for k in keys:
        if k in dictionary:
            return dictionary[k]

    print_utils.debug_print(
        "None of the keys {} were in the dictionary".format(keys))
    return [None]
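
# Example (illustrative): dict_lookup_any_key({'packages': ['a']}, 'package', 'packages')
# returns ['a']; if none of the keys are present, a debug message is logged and
# [None] is returned so callers can still iterate over the result.
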
def find_first_compiler_filter(dexopt_state: DexoptState, package: str,
                               instruction_set: str) -> Optional[CompilerFilterInfo]:
    lst = find_all_compiler_filters(dexopt_state, package)

    print_utils.debug_print("all compiler filters: ", lst)

    for compiler_filter_info in lst:
        if not instruction_set:
            return compiler_filter_info

        if compiler_filter_info.isa == instruction_set:
            return compiler_filter_info

    return None
def build_ri_compiler_argv(inodes_path: str, perfetto_trace_file: str,
                           trace_duration: Optional[timedelta]) -> List[str]:
    argv = ['-i', inodes_path, '--perfetto-trace', perfetto_trace_file]

    if trace_duration is not None:
        argv += [
            '--duration',
            str(
                int(trace_duration.total_seconds() *
                    PerfettoTraceCollector.MS_PER_SEC))
        ]

    print_utils.debug_print(argv)
    return argv
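
# Example (hypothetical paths; assumes PerfettoTraceCollector.MS_PER_SEC == 1000):
#   build_ri_compiler_argv('/tmp/inodes', '/tmp/trace.pb', timedelta(seconds=5))
#   # -> ['-i', '/tmp/inodes', '--perfetto-trace', '/tmp/trace.pb',
#   #     '--duration', '5000']
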
def main(argv: List[str]) -> int:
  opts = parse_options(argv[1:])
  if opts.debug:
    print_utils.DEBUG = opts.debug
  print_utils.debug_print(opts)

  prefetch_file = open_iorap_prefetch_file(opts.input)

  if opts.raw:
    print(prefetch_file)

  print_stats_summary(prefetch_file, opts.upper_percent)

  return 0
def parse_logcat_datetime(timestamp: str) -> Optional[datetime]:
    """Parses the timestamp of logcat.

  Params:
    timestamp: for example "2019-07-01 16:13:55.221".

  Returns:
    a datetime of timestamp with the year now.
  """
    try:
        # Match the format of logcat. For example: "2019-07-01 16:13:55.221",
        # because it doesn't have year, set current year to it.
        timestamp = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
        return timestamp
    except ValueError as ve:
        print_utils.debug_print('Invalid line: ' + timestamp)
        return None
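
# Example: parse_logcat_datetime('2019-07-01 16:13:55.221') returns
# datetime(2019, 7, 1, 16, 13, 55, 221000); a string that does not match the
# format returns None.
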
def parse_tab_subtree(label: str,
                      str_lines: List[str],
                      separator=' ',
                      indent=-1) -> ParseResult:
    children = []

    get_indent_level = lambda line: len(line) - len(line.lstrip())

    line_num = 0

    keep_going = True
    while keep_going:
        keep_going = False

        for line_num in range(len(str_lines)):
            line = str_lines[line_num]
            current_indent = get_indent_level(line)

            print_utils.debug_print("INDENT=%d, LINE=%s" %
                                    (current_indent, line))

            current_label = line.lstrip()

            # skip empty lines
            if line.lstrip() == "":
                continue

            if current_indent > indent:
                parse_result = parse_tab_subtree(current_label,
                                                 str_lines[line_num + 1::],
                                                 separator, current_indent)
                str_lines = parse_result.remainder
                children.append(parse_result.tree)
                keep_going = True
            else:
                # current_indent <= indent
                keep_going = False

            break

    new_remainder = str_lines[line_num::]
    print_utils.debug_print("NEW REMAINDER: ", new_remainder)

    parse_tree = ParseTree(label, children)
    return ParseResult(new_remainder, parse_tree)
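
# Sketch of the indentation-based parsing (illustrative input):
#   parse_tab_subtree('root', ['a', '  b', '  c'])
# returns a ParseResult whose tree is ParseTree('root', ...) with one child 'a',
# and 'b' and 'c' nested under 'a' (a deeper indent means a deeper tree level).
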
    def configure_compiler_filter(self) -> bool:
        """Configures compiler filter (e.g. speed).

    Returns:
      A bool indicates whether configure of compiler filer succeeds or not.
    """
        if not self.compiler_filter:
            print_utils.debug_print('No --compiler-filter specified, don\'t'
                                    ' need to force it.')
            return True

        passed, current_compiler_filter_info = \
          cmd_utils.run_shell_command(
              '{} --package {}'.format(os.path.join(AppRunner.APP_STARTUP_DIR,
                                                    'query_compiler_filter.py'),
                                       self.package))

        if not passed:
            return passed

        # TODO: call query_compiler_filter directly as a python function instead of
        #  these shell calls.
        current_compiler_filter, current_reason, current_isa = \
          current_compiler_filter_info.split(' ')
        print_utils.debug_print('Compiler Filter={} Reason={} Isa={}'.format(
            current_compiler_filter, current_reason, current_isa))

        # Only trust a reason of 'unknown': that is the reason left when we manually
        # force the compilation filter. Any other reason means some automatic,
        # system-triggered compilation chose the current filter.
        if current_reason != 'unknown' or \
            current_compiler_filter != self.compiler_filter:
            passed, _ = adb_utils.run_shell_command(
                '{}/force_compiler_filter '
                '--compiler-filter "{}" '
                '--package "{}"'
                ' --activity "{}'.format(AppRunner.APP_STARTUP_DIR,
                                         self.compiler_filter, self.package,
                                         self.activity))
        else:
            print_utils.debug_print('Queried compiler-filter matched requested '
                                    'compiler-filter, skip forcing.')
            passed = True
        return passed
    def preprocess(self):
        passed = self.validate_options()
        if not passed:
            return

        # Sets up adb environment.
        adb_utils.root()
        adb_utils.disable_selinux()
        time.sleep(1)

        # Kill any existing process of this app
        adb_utils.pkill(self.app_runner.package)

        if self.readahead != 'warm':
            print_utils.debug_print('Drop caches for non-warm start.')
            # Drop all caches to get cold starts.
            adb_utils.vm_drop_cache()

        if self.readahead != 'warm' and self.readahead != 'cold':
            iorapd_utils.enable_iorapd_readahead()
    def launch_app(self,
                   pre_launch_timestamp: str) -> Optional[List[Tuple[str]]]:
        """ Launches the app.

        Returns:
          A list of (metric, value) tuples.
    """
        print_utils.debug_print('Running with timeout {}'.format(self.timeout))

        passed, am_start_output = cmd_utils.run_shell_command(
            'timeout {timeout} '
            '"{DIR}/launch_application" '
            '"{package}" '
            '"{activity}"'.format(timeout=self.timeout,
                                  DIR=AppRunner.APP_STARTUP_DIR,
                                  package=self.package,
                                  activity=self.activity))
        if not passed and not self.simulate:
            return None

        return self.wait_for_app_finish(pre_launch_timestamp, am_start_output)
def gather_results(commands: Iterable[Tuple[DataFrame]], key_list: List[str],
                   value_list: List[Tuple[str, ...]]):
    print_utils.debug_print("gather_results: key_list = ", key_list)
    stringify_none = lambda s: "<none>" if s is None else s
    #  yield key_list + ["time(ms)"]
    for (run_result_list,
         values) in itertools.zip_longest(commands, value_list):
        print_utils.debug_print("run_result_list = ", run_result_list)
        print_utils.debug_print("values = ", values)

        if not run_result_list:
            continue

        # RunCommandArgs(package='com.whatever', readahead='warm', compiler_filter=None)
        # -> {'package':['com.whatever'], 'readahead':['warm'], 'compiler_filter':[None]}
        values_dict = {}
        for k, v in values._asdict().items():
            if k not in key_list:
                continue
            values_dict[k] = [stringify_none(v)]

        values_df = DataFrame(values_dict)
        # project 'values_df' to be same number of rows as run_result_list.
        values_df = values_df.repeat(run_result_list.data_row_len)

        # the results are added as right-hand-side columns onto the existing labels for the table.
        values_df.merge_data_columns(run_result_list)

        yield values_df
    def run(self) -> Optional[List[Tuple[str]]]:
        """Runs an app.

        Returns:
          A list of (metric, value) tuples.
        """
        print_utils.debug_print('==========================================')
        print_utils.debug_print('=====             START              =====')
        print_utils.debug_print('==========================================')
        # Run the preprocess.
        for listener in self.listeners:
            listener.preprocess()

        # Ensure the APK is currently compiled with whatever we passed in
        # via --compiler-filter.
        # No-op if this option was not passed in.
        if not self.configure_compiler_filter():
            print_utils.error_print('Compiler filter configuration failed!')
            return None

        pre_launch_timestamp = adb_utils.logcat_save_timestamp()
        # Launch the app.
        results = self.launch_app(pre_launch_timestamp)

        # Run the postprocess.
        for listener in self.listeners:
            listener.postprocess(pre_launch_timestamp)

        return results
def print_stats_summary(trace_file: TraceFile, upper_percent):
  tf_dict = convert_to_dict(trace_file)
  print_utils.debug_print(tf_dict)

  total_length = 0
  summaries = []
  for name, entries_list in tf_dict.items():
    summary = entries_sum(entries_list)
    summaries.append(summary)

    total_length += summary.length

  # Sort by length
  summaries.sort(reverse=True, key=lambda s: s.length)

  percent_sum = 0.0
  skipped_entries = 0

  print("===========================================")
  print("Total length: {:,} bytes".format(total_length))
  print("Displayed upper percent: {:0.2f}%".format(upper_percent))
  print("===========================================")
  print("")
  print("name,length,percent_of_total,upper_percent")
  for sum in summaries:
    percent_of_total = (sum.length * 1.0) / (total_length * 1.0) * 100.0

    percent_sum += percent_of_total

    if percent_sum > upper_percent:
      skipped_entries = skipped_entries + 1
      continue

    #print("%s,%d,%.2f%%" %(sum.name, sum.length, percent_of_total))
    print("{:s},{:d},{:0.2f}%,{:0.2f}%".format(sum.name, sum.length, percent_of_total, percent_sum))

  if skipped_entries > 0:
    print("[WARNING] Skipped {:d} entries, use -up=100 to show everything".format(skipped_entries))

def main() -> int:
    opts = parse_options()
    cmd_utils._debug = opts.debug
    if _DEBUG_FORCE is not None:
        cmd_utils._debug = _DEBUG_FORCE
    print_utils.debug_print("parsed options: ", opts)

    # Note: This can often 'fail' if the package isn't actually installed.
    package_dumpsys = remote_dumpsys_package(opts.package, opts.simulate)
    print_utils.debug_print("package dumpsys: ", package_dumpsys)
    dumpsys_parse_tree = parse_tab_tree(package_dumpsys, package_dumpsys)
    print_utils.debug_print("parse tree: ", dumpsys_parse_tree)
    dexopt_state = parse_dexopt_state(dumpsys_parse_tree)

    filter = find_first_compiler_filter(dexopt_state, opts.package,
                                        opts.instruction_set)

    if filter:
        print(filter.status, end=' ')
        print(filter.reason, end=' ')
        print(filter.isa)
    else:
        print(
            "ERROR: Could not find any compiler-filter for package %s, isa %s"
            % (opts.package, opts.instruction_set),
            file=sys.stderr)
        return 1

    return 0
async def _blocking_wait_for_logcat_pattern(
        timestamp: datetime, pattern: Pattern,
        timeout: datetime) -> Optional[str]:
    # Show the year in the timestamp.
    logcat_cmd = 'adb logcat -v UTC -v year -v threadtime -T'.split()
    logcat_cmd.append(str(timestamp))
    print_utils.debug_print('[LOGCAT]:' + ' '.join(logcat_cmd))

    # Create subprocess
    process = await asyncio.create_subprocess_exec(
        *logcat_cmd,
        # stdout must be a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE)

    while True:
        # Read one line of output.
        data = await process.stdout.readline()
        line = data.decode('utf-8').rstrip()

        # 2019-07-01 14:54:21.946 27365 27392 I ActivityTaskManager: Displayed
        # com.android.settings/.Settings: +927ms
        # TODO: Detect timeouts even when there is no logcat output.
        if _is_time_out(timeout, line):
            print_utils.debug_print('DID TIMEOUT BEFORE SEEING ANYTHING '
                                    '(timeout={timeout} seconds) << {pattern} '
                                    '>>'.format(timeout=timeout,
                                                pattern=pattern))
            return None

        if pattern.match(line):
            print_utils.debug_print(
                'WE DID SEE PATTERN << "{}" >>.'.format(pattern))
            return line
def execute_run_using_perfetto_trace(
        collector_info, run_combos: Iterable[RunCommandArgs], simulate: bool,
        inodes_path: str, timeout: int, compiler_type: CompilerType,
        requires_trace_collection: bool) -> Iterable[DataFrame]:
    """ Executes run based on perfetto trace. """
    if requires_trace_collection:
        passed, perfetto_trace_file = run_perfetto_collector(
            collector_info, timeout, simulate)
        if not passed:
            raise RuntimeError('Cannot run perfetto collector!')
    else:
        perfetto_trace_file = tempfile.NamedTemporaryFile()

    with perfetto_trace_file:
        for combos in run_combos:
            if combos.readahead in _TRACING_READAHEADS:
                if simulate:
                    compiler_trace_file = tempfile.NamedTemporaryFile()
                else:
                    ri_compiler_argv = build_ri_compiler_argv(
                        inodes_path, perfetto_trace_file.name,
                        combos.trace_duration)
                    compiler_trace_file = compiler.compile(
                        compiler_type, inodes_path, ri_compiler_argv,
                        combos.package, combos.activity)

                with compiler_trace_file:
                    combos = combos._replace(input=compiler_trace_file.name)
                    print_utils.debug_print(combos)
                    output = PrefetchAppRunner(**combos._asdict()).run()
            else:
                print_utils.debug_print(combos)
                output = PrefetchAppRunner(**combos._asdict()).run()

            yield DataFrame(dict(
                (x, [y]) for x, y in output)) if output else None
    optional_named.add_argument('-a',
                                dest='activity',
                                help='Activity of the app to be compiled')
    optional_named.add_argument('-d',
                                dest='debug',
                                action='store_true',
                                help='Print extra debug information')

    return parser.parse_args(argv)


def main(inodes, package, activity, output, **kwargs) -> int:
    """Entries of the program."""
    if not activity:
        activity = AppRunner.get_activity(package)

    passed = iorapd_utils.compile_perfetto_trace_on_device(
        package, activity, inodes)
    if passed and output:
        iorapd_utils.get_iorapd_compiler_trace(package, activity, output)

    return 0


if __name__ == '__main__':
    opts = parse_options()
    if opts.debug:
        print_utils.DEBUG = opts.debug
    print_utils.debug_print(opts)
    sys.exit(main(**(vars(opts))))
  def purge_file(self, suffix: str) -> None:
    print_utils.debug_print('iorapd-perfetto: purge file in ' +
                            self._get_remote_path())
    adb_utils.delete_file_on_device(self._get_remote_path())