Example #1
    def __init__(self, evaluator: IEvaluator, fixed_fields_front: List[str],
                 fixed_fields_back: List[str]):

        self.fsm = FilesystemConfig()

        self.summary_file = path.join(
            self.fsm.get_result_dir(),
            evaluator.get_eval_id() + "_" + ResultAnalyzer.RESULTS_SUMMARY)
        print(self.summary_file)

        self.evaluator = evaluator

        # caching
        self.subtasks = evaluator.get_subtask_ids_ordered()
        self.interpretations = evaluator.get_subtask_interpretation()
        self.required_subtasks = [
            subtask for subtask in self.subtasks
            if self.interpretations[subtask] == IEvaluator.REQUIRED
        ]
        self.assumed_subtasks = [
            subtask for subtask in self.subtasks
            if self.interpretations[subtask] == IEvaluator.ASSUMPTION
        ]
        self.dontcare_subtasks = [
            subtask for subtask in self.subtasks
            if self.interpretations[subtask] == IEvaluator.DONTCARE
        ]

        self.fixed_fields_front = fixed_fields_front
        self.fixed_fields_back = fixed_fields_back
        self.ordered_fieldnames = self.fixed_fields_front + self.subtasks + self.fixed_fields_back
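
A note on the grouping above: the three comprehensions each scan the subtask list once. The same split can be computed in a single pass; a minimal standalone sketch (subtask names and interpretation labels are made up for illustration):

from collections import defaultdict

interpretations = {'install': 'assumption', 'instrument': 'required', 'log': 'dontcare'}
subtasks = ['install', 'instrument', 'log']

# group subtask ids by their interpretation in one pass
groups = defaultdict(list)
for subtask in subtasks:
    groups[interpretations[subtask]].append(subtask)

print(groups['required'])    # ['instrument']
print(groups['assumption'])  # ['install']
print(groups['dontcare'])    # ['log']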
Example #2
    def __init__(self, group=None, target=None, name: str = "DeviceProcess", args=(), kwargs=None,  # process args
                 control_channel=None, known_subtasks: List[str] = None,  # reporter specific args
                 analyzer: IResultAnalyzer = None, eval_name: str = '<Unknown Eval>'):

        # avoid Python's shared-mutable-default pitfall: create a fresh dict/list per call
        if kwargs is None:
            kwargs = {}
        if known_subtasks is None:
            known_subtasks = []

        # cache
        fsc = FilesystemConfig()
        self.reports_dir = fsc.get_report_dir()
        self.results_dir = fsc.get_result_dir()

        # this worker generates its own queue
        queue = Queue(ReportWriter.queue_capacity)
        super(ReportWriter, self).__init__(group, target, name, args, kwargs, control_channel, queue, None,
                                           "report_writer")

        self.known_subtasks = known_subtasks
        self.eval = eval_name
        if not analyzer:
            raise AssertionError('No analyzer provided.')
        self.analyzer = analyzer
        # unless this is set, the reporter keeps running even though the queue is currently empty
        self.exit_after_empty_queue = False

        # csv
        self.csv_keys = [ReportWriter.KEY_PKG, ReportWriter.KEY_CATS] + self.known_subtasks + [ReportWriter.KEY_SUCC] \
                        + [ReportWriter.KEY_WORKER] + [ReportWriter.KEY_TIMESTAMP]
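        # note: the summary is opened in append mode, so every run appends a date line and a
        # fresh header row; the analyzer filters such rows out again later (see is_app_row)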
        with open(self.get_summary_path(), 'a+') as csv_summary:
            csv_summary.write(('#' * 3) + ' ' + str(datetime.now().isoformat()) + '\n')  # log date
            header_writer = DictWriter(csv_summary, self.csv_keys, delimiter=';', quotechar='"')
            header_writer.writeheader()

        # values for statistics
        tested, outs, failures, successes = self.analyzer.get_all()
        self.tested = len(tested)
        self.outs = len(outs)
        self.fails = len(failures)
        self.successes = len(successes)
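
A note on the signature above: the original used kwargs={} and known_subtasks=list(), which trips Python's shared-mutable-default pitfall, because default values are created once at definition time and reused across calls. A self-contained sketch of the hazard (append_task is hypothetical, not part of this codebase):

def append_task(task, tasks=[]):  # BUG: one list shared by every call
    tasks.append(task)
    return tasks

print(append_task('a'))  # ['a']
print(append_task('b'))  # ['a', 'b'], state leaked from the first call

def append_task_fixed(task, tasks=None):
    if tasks is None:
        tasks = []  # fresh list per call
    tasks.append(task)
    return tasks

print(append_task_fixed('a'))  # ['a']
print(append_task_fixed('b'))  # ['b']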
Example #3
def main() -> None:

    parser = create_parser()
    args = parser.parse_args()

    evaluation = args.evaluation
    task = args.task
    out_overwrite = args.out_folder

    print('eval: ' + evaluation)
    print('task: ' + task)
    print('out overwrite: ' + str(out_overwrite))

    fixed_fields_front = [ReportWriter.KEY_PKG, ReportWriter.KEY_CATS]
    fixed_fields_back = [
        ReportWriter.KEY_SUCC, ReportWriter.KEY_WORKER,
        ReportWriter.KEY_TIMESTAMP
    ]

    if evaluation not in Evaluations.MAP:
        print('Evaluation not found: ' + evaluation)
        exit(-1)
    evaluator = Evaluations.MAP[evaluation]

    # setup singleton
    fsm_args = dict()
    if out_overwrite is not None:
        fsm_args['out'] = out_overwrite
    FilesystemConfig(**fsm_args)

    analyzer = evaluator.get_analyzer(fixed_fields_front=fixed_fields_front,
                                      fixed_fields_back=fixed_fields_back)

    api = analyzer.get_command_api()
    method = api.get(task)
    if method is not None:
        method()
        exit(0)
    print('Command ' + task + ' not found for evaluation ' + evaluator.EVAL_ID)
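
The command lookup above is a dict-based dispatch table: command names map to zero-argument handlers. A standalone sketch of the pattern (handler names are made up):

from typing import Callable, Dict

def cmd_summary() -> None:
    print('printing summary ...')

def cmd_check() -> None:
    print('checking for inconsistencies ...')

API: Dict[str, Callable[[], None]] = {
    'summary': cmd_summary,
    'check': cmd_check,
}

task = 'check'
handler = API.get(task)
if handler is None:
    print('Command ' + task + ' not found')
else:
    handler()  # prints: checking for inconsistencies ...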
Example #4
def read_apps(packages_file: str) -> Tuple[Dict[str, List[str]], int]:
    """
    Helper methods to read package names from list files.
    :param packages_file: the name of the list file. Search path is the current list directory. 
    :return: a tuple of the dictionary mapping from package names to categories, and the amount of unique apps
    """

    # relative to the search dir
    lists_dir = FilesystemConfig().get_lists_dir()
    packages_path = join(lists_dir, packages_file)

    # mapping: app -> list(category)
    app_dictionary = dict()
    # mock category for apps without a real category
    current_category = "<NO_CATEGORY>"
    # number of unique apps
    unique_count = 0
    with open(packages_path) as file:
        for line in file:
            stripped = line.strip()
            # drop lines that only contain whitespace and/or line breaks
            if not stripped:
                continue
            if stripped.startswith('~'):
                continue
            if stripped.startswith('#'):
                current_category = stripped.replace('#', '').strip()
            else:
                package_name = stripped

                # app not encountered yet
                if package_name not in app_dictionary:
                    unique_count += 1
                    app_dictionary[package_name] = list()

                app_dictionary[package_name].append(current_category)
    return app_dictionary, unique_count
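
The list-file format parsed above is only implicit in the code: a line starting with '#' opens a category, a line starting with '~' is skipped, and every other non-blank line is a package name filed under the current category (once per category it appears under). A standalone sketch with a made-up file, the parsing logic of read_apps inlined so it runs on its own:

from io import StringIO

sample = StringIO(
    '# Games\n'
    'com.example.chess\n'
    '~ com.example.ignored\n'
    '\n'
    '# Tools\n'
    'com.example.chess\n'
    'com.example.editor\n'
)

apps = {}
current_category = '<NO_CATEGORY>'
for line in sample:
    stripped = line.strip()
    if not stripped or stripped.startswith('~'):
        continue
    if stripped.startswith('#'):
        current_category = stripped.replace('#', '').strip()
    else:
        apps.setdefault(stripped, []).append(current_category)

print(apps)       # {'com.example.chess': ['Games', 'Tools'], 'com.example.editor': ['Tools']}
print(len(apps))  # 2 unique apps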
Example #5
def main() -> None:
    # parsing general arguments
    parser = create_parser()
    args = parser.parse_args()

    evaluation_name = args.evaluation
    apk = args.apk_folder
    out = args.out_folder
    tmp = args.tmp_folder
    lists = args.list_folder

    # initialize singleton
    fsm_args = dict()
    if apk is not None:
        fsm_args['apk'] = apk
    if out is not None:
        fsm_args['out'] = out
    if tmp is not None:
        fsm_args['tmp'] = tmp
    if lists is not None:
        fsm_args['lists'] = lists
    fsm = FilesystemConfig(**fsm_args)

    # set dir values to the updated value
    # apk_dir = fsm.get_apk_dir()
    out_dir = fsm.get_out_dir()
    # tmp_dir = fsm.get_tmp_dir()
    report_dir = fsm.get_report_dir()
    result_dir = fsm.get_result_dir()
    # lists = fsm.get_lists_dir()

    # finding the right evaluation

    # ~ strategy pattern
    try:
        evaluator = Evaluations.MAP[evaluation_name]
        if evaluator is None:
            raise KeyError
    except KeyError:
        print('No such evaluator: ' + evaluation_name)
        exit(-1)
        return  # ide workaround

    # initialization (e.g. eval-specific input parsing)
    evaluator.init()

    # preparing the queue

    analyzer = evaluator.get_analyzer(
        [ReportWriter.KEY_PKG, ReportWriter.KEY_CATS], [
            ReportWriter.KEY_SUCC, ReportWriter.KEY_WORKER,
            ReportWriter.KEY_TIMESTAMP
        ])
    tested = [
        app_row[ReportWriter.KEY_PKG] for app_row in analyzer.get_tested()
    ]

    skip = []
    if len(tested) > 0:
        sure = False
        while not sure:
            cont = input(
                "Evaluation was finished prematurely the last time. Do you want to proceed? (y/n)"
            ).lower()
            # continue
            if cont == 'y' or cont == 'yes':
                print('Proceeding with evaluation.')
                print('Current state:')
                analyzer.api_summary()
                skip = tested
                sure = True
            # do not continue
            else:
                if cont != 'n' and cont != 'no':
                    print('No valid answer given. Treated as no.')
                print('Do not proceed with evaluation.')

                delete = input(
                    'Are you sure you want to delete all persisted data from the last evaluation? (y/n)'
                ).lower()
                if delete == 'y' or delete == 'yes':
                    # delete data from last evaluation
                    shutil.rmtree(out_dir)
                    sure = True

    # ensure 'out' directories exist
    makedirs(report_dir, exist_ok=True)
    makedirs(result_dir, exist_ok=True)

    tasks = evaluator.create_task_queue(skip)

    # should be reliable since no one touched the queue yet
    # task_num = tasks.qsize()

    # preparing the devices

    devices = shellutils.list_devices()

    if not devices:
        print('No devices available.')
        exit(0)

    device_workers = list()
    device_worker_connections = dict()

    helper_workers = list()
    helper_worker_connections = dict()

    # preparing the reporter process
    reporter_pipe_worker, reporter_pipe_main = Pipe(True)
    reporter = ReportWriter(name='ReportWriter',
                            control_channel=reporter_pipe_worker,
                            known_subtasks=evaluator.get_subtask_ids_ordered(),
                            analyzer=analyzer,
                            eval_name=evaluation_name)
    report_queue = reporter.get_task_queue()
    helper_workers.append(reporter)
    helper_worker_connections[reporter] = reporter_pipe_main
    reporter.start()

    # try: handle interrupts and errors
    try:
        # preparing and starting the device workers

        # we have an early bail-out in case there are no devices
        # noinspection PyTypeChecker
        for device in devices:
            recv, send = Pipe(False)
            worker = evaluator.create_device_worker(recv, tasks, device,
                                                    report_queue)
            device_workers.append(worker)
            device_worker_connections[worker] = send
            worker.start()
            print('started ' + device)

        # wait for workers to finish
        waited_rounds = 0
        while not tasks.empty():
            waited_rounds += 1
            sleep(5)
            if waited_rounds % 10 == 0:
                print('main process: queue not empty, sleeping')

        # signalling the reporter that it should stop the next time its queue gets empty

        # give the workers a chance to finish the last task and send a report before telling the
        # reporter to finish the report queue and exit
        wait_for_workers(device_workers)

        # now signal the reporter to stop after finishing the current queue
        reporter_pipe_main.send(ReportWriter.msg_producers_done)
        # and wait for it to finish
        wait_for_workers([reporter])
        print('Evaluation completed.')

    except KeyboardInterrupt:
        print('Keyboard interrupt.')
    except Exception as e:
        print('Encountered exception ' + str(e))
        # fallthrough to termination

    # depending on whether we aborted or succeeded, device workers might be running or not,
    # so we just terminate all of them
    print('Terminating worker processes that are possibly still running.')

    # merge worker lists and connection dicts
    all_workers = device_workers + helper_workers
    all_connections = device_worker_connections.copy()
    all_connections.update(helper_worker_connections)

    for worker in all_workers:
        if worker.is_alive():
            all_connections[worker].send(DeviceWorker.msg_terminate)
    wait_for_workers(all_workers)

    print('Evaluation finished.')

    analyzer.api_summary()

    # some threads (e.g. daemon threads of queue) need some time to finish before we finish the main process.
    sleep(2)
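
The shutdown protocol above (drain the task queue, wait for the producers, then tell the consumer to exit once its own queue runs dry) can be reduced to a minimal multiprocessing sketch. DONE and consumer are placeholders for illustration, not APIs of this codebase:

from multiprocessing import Pipe, Process, Queue
from queue import Empty

DONE = 'producers_done'  # control message, analogous to msg_producers_done

def consumer(control, queue):
    draining = False
    while True:
        if control.poll():
            draining = (control.recv() == DONE)
        try:
            item = queue.get(timeout=0.1)  # short wait instead of a busy spin
            print('consumed', item)
        except Empty:
            if draining:
                break  # queue ran dry and producers are done

if __name__ == '__main__':
    ctl_worker, ctl_main = Pipe(True)
    q = Queue()
    worker = Process(target=consumer, args=(ctl_worker, q))
    worker.start()
    for i in range(3):
        q.put(i)         # producers enqueue work
    ctl_main.send(DONE)  # then signal: exit once the queue is empty
    worker.join()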
Example #6
    def instrument(self, app: str) -> bool:
        """
        Trigger on-device recompilation of an application using an installed ARTist version.
        :param app: the app to be instrumented
        :return: success or failure
        """
        activity = self.artist_package + '/.' + self.artist_activity
        self.log('Starting ARTistGUI to instrument ' + app)
        (start_success, start_out) = adb_shell(
            'am start -n ' + activity + ' --es INTENT_EXTRA_PACKAGE ' + app,
            device=self.device_id)

        self.log(start_out)
        if not start_success:
            self.log('Could not start ARTistGUI')
            return False

        # path where ARTistGUI stores the instrumentation result file to
        result_path = self.instrumentation_result_path(app)

        # roughly how long to wait before considering it a fail
        wait_seconds = 1800  # 30 minutes

        instrumentation_success = False

        # busy wait
        self.log('Starting busy wait: checking ' + result_path +
                 ' until results appear or timeout occurs.')
        ls = 'ls ' + result_path
        for i in range(wait_seconds):
            # check if file exists
            (exists, ls_out) = adb_shell(ls, device=self.device_id)

            # not yet done
            if (not exists) and 'No such file or directory' in ls_out:
                if i % 10 == 0:
                    self.log(
                        str(i).zfill(4) + ' - ' + app +
                        ' - waiting for instrumentation to finish.')
                sleep(1)
            # unexpected error, should only occur during debugging (wrong permission etc)
            elif not exists:
                self.log('ls command failed. Abort.')
                instrumentation_success = False  # making it explicit
                self.log(ls_out)
                break
            else:
                self.log('Found instrumentation result file')

                tmp_result = path.join(FilesystemConfig().get_tmp_dir(),
                                       'ARTIST_RESULT')
                # (pulled, pulled_out) = shell('adb pull ' + result_path + ' ' + tmp_result)
                (pulled, pulled_out) = adb_pull(result_path,
                                                tmp_result,
                                                device=self.device_id)
                self.log('Pulling instrumentation result file from device ' +
                         ('succeeded' if pulled else 'failed') + ':')
                self.log(pulled_out)

                if not pulled:
                    instrumentation_success = False
                    break

                with open(tmp_result, 'r') as result:
                    result_line = result.readline().strip()
                    self.log('Read "' + result_line + '" from result file.')
                    if result_line == 'true':
                        self.log('Result file: compilation succeeded!')
                        instrumentation_success = True
                    else:
                        self.log('Result file: compilation failed!')
                        instrumentation_success = False

                # cleanup result file but do not abort if it does not work, just log
                (cleanup, cleanup_out) = shell('rm ' + tmp_result)
                self.log('Cleaning the result file ' +
                         ('succeeded' if cleanup else 'failed') + '.')
                self.log(cleanup_out)
                break

        self.log('Stopping ARTistGUI')
        (stop_success,
         stop_log) = adb_shell('am force-stop ' + self.artist_package,
                               device=self.device_id)
        self.log(stop_log)
        if not stop_success:
            self.log('Stopping ARTistGUI failed.')
            # TODO reinstall? what if reinstall fails??

        return instrumentation_success
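
The loop above is a poll-with-timeout: probe once per second, distinguish "not ready yet" from a hard error, and give up after wait_seconds probes. Factored into a reusable sketch (poll_until is hypothetical, not a helper from this codebase):

from time import sleep
from typing import Callable, Optional

def poll_until(probe: Callable[[], Optional[bool]], timeout_s: int, interval_s: float = 1.0) -> bool:
    """Polls probe() until it reports success (True) or a hard error (False).
    None means 'not ready yet'. Gives up after roughly timeout_s seconds."""
    for _ in range(int(timeout_s / interval_s)):
        outcome = probe()
        if outcome is not None:
            return outcome
        sleep(interval_s)
    return False  # timed out

# usage sketch: the probe succeeds on its third call
calls = iter([None, None, True])
print(poll_until(lambda: next(calls), timeout_s=5))  # True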
Example #7
class ResultAnalyzer(IResultAnalyzer):
    RESULTS_SUMMARY = 'summary.csv'

    CMD_SUMMARY = 'summary'
    CMD_SUCC = 'successes'
    CMD_FAILS = 'fails'
    CMD_OUTS = 'outs'
    CMD_CHECK = 'check'

    LOG_TAG = "ResultAnalyzer"

    def __init__(self, evaluator: IEvaluator, fixed_fields_front: List[str],
                 fixed_fields_back: List[str]):

        self.fsm = FilesystemConfig()

        self.summary_file = path.join(
            self.fsm.get_result_dir(),
            evaluator.get_eval_id() + "_" + ResultAnalyzer.RESULTS_SUMMARY)
        print(self.summary_file)

        self.evaluator = evaluator

        # caching
        self.subtasks = evaluator.get_subtask_ids_ordered()
        self.interpretations = evaluator.get_subtask_interpretation()
        self.required_subtasks = [
            subtask for subtask in self.subtasks
            if self.interpretations[subtask] == IEvaluator.REQUIRED
        ]
        self.assumed_subtasks = [
            subtask for subtask in self.subtasks
            if self.interpretations[subtask] == IEvaluator.ASSUMPTION
        ]
        self.dontcare_subtasks = [
            subtask for subtask in self.subtasks
            if self.interpretations[subtask] == IEvaluator.DONTCARE
        ]

        self.fixed_fields_front = fixed_fields_front
        self.fixed_fields_back = fixed_fields_back
        self.ordered_fieldnames = self.fixed_fields_front + self.subtasks + self.fixed_fields_back

    ### interface

    def is_success(self, summary_row: Dict[str, str]) -> bool:
        return self.interpret(summary_row) == IResultAnalyzer.SUCCESS

    def is_out(self, summary_row: Dict[str, str]) -> bool:
        return self.interpret(summary_row) == IResultAnalyzer.OUT

    def is_failure(self, summary_row: Dict[str, str]) -> bool:
        return self.interpret(summary_row) == IResultAnalyzer.FAIL

    def interpret(self, summary_row: Dict[str, str]) -> Union[str, None]:

        # non-app rows (headers, comments, ...) cannot be interpreted
        if not self.is_app_row(summary_row):
            # invalid
            return None

        for subtask in self.subtasks:

            # sanity check
            if subtask not in self.interpretations:
                self.log('Error! No interpretation available for subtask ' +
                         subtask)
                exit(-1)

            interpretation = self.interpretations[subtask]
            subtask_result = summary_row[subtask]
            subtask_success = (subtask_result == 'True')
            # sanity check
            if not subtask_success and subtask_result != 'False':
                self.log('Unexpected value for subtask ' + subtask + ': ' +
                         subtask_result)
                self.log(str(summary_row))
                exit(-1)

            if interpretation == IEvaluator.DONTCARE:
                continue
            if not subtask_success:
                if interpretation == IEvaluator.REQUIRED:
                    # count as fail
                    return IResultAnalyzer.FAIL
                elif interpretation == IEvaluator.ASSUMPTION:
                    # remove from counting
                    return IResultAnalyzer.OUT
                else:
                    self.log('Error! Unexpected interpretation for subtask ' +
                             subtask + ': ' + str(interpretation))
                    exit(-1)
        # no fails (that we care about) occurred
        return IResultAnalyzer.SUCCESS

    def get_all(
        self
    ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[
            str, str]], List[Dict[str, str]]]:
        # if this method ever gets too slow because the data is too big: implement in one iteration
        tested = self.get_tested()
        outs = self.get_outs(csv_rows=tested)
        fails = self.get_fails(csv_rows=tested)
        successes = self.get_successes(csv_rows=tested)
        return tested, outs, fails, successes

    def get_tested(self) -> List[Dict[str, str]]:
        # filter out non-data rows (comments, headlines, ...)
        return self.find_matching_entries(
            lambda row: self.interpret(row) is not None)

    def get_outs(
        self,
        csv_rows: Union[List[Dict[str, str]], None] = None
    ) -> List[Dict[str, str]]:
        if not csv_rows:
            csv_rows = self.get_app_rows()
        return self.find_matching_entries(lambda x: self.is_out(x),
                                          csv_rows=csv_rows)

    def get_fails(
        self,
        csv_rows: Union[List[Dict[str, str]], None] = None
    ) -> List[Dict[str, str]]:
        if not csv_rows:
            csv_rows = self.get_app_rows()
        return self.find_matching_entries(lambda x: self.is_failure(x),
                                          csv_rows=csv_rows)

    # returns summary rows from apps that succeeded
    def get_successes(
        self,
        csv_rows: Union[List[Dict[str, str]], None] = None
    ) -> List[Dict[str, str]]:
        if not csv_rows:
            csv_rows = self.get_app_rows()
        return self.find_matching_entries(lambda x: self.is_success(x),
                                          csv_rows=csv_rows)

    def get_command_api(self) -> Dict[str, Callable[[], None]]:
        return {
            ResultAnalyzer.CMD_SUMMARY: self.api_summary,
            ResultAnalyzer.CMD_CHECK: self.api_check,
            ResultAnalyzer.CMD_SUCC: self.api_successes,
            ResultAnalyzer.CMD_FAILS: self.api_failures,
            ResultAnalyzer.CMD_OUTS: self.api_outs
        }

    ### API implementation ###

    def api_summary(self) -> None:
        """
        API method to print a summary of the current evaluation results.
        """

        # category -> (tests, outs, fails, successes)
        results = dict()

        # use list since tuples do not support item assignment
        overall = [0, 0, 0, 0]

        for row in self.get_app_rows():
            # app = row[ReportWriter.KEY_PKG]
            categories = row[ReportWriter.KEY_CATS].strip().split(
                ReportWriter.CSV_IN_CELL_SEPARATOR)
            for cat in categories:
                if cat not in results.keys():
                    # use list since tuples do not support item assignment
                    results[cat] = [0, 0, 0, 0]

                # increase number of tests
                results[cat][0] += 1
                overall[0] += 1

                # None not possible since we iterate over checked app rows
                interpretation = self.interpret(row)
                if interpretation == IResultAnalyzer.OUT:
                    results[cat][1] += 1
                    overall[1] += 1
                    continue
                if interpretation == IResultAnalyzer.FAIL:
                    results[cat][2] += 1
                    overall[2] += 1
                    continue
                if interpretation == IResultAnalyzer.SUCCESS:
                    results[cat][3] += 1
                    overall[3] += 1
                    continue
                raise AssertionError('Unknown interpretation: ' +
                                     interpretation)

        for cat in sorted(results.keys()):
            tests, outs, fails, successes = results[cat]
            included = tests - outs
            percentage = (successes / included) * 100 if included > 0 else 0
            self.log('Category ' + cat + ': Tested: ' + str(tests) +
                     ', removed: ' + str(outs) + ', success: ' +
                     str(successes) + '/' + str(included) + ' = ' +
                     str(percentage) + '%')

        overall_tests = overall[0]
        overall_outs = overall[1]
        overall_successes = overall[3]
        overall_included = overall_tests - overall_outs
        overall_percentage = (overall_successes / overall_included
                              ) * 100 if overall_included > 0 else 0
        self.log('Overall: Tested: ' + str(overall_tests) + ', removed: ' +
                 str(overall_outs) + ', success: ' + str(overall_successes) +
                 '/' + str(overall_included) + ' = ' +
                 str(overall_percentage) + '%')

    def api_successes(self, dump: bool = False) -> None:
        """
        API method to display all apps that were successfully tested. 
        """
        successes = self.get_successes()
        if dump:
            for row in successes:
                self.log(row[ReportWriter.KEY_PKG])

        self.log('Found ' + str(len(successes)) + ' successes.')

    def api_failures(self, dump: bool = False) -> None:
        """
        API method to display all apps that were successfully tested. 
        """
        failures = self.get_fails()
        if dump:
            for row in failures:
                self.log(row[ReportWriter.KEY_PKG])

        self.log('Found ' + str(len(failures)) + ' failures.')

    def api_outs(self, dump: bool = False) -> None:
        """
        API method to display all apps that were removed from the evaluation because they failed for reasons beyond our 
        control. 
        """
        outs = self.get_outs()
        if dump:
            for row in outs:
                self.log(row[ReportWriter.KEY_PKG])

        self.log('Found ' + str(len(outs)) + ' outs.')

    # API for command CMD_CHECK
    # check results for inconsistencies
    def api_check(self) -> None:
        """
        API method to check the current evaluation results for inconsistencies, such as duplicate app packages.
        """

        row_dict = self.get_app_rows()

        # count occurrences of package names
        packages_counter = dict()  # mapping: package -> occurrence count
        for row in row_dict:

            for fieldname in row.keys():
                # check for unknown fieldname
                if fieldname not in self.ordered_fieldnames:
                    self.log('Warning: Found unknown field: ' + fieldname)

            # remove header lines and comments
            if row[ReportWriter.KEY_TIMESTAMP] is None or row[
                    ReportWriter.KEY_TIMESTAMP] == ReportWriter.KEY_TIMESTAMP:
                # self.log('Skipping entry: ' + str(row))
                continue
            package_name = row[ReportWriter.KEY_PKG]
            if package_name in packages_counter.keys():
                packages_counter[package_name] += 1
            else:
                packages_counter[package_name] = 1

        # find and print duplicates
        duplicates = [(package, count)
                      for (package, count) in packages_counter.items()
                      if count > 1]
        if len(duplicates) > 0:
            self.log(str(len(duplicates)) + ' duplicates:')
            for pkg, count in duplicates:
                self.log(pkg + ': ' + str(count))
        else:
            self.log('No duplicates.')

    ### helper methods ###

    def log(self, s: str) -> None:
        print(ResultAnalyzer.LOG_TAG + ": " + str(s))

    def read_csv_dict(self) -> List[Dict[str, str]]:
        """
        Reads a csv into a list of row dictionaries. The keys are the ordered fieldnames (package, subtask, ...) saved
        in the analyzer. Also contains entries such as header lines and empty lines.
        :return: list of dictionaries mapping from fieldnames to values
        """
        result = []

        if path.isfile(self.summary_file):
            with open(self.summary_file, 'r') as result_csv:
                csv_reader = DictReader(result_csv,
                                        delimiter=';',
                                        fieldnames=self.ordered_fieldnames)
                for row in csv_reader:
                    result.append(row)
        return result

    def find_matching_entries(
        self,
        condition: Callable[[Dict[str, str]], bool],
        csv_rows: Union[List[Dict[str, str]], None] = None
    ) -> List[Dict[str, str]]:
        """
        Scans the provided csv_rows for entries matching the provided condition. 
        :param condition: function mapping rows to booleans
        :param csv_rows: the rows of a read csv result file. Falls back to the current results if no rows are provided.
        :return: list of row dicts meeting the provided condition
        """
        results = []
        if csv_rows is None:
            csv_rows = self.read_csv_dict()

        # noinspection PyTypeChecker
        for row in csv_rows:
            if condition(row):
                results.append(row)
        return results

    # get all app entries from the summary csv, omitting categories and empty lines
    def get_app_rows(self) -> List[Dict[str, str]]:
        """
        Reads all app rows from the summary csv, omitting categories and empty lines.
        :return: list of row dicts
        """
        return self.find_matching_entries(ResultAnalyzer.is_app_row)

    # True if a given row is an app testing result, False otherwise (categories, header lines)
    @staticmethod
    def is_app_row(row: Dict[str, str]) -> bool:
        """
        Checks whether a csv row is an app entry.
        :param row: the csv row dict to check
        :return: true if app row, false if other (e.g., category, empty)
        """
        return row[ReportWriter.KEY_TIMESTAMP] is not None \
               and row[ReportWriter.KEY_TIMESTAMP] != ReportWriter.KEY_TIMESTAMP
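
The precedence implemented by interpret above: subtasks are scanned in their fixed order, and the first failed subtask that is not DONTCARE decides the verdict (FAIL if it was REQUIRED, OUT if it was an ASSUMPTION); a row with no such failure counts as SUCCESS. A standalone toy version of that decision, with made-up subtask names:

REQUIRED, ASSUMPTION, DONTCARE = 'required', 'assumption', 'dontcare'

def verdict(row, ordered_subtasks, interpretations):
    for subtask in ordered_subtasks:
        if row[subtask] == 'True' or interpretations[subtask] == DONTCARE:
            continue  # passed or irrelevant: keep scanning
        return 'FAIL' if interpretations[subtask] == REQUIRED else 'OUT'
    return 'SUCCESS'

kinds = {'install': ASSUMPTION, 'instrument': REQUIRED, 'launch': DONTCARE}
order = ['install', 'instrument', 'launch']
print(verdict({'install': 'True', 'instrument': 'False', 'launch': 'True'}, order, kinds))  # FAIL
print(verdict({'install': 'False', 'instrument': 'True', 'launch': 'True'}, order, kinds))  # OUT
print(verdict({'install': 'True', 'instrument': 'True', 'launch': 'False'}, order, kinds))  # SUCCESS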
Example #8
    def __init__(self):
        self.root = FilesystemConfig().get_apk_dir()