Пример #1
0
class LightContext(object):
    """Minimal context providing resource resolution without run state.

    Exposes the same resolver-backed lookup interface as a full execution
    context, but metadata updates are deliberately no-ops.
    """

    def __init__(self, tm):
        self.tm = tm
        resolver = ResourceResolver()
        self.resolver = resolver
        resolver.load()

    def get_resource(self, resource, strict=True):
        """Look up *resource* via the resolver, passing *strict* through."""
        return self.resolver.get(resource, strict)

    def update_metadata(self, key, *args):
        # Intentionally a no-op: a light context keeps no metadata store.
        pass

    get = get_resource  # alias so the context can stand in for a resolver
Пример #2
0
class LightContext(object):
    """A stripped-down context: resolves resources but records nothing.

    Useful where a full execution context is unavailable; metadata calls
    are accepted and silently discarded.
    """

    def __init__(self, tm):
        self.tm = tm
        self.resolver = ResourceResolver()
        self.resolver.load()

    def get_resource(self, resource, strict=True):
        """Delegate the lookup to the underlying resolver."""
        result = self.resolver.get(resource, strict)
        return result

    def update_metadata(self, key, *args):
        """Discard the update; a light context has no metadata backing."""
        pass

    # Allow this context to be used wherever a resolver is expected.
    get = get_resource
Пример #3
0
class ExecutionContext(object):
    """Aggregates the state of a single run: the job queue, the job in
    progress, completed jobs, and run/job outputs. Delegates target
    interaction to the target manager (tm) and configuration/job supply
    to the config manager (cm)."""

    @property
    def previous_job(self):
        """The most recently completed job, or None if none completed yet."""
        if not self.completed_jobs:
            return None
        return self.completed_jobs[-1]

    @property
    def next_job(self):
        """The next queued job, or None if the queue is empty."""
        if not self.job_queue:
            return None
        return self.job_queue[0]

    @property
    def spec_changed(self):
        """True if the current job runs a different spec than the previous job."""
        if self.previous_job is None and self.current_job is not None:  # Start of run
            return True
        if self.previous_job is not None and self.current_job is None:  # End of run
            return True
        return self.current_job.spec.id != self.previous_job.spec.id

    @property
    def spec_will_change(self):
        """True if the next job will run a different spec than the current job."""
        if self.current_job is None and self.next_job is not None:  # Start of run
            return True
        if self.current_job is not None and self.next_job is None:  # End of run
            return True
        return self.current_job.spec.id != self.next_job.spec.id

    @property
    def workload(self):
        """Workload of the job in progress; None when no job is running."""
        if self.current_job:
            return self.current_job.workload

    @property
    def job_output(self):
        """Output of the job in progress; None when no job is running."""
        if self.current_job:
            return self.current_job.output

    @property
    def output(self):
        """The "active" output: the current job's while a job is running,
        otherwise the run-level output."""
        if self.current_job:
            return self.job_output
        return self.run_output

    @property
    def output_directory(self):
        """Base path of the active (job- or run-level) output."""
        return self.output.basepath

    @property
    def reboot_policy(self):
        """Reboot policy taken from the run configuration."""
        return self.cm.run_config.reboot_policy

    @property
    def target_info(self):
        """Target description recorded in the run output."""
        return self.run_output.target_info

    def __init__(self, cm, tm, output):
        """
        :param cm: config manager; supplies jobs, run config and plugin cache.
        :param tm: target manager; used for target interaction and results.
        :param output: run-level output object; also holds the run state.
        """
        self.logger = logging.getLogger('context')
        self.cm = cm
        self.tm = tm
        self.run_output = output
        self.run_state = output.state
        self.job_queue = None
        self.completed_jobs = None
        self.current_job = None
        self.successful_jobs = 0
        self.failed_jobs = 0
        self.run_interrupted = False
        self._load_resource_getters()

    def start_run(self):
        """Record the start time, populate the job queue, and mark the run
        as STARTED in both state and output."""
        self.output.info.start_time = datetime.utcnow()
        self.output.write_info()
        self.job_queue = copy(self.cm.jobs)
        self.completed_jobs = []
        self.run_state.status = Status.STARTED
        self.output.status = Status.STARTED
        self.output.write_state()

    def end_run(self):
        """Derive the overall run status from job counts (OK / PARTIAL /
        FAILED), record end time and duration, and persist the output."""
        if self.successful_jobs:
            if self.failed_jobs:
                status = Status.PARTIAL
            else:
                status = Status.OK
        else:
            status = Status.FAILED
        self.run_state.status = status
        self.run_output.status = status
        self.run_output.info.end_time = datetime.utcnow()
        self.run_output.info.duration = (self.run_output.info.end_time -
                                         self.run_output.info.start_time)
        self.write_output()

    def finalize(self):
        """Finalize the target manager (end-of-run cleanup)."""
        self.tm.finalize()

    def start_job(self):
        """Pop the next job off the queue, make it current, and attach a
        freshly initialized job output to it.

        :raises RuntimeError: if the queue is empty.
        """
        if not self.job_queue:
            raise RuntimeError('No jobs to run')
        self.current_job = self.job_queue.pop(0)
        job_output = init_job_output(self.run_output, self.current_job)
        self.current_job.set_output(job_output)
        return self.current_job

    def end_job(self):
        """Move the current job to completed_jobs and persist its result.

        :raises RuntimeError: if no job is in progress.
        """
        if not self.current_job:
            raise RuntimeError('No jobs in progress')
        self.completed_jobs.append(self.current_job)
        self.output.write_result()
        self.current_job = None

    def set_status(self, status, force=False, write=True):
        """Set the status of the current job (see set_job_status).

        :raises RuntimeError: if no job is in progress.
        """
        if not self.current_job:
            raise RuntimeError('No jobs in progress')
        self.set_job_status(self.current_job, status, force, write)

    def set_job_status(self, job, status, force=False, write=True):
        """Set *job*'s status; optionally force the transition and write
        the run state to disk."""
        job.set_status(status, force)
        if write:
            self.run_output.write_state()

    def extract_results(self):
        """Have the target manager pull results into this context."""
        self.tm.extract_results(self)

    def move_failed(self, job):
        """Relocate *job*'s output into the run's failed-jobs area."""
        self.run_output.move_failed(job.output)

    def skip_job(self, job):
        """Mark *job* as SKIPPED (forced) and file it under completed jobs."""
        self.set_job_status(job, Status.SKIPPED, force=True)
        self.completed_jobs.append(job)

    def skip_remaining_jobs(self):
        """Skip every job still queued, then persist the run state."""
        while self.job_queue:
            job = self.job_queue.pop(0)
            self.skip_job(job)
        self.write_state()

    def write_config(self):
        """Persist the effective run configuration."""
        self.run_output.write_config(self.cm.get_config())

    def write_state(self):
        """Persist the run state."""
        self.run_output.write_state()

    def write_output(self):
        """Persist run info, state, and result in one go."""
        self.run_output.write_info()
        self.run_output.write_state()
        self.run_output.write_result()

    def write_job_specs(self):
        """Persist the specs of all jobs in the run."""
        self.run_output.write_job_specs(self.cm.job_specs)

    def add_augmentation(self, aug):
        """Register an augmentation with the run configuration."""
        self.cm.run_config.add_augmentation(aug)

    def get_resource(self, resource, strict=True):
        """Resolve *resource*; if it resolves to a file, record the file's
        MD5 hash under 'hashes' metadata, keyed by owner/filename."""
        result = self.resolver.get(resource, strict)
        if result is None:
            return result
        if os.path.isfile(result):
            with open(result, 'rb') as fh:
                md5hash = hashlib.md5(fh.read())
                key = '{}/{}'.format(resource.owner, os.path.basename(result))
                self.update_metadata('hashes', key, md5hash.hexdigest())
        return result

    get = get_resource  # alias to allow a context to act as a resolver

    def get_metric(self, name):
        """Look up a metric on the active output, falling back to the run
        output while a job is in progress."""
        try:
            return self.output.get_metric(name)
        except HostError:
            if not self.current_job:
                raise
            return self.run_output.get_metric(name)

    def add_metric(self,
                   name,
                   value,
                   units=None,
                   lower_is_better=False,
                   classifiers=None):
        """Add a metric to the active output, merging in the current job's
        classifiers when a job is running."""
        if self.current_job:
            classifiers = merge_config_values(self.current_job.classifiers,
                                              classifiers)
        self.output.add_metric(name, value, units, lower_is_better,
                               classifiers)

    def get_artifact(self, name):
        """Look up an artifact on the active output, falling back to the
        run output while a job is in progress."""
        try:
            return self.output.get_artifact(name)
        except HostError:
            if not self.current_job:
                raise
            return self.run_output.get_artifact(name)

    def get_artifact_path(self, name):
        """Look up an artifact's path on the active output, falling back
        to the run output while a job is in progress."""
        try:
            return self.output.get_artifact_path(name)
        except HostError:
            if not self.current_job:
                raise
            return self.run_output.get_artifact_path(name)

    def add_artifact(self,
                     name,
                     path,
                     kind,
                     description=None,
                     classifiers=None):
        """Attach an artifact to the active (job- or run-level) output."""
        self.output.add_artifact(name, path, kind, description, classifiers)

    def add_run_artifact(self,
                         name,
                         path,
                         kind,
                         description=None,
                         classifiers=None):
        """Attach an artifact to the run-level output unconditionally."""
        self.run_output.add_artifact(name, path, kind, description,
                                     classifiers)

    def add_event(self, message):
        """Record an event message on the active output."""
        self.output.add_event(message)

    def add_classifier(self, name, value, overwrite=False):
        """Add a classifier to the active output and, while a job is
        running, to the current job as well."""
        self.output.add_classifier(name, value, overwrite)
        if self.current_job:
            self.current_job.add_classifier(name, value, overwrite)

    def add_metadata(self, key, *args, **kwargs):
        """Add metadata on the active output."""
        self.output.add_metadata(key, *args, **kwargs)

    def update_metadata(self, key, *args):
        """Update existing metadata on the active output."""
        self.output.update_metadata(key, *args)

    def take_screenshot(self, filename):
        """Capture the target's screen into the output directory and, if
        the capture produced a file, register a 'screenshot' artifact."""
        filepath = self._get_unique_filepath(filename)
        self.tm.target.capture_screen(filepath)
        if os.path.isfile(filepath):
            self.add_artifact('screenshot', filepath, kind='log')

    def take_uiautomator_dump(self, filename):
        """Capture the target's UI hierarchy and register it as a 'uitree'
        artifact."""
        filepath = self._get_unique_filepath(filename)
        self.tm.target.capture_ui_hierarchy(filepath)
        self.add_artifact('uitree', filepath, kind='log')

    def record_ui_state(self, basename):
        """Record a screenshot and, on Android (or ChromeOS with an
        Android container), a UI automator dump."""
        self.logger.info('Recording screen state...')
        self.take_screenshot('{}.png'.format(basename))
        target = self.tm.target
        if target.os == 'android' or\
           (target.os == 'chromeos' and target.has('android_container')):
            self.take_uiautomator_dump('{}.uix'.format(basename))

    def initialize_jobs(self):
        """Initialize every queued job, dropping (and skipping) the ones
        that fail; persists the resulting state."""
        new_queue = []
        failed_ids = []
        for job in self.job_queue:
            if job.id in failed_ids:
                # Don't try to initialize a job if another job with the same ID
                # (i.e. same job spec) has failed - we can assume it will fail
                # too.
                self.skip_job(job)
                continue

            try:
                job.initialize(self)
            except WorkloadError as e:
                self.set_job_status(job, Status.FAILED, write=False)
                log.log_error(e, self.logger)
                failed_ids.append(job.id)

                if self.cm.run_config.bail_on_init_failure:
                    raise
            else:
                new_queue.append(job)

        self.job_queue = new_queue
        self.write_state()

    def _load_resource_getters(self):
        """Create the resource resolver from the plugin cache and register
        its getters with the run configuration."""
        self.logger.debug('Loading resource discoverers')
        self.resolver = ResourceResolver(self.cm.plugin_cache)
        self.resolver.load()
        for getter in self.resolver.getters:
            self.cm.run_config.add_resource_getter(getter)

    def _get_unique_filepath(self, filename):
        """Return a collision-free path for *filename* inside the output
        directory, shifting an existing file to a '-1' suffix on the first
        collision so the numbering stays consistent."""
        filepath = os.path.join(self.output_directory, filename)
        rest, ext = os.path.splitext(filepath)
        i = 1
        new_filepath = '{}-{}{}'.format(rest, i, ext)

        if not os.path.exists(filepath) and not os.path.exists(new_filepath):
            return filepath
        elif not os.path.exists(new_filepath):
            # new_filepath does not exist, therefore filepath must exist:
            # this is the first collision
            shutil.move(filepath, new_filepath)

        while os.path.exists(new_filepath):
            i += 1
            new_filepath = '{}-{}{}'.format(rest, i, ext)
        return new_filepath
Пример #4
0
 def _load_resource_getters(self):
     """Build the plugin-cache-backed resource resolver and register each
     of its getters with the run configuration."""
     self.logger.debug('Loading resource discoverers')
     self.resolver = ResourceResolver(self.cm.plugin_cache)
     self.resolver.load()
     for discoverer in self.resolver.getters:
         self.cm.run_config.add_resource_getter(discoverer)
Пример #5
0
 def __init__(self, tm):
     """Hold on to the target manager and bring up a loaded resolver."""
     self.tm = tm
     resolver = ResourceResolver()
     self.resolver = resolver
     resolver.load()
Пример #6
0
 def __init__(self, tm):
     # Keep a reference to the target manager and prepare resource lookup.
     self.tm = tm
     self.resolver = ResourceResolver()
     # Load immediately so the context is usable as soon as it exists.
     self.resolver.load()
Пример #7
0
def get_revent_binary(abi):
    """Return the path to the 'revent' executable built for *abi*."""
    finder = ResourceResolver()
    finder.load()
    return finder.get(Executable(NO_ONE, abi, 'revent'))
Пример #8
0
def get_revent_binary(abi):
    """Resolve and return the 'revent' executable for the given ABI."""
    res = ResourceResolver()
    res.load()
    wanted = Executable(NO_ONE, abi, 'revent')
    return res.get(wanted)
Пример #9
0
class ExecutionContext(object):
    """Aggregates the state of a single run: the job queue, the job in
    progress, completed jobs, and run/job outputs. Delegates target
    interaction to the target manager (tm) and configuration/job supply
    to the config manager (cm)."""

    @property
    def previous_job(self):
        """The most recently completed job, or None if none completed yet."""
        if not self.completed_jobs:
            return None
        return self.completed_jobs[-1]

    @property
    def next_job(self):
        """The next queued job, or None if the queue is empty."""
        if not self.job_queue:
            return None
        return self.job_queue[0]

    @property
    def spec_changed(self):
        """True if the current job runs a different spec than the previous job."""
        if self.previous_job is None and self.current_job is not None:  # Start of run
            return True
        if self.previous_job is not None and self.current_job is None:  # End of run
            return True
        return self.current_job.spec.id != self.previous_job.spec.id

    @property
    def spec_will_change(self):
        """True if the next job will run a different spec than the current job."""
        if self.current_job is None and self.next_job is not None:  # Start of run
            return True
        if self.current_job is not None and self.next_job is None:  # End of run
            return True
        return self.current_job.spec.id != self.next_job.spec.id

    @property
    def workload(self):
        """Workload of the job in progress; None when no job is running."""
        if self.current_job:
            return self.current_job.workload

    @property
    def job_output(self):
        """Output of the job in progress; None when no job is running."""
        if self.current_job:
            return self.current_job.output

    @property
    def output(self):
        """The "active" output: the current job's while a job is running,
        otherwise the run-level output."""
        if self.current_job:
            return self.job_output
        return self.run_output

    @property
    def output_directory(self):
        """Base path of the active (job- or run-level) output."""
        return self.output.basepath

    @property
    def reboot_policy(self):
        """Reboot policy taken from the run configuration."""
        return self.cm.run_config.reboot_policy

    @property
    def target_info(self):
        """Target description recorded in the run output."""
        return self.run_output.target_info

    def __init__(self, cm, tm, output):
        """
        :param cm: config manager; supplies jobs, run config and plugin cache.
        :param tm: target manager; used for target interaction and results.
        :param output: run-level output object; also holds the run state.
        """
        self.logger = logging.getLogger('context')
        self.cm = cm
        self.tm = tm
        self.run_output = output
        self.run_state = output.state
        self.job_queue = None
        self.completed_jobs = None
        self.current_job = None
        self.successful_jobs = 0
        self.failed_jobs = 0
        self.run_interrupted = False
        self._load_resource_getters()

    def start_run(self):
        """Record the start time, populate the job queue, and mark the run
        as STARTED in both state and output."""
        self.output.info.start_time = datetime.utcnow()
        self.output.write_info()
        self.job_queue = copy(self.cm.jobs)
        self.completed_jobs = []
        self.run_state.status = Status.STARTED
        self.output.status = Status.STARTED
        self.output.write_state()

    def end_run(self):
        """Derive the overall run status from job counts (OK / PARTIAL /
        FAILED), record end time and duration, and persist the output."""
        if self.successful_jobs:
            if self.failed_jobs:
                status = Status.PARTIAL
            else:
                status = Status.OK
        else:
            status = Status.FAILED
        self.run_state.status = status
        self.run_output.status = status
        self.run_output.info.end_time = datetime.utcnow()
        self.run_output.info.duration = (self.run_output.info.end_time -
                                         self.run_output.info.start_time)
        self.write_output()

    def finalize(self):
        """Finalize the target manager (end-of-run cleanup)."""
        self.tm.finalize()

    def start_job(self):
        """Pop the next job off the queue, make it current, attach a fresh
        job output, and sync its state into the run state.

        :raises RuntimeError: if the queue is empty.
        """
        if not self.job_queue:
            raise RuntimeError('No jobs to run')
        self.current_job = self.job_queue.pop(0)
        job_output = init_job_output(self.run_output, self.current_job)
        self.current_job.set_output(job_output)
        self.update_job_state(self.current_job)
        return self.current_job

    def end_job(self):
        """Move the current job to completed_jobs, sync its final state,
        and persist the result.

        :raises RuntimeError: if no job is in progress.
        """
        if not self.current_job:
            raise RuntimeError('No jobs in progress')
        self.completed_jobs.append(self.current_job)
        self.update_job_state(self.current_job)
        self.output.write_result()
        self.current_job = None

    def set_status(self, status, force=False):
        """Set the status of the current job.

        :raises RuntimeError: if no job is in progress.
        """
        if not self.current_job:
            raise RuntimeError('No jobs in progress')
        self.current_job.set_status(status, force)

    def extract_results(self):
        """Have the target manager pull results into this context."""
        self.tm.extract_results(self)

    def move_failed(self, job):
        """Relocate *job*'s output into the run's failed-jobs area."""
        self.run_output.move_failed(job.output)

    def update_job_state(self, job):
        """Reflect *job*'s state into the run state and write it to disk."""
        self.run_state.update_job(job)
        self.run_output.write_state()

    def skip_job(self, job):
        """Mark *job* as SKIPPED in the run state and file it under
        completed jobs (without writing state to disk here)."""
        job.status = Status.SKIPPED
        self.run_state.update_job(job)
        self.completed_jobs.append(job)

    def skip_remaining_jobs(self):
        """Skip every job still queued, then persist the run state."""
        while self.job_queue:
            job = self.job_queue.pop(0)
            self.skip_job(job)
        self.write_state()

    def write_config(self):
        """Persist the effective run configuration."""
        self.run_output.write_config(self.cm.get_config())

    def write_state(self):
        """Persist the run state."""
        self.run_output.write_state()

    def write_output(self):
        """Persist run info, state, and result in one go."""
        self.run_output.write_info()
        self.run_output.write_state()
        self.run_output.write_result()

    def write_job_specs(self):
        """Persist the specs of all jobs in the run."""
        self.run_output.write_job_specs(self.cm.job_specs)

    def add_augmentation(self, aug):
        """Register an augmentation with the run configuration."""
        self.cm.run_config.add_augmentation(aug)

    def get_resource(self, resource, strict=True):
        """Resolve *resource*; if it resolves to a file, record the file's
        MD5 hash under 'hashes' metadata, keyed by owner/filename."""
        result = self.resolver.get(resource, strict)
        if result is None:
            return result
        if os.path.isfile(result):
            with open(result, 'rb') as fh:
                md5hash = hashlib.md5(fh.read())
                key = '{}/{}'.format(resource.owner, os.path.basename(result))
                self.update_metadata('hashes', key, md5hash.hexdigest())
        return result

    get = get_resource  # alias to allow a context to act as a resolver

    def get_metric(self, name):
        """Look up a metric on the active output, falling back to the run
        output while a job is in progress."""
        try:
            return self.output.get_metric(name)
        except HostError:
            if not self.current_job:
                raise
            return self.run_output.get_metric(name)

    def add_metric(self, name, value, units=None, lower_is_better=False,
                   classifiers=None):
        """Add a metric to the active output, merging in the current job's
        classifiers when a job is running."""
        if self.current_job:
            classifiers = merge_config_values(self.current_job.classifiers,
                                              classifiers)
        self.output.add_metric(name, value, units, lower_is_better, classifiers)

    def get_artifact(self, name):
        """Look up an artifact on the active output, falling back to the
        run output while a job is in progress."""
        try:
            return self.output.get_artifact(name)
        except HostError:
            if not self.current_job:
                raise
            return self.run_output.get_artifact(name)

    def get_artifact_path(self, name):
        """Look up an artifact's path on the active output, falling back
        to the run output while a job is in progress."""
        try:
            return self.output.get_artifact_path(name)
        except HostError:
            if not self.current_job:
                raise
            return self.run_output.get_artifact_path(name)

    def add_artifact(self, name, path, kind, description=None, classifiers=None):
        """Attach an artifact to the active (job- or run-level) output."""
        self.output.add_artifact(name, path, kind, description, classifiers)

    def add_run_artifact(self, name, path, kind, description=None,
                         classifiers=None):
        """Attach an artifact to the run-level output unconditionally."""
        self.run_output.add_artifact(name, path, kind, description, classifiers)

    def add_event(self, message):
        """Record an event message on the active output."""
        self.output.add_event(message)

    def add_metadata(self, key, *args, **kwargs):
        """Add metadata on the active output."""
        self.output.add_metadata(key, *args, **kwargs)

    def update_metadata(self, key, *args):
        """Update existing metadata on the active output."""
        self.output.update_metadata(key, *args)

    def take_screenshot(self, filename):
        """Capture the target's screen into the output directory and, if
        the capture produced a file, register a 'screenshot' artifact."""
        filepath = self._get_unique_filepath(filename)
        self.tm.target.capture_screen(filepath)
        if os.path.isfile(filepath):
            self.add_artifact('screenshot', filepath, kind='log')

    def take_uiautomator_dump(self, filename):
        """Capture the target's UI hierarchy and register it as a 'uitree'
        artifact."""
        filepath = self._get_unique_filepath(filename)
        self.tm.target.capture_ui_hierarchy(filepath)
        self.add_artifact('uitree', filepath, kind='log')

    def record_ui_state(self, basename):
        """Record a screenshot and, on Android (or ChromeOS with an
        Android container), a UI automator dump."""
        self.logger.info('Recording screen state...')
        self.take_screenshot('{}.png'.format(basename))
        target = self.tm.target
        if target.os == 'android' or\
           (target.os == 'chromeos' and target.has('android_container')):
            self.take_uiautomator_dump('{}.uix'.format(basename))

    def initialize_jobs(self):
        """Initialize every queued job, dropping the ones that fail (or
        whose spec already failed for a sibling job)."""
        new_queue = []
        failed_ids = []
        for job in self.job_queue:
            if job.id in failed_ids:
                # Don't try to initialize a job if another job with the same ID
                # (i.e. same job spec) has failed - we can assume it will fail
                # too.
                self.skip_job(job)
                continue

            try:
                job.initialize(self)
            except WorkloadError as e:
                job.set_status(Status.FAILED)
                log.log_error(e, self.logger)
                failed_ids.append(job.id)

                if self.cm.run_config.bail_on_init_failure:
                    raise
            else:
                new_queue.append(job)

        self.job_queue = new_queue

    def _load_resource_getters(self):
        """Create the resource resolver from the plugin cache and register
        its getters with the run configuration."""
        self.logger.debug('Loading resource discoverers')
        self.resolver = ResourceResolver(self.cm.plugin_cache)
        self.resolver.load()
        for getter in self.resolver.getters:
            self.cm.run_config.add_resource_getter(getter)

    def _get_unique_filepath(self, filename):
        """Return a collision-free path for *filename* inside the output
        directory, shifting an existing file to a '-1' suffix on the first
        collision so the numbering stays consistent."""
        filepath = os.path.join(self.output_directory, filename)
        rest, ext = os.path.splitext(filepath)
        i = 1
        new_filepath = '{}-{}{}'.format(rest, i, ext)

        if not os.path.exists(filepath) and not os.path.exists(new_filepath):
            return filepath
        elif not os.path.exists(new_filepath):
            # new_filepath does not exist, therefore filepath must exist:
            # this is the first collision
            shutil.move(filepath, new_filepath)

        while os.path.exists(new_filepath):
            i += 1
            new_filepath = '{}-{}{}'.format(rest, i, ext)
        return new_filepath
Пример #10
0
 def _load_resource_getters(self):
     """Set up resource resolution: create a resolver from the plugin
     cache, load it, and hand each of its getters to the run config."""
     self.logger.debug('Loading resource discoverers')
     resolver = ResourceResolver(self.cm.plugin_cache)
     self.resolver = resolver
     resolver.load()
     for g in resolver.getters:
         self.cm.run_config.add_resource_getter(g)