def launch(self, path: str) -> None:
    """Launch one fuzzing job for every configured target found under *path*.

    For each entry in the module-level ``TARGETS`` table that matches the
    instance's ``self.targets`` / ``self.os`` filters, this builds the
    setup/exe/inputs artifacts from the on-disk layout ``path/<target>/...``
    and launches a job via the service template matching the target's
    ``TemplateType``. Bookkeeping for later polling is recorded in
    ``self.jobs``, ``self.tasks``, ``self.containers`` and
    ``self.target_jobs``.

    :param path: root directory containing one subdirectory per target
        (each holding the target executable and optional inputs).
    :raises NotImplementedError: if a target's template type is not one of
        libfuzzer / radamsa / afl.
    :raises Exception: if template launch did not produce a job object.
    """
    for target, config in TARGETS.items():
        # Honor the instance-level target/OS filters.
        if target not in self.targets:
            continue
        if config.os not in self.os:
            continue

        self.logger.info("launching: %s", target)

        # setup dir is the whole target directory; only some targets use one.
        setup = Directory(os.path.join(
            path, target)) if config.use_setup else None
        target_exe = File(os.path.join(path, target, config.target_exe))
        # inputs is optional — some templates seed from an inputs directory.
        inputs = (Directory(os.path.join(path, target, config.inputs))
                  if config.inputs else None)

        job: Optional[Job] = None
        if config.template == TemplateType.libfuzzer:
            # NOTE(review): libfuzzer template takes the pool name
            # positionally, unlike radamsa/afl below — presumably matching
            # each template's signature; confirm against the SDK.
            job = self.of.template.libfuzzer.basic(
                self.project,
                target,
                BUILD,
                self.pools[config.os].name,
                target_exe=target_exe,
                inputs=inputs,
                setup_dir=setup,
                duration=1,
                vm_count=1,
            )
        elif config.template == TemplateType.radamsa:
            job = self.of.template.radamsa.basic(
                self.project,
                target,
                BUILD,
                pool_name=self.pools[config.os].name,
                target_exe=target_exe,
                inputs=inputs,
                setup_dir=setup,
                # config fields may be None; coerce to a definite bool.
                check_asan_log=config.check_asan_log or False,
                disable_check_debugger=config.disable_check_debugger or False,
                duration=1,
                vm_count=1,
            )
        elif config.template == TemplateType.afl:
            job = self.of.template.afl.basic(
                self.project,
                target,
                BUILD,
                pool_name=self.pools[config.os].name,
                target_exe=target_exe,
                inputs=inputs,
                setup_dir=setup,
                duration=1,
                vm_count=1,
            )
        else:
            raise NotImplementedError

        if not job:
            raise Exception("missing job")

        # Record, per job, a ContainerWrapper for every task container whose
        # type appears in the target's wait_for_files table — these are the
        # containers later polled for output files.
        self.containers[job.job_id] = []
        for task in self.of.tasks.list(job_id=job.job_id):
            self.tasks[task.task_id] = job.job_id
            self.containers[job.job_id] += [
                ContainerWrapper(self.of.containers.get(x.name).sas_url)
                for x in task.config.containers
                if x.type in TARGETS[job.config.name].wait_for_files
            ]
        self.jobs[job.job_id] = job
        self.target_jobs[job.job_id] = target
def check_jobs(self, poll: bool = False,
               stop_on_complete_check: bool = False) -> bool:
    """Check the state of all launched integration jobs.

    A job is considered complete once (a) every watched container has
    accumulated at least its expected number of files and (b) none of its
    remaining tasks are in a non-running test state. Failed jobs stop being
    tracked immediately and flip ``self.success`` to False.

    :param poll: if True, repeatedly re-check via ``wait`` until all jobs
        resolve; if False, perform a single check and return.
    :param stop_on_complete_check: if True, stop each job as soon as it is
        seen to be complete.
    :returns: overall success flag (``self.success``); with ``poll=True``
        the value is whatever ``wait`` returns for the polling loop.
    """
    jobs: Dict[UUID, Job] = {x.job_id: x for x in self.get_jobs()}
    job_tasks: Dict[UUID, List[Task]] = {}
    # job_id -> {container name -> (wrapper for polling blobs,
    #                               expected file count)}
    check_containers: Dict[UUID, Dict[Container, Tuple[ContainerWrapper,
                                                       int]]] = {}

    # Build the per-job polling state up front: the tasks to watch and the
    # containers (with expected file counts) to watch for each job.
    for job in jobs.values():
        if job.config.name not in TARGETS:
            self.logger.error("unknown job target: %s", job.config.name)
            continue

        tasks = self.of.jobs.tasks.list(job.job_id)
        job_tasks[job.job_id] = tasks
        check_containers[job.job_id] = {}
        for task in tasks:
            for container in task.config.containers:
                if container.type in TARGETS[
                        job.config.name].wait_for_files:
                    count = TARGETS[job.config.name].wait_for_files[
                        container.type]
                    check_containers[job.job_id][container.name] = (
                        ContainerWrapper(
                            self.of.containers.get(
                                container.name).sas_url),
                        count,
                    )

    self.success = True
    self.logger.info("checking %d jobs", len(jobs))

    # `cleared` guards the one-time blank line printed before the first log
    # message of a polling pass (keeps output readable in poll mode).
    self.cleared = False

    def clear() -> None:
        if not self.cleared:
            self.cleared = True
            if poll:
                print("")

    def check_jobs_impl() -> Tuple[bool, str, bool]:
        # One polling pass. Returns (done, status message, success flag) —
        # presumably the shape `wait` expects; confirm against its caller.
        self.cleared = False
        failed_jobs: Set[UUID] = set()
        job_task_states: Dict[UUID, Set[TaskTestState]] = {}

        # Pass 1: check watched containers; drop any that have reached
        # their expected file count.
        for job_id in check_containers:
            finished_containers: Set[Container] = set()
            for (container_name,
                 container_impl) in check_containers[job_id].items():
                container_client, count = container_impl
                if len(container_client.list_blobs()) >= count:
                    clear()
                    self.logger.info(
                        "found files for %s - %s",
                        jobs[job_id].config.name,
                        container_name,
                    )
                    finished_containers.add(container_name)

            # Delete after iteration — can't mutate the dict mid-loop.
            for container_name in finished_containers:
                del check_containers[job_id][container_name]

        # Pass 2: check task states; drop stopped tasks, record failures.
        scalesets = self.of.scalesets.list()
        for job_id in job_tasks:
            finished_tasks: Set[UUID] = set()
            job_task_states[job_id] = set()

            for task in job_tasks[job_id]:
                if job_id not in jobs:
                    continue

                task_result = self.check_task(jobs[job_id], task,
                                              scalesets)
                if task_result == TaskTestState.failed:
                    self.success = False
                    failed_jobs.add(job_id)
                elif task_result == TaskTestState.stopped:
                    finished_tasks.add(task.task_id)
                else:
                    job_task_states[job_id].add(task_result)
            job_tasks[job_id] = [
                x for x in job_tasks[job_id]
                if x.task_id not in finished_tasks
            ]

        # Pass 3: reconcile per-job completion from the two passes above.
        to_remove: Set[UUID] = set()
        for job in jobs.values():
            # stop tracking failed jobs
            if job.job_id in failed_jobs:
                if job.job_id in check_containers:
                    del check_containers[job.job_id]
                if job.job_id in job_tasks:
                    del job_tasks[job.job_id]
                continue

            # stop checking containers once all the containers for the job
            # have checked out.
            if job.job_id in check_containers:
                if not check_containers[job.job_id]:
                    clear()
                    self.logger.info(
                        "found files in all containers for %s",
                        job.config.name)
                    del check_containers[job.job_id]

            # Once containers are done, stop watching tasks if every
            # remaining task state is `running` (nothing left to wait on).
            if job.job_id not in check_containers:
                if job.job_id in job_task_states:
                    if set([TaskTestState.running
                            ]).issuperset(job_task_states[job.job_id]):
                        del job_tasks[job.job_id]

            if (job.job_id not in job_tasks
                    and job.job_id not in check_containers):
                clear()
                self.logger.info("%s completed", job.config.name)
                to_remove.add(job.job_id)

        for job_id in to_remove:
            if stop_on_complete_check:
                self.stop_job(jobs[job_id])
            del jobs[job_id]

        msg = "waiting on: %s" % ",".join(
            sorted(x.config.name for x in jobs.values()))
        # Keep the one-line status short in poll mode.
        if poll and len(msg) > 80:
            msg = "waiting on %d jobs" % len(jobs)

        if not jobs:
            msg = "done all tasks"

        return (not bool(jobs), msg, self.success)

    if poll:
        return wait(check_jobs_impl)
    else:
        _, msg, result = check_jobs_impl()
        self.logger.info(msg)
        return result