def run(self) -> 'Status':
    """Run all runnable stages on a thread pool until the pipeline finishes.

    Repeatedly asks ``runnable_stages()`` for work and dispatches each stage
    to the pool; ``runnable_stages()`` raises StopIteration when no stage is
    created/starting/running any more, which ends the loop.

    :returns: overall pipeline status — ``Status.error`` or ``Status.failed``
        if any stage produced that result, otherwise ``Status.passed``.
    """
    self.state = Status.running
    pool = ThreadPool(self.num_threads)
    pool_results = []
    try:
        while True:
            try:
                stages = self.runnable_stages()
            except StopIteration:
                # Sentinel from runnable_stages(): nothing left to schedule.
                break
            for stage in stages:
                pool_results.append(
                    pool.apply_async(self._run_stage, (stage, )))
            # Poll interval between scheduling passes.
            time.sleep(1)
    finally:
        # Always release the worker threads, even if scheduling blows up.
        pool.close()
        pool.join()
    results = [r.get() for r in pool_results]
    logger.info(self.status_string)
    # error outranks failed; anything else counts as an overall pass.
    for status in (Status.error, Status.failed):
        if status in results:
            return status
    return Status.passed
def _setup() -> None:
    """Check required external binaries and create the workspace directory.

    Best-effort by design: a missing ``docker``/``git`` binary is logged as
    an error rather than raised, and a pre-existing workspace directory is
    reported as harmless instead of failing.
    """
    for binary in ('docker', 'git'):
        try:
            # Probing by invocation: raises FileNotFoundError if absent.
            _exec([binary])
        except FileNotFoundError:
            logger.error('%s not installed', binary)
    try:
        os.mkdir(DockerContainer.workspace_dir)
    except FileExistsError:
        logger.info(
            'workspace directory already exists at %s - this '
            'is harmless providing it\'s what you wanted',
            DockerContainer.workspace_dir)
def runnable_stages(self) -> List[Stage]:
    """Return the stages that are ready to run right now.

    A ``created`` stage is runnable when it has no requirements, or when
    every requirement has passed.  A stage whose requirements include a
    failed or skipped stage is marked skipped itself.

    :raises StopIteration: when no stage is created/starting/running any
        more — i.e. the whole pipeline has finished.
    """
    active_states = (Status.created, Status.starting, Status.running)
    all_stages = list(self.stages.values())
    if all(s.state not in active_states for s in all_stages):
        # Nothing pending or in flight: signal the caller to stop polling.
        raise StopIteration
    ready = []
    for stage in all_stages:
        if stage.state != Status.created:
            continue
        deps = stage.requires
        if not deps:
            ready.append(stage)
        elif all(dep.state == Status.passed for dep in deps):
            ready.append(stage)
        elif any(dep.state in (Status.failed, Status.skipped) for dep in deps):
            # A prerequisite can never pass; propagate the skip.
            logger.info('skipping %s', stage.name)
            stage.state = Status.skipped
    return ready
def start(host, port, providers: List[WebhookProviders],
          sqlalchemy_args: dict = None):
    """Build the Flask webhook app, register providers and serve forever.

    :param host: interface to bind the HTTP server to
    :param port: TCP port to listen on
    :param providers: webhook providers to wire routes for
    :param sqlalchemy_args: optional kwargs forwarded to ``Database``
    """
    logger.info('starting...')
    app = Flask(__name__)
    # Guard the None default so Database(**...) doesn't raise TypeError.
    database = Database(**(sqlalchemy_args or {}))
    for provider in providers:
        logger.info('starting webhook provider for %s', provider.name)
        if provider == WebhookProviders.github:
            make_github_webhook(app, database)

    @app.route('/')
    def root():
        # Minimal liveness endpoint.  The original returned app.run() from
        # inside this handler, which meant the server never actually started.
        return ''

    # Blocks until the server is shut down.
    app.run(host=host, port=port)
def __init__(self, in_config=None):
    """Initialize the scheduler: database, config, queue and worker pool.

    :param in_config: optional config overrides; expected to carry a
        'sqlalchemy_args' dict which is forwarded to ``Database``.  The
        original crashed when this was left at its ``None`` default and
        also mutated the caller's dict via ``pop`` — both fixed here.
    """
    logger.info('initializing')
    # Copy so we never mutate the caller's dict, and tolerate None.
    in_config = dict(in_config) if in_config else {}
    # NOTE(review): assumes Database() is valid with no kwargs when
    # 'sqlalchemy_args' is absent — confirm against Database's signature.
    self.database = Database(**in_config.pop('sqlalchemy_args', {}))
    self.config = dict(db_filename='/tmp/zeus-ci.db',
                       runner_threads=4,
                       concurrent_builds=4,
                       # run() sleeps on this key; without a default it was
                       # a guaranteed KeyError unless the caller supplied it.
                       build_poll_sec=10)
    if in_config:
        self.config.update(in_config)
    logger.debug('using config %s', self.config)
    self.build_queue = multiprocessing.Queue()
    logger.info('spinning up build process pool')
    # Each pool worker runs _run_from_queue, consuming build ids from the
    # shared queue.
    self.build_pool = multiprocessing.Pool(
        self.config['concurrent_builds'],
        self._run_from_queue, (self.build_queue, ))
def run(self):
    """Main scheduler loop: poll the database and enqueue runnable builds.

    Blocks until KeyboardInterrupt.  On exit (clean or not) it sends one
    ``None`` poison pill per worker so ``_run_from_queue`` processes stop,
    then closes and joins the pool.
    """
    try:
        with self.database.get_session() as session:
            logger.info('Entering main loop')
            while True:
                # Sleep first: the original `continue`d on a non-empty
                # queue *before* sleeping, busy-spinning a CPU while the
                # workers drained the backlog.
                time.sleep(self.config['build_poll_sec'])
                if not self.build_queue.empty():
                    # Workers are still busy with the previous batch.
                    continue
                runnable_builds = self._runnable_builds(session)
                if runnable_builds:
                    logger.debug('runnable_builds: %s', runnable_builds)
                    # Oldest first — _runnable_builds appears to return
                    # newest-first; TODO confirm ordering contract.
                    for build in reversed(runnable_builds):
                        self.build_queue.put(build.id)
    except KeyboardInterrupt:
        logger.info('received exit command, closing build processes.')
    finally:
        # One None per worker tells _run_from_queue to exit.
        for _ in range(self.config['concurrent_builds']):
            self.build_queue.put(None)
        self.build_pool.close()
        self.build_pool.join()
def run(self) -> 'Status':
    """Run this job's steps inside a Docker container.

    Spins up a DockerContainer for the job, builds the Step objects from
    the spec, then either skips the job (branch/tag run-conditions not
    matched) or executes each step in order.  ``self.state`` tracks the
    outcome and is also returned.
    """
    with DockerContainer(self.name,
                         self.spec.get('docker')[0].get('image'),
                         self.exec_uuid, self.clone_url,
                         self.working_directory, self.env_vars,
                         ref=self.ref) as docker:
        self.steps = [
            Step.factory(docker, step) for step in self.spec.get('steps')
        ]
        # Run-condition filtering: a non-matching branch or tag regex
        # marks the job skipped.  Note the container is already running
        # at this point, so a skipped job still pays the container cost.
        skip = False
        if self.run_condition.get('branch'):
            if not re.search(self.run_condition['branch'], self.branch):
                logger.debug(
                    'skipping %s because %s doesnt match condition %s',
                    self.name, self.branch, self.run_condition['branch'])
                self.state = Status.skipped
                skip = True
        if self.run_condition.get('tag'):
            if not re.search(self.run_condition['tag'], self.tag):
                logger.debug(
                    'skipping %s because %s doesnt match condition %s',
                    self.name, self.tag, self.run_condition['tag'])
                self.state = Status.skipped
                skip = True
        if not skip:
            self.state = Status.running
            try:
                logger.info('---- Running Job: %s ----', self.name)
                logger.debug('exec_uuid: %s, env_vars: %s',
                             self.exec_uuid, self.env_vars)
                for step in self.steps:
                    logger.info('Executing Step: %s', step)
                    output = step.run()
                    # NOTE(review): a falsy `output` is treated as failure
                    # yet `output.stderr` is read from it — presumably
                    # step.run() returns a result object whose __bool__
                    # reflects success; confirm it is never None here.
                    if not output:
                        logger.error(
                            f'Job Failed[{self.name}]\n{output.stderr}')
                        self.state = Status.failed
                        return self.state
                logger.info('Job (%s) Passed in %.2f seconds',
                            self.name, docker.duration)
            except Exception as e:
                # Record the failure before propagating.
                # NOTE(review): `raise e` rather than bare `raise` —
                # behavior-identical here but rebinds the traceback.
                self.state = Status.failed
                raise e
            self.state = Status.passed
        # Skipped jobs fall through and return Status.skipped.
        return self.state