def cancel(self):
    with self.__cancel_lock:
        logger.info("Cancel build")
        self.__cancelled = True
        if self.__container_logs:
            logger.info("Close logs")
            self.__container_logs.close()
def update_build(build_id, body=None):
    if connexion.request.is_json:
        body = models.BuildData.from_dict(connexion.request.get_json())  # noqa: E501
    with database.session_scope() as session:
        record = session.query(database.Build).filter_by(id=build_id).first()
        if not record:
            abort(404)
        try:
            record.status = build_status_table[body.data.attributes.status]
            if record.status == database.BuildStatus.new:
                record.log.logs = ''
                session.commit()
                logger.info('Trigger linux agent: process builds')
                try:
                    linux_agent.process_builds()
                except MaxRetryError:
                    logger.error("Failed to trigger Linux agent")
                logger.info('Trigger windows agent: process builds')
                try:
                    windows_agent.process_builds()
                except MaxRetryError:
                    logger.error("Failed to trigger Windows agent")
        except KeyError:
            abort(400)
        return models.BuildData(data=__create_build(record))
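# update_build() above and several functions below rely on database.session_scope(), which is
# defined outside this section. The sketch below shows one common way such a context manager is
# built on SQLAlchemy; the `Session` sessionmaker name is an assumption, not taken from the source.
from contextlib import contextmanager

@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations (sketch)."""
    session = Session()  # assumed SQLAlchemy sessionmaker, not part of this section
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()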
def _trigger_builds_for_package(session: database.Session, package: database.Package):
    # Get all failed builds which are waiting for a package of the same recipe revision. In these cases the
    # package ID should match exactly.
    same_recipe_revision = session.query(database.Build).\
        filter(database.Build.status == database.BuildStatus.error).\
        filter(database.missing_package.columns['build_id'] == database.Build.id).\
        filter(database.missing_package.columns['package_id'] == package.id).\
        filter(database.Build.commit_id == database.Commit.id).\
        filter(database.Commit.status == database.CommitStatus.building).\
        all()

    # Get all failed builds which are waiting for a package of the same recipe but a different recipe revision.
    # In these cases a build is triggered regardless of the exact package ID (because the package ID might be
    # computed differently for a different recipe revision).
    different_recipe_revision = session.query(database.Build).\
        filter(database.Build.status == database.BuildStatus.error).\
        filter(database.missing_package.columns['build_id'] == database.Build.id).\
        filter(database.missing_package.columns['package_id'] == database.Package.id).\
        filter(database.Build.commit_id == database.Commit.id).\
        filter(database.Commit.status == database.CommitStatus.building).\
        filter(database.Package.recipe_revision_id == database.RecipeRevision.id).\
        filter(database.RecipeRevision.revision != package.recipe_revision.revision).\
        filter(database.RecipeRevision.recipe_id == package.recipe_revision.recipe.id).\
        all()

    # re-trigger these builds
    builds = same_recipe_revision + different_recipe_revision
    for build in builds:
        logger.info("Set status of build '%d' to 'new'", build.id)
        build.status = database.BuildStatus.new

    logger.debug("Trigger builds for package '%s' (ID: '%s')", package.id, package.package_id)
    return builds
def __cancel_stopping_build(self, builder) -> bool:
    with database.session_scope() as session:
        build = session.query(database.Build) \
            .filter_by(id=self.__build_id, status=database.BuildStatus.stopping) \
            .first()
        if not build:
            return False
        logger.info("Cancel build '%d'", self.__build_id)
        builder.cancel()
        logger.info("Set status of build '%d' to 'stopped'", self.__build_id)
        build.status = database.BuildStatus.stopped
        self.__build_id = None
        return True
def __exit__(self, type, value, traceback):
    if not self.__container:
        return
    try:
        logger.info("Stop docker container '%s'", self.__container.short_id)
        self.__container.stop()
    except docker.errors.APIError:
        pass
    try:
        logger.info("Remove docker container '%s'", self.__container.short_id)
        self.__container.remove()
    except docker.errors.APIError:
        pass
def _trigger_builds_for_recipe(session: database.Session, recipe: database.Recipe):
    # get all failed builds which are waiting for this recipe
    builds = session.query(database.Build).\
        filter(database.Build.status == database.BuildStatus.error).\
        filter(database.missing_recipe.columns['build_id'] == database.Build.id).\
        filter(database.missing_recipe.columns['recipe_id'] == recipe.id).\
        filter(database.Build.commit_id == database.Commit.id).\
        filter(database.Commit.status == database.CommitStatus.building).\
        all()

    # re-trigger these builds
    for build in builds:
        logger.info("Set status of build '%d' to 'new'", build.id)
        build.status = database.BuildStatus.new

    logger.debug("Trigger builds for recipe '%s' ('%s/%s@%s/%s')", recipe.id, recipe.name,
                 recipe.version, recipe.user, recipe.channel)
    return builds
def process_success(build_id, build_output) -> dict:
    result = dict()
    try:
        data = json.loads(build_output["create"])
    except KeyError:
        logger.error("Failed to obtain JSON output of the Conan create stage for build '%d'", build_id)
        return result

    with database.session_scope() as session:
        build = session.query(database.Build).filter_by(id=build_id).first()
        build.package = None
        build.missing_recipes = []
        build.missing_packages = []
        for recipe_compound in data["installed"]:
            recipe_data = recipe_compound["recipe"]
            if recipe_data["dependency"]:
                continue
            recipe_revision = _process_recipe_revision(session, recipe_data, build.profile.ecosystem)
            if not recipe_revision:
                continue
            for package_data in recipe_compound["packages"]:
                package = _process_package(session, package_data, recipe_revision)
                if not package:
                    continue
                build.package = package
                if _trigger_builds_for_package(session, package):
                    result['new_builds'] = True
            if _trigger_builds_for_recipe(session, recipe_revision.recipe):
                result['new_builds'] = True

    logger.info("Updated database for the successful build '%d'", build_id)
    return result
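# For orientation: process_success() above and process_failure() below walk the JSON document
# written by the Conan create stage. The literal below is a heavily trimmed, illustrative example
# of that structure; the concrete values are placeholders, and only the keys accessed in this
# section are shown. Additional keys consumed by the _process_recipe* helpers are omitted.
example_create_output = {
    "error": True,                            # overall result of the create run
    "installed": [
        {
            "recipe": {
                "dependency": True,           # False for the recipe that was built itself
                "error": {"type": "missing"}  # present when the recipe could not be resolved
            },
            "packages": [
                {
                    "error": {"type": "missing"}  # present when the binary package is missing
                }
            ]
        }
    ]
}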
def run(self):
    with self.__cancel_lock:
        if self.__cancelled:
            logger.info("Build was cancelled")
            return
        logger.info("Start build in container '{0}'".format(self.__container.short_id))
        self.__container.start()
        self.__container_logs = self.__container.logs(stream=True, follow=True)

    for byte_data in self.__container_logs:
        line = byte_data.decode("utf-8").strip('\n\r')
        self.__logs.put(line)

    with self.__cancel_lock:
        self.__container_logs = None
        if self.__cancelled:
            logger.info("Build was cancelled")
            return

    result = self.__container.wait()

    try:
        data, _ = self.__container.get_archive(self.build_output_dir)
        self.build_output = extract_output_tar(data)
    except docker.errors.APIError:
        logger.error("Failed to obtain build output from container '%s'", self.__container.short_id)

    if result.get("StatusCode"):
        raise Exception("Build in container '{0}' failed with status '{1}'".format(
            self.__container.short_id, result.get("StatusCode")))
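# run() above hands the raw tar stream returned by get_archive() to extract_output_tar(), which is
# not part of this section. A minimal sketch of such a helper, assuming the build scripts drop one
# text file per stage (e.g. "create.json") into the output directory and that the result is keyed
# by the file name without its extension, could look like this:
import io
import os
import tarfile

def extract_output_tar(data) -> dict:
    """Collect a tar byte stream and return {stage name: text content} (sketch)."""
    buffer = io.BytesIO()
    for chunk in data:          # get_archive() yields the archive in chunks
        buffer.write(chunk)
    buffer.seek(0)
    output = dict()
    with tarfile.open(fileobj=buffer) as tar:
        for member in tar.getmembers():
            if not member.isfile():
                continue
            name, _ = os.path.splitext(os.path.basename(member.name))
            output[name] = tar.extractfile(member).read().decode("utf-8")
    return output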
def setup(self, parameters):
    logger.info("Setup docker container")
    self.__container = self.__client.containers.create(image=self.__image, command=self.build_command)
    logger.info("Created docker container '%s'", self.__container.short_id)

    config_url = "{0} --type=git".format(parameters["conan_config_url"])
    config_branch = "--args \"-b {0}\"".format(parameters["conan_config_branch"]) \
        if parameters["conan_config_branch"] else ""
    config_path = "-sf {0}".format(parameters["conan_config_path"]) \
        if parameters["conan_config_path"] else ""
    patched_parameters = {
        **parameters,
        "conan_config_args": " ".join([config_url, config_branch, config_path]),
        "build_package_dir": self.build_package_dir,
        "escaped_build_package_dir": self.escaped_build_package_dir,
        "build_output_dir": self.build_output_dir
    }

    build_tar = create_build_tar(self.script_template, patched_parameters)
    result = self.__container.put_archive(self.root_dir, data=build_tar)
    if not result:
        raise Exception("Failed to copy build files to container '{0}'".format(self.__container.short_id))
    logger.info("Copied build files to container '%s'", self.__container.short_id)
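# setup() above copies the rendered build scripts into the container with put_archive(), which
# expects a tar archive as bytes. create_build_tar() is defined elsewhere; a rough sketch, assuming
# script_template already holds the template text, that string.Template placeholders are used, and
# that the script is packed under the hypothetical name "build.sh", might look like this:
import io
import tarfile
from string import Template

def create_build_tar(script_template: str, parameters: dict) -> bytes:
    """Render the build script template and pack it into an in-memory tar archive (sketch)."""
    script = Template(script_template).substitute(parameters)
    data = script.encode("utf-8")
    buffer = io.BytesIO()
    with tarfile.open(fileobj=buffer, mode="w") as tar:
        info = tarfile.TarInfo(name="build.sh")  # hypothetical file name
        info.size = len(data)
        info.mode = 0o755
        tar.addfile(info, io.BytesIO(data))
    return buffer.getvalue()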
def pull(self, parameters):
    m = re.match(docker_image_pattern, self.__image)
    if not m:
        raise Exception("The image '{0}' is not a valid "
                        "docker image name".format(self.__image))

    tag = m.group(4)
    repository = m.group(1)
    if tag == "local":
        logger.info("Do not pull local image '%s'", self.__image)
        return

    auth_config = None
    if parameters['docker_user']:
        auth_config = {
            "username": parameters['docker_user'],
            "password": parameters['docker_password']
        }

    logger.info("Pull docker image '%s'", self.__image)
    self.__client.images.pull(repository=repository, tag=tag, auth_config=auth_config)
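# pull() above relies on a module-level docker_image_pattern that is not part of this section.
# Any pattern works as long as group 1 captures the repository (including an optional registry
# prefix) and group 4 captures the tag, matching the group indices used above. A hypothetical
# example, not taken from the source:
docker_image_pattern = r"(([a-z0-9.\-]+(:\d+)?/)?[a-z0-9._\-/]+):([a-zA-Z0-9._\-]+)$"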
def process_failure(build_id, build_output) -> dict:
    result = dict()
    try:
        data = json.loads(build_output["create"])
    except KeyError:
        logger.info("Failed build contains no JSON output of the Conan create stage")
        return result

    if not data["error"]:
        logger.info("Conan create for failed build '%d' was successful, no missing dependencies", build_id)

    with database.session_scope() as session:
        build = session.query(database.Build).filter_by(id=build_id).first()
        build.package = None
        build.missing_recipes = []
        build.missing_packages = []
        for recipe_compound in data["installed"]:
            recipe_data = recipe_compound["recipe"]
            if not recipe_data["dependency"]:
                continue
            if recipe_data["error"] and recipe_data["error"]["type"] == "missing":
                recipe = _process_recipe(session, recipe_data, build.profile.ecosystem)
                build.missing_recipes.append(recipe)
                continue
            recipe_revision = _process_recipe_revision(session, recipe_data, build.profile.ecosystem)
            if not recipe_revision:
                continue
            for package_data in recipe_compound["packages"]:
                if package_data["error"] and package_data["error"]["type"] == "missing":
                    package = _process_package(session, package_data, recipe_revision)
                    build.missing_packages.append(package)

    logger.info("Updated database for the failed build '%d'", build_id)
    return result
async def __process_commits(self):
    logger.info("Start processing commits")
    new_commits = False
    with database.session_scope() as session:
        commits = session.query(database.Commit).filter_by(status=database.CommitStatus.new)
        profiles = session.query(database.Profile).all()
        for commit in commits:
            logger.info("Process commit '%s' of repo '%s'", commit.sha[:7], commit.repo.url)
            exclude_labels = {label.value for label in commit.repo.exclude}
            for profile in profiles:
                labels = {label.value for label in profile.labels}
                if not labels.isdisjoint(exclude_labels):
                    logger.info("Exclude build for '%s' with profile '%s'", commit.sha[:7], profile.name)
                    continue
                new_commits = True
                logger.info("Schedule build for '%s' with profile '%s'", commit.sha[:7], profile.name)
                build = database.Build()
                build.profile = profile
                build.commit = commit
                build.status = database.BuildStatus.new
                build.log = database.Log()
                build.log.logs = ''
                session.add(build)
            logger.info("Set commit '%s' to 'building'", commit.sha[:7])
            commit.status = database.CommitStatus.building

    if new_commits:
        logger.info("Finish processing commits with *new* builds")
    else:
        logger.info("Finish processing commits with *no* builds")

    with database.session_scope() as session:
        num_new_builds = session.query(database.Build).filter_by(status=database.BuildStatus.new).count()

    logger.info("Currently %d new builds exist", num_new_builds)
    if num_new_builds == 0:
        return new_commits

    logger.info('Trigger linux agent: process builds')
    try:
        self.__linux_agent.process_builds()
    except (ApiException, MaxRetryError):
        logger.error("Failed to trigger Linux agent")

    logger.info('Trigger windows agent: process builds')
    try:
        self.__windows_agent.process_builds()
    except (ApiException, MaxRetryError):
        logger.error("Failed to trigger Windows agent")

    return new_commits
async def __process_builds(self):
    # database.populate_database()
    # return
    logger.info("Start processing builds")
    platform = database.Platform.linux if sonja_os == "Linux" else database.Platform.windows
    with database.session_scope() as session:
        build = session \
            .query(database.Build) \
            .join(database.Build.profile) \
            .filter(database.Profile.platform == platform,
                    database.Build.status == database.BuildStatus.new) \
            .populate_existing() \
            .with_for_update(skip_locked=True, of=database.Build) \
            .first()
        if not build:
            logger.info("Stop processing builds with *no* builds processed")
            return False

        logger.info("Set status of build '%d' to 'active'", build.id)
        self.__build_id = build.id
        build.status = database.BuildStatus.active
        build.log.logs = ''
        container = build.profile.container
        parameters = {
            "conan_config_url": build.profile.ecosystem.conan_config_url,
            "conan_config_path": build.profile.ecosystem.conan_config_path,
            "conan_config_branch": build.profile.ecosystem.conan_config_branch,
            "conan_remote": build.profile.ecosystem.conan_remote,
            "conan_user": build.profile.ecosystem.conan_user,
            "conan_password": build.profile.ecosystem.conan_password,
            "conan_profile": build.profile.conan_profile,
            "conan_options": " ".join(["-o {0}={1}".format(option.key, option.value)
                                       for option in build.commit.repo.options]),
            "git_url": build.commit.repo.url,
            "git_sha": build.commit.sha,
            "git_credentials": [
                {
                    "url": c.url,
                    "username": c.username,
                    "password": c.password
                } for c in build.profile.ecosystem.credentials
            ],
            "sonja_user": build.profile.ecosystem.user,
            "channel": build.commit.channel.conan_channel,
            "path": "./{0}/{1}".format(build.commit.repo.path, "conanfile.py")
                    if build.commit.repo.path != "" else "./conanfile.py",
            "ssh_key": build.profile.ecosystem.ssh_key,
            "known_hosts": build.profile.ecosystem.known_hosts,
            "docker_user": build.profile.docker_user,
            "docker_password": build.profile.docker_password,
            "mtu": os.environ.get("SONJA_MTU", "1500")
        }

    try:
        with Builder(sonja_os, container) as builder:
            builder_task = asyncio.create_task(_run_build(builder, parameters))
            while True:
                # wait 10 seconds
                done, _ = await asyncio.wait({builder_task}, timeout=10)
                self.__update_logs(builder)

                # if finished exit
                if done:
                    builder_task.result()
                    break

                # check if the build was stopped and cancel it if necessary
                if self.__cancel_stopping_build(builder):
                    return True

            logger.info("Process build output")
            result = manager.process_success(self.__build_id, builder.build_output)
            if result.get("new_builds", False):
                self.__trigger_scheduler()
            logger.info("Set status of build '%d' to 'success'", self.__build_id)
            self.__set_build_status(database.BuildStatus.success)
            self.__build_id = None
    except Exception as e:
        logger.info(e)
        manager.process_failure(self.__build_id, builder.build_output)
        logger.info("Set status of build '%d' to 'error'", self.__build_id)
        self.__set_build_status(database.BuildStatus.error)
        self.__build_id = None

    return True
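# __process_builds() above awaits _run_build(), which is not included in this section. Since the
# Builder methods shown earlier (pull, setup, run) are blocking, a plausible sketch runs them in
# the default thread pool so the surrounding coroutine can keep polling logs and cancellation:
import asyncio

async def _run_build(builder, parameters):
    """Run the blocking Builder stages in the default executor (sketch)."""
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, builder.pull, parameters)
    await loop.run_in_executor(None, builder.setup, parameters)
    await loop.run_in_executor(None, builder.run)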
def __trigger_scheduler(self):
    logger.info('Trigger scheduler: process commits')
    try:
        self.__scheduler.process_commits()
    except (ApiException, MaxRetryError):
        logger.error("Failed to trigger scheduler")
def cleanup(self):
    if not self.__build_id:
        return
    logger.info("Set status of build '%d' to 'new'", self.__build_id)
    self.__set_build_status(database.BuildStatus.new)
async def __process_repos(self):
    logger.info("Start crawling")
    loop = asyncio.get_running_loop()

    if not os.path.exists(data_dir):
        os.makedirs(data_dir, exist_ok=True)
        logger.info("Created directory '%s'", data_dir)

    new_commits = False
    with database.session_scope() as session:
        if datetime.datetime.now() >= self.__next_crawl:
            logger.info("Crawl all repos")
            repos = session.query(database.Repo).all()
            self.__next_crawl = datetime.datetime.now() + datetime.timedelta(seconds=CRAWLER_PERIOD_SECONDS)
            self.reschedule_internally(CRAWLER_PERIOD_SECONDS)
        else:
            logger.info("Crawl manually triggered repos")
            repo_ids = [repo for repo in self.__get_repos()]
            repos = session.query(database.Repo).filter(database.Repo.id.in_(repo_ids)).all()

        channels = session.query(database.Channel).all()
        for repo in repos:
            try:
                work_dir = os.path.join(data_dir, str(repo.id))
                controller = RepoController(work_dir)
                if not controller.is_clone_of(repo.url):
                    logger.info("Create repo for URL '%s' in '%s'", repo.url, work_dir)
                    await loop.run_in_executor(None, controller.create_new_repo, repo.url)
                logger.info("Setup SSH in '%s'", work_dir)
                await loop.run_in_executor(None, controller.setup_ssh, repo.ecosystem.ssh_key,
                                           repo.ecosystem.known_hosts)
                logger.info("Setup HTTP credentials in '%s'", work_dir)
                credentials = [{
                    "url": c.url,
                    "username": c.username,
                    "password": c.password
                } for c in repo.ecosystem.credentials]
                await loop.run_in_executor(None, controller.setup_http, credentials)
                logger.info("Fetch repo '%s' for URL '%s'", work_dir, repo.url)
                await loop.run_in_executor(None, controller.fetch)

                branches = controller.get_remote_branches()
                for channel in channels:
                    for branch in branches:
                        if not re.fullmatch(channel.branch, branch):
                            continue

                        logger.info("Branch '%s' matches '%s'", branch, channel.branch)
                        logger.info("Checkout branch '%s'", branch)
                        controller.checkout(branch)
                        sha = controller.get_sha()
                        commits = session.query(database.Commit).filter_by(repo=repo, sha=sha, channel=channel)

                        # continue if this commit has already been stored
                        if list(commits):
                            logger.info("Commit '%s' exists", sha[:7])
                            continue

                        logger.info("Add commit '%s'", sha[:7])
                        commit = database.Commit()
                        commit.sha = sha
                        commit.message = controller.get_message()
                        commit.user_name = controller.get_user_name()
                        commit.user_email = controller.get_user_email()
                        commit.repo = repo
                        commit.channel = channel
                        commit.status = database.CommitStatus.new
                        session.add(commit)
                        new_commits = True

                        old_commits = session.query(database.Commit).filter(
                            database.Commit.repo == repo,
                            database.Commit.channel == channel,
                            database.Commit.sha != sha,
                            database.Commit.status != database.CommitStatus.old)
                        for c in old_commits:
                            logger.info("Set status of '%s' to 'old'", c.sha[:7])
                            c.status = database.CommitStatus.old
            except git.exc.GitError as e:
                logger.error("Failed to process repo '%s' with message '%s'", repo.url, e)

    if new_commits:
        logger.info("Finish crawling with *new* commits")
        logger.info('Trigger scheduler: process commits')
        try:
            self.__scheduler.process_commits()
        except (ApiException, MaxRetryError):
            logger.error("Failed to trigger scheduler")
    else:
        logger.info("Finish crawling with *no* new commits")