def main(args):
    """Build a linter job dict from *args* and trigger the Dockerfile linter.

    :param args: 12-tuple of (appid, jobid, repo_url, repo_branch,
        repo_build_path, target_file, notify_email, desired_tag,
        depends_on, test_tag, jenkins_build_number, build_context)

    Exits the process with status 1 if triggering the linter fails.
    """
    # create new job
    job = create_new_job()
    (appid, jobid, repo_url, repo_branch, repo_build_path, target_file,
     notify_email, desired_tag, depends_on, test_tag,
     jenkins_build_number, build_context) = args

    if repo_build_path == "/":
        pass
    # having '/' in a value used in os.path.join generates unexpected paths
    elif repo_build_path.startswith("/"):
        repo_build_path = repo_build_path[1:]
    if target_file.startswith("/"):
        target_file = target_file[1:]

    # populate job's keys with appropriate values
    job["uuid"] = str(uuid.uuid4())
    job["appid"] = appid
    job["notify_email"] = notify_email
    job["logs_dir"] = '/srv/pipeline-logs/{}'.format(test_tag)
    job["action"] = "start_linter"
    job["jobid"] = jobid
    job["repo_url"] = repo_url
    job["repo_branch"] = repo_branch
    job["repo_build_path"] = repo_build_path
    job["target_file"] = target_file
    job["desired_tag"] = desired_tag
    job["depends_on"] = depends_on
    job["test_tag"] = test_tag
    job["jenkins_build_number"] = jenkins_build_number

    project_name = get_project_name(job)
    job["project_name"] = project_name
    # namespace / hash key / job name are all derived from the project name
    job["namespace"] = job["project_name"]
    job["project_hash_key"] = get_job_hash(job["project_name"])
    job["job_name"] = job["project_name"]
    job['image_name'] = "{}/{}:{}".format(
        job['appid'], job['jobid'], job['desired_tag'])
    job['output_image'] = \
        "registry.centos.org/{}/{}:{}".format(appid, jobid, desired_tag)
    job['beanstalk_server'] = settings.BEANSTALKD_HOST
    job['image_under_test'] = "{}/{}/{}:{}".format(
        settings.REGISTRY_ENDPOINT[0], appid, jobid, test_tag)
    job['build_context'] = build_context

    # Create a build entry for project to track build
    project, created = Project.objects.get_or_create(name=project_name)
    Build.objects.create(uuid=job['uuid'],
                         project=project,
                         status='queued',
                         start_time=timezone.now())
    try:
        trigger_dockerfile_linter(job)
    except Exception as e:
        # FIX: report the failure cause before exiting; a bare
        # sys.exit(1) hid why the linter trigger failed
        print("Failed to trigger dockerfile linter: {}".format(e),
              file=sys.stderr)
        sys.exit(1)
def create_project(queue, job, logger):
    """Create an OpenShift project for *job* and queue a build request.

    Waits (up to 10 retries, 50s apart) for any previous project with the
    same hashed name to disappear before creating a fresh one, uploads the
    build template, then puts the job on the 'master_tube' beanstalkd tube
    with action "start_build".

    :param queue: beanstalkd queue on which job is to be put
    :param job: job parameters to fill the template with
    :param logger: logger to be used to log things
    :type job: dict

    Returns early (without queueing) on any OpenShift failure.
    """
    job_name = job.get("job_name")
    project_name_hash = utils.get_job_hash(job_name)

    openshift = Openshift(logger=logger)
    try:
        openshift.login("test-admin", "test")
        max_retry = 10
        retry = 0
        # waiting for delivery get completed before next job for the same
        # project overrides the job parameters
        while openshift.get_project(project_name_hash) and (retry < max_retry):
            time.sleep(50)
            retry += 1
        if openshift.get_project(project_name_hash):
            logger.error(
                "OpenShift is not able to delete project: {}".format(job_name))
            # FIX: a bare `raise` here had no active exception, producing an
            # unrelated RuntimeError that escaped the OpenshiftError handler.
            # Bail out like the other error paths instead.
            return
        else:
            openshift.create(project_name_hash)
    except OpenshiftError:
        # best-effort cleanup of the half-created project
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as e:
            logger.error(e)
        return

    try:
        template_path = os.path.join(os.path.dirname(__file__),
                                     'template.json')
        openshift.upload_template(
            project_name_hash, template_path, {
                'SOURCE_REPOSITORY_URL': job.get("repo_url"),
                'REPO_BRANCH': job.get("repo_branch"),
                'APPID': job.get("appid"),
                'JOBID': job.get("jobid"),
                'REPO_BUILD_PATH': job.get("repo_build_path"),
                'TARGET_FILE': job.get("target_file"),
                'NOTIFY_EMAIL': job.get("notify_email"),
                'DESIRED_TAG': job.get("desired_tag"),
                'TEST_TAG': job.get("test_tag")
            })
    except OpenshiftError:
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as e:
            logger.error(e)
        return

    job["action"] = "start_build"
    queue.put(json.dumps(job), 'master_tube')
def save(self, *args, **kwargs):
    """Persist the project, refreshing ``uuid`` from the name's hash first."""
    name_hash = get_job_hash(self.name)
    self.uuid = name_hash
    return super(Project, self).save(*args, **kwargs)
def create_project(queue, job, logger):
    """
    Creates a new project in OpenShift.

    This function expects a queue on which the job is to be put,
    the job itself and logger to be used.

    :param queue: beanstalkd queue on which job is to be put
    :param job: job to be put on the beanstalkd queue
    :param logger: logger to be used to log things
    :type queue: str
    :type job: dict
    :logger: logging.logger

    :return: True when the project and template are in place, False on
             any OpenShift failure (after a best-effort cleanup).
    """
    job_name = job.get("job_name")
    project_name_hash = utils.get_job_hash(job_name)
    openshift = Openshift(logger=logger)

    def _cleanup_quietly():
        # Best-effort removal of a half-created project; failures are
        # logged rather than raised.
        try:
            openshift.delete(project_name_hash)
        except OpenshiftError as err:
            logger.error(err)

    try:
        openshift.login("test-admin", "test")
        # waiting for delivery get completed before next job for the same
        # project overrides the job parameters
        max_retry = 10
        retry = 0
        while openshift.get_project(project_name_hash) and (retry < max_retry):
            time.sleep(50)
            retry += 1
        if openshift.get_project(project_name_hash):
            logger.error(
                "OpenShift is not able to delete project: {}".format(job_name))
            return False
        openshift.create(project_name_hash)
    except OpenshiftError:
        _cleanup_quietly()
        return False

    try:
        template_path = os.path.join(os.path.dirname(__file__),
                                     'template.json')
        openshift.upload_template(
            project_name_hash, template_path, {
                'SOURCE_REPOSITORY_URL': job.get("repo_url"),
                'REPO_BRANCH': job.get("repo_branch"),
                'APPID': job.get("appid"),
                'JOBID': job.get("jobid"),
                'REPO_BUILD_PATH': job.get("repo_build_path"),
                'TARGET_FILE': job.get("target_file"),
                'NOTIFY_EMAIL': job.get("notify_email"),
                'DESIRED_TAG': job.get("desired_tag"),
                'TEST_TAG': job.get("test_tag")
            })
    except OpenshiftError:
        _cleanup_quietly()
        return False

    return True
def main(args):
    """Build a linter job dict from *args*, record the project's target-file
    link, create a queued Build entry, and trigger the Dockerfile linter.

    :param args: 12-tuple of (appid, jobid, repo_url, repo_branch,
        repo_build_path, target_file, notify_email, desired_tag,
        depends_on, test_tag, jenkins_build_number, build_context)

    Exits the process with status 1 if triggering the linter fails.
    """
    # create new job
    job = create_new_job()
    (appid, jobid, repo_url, repo_branch, repo_build_path, target_file,
     notify_email, desired_tag, depends_on, test_tag,
     jenkins_build_number, build_context) = args

    if repo_build_path == "/":
        pass
    # having '/' in a value used in os.path.join generates unexpected paths
    elif repo_build_path.startswith("/"):
        repo_build_path = repo_build_path[1:]
    if target_file.startswith("/"):
        target_file = target_file[1:]

    # populate job's keys with appropriate values
    job["uuid"] = str(uuid.uuid4())
    job["appid"] = appid
    job["notify_email"] = notify_email
    job["logs_dir"] = '/srv/pipeline-logs/{}'.format(test_tag)
    job["action"] = "start_linter"
    job["jobid"] = jobid
    job["repo_url"] = repo_url
    job["repo_branch"] = repo_branch
    job["repo_build_path"] = repo_build_path
    job["target_file"] = target_file
    job["desired_tag"] = desired_tag
    job["depends_on"] = depends_on
    job["test_tag"] = test_tag
    job["jenkins_build_number"] = jenkins_build_number

    project_name = get_project_name(job)
    job["project_name"] = project_name
    # namespace / hash key / job name are all derived from the project name
    job["namespace"] = job["project_name"]
    job["project_hash_key"] = get_job_hash(job["project_name"])
    job["job_name"] = job["project_name"]
    job['image_name'] = "{}/{}:{}".format(
        job['appid'], job['jobid'], job['desired_tag'])
    job['output_image'] = \
        "registry.centos.org/{}/{}:{}".format(appid, jobid, desired_tag)
    job['beanstalk_server'] = settings.BEANSTALKD_HOST
    job['image_under_test'] = "{}/{}/{}:{}".format(
        settings.REGISTRY_ENDPOINT[0], appid, jobid, test_tag)
    job['build_context'] = build_context

    # Create a build entry for project to track build
    project, created = Project.objects.get_or_create(
        name=project_name
    )
    project.target_file_link = form_targetfile_link(
        repo_url,
        repo_build_path,
        repo_branch,
        target_file
    )
    project.save()
    Build.objects.create(uuid=job['uuid'],
                         project=project,
                         status='queued',
                         start_time=timezone.now())
    try:
        trigger_dockerfile_linter(job)
    except Exception as e:
        # FIX: report the failure cause before exiting; a bare
        # sys.exit(1) hid why the linter trigger failed
        print("Failed to trigger dockerfile linter: {}".format(e),
              file=sys.stderr)
        sys.exit(1)