def _record_new_workspace(db, workspace, merge=False):
    """Persist a new workspace document in the database.

    :param db: open database connection the document is stored into
    :param workspace: dict of workspace attributes gathered during setup
    :param merge: when True, also record the source workspace PVC so the
                  merge workflow can locate the originating workspace
    :raises GenericException: (HTTP 500) when storing fails; the workspace
                              PVC created in the previous step is deleted
                              to roll back
    """
    try:
        new_ws_document = Workspace(name=workspace['name'],
                                    clone=workspace['clone_name'],
                                    mount=workspace['clone_mount'],
                                    pipeline=workspace['pipeline'],
                                    username=workspace['username'],
                                    uid=workspace['uid'],
                                    gid=workspace['gid'],
                                    source_pvc=workspace['source_pvc'],
                                    pipeline_pvc=workspace['pipeline_pvc'],
                                    build_name=workspace['build_name'],
                                    pod=workspace['pod'],
                                    pvc=workspace['pvc'],
                                    pv=workspace['pv_name'],
                                    service=workspace['service'],
                                    ide_url=workspace['ide'])
        if merge:
            new_ws_document.source_workspace_pvc = workspace['source_workspace_pvc']
        new_ws_document.store(db)
    # BUG FIX: only http.ResourceConflict was caught before, so any other DB
    # failure skipped the PVC rollback below. Catch all errors (consistent
    # with pipeline_create's rollback handling) and chain the cause.
    except Exception as exc:
        # If DB operation fails, delete workspace PVC created from previous step
        KubernetesAPI.get_instance().delete_pvc(workspace['pvc'])
        raise GenericException(
            500,
            "Error recording new workspace in the DB, please contact your administrator",
            "Database Exception") from exc
def delete_workspace(name):
    """
    Tear down every Kubernetes resource backing the named workspace:
    the IDE service, the pod behind it, and the PVC (Trident removes the
    associated PV and ONTAP clone), then drop the workspace DB document.
    """
    # TODO: Handle exceptions on db connection failure, and failure on each of the kube operations
    try:
        get_db_config()
        db = connect_db()
        ws_doc = Database.get_document_by_name(db, name)
        kube = KubernetesAPI.get_instance()
        # Delete in reverse order of creation: service, pod, then storage
        teardown_steps = (
            (kube.delete_service, ws_doc['service'], "Workspace service deleted"),
            (kube.delete_pod, ws_doc['pod'], "Workspace POD deleted"),
            (kube.delete_pvc, ws_doc['pvc'], "Workspace PVC deleted"),
        )
        for deleter, resource, success_msg in teardown_steps:
            deleter(resource)
            logging.info(success_msg)
        db.delete(ws_doc)
    except Exception:
        logging.error("Unable to delete workspace %s: %s" % (name, traceback.format_exc()))
        raise
def get_services():
    """Get information about all services associated with Build@Scale.

    :return: list of dicts, each with 'name', 'type' and 'url' keys, for
             the SCM, registry, CI (Jenkins) and database (CouchDB) services
    """
    config_document = get_db_config()
    kube = KubernetesAPI.get_instance()
    # (display name, service type, config key holding the Kube service name)
    service_specs = [
        (config_document['scm_type'], 'scm', 'scm_service_name'),
        (config_document['registry_type'], 'registry', 'registry_service_name'),
        ('jenkins', 'ci', 'jenkins_service_name'),
        ('couchdb', 'database', 'database_service_name'),
    ]
    services = []
    for display_name, service_type, config_key in service_specs:
        services.append({
            'name': display_name,
            'type': service_type,
            'url': kube.get_service_url(config_document[config_key])
        })
    # BUG FIX: was logging.error — this is informational output, not an error
    logging.info(services)
    return services
def connect_db():
    """Connect to the database, resolving DATABASE_URL from Kubernetes if unset.

    :return: an open Database connection
    :raises Exception: propagates any connection failure after logging it
    """
    if app.config.get('DATABASE_URL') is None:
        # First call: resolve the database service URL from Kubernetes and cache it
        app.config['DATABASE_URL'] = KubernetesAPI.get_instance().get_service_url(
            service_name=app.config['DATABASE_SERVICE_NAME'])
        logging.info("DATABASE_URL not known, fetching from Kubernetes %s",
                     app.config['DATABASE_URL'])
    try:
        database = Database.connect(app.config['DATABASE_URL'],
                                    app.config['DATABASE_USER'],
                                    app.config['DATABASE_PASS'],
                                    app.config['DATABASE_NAME'])
    except Exception:
        # BUG FIX: was print() — route the failure to the application log,
        # and re-raise with a bare `raise` to preserve the original traceback
        logging.error("Unable to connect to database: %s", traceback.format_exc())
        raise
    return database
def delete_pipeline(name):
    """
    Delete all elements associated with a pipeline (ONTAP volume/Jenkins job).
    With Trident, deleting the PVC mapped to the project is sufficient
    (Trident deletes the backing volume and PV). All build clone PVCs for
    the pipeline are removed first, then the pipeline PVC, their DB
    documents, and finally the Jenkins job.
    (Do not proceed if at least one workspace is tied to a pipeline build.)
    """
    # TODO: Be more specific on what goes in 'try'
    get_db_config()
    db = connect_db()
    # retrieve details for the current pipeline
    pipeline_doc = Database.get_document_by_name(db, name)
    # if there aren't any workspaces, we can safely delete all the build clones
    clone_pvc_names = get_all_build_pvc_for_pipeline(pipeline_doc['pvc'])
    try:
        for clone_pvc in clone_pvc_names:
            try:
                KubernetesAPI.get_instance().delete_pvc(clone_pvc)
            except Exception:
                # best effort: on a re-try after an intermittent failure,
                # the PVC may already be gone
                pass
            # drop the build clone's DB document as well
            doc_name = get_db_name_from_kube_resource(clone_pvc)
            build_doc = Database.get_document_by_name(db, doc_name)
            db.delete(build_doc)
        # all build clones deleted: remove the pipeline PVC and its document
        KubernetesAPI.get_instance().delete_pvc(pipeline_doc['pvc'])
        db.delete(pipeline_doc)
        # finally delete the Jenkins job
        jenkins = connect_jenkins()
        jenkins.delete_job(name)
    except Exception:
        logging.error("Unable to delete pipeline %s: %s" % (name, traceback.format_exc()))
        raise
def _complete_kubernetes_setup_for_workspace(workspace, merge=False):
    """Create the Kubernetes PVC/pod for a workspace and finish its setup.

    Mutates *workspace* in place: fills in 'clone_mount' and 'ide', and
    configures git user.name/user.email inside the new pod (best effort).

    :param workspace: dict of workspace attributes built by the caller
    :param merge: passed through to create_pvc_clone_and_pod for merge workspaces
    :raises GenericException: (HTTP 500) when the PVC/pod cannot be created
    """
    try:
        kube = KubernetesAPI.get_instance()
        kube_pvc_pod_response = kube.create_pvc_clone_and_pod(workspace, merge)
    except Exception:
        logging.error("Unable to create Kubernetes Workspace PVC/Pod: %s" % traceback.format_exc())
        raise GenericException(
            500, "Unable to create Kubernetes Workspace PVC/Pod")
    if not helpers.verify_successful_response(kube_pvc_pod_response):
        logging.error("Unable to create Kubernetes Workspace PVC/Pod: %s" % kube_pvc_pod_response)
        raise GenericException(
            500, "Unable to create Kubernetes Workspace PVC/Pod")
    # workspace['clone_name'] is populated from KubernetesAPI (retrieved from PV-PVC mapping)
    workspace['clone_mount'] = "/mnt/" + workspace['clone_name']
    # Wait for IDE to be ready before returning
    # TODO: Change this to wait and proceed only when service is in Ready state (gets an IP assigned)
    try:
        time.sleep(60)
        workspace['ide'] = kube.get_service_url(workspace['service'])
    # BUG FIX: was a bare `except:` — it would also trap KeyboardInterrupt
    # raised during the 60s sleep
    except Exception:
        workspace['ide'] = "NA"
        logging.warning("WARNING: Unable to retrieve workspace URL")
    # Wait for pod to be ready before executing any commands
    # TODO: Add logic to proceed only when pod status is 'Running'
    # Set git user.email and user.name , we don't care if the command fails
    # NOTE(review): username is read from request.form while the email comes
    # from the workspace dict — confirm both shouldn't come from `workspace`
    git_user_cmd = 'git config --global user.name %s' % request.form['username']
    git_email_cmd = 'git config --global user.email %s' % workspace[
        'user_email']
    try:
        kube.execute_command_in_pod(workspace['pod'], git_user_cmd)
        kube.execute_command_in_pod(workspace['pod'], git_email_cmd)
    except Exception:  # best effort — log and continue (was a bare except)
        logging.warning(
            "WARNING: Unable to configure GIT Username/Email on behalf of user: %s"
            % traceback.format_exc())
def volume_claim_clone():
    """ Create Kube PVC clone
        This method is in place of snapshotting a source volume.
        Volume clones will be used instead of snapshots until Trident
        supports snapshot creation
    ---
    tags:
      - volumeclaim
    parameters:
      - in: body
        name: pvc_clone_name
        required: true
        description: name of the Kube PVC being created (cloned)
        type: string
      - in: body
        name: pvc_source_name
        required: true
        description: name of the Kube PVC that is being cloned from
        type: string
      - in: body
        name: build_status
        required: false
        description: specifies whether this clone is of a successful or failed build
        type: string
    responses:
      200:
        description: PVC Clone was created successfully
    """
    # TODO: document jenkins_build in docstring
    # TODO: do we need volume name?
    required_fields = [
        'pvc_clone_name', 'pvc_source_name', 'build_status', 'jenkins_build',
        'volume_name'
    ]
    _validate_input_form_params(request.form, required_fields)
    config_document = helpers.get_db_config()
    if not config_document:
        raise GenericException(500, GenericException.DB_CONFIG_DOC_NOT_FOUND,
                               "Database Exception")
    build_status = request.form['build_status'] or 'N/A'
    if build_status not in ("passed", "failed", "N/A"):
        raise GenericException(
            406,
            "Invalid build_status type parameter: accepted values - 'passed', 'failed', 'N/A'"
        )
    # TODO: this name should be created in KubernetesAPI, but currently will impact create_pvc_and_pod()
    kube = KubernetesAPI.get_instance()
    clone_pvc = kube.get_kube_resource_name(request.form['pvc_clone_name'],
                                            'pvc')
    status = kube.create_pvc_clone_resource(
        clone=clone_pvc, source=request.form['pvc_source_name'])
    # record snapshot in db
    db_connect = helpers.connect_db()
    if not db_connect:
        raise GenericException(500, GenericException.DB_CONNECTION_ERROR,
                               "Database Exception")
    # TODO: Replace Snapshot doc with Clone document
    # TODO: Do we need volume or pvc_source_name?
    # TODO: Why do we need volume? Also, 'volume' is not the clone volume
    # name but the parent pipeline volume name, used later only for querying.
    # Reflect the key-name appropriately.
    snapshot_doc = Snapshot(name=request.form['pvc_clone_name'],
                            pvc_name=clone_pvc,
                            parent_pipeline_pvc=request.form['pvc_source_name'],
                            volume=request.form['volume_name'],
                            pvc=clone_pvc,
                            jenkins_build=request.form['jenkins_build'],
                            build_status=build_status)
    snapshot_doc.store(db_connect)
    return jsonify(status)
def pipeline_create():
    """ Setup a pipeline for an SCM project with a specific branch
        This endpoint is used by the DevOps admin
        At the end of successful execution, a Jenkins pipeline for the SCM
        project is created with required build parameters
    ---
    tags:
      - pipeline
    parameters:
      - in: path
        name: scm-url
        required: true
        description: SCM url for this project
        type: string
      - in: path
        name: scm-branch
        required: true
        description: SCM branch for this project
        type: string
      - in: path
        name: export-policy
        required: false
        description: export-policy for this project
        type: string
    responses:
      200:
        description: Pipeline has been created successfully
    """
    #####
    # 1. Validate input form parameters
    # 2. Get config document for setting up the pipeline details
    # 3. Gather storage details for creating PVC for this pipeline
    # 4. Create a Kube PVC (Trident creates a PV and an ONTAP volume, maps it to this PVC. We manage only the PVCs)
    # 5. Create Jenkins job
    # 6. Setup Jenkins purge job for this pipeline
    # 7. Record all pipeline details in database
    #####
    # Validate input web form parameters from the application
    _validate_input_form_params(request.form, ['scm-branch', 'scm-url'])
    connect, config = _get_config_from_db()
    # Gather storage details for creating PVC
    scm_project_url = helpers.sanitize_scm_url(request.form['scm-url'])
    if scm_project_url is None:
        raise GenericException(406, "Invalid SCM URL provided")
    # Pipeline name is derived from the repo name and branch, e.g.
    # "pipeline-<repo>-<branch>"
    pipeline = {
        'name': '-'.join([
            'pipeline',
            helpers.extract_name_from_git_url(request.form['scm-url']),
            request.form['scm-branch']
        ]),
        'export_policy': request.form.get(
            'export-policy',
            'default'),  # set default export policy if not specified
        'scm_url': scm_project_url
    }
    # Create PVC. Once we create a Kube PVC, Trident creates an ONTAP volume and a PV for this PVC
    kube = KubernetesAPI.get_instance()
    vol_size = "10000"  # set default vol size to 10Gig, 10000 in MB
    # TODO: Change this to default SC from Kube -- list_all_storage_classes and read annotations to find default
    storage_class = config.get('storage_class')
    if storage_class == '':
        storage_class = None  # Don't set SC if SC is not passed in Helm, so that Kube can use the default storage class
    pvc_response = kube.create_pvc_resource(vol_name=pipeline['name'],
                                            vol_size=vol_size,
                                            storage_class=storage_class)
    if not helpers.verify_successful_response(pvc_response):
        raise GenericException(500, "Kubernetes PVC creation error")
    if pvc_response['phase'] != 'Bound':
        raise GenericException(500, "Kubernetes PVC cannot be bound")
    # setup params for Jenkins pipeline job
    pipeline_job = helpers.set_jenkins_job_params('ci-pipeline')
    pipeline_job['volume_claim_name'] = pvc_response['name']
    pipeline_job['scm_url'] = request.form['scm-url']
    pipeline_job['scm_branch'] = request.form['scm-branch']
    pipeline_job['kube_namespace'] = config['kube_namespace']
    # TODO: should this volume_name be populated as part of pvc_response? -
    # but might want to handle if PVC creation has failed in KubernetesAPI.py
    pipeline_job['volume_name'] = kube.get_volume_name_from_pvc(
        pvc_response['name'])  # Get associated volume with PVC
    # TODO: This cannot be None.
    # Validate after bootstrapping, PVCs for all services to be part of the config document.
    # Remove this after including validation
    # NOTE(review): this only sets scm_volume_claim when scm_pvc_name is
    # MISSING from the config — looks inverted (one would expect the claim
    # to be derived when the name IS present); confirm intended condition
    if config.get('scm_pvc_name') is None:
        pipeline_job['scm_volume_claim'] = kube.get_kube_resource_name(
            config['scm_volume'], 'pvc')
    purge_job = helpers.set_jenkins_job_params(
        'trigger-purge')  # setup params for Jenkins purge job
    purge_job['kube_namespace'] = config['kube_namespace']
    # Create Jenkins CI and purge jobs for this pipeline
    # If Jenkins connection fails, delete the Kube PVC created from previous step
    try:
        jenkins = JenkinsAPI(config['jenkins_url'], config['jenkins_user'],
                             config['jenkins_pass'])
    except Exception as exc:
        KubernetesAPI.get_instance().delete_pvc(pvc_response['name'])
        raise GenericException(500, "Jenkins connection error: %s" % str(exc))
    # If job creation fails, delete the Kube PVC created from previous step
    try:
        jenkins_job_url = jenkins.create_job(job_name=pipeline['name'],
                                             params=pipeline_job,
                                             form_fields=request.form)
        jenkins.create_job(job_name='purge_policy_enforcer',
                           params=purge_job,
                           form_fields=None)
    except Exception as exc:
        KubernetesAPI.get_instance().delete_pvc(pvc_response['name'])
        traceback.print_exc()
        raise GenericException(500,
                               "Jenkins Job Creation Error: %s" % str(exc))
    # Complete gathering pipeline details
    pipeline['pvc'] = pvc_response['name']
    pipeline['volume'] = pipeline_job['volume_name']
    pipeline['jenkins_url'] = jenkins_job_url
    # Record new pipeline in database
    # TODO: type=pipeline document
    try:
        new_pipeline_document = Pipeline(**pipeline)
        new_pipeline_document.store(connect)
    except Exception as exc:
        # If DB operation fails, delete the Jenkins pipeline job, purge job and Kube PVC created from previous step
        jenkins.delete_job(pipeline['name'])
        KubernetesAPI.get_instance().delete_pvc(pvc_response['name'])
        raise GenericException(
            500,
            "Error recording new project in the DB, please contact your administrator",
            "Database Exception" + str(exc))
    # TODO: Can we do a better in-page rendering instead of navigating to a raw JSON msg?
    return jsonify({'project_name': pipeline['name']}), 200
def workspace_merge():
    """ Merge developer workspace pod
    ---
    tags:
      - workspace
    parameters:
      - in: path
        name: workspace-name
        required: true
        description: Name of the new merge workspace being created
        type: string
      - in: path
        name: build-name
        required: true
        description: Build name (e.g. snapshot) from which clone should be created
        type: string
      - in: path
        name: username
        required: true
        description: Username
        type: string
      - in: path
        name: source-workspace-name
        required: true
        description: Source workspace
        type: integer
    responses:
      200:
        description: merge workspace created successfully
    """
    # Validate input web form parameters from the application
    _validate_input_form_params(
        request.form,
        ['workspace-name', 'build-name', 'username', 'source-workspace-name'])
    workspace = _setup_workspace(request.form, merge=True)
    # Run the merge commands in the new workspace. Source ws will be mounted
    # at /source_workspace/git, destination ws at /workspace/git
    merge_cmd = '/usr/local/bin/build_at_scale_merge.sh /source_workspace/git /workspace/git'
    try:
        response = KubernetesAPI.get_instance().execute_command_in_pod(
            workspace['pod'], merge_cmd)
    # BUG FIX: was a bare `except:` whose log line applied `%` to a string
    # with no placeholder — that raised TypeError inside the handler and
    # masked the real error. Narrowed the except and added the %s.
    except Exception:
        logging.error("Unable to successfully complete git merge ! %s",
                      traceback.format_exc())
        raise GenericException(
            500,
            "Unable to successfully complete merge!. Please contact your administrator"
        )
    # The merge script reports its result as a string exit code
    if response == "0":
        message = "Merge workspace created successfully!"
        logging.info("Response from workspace POD:: %s" % response)
    elif response == "1":
        message = "Merge workspace created successfully but merge conflicts were found. " \
                  "Please check the workspace for conflicts which need to be resolved"
        logging.warning("Response from workspace POD:: %s" % response)
    else:
        # If pod operations fail, delete the workspace PVC and the DB document created from previous steps
        KubernetesAPI.get_instance().delete_pvc(workspace['pvc'])
        db = helpers.connect_db()
        # NOTE(review): db.delete is handed the workspace *name* here, while
        # delete_workspace() passes the whole document — confirm which form
        # the Database wrapper expects
        db.delete(workspace['name'])
        logging.error("Response from workspace POD:: %s" % response)
        raise GenericException(
            500,
            "Unable to successfully create a merged workspace! , please contact your administrator"
        )
    return render_template('workspace_details.html',
                           message=message,
                           ontap_volume_name=workspace['clone_name'],
                           workspace_ide=workspace['ide']), 200
def _setup_couchdb():
    """Bootstrap the CouchDB cluster and populate the app's config document.

    Enables the single-node CouchDB cluster (only on first run) and copies
    the DevOps-admin-provided settings from app.config (ENV variables) into
    the persistent configuration document.
    """
    # Configure the couchdb cluster
    headers = {'Content-type': 'application/json'}
    # Cluster-enable payload for CouchDB's /_cluster_setup endpoint
    # (credentials redacted here; presumably substituted from config — TODO confirm)
    db_cluster_config = {"action": "enable_cluster",
                         "bind_address": "0.0.0.0",
                         "username": "******",
                         "password": "******",
                         "node_count": "1"}
    # Retrieve Kube namespace
    kube_specs = {
        'namespace': app.config['KUBE_NAMESPACE'],
        'service_type': app.config['SERVICE_TYPE']
    }
    # Instantiating KubernetesAPI here initializes the singleton that
    # get_instance() returns on the next line
    KubernetesAPI(kube_specs)
    kube = KubernetesAPI.get_instance()
    # Retrieve customer configuration document from database
    database = connect_db()
    config_document = get_db_config()
    # Empty SCM URL this is a sign that setup has not been done yet
    # NOTE(review): the check is `is None`, but the comment says "empty" —
    # an empty-string scm_url would skip cluster setup; confirm intent
    if config_document['scm_url'] is None:
        try:
            # NOTE(review): the response `r` is never inspected — a non-2xx
            # reply from CouchDB would go unnoticed; confirm acceptable
            r = requests.post("%s/_cluster_setup" % app.config['DATABASE_URL'],
                              json=db_cluster_config, headers=headers)
        except Exception as exc:
            raise GenericException(
                500,
                "Error configuring the couchdb cluster : %s, please contact your administrator"
                % str(exc))
    # TODO: Most of these values should be fetched from Kubernetes. That way, we keep configuration for the DevOps
    # Admin very simple with only one place to config (helm charts).
    # No config data flows from app.config
    # (even for a Dev or Test environment, we would be able to fetch all the info from Kube)
    # The Kubernetes namespace and service_names to be set somewhere (within Kube?) to eliminate app.config reads
    # Populate rest of the configuration details from the ENV variables (set by DevOps Admin)
    # ONTAP
    config_document['ontap_svm_name'] = app.config['ONTAP_SVM_NAME']
    config_document['ontap_aggr_name'] = app.config['ONTAP_AGGR_NAME']
    config_document['ontap_data_ip'] = app.config['ONTAP_DATA_IP']
    # SCM
    config_document['scm_type'] = app.config['SCM_TYPE']
    config_document['scm_service_name'] = app.config['SCM_SERVICE_NAME']
    # TODO: Once we add PVC names to annotations or labels in service, we can fetch this from Kube
    config_document['scm_pvc_name'] = app.config['SCM_PVC_NAME']
    config_document['scm_url'] = kube.get_service_url(service_name=app.config['SCM_SERVICE_NAME'])
    config_document['scm_volume'] = kube.get_volume_name_from_pvc(pvc_name=app.config['SCM_PVC_NAME'])
    # Jenkins
    config_document['jenkins_service_name'] = app.config['JENKINS_SERVICE_NAME']
    config_document['jenkins_pvc_name'] = app.config['JENKINS_PVC_NAME']
    config_document['jenkins_url'] = kube.get_service_url(service_name=app.config['JENKINS_SERVICE_NAME'])
    # Database CouchDB
    config_document['database_service_name'] = app.config['DATABASE_SERVICE_NAME']
    config_document['database_pvc_name'] = app.config['DATABASE_PVC_NAME']
    # Artifactory
    config_document['registry_service_name'] = app.config['REGISTRY_SERVICE_NAME']
    config_document['registry_pvc_name'] = app.config['REGISTRY_PVC_NAME']
    config_document['registry_type'] = app.config['REGISTRY_TYPE']
    # Webservice
    config_document['web_service_name'] = app.config['WEB_SERVICE_NAME']
    config_document['web_pvc_name'] = app.config['WEB_PVC_NAME']
    config_document['web_service_url'] = kube.get_service_url(service_name=app.config['WEB_SERVICE_NAME'])
    # Kube specs
    config_document['service_type'] = app.config['SERVICE_TYPE']
    config_document['kube_namespace'] = app.config['KUBE_NAMESPACE']
    # If Storage Class is "" , Kube does not set storage class to allow the PVC to be assigned to default SC
    config_document['storage_class'] = app.config['STORAGE_CLASS']
    # Persist all of the above in one write
    config_document.store(database)