def purge_old_workspaces():
    """
    Purge workspaces that exceeded their project's purge limit.

    ONTAP doesn't provide last_access_timestamp for volumes; hence each
    workspace volume's latest snapshot is snapdiff'ed against a snapshot
    older than the purge limit to decide whether the workspace is active.

    @return: tuple of (count of workspaces deleted, list of deleted workspace names)
    """
    database = helpers.connect_db()
    config = helpers.get_db_config()
    projects_in_db = Database.get_documents_by_type(database, doc_type='project')
    if not projects_in_db:
        # Keep the return shape consistent with the non-empty path so callers
        # can always unpack (count, names). The original returned a bare 0 here.
        return 0, []
    # The ONTAP connection parameters are loop-invariant: build the service once
    # instead of once per workspace.
    ontap = OntapService(config['ontap_api'], config['ontap_apiuser'],
                         config['ontap_apipass'], config['ontap_svm_name'],
                         config['ontap_aggr_name'], config['ontap_data_ip'])
    deleted_workspaces = []
    for project in projects_in_db:
        workspaces_in_project = Database.get_workspaces_by_project(
            database, project=project['name'])
        for workspace in workspaces_in_project:
            deleted, error = ontap.get_snapdiff_and_delete(
                volume_name=workspace.value,
                count=project['workspace_purge_limit'])

            # delete inconsistent or old workspace that exceeded purge limit
            if error is not None or deleted is True:
                workspace_doc = Database.get_document_by_name(database, workspace.value)
                database.delete(workspace_doc)
                deleted_workspaces.append(workspace.value)
                logging.info("Purge: deleted workspace %s from DB",
                             workspace.value)
    return len(deleted_workspaces), deleted_workspaces
def purge_snapshots_by_volume(volume, purge_limit):
    """
    Purge the oldest snapshots of a volume so at most purge_limit remain.

    @param volume: name of the ONTAP volume whose snapshots are purged
    @param purge_limit: maximum number of snapshots to retain
    @return: count of snapshots actually purged (0 when nothing to do)
    """
    config = helpers.get_db_config()
    ontap = OntapService(config['ontap_api'], config['ontap_apiuser'], config['ontap_apipass'],
                         config['ontap_svm_name'], config['ontap_aggr_name'], config['ontap_data_ip'])
    ontap_snapshot_list = ontap.get_snapshot_list(volume)

    if ontap_snapshot_list is None:
        return 0

    delete_count = len(ontap_snapshot_list) - purge_limit

    if delete_count <= 0:
        return 0

    database = helpers.connect_db()

    # Oldest first: only the overflow beyond purge_limit gets deleted.
    sorted_by_timestamp = sorted(ontap_snapshot_list, key=lambda snap: snap['timestamp'])
    purged = 0
    for snap in sorted_by_timestamp[:delete_count]:
        status = ontap.delete_snapshot(volume, snap['snapshot_name'])
        if helpers.verify_successful_response(status):
            # Count only confirmed deletions; the original returned the
            # attempted count even when ONTAP reported a failure.
            purged += 1
            # delete snapshot document from db
            doc = Database.get_document_by_name(database, snap['snapshot_name'])
            if not doc:  # if snapshot to be deleted is not found in DB
                logging.info("Purge: snapshot document not found for %s", snap['snapshot_name'])
            else:
                database.delete(doc)
                logging.info("Purge: snapshot deleted from DB and ONTAP: %s",
                             snap['snapshot_name'])
    return purged
def delete_workspace(name):
    """
    Tear down a workspace: delete its Kube service (the workspace IDE),
    then the pod backing it, then the PVC (Trident removes the associated
    PV and ONTAP clone), and finally the workspace document in the DB.
    """
    # TODO: Handle exceptions on db connection failure, and failure on each of the kube operations
    try:
        config = get_db_config()
        database = connect_db()
        ws_doc = Database.get_document_by_name(database, name)
        kube_api = KubernetesAPI.get_instance()
        kube_api.delete_service(ws_doc['service'])
        logging.info("Workspace service deleted")
        kube_api.delete_pod(ws_doc['pod'])
        logging.info("Workspace POD deleted")
        kube_api.delete_pvc(ws_doc['pvc'])
        logging.info("Workspace PVC deleted")
        database.delete(ws_doc)
    except Exception:
        logging.error("Unable to delete workspace %s: %s" % (name, traceback.format_exc()))
        raise
def check_if_workspaces_exist_for_pipeline(name):
    """
    Check whether any workspaces are tied to the given pipeline.

    @param name: name of the pipeline document in the DB
    @return: tuple (True if one or more workspaces exist, list of workspace PVCs)
    """
    get_db_config()
    db = connect_db()
    # retrieve details for the current pipeline
    pipeline = Database.get_document_by_name(db, name)
    # get all workspaces for the pipeline. If one or more workspaces exist, we don't delete the pipeline
    workspace_list = get_all_ws_pvc_for_pipeline(pipeline['pvc'])
    # use logging (as the rest of this module does) instead of print()
    logging.info("Got workspace list as:: %s", str(workspace_list))
    return len(workspace_list) > 0, workspace_list
def get_db_config():
    """
    Connect to database and retrieve config document.

    @return: the 'configuration' document from the DB
    @raise Exception: re-raises any DB lookup failure after logging it
    """
    database = connect_db()
    try:
        config_document = Database.get_document_by_name(
            database, 'configuration')
    except Exception:
        # Log through the module's logger (the original used print), and
        # re-raise with a bare `raise` to preserve the original traceback.
        logging.error("Unable to retrieve configuration document from database: %s"
                      % traceback.format_exc())
        raise
    return config_document
def get_volume_name_for_pipeline(name):
    """
    Get volume name for given pipeline.

    @param name: name of the pipeline document in the DB
    @return: the ONTAP volume name recorded on the pipeline document
    """
    try:
        # Result intentionally unused; the call verifies DB connectivity and
        # that the configuration document exists (same pattern as
        # check_if_workspaces_exist_for_pipeline).
        get_db_config()
        db = connect_db()
        project = Database.get_document_by_name(db, name)
        return project['volume']
    except Exception:
        logging.error("Unable to get volume name for pipeline %s: %s" % (name, traceback.format_exc()))
        raise
# Example #7 (separator between pasted code snippets; kept as a comment so the file parses)
def _setup_workspace(input_form, merge=False):
    """
    Create a new workspace (optionally a merge workspace) for a user.

    @param input_form: validated request form fields for the workspace
    @param merge: True when creating a merge workspace from a source workspace
    @return: dict describing the workspace that was set up
    @raise GenericException: 401 when the user's workspace limit is exceeded,
                             500 on database failures
    """
    # Retrieve customer configuration document from database
    connect, config = _get_config_from_db()

    # Validate if user hasn't exceeded the workspace limit.
    # Default to "not exceeded": if the check itself fails we only warn, and
    # without these defaults the `exceeded` reference below raised NameError.
    exceeded, workspaces = False, []
    try:
        exceeded, workspaces = workspace_obj.exceeded_workspace_count_for_user(
            input_form['username'], config['user_workspace_limit'])
        logging.debug("Workspace limit details:: %s %s" %
                      (exceeded, str(workspaces)))
    except Exception:
        logging.warning(
            "WARNING: Unable to check user workspace limit (%s)  " %
            traceback.format_exc())
    if exceeded is True:
        raise GenericException(
            401, "User workspace limit of %s exceeded. "
            "Please delete one or more workspace(s) from %s and re-try" %
            (config['user_workspace_limit'], workspaces))

    # setup initial workspace params
    workspace = dict()
    if merge:
        # Retrieve project name from source_workspace document
        try:
            source_ws_document = Database.get_document_by_name(
                connect, request.form['source-workspace-name'])
        except Exception:
            error_msg = "Error retrieving source workspace information from database"
            logging.error("%s: %s" % (error_msg, traceback.format_exc()))
            raise GenericException(500, error_msg, "Database Exception")
        # populate the workspace details
        workspace['source_workspace_name'] = input_form[
            'source-workspace-name']
        workspace['pipeline'] = source_ws_document['pipeline']
        workspace['build_name'] = request.form['build-name']
    else:
        workspace['pipeline'] = request.form['pipeline-name']
        # strip build_status and retain only the build_name
        workspace['build_name'] = request.form[
            'build-name-with-status'].rsplit('_', 1)[0]

    _populate_workspace_details(workspace, input_form, config, merge)

    # Create Kube PVC, Pod, Service, and execute commands in Pod to complete workspace setup
    _complete_kubernetes_setup_for_workspace(workspace, merge)

    # Record new workspace document in DB
    _record_new_workspace(db=connect, workspace=workspace, merge=merge)

    return workspace
def delete_pipeline(name):
    """
    Delete all elements associated with a given pipeline (ONTAP volume/Jenkins job).
    When using Trident, deleting the PVC mapped to the project is sufficient
    (Trident takes care of deleting the volume and PV).
    After removing the build clone PVCs associated with the pipeline, the
    pipeline's own PVC, DB document, and Jenkins job are deleted.
    (Do not run this when at least one workspace is tied to a pipeline build.)
    """
    # TODO: Be more specific on what goes in 'try'
    get_db_config()
    database = connect_db()
    # retrieve details for the current pipeline
    pipeline_doc = Database.get_document_by_name(database, name)
    # if there aren't any workspaces, we can safely delete all the build clones
    build_pvcs = get_all_build_pvc_for_pipeline(pipeline_doc['pvc'])
    try:
        for pvc_name in build_pvcs:
            # if this is a re-try (intermittent failure) PVC is already gone
            try:
                KubernetesAPI.get_instance().delete_pvc(pvc_name)
            except Exception:
                pass
            # remove the build's DB document
            build_doc = Database.get_document_by_name(
                database, get_db_name_from_kube_resource(pvc_name))
            database.delete(build_doc)
        # when all build clones are deleted successfully, delete the pipeline PVC
        KubernetesAPI.get_instance().delete_pvc(pipeline_doc['pvc'])
        # remove the pipeline's DB document
        database.delete(pipeline_doc)
        # finally delete the Jenkins job
        connect_jenkins().delete_job(name)
    except Exception:
        logging.error("Unable to delete pipeline %s: %s" % (name, traceback.format_exc()))
        raise
def delete_pipeline(name):
    """
    Delete all elements associated with a given pipeline (ONTAP volume/Jenkins job).
    When using Trident, deleting the PVC mapped to the project is sufficient
    (Trident takes care of deleting the volume and PV).
    """
    # TODO: Be more specific on what goes in 'try'
    try:
        get_db_config()
        database = connect_db()
        pipeline_doc = Database.get_document_by_name(database, name)
        KubernetesAPI().delete_pvc(pipeline_doc['pvc'])
        database.delete(pipeline_doc)
        jenkins_conn = connect_jenkins()
        jenkins_conn.delete_job(name)
    except Exception:
        logging.error("Unable to delete pipeline %s: %s" %
                      (name, traceback.format_exc()))
        raise
# Example #10 (separator between pasted code snippets; kept as a comment so the file parses)
def delete_project(name):
    '''
    Delete all elements associated with a given project/pipeline
    (ONTAP volume / Jenkins job).
    '''
    try:
        config = get_db_config()
        database = connect_db()
        project_doc = Database.get_document_by_name(database, name)
        ontap = OntapService(config['ontap_api'], config['ontap_apiuser'],
                             config['ontap_apipass'], config['ontap_svm_name'],
                             config['ontap_aggr_name'],
                             config['ontap_data_ip'])
        # remove the backing ONTAP volume first, then the DB record
        ontap.delete_volume(project_doc['volume'])
        database.delete(project_doc)
        # finally drop the project's Jenkins job
        connect_jenkins().delete_job(name)

    except Exception:
        logging.error("Unable to delete project!: %s" % traceback.format_exc())
        raise
# Example #11 (separator between pasted code snippets; kept as a comment so the file parses)
def delete_workspace(name):
    '''
    Delete a workspace: its ONTAP clone volume, its DB document,
    and the Kubernetes pod backing it.
    '''
    try:
        config = get_db_config()
        database = connect_db()
        ontap = OntapService(config['ontap_api'], config['ontap_apiuser'],
                             config['ontap_apipass'], config['ontap_svm_name'],
                             config['ontap_aggr_name'],
                             config['ontap_data_ip'])
        ontap.delete_volume(name)
        ws_doc = Database.get_document_by_name(database, name)
        pod = ws_doc['pod_name']
        database.delete(ws_doc)
        KubernetesAPI().delete_pod(pod)

    except Exception:
        logging.error("Unable to delete workspace!: %s" %
                      traceback.format_exc())
        raise
# Example #12 (separator between pasted code snippets; kept as a comment so the file parses)
def workspace_merge():
    """
    merge developer workspace pod
    ---
    tags:
      - workspace
    parameters:
      - in: path
        name: workspace-name
        required: true
        description: Name of the new merge workspace being created
        type: string
      - in: path
        name: build-name
        required: true
        description: Build name (e.g. snapshot) from which clone should be created
        type: string
      - in: path
        name: username
        required: true
        description: Username
        type: string
      - in: path
        name: source-workspace-name
        required: true
        description: Source workspace
        type: integer
    responses:
      200:
        description: merge workspace created successfully

    """
    # Retrieve customer configuration document from database
    try:
        database = helpers.connect_db()
        config_document = helpers.get_db_config()
    except Exception:
        raise GenericException(
            500,
            "Customer configuration document not found, please contact your administrator",
            "Database Exception")
    if not config_document:
        raise GenericException(
            500,
            "Customer configuration document not found, please contact your administrator",
            "Database Exception")
    expected_keys = [
        'workspace-name', 'build-name', 'username', 'source-workspace-name'
    ]
    if not helpers.request_validator(request.form, expected_keys):
        raise GenericException(
            400,
            "workspace-name, build-name, username and source-workspace-name are required"
        )

    username = request.form['username']
    try:
        user_doc = helpers.get_db_user_document(username)
        uid = user_doc['uid']
        gid = user_doc['gid']
        email = user_doc['email']
    except Exception:
        raise GenericException(
            500, "Error retrieving user information from database",
            "Database Exception")

    # Default to "limit not exceeded": the except branch below only warns, and
    # without these defaults the `exceeded` reference raised NameError whenever
    # the limit check itself failed.
    exceeded, workspaces = False, []
    try:
        exceeded, workspaces = workspace_obj.exceeded_workspace_count_for_user(
            uid, config_document['user_workspace_limit'])
    except Exception:
        logging.warning(
            "WARNING: Unable to check user workspace limit (%s)  " %
            traceback.format_exc())
    if exceeded is True:
        raise GenericException(
            401,
            "User workspace limit exceeded , please delete one or more workspace(s) from %s and re-try"
            % workspaces)

    # retrieve project name from on source-workspace document
    try:
        source_ws_document = Database.get_document_by_name(
            database, request.form['source-workspace-name'])
        project = source_ws_document['project'].rstrip()
    except Exception:
        error_msg = "Error retrieving source workspace information from database"
        logging.error("%s: %s" % (error_msg, traceback.format_exc()))
        raise GenericException(500, error_msg, "Database Exception")

    # populate the workspace details
    workspace = dict()
    workspace[
        'source_workspace_name'] = helpers.replace_kube_invalid_characters(
            request.form['source-workspace-name'])
    namespace = 'default'
    workspace['project'] = project
    workspace['snapshot'] = request.form['build-name']
    volume_name = helpers.replace_ontap_invalid_char(workspace['project'])
    workspace['clone'] = volume_name + \
        "_workspace" + helpers.return_random_string(4)
    workspace['kb_clone_name'] = helpers.replace_kube_invalid_characters(
        workspace['clone'])
    workspace['uid'] = uid
    workspace['gid'] = gid
    workspace['username'] = username
    workspace['clone_size_mb'] = "900"
    workspace['pod_image'] = config_document['workspace_pod_image']
    workspace['clone_mount'] = "/mnt/" + workspace['kb_clone_name']
    workspace[
        'build_cmd'] = "No build commands have been specified for this project"
    workspace['service_type'] = config_document['service_type']

    # Clone the chosen build snapshot into a new workspace volume on ONTAP
    try:
        ontap_instance = OntapService(config_document['ontap_api'],
                                      config_document['ontap_apiuser'],
                                      config_document['ontap_apipass'],
                                      config_document['ontap_svm_name'],
                                      config_document['ontap_aggr_name'],
                                      config_document['ontap_data_ip'])
        ontap_data_ip = ontap_instance.data_ip

        status, vol_size = ontap_instance.create_clone(volume_name,
                                                       workspace['uid'],
                                                       workspace['gid'],
                                                       workspace['clone'],
                                                       workspace['snapshot'])
    except Exception:
        logging.error("Unable to create ontap workspace clone volume: %s" %
                      traceback.format_exc())
        raise GenericException(
            500, "Unable to create ontap workspace clone volume")

    if not helpers.verify_successful_response(status):
        logging.error("ONTAP Clone Creation Error: %s", repr(status))
        return render_template('error.html',
                               error="Workspace clone creation error"), 400

    try:
        kube = KubernetesAPI()
    except Exception:
        logging.error("Unable to connect to Kubernetes: %s" %
                      traceback.format_exc())
        raise GenericException(500, "Unable to connect to Kubernetes")
    try:
        kube_pv_pvc_pod_response = kube.create_pv_and_pvc_and_pod(
            workspace, vol_size, 'default', ontap_data_ip)
    except Exception:
        logging.error("Unable to create Kubernetes Workspace PV/PVC/Pod: %s" %
                      traceback.format_exc())
        raise GenericException(
            500, "Unable to create Kubernetes Workspace PV/PVC/Pod")
    # Fold the Kubernetes step statuses into the ONTAP status list so one
    # verify_successful_response call covers both.
    for response in kube_pv_pvc_pod_response:
        status.append(response)

    if not helpers.verify_successful_response(status):
        logging.error("Unable to create Kubernetes Workspace PV/PVC/Pod: %s" %
                      response)
        raise GenericException(
            500, "Unable to create Kubernetes Workspace PV/PVC/Pod")

    workspace_pod = workspace['kb_clone_name'] + "-pod"
    try:
        workspace_ide = kube.get_service_url(workspace['kb_clone_name'] +
                                             "-service")
    except Exception:
        logging.error(
            "Unable to determine workspace kubernetes service url: %s" %
            traceback.format_exc())
        raise GenericException(
            500,
            "Unable to determine workspace kubernetes service url, please contact your administrator"
        )

    # Record new workspace in database
    try:
        new_ws_document = Workspace(name=workspace['clone'],
                                    project=workspace['project'],
                                    username=workspace['username'],
                                    uid=workspace['uid'],
                                    gid=workspace['gid'],
                                    parent_snapshot=workspace['snapshot'],
                                    pod_name=workspace_pod)
        new_ws_document.store(database)
    except Exception:
        raise GenericException(
            500,
            "Error recording new workspace in the DB, please contact your administrator",
            "Database Exception")

    # Wait for pod to be ready before executing any commands
    time.sleep(180)
    # Set git user.email and user.name , we don't care if the command fails
    git_user_cmd = ['git', 'config', '--global', 'user.name', username]
    git_email_cmd = ['git', 'config', '--global', 'user.email', email]
    try:
        response = kube.execute_command_in_pod(workspace_pod, namespace,
                                               git_user_cmd)
        response = kube.execute_command_in_pod(workspace_pod, namespace,
                                               git_email_cmd)
    except Exception:
        logging.warning(
            "WARNING: Unable to configure GIT Username/Email on behalf of user: %s"
            % traceback.format_exc())

    # run the merge commands in the new workspace
    # source ws will be mounted at /source_workspace/git
    # destination ws will be mounted at /workspace/git
    merge_cmd = [
        '/usr/local/bin/build_at_scale_merge.sh', '/source_workspace/git',
        '/workspace/git'
    ]
    try:
        response = kube.execute_command_in_pod(workspace_pod, namespace,
                                               merge_cmd)
    except Exception:
        # Fix: the original format string had no %s placeholder, so the '%'
        # operator raised TypeError inside this handler and masked the real
        # merge failure (the GenericException below was never raised).
        logging.error("Unable to successfully complete git merge ! %s" %
                      traceback.format_exc())
        raise GenericException(
            500,
            "Unable to successfully complete merge ! , please contact your administrator"
        )

    if response == "0":
        message = "Merge workspace created successfully!"
    elif response == "1":
        message = "Merge workspace created successfully but merge conflicts were found. Please check the workspace for conflicts which need to be resolved"
    else:
        raise GenericException(
            500,
            "Unable to successfully complete merge ! , please contact your administrator"
        )
    # Wait for IDE to be ready before returning
    time.sleep(5)
    return render_template('workspace_details.html',
                           message=message,
                           ontap_data_ip=ontap_data_ip,
                           ontap_volume_name=workspace['clone'],
                           workspace_ide=workspace_ide), 200