def get_disks_meta(self):
        ceph_api = CephAPI()
        consul_api = ConsulAPI()
        ls = ceph_api.get_disks_meta()
        job_manager = JobManager()
        for disk in ls:
            if not disk or not hasattr(disk, "paths"):
                continue
            if not disk.paths:
                disk.status = DisplayDiskStatus.unattached
                continue
            data = consul_api.find_disk(disk.id)
            if data is not None:
                disk.status = DisplayDiskStatus.starting
                if str(data.Flags) == "1":
                    disk.status = DisplayDiskStatus.stopping
                elif consul_api.is_path_locked(disk.id):
                    disk.status = DisplayDiskStatus.started
            else:
                disk.status = DisplayDiskStatus.stopped

            # A running DELETE_DISK job for this disk overrides the status.
            for j in job_manager.get_running_job_list():
                if j.is_running and j.type == JobType.DELETE_DISK \
                        and str(j.params).find(str(disk.id)) > -1:
                    disk.status = DisplayDiskStatus.deleting

        return ls
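# A minimal sketch of the status-resolution order implemented above. The helper
# below is hypothetical; only the precedence (unattached -> stopped / starting /
# stopping / started -> deleting) mirrors get_disks_meta().
def resolve_status(has_paths, consul_flags, path_locked, delete_job_running):
    if not has_paths:
        return DisplayDiskStatus.unattached
    if consul_flags is None:                 # no Consul record for the disk
        status = DisplayDiskStatus.stopped
    elif str(consul_flags) == "1":           # record flagged for stop
        status = DisplayDiskStatus.stopping
    elif path_locked:                        # a node holds the path lock
        status = DisplayDiskStatus.started
    else:
        status = DisplayDiskStatus.starting
    if delete_job_running:                   # a running DELETE_DISK job wins
        status = DisplayDiskStatus.deleting
    return status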
Example #2
def storage(args):
    job_manager = JobManager()
    params = '-d {} '.format(args.d)
    for j in job_manager.get_running_job_list():
        if j.type == JobType.STORAGELOAD:
            logger.info("Cannot start storage load job for 'sar'; a job is already running.")
            print("-1")
            return

    print(job_manager.add_job(JobType.STORAGELOAD, params))
    logger.info("Start storage load job for 'sar'")
    sys.exit(0)
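# A hypothetical wiring sketch: these handlers receive an argparse namespace,
# so a CLI entry point could register them as subcommands. The '-d' option name
# is inferred from the format string above; everything else here is an
# assumption, not the project's actual entry point.
import argparse

parser = argparse.ArgumentParser(prog='benchmark')
subparsers = parser.add_subparsers(dest='command')
storage_parser = subparsers.add_parser('storage')
storage_parser.add_argument('-d', help='duration in seconds')
storage_parser.set_defaults(func=storage)
args = parser.parse_args()
args.func(args)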
Example #3
def client(args):

    job_manager = JobManager()
    params = '-d {} -t {} -b {} -m {} -p {}'.format(args.d, args.t, args.b,
                                                    args.m, args.p)
    for j in job_manager.get_running_job_list():
        if j.type == JobType.CLIENTSTRESS:
            logger.info("Cannot start client stress job for 'rados',")
            print("-1")
            return

    print(job_manager.add_job(JobType.CLIENTSTRESS, params))
    logger.info("Start client stress job for rados")
    sys.exit(0)
def delete_cache(args):
    if not configuration().get_node_info().is_storage:
        print("-1")
        return
    job_manager = JobManager()
    params = '-disk_name {}'.format(args.disk_name)
    for j in job_manager.get_running_job_list():
        if j.type in (JobType.DELETEOSD, JobType.ADDDISK,
                      JobType.ADDJOURNAL, JobType.DELETEJOURNAL,
                      JobType.ADDCACHE, JobType.DELETECACHE):
            logger.info("Cannot start delete job for cache of disk {}; there are running jobs.".format(args.disk_name))
            print("-1")
            return

    print(job_manager.add_job(JobType.DELETECACHE, params))
    logger.info("Start delete job for cache of disk {}".format(args.disk_name))
    sys.exit()
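# The guard "refuse to start while a conflicting job is running" repeats in
# storage(), client(), delete_cache(), add_journal() and add_osd(). A hedged
# sketch of factoring it out (the helper name is hypothetical):
def has_running_job(job_manager, *job_types):
    """Return True if any currently running job matches one of job_types."""
    return any(j.type in job_types for j in job_manager.get_running_job_list())

# Usage, mirroring delete_cache():
#   if has_running_job(job_manager, JobType.DELETEOSD, JobType.ADDDISK,
#                      JobType.ADDJOURNAL, JobType.DELETEJOURNAL,
#                      JobType.ADDCACHE, JobType.DELETECACHE):
#       print("-1")
#       return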
Example #5
    def start(self, type, duration_sec, threads, clients, pool, cleanup):
        job_manager = JobManager()
        clients = "" + ",".join(clients) + ""
        for j in job_manager.get_running_job_list():
            if j.type == JobType.BENCHMANAGER:
                logger.info(
                    "Cannot start benchmark manager; there is a job already running."
                )
                return -1
        cleanup_val = 1 if cleanup else 0
        params = '-d {} -t {} -type {} -c {} -p {} --cleanup {}'.format(
            duration_sec, threads, type, clients, pool, cleanup_val)
        job_id = job_manager.add_job(JobType.BENCHMANAGER, params)

        return job_id
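# A hypothetical call site for start(); the Benchmark class name and the
# argument values are assumptions for illustration only.
bench = Benchmark()
job_id = bench.start(type='write', duration_sec=60, threads=16,
                     clients=['node-1', 'node-2'], pool='rbd', cleanup=True)
if job_id == -1:
    print('benchmark manager already running')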
Example #6
def add_journal(args):
    if not configuration().get_node_info().is_storage:
        print("-1")
        return
    job_manager = JobManager()
    params = '-disk_name {}'.format(args.disk_name)
    for j in job_manager.get_running_job_list():
        if j.type in (JobType.DELETEOSD, JobType.ADDDISK,
                      JobType.ADDJOURNAL, JobType.DELETEJOURNAL):
            logger.info(
                "Cannot start add journal job for disk {}; there are running jobs."
                .format(args.disk_name))
            print("-1")
            return

    print(job_manager.add_job(JobType.ADDJOURNAL, params))
    logger.info("Start add journal job for disk {}.".format(args.disk_name))
    sys.exit()
Example #7
def add_osd(args):
    if not configuration().get_node_info().is_storage:
        print("-1")
        return

    job_manager = JobManager()

    # If no journal and no cache:
    if str(args.journal) == "" and str(args.cache) == "":
        params = '-disk_name {}'.format(args.disk_name)

    # If journal but no cache:
    elif str(args.journal) != "" and str(args.cache) == "":
        params = '-disk_name {} -journal {}'.format(args.disk_name,
                                                    args.journal)

    # If cache but no journal:
    elif str(args.journal) == "" and str(args.cache) != "" and str(
            args.cache_type) != "":
        params = '-disk_name {} -cache {} -cache_type {}'.format(
            args.disk_name, args.cache, args.cache_type)

    # If both journal and cache:
    else:
        params = '-disk_name {} -journal {} -cache {} -cache_type {}'.format(
            args.disk_name, args.journal, args.cache, args.cache_type)

    # Getting all running jobs:
    for j in job_manager.get_running_job_list():
        if j.type in (JobType.DELETEOSD, JobType.ADDDISK,
                      JobType.ADDJOURNAL, JobType.DELETEJOURNAL,
                      JobType.ADDCACHE, JobType.DELETECACHE):
            logger.info(
                "Cannot start add job to create osd for disk {}; there are running jobs."
                .format(args.disk_name))
            print("-1")
            return

    print(job_manager.add_job(JobType.ADDDISK, params))
    logger.info("Start add osd job for disk {}.".format(args.disk_name))
    sys.exit()
def get_full_disk_list(pid=None):
    __output_split_text = "##petasan##"
    disk_list = []
    ceph_disk_list = get_disk_list()
    ph_disk_list = disk_util.get_disk_list()
    osd_dict = None

    try:
        osd_dict = ceph_osd.ceph_osd_tree(configuration().get_node_info().name)
    except Exception as e:
        logger.error(str(e))  # Exception.message does not exist in Python 3
    missing_disk_list = []
    # Set osd id and usage

    if ceph_disk_list:
        for disk in ceph_disk_list:
            for ph_disk in ph_disk_list:
                if ph_disk.name == disk.name:
                    ph_disk.usage = disk.usage
                    ph_disk.osd_id = disk.osd_id
                    ph_disk.osd_uuid = disk.osd_uuid
                    ph_disk.linked_journal = disk.linked_journal
                    ph_disk.linked_osds = disk.linked_osds
                    ph_disk.linked_cache = disk.linked_cache
                    ph_disk.linked_cache_part_num = disk.linked_cache_part_num
                    ph_disk.vg_name = disk.vg_name
                    ph_disk.lv_name = disk.lv_name
                    ph_disk.linked_journal_part_num = disk.linked_journal_part_num
                    ph_disk.no_of_partitions = disk.no_of_partitions
                    ph_disk.no_available_partitions = disk.no_available_partitions
                    disk_list.append(ph_disk)
                    break
    else:
        disk_list.extend(ph_disk_list)

    health_test = Smart().get_overall_health()
    for disk in disk_list:
        if disk.name in health_test:
            disk.smart_test = health_test[disk.name]

    # get all running jobs
    job_manager = JobManager()
    job_list = job_manager.get_running_job_list()

    # Set disk osd status
    for node_disk in disk_list:
        # Set osd status [up, down]
        if node_disk.usage == DiskUsage.osd:
            status = None
            if osd_dict and node_disk.osd_id is not None:
                status = osd_dict.get(int(node_disk.osd_id), None)
            if str(ceph_osd.get_osd_id(node_disk.osd_uuid)) == "-1":
                node_disk.status = OsdStatus.no_status
                node_disk.usage = DiskUsage.mounted
                node_disk.osd_id = -1
            elif status is not None:
                node_disk.status = status
            else:
                node_disk.status = OsdStatus.no_status

        disk_name_parameter = "-disk_name {}".format(node_disk.name)
        disk_id_parameter = "-id {}".format(node_disk.osd_id)

        # loop on running job list
        for j in job_list:
            # Set osd status [deleting , adding]
            if j.type == JobType.ADDDISK and str(j.params).find(
                    str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.adding
            elif j.type == JobType.ADDJOURNAL and str(j.params).find(
                    str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.adding_journal
            elif j.type == JobType.ADDCACHE and str(j.params).find(
                    str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.adding_cache
            elif j.type == JobType.DELETEOSD and (
                    str(j.params).find(str(disk_name_parameter)) > -1
                    or str(j.params).find(str(disk_id_parameter)) > -1):
                node_disk.status = OsdStatus.deleting
            elif j.type == JobType.DELETEJOURNAL and str(j.params).find(
                    str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.deleting
            elif j.type == JobType.DELETECACHE and str(j.params).find(
                    str(disk_name_parameter)) > -1:
                node_disk.status = OsdStatus.deleting

            # Check whether the job has completed with an error message to return.
            elif pid and j.id == int(pid):
                job_output = job_manager.get_job_output(j)
                if job_output is None:
                    continue
                job_output = str(job_output).strip()
                if job_output != "":
                    # Custom messages appear after __output_split_text.
                    out_arr = job_output.split(__output_split_text)
                    if len(out_arr) > 1:
                        node_disk.error_message = out_arr[1]
                        job_manager.remove_job(j.id)

    if not osd_dict:
        return disk_list
    # If an osd appears in the ceph tree but has no matching disk in disk_list.
    for osd_id, osd_status in osd_dict.items():
        is_missing = True
        for disk in disk_list:
            if str(disk.osd_id) == str(osd_id):
                is_missing = False
                break
        if is_missing:
            disk = DiskInfo()
            disk.osd_id = osd_id
            disk.status = osd_status
            disk.usage = DiskUsage.osd
            missing_disk_list.append(disk)
    disk_list.extend(missing_disk_list)

    return disk_list
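# The "##petasan##" marker separates normal job output from a custom error
# message. A small sketch of the extraction step, with the len() guard the
# loop above relies on (helper name hypothetical):
def extract_error_message(job_output, marker="##petasan##"):
    """Return the custom error text that follows the marker, or None."""
    if not job_output:
        return None
    out_arr = str(job_output).strip().split(marker)
    if len(out_arr) > 1:
        return out_arr[1]
    return None

# extract_error_message("ok ##petasan## disk busy")  ->  " disk busy"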