def get_dest_cluster_fsid(args):
    try:
        destination_fsid = ceph_disk.get_fsid(
            configuration().get_cluster_name())
        print(json.dumps(destination_fsid))
        sys.exit(0)

    except Exception as e:
        print("Error : {}".format(str(e.message)))
        sys.exit(-1)
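Since get_dest_cluster_fsid() prints the fsid as a JSON-encoded string and exits with 0 on success, callers usually capture stdout and decode it. A minimal sketch of such a caller, assuming a hypothetical entry point (the module path below is not from the source):

import json
import subprocess

# Hypothetical wrapper around the handler above; the "-m" target is assumed.
proc = subprocess.run(
    ["python", "-m", "petasan_scripts.dest_fsid"],
    capture_output=True, text=True)
if proc.returncode == 0:
    fsid = json.loads(proc.stdout)  # a plain fsid string
    print("destination cluster fsid:", fsid)
else:
    print(proc.stdout.strip())      # the "Error: ..." line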
    def __get_wwn(self, disk_id):
        wwn = disk_id
        app_config = ConfigAPI().read_app_config()
        if app_config.wwn_fsid_tag:
            logger.info('wwn_fsid_tag is true')
            # prefix the wwn with the first 8 chars of the cluster fsid
            fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
            fsid_prefix = fsid[:8]
            wwn = fsid_prefix + disk_id
        logger.info('add disk wwn is ' + wwn)
        return wwn
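The prefixing logic is easy to verify in isolation: the first 8 hex characters of the cluster fsid are prepended to the disk id. A standalone check with made-up values:

# Standalone illustration of the wwn prefixing above (values made up).
fsid = "9c3a41f7-1b2c-4d5e-8f90-1234567890ab"
disk_id = "00001234"
wwn = fsid[:8] + disk_id
assert wwn == "9c3a41f700001234"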
Example #3
    def activate(self):
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        vg_name = "ps-" + cluster_fsid + "-dmc"

        logger.info("Starting activating PetaSAN lvs")
        call_cmd("modprobe dm-cache")

        all_vgs = get_api_vgs()

        for vg in all_vgs:
            if vg['vg_name'].startswith(vg_name):
                logger.info("Activating {}".format(vg['vg_name']))
                call_cmd("vgchange -a y {}".format(vg['vg_name']))
Example #4
    def activate(self):
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        vg_name = "ps-" + cluster_fsid + "-wc"

        logger.info("Starting activating PetaSAN lvs")
        call_cmd("modprobe dm-writecache")

        all_vgs = get_api_vgs()

        for vg in all_vgs:
            if vg['vg_name'].startswith(vg_name):
                logger.info("Activating {}".format(vg['vg_name']))
                call_cmd("vgchange -a y {}".format(vg['vg_name']))
                logger.info("Setting writeback jobs number.")
                call_cmd(
                    'dmsetup message {}/main 0 writeback_jobs 102400'.format(
                        vg['vg_name']))
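For reference, the activation and tuning driven through call_cmd() above can be reproduced directly; a sketch using subprocess, with an illustrative VG name (real names embed the cluster fsid):

import subprocess

vg = "ps-<cluster_fsid>-wc-osd.1"  # illustrative; substitute a real VG name
subprocess.check_call(["modprobe", "dm-writecache"])
subprocess.check_call(["vgchange", "-a", "y", vg])
# Raise the number of in-flight writeback jobs on the <vg>/main target:
subprocess.check_call(
    ["dmsetup", "message", "{}/main".format(vg), "0",
     "writeback_jobs", "102400"])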
def disk_list(delete_job_id=-1, disk_id="", pool=""):
    mesg_err = ""
    mesg_success = ""
    mesg_warning = ""
    available_disk_list = []
    disk_status = None
    active_paths = None
    base_url = request.base_url
    cluster_fsid = ""
    try:
        manage_disk = ManageDisk()
        available_disk_list = manage_disk.get_disks_meta()
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        disk_status = DisplayDiskStatus
        if "err" in session:
            mesg_err = session["err"]
            session.pop("err")
        elif "success" in session:
            mesg_success = session["success"]
            session.pop("success")
        elif "warning" in session:
            mesg_warning = session["warning"]
            session.pop("warning")

    except Exception as e:
        logger.error(e)
        mesg_err = "Error loading page"

    return render_template('admin/disk/list.html',
                           diskList=available_disk_list,
                           diskStatus=disk_status,
                           cluster_fsid=cluster_fsid,
                           err=mesg_err,
                           base_url=base_url,
                           disk_id=disk_id,
                           delete_job_id=delete_job_id,
                           pool=pool,
                           success=mesg_success,
                           warning=mesg_warning)
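The err/success/warning session keys implement one-shot flash messages by hand: each is read once, then popped. Flask's built-in flash()/get_flashed_messages() provides the same behavior; a self-contained sketch:

from flask import Flask, flash, get_flashed_messages

app = Flask(__name__)
app.secret_key = "dev"  # flashes live in the signed session cookie

with app.test_request_context("/"):
    flash("disk created", "success")
    # Reading the messages also clears them, like session.pop() above.
    print(get_flashed_messages(with_categories=True))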
Example #6
    def add_replication_job(self, job_entity, src_disk_meta):
        consul_api = ConsulAPI()
        jobs = consul_api.get_replication_jobs()

        for job_id, job in jobs.items():
            if job.job_name == job_entity.job_name:
                raise ReplicationException(ReplicationException.DUPLICATE_NAME, "Duplicate replication job name error.")

        manage_remote_replication = ManageRemoteReplication()

        manage_remote_replication.disk_id = job_entity.destination_disk_id
        manage_remote_replication.cluster_name = job_entity.destination_cluster_name
        job_id = consul_api.get_next_job_id()

        source_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        job_entity.job_id = job_id
        job_entity.source_cluster_fsid = source_fsid

        src_disk_meta.replication_info["src_cluster_fsid"] = source_fsid

        mng_rep_info = ManageDiskReplicationInfo()
        src_disk_meta = mng_rep_info.set_replication_info(job_entity.destination_cluster_name, src_disk_meta)

        replication_info = src_disk_meta.replication_info

        # update source and destination disks meta.
        manage_remote_replication.update_replication_info(replication_info)
        mng_rep_info.update_replication_info(src_disk_meta, replication_info)
        system_date_time = str(datetime.datetime.now()).split('.')[0]

        # save job in consul
        consul_api.update_replication_job(job_entity)

        # save the job log in consul:
        log_text = "{} - Job {} has been created.".format(system_date_time, job_id)
        self.log_replication_job(job_id, log_text)

        # start replication job:
        self.start_replication_job(job_entity)
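One small idiom worth noting: str(datetime.datetime.now()).split('.')[0] trims the microseconds from the default string form of a datetime. A quick check, with strftime as the more explicit equivalent:

import datetime

now = datetime.datetime(2024, 1, 2, 3, 4, 5, 678901)
assert str(now).split('.')[0] == "2024-01-02 03:04:05"
assert now.strftime("%Y-%m-%d %H:%M:%S") == "2024-01-02 03:04:05"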
def get_ceph_volumes_info():
    ceph_volumes_disks = {}
    cluster_fsid = ''
    vg_name = ""
    partitions_uuid = {}
    try:
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        partitions_uuid = ceph_disk.get_partitions_uuid()
    except Exception as e:
        logger.error(e)
    cmd = 'ceph-volume --log-path ' + CEPH_VOLUME_LOG_PATH + ' lvm list --format json'
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            logger.error(stderr)
    if len(stdout) > 0:
        ceph_volumes_info = json.loads(stdout)
        for osd_id, osd_info in ceph_volumes_info.items():
            try:
                ceph_volume_disk_info = CephVolumeInfo()
                fsid = ''
                osd_name = ''
                for element in osd_info:
                    if element['type'] == 'block' or element['type'] == 'data':
                        fsid = element['tags']['ceph.cluster_fsid']
                        if len(fsid) > 0 and fsid != cluster_fsid:
                            continue
                        ceph_volume_disk_info.osd_id = osd_id
                        ceph_volume_disk_info.osd_uuid = element['tags'][
                            'ceph.osd_fsid']
                        if len(element['devices']) > 0:
                            for device in element['devices']:
                                part_name = ceph_disk.get_dev_name(device)
                                osd_name = ceph_disk.get_device_name(part_name)
                                ceph_volume_disk_info.devices.append(osd_name)

                        # if no devices (physical disks) exist, get them
                        # from get_physical_disks() by volume group name

                        else:
                            vg_name = element['vg_name']
                            lv_name = element['lv_name']
                            ceph_volume_disk_info.lv_name = lv_name
                            ceph_volume_disk_info.vg_name = vg_name
                            physical_list = lvm_lib.get_physical_disks(vg_name)
                            main_devices = list(physical_list["main"])
                            writecache_devices = list(
                                physical_list["writecache"])
                            cache_devices = list(physical_list["dmcache"])

                            if len(main_devices) > 0:
                                for main_dev in main_devices:
                                    main_part_name = ceph_disk.get_dev_name(
                                        main_dev)
                                    main_dev_name = ceph_disk.get_device_name(
                                        main_part_name)
                                    ceph_volume_disk_info.devices.append(
                                        main_dev_name)
                            if len(writecache_devices) > 0:
                                for wcache in writecache_devices:
                                    wcache_partition_name = ceph_disk.get_dev_name(
                                        wcache)
                                    ceph_volume_disk_info.linked_cache_part_num.append(
                                        ceph_disk.get_partition_num(
                                            wcache_partition_name))
                                    ceph_volume_disk_info.linked_cache.append(
                                        wcache_partition_name)
                            elif len(cache_devices) > 0:
                                for cache in cache_devices:
                                    cache_partition_name = ceph_disk.get_dev_name(
                                        cache)
                                    ceph_volume_disk_info.linked_cache_part_num.append(
                                        ceph_disk.get_partition_num(
                                            cache_partition_name))
                                    ceph_volume_disk_info.linked_cache.append(
                                        cache_partition_name)

                        journal_path = ""

                        uuid = ""

                        # for filestore:
                        if 'ceph.journal_uuid' in element['tags']:
                            uuid = element['tags']['ceph.journal_uuid']

                        # for bluestore:
                        if 'ceph.db_uuid' in element['tags']:
                            uuid = element['tags']['ceph.db_uuid']
                        if len(uuid) > 0 and uuid in partitions_uuid:
                            journal_path = partitions_uuid[uuid]
                        if len(journal_path) > 0:
                            try:
                                if ceph_disk.is_partition(journal_path):
                                    journal_name = get_disk_by_partition(
                                        journal_path)
                                    journal_partition_name = ceph_disk.get_dev_name(
                                        journal_path)
                                    ceph_volume_disk_info.linked_journal_part_num = ceph_disk.get_partition_num(
                                        journal_partition_name)
                                    if len(osd_name) > 0 and osd_name in journal_name:
                                        continue
                                    ceph_volume_disk_info.linked_journal = journal_name
                            except Exception as ex:
                                logger.error(ex)
                                continue
            except Exception as e:
                logger.exception(e)
                continue

            for device in ceph_volume_disk_info.devices:
                ceph_volumes_disks.update({device: ceph_volume_disk_info})

    return ceph_volumes_disks
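The JSON this function parses maps each OSD id to a list of LV records. A fabricated sample (every field value is made up) showing the core filter in isolation:

import json

sample = json.loads("""
{
  "0": [
    {"type": "block",
     "tags": {"ceph.cluster_fsid": "9c3a41f7-0000-0000-0000-000000000000",
              "ceph.osd_fsid": "aaaaaaaa-0000-0000-0000-000000000000"},
     "devices": ["/dev/sdb1"],
     "vg_name": "ceph-vg", "lv_name": "osd-block-0"}
  ]
}
""")
for osd_id, records in sample.items():
    for rec in records:
        if rec["type"] in ("block", "data"):
            print(osd_id, rec["tags"]["ceph.osd_fsid"], rec["devices"])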
def get_ceph_disk_list():
    disk_info_list = []

    # read fsid for our cluster from config file
    fsid = None
    try:
        fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
    except Exception as e:
        logger.error(e)

    journal_linked_osds = {}

    counter = 0

    while True:
        try:
            ceph_disk_list_devs = ceph_disk.list_devices()
            break
        except Exception as e:
            if counter == 120:
                return disk_info_list
            counter += 1
            logger.error(e)
            time.sleep(1)

    for device in ceph_disk_list_devs:

        no_of_partitions = 0
        no_of_available_partitions = 0

        path = device['path']
        name = ceph_disk.get_dev_name(path)

        # check for disk block devices
        if not name.startswith('sd') and not name.startswith(
                'xvd') and not name.startswith('nvme'):
            continue

        di = DiskInfo()
        disk_info_list.append(di)
        di.name = name
        di.usage = DiskUsage.no

        # check if disk is not partitioned
        if 'partitions' not in device:
            continue

        old_osd = False
        # first check for OSD partitions
        for partition in device['partitions']:
            if partition['ptype'] == ptype_osd and 'whoami' in partition:
                if fsid and partition['ceph_fsid'] == fsid:
                    di.usage = DiskUsage.osd
                    di.osd_id = partition['whoami']
                    di.osd_uuid = partition['uuid']

                    if 'journal_dev' in partition:
                        journal = partition['journal_dev']
                        journal_disk = get_disk_by_partition(journal)
                        if journal_disk != name:
                            di.linked_journal = journal_disk
                            if journal_disk not in journal_linked_osds:
                                journal_linked_osds[journal_disk] = []
                            journal_linked_osds[journal_disk].append(di.name)

                    if 'block.db_dev' in partition:
                        journal = partition['block.db_dev']
                        journal_disk = get_disk_by_partition(journal)
                        if journal_disk != name:
                            di.linked_journal = journal_disk
                            if journal_disk not in journal_linked_osds:
                                journal_linked_osds[journal_disk] = []
                            journal_linked_osds[journal_disk].append(di.name)

                    # do not check further partitions
                    break
                else:
                    old_osd = True

        if di.usage == DiskUsage.osd:
            # go to next disk
            continue

        # check for journal disk
        if not old_osd:
            no_of_partitions = len(device['partitions'])
            for partition in device['partitions']:
                if partition['ptype'] in (ptype_journal, ptype_blockdb,
                                          journal_avail_ptype):
                    di.usage = DiskUsage.journal

                    if partition['ptype'] == journal_avail_ptype:
                        no_of_available_partitions += 1
                    """
                    if 'journal_for' in partition:
                        journal_for = partition['journal_for']
                        journal_for_disk = get_disk_by_partition(journal_for)
                        di.linked_osds.append(journal_for_disk)
                    """
                # check for cache partition
                if partition['ptype'] in (cache_used_ptype, cache_avail_ptype):
                    di.usage = DiskUsage.cache

                    if partition['ptype'] == cache_avail_ptype:
                        no_of_available_partitions += 1

        if di.usage == DiskUsage.journal or di.usage == DiskUsage.cache:
            if di.usage == DiskUsage.cache and no_of_partitions > 0:
                di.no_of_partitions = no_of_partitions
                di.no_available_partitions = no_of_available_partitions
            # go to next disk
            continue

        # check for mounted partitions
        for partition in device['partitions']:
            if 'mount' in partition:
                mount_path = partition['mount']
                if mount_path is not None and len(mount_path) > 0:
                    di.usage = DiskUsage.mounted
                    # check for system disk
                    if mount_path == '/':
                        di.usage = DiskUsage.system
                        break

    for di in disk_info_list:
        if di.usage == DiskUsage.journal and di.name in journal_linked_osds:
            di.linked_osds = journal_linked_osds[di.name]

    return disk_info_list
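The bounded retry around ceph_disk.list_devices() (roughly 120 attempts, one second apart, giving up with an empty result) generalizes to a small helper; a sketch:

import time

def retry(fn, attempts=120, delay=1.0, default=None):
    """Call fn() until it succeeds; return default after the last failure."""
    for attempt in range(attempts):
        try:
            return fn()
        except Exception:
            if attempt == attempts - 1:
                return default
            time.sleep(delay)

print(retry(lambda: 42, default=[]))  # succeeds immediately -> 42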
Example #9
    def create(self, origin_device, cache_device):
        self.origin_device = origin_device
        self.cache_device = cache_device
        devices = []
        cluster_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())

        logger.info("Getting the next OSD ID :")
        logger.info("-------------------------")

        next_osd_id = ceph_disk_lib.get_next_osd_id()
        if len(next_osd_id) == 0:
            logger.error("Can't get next OSD id.")
            return None

        logger.info("next_osd_id = {}\n".format(str(next_osd_id)))
        vg_name = "ps-" + cluster_fsid + "-wc-osd." + str(next_osd_id)
        logger.info("vg_name = {}\n".format(str(vg_name)))

        cache_lv_name = "cache"
        main_lv_name = "main"
        origin_part_name = ""
        cache_part = ""

        try:
            logger.info("==================================================")
            logger.info("=====          Building DM-Writecache        =====")
            logger.info("==================================================")

            # ======================================= Slow Device ======================================= #
            logger.info("Step 1: Preparing Slow Disk:")
            logger.info("------------------------------")

            # Create one partition covering the whole slow disk:
            origin_part_num = ceph_disk.create_osd_partition(origin_device)
            origin_part = ceph_disk.get_partition_name(origin_device,
                                                       origin_part_num)

            if origin_part is not None:
                origin_part_name = '/dev/' + origin_part
                devices.append(origin_part_name)
                logger.info("origin_part_name = {}".format(
                    str(origin_part_name)))
                logger.info("done\n")

            else:
                logger.info(
                    "Creating a partition on the slow disk failed.")
                ceph_disk_lib.clean_disk(origin_device)
                return None

            # ======================================= Fast Device ======================================= #
            logger.info("Step 2: Preparing Fast Disk:")
            logger.info("------------------------------")

            # Get an available partition on the cache disk:
            avail_partitions_ls = ceph_disk_lib.get_partitions_by_type(
                cache_device, ceph_disk_lib.cache_avail_ptype)

            if len(avail_partitions_ls) > 0:
                # Sort the partitions list alphabetically:
                avail_partitions_ls.sort()

                cache_part = avail_partitions_ls[0]
                cache_part_path = '/dev/' + cache_part

                ceph_disk_lib.clean_disk(cache_part)

                # clean the metadata superblock
                self.clean_superblock(cache_part)

                devices.append(cache_part_path)
                logger.info("cache_part_path = {}".format(
                    str(cache_part_path)))
                logger.info("done\n")

            else:
                logger.info(
                    "Getting an available partition on the cache disk failed.")
                ceph_disk_lib.clean_disk(origin_device)
                return None

            # ==================================== Physical Volumes ===================================== #
            logger.info("Step 3: Creating Physical Volumes:")
            logger.info("------------------------------------")

            # Create Physical Volumes (pvs) on both partitions:
            if create_pv(origin_part_name) is None:
                logger.info(
                    "Creating Physical Volume on {} failed.".format(
                        origin_part_name))
                ceph_disk_lib.clean_disk(origin_device)
                return None

            if create_pv(cache_part_path) is None:
                logger.info(
                    "Creating Physical Volume on {} failed.".format(
                        cache_part_path))
                ceph_disk_lib.clean_disk(origin_device)
                ceph_disk_lib.clean_disk(cache_part)
                ceph_disk_lib.set_partition_type(
                    cache_part, ceph_disk_lib.cache_avail_ptype)
                return None

            logger.info("done\n")

            # ====================================== Volume Groups ====================================== #
            logger.info("Step 4: Creating Volume Group:")
            logger.info("--------------------------------")

            # Check for a name collision before creating the Volume Group (vg):
            vg = get_vg(vg_name)

            while vg is not None:
                next_osd_id = ceph_disk_lib.get_next_osd_id()
                if len(next_osd_id) == 0:
                    logger.error("Can't get next OSD id.")
                    logger.error("Creating Volume Group has been failed.")
                    ceph_disk_lib.clean_disk(origin_device)
                    ceph_disk_lib.clean_disk(cache_part)
                    ceph_disk_lib.set_partition_type(
                        cache_part, ceph_disk_lib.cache_avail_ptype)
                    return None

                logger.info("new next_osd_id = {}\n".format(str(next_osd_id)))
                vg_name = "ps-" + cluster_fsid + "-wc-osd." + str(next_osd_id)
                logger.info("new vg_name = {}\n".format(str(vg_name)))

                vg = get_vg(vg_name)

            vg = create_vg(devices, vg_name)

            if vg is None:
                logger.info("Creating Volume Group has been failed.")
                ceph_disk_lib.clean_disk(origin_device)
                ceph_disk_lib.clean_disk(cache_part)
                ceph_disk_lib.set_partition_type(
                    cache_part, ceph_disk_lib.cache_avail_ptype)
                return None

            vg_name = vg.vg_name
            logger.info("vg_name = {}".format(str(vg_name)))
            logger.info("done\n")

            # ===================================== Logical Volumes ===================================== #
            logger.info("Step 5: Creating Logical Volumes:")
            logger.info("-----------------------------------")

            # Create the Logical Volumes (lvs):
            main_lv_name = create_lv(main_lv_name, vg_name, origin_part_name)

            if main_lv_name is None:
                logger.info(
                    "Creating Logical Volume for the main device failed.")
                activate_vg(vg_name)
                ceph_disk_lib.clean_disk(origin_device)
                ceph_disk_lib.clean_disk(cache_part)
                ceph_disk_lib.set_partition_type(
                    cache_part, ceph_disk_lib.cache_avail_ptype)

                return None

            logger.info("main_lv_name = {}".format(str(main_lv_name)))

            cache_lv_name = create_lv(cache_lv_name, vg_name, cache_part_path)

            if cache_lv_name is None:
                logger.info(
                    "Creating Logical Volume for the cache device failed.")
                activate_vg(vg_name)
                ceph_disk_lib.clean_disk(origin_device)
                ceph_disk_lib.clean_disk(cache_part)
                ceph_disk_lib.set_partition_type(
                    cache_part, ceph_disk_lib.cache_avail_ptype)
                return None

            logger.info("cache_lv_name = {}".format(str(cache_lv_name)))
            logger.info("done\n")

            # =================================== Building writecache =================================== #
            logger.info("Step 6 : Building dm-writecache :")
            logger.info("---------------------------------")
            error = 0
            cmd = "lvchange -a n " + vg_name + "/" + cache_lv_name
            ret, stdout, stderr = exec_command_ex(cmd)

            if ret != 0:
                if stderr:
                    logger.info(
                        "Error running command: {}, error:\n{}".format(
                            cmd, stderr))
                    error += 1
            else:
                logger.info("cmd = {}  --->  done".format(str(cmd)))
                cmd = "modprobe dm-writecache"
                ret, stdout, stderr = exec_command_ex(cmd)

                if ret != 0:
                    if stderr:
                        logger.info(
                            "Error running command: {}, error:\n{}".format(
                                cmd, stderr))
                        error += 1
                else:
                    logger.info("cmd = {}  --->  done".format(str(cmd)))
                    cmd = 'lvconvert --yes --type writecache --cachevol ' + cache_lv_name + \
                          " --cachesettings 'writeback_jobs=102400' " + vg_name + "/" + main_lv_name
                    ret, stdout, stderr = exec_command_ex(cmd)

                    if ret != 0:
                        if stderr:
                            logger.info(
                                "Error running command: {}, error:\n{}".format(
                                    cmd, stderr))
                            error += 1

            if error > 0:
                activate_vg(vg_name)
                ceph_disk_lib.clean_disk(origin_device)
                ceph_disk_lib.clean_disk(cache_part)

                ceph_disk_lib.set_partition_type(
                    cache_part, ceph_disk_lib.cache_avail_ptype)
                logger.info(
                    "Create WriteCache: building writecache failed.")
                return None

            logger.info("cmd = {}  --->  done".format(str(cmd)))
            ceph_disk_lib.set_partition_type(cache_part,
                                             ceph_disk_lib.cache_used_ptype)

            lv_path = vg_name + "/" + main_lv_name
            logger.info("lv_path = {}".format(str(lv_path)))

            logger.info("done")
            logger.info("==================================================")

            return lv_path, cache_part

        except Exception as ex:
            err = "Cannot build writecache for disk {} and cache partition {} , Exception is : {}".format(
                origin_device, cache_device, ex.message)
            logger.exception(err)
            ceph_disk_lib.clean_disk(origin_device)
            ceph_disk_lib.clean_disk(cache_part)
            ceph_disk_lib.set_partition_type(cache_part,
                                             ceph_disk_lib.cache_avail_ptype)
            logger.info("==================================================")