    def get_disks_meta(self):
        ceph_api = CephAPI()
        consul_api = ConsulAPI()
        ls = ceph_api.get_disks_meta()
        for disk in ls:
            if disk and hasattr(disk, "paths") and not disk.paths:
                disk.status = DisplayDiskStatus.unattached
            elif disk and hasattr(disk, "paths") and disk.paths:
                data = consul_api.find_disk(disk.id)
                if data is not None:
                    disk.status = DisplayDiskStatus.starting
                    if str(data.Flags) == "1":
                        disk.status = DisplayDiskStatus.stopping
                    elif consul_api.is_path_locked(disk.id):
                        disk.status = DisplayDiskStatus.started

                else:
                    disk.status = DisplayDiskStatus.stopped

                job_manager = JobManager()
                job_list = job_manager.get_running_job_list()

                for j in job_list:

                    # Check if the job is running
                    if j.is_running:
                        # Set disk status [deleting]
                        if j.type == JobType.DELETE_DISK and str(
                                j.params).find(str(disk.id)) > -1:
                            disk.status = DisplayDiskStatus.deleting

        return ls
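A minimal caller sketch for the method above (the ManageDisk class name and import path are assumptions; PetaSAN's real layout may differ):

# Hypothetical usage: print each disk with its resolved display status.
from PetaSAN.backend.manage_disk import ManageDisk   # assumed module path

for disk in ManageDisk().get_disks_meta():
    print("{}: {}".format(disk.id, disk.status))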
def get_consul_data():
    api = ConsulAPI()
    ob = api.find_disk("00001")
    if ManageDiskStatus.error == ob:
        print("error get consul data")
    else:
        print(ob)
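The object printed above is Consul's KV record for the disk key. The fields these examples rely on are standard Consul KV attributes, roughly:

# Shape of a Consul KV entry (standard fields, values illustrative):
# Key, Value, Flags, CreateIndex, ModifyIndex, LockIndex, Session
# Flags == 1 is the convention used in these examples to mark a stop request.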
    def stop(self, disk_id):
        try:
            consul_api = ConsulAPI()
            kv = consul_api.find_disk(disk_id)
            # writing the key back with flags=1 marks a stop request
            # (kv may be None if the disk is already stopped; the except
            # below absorbs the resulting AttributeError)
            return consul_api.add_disk_resource(disk_id, "disk", 1,
                                                kv.CreateIndex)
        except Exception as ex:
            logger.error("stop disk exception :{}".format(ex.message))
            return ManageDiskStatus.error
Example #4
def _stop_disk(disk_id):
    try:
        consul_api = ConsulAPI()
        kv = consul_api.find_disk(disk_id)
        if not kv:
            return
        # was kv.CreateIndex; a Consul check-and-set write succeeds only when
        # the index passed matches the key's current ModifyIndex:
        consul_api.add_disk_resource(disk_id, 'disk', 1, kv.ModifyIndex)
    except Exception as ex:
        logger.error('Error stopping disk:{} {}'.format(disk_id, ex.message))
        raise ConsulException(ConsulException.GENERAL_EXCEPTION,
                              'General Consul Error')
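The CreateIndex-to-ModifyIndex fix matters because Consul's check-and-set put succeeds only when the supplied index equals the key's current ModifyIndex. A minimal sketch with the python-consul client (an assumption; PetaSAN's ConsulAPI wraps its own calls, and the key path below is made up):

import consul

c = consul.Consul()
index, item = c.kv.get("PetaSAN/Disks/00001")       # hypothetical key path
if item is not None:
    # cas write: rejected unless the key is still at this ModifyIndex;
    # flags=1 marks the stop request, as in the examples above
    ok = c.kv.put("PetaSAN/Disks/00001", item["Value"],
                  cas=item["ModifyIndex"], flags=1)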
    def edit_disk(self, disk_meta, auth_auto, pool):
        """

        :type disk_meta: DiskMeta
        """
        try:
            if not disk_meta.id or not disk_meta.disk_name or not disk_meta.size or type(
                    disk_meta.size) != int:
                return ManageDiskStatus.data_missing
            elif not auth_auto and (not disk_meta.user
                                    or not disk_meta.password):
                return ManageDiskStatus.data_missing

            old_disk_meta = self.get_disk(disk_meta.id, pool)

            disk_meta.paths = old_disk_meta.paths
            disk_meta.iqn = old_disk_meta.iqn

            disk_meta.pool = pool

            ceph_api = CephAPI()

            if auth_auto:
                disk_meta.user = ""
                disk_meta.password = ""

            consul_api = ConsulAPI()
            status = consul_api.find_disk(disk_meta.id)

            if status is not None:
                return ManageDiskStatus.disk_exists  # disk is running and attached
            else:
                status = ceph_api.add_disk(disk_meta, False, pool)

        except DiskListException as e:
            status = ManageDiskStatus.disk_get__list_error
            logger.exception(e.message)
        except Exception as e:
            status = ManageDiskStatus.error
            logger.exception(e.message)
        return status
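A hypothetical call for the method above; DiskMeta's field names follow the validation checks, but the import paths are assumptions:

from PetaSAN.core.entity.disk import DiskMeta        # assumed module path
from PetaSAN.backend.manage_disk import ManageDisk   # assumed module path

meta = DiskMeta()
meta.id = "00001"
meta.disk_name = "vm-images"
meta.size = 100                     # must be an int, per the checks above
status = ManageDisk().edit_disk(meta, auth_auto=True, pool="rbd")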
Example #6
def stop_dest_disk(args):
    disk_id = args.disk_id
    consul_api = ConsulAPI()
    try:
        kv = consul_api.find_disk(disk_id)
        if kv is not None:
            manage_disk = ManageDisk()
            status = manage_disk.stop(disk_id)

            if status != Status.done:
                print('Error : Cannot stop disk , id = ' + disk_id)
                sys.exit(-1)

            sys.exit(0)

        else:
            print('Disk {} is already stopped.'.format(disk_id))
            sys.exit(0)

    except Exception as e:
        print("Error : Exception , {}".format(e.message))
        sys.exit(-1)
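Plausible CLI wiring for the handler above (a sketch; only --disk_id is implied by args.disk_id, everything else is an assumption):

import argparse

parser = argparse.ArgumentParser(description="Stop a destination disk.")
parser.add_argument("--disk_id", required=True,
                    help="PetaSAN disk id, e.g. 00001")
stop_dest_disk(parser.parse_args())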
    def add_disk(self, disk_meta, manual_ips, path_type, paths_count,
                 auth_auto, auto_ip, pool):
        """
        :type path_type: PathType
        :type manual_ips: [string]
        :type paths_count: int
        :type disk_meta: DiskMeta
        """
        cfg = ManageConfig()
        paths = []
        try:
            if not disk_meta.disk_name or not disk_meta.size or type(
                    disk_meta.size) != int:
                return ManageDiskStatus.data_missing
            elif not auth_auto and (not disk_meta.user
                                    or not disk_meta.password):
                return ManageDiskStatus.data_missing
            elif not auto_ip and int(paths_count) > 2:
                return ManageDiskStatus.wrong_data
            elif not auto_ip and int(paths_count) != len(manual_ips):
                return ManageDiskStatus.wrong_data
            elif not auto_ip:
                ip_status = cfg.validate_new_iscsi_ips(manual_ips, path_type)
                if ip_status == NewIPValidation.valid:
                    for ip in manual_ips:
                        paths.append(cfg.get_path(ip))
                elif ip_status == NewIPValidation.used_already:
                    return ManageDiskStatus.used_already
                elif ip_status == NewIPValidation.wrong_subnet:
                    return ManageDiskStatus.wrong_subnet
                else:
                    return ManageDiskStatus.wrong_data
            elif auto_ip:
                paths.extend(cfg.get_new_iscsi_ips(path_type, paths_count))
            if not paths:
                return ManageDiskStatus.ip_out_of_range

            new_id = self.__get_next_disk_id()
            disk_meta.id = new_id
            disk_meta.paths = paths
            disk_meta.wwn = self.__get_wwn(new_id)

            if auth_auto:
                disk_meta.user = ""
                disk_meta.password = ""

            disk_meta.iqn = ":".join([cfg.get_iqn_base(), new_id])
            consul_api = ConsulAPI()
            disk_data = consul_api.find_disk(disk_meta.id)

            if disk_data is not None:
                return ManageDiskStatus.disk_exists

            ceph_api = CephAPI()
            status = ceph_api.add_disk(disk_meta, True, pool)
            if status == ManageDiskStatus.done:
                consul_api.add_disk_resource(disk_meta.id, "disk")
                consul_api.add_disk_pool(disk_meta.id, pool)
                i = 0
                for p in paths:
                    i += 1
                    consul_api.add_disk_resource(
                        "/".join(["", disk_meta.id, str(i)]), None)

        except DiskListException as e:
            status = ManageDiskStatus.disk_get__list_error
            logger.exception(e.message)
        except Exception as e:
            status = ManageDiskStatus.error
            logger.exception(e.message)
        return status
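And a hypothetical add_disk call with auto-assigned paths; PathType and its member name are assumptions here, as are the imports (see the edit_disk sketch above):

meta = DiskMeta()
meta.disk_name = "backup"
meta.size = 50                      # int, in the unit add_disk expects
status = ManageDisk().add_disk(meta, manual_ips=None,
                               path_type=PathType.iscsi_subnet1,  # assumed member
                               paths_count=2, auth_auto=True,
                               auto_ip=True, pool="rbd")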
    def attach_disk(self, disk_meta, manual_ips, path_type, paths_count,
                    auth_auto, auto_ip, pool):
        """

        :type disk_meta: DiskMeta
        """
        manage_config = ManageConfig()
        paths_list = []
        do_rename = False
        none_petasan_image = ""
        try:
            if not disk_meta.disk_name or not disk_meta.size or type(
                    disk_meta.size) != int:
                return ManageDiskStatus.data_missing
            elif not auth_auto and (not disk_meta.user
                                    or not disk_meta.password):
                return ManageDiskStatus.data_missing
            elif not auto_ip and int(paths_count) > 2:
                return ManageDiskStatus.wrong_data
            elif not auto_ip and int(paths_count) != len(manual_ips):
                return ManageDiskStatus.wrong_data
            elif not auto_ip:
                ip_status = manage_config.validate_new_iscsi_ips(
                    manual_ips, path_type)
                if ip_status == NewIPValidation.valid:
                    for ip in manual_ips:
                        paths_list.append(manage_config.get_path(ip))
                elif ip_status == NewIPValidation.used_already:
                    return ManageDiskStatus.used_already
                elif ip_status == NewIPValidation.wrong_subnet:
                    return ManageDiskStatus.wrong_subnet
            elif auto_ip:
                paths_list.extend(
                    manage_config.get_new_iscsi_ips(path_type, paths_count))

            if not paths_list:
                return ManageDiskStatus.ip_out_of_range

            ceph_api = CephAPI()
            consul_api = ConsulAPI()
            image_name_prefix = ConfigAPI().get_image_name_prefix()
            if not "".join([image_name_prefix,
                            str(disk_meta.id)
                            ]) in ceph_api.get_rbd_images(pool):
                new_id = self.__get_next_disk_id()
                if ceph_api.is_image_busy(disk_meta.id, pool):
                    return ManageDiskStatus.is_busy
                do_rename = True
                none_petasan_image = disk_meta.id
                disk_meta.id = new_id

            disk_meta.pool = pool
            disk_meta.iqn = ":".join(
                [mange_config.get_iqn_base(), disk_meta.id])
            disk_meta.paths = paths_list
            if auth_auto:
                disk_meta.user = ""
                disk_meta.password = ""
            disk_meta.wwn = self.__get_wwn(disk_meta.id)

            status = consul_api.find_disk(disk_meta.id)
            if status is not None:
                return ManageDiskStatus.disk_exists  # disk is running and attached
            else:
                if do_rename:
                    ceph_api.rename_image_to_petasan_index(
                        none_petasan_image, image_name_prefix + new_id, pool)
                status = ceph_api.add_disk(disk_meta, False, pool)
                if status == ManageDiskStatus.done:
                    consul_api.add_disk_resource(disk_meta.id, "disk")
                    i = 0
                    for p in paths_list:
                        i += 1
                        consul_api.add_disk_resource(
                            "/".join(["", disk_meta.id,
                                      str(i)]), None)
                else:
                    if do_rename:
                        ceph_api.rename_image_to_petasan_index(
                            image_name_prefix + new_id, none_petasan_image,
                            pool)

        except DiskListException as e:
            status = ManageDiskStatus.disk_get__list_error
            logger.exception(e.message)
        except Exception as e:
            status = ManageDiskStatus.error
            logger.exception(e.message)
        return status
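Note the compensating rename in attach_disk: a non-PetaSAN image is first renamed into the PetaSAN index, then renamed back if add_disk fails. The same pattern in isolation (a sketch built on the rename call shown above):

def add_with_rename_rollback(ceph_api, old_name, new_name, pool, add_action):
    # rename forward, run the add, rename back if it did not complete
    ceph_api.rename_image_to_petasan_index(old_name, new_name, pool)
    status = add_action()
    if status != ManageDiskStatus.done:
        ceph_api.rename_image_to_petasan_index(new_name, old_name, pool)
    return status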
    def delete_disk(self, disk_id, pool):
        ceph_api = CephAPI()
        consul_api = ConsulAPI()

        ls = ceph_api.get_disks_meta_for_pool(pool)
        disk_status = None
        try:
            for disk in ls:
                if disk_id == disk.id:

                    if disk and hasattr(disk, "paths") and not disk.paths:
                        disk_status = DisplayDiskStatus.unattached
                    elif disk and hasattr(disk, "paths") and disk.paths:
                        data = consul_api.find_disk(disk.id)
                        if data is not None:
                            disk_status = DisplayDiskStatus.started
                            if str(data.Flags) == "1":
                                disk_status = DisplayDiskStatus.stopping
                        else:
                            disk_status = DisplayDiskStatus.stopped
                    break
        except Exception:
            return StopDiskStatus.error

        if disk_status == DisplayDiskStatus.started or disk_status == DisplayDiskStatus.stopping:
            return StopDiskStatus.working

        elif disk_status is None:
            return StopDiskStatus.error

        elif disk_status == DisplayDiskStatus.stopped or disk_status == DisplayDiskStatus.unattached:
            # return ceph_api.delete_disk(disk_id,pool)

            # start: delete disk as a job
            __image_name_prefix = ConfigAPI().get_image_name_prefix()

            # set image_name by disk_id :
            image_name = disk_id

            # if PetaSAN disk :
            if disk_id.isdigit() and (len(disk_id) == 5):
                image_name = __image_name_prefix + str(disk_id)

            jm = JobManager()

            try:
                id = jm.add_job(JobType.DELETE_DISK, image_name + ' ' + pool)
                print("Start Delete image: ", image_name)

                if id > 0:
                    logger.info(
                        "Deleting disk: {} has been started as a job".format(
                            image_name))
                return id

            except Exception as ex:
                logger.error("Error Deleting disk: {}".format(image_name))
                # end: delete disk as a job #

        else:
            return StopDiskStatus.error
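This is also how the first example resolves the deleting status: DELETE_DISK job params are the plain string "<image_name> <pool>", so a substring test on the disk id is enough. In miniature:

# Pure-Python sketch of the matching done in get_disks_meta above:
params = "image-00001 rbd"          # as composed by add_job: image + pool
disk_id = "00001"
is_deleting = str(params).find(str(disk_id)) > -1   # True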
def clear_disk(args):
    disk_id = args.disk_id
    image_name = "image-" + disk_id

    try:
        # Get which ceph user is using this function & get his keyring file path #
        # ---------------------------------------------------------------------- #
        ceph_auth = CephAuthenticator()

        config = configuration()
        cluster_name = config.get_cluster_name()

        # Get disk metadata :
        # -------------------
        ceph_api = CephAPI()
        disk_metadata = ceph_api.get_diskmeta(disk_id)

        # Get pool name :
        # ---------------
        pool_name = disk_metadata.pool
        data_pool = ""

        # Check if disk has been created on replicated pool or erasure pool :
        # -------------------------------------------------------------------
        if len(disk_metadata.data_pool) > 0:
            data_pool = disk_metadata.data_pool

        tmp_image_name = "tmp_disk_" + disk_metadata.id

        # (1.) Check if a previous tmp image for this disk still exists :
        # ===================================================================
        images_list = ceph_api.get_all_images(pool_name)

        for image in images_list:
            if tmp_image_name in image:
                # Delete image #
                cmd = "rbd rm {}/{} {} --cluster {}".format(
                    pool_name, image, ceph_auth.get_authentication_string(),
                    cluster_name)
                if not call_cmd(cmd):
                    print(
                        "Error : clear_disk.py script : cannot remove tmp image ,\ncmd : "
                        + cmd)
                    sys.exit(-1)

        print(
            "Stage 1 :\n\tCheck if a previous tmp image for this disk still exists > (Completed)"
        )
        logger.info(
            "Stage 1 :\n\tCheck if a previous tmp image for this disk still exists > (Completed)"
        )

        # (2.) Stop old disk :
        # ====================
        consul_api = ConsulAPI()
        kv = consul_api.find_disk(disk_id)
        if kv is not None:
            manage_disk = ManageDisk()
            status = manage_disk.stop(disk_id)

            if status != Status.done:
                print('Error : Cannot stop disk , id = ' + disk_id)
                sys.exit(-1)

            print("Stage 2 :\n\tStop old disk > (Completed)")
            logger.info("Stage 2 :\n\tStop old disk > (Completed)")
            time.sleep(3)

            # (3.) Check if old disk is stopped or not :
            # ==========================================
            if len(data_pool) > 0:
                pool_type = "erasure"
                _confirm_disk_stopped(data_pool, disk_id, pool_type)
            else:
                pool_type = "replicated"
                _confirm_disk_stopped(pool_name, disk_id, pool_type)

            print(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
            logger.info(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )

        else:
            print("Stage 2 :\n\tStop old disk > (Completed)")
            logger.info("Stage 2 :\n\tStop old disk > (Completed)")

            print(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
            logger.info(
                "Stage 3 :\n\tConfirm that disk is completely stopped > (Completed)"
            )
            print('\tclear_disk.py script : disk {} is already stopped'.format(
                disk_id))

        # (4.) Create a tmp image (not PetaSAN image) :
        # =============================================
        # Generate a random value between 1 and 100000 #
        random_no = str(random.randint(1, 100000))
        tmp_image_name = tmp_image_name + "_" + str(random_no)
        image_size = disk_metadata.size * 1024

        if len(data_pool) > 0:
            cmd = "rbd create {}/{} --size {} --data-pool {} {} --cluster {}".format(
                pool_name, tmp_image_name, image_size, data_pool,
                ceph_auth.get_authentication_string(), cluster_name)
        else:
            cmd = "rbd create {}/{} --size {} {} --cluster {}".format(
                pool_name, tmp_image_name, image_size,
                ceph_auth.get_authentication_string(), cluster_name)

        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot create new tmp image ,\ncmd : "
                + cmd)
            sys.exit(-1)

        print("Stage 4 :\n\tCreate a tmp image called ( " + tmp_image_name +
              " ) > (Completed)")
        logger.info("Stage 4 :\n\tCreate a tmp image called ( " +
                    tmp_image_name + " ) > (Completed)")

        # (5.) Run script to copy "old disk" metadata to new "tmp_disk" :
        # ===============================================================
        metadata_script_file = ConfigAPI().get_disk_meta_script_path()

        # Function : read_disks_metadata :
        parser_key_1 = "read"
        arg_1 = "--image"
        arg_2 = "--pool"

        # Function : set_disk_metadata :
        parser_key_2 = "write"
        arg_3 = "--file"

        cmd = metadata_script_file + " " + parser_key_1 + " " + arg_1 + " " + image_name + " " + arg_2 + " " + pool_name +\
              " | " + metadata_script_file + " " + parser_key_2 + " " + arg_1 + " " + tmp_image_name + " " + arg_2 + " " + pool_name

        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot copy metadata from old disk to new tmp image ,\ncmd : "
                + cmd)
            sys.exit(-1)

        print(
            "Stage 5 :\n\tRun script to copy 'old disk' metadata to new 'tmp_disk' > (Completed)"
        )
        logger.info(
            "Stage 5 :\n\tRun script to copy 'old disk' metadata to new 'tmp_disk' > (Completed)"
        )

        time.sleep(3)

        # (6.) Remove metadata of old disk :
        # ===========================================================
        old_image_name = str(ceph_api.conf_api.get_image_name_prefix() +
                             disk_metadata.id)
        confirm = ceph_api.remove_disk_metadata(old_image_name,
                                                disk_metadata.pool)

        if not confirm:
            print(
                "Error : clear_disk.py script : cannot remove metadata of old disk"
            )
            # sys.exit(-1)

        print("Stage 6 :\n\tRemove metadata of old disk > (Completed)")
        logger.info("Stage 6 :\n\tRemove metadata of old disk > (Completed)")

        # (7.) Rename old disk image to "deleted-" + disk_id + "-" + random_no :
        # ======================================================================
        new_image_name = "deleted-" + disk_metadata.id + "-" + random_no
        cmd = "rbd mv {}/{} {} {} --cluster {}".format(
            pool_name, image_name, new_image_name,
            ceph_auth.get_authentication_string(), cluster_name)
        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot rename old image from {} to {} ,\ncmd : {}"
                .format(image_name, new_image_name, cmd))
            sys.exit(-1)

        print("Stage 7 :\n\tRename old disk image name with ( " +
              new_image_name + " ) > (Completed)")
        logger.info("Stage 7 :\n\tRename old disk image name with ( " +
                    new_image_name + " ) > (Completed)")

        time.sleep(5)

        # (8.) Rename "tmp_disk" to the old disk image name :
        # =================================================
        cmd = "rbd mv {}/{} {} {} --cluster {}".format(
            pool_name, tmp_image_name, image_name,
            ceph_auth.get_authentication_string(), cluster_name)
        if not call_cmd(cmd):
            print(
                "Error : clear_disk.py script : cannot rename \"tmp_disk\" from {} to {} ,\ncmd : {}"
                .format(tmp_image_name, image_name, cmd))
            sys.exit(-1)

        print(
            "Stage 8 :\n\tRename 'tmp_disk' to the old disk image name > (Completed)"
        )
        logger.info(
            "Stage 8 :\n\tRename 'tmp_disk' to the old disk image name > (Completed)"
        )

        time.sleep(5)

        jm = JobManager()
        id = jm.add_job(JobType.DELETE_DISK, new_image_name + ' ' + pool_name)

        print("Stage 9 :\n\tStart a job to remove old disk image , job id = " +
              str(id))
        logger.info(
            "Stage 9 :\n\tStart a job to remove old disk image , job id = " +
            str(id))

        sys.exit(0)

    except PoolException as e:
        print("Error : PoolException , {}".format(e.message))
        logger.error("Clear Disk Error : PoolException , {}".format(e.message))
        sys.exit(-1)

    except DiskListException as e:
        print("Error : DiskListException , {}".format(e.message))
        logger.error("Clear Disk Error : DiskListException , {}".format(
            e.message))
        sys.exit(-1)

    except CephException as e:
        if e.id == CephException.GENERAL_EXCEPTION:
            print("Error : CephException , {}".format(e.message))
        logger.error("Clear Disk Error : CephException , {}".format(e.message))
        sys.exit(-1)

    except MetadataException as e:
        print("Error : MetadataException , {}".format(e.message))
        logger.error("Clear Disk Error : MetadataException , {}".format(
            e.message))
        sys.exit(-1)

    except Exception as e:
        print("Error : Exception , {}".format(e.message))
        logger.error("Clear Disk Error : Exception , {}".format(e.message))
        sys.exit(-1)
Example #11
def is_disk_started(id):
    # a disk counts as started when its key is present in Consul
    consul_api = ConsulAPI()
    return consul_api.find_disk(id) is not None
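Usage is direct; a disk id that has no Consul key reports False:

if is_disk_started("00001"):        # hypothetical disk id
    print("disk 00001 has a key in Consul (started or starting)")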