def start(self, disk_id, pool):
    """Start an iSCSI disk: read its PetaSAN metadata from the rbd image
    and register its Consul resources (one 'disk' key plus one key per
    path). Returns Status.done on success, Status.error otherwise.
    """
    try:
        ceph_api = CephAPI()
        consul_api = ConsulAPI()
        config_api = ConfigAPI()

        # The PetaSAN metadata is stored as an attribute on the image.
        image_attrs = ceph_api.read_image_metadata(
            config_api.get_image_name_prefix() + disk_id, pool)
        meta_json = image_attrs.get(config_api.get_image_meta_key())
        if not meta_json:
            # No PetaSAN metadata on this image -- cannot start it.
            return Status.error
        disk_meta = DiskMeta()
        disk_meta.load_json(meta_json)

        consul_api.add_disk_resource(disk_meta.id, "disk")
        consul_api.add_disk_pool(disk_meta.id, pool)
        # One Consul resource per path, keyed "/<disk id>/<path number>".
        for path_number, _path in enumerate(disk_meta.paths, 1):
            consul_api.add_disk_resource(
                "/".join(["", disk_meta.id, str(path_number)]), None)

    except Exception as e:
        logger.error("Can not start disk %s" % disk_id)
        logger.exception(e.message)
        return Status.error
    return Status.done
# Example #2
# 0
def _stop_disk(disk_id):
    """Stop a disk by flagging its Consul resource key.

    Looks up the disk key in Consul; if it is absent there is nothing to
    stop. Otherwise re-adds the 'disk' resource with flags=1 (stop
    marker), using the key's ModifyIndex as the check-and-set index so a
    concurrent update of the key makes this operation fail safely.

    Raises ConsulException on any Consul error.
    """
    try:
        consul_api = ConsulAPI()
        kv = consul_api.find_disk(disk_id)
        if not kv:
            # Disk is not registered in Consul -- nothing to stop.
            return
        # CAS on ModifyIndex (not CreateIndex) so only the current
        # version of the key is updated.
        consul_api.add_disk_resource(disk_id, 'disk', 1, kv.ModifyIndex)
    except Exception as ex:
        # Log the exception object itself: ex.message is deprecated and
        # empty/missing for many exception types.
        logger.error('Error stopping disk:{} {}'.format(disk_id, ex))
        raise ConsulException(ConsulException.GENERAL_EXCEPTION,
                              'General Consul Error')
 def stop(self, disk_id):
     """Stop a disk: flag its Consul 'disk' resource with flags=1.

     Returns the Consul update status, or ManageDiskStatus.error when
     the disk is not registered in Consul or the update fails.
     """
     try:
         consul_api = ConsulAPI()
         kv = consul_api.find_disk(disk_id)
         if not kv:
             # Previously an AttributeError on the missing kv fell
             # through to the except; report the error explicitly.
             return ManageDiskStatus.error
         # CAS on ModifyIndex (matches _stop_disk): CreateIndex never
         # changes, so it would not guard against a concurrently
         # modified key.
         return consul_api.add_disk_resource(disk_id, "disk", 1,
                                             kv.ModifyIndex)
     except Exception as ex:
         # ex.message is deprecated/unreliable; log the exception itself.
         logger.error("stop disk exception :{}".format(ex))
         return ManageDiskStatus.error
    def add_disk(self, disk_meta, manual_ips, path_type, paths_count,
                 auth_auto, auto_ip, pool):
        """Create a new iSCSI disk image and register it in Consul.

        Validates the request, resolves the iSCSI path IPs (automatic or
        manual), allocates a new disk id/wwn/iqn, creates the rbd image
        via Ceph, then registers the disk and its paths in Consul.

        :type path_type: PathType
        :type manual_ips: [string]
        :type paths_count: int
        :type disk_meta: DiskMeta
        :rtype: ManageDiskStatus
        """
        config = ManageConfig()
        paths = []
        try:
            # -- input validation -------------------------------------
            # type(...) != int (not isinstance) on purpose: preserves
            # rejection of bool and other int subclasses.
            if (not disk_meta.disk_name or not disk_meta.size
                    or type(disk_meta.size) != int):
                return ManageDiskStatus.data_missing
            if not auth_auto and (not disk_meta.user
                                  or not disk_meta.password):
                return ManageDiskStatus.data_missing
            if not auto_ip and int(paths_count) > 2:
                return ManageDiskStatus.wrong_data
            if not auto_ip and int(paths_count) != len(manual_ips):
                return ManageDiskStatus.wrong_data

            # -- resolve iSCSI path IPs -------------------------------
            if auto_ip:
                paths.extend(config.get_new_iscsi_ips(path_type,
                                                      paths_count))
            else:
                ip_status = config.validate_new_iscsi_ips(manual_ips,
                                                          path_type)
                if ip_status == NewIPValidation.valid:
                    paths = [config.get_path(ip) for ip in manual_ips]
                elif ip_status == NewIPValidation.used_already:
                    return ManageDiskStatus.used_already
                elif ip_status == NewIPValidation.wrong_subnet:
                    return ManageDiskStatus.wrong_subnet
                else:
                    return ManageDiskStatus.wrong_data
            if not paths:
                return ManageDiskStatus.ip_out_of_range

            # -- allocate identity ------------------------------------
            new_id = self.__get_next_disk_id()
            disk_meta.id = new_id
            disk_meta.paths = paths
            disk_meta.wwn = self.__get_wwn(new_id)
            if auth_auto:
                # Auto auth means no CHAP credentials.
                disk_meta.user = ""
                disk_meta.password = ""
            disk_meta.iqn = ":".join([config.get_iqn_base(), new_id])

            # -- create image and register in Consul ------------------
            consul_api = ConsulAPI()
            if consul_api.find_disk(disk_meta.id) is not None:
                return ManageDiskStatus.disk_exists

            status = CephAPI().add_disk(disk_meta, True, pool)
            if status == ManageDiskStatus.done:
                consul_api.add_disk_resource(disk_meta.id, "disk")
                consul_api.add_disk_pool(disk_meta.id, pool)
                # One resource per path: "/<disk id>/<path number>".
                for path_number, _path in enumerate(paths, 1):
                    consul_api.add_disk_resource(
                        "/".join(["", disk_meta.id, str(path_number)]),
                        None)

        except DiskListException as e:
            status = ManageDiskStatus.disk_get__list_error
            logger.exception(e.message)
        except Exception as e:
            status = ManageDiskStatus.error
            logger.exception(e.message)
        return status
    def attach_disk(self, disk_meta, manual_ips, path_type, paths_count,
                    auth_auto, auto_ip, pool):
        """Attach an existing rbd image as a PetaSAN iSCSI disk.

        If no image named <prefix><disk_meta.id> exists in the pool, the
        image is treated as non-PetaSAN: a new disk id is allocated and
        the image is renamed to the PetaSAN naming scheme (and renamed
        back if the attach fails).

        :type disk_meta: DiskMeta
        :type manual_ips: [string]
        :type paths_count: int
        :rtype: ManageDiskStatus
        """
        mange_config = ManageConfig()
        paths_list = []
        do_rename = False  # True once a non-PetaSAN image is scheduled for rename
        none_petasan_image = ""  # original image name, kept for rollback
        try:
            # -- input validation (same guards as add_disk) ------------
            if not disk_meta.disk_name or not disk_meta.size or type(
                    disk_meta.size) != int:
                return ManageDiskStatus.data_missing
            elif not auth_auto and (not disk_meta.user
                                    or not disk_meta.password):
                return ManageDiskStatus.data_missing
            elif not auto_ip and int(paths_count) > 2:
                return ManageDiskStatus.wrong_data
            elif not auto_ip and int(paths_count) != len(manual_ips):
                return ManageDiskStatus.wrong_data
            elif not auto_ip:
                # Manual IPs: validate, then resolve each IP to a path.
                ip_status = mange_config.validate_new_iscsi_ips(
                    manual_ips, path_type)
                if ip_status == NewIPValidation.valid:
                    for ip in manual_ips:
                        paths_list.append(mange_config.get_path(ip))
                elif ip_status == NewIPValidation.used_already:
                    return ManageDiskStatus.used_already
                elif ip_status == NewIPValidation.wrong_subnet:
                    return ManageDiskStatus.wrong_subnet
                # NOTE(review): unlike add_disk there is no final else
                # here; any other validation result falls through with an
                # empty paths_list and is reported as ip_out_of_range.
            elif auto_ip:
                paths_list.extend(
                    mange_config.get_new_iscsi_ips(path_type, paths_count))

            if not paths_list or len(paths_list) == 0:
                return ManageDiskStatus.ip_out_of_range

            ceph_api = CephAPI()
            consul_api = ConsulAPI()
            image_name_prefix = ConfigAPI().get_image_name_prefix()
            # No <prefix><id> image in the pool -> non-PetaSAN image:
            # allocate a new PetaSAN id and plan a rename. Refuse if the
            # image is currently in use.
            if not "".join([image_name_prefix,
                            str(disk_meta.id)
                            ]) in ceph_api.get_rbd_images(pool):
                new_id = self.__get_next_disk_id()
                if ceph_api.is_image_busy(disk_meta.id, pool):
                    return ManageDiskStatus.is_busy
                do_rename = True
                none_petasan_image = disk_meta.id
                disk_meta.id = new_id

            disk_meta.pool = pool
            disk_meta.iqn = ":".join(
                [mange_config.get_iqn_base(), disk_meta.id])
            disk_meta.paths = paths_list
            if auth_auto:
                # Auto auth means no CHAP credentials.
                disk_meta.user = ""
                disk_meta.password = ""
            disk_meta.wwn = self.__get_wwn(disk_meta.id)

            status = consul_api.find_disk(disk_meta.id)
            if status is not None:
                return ManageDiskStatus.disk_exists  # disk is running and attached
            else:
                if do_rename:
                    # Rename the image into the PetaSAN naming scheme
                    # before creating the disk metadata.
                    ceph_api.rename_image_to_petasan_index(
                        none_petasan_image, image_name_prefix + new_id, pool)
                status = ceph_api.add_disk(disk_meta, False, pool)
                if status == ManageDiskStatus.done:
                    consul_api.add_disk_resource(disk_meta.id, "disk")
                    i = 0
                    # One Consul resource per path: "/<disk id>/<n>".
                    for p in paths_list:
                        i += 1
                        consul_api.add_disk_resource(
                            "/".join(["", disk_meta.id,
                                      str(i)]), None)
                else:
                    # Attach failed: roll back the rename so the original
                    # image name is restored.
                    if do_rename:
                        ceph_api.rename_image_to_petasan_index(
                            image_name_prefix + new_id, none_petasan_image,
                            pool)

        except DiskListException as e:
            status = ManageDiskStatus.disk_get__list_error
            logger.exception(e.message)
        except Exception as e:
            status = ManageDiskStatus.error
            logger.exception(e.message)
        return status
def add_disk():
    api = ConsulAPI()
    print api.add_disk_resource("00101", "")
    print api.add_disk_resource("00101/0", "")
    print api.add_disk_resource("00101/1", "")
    print api.add_disk_resource("00101/2", "")

    print api.add_disk_resource("00001", "")
    print api.add_disk_resource("00001/0", "")
    print api.add_disk_resource("00001/1", "")
    print api.add_disk_resource("00001/2", "")

    print api.add_disk_resource("09001", "")
    print api.add_disk_resource("09001/0", "")
    print api.add_disk_resource("09001/1", "")
    print api.add_disk_resource("09001/2", "")

    print api.add_disk_resource("00002", "")
    print api.add_disk_resource("00002/0", "")
    print api.add_disk_resource("00002/1", "")
    print api.add_disk_resource("00002/2", "")

    pass