def detach_disk(self, disk_id, pool):
    # Clearing disk_name in the stored image metadata marks the disk as detached;
    # the metadata is rewritten via CephAPI without creating a new image.
    ceph_api = CephAPI()
    disk_meta = DiskMeta()
    disk_meta.id = disk_id
    disk_meta.disk_name = None
    return ceph_api.add_disk(disk_meta, False, pool)
def edit_disk(self, disk_meta, auth_auto, pool):
    """
    :type disk_meta: DiskMeta
    """
    try:
        if not disk_meta.id or not disk_meta.disk_name or not disk_meta.size or type(disk_meta.size) != int:
            return ManageDiskStatus.data_missing
        elif not auth_auto and (not disk_meta.user or not disk_meta.password):
            return ManageDiskStatus.data_missing

        old_disk_meta = self.get_disk(disk_meta.id, pool)
        disk_meta.paths = old_disk_meta.paths
        disk_meta.iqn = old_disk_meta.iqn
        disk_meta.pool = pool

        ceph_api = CephAPI()
        if auth_auto:
            disk_meta.user = ""
            disk_meta.password = ""

        consul_api = ConsulAPI()
        status = consul_api.find_disk(disk_meta.id)
        if status is not None:
            return ManageDiskStatus.disk_exists  # disk is running and attached
        else:
            status = ceph_api.add_disk(disk_meta, False, pool)
    except DiskListException as e:
        status = ManageDiskStatus.disk_get__list_error
        logger.exception(e.message)
    except Exception as e:
        status = ManageDiskStatus.error
        logger.exception(e.message)
    return status
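# --- Usage sketch (not from the original module) ---------------------------
# A hypothetical caller that renames an existing, stopped disk via edit_disk()
# above. It assumes get_disk() (already used inside edit_disk) returns a
# DiskMeta whose size is stored as an int; the pool name "rbd" is illustrative.
def example_rename_disk(manage_disk, disk_id, new_name):
    disk_meta = manage_disk.get_disk(disk_id, "rbd")
    disk_meta.disk_name = new_name
    # auth_auto=True clears any stored user/password on the edited disk.
    return manage_disk.edit_disk(disk_meta, auth_auto=True, pool="rbd")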
def create_disk(disk_id):
    ceph_api = CephAPI()
    manage_disk = ManageDisk()
    disk_meta = DiskMeta()
    disk_meta.disk_name = "sanatech" + str("sql1")
    disk_meta.size = 1
    disk_meta.password = "******"
    disk_meta.user = "******"

    path1 = Path()
    path2 = Path()
    path1.ip = "192.168.57.150"
    path1.subnet_mask = "255.255.255.0"
    path1.eth = "eth0"
    path2.ip = "192.168.58.150"
    path2.subnet_mask = "255.255.255.0"
    path2.eth = "eth1"
    disk_meta.paths.append(path1)
    disk_meta.paths.append(path2)

    disk_meta.id = disk_id
    disk_meta.iqn = app_conf.read_app_config().iqn_base + ":" + disk_meta.id

    status = ceph_api.add_disk(disk_meta)
    if status == ManageDiskStatus.done:
        logger.info("done, disk created")
        # Read back the metadata that was stored on the new image and load it
        # into a DiskMeta to verify the round trip.
        attr = ceph_api.read_image_metadata("image-" + disk_id)
        xx = attr.get(app_conf.get_image_meta_key())
        logger.info(xx)
        disk = DiskMeta()
        disk.load_json(xx)
        print("disk user is %s" % disk.user)
        print(disk)
    elif status == ManageDiskStatus.disk_name_exists:
        print("disk already exists")
    else:
        print("error creating disk")
def add_disk(self, disk_meta, manual_ips, path_type, paths_count, auth_auto, auto_ip, pool):
    """
    :type path_type: PathType
    :type manual_ips: [string]
    :type paths_count: int
    :type disk_meta: DiskMeta
    """
    cfg = ManageConfig()
    paths = []
    try:
        if not disk_meta.disk_name or not disk_meta.size or type(disk_meta.size) != int:
            return ManageDiskStatus.data_missing
        elif not auth_auto and (not disk_meta.user or not disk_meta.password):
            return ManageDiskStatus.data_missing
        elif not auto_ip and int(paths_count) > 2:
            return ManageDiskStatus.wrong_data
        elif not auto_ip and int(paths_count) != len(manual_ips):
            return ManageDiskStatus.wrong_data
        elif not auto_ip:
            # Validate the manually supplied iSCSI IPs and build the path list.
            ip_status = cfg.validate_new_iscsi_ips(manual_ips, path_type)
            if ip_status == NewIPValidation.valid:
                for ip in manual_ips:
                    paths.append(cfg.get_path(ip))
            elif ip_status == NewIPValidation.used_already:
                return ManageDiskStatus.used_already
            elif ip_status == NewIPValidation.wrong_subnet:
                return ManageDiskStatus.wrong_subnet
            else:
                return ManageDiskStatus.wrong_data
        elif auto_ip:
            # Let ManageConfig allocate the requested number of iSCSI IPs.
            paths.extend(cfg.get_new_iscsi_ips(path_type, paths_count))
            if not paths or len(paths) == 0:
                return ManageDiskStatus.ip_out_of_range

        new_id = self.__get_next_disk_id()
        disk_meta.id = new_id
        disk_meta.paths = paths
        disk_meta.wwn = self.__get_wwn(new_id)
        if auth_auto:
            disk_meta.user = ""
            disk_meta.password = ""
        disk_meta.iqn = ":".join([cfg.get_iqn_base(), new_id])

        consul_api = ConsulAPI()
        disk_data = consul_api.find_disk(disk_meta.id)
        if disk_data is not None:
            return ManageDiskStatus.disk_exists

        ceph_api = CephAPI()
        status = ceph_api.add_disk(disk_meta, True, pool)
        if status == ManageDiskStatus.done:
            # Register the disk and each of its paths as Consul resources.
            consul_api.add_disk_resource(disk_meta.id, "disk")
            consul_api.add_disk_pool(disk_meta.id, pool)
            i = 0
            for p in paths:
                i += 1
                consul_api.add_disk_resource("/".join(["", disk_meta.id, str(i)]), None)
    except DiskListException as e:
        status = ManageDiskStatus.disk_get__list_error
        logger.exception(e.message)
    except Exception as e:
        status = ManageDiskStatus.error
        logger.exception(e.message)
    return status
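# --- Usage sketch (not from the original module) ---------------------------
# A hypothetical caller of add_disk() above, shown only to illustrate a
# parameter combination the validation code accepts. The pool name "rbd",
# the example credentials and passing a PathType value in from the caller
# are assumptions for illustration, not confirmed PetaSAN APIs.
def example_add_disk(manage_disk, path_type):
    disk_meta = DiskMeta()
    disk_meta.disk_name = "vm-store01"
    disk_meta.size = 10                  # must be an int, otherwise data_missing
    disk_meta.user = "iscsi_user"        # required because auth_auto=False below
    disk_meta.password = "iscsi_pass"
    # auto_ip=True lets ManageConfig allocate paths_count iSCSI IPs, so no
    # manual_ips list is needed; add_disk() fills in id, wwn, iqn and paths.
    status = manage_disk.add_disk(disk_meta, manual_ips=None, path_type=path_type,
                                  paths_count=2, auth_auto=False, auto_ip=True,
                                  pool="rbd")
    return status == ManageDiskStatus.done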
def attach_disk(self, disk_meta, manual_ips, path_type, paths_count, auth_auto, auto_ip, pool):
    """
    :type disk_meta: DiskMeta
    """
    manage_config = ManageConfig()
    paths_list = []
    do_rename = False
    none_petasan_image = ""
    try:
        if not disk_meta.disk_name or not disk_meta.size or type(disk_meta.size) != int:
            return ManageDiskStatus.data_missing
        elif not auth_auto and (not disk_meta.user or not disk_meta.password):
            return ManageDiskStatus.data_missing
        elif not auto_ip and int(paths_count) > 2:
            return ManageDiskStatus.wrong_data
        elif not auto_ip and int(paths_count) != len(manual_ips):
            return ManageDiskStatus.wrong_data
        elif not auto_ip:
            ip_status = manage_config.validate_new_iscsi_ips(manual_ips, path_type)
            if ip_status == NewIPValidation.valid:
                for ip in manual_ips:
                    paths_list.append(manage_config.get_path(ip))
            elif ip_status == NewIPValidation.used_already:
                return ManageDiskStatus.used_already
            elif ip_status == NewIPValidation.wrong_subnet:
                return ManageDiskStatus.wrong_subnet
        elif auto_ip:
            paths_list.extend(manage_config.get_new_iscsi_ips(path_type, paths_count))
            if not paths_list or len(paths_list) == 0:
                return ManageDiskStatus.ip_out_of_range

        ceph_api = CephAPI()
        consul_api = ConsulAPI()
        image_name_prefix = ConfigAPI().get_image_name_prefix()

        # If the image does not follow the PetaSAN naming convention, assign a
        # new internal id and remember the original name so it can be renamed.
        if "".join([image_name_prefix, str(disk_meta.id)]) not in ceph_api.get_rbd_images(pool):
            new_id = self.__get_next_disk_id()
            if ceph_api.is_image_busy(disk_meta.id, pool):
                return ManageDiskStatus.is_busy
            do_rename = True
            none_petasan_image = disk_meta.id
            disk_meta.id = new_id

        disk_meta.pool = pool
        disk_meta.iqn = ":".join([manage_config.get_iqn_base(), disk_meta.id])
        disk_meta.paths = paths_list
        if auth_auto:
            disk_meta.user = ""
            disk_meta.password = ""
        disk_meta.wwn = self.__get_wwn(disk_meta.id)

        status = consul_api.find_disk(disk_meta.id)
        if status is not None:
            return ManageDiskStatus.disk_exists  # disk is running and attached
        else:
            if do_rename:
                ceph_api.rename_image_to_petasan_index(none_petasan_image, image_name_prefix + new_id, pool)
            status = ceph_api.add_disk(disk_meta, False, pool)
            if status == ManageDiskStatus.done:
                consul_api.add_disk_resource(disk_meta.id, "disk")
                i = 0
                for p in paths_list:
                    i += 1
                    consul_api.add_disk_resource("/".join(["", disk_meta.id, str(i)]), None)
            else:
                # Adding the disk failed; undo the rename so the original image name is restored.
                if do_rename:
                    ceph_api.rename_image_to_petasan_index(image_name_prefix + new_id, none_petasan_image, pool)
    except DiskListException as e:
        status = ManageDiskStatus.disk_get__list_error
        logger.exception(e.message)
    except Exception as e:
        status = ManageDiskStatus.error
        logger.exception(e.message)
    return status
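# --- Usage sketch (not from the original module) ---------------------------
# A hypothetical call to attach_disk() above for re-attaching an existing rbd
# image by id/name. If the image lacks the PetaSAN name prefix, attach_disk()
# assigns a new id and renames the image; otherwise it keeps the given id.
# The pool name "rbd", the size value and passing path_type from the caller
# are illustrative assumptions.
def example_attach_disk(manage_disk, image_id, path_type):
    disk_meta = DiskMeta()
    disk_meta.id = image_id              # existing image to attach
    disk_meta.disk_name = "imported01"
    disk_meta.size = 10                  # still required by the validation above
    # auth_auto=True clears user/password; auto_ip=True allocates one path.
    status = manage_disk.attach_disk(disk_meta, manual_ips=None, path_type=path_type,
                                      paths_count=1, auth_auto=True, auto_ip=True,
                                      pool="rbd")
    return status == ManageDiskStatus.done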