def get_network_storages(data):
    """Parse iSCSI target list output into a list of node dicts.

    Each input line is a whitespace-separated record:
    host, port, tpgt, iqn, activity, autostart.
    For active targets, the device symlinks found under ISCSI_DEVICE_DIR
    are attached as the node's disk_list.
    """
    network_storages = []
    dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
    dev_symlink_list.sort()
    unmountable_regexp = re.compile("-part[0-9]+$")

    for line in data.split('\n'):
        if not line:
            continue

        (host, port, tpgt, iqn, activity, autostart) = line.split(' ', 6)
        node = {
            'type': "iSCSI",
            'hostname': host,
            'port': port,
            'tpgt': tpgt,
            'iqn': iqn,
            'activity': string.atoi(activity),
            'autostart': string.atoi(autostart),
            'disk_list': [],
        }

        if activity == '1':
            disk_list = []
            symlink_regexp = re.compile(
                "^%s" % (re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))
            unmountable_flag = {}
            for sym_link in dev_symlink_list:
                if symlink_regexp.search(sym_link):
                    real_path = symlink2real("%s/%s" % (ISCSI_DEVICE_DIR, sym_link))
                    # "-partN" symlinks are partitions: they cannot be used as
                    # whole block devices, and their presence marks the parent
                    # device as partitionable.
                    is_blockable = True
                    if unmountable_regexp.search(sym_link):
                        is_blockable = False
                        unmountable_flag[unmountable_regexp.sub("", sym_link)] = True

                    disk_list.append({
                        'symlink_name': sym_link,
                        'realpath_list': real_path,
                        'is_blockable': is_blockable,
                        'is_partitionable': False,
                    })

            for disk in disk_list:
                for key in unmountable_flag.keys():
                    if disk['symlink_name'] == key:
                        disk['is_partitionable'] = True

            node['disk_list'] = disk_list

        network_storages.append(node)

    return network_storages

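
# Illustrative only (not part of the original module): a minimal, self-contained
# sketch of the whitespace-separated record format get_network_storages() parses,
# one line per discovered target. All sample values below are hypothetical.
def _example_parse_list_record():
    sample = "192.168.0.10 3260 1 iqn.2010-01.example.com:target0 1 0\n"
    for line in sample.split('\n'):
        if not line:    # skip the empty string left after the trailing newline
            continue
        (host, port, tpgt, iqn, activity, autostart) = line.split(' ', 6)
        assert (host, port, activity, autostart) == ("192.168.0.10", "3260", "1", "0")
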
def _PUT(self, *param, **params):
    """Start or stop an iSCSI target.

    Dependent libvirt storage pools (iscsi pools whose source device is this
    IQN, and fs pools built on this target's device symlinks) are started or
    destroyed by additional jobs queued in the same job group.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()

    if not validates_network_storage(self):
        self.logger.debug("Network storage change status failed. Did not validate.")
        return web.badrequest(self.view.alert)

    host = findbyhost1(self.orm, host_id)

    if is_param(self.input, "iqn"):
        iqn = self.input.iqn
    else:
        self.logger.debug("Network storage change status failed. Target IQN not found.")
        return web.badrequest()

    options = {'iqn': iqn}
    job_order = 0

    if is_param(self.input, "status"):
        status = self.input.status
    else:
        self.logger.debug("Network storage change status failed. Status type not found.")
        return web.badrequest()

    if is_param(self.input, "host") and is_param(self.input, "port"):
        # Note: 'host' is reused here for the target hostname from the request.
        host = self.input.host
        port = self.input.port
    else:
        self.logger.debug("Network storage change status failed. Target host and port not found.")
        return web.badrequest()

    # Collect the libvirt storage pools backed by this iSCSI target.
    active_used_pool = []
    inactive_used_pool = []
    kvc = KaresansuiVirtConnection()
    try:
        dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
        dev_symlink_list.sort()
        symlink_regexp = re.compile("^%s/%s" % (
            re.escape(ISCSI_DEVICE_DIR),
            re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))

        active_pools = kvc.list_active_storage_pool()
        inactive_pools = kvc.list_inactive_storage_pool()
        now_pools = active_pools + inactive_pools
        for pool in now_pools:
            pool_type = kvc.get_storage_pool_type(pool)
            if pool_type == "iscsi":
                if iqn == kvc.get_storage_pool_sourcedevicepath(pool):
                    if pool in active_pools:
                        active_used_pool.append(pool)
                    if pool in inactive_pools:
                        inactive_used_pool.append(pool)
            elif pool_type == "fs":
                if symlink_regexp.match(kvc.get_storage_pool_sourcedevicepath(pool)):
                    if pool in active_pools:
                        active_used_pool.append(pool)
                    if pool in inactive_pools:
                        inactive_used_pool.append(pool)

        # Refuse to stop the target while a dependent pool is still used by a guest.
        if status == NETWORK_STORAGE_STOP:
            for pool in active_used_pool:
                if kvc.is_used_storage_pool(name=pool, active_only=True) is True:
                    self.logger.debug("Stop iSCSI failed. Target iSCSI is used by guest.")
                    return web.badrequest("Target iSCSI is used by guest.")
    finally:
        kvc.close()

    if status == NETWORK_STORAGE_START:
        network_storage_cmd = ISCSI_COMMAND_START
        cmd_name = u'Start iSCSI'
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        # Pool start jobs are queued with order 1; the iSCSI start command
        # appended below gets order 0.
        for pool in inactive_used_pool:
            pool_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           VIRT_COMMAND_START_STORAGE_POOL),
                {"name": pool})
            pool_cmdname = "Start Storage Pool"
            jobgroup.jobs.append(Job('%s command' % pool_cmdname, 1, pool_cmd))
        job_order = 0
    elif status == NETWORK_STORAGE_STOP:
        network_storage_cmd = ISCSI_COMMAND_STOP
        cmd_name = u'Stop iSCSI'
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        # Pool destroy jobs are queued with order 0; the iSCSI stop command
        # appended below gets order 1.
        for pool in active_used_pool:
            pool_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           VIRT_COMMAND_DESTROY_STORAGE_POOL),
                {"name": pool})
            pool_cmdname = "Stop Storage Pool"
            jobgroup.jobs.append(Job('%s command' % pool_cmdname, 0, pool_cmd))
        job_order = 1
    else:
        return web.internalerror('Internal Server Error. (Param)')

    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'], network_storage_cmd),
        options)
    jobgroup.jobs.append(Job('%s command' % cmd_name, job_order, _cmd))

    host = findbyhost1(self.orm, host_id)
    _machine2jobgroup = m2j_new(machine=host,
                                jobgroup_id=-1,
                                uniq_key=karesansui.sheconf['env.uniqkey'],
                                created_user=self.me,
                                modified_user=self.me,
                                )
    save_job_collaboration(self.orm,
                           self.pysilhouette.orm,
                           _machine2jobgroup,
                           jobgroup,
                           )

    return web.accepted()

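
# Reading aid (not part of the original module): the job ordering _PUT builds,
# assuming the second argument to Job() is the job's execution order within the
# JobGroup. Starting logs into the target before starting the dependent pools;
# stopping destroys the dependent pools before logging out.
_EXAMPLE_START_ORDER = [(0, "Start iSCSI command"), (1, "Start Storage Pool command")]
_EXAMPLE_STOP_ORDER = [(0, "Stop Storage Pool command"), (1, "Stop iSCSI command")]
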
def _DELETE(self, *param, **params):
    """Delete an iSCSI target.

    When host and port are supplied, libvirt storage pools backed by the
    target are stopped and deleted first; the iSCSI delete command itself is
    queued last in the same job group.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()

    host = findbyhost1(self.orm, host_id)

    if is_param(self.input, "iqn"):
        iqn = self.input.iqn
    else:
        self.logger.debug("Network storage delete failed. Target IQN not found.")
        return web.badrequest()

    options = {'iqn': iqn}
    job_order = 0
    cmd_name = u'Delete iSCSI'
    jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])

    if is_param(self.input, "host") and is_param(self.input, "port"):
        host = self.input.host
        port = self.input.port

        used_pool = []
        active_used_pool = []

        kvc = KaresansuiVirtConnection()
        try:
            dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
            dev_symlink_list.sort()
            symlink_regexp = re.compile("^%s/%s" % (
                re.escape(ISCSI_DEVICE_DIR),
                re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))

            pools = kvc.list_active_storage_pool() + kvc.list_inactive_storage_pool()
            for pool in pools:
                pool_type = kvc.get_storage_pool_type(pool)
                if pool_type == "iscsi":
                    if iqn == kvc.get_storage_pool_sourcedevicepath(pool):
                        used_pool.append(pool)
                        pool_objs = kvc.search_kvn_storage_pools(pool)
                        if pool_objs[0].is_active():
                            active_used_pool.append(pool)
                elif pool_type == "fs":
                    if symlink_regexp.match(kvc.get_storage_pool_sourcedevicepath(pool)):
                        used_pool.append(pool)
                        pool_objs = kvc.search_kvn_storage_pools(pool)
                        if pool_objs[0].is_active():
                            active_used_pool.append(pool)
        finally:
            kvc.close()

        # Stop active pools first, then delete every dependent pool; job_order
        # advances so the iSCSI delete command always runs after them.
        for pool in active_used_pool:
            stop_pool_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           VIRT_COMMAND_DESTROY_STORAGE_POOL),
                {"name": pool})
            stop_pool_cmdname = "Stop Storage Pool"
            jobgroup.jobs.append(Job('%s command' % stop_pool_cmdname, 0, stop_pool_cmd))
            job_order = 1

        for pool in used_pool:
            delete_pool_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           VIRT_COMMAND_DELETE_STORAGE_POOL),
                {"name": pool})
            delete_pool_cmdname = "Delete Storage Pool"
            jobgroup.jobs.append(Job('%s command' % delete_pool_cmdname, job_order, delete_pool_cmd))
            job_order = 2

    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'], ISCSI_COMMAND_DELETE),
        options)
    jobgroup.jobs.append(Job('%s command' % cmd_name, job_order, _cmd))

    host = findbyhost1(self.orm, host_id)
    _machine2jobgroup = m2j_new(machine=host,
                                jobgroup_id=-1,
                                uniq_key=karesansui.sheconf['env.uniqkey'],
                                created_user=self.me,
                                modified_user=self.me,
                                )
    save_job_collaboration(self.orm,
                           self.pysilhouette.orm,
                           _machine2jobgroup,
                           jobgroup,
                           )

    return web.accepted()

def _GET(self, *param, **params):
    """Fetch details of an iSCSI target.

    Runs the iSCSI get command as a job, parses its one-line stdout record
    (host, port, tpgt, iqn, activity, autostart, auth, user) and, for an
    active target, attaches the device symlinks found under ISCSI_DEVICE_DIR.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()

    if self.is_mode_input() is True:
        self.view.host_id = host_id

    iqn = self.input.iqn
    options = {'iqn': iqn}

    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'], ISCSI_COMMAND_GET),
        options)

    cmd_name = u'Get iSCSI Detail'
    jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
    jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
    jobgroup.type = JOBGROUP_TYPE['PARALLEL']

    host = findbyhost1(self.orm, host_id)
    _machine2jobgroup = m2j_new(machine=host,
                                jobgroup_id=-1,
                                uniq_key=karesansui.sheconf['env.uniqkey'],
                                created_user=self.me,
                                modified_user=self.me,
                                )

    if corp(self.orm, self.pysilhouette.orm, _machine2jobgroup, jobgroup) is False:
        self.logger.debug("%s command failed. Return to timeout" % (cmd_name))
        return web.internalerror('Internal Server Error. (Timeout)')

    cmd_res = jobgroup.jobs[0].action_stdout
    if not cmd_res:
        # No output from the get command: render an empty detail view.
        self.view.info = {
            'type': "iSCSI",
            'hostname': "",
            'port': "",
            'tpgt': "",
            'iqn': "",
            'activity': "",
            'autostart': "",
            'auth': "",
            'user': "",
            'disk_list': [],
        }
        return True

    (host, port, tpgt, iqn, activity, autostart, auth, user) = cmd_res.strip("\n").split(' ', 8)

    info = {
        'type': "iSCSI",
        'hostname': host,
        'port': port,
        'tpgt': tpgt,
        'iqn': iqn,
        'activity': string.atoi(activity),
        'autostart': string.atoi(autostart),
        'auth': auth,
        'user': user,
        'disk_list': [],
    }

    dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
    if activity == '1':
        disk_list = []
        symlink_regexp = re.compile(
            "^%s" % (re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))
        for sym_link in dev_symlink_list:
            if symlink_regexp.match(sym_link):
                real_path = symlink2real("%s/%s" % (ISCSI_DEVICE_DIR, sym_link))
                disk_list.append({'symlink_name': sym_link,
                                  'realpath_list': real_path,
                                  })
        info['disk_list'] = disk_list

    self.view.info = info
    return True

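
# Illustrative only (not part of the original module): a sketch of the one-line
# record _GET parses from the get command's stdout. The sample values are
# hypothetical; the real format is defined by ISCSI_COMMAND_GET.
def _example_parse_get_record():
    sample = "192.168.0.10 3260 1 iqn.2010-01.example.com:target0 1 0 CHAP admin\n"
    (host, port, tpgt, iqn, activity, autostart,
     auth, user) = sample.strip("\n").split(' ', 8)
    assert (tpgt, activity, auth, user) == ("1", "1", "CHAP", "admin")
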