def DeleteLocalStore(name): ''' delete local Datastore ''' # check uuid store_uuid = str(uuid.uuid3(uuid.NAMESPACE_DNS, '%s' % name)) with DATASTORE_LIST.lock: if not DATASTORE_LIST.exists(store_uuid): logging.info('the %s datastore is not exist.' % name) raise store_exception.StoreInvalidException(name) DATASTORE_LIST.pop(store_uuid) # check xml cfg xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name)) if not os.path.exists(xmlfile): logging.error('the %s xml conf is not exist.' % name) raise store_exception.StoreInvalidException(name) # delete datastore.cfg datastore with storeagentd.DATASTORE_CONF.lock: if name != util.LOCALPOOL: storeagentd.DATASTORE_CONF.DeleteStore(name) # delete xml cfg os.remove(xmlfile) logging.info('Delete %s datastore success.' % name)
def DeleteBlockStore(name):
    """Delete a block (iSCSI) datastore.

    Only an INACTIVE datastore may be deleted; it is removed from the
    in-memory DATASTORE_LIST, from datastore.cfg and its xml file is deleted.

    :param name: datastore name
    :raises store_exception.StoreInvalidException: xml missing, datastore
        unknown, or datastore still ACTIVE
    """
    _state = ''
    xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name))
    if not os.path.exists(xmlfile):
        raise store_exception.StoreInvalidException(xmlfile)
    # get Store state, if inactive delete
    with storeagentd.DATASTORE_CONF.lock:
        _state = storeagentd.DATASTORE_CONF.GetValue(util.STATE, name)
    if _state == util.INACTIVE:
        # delete from DATASTORE_LIST
        # BUG FIX: DATASTORE_LIST is keyed by the *string* form of the uuid
        # (see DeleteLocalStore/CreateLocalStore); the old code passed the
        # raw UUID object, so exists() could never find the entry.
        store_uuid = str(uuid.uuid3(uuid.NAMESPACE_DNS, '%s' % name))
        with DATASTORE_LIST.lock:
            if not DATASTORE_LIST.exists(store_uuid):
                logging.error('the %s datastore is not exists.' % name)
                raise store_exception.StoreInvalidException(name)
            DATASTORE_LIST.pop(store_uuid)
        # delete from DATASTORE_CONF
        with storeagentd.DATASTORE_CONF.lock:
            storeagentd.DATASTORE_CONF.DeleteStore(name)
        # delete xml cfg
        os.remove(xmlfile)
    else:
        # BUG FIX: the original format string had a %s but no argument,
        # which raised TypeError instead of logging
        logging.info('the %s datastore is ACTIVE. please stop datastore.' % name)
        raise store_exception.StoreInvalidException('the datastore is ACTIVE.')
    logging.info('delete datastore %s success.' % name)
def StartLocalStore(name):
    """Start a local datastore (including localpool).

    Resolves the backing device from the xml config (or from datastore.cfg
    for the localpool mountpoint), mounts it unless auto_mount is enabled,
    and marks the datastore ACTIVE in datastore.cfg.

    :param name: datastore name
    :raises store_exception.StoreInvalidException: xml missing or mountpoint
        directory not empty
    :raises store_exception.StoreXmlException: xml could not be parsed
    :raises store_exception.StoreFileBusyException: mountpoint busy
    :raises store_exception.StoreEnvoyException: mount command failed
    """
    # check xml cfg
    xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name))
    if not os.path.exists(xmlfile):
        raise store_exception.StoreInvalidException(xmlfile)
    # check mountpoint, device
    xml_tree = ReadXml(xmlfile)
    if isinstance(xml_tree, etree._ElementTree):
        mountpoint = GetXmlElementByXpath(xml_tree, 'mountpoint').text
        naa = GetXmlElementByXpath(xml_tree, 'naa').text
        device = GetDeviceByNaa(naa)
        logging.info('mountpoint is %s, device is %s' % (mountpoint, device))
        # check mountpoint /data/localpool.
        if mountpoint == util.LOCALPOOL_MOUNTPOINT:
            # get device by datastore.cfg.
            # BUG FIX: read the config under DATASTORE_CONF.lock, consistent
            # with every other DATASTORE_CONF access in this module
            with storeagentd.DATASTORE_CONF.lock:
                device = storeagentd.DATASTORE_CONF.GetValue(util.DEVICE, name)
    else:
        raise store_exception.StoreXmlException(xmlfile)
    # check auto_mount
    with storeagentd.DATASTORE_CONF.lock:
        _auto_mount = storeagentd.DATASTORE_CONF.GetValue(util.AUTO_MOUNT, name)
    if _auto_mount != util.TRUE:
        if not os.path.exists(mountpoint):
            os.mkdir(mountpoint)
        if util.CheckBusy(mountpoint):
            raise store_exception.StoreFileBusyException(mountpoint)
        # refuse to mount over a non-empty directory
        if os.listdir(mountpoint):
            logging.error('mountpoint dir %s is not null.' % mountpoint)
            raise store_exception.StoreInvalidException(mountpoint)
        mount_ret = envoy.run('%s %s %s' % (util.MOUNT, device, mountpoint))
        if mount_ret.status_code == 0:
            logging.info('the datastore %s mountting success.' % name)
        else:
            raise store_exception.StoreEnvoyException('mount %s %s failed.'
                                                      % (device, mountpoint))
    # update status cfg
    with storeagentd.DATASTORE_CONF.lock:
        storeagentd.DATASTORE_CONF.SetValue(util.STATE, util.ACTIVE, name)
    logging.info('Start datastore %s success.' % name)
def CreateFullVol(self, request, context):
    """Create a volume whose preallocation is 'full' (as a background job).

    :param request: request.json_data — json payload describing the volume;
                    request.no_need_notify — suppress job completion notify
    :returns: vols_pb2.FullVolCreateReply(errno, job_id); job_id is 0 on
              validation failure
    """
    json_data = request.json_data
    need_notify = not request.no_need_notify
    try:
        # check json_data
        j_data = json.loads(json_data)
        schema.validate(j_data)
        preallocation = j_data['preallocation']
        # this RPC only handles full preallocation; 'off'/'falloc' go
        # through CreateVol
        if preallocation != "full":
            raise store_exception.StoreInvalidException(preallocation)
    # BUG FIX: malformed json (ValueError), a missing 'preallocation' key
    # (KeyError) and schema failures previously escaped the RPC handler;
    # map them to STORE_INVALID_ERROR like CreateVol does.
    except (store_exception.StoreInvalidException,
            store_exception.StoreJsonValidationException,
            ValueError, KeyError):
        logging.critical(traceback.format_exc())
        errno = util_pb2.STORE_INVALID_ERROR
        job_id = 0
        return vols_pb2.FullVolCreateReply(errno=errno, job_id=job_id)
    opaque = {}
    opaque['json_data'] = json_data
    logging.info('json_data is %s' % json_data)
    job_id = storeagentd.WORKER.add_job(JobType.CREATE_FULL_VOL, opaque,
                                        need_notify)
    return vols_pb2.FullVolCreateReply(errno=util_pb2.STORE_OK, job_id=job_id)
def DisableMultipath(naa):
    """Remove *naa* from the multipath whitelist and drop its dedicated
    multipath section (if any), then ask multipathd to reconfigure.

    :param naa: device wwid (unquoted)
    :raises store_exception.StoreInvalidException: naa not whitelisted
    """
    client = MultipathClient()
    with open(util.MULTIPATH_CONF, 'r') as fd:
        raw_conf = fd.read()
    _config = config.Configuration.from_multipathd_conf(raw_conf)
    white_list = _config.whitelist.wwid
    multipath_list = _config.multipaths
    quoted_naa = '"%s"' % naa
    # the wwid must currently be whitelisted
    if quoted_naa not in white_list:
        raise store_exception.StoreInvalidException(naa)
    white_list.remove(quoted_naa)
    # drop the per-wwid multipath section, if one exists
    for entry in multipath_list:
        if naa == entry.wwid.strip('"'):
            multipath_list.remove(entry)
            break
    # persist the new config and reload multipathd
    client.write_to_multipathd_conf(_config)
    util.StoreCmdRun("%s reconfigure" % util.MULTIPATHD)
    logging.info("Naa %s is disable multipath." % naa)
def StartBlockStore(name):
    """Start a block (iSCSI) datastore: log in to every portal of the target
    and mark the datastore ACTIVE in datastore.cfg.

    :param name: datastore name
    :raises store_exception.StoreInvalidException: xml config missing
    """
    # check xml
    xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name))
    if not os.path.exists(xmlfile):
        raise store_exception.StoreInvalidException(xmlfile)
    # get portal and target
    _xml_tree = ReadXml(xmlfile)
    if isinstance(_xml_tree, etree._ElementTree):
        _portal = GetXmlElementByXpath(_xml_tree, 'portal').text
        _target = GetXmlElementByXpath(_xml_tree, 'target').text
    # login, if already login we again login has no affect.
    _portal_list = _portal.split(',')
    for item in _portal_list:
        _portal_address = item.split(':')[0]
        _portal_port = item.split(':')[1]
        # BUG FIX: pass portal and target as two separate arguments, the
        # same way StopBlockStore calls IscsiadmLogout. The old call
        # formatted the (address, port) tuple itself into the portal
        # string, producing "('addr', 'port'):<target>".
        util.IscsiadmLogin('%s:%s' % (_portal_address, _portal_port),
                           _target)
    # change DATASTORE_CONF
    with storeagentd.DATASTORE_CONF.lock:
        storeagentd.DATASTORE_CONF.SetValue(util.STATE, util.ACTIVE, name)
    logging.info('start %s datastore success.' % name)
def pop(self, uuid_str):
    """Remove and return the datastore stored under *uuid_str*.

    The caller must already hold the DataStoreList lock.

    :param uuid_str: string uuid key of the datastore
    :raises store_exception.StoreLockException: lock not held by caller
    :raises store_exception.StoreInvalidException: uuid not in the list
    """
    if not self.__lock.locked():
        raise store_exception.StoreLockException('the caller must hold DataStoreList lock')
    if uuid_str not in self.__datastore_list:
        raise store_exception.StoreInvalidException('invalid datastore uuid')
    return self.__datastore_list.pop(uuid_str)
def SetPolicy(naa, policy):
    """Rewrite the multipath config so device *naa* uses the given policy.

    Any existing per-wwid section is removed first; for every policy except
    DEFAULT a fresh section is appended, then multipathd is reconfigured.

    :param naa: device wwid (unquoted)
    :param policy: one of the adp_pb2 policy enum values
    :raises store_exception.StoreInvalidException: naa not whitelisted
    """
    client = MultipathClient()
    with open(util.MULTIPATH_CONF, 'r') as fd:
        conf_text = fd.read()
    _config = config.Configuration.from_multipathd_conf(conf_text)
    white_list = _config.whitelist.wwid
    multipath_list = _config.multipaths
    quoted_naa = '"%s"' % naa
    # check naa
    if quoted_naa not in white_list:
        raise store_exception.StoreInvalidException(naa)
    # delete the old per-wwid multipath section, if present
    for entry in multipath_list:
        if naa == entry.wwid.strip('"'):
            multipath_list.remove(entry)
            break
    # build the new section for the requested policy
    new_section = config.MultipathEntry()
    if policy == adp_pb2.DEFAULT:
        # DEFAULT means "no dedicated section": multipathd defaults apply
        logging.info("Naa %s policy change to DEFAULT." % naa)
    elif policy == adp_pb2.RECENTLY_USED:
        new_section.wwid = quoted_naa
        new_section.path_grouping_policy = '"failover"'
        new_section.failback = '"manual"'
        multipath_list.append(new_section)
        logging.info("Naa %s policy change to RECENTLY_USED." % naa)
    elif policy == adp_pb2.FIXED:
        new_section.wwid = quoted_naa
        new_section.path_grouping_policy = '"failover"'
        new_section.failback = '"immediate"'
        multipath_list.append(new_section)
        logging.info("Naa %s policy change to FIXED." % naa)
    elif policy == adp_pb2.LOOP:
        new_section.wwid = quoted_naa
        new_section.path_grouping_policy = '"multibus"'
        multipath_list.append(new_section)
        logging.info("Naa %s policy change to LOOP." % naa)
    elif policy == adp_pb2.OPTIMAL:
        new_section.wwid = quoted_naa
        new_section.path_grouping_policy = '"group_by_prio"'
        new_section.prio = '"alua"'
        new_section.failback = '"immediate"'
        multipath_list.append(new_section)
        logging.info("Naa %s policy change to OPTIMAL." % naa)
    # persist and reload multipathd
    client.write_to_multipathd_conf(_config)
    util.StoreCmdRun("%s reconfigure" % util.MULTIPATHD)
def append(self, datastore):
    """Insert *datastore* into the list, keyed by its uuid.

    The caller must hold the DataStoreList lock; the datastore's own lock
    is taken while its uuid is read.

    :param datastore: a DataStore instance
    :raises store_exception.StoreLockException: lock not held by caller
    :raises store_exception.StoreInvalidException: uuid already present
    """
    assert isinstance(datastore, DataStore)
    if not self.__lock.locked():
        raise store_exception.StoreLockException('the caller must hold DataStoreList lock')
    with datastore.lock:
        if datastore.uuid in self.__datastore_list:
            raise store_exception.StoreInvalidException('datastore(%s) already in datastore list' % datastore.uuid)
        self.__datastore_list[datastore.uuid] = datastore
def StopLocalStore(name): ''' stop local store ''' # check xml cfg xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name)) if not os.path.exists(xmlfile): raise store_exception.StoreInvalidException(xmlfile) # check mountpoint device xml_tree = ReadXml(xmlfile) if isinstance(xml_tree, etree._ElementTree): mountpoint = GetXmlElementByXpath(xml_tree, 'mountpoint').text if not mountpoint: raise store_exception.StoreInvalidException('mountpoint is null') # check mountpoint busy if util.CheckBusy(mountpoint): raise store_exception.StoreFileBusyException(mountpoint) # umount with storeagentd.DATASTORE_CONF.lock: _auto_mount = storeagentd.DATASTORE_CONF.GetValue(util.AUTO_MOUNT, name) if _auto_mount != util.TRUE: umount_ret = envoy.run('%s %s' % (util.UMOUNT, mountpoint)) if umount_ret.status_code == 0: logging.info('datastore %s umount %s success.' % (name, mountpoint)) else: raise store_exception.StoreEnvoyException('%s umount %s failed' % (name, mountpoint)) # update datastroe.cfg with storeagentd.DATASTORE_CONF.lock: storeagentd.DATASTORE_CONF.SetValue(util.STATE, util.INACTIVE, name) else: raise store_exception.StoreXmlException('%s.xml exception' % name) logging.info('datastore %s is stop success.' % name)
def GetPolicy(naa):
    """Map the multipath section for *naa* back to an adp_pb2 policy enum.

    :param naa: device wwid (unquoted)
    :returns: adp_pb2.DEFAULT / RECENTLY_USED / FIXED / LOOP / OPTIMAL
    :raises store_exception.StoreInvalidException: naa not whitelisted
    """
    # check naa
    _config = _GetConfigByMultipathd()
    white_list = _config.whitelist.wwid
    multipath_list = _config.multipaths
    if ('"%s"' % naa) not in white_list:
        raise store_exception.StoreInvalidException(naa)
    # find this wwid's section; fall back to an empty one (-> DEFAULT)
    section = config.MultipathEntry()
    for entry in multipath_list:
        if naa == entry.wwid.strip('"'):
            section = entry
            break
    pgp = '' if section.path_grouping_policy is None else section.path_grouping_policy.strip('"')
    failback = '' if section.failback is None else section.failback.strip('"')
    policy = adp_pb2.DEFAULT
    if pgp == "failover":
        if failback == "manual":
            policy = adp_pb2.RECENTLY_USED
        elif failback == "immediate":
            policy = adp_pb2.FIXED
    elif pgp == "multibus":
        policy = adp_pb2.LOOP
    elif pgp == "group_by_prio":
        # 'prio' is only populated for group_by_prio sections; any other
        # section may have it unset, so guard against None before strip()
        prio = '' if section.prio is None else section.prio.strip('"')
        if prio == "alua":
            policy = adp_pb2.OPTIMAL
    return policy
def GetVolInfo(self, request, context):
    """Return size information for one volume via `qemu-img info`.

    :param request: request.p_name — store name; request.v_name — vol name
    :returns: vols_pb2.VolInfoGetReply with
              info.volsize / info.vol_usedsize in KB (as strings) and
              errno util_pb2.STORE_OK on success
    """
    p_name = request.p_name
    v_name = request.v_name
    ret = util_pb2.STORE_OK
    v_info = vols_pb2.VolInfo()
    command = ''
    q_img_ret = util_pb2.QemuImgReply()
    try:
        # check filename
        filepath = ('%s/%s/%s' % (util.STORE_MOUNT_PATH, p_name, v_name))
        if not os.path.exists(filepath):
            logging.error('the filename %s is not exists' % filepath)
            raise store_exception.StoreInvalidException(filepath)
        # command
        command = ('info %s --output json' % filepath)
        logging.info('command: %s' % command)
        # get info — qemu-img reports sizes in bytes; StringToInt converts
        # them to KB
        q_img_ret = util.QemuImg(command)
        j = json.loads(q_img_ret.std_out)
        s_volsize = str(j["virtual-size"])
        s_volusedsize = str(j["actual-size"])
        v_info.volsize = str(driver.StringToInt(s_volsize))
        v_info.vol_usedsize = str(driver.StringToInt(s_volusedsize))
    except OSError:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_OSERROR
    except store_exception.StoreQemuImgCommandException:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_QEMU_COMMAND_FAIL
    except store_exception.StoreInvalidException:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_INVALID_ERROR
    # BUG FIX: unparsable qemu-img output (ValueError from json.loads) or a
    # missing size key (KeyError) previously escaped the RPC handler
    except (ValueError, KeyError):
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_INVALID_ERROR
    return vols_pb2.VolInfoGetReply(errno=ret, info=v_info)
def CreateVol(self, request, context):
    """Create a raw/qcow2 vol whose preallocation is 'off' or 'falloc'.

    :param request: request.json_data — json payload describing the volume
    :returns: errno —
              util_pb2.STORE_OK               create success
              util_pb2.STORE_INVALID_ERROR    invalid input / schema failure
              util_pb2.STORE_OSERROR          OSError
              util_pb2.STORE_XML_ERROR        xml error
              util_pb2.STORE_QEMU_COMMAND_FAIL qemu command failed
              util_pb2.STORE_IS_BUSY          backing file busy
    """
    json_data = request.json_data
    ret = util_pb2.STORE_OK
    preallocation = ''
    try:
        # 'full' and 'metadata' preallocation are rejected here —
        # full-preallocated volumes go through CreateFullVol
        j_data = json.loads(json_data)
        schema.validate(j_data)
        preallocation = j_data['preallocation']
        if preallocation in ("full", "metadata"):
            raise store_exception.StoreInvalidException(preallocation)
        ret = driver.QemuCreateVol(j_data)
    except OSError:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_OSERROR
    except etree.LxmlError:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_XML_ERROR
    except (store_exception.StoreInvalidException,
            store_exception.StoreJsonValidationException):
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_INVALID_ERROR
    except store_exception.StoreQemuImgCommandException:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_QEMU_COMMAND_FAIL
    except store_exception.StoreFileBusyException:
        logging.critical(traceback.format_exc())
        ret = util_pb2.STORE_IS_BUSY
    return vols_pb2.VolCreateReply(errno=ret)
def StopBlockStore(name):
    """Stop a block (iSCSI) datastore.

    Verifies none of the target's LUN devices are busy, logs out of every
    portal, and marks the datastore INACTIVE in datastore.cfg.

    :param name: datastore name
    :raises store_exception.StoreInvalidException: xml config missing
    :raises store_exception.StoreFileBusyException: a LUN device is in use
    """
    lun_list = []
    _portal_list = []
    # check xml
    xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name))
    if not os.path.exists(xmlfile):
        raise store_exception.StoreInvalidException(name)
    # get portal and target
    _xml_tree = ReadXml(xmlfile)
    if isinstance(_xml_tree, etree._ElementTree):
        _portal = GetXmlElementByXpath(_xml_tree, 'portal').text
        _target = GetXmlElementByXpath(_xml_tree, 'target').text
    # check portal target, get lun by target. check lun if is busy.
    lun_list = disk_manager.GetIscsiLunListByTarget(_portal, _target)
    for lun in lun_list:
        # check busy — refuse to log out while any LUN device is open
        _naa = lun['SCSI_ID']
        _device = GetDeviceByNaa(_naa)
        if util.CheckBusy(_device):
            logging.error('the device %s scsi_id %s is busy.' % (_device, _naa))
            raise store_exception.StoreFileBusyException(_device)
    # logout
    _portal_list = _portal.split(',')
    for item in _portal_list:
        _portal_address = item.split(':')[0]
        _portal_port = item.split(':')[1]
        util.IscsiadmLogout('%s:%s' % (_portal_address, _portal_port),
                            _target)
    # change state to inactive, DATASTORE_CONF.
    with storeagentd.DATASTORE_CONF.lock:
        # BUG FIX: the old code passed util.STAT (a typo); every other
        # state read/write in this module uses util.STATE
        storeagentd.DATASTORE_CONF.SetValue(util.STATE, util.INACTIVE, name)
    logging.info('stop %s datastore success.' % name)
def StringToInt(string):
    """Convert a size string to an int number of KB.

    A bare number is treated as bytes and floor-divided down to KB;
    a 'K'/'M'/'G'/'T' suffix scales the number accordingly.

    :param string: size string, e.g. '1048576', '4K', '2M', '1G'
    :returns: size in KB (int)
    :raises store_exception.StoreInvalidException: no leading digits
    """
    num_re = re.match(r'\d+', string)
    if num_re is None:
        logging.error('invalid string %s' % string)
        raise store_exception.StoreInvalidException(num_re)
    num = int(num_re.group())
    # BUG FIX: the old character class [K,M,G,T] also matched ',', so a
    # comma-bearing string (e.g. '1,024') matched unit ',' and fell through
    # every branch, returning the initial '' (a str) instead of an int.
    unit_re = re.search(r'[KMGT]', string)
    unit = '' if unit_re is None else unit_re.group()
    if unit == '':
        return num // 1024          # bytes -> KB
    if unit == 'K':
        return num
    if unit == 'M':
        return num * 1024
    if unit == 'G':
        return num * 1024 * 1024
    return num * 1024 * 1024 * 1024  # unit == 'T'
def GetDataStoreInfoByName(store_name):
    """Collect a DataStoreInfo (type, naa, portal/target, sizes, state,
    title, mountpoint) for one datastore from its xml config, the device
    layer and datastore.cfg.

    :param store_name: datastore name
    :returns: pools_pb2.DataStoreInfo()
    :raises store_exception.StoreInvalidException: xml config missing
    :raises store_exception.StoreXmlException: xml unreadable or a required
        element is empty
    """
    _store_info = pools_pb2.DataStoreInfo()
    _store_name = store_name
    _size_info = pools_pb2.DataStoreSizeInfo()
    # check xml conf
    xml_file = ('%s/%s.xml' % (util.STORE_XML_PATH, _store_name))
    if os.path.exists(xml_file):
        logging.info('xml_file is %s.' % xml_file)
    else:
        raise store_exception.StoreInvalidException(_store_name)
    # read xml
    xml_tree = ReadXml(xml_file)
    if isinstance(xml_tree, etree._ElementTree):
        # get info
        _s_type = GetXmlElementByXpath(xml_tree, '[@type]').get('type')
        if not _s_type:
            raise store_exception.StoreXmlException(xml_tree)
        _s_naa = GetXmlElementByXpath(xml_tree, 'naa').text
        # block pool naa is none — only non-block stores require a naa
        if int(_s_type) != pools_pb2.ISCSI_BLOCK and int(_s_type) != pools_pb2.FC_BLOCK:
            if not _s_naa:
                raise store_exception.StoreXmlException(xml_tree)
        _s_portal = GetXmlElementByXpath(xml_tree, 'portal').text
        _s_target = GetXmlElementByXpath(xml_tree, 'target').text
        _s_mountpoint = GetXmlElementByXpath(xml_tree, 'mountpoint').text
        if not _s_mountpoint:
            raise store_exception.StoreXmlException(xml_tree)
    else:
        raise store_exception.StoreXmlException('read xml fail')
    # get size info
    if int(_s_type) != pools_pb2.ISCSI_BLOCK and int(_s_type) != pools_pb2.FC_BLOCK:
        # local store: size comes from the backing device
        if store_name == util.LOCALPOOL:
            with storeagentd.DATASTORE_CONF.lock:
                _s_device = storeagentd.DATASTORE_CONF.GetValue(util.DEVICE, store_name)
        else:
            _s_device = GetDeviceByNaa(_s_naa)
        _size_info = GetDeviceSizeInfo(_s_device)
    else:
        # get block pool size — sum the sizes of all LUNs behind the target;
        # block pools are fully allocated, so availablesize is 0
        _lun_list = []
        _lun_list = disk_manager.GetIscsiLunListByTarget(_s_portal, _s_target)
        for _lun in _lun_list:
            _size = 0
            _size = _lun['Size']
            _size_info.totalsize += _size
            _size_info.allocatedsize += _size
        _size_info.availablesize = 0
    # get state
    with storeagentd.DATASTORE_CONF.lock:
        _s_state = storeagentd.DATASTORE_CONF.GetValue(util.STATE, store_name)
        _s_title = storeagentd.DATASTORE_CONF.GetValue(util.N_TITLE, store_name)
    # NOTE(review): for block stores _s_naa may be None here — confirm the
    # proto field accepts it
    _store_info.name = _store_name
    _store_info.p_type = int(_s_type)
    _store_info.naa = _s_naa
    _store_info.portal = _s_portal
    _store_info.target = _s_target
    _store_info.totalsize = _size_info.totalsize
    _store_info.allocatedsize = _size_info.allocatedsize
    _store_info.availablesize = _size_info.availablesize
    _store_info.state = str(_s_state)
    _store_info.n_title = str(_s_title)
    _store_info.mountpoint = _s_mountpoint
    return _store_info
def QemuCreateVol(json_data):
    """Create a volume via qemu-img from an already-parsed json payload.

    :param json_data: dict with p_name, vol_path, vol_type ('raw'/'qcow2'),
        vol_size (KB), preallocation, and — for qcow2 — compat,
        backing_file, backing_fmt, encryption, cluster_size (KB),
        lazy_refcounts
    :returns: util_pb2 errno from util.QemuImg
    :raises store_exception.StoreInvalidException: target exists, not enough
        space, bad cluster_size, or inconsistent backing file options
    :raises store_exception.StoreFileBusyException: backing file in use
    """
    j_data = json_data
    logging.info('the json_data is %s' % j_data)
    p_name = j_data['p_name']
    vol_path = j_data['vol_path']
    vol_type = j_data['vol_type']
    vol_size = int(j_data['vol_size'])
    preallocation = j_data['preallocation']
    # check filepath
    filepath = ('%s/%s/%s' % (util.STORE_MOUNT_PATH, p_name, vol_path))
    if os.path.exists(filepath):
        logging.error('the %s is already exists' % filepath)
        raise store_exception.StoreInvalidException(filepath)
    # check size
    s_sizeinfo = GetDeviceSizeInfoByName(p_name)
    avail_size = StringToInt(s_sizeinfo.availablesize)
    if avail_size < vol_size:
        # BUG FIX: the old format string contained '%(KB)', which starts a
        # mapping-key conversion — formatting it with a tuple raised
        # TypeError, so this logging call itself crashed
        logging.error('the DataStore not enough space. avail_size %s(KB) vol_size %s(KB)'
                      % (avail_size, vol_size))
        raise store_exception.StoreInvalidException(vol_size)
    # command
    # NOTE(review): only 'raw' and 'qcow2' assign command; any other
    # vol_type would hit a NameError below — presumably schema validation
    # upstream guarantees the type. Confirm.
    if vol_type == 'raw':
        command = ('create -q -f raw -o preallocation=%s %s %sK'
                   % (preallocation, filepath, vol_size))
    if vol_type == 'qcow2':
        compat = j_data['compat']
        backing_file = j_data['backing_file']
        backing_fmt = j_data['backing_fmt']
        encryption = j_data['encryption']
        cluster_size = int(j_data['cluster_size'])
        lazy_refcounts = j_data['lazy_refcounts']
        # check cluster_size(512 -- 2M)
        # NOTE(review): the guard allows up to 4096K (4M) although the
        # comment says 2M — confirm the intended upper bound
        if cluster_size < 0.5 or cluster_size > 4096:
            logging.error('cluster_size is %s.' % cluster_size)
            raise store_exception.StoreInvalidException(cluster_size)
        if backing_file == 'null':
            # no backing file: backing_fmt must be 'null' too
            if not backing_fmt == 'null':
                raise store_exception.StoreInvalidException(backing_fmt)
            command = ('create -q -f qcow2'
                       ' -o compat=%s'
                       ' -o cluster_size=%dK'
                       ' -o encryption=%s'
                       ' -o lazy_refcounts=%s'
                       ' -o preallocation=%s %s %s'
                       % (compat, cluster_size, encryption, lazy_refcounts,
                          preallocation, filepath, vol_size))
        else:
            if not os.path.exists(backing_file):
                raise store_exception.StoreInvalidException(backing_file)
            # check backing_file busy
            if util.CheckBusy(backing_file):
                raise store_exception.StoreFileBusyException(backing_file)
            command = ('create -q -f qcow2'
                       ' -o compat=%s'
                       ' -o backing_file=%s'
                       ' -o backing_fmt=%s'
                       ' -o cluster_size=%dK'
                       ' -o encryption=%s'
                       ' -o lazy_refcounts=%s'
                       ' -o preallocation=%s %s %s'
                       % (compat, backing_file, backing_fmt, cluster_size,
                          encryption, lazy_refcounts, preallocation,
                          filepath, vol_size))
    # create
    logging.info('command is %s.' % command)
    q_img_ret = util.QemuImg(command)
    return q_img_ret.errno
def CreateLocalStore(naa, name, n_title):
    """Create a local datastore.

    Validates name/uuid uniqueness and the backing device, writes the
    datastore xml config, registers the store in DATASTORE_LIST and adds
    its section to datastore.cfg; on cfg failure the xml and list entry
    are rolled back.

    :param naa: device wwid (ignored for the localpool, whose device comes
        from datastore.cfg)
    :param name: datastore name
    :param n_title: display title stored under util.N_TITLE
    :raises store_exception.StoreInvalidException: name taken, xml exists,
        device missing or already mounted
    :raises store_exception.StoreXmlException: xml creation failed
    :raises store_exception.StoreDataConfigException: datastore.cfg update
        failed (after rollback)
    """
    global DATASTORE_LIST
    # the list key is the string form of a name-derived uuid3
    store_uuid = str(uuid.uuid3(uuid.NAMESPACE_DNS, '%s' % name))
    with DATASTORE_LIST.lock:
        if DATASTORE_LIST.exists(store_uuid):
            logging.error('the name %s is already uesd.' % name)
            raise store_exception.StoreInvalidException(name)
    _store_node = DataStore(store_uuid)
    # check xml
    xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name))
    if os.path.exists(xmlfile):
        raise store_exception.StoreInvalidException(xmlfile)
    # check device — localpool reads its device from datastore.cfg, other
    # stores resolve it from the naa
    if name != util.LOCALPOOL:
        device = GetDeviceByNaa(naa)
    else:
        with storeagentd.DATASTORE_CONF.lock:
            device = storeagentd.DATASTORE_CONF.GetValue(util.DEVICE, util.LOCALPOOL)
    if not os.path.exists(device):
        logging.info('the device %s is not exists.' % device)
        raise store_exception.StoreInvalidException(device)
    # refuse devices that are already mounted somewhere
    _mountpoint = GetMountpoint(device)
    if len(_mountpoint):
        logging.info('the device %s is already mountting.' % device)
        raise store_exception.StoreInvalidException(device)
    _mountpoint = os.path.join(util.STORE_MOUNT_PATH, name)
    # CreateStorecfg
    xmlstr = _store_node.xml_template % {
        'type': pools_pb2.LOCAL,
        'name': name,
        'uuid': store_uuid,
        'naa': naa,
        'portal': None,
        'target': None,
        'mountpoint': _mountpoint
    }
    if CreateStoreCfg(xmlstr, name):
        logging.info('Create %s xml success.' % name)
    else:
        raise store_exception.StoreXmlException('Create %s xml failed.' % name)
    # add datastore to DATASTORE_LIST
    with DATASTORE_LIST.lock:
        DATASTORE_LIST.append(_store_node)
    # add section on datastore.cfg
    with storeagentd.DATASTORE_CONF.lock:
        try:
            storeagentd.DATASTORE_CONF.SetValue(util.STATE, util.INACTIVE, name)
            storeagentd.DATASTORE_CONF.SetValue(util.N_TITLE, n_title, name)
            storeagentd.DATASTORE_CONF.SetValue(util.AUTO_MOUNT, util.TRUE, name)
        except store_exception.StoreDataConfigException:
            # rollback: remove the xml (localpool's xml is kept) and the
            # just-added list entry before re-raising
            if name != util.LOCALPOOL:
                os.remove(xmlfile)
            with DATASTORE_LIST.lock:
                DATASTORE_LIST.pop(store_uuid)
            raise store_exception.StoreDataConfigException('%s add status failed.' % name)
    logging.info('create datastore %s success.' % name)
def CreateBlockStore(name, portal, target, n_title):
    """Create a block (iSCSI) datastore.

    Validates name/uuid uniqueness and that no existing non-local store
    already uses the same portal+target, writes the xml config, registers
    the store in DATASTORE_LIST and adds its section to datastore.cfg;
    on cfg failure the xml and list entry are rolled back.

    :param name: datastore name
    :param portal: comma-separated list of 'address:port' portals
    :param target: iSCSI target iqn
    :param n_title: display title stored under util.N_TITLE
    :raises store_exception.StoreInvalidException: name taken, xml exists,
        portal/target conflict, or an unreadable sibling xml
    :raises store_exception.StoreXmlException: xml creation failed
    :raises store_exception.StoreDataConfigException: datastore.cfg update
        failed (after rollback)
    """
    _xml_list = []
    # check uuid
    store_uuid = str(uuid.uuid3(uuid.NAMESPACE_DNS, name))
    with DATASTORE_LIST.lock:
        if DATASTORE_LIST.exists(store_uuid):
            logging.error('the name %s is already used.' % name)
            raise store_exception.StoreInvalidException(name)
    _store_node = DataStore(store_uuid)
    # check xml cfg
    xmlfile = ('%s/%s.xml' % (util.STORE_XML_PATH, name))
    if os.path.exists(xmlfile):
        logging.error('the xml %s is already exists.' % xmlfile)
        raise store_exception.StoreInvalidException(name)
    # check device by portal, target — no existing non-local datastore may
    # use the same portal+target pair
    _xml_list = os.listdir(util.STORE_XML_PATH)
    for xml in _xml_list:
        _file = ('%s/%s' % (util.STORE_XML_PATH, xml))
        _xml_tree = ReadXml(_file)
        if isinstance(_xml_tree, etree._ElementTree):
            _type = GetXmlElementByXpath(_xml_tree, '[@type]').get('type')
            if int(_type) == pools_pb2.LOCAL:
                continue
            _portal = GetXmlElementByXpath(_xml_tree, 'portal').text
            _target = GetXmlElementByXpath(_xml_tree, 'target').text
            if _portal == portal and _target == target:
                logging.error('the Datastroe %s is confict.' % xml)
                raise store_exception.StoreInvalidException(
                    'the portal %s, target %s is invalid.' % (portal, target))
        else:
            raise store_exception.StoreInvalidException(_file)
    # createxmlcfg
    _xmlstr = _store_node.xml_template % {
        'type': pools_pb2.ISCSI_BLOCK,
        'name': name,
        'uuid': store_uuid,
        'naa': None,
        'portal': portal,
        'target': target,
        'mountpoint': util.BLOCK_PATH
    }
    if CreateStoreCfg(_xmlstr, name):
        logging.info('Create %s xml success.' % name)
    else:
        raise store_exception.StoreXmlException('Create %s xml failed.' % name)
    # add to DATASTORE_LIST
    with DATASTORE_LIST.lock:
        # BUG FIX: append the DataStore node, not the bare uuid string —
        # DataStoreList.append asserts isinstance(..., DataStore), so the
        # old call failed at runtime
        DATASTORE_LIST.append(_store_node)
    # add to DATASTORE_CONF
    with storeagentd.DATASTORE_CONF.lock:
        try:
            storeagentd.DATASTORE_CONF.SetValue(util.STATE, util.INACTIVE, name)
            # BUG FIX: write n_title under the N_TITLE key of *this store's*
            # section, matching CreateLocalStore; the old call passed
            # N_TITLE as the value and n_title as the section name
            storeagentd.DATASTORE_CONF.SetValue(util.N_TITLE, n_title, name)
        except store_exception.StoreDataConfigException:
            # rollback the xml and the just-added list entry
            os.remove(xmlfile)
            with DATASTORE_LIST.lock:
                DATASTORE_LIST.pop(store_uuid)
            raise store_exception.StoreDataConfigException(
                '%s update datastore.cfg failed.' % name)
    logging.info('create datasotre %s success.' % name)