def keypool_create(self, keypool, projectname, tenant, vpoolname, apitype,
                   uid, secret):
    '''
    Creates a keypool.

    Parameters:
        keypool: label of the keypool
        projectname: project name (URIs pass through unchanged)
        tenant: tenant name
        vpoolname: vpool name (URIs pass through unchanged)
        apitype: api type (s3, swift or atmos)
        uid: user id
        secret: secret key
    Returns:
        JSON payload response
    Raises:
        SOSError: when the project is unknown or the API type is invalid
    '''
    # Resolve the project name to a URI; was: project_uri stayed unbound
    # when projectname was already a URI (or empty).
    project_uri = projectname
    if ((projectname) and (not common.is_uri(projectname))):
        from project import Project
        obj = Project(self.__ipAddr, self.__port)
        projectlst = obj.project_list(tenant)
        project_uri = None
        for projiter in projectlst:
            if (projiter['name'] == projectname):
                project_uri = projiter['id']
        if (not project_uri):
            # was: misspelled "Porject" in the user-facing message
            raise SOSError(SOSError.VALUE_ERR,
                           "Project " + projectname + ": not found")
    # Resolve the vpool name to a URI; was: vpool_uri stayed unbound
    # when vpoolname was already a URI (or empty).
    vpool_uri = vpoolname
    if ((vpoolname) and (not common.is_uri(vpoolname))):
        from virtualpool import VirtualPool
        obj = VirtualPool(self.__ipAddr, self.__port)
        vpool = obj.vpool_show(vpoolname, 'object')
        vpool_uri = vpool['id']
    # NOTE(review): if 'tenant' is already a URI, 'namespace' stays
    # unbound below — TODO confirm callers always pass a tenant name.
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        if (apitype == 's3'):
            # was: undefined name 'secretkey'; the parameter is 'secret'
            return s3_bucket_create(namespace, keypool, project_uri,
                                    vpool_uri, uid, secret)
        elif (apitype == 'swift'):
            return swift_container_create(namespace, keypool, project_uri,
                                          vpool_uri, uid, secret)
        elif (apitype == 'atmos'):
            return atmos_subtenant_create(namespace, tenant, keypool,
                                          project_uri, vpool_uri, uid,
                                          secret)
        else:
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + apitype + " specified")
    except SOSError as e:
        raise e
def datastore_create(self, type, label, varray, cos, size, token,
                     mountpoint):
    '''
    Creates an object data store of the given type.

    Parameters:
        type: data store type (path component of the REST URI)
        label: name of the data store
        varray: virtual array name or URI (optional)
        cos: object CoS name or URI
        size: size of the data store (optional)
        token: task token; derived from the CoS name when omitted
        mountpoint: mount point (optional)
    Returns:
        decoded JSON response of the create request
    '''
    if ((varray) and (not common.is_uri(varray))):
        from virtualarray import VirtualArray
        obj = VirtualArray(self.__ipAddr, self.__port)
        nbhinst = obj.varray_show(varray)
        varray = nbhinst['id']
    # was: 'cos_uri' stayed unbound when 'cos' was already a URI
    cos_uri = cos
    if (not common.is_uri(cos)):
        from cos import Cos
        obj = Cos(self.__ipAddr, self.__port)
        cosinst = obj.cos_show(cos, 'object')
        cos_uri = cosinst['id']
    parms = {
        'name': label,
        'object_cos': cos_uri,
    }
    if (size):
        parms['size'] = size
    if (varray):
        parms['varray'] = varray
    if (mountpoint):
        parms['mount_point'] = mountpoint
    if (not token):
        token = 'cli-create-' + cos
    body = json.dumps(parms)
    uri = self.URI_DATA_STORE_LIST + "/" + type
    # append non-empty query parameters (currently just the task token);
    # was: Python-2-only dict.iterkeys()
    qparms = {'task': token}
    for qk, qv in qparms.items():
        if (qv is not None):
            uri += '&' if ('?' in uri) else '?'
            uri += qk + '=' + qv
    (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                         "POST", uri, body)
    o = common.json_decode(s)
    return o  # was: response decoded but never returned
def datastore_create(self, type, label, varray, cos, size, token,
                     mountpoint):
    '''
    Creates an object data store of the given type.

    Parameters:
        type: data store type (path component of the REST URI)
        label: name of the data store
        varray: virtual array name or URI (optional)
        cos: object CoS name or URI
        size: size of the data store (optional)
        token: task token; derived from the CoS name when omitted
        mountpoint: mount point (optional)
    Returns:
        decoded JSON response of the create request
    '''
    if ((varray) and (not common.is_uri(varray))):
        from virtualarray import VirtualArray
        obj = VirtualArray(self.__ipAddr, self.__port)
        nbhinst = obj.varray_show(varray)
        varray = nbhinst['id']
    # was: 'cos_uri' stayed unbound when 'cos' was already a URI
    cos_uri = cos
    if (not common.is_uri(cos)):
        from cos import Cos
        obj = Cos(self.__ipAddr, self.__port)
        cosinst = obj.cos_show(cos, 'object')
        cos_uri = cosinst['id']
    parms = {
        'name': label,
        'object_cos': cos_uri,
    }
    if (size):
        parms['size'] = size
    if (varray):
        parms['varray'] = varray
    if (mountpoint):
        parms['mount_point'] = mountpoint
    if (not token):
        token = 'cli-create-' + cos
    body = json.dumps(parms)
    uri = self.URI_DATA_STORE_LIST + "/" + type
    # append non-empty query parameters (currently just the task token);
    # was: Python-2-only dict.iterkeys()
    qparms = {'task': token}
    for qk, qv in qparms.items():
        if (qv is not None):
            uri += '&' if ('?' in uri) else '?'
            uri += qk + '=' + qv
    (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                         "POST", uri, body)
    o = common.json_decode(s)
    return o  # was: response decoded but never returned
def key_write(self, key, filepath, keypool, tenant, apitype, uid, secret,
              type):
    '''
    Write a key into a keypool through the chosen object API.

    Parameters:
        key: key label
        filepath: file whose contents become the key value
        keypool: keypool label
        tenant: tenant name
        apitype: api to be used (s3, swift or atmos)
        uid: user id
        secret: secret
        type: content type (atmos only)
    Raises:
        SOSError: on an unknown API type
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        namespace = Tenant(self.__ipAddr,
                           self.__port).namespace_get(tenant)
    if apitype == 's3':
        s3_key_write(namespace, keypool, key, filepath, uid, secret)
    elif apitype == 'swift':
        swift_object_write(namespace, keypool, key, filepath, uid, secret)
    elif apitype == 'atmos':
        # atmos additionally needs the content type
        atmos_object_write(namespace, keypool, key, filepath, uid, secret,
                           type)
    else:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def project_query(self, name):
    '''
    Retrieves the UUID of a project given its "tenant/project" path.

    Parameters:
        name: project path or URI (URIs pass through unchanged)
    Returns:
        UUID of the project
    Throws:
        SOSError - when the project name is not found
    '''
    if common.is_uri(name):
        return name
    (tenant_name, project_name) = common.get_parent_child_from_xpath(name)
    from tenant import Tenant
    tenant_obj = Tenant(self.__ipAddr, self.__port)
    try:
        tenant_uri = tenant_obj.tenant_query(tenant_name)
        for project in (self.project_list(tenant_uri) or []):
            if not project:
                continue
            detail = self.project_show_by_uri(project['id'])
            if detail and detail['name'] == project_name:
                return detail['id']
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       'Project: ' + project_name + ' not found')
    except SOSError as e:
        raise e
def datastore_query(self, type, name):
    '''
    Returns the UID of the data store with the given name.

    Parameters:
        type: data store type (path component of the REST URI)
        name: data store name (URIs pass through unchanged)
    Returns:
        UID of the data store, or () when the listing is empty
    Raises:
        Exception: when no data store with that name exists
    '''
    if (common.is_uri(name)):
        return name
    # was: URI_DATA_STORE_LIST referenced without 'self.'
    (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                         "GET", self.URI_DATA_STORE_LIST,
                                         None)
    o = common.json_decode(s)
    # was: o['data_store'] was read before the emptiness check, so the
    # 'return ()' branch was unreachable
    if (not o):
        return ()
    pools = o['data_store']
    if (not isinstance(pools, list)):
        pools = [pools]
    for pool in pools:
        try:
            # was: undefined name 'uri'; use the pool's own id
            # NOTE(review): assumes show-by-uri takes the full typed path —
            # confirm against datastore_show_by_uri
            pool_details = self.datastore_show_by_uri(
                self.URI_DATA_STORE_LIST + '/' + type + '/' + pool['id'])
            if (pool_details['name'] == name):
                return pool.get('id')
        except Exception:
            # best-effort: skip entries whose details cannot be fetched
            # (was a bare 'except:')
            pass
    raise Exception('Bad Data Store name')
def keypool_update(self, keypool, tenant, versioning, apitype, uid, secret):
    '''
    Updates keypool versioning.

    Parameters:
        keypool: label of the keypool
        tenant: tenant name
        versioning: versioning state to set
        apitype: api type; only 's3' supports versioning
        uid: user id
        secret: secret key
    Returns:
        JSON payload response
    Raises:
        SOSError: for any API type other than 's3'
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        if (apitype == 's3'):
            # was: undefined name 'secretkey'; the parameter is 'secret'
            return s3_bucket_update(namespace, keypool, versioning, uid,
                                    secret)
        else:
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + apitype + " specified")
    except SOSError as e:
        raise e
def key_list_versions(self, keypool, key, tenant, apitype, uid, secret):
    '''
    Lists the versions of a key.

    Parameters:
        keypool: label of the keypool
        key: label of the key
        tenant: tenant name
        apitype: api to be used; only 's3' supports versioning
        uid: user id
        secret: secret
    Raises:
        SOSError: when versioning is unavailable for the API type,
            or the API type is unknown
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if (apitype == 's3'):
        s3_key_list_versions(namespace, keypool, key, uid, secret)
    elif ((apitype == 'swift') or (apitype == 'atmos')):
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       "Versioning not available with API type " + apitype)
    else:
        # was: misspelled "Wroing" in the user-facing error message
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def keypool_delete(self, keypool, tenant, uid, secret, apitype='s3'):
    '''
    Makes a REST API call to delete a keypool by its name and tenant.

    Parameters:
        keypool: label of the keypool
        tenant: tenant name
        uid: user id
        secret: secret key
        apitype: api type (s3, swift or atmos); defaults to 's3'
            (was: read but never declared, raising NameError at runtime;
            added as a backward-compatible keyword parameter)
    Returns:
        JSON payload response
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        if (apitype == 's3'):
            # was: undefined name 'secretkey'; the parameter is 'secret'
            return s3_bucket_delete(namespace, keypool, uid, secret)
        elif (apitype == 'swift'):
            return swift_container_delete(namespace, keypool, uid, secret)
        elif (apitype == 'atmos'):
            return atmos_subtenant_delete(namespace, keypool, uid, secret)
        else:
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + apitype + " specified")
    except SOSError as e:
        raise e
def key_delete(self, key, keypool, tenant, apitype, version, uid, secret):
    '''
    Deletes a key from a keypool through the chosen object API.

    Parameters:
        key: label of the key
        keypool: label of the keypool
        tenant: tenant name
        apitype: api to be used (s3, swift or atmos)
        version: key version (not used by atmos)
        uid: user id
        secret: secret
    Raises:
        SOSError: on an unknown API type
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        namespace = Tenant(self.__ipAddr,
                           self.__port).namespace_get(tenant)
    if apitype == 's3':
        s3_key_delete(namespace, keypool, key, version, uid, secret)
    elif apitype == 'swift':
        swift_object_delete(namespace, keypool, key, version, uid, secret)
    elif apitype == 'atmos':
        # atmos has no per-version delete
        atmos_object_delete(namespace, keypool, key, uid, secret)
    else:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def keypool_show(self, keypool, tenant, uid, secret, apitype='s3'):
    '''
    Shows a keypool.

    Parameters:
        keypool: label of the keypool
        tenant: tenant name
        uid: user id
        secret: secret key
        apitype: api type (s3, swift or atmos); defaults to 's3'
            (was: read but never declared, raising NameError at runtime;
            added as a backward-compatible keyword parameter)
    Returns:
        JSON payload response
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        if (apitype == 's3'):
            # was: undefined name 'secretkey'; the parameter is 'secret'
            return s3_bucket_show(namespace, keypool, uid, secret)
        elif (apitype == 'swift'):
            return swift_container_show(namespace, keypool, uid, secret)
        elif (apitype == 'atmos'):
            return atmos_subtenant_show(namespace, keypool, uid, secret)
        else:
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + apitype + " specified")
    except SOSError as e:
        raise e
def key_list_versions(self, keypool, key, tenant, apitype, uid, secret):
    '''
    Lists the versions of a key.

    Parameters:
        keypool: label of the keypool
        key: label of the key
        tenant: tenant name
        apitype: api to be used; only 's3' supports versioning
        uid: user id
        secret: secret
    Raises:
        SOSError: when versioning is unavailable for the API type,
            or the API type is unknown
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if (apitype == 's3'):
        s3_key_list_versions(namespace, keypool, key, uid, secret)
    elif ((apitype == 'swift') or (apitype == 'atmos')):
        raise SOSError(
            SOSError.NOT_FOUND_ERR,
            "Versioning not available with API type " + apitype)
    else:
        # was: misspelled "Wroing" in the user-facing error message
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def keypool_update(
        self, keypool, tenant, versioning, apitype, uid, secret):
    '''
    Updates keypool versioning.

    Parameters:
        keypool: label of the keypool
        tenant: tenant name
        versioning: versioning state to set
        apitype: api type; only 's3' supports versioning
        uid: user id
        secret: secret key
    Returns:
        JSON payload response
    Raises:
        SOSError: for any API type other than 's3'
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        if (apitype == 's3'):
            # was: undefined name 'secretkey'; the parameter is 'secret'
            return s3_bucket_update(
                namespace, keypool, versioning, uid, secret)
        else:
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + apitype + " specified")
    except SOSError as e:
        raise e
def project_query(self, name):
    '''
    Resolves a project, given as a "tenant/project" path, to its UUID.

    Parameters:
        name: project path or URI (URIs pass through unchanged)
    Returns:
        UUID of the project
    Throws:
        SOSError - when the project name is not found
    '''
    if common.is_uri(name):
        return name
    tenant_name, project_name = common.get_parent_child_from_xpath(name)
    from tenant import Tenant
    tenant_obj = Tenant(self.__ipAddr, self.__port)
    try:
        tenant_uri = tenant_obj.tenant_query(tenant_name)
        candidates = self.project_list(tenant_uri) or []
        for candidate in candidates:
            if not candidate:
                continue
            detail = self.project_show_by_uri(candidate['id'])
            if detail and detail['name'] == project_name:
                return detail['id']
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       'Project: ' + project_name + ' not found')
    except SOSError as e:
        raise e
def key_delete(self, key, keypool, tenant, apitype, version, uid, secret):
    '''
    Removes a key from a keypool via the selected object API.

    Parameters:
        key: label of the key
        keypool: label of the keypool
        tenant: tenant name
        apitype: api to be used (s3, swift or atmos)
        version: key version (ignored by atmos)
        uid: user id
        secret: secret
    Raises:
        SOSError: on an unknown API type
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if apitype == 's3':
        s3_key_delete(namespace, keypool, key, version, uid, secret)
    elif apitype == 'swift':
        swift_object_delete(namespace, keypool, key, version, uid, secret)
    elif apitype == 'atmos':
        # atmos delete is not version-aware
        atmos_object_delete(namespace, keypool, key, uid, secret)
    else:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def keypool_show(self, keypool, tenant, uid, secret, apitype='s3'):
    '''
    Shows a keypool.

    Parameters:
        keypool: label of the keypool
        tenant: tenant name
        uid: user id
        secret: secret key
        apitype: api type (s3, swift or atmos); defaults to 's3'
            (was: read but never declared, raising NameError at runtime;
            added as a backward-compatible keyword parameter)
    Returns:
        JSON payload response
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        if (apitype == 's3'):
            # was: undefined name 'secretkey'; the parameter is 'secret'
            return s3_bucket_show(namespace, keypool, uid, secret)
        elif (apitype == 'swift'):
            return swift_container_show(namespace, keypool, uid, secret)
        elif (apitype == 'atmos'):
            return atmos_subtenant_show(namespace, keypool, uid, secret)
        else:
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + apitype + " specified")
    except SOSError as e:
        raise e
def key_write(self, key, filepath, keypool, tenant, apitype, uid, secret,
              type):
    '''
    Stores a key in a keypool via the selected object API.

    Parameters:
        key: key label
        filepath: file whose contents become the key value
        keypool: keypool label
        tenant: tenant name
        apitype: api to be used (s3, swift or atmos)
        uid: user id
        secret: secret
        type: content type (atmos only)
    Raises:
        SOSError: on an unknown API type
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if apitype == 's3':
        s3_key_write(namespace, keypool, key, filepath, uid, secret)
    elif apitype == 'swift':
        swift_object_write(namespace, keypool, key, filepath, uid, secret)
    elif apitype == 'atmos':
        # atmos additionally takes the content type
        atmos_object_write(namespace, keypool, key, filepath, uid, secret,
                           type)
    else:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def vdc_query(self, name):
    '''Resolves a VirtualDataCenter name to its URI; URIs pass through.'''
    if common.is_uri(name):
        return name
    # VDC names are unique across a ViPR geo system, so first match wins.
    for entry in self.vdc_get_list():
        if entry["name"] == name:
            return entry["id"]
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "VirtualDataCenter with name: " + name + " not found")
def vdc_query(self, name):
    '''Returns the URI of the named VirtualDataCenter; URIs pass through.'''
    if common.is_uri(name):
        return name
    # VDC names are unique across a ViPR geo system, so first match wins.
    match = next((vdc["id"] for vdc in self.vdc_get_list()
                  if vdc["name"] == name), None)
    if match is not None:
        return match
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "VirtualDataCenter with name: " + name + " not found")
def get_tenant_by_name(self, tenant):
    '''
    Resolves a tenant name to its URI.

    Parameters:
        tenant: tenant name, URI, or empty for the caller's own tenant
    Returns:
        tenant URI
    Raises:
        SOSError: when the tenant cannot be resolved
    '''
    if not tenant:
        # no name given: fall back to the current tenant
        uri = self.tenant_getid()
    elif common.is_uri(tenant):
        uri = tenant
    else:
        uri = self.tenant_query(tenant)
    if not uri:
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       "Tenant " + tenant + ": not found")
    return uri
def objectvpool_query(self, name):
    '''Returns the URI of the named active object vpool; URIs pass through.'''
    if common.is_uri(name):
        return name
    for entry in self.objectvpool_list():
        details = self.objectvpool_show_by_uri(entry['id'])
        # only an active vpool with a matching label qualifies
        if (details['name'] == name) and (details['inactive'] == False):
            return details['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Object Vpool query failed: object vpool with name " +
                   name + " not found")
def objectcos_query(self, name):
    '''Returns the URI of the named object CoS; URIs pass through.'''
    if common.is_uri(name):
        return name
    for entry in self.objectcos_list():
        details = self.objectcos_show_by_uri(entry['id'])
        if details['name'] == name:
            return details['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Object Cos query failed: object cos with name " +
                   name + " not found")
def query_by_name(self, name):
    '''
    Returns the URI of the named storage provider; URIs pass through.

    Parameters:
        name: storage provider name or URI
    Raises:
        SOSError: when no provider with that name exists
    '''
    if common.is_uri(name):
        return name
    try:
        providers = self.list_storageproviders()
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       "Storage provider with name: " + name +
                       " not found")
    for provider in providers:
        storageprovider = self.show_by_uri(provider["id"])
        if storageprovider is not None and storageprovider["name"] == name:
            return storageprovider["id"]
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Storage provider with name: " + name + " not found")
def update(args):
    '''
    CLI handler for "volume group update": validates the arguments,
    resolves volume names to URIs, and delegates to VolumeGroup.update().

    Raises:
        SOSError: when no updatable argument is supplied, or the
            underlying update fails
    '''
    if (args.newname is None and args.description is None and
            args.add_volumes is None and args.remove_volumes is None and
            args.parent is None):
        # was: the message omitted -parent even though it satisfies the
        # at-least-one-argument check above
        raise SOSError(
            SOSError.CMD_LINE_ERR,
            "viprcli volume group update: error: at least one of " +
            "the arguments -np/-newname -d/-description -a/-add_volumes " +
            " -r/-remove_volumes -parent is required")
    # resolve names in the add list to URIs; failures here propagate
    add_vols = []
    if (args.add_volumes and len(args.add_volumes) > 0):
        for item in args.add_volumes.split(','):
            if (common.is_uri(item)):
                add_vols.append(item)
            else:
                vol = Volume(args.ip, args.port)
                volid = vol.show(item, False, False)['id']
                add_vols.append(volid)
    # resolve names in the remove list; best-effort, unresolvable volumes
    # are skipped (they may already be gone)
    rem_vols = []
    if (args.remove_volumes and len(args.remove_volumes) > 0):
        for item in args.remove_volumes.split(','):
            if (common.is_uri(item)):
                rem_vols.append(item)
            else:
                vol = Volume(args.ip, args.port)
                try:
                    volid = vol.show(item, False, False)['id']
                    rem_vols.append(volid)
                except Exception:
                    # was a bare 'except:'; keep the deliberate
                    # skip-on-failure behavior but stop swallowing
                    # SystemExit/KeyboardInterrupt
                    continue
    obj = VolumeGroup(args.ip, args.port)
    try:
        obj.update(args.name, args.newname, args.description,
                   ",".join(add_vols), args.consistency_group,
                   args.replication_group, ",".join(rem_vols), args.parent)
    except SOSError as e:
        raise e
def swift_container_create(
        container, namespace, tenant, project, vpool, uid, secret):
    '''
    Creates a Swift container in the given namespace.

    NOTE(review): this function declares no 'self' parameter yet reads
    self.__ipAddr / self.__port and self.swift_authenticate — it can only
    work as an instance method; verify against its callers.
    '''
    if ((project) and (not common.is_uri(project))):
        from project import Project
        obj = Project(self.__ipAddr, self.__port)
        projectlst = obj.project_list(tenant)
        project_uri = None
        for projiter in projectlst:
            # NOTE(review): 'projectname' is undefined here — presumably
            # this should compare against 'project'; confirm.
            if(projiter['name'] == projectname):
                project_uri = projiter['id']
        if(not project_uri):
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "Project " + project + ": not found")
        project_uri = project_uri.strip()
        # NOTE(review): '_headers' is first created AFTER authentication
        # below and then rebound to a fresh dict, so this assignment either
        # raises NameError or is clobbered — confirm intended ordering.
        _headers['x-emc-project-id'] = project_uri
    if(vpool):
        from virtualpool import VirtualPool
        obj = VirtualPool(self.__ipAddr, self.__port)
        vpool_uri = obj.vpool_query(vpool, 'object')
        vpool_uri = vpool_uri.strip()
        # NOTE(review): same '_headers' ordering problem as above.
        _headers['x-emc-vpool'] = vpool_uri
    token = self.swift_authenticate(uid, secret)
    _headers = dict()
    _headers[common.SWIFT_AUTH_TOKEN] = token
    (s, h) = common.service_json_request(self.__ipAddr, S3_PORT, "PUT",
                                         self.URI_SWIFT_CONTAINER_INSTANCE.
                                         format(namespace, container),
                                         None, None, False,
                                         'application/json', None,
                                         _headers)
    # decoded response is not returned — presumably intentional; confirm
    o = common.json_decode(s)
def vnasserver_query(self, name):
    '''Returns the URI of the named vNAS server; URIs pass through.'''
    if common.is_uri(name):
        return name
    for uri in self.list_vnasservers():
        entry = self.vnasserver_show(uri, False)
        if entry and entry['name'] == name:
            return entry['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "vnasserver " + name + ": not found")
def vcenterdatacenter_query(self, name, vcenter, tenantname):
    '''
    Returns the UID of the named vCenter datacenter.

    Parameters:
        name: datacenter name (URIs pass through unchanged)
        vcenter: owning vCenter
        tenantname: tenant name
    Raises:
        SOSError: when no datacenter with that name exists
    '''
    if common.is_uri(name):
        return name
    for datacenter in self.vcenterdatacenter_list(vcenter, tenantname):
        if datacenter['name'] == name:
            return datacenter['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "vcenterdatacenter " + name + ": not found")
def keypool_list(self, projectname, tenant, apitype, uid, secret):
    '''
    Returns all the keypools in a vdc.

    Parameters:
        projectname: project name
        tenant: tenant name
        apitype: api type (s3, swift or atmos); empty lists all three
        uid: user id
        secret: secret key
    Returns:
        list of per-API keypool listings
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    kplst = []
    try:
        # was: every call below passed the undefined name 'secretkey';
        # the parameter is 'secret'
        if ((not apitype) or (apitype == 's3')):
            kplst.append(
                s3_bucket_list(namespace, projectname, uid, secret))
        if ((not apitype) or (apitype == 'swift')):
            kplst.append(
                swift_container_list(namespace, projectname, uid, secret))
        if ((not apitype) or (apitype == 'atmos')):
            kplst.append(
                atmos_subtenant_list(namespace, projectname, uid, secret))
        # TODO: convert to table format
        return kplst
    except SOSError as e:
        raise e
def networksystem_query(self, name):
    '''Returns the URI of the named network system; URIs pass through.'''
    if common.is_uri(name):
        return name
    for system in self.networksystem_list():
        if system['name'] != name:
            continue
        # confirm the system is still retrievable before returning its id
        if self.networksystem_show(system['id']):
            return system['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Networksystem " + name + " not found: ")
def vnasserver_query(self, name):
    '''Resolves a vNAS server name to its URI; URIs pass through.'''
    if common.is_uri(name):
        return name
    for candidate_uri in self.list_vnasservers():
        details = self.vnasserver_show(candidate_uri, False)
        if details and details['name'] == name:
            return details['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "vnasserver " + name + ": not found")
def networksystem_query(self, name):
    '''Resolves a network system name to its URI; URIs pass through.'''
    if common.is_uri(name):
        return name
    for entry in self.networksystem_list():
        # only return a match that is still retrievable
        if entry['name'] == name and self.networksystem_show(entry['id']):
            return entry['id']
    raise SOSError(
        SOSError.NOT_FOUND_ERR,
        "Networksystem " + name + " not found: ")
def varray_query(self, name):
    '''Returns the UID of the named virtual array; URIs pass through.'''
    if common.is_uri(name):
        return name
    for uri in self.varray_list():
        details = self.varray_show(uri, False)
        if details and details['name'] == name:
            return details['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "varray " + name + ": not found")
def varray_query(self, name):
    '''Resolves a virtual array name to its UID; URIs pass through.'''
    if common.is_uri(name):
        return name
    for candidate in self.varray_list():
        entry = self.varray_show(candidate, False)
        if not entry:
            continue
        if entry['name'] == name:
            return entry['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "varray " + name + ": not found")
def atmos_subtenant_create(
        namespace, tenant, keypool, project, vpool, uid, secretkey):
    '''
    Creates an Atmos subtenant.

    NOTE(review): declares no 'self' parameter yet reads
    self.__ipAddr / self.__port; references the undefined names
    'projectname' and 'container'; and 'vpool_uri' / 'project_uri' are
    left unbound when vpool/project are not supplied. Compare with the
    s3/swift counterparts and verify before use.
    '''
    if(vpool):
        from virtualpool import VirtualPool
        obj = VirtualPool(self.__ipAddr, self.__port)
        vpool_uri = obj.vpool_query(vpool, 'object')
        vpool_uri = vpool_uri.strip()
    if ((project) and (not common.is_uri(project))):
        from project import Project
        obj = Project(self.__ipAddr, self.__port)
        projectlst = obj.project_list(tenant)
        project_uri = None
        for projiter in projectlst:
            # NOTE(review): 'projectname' is undefined — presumably
            # should be 'project'; confirm.
            if(projiter['name'] == projectname):
                project_uri = projiter['id']
        if(not project_uri):
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "Project " + project + ": not found")
    _headers = dict()
    _headers['x-emc-namespace'] = namespace
    _headers['x-emc-vpool'] = vpool_uri
    _headers['x-emc-project-id'] = project_uri
    # NOTE(review): 'container' is undefined in this scope, and the URI
    # template is the Swift one — verify this is the intended endpoint.
    (s, h) = common.service_json_request(self.__ipAddr, ATMOS_PORT, "PUT",
                                         self.URI_SWIFT_CONTAINER_INSTANCE.
                                         format(namespace, container),
                                         None, None, False,
                                         'application/json', None,
                                         _headers)
    if(h['subtenantID']):
        return h['subtenantID']
def snapshot_query(self, name, project, tenant, snapshotname):
    '''
    Resolves a consistency-group snapshot name to its URI.

    Parameters:
        name: name (or URI) of the consistency group
        project: project name
        tenant: tenant name
        snapshotname: name of the snapshot
    Returns:
        URI of the first snapshot with that name
    Raises:
        SOSError: when the snapshot is not found
    '''
    if common.is_uri(name):
        return name
    for snapshot in self.snapshot_list(name, project, tenant):
        if snapshot['name'] == snapshotname:
            return snapshot['id']
    raise SOSError(SOSError.SOS_FAILURE_ERR,
                   "Snapshot " + snapshotname + ": not found")
def consistencygroup_query(self, name, project, tenant):
    '''
    Resolves a consistency group name/id to its id.

    Parameters:
        name: name or id of the consistency group
        project: project name
        tenant: tenant name
    Returns:
        id of the consistency group
    Raises:
        SOSError: when no matching group exists
    '''
    if common.is_uri(name):
        return name
    for uri in self.list(project, tenant):
        group = self.show(uri, project, tenant)
        if group and group['name'] == name:
            return group['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Consistency Group " + name + ": not found")
def key_read(self, key, keypool, tenant, apitype, version, filepath,
             uid, secret, type):
    '''
    Retrieves a key's details via the selected object API
    (key, keypool, apitype, uid, secret).

    Raises:
        SOSError: on an unknown API type
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        namespace = Tenant(self.__ipAddr,
                           self.__port).namespace_get(tenant)
    if apitype == 's3':
        return s3_key_read(namespace, keypool, key, version, filepath,
                           uid, secret)
    if apitype == 'swift':
        return swift_object_read(namespace, keypool, key, version,
                                 filepath, uid, secret)
    if apitype == 'atmos':
        # atmos additionally takes the content type
        return atmos_object_read(namespace, keypool, key, version,
                                 filepath, uid, secret, type)
    raise SOSError(SOSError.VALUE_ERR,
                   "Wrong API type " + apitype + " specified")
def query_by_name(self, name):
    '''
    Resolves a storage provider name to its URI; URIs pass through.

    Parameters:
        name: storage provider name or URI
    Raises:
        SOSError: when no provider with that name exists
    '''
    if (common.is_uri(name)):
        return name
    try:
        providers = self.list_storageproviders()
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       "Storage provider with name: " + name +
                       " not found")
    for provider in providers:
        storageprovider = self.show_by_uri(provider['id'])
        if (storageprovider is not None and
                storageprovider['name'] == name):
            return storageprovider['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Storage provider with name: " + name + " not found")
def vcenter_query(self, name, tenantname):
    '''
    Returns the UID of the named vCenter within the given tenant.

    Parameters:
        name: vCenter name (URIs pass through unchanged)
        tenantname: tenant name
    Raises:
        SOSError: when no vCenter with that name exists
    '''
    if common.is_uri(name):
        return name
    from tenant import Tenant
    tenanturi = Tenant(self.__ipAddr,
                       self.__port).tenant_query(tenantname)
    for entry in self.vcenter_list(tenanturi):
        if entry['name'] == name:
            return entry['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "vcenter " + name + ": not found")
def swift_container_create(container, namespace, tenant, project, vpool,
                           uid, secret):
    '''
    Creates a Swift container in the given namespace.

    NOTE(review): this function declares no 'self' parameter yet reads
    self.__ipAddr / self.__port and self.swift_authenticate — it can only
    work as an instance method; verify against its callers.
    '''
    if ((project) and (not common.is_uri(project))):
        from project import Project
        obj = Project(self.__ipAddr, self.__port)
        projectlst = obj.project_list(tenant)
        project_uri = None
        for projiter in projectlst:
            # NOTE(review): 'projectname' is undefined here — presumably
            # this should compare against 'project'; confirm.
            if (projiter['name'] == projectname):
                project_uri = projiter['id']
        if (not project_uri):
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "Project " + project + ": not found")
        project_uri = project_uri.strip()
        # NOTE(review): '_headers' is first created AFTER authentication
        # below and then rebound to a fresh dict, so this assignment either
        # raises NameError or is clobbered — confirm intended ordering.
        _headers['x-emc-project-id'] = project_uri
    if (vpool):
        from virtualpool import VirtualPool
        obj = VirtualPool(self.__ipAddr, self.__port)
        vpool_uri = obj.vpool_query(vpool, 'object')
        vpool_uri = vpool_uri.strip()
        # NOTE(review): same '_headers' ordering problem as above.
        _headers['x-emc-vpool'] = vpool_uri
    token = self.swift_authenticate(uid, secret)
    _headers = dict()
    _headers[common.SWIFT_AUTH_TOKEN] = token
    (s, h) = common.service_json_request(
        self.__ipAddr, S3_PORT, "PUT",
        self.URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container),
        None, None, False, 'application/json', None, _headers)
    # decoded response is not returned — presumably intentional; confirm
    o = common.json_decode(s)
def ps_query(self, name):
    '''
    Resolves a protection (RecoverPoint) system name to the URI of its
    first occurrence.

    Parameters:
        name: name of the protection system (URIs pass through)
    Returns:
        URI of the protection system
    Raises:
        SOSError: when no system with that name exists
    '''
    if common.is_uri(name):
        return name
    for href in self.ps_list_uris():
        entry = common.show_by_href(self.__ipAddr, self.__port, href)
        if entry and entry['name'] == name:
            return entry['id']
    raise SOSError(SOSError.SOS_FAILURE_ERR,
                   "Recovery Point:" + name + ": not found")
def consistencygroup_query(self, name, project, tenant):
    '''
    Resolves a consistency group name or id to its id.

    Parameters:
        name: name/id of the consistency group
        project: project name
        tenant: tenant name
    Returns:
        id of the consistency group
    Raises:
        SOSError: when no matching group exists
    '''
    if common.is_uri(name):
        return name
    for candidate in self.list(project, tenant):
        details = self.show(candidate, project, tenant)
        if not details:
            continue
        if details['name'] == name:
            return details['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Consistency Group " + name + ": not found")
def list_initiators(self, hostName):
    """
    Lists all initiators of the given host.

    Parameters
        hostName : host name or URI
    Returns:
        list of initiator entries (empty when the host has none)
    """
    # was: 'common.is_uri(hostName) == False' — test the result directly
    if not common.is_uri(hostName):
        hostUri = self.query_by_name(hostName, None)
    else:
        hostUri = hostName
    (s, h) = common.service_json_request(
        self.__ipAddr, self.__port, "GET",
        Host.URI_HOST_LIST_INITIATORS.format(hostUri),
        None)
    o = common.json_decode(s)
    if not o or "initiator" not in o:
        return []
    return common.get_node_value(o, "initiator")
def list_ipinterfaces(self, hostName):
    """
    Lists all IP interfaces belonging to a given host.

    Parameters
        hostName : host name or URI
    Returns:
        list of ip_interface entries (empty when the host has none)
    """
    # was: 'common.is_uri(hostName) == False' — test the result directly
    if not common.is_uri(hostName):
        hostUri = self.query_by_name(hostName, None)
    else:
        hostUri = hostName
    (s, h) = common.service_json_request(
        self.__ipAddr, self.__port, "GET",
        Host.URI_HOST_LIST_IPINTERFACES.format(hostUri),
        None)
    o = common.json_decode(s)
    if not o or "ip_interface" not in o:
        return []
    return common.get_node_value(o, "ip_interface")
def ps_query(self, name):
    '''
    Resolves a RecoverPoint protection system name to the URI of its
    first occurrence.

    Parameters:
        name: name of the protection system (URIs pass through)
    Returns:
        URI of the protection system
    Raises:
        SOSError: when no system with that name exists
    '''
    if common.is_uri(name):
        return name
    for href in self.ps_list_uris():
        details = common.show_by_href(self.__ipAddr, self.__port, href)
        if not details:
            continue
        if details['name'] == name:
            return details['id']
    raise SOSError(SOSError.SOS_FAILURE_ERR,
                   "RecoverPoint:" + name + ": not found")
def exportgroup_query(self, name, project, tenant):
    """
    Resolves an export group name/id to its id.

    parameters:
        name : Name/id of the export group.
        project : project name
        tenant : tenant name
    return
        return with id of the export group.
    """
    if common.is_uri(name):
        return name
    uris = self.exportgroup_list(project, tenant)
    for uri in uris:
        exportgroup = self.exportgroup_show(uri, project, tenant)
        if exportgroup:
            if exportgroup["name"] == name:
                return exportgroup["id"]
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Export Group " + name + ": not found")
# NOTE(review): dangling triple-quote below — it most likely opens a
# commented-out block that closes further down the file; verify the
# matching delimiter before removing it.
"""
def keypool_list(self, projectname, tenant, apitype, uid, secret):
    '''
    Returns all the keypools in a vdc.

    Parameters:
        projectname: project name
        tenant: tenant name
        apitype: api type (s3, swift or atmos); empty lists all three
        uid: user id
        secret: secret key
    Returns:
        list of per-API keypool listings
    '''
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    kplst = []
    try:
        # was: every call below passed the undefined name 'secretkey';
        # the parameter is 'secret'
        if ((not apitype) or (apitype == 's3')):
            kplst.append(
                s3_bucket_list(namespace, projectname, uid, secret))
        if ((not apitype) or (apitype == 'swift')):
            kplst.append(
                swift_container_list(namespace, projectname, uid, secret))
        if ((not apitype) or (apitype == 'atmos')):
            kplst.append(
                atmos_subtenant_list(namespace, projectname, uid, secret))
        # TODO: convert to table format
        return kplst
    except SOSError as e:
        raise e
def key_read(self, key, keypool, tenant, apitype, version, filepath,
             uid, secret, type):
    '''
    Makes a REST API call to retrieve a key's details based on its
    key, keypool, apitype, uid and secret.

    Raises:
        SOSError: on an unknown API type
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if apitype == 's3':
        return s3_key_read(namespace, keypool, key, version, filepath,
                           uid, secret)
    if apitype == 'swift':
        return swift_object_read(namespace, keypool, key, version,
                                 filepath, uid, secret)
    if apitype == 'atmos':
        # atmos additionally takes the content type
        return atmos_object_read(namespace, keypool, key, version,
                                 filepath, uid, secret, type)
    raise SOSError(SOSError.VALUE_ERR,
                   "Wrong API type " + apitype + " specified")
def storagesystem_query(self, devicename, devicetype, serialnumber=None):
    '''
    Returns the URI of a storage system, found by name or serial number.

    Parameters:
        devicename: name of the system (URIs pass through unchanged)
        devicetype: type of system
        serialnumber: optional serial number; takes precedence when given
    Returns:
        system URI
    '''
    from storagesystem import StorageSystem
    if common.is_uri(devicename):
        return devicename
    obj = StorageSystem(self.__ipAddr, self.__port)
    if serialnumber and len(serialnumber) > 0:
        return obj.query_by_serial_number_and_type(serialnumber,
                                                   devicetype)
    device = obj.show(name=devicename, type=devicetype)
    return device['id']
def atmos_subtenant_create(namespace, tenant, keypool, project, vpool,
                           uid, secretkey):
    '''
    Creates an Atmos subtenant.

    NOTE(review): declares no 'self' parameter yet reads
    self.__ipAddr / self.__port; references the undefined names
    'projectname' and 'container'; and 'vpool_uri' / 'project_uri' are
    left unbound when vpool/project are not supplied. Compare with the
    s3/swift counterparts and verify before use.
    '''
    if (vpool):
        from virtualpool import VirtualPool
        obj = VirtualPool(self.__ipAddr, self.__port)
        vpool_uri = obj.vpool_query(vpool, 'object')
        vpool_uri = vpool_uri.strip()
    if ((project) and (not common.is_uri(project))):
        from project import Project
        obj = Project(self.__ipAddr, self.__port)
        projectlst = obj.project_list(tenant)
        project_uri = None
        for projiter in projectlst:
            # NOTE(review): 'projectname' is undefined — presumably
            # should be 'project'; confirm.
            if (projiter['name'] == projectname):
                project_uri = projiter['id']
        if (not project_uri):
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "Project " + project + ": not found")
    _headers = dict()
    _headers['x-emc-namespace'] = namespace
    _headers['x-emc-vpool'] = vpool_uri
    _headers['x-emc-project-id'] = project_uri
    # NOTE(review): 'container' is undefined in this scope, and the URI
    # template is the Swift one — verify this is the intended endpoint.
    (s, h) = common.service_json_request(
        self.__ipAddr, ATMOS_PORT, "PUT",
        self.URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container),
        None, None, False, 'application/json', None, _headers)
    if (h['subtenantID']):
        return h['subtenantID']
def keypool_delete(self, keypool, tenant, uid, secret, apitype=None):
    '''Delete a keypool by name within a tenant's namespace.

    Parameters:
        keypool: label of the keypool
        tenant: tenant name (resolved to an object namespace)
        uid: user id
        secret: secret key
        apitype: api type ('s3', 'swift' or 'atmos'); BUG FIX -- the
            original body read 'apitype' without it being defined
            anywhere, so this parameter (defaulted for backward
            compatibility) was added.
    Returns:
        JSON payload response from the protocol-specific delete helper.
    Raises:
        SOSError: when apitype is not one of the supported values.
    '''
    # NOTE(review): namespace is only bound when tenant is not a URI --
    # same pattern as the other keypool methods; confirm callers.
    if not common.is_uri(tenant):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    try:
        # BUG FIX: the original passed the undefined name 'secretkey';
        # the parameter in scope is 'secret'.
        if apitype == 's3':
            return s3_bucket_delete(namespace, keypool, uid, secret)
        elif apitype == 'swift':
            return swift_container_delete(namespace, keypool, uid, secret)
        elif apitype == 'atmos':
            return atmos_subtenant_delete(namespace, keypool, uid, secret)
        else:
            # str() so the default None apitype still yields a readable
            # error instead of a TypeError during concatenation.
            raise SOSError(SOSError.VALUE_ERR,
                           "Wrong API type " + str(apitype) + " specified")
    except SOSError as e:
        raise e
if snapshot_params is not None and (len(snapshot_params) >0): update_request['snapshot_params'] = snapshot_params try: body = json.dumps(update_request) (s, h) = common.service_json_request(self.__ipAddr, self.__port, 'PUT', FilePolicy.URI_FILE_POLICY_UPDATE.format(filepolicy['id']), body) if not s: return None o = common.json_decode(s) return o except SOSError, e: errorMessage = str(e) if common.is_uri(filepolicy['id']): errorMessage = str(e).replace(filepolicy['id'], label) common.format_err_msg_and_raise('update', 'filepolicy', errorMessage, e.err_code) def filepolicy_assign( self, name, assign_to_vpools, project_assign_vpool, assign_to_projects, source_varray, target_varrays, ): filepolicy = self.filepolicy_query(name)
def storagepool_create(self, storagename, poolname, protocol, maxSnapshots,
                       consistency, freeCapacity, totalCapacity, extensions,
                       deviceType):
    '''Create a storage pool on the given storage system.

    Parameters:
        storagename: name (or URI) of the storage system
        poolname: name of the new storage pool
        protocol: protocols supported by the pool
        maxSnapshots: maximum snapshots permitted on the pool
        consistency: multi-volume consistency flag
        freeCapacity: free capacity of the pool
        totalCapacity: total capacity of the pool
        extensions: list of 'key=value' extended attribute strings
        deviceType: type of the storage system
    Returns:
        JSON payload of the created storage pool.
    Raises:
        SOSError: when mandatory extensions for the device type are
            missing, or a pool with the same name already exists.
    '''
    uri = None
    sstype = None
    if not common.is_uri(storagename):
        from storagesystem import StorageSystem
        obj = StorageSystem(self.__ipAddr, self.__port)
        device = obj.show(name=storagename, type=deviceType)
        uri = device['id']
        sstype = deviceType
    # BUG FIX: original iterated 'extensions' unconditionally (TypeError
    # when None) and shadowed the builtin 'iter' as the loop variable.
    checklist = []
    for extension in (extensions or []):
        (key, value) = extension.split('=', 1)
        checklist.append(key)
    # Per-device-type mandatory extension checks.
    if sstype in ('vnxblock', 'vnxfile', 'vmax'):
        if ('NativeId' not in checklist) or ('PoolType' not in checklist):
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                "error: For device type " + sstype +
                " -nativeid, -pooltype are required")
    if sstype == 'vnxfile':
        if 'Name' not in checklist:
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                "error: For device type " + sstype +
                " -controllerpoolname is required")
    # Fail early if a pool with this name already exists; NOT_FOUND from
    # the show call is the expected "safe to create" outcome.
    storage_pool_exists = True
    try:
        self.storagepool_show(poolname, storagename, None, deviceType)
    except SOSError as e:
        if e.err_code == SOSError.NOT_FOUND_ERR:
            storage_pool_exists = False
        else:
            raise e
    if storage_pool_exists:
        raise SOSError(
            SOSError.ENTRY_ALREADY_EXISTS_ERR,
            "Storage pool with name: " + poolname + " already exists")
    # Only truthy parameters are sent to the server.
    parms = dict()
    if poolname:
        parms['name'] = poolname
    if protocol:
        parms['protocols'] = protocol
    if maxSnapshots:
        parms['max_snapshots'] = maxSnapshots
    if consistency:
        parms['multi_volume_consistency'] = consistency
    if extensions:
        parms['controller_params'] = self.__encode_map(extensions)
    if freeCapacity:
        parms['free_capacity'] = freeCapacity
    if totalCapacity:
        parms['total_capacity'] = totalCapacity
    body = None
    if parms:
        body = json.dumps(parms)
    (s, h) = common.service_json_request(
        self.__ipAddr, self.__port, "POST",
        StoragePool.URI_STORAGEPOOLS.format(uri), body)
    return common.json_decode(s)