def keypool_create(self, keypool, projectname, tenant, vpoolname, apitype,
                   uid, secret):
    '''
    Creates a keypool.

    parameters:
        keypool: label of the keypool
        projectname: project name
        tenant: tenant name
        vpoolname: vpool name
        apitype: api type (s3, swift or atmos)
        uid: user id
        secret: secret key
    Returns:
        JSON payload response
    Raises:
        SOSError: when the project is not found or apitype is invalid
    '''
    # Resolved lazily below; each stays None when the corresponding name
    # is not given (or is already a URI).  The original left these
    # unbound in that case and would hit a NameError at the create call.
    project_uri = None
    vpool_uri = None
    namespace = None
    if (projectname and (not common.is_uri(projectname))):
        from project import Project
        obj = Project(self.__ipAddr, self.__port)
        projectlst = obj.project_list(tenant)
        for projiter in projectlst:
            if (projiter['name'] == projectname):
                project_uri = projiter['id']
        if (not project_uri):
            # BUG FIX: message typo "Porject" corrected.
            raise SOSError(SOSError.VALUE_ERR,
                           "Project " + projectname + ": not found")
    if (vpoolname and (not common.is_uri(vpoolname))):
        from virtualpool import VirtualPool
        obj = VirtualPool(self.__ipAddr, self.__port)
        vpool = obj.vpool_show(vpoolname, 'object')
        vpool_uri = vpool['id']
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    # BUG FIX: the original passed the undefined name 'secretkey'; the
    # parameter is 'secret'.
    if (apitype == 's3'):
        return s3_bucket_create(namespace, keypool, project_uri,
                                vpool_uri, uid, secret)
    elif (apitype == 'swift'):
        return swift_container_create(namespace, keypool, project_uri,
                                      vpool_uri, uid, secret)
    elif (apitype == 'atmos'):
        return atmos_subtenant_create(namespace, tenant, keypool,
                                      project_uri, vpool_uri, uid, secret)
    else:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def key_list_versions(self, keypool, key, tenant, apitype, uid, secret):
    '''
    Returns versions of the key.

    Parameters:
        key: label of the key
        keypool: label of the keypool
        tenant: tenant name
        apitype: api to be used
        uid: user id
        secret: secret
    Returns:
        JSON payload of key list
    Raises:
        SOSError: when the api type does not support versioning or is
        unknown
    '''
    namespace = None  # stays None when tenant is already a URI
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if (apitype == 's3'):
        # BUG FIX: the result was computed but never returned.
        return s3_key_list_versions(namespace, keypool, key, uid, secret)
    elif ((apitype == 'swift') or (apitype == 'atmos')):
        raise SOSError(
            SOSError.NOT_FOUND_ERR,
            "Versioning not available with API type " + apitype)
    else:
        # BUG FIX: message typo "Wroing" corrected.
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
def monitor_get_events(args):
    """Retrieve monitoring events for the requested time frame.

    Returns JSON-formatted text when args.format is "json", otherwise the
    raw response.
    """
    monitor = Monitor(args.ip, args.port)
    try:
        # strftime cannot format years at or before 1900.
        if int(args.year) <= 1900:
            print("error: year=" + args.year +
                  " is before 1900, it require year >= 1900")
            return
        window = common.get_formatted_time_string(
            args.year, args.month, args.day, args.hour, args.minute)
        events = monitor.get_events(args.format, window)
        return (common.format_json_object(events)
                if args.format == "json" else events)
    except ValueError as e:
        raise SOSError(SOSError.CMD_LINE_ERR, "error: " + str(e))
    except SOSError as e:
        if e.err_code != SOSError.SOS_FAILURE_ERR:
            raise e
        raise SOSError(SOSError.SOS_FAILURE_ERR,
                       "Unable to get requested usage events")
def initiator_update(args):
    '''CLI wrapper: update an initiator's protocol, node wwn or port wwn.

    Raises:
        SOSError: when no updatable argument is given, or when a node
        wwn is supplied for an iSCSI initiator.
    '''
    if (args.newprotocol is None and args.newinitiatorwwn is None and
            args.newinitiatorportwwn is None):
        raise SOSError(
            SOSError.CMD_LINE_ERR,
            sys.argv[0] + " " + sys.argv[1] + " " + sys.argv[2] +
            ": error:" + "At least one of the arguments :"
            "-newprotocol -newinitiatorwwn -newinitiatorportwwn"
            " should be provided to update the Host")
    if (args.newprotocol == "iSCSI" and args.newinitiatorwwn):
        # BUG FIX: the flag name in the message was misspelled
        # ("-newinititorwwn").
        raise SOSError(
            SOSError.CMD_LINE_ERR,
            sys.argv[0] + " " + sys.argv[1] + " " + sys.argv[2] +
            ": error: -newinitiatorwwn " +
            "is not required for iSCSI type initiator")

    initiatorObj = HostInitiator(args.ip, args.port)
    try:
        initiatorUri = initiatorObj.query_by_portwwn(args.initiatorportwwn,
                                                     args.hostlabel,
                                                     args.tenant)
        initiatorObj.update(initiatorUri,
                            args.newprotocol,
                            args.newinitiatorwwn,
                            args.newinitiatorportwwn,
                            args.newinitname)
    except SOSError as e:
        common.format_err_msg_and_raise("update", "initiator",
                                        e.err_text, e.err_code)
def storagepool_update(args):
    """Validate numeric limits and update a storage pool's attributes."""
    pool_api = StoragePool(args.ip, args.port)
    try:
        if args.maxpoolutilization:
            if not (0 <= int(args.maxpoolutilization) <= 100):
                raise SOSError(
                    SOSError.CMD_LINE_ERR,
                    "Please ensure max pool utilization is >=0 and <=100")
        if args.maxthinpoolsubscription:
            if int(args.maxthinpoolsubscription) < 0:
                raise SOSError(
                    SOSError.CMD_LINE_ERR,
                    "Please ensure max thin pool subscription is >=0")
        if args.maxresources:
            if int(args.maxresources) < 0:
                raise SOSError(SOSError.CMD_LINE_ERR,
                               "Please ensure max resources is >=0")
        pool_api.storagepool_update(
            args.storagesystem, args.serialnumber, args.type, args.name,
            args.nhadd, args.nhrem, args.volumetype, args.maxresources,
            args.maxpoolutilization, args.maxthinpoolsubscription)
    except SOSError as e:
        if e.err_code == SOSError.NOT_FOUND_ERR:
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "Storagepool update failed: " + e.err_text)
        raise e
def storageport_update(args):
    """Update network/varray assignments or the network id of storage ports."""
    port_api = Storageport(args.ip, args.port)
    try:
        cmd_prefix = sys.argv[0] + " " + sys.argv[1] + " " + sys.argv[2]
        nothing_given = (args.network is None and
                         args.varray_add is None and
                         args.varray_remove is None and
                         args.port_network_id is None)
        if nothing_given:
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                cmd_prefix + ": error:" +
                "At least one of the arguments : -network "
                "-varray_add -varray_remove -port_network_id"
                " should be provided to update the storageport")
        # A WWPN is unique per port, so -portname must accompany
        # -port_network_id.
        if args.port_network_id is not None and args.portname is None:
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                cmd_prefix + ": error:" +
                "To update port network id, port name should "
                "be provided.")
        port_api.command_validation(args.type, args.transporttype,
                                    args.port_network_id)
        port_api.storageport_update(args.serialnumber,
                                    args.storagesystem,
                                    args.type,
                                    args.transporttype,
                                    args.network,
                                    args.varray_add,
                                    args.varray_remove,
                                    args.portname,
                                    args.group,
                                    args.port_network_id)
    except SOSError as e:
        common.format_err_msg_and_raise("update", "storageport",
                                        e.err_text, e.err_code)
def bucket_acl(args):
    """Add, update or delete an ACL rule on a bucket."""
    bucket = Bucket(args.ip, args.port)
    try:
        if not args.tenant:
            args.tenant = ""
        if not (args.user or args.permissions):
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                "Anonymous user should be provided to add/update/delete acl rule")
        if args.user and args.group:
            raise SOSError(SOSError.CMD_LINE_ERR,
                           "User and Group cannot be specified together")
        bucket.put_acl(args.tenant, args.project, args.name,
                       args.operation, args.permissions, args.domain,
                       args.user, args.group, args.customgroup)
    except SOSError as e:
        common.format_err_msg_and_raise("acl", "name",
                                        e.err_text, e.err_code)
def vdc_update(self, name, label, endpoint, key, certificatefile,
               privatekeyfile, description):
    '''
    Updates a virtual data center.

    parameters:
        name: current name of the vdc (used to look up its URI)
        label: new name to set
        endpoint: new API endpoint
        key: new secret key
        certificatefile: path of a PEM certificate chain file to install
        privatekeyfile: path of a PEM private key file to install
        description: new description
    Returns:
        JSON payload response
    Raises:
        SOSError: when a certificate/key file cannot be read
    '''
    uri = self.vdc_query(name)
    parms = {}
    # BUG FIX: the original guarded this with 'if (name)', which always
    # holds (name is the lookup key), so "name": None could be sent when
    # no new label was supplied.
    if (label):
        parms["name"] = label
    if (endpoint):
        parms["api_endpoint"] = endpoint
    if (key):
        parms["key"] = key
    if (certificatefile is not None or privatekeyfile is not None):
        key_and_certificate = dict()
        if (certificatefile):
            try:
                # 'with' closes the handle (the original leaked it).
                with open(certificatefile, 'r') as f:
                    key_and_certificate['certificate_chain'] = f.read()
            except IOError as e:
                raise SOSError(e.errno, e.strerror)
        if (privatekeyfile):
            try:
                with open(privatekeyfile, 'r') as f:
                    key_and_certificate['private_key'] = f.read()
            except IOError as e:
                raise SOSError(e.errno, e.strerror)
        parms['key_and_certificate'] = key_and_certificate
    if (description):
        parms['description'] = description
    body = json.dumps(parms)
    (s, h) = common.service_json_request(
        self.__ipAddr, self.__port, "PUT",
        VirtualDatacenter.URI_VDC_GET.format(uri), body)
    return common.json_decode(s)
def cluster_update(args):
    """Update a cluster's tenant/label/vcenter/datacenter/auto-export flag."""
    cluster = Cluster(args.ip, args.port)
    try:
        nothing_to_update = (args.label is None and
                             args.tenant is None and
                             args.datacenter is None and
                             args.vcenter is None and
                             args.autoexportsenabled is None)
        if nothing_to_update:
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                sys.argv[0] + " " + sys.argv[1] + " " + sys.argv[2] +
                ": error:" + "At least one of the"
                " arguments :-tenant -label -vcenter -datacenter"
                " -autoExportsEnabled "
                " should be provided to update the cluster")
        # vcenter and datacenter only make sense as a pair
        if (args.datacenter or args.vcenter) and \
                (args.datacenter is None or args.vcenter is None):
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                sys.argv[0] + " " + sys.argv[1] + " " + sys.argv[2] +
                ": error:" + "For a vcenter associated cluster, both " +
                "vcenter and datacenter needs to be specified")
        cluster.cluster_update(args.name, args.tenant, args.datacenter,
                               args.vcenter, args.label,
                               args.autoexportsenabled)
    except SOSError as e:
        common.format_err_msg_and_raise("update", "cluster",
                                        e.err_text, e.err_code)
def storageport_update(self, serialNumber, storagedeviceName,
                       storagedeviceType, transportType, tzone,
                       varraysToAdd, varraysToRemove, portname,
                       groupname, port_nw_id):
    '''
    Updates storage port(s) of a storage system.

    When portname is given, exactly that port is updated (and may get a
    new port network id).  When only groupname is given, every matching
    port of that group is updated.  When neither is given, every port of
    the given transport type is updated.

    Raises:
        SOSError: when the named port or group is not found on the
        storage system.
    '''
    tzuri = Network(self.__ipAddr, self.__port).query_by_name(tzone)
    ssuri = self.storagesystem_query(storagedeviceName, serialNumber,
                                     storagedeviceType)
    if (ssuri is not None):
        porturis = self.storageport_list_uri(ssuri)
        is_found = False
        if (portname is not None):
            for porturi in porturis:
                sport = self.storageport_show_id(ssuri, porturi['id'])
                if (sport['transport_type'] == transportType and
                        portname == sport['port_name']):
                    self.storageport_update_uri(sport['id'], tzuri,
                                                varraysToAdd,
                                                varraysToRemove,
                                                port_nw_id)
                    is_found = True
                    break
            # if port name is not found on the storage system, then
            # raise not found exception
            if (is_found is False):
                raise SOSError(SOSError.NOT_FOUND_ERR,
                               "port name : %s is not found" % (portname))
        elif (groupname is not None):
            for porturi in porturis:
                sport = self.storageport_show_id(ssuri, porturi['id'])
                if (sport['transport_type'] == transportType and
                        groupname == sport['port_group']):
                    self.storageport_update_uri(sport['id'], tzuri,
                                                varraysToAdd,
                                                varraysToRemove, None)
                    is_found = True
            # if group name is not found on the storage system, then
            # raise not found exception
            if (is_found is False):
                raise SOSError(SOSError.NOT_FOUND_ERR,
                               "port group: %s is not found" % (groupname))
        elif (portname is None and groupname is None):
            for porturi in porturis:
                sport = self.storageport_show_id(ssuri, porturi['id'])
                if (sport['transport_type'] == transportType):
                    self.storageport_update_uri(sport['id'], tzuri,
                                                varraysToAdd,
                                                varraysToRemove, None)
    # NOTE: the original ended with a stray bare 'None' expression
    # statement; removed as dead code (the function still returns None).
def consistencygroup_update(args):
    """Add/remove volumes of a consistency group, optionally waiting."""
    if args.synctimeout != 0 and not args.sync:
        raise SOSError(SOSError.CMD_LINE_ERR,
                       "error: Cannot use synctimeout without Sync ")
    try:
        ConsistencyGroup(args.ip, args.port).update(
            args.name, args.project, args.tenant, args.add_volumes,
            args.remove_volumes, args.sync, args.synctimeout)
    except SOSError as e:
        raise SOSError(
            SOSError.SOS_FAILURE_ERR,
            "Consistency Group " + args.name + ": Update failed:\n" +
            e.err_text)
def command_validation(self, devicetype, tzonetype, port_name):
    """Validate that the transport type is allowed for the device type.

    Raises SOSError(CMD_LINE_ERR) when the combination is invalid;
    returns None otherwise.
    """
    ip_only_devices = ('vnxfile', 'isilon')
    fc_or_ip_devices = ('vnxblock', 'vmax')
    if devicetype in ip_only_devices:
        if tzonetype != 'IP':
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                devicetype + " transport type should be of IP type")
    elif (devicetype in fc_or_ip_devices or
          (devicetype == 'openstack' and port_name is not None)):
        if tzonetype == 'Ethernet':
            raise SOSError(
                SOSError.CMD_LINE_ERR,
                devicetype +
                " transport type should be of FC or IP type")
    return
def truststore_update_certificates(self, certstoadd=None,
                                   certstoremove=None):
    '''
    Updates the truststore certificates.

    parameters:
        certstoadd : path of a file holding a certificate to add
        certstoremove : path of a file holding a certificate to remove
    Returns:
        JSON payload response
    Raises:
        SOSError: when a certificate file cannot be read
    '''
    addcert = None
    remcert = None
    if (certstoadd):
        try:
            # 'with' closes the handle (the original leaked it)
            with open(certstoadd, 'r') as f1:
                addcert = f1.read()
        except IOError as e:
            raise SOSError(e.errno, e.strerror)
    if (certstoremove):
        try:
            with open(certstoremove, 'r') as f2:
                remcert = f2.read()
        except IOError as e:
            raise SOSError(e.errno, e.strerror)
    addlist = []
    remlist = []
    # empty file contents are deliberately skipped (original behavior)
    if (addcert):
        addlist.append(addcert)
    if (remcert):
        remlist.append(remcert)
    requestParams = {
        'add': addlist,
        'remove': remlist
    }
    body = json.dumps(requestParams)
    (s, h) = common.service_json_request(
        self.__ipAddr, self.__port, "PUT",
        TrustStore.URI_TRUSTSTORE, body)
    return common.json_decode(s)
def vcenterdatacenter_update(self, label, vcenter, tenantname,
                             newtenantname):
    '''
    Updates a vcenterdatacenter (rename / move to another tenant).

    parameters:
        label: label of the vcenterdatacenter
        vcenter: name of the owning vcenter
        tenantname: tenant the datacenter is currently shown under
        newtenantname: tenant to assign; the literal string 'null' is
                       passed through as-is
    Returns:
        JSON payload response
    Raises:
        SOSError: when the datacenter does not exist
    '''
    try:
        # Pre-existing control flow kept as-is: a successful show()
        # means the datacenter exists, which is signalled via
        # ENTRY_ALREADY_EXISTS_ERR and handled below where the actual
        # update is performed.
        check = self.vcenterdatacenter_show(label, vcenter, tenantname)
        if check:
            raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
                           "vcenterdatacenter " + label + ": found")
    except SOSError as e:
        if e.err_code == SOSError.ENTRY_ALREADY_EXISTS_ERR:
            uri = self.vcenterdatacenter_query(
                label, vcenter,
                VcenterDatacenter.DATACENTERS_FROM_ALL_TENANTS)
            params = dict()
            params['name'] = label
            if newtenantname is not None and newtenantname != 'null':
                from tenant import Tenant
                obj = Tenant(self.__ipAddr, self.__port)
                params['tenant'] = obj.tenant_query(newtenantname)
            elif newtenantname is not None:
                params['tenant'] = newtenantname
            body = json.dumps(params)
            (s, h) = common.service_json_request(
                self.__ipAddr, self.__port, "PUT",
                VcenterDatacenter.URI_DATACENTER.format(uri), body)
            o = common.json_decode(s)
            return o
        else:
            raise e
    if not check:
        # BUG FIX: message typo 'dost not exist' corrected.
        raise SOSError(
            SOSError.NOT_FOUND_ERR,
            "vcenterdatacenter with name " + label + " does not exist")
def create_volume_from_snapshot(self, snapshot, volume):
    """Creates volume from given snapshot ( snapshot clone to volume )."""
    self.authenticate_user()
    from common import SOSError

    snap_name = snapshot['name']
    parent_vol = self.volume_api.get(context.get_admin_context(),
                                     snapshot['volume_id'])
    parent_name = self._get_volume_name(parent_vol)
    target_name = self._get_volume_name(volume)
    tenant_project = (self.configuration.vipr_tenant + "/" +
                      self.configuration.vipr_project)
    try:
        self.volume_obj.clone(tenant_project,
                              target_name,
                              1,  # number of volumes
                              parent_name,
                              snap_name,
                              sync=True)
    except SOSError as e:
        if e.err_code != SOSError.SOS_FAILURE_ERR:
            raise e
        raise SOSError(
            SOSError.SOS_FAILURE_ERR,
            "Snapshot " + snap_name + ": clone failed\n" + e.err_text)
def varray_create(self, label, autosanzoning, devicereg, protection):
    '''
    Creates a varray.

    parameters:
        label: label of the varray
    Returns:
        JSON payload response
    Raises:
        SOSError: when a varray with the same label already exists
    '''
    try:
        check = self.varray_show(label)
    except SOSError as e:
        if e.err_code != SOSError.NOT_FOUND_ERR:
            raise e
        # NOT_FOUND means the name is free: create it
        params = {'name': label}
        if autosanzoning:
            params['auto_san_zoning'] = autosanzoning
        if devicereg:
            params['device_registered'] = devicereg
        if protection:
            params['protection_type'] = protection
        (s, h) = common.service_json_request(
            self.__ipAddr, self.__port, "POST",
            VirtualArray.URI_VIRTUALARRAY, json.dumps(params))
        return common.json_decode(s)
    if check:
        raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
                       "varray with name " + label + " already exists")
def project_query(self, name):
    '''
    Retrieves the UUID of a project based on its (tenant-qualified) name.

    Parameters:
        name: name of project, optionally as "tenant/project"
    Returns:
        UUID of project
    Throws:
        SOSError - when project name is not found
    '''
    if common.is_uri(name):
        return name
    (tenant_name, project_name) = common.get_parent_child_from_xpath(name)
    from tenant import Tenant
    tenant_api = Tenant(self.__ipAddr, self.__port)
    tenant_uri = tenant_api.tenant_query(tenant_name)
    for project in (self.project_list(tenant_uri) or []):
        if not project:
            continue
        detail = self.project_show_by_uri(project['id'])
        if detail and detail['name'] == project_name:
            return detail['id']
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   'Project: ' + project_name + ' not found')
def storageport_register(self, serialNumber, storagedeviceName,
                         storagedeviceType, transportType, portname):
    """Register a named storage port, or every still-unregistered port of
    the given transport type when no port name is supplied."""
    ssuri = self.storagesystem_query(storagedeviceName, serialNumber,
                                     storagedeviceType)
    if ssuri is None:
        return None
    porturis = self.storageport_list_uri(ssuri)
    if portname is not None:
        for entry in porturis:
            sport = self.storageport_show_id(ssuri, entry['id'])
            if (sport['port_name'] == portname and
                    sport['transport_type'] == transportType):
                return self.storageport_register_uri(ssuri, entry['id'])
        raise SOSError(SOSError.NOT_FOUND_ERR,
                       "Storage port : " + portname + " is not found")
    for entry in porturis:
        sport = self.storageport_show_id(ssuri, entry['id'])
        # only register ports that are still unregistered
        if (sport['transport_type'] == transportType and
                sport['registration_status'] == 'UNREGISTERED'):
            self.storageport_register_uri(ssuri, entry['id'])
    return None
def ps_list(args):
    '''CLI wrapper: list protection systems.

    Returns the JSON text in verbose mode; otherwise prints a table
    (long mode adds installation-id and discovery-status columns).
    '''
    obj = ProtectionSystem(args.ip, args.port)
    try:
        output = []
        uris = obj.ps_list()
        if len(uris) > 0:
            for item in obj.ps_list_by_hrefs(uris):
                output.append(item)
        # IDIOM FIX: 'args.verbose == True' / 'args.long == True'
        # replaced with plain truth tests; duplicated import hoisted.
        if args.verbose:
            return common.format_json_object(output)
        if len(output) > 0:
            from common import TableGenerator
            if args.long:
                TableGenerator(output,
                               ['name', 'system_type', 'ip_address',
                                'port_number', 'installation_id',
                                'job_discovery_status']).printTable()
            else:
                TableGenerator(output,
                               ['name', 'system_type', 'ip_address',
                                'port_number']).printTable()
    except SOSError as e:
        if e.err_code == SOSError.SOS_FAILURE_ERR:
            raise SOSError(SOSError.SOS_FAILURE_ERR,
                           "Protection system list failed\n" + e.err_text)
        else:
            common.format_err_msg_and_raise("list", "protectionsystem",
                                            e.err_text, e.err_code)
def block_until_complete(self, resuri, task_id, synctimeout):
    '''
    Polls the quota-directory task until it reaches the "ready" state.

    Parameters:
        resuri: resource URI (unused here; kept for interface
                compatibility)
        task_id: id of the task to poll
        synctimeout: seconds before giving up; falls back to
                     self.timeout when falsy
    Raises:
        SOSError: when the task ends in the "error" state
    '''
    if synctimeout:
        t = Timer(synctimeout, self.timeout_handler)
    else:
        t = Timer(self.timeout, self.timeout_handler)
    t.start()
    while (True):
        out = self.quotadirectory_show_task_opid(task_id)
        if (out):
            if (out["state"] == "ready"):
                # cancel the timer and return
                t.cancel()
                break
            # if the status of the task is 'error' then cancel the timer
            # and raise exception
            if (out["state"] == "error"):
                t.cancel()
                error_message = "Please see logs for more details"
                if ("service_error" in out and
                        "details" in out["service_error"]):
                    error_message = out["service_error"]["details"]
                raise SOSError(
                    SOSError.VALUE_ERR,
                    "Task: " + task_id +
                    " is failed with error: " + error_message)
        if (self.isTimeout):
            # BUG FIX: Python 2 'print' statement replaced with the
            # function form used elsewhere in this file.
            print("Operation timed out")
            self.isTimeout = False
            break
    return
def consistencygroup_list(args):
    """List consistency groups: a table by default, raw JSON when verbose."""
    cg_api = ConsistencyGroup(args.ip, args.port)
    try:
        table_rows = []
        raw_records = []
        for uri in cg_api.list(args.project, args.tenant):
            cg = cg_api.show(uri, args.project, args.tenant)
            if not cg:
                continue
            raw_records.append(cg)
            from volume import Volume
            from storagesystem import StorageSystem
            cg["system_consistency_groups"] = " "
            if "volumes" in cg:
                vol_api = Volume(args.ip, args.port)
                names = []
                for entry in common.get_node_value(cg, "volumes"):
                    vol = vol_api.show_by_uri(entry['id'])
                    if vol:
                        names.append(vol['name'])
                # replace volume hrefs with their display names
                cg['volumes'] = names
            table_rows.append(cg)
        if not args.verbose:
            if len(table_rows):
                TableGenerator(table_rows,
                               ['name', 'volumes']).printTable()
        elif len(raw_records):
            return common.format_json_object(raw_records)
    except SOSError as e:
        raise SOSError(SOSError.SOS_FAILURE_ERR,
                       "Consistency Group List failed:\n" + e.err_text)
def key_write(self, key, filepath, keypool, tenant, apitype, uid, secret,
              type):
    '''
    Writes a key.

    parameters:
        key: key
        filepath: path of the file holding the value
        keypool: keypool
        tenant: tenant name
        apitype: api to be used (s3, swift or atmos)
        uid: user id
        secret: secret
    Returns:
        JSON payload response
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        namespace = Tenant(self.__ipAddr,
                           self.__port).namespace_get(tenant)
    # dispatch table instead of an if/elif chain; the lambdas defer
    # evaluation so nothing runs for an unknown api type
    writers = {
        's3': lambda: s3_key_write(namespace, keypool, key, filepath,
                                   uid, secret),
        'swift': lambda: swift_object_write(namespace, keypool, key,
                                            filepath, uid, secret),
        'atmos': lambda: atmos_object_write(namespace, keypool, key,
                                            filepath, uid, secret, type),
    }
    if apitype not in writers:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")
    writers[apitype]()
def list_tasks(self, host_name, initiatorportwwn, task_id=None,
               tenant=None):
    """Return tasks (all of them, or one by id) for the initiator with
    the given port WWN on the given host."""
    uri = self.query_by_portwwn(initiatorportwwn, host_name, tenant)
    hostinitiator = self.show_by_uri(uri)
    if hostinitiator['initiator_port'] == initiatorportwwn:
        if not task_id:
            return common.get_tasks_by_resourceuri(
                "initiator", uri, self.__ipAddr, self.__port)
        res = common.get_task_by_resourceuri_and_taskId(
            "initiator", uri, task_id, self.__ipAddr, self.__port)
        if res:
            return res
    raise SOSError(
        SOSError.NOT_FOUND_ERR,
        "Initiator with Initiatorportwwn : " + initiatorportwwn +
        " not found")
def storagepool_query(self, qualifiedname, storagesystem, serialnumber,
                      devicetype):
    """Resolve a storage pool name to a (device_id, pool_id) pair."""
    if storagesystem:
        device_id = self.storagesystem_query(storagesystem, devicetype,
                                             None)
    else:
        device_id = self.storagesystem_query(None, devicetype,
                                             serialnumber)
    for pool_uri in self.storagepool_list_by_uri(device_id):
        pool = self.storagepool_show_by_uri(device_id, pool_uri)
        if not pool:
            continue
        try:
            if pool['pool_name'] == qualifiedname:
                return (device_id, pool['id'])
        except KeyError:
            # pools without a 'pool_name' entry are skipped
            continue
    raise SOSError(SOSError.NOT_FOUND_ERR,
                   "Storagepool of name " + qualifiedname + " not found")
def create_volume(self, vol):
    """Create a ViPR volume for the given cinder volume.

    Raises:
        SOSError: when the create request fails.
    """
    self.authenticate_user()
    name = self._get_volume_name(vol)
    size = int(vol['size']) * 1073741824  # GiB -> bytes

    from common import SOSError
    vpool = self._get_vpool(vol)
    self.vpool = vpool['ViPR:VPOOL']
    try:
        self.volume_obj.create(
            self.configuration.vipr_tenant + "/" +
            self.configuration.vipr_project,
            name, size, self.configuration.vipr_varray,
            self.vpool,
            protocol=None,  # no longer specified in volume creation
            sync=True,
            number_of_volumes=1,
            thin_provisioned=None,
            consistencygroup=None)
    except SOSError as e:
        if e.err_code == SOSError.SOS_FAILURE_ERR:
            # BUG FIX: message said "Tag failed" (copy-paste from the
            # tagging path); this is volume creation.
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "Volume " + name + ": create failed\n" + e.err_text)
        else:
            raise e
def terminate_connection(self, volume, protocol, initiatorNodes,
                         initiatorPorts, hostname):
    """Detach the volume from the host's initiators.

    Returns:
        The volume's ITL records.
    Raises:
        SOSError: when the detach fails.
    """
    from common import SOSError
    try:
        self.authenticate_user()
        volumename = self._get_volume_name(volume)
        tenantproject = self.configuration.vipr_tenant + \
            '/' + self.configuration.vipr_project
        voldetails = self.volume_obj.show(tenantproject + '/' +
                                          volumename)
        volid = voldetails['id']

        # find the export groups exposing the volume to the given
        # initiator ports
        exports = self.volume_obj.get_exports_by_uri(volid)
        exportgroups = set()
        itls = exports['itl']
        for itl in itls:
            itl_port = itl['initiator']['port']
            if (itl_port in initiatorPorts):
                exportgroups.add(itl['export']['id'])

        # BUG FIX: the original used 'for ... else' with no 'break', so
        # the "already detached" message was logged on every call.  Log
        # it only when no matching export group exists.
        if exportgroups:
            for exportgroup in exportgroups:
                self.exportgroup_obj.exportgroup_remove_volumes_by_uri(
                    exportgroup, volid, True, None, None, None, None)
        else:
            LOG.info("No export group found for the host: " + hostname +
                     "; this is considered already detached.")
        return itls
    except SOSError as e:
        raise SOSError(
            SOSError.SOS_FAILURE_ERR,
            "Detaching volume " + volumename + " from host " +
            hostname + " failed: " + e.err_text)
def create_snapshot(self, snapshot):
    """Create a ViPR block snapshot for the given cinder snapshot."""
    self.authenticate_user()
    from common import SOSError
    try:
        snapshotname = snapshot['name']
        source_volume = snapshot['volume']
        volumename = self._get_volume_name(source_volume)
        resourceUri = self.snapshot_obj.storageResource_query(
            'block',
            fileshareName=None,
            volumeName=volumename,
            cgName=None,
            project=self.configuration.vipr_project,
            tenant=self.configuration.vipr_tenant)
        self.snapshot_obj.snapshot_create(
            'block',     # storage resource type
            'volumes',   # storage resource type name
            resourceUri,
            snapshotname,
            False,       # inactive
            None,        # rptype
            True)        # sync
        return
    except SOSError as e:
        if e.err_code == SOSError.SOS_FAILURE_ERR:
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "Snapshot: " + snapshotname + ", Create Failed\n" +
                e.err_text)
        else:
            raise e
def keypool_update(self, keypool, tenant, versioning, apitype, uid,
                   secret):
    '''
    Updates keypool versioning.

    parameters:
        keypool: label of the keypool
        tenant: tenant name
        versioning: versioning state to set
        apitype: api type (only s3 supports versioning)
        uid: user id
        secret: secret key
    Returns:
        JSON payload response
    Raises:
        SOSError: when apitype is not 's3'
    '''
    namespace = None  # stays None when tenant is already a URI
    if (not common.is_uri(tenant)):
        from tenant import Tenant
        tenant_obj = Tenant(self.__ipAddr, self.__port)
        namespace = tenant_obj.namespace_get(tenant)
    if (apitype == 's3'):
        # BUG FIX: the original passed the undefined name 'secretkey';
        # the parameter is 'secret'.
        return s3_bucket_update(namespace, keypool, versioning, uid,
                                secret)
    raise SOSError(SOSError.VALUE_ERR,
                   "Wrong API type " + apitype + " specified")
def create(self, name, nwtype, varrays=None, endpoints=None):
    '''
    Makes REST API call to create a network.

    Parameters:
        name: name of network
        nwtype: type of transport protocol (FC, IP or Ethernet)
        varrays: list of varrays to be associated
        endpoints: list of endpoints to be added to the network
    Returns:
        Created task details in JSON response payload
    Raises:
        SOSError: when a network with the same name already exists
    '''
    if self.query_by_name(name):
        raise SOSError(SOSError.ENTRY_ALREADY_EXISTS_ERR,
                       "Network with name " + name + " already exists")
    payload = {
        'name': name,
        'transport_type': nwtype,
    }
    if varrays:
        payload['varrays'] = self.getVarrayList(varrays)
    if endpoints:
        payload['endpoints'] = self.getEndPointList(endpoints)
    (s, h) = common.service_json_request(self.__ipAddr, self.__port,
                                         "POST", Network.URI_NETWORKS,
                                         json.dumps(payload))
    return common.json_decode(s)
def key_delete(self, key, keypool, tenant, apitype, version, uid, secret):
    '''
    Makes a REST API call to delete a key by its name.

    Parameters:
        key: label of the key
        keypool: label of the keypool
        tenant: tenant name
        apitype: api to be used (s3, swift or atmos)
        version: version of the key to delete
        uid: user id
        secret: secret
    Returns:
        JSON payload of key list
    '''
    if not common.is_uri(tenant):
        from tenant import Tenant
        namespace = Tenant(self.__ipAddr,
                           self.__port).namespace_get(tenant)
    if apitype == 'atmos':
        # atmos deletes take no version argument
        atmos_object_delete(namespace, keypool, key, uid, secret)
    elif apitype in ('s3', 'swift'):
        delete_fn = (s3_key_delete if apitype == 's3'
                     else swift_object_delete)
        delete_fn(namespace, keypool, key, version, uid, secret)
    else:
        raise SOSError(SOSError.VALUE_ERR,
                       "Wrong API type " + apitype + " specified")