def create(self, version, node_id=None, node_type=None, name=None):
    """
    Adds a node with a given node_id to the model
    :param version: Version of the client making the request
    :type version: int
    :param node_id: ID of the ALBA node to create
    :type node_id: str
    :param node_type: Type of the ALBA node to create
    :type node_type: str
    :param name: Name of the node (optional)
    :type name: str
    :return: Celery async task result
    :rtype: CeleryTask
    """
    if version >= 9:
        # Validation of 'name' and mandatory 'node_id' was introduced in API version 9
        error_description = None
        if name is not None and not re.match(Toolbox.regex_preset, name):
            error_description = 'Invalid name specified. Minimum 3, maximum 20 alpha-numeric characters, dashes and underscores'
        elif node_id is None and node_type != AlbaNode.NODE_TYPES.GENERIC:
            # Only GENERIC nodes may be registered without an explicit identifier
            error_description = 'Field node_id is mandatory for node_type != GENERIC'
        if error_description is not None:
            raise HttpNotAcceptableException(error='invalid_data',
                                             error_description=error_description)
    return AlbaNodeController.register.delay(node_id, node_type, name)
def _discover_nodes(cls, ip=None, node_id=None):
    # type: (Optional[str], Optional[str]) -> Dict[str, AlbaNode]
    """
    Discover ALBA nodes, either all reachable ones or one specific node
    :param ip: IP of ALBA node to retrieve
    :type ip: str
    :param node_id: ID of the ALBA node
    :type node_id: str
    :return: Dict with guid of the node mapped to the node itself
    :rtype: Dict[str, AlbaNode]
    """
    if ip is None:
        # No IP given: run a full discovery
        return dict(AlbaNodeController.discover_nodes())
    # A specific node was requested: model it transiently and validate its identity
    node = AlbaNodeController.model_volatile_node(node_id, AlbaNode.NODE_TYPES.ASD, ip)
    metadata = node.client.get_metadata()
    if metadata['_success'] is False and metadata['_error'] == 'Invalid credentials':
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Invalid credentials')
    if metadata['node_id'] != node_id:
        # The node answering on this IP is not the node the caller asked for
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Unexpected node identifier. {0} vs {1}'.format(metadata['node_id'], node_id))
    return {node.guid: node}
def expand_nsm_clusters(self, albabackend, version, cluster_names=None, amount=0):
    """
    Expand the amount of NSM Arakoon clusters for an ALBA Backend
    Internally managed NSM Arakoon clusters: deploy and claim additional clusters
    Externally managed NSM Arakoon clusters: claim the clusters passed in via 'cluster_names'
    :param albabackend: ALBA Backend to expand the amount of NSM Arakoon clusters
    :type albabackend: ovs.dal.hybrids.albabackend.AlbaBackend
    :param version: Version requested by the client
    :type version: int
    :param cluster_names: Names of the cluster to claim (Only applicable for externally managed NSM Arakoon clusters)
    :type cluster_names: list
    :param amount: Amount of additional NSM clusters to deploy (deprecated since API version 10)
    :type amount: int
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    if cluster_names is None:
        cluster_names = []
    # 'amount' was dropped from the API as of version 10
    if version >= 10 and amount > 0:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description="Parameter 'amount' has been deprecated since API version 10")
    if not isinstance(cluster_names, list):
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description="Cluster names passed should be of type 'list'")
    return AlbaArakoonController.nsm_checkup.delay(alba_backend_guid=albabackend.guid,
                                                   external_nsm_cluster_names=cluster_names)
def reset_osd(self, albanode, osd_id, safety):
    """
    Removes and re-adds an OSD
    :param albanode: ALBA node to remove a disk from
    :type albanode: ovs.dal.hybrids.albanode.AlbaNode
    :param osd_id: OSD ID to reset
    :type osd_id: str
    :param safety: Safety to maintain
    :type safety: dict
    :return: Celery async task result
    :rtype: CeleryTask
    """
    if safety is None:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Safety must be passed')
    cluster = albanode.alba_node_cluster
    if cluster is None:
        # Stand-alone node: reset directly on the node
        return AlbaNodeController.reset_osd.delay(albanode.guid, osd_id, safety)
    # Clustered node: the current node is treated as the 'active' side
    return AlbaNodeClusterController.reset_osd.delay(node_cluster_guid=cluster.guid,
                                                     node_guid=albanode.guid,
                                                     osd_id=osd_id,
                                                     safety=safety)
def create(self, request, role_guids=None):
    """
    Creates a Client
    :param request: Raw request
    :type request: Request
    :param role_guids: The GUIDs of the roles where the client should get access to
    :type role_guids: str
    """
    # 'role_guids' is handled separately below; keep it out of the serializer input
    if 'role_guids' in request.DATA:
        del request.DATA['role_guids']
    serializer = FullSerializer(Client, instance=Client(), data=request.DATA)
    client = serializer.deserialize()
    caller_allowed = (client.user is not None
                      and (client.user_guid == request.client.user_guid
                           or ApiToolbox.is_client_in_roles(request.client, ['manage'])))
    if caller_allowed:
        client.grant_type = 'CLIENT_CREDENTIALS'
        client.client_secret = OAuth2Toolbox.create_hash(64)
        client.save()
        junctions = client.user.group.roles
        if not role_guids:
            # No explicit roles requested: inherit all roles from the user's group
            roles = [junction.role for junction in junctions]
        else:
            # Only grant requested roles the user's group actually has
            allowed_role_guids = [junction.role_guid for junction in junctions]
            roles = [Role(guid) for guid in role_guids if guid in allowed_role_guids]
        for role in roles:
            link = RoleClient()
            link.client = client
            link.role = role
            link.save()
        return client
    # NOTE(review): this message is also raised when the caller lacks permission,
    # not only when the user is missing - confirm whether that is intentional
    raise HttpNotAcceptableException(error_description='A client must have a user', error='invalid_data')
def shrink_vpool(self, vpool, storagerouter_guid):
    """
    Remove the storagedriver linking the specified vPool and storagerouter_guid
    :param vpool: vPool to shrink (or delete if its the last storagerouter linked to it)
    :type vpool: VPool
    :param storagerouter_guid: Guid of the Storage Router
    :type storagerouter_guid: str
    """
    if len(vpool.vdisks) > 0:  # Check to prevent obsolete testing
        backend_info = vpool.metadata['backend']['backend_info']
        preset_name = backend_info['preset']
        # Verify the preset policy is still satisfiable before shrinking,
        # so the issue is transparent in the GUI instead of failing mid-operation
        ovs_client = OVSClient.get_instance(connection_info=backend_info['connection_info'],
                                            cache_store=VolatileFactory.get_client())
        presets = ovs_client.get('alba/backends/{0}'.format(backend_info['alba_backend_guid']),
                                 params={'contents': 'presets'})['presets']
        matching_presets = [preset for preset in presets if preset['name'] == preset_name]
        # A missing preset is tolerated (matches previous best-effort behaviour)
        if matching_presets and matching_presets[0]['is_available'] is False:
            raise RuntimeError('Policy is currently not satisfied: cannot shrink vPool {0} according to preset {1}'.format(vpool.name, preset_name))
    storagerouter = StorageRouter(storagerouter_guid)
    shared_storagedrivers = set(vpool.storagedrivers_guids) & set(storagerouter.storagedrivers_guids)
    if not shared_storagedrivers:
        raise HttpNotAcceptableException(error='impossible_request',
                                         error_description='Storage Router {0} is not a member of vPool {1}'.format(storagerouter.name, vpool.name))
    return VPoolController.shrink_vpool.delay(VPoolController, list(shared_storagedrivers)[0])
def devicename_exists(self, vpool, name=None, names=None):
    """
    Checks whether a given name can be created on the vpool
    :param vpool: vPool object
    :type vpool: VPool
    :param name: Candidate name
    :type name: str
    :param names: Candidate names
    :type names: list
    :return: Whether the devicename exists
    :rtype: bool
    """
    # Exactly one of 'name'/'names' must be given; later checks deliberately
    # overwrite the message so the most specific error wins
    error_message = None
    if not (name is None) ^ (names is None):
        error_message = 'Either the name (string) or the names (list of strings) parameter must be passed'
    if name is not None and not isinstance(name, basestring):
        error_message = 'The name parameter must be a string'
    if names is not None and not isinstance(names, list):
        error_message = 'The names parameter must be a list of strings'
    if error_message is not None:
        raise HttpNotAcceptableException(error='impossible_request',
                                         error_description=error_message)
    if name is not None:
        devicename = VDiskController.clean_devicename(name)
        return VDiskList.get_by_devicename_and_vpool(devicename, vpool) is not None
    return any(VDiskList.get_by_devicename_and_vpool(VDiskController.clean_devicename(candidate), vpool) is not None
               for candidate in names)
def create(self, request):
    """
    Creates a User
    :param request: The raw request
    :type request: Request
    """
    serializer = FullSerializer(User, instance=User(), data=request.DATA, allow_passwords=True)
    user = serializer.deserialize()
    if UserList.get_user_by_username(user.username) is not None:
        raise HttpNotAcceptableException(error='duplicate',
                                         error_description='User with this username already exists')
    user.save()
    # Every user gets two internal clients: one for password logins,
    # one for client-credential (API) logins
    password_client = Client()
    password_client.ovs_type = 'INTERNAL'
    password_client.grant_type = 'PASSWORD'
    password_client.user = user
    password_client.save()
    secret_charset = string.ascii_letters + string.digits + '|_=+*#@!/-[]{}<>.?,\'";:~'
    credentials_client = Client()
    credentials_client.ovs_type = 'INTERNAL'
    credentials_client.grant_type = 'CLIENT_CREDENTIALS'
    credentials_client.client_secret = ''.join(random.choice(secret_charset) for _ in range(128))
    credentials_client.user = user
    credentials_client.save()
    # Grant both clients every role of the user's group
    for junction in user.group.roles:
        for client in (credentials_client, password_client):
            role_client = RoleClient()
            role_client.client = client
            role_client.role = junction.role
            role_client.save()
    return user
def build_new_kwargs(original_function, request, instance, version, raw_version, passed_kwargs):
    # type: (callable, Union[WSGIRequest, Request], DataObject, int, str, **any) -> Tuple[dict, dict]
    """
    Convert all positional arguments to keyword arguments
    :param original_function: The originally decorated function
    :type original_function: callable
    :param request: API request object
    :type request: Union[WSGIRequest, Request]
    :param instance: The data object instance to inject
    :type instance: DataObject
    :param version: Parsed API version
    :type version: int
    :param raw_version: Unparsed API version
    :type raw_version: str
    :param passed_kwargs: Kwargs passed to the original function
    :type passed_kwargs: dict
    :return: The kwargs for the original function and the kwargs for the validator
    :rtype: Tuple[dict, dict]
    """
    function_metadata = original_function.ovs_metadata
    kwargs = {}
    validator_kwargs = {}
    # Sentinel to detect 'not passed at all'; None can't be used since None is a valid value
    empty = object()
    # Special reserved keywords, injected directly instead of parsed from the request
    reserved = {'version': version,
                'raw_version': raw_version,
                'request': request,
                'local_storagerouter': StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)}
    if instance is not None:
        reserved[object_type.__name__.lower()] = instance
    # Build kwargs twice: once for the decorated function, once for the validator
    for mandatory_vars, optional_vars, new_kwargs in [(function_metadata['load']['mandatory'][:], function_metadata['load']['optional'][:], kwargs),
                                                      (validation_mandatory_vars[:], validation_optional_vars[:], validator_kwargs)]:
        for keyword, value in reserved.iteritems():
            if keyword in mandatory_vars:
                new_kwargs[keyword] = value
                mandatory_vars.remove(keyword)
        # The rest of the parameters come from explicit kwargs, POST data or query params
        post_data = request.DATA if hasattr(request, 'DATA') else request.POST
        query_params = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
        data_containers = [passed_kwargs, post_data, query_params]
        for parameters, mandatory in ((mandatory_vars, True), (optional_vars, False)):
            for name in parameters:
                val = empty
                for container in data_containers:
                    val = container.get(name, empty)
                    # BUGFIX: compare against the sentinel by identity ('is'), not equality.
                    # '!=' invokes __eq__ on the passed value, which may be overloaded
                    # (e.g. objects with non-boolean equality) and break detection.
                    if val is not empty:
                        break
                if val is not empty:
                    # Embrace our design flaw. The query shouldn't be json dumped separately.
                    if name == 'query':
                        val = _try_parse(val)
                    new_kwargs[name] = _try_convert_bool(val)
                elif mandatory:
                    raise HttpNotAcceptableException(error_description='Invalid data passed: {0} is missing'.format(name),
                                                     error='invalid_data')
    return kwargs, validator_kwargs
def delete_vtemplate(self, vdisk):
    """
    Deletes a vDisk (template)
    :param vdisk: the vDisk (template) to delete
    :type vdisk: VDisk
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    # Only templates without clones may be removed
    error_description = None
    if not vdisk.is_vtemplate:
        error_description = 'vDisk should be a vTemplate'
    elif len(vdisk.child_vdisks) > 0:
        error_description = 'vTemplate has clones'
    if error_description is not None:
        raise HttpNotAcceptableException(error='impossible_request',
                                         error_description=error_description)
    return VDiskController.delete.delay(vdisk_guid=vdisk.guid)
def create(self, name, size, vpool_guid, storagerouter_guid, pagecache_ratio=1.0, cache_quota=None):
    """
    Create a new vdisk
    :param name: Name of the new vdisk
    :type name: str
    :param size: Size of virtual disk in bytes
    :type size: int
    :param vpool_guid: Guid of vPool to create new vdisk on
    :type vpool_guid: str
    :param storagerouter_guid: Guid of the storagerouter to assign disk to
    :type storagerouter_guid: str
    :param pagecache_ratio: Ratio (0 < x <= 1) of the pagecache size related to the size
    :type pagecache_ratio: float
    :param cache_quota: Maximum caching space(s) the new volume can consume (in Bytes) per cache type.
    :type cache_quota: dict
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    if not re.match(VDisk.VDISK_NAME_REGEX, name):
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Provided name did not match with the vDisk name regex')
    storagerouter = StorageRouter(storagerouter_guid)
    # The vDisk must be created on the storagedriver linking this StorageRouter to the vPool
    storagedriver = next((sd for sd in storagerouter.storagedrivers if sd.vpool_guid == vpool_guid), None)
    if storagedriver is None:
        raise HttpNotAcceptableException(error='impossible_request',
                                         error_description='No storagedriver found for vPool: {0} and StorageRouter: {1}'.format(vpool_guid, storagerouter_guid))
    return VDiskController.create_new.delay(volume_name=name,
                                            volume_size=size,
                                            storagedriver_guid=storagedriver.guid,
                                            pagecache_ratio=pagecache_ratio,
                                            cache_quota=cache_quota)
def destroy(self, domain):
    """
    Deletes a Domain
    :param domain: The domain to delete
    :type domain: Domain
    :return: None
    :rtype: None
    """
    # A Domain can only be removed once nothing references it anymore
    still_in_use = (len(domain.storagerouters) > 0
                    or len(domain.backends) > 0
                    or len(domain.vdisks_dtl) > 0)
    if still_in_use:
        raise HttpNotAcceptableException(error='in_use',
                                         error_description='The given Domain is still in use')
    domain.delete()
def set_as_template(self, vdisk):
    """
    Sets a vDisk as template
    :param vdisk: Guid of the virtual disk to set as template
    :type vdisk: VDisk
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    # A vDisk with clones cannot be converted into a template
    clone_count = len(vdisk.child_vdisks)
    if clone_count > 0:
        raise HttpNotAcceptableException(error='impossible_request',
                                         error_description='vDisk has clones')
    return VDiskController.set_as_template.delay(vdisk_guid=vdisk.guid)
def delete(self, vdisk):
    """
    Delete a given vDisk
    :param vdisk: The vDisk to delete
    :type vdisk: VDisk
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    # Deleting a vDisk that still has clones would orphan them
    clone_count = len(vdisk.child_vdisks)
    if clone_count > 0:
        raise HttpNotAcceptableException(error='impossible_request',
                                         error_description='vDisk has clones')
    return VDiskController.delete.delay(vdisk_guid=vdisk.guid)
def list(self, discover=False, ip=None, node_id=None):
    """
    Lists all available ALBA Nodes
    :param discover: If True and IP provided, return list of single ALBA node, If True and no IP provided, return all ALBA nodes else return modeled ALBA nodes
    :type discover: bool
    :param ip: IP of ALBA node to retrieve
    :type ip: str
    :param node_id: ID of the ALBA node
    :type node_id: str
    :return: A list of ALBA nodes
    :rtype: ovs.dal.datalist.DataList
    """
    if discover is False and (ip is not None or node_id is not None):
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Discover is mutually exclusive with IP and nodeID')
    # IP and node_id must be passed together (or both omitted)
    if (ip is None) != (node_id is None):
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Both IP and nodeID need to be specified')
    if discover is False:
        # No discovery requested: return the modeled nodes
        return AlbaNodeList.get_albanodes()
    # Discover nodes and wrap them in a pre-executed DataList
    discovered = self._discover_nodes(ip=ip, node_id=node_id)
    node_list = DataList(AlbaNode)
    node_list._executed = True
    node_list._guids = discovered.keys()
    node_list._objects = discovered
    node_list._data = dict((node.guid, {'guid': node.guid, 'data': node._data})
                           for node in discovered.values())
    return node_list
def link_alba_backends(self, albabackend, metadata, local_storagerouter, request):
    """
    Link a GLOBAL ALBA Backend to a LOCAL or another GLOBAL ALBA Backend
    :param albabackend: ALBA backend to link another ALBA Backend to
    :type albabackend: AlbaBackend
    :param metadata: Metadata about the linked ALBA Backend
    :type metadata: dict
    :param local_storagerouter: The local storagerouter
    :type local_storagerouter: StorageRouter
    :param request: Raw request
    :type request: Request
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    if 'backend_connection_info' not in metadata:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Invalid metadata passed')
    connection_info = metadata['backend_connection_info']
    if connection_info['host'] == '':
        # Empty host means 'link locally': fill in the caller's internal client credentials
        internal_clients = [candidate for candidate in request.client.user.clients
                            if candidate.ovs_type == 'INTERNAL' and candidate.grant_type == 'CLIENT_CREDENTIALS']
        if not internal_clients:
            raise HttpNotAcceptableException(error='invalid_data',
                                             error_description='Invalid metadata passed')
        client = internal_clients[-1]  # Matches previous behaviour: the last matching client wins
        connection_info.update({'client_id': client.client_id,
                                'client_secret': client.client_secret,
                                'host': local_storagerouter.ip,
                                'port': 443})
    return AlbaController.link_alba_backends.s(alba_backend_guid=albabackend.guid,
                                               metadata=metadata).apply_async(queue='ovs_masters')
def scrub_multiple_vdisks(self, vpool, vdisk_guids=None):
    """
    Scrubs the specified vDisks or all vDisks of the vPool if no guids are passed in
    :param vpool: The vPool to which the vDisks belong to scrub
    :type vpool: ovs.dal.hybrids.vpool.VPool
    :param vdisk_guids: The guids of the vDisks to scrub
    :type vdisk_guids: list
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    requested_guids = vdisk_guids if vdisk_guids is not None else []
    # Every requested guid must belong to this vPool
    foreign_guids = set(requested_guids) - set(vpool.vdisks_guids)
    if foreign_guids:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Some of the vDisks specified do not belong to this vPool')
    # NOTE(review): an empty list is passed through as-is; execute_scrub is presumably
    # expected to interpret that as 'scrub everything' - confirm against the controller
    return GenericController.execute_scrub.delay(vdisk_guids=requested_guids, manual=True)
def add_units(self, albabackend, osds):
    """
    Add storage units to the backend and register with alba nsm
    DEPRECATED API call - Use 'add_osds' instead
    :param albabackend: ALBA backend to add units to
    :type albabackend: AlbaBackend
    :param osds: Dict of osd_id as key, disk_id as value
    :type osds: Dict
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    # Backwards-compatible mapping of the old {osd_id: disk_id} payload onto slots
    osd_type = 'ASD'
    osd_info = []
    stack = None
    for osd_id, disk_alias in osds.iteritems():
        slot_id = disk_alias.split('/')[-1]
        # add_units targets a single ALBA Node, so the stack only has to be resolved once
        if stack is None:
            for alba_node in AlbaNodeList.get_albanodes():
                candidate_stack = alba_node.stack
                if slot_id in candidate_stack:
                    stack = candidate_stack
                    break
            if stack is None:
                raise HttpNotAcceptableException(error='stack_not_found',
                                                 error_description='Could not find the matching stack for slot with ID {0}'.format(slot_id))
        osd = stack[slot_id]['osds'].get(osd_id)
        if osd is None:
            raise HttpNotFoundException(error='osd_not_found',
                                        error_description='Could not find OSD {0} on Slot {1}'.format(osd_id, slot_id))
        osd_info.append({'slot_id': slot_id,
                         'osd_type': osd_type,
                         'ips': osd['ips'],
                         'port': osd['port']})
    return AlbaController.add_osds.s(albabackend.guid, osd_info).apply_async(queue='ovs_masters')
def create(self, request):
    """
    Creates a Backend
    :param request: The raw request
    :type request: Request
    """
    backend = FullSerializer(Backend, instance=Backend(), data=request.DATA).deserialize()
    # Backend names must be unique
    if BackendList.get_by_name(backend.name) is not None:
        raise HttpNotAcceptableException(error='duplicate',
                                         error_description='Backend with this name already exists')
    backend.save()
    return backend
def create(self, request, contents=None):
    """
    Creates a new Domain
    :param request: The raw request:
    :type request: Request
    :param contents: Requested contents (serializer hint)
    :type contents: str
    """
    # The serializer expects a list of contents, the API passes a comma-separated string
    requested_contents = contents.split(',') if contents is not None else None
    serializer = FullSerializer(Domain, contents=requested_contents, instance=Domain(), data=request.DATA)
    domain = serializer.deserialize()
    # Domain names must be unique
    if len(DomainList.get_by_name(domain.name)) > 0:
        raise HttpNotAcceptableException(error='duplicate',
                                         error_description='A Domain with the given name already exists')
    domain.save()
    return domain
def reset_asd(self, albanode, asd_id, safety):
    """
    Removes and re-adds an ASD
    DEPRECATED API call - Use 'reset_osd' instead
    :param albanode: ALBA node to remove a disk from
    :type albanode: ovs.dal.hybrids.albanode.AlbaNode
    :param asd_id: ASD ID to reset
    :type asd_id: str
    :param safety: Safety to maintain
    :type safety: dict
    :return: Celery async task result
    :rtype: CeleryTask
    """
    # Safety is mandatory: without it the reset could drop below the backend policy
    if safety is None:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Safety must be passed')
    # Delegates to the new OSD-based implementation
    return AlbaNodeController.reset_osd.delay(albanode.guid, asd_id, safety)
def clear_slot(slot_id):
    # type: (str) -> None
    """
    Clears a slot
    :param slot_id: Identifier of the slot
    :type slot_id: str
    :return: None
    :rtype: NoneType
    """
    try:
        disk = DiskList.get_by_alias(slot_id)
    except ObjectNotFoundException:
        # Best effort: a vanished disk is logged, not treated as an error
        API._logger.warning('Disk with ID {0} is no longer present (or cannot be managed)'.format(slot_id))
        return None
    if disk.available is True:
        raise HttpNotAcceptableException(error='disk_not_configured',
                                         error_description='Disk not yet configured')
    with file_mutex('disk_{0}'.format(slot_id)):
        # Remove all ASDs; remember the last failure so it can be re-raised afterwards
        last_error = None
        for asd in disk.asds:
            try:
                ASDController.remove_asd(asd=asd)
            except Exception as error:
                last_error = error
        disk = Disk(disk.id)  # Reload to observe the removals
        if len(disk.asds) == 0:
            DiskController.clean_disk(disk=disk)
        elif last_error is not None:
            raise last_error
        else:
            raise RuntimeError('Still some ASDs configured on Disk {0}'.format(slot_id))
def set_maintenance_config(self, albabackend, maintenance_config):
    # type: (AlbaBackend, dict) -> None
    """
    Set the maintenance config for the Backend
    :param albabackend: ALBA Backend to set the maintenance config for
    :type albabackend: ovs.dal.hybrids.albabackend.AlbaBackend
    :param maintenance_config: Maintenance config as it should be set
    Possible keys:
    - auto_cleanup_deleted_namespaces: Number of days to wait before cleaning up.
      Setting to 0 means disabling the auto cleanup and always clean up a namespace after removing it (int)
    :type maintenance_config: dict
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    # The whole config is sent through the API but only this single setting is used today
    days = maintenance_config.get('auto_cleanup_deleted_namespaces')
    if not isinstance(days, int) or days < 0:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description="'auto_cleanup_deleted_namespaces' should be a positive integer or 0")
    return AlbaController.set_auto_cleanup(alba_backend_guid=albabackend.guid,
                                           days=days)
def add_vpool(self, call_parameters, local_storagerouter, request):
    """
    Adds a vPool to a given StorageRouter
    :param call_parameters: A complex (JSON encoded) dictionary containing all various parameters to create the vPool
    :type call_parameters: dict
    :param local_storagerouter: StorageRouter on which the call is executed
    :type local_storagerouter: ovs.dal.hybrids.storagerouter.StorageRouter
    :param request: The raw request
    :type request: Request
    :return: Asynchronous result of a CeleryTask
    :rtype: celery.result.AsyncResult
    """
    def _missing_connection_info(info, none_is_missing=False):
        # Connection info is considered missing when the host is absent or empty
        if none_is_missing is True and info is None:
            return True
        else:
            return 'host' not in info or info['host'] in ['', None]

    def _fill_local_connection_info(client, info):
        # Point the connection info at this cluster, using the internal client credentials
        info['client_id'] = client.client_id
        info['client_secret'] = client.client_secret
        info['host'] = local_storagerouter.ip
        info['port'] = 443
        info['local'] = True
        return info

    # API backwards compatibility
    if 'backend_connection_info' in call_parameters:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Invalid data passed: "backend_connection_info" is deprecated')
    # API client translation (cover "local backend" selection in GUI)
    if 'backend_info' not in call_parameters or 'connection_info' not in call_parameters or 'config_params' not in call_parameters:
        raise HttpNotAcceptableException(error='invalid_data',
                                         error_description='Invalid call_parameters passed')
    connection_info = call_parameters['connection_info']
    # '_aa' keys were renamed to '_fc' - keep accepting the old names
    for legacy_key, current_key in (('backend_info_aa', 'backend_info_fc'),
                                    ('connection_info_aa', 'connection_info_fc')):
        if legacy_key in call_parameters:
            call_parameters[current_key] = call_parameters.pop(legacy_key)
    connection_info_fc = call_parameters.get('connection_info_fc')
    connection_info_bc = call_parameters.get('connection_info_bc')
    if (_missing_connection_info(connection_info)
            or _missing_connection_info(connection_info_fc, True)
            or _missing_connection_info(connection_info_bc, True)):
        internal_client = None
        for candidate in request.client.user.clients:
            if candidate.ovs_type == 'INTERNAL' and candidate.grant_type == 'CLIENT_CREDENTIALS':
                internal_client = candidate
        if internal_client is None:
            raise HttpNotAcceptableException(error='invalid_data',
                                             error_description='Invalid call_parameters passed')
        if _missing_connection_info(connection_info):
            call_parameters['connection_info'] = _fill_local_connection_info(internal_client, connection_info)
        if connection_info_fc is not None and _missing_connection_info(connection_info_fc):
            call_parameters['connection_info_fc'] = _fill_local_connection_info(internal_client, connection_info_fc)
        if connection_info_bc is not None and _missing_connection_info(connection_info_bc):
            call_parameters['connection_info_bc'] = _fill_local_connection_info(internal_client, connection_info_bc)
    if 'caching_info' not in call_parameters:
        # Fold the flat legacy caching parameters into the 'caching_info' structure
        call_parameters['caching_info'] = {'cache_quota_bc': call_parameters.pop('cache_quota_bc', None),
                                           'cache_quota_fc': call_parameters.pop('cache_quota_fc', None),
                                           'block_cache_on_read': call_parameters.pop('block_cache_on_read', False),
                                           'block_cache_on_write': call_parameters.pop('block_cache_on_write', False),
                                           'fragment_cache_on_read': call_parameters.pop('fragment_cache_on_read', False),
                                           'fragment_cache_on_write': call_parameters.pop('fragment_cache_on_write', False)}
    # Drop parameters that are no longer supported
    for obsolete_key in ('type', 'readcache_size'):
        call_parameters.pop(obsolete_key, None)
    for obsolete_config_key in ('dedupe_mode', 'cache_strategy'):
        call_parameters['config_params'].pop(obsolete_config_key, None)
    # Finally, launching the add_vpool task
    return VPoolController.add_vpool.delay(VPoolController, call_parameters)
def new_function(*args, **kwargs):
    """
    Wrapped function
    Parses the API request (version negotiation, instance loading, kwargs
    building), runs the optional validator and finally calls the decorated
    function, recording how long the parsing took.
    Relies on closure variables of the enclosing decorator: f, validator,
    regex, min_version, max_version, object_type, logger.
    """
    request = _find_request(args)
    start = time.time()
    new_kwargs = {}
    validation_new_kwargs = {}
    # Find out the arguments of the decorated function
    if validator is not None:
        f_info = inspect.getargspec(validator)
        if f_info.defaults is None:
            # No defaults: everything after 'self' is mandatory
            validation_mandatory_vars = f_info.args[1:]
            validation_optional_vars = []
        else:
            # Arguments with defaults (the trailing len(defaults) ones) are optional
            validation_mandatory_vars = f_info.args[1:-len(f_info.defaults)]
            validation_optional_vars = f_info.args[len(validation_mandatory_vars) + 1:]
    else:
        validation_mandatory_vars = []
        validation_optional_vars = []
    # Check version: the requested version is encoded in the Accept header
    version_match = regex.match(request.META['HTTP_ACCEPT'])
    if version_match is not None:
        version = version_match.groupdict()['version']
    else:
        version = settings.VERSION[-1]
    raw_version = version
    # Clamp the supported window to what both the decorator and the node support
    versions = (max(min_version, settings.VERSION[0]), min(max_version, settings.VERSION[-1]))
    if version == '*':  # If accepting all versions, it defaults to the highest one
        version = versions[1]
    version = int(version)
    if version < versions[0] or version > versions[1]:
        logger.warning('API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version))
        raise HttpUpgradeNeededException(error='invalid_version',
                                         error_description='API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version))
    # Load some information: resolve the 'pk' URL kwarg into a DAL instance
    instance = None
    if 'pk' in kwargs and object_type is not None:
        try:
            instance = object_type(kwargs['pk'])
        except ObjectNotFoundException:
            raise HttpNotFoundException(error='object_not_found',
                                        error_description='The requested object could not be found')
    # Build new kwargs: once for the decorated function, once for the validator.
    # The metadata lists are copied ([:]) because entries are removed below.
    for _mandatory_vars, _optional_vars, _new_kwargs in [(f.ovs_metadata['load']['mandatory'][:], f.ovs_metadata['load']['optional'][:], new_kwargs),
                                                         (validation_mandatory_vars, validation_optional_vars, validation_new_kwargs)]:
        # Reserved keywords are injected directly instead of parsed from the request
        if 'version' in _mandatory_vars:
            _new_kwargs['version'] = version
            _mandatory_vars.remove('version')
        if 'raw_version' in _mandatory_vars:
            _new_kwargs['raw_version'] = raw_version
            _mandatory_vars.remove('raw_version')
        if 'request' in _mandatory_vars:
            _new_kwargs['request'] = request
            _mandatory_vars.remove('request')
        if instance is not None:
            # The instance parameter is named after its lowercased type (e.g. 'vdisk')
            typename = object_type.__name__.lower()
            if typename in _mandatory_vars:
                _new_kwargs[typename] = instance
                _mandatory_vars.remove(typename)
        if 'local_storagerouter' in _mandatory_vars:
            storagerouter = StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)
            _new_kwargs['local_storagerouter'] = storagerouter
            _mandatory_vars.remove('local_storagerouter')
        # The rest of the mandatory parameters come from URL kwargs, POST data or query params
        post_data = request.DATA if hasattr(request, 'DATA') else request.POST
        get_data = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
        for name in _mandatory_vars:
            if name in kwargs:
                _new_kwargs[name] = kwargs[name]
            else:
                if name not in post_data:
                    if name not in get_data:
                        # Mandatory parameter missing everywhere: reject the request
                        raise HttpNotAcceptableException(error='invalid_data',
                                                         error_description='Invalid data passed: {0} is missing'.format(name))
                    _new_kwargs[name] = _try_parse(get_data[name])
                else:
                    _new_kwargs[name] = _try_parse(post_data[name])
        # Try to fill optional parameters; missing ones keep the function's defaults
        for name in _optional_vars:
            if name in kwargs:
                _new_kwargs[name] = kwargs[name]
            else:
                if name in post_data:
                    _new_kwargs[name] = _try_parse(post_data[name])
                elif name in get_data:
                    _new_kwargs[name] = _try_parse(get_data[name])
    # Execute validator before the actual call; it may raise to reject the request
    if validator is not None:
        validator(args[0], **validation_new_kwargs)
    # Parsing time is measured up to (not including) the decorated call
    duration = time.time() - start
    # Call the function
    result = f(args[0], **new_kwargs)
    if isinstance(result, OVSResponse):
        result.timings['parsing'] = [duration, 'Request parsing']
    return result