def parse_error(self, msg=None):
    error_msg = 'Unknown error'

    try:
        # Azure does give some meaningful errors, but is inconsistent
        # Some APIs respond with an XML error. Others just dump HTML
        body = self.parse_body()
        if isinstance(body, ElementTree.Element):
            code = body.findtext(fixxpath(xpath='Code'))
            message = body.findtext(fixxpath(xpath='Message'))
            message = message.split('\n')[0]
            error_msg = '%s: %s' % (code, message)
    except MalformedResponseError:
        pass

    if msg:
        error_msg = '%s - %s' % (msg, error_msg)

    if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
        raise InvalidCredsError(error_msg)

    raise LibcloudError('%s Status code: %d.' % (error_msg, self.status),
                        driver=self)
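# All of the snippets in this section rely on a fixxpath() helper (and, in
# several drivers, a findtext() companion) to qualify XPath expressions with
# an XML namespace before handing them to ElementTree. A minimal sketch,
# assuming the call signatures used throughout this section
# (fixxpath(xpath, namespace=None) and findtext(element, xpath, namespace)):

def fixxpath(xpath, namespace=None):
    # ElementTree expects the namespace to be embedded in the xpath itself,
    # e.g. '{urn:...}networkInfo/{urn:...}primaryNic', so prefix every
    # path segment with the supplied namespace.
    if not namespace:
        return xpath
    return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')])


def findtext(element, xpath, namespace=None):
    # Convenience wrapper: namespace-qualify the xpath, then read the text.
    return element.findtext(fixxpath(xpath=xpath, namespace=namespace))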
def iterate_container_objects(self, container):
    """
    @inherits: :class:`StorageDriver.iterate_container_objects`
    """
    params = {'restype': 'container',
              'comp': 'list',
              'maxresults': RESPONSES_PER_REQUEST,
              'include': 'metadata'}

    container_path = self._get_container_path(container)

    while True:
        response = self.connection.request(container_path,
                                           params=params)

        if response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value=None,
                                             driver=self,
                                             container_name=container.name)
        elif response.status != httplib.OK:
            raise LibcloudError('Unexpected status code: %s' %
                                (response.status), driver=self)

        body = response.parse_body()
        blobs = body.find(fixxpath(xpath='Blobs'))
        blobs = blobs.findall(fixxpath(xpath='Blob'))

        for blob in blobs:
            yield self._xml_to_object(container, blob)

        params['marker'] = body.findtext('NextMarker')
        if not params['marker']:
            break
def _update_ipv6(self, node):
    """
    Retrieves the ipv6 address for this node

    This is a hack. Code here should really go to the Libcloud driver
    in libcloud.compute.drivers.dimensiondata.py _to_node()
    """
    try:
        element = self.region.connection.request_with_orgId_api_2(
            "server/server/%s" % node.id).object

        has_network_info = element.find(
            fixxpath("networkInfo", TYPES_URN)) is not None

        ipv6 = (
            element.find(fixxpath("networkInfo/primaryNic", TYPES_URN))
            .get("ipv6")
            if has_network_info
            else element.find(fixxpath("nic", TYPES_URN)).get("ipv6")
        )

        node.extra["ipv6"] = ipv6

    except Exception as feedback:
        if "RESOURCE_NOT_FOUND" in str(feedback):
            node.extra["ipv6"] = ""
        else:
            logging.info("Error: unable to retrieve IPv6 addresses")
            logging.error(str(feedback))
def _to_firewall_address(self, element):
    ip = element.find(fixxpath('ip', TYPES_URN))
    port = element.find(fixxpath('port', TYPES_URN))
    return DimensionDataFirewallAddress(
        any_ip=ip.get('address') == 'ANY',
        ip_address=ip.get('address'),
        ip_prefix_size=ip.get('prefixSize'),
        port_begin=port.get('begin') if port is not None else None,
        port_end=port.get('end') if port is not None else None
    )
def _xml_to_container(self, node):
    """
    Converts a container XML node to a container instance

    :param node: XML info of the container
    :type node: :class:`xml.etree.ElementTree.Element`

    :return: A container instance
    :rtype: :class:`Container`
    """
    name = node.findtext(fixxpath(xpath='Name'))
    props = node.find(fixxpath(xpath='Properties'))
    metadata = node.find(fixxpath(xpath='Metadata'))

    extra = {
        'url': node.findtext(fixxpath(xpath='Url')),
        'last_modified': node.findtext(fixxpath(xpath='Last-Modified')),
        'etag': props.findtext(fixxpath(xpath='Etag')),
        'lease': {
            'status': props.findtext(fixxpath(xpath='LeaseStatus')),
            'state': props.findtext(fixxpath(xpath='LeaseState')),
            'duration': props.findtext(fixxpath(xpath='LeaseDuration')),
        },
        'meta_data': {}
    }

    # Element.getchildren() was removed in Python 3.9; iterate directly.
    for meta in list(metadata):
        extra['meta_data'][meta.tag] = meta.text

    return Container(name=name, extra=extra, driver=self)
def _to_records(self, data, zone):
    records = []
    elems = data.findall(
        fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                 namespace=NAMESPACE))
    for elem in elems:
        record_set = elem.findall(fixxpath(
            xpath='ResourceRecords/ResourceRecord',
            namespace=NAMESPACE))
        for index, record in enumerate(record_set):
            records.append(self._to_record(elem, zone, index))

    return records
def _to_alert(self, element):
    alert = element.find(fixxpath('alerting', BACKUP_NS))
    if alert is not None:
        notify_list = [
            email_addr.text
            for email_addr in alert.findall(fixxpath('emailAddress',
                                                     BACKUP_NS))
        ]
        return DimensionDataBackupClientAlert(
            trigger=element.get('trigger'),
            notify_list=notify_list
        )
    return None
def _to_records(self, data, zone):
    records = []
    elems = data.findall(
        fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                 namespace=NAMESPACE))
    for elem in elems:
        record_set = elem.findall(fixxpath(
            xpath='ResourceRecords/ResourceRecord',
            namespace=NAMESPACE))

        record_count = len(record_set)
        multiple_value_record = (record_count > 1)

        record_set_records = []

        for index, record in enumerate(record_set):
            # Records with multiple values need special handling so that
            # update works correctly
            record = self._to_record(elem=elem, zone=zone, index=index)
            record.extra['_multi_value'] = multiple_value_record

            if multiple_value_record:
                record.extra['_other_records'] = []

            record_set_records.append(record)

        # Store reference to other records so update works correctly
        if multiple_value_record:
            for index in range(0, len(record_set_records)):
                record = record_set_records[index]

                for other_index, other_record in \
                        enumerate(record_set_records):
                    if index == other_index:
                        # Skip current record
                        continue

                    extra = copy.deepcopy(other_record.extra)
                    extra.pop('_multi_value')
                    extra.pop('_other_records')

                    item = {'name': other_record.name,
                            'data': other_record.data,
                            'type': other_record.type,
                            'extra': extra}
                    record.extra['_other_records'].append(item)

        records.extend(record_set_records)

    return records
def _to_irule(self, element):
    compatible = []
    matches = element.findall(
        fixxpath('virtualListenerCompatibility', TYPES_URN))
    for match_element in matches:
        compatible.append(
            DimensionDataVirtualListenerCompatibility(
                type=match_element.get('type'),
                protocol=match_element.get('protocol', None)))
    irule_element = element.find(fixxpath('irule', TYPES_URN))
    return DimensionDataDefaultiRule(
        id=irule_element.get('id'),
        name=irule_element.get('name'),
        compatible_listeners=compatible
    )
def iterate_container_objects(self, container):
    params = {}
    last_key = None
    exhausted = False
    container_path = self._get_container_path(container)

    while not exhausted:
        if last_key:
            params['marker'] = last_key

        response = self.connection.request(container_path,
                                           params=params)

        if response.status != httplib.OK:
            raise LibcloudError('Unexpected status code: %s' %
                                (response.status), driver=self)

        objects = self._to_objs(obj=response.object,
                                xpath='Contents', container=container)
        is_truncated = response.object.findtext(fixxpath(
            xpath='IsTruncated', namespace=self.namespace)).lower()
        exhausted = (is_truncated == 'false')

        last_key = None
        for obj in objects:
            last_key = obj.name
            yield obj
def _to_vlan(self, element, locations):
    status = self._to_status(element.find(fixxpath('state', TYPES_URN)))
    location_id = element.get('datacenterId')
    location = list(filter(lambda x: x.id == location_id, locations))[0]
    ip_range = element.find(fixxpath('privateIpv4Range', TYPES_URN))
    return DimensionDataVlan(
        id=element.get('id'),
        name=findtext(element, 'name', TYPES_URN),
        description=findtext(element, 'description', TYPES_URN),
        private_ipv4_range_address=ip_range.get('address'),
        private_ipv4_range_size=ip_range.get('prefixSize'),
        location=location,
        status=status)
def _to_zones(self, data):
    zones = []
    for element in data.findall(fixxpath(xpath='HostedZones/HostedZone',
                                         namespace=NAMESPACE)):
        zones.append(self._to_zone(element))

    return zones
def _get_more(self, last_key, value_dict):
    container = value_dict['container']
    params = {}
    if last_key:
        params['marker'] = last_key

    response = self.connection.request('/%s' % (container.name),
                                       params=params)

    if response.status == httplib.OK:
        objects = self._to_objs(obj=response.object,
                                xpath='Contents', container=container)
        is_truncated = response.object.findtext(fixxpath(
            xpath='IsTruncated', namespace=self.namespace)).lower()
        exhausted = (is_truncated == 'false')

        if (len(objects) > 0):
            last_key = objects[-1].name
        else:
            last_key = None
        return objects, last_key, exhausted

    raise LibcloudError('Unexpected status code: %s' % (response.status),
                        driver=self)
def _to_record(self, elem, zone, index=0):
    name = findtext(element=elem, xpath='Name',
                    namespace=NAMESPACE)
    name = name[:-len(zone.domain) - 1]

    type = self._string_to_record_type(findtext(element=elem,
                                                xpath='Type',
                                                namespace=NAMESPACE))
    ttl = int(findtext(element=elem, xpath='TTL', namespace=NAMESPACE))

    value_elem = elem.findall(
        fixxpath(xpath='ResourceRecords/ResourceRecord',
                 namespace=NAMESPACE))[index]
    data = findtext(element=value_elem, xpath='Value',
                    namespace=NAMESPACE)

    extra = {'ttl': ttl}

    if type == 'MX':
        split = data.split()
        priority, data = split
        extra['priority'] = int(priority)
    elif type == 'SRV':
        split = data.split()
        priority, weight, port, data = split
        extra['priority'] = int(priority)
        extra['weight'] = int(weight)
        extra['port'] = int(port)

    id = ':'.join((self.RECORD_TYPE_MAP[type], name))
    record = Record(id=id, name=name, type=type, data=data, zone=zone,
                    driver=self, extra=extra)
    return record
def _to_persistence_profiles(self, object):
    profiles = []
    matches = object.findall(
        fixxpath('defaultPersistenceProfile', TYPES_URN))
    for element in matches:
        profiles.append(self._to_persistence_profile(element))
    return profiles
def _to_balancer(self, element):
    ipaddress = findtext(element, 'listenerIpAddress', TYPES_URN)
    name = findtext(element, 'name', TYPES_URN)
    port = findtext(element, 'port', TYPES_URN)
    extra = {}

    pool_element = element.find(fixxpath('pool', TYPES_URN))
    if pool_element is None:
        extra['pool_id'] = None
    else:
        extra['pool_id'] = pool_element.get('id')

    extra['network_domain_id'] = findtext(element, 'networkDomainId',
                                          TYPES_URN)

    balancer = LoadBalancer(
        id=element.get('id'),
        name=name,
        state=self._VALUE_TO_STATE_MAP.get(
            findtext(element, 'state', TYPES_URN),
            State.UNKNOWN),
        ip=ipaddress,
        port=port,
        driver=self.connection.driver,
        extra=extra
    )

    return balancer
def _to_irules(self, object):
    irules = []
    matches = object.findall(
        fixxpath('defaultIrule', TYPES_URN))
    for element in matches:
        irules.append(self._to_irule(element))
    return irules
def _to_node(self, element):
    if findtext(element, 'started', TYPES_URN) == 'true':
        state = NodeState.RUNNING
    else:
        state = NodeState.TERMINATED

    status = self._to_status(element.find(fixxpath('progress', TYPES_URN)))

    has_network_info \
        = element.find(fixxpath('networkInfo', TYPES_URN)) is not None

    extra = {
        'description': findtext(element, 'description', TYPES_URN),
        'sourceImageId': findtext(element, 'sourceImageId', TYPES_URN),
        'networkId': findtext(element, 'networkId', TYPES_URN),
        'networkDomainId':
            element.find(fixxpath('networkInfo', TYPES_URN))
            .get('networkDomainId') if has_network_info else None,
        'datacenterId': element.get('datacenterId'),
        'deployedTime': findtext(element, 'createTime', TYPES_URN),
        'cpuCount': int(findtext(element, 'cpuCount', TYPES_URN)),
        'memoryMb': int(findtext(element, 'memoryGb', TYPES_URN)) * 1024,
        'OS_id': element.find(fixxpath(
            'operatingSystem', TYPES_URN)).get('id'),
        'OS_type': element.find(fixxpath(
            'operatingSystem', TYPES_URN)).get('family'),
        'OS_displayName': element.find(fixxpath(
            'operatingSystem', TYPES_URN)).get('displayName'),
        'status': status
    }

    public_ip = findtext(element, 'publicIpAddress', TYPES_URN)

    private_ip = element.find(
        fixxpath('networkInfo/primaryNic', TYPES_URN)) \
        .get('privateIpv4') \
        if has_network_info else \
        element.find(fixxpath('nic', TYPES_URN)).get('privateIpv4')

    n = Node(id=element.get('id'),
             name=findtext(element, 'name', TYPES_URN),
             state=state,
             public_ips=[public_ip] if public_ip is not None else [],
             private_ips=[private_ip] if private_ip is not None else [],
             driver=self.connection.driver,
             extra=extra)
    return n
def _to_base_images(self, object):
    images = []
    locations = self.list_locations()

    for element in object.findall(fixxpath("image", SERVER_NS)):
        images.append(self._to_base_image(element, locations))

    return images
def _to_records(self, data, zone):
    records = []
    for elem in data.findall(
            fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                     namespace=NAMESPACE)):
        records.append(self._to_record(elem, zone))

    return records
def _to_nat_rule(self, element, network_domain):
    status = self._to_status(element.find(fixxpath('state', TYPES_URN)))
    return DimensionDataNatRule(
        id=element.get('id'),
        network_domain=network_domain,
        internal_ip=findtext(element, 'internalIp', TYPES_URN),
        external_ip=findtext(element, 'externalIp', TYPES_URN),
        status=status)
def _to_securityGroups(self, object):
    """
    Convert a list from AWS to security group objects.
    """
    groups = {}
    for el in object.findall(fixxpath(xpath='securityGroupInfo/item',
                                      namespace=NAMESPACE)):
        group = self._to_securityGroup(el)
        groups[group.name] = group
    return groups
def _upload_multipart(self, response, data, iterator, container,
                      object_name, calculate_hash=True):
    """
    Callback invoked for uploading data to S3 using Amazon's
    multipart upload mechanism

    :param response: Response object from the initial POST request
    :type response: :class:`S3RawResponse`

    :param data: Any data from the initial POST request
    :type data: ``str``

    :param iterator: The generator for fetching the upload data
    :type iterator: ``generator``

    :param container: The container owning the object to which data is
        being uploaded
    :type container: :class:`Container`

    :param object_name: The name of the object to which we are uploading
    :type object_name: ``str``

    :keyword calculate_hash: Indicates if we must calculate the data hash
    :type calculate_hash: ``bool``

    :return: A tuple of (status, checksum, bytes transferred)
    :rtype: ``tuple``
    """
    object_path = self._get_object_path(container, object_name)

    # Get the upload id from the response xml
    response.body = response.response.read()
    body = response.parse_body()
    upload_id = body.find(fixxpath(xpath='UploadId',
                                   namespace=self.namespace)).text

    try:
        # Upload the data through the iterator
        result = self._upload_from_iterator(iterator, object_path,
                                            upload_id, calculate_hash)
        (chunks, data_hash, bytes_transferred) = result

        # Commit the chunk info and complete the upload
        etag = self._commit_multipart(object_path, upload_id, chunks)
    except Exception:
        exc = sys.exc_info()[1]
        # Amazon provides a mechanism for aborting an upload.
        self._abort_multipart(object_path, upload_id)
        raise exc

    # Modify the response header of the first request. This is used
    # by other functions once the callback is done
    response.headers['etag'] = etag

    return (True, data_hash, bytes_transferred)
def _recv_wait_set(self):
    params = {'Action': 'DescribeSpotInstanceRequests'}
    object = self.conn.connection.request(self.conn.path,
                                          params=params).object
    wait_ids = []
    for elem in object.findall(fixxpath(
            xpath='spotInstanceRequestSet/item', namespace=NAMESPACE)):
        inst_id = findtext(element=elem, xpath='spotInstanceRequestId',
                           namespace=NAMESPACE)
        status = findtext(element=elem, xpath='status/code',
                          namespace=NAMESPACE)
        if (status == 'pending-evaluation' or
                status == 'pending-fulfillment'):
            wait_ids.append(inst_id)
    return wait_ids
def _to_member(self, element):
    port = findtext(element, 'port', TYPES_URN)
    if port is not None:
        port = int(port)
    pool_member = DimensionDataPoolMember(
        id=element.get('id'),
        name=element.find(fixxpath('node', TYPES_URN)).get('name'),
        status=findtext(element, 'state', TYPES_URN),
        node_id=element.find(fixxpath('node', TYPES_URN)).get('id'),
        ip=element.find(fixxpath('node', TYPES_URN)).get('ipAddress'),
        port=port
    )
    return pool_member
def _list_secondary_interfaces(self, node):
    """
    Retrieves the list of secondary interfaces

    This is a hack. Code here should really go to the Libcloud driver
    in libcloud.compute.drivers.dimensiondata.py _to_node()
    """
    element = self.region.connection.request_with_orgId_api_2(
        "server/server/%s" % node.id).object

    if element.find(fixxpath("networkInfo", TYPES_URN)) is None:
        return []

    interfaces = []
    items = element.findall(
        fixxpath("networkInfo/additionalNic", TYPES_URN))
    for item in items:
        interfaces.append({"id": item.get("id"),
                           "network": item.get("vlanName")})

    return interfaces
def _to_backup_job(self, element, target, client_id):
    running_job = element.find(fixxpath('runningJob', BACKUP_NS))
    if running_job is not None:
        return BackupTargetJob(
            id=running_job.get('id'),
            status=running_job.get('status'),
            progress=int(running_job.get('percentageComplete')),
            driver=self.connection.driver,
            target=target,
            extra={'clientId': client_id}
        )
    return None
def _to_firewall_rule(self, element, locations, network_domain):
    status = self._to_status(element.find(fixxpath('state', TYPES_URN)))
    location_id = element.get('datacenterId')
    location = list(filter(lambda x: x.id == location_id, locations))[0]

    return DimensionDataFirewallRule(
        id=element.get('id'),
        network_domain=network_domain,
        name=findtext(element, 'name', TYPES_URN),
        action=findtext(element, 'action', TYPES_URN),
        ip_version=findtext(element, 'ipVersion', TYPES_URN),
        protocol=findtext(element, 'protocol', TYPES_URN),
        enabled=findtext(element, 'enabled', TYPES_URN),
        source=self._to_firewall_address(
            element.find(fixxpath('source', TYPES_URN))),
        destination=self._to_firewall_address(
            element.find(fixxpath('destination', TYPES_URN))),
        location=location,
        status=status)
def iterate_containers(self):
    """
    @inherits: :class:`StorageDriver.iterate_containers`
    """
    params = {'comp': 'list',
              'maxresults': RESPONSES_PER_REQUEST,
              'include': 'metadata'}

    while True:
        response = self.connection.request('/', params)
        if response.status != httplib.OK:
            raise LibcloudError('Unexpected status code: %s' %
                                (response.status), driver=self)

        body = response.parse_body()
        containers = body.find(fixxpath(xpath='Containers'))
        containers = containers.findall(fixxpath(xpath='Container'))

        for container in containers:
            yield self._xml_to_container(container)

        params['marker'] = body.findtext('NextMarker')
        if not params['marker']:
            break
def _list_inaccessible_nodes(self, node_list):
    '''Returns node ids which aren't accessible via ssh yet'''
    params = {'Action': 'DescribeInstanceStatus'}
    for index in range(len(node_list)):
        params['InstanceId.%d' % (index + 1,)] = node_list[index].id

    object = self.conn.connection.request(self.conn.path,
                                          params=params).object
    bad_ids = []
    for elem in object.findall(fixxpath(xpath='instanceStatusSet/item',
                                        namespace=NAMESPACE)):
        inst_id = findtext(element=elem, xpath='instanceId',
                           namespace=NAMESPACE)
        sys_stat = findtext(element=elem, xpath='systemStatus/status',
                            namespace=NAMESPACE)
        inst_stat = findtext(element=elem, xpath='instanceStatus/status',
                             namespace=NAMESPACE)
        if sys_stat != 'ok' and inst_stat != 'ok':
            bad_ids.append(inst_id)
    return bad_ids
def finder(node, text):
    return node.findtext(fixxpath(xpath=text,
                                  namespace=self.namespace))
def _to_spot_requests(self, object, xpath):
    return [self._to_spot_request(el) for el in object.findall(
        fixxpath(xpath=xpath, namespace=NAMESPACE))]
def _to_locations(self, object):
    locations = []
    for element in object.findall(fixxpath('datacenter', DATACENTER_NS)):
        locations.append(self._to_location(element))

    return locations
def _to_pools(self, object):
    pools = []
    for element in object.findall(fixxpath("pool", TYPES_URN)):
        pools.append(self._to_pool(element))

    return pools
def _xml_to_object(self, container, blob):
    """
    Converts a BLOB XML node to an object instance

    :param container: Instance of the container holding the blob
    :type container: :class:`Container`

    :param blob: XML info of the blob
    :type blob: :class:`xml.etree.ElementTree.Element`

    :return: An object instance
    :rtype: :class:`Object`
    """
    name = blob.findtext(fixxpath(xpath="Name"))
    props = blob.find(fixxpath(xpath="Properties"))
    metadata = blob.find(fixxpath(xpath="Metadata"))
    etag = props.findtext(fixxpath(xpath="Etag"))
    size = int(props.findtext(fixxpath(xpath="Content-Length")))

    extra = {
        "content_type": props.findtext(fixxpath(xpath="Content-Type")),
        "etag": etag,
        "md5_hash": props.findtext(fixxpath(xpath="Content-MD5")),
        "last_modified": props.findtext(fixxpath(xpath="Last-Modified")),
        "url": blob.findtext(fixxpath(xpath="Url")),
        "hash": props.findtext(fixxpath(xpath="Etag")),
        "lease": {
            "status": props.findtext(fixxpath(xpath="LeaseStatus")),
            "state": props.findtext(fixxpath(xpath="LeaseState")),
            "duration": props.findtext(fixxpath(xpath="LeaseDuration")),
        },
        "content_encoding": props.findtext(
            fixxpath(xpath="Content-Encoding")),
        "content_language": props.findtext(
            fixxpath(xpath="Content-Language")),
        "blob_type": props.findtext(fixxpath(xpath="BlobType")),
    }

    if extra["md5_hash"]:
        value = binascii.hexlify(base64.b64decode(b(extra["md5_hash"])))
        value = value.decode("ascii")
        extra["md5_hash"] = value

    meta_data = {}
    if metadata is not None:
        for meta in list(metadata):
            meta_data[meta.tag] = meta.text

    return Object(
        name=name,
        size=size,
        hash=etag,
        meta_data=meta_data,
        extra=extra,
        container=container,
        driver=self,
    )
def _to_health_monitors(self, object):
    monitors = []
    matches = object.findall(fixxpath('defaultHealthMonitor', TYPES_URN))
    for element in matches:
        monitors.append(self._to_health_monitor(element))
    return monitors
def _to_balancers(self, object):
    loadbalancers = []
    for element in object.findall(fixxpath("virtualListener", TYPES_URN)):
        loadbalancers.append(self._to_balancer(element))

    return loadbalancers
def _to_regions(self, object, xpath):
    return [self._to_region(el) for el in object.findall(
        fixxpath(xpath=xpath, namespace=NAMESPACE))]
def ex_iterate_multipart_uploads(
    self,
    container,
    prefix=None,
    delimiter=None,
    max_uploads=MAX_UPLOADS_PER_RESPONSE,
):
    """
    Extension method for listing all in-progress OSS multipart uploads.

    Each multipart upload which has not been committed or aborted is
    considered in-progress.

    :param container: The container holding the uploads
    :type container: :class:`Container`

    :keyword prefix: Print only uploads of objects with this prefix
    :type prefix: ``str``

    :keyword delimiter: The object/key names are grouped based on
        being split by this delimiter
    :type delimiter: ``str``

    :keyword max_uploads: The max upload items returned for one request
    :type max_uploads: ``int``

    :return: A generator of OSSMultipartUpload instances.
    :rtype: ``generator`` of :class:`OSSMultipartUpload`
    """
    if not self.supports_multipart_upload:
        raise LibcloudError("Feature not supported", driver=self)

    request_path = "/?uploads"
    params = {"max-uploads": max_uploads}

    if prefix:
        params["prefix"] = prefix

    if delimiter:
        params["delimiter"] = delimiter

    def finder(node, text):
        return node.findtext(fixxpath(xpath=text,
                                      namespace=self.namespace))

    while True:
        response = self.connection.request(request_path, params=params,
                                           container=container)

        if response.status != httplib.OK:
            raise LibcloudError(
                "Error fetching multipart uploads. "
                "Got code: %s" % response.status,
                driver=self,
            )

        body = response.parse_body()
        # pylint: disable=maybe-no-member
        for node in body.findall(
                fixxpath(xpath="Upload", namespace=self.namespace)):
            key = finder(node, "Key")
            upload_id = finder(node, "UploadId")
            initiated = finder(node, "Initiated")

            yield OSSMultipartUpload(key, upload_id, initiated)

        # Check if this is the last entry in the listing
        # pylint: disable=maybe-no-member
        is_truncated = body.findtext(
            fixxpath(xpath="IsTruncated", namespace=self.namespace))

        if is_truncated.lower() == "false":
            break

        # Provide params for the next request
        upload_marker = body.findtext(
            fixxpath(xpath="NextUploadIdMarker", namespace=self.namespace))
        key_marker = body.findtext(
            fixxpath(xpath="NextKeyMarker", namespace=self.namespace))

        params["key-marker"] = key_marker
        params["upload-id-marker"] = upload_marker
def _to_images(self, object):
    return [self._to_image(el) for el in object.findall(
        fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))]
def _to_region(self, element):
    name = element.find(fixxpath('regionName', namespace=NAMESPACE)).text
    endpoint = element.find(fixxpath('regionEndpoint',
                                     namespace=NAMESPACE)).text
    return ExEC2Region(name, endpoint)
def _to_nodes(self, object, xpath, groups=None):
    return [self._to_node(el, groups=groups)
            for el in object.findall(fixxpath(xpath=xpath,
                                              namespace=NAMESPACE))]
def _to_containers(self, obj, xpath):
    return [self._to_container(element) for element in
            obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
def _to_nodes(self, object):
    node_elements = object.findall(fixxpath('DeployedServer', SERVER_NS))
    node_elements.extend(
        object.findall(fixxpath('PendingDeployServer', SERVER_NS)))
    return [self._to_node(el) for el in node_elements]
def _to_containers(self, obj, xpath):
    for element in obj.findall(fixxpath(xpath=xpath,
                                        namespace=self.namespace)):
        yield self._to_container(element)
def _to_schedule_policies(self, object):
    elements = object.findall(fixxpath("schedulePolicy", BACKUP_NS))
    return [self._to_schedule_policy(el) for el in elements]
def _to_objs(self, obj, xpath, container):
    return [self._to_obj(element, container) for element in
            obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
def _to_client_types(self, object):
    elements = object.findall(fixxpath("backupClientType", BACKUP_NS))
    return [self._to_client_type(el) for el in elements]
def _to_nodes(self, object):
    nodes = []
    for element in object.findall(fixxpath("node", TYPES_URN)):
        nodes.append(self._to_node(element))

    return nodes
def _to_nodes(self, object):
    node_elements = object.findall(fixxpath('Server', TYPES_URN))
    return [self._to_node(el) for el in node_elements]
def _to_members(self, object):
    members = []
    for element in object.findall(fixxpath("poolMember", TYPES_URN)):
        members.append(self._to_member(element))

    return members
def _to_clients(self, object, target):
    elements = object.findall(fixxpath("backupClient", BACKUP_NS))
    return [self._to_client(el, target) for el in elements]
def _to_locations(self, object):
    locations = []
    for element in object.findall(fixxpath('datacenter', TYPES_URN)):
        locations.append(self._to_location(element))

    return locations
def ex_iterate_multipart_uploads(self, container, prefix=None,
                                 delimiter=None):
    """
    Extension method for listing all in-progress S3 multipart uploads.

    Each multipart upload which has not been committed or aborted is
    considered in-progress.

    :param container: The container holding the uploads
    :type container: :class:`Container`

    :keyword prefix: Print only uploads of objects with this prefix
    :type prefix: ``str``

    :keyword delimiter: The object/key names are grouped based on
        being split by this delimiter
    :type delimiter: ``str``

    :return: A generator of S3MultipartUpload instances.
    :rtype: ``generator`` of :class:`S3MultipartUpload`
    """
    if not self.supports_s3_multipart_upload:
        raise LibcloudError('Feature not supported', driver=self)

    # Get the data for a specific container
    request_path = '%s/?uploads' % (self._get_container_path(container))
    params = {'max-uploads': RESPONSES_PER_REQUEST}

    if prefix:
        params['prefix'] = prefix

    if delimiter:
        params['delimiter'] = delimiter

    def finder(node, text):
        return node.findtext(fixxpath(xpath=text,
                                      namespace=self.namespace))

    while True:
        response = self.connection.request(request_path, params=params)

        if response.status != httplib.OK:
            raise LibcloudError('Error fetching multipart uploads. '
                                'Got code: %s' % response.status,
                                driver=self)

        body = response.parse_body()
        # pylint: disable=maybe-no-member
        for node in body.findall(
                fixxpath(xpath='Upload', namespace=self.namespace)):
            initiator = node.find(
                fixxpath(xpath='Initiator', namespace=self.namespace))
            owner = node.find(
                fixxpath(xpath='Owner', namespace=self.namespace))

            key = finder(node, 'Key')
            upload_id = finder(node, 'UploadId')
            created_at = finder(node, 'Initiated')
            initiator = finder(initiator, 'DisplayName')
            owner = finder(owner, 'DisplayName')

            yield S3MultipartUpload(key, upload_id, created_at,
                                    initiator, owner)

        # Check if this is the last entry in the listing
        # pylint: disable=maybe-no-member
        is_truncated = body.findtext(
            fixxpath(xpath='IsTruncated', namespace=self.namespace))

        if is_truncated.lower() == 'false':
            break

        # Provide params for the next request
        upload_marker = body.findtext(
            fixxpath(xpath='NextUploadIdMarker', namespace=self.namespace))
        key_marker = body.findtext(
            fixxpath(xpath='NextKeyMarker', namespace=self.namespace))

        params['key-marker'] = key_marker
        params['upload-id-marker'] = upload_marker
def _xml_to_object(self, container, blob):
    """
    Converts a BLOB XML node to an object instance

    :param container: Instance of the container holding the blob
    :type container: :class:`Container`

    :param blob: XML info of the blob
    :type blob: :class:`xml.etree.ElementTree.Element`

    :return: An object instance
    :rtype: :class:`Object`
    """
    name = blob.findtext(fixxpath(xpath='Name'))
    props = blob.find(fixxpath(xpath='Properties'))
    metadata = blob.find(fixxpath(xpath='Metadata'))
    etag = props.findtext(fixxpath(xpath='Etag'))
    size = int(props.findtext(fixxpath(xpath='Content-Length')))

    extra = {
        'content_type': props.findtext(fixxpath(xpath='Content-Type')),
        'etag': etag,
        'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')),
        'last_modified': props.findtext(fixxpath(xpath='Last-Modified')),
        'url': blob.findtext(fixxpath(xpath='Url')),
        'hash': props.findtext(fixxpath(xpath='Etag')),
        'lease': {
            'status': props.findtext(fixxpath(xpath='LeaseStatus')),
            'state': props.findtext(fixxpath(xpath='LeaseState')),
            'duration': props.findtext(fixxpath(xpath='LeaseDuration')),
        },
        'content_encoding': props.findtext(
            fixxpath(xpath='Content-Encoding')),
        'content_language': props.findtext(
            fixxpath(xpath='Content-Language')),
        'blob_type': props.findtext(fixxpath(xpath='BlobType'))
    }

    if extra['md5_hash']:
        value = binascii.hexlify(base64.b64decode(b(extra['md5_hash'])))
        value = value.decode('ascii')
        extra['md5_hash'] = value

    meta_data = {}
    # Element.getchildren() was removed in Python 3.9; iterate directly.
    for meta in list(metadata):
        meta_data[meta.tag] = meta.text

    return Object(name=name, size=size, hash=etag, meta_data=meta_data,
                  extra=extra, container=container, driver=self)
def _to_storage_policies(self, object):
    elements = object.findall(fixxpath('storagePolicy', BACKUP_NS))
    return [self._to_storage_policy(el) for el in elements]
def _to_base_images(self, object):
    images = []
    for element in object.findall(fixxpath("ServerImage", SERVER_NS)):
        images.append(self._to_base_image(element))

    return images
def _to_targets(self, object):
    node_elements = object.findall(fixxpath("server", TYPES_URN))
    return [self._to_target(el) for el in node_elements]