Example No. 1
    def attach_volume(self, node, volume, device='disk',
                      ex_bus='virtio', ex_name=None):
        """
        params: bus, name , device (disk or lun)
        """
        # volume must be bound to a claim
        if not volume.extra['is_bound']:
            volume = self._bind_volume(volume, node.extra['namespace'])
            if volume is None:
                raise LibcloudError("Selected Volume (PV) could not be bound "
                                    "(to a PVC), please select another volume",
                                    driver=self)

        claimName = volume.extra['pvc']['name']
        if ex_name is None:
            name = claimName
        else:
            name = ex_name
        namespace = volume.extra['pvc']['namespace']
        # the VM must be stopped before the volume can be attached
        self.stop_node(node)
        # check if it is the same namespace
        if node.extra['namespace'] != namespace:
            msg = "The PVC and the VM must be in the same namespace"
            raise ValueError(msg)
        vm = node.name
        req = KUBEVIRT_URL + 'namespaces/' + namespace + '/virtualmachines/'\
            + vm
        disk_dict = {device: {'bus': ex_bus}, 'name': name}
        volumes_dict = {'persistentVolumeClaim': {'claimName': claimName},
                        'name': name}
        # Get all the volumes of the vm
        try:
            result = self.connection.request(req).object
        except Exception:
            raise
        disks = result['spec']['template']['spec']['domain'][
            'devices']['disks']
        volumes = result['spec']['template']['spec']['volumes']
        disks.append(disk_dict)
        volumes.append(volumes_dict)
        # now patch the new volumes and disks lists into the resource
        headers = {"Content-Type": "application/merge-patch+json"}
        data = {'spec': {
            'template': {
                'spec': {
                    'volumes': volumes,
                    'domain': {
                        'devices':
                        {'disks': disks}
                    }
                }
            }
        }
        }
        try:
            result = self.connection.request(req, method="PATCH",
                                             data=json.dumps(data),
                                             headers=headers)
            if 'pvcs' in node.extra:
                node.extra['pvcs'].append(claimName)
            else:
                node.extra['pvcs'] = [claimName]
            return result in VALID_RESPONSE_CODES
        except Exception:
            raise
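
The method above follows a bind, stop, patch flow: the PersistentVolume is bound to a claim if needed, the VM is stopped, and the new disk and volume entries are merge-patched into the VirtualMachine spec. A minimal usage sketch, assuming an already-authenticated KubeVirt compute driver and existing node and volume objects (all names below are hypothetical):

    # Sketch only: `driver` is assumed to be a connected KubeVirt driver that
    # can list nodes and persistent volumes.
    node = driver.list_nodes()[0]
    volume = driver.list_volumes()[0]
    # Attach the volume as a virtio disk; the disk name defaults to the PVC name.
    attached = driver.attach_volume(node, volume, device='disk', ex_bus='virtio')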
Example No. 2
    def ex_iterate_multipart_uploads(self, container, prefix=None,
                                     delimiter=None):
        """
        Extension method for listing all in-progress S3 multipart uploads.

        Each multipart upload which has not been committed or aborted is
        considered in-progress.

        :param container: The container holding the uploads
        :type container: :class:`Container`

        :keyword prefix: Print only uploads of objects with this prefix
        :type prefix: ``str``

        :keyword delimiter: The object/key names are grouped based on
            being split by this delimiter
        :type delimiter: ``str``

        :return: A generator of S3MultipartUpload instances.
        :rtype: ``generator`` of :class:`S3MultipartUpload`
        """

        if not self.supports_s3_multipart_upload:
            raise LibcloudError('Feature not supported', driver=self)

        # Get the data for a specific container
        request_path = '%s/?uploads' % (self._get_container_path(container))
        params = {'max-uploads': RESPONSES_PER_REQUEST}

        if prefix:
            params['prefix'] = prefix

        if delimiter:
            params['delimiter'] = delimiter

        finder = lambda node, text: node.findtext(fixxpath(xpath=text,
                                                  namespace=self.namespace))

        while True:
            response = self.connection.request(request_path, params=params)

            if response.status != httplib.OK:
                raise LibcloudError('Error fetching multipart uploads. '
                                    'Got code: %s' % (response.status),
                                    driver=self)

            body = response.parse_body()
            for node in body.findall(fixxpath(xpath='Upload',
                                              namespace=self.namespace)):

                initiator = node.find(fixxpath(xpath='Initiator',
                                               namespace=self.namespace))
                owner = node.find(fixxpath(xpath='Owner',
                                           namespace=self.namespace))

                key = finder(node, 'Key')
                upload_id = finder(node, 'UploadId')
                created_at = finder(node, 'Initiated')
                initiator = finder(initiator, 'DisplayName')
                owner = finder(owner, 'DisplayName')

                yield S3MultipartUpload(key, upload_id, created_at,
                                        initiator, owner)

            # Check if this is the last entry in the listing
            is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
                                                  namespace=self.namespace))

            if is_truncated.lower() == 'false':
                break

            # Provide params for the next request
            upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
                                                   namespace=self.namespace))
            key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
                                                namespace=self.namespace))

            params['key-marker'] = key_marker
            params['upload-id-marker'] = upload_marker
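
Because the listing is paginated with the key-marker and upload-id-marker parameters, the generator keeps yielding across requests until IsTruncated comes back false. A hedged consumption sketch (the driver, bucket name and prefix are assumptions):

    # Sketch only: `driver` is an authenticated S3 storage driver.
    container = driver.get_container('my-bucket')
    for upload in driver.ex_iterate_multipart_uploads(container, prefix='backups/'):
        # Attribute names mirror the S3MultipartUpload constructor used above.
        print(upload.key, upload.created_at)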
Example No. 3
    def _save_object(self,
                     response,
                     obj,
                     destination_path,
                     overwrite_existing=False,
                     delete_on_failure=True,
                     chunk_size=None):
        """
        Save object to the provided path.

        @type response: C{RawResponse}
        @param response: RawResponse instance.

        @type obj: C{Object}
        @param obj: Object instance.

        @type destination_path: C{Str}
        @param destination_path: Destination directory.

        @type delete_on_failure: C{bool}
        @param delete_on_failure: True to delete partially downloaded object if
                                  the download fails.
        @type overwrite_existing: C{bool}
        @param overwrite_existing: True to overwrite a local path if it already
                                   exists.

        @type chunk_size: C{int}
        @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)

        @return C{bool} True on success, False otherwise.
        """

        chunk_size = chunk_size or CHUNK_SIZE

        base_name = os.path.basename(destination_path)

        if not base_name and not os.path.exists(destination_path):
            raise LibcloudError(value='Path %s does not exist' %
                                (destination_path),
                                driver=self)

        if not base_name:
            file_path = pjoin(destination_path, obj.name)
        else:
            file_path = destination_path

        if os.path.exists(file_path) and not overwrite_existing:
            raise LibcloudError(value='File %s already exists, but ' %
                                (file_path) + 'overwrite_existing=False',
                                driver=self)

        stream = utils.read_in_chunks(response, chunk_size)

        try:
            data_read = stream.next()
        except StopIteration:
            # Empty response?
            return False

        bytes_transferred = 0

        with open(file_path, 'wb') as file_handle:
            while len(data_read) > 0:
                file_handle.write(data_read)
                bytes_transferred += len(data_read)

                try:
                    data_read = stream.next()
                except StopIteration:
                    data_read = ''

        if obj.size != bytes_transferred:
            # Transfer failed, support retry?
            if delete_on_failure:
                try:
                    os.unlink(file_path)
                except Exception:
                    pass

            return False

        return True
Example No. 4
    def _save_object(self,
                     response,
                     obj,
                     destination_path,
                     overwrite_existing=False,
                     delete_on_failure=True,
                     chunk_size=None):
        """
        Save object to the provided path.

        :param response: RawResponse instance.
        :type response: :class:`RawResponse`

        :param obj: Object instance.
        :type obj: :class:`Object`

        :param destination_path: Destination directory.
        :type destination_path: ``str``

        :param delete_on_failure: True to delete partially downloaded object if
                                  the download fails.
        :type delete_on_failure: ``bool``

        :param overwrite_existing: True to overwrite a local path if it already
                                   exists.
        :type overwrite_existing: ``bool``

        :param chunk_size: Optional chunk size
            (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)
        :type chunk_size: ``int``

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """

        chunk_size = chunk_size or CHUNK_SIZE

        base_name = os.path.basename(destination_path)

        if not base_name and not os.path.exists(destination_path):
            raise LibcloudError(value='Path %s does not exist' %
                                (destination_path),
                                driver=self)

        if not base_name:
            file_path = pjoin(destination_path, obj.name)
        else:
            file_path = destination_path

        if os.path.exists(file_path) and not overwrite_existing:
            raise LibcloudError(value='File %s already exists, but ' %
                                (file_path) + 'overwrite_existing=False',
                                driver=self)

        bytes_transferred = 0

        with open(file_path, 'wb') as file_handle:
            for chunk in response._response.iter_content(chunk_size):
                file_handle.write(b(chunk))
                bytes_transferred += len(chunk)

        if int(obj.size) != int(bytes_transferred):
            # Transfer failed, support retry?
            if delete_on_failure:
                try:
                    os.unlink(file_path)
                except Exception:
                    pass

            return False

        return True
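
Both _save_object variants above back the public download_object call; the second streams through the underlying HTTP response's iter_content instead of a manual chunk loop. A hedged sketch of the public API (container, object and local path are hypothetical):

    # Sketch only: `driver` is an authenticated storage driver.
    obj = driver.get_object(container_name='backups', object_name='db.sql.gz')
    ok = driver.download_object(obj,
                                destination_path='/tmp/db.sql.gz',
                                overwrite_existing=True,
                                delete_on_failure=True)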
Example No. 5
    def _put_object(self,
                    container,
                    object_name,
                    stream,
                    extra=None,
                    verify_hash=True,
                    headers=None,
                    blob_size=None,
                    file_path=None,
                    use_lease=False):
        """
        Control function that does the real job of uploading data to a blob
        """
        extra = extra or {}
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', {})

        object_path = self._get_object_path(container, object_name)

        # Get a lease if required and do the operations
        with AzureBlobLease(self, object_path, use_lease) as lease:
            if blob_size is not None and blob_size <= AZURE_UPLOAD_CHUNK_SIZE:
                result_dict = self._upload_directly(stream=stream,
                                                    object_path=object_path,
                                                    lease=lease,
                                                    blob_size=blob_size,
                                                    meta_data=meta_data,
                                                    headers=headers,
                                                    content_type=content_type,
                                                    object_name=object_name,
                                                    file_path=file_path)
            else:
                result_dict = self._upload_in_chunks(stream=stream,
                                                     object_path=object_path,
                                                     lease=lease,
                                                     meta_data=meta_data,
                                                     headers=headers,
                                                     content_type=content_type,
                                                     object_name=object_name,
                                                     file_path=file_path,
                                                     verify_hash=verify_hash)

            response = result_dict['response']
            bytes_transferred = result_dict['bytes_transferred']
            data_hash = result_dict['data_hash']
            headers = response.headers

        if response.status != httplib.CREATED:
            raise LibcloudError('Unexpected status code, status_code=%s' %
                                (response.status),
                                driver=self)

        server_hash = headers.get('content-md5')

        if server_hash:
            server_hash = binascii.hexlify(base64.b64decode(b(server_hash)))
            server_hash = server_hash.decode('utf-8')
        else:
            # TODO: HACK - We could poll the object for a while and get
            # the hash
            pass

        if (verify_hash and server_hash and data_hash != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash checksum does not match',
                object_name=object_name,
                driver=self)

        return Object(name=object_name,
                      size=bytes_transferred,
                      hash=headers['etag'],
                      extra=None,
                      meta_data=meta_data,
                      container=container,
                      driver=self)
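
_put_object is the internal entry point behind the public upload calls: blobs at or below AZURE_UPLOAD_CHUNK_SIZE go through _upload_directly, larger ones are sent block by block, and the server-side MD5 is checked when verify_hash is set. A hedged sketch of the corresponding public API (container name and file path are hypothetical):

    # Sketch only: `driver` is an authenticated Azure Blobs storage driver.
    container = driver.get_container('backups')
    obj = driver.upload_object(file_path='/tmp/db.sql.gz',
                               container=container,
                               object_name='db.sql.gz',
                               extra={'content_type': 'application/gzip'},
                               verify_hash=True)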
Example No. 6
    def create_node(self,
                    name,
                    size,
                    image,
                    auth,
                    ex_resource_group,
                    ex_storage_account,
                    ex_blob_container="vhds",
                    location=None,
                    ex_user_name="azureuser",
                    ex_network=None,
                    ex_subnet=None,
                    ex_nic=None,
                    ex_tags={},
                    ex_customdata=""):
        """Create a new node instance. This instance will be started
        automatically.

        This driver supports the ``ssh_key`` feature flag for ``created_node``
        so you can upload a public key into the new instance::

            >>> from libcloud.compute.drivers.azure_arm import AzureNodeDriver
            >>> driver = AzureNodeDriver(...)
            >>> auth = NodeAuthSSHKey('pubkey data here')
            >>> node = driver.create_node("test_node", auth=auth)

        This driver also supports the ``password`` feature flag for
        ``create_node``
        so you can set a password::

            >>> driver = AzureNodeDriver(...)
            >>> auth = NodeAuthPassword('mysecretpassword')
            >>> node = driver.create_node("test_node", auth=auth, ...)

        If you don't provide the ``auth`` argument libcloud will assign
        a password::

            >>> driver = AzureNodeDriver(...)
            >>> node = driver.create_node("test_node", ...)
            >>> password = node.extra["properties"] \
                           ["osProfile"]["adminPassword"]

        :param name:   String with a name for this new node (required)
        :type name:   ``str``

        :param size:   The size of resources allocated to this node.
                            (required)
        :type size:   :class:`.NodeSize`

        :param image:  OS Image to boot on node. (required)
        :type image:  :class:`.AzureImage`

        :param location: Which data center to create a node in.
        (if None, use default location specified as 'region' in __init__)
        :type location: :class:`.NodeLocation`

        :param auth:   Initial authentication information for the node
                            (optional)
        :type auth:   :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :param ex_resource_group:  The resource group in which to create the
        node
        :type ex_resource_group: ``str``

        :param ex_storage_account:  The storage account id in which to store
        the node's disk image.
        Note: when booting from a user image (AzureVhdImage)
        the source image and the node image must use the same storage account.
        :type ex_storage_account: ``str``

        :param ex_blob_container:  The name of the blob container on the
        storage account in which to store the node's disk image (optional,
        default "vhds")
        :type ex_blob_container: ``str``

        :param ex_user_name:  User name for the initial admin user
        (optional, default "azureuser")
        :type ex_user_name: ``str``

        :param ex_network: The virtual network the node will be attached to.
        Must provide either `ex_network` (to create a default NIC for the
        node on the given network) or `ex_nic` (to supply the NIC explicitly).
        :type ex_network: ``str``

        :param ex_subnet: If ex_network is provided, the subnet of the
        virtual network the node will be attached to.  Optional, default
        is the "default" subnet.
        :type ex_subnet: ``str``

        :param ex_nic: A virtual NIC to attach to this Node, from
        `ex_create_network_interface` or `ex_get_nic`.
        Must provide either `ex_nic` (to supply the NIC explicitly) or
        ex_network (to create a default NIC for the node on the
        given network).
        :type ex_nic: :class:`AzureNic`

        :param ex_tags: Optional tags to associate with this node.
        :type ex_tags: ``dict``

        :param ex_customdata: Custom data that will
            be placed in the file /var/lib/waagent/CustomData
            https://azure.microsoft.com/en-us/documentation/ \
            articles/virtual-machines-how-to-inject-custom-data/
        :type ex_customdata: ``str``

        :return: The newly created node.
        :rtype: :class:`.Node`

        """

        if location is None:
            location = self.default_location
        if ex_nic is None:
            if ex_network is None:
                raise ValueError("Must provide either ex_network or ex_nic")
            if ex_subnet is None:
                ex_subnet = "default"

            subnet_id = "/subscriptions/%s/resourceGroups/%s/providers" \
                        "/Microsoft.Network/virtualnetworks/%s/subnets/%s" % \
                        (self.subscription_id, ex_resource_group,
                         ex_network, ex_subnet)
            subnet = AzureSubnet(subnet_id, ex_subnet, {})
            ex_nic = self.ex_create_network_interface(name + "-nic",
                                                      subnet,
                                                      ex_resource_group,
                                                      location)

        auth = self._get_and_check_auth(auth)

        target = "/subscriptions/%s/resourceGroups/%s/providers" \
                 "/Microsoft.Compute/virtualMachines/%s" % \
                 (self.subscription_id, ex_resource_group, name)

        instance_vhd = "https://%s.blob.core.windows.net" \
                       "/%s/%s-os.vhd" \
                       % (ex_storage_account,
                          ex_blob_container,
                          name
                          )

        if isinstance(image, AzureVhdImage):
            storageProfile = {
                "osDisk": {
                    "name": "virtualmachine-osDisk",
                    "osType": "linux",
                    "caching": "ReadWrite",
                    "createOption": "FromImage",
                    "image": {
                        "uri": image.id
                    },
                    "vhd": {
                        "uri": instance_vhd
                    }
                }
            }
        elif isinstance(image, AzureImage):
            storageProfile = {
                "imageReference": {
                    "publisher": image.publisher,
                    "offer": image.offer,
                    "sku": image.sku,
                    "version": image.version
                },
                "osDisk": {
                    "name": "virtualmachine-osDisk",
                    "vhd": {
                        "uri": instance_vhd
                    },
                    "caching": "ReadWrite",
                    "createOption": "FromImage"
                }
            }
        else:
            raise LibcloudError(
                "Unknown image type %s, "
                "expected one of AzureImage, AzureVhdImage" % (type(image)),
                driver=self)

        data = {
            "id": target,
            "name": name,
            "type": "Microsoft.Compute/virtualMachines",
            "location": location.id,
            "tags": ex_tags,
            "properties": {
                "hardwareProfile": {
                    "vmSize": size.id
                },
                "storageProfile": storageProfile,
                "osProfile": {
                    "computerName": name
                },
                "networkProfile": {
                    "networkInterfaces": [
                        {
                            "id": ex_nic.id
                        }
                    ]
                }
            }
        }

        if ex_customdata:
            data["properties"]["osProfile"]["customData"] = \
                base64.b64encode(ex_customdata)

        data["properties"]["osProfile"]["adminUsername"] = ex_user_name

        if isinstance(auth, NodeAuthSSHKey):
            data["properties"]["osProfile"]["adminPassword"] = \
                binascii.hexlify(os.urandom(20)).decode("utf-8")
            data["properties"]["osProfile"]["linuxConfiguration"] = {
                "disablePasswordAuthentication": "true",
                "ssh": {
                    "publicKeys": [
                        {
                            "path": '/home/%s/.ssh/authorized_keys' % (
                                ex_user_name),
                            "keyData": auth.pubkey
                        }
                    ]
                }
            }
        elif isinstance(auth, NodeAuthPassword):
            data["properties"]["osProfile"]["linuxConfiguration"] = {
                "disablePasswordAuthentication": "false"
            }
            data["properties"]["osProfile"]["adminPassword"] = auth.password
        else:
            raise ValueError(
                "Must provide NodeAuthSSHKey or NodeAuthPassword in auth")

        r = self.connection.request(target,
                                    params={"api-version": "2015-06-15"},
                                    data=data,
                                    method="PUT")

        while r.object is None:
            time.sleep(1)

        node = self._to_node(r.object)
        node.size = size
        node.image = image
        return node
Example No. 7
    def ex_iterate_multipart_uploads(self,
                                     container,
                                     prefix=None,
                                     delimiter=None,
                                     max_uploads=MAX_UPLOADS_PER_RESPONSE):
        """
        Extension method for listing all in-progress OSS multipart uploads.

        Each multipart upload which has not been committed or aborted is
        considered in-progress.

        :param container: The container holding the uploads
        :type container: :class:`Container`

        :keyword prefix: Print only uploads of objects with this prefix
        :type prefix: ``str``

        :keyword delimiter: The object/key names are grouped based on
            being split by this delimiter
        :type delimiter: ``str``

        :keyword max_uploads: The max upload items returned per request
        :type max_uploads: ``int``

        :return: A generator of OSSMultipartUpload instances.
        :rtype: ``generator`` of :class:`OSSMultipartUpload`
        """

        if not self.supports_multipart_upload:
            raise LibcloudError('Feature not supported', driver=self)

        request_path = '/?uploads'
        params = {'max-uploads': max_uploads}

        if prefix:
            params['prefix'] = prefix

        if delimiter:
            params['delimiter'] = delimiter

        def finder(node, text):
            return node.findtext(fixxpath(xpath=text,
                                          namespace=self.namespace))

        while True:
            response = self.connection.request(request_path,
                                               params=params,
                                               container=container)

            if response.status != httplib.OK:
                raise LibcloudError('Error fetching multipart uploads. '
                                    'Got code: %s' % response.status,
                                    driver=self)

            body = response.parse_body()
            # pylint: disable=maybe-no-member
            for node in body.findall(
                    fixxpath(xpath='Upload', namespace=self.namespace)):

                key = finder(node, 'Key')
                upload_id = finder(node, 'UploadId')
                initiated = finder(node, 'Initiated')

                yield OSSMultipartUpload(key, upload_id, initiated)

            # Check if this is the last entry in the listing
            # pylint: disable=maybe-no-member
            is_truncated = body.findtext(
                fixxpath(xpath='IsTruncated', namespace=self.namespace))

            if is_truncated.lower() == 'false':
                break

            # Provide params for the next request
            upload_marker = body.findtext(
                fixxpath(xpath='NextUploadIdMarker', namespace=self.namespace))
            key_marker = body.findtext(
                fixxpath(xpath='NextKeyMarker', namespace=self.namespace))

            params['key-marker'] = key_marker
            params['upload-id-marker'] = upload_marker
Example No. 8
    def _upload_object(self,
                       object_name,
                       content_type,
                       upload_func,
                       upload_func_kwargs,
                       request_path,
                       request_method='PUT',
                       headers=None,
                       file_path=None,
                       iterator=None):
        """
        Helper function for setting common request headers and calling the
        passed in callback which uploads an object.
        """
        headers = headers or {}

        if file_path and not os.path.exists(file_path):
            raise OSError('File %s does not exist' % (file_path))

        if iterator is not None and not hasattr(iterator, 'next'):
            raise AttributeError('iterator object must implement next() ' +
                                 'method.')

        if not content_type:
            if file_path:
                name = file_path
            else:
                name = object_name
            content_type, _ = utils.guess_file_mime_type(name)

            if not content_type:
                raise AttributeError(
                    'File content-type could not be guessed and' +
                    ' no content_type value provided')

        file_size = None

        if iterator:
            if self.supports_chunked_encoding:
                headers['Transfer-Encoding'] = 'chunked'
                upload_func_kwargs['chunked'] = True
            else:
                # Chunked transfer encoding is not supported. Need to buffer all
                # the data in memory so we can determine file size.
                iterator = utils.read_in_chunks(iterator=iterator)
                data = utils.exhaust_iterator(iterator=iterator)

                file_size = len(data)
                upload_func_kwargs['data'] = data
        else:
            file_size = os.path.getsize(file_path)
            upload_func_kwargs['chunked'] = False

        if file_size:
            headers['Content-Length'] = file_size

        headers['Content-Type'] = content_type
        response = self.connection.request(request_path,
                                           method=request_method,
                                           data=None,
                                           headers=headers,
                                           raw=True)

        upload_func_kwargs['response'] = response
        success, data_hash, bytes_transferred = upload_func(
            **upload_func_kwargs)

        if not success:
            raise LibcloudError(
                value='Object upload failed, Perhaps a timeout?', driver=self)

        result_dict = {
            'response': response,
            'data_hash': data_hash,
            'bytes_transferred': bytes_transferred
        }
        return result_dict
Example No. 9
    def _put_object(self,
                    container,
                    object_name,
                    upload_func,
                    upload_func_kwargs,
                    method='PUT',
                    query_args=None,
                    extra=None,
                    file_path=None,
                    iterator=None,
                    verify_hash=False):
        """
        Create an object and upload data using the given function.
        """
        headers = {}
        extra = extra or {}

        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)

        if meta_data:
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + 'meta-%s' % (key)
                headers[key] = value

        if acl:
            if acl not in ['public-read', 'private', 'public-read-write']:
                raise AttributeError('invalid acl value: %s' % acl)
            headers[self.http_vendor_prefix + 'object-acl'] = acl

        request_path = self._get_object_path(container, object_name)

        if query_args:
            request_path = '?'.join((request_path, query_args))

        # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE
        # here.
        # SIGPIPE is thrown if the provided container does not exist or the
        # user does not have correct permission
        result_dict = self._upload_object(
            object_name=object_name,
            content_type=content_type,
            upload_func=upload_func,
            upload_func_kwargs=upload_func_kwargs,
            request_path=request_path,
            request_method=method,
            headers=headers,
            file_path=file_path,
            iterator=iterator,
            container=container)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers
        response = response.response
        server_hash = headers['etag'].replace('"', '')

        if (verify_hash and result_dict['data_hash'].upper() != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash checksum does not match',
                object_name=object_name,
                driver=self)
        elif response.status == httplib.OK:
            obj = Object(name=object_name,
                         size=bytes_transferred,
                         hash=server_hash,
                         extra={'acl': acl},
                         meta_data=meta_data,
                         container=container,
                         driver=self)

            return obj
        else:
            raise LibcloudError('Unexpected status code, status_code=%s' %
                                (response.status),
                                driver=self)
Example No. 10
    def _upload_in_chunks(self,
                          response,
                          data,
                          iterator,
                          object_path,
                          blob_type,
                          lease,
                          calculate_hash=True):
        """
        Uploads data from an iterator in fixed-size chunks to Azure Blob storage

        :param response: Response object from the initial POST request
        :type response: :class:`RawResponse`

        :param data: Any data from the initial POST request
        :type data: ``str``

        :param iterator: The generator for fetching the upload data
        :type iterator: ``generator``

        :param object_path: The path of the object to which we are uploading
        :type object_path: ``str``

        :param blob_type: The blob type being uploaded
        :type blob_type: ``str``

        :param lease: The lease object to be used for renewal
        :type lease: :class:`AzureBlobLease`

        :keyword calculate_hash: Indicates if we must calculate the data hash
        :type calculate_hash: ``bool``

        :return: A tuple of (status, checksum, bytes transferred)
        :rtype: ``tuple``
        """

        # Get the upload id from the response xml
        if response.status != httplib.CREATED:
            raise LibcloudError('Error initializing upload. Code: %d' %
                                (response.status),
                                driver=self)

        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()

        bytes_transferred = 0
        count = 1
        chunks = []
        headers = {}

        lease.update_headers(headers)

        if blob_type == 'BlockBlob':
            params = {'comp': 'block'}
        else:
            params = {'comp': 'page'}

        # Read the input data in chunk sizes suitable for Azure
        for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE):
            data = b(data)
            content_length = len(data)
            offset = bytes_transferred
            bytes_transferred += content_length

            if calculate_hash:
                data_hash.update(data)

            chunk_hash = self._get_hash_function()
            chunk_hash.update(data)
            chunk_hash = base64.b64encode(b(chunk_hash.digest()))

            headers['Content-MD5'] = chunk_hash.decode('utf-8')
            headers['Content-Length'] = str(content_length)

            if blob_type == 'BlockBlob':
                # Block id can be any unique string that is base64 encoded
                # A 10 digit number can hold the max value of 50000 blocks
                # that are allowed for azure
                block_id = base64.b64encode(b('%10d' % (count)))
                block_id = block_id.decode('utf-8')
                params['blockid'] = block_id

                # Keep this data for a later commit
                chunks.append(block_id)
            else:
                headers['x-ms-page-write'] = 'update'
                headers['x-ms-range'] = 'bytes=%d-%d' % \
                    (offset, (bytes_transferred - 1))

            # Renew lease before updating
            lease.renew()

            resp = self.connection.request(object_path,
                                           method='PUT',
                                           data=data,
                                           headers=headers,
                                           params=params)

            if resp.status != httplib.CREATED:
                resp.parse_error()
                raise LibcloudError('Error uploading chunk %d. Code: %d' %
                                    (count, resp.status),
                                    driver=self)

            count += 1

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        if blob_type == 'BlockBlob':
            self._commit_blocks(object_path, chunks, lease)

        # The Azure service does not return a hash immediately for
        # chunked uploads. It takes some time for the data to get synced
        response.headers['content-md5'] = None

        return (True, data_hash, bytes_transferred)
Example No. 11
    def _vcl_request(self, method, *args):
        res = self.connection.request(method, *args)
        if res['status'] == 'error':
            raise LibcloudError(res['errormsg'], driver=self)
        return res
Example No. 12
    def wait_until_running(self, nodes, wait_period=3, timeout=600,
                           ssh_interface='public_ips', force_ipv4=True):
        """
        Block until the given nodes are fully booted and have an IP address
        assigned.

        :param nodes: list of node instances.
        :type nodes: ``List`` of :class:`.Node`

        :param wait_period: How many seconds to wait between each loop
                            iteration (default is 3)
        :type wait_period: ``int``

        :param timeout: How many seconds to wait before timing out
                             (default is 600)
        :type timeout: ``int``

        :param ssh_interface: The interface to wait for.
                                   Default is 'public_ips', other option is
                                   'private_ips'.
        :type ssh_interface: ``str``

        :param force_ipv4: Ignore ipv6 IP addresses (default is True).
        :type force_ipv4: ``bool``

        :return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and
                 list of ip_address on success.
        :rtype: ``list`` of ``tuple``
        """
        def is_supported(address):
            """Return True for supported address"""
            if force_ipv4 and not is_valid_ip_address(address=address,
                                                      family=socket.AF_INET):
                return False
            return True

        def filter_addresses(addresses):
            """Return list of supported addresses"""
            return [a for a in addresses if is_supported(a)]

        start = time.time()
        end = start + timeout

        if ssh_interface not in ['public_ips', 'private_ips']:
            raise ValueError('ssh_interface argument must be either '
                             'public_ips or private_ips')

        uuids = set([n.uuid for n in nodes])
        while time.time() < end:
            nodes = self.list_nodes()
            nodes = list([n for n in nodes if n.uuid in uuids])

            if len(nodes) > len(uuids):
                found_uuids = [n.uuid for n in nodes]
                msg = ('Unable to match specified uuids ' +
                       '(%s) with existing nodes. Found ' % (uuids) +
                       'multiple nodes with same uuid: (%s)' % (found_uuids))
                raise LibcloudError(value=msg, driver=self)

            running_nodes = [n for n in nodes if n.state == NodeState.RUNNING]
            addresses = [filter_addresses(getattr(n, ssh_interface)) for n in
                         running_nodes]
            if len(running_nodes) == len(uuids) == len(addresses):
                return list(zip(running_nodes, addresses))
            else:
                time.sleep(wait_period)
                continue

        raise LibcloudError(value='Timed out after %s seconds' % (timeout),
                            driver=self)
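
A hedged usage sketch of the wait loop above; the size and image lookups are provider-specific and hypothetical here:

    # Sketch only: `driver` is an authenticated compute driver and `size` and
    # `image` were obtained from list_sizes()/list_images().
    node = driver.create_node(name='demo-node', size=size, image=image)
    ready = driver.wait_until_running([node], wait_period=5, timeout=300)
    node, ip_addresses = ready[0]
    print(ip_addresses[0])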
Example No. 13
    def get_balancer(self, balancer_id):
        balancers = self.list_balancers(ex_balancer_ids=[balancer_id])
        if len(balancers) != 1:
            raise LibcloudError('could not find load balancer with id %s' %
                                balancer_id)
        return balancers[0]
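
A hedged usage sketch of the lookup above; the balancer id is hypothetical:

    # Sketch only: `driver` is an authenticated load-balancer driver.
    balancer = driver.get_balancer(balancer_id='12345')
    print(balancer.name, balancer.ip, balancer.port)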
Example No. 14
    def _put_object(self, container, object_name, upload_func,
                    upload_func_kwargs, method='PUT', query_args=None,
                    extra=None, file_path=None, iterator=None,
                    verify_hash=True, storage_class=None):
        headers = {}
        extra = extra or {}
        storage_class = storage_class or 'standard'
        if storage_class not in ['standard', 'reduced_redundancy']:
            raise ValueError(
                'Invalid storage class value: %s' % (storage_class))

        headers['x-amz-storage-class'] = storage_class.upper()

        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)

        if meta_data:
            for key, value in list(meta_data.items()):
                key = 'x-amz-meta-%s' % (key)
                headers[key] = value

        if acl:
            headers['x-amz-acl'] = acl

        request_path = self._get_object_path(container, object_name)

        if query_args:
            request_path = '?'.join((request_path, query_args))

        # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE
        # here.
        # SIGPIPE is thrown if the provided container does not exist or the
        # user does not have correct permission
        result_dict = self._upload_object(
            object_name=object_name, content_type=content_type,
            upload_func=upload_func, upload_func_kwargs=upload_func_kwargs,
            request_path=request_path, request_method=method,
            headers=headers, file_path=file_path, iterator=iterator)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers
        response = response.response
        server_hash = headers['etag'].replace('"', '')

        if (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash checksum does not match',
                object_name=object_name, driver=self)
        elif response.status == httplib.OK:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra={'acl': acl}, meta_data=meta_data, container=container,
                driver=self)

            return obj
        else:
            raise LibcloudError(
                'Unexpected status code, status_code=%s' % (response.status),
                driver=self)
Example No. 15
    def _put_object(self,
                    container,
                    object_name,
                    method='PUT',
                    query_args=None,
                    extra=None,
                    file_path=None,
                    stream=None,
                    verify_hash=False):
        """
        Create an object and upload data using the given function.
        """
        headers = {}
        extra = extra or {}

        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)

        if meta_data:
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + 'meta-%s' % (key)
                headers[key] = value

        if acl:
            if acl not in ['public-read', 'private', 'public-read-write']:
                raise AttributeError('invalid acl value: %s' % acl)
            headers[self.http_vendor_prefix + 'object-acl'] = acl

        request_path = self._get_object_path(container, object_name)

        if query_args:
            request_path = '?'.join((request_path, query_args))

        result_dict = self._upload_object(object_name=object_name,
                                          content_type=content_type,
                                          request_path=request_path,
                                          request_method=method,
                                          headers=headers,
                                          file_path=file_path,
                                          stream=stream)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers

        server_hash = headers['etag'].replace('"', '')

        if (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash {0} checksum does not match {1}'.format(
                    server_hash, result_dict['data_hash']),
                object_name=object_name,
                driver=self)
        elif response.status == httplib.OK:
            obj = Object(name=object_name,
                         size=bytes_transferred,
                         hash=server_hash,
                         extra={'acl': acl},
                         meta_data=meta_data,
                         container=container,
                         driver=self)

            return obj
        else:
            raise LibcloudError('Unexpected status code, status_code=%s' %
                                (response.status),
                                driver=self)
Example No. 16
    def async_request(self, action, params=None, data=None, headers=None,
                      method='GET', context=None):
        """
        Perform an 'async' request to the specified path. Keep in mind that
        this function is *blocking* and 'async' in this case means that the
        hit URL only returns a job ID which is then periodically polled until
        the job has completed.

        This function works like this:

        - Perform a request to the specified path. Response should contain a
          'job_id'.

        - Returned 'job_id' is then used to construct a URL which is used for
          retrieving job status. Constructed URL is then periodically polled
          until the response indicates that the job has completed or the
          timeout of 'self.timeout' seconds has been reached.

        :type action: ``str``
        :param action: A path

        :type params: ``dict``
        :param params: Optional mapping of additional parameters to send. If
            None, leave as an empty ``dict``.

        :type data: ``unicode``
        :param data: A body of data to send with the request.

        :type headers: ``dict``
        :param headers: Extra headers to add to the request. If None, leave
            as an empty ``dict``.

        :type method: ``str``
        :param method: An HTTP method such as "GET" or "POST".

        :type context: ``dict``
        :param context: Context dictionary which is passed to the functions
                        which construct initial and poll URL.

        :return: An :class:`Response` instance.
        :rtype: :class:`Response` instance
        """

        request = getattr(self, self.request_method)
        kwargs = self.get_request_kwargs(action=action, params=params,
                                         data=data, headers=headers,
                                         method=method,
                                         context=context)
        response = request(**kwargs)
        kwargs = self.get_poll_request_kwargs(response=response,
                                              context=context,
                                              request_kwargs=kwargs)

        end = time.time() + self.timeout
        completed = False
        while time.time() < end and not completed:
            response = request(**kwargs)
            completed = self.has_completed(response=response)
            if not completed:
                time.sleep(self.poll_interval)

        if not completed:
            raise LibcloudError('Job did not complete in %s seconds' %
                                (self.timeout))

        return response
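
The polling contract lives in the hooks this method calls on self: get_request_kwargs builds the initial request, get_poll_request_kwargs turns the job response into the status request, and has_completed decides when to stop. A minimal subclass sketch under those assumptions (the job endpoint and payload format are hypothetical):

    # Sketch only: illustrates the hooks async_request relies on.
    # PollingConnection is assumed to come from libcloud.common.base.
    class ExamplePollingConnection(PollingConnection):
        poll_interval = 2   # seconds between status checks
        timeout = 120       # give up after this many seconds

        def get_poll_request_kwargs(self, response, context, request_kwargs):
            # Build the status path from the job id returned by the first call.
            job_id = response.object['job_id']
            return {'action': '/jobs/%s' % job_id, 'method': 'GET'}

        def has_completed(self, response):
            return response.object.get('status') == 'DONE'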
Example No. 17
    def wait_until_running(self,
                           nodes,
                           wait_period=3,
                           timeout=600,
                           ssh_interface='public_ips',
                           force_ipv4=True):
        """
        Block until the provided nodes are considered running.

        Node is considered running when its state is "running" and when it has
        at least one IP address assigned.

        :param nodes: List of nodes to wait for.
        :type nodes: ``list`` of :class:`.Node`

        :param wait_period: How many seconds to wait between each loop
                            iteration. (default is 3)
        :type wait_period: ``int``

        :param timeout: How many seconds to wait before giving up.
                        (default is 600)
        :type timeout: ``int``

        :param ssh_interface: Which attribute on the node to use to obtain
                              an IP address. Valid options: public_ips,
                              private_ips. Default is public_ips.
        :type ssh_interface: ``str``

        :param force_ipv4: Ignore IPv6 addresses (default is True).
        :type force_ipv4: ``bool``

        :return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and
                 list of ip_address on success.
        :rtype: ``list`` of ``tuple``
        """
        def is_supported(address):
            """
            Return True for supported address.
            """
            if force_ipv4 and not is_valid_ip_address(address=address,
                                                      family=socket.AF_INET):
                return False
            return True

        def filter_addresses(addresses):
            """
            Return list of supported addresses.
            """
            return [address for address in addresses if is_supported(address)]

        if ssh_interface not in ['public_ips', 'private_ips']:
            raise ValueError('ssh_interface argument must be either '
                             'public_ips or private_ips')

        start = time.time()
        end = start + timeout

        uuids = set([node.uuid for node in nodes])

        while time.time() < end:
            all_nodes = self.list_nodes()
            matching_nodes = list(
                [node for node in all_nodes if node.uuid in uuids])

            if len(matching_nodes) > len(uuids):
                found_uuids = [node.uuid for node in matching_nodes]
                msg = ('Unable to match specified uuids ' +
                       '(%s) with existing nodes. Found ' % (uuids) +
                       'multiple nodes with same uuid: (%s)' % (found_uuids))
                raise LibcloudError(value=msg, driver=self)

            running_nodes = [
                node for node in matching_nodes
                if node.state == NodeState.RUNNING
            ]
            addresses = [
                filter_addresses(getattr(node, ssh_interface))
                for node in running_nodes
            ]

            if len(running_nodes) == len(uuids) == len(addresses):
                return list(zip(running_nodes, addresses))
            else:
                time.sleep(wait_period)
                continue

        raise LibcloudError(value='Timed out after %s seconds' % (timeout),
                            driver=self)
Example No. 18
    def _get_first_ip(self, location=None):
        ips = self.ex_list_ips(public=True, assigned=False, location=location)
        try:
            return ips[0].ip
        except IndexError:
            raise LibcloudError('No public unassigned IPs left', self.driver)
Example No. 19
    def parse_error(self):
        if self.status in [httplib.UNAUTHORIZED]:
            raise InvalidCredsError(self.body)
        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
                            driver=self.connection.driver)
Example No. 20
    def _upload_in_chunks(self, stream, object_path, lease, meta_data,
                          content_type, object_name, file_path, verify_hash,
                          headers):
        """
        Uploads data from an iterator in fixed-size chunks to Azure Storage
        """

        data_hash = None
        if verify_hash:
            data_hash = self._get_hash_function()

        bytes_transferred = 0
        count = 1
        chunks = []
        headers = headers or {}

        lease.update_headers(headers)

        params = {'comp': 'block'}

        # Read the input data in chunk sizes suitable for Azure
        for data in read_in_chunks(stream,
                                   AZURE_UPLOAD_CHUNK_SIZE,
                                   fill_size=True):
            data = b(data)
            content_length = len(data)
            bytes_transferred += content_length

            if verify_hash:
                data_hash.update(data)

            chunk_hash = self._get_hash_function()
            chunk_hash.update(data)
            chunk_hash = base64.b64encode(b(chunk_hash.digest()))

            headers['Content-MD5'] = chunk_hash.decode('utf-8')
            headers['Content-Length'] = str(content_length)

            # Block id can be any unique string that is base64 encoded
            # A 10 digit number can hold the max value of 50000 blocks
            # that are allowed for azure
            block_id = base64.b64encode(b('%10d' % (count)))
            block_id = block_id.decode('utf-8')
            params['blockid'] = block_id

            # Keep this data for a later commit
            chunks.append(block_id)

            # Renew lease before updating
            lease.renew()

            resp = self.connection.request(object_path,
                                           method='PUT',
                                           data=data,
                                           headers=headers,
                                           params=params)

            if resp.status != httplib.CREATED:
                resp.parse_error()
                raise LibcloudError('Error uploading chunk %d. Code: %d' %
                                    (count, resp.status),
                                    driver=self)

            count += 1

        if verify_hash:
            data_hash = base64.b64encode(b(data_hash.digest()))
            data_hash = data_hash.decode('utf-8')

        response = self._commit_blocks(object_path=object_path,
                                       chunks=chunks,
                                       lease=lease,
                                       meta_data=meta_data,
                                       content_type=content_type,
                                       data_hash=data_hash,
                                       object_name=object_name,
                                       file_path=file_path)

        # According to the Azure docs:
        # > This header refers to the content of the request, meaning, in this
        # > case, the list of blocks, and not the content of the blob itself.
        # However, the validation code assumes that the content-md5 in the
        # server response refers to the object so we must discard the value
        response.headers['content-md5'] = None

        return {
            'response': response,
            'data_hash': data_hash,
            'bytes_transferred': bytes_transferred,
        }
Example No. 21
    def _put_object(self,
                    container,
                    object_name,
                    method='PUT',
                    query_args=None,
                    extra=None,
                    file_path=None,
                    stream=None,
                    verify_hash=True,
                    storage_class=None):
        headers = {}
        extra = extra or {}

        headers.update(self._to_storage_class_headers(storage_class))

        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        acl = extra.get('acl', None)

        if meta_data:
            for key, value in list(meta_data.items()):
                key = self.http_vendor_prefix + '-meta-%s' % (key)
                headers[key] = value

        if acl:
            headers[self.http_vendor_prefix + '-acl'] = acl

        request_path = self._get_object_path(container, object_name)

        if query_args:
            request_path = '?'.join((request_path, query_args))

        result_dict = self._upload_object(object_name=object_name,
                                          content_type=content_type,
                                          request_path=request_path,
                                          request_method=method,
                                          headers=headers,
                                          file_path=file_path,
                                          stream=stream)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers
        server_hash = headers.get('etag', '').replace('"', '')

        if (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value=('MD5 hash checksum does not match (expected=%s, ' +
                       'actual=%s)') % (result_dict['data_hash'], server_hash),
                object_name=object_name,
                driver=self)
        elif response.status == httplib.OK:
            obj = Object(name=object_name,
                         size=bytes_transferred,
                         hash=server_hash,
                         extra={'acl': acl},
                         meta_data=meta_data,
                         container=container,
                         driver=self)

            return obj
        else:
            raise LibcloudError('Unexpected status code, status_code=%s' %
                                (response.status),
                                driver=self)
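In practice this method is reached through the driver's public API rather than called directly. A minimal usage sketch, assuming an S3 storage driver; the credentials, bucket, and file names are placeholders:

    from libcloud.storage.types import Provider
    from libcloud.storage.providers import get_driver

    cls = get_driver(Provider.S3)
    driver = cls('access key id', 'secret key')

    container = driver.get_container(container_name='my-bucket')
    obj = driver.upload_object(file_path='/tmp/report.csv',
                               container=container,
                               object_name='report.csv',
                               extra={'content_type': 'text/csv',
                                      'meta_data': {'owner': 'analytics'}})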
Example No. 22
0
    def _put_object(self,
                    container,
                    object_name,
                    upload_func,
                    upload_func_kwargs,
                    extra=None,
                    file_path=None,
                    iterator=None,
                    verify_hash=True):
        extra = extra or {}
        container_name_cleaned = self._clean_container_name(container.name)
        object_name_cleaned = self._clean_object_name(object_name)
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)

        headers = {}
        if meta_data:
            for key, value in list(meta_data.items()):
                key = 'X-Object-Meta-%s' % (key)
                headers[key] = value

        request_path = '/%s/%s' % (container_name_cleaned,
                                   object_name_cleaned)
        result_dict = self._upload_object(
            object_name=object_name,
            content_type=content_type,
            upload_func=upload_func,
            upload_func_kwargs=upload_func_kwargs,
            request_path=request_path,
            request_method='PUT',
            headers=headers,
            file_path=file_path,
            iterator=iterator)

        response = result_dict['response'].response
        bytes_transferred = result_dict['bytes_transferred']
        server_hash = result_dict['response'].headers.get('etag', None)

        if response.status == httplib.EXPECTATION_FAILED:
            raise LibcloudError(value='Missing content-type header',
                                driver=self)
        elif verify_hash and not server_hash:
            raise LibcloudError(value='Server didn\'t return etag',
                                driver=self)
        elif (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value=('MD5 hash checksum does not match (expected=%s, ' +
                       'actual=%s)') % (result_dict['data_hash'], server_hash),
                object_name=object_name,
                driver=self)
        elif response.status == httplib.CREATED:
            obj = Object(name=object_name,
                         size=bytes_transferred,
                         hash=server_hash,
                         extra=None,
                         meta_data=meta_data,
                         container=container,
                         driver=self)

            return obj
        else:
            # @TODO: Add test case for this condition (probably 411)
            raise LibcloudError('status_code=%s' % (response.status),
                                driver=self)
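The streaming counterpart, upload_object_via_stream, ends up in the same code path with an iterator instead of a file path. A minimal sketch, assuming a Rackspace Cloud Files (Swift-based) driver; credentials, region, and names are placeholders, and constructor arguments vary by provider and libcloud version:

    from io import BytesIO

    from libcloud.storage.types import Provider
    from libcloud.storage.providers import get_driver

    cls = get_driver(Provider.CLOUDFILES)
    driver = cls('username', 'api key', region='ord')

    container = driver.get_container(container_name='backups')
    obj = driver.upload_object_via_stream(iterator=BytesIO(b'hello world'),
                                          container=container,
                                          object_name='hello.txt',
                                          extra={'content_type': 'text/plain'})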
Example No. 23
0
    def ex_configure_load_balancer(self, balancer, port=80,
                                   protocol='http',
                                   algorithm=DEFAULT_ALGORITHM,
                                   ex_allocation=100):
        """
        Configure the load balancer by adding a front-end port (known as a
        service group in the SoftLayer load balancer model).

        A SoftLayer load balancer may define multiple service groups
        (front-end ports), each with a unique port number.

        :param balancer: The loadbalancer.
        :type  balancer: :class:`LoadBalancer`

        :param port: Port of the service group, defaults to 80.
        :type  port: ``int``

        :param protocol: Loadbalancer protocol, defaults to http.
        :type  protocol: ``str``

        :param algorithm: Load balancing algorithm, defaults to
                            Algorithm.ROUND_ROBIN
        :type  algorithm: :class:`Algorithm`

        :param ex_allocation: The percentage of the total connection
                              allocations to allocate for this group.
        :type  ex_allocation: ``int``

        :return: ``True`` if the service group was configured successfully.
        :rtype: ``bool``
        """
        _types = self._get_routing_types()
        _methods = self._get_routing_methods()

        rt = find(_types, lambda t: t['keyname'] == protocol.upper())
        if not rt:
            raise LibcloudError(value='Invalid protocol %s' % protocol,
                                driver=self)

        value = self._algorithm_to_value(algorithm)
        meth = find(_methods, lambda m: m['keyname'] == value)
        if not meth:
            raise LibcloudError(value='Invalid algorithm %s' % algorithm,
                                driver=self)

        service_group_template = {
            'port': port,
            'allocation': ex_allocation,
            'serviceGroups': [{
                'routingTypeId': rt['id'],
                'routingMethodId': meth['id']
            }]
        }

        lb = self._get_balancer_model(balancer.id)
        if len(lb['virtualServers']) > 0:
            port = lb['virtualServers'][0]['port']
            raise LibcloudError(value='Loadbalancer already configured with '
                                'a service group (front-end port): %s' % port,
                                driver=self)

        lb['virtualServers'].append(service_group_template)
        self.connection.request(lb_service, 'editObject', lb, id=balancer.id)
        return True
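A minimal usage sketch for the method above, assuming a SoftLayer load-balancer driver; the provider constant, credentials, and balancer id are placeholders based on libcloud's usual driver layout:

    from libcloud.loadbalancer.types import Provider
    from libcloud.loadbalancer.providers import get_driver
    from libcloud.loadbalancer.base import Algorithm

    cls = get_driver(Provider.SOFTLAYER)
    driver = cls('username', 'api key')

    balancer = driver.get_balancer(balancer_id='12345')

    # Add a single HTTP service group on port 80 that receives the full
    # connection allocation
    driver.ex_configure_load_balancer(balancer, port=80, protocol='http',
                                      algorithm=Algorithm.ROUND_ROBIN,
                                      ex_allocation=100)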
Example No. 24
0
    def _put_object(
        self,
        container,
        object_name,
        extra=None,
        file_path=None,
        stream=None,
        verify_hash=True,
        headers=None,
    ):
        extra = extra or {}
        container_name_encoded = self._encode_container_name(container.name)
        object_name_encoded = self._encode_object_name(object_name)
        content_type = extra.get("content_type", None)
        meta_data = extra.get("meta_data", None)
        content_disposition = extra.get("content_disposition", None)

        headers = headers or {}
        if meta_data:
            for key, value in list(meta_data.items()):
                key = "X-Object-Meta-%s" % (key)
                headers[key] = value

        if content_disposition is not None:
            headers["Content-Disposition"] = content_disposition

        request_path = "/%s/%s" % (container_name_encoded, object_name_encoded)
        result_dict = self._upload_object(
            object_name=object_name,
            content_type=content_type,
            request_path=request_path,
            request_method="PUT",
            headers=headers,
            file_path=file_path,
            stream=stream,
        )

        response = result_dict["response"]
        bytes_transferred = result_dict["bytes_transferred"]
        server_hash = result_dict["response"].headers.get("etag", None)

        if response.status == httplib.EXPECTATION_FAILED:
            raise LibcloudError(value="Missing content-type header",
                                driver=self)
        elif verify_hash and not server_hash:
            raise LibcloudError(value="Server didn't return etag", driver=self)
        elif verify_hash and result_dict["data_hash"] != server_hash:
            raise ObjectHashMismatchError(
                value=("MD5 hash checksum does not match (expected=%s, " +
                       "actual=%s)") % (result_dict["data_hash"], server_hash),
                object_name=object_name,
                driver=self,
            )
        elif response.status == httplib.CREATED:
            obj = Object(
                name=object_name,
                size=bytes_transferred,
                hash=server_hash,
                extra=None,
                meta_data=meta_data,
                container=container,
                driver=self,
            )

            return obj
        else:
            # @TODO: Add test case for this condition (probably 411)
            raise LibcloudError("status_code=%s" % (response.status),
                                driver=self)
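Compared with the earlier Swift-style variant, this version also honors a content_disposition key in extra. A sketch of the extra dict a caller might pass, assuming driver and container objects obtained as in the earlier sketches; the file name and values are placeholders:

    extra = {
        "content_type": "application/pdf",
        # Served back verbatim in the Content-Disposition response header
        "content_disposition": 'attachment; filename="invoice.pdf"',
        # Each entry becomes an X-Object-Meta-* request header
        "meta_data": {"department": "billing"},
    }

    obj = driver.upload_object(file_path="/tmp/invoice.pdf",
                               container=container,
                               object_name="invoice.pdf",
                               extra=extra)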
Example No. 25
0
    def _upload_from_iterator(self, iterator, object_path, upload_id,
                              calculate_hash=True):
        """
        Uploads data from an iterator to S3 in fixed-size chunks

        :param iterator: The generator for fetching the upload data
        :type iterator: ``generator``

        :param object_path: The path of the object to which we are uploading
        :type object_path: ``str``

        :param upload_id: The upload id allocated for this multipart upload
        :type upload_id: ``str``

        :keyword calculate_hash: Indicates if we must calculate the data hash
        :type calculate_hash: ``bool``

        :return: A tuple of (chunk info, checksum, bytes transferred)
        :rtype: ``tuple``
        """

        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()

        bytes_transferred = 0
        count = 1
        chunks = []
        params = {'uploadId': upload_id}

        # Read the input data in chunk sizes suitable for AWS
        for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE,
                                   fill_size=True, yield_empty=True):
            bytes_transferred += len(data)

            if calculate_hash:
                data_hash.update(data)

            chunk_hash = self._get_hash_function()
            chunk_hash.update(data)
            chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')

            # The per-chunk Content-MD5 adds an extra integrity check and is
            # recommended by Amazon
            headers = {'Content-MD5': chunk_hash}
            params['partNumber'] = count

            request_path = '?'.join((object_path, urlencode(params)))

            resp = self.connection.request(request_path, method='PUT',
                                           data=data, headers=headers)

            if resp.status != httplib.OK:
                raise LibcloudError('Error uploading chunk', driver=self)

            server_hash = resp.headers['etag']

            # Keep this data for a later commit
            chunks.append((count, server_hash))
            count += 1

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        return (chunks, data_hash, bytes_transferred)
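The (part number, ETag) pairs returned here are exactly what a later commit step needs to complete the multipart upload. As a rough sketch, the standard S3 CompleteMultipartUpload body could be assembled from them like this; the helper name is hypothetical:

    def build_complete_multipart_body(chunks):
        # chunks is the list of (part_number, server_etag) tuples returned
        # by _upload_from_iterator
        parts = ['<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>'
                 % (part_number, etag) for part_number, etag in chunks]
        return ('<CompleteMultipartUpload>%s</CompleteMultipartUpload>'
                % ''.join(parts))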
Example No. 26
0
    def __enter__(self):
        try:
            self.lock.acquire(timeout=0.1)
        except LockTimeout:
            raise LibcloudError('Lock timeout')
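For the with-statement protocol to work, this __enter__ is normally paired with an __exit__ that releases the lock. A minimal sketch of such a counterpart, assuming self.lock is a lockfile-style lock exposing is_locked() and release(); only the attribute name is taken from the snippet, the rest is illustrative:

    def __exit__(self, exc_type, exc_value, traceback):
        # Release the lock regardless of whether the body raised
        if self.lock.is_locked():
            self.lock.release()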
Example No. 27
0
    def _put_object(self,
                    container,
                    object_name,
                    object_size,
                    upload_func,
                    upload_func_kwargs,
                    file_path=None,
                    extra=None,
                    verify_hash=True,
                    blob_type=None,
                    use_lease=False):
        """
        Control function that does the real job of uploading data to a blob
        """
        extra = extra or {}
        meta_data = extra.get('meta_data', {})
        content_type = extra.get('content_type', None)

        headers = self._prepare_upload_headers(object_name, object_size, extra,
                                               meta_data, blob_type)

        object_path = self._get_object_path(container, object_name)

        # Get a lease if required and do the operations
        with AzureBlobLease(self, object_path, use_lease) as lease:
            if 'lease' in upload_func_kwargs:
                upload_func_kwargs['lease'] = lease

            lease.update_headers(headers)

            # The upload functions read the data themselves; the empty
            # iterator is only a placeholder for _upload_object
            iterator = iter('')
            result_dict = self._upload_object(object_name,
                                              content_type,
                                              upload_func,
                                              upload_func_kwargs,
                                              object_path,
                                              headers=headers,
                                              file_path=file_path,
                                              iterator=iterator)

            response = result_dict['response']
            bytes_transferred = result_dict['bytes_transferred']
            data_hash = result_dict['data_hash']
            headers = response.headers
            response = response.response

        if response.status != httplib.CREATED:
            raise LibcloudError('Unexpected status code, status_code=%s' %
                                (response.status),
                                driver=self)

        # The header may be absent for some upload paths, so look it up
        # defensively
        server_hash = headers.get('content-md5')

        if server_hash:
            server_hash = binascii.hexlify(base64.b64decode(b(server_hash)))
            server_hash = server_hash.decode('utf-8')
        else:
            # TODO: HACK - We could poll the object for a while and get
            # the hash
            pass

        if (verify_hash and server_hash and data_hash != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash checksum does not match',
                object_name=object_name,
                driver=self)

        return Object(name=object_name,
                      size=bytes_transferred,
                      hash=headers['etag'],
                      extra=None,
                      meta_data=meta_data,
                      container=container,
                      driver=self)
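As with the other drivers, this internal method is normally reached through the public upload calls. A minimal usage sketch, assuming an Azure Blobs driver; the account name, key, container, and file names are placeholders:

    from libcloud.storage.types import Provider
    from libcloud.storage.providers import get_driver

    cls = get_driver(Provider.AZURE_BLOBS)
    driver = cls('storage account name', 'storage account key')

    container = driver.get_container(container_name='media')
    obj = driver.upload_object(file_path='/tmp/video.mp4',
                               container=container,
                               object_name='video.mp4',
                               extra={'content_type': 'video/mp4'})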