Code example #1
import os
from azure.storage import BlobService  # legacy (pre-1.0) Azure SDK

def store(image, entity, entity_id):
    """Upload an uploaded-file object as a block blob under /<entity>/<entity_id>."""
    blob_service = BlobService(account_name='shnergledata',
                               account_key=os.environ['BLOB_KEY'])
    myblob = image.read()
    name = '/' + entity + '/' + entity_id
    blob_service.put_blob('images', name, myblob, x_ms_blob_type='BlockBlob')
    return True
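
For context, a minimal sketch of how this helper might be called. The file name and the 'users'/'42' values are placeholders, and it assumes BLOB_KEY is exported and the 'images' container already exists:

# Hypothetical caller; not part of the original project.
with open('avatar.png', 'rb') as image:
    store(image, 'users', '42')  # uploads block blob '/users/42' to 'images'
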
Code example #2
File: test_urls.py  Project: KuduApps/PythonApp
def test_azure_call(request):
    import os
    from django.http import HttpResponse  # assumed import; not shown in the snippet
    try:
        from azure.storage import BlobService
        bs = BlobService(os.environ["AZURE_STORAGE_ACCOUNT"], os.environ["AZURE_STORAGE_ACCESS_KEY"])
        import random
        container_name = hex(int(random.random() * 1000000000))

        bs.create_container(container_name)
        bs.put_blob(container_name, 'testblob', 'hello world\n', 'BlockBlob')
        blob = bs.get_blob(container_name, 'testblob')
        if blob != 'hello world\n':
            return HttpResponse("Failed!", status = '404')
        
        bs.delete_blob(container_name, 'testblob')
        bs.delete_container(container_name)

        return HttpResponse("Succeeded!")
    except Exception:
        try:
            import traceback
        
            return HttpResponse(traceback.format_exc() + str(os.environ.keys()))
        except Exception:
            import traceback
            return HttpResponse(traceback.format_exc())
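
One plausible way to expose this smoke test, assuming a Django 1.8-style URLconf; the route is an assumption, not taken from the KuduApps project:

# Hypothetical urls.py entry; the path is illustrative.
from django.conf.urls import url

urlpatterns = [
    url(r'^test-azure/$', test_azure_call),
]
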
Code example #3
File: wabs_util.py  Project: runway20/wal-e
import base64
from hashlib import md5
from urlparse import urlparse  # Python 2, as in the rest of this listing

from azure.storage import BlobService
# _Key is a small size-only record defined elsewhere in wabs_util.py


def uri_put_file(access_key, secret_key, uri, fp, content_encoding=None):
    assert fp.tell() == 0
    data = fp.read()

    assert uri.startswith('wabs://')
    url_tup = urlparse(uri)
    check_sum = base64.encodestring(md5(data).digest())
    kwargs = dict(x_ms_blob_type='BlockBlob',
                  content_md5=check_sum.strip('\n'))
    if content_encoding is not None:
        kwargs['x_ms_blob_content_encoding'] = content_encoding

    conn = BlobService(access_key, secret_key, protocol='https')
    conn.put_blob(url_tup.netloc, url_tup.path, data, **kwargs)
    # To maintain consistency with the S3 version of this function we must
    # return an object with a certain set of attributes.  Currently, that set
    # of attributes consists of only 'size'
    return _Key(size=len(data))
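
A hedged usage sketch: the account name, key source, and URI below are placeholders. The Content-MD5 header computed above lets the service reject a corrupted upload, mirroring the S3 version's integrity check:

# Hypothetical call; credentials and URI are placeholders.
import os

with open('base_backup.tar.lzo', 'rb') as fp:
    key = uri_put_file('myaccount', os.environ['WABS_SECRET_KEY'],
                       'wabs://backups/base_backup.tar.lzo', fp)
print(key.size)  # _Key mimics a boto S3 key, exposing only 'size'
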
Code example #4
File: storage.py  Project: jacobis/couple-mission
class AzureBlobStorage(Storage):

    '''
    Django file storage backend backed by Azure Blob Storage, built on the
    legacy BlobService client.
    '''

    def __init__(self, azure_profile):
        '''
        Constructor
        '''

        if not azure_profile:
            raise ValueError('azure_profile is required')
        else:
            container_name = azure_profile['container_name']
            account_name = azure_profile['account_name']
            account_key = azure_profile['key']
            base_url = azure_profile['base_url']

        self.blob = BlobService(
            account_name=account_name, account_key=account_key)

        self.container = container_name
        self.base_url = base_url

    def delete(self, name):
        """
        Delete file.
        """
        try:
            self.blob.delete_blob(self.container, name)
        except WindowsAzureMissingResourceError:
            return False
        else:
            return True

    def delete_files(self, files=None):
        """
        Delete files in container.
        """
        if not files:
            files = self.listdir(self.container)[1]

        for _file in files:
            self.delete(_file)

    def exists(self, name, with_properties=False):
        """
        Existing check.
        """
        result = False
        blob_properties = None

        try:
            blob_properties = self.blob.get_blob_properties(
                self.container, name)
        except WindowsAzureMissingResourceError:
            result = False
        else:
            result = True

        if with_properties:
            return result, blob_properties
        else:
            return result

    def get_available_name(self, name):
        return super(AzureBlobStorage, self).get_available_name(name.replace('\\', '/'))

    def get_valid_name(self, name):
        return name

    def _list(self, path, prefix, maxresults):
        result = []
        blobs = self.blob.list_blobs(path, prefix, maxresults)

        for _blob in blobs:
            result.append(_blob.name)

        return result

    def listdir(self, path=None, prefix=None, maxresults=None):
        """
        Catalog file list.
        """
        if not path:
            path = self.container
        return [], self._list(path, prefix, maxresults)

    def size(self, name):
        """
        File size.
        """

        result, properties = self.exists(name, with_properties=True)

        if result:
            return int(properties['content-length'])
        else:
            return 0

    def url(self, name, chk_exist=False):
        """
        URL for file downloading.
        """

        if chk_exist:
            if self.exists(name):
                return '%s%s/%s' % (self.base_url, self.container, name)
            else:
                return None
        else:
            return '%s%s/%s' % (self.base_url, self.container, name)

    def _open(self, name, mode='rb'):
        """
        Open file.
        """

        in_mem_file = StringIO.StringIO(
            self.blob.get_blob(self.container, name))
        in_mem_file.name = name
        in_mem_file.mode = mode
        return File(in_mem_file)

    def _save(self, name, blob_to_upload, x_ms_blob_type='BlockBlob', content_type=None):
        """
        Save file.
        """

        if hasattr(blob_to_upload, 'content_type'):
            content_type = blob_to_upload.content_type or None

        if content_type is None:
            content_type = mimetypes.guess_type(name)[0] or None

        blob_to_upload.seek(0)

        self.blob.put_blob(self.container, name, blob_to_upload.read(),
                           x_ms_blob_type, x_ms_blob_content_type=content_type)

        return name

    def modified_time(self, name):
        """
        Last modification time.
        """

        result, properties = self.exists(name, with_properties=True)

        if result:
            date_string = properties['last-modified']
            modified_dt = parser.parse(date_string)

            if timezone.is_naive(modified_dt):
                return modified_dt
            else:
                return timezone.make_naive(modified_dt, timezone.get_current_timezone())
        else:
            return None

    created_time = accessed_time = modified_time
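
A minimal sketch of the profile dict the constructor expects; every value below is a placeholder derived from the keys read in __init__, not the original project's settings:

import os

AZURE_PROFILE = {
    'container_name': 'media',
    'account_name': 'myaccount',
    'key': os.environ['AZURE_STORAGE_KEY'],
    'base_url': 'https://myaccount.blob.core.windows.net/',
}
storage = AzureBlobStorage(AZURE_PROFILE)
print(storage.url('uploads/photo.jpg'))
# -> https://myaccount.blob.core.windows.net/media/uploads/photo.jpg
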
Code example #5
def generate_and_upload(gauge_factory, config, logger):
    start = datetime.datetime.now()
    twitter_followers = gauge_factory('twitter.followers')
    twitter_tweets = gauge_factory('twitter.tweets')
    fb_friends = gauge_factory('facebook.friends')
    foursq_checkins = gauge_factory('foursquare.checkins')
    klout_score = gauge_factory('klout.score')
    runkeeper_activities = gauge_factory('runkeeper.activities')
    runkeeper_calories = gauge_factory('runkeeper.calories_burned')
    runkeeper_weight = gauge_factory('runkeeper.weight')
    tmp102_celsius = gauge_factory('tmp102.temperature', gauge_type='hourly')
    lastfm_listened = gauge_factory('lastfm.listened')
    jawbone_sleeps = gauge_factory('jawbone.sleeps')
    jawbone_steps = gauge_factory('jawbone.steps')
    jawbone_caffeine = gauge_factory('jawbone.caffeine')

    data = {}
    data_sources = [
        # (output key, gauge, days back, aggregator, postprocessors)
        ('twitter.followers', twitter_followers, 30, None,
            [zero_fill_daily, interpolators.linear]),
        ('twitter.tweets', twitter_tweets, 20, None, [zero_fill_daily]),
        ('facebook.friends', fb_friends, 180, monthly_max, None),
        ('foursquare.checkins', foursq_checkins, 14, None, [zero_fill_daily]),
        ('lastfm.listened', lastfm_listened, 14, None, [zero_fill_daily]),
        ('klout.score', klout_score, 30, weekly_max, [zero_fill_weekly,
                                                      interpolators.linear]),
        ('runkeeper.calories', runkeeper_calories, 60, weekly_sum,
            [zero_fill_weekly]),
        ('runkeeper.activities', runkeeper_activities, 60, weekly_sum,
            [zero_fill_weekly]),
        ('runkeeper.weight', runkeeper_weight, 180, weekly_min,
            [zero_fill_weekly, interpolators.linear]),
        ('sleeps', jawbone_sleeps, 14, None, [zero_fill_daily,
            interpolators.linear]),
        ('steps', jawbone_steps, 14, None, [zero_fill_daily,
            interpolators.linear]),
        ('caffeine', jawbone_caffeine, 30, None, [zero_fill_daily]),
        ('tmp102.temperature', tmp102_celsius, 2.5, None, None)
    ]

    for ds in data_sources:
        data[ds[0]] = ds[1].aggregate(today_utc() - timedelta(days=ds[2]),
                                      aggregator=ds[3],
                                      post_processors=ds[4])

    report = {
        'generated': str(now_utc()),
        'data': data,
        'took': (datetime.datetime.now() - start).seconds
    }
    report_json = json.dumps(report, indent=4, default=json_date_serializer)
    report_content = '{0}({1})'.format(JSONP_CALLBACK_NAME, report_json)
    
    blob_service = BlobService(config['azure.account'], config['azure.key'])
    blob_service.create_container(config['azure.blob.container'])
    blob_service.set_container_acl(config['azure.blob.container'],
                                   x_ms_blob_public_access='container')
    blob_service.put_blob(config['azure.blob.container'],
                          config['azure.blob.name'], report_content, 'BlockBlob')

    took = (datetime.datetime.now() - start).seconds
    logger.info('Report generated and uploaded. Took {0} s.'.format(took))
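
The config mapping this function consumes is never shown; a hypothetical one, with keys taken from the function body and placeholder values:

config = {
    'azure.account': 'myaccount',
    'azure.key': 'base64-account-key',   # placeholder
    'azure.blob.container': 'reports',   # made publicly readable above
    'azure.blob.name': 'report.jsonp',
}
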
Code example #6
class AzureFS(LoggingMixIn, Operations):
    """Azure Blob Storage filesystem"""

    blobs = None
    containers = dict()  # <cname, dict(stat: dict, files: None | dict<fname, stat>)>
    fds = dict()  # <fd, (path, bytes, dirty)>
    fd = 0

    def __init__(self, account, key):
        self.blobs = BlobService(account, key)
        self.rebuild_container_list()

    def convert_to_epoch(self, date):
        """Converts Tue, 31 Jul 2012 07:17:34 GMT format to epoch"""
        return int(time.mktime(time.strptime(date, TIME_FORMAT)))

    def rebuild_container_list(self):
        cmap = dict()
        cnames = set()
        for c in self.blobs.list_containers():
            date = c.properties.last_modified
            cstat = dict(st_mode=(S_IFDIR | 0755), st_uid=getuid(), st_size=0,
                         st_mtime=self.convert_to_epoch(date))
            cname = c.name
            cmap['/' + cname] = dict(stat=cstat, files=None)
            cnames.add(cname)

        cmap['/'] = dict(files={},
                         stat=dict(st_mode=(S_IFDIR | 0755),
                                     st_uid=getuid(), st_size=0,
                                     st_mtime=int(time.time())))

        self.containers = cmap   # destroys fs tree cache resistant to misses

    def _parse_path(self, path):    # returns </dir, file(=None)>
        if path.count('/') > 1:     # file
            return str(path[:path.rfind('/')]), str(path[path.rfind('/') + 1:])
        else:                       # dir
            pos = path.rfind('/', 1)
            if pos == -1:
                return path, None
            else:
                return str(path[:pos]), None

    def parse_container(self, path):
        base_container = path[1:]   # /abc/def/g --> abc
        if base_container.find('/') > -1:
            base_container = base_container[:base_container.find('/')]
        return str(base_container)

    def _get_dir(self, path, contents_required=False):
        if not self.containers:
            self.rebuild_container_list()

        if path in self.containers and not (contents_required and \
                self.containers[path]['files'] is None):
            return self.containers[path]

        cname = self.parse_container(path)

        if '/' + cname not in self.containers:
            raise FuseOSError(ENOENT)
        else:
            if self.containers['/' + cname]['files'] is None:
                # fetch contents of container
                log.info("------> CONTENTS NOT FOUND: %s" % cname)

                blobs = self.blobs.list_blobs(cname)

                dirstat = dict(st_mode=(S_IFDIR | 0755), st_size=0,
                               st_uid=getuid(), st_mtime=time.time())

                if self.containers['/' + cname]['files'] is None:
                    self.containers['/' + cname]['files'] = dict()

                for f in blobs:
                    blob_name = f.name
                    blob_date = f.properties.last_modified
                    blob_size = long(f.properties.content_length)

                    node = dict(st_mode=(S_IFREG | 0644), st_size=blob_size,
                                st_mtime=self.convert_to_epoch(blob_date),
                                st_uid=getuid())

                    if blob_name.find('/') == -1:  # file just under container
                        self.containers['/' + cname]['files'][blob_name] = node

            return self.containers['/' + cname]
        return None

    def _get_file(self, path):
        d, f = self._parse_path(path)
        dir = self._get_dir(d, True)
        if dir is not None and f in dir['files']:
            return dir['files'][f]

    def getattr(self, path, fh=None):
        d, f = self._parse_path(path)

        if f is None:
            dir = self._get_dir(d)
            return dir['stat']
        else:
            file = self._get_file(path)

            if file:
                return file

        raise FuseOSError(ENOENT)

    # FUSE
    def mkdir(self, path, mode):
        if path.count('/') <= 1:    # create on root
            name = path[1:]

            if not 3 <= len(name) <= 63:
                log.error("Container names can be 3 through 63 chars long.")
                raise FuseOSError(ENAMETOOLONG)
            if name != name.lower():
                log.error("Container names cannot contain uppercase "
                          "characters.")
                raise FuseOSError(EACCES)
            if name.count('--') > 0:
                log.error("Container names cannot contain consecutive "
                          "dashes (-).")
                raise FuseOSError(EAGAIN)
            #TODO handle all "-"s must be preceded by letter or numbers
            #TODO starts with only letter or number, can contain letter, nr,'-'

            resp = self.blobs.create_container(name)

            if resp:
                self.rebuild_container_list()
                log.info("CONTAINER %s CREATED" % name)
            else:
                log.error("Invalid container name or container already "
                          "exists.")
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)  # TODO support 2nd+ level mkdirs

    def rmdir(self, path):
        if path.count('/') == 1:
            c_name = path[1:]
            resp = self.blobs.delete_container(c_name)

            if resp:
                if path in self.containers:
                    del self.containers[path]
            else:
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)  # TODO support 2nd+ level rmdirs

    def create(self, path, mode):
        node = dict(st_mode=(S_IFREG | mode), st_size=0, st_nlink=1,
                     st_uid=getuid(), st_mtime=time.time())
        d, f = self._parse_path(path)

        if not f:
            log.error("Cannot create files on root level: /")
            raise FuseOSError(ENOSYS)

        dir = self._get_dir(d, True)
        if not dir:
            raise FuseOSError(EIO)
        dir['files'][f] = node

        return self.open(path, data='')     # reusing handler provider

    def open(self, path, flags=0, data=None):
        if data is None:                    # download contents
            c_name = self.parse_container(path)
            f_name = path[path.find('/', 1) + 1:]

            try:
                data = self.blobs.get_blob(c_name, f_name)
            except WindowsAzureMissingResourceError:
                dir = self._get_dir('/' + c_name, True)
                if f_name in dir['files']:
                    del dir['files'][f_name]
                raise FuseOSError(ENOENT)
            except WindowsAzureError as e:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)

        self.fd += 1
        self.fds[self.fd] = (path, data, False)

        return self.fd

    def flush(self, path, fh=None):
        if not fh:
            raise FuseOSError(EIO)
        else:
            if fh not in self.fds:
                raise FuseOSError(EIO)
            path = self.fds[fh][0]
            data = self.fds[fh][1]
            dirty = self.fds[fh][2]

            if not dirty:
                return 0     # avoid redundant write

            d, f = self._parse_path(path)
            c_name = self.parse_container(path)

            if data is None:
                data = ''

            try:
                if len(data) < 64 * 1024 * 1024:   # 64 mb
                    self.blobs.put_blob(c_name, f, data, 'BlockBlob')
                else:
                    # divide file by blocks and upload
                    block_size = 8 * 1024 * 1024
                    num_blocks = int(math.ceil(len(data) * 1.0 / block_size))
                    rd = str(random.randint(1, 1e8))
                    block_ids = list()

                    for i in range(num_blocks):
                        part = data[i * block_size:min((i + 1) * block_size,
                            len(data))]
                        block_id = base64.encodestring('%s_%s' % (rd,
                            (8 - len(str(i))) * '0' + str(i)))
                        self.blobs.put_block(c_name, f, part, block_id)
                        block_ids.append(block_id)

                    self.blobs.put_block_list(c_name, f, block_ids)
            except WindowsAzureError:
                raise FuseOSError(EAGAIN)

            dir = self._get_dir(d, True)
            if not dir or f not in dir['files']:
                raise FuseOSError(EIO)

            # update local data
            dir['files'][f]['st_size'] = len(data)
            dir['files'][f]['st_mtime'] = time.time()
            self.fds[fh] = (path, data, False)  # mark as not dirty
            return 0

    def release(self, path, fh=None):
        if fh is not None and fh in self.fds:
            del self.fds[fh]

    def truncate(self, path, length, fh=None):
        return 0     # assume done, no need

    def write(self, path, data, offset, fh=None):
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)
        else:
            d = self.fds[fh][1]
            if d is None:
                d = ""
            self.fds[fh] = (self.fds[fh][0], d[:offset] + data, True)
            return len(data)

    def unlink(self, path):
        c_name = self.parse_container(path)
        d, f = self._parse_path(path)

        try:
            self.blobs.delete_blob(c_name, f)

            _dir = self._get_dir(path, True)
            if _dir and f in _dir['files']:
                del _dir['files'][f]
            return 0
        except WindowsAzureMissingResourceError:
            raise FuseOSError(ENOENT)
        except Exception as e:
            raise FuseOSError(EAGAIN)

    def readdir(self, path, fh):
        if path == '/':
            return ['.', '..'] + [x[1:] for x in self.containers.keys()
                    if x != '/']

        dir = self._get_dir(path, True)
        if not dir:
            raise FuseOSError(ENOENT)
        return ['.', '..'] + dir['files'].keys()

    def read(self, path, size, offset, fh):
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)

        f_name = path[path.find('/', 1) + 1:]
        c_name = path[1:path.find('/', 1)]

        try:
            data = self.blobs.get_blob(c_name, f_name)
            self.fds[fh] = (self.fds[fh][0], data, False)
            return data[offset:offset + size]
        except URLError, e:
            if e.code == 404:
                raise FuseOSError(ENOENT)
            elif e.code == 403:
                raise FuseOSError(EPERM)
            else:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)
        data = self.fds[fh][1]
        if data is None:
            data = ""
        return data[offset:offset + size]
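
A plausible entry point for mounting this filesystem with fusepy; the mount point and environment variable names are assumptions:

# Hypothetical mount script; requires the fusepy package.
import os
from fuse import FUSE

if __name__ == '__main__':
    fs = AzureFS(os.environ['AZURE_ACCOUNT'], os.environ['AZURE_KEY'])
    FUSE(fs, '/mnt/azure', foreground=True, nothreads=True)
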
Code example #7
class AzureStorageBlockDeviceAPI(object):
    """
    An ``IBlockDeviceAsyncAPI`` which uses Azure Storage Backed Block Devices
    Current Support: Azure SMS API
    """

    def __init__(self, **azure_config):
        """
        :param ServiceManagement azure_client: an instance of the azure
        service management api client.
        :param String service_name: The name of the cloud service
        :param
            names of Azure volumes to identify cluster
        :returns: A ``BlockDeviceVolume``.
        """
        self._instance_id = self.compute_instance_id()
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        if azure_config['debug']:
            to_file(sys.stdout)

    def allocation_unit(self):
        """
        1GiB is the minimum allocation unit for azure disks
        return int: 1 GiB
        """

        return int(GiB(1).to_Byte().value)

    def compute_instance_id(self):
        """
        Azure stores a UUID in the SDC kernel module.
        """

        # Node host names should be unique within a vnet

        return unicode(socket.gethostname())

    def create_volume(self, dataset_id, size):
        """
        Create a new volume.
        :param UUID dataset_id: The Flocker dataset ID of the dataset on this
            volume.
        :param int size: The size of the new volume in bytes.
        :returns: A ``Deferred`` that fires with a ``BlockDeviceVolume`` when
            the volume has been created.
        """

        size_in_gb = Byte(size).to_GiB().value

        if size_in_gb % 1 != 0:
            raise UnsupportedVolumeSize(dataset_id)

        self._create_volume_blob(size, dataset_id)

        label = self._disk_label_for_dataset_id(str(dataset_id))
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=size,
            attached_to=None,
            dataset_id=self._dataset_id_for_disk_label(label))

    def destroy_volume(self, blockdevice_id):
        """
        Destroy an existing volume.
        :param unicode blockdevice_id: The unique identifier for the volume to
            destroy.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :return: ``None``
        """
        log_info('Destroying block device: ' + str(blockdevice_id))
        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:

            raise UnknownVolume(blockdevice_id)

        request = None

        if lun is not None:
            request = \
                self._azure_service_client.delete_data_disk(
                    service_name=self._service_name,
                    deployment_name=self._service_name,
                    role_name=target_disk.attached_to.role_name,
                    lun=lun,
                    delete_vhd=True)
        else:
            if target_disk.__class__.__name__ == 'Blob':
                # unregistered disk
                self._azure_storage_client.delete_blob(
                    self._disk_container_name, target_disk.name)
            else:
                request = self._azure_service_client.delete_disk(
                    target_disk.name, True)

        if request is not None:
            self._wait_for_async(request.request_id, 5000)
            self._wait_for_detach(blockdevice_id)

    def attach_volume(self, blockdevice_id, attach_to):
        """
        Attach ``blockdevice_id`` to ``host``.
        :param unicode blockdevice_id: The unique identifier for the block
            device being attached.
        :param unicode attach_to: An identifier like the one returned by the
            ``compute_instance_id`` method indicating the node to which to
            attach the volume.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
            already attached.
        :returns: A ``BlockDeviceVolume`` with a ``host`` attribute set to
            ``host``.
        """

        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        if lun is not None:
            raise AlreadyAttachedVolume(blockdevice_id)

        log_info('Attempting to attach ' + str(blockdevice_id)
                 + ' to ' + str(attach_to))

        disk_size = self._attach_disk(blockdevice_id, target_disk, attach_to)

        self._wait_for_attach(blockdevice_id)

        log_info('disk attached')

        return self._blockdevicevolume_from_azure_volume(blockdevice_id,
                                                         disk_size,
                                                         attach_to)

    def detach_volume(self, blockdevice_id):
        """
        Detach ``blockdevice_id`` from whatever host it is attached to.
        :param unicode blockdevice_id: The unique identifier for the block
            device being detached.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to anything.
        :returns: ``None``
        """

        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        # contrary to the function name, it doesn't delete by default, just detaches

        request = \
            self._azure_service_client.delete_data_disk(
                service_name=self._service_name,
                deployment_name=self._service_name,
                role_name=role_name, lun=lun)

        self._wait_for_async(request.request_id, 5000)

        self._wait_for_detach(blockdevice_id)

    def get_device_path(self, blockdevice_id):
        """
        Return the device path that has been allocated to the block device on
        the host to which it is currently attached.
        :param unicode blockdevice_id: The unique identifier for the block
            device.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to a host.
        :returns: A ``FilePath`` for the device.
        """

        (target_disk_or_blob, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk_or_blob is None:
            raise UnknownVolume(blockdevice_id)

        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        return Lun.get_device_path_for_lun(lun)

    def list_volumes(self):
        """
        List all the block devices available via the back end API.
        :returns: A ``list`` of ``BlockDeviceVolume``s.
        """
        media_url_prefix = 'https://' + self._storage_account_name \
            + '.blob.core.windows.net/' + self._disk_container_name
        disks = self._azure_service_client.list_disks()
        disk_list = []
        all_blobs = self._get_flocker_blobs()
        for d in disks:

            if media_url_prefix not in d.media_link or \
                    'flocker-' not in d.label:
                continue

            role_name = None

            if d.attached_to is not None \
                    and d.attached_to.role_name is not None:
                role_name = d.attached_to.role_name

            disk_list.append(self._blockdevicevolume_from_azure_volume(
                d.label, self._gibytes_to_bytes(d.logical_disk_size_in_gb),
                role_name))

            if d.label in all_blobs:
                del all_blobs[d.label]

        for key in all_blobs:
            # include unregistered 'disk' blobs
            disk_list.append(self._blockdevicevolume_from_azure_volume(
                all_blobs[key].name,
                all_blobs[key].properties.content_length,
                None))

        return disk_list

    def _attach_disk(
            self,
            blockdevice_id,
            target_disk,
            attach_to):

        """
        Attaches disk to specified VM
        :param string blockdevice_id: The identifier of the disk
        :param DataVirtualHardDisk/Blob target_disk: The Blob
               or Disk to be attached
        :returns int: The size of the attached disk
        """

        lun = Lun.compute_next_lun(
            self._azure_service_client,
            self._service_name,
            str(attach_to))
        common_params = {
            'service_name': self._service_name,
            'deployment_name': self._service_name,
            'role_name': attach_to,
            'lun': lun
        }
        disk_size = None

        if target_disk.__class__.__name__ == 'Blob':
            # exclude 512 byte footer
            disk_size = target_disk.properties.content_length

            common_params['source_media_link'] = \
                'https://' + self._storage_account_name \
                + '.blob.core.windows.net/' + self._disk_container_name \
                + '/' + blockdevice_id

            common_params['disk_label'] = blockdevice_id

        else:

            disk_size = self._gibytes_to_bytes(
                target_disk.logical_disk_size_in_gb)

            common_params['disk_name'] = target_disk.name

        request = self._azure_service_client.add_data_disk(**common_params)
        self._wait_for_async(request.request_id, 5000)

        return disk_size

    def _create_volume_blob(self, size, dataset_id):
        # Create a new page blob as a blank disk
        self._azure_storage_client.put_blob(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            blob=None,
            x_ms_blob_type='PageBlob',
            x_ms_blob_content_type='application/octet-stream',
            x_ms_blob_content_length=size)

        # for disk to be a valid vhd it requires a vhd footer
        # on the last 512 bytes
        vhd_footer = Vhd.generate_vhd_footer(size)

        self._azure_storage_client.put_page(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            page=vhd_footer,
            x_ms_page_write='update',
            x_ms_range='bytes=' + str((size - 512)) + '-' + str(size - 1))

    def _disk_label_for_dataset_id(self, dataset_id):
        """
        Returns a disk label for a given Dataset ID
        :param unicode dataset_id: The identifier of the dataset
        :returns string: A string representing the disk label
        """

        label = 'flocker-' + str(dataset_id)
        return label

    def _dataset_id_for_disk_label(self, disk_label):
        """
        Returns a UUID representing the Dataset ID for the given disk
        label
        :param string disk_label: The disk label
        :returns UUID: The UUID of the dataset
        """
        return UUID(disk_label.replace('flocker-', ''))

    def _get_disk_vmname_lun(self, blockdevice_id):
        target_disk = None
        target_lun = None
        role_name = None
        disk_list = self._azure_service_client.list_disks()

        for d in disk_list:

            if 'flocker-' not in d.label:
                continue
            if d.label == str(blockdevice_id):
                target_disk = d
                break

        if target_disk is None:
            # check for unregistered disk
            blobs = self._get_flocker_blobs()
            blob = None

            if str(blockdevice_id) in blobs:
                blob = blobs[str(blockdevice_id)]

            return blob, None, None

        vm_info = None

        if hasattr(target_disk.attached_to, 'role_name'):
            vm_info = self._azure_service_client.get_role(
                self._service_name, self._service_name,
                target_disk.attached_to.role_name)

            for d in vm_info.data_virtual_hard_disks:
                if d.disk_name == target_disk.name:
                    target_lun = d.lun
                    break

            role_name = target_disk.attached_to.role_name

        return (target_disk, role_name, target_lun)

    def _get_flocker_blobs(self):
        all_blobs = {}

        blobs = self._azure_storage_client.list_blobs(
            self._disk_container_name,
            prefix='flocker-')

        for b in blobs:
            # todo - this could be big!
            all_blobs[b.name] = b

        return all_blobs

    def _wait_for_detach(self, blockdevice_id):
        role_name = ''
        lun = -1

        timeout_count = 0

        log_info('waiting for azure to report disk as detached...')

        while role_name is not None or lun is not None:
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(1)
            timeout_count += 1

            if timeout_count > 5000:
                raise AsynchronousTimeout()

        log_info('Disk Detached')

    def _wait_for_attach(self, blockdevice_id):
        timeout_count = 0
        lun = None

        log_info('waiting for azure to report disk as attached...')

        while lun is None:
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(.001)
            timeout_count += 1

            if timeout_count > 5000:
                raise AsynchronousTimeout()

    def _wait_for_async(self, request_id, timeout):
        count = 0
        result = self._azure_service_client.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > timeout:
                log_error('Timed out waiting for async operation to complete.')
                raise AsynchronousTimeout()
            time.sleep(.001)
            log_info('.')
            result = self._azure_service_client.get_operation_status(
                request_id)
            if result.error:
                log_error(result.error.code)
                log_error(str(result.error.message))

        log_error(result.status + ' in ' + str(count * 5) + 's')

    def _gibytes_to_bytes(self, size):

        return int(GiB(size).to_Byte().value)

    def _blockdevicevolume_from_azure_volume(self, label, size,
                                             attached_to_name):

        # azure will report the disk size excluding the 512 byte footer
        # however flocker expects the exact value it requested for disk size
        # so offset the reported size to flocker by 512 bytes
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=int(size),
            attached_to=attached_to_name,
            dataset_id=self._dataset_id_for_disk_label(label)
        )  # disk labels are formatted as flocker-<data_set_id>
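
A sketch of constructing the driver and creating a 1 GiB volume; every value is a placeholder matching the keys read in __init__, and GiB is the same bitmath-style helper used in allocation_unit above:

# Hypothetical construction; all credentials are placeholders.
from uuid import uuid4

api = AzureStorageBlockDeviceAPI(
    subscription_id='00000000-0000-0000-0000-000000000000',
    management_certificate_path='/etc/flocker/azure.pem',
    service_name='my-cloud-service',
    storage_account_name='myaccount',
    storage_account_key='base64-account-key',
    disk_container_name='flocker',
    debug=False)
volume = api.create_volume(uuid4(), int(GiB(1).to_Byte().value))
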
Code example #8
class Command(BaseCommand):
    help = "Synchronizes static media to cloud files."

    option_list = BaseCommand.option_list + (
        optparse.make_option('-w', '--wipe',
            action='store_true', dest='wipe', default=False,
            help="Wipes out entire contents of container first."),
        optparse.make_option('-t', '--test-run',
            action='store_true', dest='test_run', default=False,
            help="Performs a test run of the sync."),
        optparse.make_option('-c', '--container',
            dest='container', help="Override STATIC_CONTAINER."),
    )

    # settings from azurite.settings
    ACCOUNT_NAME     = AZURITE['ACCOUNT_NAME']
    ACCOUNT_KEY      = AZURITE['ACCOUNT_KEY']
    STATIC_CONTAINER = AZURITE['STATIC_CONTAINER']

    # paths
    DIRECTORY        = os.path.abspath(settings.STATIC_ROOT)
    STATIC_URL       = settings.STATIC_URL

    if not DIRECTORY.endswith('/'):
        DIRECTORY = DIRECTORY + '/'

    if STATIC_URL.startswith('/'):
        STATIC_URL = STATIC_URL[1:]

    local_object_names = []
    create_count = 0
    upload_count = 0
    update_count = 0
    skip_count = 0
    delete_count = 0
    service = None

    def handle(self, *args, **options):
        self.wipe = options.get('wipe')
        self.test_run = options.get('test_run')
        self.verbosity = int(options.get('verbosity'))
        if options.get('container'):
            self.STATIC_CONTAINER = options.get('container')
        self.sync_files()

    def sync_files(self):
        self.service = BlobService(account_name=self.ACCOUNT_NAME,
            account_key=self.ACCOUNT_KEY)

        try:
            self.service.get_container_properties(self.STATIC_CONTAINER)
        except WindowsAzureMissingResourceError:
            self.service.create_container(self.STATIC_CONTAINER,
                x_ms_blob_public_access='blob')

        self.service.set_container_acl(self.STATIC_CONTAINER, x_ms_blob_public_access='blob')

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            blob_count = len(self.service.list_blobs(self.STATIC_CONTAINER))

            if self.test_run:
                print "Wipe would delete %d objects." % blob_count
            else:
                print "Deleting %d objects..." % blob_count
                for blob in self.service.list_blobs(self.STATIC_CONTAINER):
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count
        print
        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count, self.delete_count)

    def upload_files(self, arg, dirname, names):
        # upload or skip items
        for item in names:
            file_path = os.path.join(dirname, item)
            if os.path.isdir(file_path):
                continue # Don't try to upload directories

            object_name = self.STATIC_URL + file_path.split(self.DIRECTORY)[1]
            self.local_object_names.append(object_name)

            try:
                properties = self.service.get_blob_properties(self.STATIC_CONTAINER,
                    object_name)
            except WindowsAzureMissingResourceError:
                properties = {}
                self.create_count += 1

            cloud_datetime = None
            if 'last-modified' in properties:
                cloud_datetime = (properties['last-modified'] and
                                  datetime.datetime.strptime(
                                    properties['last-modified'],
                                    "%a, %d %b %Y %H:%M:%S %Z"
                                  ) or None)

            local_datetime = datetime.datetime.utcfromtimestamp(
                                               os.stat(file_path).st_mtime)

            if cloud_datetime and local_datetime < cloud_datetime:
                self.skip_count += 1
                if self.verbosity > 1:
                    print "Skipped %s: not modified." % object_name
                continue

            if not self.test_run:
                file_contents = open(file_path, 'rb').read()
                content_type, encoding = mimetypes.guess_type(file_path)
                print "content-type", content_type
                print "encoding", encoding
                self.service.put_blob(self.STATIC_CONTAINER, object_name, file_contents,
                    x_ms_blob_type='BlockBlob', x_ms_blob_content_type=content_type,
                    content_encoding=encoding)
                # sync_headers(cloud_obj)
            self.upload_count += 1
            if self.verbosity > 1:
                print "Uploaded", object_name

    def delete_files(self):
        # remove any objects in the container that don't exist locally
        for blob in self.service.list_blobs(self.STATIC_CONTAINER):
            if blob.name not in self.local_object_names:
                self.delete_count += 1
                if self.verbosity > 1:
                    print "Deleted %s" % blob.name
                if not self.test_run:
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)
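
The command can also be driven from code; the command name 'syncstatic' is an assumption based on the django-azurite project:

# Hypothetical invocation; performs a dry run at verbosity 2.
from django.core.management import call_command

call_command('syncstatic', test_run=True, verbosity=2)
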
Code example #9
class Storage(driver.Base):

    supports_bytes_range = True

    def __init__(self, path=None, config=None):
        self._config = config
        self._container = self._config.azure_storage_container

        protocol = 'https' if self._config.azure_use_https else 'http'
        acct_name = self._config.azure_storage_account_name
        acct_key = self._config.azure_storage_account_key
        self._blob = BlobService(
            account_name=acct_name, account_key=acct_key, protocol=protocol)

        self._init_container()
        logger.debug("Initialized azureblob storage driver")

    def _init_container(self):
        '''Initializes image container on Azure blob storage if the container
        does not exist.
        '''
        created = self._blob.create_container(
            self._container, x_ms_blob_public_access='blob',
            fail_on_exist=False)
        if created:
            logger.info('Created blob container for image registry.')
        else:
            logger.debug('Registry container already exists.')
        return created

    @lru.get
    def get_content(self, path):
        try:
            return self._blob.get_blob(self._container, path)
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    @lru.set
    def put_content(self, path, content):
        self._blob.put_blob(self._container, path, content, 'BlockBlob')
        return path

    def stream_read(self, path, bytes_range=None):
        try:
            f = io.BytesIO()
            self._blob.get_blob_to_file(self._container, path, f)

            if bytes_range:
                f.seek(bytes_range[0])
                total_size = bytes_range[1] - bytes_range[0] + 1
                nb_bytes = 0  # bytes served from the requested range so far
            else:
                f.seek(0)

            while True:
                buf = None
                if bytes_range:
                    # Bytes Range is enabled
                    buf_size = self.buffer_size
                    if nb_bytes + buf_size > total_size:
                        # We make sure we don't read out of the range
                        buf_size = total_size - nb_bytes
                    if buf_size > 0:
                        buf = f.read(buf_size)
                        nb_bytes += len(buf)
                    else:
                        # We're at the end of the range
                        buf = ''
                else:
                    buf = f.read(self.buffer_size)

                if not buf:
                    break

                yield buf
        except IOError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    def stream_write(self, path, fp):
        self._blob.put_block_blob_from_file(self._container, path, fp)

    def list_directory(self, path=None):
        if path and not path.endswith('/'):
            path += '/'  # path=a would list a/b.txt as well as 'abc.txt'

        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        return [b.name for b in blobs]

    def exists(self, path):
        try:
            self._blob.get_blob_properties(self._container, path)
            return True
        except azure.WindowsAzureMissingResourceError:
            return False

    @lru.remove
    def remove(self, path):
        is_blob = self.exists(path)
        if is_blob:
            self._blob.delete_blob(self._container, path)
            return

        exists = False
        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        for b in blobs:
            self._blob.delete_blob(self._container, b.name)

    def get_size(self, path):
        try:
            properties = self._blob.get_blob_properties(self._container, path)
            return int(properties['content-length'])  # auto-converted to long
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)
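
A hedged read-side sketch; the config object and blob path are placeholders. stream_read yields chunks of self.buffer_size, honouring an optional inclusive bytes_range:

# Hypothetical consumer of the driver's streaming read.
store = Storage(config=my_config)  # my_config: a placeholder settings object
with open('/tmp/layer.tar', 'wb') as out:
    for chunk in store.stream_read('images/abc123/layer'):
        out.write(chunk)
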
Code example #10
def getContainersWithBlobs(blob_service):
    """Print every container in the account together with its
    blobs.

    blob_service: name of the blob management service @class BlobService
    """
    for i in blob_service.list_containers().containers:
        print("Container name: {}".format(i.name))
        print("Container URL: {}".format(i.url))
        print("##############################")
        for j in blob_service.list_blobs(i.name).blobs:
            print("\tBlob name: {}".format(j.name))
            print("\tBlob URL: {}".format(j.url))
            print("\t------------------------------")

blob_service = BlobService(credentials.account_name, credentials.account_key)

getContainersWithBlobs(blob_service)


f_blob = open('Ejercicio10.txt', "w")
for i in blob_service.list_containers().containers:
    f_blob.write("Container name: {}\n".format(i.name))
    f_blob.write("Container URL: {}\n".format(i.url))
    f_blob.write("##############################\n")
    for j in blob_service.list_blobs(i.name).blobs:
        f_blob.write("\tBlob name: {}\n".format(j.name))
        f_blob.write("\tBlob URL: {}\n".format(j.url))
        f_blob.write("\t------------------------------\n")
f_blob.close()

# upload the report file's contents as a block blob (put_blob expects bytes)
blob_service.put_blob('code', 'f_blob.txt',
                      open('Ejercicio10.txt', 'rb').read(), 'BlockBlob')
Code example #11
class Storage(driver.Base):

    supports_bytes_range = True

    def __init__(self, path=None, config=None):
        self._config = config
        self._container = self._config.azure_storage_container

        protocol = 'https' if self._config.azure_use_https else 'http'
        acct_name = self._config.azure_storage_account_name
        acct_key = self._config.azure_storage_account_key
        self._blob = BlobService(account_name=acct_name,
                                 account_key=acct_key,
                                 protocol=protocol)

        self._init_container()
        logger.debug("Initialized azureblob storage driver")

    def _init_container(self):
        '''Initializes image container on Azure blob storage if the container
        does not exist.
        '''
        created = self._blob.create_container(self._container,
                                              x_ms_blob_public_access='blob',
                                              fail_on_exist=False)
        if created:
            logger.info('Created blob container for image registry.')
        else:
            logger.debug('Registry container already exists.')
        return created

    @lru.get
    def get_content(self, path):
        try:
            return self._blob.get_blob(self._container, path)
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    @lru.set
    def put_content(self, path, content):
        self._blob.put_blob(self._container, path, content, 'BlockBlob')
        return path

    def stream_read(self, path, bytes_range=None):
        try:
            f = io.BytesIO()
            self._blob.get_blob_to_file(self._container, path, f)

            if bytes_range:
                f.seek(bytes_range[0])
                total_size = bytes_range[1] - bytes_range[0] + 1
                nb_bytes = 0  # bytes served from the requested range so far
            else:
                f.seek(0)

            while True:
                buf = None
                if bytes_range:
                    # Bytes Range is enabled
                    buf_size = self.buffer_size
                    if nb_bytes + buf_size > total_size:
                        # We make sure we don't read out of the range
                        buf_size = total_size - nb_bytes
                    if buf_size > 0:
                        buf = f.read(buf_size)
                        nb_bytes += len(buf)
                    else:
                        # We're at the end of the range
                        buf = ''
                else:
                    buf = f.read(self.buffer_size)

                if not buf:
                    break

                yield buf
        except IOError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    def stream_write(self, path, fp):
        self._blob.put_block_blob_from_file(self._container, path, fp)

    def list_directory(self, path=None):
        if path and not path.endswith('/'):
            path += '/'  # path=a would list a/b.txt as well as 'abc.txt'

        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        return [b.name for b in blobs]

    def exists(self, path):
        try:
            self._blob.get_blob_properties(self._container, path)
            return True
        except azure.WindowsAzureMissingResourceError:
            return False

    @lru.remove
    def remove(self, path):
        is_blob = self.exists(path)
        if is_blob:
            self._blob.delete_blob(self._container, path)
            return

        exists = False
        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        for b in blobs:
            self._blob.delete_blob(self._container, b.name)

    def get_size(self, path):
        try:
            properties = self._blob.get_blob_properties(self._container, path)
            return int(properties['content-length'])  # auto-converted to long
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)
Code example #12
def generate_and_upload(gauge_factory, config, logger):
    start = datetime.datetime.now()
    twitter_followers = gauge_factory('twitter.followers')
    twitter_tweets = gauge_factory('twitter.tweets')
    fb_friends = gauge_factory('facebook.friends')
    foursq_checkins = gauge_factory('foursquare.checkins')
    klout_score = gauge_factory('klout.score')
    runkeeper_activities = gauge_factory('runkeeper.activities')
    runkeeper_calories = gauge_factory('runkeeper.calories_burned')
    runkeeper_weight = gauge_factory('runkeeper.weight')
    tmp102_celsius = gauge_factory('tmp102.temperature', gauge_type='hourly')
    lastfm_listened = gauge_factory('lastfm.listened')
    jawbone_sleeps = gauge_factory('jawbone.sleeps')
    jawbone_heartrate = gauge_factory('jawbone.resting_heartrate')
    jawbone_steps = gauge_factory('jawbone.steps')
    jawbone_caffeine = gauge_factory('jawbone.caffeine')

    data = {}
    data_sources = [
        # (output key, gauge, days back, aggregator, postprocessors)
        ('twitter.followers', twitter_followers, 30, None,
         [zero_fill_daily, interpolators.linear]),
        ('twitter.tweets', twitter_tweets, 20, None, [zero_fill_daily]),
        ('facebook.friends', fb_friends, 180, monthly_max, None),
        ('foursquare.checkins', foursq_checkins, 14, None, [zero_fill_daily]),
        ('lastfm.listened', lastfm_listened, 14, None, [zero_fill_daily]),
        ('klout.score', klout_score, 30, weekly_max,
         [zero_fill_weekly, interpolators.linear]),
        ('runkeeper.calories', runkeeper_calories, 60, weekly_sum,
         [zero_fill_weekly]),
        ('runkeeper.activities', runkeeper_activities, 60, weekly_sum,
         [zero_fill_weekly]),
        ('runkeeper.weight', runkeeper_weight, 180, weekly_min,
         [zero_fill_weekly, interpolators.linear]),
        ('sleeps', jawbone_sleeps, 14, None,
         [zero_fill_daily, interpolators.linear]),
        ('heartrate', jawbone_heartrate, 21, None,
         [zero_fill_daily, interpolators.linear]),
        ('steps', jawbone_steps, 14, None,
         [zero_fill_daily, interpolators.linear]),
        ('caffeine', jawbone_caffeine, 30, None, [zero_fill_daily]),
        ('tmp102.temperature', tmp102_celsius, 2.5, None, None)
    ]

    for ds in data_sources:
        data[ds[0]] = ds[1].aggregate(today_utc() - timedelta(days=ds[2]),
                                      aggregator=ds[3],
                                      post_processors=ds[4])

    report = {
        'generated': str(now_utc()),
        'data': data,
        'took': (datetime.datetime.now() - start).seconds
    }
    report_json = json.dumps(report, indent=4, default=json_date_serializer)
    report_content = '{0}({1})'.format(JSONP_CALLBACK_NAME, report_json)

    blob_service = BlobService(config['azure.account'], config['azure.key'])
    blob_service.create_container(config['azure.blob.container'])
    blob_service.set_container_acl(config['azure.blob.container'],
                                   x_ms_blob_public_access='container')
    blob_service.put_blob(config['azure.blob.container'],
                          config['azure.blob.name'], report_content,
                          'BlockBlob')

    took = (datetime.datetime.now() - start).seconds
    logger.info('Report generated and uploaded. Took {0} s.'.format(took))
Code example #13
class AzureStorageBlockDeviceAPI(object):
    """
    An ``IBlockDeviceAsyncAPI`` which uses Azure Storage Backed Block Devices
    Current Support: Azure SMS API
    """
    def __init__(self, **azure_config):
        """
        :param ServiceManagement azure_client: an instance of the azure
        service management api client.
        :param String service_name: The name of the cloud service
        :param
            names of Azure volumes to identify cluster
        :returns: A ``BlockDeviceVolume``.
        """
        self._instance_id = self.compute_instance_id()
        self._azure_service_client = ServiceManagementService(
            azure_config['subscription_id'],
            azure_config['management_certificate_path'])
        self._service_name = azure_config['service_name']
        self._azure_storage_client = BlobService(
            azure_config['storage_account_name'],
            azure_config['storage_account_key'])
        self._storage_account_name = azure_config['storage_account_name']
        self._disk_container_name = azure_config['disk_container_name']

        if azure_config['debug']:
            to_file(sys.stdout)

    def allocation_unit(self):
        """
        1GiB is the minimum allocation unit for azure disks
        return int: 1 GiB
        """

        return int(GiB(1).to_Byte().value)

    def compute_instance_id(self):
        """
        Azure stores a UUID in the SDC kernel module.
        """

        # Node host names should be unique within a vnet

        return unicode(socket.gethostname())

    def create_volume(self, dataset_id, size):
        """
        Create a new volume.
        :param UUID dataset_id: The Flocker dataset ID of the dataset on this
            volume.
        :param int size: The size of the new volume in bytes.
        :returns: A ``Deferred`` that fires with a ``BlockDeviceVolume`` when
            the volume has been created.
        """

        size_in_gb = Byte(size).to_GiB().value

        if size_in_gb % 1 != 0:
            raise UnsupportedVolumeSize(dataset_id)

        self._create_volume_blob(size, dataset_id)

        label = self._disk_label_for_dataset_id(str(dataset_id))
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=size,
            attached_to=None,
            dataset_id=self._dataset_id_for_disk_label(label))

    def destroy_volume(self, blockdevice_id):
        """
        Destroy an existing volume.
        :param unicode blockdevice_id: The unique identifier for the volume to
            destroy.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :return: ``None``
        """
        log_info('Destroying block device: ' + str(blockdevice_id))
        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        request = None

        if lun is not None:
            request = \
                self._azure_service_client.delete_data_disk(
                    service_name=self._service_name,
                    deployment_name=self._service_name,
                    role_name=target_disk.attached_to.role_name,
                    lun=lun,
                    delete_vhd=True)
        else:
            if target_disk.__class__.__name__ == 'Blob':
                # unregistered disk
                self._azure_storage_client.delete_blob(
                    self._disk_container_name, target_disk.name)
            else:
                request = self._azure_service_client.delete_disk(
                    target_disk.name, True)

        if request is not None:
            self._wait_for_async(request.request_id, 5000)
            self._wait_for_detach(blockdevice_id)

    def attach_volume(self, blockdevice_id, attach_to):
        """
        Attach ``blockdevice_id`` to the node identified by ``attach_to``.
        :param unicode blockdevice_id: The unique identifier for the block
            device being attached.
        :param unicode attach_to: An identifier like the one returned by the
            ``compute_instance_id`` method indicating the node to which to
            attach the volume.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises AlreadyAttachedVolume: If the supplied ``blockdevice_id`` is
            already attached.
        :returns: A ``BlockDeviceVolume`` with ``attached_to`` set to
            ``attach_to``.
        """

        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        if lun is not None:
            raise AlreadyAttachedVolume(blockdevice_id)

        log_info('Attempting to attach ' + str(blockdevice_id) + ' to ' +
                 str(attach_to))

        disk_size = self._attach_disk(blockdevice_id, target_disk, attach_to)

        self._wait_for_attach(blockdevice_id)

        log_info('disk attached')

        return self._blockdevicevolume_from_azure_volume(
            blockdevice_id, disk_size, attach_to)

    def detach_volume(self, blockdevice_id):
        """
        Detach ``blockdevice_id`` from whatever host it is attached to.
        :param unicode blockdevice_id: The unique identifier for the block
            device being detached.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to anything.
        :returns: ``None``
        """

        (target_disk, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk is None:
            raise UnknownVolume(blockdevice_id)

        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        # Contrary to its name, delete_data_disk does not delete the VHD
        # by default; it just detaches the disk.

        request = \
            self._azure_service_client.delete_data_disk(
                service_name=self._service_name,
                deployment_name=self._service_name,
                role_name=role_name, lun=lun)

        self._wait_for_async(request.request_id, 5000)

        self._wait_for_detach(blockdevice_id)

    def get_device_path(self, blockdevice_id):
        """
        Return the device path that has been allocated to the block device on
        the host to which it is currently attached.
        :param unicode blockdevice_id: The unique identifier for the block
            device.
        :raises UnknownVolume: If the supplied ``blockdevice_id`` does not
            exist.
        :raises UnattachedVolume: If the supplied ``blockdevice_id`` is
            not attached to a host.
        :returns: A ``FilePath`` for the device.
        """

        (target_disk_or_blob, role_name, lun) = \
            self._get_disk_vmname_lun(blockdevice_id)

        if target_disk_or_blob is None:
            raise UnknownVolume(blockdevice_id)

        if lun is None:
            raise UnattachedVolume(blockdevice_id)

        return Lun.get_device_path_for_lun(lun)

    def list_volumes(self):
        """
        List all the block devices available via the back end API.
        :returns: A ``list`` of ``BlockDeviceVolume``s.
        """
        media_url_prefix = 'https://' + self._storage_account_name \
            + '.blob.core.windows.net/' + self._disk_container_name
        disks = self._azure_service_client.list_disks()
        disk_list = []
        all_blobs = self._get_flocker_blobs()
        for d in disks:
            if media_url_prefix not in d.media_link or \
                    'flocker-' not in d.label:
                continue

            role_name = None

            if d.attached_to is not None \
                    and d.attached_to.role_name is not None:
                role_name = d.attached_to.role_name

            disk_list.append(
                self._blockdevicevolume_from_azure_volume(
                    d.label, self._gibytes_to_bytes(d.logical_disk_size_in_gb),
                    role_name))

            if d.label in all_blobs:
                del all_blobs[d.label]

        for key in all_blobs:
            # include unregistered 'disk' blobs
            disk_list.append(
                self._blockdevicevolume_from_azure_volume(
                    all_blobs[key].name,
                    all_blobs[key].properties.content_length, None))

        return disk_list

    def _attach_disk(self, blockdevice_id, target_disk, attach_to):
        """
        Attaches disk to specified VM
        :param string blockdevice_id: The identifier of the disk
        :param DataVirtualHardDisk/Blob target_disk: The Blob
               or Disk to be attached
        :returns int: The size of the attached disk
        """

        lun = Lun.compute_next_lun(self._azure_service_client,
                                   self._service_name, str(attach_to))
        common_params = {
            'service_name': self._service_name,
            'deployment_name': self._service_name,
            'role_name': attach_to,
            'lun': lun
        }
        disk_size = None

        if target_disk.__class__.__name__ == 'Blob':
            # exclude 512 byte footer
            disk_size = target_disk.properties.content_length

            common_params['source_media_link'] = \
                'https://' + self._storage_account_name \
                + '.blob.core.windows.net/' + self._disk_container_name \
                + '/' + blockdevice_id

            common_params['disk_label'] = blockdevice_id

        else:

            disk_size = self._gibytes_to_bytes(
                target_disk.logical_disk_size_in_gb)

            common_params['disk_name'] = target_disk.name

        request = self._azure_service_client.add_data_disk(**common_params)
        self._wait_for_async(request.request_id, 5000)

        return disk_size

    def _create_volume_blob(self, size, dataset_id):
        # Create a new page blob as a blank disk
        self._azure_storage_client.put_blob(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            blob=None,
            x_ms_blob_type='PageBlob',
            x_ms_blob_content_type='application/octet-stream',
            x_ms_blob_content_length=size)

        # For the blob to be a valid fixed VHD it needs a VHD footer
        # in its last 512 bytes.
        vhd_footer = Vhd.generate_vhd_footer(size)

        self._azure_storage_client.put_page(
            container_name=self._disk_container_name,
            blob_name=self._disk_label_for_dataset_id(dataset_id),
            page=vhd_footer,
            x_ms_page_write='update',
            x_ms_range='bytes=' + str((size - 512)) + '-' + str(size - 1))
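        # Worked example (illustrative numbers): for a 1 GiB volume,
        # size = 1073741824, so the footer occupies the final 512-byte
        # page and x_ms_range is 'bytes=1073741312-1073741823'.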

    def _disk_label_for_dataset_id(self, dataset_id):
        """
        Returns a disk label for a given Dataset ID
        :param unicode dataset_id: The identifier of the dataset
        :returns string: A string representing the disk label
        """

        label = 'flocker-' + str(dataset_id)
        return label

    def _dataset_id_for_disk_label(self, disk_label):
        """
        Returns a UUID representing the Dataset ID for the given disk
        label
        :param string disk_label: The disk label
        :returns UUID: The UUID of the dataset
        """
        return UUID(disk_label.replace('flocker-', ''))

    def _get_disk_vmname_lun(self, blockdevice_id):
        target_disk = None
        target_lun = None
        role_name = None
        disk_list = self._azure_service_client.list_disks()

        for d in disk_list:

            if 'flocker-' not in d.label:
                continue
            if d.label == str(blockdevice_id):
                target_disk = d
                break

        if target_disk is None:
            # check for an unregistered disk
            blobs = self._get_flocker_blobs()
            blob = None

            if str(blockdevice_id) in blobs:
                blob = blobs[str(blockdevice_id)]

            return blob, None, None

        vm_info = None

        if hasattr(target_disk.attached_to, 'role_name'):
            vm_info = self._azure_service_client.get_role(
                self._service_name, self._service_name,
                target_disk.attached_to.role_name)

            for d in vm_info.data_virtual_hard_disks:
                if d.disk_name == target_disk.name:
                    target_lun = d.lun
                    break

            role_name = target_disk.attached_to.role_name

        return (target_disk, role_name, target_lun)

    def _get_flocker_blobs(self):
        all_blobs = {}

        blobs = self._azure_storage_client.list_blobs(
            self._disk_container_name, prefix='flocker-')

        for b in blobs:
            # todo - this could be big!
            all_blobs[b.name] = b

        return all_blobs

    def _wait_for_detach(self, blockdevice_id):
        role_name = ''
        lun = -1

        timeout_count = 0

        log_info('Waiting for Azure to report disk as detached...')

        while role_name is not None or lun is not None:
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(1)
            timeout_count += 1

            if timeout_count > 5000:
                raise AsynchronousTimeout()

        log_info('Disk Detached')

    def _wait_for_attach(self, blockdevice_id):
        timeout_count = 0
        lun = None

        log_info('Waiting for Azure to report disk as attached...')

        while lun is None:
            (target_disk, role_name, lun) = \
                self._get_disk_vmname_lun(blockdevice_id)
            time.sleep(.001)
            timeout_count += 1

            if timeout_count > 5000:
                raise AsynchronousTimeout()

    def _wait_for_async(self, request_id, timeout):
        count = 0
        result = self._azure_service_client.get_operation_status(request_id)
        while result.status == 'InProgress':
            count = count + 1
            if count > timeout:
                log_error('Timed out waiting for async operation to complete.')
                raise AsynchronousTimeout()
            time.sleep(.001)
            log_info('.')
            result = self._azure_service_client.get_operation_status(
                request_id)
            if result.error:
                log_error(result.error.code)
                log_error(str(result.error.message))

        log_info(result.status + ' in approximately ' +
                 str(count * .001) + 's')

    def _gibytes_to_bytes(self, size):

        return int(GiB(size).to_Byte().value)

    def _blockdevicevolume_from_azure_volume(self, label, size,
                                             attached_to_name):

        # azure will report the disk size excluding the 512 byte footer
        # however flocker expects the exact value it requested for disk size
        # so offset the reported size to flocker by 512 bytes
        return BlockDeviceVolume(
            blockdevice_id=unicode(label),
            size=int(size),
            attached_to=attached_to_name,
            dataset_id=self._dataset_id_for_disk_label(
                label))  # disk labels are formatted as flocker-<data_set_id>
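
The constructor above consumes a flat configuration dictionary. A minimal usage sketch (every value below is a placeholder, not a real credential):

azure_config = {
    'subscription_id': '00000000-0000-0000-0000-000000000000',
    'management_certificate_path': '/etc/flocker/azure.pem',
    'service_name': 'my-cloud-service',
    'storage_account_name': 'mystorageaccount',
    'storage_account_key': '<storage-account-key>',
    'disk_container_name': 'flocker-disks',
    'debug': False,
}
api = AzureStorageBlockDeviceAPI(**azure_config)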
Code example #14
class Command(BaseCommand):
    help = "Synchronizes static media to cloud files."

    option_list = BaseCommand.option_list + (
        optparse.make_option(
            '-w',
            '--wipe',
            action='store_true',
            dest='wipe',
            default=False,
            help="Wipes out entire contents of container first."),
        optparse.make_option('-t',
                             '--test-run',
                             action='store_true',
                             dest='test_run',
                             default=False,
                             help="Performs a test run of the sync."),
        optparse.make_option('-c',
                             '--container',
                             dest='container',
                             help="Override STATIC_CONTAINER."),
    )

    # settings from azurite.settings
    ACCOUNT_NAME = AZURITE['ACCOUNT_NAME']
    ACCOUNT_KEY = AZURITE['ACCOUNT_KEY']
    STATIC_CONTAINER = AZURITE['STATIC_CONTAINER']

    # paths
    DIRECTORY = os.path.abspath(settings.STATIC_ROOT)
    STATIC_URL = settings.STATIC_URL

    if not DIRECTORY.endswith('/'):
        DIRECTORY = DIRECTORY + '/'

    if STATIC_URL.startswith('/'):
        STATIC_URL = STATIC_URL[1:]

    local_object_names = []
    create_count = 0
    upload_count = 0
    update_count = 0
    skip_count = 0
    delete_count = 0
    service = None

    def handle(self, *args, **options):
        self.wipe = options.get('wipe')
        self.test_run = options.get('test_run')
        self.verbosity = int(options.get('verbosity'))
        if options.get('container'):
            self.STATIC_CONTAINER = options.get('container')
        self.sync_files()

    def sync_files(self):
        self.service = BlobService(account_name=self.ACCOUNT_NAME,
                                   account_key=self.ACCOUNT_KEY)

        try:
            self.service.get_container_properties(self.STATIC_CONTAINER)
        except WindowsAzureMissingResourceError:
            self.service.create_container(self.STATIC_CONTAINER,
                                          x_ms_blob_public_access='blob')

        self.service.set_container_acl(self.STATIC_CONTAINER,
                                       x_ms_blob_public_access='blob')

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            blob_count = len(self.service.list_blobs(self.STATIC_CONTAINER))

            if self.test_run:
                print "Wipe would delete %d objects." % blob_count
            else:
                print "Deleting %d objects..." % blob_count
                for blob in self.service.list_blobs(self.STATIC_CONTAINER):
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count
        print
        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count,
            self.delete_count)

    def upload_files(self, arg, dirname, names):
        # upload or skip items
        for item in names:
            file_path = os.path.join(dirname, item)
            if os.path.isdir(file_path):
                continue  # Don't try to upload directories

            object_name = self.STATIC_URL + file_path.split(self.DIRECTORY)[1]
            self.local_object_names.append(object_name)

            try:
                properties = self.service.get_blob_properties(
                    self.STATIC_CONTAINER, object_name)
            except WindowsAzureMissingResourceError:
                properties = {}
                self.create_count += 1

            cloud_datetime = None
            if properties.get('last-modified'):
                cloud_datetime = datetime.datetime.strptime(
                    properties['last-modified'],
                    "%a, %d %b %Y %H:%M:%S %Z")

            local_datetime = datetime.datetime.utcfromtimestamp(
                os.stat(file_path).st_mtime)

            if cloud_datetime and local_datetime < cloud_datetime:
                self.skip_count += 1
                if self.verbosity > 1:
                    print "Skipped %s: not modified." % object_name
                continue

            if not self.test_run:
                with open(file_path, 'rb') as f:
                    file_contents = f.read()
                content_type, encoding = mimetypes.guess_type(file_path)
                self.service.put_blob(self.STATIC_CONTAINER,
                                      object_name,
                                      file_contents,
                                      x_ms_blob_type='BlockBlob',
                                      x_ms_blob_content_type=content_type,
                                      content_encoding=encoding)
                # sync_headers(cloud_obj)
            self.upload_count += 1
            if self.verbosity > 1:
                print "Uploaded", object_name

    def delete_files(self):
        # remove any objects in the container that don't exist locally
        for blob in self.service.list_blobs(self.STATIC_CONTAINER):
            if blob.name not in self.local_object_names:
                self.delete_count += 1
                if self.verbosity > 1:
                    print "Deleted %s" % blob.name
                if not self.test_run:
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)
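
The command reads its credentials from the AZURITE dictionary imported from azurite.settings. A minimal sketch of the corresponding Django settings, with placeholder values:

AZURITE = {
    'ACCOUNT_NAME': 'mystorageaccount',
    'ACCOUNT_KEY': '<storage-account-key>',
    'STATIC_CONTAINER': 'static',
}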
Code example #15
File: wabs_util.py Project: ikatson/wal-e
def uri_put_file(creds, uri, fp, content_encoding=None):
    assert fp.tell() == 0
    assert uri.startswith('wabs://')

    def log_upload_failures_on_error(exc_tup, exc_processor_cxt):
        def standard_detail_message(prefix=''):
            return (prefix + '  There have been {n} attempts to upload  '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=uri))

        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying upload because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'.".format(socketmsg)))
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying file upload from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'.format(
                        etype=typ,
                        evalue=value,
                        etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    # Because we're uploading in chunks, catch rate limiting and
    # connection errors which occur for each individual chunk instead of
    # failing the whole file and restarting.
    @retry(retry_with_count(log_upload_failures_on_error))
    def upload_chunk(chunk, block_id):
        check_sum = base64.encodestring(md5(chunk).digest()).strip('\n')
        conn.put_block(url_tup.netloc,
                       url_tup.path,
                       chunk,
                       block_id,
                       content_md5=check_sum)

    url_tup = urlparse(uri)
    kwargs = dict(x_ms_blob_type='BlockBlob')
    if content_encoding is not None:
        kwargs['x_ms_blob_content_encoding'] = content_encoding

    conn = BlobService(creds.account_name, creds.account_key, protocol='https')
    conn.put_blob(url_tup.netloc, url_tup.path, '', **kwargs)

    # WABS requires large files to be uploaded in 4MB chunks
    block_ids = []
    length, index = 0, 0
    pool_size = int(os.getenv('WABS_UPLOAD_POOL_SIZE', 5))
    p = gevent.pool.Pool(size=pool_size)
    while True:
        data = fp.read(WABS_CHUNK_SIZE)
        if data:
            length += len(data)
            block_id = base64.b64encode(str(index))
            p.wait_available()
            p.spawn(upload_chunk, data, block_id)
            block_ids.append(block_id)
            index += 1
        else:
            p.join()
            break

    conn.put_block_list(url_tup.netloc, url_tup.path, block_ids)

    # To maintain consistency with the S3 version of this function we must
    # return an object with a certain set of attributes.  Currently, that set
    # of attributes consists of only 'size'
    return _Key(size=length)
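
Per the closing comment, callers only ever read the size attribute of the returned object. A minimal sketch of what _Key presumably looks like (an assumption; the real class is defined elsewhere in wal-e):

class _Key(object):
    # Mirrors just enough of an S3 key object for callers that
    # only inspect ``size``.
    def __init__(self, size=None):
        self.size = size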
Code example #17
File: snippet.py Project: szabo92/gistable
class AzureBlobStorage(Storage):

    def __init__(self, account='nyxstorage', container='pxo'):
        self.base_storage_uri = 'http://%s.blob.core.windows.net/%s/' % (
            account, container)
        self.blob_service = BlobService(
            account, get_env_variable('AZURE_BLOB_STORAGE_KEY'))
        self.container = container

    def _open(self, name, mode='rb'):
        data = self.blob_service.get_blob(self.container, name)
        return ContentFile(data)

    def _save(self, name, content):
        _file = content.read()
        file_name = content.name[-35:]
        self.blob_service.put_blob(
            self.container, file_name, _file, x_ms_blob_type='BlockBlob')
        return self.base_storage_uri + file_name

    def create_container(self, container_name):
        result = self.blob_service.create_container(
            container_name, x_ms_blob_public_access='container')
        return result

    def delete(self, name):
        self.blob_service.delete_blob(self.container, name)

    def exists(self, name):
        try:
            self.blob_service.get_blob_properties(self.container, name)
        except Exception:
            return False
        else:
            return True

    def get_available_name(self, name):
        return name

    def get_blobs(self):
        blobs = self.blob_service.list_blobs(self.container)
        return blobs

    def get_valid_name(self, name):
        return name

    def modified_time(self, name):
        metadata = self.blob_service.get_blob_metadata(self.container, name)
        modified_time = float(metadata.get('x-ms-meta-modified_time'))
        return datetime.fromtimestamp(modified_time)

    def set_public_container(self, container_name):
        result = self.blob_service.set_container_acl(
            container_name, x_ms_blob_public_access='container')
        return result

    def size(self, name):
        properties = self.blob_service.get_blob_properties(
            self.container, name)
        return int(properties['content-length'])

    def url(self, name):
        blobs = self.blob_service.list_blobs(self.container, prefix=name)
        return blobs.blobs[0].url
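
A minimal usage sketch for this storage backend (placeholder names; assumes AZURE_BLOB_STORAGE_KEY is set in the environment so get_env_variable can resolve it):

from django.core.files.base import ContentFile

storage = AzureBlobStorage(account='nyxstorage', container='pxo')
upload = ContentFile('hello world', name='example.txt')
url = storage._save('ignored', upload)  # blob name is taken from upload.name[-35:]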