Example #1
def test_azure_call(request):
    import os
    from django.http import HttpResponse  # assumed import; the view returns Django HttpResponse objects
    try:
        from azure.storage import BlobService
        bs = BlobService(os.environ["AZURE_STORAGE_ACCOUNT"], os.environ["AZURE_STORAGE_ACCESS_KEY"])
        import random
        container_name = hex(int(random.random() * 1000000000))

        bs.create_container(container_name)
        bs.put_blob(container_name, 'testblob', 'hello world\n', 'BlockBlob')
        blob = bs.get_blob(container_name, 'testblob')
        if blob != 'hello world\n':
            return HttpResponse("Failed!", status = '404')
        
        bs.delete_blob(container_name, 'testblob')
        bs.delete_container(container_name)

        return HttpResponse("Succeeded!")
    except:
        try:
            import traceback
        
            return HttpResponse(traceback.format_exc() + str(os.environ.keys()))
        except:
            import traceback
            return HttpResponse(traceback.format_exc())
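The same round trip can be exercised outside Django; a minimal sketch, assuming the legacy azure.storage BlobService API used above and the same two environment variables (the container name is made up):

import os
from azure.storage import BlobService

bs = BlobService(os.environ["AZURE_STORAGE_ACCOUNT"], os.environ["AZURE_STORAGE_ACCESS_KEY"])

container_name = 'roundtriptest'  # hypothetical container name
bs.create_container(container_name)
bs.put_blob(container_name, 'testblob', 'hello world\n', 'BlockBlob')
assert bs.get_blob(container_name, 'testblob') == 'hello world\n'

# Clean up the test data.
bs.delete_blob(container_name, 'testblob')
bs.delete_container(container_name)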
class FileService(Component):
    def __init__(self):
        self.blob_service = None

    def generate_blob_service(self):
        if self.blob_service is None:
            # if storage info doesn't exist in config.py, the upload file function stops working
            self.blob_service = BlobService(account_name=self.util.get_config("storage.azure.account_name"),
                                            account_key=self.util.get_config("storage.azure.account_key"),
                                            host_base=self.util.get_config("storage.azure.blob_service_host_base"))

    def create_container_in_storage(self, container_name, access):
        """
        Create a container if it doesn't exist.
        :param container_name:
        :param access:
        :return:
        """
        self.generate_blob_service()
        try:
            names = map(lambda x: x.name, self.blob_service.list_containers())
            if container_name not in names:
                self.blob_service.create_container(container_name, x_ms_blob_public_access=access)
            else:
                self.log.debug("container already exsit in storage")
            return True
        except Exception as e:
            self.log.error(e)
            return False

    def upload_file_to_azure(self, stream, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_file(container_name, blob_name, stream)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_path(self, path, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_path(container_name, blob_name, path)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def delete_file_from_azure(self, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.delete_blob(container_name, blob_name)
        except Exception as e:
            self.log.error(e)
            return None
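The container check above can also be factored into a standalone helper; a small sketch, assuming an already-constructed legacy BlobService client and the same x_ms_blob_public_access values used in this class:

def ensure_container(blob_service, container_name, access='container'):
    # Create the container only if it is not already present.
    names = [c.name for c in blob_service.list_containers()]
    if container_name not in names:
        blob_service.create_container(container_name, x_ms_blob_public_access=access)
    return container_name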
Example #3
class AzureTransfer(BaseTransfer):
    def __init__(self, account_name, account_key, container_name):
        BaseTransfer.__init__(self)
        self.account_name = account_name
        self.account_key = account_key
        self.container_name = container_name
        self.conn = BlobService(account_name=self.account_name, account_key=self.account_key)
        self.container = self.get_or_create_container(self.container_name)
        self.log.debug("AzureTransfer initialized")

    def get_metadata_for_key(self, key):
        key = fix_path(key)
        return self.list_path(key)[0]['metadata']

    def list_path(self, path):
        return_list = []
        path = fix_path(path)
        self.log.info("Asking for listing of: %r", path)
        for r in self.conn.list_blobs(self.container_name, prefix=path, delimiter="/",
                                      include="metadata"):
            entry = {"name": r.name, "size": r.properties.content_length,
                     "last_modified": dateutil.parser.parse(r.properties.last_modified),
                     "metadata": r.metadata}
            return_list.append(entry)
        return return_list

    def delete_key(self, key_name):
        key_name = fix_path(key_name)
        self.log.debug("Deleting key: %r", key_name)
        return self.conn.delete_blob(self.container_name, key_name)

    def get_contents_to_file(self, obj_key, filepath_to_store_to):
        obj_key = fix_path(obj_key)
        self.log.debug("Starting to fetch the contents of: %r to: %r", obj_key, filepath_to_store_to)
        return self.conn.get_blob_to_path(self.container_name, obj_key, filepath_to_store_to)

    def get_contents_to_string(self, obj_key):
        obj_key = fix_path(obj_key)
        self.log.debug("Starting to fetch the contents of: %r", obj_key)
        return self.conn.get_blob_to_bytes(self.container_name, obj_key), self.get_metadata_for_key(obj_key)

    def store_file_from_memory(self, key, memstring, metadata=None):
        # For whatever reason Azure requires all values to be strings at the point of sending
        metadata_to_send = dict((str(k), str(v)) for k, v in metadata.items())
        self.conn.put_block_blob_from_bytes(self.container_name, key, memstring,
                                            x_ms_meta_name_values=metadata_to_send)

    def store_file_from_disk(self, key, filepath, metadata=None):
        # For whatever reason Azure requires all values to be strings at the point of sending
        metadata_to_send = dict((str(k), str(v)) for k, v in metadata.items())
        self.conn.put_block_blob_from_path(self.container_name, key, filepath,
                                           x_ms_meta_name_values=metadata_to_send)

    def get_or_create_container(self, container_name):
        start_time = time.time()
        self.conn.create_container(container_name)
        self.log.debug("Got/Created container: %r successfully, took: %.3fs", container_name, time.time() - start_time)
        return container_name
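A possible usage sketch for the transfer class above, assuming BaseTransfer, fix_path and the logger come from the surrounding project; the account, key, container and file names are placeholders:

transfer = AzureTransfer('myaccount', 'mykey', 'backups')
transfer.store_file_from_disk('base/backup.tar', '/tmp/backup.tar', metadata={'kind': 'basebackup'})
print(transfer.list_path('base/'))
transfer.get_contents_to_file('base/backup.tar', '/tmp/restored.tar')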
def main():
    service = BlobService(
        credentials.getStorageServicesName(),
        credentials.getStorageServicesKey(),
    )

    service.create_container(CONTAINER_NAME)

    process(service, LOCAL_BLOCK_BLOB_FILES, CONNECTION_COUNTS, is_page_blob=False)
    process(service, LOCAL_PAGE_BLOB_FILES, CONNECTION_COUNTS, is_page_blob=True)
def main(argv):
    options = _parse_options(argv)

    blob_service = BlobService(options[_opt_azure_acc_name], options[_opt_azure_acc_key])
    _print_container_names(blob_service)

    blob_service.create_container(options[_opt_azure_container_name])
    _print_blobs(blob_service, options[_opt_azure_container_name])

    export_dir = _export_repo(options)
    export_zip = _zip_export(export_dir)
    _upload_zip(blob_service, options[_opt_azure_container_name], export_zip)
Example #6
File: cloud.py Project: EQ4/DRR
def connect(config=False):
  # Connect to the cloud service. 
  if not config: config = misc.config['_private']

  from azure.storage import BlobService
  container = 'streams'

  if 'azure' not in config:
    return None, None

  blob_service = BlobService(config['azure']['storage_account_name'], config['azure']['primary_access_key'])
  blob_service.create_container(container, x_ms_blob_public_access='container')
  return blob_service, container
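A short usage sketch for connect(), assuming misc.config carries the Azure credentials as above; the blob name and local path are placeholders:

blob_service, container = connect()
if blob_service is not None:
    # Upload a local recording to the public 'streams' container and print its URL.
    blob_service.put_block_blob_from_path(container, 'show.mp3', '/tmp/show.mp3')
    print(blob_service.make_blob_url(container, 'show.mp3'))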
Example #7
def create_container(storage_account_name, container_name, storage_keys):
    """
    Creates a blob container in the specified Microsoft Azure Storage account.
    A container is like a folder within a storage account
    :param storage_account_name:
    :param container_name:
    :param storage_keys:
    :return:
    """
    logging.info('Creating Container \'{0}\' in Storage account {1}'.format(container_name, storage_account_name))
    blob_svc = BlobService(account_name=storage_account_name, account_key=storage_keys.storage_service_keys.primary)
    blob_svc.create_container(container_name)
    logging.info('Creating Container \'{0}\' in Storage account {1} complete'.format(container_name, storage_account_name))
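A hedged sketch of how this helper might be invoked, assuming the storage keys come from the legacy Azure service management API (subscription id, certificate path and names are placeholders):

from azure.servicemanagement import ServiceManagementService

sms = ServiceManagementService('<subscription-id>', '/path/to/management-cert.pem')
storage_keys = sms.get_storage_account_keys('mystorageaccount')  # exposes .storage_service_keys.primary
create_container('mystorageaccount', 'mycontainer', storage_keys)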
Example #8
File: cloud.py Project: EQ4/DRR
def connect(config=False):
    # Connect to the cloud service.
    if not config: config = misc.config['_private']

    from azure.storage import BlobService
    container = 'streams'

    if 'azure' not in config:
        return None, None

    blob_service = BlobService(config['azure']['storage_account_name'],
                               config['azure']['primary_access_key'])
    blob_service.create_container(container,
                                  x_ms_blob_public_access='container')
    return blob_service, container
Example #9
 def _ensureStorageContainersExist(self):
     """
     Creates Blob storage containers required by the service.
     """
     logger.info("Checking for existence of Blob containers.")
     account_name = self.config.getServiceStorageAccountName()
     account_key = self._getStorageAccountKey(account_name)
     blob_service = BlobService(account_name, account_key)
     name_and_access_list = [(self.config.getServicePublicStorageContainer(), 'blob'),
                             (self.config.getServiceBundleStorageContainer(), None)]
     for name, access in name_and_access_list:
         logger.info("Checking for existence of Blob container %s.", name)
         blob_service.create_container(name, x_ms_blob_public_access=access, fail_on_exist=False)
         access_info = 'private' if access is None else 'public {0}'.format(access)
         logger.info("Blob container %s is ready (access: %s).", name, access_info)
Example #10
 def _ensureStorageContainersExist(self):
     """
     Creates Blob storage containers required by the service.
     """
     logger.info("Checking for existence of Blob containers.")
     account_name = self.config.getServiceStorageAccountName()
     account_key = self._getStorageAccountKey(account_name)
     blob_service = BlobService(account_name, account_key)
     name_and_access_list = [(self.config.getServicePublicStorageContainer(), 'blob'),
                             (self.config.getServiceBundleStorageContainer(), None)]
     for name, access in name_and_access_list:
         logger.info("Checking for existence of Blob container %s.", name)
         blob_service.create_container(name, x_ms_blob_public_access=access, fail_on_exist=False)
         access_info = 'private' if access is None else 'public {0}'.format(access)
         logger.info("Blob container %s is ready (access: %s).", name, access_info)
def main():
    service = BlobService(
        credentials.getStorageServicesName(),
        credentials.getStorageServicesKey(),
    )

    service.create_container(CONTAINER_NAME)

    process(service,
            LOCAL_BLOCK_BLOB_FILES,
            CONNECTION_COUNTS,
            is_page_blob=False)
    process(service,
            LOCAL_PAGE_BLOB_FILES,
            CONNECTION_COUNTS,
            is_page_blob=True)
def do_step(context):

    settings = context.meta['settings']

    # Prepare the containers
    storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
    storage_access_key = settings["STORAGE-ACCESS-KEY"]
    blob_service = BlobService(storage_account_name, storage_access_key)
    blob_service.create_container('bosh')
    blob_service.create_container(container_name='stemcell', x_ms_blob_public_access='blob')

    # Prepare the table for storing metadata of storage account and stemcells
    table_service = TableService(storage_account_name, storage_access_key)
    table_service.create_table('stemcells')

    context.meta['settings'] = settings
    return context
def do_step(context):

    settings = context.meta['settings']

    # Prepare the containers
    storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
    storage_access_key = settings["STORAGE-ACCESS-KEY"]
    blob_service = BlobService(storage_account_name, storage_access_key)
    blob_service.create_container('bosh')
    blob_service.create_container(container_name='stemcell', x_ms_blob_public_access='blob')

    # Prepare the table for storing metadata of storage account and stemcells
    table_service = TableService(storage_account_name, storage_access_key)
    table_service.create_table('stemcells')

    context.meta['settings'] = settings
    return context
Example #14
class AzureBlobStorage(StorageBase):

    def __init__(self, account_name, account_key, container_name):
        self.__container_name = container_name
        self.__blob_service = BlobService(account_name=account_name,
                                          account_key=account_key)
        self.__blob_service.create_container(container_name)

    def get(self, key, default=None):
        logger.info('get: key = %s' % key)
        return pickle.loads(self.__blob_service
                            .get_blob_to_text(self.__container_name, key))

    def set(self, key, value):
        logger.info('set: key = %s, value = %s' % (key, value))
        self.__blob_service.put_block_blob_from_text(self.__container_name,
                                                     key, pickle.dumps(value))
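A small usage sketch for the pickle-backed store above, assuming StorageBase and logger come from the surrounding project; credentials and container name are placeholders:

storage = AzureBlobStorage('myaccount', 'mykey', 'session-cache')
storage.set('user:42', {'name': 'alice', 'visits': 3})
print(storage.get('user:42'))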
Example #15
def create_container(storage_account_name, container_name, storage_keys):
    """
    Creates a blob container in the specified Microsoft Azure Storage account.
    A container is like a folder within a storage account
    :param storage_account_name:
    :param container_name:
    :param storage_keys:
    :return:
    """
    logging.info('Creating Container \'{0}\' in Storage account {1}'.format(
        container_name, storage_account_name))
    blob_svc = BlobService(
        account_name=storage_account_name,
        account_key=storage_keys.storage_service_keys.primary)
    blob_svc.create_container(container_name)
    logging.info(
        'Creating Container \'{0}\' in Storage account {1} complete'.format(
            container_name, storage_account_name))
Example #16
class BlobCache:
    '''
    Simplistic cache toolkit targeting an Azure Blob Service.

    name:      the name of a storage account.
    key:       the access key for the storage account.
    container: the name of the container to use.
    '''
    def __init__(self, name, key, container):
        self.container = container
        self.blobstore = BlobService(name, key)
        self.blobstore.create_container(self.container)

    def getresponse(self, cachekey):
        '''
        Get a value from the cache.

        cachekey: The key.

        Kilroy notes that this throws an exception rather than returning a 
        value on failure.
        '''
        return self.blobstore.get_blob_to_text(self.container,
                                               str2blobname(cachekey))

    def putresponse(self, cachekey, value):
        '''
        Put a value in the cache with the given key.

        cachekey: The key.

        value: The value to associate with the key.
        '''
        return self.blobstore.put_block_blob_from_text(self.container,
                                                       str2blobname(cachekey),
                                                       value)

    def invalidate(self, cachekey):
        '''
        Invalidate a value in the cache. Immediate. Permanent.

        cachekey: The key.
        '''
        self.blobstore.delete_blob(self.container, str2blobname(cachekey))
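A usage sketch for BlobCache, assuming str2blobname is defined elsewhere in the same module; the account, key and container names are placeholders:

cache = BlobCache('myaccount', 'mykey', 'responsecache')
cache.putresponse('GET http://example.com/', '<html>cached body</html>')
print(cache.getresponse('GET http://example.com/'))
cache.invalidate('GET http://example.com/')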
Example #17
class BlobCache:
    '''
    Simplistic cache toolkit targeting an Azure Blob Service.

    name:      the name of a storage account.
    key:       the access key for the storage account.
    container: the name of the container to use.
    '''

    def __init__(self, name, key, container):
        self.container = container
        self.blobstore = BlobService(name, key)
        self.blobstore.create_container(self.container)

    def getresponse(self, cachekey):
        '''
        Get a value from the cache.

        cachekey: The key.

        Kilroy notes that this throws an exception rather than returning a 
        value on failure.
        '''
        return self.blobstore.get_blob_to_text(self.container,str2blobname(cachekey))

    def putresponse(self, cachekey, value):
        '''
        Put a value in the cache with the given key.

        cachekey: The key.

        value: The value to associate with the key.
        '''
        return self.blobstore.put_block_blob_from_text(self.container, str2blobname(cachekey), value)

    def invalidate(self, cachekey):
        '''
        Invalidate a value in the cache. Immediate. Permanent.

        cachekey: The key.
        '''
        self.blobstore.delete_blob(self.container, str2blobname(cachekey))
Example #18
class AzureBackend(BakthatBackend):
    """Backend to handle Azure upload/download."""
    def __init__(self, conf={}, profile="default"):
        BakthatBackend.__init__(self, conf, profile)

        self.blob_service = BlobService(self.conf["account_name"], self.conf["account_key"])
       
         
        try:
            self.container_properties = self.blob_service.get_container_properties(self.conf["azure_container"])
        except WindowsAzureError as e:
            if isinstance(e, WindowsAzureMissingResourceError):
                self.container_properties = self.blob_service.create_container(self.conf["azure_container"])
            else:
                raise

        self.container = self.conf["azure_container"]
        self.container_key = "azure_container"
Example #19
class AzureTransfer(BaseTransfer):
    def __init__(self, account_name, account_key, container_name, prefix=None):
        # NOTE: Azure wants all paths to start with a slash
        prefix = "/{}".format(prefix.lstrip("/") if prefix else "")
        super().__init__(prefix=prefix)
        self.account_name = account_name
        self.account_key = account_key
        self.container_name = container_name
        self.conn = BlobService(account_name=self.account_name, account_key=self.account_key)
        self.container = self.get_or_create_container(self.container_name)
        self.log.debug("AzureTransfer initialized")
        # XXX: AzureTransfer isn't actively tested and its error handling is probably lacking
        self.log.warning("AzureTransfer is experimental and has not been thoroughly tested")

    def get_metadata_for_key(self, key):
        key = self.format_key_for_backend(key)
        return self._list_blobs(key)[0]["metadata"]

    def _metadata_for_key(self, key):
        return self._list_blobs(key)[0]["metadata"]

    def list_path(self, key):
        path = self.format_key_for_backend(key, trailing_slash=True)
        return self._list_blobs(path)

    def _list_blobs(self, path):
        self.log.debug("Listing path %r", path)
        items = self.conn.list_blobs(self.container_name, prefix=path, delimiter="/", include="metadata")
        result = []
        for item in items:
            result.append({
                "last_modified": dateutil.parser.parse(item.properties.last_modified),
                "metadata": item.metadata,
                "name": self.format_key_from_backend(item.name),
                "size": item.properties.content_length,
            })
        return result

    def delete_key(self, key):
        key = self.format_key_for_backend(key)
        self.log.debug("Deleting key: %r", key)
        return self.conn.delete_blob(self.container_name, key)

    def get_contents_to_file(self, key, filepath_to_store_to):
        key = self.format_key_for_backend(key)
        self.log.debug("Starting to fetch the contents of: %r to: %r", key, filepath_to_store_to)
        return self.conn.get_blob_to_path(self.container_name, key, filepath_to_store_to)

    def get_contents_to_fileobj(self, key, fileobj_to_store_to):
        key = self.format_key_for_backend(key)
        self.log.debug("Starting to fetch the contents of: %r", key)
        return self.conn.get_blob_to_file(self.container_name, key, fileobj_to_store_to)

    def get_contents_to_string(self, key):
        key = self.format_key_for_backend(key)
        self.log.debug("Starting to fetch the contents of: %r", key)
        return self.conn.get_blob_to_bytes(self.container_name, key), self._metadata_for_key(key)

    def store_file_from_memory(self, key, memstring, metadata=None):
        key = self.format_key_for_backend(key)
        # Azure requires all metadata keys and values to be strings
        metadata_to_send = {str(k): str(v) for k, v in metadata.items()}
        self.conn.put_block_blob_from_bytes(self.container_name, key, memstring,
                                            x_ms_meta_name_values=metadata_to_send)

    def store_file_from_disk(self, key, filepath, metadata=None, multipart=None):
        key = self.format_key_for_backend(key)
        # Azure requires all metadata keys and values to be strings
        metadata_to_send = {str(k): str(v) for k, v in metadata.items()}
        self.conn.put_block_blob_from_path(self.container_name, key, filepath,
                                           x_ms_meta_name_values=metadata_to_send)

    def get_or_create_container(self, container_name):
        start_time = time.time()
        self.conn.create_container(container_name)
        self.log.debug("Got/Created container: %r successfully, took: %.3fs", container_name, time.time() - start_time)
        return container_name
class Storage(driver.Base):

    supports_bytes_range = True

    def __init__(self, path=None, config=None):
        self._config = config
        self._container = self._config.azure_storage_container

        protocol = 'https' if self._config.azure_use_https else 'http'
        acct_name = self._config.azure_storage_account_name
        acct_key = self._config.azure_storage_account_key
        self._blob = BlobService(account_name=acct_name,
                                 account_key=acct_key,
                                 protocol=protocol)

        self._init_container()
        logger.debug("Initialized azureblob storage driver")

    def _init_container(self):
        '''Initializes image container on Azure blob storage if the container
        does not exist.
        '''
        created = self._blob.create_container(self._container,
                                              x_ms_blob_public_access='blob',
                                              fail_on_exist=False)
        if created:
            logger.info('Created blob container for image registry.')
        else:
            logger.debug('Registry container already exists.')
        return created

    @lru.get
    def get_content(self, path):
        try:
            return self._blob.get_blob(self._container, path)
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    @lru.set
    def put_content(self, path, content):
        self._blob.put_blob(self._container, path, content, 'BlockBlob')
        return path

    def stream_read(self, path, bytes_range=None):
        try:
            f = io.BytesIO()
            self._blob.get_blob_to_file(self._container, path, f)

            if bytes_range:
                f.seek(bytes_range[0])
                total_size = bytes_range[1] - bytes_range[0] + 1
                nb_bytes = 0  # bytes read so far within the requested range
            else:
                f.seek(0)
            while True:
                buf = None
                if bytes_range:
                    # Bytes Range is enabled
                    buf_size = self.buffer_size
                    if nb_bytes + buf_size > total_size:
                        # We make sure we don't read out of the range
                        buf_size = total_size - nb_bytes
                    if buf_size > 0:
                        buf = f.read(buf_size)
                        nb_bytes += len(buf)
                    else:
                        # We're at the end of the range
                        buf = ''
                else:
                    buf = f.read(self.buffer_size)

                if not buf:
                    break

                yield buf
        except IOError:
            raise exceptions.FileNotFoundError('%s is not there' % path)

    def stream_write(self, path, fp):
        self._blob.put_block_blob_from_file(self._container, path, fp)

    def list_directory(self, path=None):
        if not path.endswith('/'):
            path += '/'  # path=a would list a/b.txt as well as 'abc.txt'

        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        return [b.name for b in blobs]

    def exists(self, path):
        try:
            self._blob.get_blob_properties(self._container, path)
            return True
        except azure.WindowsAzureMissingResourceError:
            return False

    @lru.remove
    def remove(self, path):
        is_blob = self.exists(path)
        if is_blob:
            self._blob.delete_blob(self._container, path)
            return

        exists = False
        blobs = list(self._blob.list_blobs(self._container, path))
        if not blobs:
            raise exceptions.FileNotFoundError('%s is not there' % path)

        for b in blobs:
            self._blob.delete_blob(self._container, b.name)

    def get_size(self, path):
        try:
            properties = self._blob.get_blob_properties(self._container, path)
            return int(properties['content-length'])  # auto-converted to long
        except azure.WindowsAzureMissingResourceError:
            raise exceptions.FileNotFoundError('%s is not there' % path)
class BlobServiceAdapter(Component):
    """The :class:`BlobServiceAdapter` class is a thin wrapper over azure.storage.BlobService.

    All the attributes of the wrapped blob service are proxied by the adapter so
    it's possible to do ``adapter.create_container()`` instead of the long form
    ``adapter.blob_service.create_container()``.
    """

    def __init__(self):
        self.blob_service = BlobService(account_name=self.util.get_config("storage.azure.account_name"),
                                        account_key=self.util.get_config("storage.azure.account_key"),
                                        host_base=self.util.get_config("storage.azure.blob_service_host_base"))

    def __getattr__(self, name):
        return getattr(self.blob_service, name)

    def create_container_in_storage(self, container_name, access="container"):
        """create a container if doesn't exist

        :type container_name: str|unicode
        :param container_name: Name of container to create.

        :type access: str|unicode
        :param access: Optional. Possible values include: container, blob
        :return:
        """
        try:
            names = [x.name for x in self.blob_service.list_containers()]
            if container_name not in names:
                return self.blob_service.create_container(container_name, x_ms_blob_public_access=access)
            else:
                self.log.debug("container already exists in storage")
                return True
        except Exception as e:
            self.log.error(e)
            return False

    def upload_file_to_azure(self, container_name, blob_name, stream):
        """
        Creates a new block blob from a file/stream, or updates the content of
        an existing block blob, with automatic chunking and progress
        notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str | unicode
        :param blob_name: Name of blob to create or update.

        :type stream: file
        :param stream: Opened file/stream to upload as the blob content.
        """
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_file(container_name, blob_name, stream)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_bytes(self, container_name, blob_name, blob):
        """
        Creates a new block blob from an array of bytes, or updates the content
        of an existing block blob, with automatic chunking and progress
        notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str|unicode
        :param blob_name: Name of blob to create or update.

        :type blob: bytes
        :param blob: Content of blob as an array of bytes.
        """
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_bytes(container_name, blob_name, blob)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_text(self, container_name, blob_name, text):
        """
        Creates a new block blob from str/unicode, or updates the content of an
        existing block blob, with automatic chunking and progress notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str|unicode
        :param blob_name: Name of blob to create or update.

        :type text: str|unicode
        :param text: Text to upload to the blob.
        """
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_text(container_name, blob_name, text)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_path(self, container_name, blob_name, path):
        """
        Creates a new block blob from a file path, or updates the content of an
        existing block blob, with automatic chunking and progress notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str|unicode
        :param blob_name: Name of blob to create or update.

        :type path: str|unicode
        :param path: Path of the file to upload as the blob content.
        """
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_path(container_name, blob_name, path)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def delete_file_from_azure(self, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.delete_blob(container_name, blob_name)
        except Exception as e:
            self.log.error(e)
            return None
Example #22
def upload():

    file = request.files['fileInput']
    print "File is" + file.filename


    if file:
        data = file.read()


        blob_service = BlobService(account_name='squadshots', account_key='UgxaWKAKv2ZvhHrPt0IHi4EQedPpZw35r+RXkAYB2eICPrG3TjSwk2G8gUzG/PNDDTV+4CVCYWCvZSiad5xMQQ==')
        blob_service.create_container('album')

        blob_service.put_block_blob_from_bytes(
            'album',
            file.filename + "_blob",
            data,
            x_ms_blob_content_type='image/png'
        )

        un = session.get('username')
        if un is None:
            print "username not in session"

        blob_service.set_blob_metadata(container_name="album",
                                   blob_name=file.filename + "_blob",
                                   x_ms_meta_name_values={'metaun': un})

        blob_service.get_blob_to_path('album',file.filename + "_blob",'static/output.png')
        f = open('input_person.png', 'wb')
        f.write(data)
        f.close()


        [X,y] = read_images(OUTPUT_DIRECTORY, (256,256))
    # Convert labels to 32bit integers. This is a workaround for 64bit machines,
        y = np.asarray(y, dtype=np.int32)

    # Create the Eigenfaces model.
        model = cv2.createEigenFaceRecognizer()
    # Learn the model. Remember our function returns Python lists,
    # so we use np.asarray to turn them into NumPy lists to make
    # the OpenCV wrapper happy:
        model.train(np.asarray(X), np.asarray(y))

    # Save the model for later use
        model.save("eigenModel.xml")



        # Create an Eigen Face recogniser
        t = float(100000)
        model = cv2.createEigenFaceRecognizer(threshold=t)

        # Load the model
        model.load("eigenModel.xml")

       # Read the image we're looking for
        try:
            sampleImage = cv2.imread('static/output.png', cv2.IMREAD_GRAYSCALE)
            if sampleImage is not None:
                sampleImage = cv2.resize(sampleImage, (256,256))
            else:
                print "sample image is  null"
        except IOError:
            print "IO error"

      # Look through the model and find the face it matches
        [p_label, p_confidence] = model.predict(sampleImage)

    # Print the confidence levels
        print "Predicted label = %d (confidence=%.2f)" % (p_label, p_confidence)

    # If the model found something, print the file path
        if (p_label > -1):
            count = 0
            for dirname, dirnames, filenames in os.walk(OUTPUT_DIRECTORY):
                for subdirname in dirnames:
                    subject_path = os.path.join(dirname, subdirname)
                    if (count == p_label):
                        for filename in os.listdir(subject_path):
                            print "subject path = " + subject_path

                    count = count+1

    return "uploaded"
Example #23
class AzureFS(LoggingMixIn, Operations):
    """Azure Blob Storage filesystem"""

    blobs = None
    containers = dict()  # <cname, dict(stat: dict, files: None | dict<fname, stat>)>
    fds = dict()  # <fd, (path, bytes, dirty)>
    fd = 0

    def __init__(self, account, key):
        self.blobs = BlobService(account, key)
        self.rebuild_container_list()

    def convert_to_epoch(self, date):
        """Converts Tue, 31 Jul 2012 07:17:34 GMT format to epoch"""
        return int(time.mktime(time.strptime(date, TIME_FORMAT)))

    def rebuild_container_list(self):
        cmap = dict()
        cnames = set()
        for c in self.blobs.list_containers():
            date = c.properties.last_modified
            cstat = dict(st_mode=(S_IFDIR | 0755), st_uid=getuid(), st_size=0,
                         st_mtime=self.convert_to_epoch(date))
            cname = c.name
            cmap['/' + cname] = dict(stat=cstat, files=None)
            cnames.add(cname)

        cmap['/'] = dict(files={},
                         stat=dict(st_mode=(S_IFDIR | 0755),
                                     st_uid=getuid(), st_size=0,
                                     st_mtime=int(time.time())))

        self.containers = cmap   # destroys fs tree cache resistant to misses

    def _parse_path(self, path):    # returns </dir, file(=None)>
        if path.count('/') > 1:     # file
            return str(path[:path.rfind('/')]), str(path[path.rfind('/') + 1:])
        else:                       # dir
            pos = path.rfind('/', 1)
            if pos == -1:
                return path, None
            else:
                return str(path[:pos]), None

    def parse_container(self, path):
        base_container = path[1:]   # /abc/def/g --> abc
        if base_container.find('/') > -1:
            base_container = base_container[:base_container.find('/')]
        return str(base_container)

    def _get_dir(self, path, contents_required=False):
        if not self.containers:
            self.rebuild_container_list()

        if path in self.containers and not (contents_required and \
                self.containers[path]['files'] is None):
            return self.containers[path]

        cname = self.parse_container(path)

        if '/' + cname not in self.containers:
            raise FuseOSError(ENOENT)
        else:
            if self.containers['/' + cname]['files'] is None:
                # fetch contents of container
                log.info("------> CONTENTS NOT FOUND: %s" % cname)

                blobs = self.blobs.list_blobs(cname)

                dirstat = dict(st_mode=(S_IFDIR | 0755), st_size=0,
                               st_uid=getuid(), st_mtime=time.time())

                if self.containers['/' + cname]['files'] is None:
                    self.containers['/' + cname]['files'] = dict()

                for f in blobs:
                    blob_name = f.name
                    blob_date = f.properties.last_modified
                    blob_size = long(f.properties.content_length)

                    node = dict(st_mode=(S_IFREG | 0644), st_size=blob_size,
                                st_mtime=self.convert_to_epoch(blob_date),
                                st_uid=getuid())

                    if blob_name.find('/') == -1:  # file just under container
                        self.containers['/' + cname]['files'][blob_name] = node

            return self.containers['/' + cname]
        return None

    def _get_file(self, path):
        d, f = self._parse_path(path)
        dir = self._get_dir(d, True)
        if dir is not None and f in dir['files']:
            return dir['files'][f]

    def getattr(self, path, fh=None):
        d, f = self._parse_path(path)

        if f is None:
            dir = self._get_dir(d)
            return dir['stat']
        else:
            file = self._get_file(path)

            if file:
                return file

        raise FuseOSError(ENOENT)

    # FUSE
    def mkdir(self, path, mode):
        if path.count('/') <= 1:    # create on root
            name = path[1:]

            if not 3 <= len(name) <= 63:
                log.error("Container names can be 3 through 63 chars long.")
                raise FuseOSError(ENAMETOOLONG)
            if name != name.lower():
                log.error("Container names cannot contain uppercase characters.")
                raise FuseOSError(EACCES)
            if name.count('--') > 0:
                log.error("Container names cannot contain consecutive dashes (-).")
                raise FuseOSError(EAGAIN)
            #TODO handle all "-"s must be preceded by letter or numbers
            #TODO starts with only letter or number, can contain letter, nr,'-'

            resp = self.blobs.create_container(name)

            if resp:
                self.rebuild_container_list()
                log.info("CONTAINER %s CREATED" % name)
            else:
                log.error("Invalid container name or container already exists.")
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)  # TODO support 2nd+ level mkdirs

    def rmdir(self, path):
        if path.count('/') == 1:
            c_name = path[1:]
            resp = self.blobs.delete_container(c_name)

            if resp:
                if path in self.containers:
                    del self.containers[path]
            else:
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)  # TODO support 2nd+ level mkdirs

    def create(self, path, mode):
        node = dict(st_mode=(S_IFREG | mode), st_size=0, st_nlink=1,
                     st_uid=getuid(), st_mtime=time.time())
        d, f = self._parse_path(path)

        if not f:
            log.error("Cannot create files on root level: /")
            raise FuseOSError(ENOSYS)

        dir = self._get_dir(d, True)
        if not dir:
            raise FuseOSError(EIO)
        dir['files'][f] = node

        return self.open(path, data='')     # reusing handler provider

    def open(self, path, flags=0, data=None):
        if data is None:                    # download contents
            c_name = self.parse_container(path)
            f_name = path[path.find('/', 1) + 1:]

            try:
                data = self.blobs.get_blob(c_name, f_name)
            except WindowsAzureMissingResourceError:
                dir = self._get_dir('/' + c_name, True)
                if f_name in dir['files']:
                    del dir['files'][f_name]
                raise FuseOSError(ENOENT)
            except WindowsAzureError as e:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)

        self.fd += 1
        self.fds[self.fd] = (path, data, False)

        return self.fd

    def flush(self, path, fh=None):
        if not fh:
            raise FuseOSError(EIO)
        else:
            if fh not in self.fds:
                raise FuseOSError(EIO)
            path = self.fds[fh][0]
            data = self.fds[fh][1]
            dirty = self.fds[fh][2]

            if not dirty:
                return 0     # avoid redundant write

            d, f = self._parse_path(path)
            c_name = self.parse_container(path)

            if data is None:
                data = ''

            try:
                if len(data) < 64 * 1024 * 1024:   # 64 mb
                    self.blobs.put_blob(c_name, f, data, 'BlockBlob')
                else:
                    # divide file by blocks and upload
                    block_size = 8 * 1024 * 1024
                    num_blocks = int(math.ceil(len(data) * 1.0 / block_size))
                    rd = str(random.randint(1, 1e8))
                    block_ids = list()

                    for i in range(num_blocks):
                        part = data[i * block_size:min((i + 1) * block_size,
                            len(data))]
                        block_id = base64.encodestring('%s_%s' % (rd,
                            (8 - len(str(i))) * '0' + str(i)))
                        self.blobs.put_block(c_name, f, part, block_id)
                        block_ids.append(block_id)

                    self.blobs.put_block_list(c_name, f, block_ids)
            except WindowsAzureError:
                raise FuseOSError(EAGAIN)

            dir = self._get_dir(d, True)
            if not dir or f not in dir['files']:
                raise FuseOSError(EIO)

            # update local data
            dir['files'][f]['st_size'] = len(data)
            dir['files'][f]['st_mtime'] = time.time()
            self.fds[fh] = (path, data, False)  # mark as not dirty
            return 0

    def release(self, path, fh=None):
        if fh is not None and fh in self.fds:
            del self.fds[fh]

    def truncate(self, path, length, fh=None):
        return 0     # assume done, no need

    def write(self, path, data, offset, fh=None):
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)
        else:
            d = self.fds[fh][1]
            if d is None:
                d = ""
            self.fds[fh] = (self.fds[fh][0], d[:offset] + data, True)
            return len(data)

    def unlink(self, path):
        c_name = self.parse_container(path)
        d, f = self._parse_path(path)

        try:
            self.blobs.delete_blob(c_name, f)

            _dir = self._get_dir(path, True)
            if _dir and f in _dir['files']:
                del _dir['files'][f]
            return 0
        except WindowsAzureMissingResourceError:
            raise FuseOSError(ENOENT)
        except Exception as e:
            raise FuseOSError(EAGAIN)

    def readdir(self, path, fh):
        if path == '/':
            return ['.', '..'] + [x[1:] for x in self.containers.keys()
                                  if x != '/']

        dir = self._get_dir(path, True)
        if not dir:
            raise FuseOSError(ENOENT)
        return ['.', '..'] + dir['files'].keys()

    def read(self, path, size, offset, fh):
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)

        f_name = path[path.find('/', 1) + 1:]
        c_name = path[1:path.find('/', 1)]

        try:
            data = self.blobs.get_blob(c_name, f_name)
            self.fds[fh] = (self.fds[fh][0], data, False)
            return data[offset:offset + size]
        except URLError, e:
            if e.code == 404:
                raise FuseOSError(ENOENT)
            elif e.code == 403:
                raise FuseOSError(EPERM)
            else:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)
        data = self.fds[fh][1]
        if data is None:
            data = ""
        return data[offset:offset + size]
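Since the class above implements the fusepy Operations interface, mounting it could look like the following sketch (assuming fusepy is installed and the mount point exists; the account, key and mount point are placeholders):

from fuse import FUSE

if __name__ == '__main__':
    # Mount the Azure blob account as a filesystem at /mnt/azure.
    FUSE(AzureFS('myaccount', 'mykey'), '/mnt/azure', foreground=True)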
Example #24
imagesQueue = 'imagesqueue'

tableName = 'photos'
tablePartitionKey = 'allphotos'

# Get queue credentials
# accountName = environ["AZURE_STORAGE_ACCOUNT"]
with open("ASA.key", "r") as myfile:
    accountName = myfile.read().replace('\n', '')
# accountKey = environ["AZURE_STORAGE_ACCESS_KEY"]
with open("ASK.key", "r") as myfile:
    accountKey = myfile.read().replace('\n', '')

# Create blob service
blob_service = BlobService(account_name=accountName, account_key=accountKey)
blob_service.create_container(blob_container)
blob_service.create_container(blob_analysis)

# Open queue with given credentials
queue_service = QueueService(account_name=accountName, account_key=accountKey)

# Open table service
table_service = TableService(account_name=accountName, account_key=accountKey)

# Analysis results
results = None
# Regions for analysis
region = 4

# Repeat
while (True):
Example #25
imagesQueue = 'imagesqueue'

tableName = 'photos'
tablePartitionKey = 'allphotos'

# Get queue credentials
# accountName = environ["AZURE_STORAGE_ACCOUNT"]
with open ("ASA.key", "r") as myfile:
  accountName=myfile.read().replace('\n', '')
# accountKey = environ["AZURE_STORAGE_ACCESS_KEY"]
with open ("ASK.key", "r") as myfile:
  accountKey=myfile.read().replace('\n', '')

# Create blob service
blob_service = BlobService( account_name=accountName, account_key=accountKey )
blob_service.create_container( blob_container )
blob_service.create_container( blob_analysis )

# Open queue with given credentials
queue_service = QueueService( account_name=accountName, account_key=accountKey )

# Open table service
table_service = TableService( account_name=accountName, account_key=accountKey )

# Analysis results
results = None
# Regions for analysis
region = 4

# Repeat
while(True):
timeStamp = datetime.today().strftime("%Y-%m-%dT%H%M%SZ")

pathToZip = ''
sevenZip = ''
fileName = 'Backup_'+timeStamp+'.7z'
activeContainer = ''

if platform.system() == 'Windows':
    pathToZip = 'C:\\SQLBackups\\' + fileName
    sevenZip = 'C:\\SQLBackups\\7Zip\\7za.exe'
    activeContainer = regVPS
else:
    # sudo apt-get install p7zip-full
    pathToZip = '/var/tmp/'+fileName
    sevenZip = '7z'
    activeContainer = DOVPS

#Create archive.7z containing Backups Directory with max compression
zipArgs = "a "+ pathToZip +" Backups -mx9"

subprocess.call(sevenZip + " " + zipArgs)

blob_service = BlobService('storageAccountName', 'storageKey')

blob_service.create_container(regVPS)
blob_service.create_container(DOVPS)

blob_service.put_block_blob_from_path(activeContainer, fileName, pathToZip)

os.remove(pathToZip)  # remove the archive that was just uploaded
Example #27
class FileService(Component):
    def __init__(self):
        self.blob_service = None

    def generate_blob_service(self):
        if self.blob_service is None:
            # if storage info doesn't exist in config.py, the upload file function stops working
            self.blob_service = BlobService(
                account_name=self.util.get_config(
                    "storage.azure.account_name"),
                account_key=self.util.get_config("storage.azure.account_key"),
                host_base=self.util.get_config(
                    "storage.azure.blob_service_host_base"))

    def create_container_in_storage(self, container_name, access):
        """
        Create a container if it doesn't exist.
        :param container_name:
        :param access:
        :return:
        """
        self.generate_blob_service()
        try:
            names = map(lambda x: x.name, self.blob_service.list_containers())
            if container_name not in names:
                self.blob_service.create_container(
                    container_name, x_ms_blob_public_access=access)
            else:
                self.log.debug("container already exsit in storage")
            return True
        except Exception as e:
            self.log.error(e)
            return False

    def upload_file_to_azure(self, stream, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_file(
                    container_name, blob_name, stream)
                return self.blob_service.make_blob_url(container_name,
                                                       blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_path(self, path, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.put_block_blob_from_path(
                    container_name, blob_name, path)
                return self.blob_service.make_blob_url(container_name,
                                                       blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def delete_file_from_azure(self, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, 'container'):
                self.blob_service.delete_blob(container_name, blob_name)
        except Exception as e:
            self.log.error(e)
            return None
Example #28
# Get settings from CustomScriptForLinux extension configurations
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
hutil = Util.HandlerUtility(waagent.Log, waagent.Error, "bosh-deploy-script")
hutil.do_parse_context("enable")
settings = hutil.get_public_settings()
with open(os.path.join('bosh', 'settings'), "w") as tmpfile:
    tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))
username = settings["username"]
home_dir = os.path.join("/home", username)
install_log = os.path.join(home_dir, "install.log")

# Prepare the containers
storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
storage_access_key = settings["STORAGE-ACCESS-KEY"]
blob_service = BlobService(storage_account_name, storage_access_key)
blob_service.create_container('bosh')
blob_service.create_container('stemcell')

# Generate the private key and certificate
call("sh create_cert.sh", shell=True)
call("cp bosh.key ./bosh/bosh", shell=True)
with open('bosh_cert.pem', 'r') as tmpfile:
    ssh_cert = tmpfile.read()
indentation = " " * 8
ssh_cert = ("\n" + indentation).join([line for line in ssh_cert.split('\n')])

# Render the yml template for bosh-init
bosh_template = 'bosh.yml'
if os.path.exists(bosh_template):
    with open(bosh_template, 'r') as tmpfile:
        contents = tmpfile.read()
Example #29
def do_step(context):
    settings = context.meta['settings']
    index_file = context.meta['index-file']
    pivnetAPIToken = settings["pivnet-api-token"]

    f = open("manifests/{0}".format(index_file))
    manifests = yaml.safe_load(f)
    f.close()

    eula_urls = [
        "https://network.pivotal.io/api/v2/products/{0}/releases/{1}/eula_acceptance"
        .format(m['release-name'], m['release-number'])
        for m in manifests['manifests']
    ]

    release_urls = [
        "https://network.pivotal.io/api/v2/products/{0}/releases/{1}/product_files/{2}/download"
        .format(m['release-name'], m['release-number'], m['file-number'])
        for m in manifests['manifests']
    ]

    stemcell_urls = [m['stemcell'] for m in manifests['manifests']]

    # accept eula for each product
    for url in eula_urls:
        print url
        if not "concourse" in url:
            res = authorizedPost(url, pivnetAPIToken)
            code = res.getcode()

    # releases
    is_release_file = re.compile(r"^releases/.+")
    if not os.path.exists("/tmp/releases"):
        os.makedirs("/tmp/releases")

    client = bosh_client.BoshClient("https://10.0.0.4:25555", "admin", "admin")
    storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
    storage_access_key = settings["STORAGE-ACCESS-KEY"]

    blob_service = BlobService(storage_account_name, storage_access_key)
    blob_service.create_container(container_name='tempreleases',
                                  x_ms_blob_public_access='container')

    print "Processing releases."
    for url in release_urls:

        print "Downloading {0}.".format(url)

        if "concourse" in url:
            release_url = "https://s3-us-west-2.amazonaws.com/bosh-azure-releases/concourse.zip"
            res = urllib2.urlopen(release_url)
        else:
            res = authorizedPost(url, pivnetAPIToken)

        code = res.getcode()

        length = int(res.headers["Content-Length"])

        # content-length
        if code == 200:

            total = 0
            pcent = 0.0
            CHUNK = 16 * 1024

            with tempfile.TemporaryFile() as temp:
                while True:
                    chunk = res.read(CHUNK)
                    total += CHUNK
                    pcent = (float(total) / float(length)) * 100

                    sys.stdout.write("Download progress: %.2f%% (%.2fM)\r" %
                                     (pcent, total / 1000000.0))
                    sys.stdout.flush()

                    if not chunk:
                        break

                    temp.write(chunk)

                print "Download complete."

                z = zipfile.ZipFile(temp)
                for name in z.namelist():

                    # is this a release?
                    if is_release_file.match(name):

                        release_filename = "/tmp/{0}".format(name)

                        print "Unpacking {0}.".format(name)
                        z.extract(name, "/tmp")

                        print "Uploading {0} to Azure blob store".format(name)

                        blob_service.put_block_blob_from_path(
                            'tempreleases',
                            name,
                            "/tmp/{0}".format(name),
                            x_ms_blob_content_type='application/x-compressed')

                        os.unlink(release_filename)
                        blob_url = "http://{0}.blob.core.windows.net/{1}/{2}".format(
                            storage_account_name, 'tempreleases', name)

                        print "Uploading release {0} to BOSH director.".format(
                            name)

                        task_id = client.upload_release(blob_url)
                        client.wait_for_task(task_id)

                z.close()
                temp.close()

    blob_service.delete_container("tempreleases")

    # stemcells
    print "Processing stemcells."

    for url in stemcell_urls:
        print "Processing stemcell {0}".format(url)
        task_id = client.upload_stemcell(url)
        client.wait_for_task(task_id)

    return context
Example #30
class AzureTransfer(BaseTransfer):
    def __init__(self, account_name, account_key, container_name, prefix=None):
        # NOTE: Azure wants all paths to start with a slash
        prefix = "/{}".format(prefix.lstrip("/") if prefix else "")
        super().__init__(prefix=prefix)
        self.account_name = account_name
        self.account_key = account_key
        self.container_name = container_name
        self.conn = BlobService(account_name=self.account_name,
                                account_key=self.account_key)
        self.container = self.get_or_create_container(self.container_name)
        self.log.debug("AzureTransfer initialized")
        # XXX: AzureTransfer isn't actively tested and its error handling is probably lacking
        self.log.warning(
            "AzureTransfer is experimental and has not been thoroughly tested")

    def get_metadata_for_key(self, key):
        key = self.format_key_for_backend(key)
        return self._list_blobs(key)[0]["metadata"]

    def _metadata_for_key(self, key):
        return self._list_blobs(key)[0]["metadata"]

    def list_path(self, key):
        path = self.format_key_for_backend(key, trailing_slash=True)
        return self._list_blobs(path)

    def _list_blobs(self, path):
        self.log.debug("Listing path %r", path)
        items = self.conn.list_blobs(self.container_name,
                                     prefix=path,
                                     delimiter="/",
                                     include="metadata")
        result = []
        for item in items:
            result.append({
                "last_modified": dateutil.parser.parse(item.properties.last_modified),
                "metadata": item.metadata,
                "name": self.format_key_from_backend(item.name),
                "size": item.properties.content_length,
            })
        return result

    def delete_key(self, key):
        key = self.format_key_for_backend(key)
        self.log.debug("Deleting key: %r", key)
        return self.conn.delete_blob(self.container_name, key)

    def get_contents_to_file(self, key, filepath_to_store_to):
        key = self.format_key_for_backend(key)
        self.log.debug("Starting to fetch the contents of: %r to: %r", key,
                       filepath_to_store_to)
        return self.conn.get_blob_to_path(self.container_name, key,
                                          filepath_to_store_to)

    def get_contents_to_fileobj(self, key, fileobj_to_store_to):
        key = self.format_key_for_backend(key)
        self.log.debug("Starting to fetch the contents of: %r", key)
        return self.conn.get_blob_to_file(self.container_name, key,
                                          fileobj_to_store_to)

    def get_contents_to_string(self, key):
        key = self.format_key_for_backend(key)
        self.log.debug("Starting to fetch the contents of: %r", key)
        return self.conn.get_blob_to_bytes(self.container_name,
                                           key), self._metadata_for_key(key)

    def store_file_from_memory(self, key, memstring, metadata=None):
        key = self.format_key_for_backend(key)
        # Azure requires all metadata keys and values to be strings
        metadata_to_send = {str(k): str(v) for k, v in (metadata or {}).items()}
        self.conn.put_block_blob_from_bytes(
            self.container_name,
            key,
            memstring,
            x_ms_meta_name_values=metadata_to_send)

    def store_file_from_disk(self,
                             key,
                             filepath,
                             metadata=None,
                             multipart=None):
        key = self.format_key_for_backend(key)
        # Azure requires all metadata keys and values to be strings
        metadata_to_send = {str(k): str(v) for k, v in (metadata or {}).items()}
        self.conn.put_block_blob_from_path(
            self.container_name,
            key,
            filepath,
            x_ms_meta_name_values=metadata_to_send)

    def get_or_create_container(self, container_name):
        start_time = time.time()
        self.conn.create_container(container_name)
        self.log.debug("Got/Created container: %r successfully, took: %.3fs",
                       container_name,
                       time.time() - start_time)
        return container_name
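
A rough usage sketch for the class above (credentials, container and key names are placeholders, and BaseTransfer with its format_key helpers must come from the surrounding project):

transfer = AzureTransfer(account_name="examplestorage",   # placeholder account
                         account_key="base64key==",        # placeholder key
                         container_name="backups",
                         prefix="site1")
# Keys end up under the "/site1/" prefix added in __init__.
transfer.store_file_from_memory("state/checkpoint", b"hello", metadata={"kind": "test"})
for entry in transfer.list_path("state"):
    print(entry["name"], entry["size"])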
Esempio n. 31
0
def generate_and_upload(gauge_factory, config, logger):
    start = datetime.datetime.now()
    twitter_followers = gauge_factory('twitter.followers')
    twitter_tweets = gauge_factory('twitter.tweets')
    fb_friends = gauge_factory('facebook.friends')
    foursq_checkins = gauge_factory('foursquare.checkins')
    klout_score = gauge_factory('klout.score')
    runkeeper_activities = gauge_factory('runkeeper.activities')
    runkeeper_calories = gauge_factory('runkeeper.calories_burned')
    runkeeper_weight = gauge_factory('runkeeper.weight')
    tmp102_celsius = gauge_factory('tmp102.temperature', gauge_type='hourly')
    lastfm_listened = gauge_factory('lastfm.listened')
    jawbone_sleeps = gauge_factory('jawbone.sleeps')
    jawbone_heartrate = gauge_factory('jawbone.resting_heartrate')
    jawbone_steps = gauge_factory('jawbone.steps')
    jawbone_caffeine = gauge_factory('jawbone.caffeine')

    data = {}
    data_sources = [
        # (output key, gauge, days back, aggregator, postprocessors)
        ('twitter.followers', twitter_followers, 30, None,
         [zero_fill_daily, interpolators.linear]),
        ('twitter.tweets', twitter_tweets, 20, None, [zero_fill_daily]),
        ('facebook.friends', fb_friends, 180, monthly_max, None),
        ('foursquare.checkins', foursq_checkins, 14, None, [zero_fill_daily]),
        ('lastfm.listened', lastfm_listened, 14, None, [zero_fill_daily]),
        ('klout.score', klout_score, 30, weekly_max,
         [zero_fill_weekly, interpolators.linear]),
        ('runkeeper.calories', runkeeper_calories, 60, weekly_sum,
         [zero_fill_weekly]),
        ('runkeeper.activities', runkeeper_activities, 60, weekly_sum,
         [zero_fill_weekly]),
        ('runkeeper.weight', runkeeper_weight, 180, weekly_min,
         [zero_fill_weekly, interpolators.linear]),
        ('sleeps', jawbone_sleeps, 14, None,
         [zero_fill_daily, interpolators.linear]),
        ('heartrate', jawbone_heartrate, 21, None,
         [zero_fill_daily, interpolators.linear]),
        ('steps', jawbone_steps, 14, None,
         [zero_fill_daily, interpolators.linear]),
        ('caffeine', jawbone_caffeine, 30, None, [zero_fill_daily]),
        ('tmp102.temperature', tmp102_celsius, 2.5, None, None)
    ]

    for ds in data_sources:
        data[ds[0]] = ds[1].aggregate(today_utc() - timedelta(days=ds[2]),
                                      aggregator=ds[3],
                                      post_processors=ds[4])

    report = {
        'generated': str(now_utc()),
        'data': data,
        'took': (datetime.datetime.now() - start).seconds
    }
    report_json = json.dumps(report, indent=4, default=json_date_serializer)
    report_content = '{0}({1})'.format(JSONP_CALLBACK_NAME, report_json)

    blob_service = BlobService(config['azure.account'], config['azure.key'])
    blob_service.create_container(config['azure.blob.container'])
    blob_service.set_container_acl(config['azure.blob.container'],
                                   x_ms_blob_public_access='container')
    blob_service.put_blob(config['azure.blob.container'],
                          config['azure.blob.name'], report_content,
                          'BlockBlob')

    took = (datetime.datetime.now() - start).seconds
    logger.info('Report generated and uploaded. Took {0} s.'.format(took))
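
The json.dumps call above relies on a json_date_serializer helper defined elsewhere in the project; a minimal sketch of what such a default hook usually looks like (an assumption, not the project's actual implementation):

import datetime

def json_date_serializer(obj):
    # json.dumps calls this for values it cannot encode natively;
    # ISO-8601 strings keep dates readable in the JSONP payload.
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    raise TypeError("%r is not JSON serializable" % obj)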
Esempio n. 32
0
from azure import WindowsAzureMissingResourceError
from azure.storage import BlobService
import glob
import os
import engaged.data.azure_connect as AC

ACCOUNT_NAME = 'sounds'
ACCOUNT_KEY  = AC.getAccountKey() # primary access key
HOST_BASE    = '.blob.core.windows.net'

blob_service = BlobService(account_name=ACCOUNT_NAME,
                           account_key=ACCOUNT_KEY,
                           host_base=HOST_BASE)

CONTAINER = 'bat-detective' # or whatever else you like

created = blob_service.create_container(CONTAINER, x_ms_blob_public_access='container')
print "Created" if created else "Not created (probably already existing)"

audio_dir = '../../data/wav/'
SOUND_FILES = glob.glob(audio_dir + '*.wav')

for f in SOUND_FILES:
    print "uploading", os.path.basename(f)
    blob_service.put_block_blob_from_path(
        CONTAINER,                          # container
        os.path.basename(f),                # blob
        f,                                  # path
        x_ms_blob_content_type='audio/wav'
    )
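
Since the container is created with x_ms_blob_public_access='container', each uploaded recording is publicly readable; a small, illustrative sketch of printing the resulting URLs after the upload loop, reusing the names defined above:

for f in SOUND_FILES:
    # make_blob_url builds the public URL from the service's account and host_base
    print blob_service.make_blob_url(CONTAINER, os.path.basename(f))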

Esempio n. 33
0
class Command(BaseCommand):
    help = "Synchronizes static media to cloud files."

    option_list = BaseCommand.option_list + (
        optparse.make_option(
            '-w',
            '--wipe',
            action='store_true',
            dest='wipe',
            default=False,
            help="Wipes out entire contents of container first."),
        optparse.make_option('-t',
                             '--test-run',
                             action='store_true',
                             dest='test_run',
                             default=False,
                             help="Performs a test run of the sync."),
        optparse.make_option('-c',
                             '--container',
                             dest='container',
                             help="Override STATIC_CONTAINER."),
    )

    # settings from azurite.settings
    ACCOUNT_NAME = AZURITE['ACCOUNT_NAME']
    ACCOUNT_KEY = AZURITE['ACCOUNT_KEY']
    STATIC_CONTAINER = AZURITE['STATIC_CONTAINER']

    # paths
    DIRECTORY = os.path.abspath(settings.STATIC_ROOT)
    STATIC_URL = settings.STATIC_URL

    if not DIRECTORY.endswith('/'):
        DIRECTORY = DIRECTORY + '/'

    if STATIC_URL.startswith('/'):
        STATIC_URL = STATIC_URL[1:]

    local_object_names = []
    create_count = 0
    upload_count = 0
    update_count = 0
    skip_count = 0
    delete_count = 0
    service = None

    def handle(self, *args, **options):
        self.wipe = options.get('wipe')
        self.test_run = options.get('test_run')
        self.verbosity = int(options.get('verbosity'))
        if options.get('container'):
            self.STATIC_CONTAINER = options.get('container')
        self.sync_files()

    def sync_files(self):
        self.service = BlobService(account_name=self.ACCOUNT_NAME,
                                   account_key=self.ACCOUNT_KEY)

        try:
            self.service.get_container_properties(self.STATIC_CONTAINER)
        except WindowsAzureMissingResourceError:
            self.service.create_container(self.STATIC_CONTAINER,
                                          x_ms_blob_public_access='blob')

        self.service.set_container_acl(self.STATIC_CONTAINER,
                                       x_ms_blob_public_access='blob')

        # if -w option is provided, wipe out the contents of the container
        if self.wipe:
            blob_count = len(self.service.list_blobs(self.STATIC_CONTAINER))

            if self.test_run:
                print "Wipe would delete %d objects." % blob_count
            else:
                print "Deleting %d objects..." % blob_count
                for blob in self.service.list_blobs(self.STATIC_CONTAINER):
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)

        # walk through the directory, creating or updating files on the cloud
        os.path.walk(self.DIRECTORY, self.upload_files, "foo")

        # remove any files on remote that don't exist locally
        self.delete_files()

        # print out the final tally to the cmd line
        self.update_count = self.upload_count - self.create_count
        print
        if self.test_run:
            print "Test run complete with the following results:"
        print "Skipped %d. Created %d. Updated %d. Deleted %d." % (
            self.skip_count, self.create_count, self.update_count,
            self.delete_count)

    def upload_files(self, arg, dirname, names):
        # upload or skip items
        for item in names:
            file_path = os.path.join(dirname, item)
            if os.path.isdir(file_path):
                continue  # Don't try to upload directories

            object_name = self.STATIC_URL + file_path.split(self.DIRECTORY)[1]
            self.local_object_names.append(object_name)

            try:
                properties = self.service.get_blob_properties(
                    self.STATIC_CONTAINER, object_name)
            except WindowsAzureMissingResourceError:
                properties = {}
                self.create_count += 1

            cloud_datetime = None
            if 'last-modified' in properties:
                cloud_datetime = (
                    properties['last-modified']
                    and datetime.datetime.strptime(properties['last-modified'],
                                                   "%a, %d %b %Y %H:%M:%S %Z")
                    or None)

            local_datetime = datetime.datetime.utcfromtimestamp(
                os.stat(file_path).st_mtime)

            if cloud_datetime and local_datetime < cloud_datetime:
                self.skip_count += 1
                if self.verbosity > 1:
                    print "Skipped %s: not modified." % object_name
                continue

            if not self.test_run:
                file_contents = open(file_path, 'rb').read()
                content_type, encoding = mimetypes.guess_type(file_path)
                self.service.put_blob(self.STATIC_CONTAINER,
                                      object_name,
                                      file_contents,
                                      x_ms_blob_type='BlockBlob',
                                      x_ms_blob_content_type=content_type,
                                      content_encoding=encoding)
                # sync_headers(cloud_obj)
            self.upload_count += 1
            if self.verbosity > 1:
                print "Uploaded", object_name

    def delete_files(self):
        # remove any objects in the container that don't exist locally
        for blob in self.service.list_blobs(self.STATIC_CONTAINER):
            if blob.name not in self.local_object_names:
                self.delete_count += 1
                if self.verbosity > 1:
                    print "Deleted %s" % blob.name
                if not self.test_run:
                    self.service.delete_blob(self.STATIC_CONTAINER, blob.name)
Esempio n. 34
0
class AzureBackend(duplicity.backend.Backend):
    """
    Backend for Azure Blob Storage Service
    """
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        # Import Microsoft Azure Storage SDK for Python library.
        try:
            import azure
            import azure.storage
            if hasattr(azure.storage, 'BlobService'):
                # v0.11.1 and below
                from azure.storage import BlobService
                self.AzureMissingResourceError = azure.WindowsAzureMissingResourceError
                self.AzureConflictError = azure.WindowsAzureConflictError
            else:
                # v1.0.0 and above
                import azure.storage.blob
                if hasattr(azure.storage.blob, 'BlobService'):
                    from azure.storage.blob import BlobService
                else:
                    from azure.storage.blob.blockblobservice import BlockBlobService as BlobService
                self.AzureMissingResourceError = azure.common.AzureMissingResourceHttpError
                self.AzureConflictError = azure.common.AzureConflictHttpError
        except ImportError as e:
            raise BackendException("""\
Azure backend requires Microsoft Azure Storage SDK for Python (https://pypi.python.org/pypi/azure-storage/).
Exception: %s""" % str(e))

        # TODO: validate container name
        self.container = parsed_url.path.lstrip('/')

        if 'AZURE_ACCOUNT_NAME' not in os.environ:
            raise BackendException('AZURE_ACCOUNT_NAME environment variable not set.')

        if 'AZURE_ACCOUNT_KEY' in os.environ:
            if 'AZURE_ENDPOINT_SUFFIX' in os.environ:
                self.blob_service = BlobService(account_name=os.environ['AZURE_ACCOUNT_NAME'],
                                                account_key=os.environ['AZURE_ACCOUNT_KEY'],
                                                endpoint_suffix=os.environ['AZURE_ENDPOINT_SUFFIX'])
            else:
                self.blob_service = BlobService(account_name=os.environ['AZURE_ACCOUNT_NAME'],
                                                account_key=os.environ['AZURE_ACCOUNT_KEY'])
            self._create_container()
        elif 'AZURE_SHARED_ACCESS_SIGNATURE' in os.environ:
            if 'AZURE_ENDPOINT_SUFFIX' in os.environ:
                self.blob_service = BlobService(account_name=os.environ['AZURE_ACCOUNT_NAME'],
                                                sas_token=os.environ['AZURE_SHARED_ACCESS_SIGNATURE'],
                                                endpoint_suffix=os.environ['AZURE_ENDPOINT_SUFFIX'])
            else:
                self.blob_service = BlobService(account_name=os.environ['AZURE_ACCOUNT_NAME'],
                                                sas_token=os.environ['AZURE_SHARED_ACCESS_SIGNATURE'])
        else:
            raise BackendException(
                'Neither AZURE_ACCOUNT_KEY nor AZURE_SHARED_ACCESS_SIGNATURE environment variable is set.')

        if globals.azure_max_single_put_size:
            # check if we use azure-storage>=0.30.0
            try:
                _ = self.blob_service.MAX_SINGLE_PUT_SIZE
                self.blob_service.MAX_SINGLE_PUT_SIZE = globals.azure_max_single_put_size
            # fallback for azure-storage<0.30.0
            except AttributeError:
                self.blob_service._BLOB_MAX_DATA_SIZE = globals.azure_max_single_put_size

        if globals.azure_max_block_size:
            # check if we use azure-storage>=0.30.0
            try:
                _ = self.blob_service.MAX_BLOCK_SIZE
                self.blob_service.MAX_BLOCK_SIZE = globals.azure_max_block_size
            # fallback for azure-storage<0.30.0
            except AttributeError:
                self.blob_service._BLOB_MAX_CHUNK_DATA_SIZE = globals.azure_max_block_size

    def _create_container(self):
        try:
            self.blob_service.create_container(self.container, fail_on_exist=True)
        except self.AzureConflictError:
            # Indicates that the resource could not be created because it already exists.
            pass
        except Exception as e:
            log.FatalError("Could not create Azure container: %s"
                           % unicode(e.message).split('\n', 1)[0],
                           log.ErrorCode.connection_failed)

    def _put(self, source_path, remote_filename):
        kwargs = {}
        if globals.azure_max_connections:
            kwargs['max_connections'] = globals.azure_max_connections

        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#upload-a-blob-into-a-container
        try:
            self.blob_service.create_blob_from_path(self.container, remote_filename, source_path.name, **kwargs)
        except AttributeError:  # Old versions use a different method name
            self.blob_service.put_block_blob_from_path(self.container, remote_filename, source_path.name, **kwargs)

    def _get(self, remote_filename, local_path):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#download-blobs
        self.blob_service.get_blob_to_path(self.container, remote_filename, local_path.name)

    def _list(self):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#list-the-blobs-in-a-container
        blobs = []
        marker = None
        while True:
            batch = self.blob_service.list_blobs(self.container, marker=marker)
            blobs.extend(batch)
            if not batch.next_marker:
                break
            marker = batch.next_marker
        return [blob.name for blob in blobs]

    def _delete(self, filename):
        # http://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#delete-blobs
        self.blob_service.delete_blob(self.container, filename)

    def _query(self, filename):
        prop = self.blob_service.get_blob_properties(self.container, filename)
        try:
            info = {'size': int(prop.properties.content_length)}
        except AttributeError:
            # old versions directly returned the properties
            info = {'size': int(prop['content-length'])}
        return info

    def _error_code(self, operation, e):
        if isinstance(e, self.AzureMissingResourceError):
            return log.ErrorCode.backend_not_found
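
The backend above is configured purely through the environment variables checked in __init__; a sketch of the expected setup (all values are placeholders):

import os

os.environ['AZURE_ACCOUNT_NAME'] = 'examplestorage'          # required
os.environ['AZURE_ACCOUNT_KEY'] = 'base64key=='               # or AZURE_SHARED_ACCESS_SIGNATURE
# os.environ['AZURE_SHARED_ACCESS_SIGNATURE'] = '?sv=...'     # alternative to the account key
# os.environ['AZURE_ENDPOINT_SUFFIX'] = 'core.windows.net'    # optional, for non-default endpoints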
Esempio n. 35
0
imagesQueue = 'mosaicqueue'

tableName = 'photos'
tablePartitionKey = 'allphotos'

# Get queue credentials
# accountName = environ["AZURE_STORAGE_ACCOUNT"]
with open("ASA.key", "r") as myfile:
    accountName = myfile.read().replace('\n', '')
# accountKey = environ["AZURE_STORAGE_ACCESS_KEY"]
with open("ASK.key", "r") as myfile:
    accountKey = myfile.read().replace('\n', '')

# Create blob service
blob_service = BlobService(account_name=accountName, account_key=accountKey)
blob_service.create_container(blob_small)
blob_service.create_container(blob_big)
blob_service.create_container(blob_analysis)

# Open queue with given credentials
queue_service = QueueService(account_name=accountName, account_key=accountKey)

# Open table service
table_service = TableService(account_name=accountName, account_key=accountKey)

# Repeat
while (True):

    # get images form *imagesQueue* - it is invoked by CRON
    messages = queue_service.get_messages(imagesQueue)
    if len(messages) == 0:
# Get settings from CustomScriptForLinux extension configurations
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
hutil =  Util.HandlerUtility(waagent.Log, waagent.Error, "bosh-deploy-script")
hutil.do_parse_context("enable")
settings = hutil.get_public_settings()
with open (os.path.join('bosh','settings'), "w") as tmpfile:
    tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))
username = settings["username"]
home_dir = os.path.join("/home", username)
install_log = os.path.join(home_dir, "install.log")

# Prepare the containers
storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
storage_access_key = settings["STORAGE-ACCESS-KEY"]
blob_service = BlobService(storage_account_name, storage_access_key)
blob_service.create_container('bosh')
blob_service.create_container('stemcell')

# Generate the private key and certificate
call("sh create_cert.sh", shell=True)
call("cp bosh.key ./bosh/bosh", shell=True)
with open ('bosh_cert.pem', 'r') as tmpfile:
    ssh_cert = tmpfile.read()
indentation = " " * 8
ssh_cert = ("\n" + indentation).join(ssh_cert.split('\n'))

# Render the yml template for bosh-init
bosh_template = 'bosh.yml'
if os.path.exists(bosh_template):
    with open (bosh_template, 'r') as tmpfile:
        contents = tmpfile.read()
Esempio n. 38
0
import glob
import os
import time

logtime = time.strftime("%Y%m%d-%H%M%S")
logmessage = "uploadmp3.py started"
command = "sed -i '1s/^/" + logtime + " " + logmessage + "\\n/' /home/pi/selcuk/log.txt"
os.system(command)

from azure.storage import BlobService
blob_service = BlobService(account_name='account_name', account_key='account_key')
blob_service.create_container('record')
blob_service.set_container_acl('record', x_ms_blob_public_access='container')

directory = "/home/pi/selcuk/mp3"

os.chdir(directory)
for file in glob.glob("*.mp3"):
    full_path = directory + "/" + file
    blob_service.put_block_blob_from_path(
        'record',
        file,
        full_path,
        x_ms_blob_content_type='audio/mpeg3'
    )
    os.remove(file)

    logtime = time.strftime("%Y%m%d-%H%M%S")
    logmessage = file + " uploaded to cloud and deleted from device"
Esempio n. 39
0
class AzureBackend(duplicity.backend.Backend):
    """
    Backend for Azure Blob Storage Service
    """
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        # Import Microsoft Azure Storage SDK for Python library.
        try:
            import azure
            import azure.storage
            if hasattr(azure.storage, 'BlobService'):
                # v0.11.1 and below
                from azure.storage import BlobService
                self.AzureMissingResourceError = azure.WindowsAzureMissingResourceError
                self.AzureConflictError = azure.WindowsAzureConflictError
            else:
                # v1.0.0 and above
                from azure.storage.blob import BlobService
                self.AzureMissingResourceError = azure.common.AzureMissingResourceHttpError
                self.AzureConflictError = azure.common.AzureConflictHttpError
        except ImportError as e:
            raise BackendException("""\
Azure backend requires Microsoft Azure Storage SDK for Python (https://pypi.python.org/pypi/azure-storage/).
Exception: %s""" % str(e))

        if 'AZURE_ACCOUNT_NAME' not in os.environ:
            raise BackendException('AZURE_ACCOUNT_NAME environment variable not set.')
        if 'AZURE_ACCOUNT_KEY' not in os.environ:
            raise BackendException('AZURE_ACCOUNT_KEY environment variable not set.')
        self.blob_service = BlobService(account_name=os.environ['AZURE_ACCOUNT_NAME'],
                                        account_key=os.environ['AZURE_ACCOUNT_KEY'])

        # TODO: validate container name
        self.container = parsed_url.path.lstrip('/')
        try:
            self.blob_service.create_container(self.container, fail_on_exist=True)
        except self.AzureConflictError:
            # Indicates that the resource could not be created because it already exists.
            pass
        except Exception as e:
            log.FatalError("Could not create Azure container: %s"
                           % unicode(e.message).split('\n', 1)[0],
                           log.ErrorCode.connection_failed)

    def _put(self, source_path, remote_filename):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#upload-a-blob-into-a-container
        self.blob_service.put_block_blob_from_path(self.container, remote_filename, source_path.name)

    def _get(self, remote_filename, local_path):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#download-blobs
        self.blob_service.get_blob_to_path(self.container, remote_filename, local_path.name)

    def _list(self):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#list-the-blobs-in-a-container
        blobs = []
        marker = None
        while True:
            batch = self.blob_service.list_blobs(self.container, marker=marker)
            blobs.extend(batch)
            if not batch.next_marker:
                break
            marker = batch.next_marker
        return [blob.name for blob in blobs]

    def _delete(self, filename):
        # http://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#delete-blobs
        self.blob_service.delete_blob(self.container, filename)

    def _query(self, filename):
        prop = self.blob_service.get_blob_properties(self.container, filename)
        return {'size': int(prop['content-length'])}

    def _error_code(self, operation, e):
        if isinstance(e, self.AzureMissingResourceError):
            return log.ErrorCode.backend_not_found
Esempio n. 40
0
from azure.storage import BlobService
import sys

key = raw_input("Please enter azure videopath blob storage key: ")
blob_service = BlobService(account_name='videopathmobilefiles',
                           account_key=key)

source = sys.argv[1]
target = sys.argv[2]

print source + " -> " + target

blob_service.create_container(target, x_ms_blob_public_access='container')

# blob_service.copy_blob('test2', 'copiedkey', '/videopathmobilefiles/test/key')

blobs = blob_service.list_blobs(source)

for b in blobs:
    name = b.name
    source_path = '/videopathmobilefiles/' + source + '/' + name
    blob_service.copy_blob(target, name, source_path)
    print name
Esempio n. 41
0
class AzureBlobStorage(Storage):

    def __init__(self, account='nyxstorage', container='pxo'):
        self.base_storage_uri = 'http://%s.blob.core.windows.net/%s/' % (
            account, container)
        self.blob_service = BlobService(
            account, get_env_variable('AZURE_BLOB_STORAGE_KEY'))
        self.container = container

    def _open(self, name, mode='rb'):
        data = self.blob_service.get_blob(self.container, name)
        return ContentFile(data)

    def _save(self, name, content):
        _file = content.read()
        file_name = content.name[-35:]
        self.blob_service.put_blob(
            self.container, file_name, _file, x_ms_blob_type='BlockBlob')
        return self.base_storage_uri + file_name

    def create_container(self, container_name):
        result = self.blob_service.create_container(
            container_name, x_ms_blob_public_access='container')
        return result

    def delete(self, name):
        self.blob_service.delete_blob(self.container, name)

    def exists(self, name):
        try:
            self.blob_service.get_blob_properties(self.container, name)
        except:
            return False
        else:
            return True

    def get_available_name(self, name):
        return name

    def get_blobs(self):
        blobs = self.blob_service.list_blobs(self.container)
        return blobs

    def get_valid_name(self, name):
        return name

    def modified_time(self, name):
        metadata = self.blob_service.get_blob_metadata(self.container, name)
        modified_time = float(metadata.get('x-ms-meta-modified_time'))
        return datetime.fromtimestamp(modified_time)

    def set_public_container(self, container_name):
        result = self.blob_service.set_container_acl(
            container_name, x_ms_blob_public_access='container')
        return result

    def size(self, name):
        properties = self.blob_service.get_blob_properties(
            self.container, name)
        return properties.get('content-length')

    def url(self, name):
        blob = self.blob_service.list_blobs(self.container, prefix=name)
        return blob.blobs[0].url
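
A rough illustration of exercising the storage class above through Django's Storage API (file name and payload are placeholders, targeting the older Django releases this backend was written against; in a real project it would normally be selected via DEFAULT_FILE_STORAGE rather than instantiated by hand):

from django.core.files.base import ContentFile

storage = AzureBlobStorage()   # account='nyxstorage', container='pxo' defaults from above
blob_url = storage.save('avatar.png', ContentFile(b'raw image bytes', name='avatar.png'))
print blob_url                 # _save() returns base_storage_uri plus the (truncated) file name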
Esempio n. 42
0
        try:
            blob_service.put_block_blob_from_path(
                container_name,
                key,
                source,
                x_ms_blob_content_type=content_type,
                x_ms_blob_cache_control='public, max-age=600')
            break
        except:
            print "retrying..."
            time.sleep(1)


# walk all files in dir and push to bucket

codepath = os.path.dirname(os.path.abspath(__file__)) + "/upload/" + videoname
container_name = videoname.lower()
blob_service.create_container(container_name,
                              x_ms_blob_public_access='container')

for path, subdirs, files in os.walk(codepath):
    for name in files:
        # don't upload hidden files
        if name[0] == ".":
            continue

        pathname = os.path.join(path, name)
        keyname = pathname.replace(codepath, "")[1:]

        upload_file(pathname, container_name, name)
class BlobServiceAdapter(Component):
    """The :class:`BlobServiceAdapter` class is a thin wrapper over azure.storage.BlobService.

    All the attributes of the wrapper stream are proxied by the adapter so
    it's possible to do ``adapter.create_container()`` instead of the long form
    ``adapter.blob_service.adapter()``.
    """

    def __init__(self):
        self.blob_service = BlobService(
            account_name=self.util.get_config("storage.azure.account_name"),
            account_key=self.util.get_config("storage.azure.account_key"),
            host_base=self.util.get_config("storage.azure.blob_service_host_base"),
        )

    def __getattr__(self, name):
        return getattr(self.blob_service, name)

    def create_container_in_storage(self, container_name, access="container"):
        """create a container if doesn't exist

        :type container_name: str|unicode
        :param container_name: Name of container to create.

        :type access: str|unicode
        :param access: Optional. Possible values include: container, blob
        :return:
        """
        try:
            names = [x.name for x in self.blob_service.list_containers()]
            if container_name not in names:
                return self.blob_service.create_container(container_name, x_ms_blob_public_access=access)
            else:
                self.log.debug("container already exists in storage")
                return True
        except Exception as e:
            self.log.error(e)
            return False

    def upload_file_to_azure(self, container_name, blob_name, stream):
        """
        Creates a new block blob from a file/stream, or updates the content of
        an existing block blob, with automatic chunking and progress
        notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str | unicode
        :param blob_name: Name of blob to create or update.

        :type stream: file
        :param stream: Opened file/stream to upload as the blob content.
        """
        try:
            if self.create_container_in_storage(container_name, "container"):
                self.blob_service.put_block_blob_from_file(container_name, blob_name, stream)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_bytes(self, container_name, blob_name, blob):
        """
        Creates a new block blob from an array of bytes, or updates the content
        of an existing block blob, with automatic chunking and progress
        notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str|unicode
        :param blob_name: Name of blob to create or update.

        :type blob: bytes
        :param blob: Content of blob as an array of bytes.
        """
        try:
            if self.create_container_in_storage(container_name, "container"):
                self.blob_service.put_block_blob_from_bytes(container_name, blob_name, blob)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_text(self, container_name, blob_name, text):
        """
        Creates a new block blob from str/unicode, or updates the content of an
        existing block blob, with automatic chunking and progress notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str|unicode
        :param blob_name: Name of blob to create or update.

        :type text: str|unicode
        :param text: Text to upload to the blob.
        """
        try:
            if self.create_container_in_storage(container_name, "container"):
                self.blob_service.put_block_blob_from_text(container_name, blob_name, text)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def upload_file_to_azure_from_path(self, container_name, blob_name, path):
        """
        Creates a new block blob from a file path, or updates the content of an
        existing block blob, with automatic chunking and progress notifications.

        :type container_name: str|unicode
        :param container_name: Name of existing container.

        :type blob_name: str|unicode
        :param blob_name: Name of blob to create or update.

        :type path: str|unicode
        :param path: Path of the file to upload as the blob content.
        """
        try:
            if self.create_container_in_storage(container_name, "container"):
                self.blob_service.put_block_blob_from_path(container_name, blob_name, path)
                return self.blob_service.make_blob_url(container_name, blob_name)
            else:
                return None
        except Exception as e:
            self.log.error(e)
            return None

    def delete_file_from_azure(self, container_name, blob_name):
        try:
            if self.create_container_in_storage(container_name, "container"):
                self.blob_service.delete_blob(container_name, blob_name)
        except Exception as e:
            self.log.error(e)
            return None
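
Because of the __getattr__ fallthrough, SDK methods that the adapter does not define can still be called on it directly; a small illustrative sketch (the container and blob names are made up, and the Component base is expected to supply self.util and self.log):

adapter = BlobServiceAdapter()
if adapter.create_container_in_storage("experiment-uploads"):
    # make_blob_url() is not defined on the adapter, so the call is
    # proxied to the wrapped BlobService via __getattr__.
    print adapter.make_blob_url("experiment-uploads", "report.txt")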
Esempio n. 44
0
import json
import os
import uuid

from flask import Flask, request, render_template

from azure.storage import BlobService
from azure import WindowsAzureError, WindowsAzureMissingResourceError

import logging
from logging.handlers import RotatingFileHandler

app = Flask(__name__)

service_name = 'azure-storage'
vcap_services = json.loads(os.environ['VCAP_SERVICES'])
account_name = vcap_services[service_name][0]['credentials']['storage_account_name']
account_key = vcap_services[service_name][0]['credentials']['primary_access_key']
blob_service = BlobService(account_name, account_key)

container_name = '{0}{1}'.format('cloud-foundry-', str(uuid.uuid4()).replace('-', ''))
blob_service.create_container(container_name)

@app.route('/')
def index():
    blobs = blob_service.list_blobs(container_name)
    return render_template("index.html", filenames=[blob.name for blob in blobs])

@app.route('/upload', methods= ['POST'])
def upload():
    file = request.files['file']
    if not file or not file.filename:
        msg = 'The file to upload is invalid'
        blobs = blob_service.list_blobs(container_name)
        return render_template("index.html", msg=msg, filenames=[blob.name for blob in blobs])
    try:
        app.logger.info('uploading file {0}'.format(file.filename))
Esempio n. 45
0
def upload_to_blob(filename):
	#uploads to azure blob storage
	blob_service = BlobService(account_name=storage_account_name, account_key=storage_account_key)
	blob_service.create_container('pubsubcat-pics')
	blob_service.put_block_blob_from_path("pubsubcat-pics", hostname + "/" + filename, 'temp/' + filename)
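
A hypothetical call, assuming the module-level storage_account_name, storage_account_key and hostname globals are set and the file has already been written under temp/:

upload_to_blob('snapshot.jpg')   # uploads temp/snapshot.jpg as <hostname>/snapshot.jpg in pubsubcat-pics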
Esempio n. 46
0
class AzureJobStore(AbstractJobStore):
    """
    A job store that uses Azure's blob store for file storage and
    Table Service to store job info with strong consistency."""

    def __init__(self, accountName, namePrefix, config=None, jobChunkSize=maxAzureTablePropertySize):
        self.jobChunkSize = jobChunkSize
        self.keyPath = None

        self.account_key = _fetchAzureAccountKey(accountName)

        # Table names have strict requirements in Azure
        self.namePrefix = self._sanitizeTableName(namePrefix)
        log.debug("Creating job store with name prefix '%s'" % self.namePrefix)

        # These are the main API entrypoints.
        self.tableService = TableService(account_key=self.account_key, account_name=accountName)
        self.blobService = BlobService(account_key=self.account_key, account_name=accountName)

        # Register our job-store in the global table for this storage account
        self.registryTable = self._getOrCreateTable('toilRegistry')
        exists = self.registryTable.get_entity(row_key=self.namePrefix)
        self._checkJobStoreCreation(config is not None, exists, accountName + ":" + self.namePrefix)
        self.registryTable.insert_or_replace_entity(row_key=self.namePrefix,
                                                    entity={'exists': True})

        # Serialized jobs table
        self.jobItems = self._getOrCreateTable(self.qualify('jobs'))
        # Job<->file mapping table
        self.jobFileIDs = self._getOrCreateTable(self.qualify('jobFileIDs'))

        # Container for all shared and unshared files
        self.files = self._getOrCreateBlobContainer(self.qualify('files'))

        # Stats and logging strings
        self.statsFiles = self._getOrCreateBlobContainer(self.qualify('statsfiles'))
        # File IDs that contain stats and logging strings
        self.statsFileIDs = self._getOrCreateTable(self.qualify('statsFileIDs'))

        super(AzureJobStore, self).__init__(config=config)

        if self.config.cseKey is not None:
            self.keyPath = self.config.cseKey

    # Table names must be alphanumeric
    nameSeparator = 'xx'

    # Length of a jobID - used to test if a stats file has been read already or not
    jobIDLength = len(str(uuid.uuid4()))

    def qualify(self, name):
        return self.namePrefix + self.nameSeparator + name

    def jobs(self):
        for jobEntity in self.jobItems.query_entities():
            yield AzureJob.fromEntity(jobEntity)

    def create(self, command, memory, cores, disk,
               predecessorNumber=0):
        jobStoreID = self._newJobID()
        job = AzureJob(jobStoreID=jobStoreID,
                       command=command, memory=memory, cores=cores, disk=disk,
                       remainingRetryCount=self._defaultTryCount(), logJobStoreFileID=None,
                       predecessorNumber=predecessorNumber)
        entity = job.toItem(chunkSize=self.jobChunkSize)
        entity['RowKey'] = jobStoreID
        self.jobItems.insert_entity(entity=entity)
        return job

    def exists(self, jobStoreID):
        if self.jobItems.get_entity(row_key=jobStoreID) is None:
            return False
        return True

    def load(self, jobStoreID):
        jobEntity = self.jobItems.get_entity(row_key=jobStoreID)
        if jobEntity is None:
            raise NoSuchJobException(jobStoreID)
        return AzureJob.fromEntity(jobEntity)

    def update(self, job):
        self.jobItems.update_entity(row_key=job.jobStoreID,
                                    entity=job.toItem(chunkSize=self.jobChunkSize))

    def delete(self, jobStoreID):
        try:
            self.jobItems.delete_entity(row_key=jobStoreID)
        except WindowsAzureMissingResourceError:
            # Job deletion is idempotent, and this job has been deleted already
            return
        filterString = "PartitionKey eq '%s'" % jobStoreID
        for fileEntity in self.jobFileIDs.query_entities(filter=filterString):
            jobStoreFileID = fileEntity.RowKey
            self.deleteFile(jobStoreFileID)

    def deleteJobStore(self):
        self.registryTable.delete_entity(row_key=self.namePrefix)
        self.jobItems.delete_table()
        self.jobFileIDs.delete_table()
        self.files.delete_container()
        self.statsFiles.delete_container()
        self.statsFileIDs.delete_table()

    def getEnv(self):
        return dict(AZURE_ACCOUNT_KEY=self.account_key)

    def writeFile(self, localFilePath, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.updateFile(jobStoreFileID, localFilePath)
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    def updateFile(self, jobStoreFileID, localFilePath):
        with open(localFilePath) as read_fd:
            with self._uploadStream(jobStoreFileID, self.files) as write_fd:
                while True:
                    buf = read_fd.read(self._maxAzureBlockBytes)
                    write_fd.write(buf)
                    if len(buf) == 0:
                        break

    def readFile(self, jobStoreFileID, localFilePath):
        try:
            with self._downloadStream(jobStoreFileID, self.files) as read_fd:
                with open(localFilePath, 'w') as write_fd:
                    while True:
                        buf = read_fd.read(self._maxAzureBlockBytes)
                        write_fd.write(buf)
                        if not buf: break
        except WindowsAzureMissingResourceError:
            raise NoSuchFileException(jobStoreFileID)

    def deleteFile(self, jobStoreFileID):
        try:
            self.files.delete_blob(blob_name=jobStoreFileID)
            self._dissociateFileFromJob(jobStoreFileID)
        except WindowsAzureMissingResourceError:
            pass

    def fileExists(self, jobStoreFileID):
        # As Azure doesn't have a blob_exists method (at least in the
        # python API) we just try to download the metadata, and hope
        # the metadata is small so the call will be fast.
        try:
            self.files.get_blob_metadata(blob_name=jobStoreFileID)
            return True
        except WindowsAzureMissingResourceError:
            return False

    @contextmanager
    def writeFileStream(self, jobStoreID=None):
        # TODO: this (and all stream methods) should probably use the
        # Append Blob type, but that is not currently supported by the
        # Azure Python API.
        jobStoreFileID = self._newFileID()
        with self._uploadStream(jobStoreFileID, self.files) as fd:
            yield fd, jobStoreFileID
        self._associateFileWithJob(jobStoreFileID, jobStoreID)

    @contextmanager
    def updateFileStream(self, jobStoreFileID):
        with self._uploadStream(jobStoreFileID, self.files, checkForModification=True) as fd:
            yield fd

    def getEmptyFileStoreID(self, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.files.put_blob(blob_name=jobStoreFileID, blob='',
                            x_ms_blob_type='BlockBlob')
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    @contextmanager
    def readFileStream(self, jobStoreFileID):
        if not self.fileExists(jobStoreFileID):
            raise NoSuchFileException(jobStoreFileID)
        with self._downloadStream(jobStoreFileID, self.files) as fd:
            yield fd

    @contextmanager
    def writeSharedFileStream(self, sharedFileName, isProtected=None):
        sharedFileID = self._newFileID(sharedFileName)
        with self._uploadStream(sharedFileID, self.files, encrypted=isProtected) as fd:
            yield fd

    @contextmanager
    def readSharedFileStream(self, sharedFileName):
        sharedFileID = self._newFileID(sharedFileName)
        if not self.fileExists(sharedFileID):
            raise NoSuchFileException(sharedFileID)
        with self._downloadStream(sharedFileID, self.files) as fd:
            yield fd

    def writeStatsAndLogging(self, statsAndLoggingString):
        # TODO: would be a great use case for the append blobs, once implemented in the Azure SDK
        jobStoreFileID = self._newFileID()
        encrypted = self.keyPath is not None
        if encrypted:
            statsAndLoggingString = encryption.encrypt(statsAndLoggingString, self.keyPath)
        self.statsFiles.put_block_blob_from_text(blob_name=jobStoreFileID,
                                                 text=statsAndLoggingString,
                                                 x_ms_meta_name_values=dict(
                                                     encrypted=str(encrypted)))
        self.statsFileIDs.insert_entity(entity={'RowKey': jobStoreFileID})

    def readStatsAndLogging(self, callback, readAll=False):
        suffix = '_old'
        numStatsFiles = 0
        for entity in self.statsFileIDs.query_entities():
            jobStoreFileID = entity.RowKey
            hasBeenRead = len(jobStoreFileID) > self.jobIDLength
            if not hasBeenRead:
                with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
                    callback(fd)
                # Mark this entity as read by appending the suffix
                self.statsFileIDs.insert_entity(entity={'RowKey': jobStoreFileID + suffix})
                self.statsFileIDs.delete_entity(row_key=jobStoreFileID)
                numStatsFiles += 1
            elif readAll:
                # Strip the suffix to get the original ID
                jobStoreFileID = jobStoreFileID[:-len(suffix)]
                with self._downloadStream(jobStoreFileID, self.statsFiles) as fd:
                    callback(fd)
                numStatsFiles += 1
        return numStatsFiles

    _azureTimeFormat = "%Y-%m-%dT%H:%M:%SZ"

    def getPublicUrl(self, jobStoreFileID):
        try:
            self.files.get_blob_properties(blob_name=jobStoreFileID)
        except WindowsAzureMissingResourceError:
            raise NoSuchFileException(jobStoreFileID)
        # Compensate of a little bit of clock skew
        startTimeStr = (datetime.utcnow() - timedelta(minutes=5)).strftime(self._azureTimeFormat)
        endTime = datetime.utcnow() + self.publicUrlExpiration
        endTimeStr = endTime.strftime(self._azureTimeFormat)
        sap = SharedAccessPolicy(AccessPolicy(startTimeStr, endTimeStr,
                                              BlobSharedAccessPermissions.READ))
        sas_token = self.files.generate_shared_access_signature(blob_name=jobStoreFileID,
                                                                shared_access_policy=sap)
        return self.files.make_blob_url(blob_name=jobStoreFileID) + '?' + sas_token

    def getSharedPublicUrl(self, sharedFileName):
        jobStoreFileID = self._newFileID(sharedFileName)
        return self.getPublicUrl(jobStoreFileID)

    def _newJobID(self):
        # raw UUIDs don't work for Azure property names because the '-' character is disallowed.
        return str(uuid.uuid4()).replace('-', '_')

    # A dummy job ID under which all shared files are stored.
    sharedFileJobID = uuid.UUID('891f7db6-e4d9-4221-a58e-ab6cc4395f94')

    def _newFileID(self, sharedFileName=None):
        if sharedFileName is None:
            ret = str(uuid.uuid4())
        else:
            ret = str(uuid.uuid5(self.sharedFileJobID, str(sharedFileName)))
        return ret.replace('-', '_')

    def _associateFileWithJob(self, jobStoreFileID, jobStoreID=None):
        if jobStoreID is not None:
            self.jobFileIDs.insert_entity(entity={'PartitionKey': jobStoreID,
                                                  'RowKey': jobStoreFileID})

    def _dissociateFileFromJob(self, jobStoreFileID):
        entities = self.jobFileIDs.query_entities(filter="RowKey eq '%s'" % jobStoreFileID)
        if entities:
            assert len(entities) == 1
            jobStoreID = entities[0].PartitionKey
            self.jobFileIDs.delete_entity(partition_key=jobStoreID, row_key=jobStoreFileID)

    def _getOrCreateTable(self, tableName):
        # This will not fail if the table already exists.
        for attempt in retry_on_error():
            with attempt:
                self.tableService.create_table(tableName)
        return AzureTable(self.tableService, tableName)

    def _getOrCreateBlobContainer(self, containerName):
        for attempt in retry_on_error():
            with attempt:
                self.blobService.create_container(containerName)
        return AzureBlobContainer(self.blobService, containerName)

    def _sanitizeTableName(self, tableName):
        """
        Azure table names must start with a letter and be alphanumeric.

        This will never cause a collision if uuids are used, but
        otherwise may not be safe.
        """
        return 'a' + filter(lambda x: x.isalnum(), tableName)

    # Maximum bytes that can be in any block of an Azure block blob
    # https://github.com/Azure/azure-storage-python/blob/4c7666e05a9556c10154508335738ee44d7cb104/azure/storage/blob/blobservice.py#L106
    _maxAzureBlockBytes = 4 * 1024 * 1024

    @contextmanager
    def _uploadStream(self, jobStoreFileID, container, checkForModification=False, encrypted=None):
        """
        :param encrypted: True to enforce encryption (will raise exception unless key is set),
        False to prevent encryption or None to encrypt if key is set.
        """
        if checkForModification:
            try:
                expectedVersion = container.get_blob_properties(blob_name=jobStoreFileID)['etag']
            except WindowsAzureMissingResourceError:
                expectedVersion = None

        if encrypted is None:
            encrypted = self.keyPath is not None
        elif encrypted:
            if self.keyPath is None:
                raise RuntimeError('Encryption requested but no key was provided')

        maxBlockSize = self._maxAzureBlockBytes
        if encrypted:
            # There is a small overhead for encrypted data.
            maxBlockSize -= encryption.overhead
        readable_fh, writable_fh = os.pipe()
        with os.fdopen(readable_fh, 'r') as readable:
            with os.fdopen(writable_fh, 'w') as writable:
                def reader():
                    blockIDs = []
                    try:
                        while True:
                            buf = readable.read(maxBlockSize)
                            if len(buf) == 0:
                                # We're safe to break here even if we never read anything, since
                                # putting an empty block list creates an empty blob.
                                break
                            if encrypted:
                                buf = encryption.encrypt(buf, self.keyPath)
                            blockID = self._newFileID()
                            container.put_block(blob_name=jobStoreFileID,
                                                block=buf,
                                                blockid=blockID)
                            blockIDs.append(blockID)
                    except:
                        # This is guaranteed to delete any uncommitted
                        # blocks.
                        container.delete_blob(blob_name=jobStoreFileID)
                        raise

                    if checkForModification and expectedVersion is not None:
                        # Acquire a (60-second) write lock,
                        leaseID = container.lease_blob(blob_name=jobStoreFileID,
                                                       x_ms_lease_action='acquire')['x-ms-lease-id']
                        # check for modification,
                        blobProperties = container.get_blob_properties(blob_name=jobStoreFileID)
                        if blobProperties['etag'] != expectedVersion:
                            container.lease_blob(blob_name=jobStoreFileID,
                                                 x_ms_lease_action='release',
                                                 x_ms_lease_id=leaseID)
                            raise ConcurrentFileModificationException(jobStoreFileID)
                        # commit the file,
                        container.put_block_list(blob_name=jobStoreFileID,
                                                 block_list=blockIDs,
                                                 x_ms_lease_id=leaseID,
                                                 x_ms_meta_name_values=dict(
                                                     encrypted=str(encrypted)))
                        # then release the lock.
                        container.lease_blob(blob_name=jobStoreFileID,
                                             x_ms_lease_action='release',
                                             x_ms_lease_id=leaseID)
                    else:
                        # No need to check for modification, just blindly write over whatever
                        # was there.
                        container.put_block_list(blob_name=jobStoreFileID,
                                                 block_list=blockIDs,
                                                 x_ms_meta_name_values=dict(
                                                     encrypted=str(encrypted)))

                thread = ExceptionalThread(target=reader)
                thread.start()
                yield writable
            # The writable is now closed. This will send EOF to the readable and cause that
            # thread to finish.
            thread.join()

    @contextmanager
    def _downloadStream(self, jobStoreFileID, container):
        # The reason this is not in the writer is so that we catch non-existent blobs early
        blobProps = container.get_blob_properties(blob_name=jobStoreFileID)

        encrypted = strict_bool(blobProps['x-ms-meta-encrypted'])
        if encrypted and self.keyPath is None:
            raise AssertionError('Content is encrypted but no key was provided.')

        readable_fh, writable_fh = os.pipe()
        with os.fdopen(readable_fh, 'r') as readable:
            with os.fdopen(writable_fh, 'w') as writable:
                def writer():
                    try:
                        chunkStartPos = 0
                        fileSize = int(blobProps['Content-Length'])
                        while chunkStartPos < fileSize:
                            chunkEndPos = chunkStartPos + self._maxAzureBlockBytes - 1
                            buf = container.get_blob(blob_name=jobStoreFileID,
                                                     x_ms_range="bytes=%d-%d" % (chunkStartPos,
                                                                                 chunkEndPos))
                            if encrypted:
                                buf = encryption.decrypt(buf, self.keyPath)
                            writable.write(buf)
                            chunkStartPos = chunkEndPos + 1
                    finally:
                        # Ensure readers aren't left blocking if this thread crashes.
                        # This close() will send EOF to the reading end and ultimately cause the
                        # yield to return. It also makes the implicit .close() done by the enclosing
                        # "with" context redundant, but that should be OK since .close() on file
                        # objects is idempotent.
                        writable.close()

                thread = ExceptionalThread(target=writer)
                thread.start()
                yield readable
                thread.join()
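
A minimal, Azure-free sketch of the pipe-plus-thread streaming pattern used by _uploadStream and _downloadStream above: the caller gets one end of an os.pipe() as a file object while a background thread drains the other end in fixed-size blocks. The upload_block callback and the tiny block size are placeholders for illustration, not part of any Azure SDK.

import os
import threading
from contextlib import contextmanager

@contextmanager
def upload_stream(upload_block, block_size=4 * 1024 * 1024):
    # Hand the writable end to the caller; push completed blocks from a thread.
    readable_fh, writable_fh = os.pipe()
    readable = os.fdopen(readable_fh, 'rb')
    writable = os.fdopen(writable_fh, 'wb')

    def reader():
        while True:
            buf = readable.read(block_size)
            if not buf:
                break
            upload_block(buf)
        readable.close()

    thread = threading.Thread(target=reader)
    thread.start()
    try:
        yield writable
    finally:
        # Closing the writable end sends EOF to the reader thread.
        writable.close()
        thread.join()

# Usage: collect blocks in a list instead of calling Azure.
blocks = []
with upload_stream(blocks.append, block_size=4) as fd:
    fd.write(b'hello world')
assert b''.join(blocks) == b'hello world'
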
Example n. 47
0
File: sys.py Project: gomes-/alx
class SAzure(SyncStorage):
    def __init__(self):
        super().__init__()
        self.msg_key_na = _('Key not available')
        try:
            import alxlib.key

            key = alxlib.key.Key()
            if os.path.isfile(key.get_path()):
                sys.path.insert(0, key.get_dir())

                import alxkey

                self.key = alxkey.alxkey_azure
                """self.blob = BlobService(account_name=self.key['AZURE_STORAGE_ACCOUNT_NAME'],
                                        account_key=self.key['AZURE_ACCESS_KEY'])"""
            else:
                # raise (self.msg_key_na)
                self.key = None
        except:
            pass
            # raise (self.msg_key_na)

    def connect(self):
        try:

            self.blob = BlobService(account_name=self.key['AZURE_STORAGE_ACCOUNT_NAME'],
                                    account_key=self.key['AZURE_ACCESS_KEY'])

            return self.blob.list_containers(maxresults=1)

        except:
            return None

    def connect_blob(self, az_account_name=None, az_account_key=None):

        try:
            if az_account_name is not None:
                self.key['AZURE_STORAGE_ACCOUNT_NAME'] = az_account_name
                self.key['AZURE_ACCESS_KEY'] = az_account_key

            return self.connect()

        except:
            return None

    def path_clean(self, path: str):
        try:
            i = path.index("//") + 2
            self.container = path[0:i]
            if path[len(path) - 1] != "/":
                path += "/"

            return path[i:]
        except:
            print(_("Bad Path"))
            exit(1)

    def spath(self, container, root, b):
        spath = SyncPath()
        spath.BasePath = container
        if b.name[len(b.name) - 1] == "/":
            spath.IsDir = True
        else:
            spath.IsFile = True
        spath.AbsPath = b.name
        if len(root) > 0:
            spath.SPath = b.name[len(root) - 1:]
        else:
            spath.SPath = b.name
        spath.Size = b.properties.content_length
        import alxlib.time_help

        spath.ModifiedTS = alxlib.time_help.to_timestamp(b.properties.last_modified)
        spath.MD5 = b.properties.content_md5
        spath.sys = "azure"
        return spath

    def path_split(self, path: str):
        try:
            list = path.split("/")
            container = list[0]
            uri = ""
            if len(list) > 1:
                uri = "/".join(map(str, list[1:]))

            return container, uri
        except:
            print(_("Bad path"))
            exit(1)

    def path_list_blobs(self, container, uri):

        try:
            if len(uri) > 0:
                blobs = self.blob.list_blobs(container, prefix=uri)
            else:
                blobs = self.blob.list_blobs(container)

            """for blob in blobs:
                print(blob.properties.__dict__)
                print(blob.name)
                print(blob.url)"""
            return blobs
        except Exception as e:
            print(_("Bad connection"))
            logging.warning("container {0}, path {1}".format(container, uri))
            exit(1)

    def path_list(self, path):
        try:
            logging.debug("path_list {0}".format(path))

            container, uri = self.path_split(path)
            logging.debug("Container: {0}, Uri: {1}".format(container, uri))

            self.connect()
            self.blob.create_container(container)

            blobs = self.path_list_blobs(container, uri)

            d = {}

            for b in blobs:
                spath = self.spath(container, uri, b)
                # print(b.__dict__)
                #print(str(b.properties.last_modified.__dict__))
                #print(str(spath.ModifiedTS))
                d[spath.SPath] = spath
            # print(d)
            return d
        except Exception as e:
            print(e)

    def remove(self, src: SyncPath):
        try:
            logging.debug("Removing {0}".format(src.AbsPath))
            self.connect()
            self.blob.create_container(src.BasePath)
            self.blob.delete_blob(src.BasePath, src.AbsPath)
        except:
            pass


    def copy_local2azure(self, src, base_dir):
        try:

            container, uri = self.path_split(base_dir)

            if len(src.SPath) > 0 and src.SPath[0] == "/":
                path = uri + src.SPath[1:]
            else:
                path = uri + src.SPath
            logging.debug("copy_local2azure Spath {0}. path:{1}".format(src.SPath, path))
            self.connect()
            if not src.IsDir:
                self.blob.put_block_blob_from_path(container, path, src.AbsPath)
            else:
                self.blob.put_block_blob_from_text(container, path + "/", "")
        except Exception as e:
            print("Error Copying")
            print(e)

    def copy_azure2local(self, src, base_dir):
        try:

            if len(src.SPath) > 0 and (src.SPath[0] == "/" or src.SPath[0] == "\\"):
                path = src.SPath[1:]
            else:
                path = src.SPath

            path = os.path.normpath(os.path.join(base_dir, path))
            logging.debug("copy_azure2local basedir:{0} Spath {1}, path {2}, abs: {3}".format(base_dir, src.SPath, path, src.AbsPath))

            if not os.path.isdir(path):
                os.makedirs(os.path.dirname(path), exist_ok=True)
            # print(os.path.dirname(path) + "***************")

            if not (len(src.AbsPath) > 0 and src.AbsPath[len(src.AbsPath) - 1] == "/"):
                self.blob.get_blob_to_path(src.BasePath, src.AbsPath, path)

            """container, uri = self.path_split(base_dir)

            if len(src.SPath)>0 and src.SPath[0]=="/":
                path= uri+ src.SPath[1:]
            else:
                path= uri+src.SPath
            self.connect()
            if not src.IsDir:
                self.blob.get_blob_to_path(src.BasePath, path, src.AbsPath)
            else:
                self.blob.put_block_blob_from_text(container, path, "")"""
        except Exception as e:
            print("Error copying")
            print(e)
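
A small standalone illustration of the container/prefix split performed by SAzure.path_split above; the helper and the sample paths below are made up for the example and are not part of the class.

def split_container_path(path):
    # The first path segment is the container, the rest is the blob prefix.
    parts = path.split("/")
    container = parts[0]
    prefix = "/".join(parts[1:]) if len(parts) > 1 else ""
    return container, prefix

assert split_container_path("backups/2016/01/db.bak") == ("backups", "2016/01/db.bak")
assert split_container_path("backups") == ("backups", "")
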
def do_step(context):
    settings = context.meta['settings']
    index_file = context.meta['index-file']
    pivnetAPIToken = settings["pivnet-api-token"]

    f = open("manifests/{0}".format(index_file))
    manifests = yaml.safe_load(f)
    f.close()

    eula_urls = [
        "https://network.pivotal.io/api/v2/products/{0}/releases/{1}/eula_acceptance".format(
            m['release-name'],
            m['release-number']) for m in manifests['manifests']]

    release_urls = [
        "https://network.pivotal.io/api/v2/products/{0}/releases/{1}/product_files/{2}/download".format(
            m['release-name'],
            m['release-number'],
            m['file-number']) for m in manifests['manifests']]

    stemcell_urls = [m['stemcell'] for m in manifests['manifests']]

    # accept eula for each product
    for url in eula_urls:
        print url
        if not "concourse" in url:
            res = authorizedPost(url, pivnetAPIToken)
            code = res.getcode()

    # releases
    is_release_file = re.compile("^releases\/.+")
    if not os.path.exists("/tmp/releases"):
        os.makedirs("/tmp/releases")

    client = bosh_client.BoshClient("https://10.0.0.4:25555", "admin", "admin")
    storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
    storage_access_key = settings["STORAGE-ACCESS-KEY"]

    blob_service = BlobService(storage_account_name, storage_access_key)
    blob_service.create_container(
        container_name='tempreleases',
        x_ms_blob_public_access='container')

    print "Processing releases."
    for url in release_urls:

        print "Downloading {0}.".format(url)

        if "concourse" in url:
            release_url = "https://s3-us-west-2.amazonaws.com/bosh-azure-releases/concourse.zip"
            res = urllib2.urlopen(release_url)
        else:
            res = authorizedPost(url, pivnetAPIToken)

        code = res.getcode()

        length = int(res.headers["Content-Length"])

        # content-length
        if code == 200:

            total = 0
            pcent = 0.0
            CHUNK = 16 * 1024

            with tempfile.TemporaryFile() as temp:
                while True:
                    chunk = res.read(CHUNK)
                    total += len(chunk)
                    pcent = (float(total) / float(length)) * 100

                    sys.stdout.write(
                        "Download progress: %.2f%% (%.2fM)\r" %
                        (pcent, total / 1000000.0))
                    sys.stdout.flush()

                    if not chunk:
                        break

                    temp.write(chunk)

                print "Download complete."

                z = zipfile.ZipFile(temp)
                for name in z.namelist():
                    
                    # is this a release?
                    if is_release_file.match(name):

                        release_filename = "/tmp/{0}".format(name)

                        print "Unpacking {0}.".format(name)
                        z.extract(name, "/tmp")

                        print "Uploading {0} to Azure blob store".format(name)

                        blob_service.put_block_blob_from_path(
                            'tempreleases',
                            name,
                            "/tmp/{0}".format(name),
                            x_ms_blob_content_type='application/x-compressed'
                        )

                        os.unlink(release_filename)
                        blob_url = "http://{0}.blob.core.windows.net/{1}/{2}".format(
                            storage_account_name, 'tempreleases', name)

                        print "Uploading release {0} to BOSH director.".format(name)

                        task_id = client.upload_release(blob_url)
                        client.wait_for_task(task_id)

                z.close()
                temp.close()

    blob_service.delete_container("tempreleases")

    # stemcells
    print "Processing stemcells."

    for url in stemcell_urls:
        print "Processing stemcell {0}".format(url)
        task_id = client.upload_stemcell(url)
        client.wait_for_task(task_id)

    return context
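
The release loop in do_step above streams each download into a temporary file while printing progress. Here is a hedged, self-contained sketch of that chunked read-with-progress loop; response, length and dest are stand-ins for res, its Content-Length header and the temp file, and the helper name is invented for the example.

import io
import sys

def download_with_progress(response, length, dest, chunk_size=16 * 1024):
    # Read fixed-size chunks until EOF, counting the bytes actually read.
    total = 0
    while True:
        chunk = response.read(chunk_size)
        if not chunk:
            break
        dest.write(chunk)
        total += len(chunk)
        sys.stdout.write("Download progress: %.2f%% (%.2fM)\r" %
                         (100.0 * total / length, total / 1000000.0))
        sys.stdout.flush()
    return total

# Usage with in-memory stand-ins instead of a real HTTP response:
source = io.BytesIO(b'x' * 100000)
target = io.BytesIO()
assert download_with_progress(source, 100000, target) == 100000
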
Example n. 49
0
import logging
from logging.handlers import RotatingFileHandler

app = Flask(__name__)

service_name = 'azure-storage'
vcap_services = json.loads(os.environ['VCAP_SERVICES'])
account_name = vcap_services[service_name][0]['credentials'][
    'storage_account_name']
account_key = vcap_services[service_name][0]['credentials'][
    'primary_access_key']
blob_service = BlobService(account_name, account_key)

container_name = '{0}{1}'.format('cloud-foundry-',
                                 str(uuid.uuid4()).replace('-', ''))
blob_service.create_container(container_name)


@app.route('/')
def index():
    blobs = blob_service.list_blobs(container_name)
    return render_template("index.html",
                           filenames=[blob.name for blob in blobs])


@app.route('/upload', methods=['POST'])
def upload():
    file = request.files['file']
    if not file or not file.filename:
        msg = 'The file to upload is invalid'
        blobs = blob_service.list_blobs(container_name)
# Get settings from CustomScriptForLinux extension configurations
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
hutil = Util.HandlerUtility(waagent.Log, waagent.Error, "bosh-deploy-script")
hutil.do_parse_context("enable")
settings = hutil.get_public_settings()
with open(os.path.join('bosh', 'settings'), "w") as tmpfile:
    tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))
username = settings["username"]
home_dir = os.path.join("/home", username)
install_log = os.path.join(home_dir, "install.log")

# Prepare the containers
storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
storage_access_key = settings["STORAGE-ACCESS-KEY"]
blob_service = BlobService(storage_account_name, storage_access_key)
blob_service.create_container('bosh')
blob_service.create_container(container_name='stemcell',
                              x_ms_blob_public_access='blob')

# Prepare the table for storing metadata of the storage account and stemcells
table_service = TableService(storage_account_name, storage_access_key)
table_service.create_table('stemcells')

# Generate the private key and certificate
call("sh create_cert.sh", shell=True)
call("cp bosh.key ./bosh/bosh", shell=True)
with open('bosh_cert.pem', 'r') as tmpfile:
    ssh_cert = tmpfile.read()
ssh_cert = "|\n" + ssh_cert
ssh_cert = "\n        ".join([line for line in ssh_cert.split('\n')])
Example n. 51
0
class AzureJobStore(AbstractJobStore):
    """
    A job store that uses Azure's blob store for file storage and
    Table Service to store job info with strong consistency."""
    def __init__(self,
                 accountName,
                 namePrefix,
                 config=None,
                 jobChunkSize=maxAzureTablePropertySize):
        self.jobChunkSize = jobChunkSize
        self.keyPath = None

        self.account_key = _fetchAzureAccountKey(accountName)

        # Table names have strict requirements in Azure
        self.namePrefix = self._sanitizeTableName(namePrefix)
        logger.debug("Creating job store with name prefix '%s'" %
                     self.namePrefix)

        # These are the main API entrypoints.
        self.tableService = TableService(account_key=self.account_key,
                                         account_name=accountName)
        self.blobService = BlobService(account_key=self.account_key,
                                       account_name=accountName)

        # Register our job-store in the global table for this storage account
        self.registryTable = self._getOrCreateTable('toilRegistry')
        exists = self.registryTable.get_entity(row_key=self.namePrefix)
        self._checkJobStoreCreation(config is not None, exists,
                                    accountName + ":" + self.namePrefix)
        self.registryTable.insert_or_replace_entity(row_key=self.namePrefix,
                                                    entity={'exists': True})

        # Serialized jobs table
        self.jobItems = self._getOrCreateTable(self.qualify('jobs'))
        # Job<->file mapping table
        self.jobFileIDs = self._getOrCreateTable(self.qualify('jobFileIDs'))

        # Container for all shared and unshared files
        self.files = self._getOrCreateBlobContainer(self.qualify('files'))

        # Stats and logging strings
        self.statsFiles = self._getOrCreateBlobContainer(
            self.qualify('statsfiles'))
        # File IDs that contain stats and logging strings
        self.statsFileIDs = self._getOrCreateTable(
            self.qualify('statsFileIDs'))

        super(AzureJobStore, self).__init__(config=config)

        if self.config.cseKey is not None:
            self.keyPath = self.config.cseKey

    # Table names must be alphanumeric
    nameSeparator = 'xx'

    # Length of a jobID - used to test if a stats file has been read already or not
    jobIDLength = len(str(uuid.uuid4()))

    def qualify(self, name):
        return self.namePrefix + self.nameSeparator + name

    def jobs(self):

        # How many jobs have we done?
        total_processed = 0

        for jobEntity in self.jobItems.query_entities_auto():
            # Process the items in the page
            yield AzureJob.fromEntity(jobEntity)
            total_processed += 1

            if total_processed % 1000 == 0:
                # Produce some feedback for the user, because this can take
                # a long time on, for example, Azure
                logger.info("Processed %d total jobs" % total_processed)

        logger.info("Processed %d total jobs" % total_processed)

    def create(self, command, memory, cores, disk, predecessorNumber=0):
        jobStoreID = self._newJobID()
        job = AzureJob(jobStoreID=jobStoreID,
                       command=command,
                       memory=memory,
                       cores=cores,
                       disk=disk,
                       remainingRetryCount=self._defaultTryCount(),
                       logJobStoreFileID=None,
                       predecessorNumber=predecessorNumber)
        entity = job.toItem(chunkSize=self.jobChunkSize)
        entity['RowKey'] = jobStoreID
        self.jobItems.insert_entity(entity=entity)
        return job

    def exists(self, jobStoreID):
        if self.jobItems.get_entity(row_key=jobStoreID) is None:
            return False
        return True

    def load(self, jobStoreID):
        jobEntity = self.jobItems.get_entity(row_key=jobStoreID)
        if jobEntity is None:
            raise NoSuchJobException(jobStoreID)
        return AzureJob.fromEntity(jobEntity)

    def update(self, job):
        self.jobItems.update_entity(
            row_key=job.jobStoreID,
            entity=job.toItem(chunkSize=self.jobChunkSize))

    def delete(self, jobStoreID):
        try:
            self.jobItems.delete_entity(row_key=jobStoreID)
        except WindowsAzureMissingResourceError:
            # Job deletion is idempotent, and this job has been deleted already
            return
        filterString = "PartitionKey eq '%s'" % jobStoreID
        for fileEntity in self.jobFileIDs.query_entities(filter=filterString):
            jobStoreFileID = fileEntity.RowKey
            self.deleteFile(jobStoreFileID)

    def deleteJobStore(self):
        self.registryTable.delete_entity(row_key=self.namePrefix)
        self.jobItems.delete_table()
        self.jobFileIDs.delete_table()
        self.files.delete_container()
        self.statsFiles.delete_container()
        self.statsFileIDs.delete_table()

    def getEnv(self):
        return dict(AZURE_ACCOUNT_KEY=self.account_key)

    def writeFile(self, localFilePath, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.updateFile(jobStoreFileID, localFilePath)
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    def updateFile(self, jobStoreFileID, localFilePath):
        with open(localFilePath) as read_fd:
            with self._uploadStream(jobStoreFileID, self.files) as write_fd:
                while True:
                    buf = read_fd.read(self._maxAzureBlockBytes)
                    write_fd.write(buf)
                    if len(buf) == 0:
                        break

    def readFile(self, jobStoreFileID, localFilePath):
        try:
            with self._downloadStream(jobStoreFileID, self.files) as read_fd:
                with open(localFilePath, 'w') as write_fd:
                    while True:
                        buf = read_fd.read(self._maxAzureBlockBytes)
                        write_fd.write(buf)
                        if not buf: break
        except WindowsAzureMissingResourceError:
            raise NoSuchFileException(jobStoreFileID)

    def deleteFile(self, jobStoreFileID):
        try:
            self.files.delete_blob(blob_name=jobStoreFileID)
            self._dissociateFileFromJob(jobStoreFileID)
        except WindowsAzureMissingResourceError:
            pass

    def fileExists(self, jobStoreFileID):
        # As Azure doesn't have a blob_exists method (at least in the
        # python API) we just try to download the metadata, and hope
        # the metadata is small so the call will be fast.
        try:
            self.files.get_blob_metadata(blob_name=jobStoreFileID)
            return True
        except WindowsAzureMissingResourceError:
            return False

    @contextmanager
    def writeFileStream(self, jobStoreID=None):
        # TODO: this (and all stream methods) should probably use the
        # Append Blob type, but that is not currently supported by the
        # Azure Python API.
        jobStoreFileID = self._newFileID()
        with self._uploadStream(jobStoreFileID, self.files) as fd:
            yield fd, jobStoreFileID
        self._associateFileWithJob(jobStoreFileID, jobStoreID)

    @contextmanager
    def updateFileStream(self, jobStoreFileID):
        with self._uploadStream(jobStoreFileID,
                                self.files,
                                checkForModification=True) as fd:
            yield fd

    def getEmptyFileStoreID(self, jobStoreID=None):
        jobStoreFileID = self._newFileID()
        self.files.put_blob(blob_name=jobStoreFileID,
                            blob='',
                            x_ms_blob_type='BlockBlob')
        self._associateFileWithJob(jobStoreFileID, jobStoreID)
        return jobStoreFileID

    @contextmanager
    def readFileStream(self, jobStoreFileID):
        if not self.fileExists(jobStoreFileID):
            raise NoSuchFileException(jobStoreFileID)
        with self._downloadStream(jobStoreFileID, self.files) as fd:
            yield fd

    @contextmanager
    def writeSharedFileStream(self, sharedFileName, isProtected=None):
        sharedFileID = self._newFileID(sharedFileName)
        with self._uploadStream(sharedFileID,
                                self.files,
                                encrypted=isProtected) as fd:
            yield fd

    @contextmanager
    def readSharedFileStream(self, sharedFileName):
        sharedFileID = self._newFileID(sharedFileName)
        if not self.fileExists(sharedFileID):
            raise NoSuchFileException(sharedFileID)
        with self._downloadStream(sharedFileID, self.files) as fd:
            yield fd

    def writeStatsAndLogging(self, statsAndLoggingString):
        # TODO: would be a great use case for the append blobs, once implemented in the Azure SDK
        jobStoreFileID = self._newFileID()
        encrypted = self.keyPath is not None
        if encrypted:
            statsAndLoggingString = encryption.encrypt(statsAndLoggingString,
                                                       self.keyPath)
        self.statsFiles.put_block_blob_from_text(
            blob_name=jobStoreFileID,
            text=statsAndLoggingString,
            x_ms_meta_name_values=dict(encrypted=str(encrypted)))
        self.statsFileIDs.insert_entity(entity={'RowKey': jobStoreFileID})

    def readStatsAndLogging(self, callback, readAll=False):
        suffix = '_old'
        numStatsFiles = 0
        for entity in self.statsFileIDs.query_entities():
            jobStoreFileID = entity.RowKey
            hasBeenRead = len(jobStoreFileID) > self.jobIDLength
            if not hasBeenRead:
                with self._downloadStream(jobStoreFileID,
                                          self.statsFiles) as fd:
                    callback(fd)
                # Mark this entity as read by appending the suffix
                self.statsFileIDs.insert_entity(
                    entity={'RowKey': jobStoreFileID + suffix})
                self.statsFileIDs.delete_entity(row_key=jobStoreFileID)
                numStatsFiles += 1
            elif readAll:
                # Strip the suffix to get the original ID
                jobStoreFileID = jobStoreFileID[:-len(suffix)]
                with self._downloadStream(jobStoreFileID,
                                          self.statsFiles) as fd:
                    callback(fd)
                numStatsFiles += 1
        return numStatsFiles

    _azureTimeFormat = "%Y-%m-%dT%H:%M:%SZ"

    def getPublicUrl(self, jobStoreFileID):
        try:
            self.files.get_blob_properties(blob_name=jobStoreFileID)
        except WindowsAzureMissingResourceError:
            raise NoSuchFileException(jobStoreFileID)
        # Compensate for a little bit of clock skew
        startTimeStr = (datetime.utcnow() - timedelta(minutes=5)).strftime(
            self._azureTimeFormat)
        endTime = datetime.utcnow() + self.publicUrlExpiration
        endTimeStr = endTime.strftime(self._azureTimeFormat)
        sap = SharedAccessPolicy(
            AccessPolicy(startTimeStr, endTimeStr,
                         BlobSharedAccessPermissions.READ))
        sas_token = self.files.generate_shared_access_signature(
            blob_name=jobStoreFileID, shared_access_policy=sap)
        return self.files.make_blob_url(
            blob_name=jobStoreFileID) + '?' + sas_token

    def getSharedPublicUrl(self, sharedFileName):
        jobStoreFileID = self._newFileID(sharedFileName)
        return self.getPublicUrl(jobStoreFileID)

    def _newJobID(self):
        # raw UUIDs don't work for Azure property names because the '-' character is disallowed.
        return str(uuid.uuid4()).replace('-', '_')

    # A dummy job ID under which all shared files are stored.
    sharedFileJobID = uuid.UUID('891f7db6-e4d9-4221-a58e-ab6cc4395f94')

    def _newFileID(self, sharedFileName=None):
        if sharedFileName is None:
            ret = str(uuid.uuid4())
        else:
            ret = str(uuid.uuid5(self.sharedFileJobID, str(sharedFileName)))
        return ret.replace('-', '_')

    def _associateFileWithJob(self, jobStoreFileID, jobStoreID=None):
        if jobStoreID is not None:
            self.jobFileIDs.insert_entity(entity={
                'PartitionKey': jobStoreID,
                'RowKey': jobStoreFileID
            })

    def _dissociateFileFromJob(self, jobStoreFileID):
        entities = self.jobFileIDs.query_entities(filter="RowKey eq '%s'" %
                                                  jobStoreFileID)
        if entities:
            assert len(entities) == 1
            jobStoreID = entities[0].PartitionKey
            self.jobFileIDs.delete_entity(partition_key=jobStoreID,
                                          row_key=jobStoreFileID)

    def _getOrCreateTable(self, tableName):
        # This will not fail if the table already exists.
        for attempt in retry_on_error():
            with attempt:
                self.tableService.create_table(tableName)
        return AzureTable(self.tableService, tableName)

    def _getOrCreateBlobContainer(self, containerName):
        for attempt in retry_on_error():
            with attempt:
                self.blobService.create_container(containerName)
        return AzureBlobContainer(self.blobService, containerName)

    def _sanitizeTableName(self, tableName):
        """
        Azure table names must start with a letter and be alphanumeric.

        This will never cause a collision if uuids are used, but
        otherwise may not be safe.
        """
        return 'a' + filter(lambda x: x.isalnum(), tableName)

    # Maximum bytes that can be in any block of an Azure block blob
    # https://github.com/Azure/azure-storage-python/blob/4c7666e05a9556c10154508335738ee44d7cb104/azure/storage/blob/blobservice.py#L106
    _maxAzureBlockBytes = 4 * 1024 * 1024

    @contextmanager
    def _uploadStream(self,
                      jobStoreFileID,
                      container,
                      checkForModification=False,
                      encrypted=None):
        """
        :param encrypted: True to enforce encryption (will raise an exception unless a key is set),
        False to prevent encryption, or None to encrypt if a key is set.
        """
        if checkForModification:
            try:
                expectedVersion = container.get_blob_properties(
                    blob_name=jobStoreFileID)['etag']
            except WindowsAzureMissingResourceError:
                expectedVersion = None

        if encrypted is None:
            encrypted = self.keyPath is not None
        elif encrypted:
            if self.keyPath is None:
                raise RuntimeError(
                    'Encryption requested but no key was provided')

        maxBlockSize = self._maxAzureBlockBytes
        if encrypted:
            # There is a small overhead for encrypted data.
            maxBlockSize -= encryption.overhead
        readable_fh, writable_fh = os.pipe()
        with os.fdopen(readable_fh, 'r') as readable:
            with os.fdopen(writable_fh, 'w') as writable:

                def reader():
                    blockIDs = []
                    try:
                        while True:
                            buf = readable.read(maxBlockSize)
                            if len(buf) == 0:
                                # We're safe to break here even if we never read anything, since
                                # putting an empty block list creates an empty blob.
                                break
                            if encrypted:
                                buf = encryption.encrypt(buf, self.keyPath)
                            blockID = self._newFileID()
                            container.put_block(blob_name=jobStoreFileID,
                                                block=buf,
                                                blockid=blockID)
                            blockIDs.append(blockID)
                    except:
                        # This is guaranteed to delete any uncommitted
                        # blocks.
                        container.delete_blob(blob_name=jobStoreFileID)
                        raise

                    if checkForModification and expectedVersion is not None:
                        # Acquire a (60-second) write lock,
                        leaseID = container.lease_blob(
                            blob_name=jobStoreFileID,
                            x_ms_lease_action='acquire')['x-ms-lease-id']
                        # check for modification,
                        blobProperties = container.get_blob_properties(
                            blob_name=jobStoreFileID)
                        if blobProperties['etag'] != expectedVersion:
                            container.lease_blob(blob_name=jobStoreFileID,
                                                 x_ms_lease_action='release',
                                                 x_ms_lease_id=leaseID)
                            raise ConcurrentFileModificationException(
                                jobStoreFileID)
                        # commit the file,
                        container.put_block_list(blob_name=jobStoreFileID,
                                                 block_list=blockIDs,
                                                 x_ms_lease_id=leaseID,
                                                 x_ms_meta_name_values=dict(
                                                     encrypted=str(encrypted)))
                        # then release the lock.
                        container.lease_blob(blob_name=jobStoreFileID,
                                             x_ms_lease_action='release',
                                             x_ms_lease_id=leaseID)
                    else:
                        # No need to check for modification, just blindly write over whatever
                        # was there.
                        container.put_block_list(blob_name=jobStoreFileID,
                                                 block_list=blockIDs,
                                                 x_ms_meta_name_values=dict(
                                                     encrypted=str(encrypted)))

                thread = ExceptionalThread(target=reader)
                thread.start()
                yield writable
            # The writable is now closed. This will send EOF to the readable and cause that
            # thread to finish.
            thread.join()

    @contextmanager
    def _downloadStream(self, jobStoreFileID, container):
        # The reason this is not in the writer is so that we catch non-existent blobs early
        blobProps = container.get_blob_properties(blob_name=jobStoreFileID)

        encrypted = strict_bool(blobProps['x-ms-meta-encrypted'])
        if encrypted and self.keyPath is None:
            raise AssertionError(
                'Content is encrypted but no key was provided.')

        readable_fh, writable_fh = os.pipe()
        with os.fdopen(readable_fh, 'r') as readable:
            with os.fdopen(writable_fh, 'w') as writable:

                def writer():
                    try:
                        chunkStartPos = 0
                        fileSize = int(blobProps['Content-Length'])
                        while chunkStartPos < fileSize:
                            chunkEndPos = chunkStartPos + self._maxAzureBlockBytes - 1
                            buf = container.get_blob(
                                blob_name=jobStoreFileID,
                                x_ms_range="bytes=%d-%d" %
                                (chunkStartPos, chunkEndPos))
                            if encrypted:
                                buf = encryption.decrypt(buf, self.keyPath)
                            writable.write(buf)
                            chunkStartPos = chunkEndPos + 1
                    finally:
                        # Ensure readers aren't left blocking if this thread crashes.
                        # This close() will send EOF to the reading end and ultimately cause the
                        # yield to return. It also makes the implicit .close() done by the enclosing
                        # "with" context redundant, but that should be OK since .close() on file
                        # objects is idempotent.
                        writable.close()

                thread = ExceptionalThread(target=writer)
                thread.start()
                yield readable
                thread.join()
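
One detail of the job store worth calling out: _newFileID derives deterministic IDs for shared files by hashing the shared file name under the dummy sharedFileJobID namespace with uuid.uuid5, while ordinary files get random uuid.uuid4 IDs. The snippet below repeats that scheme outside the class as a self-contained illustration; the function and constant names are chosen for the example.

import uuid

SHARED_FILE_JOB_ID = uuid.UUID('891f7db6-e4d9-4221-a58e-ab6cc4395f94')

def new_file_id(shared_file_name=None):
    if shared_file_name is None:
        raw = uuid.uuid4()  # random ID for a regular file
    else:
        raw = uuid.uuid5(SHARED_FILE_JOB_ID, shared_file_name)  # same name, same ID
    # '-' is not allowed in Azure table property names, so it is replaced.
    return str(raw).replace('-', '_')

assert new_file_id('config.pickle') == new_file_id('config.pickle')
assert new_file_id() != new_file_id()
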
class AzureBackend(duplicity.backend.Backend):
    u"""
    Backend for Azure Blob Storage Service
    """
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)

        # Import Microsoft Azure Storage SDK for Python library.
        try:
            import azure
            import azure.storage
            if hasattr(azure.storage, u'BlobService'):
                # v0.11.1 and below
                from azure.storage import BlobService
                self.AzureMissingResourceError = azure.WindowsAzureMissingResourceError
                self.AzureConflictError = azure.WindowsAzureConflictError
            else:
                # v1.0.0 and above
                import azure.storage.blob
                if hasattr(azure.storage.blob, u'BlobService'):
                    from azure.storage.blob import BlobService
                else:
                    from azure.storage.blob.blockblobservice import BlockBlobService as BlobService
                self.AzureMissingResourceError = azure.common.AzureMissingResourceHttpError
                self.AzureConflictError = azure.common.AzureConflictHttpError
        except ImportError as e:
            raise BackendException(u"""\
Azure backend requires Microsoft Azure Storage SDK for Python (https://pypi.python.org/pypi/azure-storage/).
Exception: %s""" % str(e))

        # TODO: validate container name
        self.container = parsed_url.path.lstrip(u'/')

        if u'AZURE_ACCOUNT_NAME' not in os.environ:
            raise BackendException(u'AZURE_ACCOUNT_NAME environment variable not set.')

        if u'AZURE_ACCOUNT_KEY' in os.environ:
            if u'AZURE_ENDPOINT_SUFFIX' in os.environ:
                self.blob_service = BlobService(account_name=os.environ[u'AZURE_ACCOUNT_NAME'],
                                                account_key=os.environ[u'AZURE_ACCOUNT_KEY'],
                                                endpoint_suffix=os.environ[u'AZURE_ENDPOINT_SUFFIX'])
            else:
                self.blob_service = BlobService(account_name=os.environ[u'AZURE_ACCOUNT_NAME'],
                                                account_key=os.environ[u'AZURE_ACCOUNT_KEY'])
            self._create_container()
        elif u'AZURE_SHARED_ACCESS_SIGNATURE' in os.environ:
            if u'AZURE_ENDPOINT_SUFFIX' in os.environ:
                self.blob_service = BlobService(account_name=os.environ[u'AZURE_ACCOUNT_NAME'],
                                                sas_token=os.environ[u'AZURE_SHARED_ACCESS_SIGNATURE'],
                                                endpoint_suffix=os.environ[u'AZURE_ENDPOINT_SUFFIX'])
            else:
                self.blob_service = BlobService(account_name=os.environ[u'AZURE_ACCOUNT_NAME'],
                                                sas_token=os.environ[u'AZURE_SHARED_ACCESS_SIGNATURE'])
        else:
            raise BackendException(
                u'Neither AZURE_ACCOUNT_KEY nor AZURE_SHARED_ACCESS_SIGNATURE environment variable is set.')

        if globals.azure_max_single_put_size:
            # check if we use azure-storage>=0.30.0
            try:
                _ = self.blob_service.MAX_SINGLE_PUT_SIZE
                self.blob_service.MAX_SINGLE_PUT_SIZE = globals.azure_max_single_put_size
            # fallback for azure-storage<0.30.0
            except AttributeError:
                self.blob_service._BLOB_MAX_DATA_SIZE = globals.azure_max_single_put_size

        if globals.azure_max_block_size:
            # check if we use azure-storage>=0.30.0
            try:
                _ = self.blob_service.MAX_BLOCK_SIZE
                self.blob_service.MAX_BLOCK_SIZE = globals.azure_max_block_size
            # fallback for azure-storage<0.30.0
            except AttributeError:
                self.blob_service._BLOB_MAX_CHUNK_DATA_SIZE = globals.azure_max_block_size

    def _create_container(self):
        try:
            self.blob_service.create_container(self.container, fail_on_exist=True)
        except self.AzureConflictError:
            # Indicates that the resource could not be created because it already exists.
            pass
        except Exception as e:
            log.FatalError(u"Could not create Azure container: %s"
                           % str(e.message).split(u'\n', 1)[0],
                           log.ErrorCode.connection_failed)

    def _put(self, source_path, remote_filename):
        remote_filename = fsdecode(remote_filename)
        kwargs = {}
        if globals.azure_max_connections:
            kwargs[u'max_connections'] = globals.azure_max_connections

        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#upload-a-blob-into-a-container
        try:
            self.blob_service.create_blob_from_path(self.container, remote_filename, source_path.name, **kwargs)
        except AttributeError:  # Old versions use a different method name
            self.blob_service.put_block_blob_from_path(self.container, remote_filename, source_path.name, **kwargs)

        self._set_tier(remote_filename)

    def _set_tier(self, remote_filename):
        if globals.azure_blob_tier is not None:
            try:
                self.blob_service.set_standard_blob_tier(self.container, remote_filename, globals.azure_blob_tier)
            except AttributeError:  # might not be available in old API
                pass

    def _get(self, remote_filename, local_path):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#download-blobs
        self.blob_service.get_blob_to_path(self.container, fsdecode(remote_filename), local_path.name)

    def _list(self):
        # https://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#list-the-blobs-in-a-container
        blobs = []
        marker = None
        while True:
            batch = self.blob_service.list_blobs(self.container, marker=marker)
            blobs.extend(batch)
            if not batch.next_marker:
                break
            marker = batch.next_marker
        return [blob.name for blob in blobs]

    def _delete(self, filename):
        # http://azure.microsoft.com/en-us/documentation/articles/storage-python-how-to-use-blob-storage/#delete-blobs
        self.blob_service.delete_blob(self.container, fsdecode(filename))

    def _query(self, filename):
        prop = self.blob_service.get_blob_properties(self.container, fsdecode(filename))
        try:
            info = {u'size': int(prop.properties.content_length)}
        except AttributeError:
            # old versions directly returned the properties
            info = {u'size': int(prop[u'content-length'])}
        return info

    def _error_code(self, operation, e):
        if isinstance(e, self.AzureMissingResourceError):
            return log.ErrorCode.backend_not_found