def deprovision(instance_id):
    """
    Deprovision an existing instance of this service

    DELETE /v2/service_instances/<instance_id>:
        <instance_id> is the Cloud Controller provided
          value used to provision the instance

    return:
        As of API 2.3, an empty JSON document
        is expected
    """
    global subscription_id
    global cert
    global account_name
    global account_key

    if account_name and account_key:
        blob_service = BlobService(account_name, account_key)
        container_name = '{0}-{1}'.format(CONTAINER_NAME_PREFIX, instance_id)
        blob_service.delete_container(container_name)

        if account_name.startswith(STORAGE_ACCOUNT_NAME_PREFIX):
            sms = ServiceManagementService(subscription_id, cert)  # 'cert' is the management certificate declared as a global above
            sms.delete_storage_account(account_name)

    return jsonify({})
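A minimal sketch of wiring this handler to the DELETE route named in the docstring, assuming a Flask app; the `app` object, route decorator, and wrapper name below are assumptions, not part of the original broker:

# Hypothetical wiring, assuming Flask; the original project defines its own routing.
from flask import Flask

app = Flask(__name__)

@app.route('/v2/service_instances/<instance_id>', methods=['DELETE'])
def handle_deprovision(instance_id):
    return deprovision(instance_id)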
Code example #2
File: AzureInstance.py  Project: Hawkgirl/ext_cloud
    def delete(self):

        properties = self.__SMS.get_deployment_by_name(self.name, self.name)
        media_link = properties.role_list.roles[0].os_virtual_hard_disk.media_link
        storage_name = media_link[media_link.find("//") + 2:media_link.find(".blob")]

        from Azure.AzureVolumes.AzureVolumes import AzureVolumescls
        volume_service = AzureVolumescls(credentials=self._credentials)
        volumes = volume_service.list_volumes()
        volume_to_be_deleted = None
        for volume in volumes:
            if volume.instance_id == self.name:
                volume_to_be_deleted = volume
                break

        self.__SMS.delete_deployment(self.name, self.name)
        self.__SMS.delete_hosted_service(self.name)
        if volume_to_be_deleted is not None:  # guard: no matching volume may exist
            volume_to_be_deleted.delete()
        # delete image from storage
        from azure.storage import BlobService

        keys = self.__SMS.get_storage_account_keys(storage_name)
        blob_service = BlobService(account_name=storage_name, account_key=keys.storage_service_keys.primary)

        blob_service.delete_container(self.name, fail_not_exist=True)
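The slicing on media_link extracts the storage account name from the OS disk URL; a quick standalone illustration, using a hypothetical URL of the usual <account>.blob.core.windows.net form:

# Hypothetical media_link, showing what the slicing in delete() recovers.
media_link = "https://mystorageacct.blob.core.windows.net/vhds/os-disk.vhd"
storage_name = media_link[media_link.find("//") + 2:media_link.find(".blob")]
assert storage_name == "mystorageacct"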
Code example #3
File: test_urls.py  Project: KuduApps/PythonApp
def test_azure_call(request):
    import os
    from django.http import HttpResponse  # imported at module level in the original project
    try:
        from azure.storage import BlobService
        bs = BlobService(os.environ["AZURE_STORAGE_ACCOUNT"], os.environ["AZURE_STORAGE_ACCESS_KEY"])
        import random
        container_name = hex(int(random.random() * 1000000000))

        bs.create_container(container_name)
        bs.put_blob(container_name, 'testblob', 'hello world\n', 'BlockBlob')
        blob = bs.get_blob(container_name, 'testblob')
        if blob != 'hello world\n':
            return HttpResponse("Failed!", status=404)
        
        bs.delete_blob(container_name, 'testblob')
        bs.delete_container(container_name)

        return HttpResponse("Succeeded!")
    except:
        try:
            import traceback
        
            return HttpResponse(traceback.format_exc() + str(os.environ.keys()))
        except:
            import traceback
            return HttpResponse(traceback.format_exc())
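For a local run, the two environment variables the view reads must be set beforehand; a sketch with placeholder values (the names match the os.environ lookups above, the values are not real credentials):

# Placeholder credentials for local testing; real values come from the
# storage account's access keys in the Azure portal.
import os
os.environ.setdefault("AZURE_STORAGE_ACCOUNT", "mystorageacct")
os.environ.setdefault("AZURE_STORAGE_ACCESS_KEY", "<base64-account-key>")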
Code example #4
def do_step(context):
    settings = context.meta['settings']
    index_file = context.meta['index-file']
    pivnetAPIToken = settings["pivnet-api-token"]

    f = open("manifests/{0}".format(index_file))
    manifests = yaml.safe_load(f)
    f.close()

    eula_urls = [
        "https://network.pivotal.io/api/v2/products/{0}/releases/{1}/eula_acceptance".format(
            m['release-name'],
            m['release-number']) for m in manifests['manifests']]

    release_urls = [
        "https://network.pivotal.io/api/v2/products/{0}/releases/{1}/product_files/{2}/download".format(
            m['release-name'],
            m['release-number'],
            m['file-number']) for m in manifests['manifests']]

    stemcell_urls = [m['stemcell'] for m in manifests['manifests']]

    # accept eula for each product
    for url in eula_urls:
        print url
        if not "concourse" in url:
            res = authorizedPost(url, pivnetAPIToken)
            code = res.getcode()

    # releases
    is_release_file = re.compile(r"^releases/.+")
    if not os.path.exists("/tmp/releases"):
        os.makedirs("/tmp/releases")

    client = bosh_client.BoshClient("https://10.0.0.4:25555", "admin", "admin")
    storage_account_name = settings["STORAGE-ACCOUNT-NAME"]
    storage_access_key = settings["STORAGE-ACCESS-KEY"]

    blob_service = BlobService(storage_account_name, storage_access_key)
    blob_service.create_container(
        container_name='tempreleases',
        x_ms_blob_public_access='container')

    print "Processing releases."
    for url in release_urls:

        print "Downloading {0}.".format(url)

        if "concourse" in url:
            release_url = "https://s3-us-west-2.amazonaws.com/bosh-azure-releases/concourse.zip"
            res = urllib2.urlopen(release_url)
        else:
            res = authorizedPost(url, pivnetAPIToken)

        code = res.getcode()

        length = int(res.headers["Content-Length"])

        # content-length
        if code == 200:

            total = 0
            pcent = 0.0
            CHUNK = 16 * 1024

            with tempfile.TemporaryFile() as temp:
                while True:
                    chunk = res.read(CHUNK)
                    total += len(chunk)  # count bytes actually read; the final read may be short
                    pcent = (float(total) / float(length)) * 100

                    sys.stdout.write(
                        "Download progress: %.2f%% (%.2fM)\r" %
                        (pcent, total / 1000000.0))
                    sys.stdout.flush()

                    if not chunk:
                        break

                    temp.write(chunk)

                print "Download complete."

                z = zipfile.ZipFile(temp)
                for name in z.namelist():
                    
                    # is this a release?
                    if is_release_file.match(name):

                        release_filename = "/tmp/{0}".format(name)

                        print "Unpacking {0}.".format(name)
                        z.extract(name, "/tmp")

                        print "Uploading {0} to Azure blob store".format(name)

                        blob_service.put_block_blob_from_path(
                            'tempreleases',
                            name,
                            "/tmp/{0}".format(name),
                            x_ms_blob_content_type='application/x-compressed'
                        )

                        os.unlink(release_filename)
                        blob_url = "http://{0}.blob.core.windows.net/{1}/{2}".format(
                            storage_account_name, 'tempreleases', name)

                        print "Uploading release {0} to BOSH director.".format(name)

                        task_id = client.upload_release(blob_url)
                        client.wait_for_task(task_id)

                z.close()
                temp.close()

    blob_service.delete_container("tempreleases")

    # stemcells
    print "Processing stemcells."

    for url in stemcell_urls:
        print "Processing stemcell {0}".format(url)
        task_id = client.upload_stemcell(url)
        client.wait_for_task(task_id)

    return context
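`authorizedPost` is used above but defined elsewhere in the project; a plausible minimal reconstruction, assuming the legacy Pivotal Network v2 token header (both the helper body and the header scheme are assumptions):

import urllib2

def authorizedPost(url, token):
    # Hypothetical sketch; the real helper lives elsewhere in this project.
    req = urllib2.Request(url, data="")  # a non-None body makes urllib2 send a POST
    req.add_header("Authorization", "Token {0}".format(token))
    return urllib2.urlopen(req)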
Code example #5
class AzureFS(LoggingMixIn, Operations):
    """Azure Blob Storage filesystem"""

    blobs = None
    containers = dict()  # <cname, dict(stat=dict, files=None|dict<fname, stat>)>
    fds = dict()  # <fd, (path, bytes, dirty)>
    fd = 0

    def __init__(self, account, key):
        self.blobs = BlobService(account, key)
        self.rebuild_container_list()

    def convert_to_epoch(self, date):
        """Converts Tue, 31 Jul 2012 07:17:34 GMT format to epoch"""
        return int(time.mktime(time.strptime(date, TIME_FORMAT)))

    def rebuild_container_list(self):
        cmap = dict()
        cnames = set()
        for c in self.blobs.list_containers():
            date = c.properties.last_modified
            cstat = dict(st_mode=(S_IFDIR | 0755), st_uid=getuid(), st_size=0,
                         st_mtime=self.convert_to_epoch(date))
            cname = c.name
            cmap['/' + cname] = dict(stat=cstat, files=None)
            cnames.add(cname)

        cmap['/'] = dict(files={},
                         stat=dict(st_mode=(S_IFDIR | 0755),
                                     st_uid=getuid(), st_size=0,
                                     st_mtime=int(time.time())))

        self.containers = cmap   # rebuilds the fs tree cache from scratch

    def _parse_path(self, path):    # returns </dir, file(=None)>
        if path.count('/') > 1:     # file
            return str(path[:path.rfind('/')]), str(path[path.rfind('/') + 1:])
        else:                       # dir
            pos = path.rfind('/', 1)
            if pos == -1:
                return path, None
            else:
                return str(path[:pos]), None

    def parse_container(self, path):
        base_container = path[1:]   # /abc/def/g --> abc
        if base_container.find('/') > -1:
            base_container = base_container[:base_container.find('/')]
        return str(base_container)

    def _get_dir(self, path, contents_required=False):
        if not self.containers:
            self.rebuild_container_list()

        if path in self.containers and not (contents_required and \
                self.containers[path]['files'] is None):
            return self.containers[path]

        cname = self.parse_container(path)

        if '/' + cname not in self.containers:
            raise FuseOSError(ENOENT)
        else:
            if self.containers['/' + cname]['files'] is None:
                # fetch contents of container
                log.info("------> CONTENTS NOT FOUND: %s" % cname)

                blobs = self.blobs.list_blobs(cname)

                dirstat = dict(st_mode=(S_IFDIR | 0755), st_size=0,
                               st_uid=getuid(), st_mtime=time.time())

                if self.containers['/' + cname]['files'] is None:
                    self.containers['/' + cname]['files'] = dict()

                for f in blobs:
                    blob_name = f.name
                    blob_date = f.properties.last_modified
                    blob_size = long(f.properties.content_length)

                    node = dict(st_mode=(S_IFREG | 0644), st_size=blob_size,
                                st_mtime=self.convert_to_epoch(blob_date),
                                st_uid=getuid())

                    if blob_name.find('/') == -1:  # file just under container
                        self.containers['/' + cname]['files'][blob_name] = node

            return self.containers['/' + cname]

    def _get_file(self, path):
        d, f = self._parse_path(path)
        dir = self._get_dir(d, True)
        if dir is not None and f in dir['files']:
            return dir['files'][f]

    def getattr(self, path, fh=None):
        d, f = self._parse_path(path)

        if f is None:
            dir = self._get_dir(d)
            return dir['stat']
        else:
            file = self._get_file(path)

            if file:
                return file

        raise FuseOSError(ENOENT)

    # FUSE
    def mkdir(self, path, mode):
        if path.count('/') <= 1:    # create on root
            name = path[1:]

            if not 3 <= len(name) <= 63:
                log.error("Container names can be 3 through 63 chars long.")
                raise FuseOSError(ENAMETOOLONG)
            if name != name.lower():
                log.error("Container names cannot contain uppercase characters.")
                raise FuseOSError(EACCES)
            if name.count('--') > 0:
                log.error("Container names cannot contain consecutive dashes (-).")
                raise FuseOSError(EAGAIN)
            #TODO handle all "-"s must be preceded by letter or numbers
            #TODO starts with only letter or number, can contain letter, nr,'-'

            resp = self.blobs.create_container(name)

            if resp:
                self.rebuild_container_list()
                log.info("CONTAINER %s CREATED" % name)
            else:
                log.error("Invalid container name or container already exists.")
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)  # TODO support 2nd+ level mkdirs

    def rmdir(self, path):
        if path.count('/') == 1:
            c_name = path[1:]
            resp = self.blobs.delete_container(c_name)

            if resp:
                if path in self.containers:
                    del self.containers[path]
            else:
                raise FuseOSError(EACCES)
        else:
            raise FuseOSError(ENOSYS)  # TODO support 2nd+ level rmdirs

    def create(self, path, mode):
        node = dict(st_mode=(S_IFREG | mode), st_size=0, st_nlink=1,
                     st_uid=getuid(), st_mtime=time.time())
        d, f = self._parse_path(path)

        if not f:
            log.error("Cannot create files on root level: /")
            raise FuseOSError(ENOSYS)

        dir = self._get_dir(d, True)
        if not dir:
            raise FuseOSError(EIO)
        dir['files'][f] = node

        return self.open(path, data='')     # reuse open() to allocate the fd

    def open(self, path, flags=0, data=None):
        if data is None:                    # download contents
            c_name = self.parse_container(path)
            f_name = path[path.find('/', 1) + 1:]

            try:
                data = self.blobs.get_blob(c_name, f_name)
            except WindowsAzureMissingResourceError:
                dir = self._get_dir('/' + c_name, True)
                if f_name in dir['files']:
                    del dir['files'][f_name]
                raise FuseOSError(ENOENT)
            except WindowsAzureError as e:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)

        self.fd += 1
        self.fds[self.fd] = (path, data, False)

        return self.fd

    def flush(self, path, fh=None):
        if not fh:
            raise FuseOSError(EIO)
        else:
            if fh not in self.fds:
                raise FuseOSError(EIO)
            path = self.fds[fh][0]
            data = self.fds[fh][1]
            dirty = self.fds[fh][2]

            if not dirty:
                return 0     # avoid redundant write

            d, f = self._parse_path(path)
            c_name = self.parse_container(path)

            if data is None:
                data = ''

            try:
                if len(data) < 64 * 1024 * 1024:   # 64 mb
                    self.blobs.put_blob(c_name, f, data, 'BlockBlob')
                else:
                    # divide file by blocks and upload
                    block_size = 8 * 1024 * 1024
                    num_blocks = int(math.ceil(len(data) * 1.0 / block_size))
                    rd = str(random.randint(1, 1e8))
                    block_ids = list()

                    for i in range(num_blocks):
                        part = data[i * block_size:min((i + 1) * block_size,
                            len(data))]
                        block_id = base64.encodestring('%s_%s' % (rd,
                            (8 - len(str(i))) * '0' + str(i)))
                        self.blobs.put_block(c_name, f, part, block_id)
                        block_ids.append(block_id)

                    self.blobs.put_block_list(c_name, f, block_ids)
            except WindowsAzureError:
                raise FuseOSError(EAGAIN)

            dir = self._get_dir(d, True)
            if not dir or f not in dir['files']:
                raise FuseOSError(EIO)

            # update local data
            dir['files'][f]['st_size'] = len(data)
            dir['files'][f]['st_mtime'] = time.time()
            self.fds[fh] = (path, data, False)  # mark as not dirty
            return 0

    def release(self, path, fh=None):
        if fh is not None and fh in self.fds:
            del self.fds[fh]

    def truncate(self, path, length, fh=None):
        return 0     # assume done, no need

    def write(self, path, data, offset, fh=None):
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)
        else:
            d = self.fds[fh][1]
            if d is None:
                d = ""
            self.fds[fh] = (self.fds[fh][0], d[:offset] + data, True)
            return len(data)

    def unlink(self, path):
        c_name = self.parse_container(path)
        d, f = self._parse_path(path)

        try:
            self.blobs.delete_blob(c_name, f)

            _dir = self._get_dir(path, True)
            if _dir and f in _dir['files']:
                del _dir['files'][f]
            return 0
        except WindowsAzureMissingResourceError:
            raise FuseOSError(ENOENT)
        except Exception as e:
            raise FuseOSError(EAGAIN)

    def readdir(self, path, fh):
        if path == '/':
            return ['.', '..'] + [x[1:] for x in self.containers.keys()
                                  if x != '/']

        dir = self._get_dir(path, True)
        if not dir:
            raise FuseOSError(ENOENT)
        return ['.', '..'] + dir['files'].keys()

    def read(self, path, size, offset, fh):
        if not fh or fh not in self.fds:
            raise FuseOSError(ENOENT)

        f_name = path[path.find('/', 1) + 1:]
        c_name = path[1:path.find('/', 1)]

        try:
            data = self.blobs.get_blob(c_name, f_name)
            self.fds[fh] = (self.fds[fh][0], data, False)
            return data[offset:offset + size]
        except URLError, e:
            if e.code == 404:
                raise FuseOSError(ENOENT)
            elif e.code == 403:
                raise FuseOSError(EPERM)
            else:
                log.error("Read blob failed HTTP %d" % e.code)
                raise FuseOSError(EAGAIN)
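The class plugs into fusepy, which supplies the LoggingMixIn and Operations bases used above; a minimal sketch of mounting it (account name, key, and mount point below are placeholders):

from fuse import FUSE

# Placeholder credentials and mount point; foreground=True keeps the
# process attached for easier debugging.
FUSE(AzureFS('mystorageacct', '<account-key>'), '/mnt/azure', foreground=True)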
Code example #6
def apathetic_container_delete(container_name, *args, **kwargs):
    conn = BlobService(*args, **kwargs)
    conn.delete_container(container_name)

    return conn
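Because *args and **kwargs pass straight through to BlobService, the account name and key go wherever its constructor expects them; a usage sketch with placeholder credentials:

# Deletes the container and returns the live connection for reuse.
conn = apathetic_container_delete('stale-container', 'mystorageacct', '<account-key>')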
Code example #7
File: dcu-fetch.py  Project: adulau/dcu-tools
blob_service = BlobService(account_name, account_key)

for container in blob_service.list_containers():
    c = container.name
    if c == "heartbeat": continue
    if options.date and not ( c == "processed-"+options.date ): continue
    if debug: sys.stderr.write("Processing container: "+str(c)+"\n")
    for b in blob_service.list_blobs(c):
        if debug: sys.stderr.write("Processing blob: "+str(b.name)+"\n")
        data = blob_service.get_blob(c, b.name)
        cs = StringIO.StringIO(data)
        gzipstream = gzip.GzipFile(fileobj=cs)
        if output_format == "txt":
            print gzipstream.read()
        elif output_format == "json":
            d = {}
            i = 0
            ds = gzipstream.read()
            # some DCU entries contain more than 28 values (beyond the
            # defined headers), so keep only the leading fields
            for x in ds.strip().split("\t")[:27]:
                d[headers[i]] = x
                i += 1
            print (json.dumps(d, sort_keys=True))
        if options.clear:
            if debug: sys.stderr.write("Deleting blob: "+str(b.name)+"\n")
            blob_service.delete_blob(c, b.name)
    if options.clear:
        if debug: sys.stderr.write("Deleting container: "+str(c)+"\n")
        blob_service.delete_container(c)
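The header/value pairing in the json branch could be written more compactly with zip, which drops the manual index counter; a behavior-preserving alternative to the loop above:

# Pairs each header with its field; fields beyond the slice are ignored.
d = dict(zip(headers, ds.strip().split("\t")[:27]))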
Code example #8
File: AzureCleaner.py  Project: tdong6/DBLikeProject
def delete_blobs():
    print "deleting blobs: " + ", ".join(BLOB_CONTAINERS)
    for account, key in zip(ACCOUNT, KEY):
        bs = BlobService(account, key)
        for container in BLOB_CONTAINERS:
            bs.delete_container(container)